repo_name (stringlengths 6-112) | path (stringlengths 4-204) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 714-810k) | license (stringclasses 15 values)
---|---|---|---|---|---|
michigraber/scikit-learn | examples/ensemble/plot_gradient_boosting_oob.py | 230 | 4762 | """
======================================
Gradient Boosting Out-of-Bag estimates
======================================
Out-of-bag (OOB) estimates can be a useful heuristic to estimate
the "optimal" number of boosting iterations.
OOB estimates are almost identical to cross-validation estimates but
they can be computed on-the-fly without the need for repeated model
fitting.
OOB estimates are only available for Stochastic Gradient Boosting
(i.e. ``subsample < 1.0``); the estimates are derived from the improvement
in loss based on the examples not included in the bootstrap sample
(the so-called out-of-bag examples).
The OOB estimator is a pessimistic estimator of the true
test loss, but remains a fairly good approximation for a small number of trees.
The figure shows the cumulative sum of the negative OOB improvements
as a function of the boosting iteration. As you can see, it tracks the test
loss for the first hundred iterations but then diverges in a
pessimistic way.
The figure also shows the performance of 3-fold cross validation which
usually gives a better estimate of the test loss
but is computationally more demanding.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn.cross_validation import KFold
from sklearn.cross_validation import train_test_split
# Generate data (adapted from G. Ridgeway's gbm example)
n_samples = 1000
random_state = np.random.RandomState(13)
x1 = random_state.uniform(size=n_samples)
x2 = random_state.uniform(size=n_samples)
x3 = random_state.randint(0, 4, size=n_samples)
p = 1 / (1.0 + np.exp(-(np.sin(3 * x1) - 4 * x2 + x3)))
y = random_state.binomial(1, p, size=n_samples)
X = np.c_[x1, x2, x3]
X = X.astype(np.float32)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5,
random_state=9)
# Fit classifier with out-of-bag estimates
params = {'n_estimators': 1200, 'max_depth': 3, 'subsample': 0.5,
'learning_rate': 0.01, 'min_samples_leaf': 1, 'random_state': 3}
clf = ensemble.GradientBoostingClassifier(**params)
clf.fit(X_train, y_train)
acc = clf.score(X_test, y_test)
print("Accuracy: {:.4f}".format(acc))
n_estimators = params['n_estimators']
x = np.arange(n_estimators) + 1
def heldout_score(clf, X_test, y_test):
"""compute deviance scores on ``X_test`` and ``y_test``. """
score = np.zeros((n_estimators,), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
score[i] = clf.loss_(y_test, y_pred)
return score
def cv_estimate(n_folds=3):
cv = KFold(n=X_train.shape[0], n_folds=n_folds)
cv_clf = ensemble.GradientBoostingClassifier(**params)
val_scores = np.zeros((n_estimators,), dtype=np.float64)
for train, test in cv:
cv_clf.fit(X_train[train], y_train[train])
val_scores += heldout_score(cv_clf, X_train[test], y_train[test])
val_scores /= n_folds
return val_scores
# Estimate best n_estimator using cross-validation
cv_score = cv_estimate(3)
# Compute best n_estimator for test data
test_score = heldout_score(clf, X_test, y_test)
# negative cumulative sum of oob improvements
cumsum = -np.cumsum(clf.oob_improvement_)
# min loss according to OOB
oob_best_iter = x[np.argmin(cumsum)]
# min loss according to test (normalize such that first loss is 0)
test_score -= test_score[0]
test_best_iter = x[np.argmin(test_score)]
# min loss according to cv (normalize such that first loss is 0)
cv_score -= cv_score[0]
cv_best_iter = x[np.argmin(cv_score)]
# color brew for the three curves
oob_color = list(map(lambda x: x / 256.0, (190, 174, 212)))
test_color = list(map(lambda x: x / 256.0, (127, 201, 127)))
cv_color = list(map(lambda x: x / 256.0, (253, 192, 134)))
# plot curves and vertical lines for best iterations
plt.plot(x, cumsum, label='OOB loss', color=oob_color)
plt.plot(x, test_score, label='Test loss', color=test_color)
plt.plot(x, cv_score, label='CV loss', color=cv_color)
plt.axvline(x=oob_best_iter, color=oob_color)
plt.axvline(x=test_best_iter, color=test_color)
plt.axvline(x=cv_best_iter, color=cv_color)
# add three vertical lines to xticks
xticks = plt.xticks()
xticks_pos = np.array(xticks[0].tolist() +
[oob_best_iter, cv_best_iter, test_best_iter])
xticks_label = np.array(list(map(lambda t: int(t), xticks[0])) +
['OOB', 'CV', 'Test'])
ind = np.argsort(xticks_pos)
xticks_pos = xticks_pos[ind]
xticks_label = xticks_label[ind]
plt.xticks(xticks_pos, xticks_label)
plt.legend(loc='upper right')
plt.ylabel('normalized loss')
plt.xlabel('number of iterations')
plt.show()
| bsd-3-clause |
vlarson/class-scripts | arm_analysis/analyze_arm_obs.py | 1 | 22855 | # -*- coding: utf-8 -*-
"""
Analyze ARM observations.
"""
# Import libraries
from __future__ import division # in order to divide two integers
from numpy import fmax, arange, meshgrid, ix_, sqrt, mean, var, std, sum
from numpy import linspace, asarray, sort, amin, zeros, isclose, count_nonzero
from numpy.ma import masked_where, filled
from numpy.ma import MaskedArray
from math import pi, log
from scipy.stats import norm, lognorm, skew, spearmanr
from scipy.stats.mstats import rankdata
import matplotlib.pyplot as plt
from scipy.io import netcdf
from arm_utilities import plotSfcRad, findTruncNormalRoots, findKSDn, calcMeanAlbedo
import pdb
import sys
# Point to directory containing ARM observations
data_dir = '/home/studi/Larson/arm_data_files/'
##################################
#
# Plot surface radiative fields
#
##################################
#beflux_file = data_dir + 'sgpbeflux1longC1.c1.20131204.000000.custom.cdf'
#beflux_file = data_dir + 'sgpbeflux1longC1.c1.20131205.000000.custom.cdf'
#beflux_file = data_dir + 'sgpbeflux1longC1.c1.20131206.000000.custom.cdf'
#beflux_file = data_dir + 'sgpbeflux1longC1.c1.20131207.000000.custom.cdf'
#beflux_file = data_dir + 'sgpbeflux1longC1.c1.20131208.000000.custom.cdf'
# SDN showed a few clouds on 20131215:
beflux_file = data_dir + 'sgpbeflux1longC1.c1.20131215.000000.custom.cdf'
# 20131217 had essentially clear skies
#beflux_file = data_dir + 'sgpbeflux1longC1.c1.20131217.000000.custom.cdf'
#beflux_file = data_dir + 'sgpbeflux1longC1.c1.20131218.000000.custom.cdf'
# Impose a threshold on short_direct_normal to get rid of nighttime values
min_sdn = 10
#plotSfcRad(beflux_file, min_sdn)
##################################
#
# Plot radar obs
#
##################################
# Default values
# Grid level at which to plot time series and histogram
range_level = 117
# Time at which profile of reflectivity is plotted
time_of_cloud = 69000
# Number of profiles of reflectivity to be plotted
numProfiles = 5
# Indices for range of altitudes for time-height plots
height_range = arange(0,200)
# Indices for range of times for time-height plots
beginTimeOfPlot = 1
endTimeOfPlot = 86399
# Range of times in seconds for vertical overlap analysis
beginTimeOfCloud = 70000
endTimeOfCloud = 80000
# Range of altitudes in meters for vertical overlap analysis
cloudBaseHeight = 2000
cloudTopHeight = 3000
# If lArscl = True, then we want to use the ARSCL radar retrieval
radarType = "arscl"
# Impose a threshold on reflectivity_copol to get rid of noise values
if (radarType == "arscl"):
minThreshRefl = -60
else:
minThreshRefl = -30
# An estimate of within-cloud liquid water path, in g/m**2
meanLWP = 10
# Now overwrite defaults with specialized values for particular days
#date = 20131204 # Shallow Sc
#date = 20131205 # Deep Cu
#date = 20131206 # Shallow Sc, bad data?
#date = 20131207 # Sc/Cu from 3 to 4 km
#date = 20131208 # Low drizzling Cu
#date = 20131215 # No clouds
#date = 20131217 # Noise
date = 20150607 # Shallow Cu and some mid level clouds
#date = 20150609 # Shallow Cu
#date = 20150627 # Shallow Cu
if date == 20131204:
# Radar showed low stratus on 20131204:
radar_refl_file = data_dir + 'sgpkazrcorgeC1.c1.20131204.000000.custom.nc'
# Indices for range of altitudes for time-height plots
height_range = arange(0,100)
# Grid level at which to plot time series and histogram
range_level = 18
elif date == 20131205:
# Radar could see strong clouds up to 8 km on 20131205:
radar_refl_file = data_dir + 'sgpkazrcorgeC1.c1.20131205.000002.custom.nc'
# Grid level at which to plot time series and histogram
range_level = 167
# Indices for range of altitudes for time-height plots
height_range = arange(50,250)
elif date == 20131206:
# Radar could see clouds up to 2 km on 20131206:
radar_refl_file = data_dir + 'sgpkazrcorgeC1.c1.20131206.000000.custom.nc'
# Grid level at which to plot time series and histogram
range_level = 45
# Time and time step at which profile of reflectivity is plotted
time_of_cloud = 75000
# Indices for range of altitudes for time-height plots
height_range = arange(1,100)
elif date == 20131207:
# Radar could see Sc/Cu clouds from 3 km to 4 km on 20131207:
radar_refl_file = data_dir + 'sgpkazrcorgeC1.c1.20131207.000001.custom.nc'
# Grid level at which to plot time series and histogram
range_level = 110
elif date == 20131208:
# Radar saw low drizzling cumulus on 20131208:
radar_refl_file = data_dir + 'sgpkazrcorgeC1.c1.20131208.000003.custom.nc'
# Indices for range of altitudes for time-height plots
height_range = arange(0,35)
# Grid level at which to plot time series and histogram
range_level = 9
# Impose a threshold on reflectivity_copol to get rid of noise values
minThreshRefl = -40
elif date == 20131215:
# Radar couldn't see clouds on 20131215:
radar_refl_file = data_dir + 'sgpkazrcorgeC1.c1.20131215.000003.custom.nc'
elif date == 20131217:
# 20131217 had essentially clear skies
radar_refl_file = data_dir + 'sgpkazrcorgeC1.c1.20131217.000003.custom.nc'
elif date == 20150607:
radarType = "arscl"
# There should have been lots of clouds, but ARSCL could see few
if ( radarType == "arscl" ):
radar_refl_file = data_dir + 'sgparsclkazr1kolliasC1.c1.20150607.000000.nc'
elif ( radarType == "kazrCorge" ):
radar_refl_file = data_dir + 'sgpkazrcorgeC1.c1.20150607.000000.nc'
elif ( radarType == "kazrCormd" ):
radar_refl_file = data_dir + 'sgpkazrcormdC1.c1.20150607.000000.nc'
# Grid level at which to plot time series and histogram
range_level = 75 #80
# Indices for range of altitudes for time-height plots
height_range = arange(0,100)
beginTimeOfPlot = 68000
endTimeOfPlot = 80000
# Time at which profile of reflectivity is plotted
time_of_cloud = 71000 #78410 #78800 #78450
# Range of times in seconds for vertical overlap analysis
beginTimeOfCloud = 70000 #70000#78000
endTimeOfCloud = 79000 #72000#79000
# Range of altitudes in meters for vertical overlap analysis
cloudBaseHeight = 2250#2500
cloudTopHeight = 2600#2800
# An estimate of within-cloud liquid water path, in g/m**2
meanLWP = 10
elif date == 20150609:
radarType = "arscl"
# Radar could see strong clouds up to 8 km on 20131205:
if ( radarType == "arscl" ):
radar_refl_file = data_dir + 'sgparsclkazr1kolliasC1.c1.20150609.000000.nc'
elif ( radarType == "kazrCorge" ):
radar_refl_file = data_dir + 'sgpkazrcorgeC1.c1.20150609.000003.nc'
elif ( radarType == "kazrCormd" ):
radar_refl_file = data_dir + 'sgpkazrcormdC1.c1.20150609.000003.nc'
# Grid level at which to plot time series and histogram
range_level = 100
# Indices for range of altitudes for time-height plots
height_range = arange(0,150)
# Time and time step at which profile of reflectivity is plotted
time_of_cloud = 66000 #76000
# Range of times in seconds for vertical overlap analysis
beginTimeOfCloud = 63000
endTimeOfCloud = 78000
# Range of altitudes in meters for vertical overlap analysis
cloudBaseHeight = 2600
cloudTopHeight = 3500
# An estimate of within-cloud liquid water path, in g/m**2
meanLWP = 8
elif date == 20150627:
radarType = "arscl"
# Radar could see strong clouds up to 8 km on 20131205:
if ( radarType == "arscl" ):
radar_refl_file = data_dir + 'sgparsclkazr1kolliasC1.c1.20150627.000000.nc'
elif ( radarType == "kazrCorge" ):
radar_refl_file = data_dir + 'sgpkazrcorgeC1.c1.20150627.000000.nc'
elif ( radarType == "kazrCormd" ):
radar_refl_file = data_dir + 'sgpkazrcormdC1.c1.20150627.000000.nc'
# Grid level at which to plot time series and histogram
range_level = 95
# Indices for range of altitudes for time-height plots
height_range = arange(0,200)
# Time and time step at which profile of reflectivity is plotted
time_of_cloud = 67200 # At this time, the profile is highly correlated
time_of_cloud = 69400 # At this time, the profile is not well correlated
time_of_cloud = 66300 #75000 #66000
# Range of times in seconds for vertical overlap analysis
beginTimeOfCloud = 63000
endTimeOfCloud = 78000
# Range of altitudes in meters for vertical overlap analysis
cloudBaseHeight = 2200
cloudTopHeight = 3500
# An estimate of within-cloud liquid water path, in g/m**2
meanLWP = 20
else:
print "Wrong date"
radar_refl_nc = netcdf.netcdf_file(radar_refl_file, 'r')
time_offset_radar_refl = radar_refl_nc.variables['time_offset']
# Compute beginning and ending time steps for time series and time-height plots
beginTimestepOfPlot = (abs(time_offset_radar_refl[:]-beginTimeOfPlot)).argmin()
endTimestepOfPlot = (abs(time_offset_radar_refl[:]-endTimeOfPlot)).argmin()
time_range = arange(beginTimestepOfPlot,endTimestepOfPlot)
# Compute time step for profile of snapshot
timestep_of_cloud = (abs(time_offset_radar_refl[:]-time_of_cloud)).argmin()
# Compute beginning and ending time steps for block of cloud for overlap analysis
beginTimestepOfCloud = (abs(time_offset_radar_refl[:]-beginTimeOfCloud)).argmin()
endTimestepOfCloud = (abs(time_offset_radar_refl[:]-endTimeOfCloud)).argmin()
timestepRangeCloud = arange(beginTimestepOfCloud,endTimestepOfCloud)
if ( radarType == "arscl" ):
height = radar_refl_nc.variables['height'].data
else:
height = radar_refl_nc.variables['range'].data
#range_gate_spacing = 29.979246
#height = arange(0, 676*range_gate_spacing-1, range_gate_spacing)
# Compute top and bottom grid levels for block of cloud for overlap analysis
cloudBaseLevel = (abs(height[:]-cloudBaseHeight)).argmin()
cloudTopLevel = (abs(height[:]-cloudTopHeight)).argmin()
levelRangeCloud = arange(cloudBaseLevel,cloudTopLevel)
lenTimestepRangeCloud = len(timestepRangeCloud)
lenLevelRangeCloud = len(levelRangeCloud)
if ( radarType == "arscl" ):
# To extract the data part of the object, use [:,:] instead of data
reflectivity_copol = radar_refl_nc.variables['reflectivity_best_estimate'].data
# Pull the quality control flag from netcdf data
qcRefl = radar_refl_nc.variables['qc_reflectivity_best_estimate'].data
# Mask out reflectivity values outside of cloud or rain
reflectivity_copol = masked_where( qcRefl > 0, reflectivity_copol )
else:
reflectivity_copol = radar_refl_nc.variables['reflectivity_copol'].data
#pdb.set_trace()
# The numpy ix_ function is needed to extract the right part of the matrix
reflCopol = reflectivity_copol[ix_(time_range,height_range)]
# Block of cloud values for overlap analysis
reflCloudBlock = reflectivity_copol[ix_(timestepRangeCloud,levelRangeCloud)]
#pdb.set_trace()
# Replace small values with threshold in order to reduce the range of values to plot
#reflCopol = fmax(minThreshRefl,reflCopol[:])
#pdb.set_trace()
#reflCompressed = reflCopol[:,range_level].compressed()
reflCompressed = reflCloudBlock[:,range_level-cloudBaseLevel].compressed()
#dfser['ecdf_r']=(len(dfser)-dfser['rank']+1)/len(dfser)
lenReflCompressed = len(reflCompressed)
# Check whether there is cloud at the height level chosen for plotting time series
if ( len( reflCompressed ) == 0 ):
print "ERROR: Reflectivity time series at level %s has no values above the threshold!!!" %range_level
sys.exit(1)
# Smallest and largest values of reflectivity, used for plots below
minRefl = min( reflCompressed )
maxRefl = max( reflCompressed )
reflRange = linspace(minRefl,maxRefl)
#pdb.set_trace()
# Compute effect of vertical overlap on radiation
# To do so, sum reflectivity from each profile, using original and sorted cloud data
# Compute the standard deviation in the sum
reflCloudBlockMin = amin(reflCloudBlock)
reflCloudBlockOffset = reflCloudBlock - reflCloudBlockMin
# Sum reflectivities in vertical, and then compute within-cloud mean
meanReflCloudBlock = mean(sum(reflCloudBlockOffset,axis=1))
reflCloudBlockFilled = filled(reflCloudBlockOffset,fill_value=0)
# Compute maximal overlap by sorting each altitude level individually
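# Sorting each column (altitude level) independently rearranges the profiles into
# their maximally overlapped, rank-correlated configuration: every level keeps its
# own marginal distribution (so the level means are unchanged, as asserted below),
# but the vertical sums become as variable as possible.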
reflCloudBlockFilledSorted = zeros((lenTimestepRangeCloud,lenLevelRangeCloud))
for col in range(0,lenLevelRangeCloud):
reflCloudBlockFilledSorted[:,col] = sort(reflCloudBlockFilled[:,col])
# Assertion check
if ( not( isclose( mean( mean(reflCloudBlockFilled) ), mean( mean(reflCloudBlockFilledSorted) ) ) ) ):
print "ERROR: Computing maximal overlap failed!!! %s != %s" \
% (mean( mean(reflCloudBlockFilled) ) , mean( mean(reflCloudBlockFilledSorted) ))
sys.exit(1)
#pdb.set_trace()
meanAlbedoUnsorted, LWPUnsorted \
= calcMeanAlbedo(reflCloudBlockFilled, meanReflCloudBlock, meanLWP)
meanAlbedoSorted, LWPSorted \
= calcMeanAlbedo(reflCloudBlockFilledSorted, meanReflCloudBlock, meanLWP)
# Now consider a case in which there is no within-cloud variability
# mean within-cloud optical depth
tauWc0 = 0.15 * meanLWP
# mean within-cloud albedo
albedoWc0 = tauWc0 / (9.0 + tauWc0)
# Find cloud cover
sumReflCloudBlockFilled = sum(reflCloudBlockFilled,axis=1)
cloudCover = count_nonzero(sumReflCloudBlockFilled)/len(sumReflCloudBlockFilled)
# mean albedo, including clear air
meanAlbedo0 = albedoWc0 * cloudCover
# Assertion check
if ( not( isclose( meanReflCloudBlock * cloudCover , mean(sumReflCloudBlockFilled) ) ) ):
print "ERROR: Computing maximal overlap failed!!! %s != %s" \
% ( meanReflCloudBlock * cloudCover , mean(sumReflCloudBlockFilled) )
sys.exit(1)
#pdb.set_trace()
print " Unsorted Sorted No within-cloud variability"
print "Mean Albedo: %.5f %.5f %.5f" %(meanAlbedoUnsorted, meanAlbedoSorted, meanAlbedo0)
print "Relative fractional difference: %.5f %.5f %.5f" \
%( 0,
(meanAlbedoUnsorted-meanAlbedoSorted)/meanAlbedoUnsorted,
(meanAlbedo0-meanAlbedoSorted)/meanAlbedoUnsorted
)
# Compute Spearman's rank correlation matrix
# among reflectivity at different vertical levels.
# I'm not sure about the following calculation because it is unclear
# how spearmanr handles a masked array.
spearmanMatrix, spearmanPval = spearmanr(reflCloudBlock, axis=0)
print "Spearman rank correlation matrix:"
print spearmanMatrix
#exit
TIME, HEIGHT = meshgrid(height[height_range],
time_offset_radar_refl[time_range])
plt.ion() # Use interactive mode so that program continues when plot appears
plt.clf()
# either contourf or pcolormesh produces filled contours
radarContour = plt.pcolormesh(HEIGHT[:],TIME[:],reflCopol)
# Make a colorbar for the ContourSet returned by the contourf call.
cbar = plt.colorbar(radarContour)
cbar.ax.set_ylabel('Reflectivity [dBZ]')
# Add the contour line levels to the colorbar
#cbar.add_lines(radarContour)
# Plot horizontal line corresponding to time series plot later
plt.plot( [ beginTimeOfPlot , endTimeOfPlot ],
[ height[range_level], height[range_level] ], 'k' )
#pdb.set_trace()
# Plot vertical line corresponding to histogram of reflectivity
plt.plot( [ time_of_cloud , time_of_cloud ],
[ height[height_range[0]], height[height_range[len(height_range)-1]] ], 'k' )
# Plot box corresponding to cloud box
plt.plot( [ beginTimeOfCloud , beginTimeOfCloud ], [ cloudBaseHeight, cloudTopHeight ], 'k' )
plt.plot( [ endTimeOfCloud , endTimeOfCloud ], [ cloudBaseHeight, cloudTopHeight ], 'k' )
plt.plot( [ beginTimeOfCloud , endTimeOfCloud ], [ cloudBaseHeight, cloudBaseHeight ], 'k' )
plt.plot( [ beginTimeOfCloud , endTimeOfCloud ], [ cloudTopHeight, cloudTopHeight ], 'k' )
plt.title('Radar reflectivity')
plt.xlabel('Time [' + time_offset_radar_refl.units + ']')
plt.ylabel('Altitude [m]')
plt.figure()
#plt.show()
#pdb.set_trace()
# uniformCloudBlock = close-up selection of contiguous cloud values.
# Each column is a different altitude.
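# Ranking each column and dividing by its number of unmasked values is an empirical
# probability integral transform: each level's reflectivities are mapped to roughly
# uniform margins on (0, 1], which is what the copula scatter plots below visualize.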
uniformCloudBlock = zeros((lenTimestepRangeCloud,lenLevelRangeCloud))
for col in range(0,lenLevelRangeCloud):
uniformCloudBlock[:,col] = rankdata(reflCloudBlock[:,col]) / \
MaskedArray.count(reflCloudBlock[:,col])
uniformCloudBlock = masked_where( uniformCloudBlock == 0, uniformCloudBlock )
# I'm not sure if it's appropriate to rank first, then fill.
# So I'm not sure if this is correct.
uniformCloudBlockFilled = filled(uniformCloudBlock,fill_value=0)
plt.clf()
for idx in range(1,5):
plt.subplot(2,2,idx)
plt.plot(uniformCloudBlockFilled[:,5],uniformCloudBlockFilled[:,idx],'.')
plt.title('Copula')
plt.figure()
#pdb.set_trace()
#plt.ion() # Use interactive mode so that program continues when plot appears
plt.clf()
plt.subplot(121)
#pdb.set_trace()
for idx in range(0,numProfiles):
plt.plot(reflCopol[timestep_of_cloud-beginTimestepOfPlot+idx,levelRangeCloud],
height[levelRangeCloud],'-o')
plt.ylim(height[levelRangeCloud[0]], height[levelRangeCloud[len(levelRangeCloud)-1]])
plt.xlabel('Copolar radar reflectivity')
plt.ylabel('Altitude [m]')
plt.subplot(122)
for idx in range(0,numProfiles):
plt.plot(uniformCloudBlock[timestep_of_cloud-beginTimestepOfCloud+idx,:],
height[levelRangeCloud],'-o')
plt.ylim(height[levelRangeCloud[0]], height[levelRangeCloud[len(levelRangeCloud)-1]])
plt.xlabel('Uniform distribution of reflectivity')
plt.ylabel('Altitude [m]')
plt.figure()
plt.subplot(121)
for idx in range(0,numProfiles):
plt.plot(spearmanMatrix[:,idx],
height[levelRangeCloud],'-o')
plt.ylim(height[levelRangeCloud[0]], height[levelRangeCloud[len(levelRangeCloud)-1]])
plt.xlabel('Spearman rank correlations [-]')
plt.ylabel('Altitude [m]')
plt.figure()
#plt.clf()
##pdb.set_trace()
#for idx in range(0,lenTimestepRangeCloud):
# plt.plot(uniformCloudBlock[idx,:],
# height[levelRangeCloud],'-o')
#plt.xlabel('Copolar radar reflectivity')
#plt.ylabel('Altitude [m]')
#plt.figure()
plt.clf()
TIMECLD, HEIGHTCLD = meshgrid(height[levelRangeCloud],
time_offset_radar_refl[timestepRangeCloud])
# either contourf or pcolormesh produces filled contours
uniformCloudBlockContour = plt.pcolormesh(HEIGHTCLD[:],TIMECLD[:],uniformCloudBlock)
# Make a colorbar for the ContourSet returned by the contourf call.
cbar = plt.colorbar(uniformCloudBlockContour)
cbar.ax.set_ylabel('Normalized Reflectivity []')
# Add the contour line levels to the colorbar
#cbar.add_lines(radarContour)
# Plot horizontal line corresponding to time series plot later
plt.plot( [ beginTimeOfCloud , endTimeOfCloud ],
[ height[range_level], height[range_level] ], 'k' )
#pdb.set_trace()
# Plot vertical line corresponding to histogram of reflectivity
plt.plot( [ time_of_cloud , time_of_cloud ],
[ height[levelRangeCloud[0]], height[levelRangeCloud[len(levelRangeCloud)-1]] ], 'k' )
plt.title('Normalized reflectivity')
plt.xlabel('Time [' + time_offset_radar_refl.units + ']')
plt.ylabel('Altitude [m]')
plt.figure()
radar_refl_nc.close()
# Compute mean and variance of truncated time series
truncMean = mean( reflCompressed )
truncVarnce = var( reflCompressed )
print "truncMean = %s" %truncMean
print "sqrt(truncVarnce) = %s" %sqrt(truncVarnce)
# Compute parameters of truncated normal distribution
muInit = 2*truncMean
sigmaInit = 2*sqrt(truncVarnce)
mu, sigma = findTruncNormalRoots(truncMean,truncVarnce,muInit,sigmaInit,minThreshRefl)
print "mu = %s" %mu
print "sigma = %s" %sigma
# Compute empirical distribution function of data
reflCompressedSorted = sort(reflCompressed)
reflEdf = (rankdata(reflCompressedSorted) - 1)/lenReflCompressed
normCdf = norm.cdf( reflCompressedSorted, loc=truncMean, scale=sqrt(truncVarnce) )
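# Left-truncated normal CDF,
#   F(x) = [Phi((x - mu)/sigma) - Phi((t - mu)/sigma)] / [1 - Phi((t - mu)/sigma)],
# with the truncation point t taken as the smallest observed reflectivity.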
truncNormCdf = ( norm.cdf(reflCompressedSorted,mu,sigma) \
- norm.cdf((minRefl-mu)/sigma) ) \
/(1.0-norm.cdf((minRefl-mu)/sigma))
minRefl = amin(reflCompressedSorted)
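# Method-of-moments fit of a lognormal shifted to start at minRefl: matching the
# mean M = truncMean - minRefl and variance v = truncVarnce of the shifted data gives
#   sigma2LogN = log(1 + v/M**2)  and  expMuLogN = M/sqrt(1 + v/M**2).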
expMuLogN = (truncMean-minRefl)/sqrt(1+truncVarnce/((truncMean-minRefl)**2))
sigma2LogN = log(1+truncVarnce/((truncMean-minRefl)**2))
lognormCdf = lognorm.cdf( reflCompressedSorted - minRefl, sqrt(sigma2LogN),
loc=0, scale=expMuLogN )
#pdb.set_trace()
DnNormCdf = findKSDn(normCdf, reflEdf)
DnTruncNormCdf = findKSDn(truncNormCdf, reflEdf)
DnLognormCdf = findKSDn(lognormCdf, reflEdf)
print "KS statistic Dn"
print "DnNormCdf = %s" %DnNormCdf
print "DnTruncNormCdf = %s" %DnTruncNormCdf
print "DnLognormCdf = %s" %DnLognormCdf
plt.clf()
# Plot cumulative distribution functions
# Empirical CDF
plt.plot( reflCompressedSorted , reflEdf, label="Empirical" )
# Normal CDF
plt.plot( reflCompressedSorted ,
normCdf ,
label="Normal" )
# Truncated normal CDF
truncNormCurve = plt.plot( reflCompressedSorted,
truncNormCdf ,
label="Truncated normal")
# Lognormal CDF
plt.plot( reflCompressedSorted ,
lognormCdf ,
label="Lognorm" )
plt.xlabel('Copolar radar reflectivity')
plt.ylabel('Cumulative distribution function')
plt.title('Height = %s m' %height[range_level] )
plt.legend(loc="best")
plt.figure()
# Plot time series of radar reflectivity
plt.clf()
plt.subplot(211)
plt.plot(time_offset_radar_refl[time_range],reflCopol[:,range_level].data)
plt.ylim((minThreshRefl,1.1*max(reflCopol[:,range_level]).data))
plt.xlabel('Time [' + time_offset_radar_refl.units + ']')
plt.ylabel('Copolar radar reflectivity')
plt.title('Height = %s m. Stdev. = %s dBZ. Sk = %s' \
% ( height[range_level], std(reflCompressed), skew(reflCompressed) ) )
#pdb.set_trace()
# Plot histogram of copolar radar reflectivity
plt.subplot(212)
n, bins, patches = plt.hist(reflCompressed,
50, normed=True, histtype='stepfilled')
plt.setp(patches, 'facecolor', 'g', 'alpha', 0.75)
# Overplot best-fit truncated normal
truncNormCurve = plt.plot(reflRange,
norm.pdf(reflRange,loc=mu,scale=sigma)/(1.0-norm.cdf((minRefl-mu)/sigma)),
label="Truncated normal")
# Overplot best-fit normal
normCurve = plt.plot( reflRange,
norm.pdf(reflRange,loc=truncMean,scale=sqrt(truncVarnce)) ,
label="Normal" )
# Overplot best-fit lognormal
plt.plot( reflRange ,
lognorm.pdf( reflRange - minRefl, sqrt(sigma2LogN),
loc=0, scale=expMuLogN ) , label="Lognormal" )
plt.xlabel('Copolar radar reflectivity')
plt.ylabel('Probability')
plt.legend()
#plt.show()
plt.draw()
#plt.close()
#exit | gpl-2.0 |
LohithBlaze/scikit-learn | examples/linear_model/plot_sgd_separating_hyperplane.py | 260 | 1219 | """
=========================================
SGD: Maximum margin separating hyperplane
=========================================
Plot the maximum margin separating hyperplane within a two-class
separable dataset using a linear Support Vector Machine classifier
trained with SGD.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import SGDClassifier
from sklearn.datasets.samples_generator import make_blobs
# we create 50 separable points
X, Y = make_blobs(n_samples=50, centers=2, random_state=0, cluster_std=0.60)
# fit the model
clf = SGDClassifier(loss="hinge", alpha=0.01, n_iter=200, fit_intercept=True)
clf.fit(X, Y)
# plot the line, the points, and the nearest vectors to the plane
xx = np.linspace(-1, 5, 10)
yy = np.linspace(-1, 5, 10)
X1, X2 = np.meshgrid(xx, yy)
Z = np.empty(X1.shape)
for (i, j), val in np.ndenumerate(X1):
x1 = val
x2 = X2[i, j]
p = clf.decision_function([x1, x2])
Z[i, j] = p[0]
levels = [-1.0, 0.0, 1.0]
linestyles = ['dashed', 'solid', 'dashed']
colors = 'k'
plt.contour(X1, X2, Z, levels, colors=colors, linestyles=linestyles)
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.axis('tight')
plt.show()
| bsd-3-clause |
chutsu/robotics | prototype/tests/estimation/test_kf.py | 1 | 2594 | import unittest
import numpy as np
import matplotlib.pylab as plt
from prototype.estimation.kf import KF
def gaussian_noise(sigma):
    # Accept either a noise covariance array/matrix or a scalar sigma
    if isinstance(sigma, (np.matrix, np.ndarray)):
        return sigma**2 * np.random.randn(sigma.shape[0], 1)
    else:
        return sigma**2 * np.random.randn()
def plot_trajectory(state_true, state_estimated):
plt.plot(state_true[:, 0], state_true[:, 2], color="red")
plt.scatter(state_estimated[:, 0].tolist()[::10],
state_estimated[:, 2].tolist()[::10],
marker="o",
color="blue")
class KFTest(unittest.TestCase):
def test_kf(self):
# Setup
dt = 0.1
mu = np.array([0.0, 0.0, 0.0, 0.0]).reshape(4, 1)
S = np.eye(4)
R = np.array([[0.01**2, 0.0, 0.0, 0.0],
[0.0, 0.01**2, 0.0, 0.0],
[0.0, 0.0, 0.01**2, 0.0],
[0.0, 0.0, 0.0, 0.01**2]])
Q = np.array([[0.4**2, -0.1],
[-0.1, 0.1**2]])
A = np.array([[1.0, 0.0975, 0.0, 0.0],
[0.0, 0.9512, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.0975],
[0.0, 0.0, 0.0, 0.9512]])
B = np.array([[0.0025, 0.0],
[0.0488, 0.0],
[0.0, 0.0025],
[0.0, 0.0488]])
C = np.array([[1.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.0]])
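        # The system matrices look like a discretized 2-D point mass with linear
        # drag: the state is [x, vx, y, vy], positions integrate velocity over a
        # ~0.1 s step, velocities decay by ~0.9512 per step, B feeds the control
        # input into the velocities, and C observes the two positions only.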
kf = KF(mu=mu, S=S, R=R, Q=Q)
# Simulation parameters
t_end = 100
T = np.arange(0, t_end, dt)
x = np.array([0.0, 0.0, 0.0, 0.0]).reshape(4, 1)
# Simulation
state_true = []
state_estimated = []
for t in T:
# Update state
u = np.array([[1.0], [1.0]])
x = np.dot(A, x) + np.dot(B, u)
state_true.append(x)
# Take measurement + noise
d = gaussian_noise(kf.Q)
y = np.dot(C, x) + d
# KF
kf.prediction_update(A, B, u, dt)
kf.measurement_update(C, y)
# Store true and estimated
state_true.append(x)
state_estimated.append(kf.mu)
# Convert from list to numpy array then to matrix
state_true = np.array(state_true)
state_true = np.matrix(state_true)
state_estimated = np.array(state_estimated)
state_estimated = np.matrix(state_estimated)
# Plot trajectory
debug = False
if debug:
plot_trajectory(state_true, state_estimated)
plt.show()
| gpl-3.0 |
AlexanderFabisch/scikit-learn | examples/decomposition/plot_image_denoising.py | 181 | 5819 | """
=========================================
Image denoising using dictionary learning
=========================================
An example comparing the effect of reconstructing noisy fragments
of the Lena image, first using online :ref:`DictionaryLearning` and
then various transform methods.
The dictionary is fitted on the distorted left half of the image, and
subsequently used to reconstruct the right half. Note that even better
performance could be achieved by fitting to an undistorted (i.e.
noiseless) image, but here we start from the assumption that it is not
available.
A common practice for evaluating the results of image denoising is by looking
at the difference between the reconstruction and the original image. If the
reconstruction is perfect this will look like Gaussian noise.
It can be seen from the plots that the result of :ref:`omp` with two
non-zero coefficients is a bit less biased than when keeping only one
(the edges look less prominent). It is in addition closer to the ground
truth in Frobenius norm.
The result of :ref:`least_angle_regression` is much more strongly biased: the
difference is reminiscent of the local intensity value of the original image.
Thresholding is clearly not useful for denoising, but it is here to show that
it can produce a suggestive output with very high speed, and thus be useful
for other tasks such as object classification, where performance is not
necessarily related to visualisation.
"""
print(__doc__)
from time import time
import matplotlib.pyplot as plt
import numpy as np
from scipy.misc import lena
from sklearn.decomposition import MiniBatchDictionaryLearning
from sklearn.feature_extraction.image import extract_patches_2d
from sklearn.feature_extraction.image import reconstruct_from_patches_2d
###############################################################################
# Load Lena image and extract patches
lena = lena() / 256.0
# downsample for higher speed
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
lena /= 4.0
height, width = lena.shape
# Distort the right half of the image
print('Distorting image...')
distorted = lena.copy()
distorted[:, height // 2:] += 0.075 * np.random.randn(width, height // 2)
# Extract all reference patches from the left half of the image
print('Extracting reference patches...')
t0 = time()
patch_size = (7, 7)
data = extract_patches_2d(distorted[:, :height // 2], patch_size)
data = data.reshape(data.shape[0], -1)
data -= np.mean(data, axis=0)
data /= np.std(data, axis=0)
print('done in %.2fs.' % (time() - t0))
###############################################################################
# Learn the dictionary from reference patches
print('Learning the dictionary...')
t0 = time()
dico = MiniBatchDictionaryLearning(n_components=100, alpha=1, n_iter=500)
V = dico.fit(data).components_
dt = time() - t0
print('done in %.2fs.' % dt)
plt.figure(figsize=(4.2, 4))
for i, comp in enumerate(V[:100]):
plt.subplot(10, 10, i + 1)
plt.imshow(comp.reshape(patch_size), cmap=plt.cm.gray_r,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('Dictionary learned from Lena patches\n' +
'Train time %.1fs on %d patches' % (dt, len(data)),
fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
###############################################################################
# Display the distorted image
def show_with_diff(image, reference, title):
"""Helper function to display denoising"""
plt.figure(figsize=(5, 3.3))
plt.subplot(1, 2, 1)
plt.title('Image')
plt.imshow(image, vmin=0, vmax=1, cmap=plt.cm.gray, interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.subplot(1, 2, 2)
difference = image - reference
plt.title('Difference (norm: %.2f)' % np.sqrt(np.sum(difference ** 2)))
plt.imshow(difference, vmin=-0.5, vmax=0.5, cmap=plt.cm.PuOr,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle(title, size=16)
plt.subplots_adjust(0.02, 0.02, 0.98, 0.79, 0.02, 0.2)
show_with_diff(distorted, lena, 'Distorted image')
###############################################################################
# Extract noisy patches and reconstruct them using the dictionary
print('Extracting noisy patches... ')
t0 = time()
data = extract_patches_2d(distorted[:, height // 2:], patch_size)
data = data.reshape(data.shape[0], -1)
intercept = np.mean(data, axis=0)
data -= intercept
print('done in %.2fs.' % (time() - t0))
transform_algorithms = [
('Orthogonal Matching Pursuit\n1 atom', 'omp',
{'transform_n_nonzero_coefs': 1}),
('Orthogonal Matching Pursuit\n2 atoms', 'omp',
{'transform_n_nonzero_coefs': 2}),
('Least-angle regression\n5 atoms', 'lars',
{'transform_n_nonzero_coefs': 5}),
('Thresholding\n alpha=0.1', 'threshold', {'transform_alpha': .1})]
reconstructions = {}
for title, transform_algorithm, kwargs in transform_algorithms:
print(title + '...')
reconstructions[title] = lena.copy()
t0 = time()
dico.set_params(transform_algorithm=transform_algorithm, **kwargs)
code = dico.transform(data)
patches = np.dot(code, V)
if transform_algorithm == 'threshold':
patches -= patches.min()
patches /= patches.max()
patches += intercept
patches = patches.reshape(len(data), *patch_size)
if transform_algorithm == 'threshold':
patches -= patches.min()
patches /= patches.max()
reconstructions[title][:, height // 2:] = reconstruct_from_patches_2d(
patches, (width, height // 2))
dt = time() - t0
print('done in %.2fs.' % dt)
show_with_diff(reconstructions[title], lena,
title + ' (time: %.1fs)' % dt)
plt.show()
| bsd-3-clause |
amsjavan/nazarkav | nazarkav/cleardata.py | 1 | 2043 | import pandas as pd
from bs4 import BeautifulSoup
import re
import hazm
import numpy as np
from sklearn.cross_validation import train_test_split
train = None
def fetch_data():
df = pd.DataFrame(data={'c1': [1111111, 2, 3], 'c2': [4, 5, 6]})
df.to_csv('test.tsv', sep="\t", index=False)
print(df)
def balance_data():
cols = ['comment', 'c', 'rate', 'name', 'date']
# Read the labeled data
data = pd.read_csv('data/hotel-dataset.csv', error_bad_lines=False, names=cols)
# Sampling
pos_data = data[data['c'] == 'pos'].sample(n=2000)
neg_data = data[data['c'] == 'neg'].sample(n=2000)
# Concat
hotel_polarity = pd.concat([pos_data, neg_data], ignore_index=True)
# Save to file
hotel_polarity[['comment', 'c']].to_csv('data/hotel-polarity.tsv', sep='\t', encoding='utf8', index=False)
def remove_tag():
with open('data/hotel-dataset.csv', 'r', encoding='utf8') as f:
hotel_data = f.read()
with open('data/hotel-dataset.csv', 'w', encoding='utf8') as f:
f.write(BeautifulSoup(hotel_data, "html.parser").get_text())
def remove_nonletter():
with open('data/hotel-dataset.tsv', 'r', encoding='utf8') as f:
hotel_data = f.read()
with open('data/hotel-dataset.tsv', 'w', encoding='utf8') as f:
# Remove non-letters and save it
f.write(re.sub("[^a-zA-Z]", " ", hotel_data))
def bag_of_word():
hotel_pol = pd.read_csv('data/hotel-polarity.tsv', sep='\t')
tokenizer = hazm.WordTokenizer()
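    # A minimal sketch of how this function could continue; the column names
    # ('comment', 'c') follow balance_data() above, while the CountVectorizer
    # settings are illustrative assumptions rather than part of the original script.
    from sklearn.feature_extraction.text import CountVectorizer
    vectorizer = CountVectorizer(tokenizer=tokenizer.tokenize, max_features=5000)
    features = vectorizer.fit_transform(hotel_pol['comment'].astype(str))
    labels = (hotel_pol['c'] == 'pos').astype(int)
    return features, labels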
# def split():
# #numy
# train, test = train_test_split( data, train_size = 0.8, random_state = 44 )
#
# # for panda use following code
# all_i = np.arange( len( data ))
# train_i, test_i = train_test_split( all_i, train_size = 0.8, random_state = 44 )
# train = data.ix[train_i]
# test = data.ix[test_i]
#
# def metric():
# p = rf.predict_proba( test_x )
# auc = AUC( test_y, p[:,1] )
#
# def theano:
# #use in svd
remove_tag() | mit |
jseabold/statsmodels | statsmodels/regression/tests/test_rolling.py | 4 | 10967 | from io import BytesIO
from itertools import product
import warnings
import numpy as np
import pandas as pd
import pytest
from numpy.testing import assert_allclose, assert_array_equal
from statsmodels import tools
from statsmodels.regression.linear_model import WLS
from statsmodels.regression.rolling import RollingWLS, RollingOLS
def gen_data(nobs, nvar, const, pandas=False, missing=0.0, weights=False):
rs = np.random.RandomState(987499302)
x = rs.standard_normal((nobs, nvar))
cols = ["x{0}".format(i) for i in range(nvar)]
if const:
x = tools.add_constant(x)
cols = ["const"] + cols
if missing > 0.0:
mask = rs.random_sample(x.shape) < missing
x[mask] = np.nan
if x.shape[1] > 1:
y = x[:, :-1].sum(1) + rs.standard_normal(nobs)
else:
y = x.sum(1) + rs.standard_normal(nobs)
w = rs.chisquare(5, y.shape[0]) / 5
if pandas:
idx = pd.date_range("12-31-1999", periods=nobs)
x = pd.DataFrame(x, index=idx, columns=cols)
y = pd.Series(y, index=idx, name="y")
w = pd.Series(w, index=idx, name="weights")
if not weights:
w = None
return y, x, w
nobs = (250,)
nvar = (3, 0)
tf = (True, False)
missing = (0, 0.1)
params = list(product(nobs, nvar, tf, tf, missing))
params = [param for param in params if param[1] + param[2] > 0]
ids = ["-".join(map(str, param)) for param in params]
basic_params = [param for param in params if param[2] and param[4]]
weighted_params = [param + (tf,) for param in params for tf in (True, False)]
weighted_ids = ["-".join(map(str, param)) for param in weighted_params]
@pytest.fixture(scope="module", params=params, ids=ids)
def data(request):
return gen_data(*request.param)
@pytest.fixture(scope="module", params=basic_params, ids=ids)
def basic_data(request):
return gen_data(*request.param)
@pytest.fixture(scope="module", params=weighted_params, ids=weighted_ids)
def weighted_data(request):
return gen_data(*request.param)
def get_single(x, idx):
if isinstance(x, (pd.Series, pd.DataFrame)):
return x.iloc[idx]
return x[idx]
def get_sub(x, idx, window):
if isinstance(x, (pd.Series, pd.DataFrame)):
out = x.iloc[idx - window : idx]
return np.asarray(out)
return x[idx - window : idx]
def test_has_nan(data):
y, x, w = data
mod = RollingWLS(y, x, window=100, weights=w)
has_nan = np.zeros(y.shape[0], dtype=bool)
for i in range(100, y.shape[0] + 1):
_y = get_sub(y, i, 100)
_x = get_sub(x, i, 100)
has_nan[i - 1] = np.squeeze(
(np.any(np.isnan(_y)) or np.any(np.isnan(_x)))
)
assert_array_equal(mod._has_nan, has_nan)
def test_weighted_against_wls(weighted_data):
y, x, w = weighted_data
mod = RollingWLS(y, x, weights=w, window=100)
res = mod.fit(use_t=True)
for i in range(100, y.shape[0]):
_y = get_sub(y, i, 100)
_x = get_sub(x, i, 100)
if w is not None:
_w = get_sub(w, i, 100)
else:
_w = np.ones_like(_y)
wls = WLS(_y, _x, weights=_w, missing="drop").fit()
rolling_params = get_single(res.params, i - 1)
rolling_nobs = get_single(res.nobs, i - 1)
assert_allclose(rolling_params, wls.params)
assert_allclose(rolling_nobs, wls.nobs)
assert_allclose(get_single(res.ssr, i - 1), wls.ssr)
assert_allclose(get_single(res.llf, i - 1), wls.llf)
assert_allclose(get_single(res.aic, i - 1), wls.aic)
assert_allclose(get_single(res.bic, i - 1), wls.bic)
assert_allclose(get_single(res.centered_tss, i - 1), wls.centered_tss)
assert_allclose(res.df_model, wls.df_model)
assert_allclose(get_single(res.df_resid, i - 1), wls.df_resid)
assert_allclose(get_single(res.ess, i - 1), wls.ess, atol=1e-8)
assert_allclose(res.k_constant, wls.k_constant)
assert_allclose(get_single(res.mse_model, i - 1), wls.mse_model)
assert_allclose(get_single(res.mse_resid, i - 1), wls.mse_resid)
assert_allclose(get_single(res.mse_total, i - 1), wls.mse_total)
assert_allclose(
get_single(res.rsquared, i - 1), wls.rsquared, atol=1e-8
)
assert_allclose(
get_single(res.rsquared_adj, i - 1), wls.rsquared_adj, atol=1e-8
)
assert_allclose(
get_single(res.uncentered_tss, i - 1), wls.uncentered_tss
)
@pytest.mark.parametrize("cov_type", ["nonrobust", "HC0"])
@pytest.mark.parametrize("use_t", [None, True, False])
def test_against_wls_inference(data, use_t, cov_type):
y, x, w = data
mod = RollingWLS(y, x, window=100, weights=w)
res = mod.fit(use_t=use_t, cov_type=cov_type)
ci_cols = ci = res.conf_int()
test_cols = x.shape[1] > 3
# This is a smoke test of cov_params to make sure it works
res.cov_params()
if test_cols:
ci_cols = res.conf_int(cols=[0, 2])
# Skip to improve performance
for i in range(100, y.shape[0]):
_y = get_sub(y, i, 100)
_x = get_sub(x, i, 100)
wls = WLS(_y, _x, missing="drop").fit(use_t=use_t, cov_type=cov_type)
assert_allclose(get_single(res.tvalues, i - 1), wls.tvalues)
assert_allclose(get_single(res.bse, i - 1), wls.bse)
assert_allclose(get_single(res.pvalues, i - 1), wls.pvalues, atol=1e-8)
assert_allclose(get_single(res.fvalue, i - 1), wls.fvalue)
with np.errstate(invalid="ignore"):
assert_allclose(
get_single(res.f_pvalue, i - 1), wls.f_pvalue, atol=1e-8
)
assert res.cov_type == wls.cov_type
assert res.use_t == wls.use_t
wls_ci = wls.conf_int()
if isinstance(ci, pd.DataFrame):
ci_val = ci.iloc[i - 1]
ci_val = np.asarray(ci_val).reshape((-1, 2))
else:
ci_val = ci[i - 1].T
assert_allclose(ci_val, wls_ci)
if test_cols:
wls_ci = wls.conf_int(cols=[0, 2])
if isinstance(ci_cols, pd.DataFrame):
ci_val = ci_cols.iloc[i - 1]
ci_val = np.asarray(ci_val).reshape((-1, 2))
else:
ci_val = ci_cols[i - 1].T
assert_allclose(ci_val, wls_ci)
def test_raise(data):
y, x, w = data
mod = RollingWLS(y, x, window=100, missing="drop", weights=w)
res = mod.fit()
params = np.asarray(res.params)
assert np.all(np.isfinite(params[99:]))
if not np.any(np.isnan(y)):
return
mod = RollingWLS(y, x, window=100, missing="skip")
res = mod.fit()
params = np.asarray(res.params)
assert np.any(np.isnan(params[100:]))
def test_error():
y, x, _ = gen_data(250, 2, True)
with pytest.raises(ValueError, match="reset must be a positive integer"):
RollingWLS(y, x,).fit(reset=-1)
with pytest.raises(ValueError):
RollingWLS(y, x).fit(method="unknown")
with pytest.raises(ValueError, match="min_nobs must be larger"):
RollingWLS(y, x, min_nobs=1)
with pytest.raises(ValueError, match="min_nobs must be larger"):
RollingWLS(y, x, window=60, min_nobs=100)
def test_save_load(data):
y, x, w = data
res = RollingOLS(y, x, window=60).fit()
fh = BytesIO()
# test wrapped results load save pickle
res.save(fh)
fh.seek(0, 0)
res_unpickled = res.__class__.load(fh)
assert type(res_unpickled) is type(res) # noqa: E721
fh = BytesIO()
# test wrapped results load save pickle
res.save(fh, remove_data=True)
fh.seek(0, 0)
res_unpickled = res.__class__.load(fh)
assert type(res_unpickled) is type(res) # noqa: E721
def test_formula():
y, x, w = gen_data(250, 3, True, pandas=True)
fmla = "y ~ 1 + x0 + x1 + x2"
data = pd.concat([y, x], axis=1)
mod = RollingWLS.from_formula(fmla, window=100, data=data, weights=w)
res = mod.fit()
alt = RollingWLS(y, x, window=100)
alt_res = alt.fit()
assert_allclose(res.params, alt_res.params)
ols_mod = RollingOLS.from_formula(fmla, window=100, data=data)
ols_mod.fit()
@pytest.mark.matplotlib
def test_plot():
import matplotlib.pyplot as plt
y, x, w = gen_data(250, 3, True, pandas=True)
fmla = "y ~ 1 + x0 + x1 + x2"
data = pd.concat([y, x], axis=1)
mod = RollingWLS.from_formula(fmla, window=100, data=data, weights=w)
res = mod.fit()
fig = res.plot_recursive_coefficient()
assert isinstance(fig, plt.Figure)
res.plot_recursive_coefficient(variables=2, alpha=None, figsize=(30, 7))
res.plot_recursive_coefficient(variables="x0", alpha=None, figsize=(30, 7))
res.plot_recursive_coefficient(
variables=[0, 2], alpha=None, figsize=(30, 7)
)
res.plot_recursive_coefficient(
variables=["x0"], alpha=None, figsize=(30, 7)
)
res.plot_recursive_coefficient(
variables=["x0", "x1", "x2"], alpha=None, figsize=(30, 7)
)
with pytest.raises(ValueError, match="variable x4 is not an integer"):
res.plot_recursive_coefficient(variables="x4")
fig = plt.Figure()
# Just silence the warning
with warnings.catch_warnings():
warnings.simplefilter("ignore")
out = res.plot_recursive_coefficient(fig=fig)
assert out is fig
res.plot_recursive_coefficient(alpha=None, figsize=(30, 7))
@pytest.mark.parametrize("params_only", [True, False])
def test_methods(basic_data, params_only):
y, x, _ = basic_data
mod = RollingOLS(y, x, 150)
res_inv = mod.fit(method="inv", params_only=params_only)
res_lstsq = mod.fit(method="lstsq", params_only=params_only)
res_pinv = mod.fit(method="pinv", params_only=params_only)
assert_allclose(res_inv.params, res_lstsq.params)
assert_allclose(res_inv.params, res_pinv.params)
@pytest.mark.parametrize("method", ["inv", "lstsq", "pinv"])
def test_params_only(basic_data, method):
y, x, _ = basic_data
mod = RollingOLS(y, x, 150)
res = mod.fit(method=method, params_only=False)
res_params_only = mod.fit(method=method, params_only=True)
# use assert_allclose to incorporate for numerical errors on x86 platforms
assert_allclose(res_params_only.params, res.params)
def test_min_nobs(basic_data):
y, x, w = basic_data
if not np.any(np.isnan(np.asarray(x))):
return
mod = RollingOLS(y, x, 150)
res = mod.fit()
# Ensures that the constraint binds
min_nobs = res.nobs[res.nobs != 0].min() + 1
mod = RollingOLS(y, x, 150, min_nobs=min_nobs)
res = mod.fit()
assert np.all(res.nobs[res.nobs != 0] >= min_nobs)
def test_expanding(basic_data):
y, x, w = basic_data
xa = np.asarray(x)
mod = RollingOLS(y, x, 150, min_nobs=50, expanding=True)
res = mod.fit()
params = np.asarray(res.params)
assert np.all(np.isnan(params[:49]))
first = np.where(np.cumsum(np.all(np.isfinite(xa), axis=1)) >= 50)[0][0]
assert np.all(np.isfinite(params[first:]))
| bsd-3-clause |
valexandersaulys/prudential_insurance_kaggle | venv/lib/python2.7/site-packages/scipy/misc/common.py | 12 | 11958 | """
Functions which are common and require SciPy Base and Level 1 SciPy
(special, linalg)
"""
from __future__ import division, print_function, absolute_import
import numpy
import numpy as np
from numpy import (exp, log, asarray, arange, newaxis, hstack, product, array,
zeros, eye, poly1d, r_, sum, fromstring, isfinite,
squeeze, amax, reshape)
from scipy._lib._version import NumpyVersion
__all__ = ['logsumexp', 'central_diff_weights', 'derivative', 'pade', 'lena',
'ascent', 'face']
_NUMPY_170 = (NumpyVersion(numpy.__version__) >= NumpyVersion('1.7.0'))
def logsumexp(a, axis=None, b=None, keepdims=False):
"""Compute the log of the sum of exponentials of input elements.
Parameters
----------
a : array_like
Input array.
axis : None or int or tuple of ints, optional
Axis or axes over which the sum is taken. By default `axis` is None,
and all elements are summed. Tuple of ints is not accepted if NumPy
version is lower than 1.7.0.
.. versionadded:: 0.11.0
keepdims : bool, optional
If this is set to True, the axes which are reduced are left in the
result as dimensions with size one. With this option, the result
will broadcast correctly against the original array.
.. versionadded:: 0.15.0
b : array-like, optional
Scaling factor for exp(`a`) must be of the same shape as `a` or
broadcastable to `a`.
.. versionadded:: 0.12.0
Returns
-------
res : ndarray
The result, ``np.log(np.sum(np.exp(a)))`` calculated in a numerically
more stable way. If `b` is given then ``np.log(np.sum(b*np.exp(a)))``
is returned.
See Also
--------
numpy.logaddexp, numpy.logaddexp2
Notes
-----
Numpy has a logaddexp function which is very similar to `logsumexp`, but
only handles two arguments. `logaddexp.reduce` is similar to this
function, but may be less stable.
Examples
--------
>>> from scipy.misc import logsumexp
>>> a = np.arange(10)
>>> np.log(np.sum(np.exp(a)))
9.4586297444267107
>>> logsumexp(a)
9.4586297444267107
With weights
>>> a = np.arange(10)
>>> b = np.arange(10, 0, -1)
>>> logsumexp(a, b=b)
9.9170178533034665
>>> np.log(np.sum(b*np.exp(a)))
9.9170178533034647
"""
a = asarray(a)
# keepdims is available in numpy.sum and numpy.amax since NumPy 1.7.0
#
# Because SciPy supports versions earlier than 1.7.0, we have to handle
# those old versions differently
if not _NUMPY_170:
# When support for Numpy < 1.7.0 is dropped, this implementation can be
# removed. This implementation is a bit hacky. Similarly to old NumPy's
# sum and amax functions, 'axis' must be an integer or None, tuples and
# lists are not supported. Although 'keepdims' is not supported by these
# old NumPy's functions, this function supports it.
# Solve the shape of the reduced array
if axis is None:
sh_keepdims = (1,) * a.ndim
else:
sh_keepdims = list(a.shape)
sh_keepdims[axis] = 1
a_max = amax(a, axis=axis)
if a_max.ndim > 0:
a_max[~isfinite(a_max)] = 0
elif not isfinite(a_max):
a_max = 0
if b is not None:
b = asarray(b)
tmp = b * exp(a - reshape(a_max, sh_keepdims))
else:
tmp = exp(a - reshape(a_max, sh_keepdims))
# suppress warnings about log of zero
with np.errstate(divide='ignore'):
out = log(sum(tmp, axis=axis))
out += a_max
if keepdims:
# Put back the reduced axes with size one
out = reshape(out, sh_keepdims)
else:
# This is a more elegant implementation, requiring NumPy >= 1.7.0
a_max = amax(a, axis=axis, keepdims=True)
if a_max.ndim > 0:
a_max[~isfinite(a_max)] = 0
elif not isfinite(a_max):
a_max = 0
if b is not None:
b = asarray(b)
tmp = b * exp(a - a_max)
else:
tmp = exp(a - a_max)
# suppress warnings about log of zero
with np.errstate(divide='ignore'):
out = log(sum(tmp, axis=axis, keepdims=keepdims))
if not keepdims:
a_max = squeeze(a_max, axis=axis)
out += a_max
return out
def central_diff_weights(Np, ndiv=1):
"""
Return weights for an Np-point central derivative.
Assumes equally-spaced function points.
If weights are in the vector w, then
derivative is w[0] * f(x-ho*dx) + ... + w[-1] * f(x+h0*dx)
Parameters
----------
Np : int
Number of points for the central derivative.
ndiv : int, optional
Number of divisions. Default is 1.
Notes
-----
Can be inaccurate for large number of points.
"""
if Np < ndiv + 1:
raise ValueError("Number of points must be at least the derivative order + 1.")
if Np % 2 == 0:
raise ValueError("The number of points must be odd.")
from scipy import linalg
ho = Np >> 1
x = arange(-ho,ho+1.0)
x = x[:,newaxis]
X = x**0.0
for k in range(1,Np):
X = hstack([X,x**k])
w = product(arange(1,ndiv+1),axis=0)*linalg.inv(X)[ndiv]
return w
def derivative(func, x0, dx=1.0, n=1, args=(), order=3):
"""
Find the n-th derivative of a function at a point.
Given a function, use a central difference formula with spacing `dx` to
compute the `n`-th derivative at `x0`.
Parameters
----------
func : function
Input function.
x0 : float
The point at which `n`-th derivative is found.
dx : int, optional
Spacing.
n : int, optional
Order of the derivative. Default is 1.
args : tuple, optional
Arguments
order : int, optional
Number of points to use, must be odd.
Notes
-----
Decreasing the step size too small can result in round-off error.
Examples
--------
>>> def f(x):
... return x**3 + x**2
...
>>> derivative(f, 1.0, dx=1e-6)
4.9999999999217337
"""
if order < n + 1:
raise ValueError("'order' (the number of points used to compute the derivative), "
"must be at least the derivative order 'n' + 1.")
if order % 2 == 0:
raise ValueError("'order' (the number of points used to compute the derivative) "
"must be odd.")
# pre-computed for n=1 and 2 and low-order for speed.
if n == 1:
if order == 3:
weights = array([-1,0,1])/2.0
elif order == 5:
weights = array([1,-8,0,8,-1])/12.0
elif order == 7:
weights = array([-1,9,-45,0,45,-9,1])/60.0
elif order == 9:
weights = array([3,-32,168,-672,0,672,-168,32,-3])/840.0
else:
weights = central_diff_weights(order,1)
elif n == 2:
if order == 3:
weights = array([1,-2.0,1])
elif order == 5:
weights = array([-1,16,-30,16,-1])/12.0
elif order == 7:
weights = array([2,-27,270,-490,270,-27,2])/180.0
elif order == 9:
weights = array([-9,128,-1008,8064,-14350,8064,-1008,128,-9])/5040.0
else:
weights = central_diff_weights(order,2)
else:
weights = central_diff_weights(order, n)
val = 0.0
ho = order >> 1
for k in range(order):
val += weights[k]*func(x0+(k-ho)*dx,*args)
return val / product((dx,)*n,axis=0)
def pade(an, m):
"""
Return Pade approximation to a polynomial as the ratio of two polynomials.
Parameters
----------
an : (N,) array_like
Taylor series coefficients.
m : int
The order of the returned approximating polynomials.
Returns
-------
p, q : Polynomial class
The pade approximation of the polynomial defined by `an` is
`p(x)/q(x)`.
Examples
--------
>>> from scipy import misc
>>> e_exp = [1.0, 1.0, 1.0/2.0, 1.0/6.0, 1.0/24.0, 1.0/120.0]
>>> p, q = misc.pade(e_exp, 2)
>>> e_exp.reverse()
>>> e_poly = np.poly1d(e_exp)
Compare ``e_poly(x)`` and the pade approximation ``p(x)/q(x)``
>>> e_poly(1)
2.7166666666666668
>>> p(1)/q(1)
2.7179487179487181
"""
from scipy import linalg
an = asarray(an)
N = len(an) - 1
n = N - m
if n < 0:
raise ValueError("Order of q <m> must be smaller than len(an)-1.")
Akj = eye(N+1, n+1)
Bkj = zeros((N+1, m), 'd')
for row in range(1, m+1):
Bkj[row,:row] = -(an[:row])[::-1]
for row in range(m+1, N+1):
Bkj[row,:] = -(an[row-m:row])[::-1]
C = hstack((Akj, Bkj))
pq = linalg.solve(C, an)
p = pq[:n+1]
q = r_[1.0, pq[n+1:]]
return poly1d(p[::-1]), poly1d(q[::-1])
def lena():
"""
Get classic image processing example image, Lena, at 8-bit grayscale
bit-depth, 512 x 512 size.
Parameters
----------
None
Returns
-------
lena : ndarray
Lena image
Notes
-----
Though safe for work in most places, this sexualized image is drawn from
Playboy and makes some viewers uncomfortable. It has been very widely
used as an example in image processing and is therefore made available
for compatibility. For new code that needs an example image we recommend
`face` or `ascent`.
Examples
--------
>>> import scipy.misc
>>> lena = scipy.misc.lena()
>>> lena.shape
(512, 512)
>>> lena.max()
245
>>> lena.dtype
dtype('int32')
>>> import matplotlib.pyplot as plt
>>> plt.gray()
>>> plt.imshow(lena)
>>> plt.show()
"""
import pickle
import os
fname = os.path.join(os.path.dirname(__file__),'lena.dat')
f = open(fname,'rb')
lena = array(pickle.load(f))
f.close()
return lena
def ascent():
"""
Get an 8-bit grayscale bit-depth, 512 x 512 derived image for easy use in demos
The image is derived from accent-to-the-top.jpg at
http://www.public-domain-image.com/people-public-domain-images-pictures/
Parameters
----------
None
Returns
-------
ascent : ndarray
convenient image to use for testing and demonstration
Examples
--------
>>> import scipy.misc
>>> ascent = scipy.misc.ascent()
>>> ascent.shape
(512, 512)
>>> ascent.max()
255
>>> import matplotlib.pyplot as plt
>>> plt.gray()
>>> plt.imshow(ascent)
>>> plt.show()
"""
import pickle
import os
fname = os.path.join(os.path.dirname(__file__),'ascent.dat')
with open(fname, 'rb') as f:
ascent = array(pickle.load(f))
return ascent
def face(gray=False):
"""
Get a 1024 x 768, color image of a raccoon face.
raccoon-procyon-lotor.jpg at http://www.public-domain-image.com
Parameters
----------
gray : bool, optional
If True then return color image, otherwise return an 8-bit gray-scale
Returns
-------
face : ndarray
image of a racoon face
Examples
--------
>>> import scipy.misc
>>> face = scipy.misc.face()
>>> face.shape
(768, 1024, 3)
>>> face.max()
230
>>> face.dtype
dtype('uint8')
>>> import matplotlib.pyplot as plt
>>> plt.gray()
>>> plt.imshow(face)
>>> plt.show()
"""
import bz2
import os
with open(os.path.join(os.path.dirname(__file__), 'face.dat'), 'rb') as f:
rawdata = f.read()
data = bz2.decompress(rawdata)
face = fromstring(data, dtype='uint8')
face.shape = (768, 1024, 3)
if gray is True:
face = (0.21 * face[:,:,0] + 0.71 * face[:,:,1] + 0.07 * face[:,:,2]).astype('uint8')
return face
| gpl-2.0 |
davidgbe/scikit-learn | examples/linear_model/plot_ransac.py | 250 | 1673 | """
===========================================
Robust linear model estimation using RANSAC
===========================================
In this example we see how to robustly fit a linear model to faulty data using
the RANSAC algorithm.
"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn import linear_model, datasets
n_samples = 1000
n_outliers = 50
X, y, coef = datasets.make_regression(n_samples=n_samples, n_features=1,
n_informative=1, noise=10,
coef=True, random_state=0)
# Add outlier data
np.random.seed(0)
X[:n_outliers] = 3 + 0.5 * np.random.normal(size=(n_outliers, 1))
y[:n_outliers] = -3 + 10 * np.random.normal(size=n_outliers)
# Fit line using all data
model = linear_model.LinearRegression()
model.fit(X, y)
# Robustly fit linear model with RANSAC algorithm
model_ransac = linear_model.RANSACRegressor(linear_model.LinearRegression())
model_ransac.fit(X, y)
inlier_mask = model_ransac.inlier_mask_
outlier_mask = np.logical_not(inlier_mask)
# Predict data of estimated models
line_X = np.arange(-5, 5)
line_y = model.predict(line_X[:, np.newaxis])
line_y_ransac = model_ransac.predict(line_X[:, np.newaxis])
# Compare estimated coefficients
print("Estimated coefficients (true, normal, RANSAC):")
print(coef, model.coef_, model_ransac.estimator_.coef_)
plt.plot(X[inlier_mask], y[inlier_mask], '.g', label='Inliers')
plt.plot(X[outlier_mask], y[outlier_mask], '.r', label='Outliers')
plt.plot(line_X, line_y, '-k', label='Linear regressor')
plt.plot(line_X, line_y_ransac, '-b', label='RANSAC regressor')
plt.legend(loc='lower right')
plt.show()
| bsd-3-clause |
kashif/scikit-learn | sklearn/ensemble/tests/test_gradient_boosting.py | 43 | 39945 | """
Testing for the gradient boosting module (sklearn.ensemble.gradient_boosting).
"""
import warnings
import numpy as np
from itertools import product
from scipy.sparse import csr_matrix
from scipy.sparse import csc_matrix
from scipy.sparse import coo_matrix
from sklearn import datasets
from sklearn.base import clone
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble.gradient_boosting import ZeroEstimator
from sklearn.metrics import mean_squared_error
from sklearn.utils import check_random_state, tosequence
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import skip_if_32bit
from sklearn.exceptions import DataConversionWarning
from sklearn.exceptions import NotFittedError
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
rng = np.random.RandomState(0)
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def check_classification_toy(presort, loss):
# Check classification on a toy dataset.
clf = GradientBoostingClassifier(loss=loss, n_estimators=10,
random_state=1, presort=presort)
assert_raises(ValueError, clf.predict, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf.estimators_))
deviance_decrease = (clf.train_score_[:-1] - clf.train_score_[1:])
assert_true(np.any(deviance_decrease >= 0.0))
leaves = clf.apply(X)
assert_equal(leaves.shape, (6, 10, 1))
def test_classification_toy():
for presort, loss in product(('auto', True, False),
('deviance', 'exponential')):
yield check_classification_toy, presort, loss
def test_parameter_checks():
# Check input parameter validation.
assert_raises(ValueError,
GradientBoostingClassifier(n_estimators=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(n_estimators=-1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(learning_rate=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(learning_rate=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='foobar').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=1.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_leaf=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_leaf=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_weight_fraction_leaf=-1.).fit,
X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_weight_fraction_leaf=0.6).fit,
X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=1.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=-0.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(max_depth=-0.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(max_depth=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(init={}).fit, X, y)
# test fit before feature importance
assert_raises(ValueError,
lambda: GradientBoostingClassifier().feature_importances_)
# deviance requires ``n_classes >= 2``.
assert_raises(ValueError,
lambda X, y: GradientBoostingClassifier(
loss='deviance').fit(X, y),
X, [0, 0, 0, 0])
def test_loss_function():
assert_raises(ValueError,
GradientBoostingClassifier(loss='ls').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='lad').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='quantile').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='huber').fit, X, y)
assert_raises(ValueError,
GradientBoostingRegressor(loss='deviance').fit, X, y)
assert_raises(ValueError,
GradientBoostingRegressor(loss='exponential').fit, X, y)
def check_classification_synthetic(presort, loss):
# Test GradientBoostingClassifier on synthetic dataset used by
# Hastie et al. in ESLII Example 12.7.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=2,
max_depth=1, loss=loss,
learning_rate=1.0, random_state=0)
gbrt.fit(X_train, y_train)
error_rate = (1.0 - gbrt.score(X_test, y_test))
assert_less(error_rate, 0.09)
gbrt = GradientBoostingClassifier(n_estimators=200, min_samples_split=2,
max_depth=1, loss=loss,
learning_rate=1.0, subsample=0.5,
random_state=0,
presort=presort)
gbrt.fit(X_train, y_train)
error_rate = (1.0 - gbrt.score(X_test, y_test))
assert_less(error_rate, 0.08)
def test_classification_synthetic():
for presort, loss in product(('auto', True, False), ('deviance', 'exponential')):
yield check_classification_synthetic, presort, loss
def check_boston(presort, loss, subsample):
# Check consistency on dataset boston house prices with least squares
# and least absolute deviation.
ones = np.ones(len(boston.target))
last_y_pred = None
for sample_weight in None, ones, 2 * ones:
clf = GradientBoostingRegressor(n_estimators=100,
loss=loss,
max_depth=4,
subsample=subsample,
min_samples_split=2,
random_state=1,
presort=presort)
assert_raises(ValueError, clf.predict, boston.data)
clf.fit(boston.data, boston.target,
sample_weight=sample_weight)
leaves = clf.apply(boston.data)
assert_equal(leaves.shape, (506, 100))
y_pred = clf.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert_less(mse, 6.0)
if last_y_pred is not None:
assert_array_almost_equal(last_y_pred, y_pred)
last_y_pred = y_pred
def test_boston():
for presort, loss, subsample in product(('auto', True, False),
('ls', 'lad', 'huber'),
(1.0, 0.5)):
yield check_boston, presort, loss, subsample
def check_iris(presort, subsample, sample_weight):
# Check consistency on dataset iris.
clf = GradientBoostingClassifier(n_estimators=100,
loss='deviance',
random_state=1,
subsample=subsample,
presort=presort)
clf.fit(iris.data, iris.target, sample_weight=sample_weight)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.9)
leaves = clf.apply(iris.data)
assert_equal(leaves.shape, (150, 100, 3))
def test_iris():
ones = np.ones(len(iris.target))
for presort, subsample, sample_weight in product(('auto', True, False),
(1.0, 0.5),
(None, ones)):
yield check_iris, presort, subsample, sample_weight
def test_regression_synthetic():
# Test on synthetic regression datasets used in Leo Breiman,
    # `Bagging Predictors`. Machine Learning 24(2): 123-140 (1996).
random_state = check_random_state(1)
regression_params = {'n_estimators': 100, 'max_depth': 4,
'min_samples_split': 2, 'learning_rate': 0.1,
'loss': 'ls'}
# Friedman1
X, y = datasets.make_friedman1(n_samples=1200,
random_state=random_state,
noise=1.0)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
for presort in True, False:
clf = GradientBoostingRegressor(presort=presort)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert_less(mse, 5.0)
# Friedman2
X, y = datasets.make_friedman2(n_samples=1200, random_state=random_state)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
for presort in True, False:
regression_params['presort'] = presort
clf = GradientBoostingRegressor(**regression_params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert_less(mse, 1700.0)
# Friedman3
X, y = datasets.make_friedman3(n_samples=1200, random_state=random_state)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
for presort in True, False:
regression_params['presort'] = presort
clf = GradientBoostingRegressor(**regression_params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert_less(mse, 0.015)
def test_feature_importances():
X = np.array(boston.data, dtype=np.float32)
y = np.array(boston.target, dtype=np.float32)
for presort in True, False:
clf = GradientBoostingRegressor(n_estimators=100, max_depth=5,
min_samples_split=2, random_state=1,
presort=presort)
clf.fit(X, y)
assert_true(hasattr(clf, 'feature_importances_'))
# XXX: Remove this test in 0.19 after transform support to estimators
# is removed.
X_new = assert_warns(
DeprecationWarning, clf.transform, X, threshold="mean")
assert_less(X_new.shape[1], X.shape[1])
feature_mask = (
clf.feature_importances_ > clf.feature_importances_.mean())
assert_array_almost_equal(X_new, X[:, feature_mask])
def test_probability_log():
# Predict probabilities.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.predict_proba, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# check if probabilities are in [0, 1].
y_proba = clf.predict_proba(T)
assert_true(np.all(y_proba >= 0.0))
assert_true(np.all(y_proba <= 1.0))
# derive predictions from probabilities
y_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0)
assert_array_equal(y_pred, true_result)
def test_check_inputs():
# Test input checks (shape and type of X and y).
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.fit, X, y + [0, 1])
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.fit, X, y,
sample_weight=([1] * len(y)) + [0, 1])
def test_check_inputs_predict():
# X has wrong shape
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y)
x = np.array([1.0, 2.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
x = np.array([[]])
assert_raises(ValueError, clf.predict, x)
x = np.array([1.0, 2.0, 3.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1)
clf.fit(X, rng.rand(len(X)))
x = np.array([1.0, 2.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
x = np.array([[]])
assert_raises(ValueError, clf.predict, x)
x = np.array([1.0, 2.0, 3.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
def test_check_max_features():
# test if max_features is valid.
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=0)
assert_raises(ValueError, clf.fit, X, y)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=(len(X[0]) + 1))
assert_raises(ValueError, clf.fit, X, y)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=-0.1)
assert_raises(ValueError, clf.fit, X, y)
def test_max_feature_regression():
# Test to make sure random state is set properly.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=5,
max_depth=2, learning_rate=.1,
max_features=2, random_state=1)
gbrt.fit(X_train, y_train)
deviance = gbrt.loss_(y_test, gbrt.decision_function(X_test))
assert_true(deviance < 0.5, "GB failed with deviance %.4f" % deviance)
def test_max_feature_auto():
# Test if max features is set properly for floats and str.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
_, n_features = X.shape
X_train = X[:2000]
y_train = y[:2000]
gbrt = GradientBoostingClassifier(n_estimators=1, max_features='auto')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.sqrt(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='auto')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, n_features)
gbrt = GradientBoostingRegressor(n_estimators=1, max_features=0.3)
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(n_features * 0.3))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='sqrt')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.sqrt(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='log2')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.log2(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1,
max_features=0.01 / X.shape[1])
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, 1)
def test_staged_predict():
# Test whether staged decision function eventually gives
# the same prediction.
X, y = datasets.make_friedman1(n_samples=1200,
random_state=1, noise=1.0)
X_train, y_train = X[:200], y[:200]
X_test = X[200:]
clf = GradientBoostingRegressor()
# test raise ValueError if not fitted
assert_raises(ValueError, lambda X: np.fromiter(
clf.staged_predict(X), dtype=np.float64), X_test)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
# test if prediction for last stage equals ``predict``
for y in clf.staged_predict(X_test):
assert_equal(y.shape, y_pred.shape)
assert_array_equal(y_pred, y)
def test_staged_predict_proba():
# Test whether staged predict proba eventually gives
# the same prediction.
X, y = datasets.make_hastie_10_2(n_samples=1200,
random_state=1)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingClassifier(n_estimators=20)
# test raise NotFittedError if not fitted
assert_raises(NotFittedError, lambda X: np.fromiter(
clf.staged_predict_proba(X), dtype=np.float64), X_test)
clf.fit(X_train, y_train)
# test if prediction for last stage equals ``predict``
for y_pred in clf.staged_predict(X_test):
assert_equal(y_test.shape, y_pred.shape)
assert_array_equal(clf.predict(X_test), y_pred)
# test if prediction for last stage equals ``predict_proba``
for staged_proba in clf.staged_predict_proba(X_test):
assert_equal(y_test.shape[0], staged_proba.shape[0])
assert_equal(2, staged_proba.shape[1])
assert_array_equal(clf.predict_proba(X_test), staged_proba)
def test_staged_functions_defensive():
# test that staged_functions make defensive copies
rng = np.random.RandomState(0)
X = rng.uniform(size=(10, 3))
y = (4 * X[:, 0]).astype(np.int) + 1 # don't predict zeros
for estimator in [GradientBoostingRegressor(),
GradientBoostingClassifier()]:
estimator.fit(X, y)
for func in ['predict', 'decision_function', 'predict_proba']:
staged_func = getattr(estimator, "staged_" + func, None)
if staged_func is None:
# regressor has no staged_predict_proba
continue
with warnings.catch_warnings(record=True):
staged_result = list(staged_func(X))
staged_result[1][:] = 0
assert_true(np.all(staged_result[0] != 0))
def test_serialization():
# Check model serialization.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
try:
import cPickle as pickle
except ImportError:
import pickle
serialized_clf = pickle.dumps(clf, protocol=pickle.HIGHEST_PROTOCOL)
clf = None
clf = pickle.loads(serialized_clf)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_degenerate_targets():
# Check if we can fit even though all targets are equal.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
# classifier should raise exception
assert_raises(ValueError, clf.fit, X, np.ones(len(X)))
clf = GradientBoostingRegressor(n_estimators=100, random_state=1)
clf.fit(X, np.ones(len(X)))
clf.predict([rng.rand(2)])
assert_array_equal(np.ones((1,), dtype=np.float64),
clf.predict([rng.rand(2)]))
def test_quantile_loss():
# Check if quantile loss with alpha=0.5 equals lad.
clf_quantile = GradientBoostingRegressor(n_estimators=100, loss='quantile',
max_depth=4, alpha=0.5,
random_state=7)
clf_quantile.fit(boston.data, boston.target)
y_quantile = clf_quantile.predict(boston.data)
clf_lad = GradientBoostingRegressor(n_estimators=100, loss='lad',
max_depth=4, random_state=7)
clf_lad.fit(boston.data, boston.target)
y_lad = clf_lad.predict(boston.data)
assert_array_almost_equal(y_quantile, y_lad, decimal=4)
def test_symbol_labels():
# Test with non-integer class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
symbol_y = tosequence(map(str, y))
clf.fit(X, symbol_y)
assert_array_equal(clf.predict(T), tosequence(map(str, true_result)))
assert_equal(100, len(clf.estimators_))
def test_float_class_labels():
# Test with float class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
float_y = np.asarray(y, dtype=np.float32)
clf.fit(X, float_y)
assert_array_equal(clf.predict(T),
np.asarray(true_result, dtype=np.float32))
assert_equal(100, len(clf.estimators_))
def test_shape_y():
# Test with float class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
y_ = np.asarray(y, dtype=np.int32)
y_ = y_[:, np.newaxis]
# This will raise a DataConversionWarning that we want to
# "always" raise, elsewhere the warnings gets ignored in the
# later tests, and the tests that check for this warning fail
assert_warns(DataConversionWarning, clf.fit, X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_mem_layout():
# Test with different memory layouts of X and y
X_ = np.asfortranarray(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X_, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
X_ = np.ascontiguousarray(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X_, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
y_ = np.asarray(y, dtype=np.int32)
y_ = np.ascontiguousarray(y_)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
y_ = np.asarray(y, dtype=np.int32)
y_ = np.asfortranarray(y_)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_oob_improvement():
# Test if oob improvement has correct shape and regression test.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
subsample=0.5)
clf.fit(X, y)
assert_equal(clf.oob_improvement_.shape[0], 100)
# hard-coded regression test - change if modification in OOB computation
assert_array_almost_equal(clf.oob_improvement_[:5],
np.array([0.19, 0.15, 0.12, -0.12, -0.11]),
decimal=2)
def test_oob_improvement_raise():
# Test if oob improvement has correct shape.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
subsample=1.0)
clf.fit(X, y)
assert_raises(AttributeError, lambda: clf.oob_improvement_)
def test_oob_multiclass_iris():
# Check OOB improvement on multi-class dataset.
clf = GradientBoostingClassifier(n_estimators=100, loss='deviance',
random_state=1, subsample=0.5)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.9)
assert_equal(clf.oob_improvement_.shape[0], clf.n_estimators)
# hard-coded regression test - change if modification in OOB computation
# FIXME: the following snippet does not yield the same results on 32 bits
# assert_array_almost_equal(clf.oob_improvement_[:5],
# np.array([12.68, 10.45, 8.18, 6.43, 5.13]),
# decimal=2)
def test_verbose_output():
# Check verbose=1 does not cause error.
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
sys.stdout = StringIO()
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
verbose=1, subsample=0.8)
clf.fit(X, y)
verbose_output = sys.stdout
sys.stdout = old_stdout
# check output
verbose_output.seek(0)
header = verbose_output.readline().rstrip()
# with OOB
true_header = ' '.join(['%10s'] + ['%16s'] * 3) % (
'Iter', 'Train Loss', 'OOB Improve', 'Remaining Time')
assert_equal(true_header, header)
n_lines = sum(1 for l in verbose_output.readlines())
    # one line each for iterations 1-10, then one every 10 iterations up to 100
assert_equal(10 + 9, n_lines)
def test_more_verbose_output():
# Check verbose=2 does not cause error.
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
sys.stdout = StringIO()
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
verbose=2)
clf.fit(X, y)
verbose_output = sys.stdout
sys.stdout = old_stdout
# check output
verbose_output.seek(0)
header = verbose_output.readline().rstrip()
# no OOB
true_header = ' '.join(['%10s'] + ['%16s'] * 2) % (
'Iter', 'Train Loss', 'Remaining Time')
assert_equal(true_header, header)
n_lines = sum(1 for l in verbose_output.readlines())
# 100 lines for n_estimators==100
assert_equal(100, n_lines)
def test_warm_start():
# Test if warm start equals fit.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=200, max_depth=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=200)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.predict(X), est.predict(X))
def test_warm_start_n_estimators():
# Test if warm start equals fit - set n_estimators.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=300, max_depth=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=300)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.predict(X), est.predict(X))
def test_warm_start_max_depth():
# Test if possible to fit trees of different depth in ensemble.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=110, max_depth=2)
est.fit(X, y)
# last 10 trees have different depth
assert_equal(est.estimators_[0, 0].max_depth, 1)
for i in range(1, 11):
assert_equal(est.estimators_[-i, 0].max_depth, 2)
def test_warm_start_clear():
# Test if fit clears state.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1)
est.fit(X, y)
est_2 = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_2.fit(X, y) # inits state
est_2.set_params(warm_start=False)
est_2.fit(X, y) # clears old state and equals est
assert_array_almost_equal(est_2.predict(X), est.predict(X))
def test_warm_start_zero_n_estimators():
# Test if warm start with zero n_estimators raises error
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=0)
assert_raises(ValueError, est.fit, X, y)
def test_warm_start_smaller_n_estimators():
# Test if warm start with smaller n_estimators raises error
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=99)
assert_raises(ValueError, est.fit, X, y)
def test_warm_start_equal_n_estimators():
# Test if warm start with equal n_estimators does nothing
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1)
est.fit(X, y)
est2 = clone(est)
est2.set_params(n_estimators=est.n_estimators, warm_start=True)
est2.fit(X, y)
assert_array_almost_equal(est2.predict(X), est.predict(X))
def test_warm_start_oob_switch():
# Test if oob can be turned on during warm start.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=110, subsample=0.5)
est.fit(X, y)
assert_array_equal(est.oob_improvement_[:100], np.zeros(100))
# the last 10 are not zeros
assert_array_equal(est.oob_improvement_[-10:] == 0.0,
np.zeros(10, dtype=np.bool))
def test_warm_start_oob():
# Test if warm start OOB equals fit.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=200, max_depth=1, subsample=0.5,
random_state=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, subsample=0.5,
random_state=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=200)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.oob_improvement_[:100],
est.oob_improvement_[:100])
def early_stopping_monitor(i, est, locals):
"""Returns True on the 10th iteration. """
if i == 9:
return True
else:
return False
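# Usage note (added for illustration, based on the ``monitor`` hook exercised
# in the test below): ``fit`` calls the monitor after every boosting iteration
# as ``monitor(i, est, locals)`` and stops training early as soon as it returns
# True, so an estimator fitted with this monitor ends up with exactly 10 stages.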
def test_monitor_early_stopping():
# Test if monitor return value works.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=20, max_depth=1, random_state=1, subsample=0.5)
est.fit(X, y, monitor=early_stopping_monitor)
assert_equal(est.n_estimators, 20) # this is not altered
assert_equal(est.estimators_.shape[0], 10)
assert_equal(est.train_score_.shape[0], 10)
assert_equal(est.oob_improvement_.shape[0], 10)
# try refit
est.set_params(n_estimators=30)
est.fit(X, y)
assert_equal(est.n_estimators, 30)
assert_equal(est.estimators_.shape[0], 30)
assert_equal(est.train_score_.shape[0], 30)
est = Cls(n_estimators=20, max_depth=1, random_state=1, subsample=0.5,
warm_start=True)
est.fit(X, y, monitor=early_stopping_monitor)
assert_equal(est.n_estimators, 20)
assert_equal(est.estimators_.shape[0], 10)
assert_equal(est.train_score_.shape[0], 10)
assert_equal(est.oob_improvement_.shape[0], 10)
# try refit
est.set_params(n_estimators=30, warm_start=False)
est.fit(X, y)
assert_equal(est.n_estimators, 30)
assert_equal(est.train_score_.shape[0], 30)
assert_equal(est.estimators_.shape[0], 30)
assert_equal(est.oob_improvement_.shape[0], 30)
def test_complete_classification():
# Test greedy trees with max_depth + 1 leafs.
from sklearn.tree._tree import TREE_LEAF
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
est = GradientBoostingClassifier(n_estimators=20, max_depth=None,
random_state=1, max_leaf_nodes=k + 1)
est.fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_equal(tree.max_depth, k)
assert_equal(tree.children_left[tree.children_left == TREE_LEAF].shape[0],
k + 1)
def test_complete_regression():
# Test greedy trees with max_depth + 1 leafs.
from sklearn.tree._tree import TREE_LEAF
k = 4
est = GradientBoostingRegressor(n_estimators=20, max_depth=None,
random_state=1, max_leaf_nodes=k + 1)
est.fit(boston.data, boston.target)
tree = est.estimators_[-1, 0].tree_
assert_equal(tree.children_left[tree.children_left == TREE_LEAF].shape[0],
k + 1)
def test_zero_estimator_reg():
# Test if ZeroEstimator works for regression.
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init=ZeroEstimator())
est.fit(boston.data, boston.target)
y_pred = est.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert_almost_equal(mse, 33.0, decimal=0)
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(boston.data, boston.target)
y_pred = est.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert_almost_equal(mse, 33.0, decimal=0)
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init='foobar')
assert_raises(ValueError, est.fit, boston.data, boston.target)
def test_zero_estimator_clf():
# Test if ZeroEstimator works for classification.
X = iris.data
y = np.array(iris.target)
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init=ZeroEstimator())
est.fit(X, y)
assert_greater(est.score(X, y), 0.96)
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(X, y)
assert_greater(est.score(X, y), 0.96)
# binary clf
mask = y != 0
y[mask] = 1
y[~mask] = 0
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(X, y)
assert_greater(est.score(X, y), 0.96)
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='foobar')
assert_raises(ValueError, est.fit, X, y)
def test_max_leaf_nodes_max_depth():
# Test precedence of max_leaf_nodes over max_depth.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
all_estimators = [GradientBoostingRegressor,
GradientBoostingClassifier]
k = 4
for GBEstimator in all_estimators:
est = GBEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_greater(tree.max_depth, 1)
est = GBEstimator(max_depth=1).fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_equal(tree.max_depth, 1)
def test_warm_start_wo_nestimators_change():
# Test if warm_start does nothing if n_estimators is not changed.
# Regression test for #3513.
clf = GradientBoostingClassifier(n_estimators=10, warm_start=True)
clf.fit([[0, 1], [2, 3]], [0, 1])
assert_equal(clf.estimators_.shape[0], 10)
clf.fit([[0, 1], [2, 3]], [0, 1])
assert_equal(clf.estimators_.shape[0], 10)
def test_probability_exponential():
# Predict probabilities.
clf = GradientBoostingClassifier(loss='exponential',
n_estimators=100, random_state=1)
assert_raises(ValueError, clf.predict_proba, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# check if probabilities are in [0, 1].
y_proba = clf.predict_proba(T)
assert_true(np.all(y_proba >= 0.0))
assert_true(np.all(y_proba <= 1.0))
score = clf.decision_function(T).ravel()
assert_array_almost_equal(y_proba[:, 1],
1.0 / (1.0 + np.exp(-2 * score)))
# derive predictions from probabilities
y_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0)
assert_array_equal(y_pred, true_result)
def test_non_uniform_weights_toy_edge_case_reg():
X = [[1, 0],
[1, 0],
[1, 0],
[0, 1]]
y = [0, 0, 1, 0]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1]
for loss in ('huber', 'ls', 'lad', 'quantile'):
gb = GradientBoostingRegressor(learning_rate=1.0, n_estimators=2,
loss=loss)
gb.fit(X, y, sample_weight=sample_weight)
assert_greater(gb.predict([[1, 0]])[0], 0.5)
def test_non_uniform_weights_toy_edge_case_clf():
X = [[1, 0],
[1, 0],
[1, 0],
[0, 1]]
y = [0, 0, 1, 0]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1]
for loss in ('deviance', 'exponential'):
        gb = GradientBoostingClassifier(n_estimators=5, loss=loss)
gb.fit(X, y, sample_weight=sample_weight)
assert_array_equal(gb.predict([[1, 0]]), [1])
def check_sparse_input(EstimatorClass, X, X_sparse, y):
dense = EstimatorClass(n_estimators=10, random_state=0,
max_depth=2).fit(X, y)
sparse = EstimatorClass(n_estimators=10, random_state=0, max_depth=2,
presort=False).fit(X_sparse, y)
auto = EstimatorClass(n_estimators=10, random_state=0, max_depth=2,
presort='auto').fit(X_sparse, y)
assert_array_almost_equal(sparse.apply(X), dense.apply(X))
assert_array_almost_equal(sparse.predict(X), dense.predict(X))
assert_array_almost_equal(sparse.feature_importances_,
dense.feature_importances_)
assert_array_almost_equal(sparse.apply(X), auto.apply(X))
assert_array_almost_equal(sparse.predict(X), auto.predict(X))
assert_array_almost_equal(sparse.feature_importances_,
auto.feature_importances_)
    if issubclass(EstimatorClass, GradientBoostingClassifier):
assert_array_almost_equal(sparse.predict_proba(X),
dense.predict_proba(X))
assert_array_almost_equal(sparse.predict_log_proba(X),
dense.predict_log_proba(X))
assert_array_almost_equal(sparse.predict_proba(X),
auto.predict_proba(X))
assert_array_almost_equal(sparse.predict_log_proba(X),
auto.predict_log_proba(X))
@skip_if_32bit
def test_sparse_input():
ests = (GradientBoostingClassifier, GradientBoostingRegressor)
sparse_matrices = (csr_matrix, csc_matrix, coo_matrix)
y, X = datasets.make_multilabel_classification(random_state=0,
n_samples=50,
n_features=1,
n_classes=20)
y = y[:, 0]
for EstimatorClass, sparse_matrix in product(ests, sparse_matrices):
yield check_sparse_input, EstimatorClass, X, sparse_matrix(X), y
| bsd-3-clause |
saketkc/statsmodels | examples/python/glm_formula.py | 33 | 1547 |
## Generalized Linear Models (Formula)
# This notebook illustrates how you can use R-style formulas to fit Generalized Linear Models.
#
# To begin, we load the ``Star98`` dataset and we construct a formula and pre-process the data:
from __future__ import print_function
import statsmodels.api as sm
import statsmodels.formula.api as smf
star98 = sm.datasets.star98.load_pandas().data
formula = 'SUCCESS ~ LOWINC + PERASIAN + PERBLACK + PERHISP + PCTCHRT + PCTYRRND + PERMINTE*AVYRSEXP*AVSALK + PERSPENK*PTRATIO*PCTAF'
dta = star98[['NABOVE', 'NBELOW', 'LOWINC', 'PERASIAN', 'PERBLACK', 'PERHISP',
'PCTCHRT', 'PCTYRRND', 'PERMINTE', 'AVYRSEXP', 'AVSALK',
'PERSPENK', 'PTRATIO', 'PCTAF']]
endog = dta['NABOVE'] / (dta['NABOVE'] + dta.pop('NBELOW'))
del dta['NABOVE']
dta['SUCCESS'] = endog
# Then, we fit the GLM model:
mod1 = smf.glm(formula=formula, data=dta, family=sm.families.Binomial()).fit()
mod1.summary()
# Finally, we define a function to perform a custom data transformation within the formula framework:
def double_it(x):
return 2 * x
formula = 'SUCCESS ~ double_it(LOWINC) + PERASIAN + PERBLACK + PERHISP + PCTCHRT + PCTYRRND + PERMINTE*AVYRSEXP*AVSALK + PERSPENK*PTRATIO*PCTAF'
mod2 = smf.glm(formula=formula, data=dta, family=sm.families.Binomial()).fit()
mod2.summary()
# As expected, the coefficient for ``double_it(LOWINC)`` in the second model is half the size of the ``LOWINC`` coefficient from the first model:
print(mod1.params[1])
print(mod2.params[1] * 2)
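# A small added sanity check (a sketch, not part of the original notebook):
# since ``double_it`` simply rescales the regressor by 2, the fitted
# coefficient is rescaled by 1/2, so the ratio printed below should be ~1.0.
print(mod1.params[1] / (2 * mod2.params[1]))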
| bsd-3-clause |
jseabold/statsmodels | statsmodels/nonparametric/kde.py | 4 | 19093 | """
Univariate Kernel Density Estimators
References
----------
Racine, Jeff. (2008) "Nonparametric Econometrics: A Primer," Foundation and
Trends in Econometrics: Vol 3: No 1, pp1-88.
http://dx.doi.org/10.1561/0800000009
https://en.wikipedia.org/wiki/Kernel_%28statistics%29
Silverman, B.W. Density Estimation for Statistics and Data Analysis.
"""
import numpy as np
from scipy import integrate, stats
from statsmodels.sandbox.nonparametric import kernels
from statsmodels.tools.decorators import cache_readonly
from statsmodels.tools.validation import array_like, float_like
from . import bandwidths
from .kdetools import forrt, revrt, silverman_transform
from .linbin import fast_linbin
# Kernels Switch for estimators
kernel_switch = dict(
gau=kernels.Gaussian,
epa=kernels.Epanechnikov,
uni=kernels.Uniform,
tri=kernels.Triangular,
biw=kernels.Biweight,
triw=kernels.Triweight,
cos=kernels.Cosine,
cos2=kernels.Cosine2,
)
def _checkisfit(self):
try:
self.density
except Exception:
raise ValueError("Call fit to fit the density first")
# Kernel Density Estimator Class
class KDEUnivariate(object):
"""
Univariate Kernel Density Estimator.
Parameters
----------
endog : array_like
The variable for which the density estimate is desired.
Notes
-----
If cdf, sf, cumhazard, or entropy are computed, they are computed based on
the definition of the kernel rather than the FFT approximation, even if
the density is fit with FFT = True.
`KDEUnivariate` is much faster than `KDEMultivariate`, due to its FFT-based
implementation. It should be preferred for univariate, continuous data.
`KDEMultivariate` also supports mixed data.
See Also
--------
KDEMultivariate
kdensity, kdensityfft
Examples
--------
    >>> import numpy as np
    >>> import statsmodels.api as sm
>>> import matplotlib.pyplot as plt
>>> nobs = 300
>>> np.random.seed(1234) # Seed random generator
>>> dens = sm.nonparametric.KDEUnivariate(np.random.normal(size=nobs))
>>> dens.fit()
>>> plt.plot(dens.cdf)
>>> plt.show()
"""
def __init__(self, endog):
self.endog = array_like(endog, "endog", ndim=1, contiguous=True)
def fit(
self,
kernel="gau",
bw="normal_reference",
fft=True,
weights=None,
gridsize=None,
adjust=1,
cut=3,
clip=(-np.inf, np.inf),
):
"""
Attach the density estimate to the KDEUnivariate class.
Parameters
----------
kernel : str
The Kernel to be used. Choices are:
- "biw" for biweight
- "cos" for cosine
- "epa" for Epanechnikov
- "gau" for Gaussian.
- "tri" for triangular
- "triw" for triweight
- "uni" for uniform
bw : str, float, callable
The bandwidth to use. Choices are:
- "scott" - 1.059 * A * nobs ** (-1/5.), where A is
`min(std(x),IQR/1.34)`
- "silverman" - .9 * A * nobs ** (-1/5.), where A is
`min(std(x),IQR/1.34)`
- "normal_reference" - C * A * nobs ** (-1/5.), where C is
calculated from the kernel. Equivalent (up to 2 dp) to the
"scott" bandwidth for gaussian kernels. See bandwidths.py
- If a float is given, its value is used as the bandwidth.
            - If a callable is given, its return value is used.
The callable should take exactly two parameters, i.e.,
fn(x, kern), and return a float, where:
* x - the clipped input data
* kern - the kernel instance used
fft : bool
Whether or not to use FFT. FFT implementation is more
computationally efficient. However, only the Gaussian kernel
is implemented. If FFT is False, then a 'nobs' x 'gridsize'
intermediate array is created.
gridsize : int
If gridsize is None, max(len(x), 50) is used.
cut : float
Defines the length of the grid past the lowest and highest values
of x so that the kernel goes to zero. The end points are
-/+ cut*bw*{min(x) or max(x)}
adjust : float
An adjustment factor for the bw. Bandwidth becomes bw * adjust.
Returns
-------
KDEUnivariate
            The fitted instance.
"""
if isinstance(bw, str):
self.bw_method = bw
else:
self.bw_method = "user-given"
if not callable(bw):
bw = float_like(bw, "bw")
endog = self.endog
if fft:
if kernel != "gau":
msg = "Only gaussian kernel is available for fft"
raise NotImplementedError(msg)
if weights is not None:
msg = "Weights are not implemented for fft"
raise NotImplementedError(msg)
density, grid, bw = kdensityfft(
endog,
kernel=kernel,
bw=bw,
adjust=adjust,
weights=weights,
gridsize=gridsize,
clip=clip,
cut=cut,
)
else:
density, grid, bw = kdensity(
endog,
kernel=kernel,
bw=bw,
adjust=adjust,
weights=weights,
gridsize=gridsize,
clip=clip,
cut=cut,
)
self.density = density
self.support = grid
self.bw = bw
self.kernel = kernel_switch[kernel](h=bw) # we instantiate twice,
        # should this be passed to funcs?
# put here to ensure empty cache after re-fit with new options
self.kernel.weights = weights
if weights is not None:
self.kernel.weights /= weights.sum()
self._cache = {}
return self
@cache_readonly
def cdf(self):
"""
Returns the cumulative distribution function evaluated at the support.
Notes
-----
Will not work if fit has not been called.
"""
_checkisfit(self)
kern = self.kernel
if kern.domain is None: # TODO: test for grid point at domain bound
a, b = -np.inf, np.inf
else:
a, b = kern.domain
def func(x, s):
return kern.density(s, x)
support = self.support
support = np.r_[a, support]
gridsize = len(support)
endog = self.endog
probs = [
integrate.quad(func, support[i - 1], support[i], args=endog)[0]
for i in range(1, gridsize)
]
return np.cumsum(probs)
@cache_readonly
def cumhazard(self):
"""
Returns the hazard function evaluated at the support.
Notes
-----
Will not work if fit has not been called.
"""
_checkisfit(self)
return -np.log(self.sf)
@cache_readonly
def sf(self):
"""
Returns the survival function evaluated at the support.
Notes
-----
Will not work if fit has not been called.
"""
_checkisfit(self)
return 1 - self.cdf
@cache_readonly
def entropy(self):
"""
Returns the differential entropy evaluated at the support
Notes
-----
Will not work if fit has not been called. 1e-12 is added to each
probability to ensure that log(0) is not called.
"""
_checkisfit(self)
def entr(x, s):
pdf = kern.density(s, x)
return pdf * np.log(pdf + 1e-12)
kern = self.kernel
if kern.domain is not None:
            a, b = kern.domain
else:
a, b = -np.inf, np.inf
endog = self.endog
# TODO: below could run into integr problems, cf. stats.dist._entropy
return -integrate.quad(entr, a, b, args=(endog,))[0]
@cache_readonly
def icdf(self):
"""
Inverse Cumulative Distribution (Quantile) Function
Notes
-----
Will not work if fit has not been called. Uses
`scipy.stats.mstats.mquantiles`.
"""
_checkisfit(self)
gridsize = len(self.density)
return stats.mstats.mquantiles(self.endog, np.linspace(0, 1, gridsize))
def evaluate(self, point):
"""
Evaluate density at a point or points.
Parameters
----------
point : {float, ndarray}
Point(s) at which to evaluate the density.
"""
_checkisfit(self)
return self.kernel.density(self.endog, point)
# Kernel Density Estimator Functions
def kdensity(
x,
kernel="gau",
bw="normal_reference",
weights=None,
gridsize=None,
adjust=1,
clip=(-np.inf, np.inf),
cut=3,
retgrid=True,
):
"""
Rosenblatt-Parzen univariate kernel density estimator.
Parameters
----------
x : array_like
The variable for which the density estimate is desired.
kernel : str
The Kernel to be used. Choices are
- "biw" for biweight
- "cos" for cosine
- "epa" for Epanechnikov
- "gau" for Gaussian.
- "tri" for triangular
- "triw" for triweight
- "uni" for uniform
bw : str, float, callable
The bandwidth to use. Choices are:
- "scott" - 1.059 * A * nobs ** (-1/5.), where A is
`min(std(x),IQR/1.34)`
- "silverman" - .9 * A * nobs ** (-1/5.), where A is
`min(std(x),IQR/1.34)`
- "normal_reference" - C * A * nobs ** (-1/5.), where C is
calculated from the kernel. Equivalent (up to 2 dp) to the
"scott" bandwidth for gaussian kernels. See bandwidths.py
- If a float is given, its value is used as the bandwidth.
        - If a callable is given, its return value is used.
The callable should take exactly two parameters, i.e.,
fn(x, kern), and return a float, where:
* x - the clipped input data
* kern - the kernel instance used
weights : array or None
Optional weights. If the x value is clipped, then this weight is
also dropped.
gridsize : int
If gridsize is None, max(len(x), 50) is used.
adjust : float
An adjustment factor for the bw. Bandwidth becomes bw * adjust.
clip : tuple
Observations in x that are outside of the range given by clip are
dropped. The number of observations in x is then shortened.
cut : float
Defines the length of the grid past the lowest and highest values of x
so that the kernel goes to zero. The end points are
-/+ cut*bw*{min(x) or max(x)}
retgrid : bool
Whether or not to return the grid over which the density is estimated.
Returns
-------
density : ndarray
The densities estimated at the grid points.
grid : ndarray, optional
The grid points at which the density is estimated.
Notes
-----
Creates an intermediate (`gridsize` x `nobs`) array. Use FFT for a more
computationally efficient version.
"""
x = np.asarray(x)
if x.ndim == 1:
x = x[:, None]
clip_x = np.logical_and(x > clip[0], x < clip[1])
x = x[clip_x]
nobs = len(x) # after trim
if gridsize is None:
gridsize = max(nobs, 50) # do not need to resize if no FFT
# handle weights
if weights is None:
weights = np.ones(nobs)
q = nobs
else:
# ensure weights is a numpy array
weights = np.asarray(weights)
if len(weights) != len(clip_x):
msg = "The length of the weights must be the same as the given x."
raise ValueError(msg)
weights = weights[clip_x.squeeze()]
q = weights.sum()
# Get kernel object corresponding to selection
kern = kernel_switch[kernel]()
if callable(bw):
bw = float(bw(x, kern))
# user passed a callable custom bandwidth function
elif isinstance(bw, str):
bw = bandwidths.select_bandwidth(x, bw, kern)
# will cross-val fit this pattern?
else:
bw = float_like(bw, "bw")
bw *= adjust
a = np.min(x, axis=0) - cut * bw
b = np.max(x, axis=0) + cut * bw
grid = np.linspace(a, b, gridsize)
k = (
x.T - grid[:, None]
) / bw # uses broadcasting to make a gridsize x nobs
# set kernel bandwidth
kern.seth(bw)
# truncate to domain
if (
kern.domain is not None
): # will not work for piecewise kernels like parzen
z_lo, z_high = kern.domain
domain_mask = (k < z_lo) | (k > z_high)
k = kern(k) # estimate density
k[domain_mask] = 0
else:
k = kern(k) # estimate density
k[k < 0] = 0 # get rid of any negative values, do we need this?
dens = np.dot(k, weights) / (q * bw)
if retgrid:
return dens, grid, bw
else:
return dens, bw
def kdensityfft(
x,
kernel="gau",
bw="normal_reference",
weights=None,
gridsize=None,
adjust=1,
clip=(-np.inf, np.inf),
cut=3,
retgrid=True,
):
"""
Rosenblatt-Parzen univariate kernel density estimator
Parameters
----------
x : array_like
The variable for which the density estimate is desired.
kernel : str
ONLY GAUSSIAN IS CURRENTLY IMPLEMENTED.
"bi" for biweight
"cos" for cosine
"epa" for Epanechnikov, default
"epa2" for alternative Epanechnikov
"gau" for Gaussian.
"par" for Parzen
"rect" for rectangular
"tri" for triangular
bw : str, float, callable
The bandwidth to use. Choices are:
- "scott" - 1.059 * A * nobs ** (-1/5.), where A is
`min(std(x),IQR/1.34)`
- "silverman" - .9 * A * nobs ** (-1/5.), where A is
`min(std(x),IQR/1.34)`
- "normal_reference" - C * A * nobs ** (-1/5.), where C is
calculated from the kernel. Equivalent (up to 2 dp) to the
"scott" bandwidth for gaussian kernels. See bandwidths.py
- If a float is given, its value is used as the bandwidth.
        - If a callable is given, its return value is used.
The callable should take exactly two parameters, i.e.,
fn(x, kern), and return a float, where:
* x - the clipped input data
* kern - the kernel instance used
weights : array or None
WEIGHTS ARE NOT CURRENTLY IMPLEMENTED.
Optional weights. If the x value is clipped, then this weight is
also dropped.
gridsize : int
        If gridsize is None, max(len(x), 512) is used. Note that the provided
number is rounded up to the next highest power of 2.
adjust : float
An adjustment factor for the bw. Bandwidth becomes bw * adjust.
clip : tuple
Observations in x that are outside of the range given by clip are
dropped. The number of observations in x is then shortened.
cut : float
Defines the length of the grid past the lowest and highest values of x
so that the kernel goes to zero. The end points are
-/+ cut*bw*{x.min() or x.max()}
retgrid : bool
Whether or not to return the grid over which the density is estimated.
Returns
-------
density : ndarray
The densities estimated at the grid points.
grid : ndarray, optional
The grid points at which the density is estimated.
Notes
-----
Only the default kernel is implemented. Weights are not implemented yet.
This follows Silverman (1982) with changes suggested by Jones and Lotwick
(1984). However, the discretization step is replaced by linear binning
of Fan and Marron (1994). This should be extended to accept the parts
that are dependent only on the data to speed things up for
cross-validation.
References
----------
Fan, J. and J.S. Marron. (1994) `Fast implementations of nonparametric
curve estimators`. Journal of Computational and Graphical Statistics.
3.1, 35-56.
Jones, M.C. and H.W. Lotwick. (1984) `Remark AS R50: A Remark on Algorithm
    AS 176. Kernel Density Estimation Using the Fast Fourier Transform`.
Journal of the Royal Statistical Society. Series C. 33.1, 120-2.
Silverman, B.W. (1982) `Algorithm AS 176. Kernel density estimation using
    the Fast Fourier Transform`. Journal of the Royal Statistical Society.
Series C. 31.2, 93-9.
"""
x = np.asarray(x)
# will not work for two columns.
x = x[np.logical_and(x > clip[0], x < clip[1])]
# Get kernel object corresponding to selection
kern = kernel_switch[kernel]()
if callable(bw):
bw = float(bw(x, kern))
# user passed a callable custom bandwidth function
elif isinstance(bw, str):
# if bw is None, select optimal bandwidth for kernel
bw = bandwidths.select_bandwidth(x, bw, kern)
# will cross-val fit this pattern?
else:
bw = float_like(bw, "bw")
bw *= adjust
nobs = len(x) # after trim
# 1 Make grid and discretize the data
if gridsize is None:
gridsize = np.max((nobs, 512.0))
gridsize = 2 ** np.ceil(np.log2(gridsize)) # round to next power of 2
a = np.min(x) - cut * bw
b = np.max(x) + cut * bw
grid, delta = np.linspace(a, b, int(gridsize), retstep=True)
RANGE = b - a
# TODO: Fix this?
# This is the Silverman binning function, but I believe it's buggy (SS)
# weighting according to Silverman
# count = counts(x,grid)
# binned = np.zeros_like(grid) #xi_{k} in Silverman
# j = 0
# for k in range(int(gridsize-1)):
# if count[k]>0: # there are points of x in the grid here
# Xingrid = x[j:j+count[k]] # get all these points
# # get weights at grid[k],grid[k+1]
# binned[k] += np.sum(grid[k+1]-Xingrid)
# binned[k+1] += np.sum(Xingrid-grid[k])
# j += count[k]
# binned /= (nobs)*delta**2 # normalize binned to sum to 1/delta
# NOTE: THE ABOVE IS WRONG, JUST TRY WITH LINEAR BINNING
binned = fast_linbin(x, a, b, gridsize) / (delta * nobs)
# step 2 compute FFT of the weights, using Munro (1976) FFT convention
y = forrt(binned)
# step 3 and 4 for optimal bw compute zstar and the density estimate f
# do not have to redo the above if just changing bw, ie., for cross val
# NOTE: silverman_transform is the closed form solution of the FFT of the
# gaussian kernel. Not yet sure how to generalize it.
zstar = silverman_transform(bw, gridsize, RANGE) * y
# 3.49 in Silverman
# 3.50 w Gaussian kernel
f = revrt(zstar)
if retgrid:
return f, grid, bw
else:
return f, bw
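# Usage sketch (illustrative, with hypothetical variable names): both
# estimators return ``(density, grid, bw)`` when ``retgrid`` is True, e.g.
#
#     x = np.random.standard_normal(500)
#     dens_fft, grid, bw = kdensityfft(x, kernel="gau", bw="silverman")
#     dens_direct, _, _ = kdensity(x, kernel="gau", bw="silverman",
#                                  gridsize=len(grid))
#
# On a common grid the two estimates should agree closely; kdensityfft is the
# faster choice for the Gaussian kernel.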
| bsd-3-clause |
rahuldhote/scikit-learn | sklearn/neighbors/tests/test_neighbors.py | 103 | 41083 | from itertools import product
import numpy as np
from scipy.sparse import (bsr_matrix, coo_matrix, csc_matrix, csr_matrix,
dok_matrix, lil_matrix)
from sklearn.cross_validation import train_test_split
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.validation import check_random_state
from sklearn.metrics.pairwise import pairwise_distances
from sklearn import neighbors, datasets
rng = np.random.RandomState(0)
# load and shuffle iris dataset
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# load and shuffle digits
digits = datasets.load_digits()
perm = rng.permutation(digits.target.size)
digits.data = digits.data[perm]
digits.target = digits.target[perm]
SPARSE_TYPES = (bsr_matrix, coo_matrix, csc_matrix, csr_matrix, dok_matrix,
lil_matrix)
SPARSE_OR_DENSE = SPARSE_TYPES + (np.asarray,)
ALGORITHMS = ('ball_tree', 'brute', 'kd_tree', 'auto')
P = (1, 2, 3, 4, np.inf)
# Filter deprecation warnings.
neighbors.kneighbors_graph = ignore_warnings(neighbors.kneighbors_graph)
neighbors.radius_neighbors_graph = ignore_warnings(
neighbors.radius_neighbors_graph)
def _weight_func(dist):
""" Weight function to replace lambda d: d ** -2.
The lambda function is not valid because:
if d==0 then 0^-2 is not valid. """
# Dist could be multidimensional, flatten it so all values
# can be looped
with np.errstate(divide='ignore'):
retval = 1. / dist
return retval ** 2
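# Illustrative note (added; not part of the original tests): with the divide
# warning suppressed, _weight_func(np.array([0., 1., 2.])) returns
# [inf, 1., 0.25], so zero distances receive infinite weight instead of
# raising an error, which is exactly why it replaces the lambda.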
def test_unsupervised_kneighbors(n_samples=20, n_features=5,
n_query_pts=2, n_neighbors=5):
# Test unsupervised neighbors methods
X = rng.rand(n_samples, n_features)
test = rng.rand(n_query_pts, n_features)
for p in P:
results_nodist = []
results = []
for algorithm in ALGORITHMS:
neigh = neighbors.NearestNeighbors(n_neighbors=n_neighbors,
algorithm=algorithm,
p=p)
neigh.fit(X)
results_nodist.append(neigh.kneighbors(test,
return_distance=False))
results.append(neigh.kneighbors(test, return_distance=True))
for i in range(len(results) - 1):
assert_array_almost_equal(results_nodist[i], results[i][1])
assert_array_almost_equal(results[i][0], results[i + 1][0])
assert_array_almost_equal(results[i][1], results[i + 1][1])
def test_unsupervised_inputs():
# test the types of valid input into NearestNeighbors
X = rng.random_sample((10, 3))
nbrs_fid = neighbors.NearestNeighbors(n_neighbors=1)
nbrs_fid.fit(X)
dist1, ind1 = nbrs_fid.kneighbors(X)
nbrs = neighbors.NearestNeighbors(n_neighbors=1)
for input in (nbrs_fid, neighbors.BallTree(X), neighbors.KDTree(X)):
nbrs.fit(input)
dist2, ind2 = nbrs.kneighbors(X)
assert_array_almost_equal(dist1, dist2)
assert_array_almost_equal(ind1, ind2)
def test_unsupervised_radius_neighbors(n_samples=20, n_features=5,
n_query_pts=2, radius=0.5,
random_state=0):
# Test unsupervised radius-based query
rng = np.random.RandomState(random_state)
X = rng.rand(n_samples, n_features)
test = rng.rand(n_query_pts, n_features)
for p in P:
results = []
for algorithm in ALGORITHMS:
neigh = neighbors.NearestNeighbors(radius=radius,
algorithm=algorithm,
p=p)
neigh.fit(X)
ind1 = neigh.radius_neighbors(test, return_distance=False)
# sort the results: this is not done automatically for
# radius searches
dist, ind = neigh.radius_neighbors(test, return_distance=True)
for (d, i, i1) in zip(dist, ind, ind1):
j = d.argsort()
d[:] = d[j]
i[:] = i[j]
i1[:] = i1[j]
results.append((dist, ind))
assert_array_almost_equal(np.concatenate(list(ind)),
np.concatenate(list(ind1)))
for i in range(len(results) - 1):
assert_array_almost_equal(np.concatenate(list(results[i][0])),
                                      np.concatenate(list(results[i + 1][0])))
assert_array_almost_equal(np.concatenate(list(results[i][1])),
np.concatenate(list(results[i + 1][1])))
def test_kneighbors_classifier(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=5,
random_state=0):
# Test k-neighbors classification
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
y_str = y.astype(str)
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y[:n_test_pts])
# Test prediction with y_str
knn.fit(X, y_str)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y_str[:n_test_pts])
def test_kneighbors_classifier_float_labels(n_samples=40, n_features=5,
n_test_pts=10, n_neighbors=5,
random_state=0):
# Test k-neighbors classification
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors)
knn.fit(X, y.astype(np.float))
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y[:n_test_pts])
def test_kneighbors_classifier_predict_proba():
# Test KNeighborsClassifier.predict_proba() method
X = np.array([[0, 2, 0],
[0, 2, 1],
[2, 0, 0],
[2, 2, 0],
[0, 0, 2],
[0, 0, 1]])
y = np.array([4, 4, 5, 5, 1, 1])
cls = neighbors.KNeighborsClassifier(n_neighbors=3, p=1) # cityblock dist
cls.fit(X, y)
y_prob = cls.predict_proba(X)
real_prob = np.array([[0, 2. / 3, 1. / 3],
[1. / 3, 2. / 3, 0],
[1. / 3, 0, 2. / 3],
[0, 1. / 3, 2. / 3],
[2. / 3, 1. / 3, 0],
[2. / 3, 1. / 3, 0]])
assert_array_equal(real_prob, y_prob)
# Check that it also works with non integer labels
cls.fit(X, y.astype(str))
y_prob = cls.predict_proba(X)
assert_array_equal(real_prob, y_prob)
# Check that it works with weights='distance'
cls = neighbors.KNeighborsClassifier(
n_neighbors=2, p=1, weights='distance')
cls.fit(X, y)
y_prob = cls.predict_proba(np.array([[0, 2, 0], [2, 2, 2]]))
real_prob = np.array([[0, 1, 0], [0, 0.4, 0.6]])
assert_array_almost_equal(real_prob, y_prob)
def test_radius_neighbors_classifier(n_samples=40,
n_features=5,
n_test_pts=10,
radius=0.5,
random_state=0):
# Test radius-based classification
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
y_str = y.astype(str)
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
neigh = neighbors.RadiusNeighborsClassifier(radius=radius,
weights=weights,
algorithm=algorithm)
neigh.fit(X, y)
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = neigh.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y[:n_test_pts])
neigh.fit(X, y_str)
y_pred = neigh.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y_str[:n_test_pts])
def test_radius_neighbors_classifier_when_no_neighbors():
# Test radius-based classifier when no neighbors found.
    # In this case it should raise an informative exception
X = np.array([[1.0, 1.0], [2.0, 2.0]])
y = np.array([1, 2])
radius = 0.1
z1 = np.array([[1.01, 1.01], [2.01, 2.01]]) # no outliers
z2 = np.array([[1.01, 1.01], [1.4, 1.4]]) # one outlier
weight_func = _weight_func
for outlier_label in [0, -1, None]:
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
rnc = neighbors.RadiusNeighborsClassifier
clf = rnc(radius=radius, weights=weights, algorithm=algorithm,
outlier_label=outlier_label)
clf.fit(X, y)
assert_array_equal(np.array([1, 2]),
clf.predict(z1))
if outlier_label is None:
assert_raises(ValueError, clf.predict, z2)
                elif False:  # disabled branch; outlier labeling is covered by the next test
assert_array_equal(np.array([1, outlier_label]),
clf.predict(z2))
def test_radius_neighbors_classifier_outlier_labeling():
# Test radius-based classifier when no neighbors found and outliers
# are labeled.
X = np.array([[1.0, 1.0], [2.0, 2.0]])
y = np.array([1, 2])
radius = 0.1
z1 = np.array([[1.01, 1.01], [2.01, 2.01]]) # no outliers
z2 = np.array([[1.01, 1.01], [1.4, 1.4]]) # one outlier
correct_labels1 = np.array([1, 2])
correct_labels2 = np.array([1, -1])
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
clf = neighbors.RadiusNeighborsClassifier(radius=radius,
weights=weights,
algorithm=algorithm,
outlier_label=-1)
clf.fit(X, y)
assert_array_equal(correct_labels1, clf.predict(z1))
assert_array_equal(correct_labels2, clf.predict(z2))
def test_radius_neighbors_classifier_zero_distance():
# Test radius-based classifier, when distance to a sample is zero.
X = np.array([[1.0, 1.0], [2.0, 2.0]])
y = np.array([1, 2])
radius = 0.1
z1 = np.array([[1.01, 1.01], [2.0, 2.0]])
correct_labels1 = np.array([1, 2])
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
clf = neighbors.RadiusNeighborsClassifier(radius=radius,
weights=weights,
algorithm=algorithm)
clf.fit(X, y)
assert_array_equal(correct_labels1, clf.predict(z1))
def test_neighbors_regressors_zero_distance():
# Test radius-based regressor, when distance to a sample is zero.
X = np.array([[1.0, 1.0], [1.0, 1.0], [2.0, 2.0], [2.5, 2.5]])
y = np.array([1.0, 1.5, 2.0, 0.0])
radius = 0.2
z = np.array([[1.1, 1.1], [2.0, 2.0]])
rnn_correct_labels = np.array([1.25, 2.0])
knn_correct_unif = np.array([1.25, 1.0])
knn_correct_dist = np.array([1.25, 2.0])
for algorithm in ALGORITHMS:
        # we don't test for weights=_weight_func since the user is expected
        # to handle zero distances themselves in the function.
for weights in ['uniform', 'distance']:
rnn = neighbors.RadiusNeighborsRegressor(radius=radius,
weights=weights,
algorithm=algorithm)
rnn.fit(X, y)
assert_array_almost_equal(rnn_correct_labels, rnn.predict(z))
for weights, corr_labels in zip(['uniform', 'distance'],
[knn_correct_unif, knn_correct_dist]):
knn = neighbors.KNeighborsRegressor(n_neighbors=2,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
assert_array_almost_equal(corr_labels, knn.predict(z))
def test_radius_neighbors_boundary_handling():
"""Test whether points lying on boundary are handled consistently
Also ensures that even with only one query point, an object array
is returned rather than a 2d array.
"""
X = np.array([[1.5], [3.0], [3.01]])
radius = 3.0
for algorithm in ALGORITHMS:
nbrs = neighbors.NearestNeighbors(radius=radius,
algorithm=algorithm).fit(X)
results = nbrs.radius_neighbors([0.0], return_distance=False)
assert_equal(results.shape, (1,))
assert_equal(results.dtype, object)
assert_array_equal(results[0], [0, 1])
def test_RadiusNeighborsClassifier_multioutput():
# Test k-NN classifier on multioutput data
rng = check_random_state(0)
n_features = 2
n_samples = 40
n_output = 3
X = rng.rand(n_samples, n_features)
y = rng.randint(0, 3, (n_samples, n_output))
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
weights = [None, 'uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
# Stack single output prediction
y_pred_so = []
for o in range(n_output):
rnn = neighbors.RadiusNeighborsClassifier(weights=weights,
algorithm=algorithm)
rnn.fit(X_train, y_train[:, o])
y_pred_so.append(rnn.predict(X_test))
y_pred_so = np.vstack(y_pred_so).T
assert_equal(y_pred_so.shape, y_test.shape)
# Multioutput prediction
rnn_mo = neighbors.RadiusNeighborsClassifier(weights=weights,
algorithm=algorithm)
rnn_mo.fit(X_train, y_train)
y_pred_mo = rnn_mo.predict(X_test)
assert_equal(y_pred_mo.shape, y_test.shape)
assert_array_almost_equal(y_pred_mo, y_pred_so)
def test_kneighbors_classifier_sparse(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=5,
random_state=0):
# Test k-NN classifier on sparse matrices
# Like the above, but with various types of sparse matrices
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
X *= X > .2
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
for sparsemat in SPARSE_TYPES:
knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors,
algorithm='auto')
knn.fit(sparsemat(X), y)
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
for sparsev in SPARSE_TYPES + (np.asarray,):
X_eps = sparsev(X[:n_test_pts] + epsilon)
y_pred = knn.predict(X_eps)
assert_array_equal(y_pred, y[:n_test_pts])
def test_KNeighborsClassifier_multioutput():
# Test k-NN classifier on multioutput data
rng = check_random_state(0)
n_features = 5
n_samples = 50
n_output = 3
X = rng.rand(n_samples, n_features)
y = rng.randint(0, 3, (n_samples, n_output))
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
weights = [None, 'uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
# Stack single output prediction
y_pred_so = []
y_pred_proba_so = []
for o in range(n_output):
knn = neighbors.KNeighborsClassifier(weights=weights,
algorithm=algorithm)
knn.fit(X_train, y_train[:, o])
y_pred_so.append(knn.predict(X_test))
y_pred_proba_so.append(knn.predict_proba(X_test))
y_pred_so = np.vstack(y_pred_so).T
assert_equal(y_pred_so.shape, y_test.shape)
assert_equal(len(y_pred_proba_so), n_output)
# Multioutput prediction
knn_mo = neighbors.KNeighborsClassifier(weights=weights,
algorithm=algorithm)
knn_mo.fit(X_train, y_train)
y_pred_mo = knn_mo.predict(X_test)
assert_equal(y_pred_mo.shape, y_test.shape)
assert_array_almost_equal(y_pred_mo, y_pred_so)
# Check proba
y_pred_proba_mo = knn_mo.predict_proba(X_test)
assert_equal(len(y_pred_proba_mo), n_output)
for proba_mo, proba_so in zip(y_pred_proba_mo, y_pred_proba_so):
assert_array_almost_equal(proba_mo, proba_so)
def test_kneighbors_regressor(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=3,
random_state=0):
# Test k-neighbors regression
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y_target = y[:n_test_pts]
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_true(np.all(abs(y_pred - y_target) < 0.3))
def test_KNeighborsRegressor_multioutput_uniform_weight():
# Test k-neighbors in multi-output regression with uniform weight
rng = check_random_state(0)
n_features = 5
n_samples = 40
n_output = 4
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples, n_output)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for algorithm, weights in product(ALGORITHMS, [None, 'uniform']):
knn = neighbors.KNeighborsRegressor(weights=weights,
algorithm=algorithm)
knn.fit(X_train, y_train)
neigh_idx = knn.kneighbors(X_test, return_distance=False)
y_pred_idx = np.array([np.mean(y_train[idx], axis=0)
for idx in neigh_idx])
y_pred = knn.predict(X_test)
assert_equal(y_pred.shape, y_test.shape)
assert_equal(y_pred_idx.shape, y_test.shape)
assert_array_almost_equal(y_pred, y_pred_idx)
def test_kneighbors_regressor_multioutput(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=3,
random_state=0):
# Test k-neighbors in multi-output regression
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y = np.vstack([y, y]).T
y_target = y[:n_test_pts]
weights = ['uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_equal(y_pred.shape, y_target.shape)
assert_true(np.all(np.abs(y_pred - y_target) < 0.3))
def test_radius_neighbors_regressor(n_samples=40,
n_features=3,
n_test_pts=10,
radius=0.5,
random_state=0):
# Test radius-based neighbors regression
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y_target = y[:n_test_pts]
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
neigh = neighbors.RadiusNeighborsRegressor(radius=radius,
weights=weights,
algorithm=algorithm)
neigh.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = neigh.predict(X[:n_test_pts] + epsilon)
assert_true(np.all(abs(y_pred - y_target) < radius / 2))
def test_RadiusNeighborsRegressor_multioutput_with_uniform_weight():
# Test radius neighbors in multi-output regression (uniform weight)
rng = check_random_state(0)
n_features = 5
n_samples = 40
n_output = 4
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples, n_output)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for algorithm, weights in product(ALGORITHMS, [None, 'uniform']):
        rnn = neighbors.RadiusNeighborsRegressor(weights=weights,
                                                 algorithm=algorithm)
rnn.fit(X_train, y_train)
neigh_idx = rnn.radius_neighbors(X_test, return_distance=False)
y_pred_idx = np.array([np.mean(y_train[idx], axis=0)
for idx in neigh_idx])
y_pred_idx = np.array(y_pred_idx)
y_pred = rnn.predict(X_test)
assert_equal(y_pred_idx.shape, y_test.shape)
assert_equal(y_pred.shape, y_test.shape)
assert_array_almost_equal(y_pred, y_pred_idx)
def test_RadiusNeighborsRegressor_multioutput(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=3,
random_state=0):
# Test k-neighbors in multi-output regression with various weight
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y = np.vstack([y, y]).T
y_target = y[:n_test_pts]
weights = ['uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
rnn = neighbors.RadiusNeighborsRegressor(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
rnn.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = rnn.predict(X[:n_test_pts] + epsilon)
assert_equal(y_pred.shape, y_target.shape)
assert_true(np.all(np.abs(y_pred - y_target) < 0.3))
def test_kneighbors_regressor_sparse(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=5,
random_state=0):
# Test radius-based regression on sparse matrices
# Like the above, but with various types of sparse matrices
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .25).astype(np.int)
for sparsemat in SPARSE_TYPES:
knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
algorithm='auto')
knn.fit(sparsemat(X), y)
for sparsev in SPARSE_OR_DENSE:
X2 = sparsev(X)
assert_true(np.mean(knn.predict(X2).round() == y) > 0.95)
def test_neighbors_iris():
# Sanity checks on the iris dataset
# Puts three points of each label in the plane and performs a
# nearest neighbor query on points near the decision boundary.
for algorithm in ALGORITHMS:
clf = neighbors.KNeighborsClassifier(n_neighbors=1,
algorithm=algorithm)
clf.fit(iris.data, iris.target)
assert_array_equal(clf.predict(iris.data), iris.target)
clf.set_params(n_neighbors=9, algorithm=algorithm)
clf.fit(iris.data, iris.target)
assert_true(np.mean(clf.predict(iris.data) == iris.target) > 0.95)
rgs = neighbors.KNeighborsRegressor(n_neighbors=5, algorithm=algorithm)
rgs.fit(iris.data, iris.target)
assert_true(np.mean(rgs.predict(iris.data).round() == iris.target)
> 0.95)
def test_neighbors_digits():
# Sanity check on the digits dataset
# the 'brute' algorithm has been observed to fail if the input
# dtype is uint8 due to overflow in distance calculations.
X = digits.data.astype('uint8')
Y = digits.target
(n_samples, n_features) = X.shape
train_test_boundary = int(n_samples * 0.8)
train = np.arange(0, train_test_boundary)
test = np.arange(train_test_boundary, n_samples)
(X_train, Y_train, X_test, Y_test) = X[train], Y[train], X[test], Y[test]
clf = neighbors.KNeighborsClassifier(n_neighbors=1, algorithm='brute')
score_uint8 = clf.fit(X_train, Y_train).score(X_test, Y_test)
score_float = clf.fit(X_train.astype(float), Y_train).score(
X_test.astype(float), Y_test)
assert_equal(score_uint8, score_float)
def test_kneighbors_graph():
# Test kneighbors_graph to build the k-Nearest Neighbor graph.
X = np.array([[0, 1], [1.01, 1.], [2, 0]])
# n_neighbors = 1
A = neighbors.kneighbors_graph(X, 1, mode='connectivity')
assert_array_equal(A.toarray(), np.eye(A.shape[0]))
A = neighbors.kneighbors_graph(X, 1, mode='distance')
assert_array_almost_equal(
A.toarray(),
[[0.00, 1.01, 0.],
[1.01, 0., 0.],
[0.00, 1.40716026, 0.]])
# n_neighbors = 2
A = neighbors.kneighbors_graph(X, 2, mode='connectivity')
assert_array_equal(
A.toarray(),
[[1., 1., 0.],
[1., 1., 0.],
[0., 1., 1.]])
A = neighbors.kneighbors_graph(X, 2, mode='distance')
assert_array_almost_equal(
A.toarray(),
[[0., 1.01, 2.23606798],
[1.01, 0., 1.40716026],
[2.23606798, 1.40716026, 0.]])
# n_neighbors = 3
A = neighbors.kneighbors_graph(X, 3, mode='connectivity')
assert_array_almost_equal(
A.toarray(),
[[1, 1, 1], [1, 1, 1], [1, 1, 1]])
def test_kneighbors_graph_sparse(seed=36):
# Test kneighbors_graph to build the k-Nearest Neighbor graph
# for sparse input.
rng = np.random.RandomState(seed)
X = rng.randn(10, 10)
Xcsr = csr_matrix(X)
for n_neighbors in [1, 2, 3]:
for mode in ["connectivity", "distance"]:
assert_array_almost_equal(
neighbors.kneighbors_graph(X,
n_neighbors,
mode=mode).toarray(),
neighbors.kneighbors_graph(Xcsr,
n_neighbors,
mode=mode).toarray())
def test_radius_neighbors_graph():
# Test radius_neighbors_graph to build the Nearest Neighbor graph.
X = np.array([[0, 1], [1.01, 1.], [2, 0]])
A = neighbors.radius_neighbors_graph(X, 1.5, mode='connectivity')
assert_array_equal(
A.toarray(),
[[1., 1., 0.],
[1., 1., 1.],
[0., 1., 1.]])
A = neighbors.radius_neighbors_graph(X, 1.5, mode='distance')
assert_array_almost_equal(
A.toarray(),
[[0., 1.01, 0.],
[1.01, 0., 1.40716026],
[0., 1.40716026, 0.]])
def test_radius_neighbors_graph_sparse(seed=36):
# Test radius_neighbors_graph to build the Nearest Neighbor graph
# for sparse input.
rng = np.random.RandomState(seed)
X = rng.randn(10, 10)
Xcsr = csr_matrix(X)
for n_neighbors in [1, 2, 3]:
for mode in ["connectivity", "distance"]:
assert_array_almost_equal(
neighbors.radius_neighbors_graph(X,
n_neighbors,
mode=mode).toarray(),
neighbors.radius_neighbors_graph(Xcsr,
n_neighbors,
mode=mode).toarray())
def test_neighbors_badargs():
# Test bad argument values: these should all raise ValueErrors
assert_raises(ValueError,
neighbors.NearestNeighbors,
algorithm='blah')
X = rng.random_sample((10, 2))
Xsparse = csr_matrix(X)
y = np.ones(10)
for cls in (neighbors.KNeighborsClassifier,
neighbors.RadiusNeighborsClassifier,
neighbors.KNeighborsRegressor,
neighbors.RadiusNeighborsRegressor):
assert_raises(ValueError,
cls,
weights='blah')
assert_raises(ValueError,
cls, p=-1)
assert_raises(ValueError,
cls, algorithm='blah')
nbrs = cls(algorithm='ball_tree', metric='haversine')
assert_raises(ValueError,
nbrs.predict,
X)
assert_raises(ValueError,
ignore_warnings(nbrs.fit),
Xsparse, y)
nbrs = cls()
assert_raises(ValueError,
nbrs.fit,
np.ones((0, 2)), np.ones(0))
assert_raises(ValueError,
nbrs.fit,
X[:, :, None], y)
nbrs.fit(X, y)
assert_raises(ValueError,
nbrs.predict,
[])
nbrs = neighbors.NearestNeighbors().fit(X)
assert_raises(ValueError,
nbrs.kneighbors_graph,
X, mode='blah')
assert_raises(ValueError,
nbrs.radius_neighbors_graph,
X, mode='blah')
def test_neighbors_metrics(n_samples=20, n_features=3,
n_query_pts=2, n_neighbors=5):
# Test computing the neighbors for various metrics
# create a symmetric matrix
V = rng.rand(n_features, n_features)
VI = np.dot(V, V.T)
metrics = [('euclidean', {}),
('manhattan', {}),
('minkowski', dict(p=1)),
('minkowski', dict(p=2)),
('minkowski', dict(p=3)),
('minkowski', dict(p=np.inf)),
('chebyshev', {}),
('seuclidean', dict(V=rng.rand(n_features))),
('wminkowski', dict(p=3, w=rng.rand(n_features))),
('mahalanobis', dict(VI=VI))]
algorithms = ['brute', 'ball_tree', 'kd_tree']
X = rng.rand(n_samples, n_features)
test = rng.rand(n_query_pts, n_features)
for metric, metric_params in metrics:
results = []
p = metric_params.pop('p', 2)
for algorithm in algorithms:
# KD tree doesn't support all metrics
if (algorithm == 'kd_tree' and
metric not in neighbors.KDTree.valid_metrics):
assert_raises(ValueError,
neighbors.NearestNeighbors,
algorithm=algorithm,
metric=metric, metric_params=metric_params)
continue
neigh = neighbors.NearestNeighbors(n_neighbors=n_neighbors,
algorithm=algorithm,
metric=metric, p=p,
metric_params=metric_params)
neigh.fit(X)
results.append(neigh.kneighbors(test, return_distance=True))
assert_array_almost_equal(results[0][0], results[1][0])
assert_array_almost_equal(results[0][1], results[1][1])
def test_callable_metric():
metric = lambda x1, x2: np.sqrt(np.sum(x1 ** 2 + x2 ** 2))
X = np.random.RandomState(42).rand(20, 2)
nbrs1 = neighbors.NearestNeighbors(3, algorithm='auto', metric=metric)
nbrs2 = neighbors.NearestNeighbors(3, algorithm='brute', metric=metric)
nbrs1.fit(X)
nbrs2.fit(X)
dist1, ind1 = nbrs1.kneighbors(X)
dist2, ind2 = nbrs2.kneighbors(X)
assert_array_almost_equal(dist1, dist2)
def test_metric_params_interface():
assert_warns(DeprecationWarning, neighbors.KNeighborsClassifier,
metric='wminkowski', w=np.ones(10))
assert_warns(SyntaxWarning, neighbors.KNeighborsClassifier,
metric_params={'p': 3})
def test_predict_sparse_ball_kd_tree():
rng = np.random.RandomState(0)
X = rng.rand(5, 5)
y = rng.randint(0, 2, 5)
nbrs1 = neighbors.KNeighborsClassifier(1, algorithm='kd_tree')
nbrs2 = neighbors.KNeighborsRegressor(1, algorithm='ball_tree')
for model in [nbrs1, nbrs2]:
model.fit(X, y)
assert_raises(ValueError, model.predict, csr_matrix(X))
def test_non_euclidean_kneighbors():
rng = np.random.RandomState(0)
X = rng.rand(5, 5)
# Find a reasonable radius.
dist_array = pairwise_distances(X).flatten()
    dist_array = np.sort(dist_array)
radius = dist_array[15]
# Test kneighbors_graph
for metric in ['manhattan', 'chebyshev']:
nbrs_graph = neighbors.kneighbors_graph(
X, 3, metric=metric).toarray()
nbrs1 = neighbors.NearestNeighbors(3, metric=metric).fit(X)
assert_array_equal(nbrs_graph, nbrs1.kneighbors_graph(X).toarray())
# Test radiusneighbors_graph
for metric in ['manhattan', 'chebyshev']:
nbrs_graph = neighbors.radius_neighbors_graph(
X, radius, metric=metric).toarray()
nbrs1 = neighbors.NearestNeighbors(metric=metric, radius=radius).fit(X)
assert_array_equal(nbrs_graph,
nbrs1.radius_neighbors_graph(X).toarray())
# Raise error when wrong parameters are supplied,
X_nbrs = neighbors.NearestNeighbors(3, metric='manhattan')
X_nbrs.fit(X)
assert_raises(ValueError, neighbors.kneighbors_graph, X_nbrs, 3,
metric='euclidean')
X_nbrs = neighbors.NearestNeighbors(radius=radius, metric='manhattan')
X_nbrs.fit(X)
assert_raises(ValueError, neighbors.radius_neighbors_graph, X_nbrs,
radius, metric='euclidean')
def check_object_arrays(nparray, list_check):
for ind, ele in enumerate(nparray):
assert_array_equal(ele, list_check[ind])
def test_k_and_radius_neighbors_train_is_not_query():
# Test kneighbors et.al when query is not training data
for algorithm in ALGORITHMS:
nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
X = [[0], [1]]
nn.fit(X)
test_data = [[2], [1]]
# Test neighbors.
dist, ind = nn.kneighbors(test_data)
assert_array_equal(dist, [[1], [0]])
assert_array_equal(ind, [[1], [1]])
dist, ind = nn.radius_neighbors([[2], [1]], radius=1.5)
check_object_arrays(dist, [[1], [1, 0]])
check_object_arrays(ind, [[1], [0, 1]])
# Test the graph variants.
assert_array_equal(
nn.kneighbors_graph(test_data).A, [[0., 1.], [0., 1.]])
assert_array_equal(
nn.kneighbors_graph([[2], [1]], mode='distance').A,
np.array([[0., 1.], [0., 0.]]))
rng = nn.radius_neighbors_graph([[2], [1]], radius=1.5)
assert_array_equal(rng.A, [[0, 1], [1, 1]])
def test_k_and_radius_neighbors_X_None():
# Test kneighbors et.al when query is None
for algorithm in ALGORITHMS:
nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
X = [[0], [1]]
nn.fit(X)
dist, ind = nn.kneighbors()
assert_array_equal(dist, [[1], [1]])
assert_array_equal(ind, [[1], [0]])
dist, ind = nn.radius_neighbors(None, radius=1.5)
check_object_arrays(dist, [[1], [1]])
check_object_arrays(ind, [[1], [0]])
# Test the graph variants.
rng = nn.radius_neighbors_graph(None, radius=1.5)
kng = nn.kneighbors_graph(None)
for graph in [rng, kng]:
            assert_array_equal(graph.A, [[0, 1], [1, 0]])
            assert_array_equal(graph.data, [1, 1])
            assert_array_equal(graph.indices, [1, 0])
X = [[0, 1], [0, 1], [1, 1]]
nn = neighbors.NearestNeighbors(n_neighbors=2, algorithm=algorithm)
nn.fit(X)
assert_array_equal(
nn.kneighbors_graph().A,
np.array([[0., 1., 1.], [1., 0., 1.], [1., 1., 0]]))
def test_k_and_radius_neighbors_duplicates():
# Test behavior of kneighbors when duplicates are present in query
for algorithm in ALGORITHMS:
nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
nn.fit([[0], [1]])
# Do not do anything special to duplicates.
kng = nn.kneighbors_graph([[0], [1]], mode='distance')
assert_array_equal(
kng.A,
np.array([[0., 0.], [0., 0.]]))
assert_array_equal(kng.data, [0., 0.])
assert_array_equal(kng.indices, [0, 1])
dist, ind = nn.radius_neighbors([[0], [1]], radius=1.5)
check_object_arrays(dist, [[0, 1], [1, 0]])
check_object_arrays(ind, [[0, 1], [0, 1]])
rng = nn.radius_neighbors_graph([[0], [1]], radius=1.5)
assert_array_equal(rng.A, np.ones((2, 2)))
rng = nn.radius_neighbors_graph([[0], [1]], radius=1.5,
mode='distance')
assert_array_equal(rng.A, [[0, 1], [1, 0]])
assert_array_equal(rng.indices, [0, 1, 0, 1])
assert_array_equal(rng.data, [0, 1, 1, 0])
# Mask the first duplicates when n_duplicates > n_neighbors.
X = np.ones((3, 1))
nn = neighbors.NearestNeighbors(n_neighbors=1)
nn.fit(X)
dist, ind = nn.kneighbors()
assert_array_equal(dist, np.zeros((3, 1)))
assert_array_equal(ind, [[1], [0], [1]])
# Test that zeros are explicitly marked in kneighbors_graph.
kng = nn.kneighbors_graph(mode='distance')
assert_array_equal(
kng.A, np.zeros((3, 3)))
assert_array_equal(kng.data, np.zeros(3))
assert_array_equal(kng.indices, [1., 0., 1.])
assert_array_equal(
nn.kneighbors_graph().A,
np.array([[0., 1., 0.], [1., 0., 0.], [0., 1., 0.]]))
def test_include_self_neighbors_graph():
# Test include_self parameter in neighbors_graph
X = [[2, 3], [4, 5]]
kng = neighbors.kneighbors_graph(X, 1, include_self=True).A
kng_not_self = neighbors.kneighbors_graph(X, 1, include_self=False).A
assert_array_equal(kng, [[1., 0.], [0., 1.]])
assert_array_equal(kng_not_self, [[0., 1.], [1., 0.]])
rng = neighbors.radius_neighbors_graph(X, 5.0, include_self=True).A
rng_not_self = neighbors.radius_neighbors_graph(
X, 5.0, include_self=False).A
assert_array_equal(rng, [[1., 1.], [1., 1.]])
assert_array_equal(rng_not_self, [[0., 1.], [1., 0.]])
def test_dtype_convert():
classifier = neighbors.KNeighborsClassifier(n_neighbors=1)
CLASSES = 15
X = np.eye(CLASSES)
y = [ch for ch in 'ABCDEFGHIJKLMNOPQRSTU'[:CLASSES]]
result = classifier.fit(X, y).predict(X)
assert_array_equal(result, y) | bsd-3-clause |
rasbt/python-machine-learning-book-2nd-edition | code/ch05/ch05.py | 1 | 26250 | # coding: utf-8
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from matplotlib.colors import ListedColormap
from sklearn.linear_model import LogisticRegression
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
from scipy.spatial.distance import pdist, squareform
from scipy import exp
from scipy.linalg import eigh
from sklearn.datasets import make_moons
from sklearn.datasets import make_circles
from sklearn.decomposition import KernelPCA
# *Python Machine Learning 2nd Edition* by [Sebastian Raschka](https://sebastianraschka.com), Packt Publishing Ltd. 2017
#
# Code Repository: https://github.com/rasbt/python-machine-learning-book-2nd-edition
#
# Code License: [MIT License](https://github.com/rasbt/python-machine-learning-book-2nd-edition/blob/master/LICENSE.txt)
# # Python Machine Learning - Code Examples
# # Chapter 5 - Compressing Data via Dimensionality Reduction
# Note that the optional watermark extension is a small IPython notebook plugin that I developed to make the code reproducible. You can just skip the following line(s).
# *The use of `watermark` is optional. You can install this IPython extension via "`pip install watermark`". For more information, please see: https://github.com/rasbt/watermark.*
# ### Overview
# - [Unsupervised dimensionality reduction via principal component analysis](#Unsupervised-dimensionality-reduction-via-principal-component-analysis)
# - [The main steps behind principal component analysis](#The-main-steps-behind-principal-component-analysis)
# - [Extracting the principal components step-by-step](#Extracting-the-principal-components-step-by-step)
# - [Total and explained variance](#Total-and-explained-variance)
# - [Feature transformation](#Feature-transformation)
# - [Principal component analysis in scikit-learn](#Principal-component-analysis-in-scikit-learn)
# - [Supervised data compression via linear discriminant analysis](#Supervised-data-compression-via-linear-discriminant-analysis)
# - [Principal component analysis versus linear discriminant analysis](#Principal-component-analysis-versus-linear-discriminant-analysis)
# - [The inner workings of linear discriminant analysis](#The-inner-workings-of-linear-discriminant-analysis)
# - [Computing the scatter matrices](#Computing-the-scatter-matrices)
# - [Selecting linear discriminants for the new feature subspace](#Selecting-linear-discriminants-for-the-new-feature-subspace)
# - [Projecting samples onto the new feature space](#Projecting-samples-onto-the-new-feature-space)
# - [LDA via scikit-learn](#LDA-via-scikit-learn)
# - [Using kernel principal component analysis for nonlinear mappings](#Using-kernel-principal-component-analysis-for-nonlinear-mappings)
# - [Kernel functions and the kernel trick](#Kernel-functions-and-the-kernel-trick)
# - [Implementing a kernel principal component analysis in Python](#Implementing-a-kernel-principal-component-analysis-in-Python)
# - [Example 1 – separating half-moon shapes](#Example-1:-Separating-half-moon-shapes)
# - [Example 2 – separating concentric circles](#Example-2:-Separating-concentric-circles)
# - [Projecting new data points](#Projecting-new-data-points)
# - [Kernel principal component analysis in scikit-learn](#Kernel-principal-component-analysis-in-scikit-learn)
# - [Summary](#Summary)
# # Unsupervised dimensionality reduction via principal component analysis
# ## The main steps behind principal component analysis
# ## Extracting the principal components step-by-step
df_wine = pd.read_csv('https://archive.ics.uci.edu/ml/'
'machine-learning-databases/wine/wine.data',
header=None)
# if the Wine dataset is temporarily unavailable from the
# UCI machine learning repository, un-comment the following line
# of code to load the dataset from a local path:
# df_wine = pd.read_csv('wine.data', header=None)
df_wine.columns = ['Class label', 'Alcohol', 'Malic acid', 'Ash',
'Alcalinity of ash', 'Magnesium', 'Total phenols',
'Flavanoids', 'Nonflavanoid phenols', 'Proanthocyanins',
'Color intensity', 'Hue',
'OD280/OD315 of diluted wines', 'Proline']
df_wine.head()
# Splitting the data into 70% training and 30% test subsets.
X, y = df_wine.iloc[:, 1:].values, df_wine.iloc[:, 0].values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3,
stratify=y,
random_state=0)
# Standardizing the data.
sc = StandardScaler()
X_train_std = sc.fit_transform(X_train)
X_test_std = sc.transform(X_test)
# ---
#
# **Note**
#
# Accidentally, I wrote `X_test_std = sc.fit_transform(X_test)` instead of `X_test_std = sc.transform(X_test)`. In this case, it wouldn't make a big difference since the mean and standard deviation of the test set should be (quite) similar to the training set. However, as you may remember from Chapter 3, the correct way is to re-use parameters from the training set if we are doing any kind of transformation -- the test set should basically stand for "new, unseen" data.
#
# My initial typo reflects a common mistake: some people do *not* re-use these parameters from the model training/building and standardize the new data "from scratch." Here's a simple example to explain why this is a problem.
#
# Let's assume we have a simple training set consisting of 3 samples with 1 feature (let's call this feature "length"):
#
# - train_1: 10 cm -> class_2
# - train_2: 20 cm -> class_2
# - train_3: 30 cm -> class_1
#
# mean: 20, std.: 8.2
#
# After standardization, the transformed feature values are
#
# - train_std_1: -1.21 -> class_2
# - train_std_2: 0 -> class_2
# - train_std_3: 1.21 -> class_1
#
# Next, let's assume our model has learned to classify samples with a standardized length value < 0.6 as class_2 (class_1 otherwise). So far so good. Now, let's say we have 3 unlabeled data points that we want to classify:
#
# - new_4: 5 cm -> class ?
# - new_5: 6 cm -> class ?
# - new_6: 7 cm -> class ?
#
# If we look at the unstandardized "length" values in our training dataset, it is intuitive to say that all of these samples likely belong to class_2. However, if we standardize them by re-computing the standard deviation and mean on the new data, we would get values similar to those in the training set, and the classifier would assign samples 4 and 5 to class_2 but (probably incorrectly) assign sample 6 to class_1.
#
# - new_std_4: -1.21 -> class 2
# - new_std_5: 0 -> class 2
# - new_std_6: 1.21 -> class 1
#
# However, if we use the parameters from our "training set standardization," we'd get the values:
#
# - new_std_4: -1.83 -> class 2
# - new_std_5: -1.71 -> class 2
# - new_std_6: -1.59 -> class 2
#
# The values 5 cm, 6 cm, and 7 cm are much lower than anything we have seen in the training set previously. Thus, it only makes sense that the standardized features of the "new samples" are much lower than every standardized feature in the training set.
#
# ---
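# A minimal numerical sketch of the point above (the toy numbers are assumed
# for illustration and are not part of the book text). `np` and
# `StandardScaler` are already imported at the top of this file.
toy_train = np.array([[10.], [20.], [30.]])   # training "lengths" in cm
toy_new = np.array([[5.], [6.], [7.]])        # "new, unseen" lengths in cm
toy_sc = StandardScaler().fit(toy_train)      # learn mean/std on training data only
# Correct: re-use the training parameters -- all three new values fall well
# below the 0.6 decision threshold and would be assigned to class_2.
print(toy_sc.transform(toy_new).ravel())
# Incorrect: standardizing the new data "from scratch" reproduces the training
# pattern (approx. -1.22, 0, 1.22), so the last sample would end up in class_1.
print(StandardScaler().fit_transform(toy_new).ravel())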
# Eigendecomposition of the covariance matrix.
cov_mat = np.cov(X_train_std.T)
eigen_vals, eigen_vecs = np.linalg.eig(cov_mat)
print('\nEigenvalues \n%s' % eigen_vals)
# **Note**:
#
# Above, I used the [`numpy.linalg.eig`](http://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.eig.html) function to decompose the symmetric covariance matrix into its eigenvalues and eigenvectors.
# <pre>>>> eigen_vals, eigen_vecs = np.linalg.eig(cov_mat)</pre>
# This is not really a "mistake," but probably suboptimal. It would be better to use [`numpy.linalg.eigh`](http://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.eigh.html) in such cases, which has been designed for [Hermitian matrices](https://en.wikipedia.org/wiki/Hermitian_matrix). The latter always returns real eigenvalues, whereas the numerically less stable `np.linalg.eig` -- which can also decompose nonsymmetric square matrices -- may return complex eigenvalues in certain cases. (S.R.)
#
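# A quick numerical cross-check of the remark above (added here as a sketch,
# not part of the book code): `eigh` on the same symmetric covariance matrix
# returns real eigenvalues in ascending order, so the sorted spectra agree.
eigen_vals_h, _ = np.linalg.eigh(cov_mat)
print(np.allclose(sorted(eigen_vals.real, reverse=True), eigen_vals_h[::-1]))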
# ## Total and explained variance
tot = sum(eigen_vals)
var_exp = [(i / tot) for i in sorted(eigen_vals, reverse=True)]
cum_var_exp = np.cumsum(var_exp)
plt.bar(range(1, 14), var_exp, alpha=0.5, align='center',
label='individual explained variance')
plt.step(range(1, 14), cum_var_exp, where='mid',
label='cumulative explained variance')
plt.ylabel('Explained variance ratio')
plt.xlabel('Principal component index')
plt.legend(loc='best')
plt.tight_layout()
# plt.savefig('images/05_02.png', dpi=300)
plt.show()
# ## Feature transformation
# Make a list of (eigenvalue, eigenvector) tuples
eigen_pairs = [(np.abs(eigen_vals[i]), eigen_vecs[:, i])
for i in range(len(eigen_vals))]
# Sort the (eigenvalue, eigenvector) tuples from high to low
eigen_pairs.sort(key=lambda k: k[0], reverse=True)
w = np.hstack((eigen_pairs[0][1][:, np.newaxis],
eigen_pairs[1][1][:, np.newaxis]))
print('Matrix W:\n', w)
# **Note**
# Depending on which version of NumPy and LAPACK you are using, you may obtain the Matrix W with its signs flipped. Please note that this is not an issue: If $v$ is an eigenvector of a matrix $\Sigma$, we have
#
# $$\Sigma v = \lambda v,$$
#
# where $\lambda$ is our eigenvalue,
#
#
# then $-v$ is also an eigenvector that has the same eigenvalue, since
# $$\Sigma \cdot (-v) = -\Sigma v = -\lambda v = \lambda \cdot (-v).$$
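# A small numerical check of the statement above (a sketch added for
# reference): flipping the sign of the leading eigenvector still satisfies
# the eigenvalue equation for the covariance matrix.
v_top, lambda_top = eigen_pairs[0][1], eigen_pairs[0][0]
print(np.allclose(cov_mat.dot(-v_top), lambda_top * (-v_top)))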
X_train_std[0].dot(w)
X_train_pca = X_train_std.dot(w)
colors = ['r', 'b', 'g']
markers = ['s', 'x', 'o']
for l, c, m in zip(np.unique(y_train), colors, markers):
plt.scatter(X_train_pca[y_train == l, 0],
X_train_pca[y_train == l, 1],
c=c, label=l, marker=m)
plt.xlabel('PC 1')
plt.ylabel('PC 2')
plt.legend(loc='lower left')
plt.tight_layout()
# plt.savefig('images/05_03.png', dpi=300)
plt.show()
# ## Principal component analysis in scikit-learn
# **NOTE**
#
# The following four code cells have been added in addition to the content of the book, to illustrate how to replicate the results from our own PCA implementation in scikit-learn:
pca = PCA()
X_train_pca = pca.fit_transform(X_train_std)
pca.explained_variance_ratio_
plt.bar(range(1, 14), pca.explained_variance_ratio_, alpha=0.5, align='center')
plt.step(range(1, 14), np.cumsum(pca.explained_variance_ratio_), where='mid')
plt.ylabel('Explained variance ratio')
plt.xlabel('Principal components')
plt.show()
pca = PCA(n_components=2)
X_train_pca = pca.fit_transform(X_train_std)
X_test_pca = pca.transform(X_test_std)
plt.scatter(X_train_pca[:, 0], X_train_pca[:, 1])
plt.xlabel('PC 1')
plt.ylabel('PC 2')
plt.show()
def plot_decision_regions(X, y, classifier, resolution=0.02):
# setup marker generator and color map
markers = ('s', 'x', 'o', '^', 'v')
colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')
cmap = ListedColormap(colors[:len(np.unique(y))])
# plot the decision surface
x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1
x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),
np.arange(x2_min, x2_max, resolution))
Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)
Z = Z.reshape(xx1.shape)
plt.contourf(xx1, xx2, Z, alpha=0.4, cmap=cmap)
plt.xlim(xx1.min(), xx1.max())
plt.ylim(xx2.min(), xx2.max())
# plot class samples
for idx, cl in enumerate(np.unique(y)):
plt.scatter(x=X[y == cl, 0],
y=X[y == cl, 1],
alpha=0.6,
c=cmap(idx),
edgecolor='black',
marker=markers[idx],
label=cl)
# Training logistic regression classifier using the first 2 principal components.
pca = PCA(n_components=2)
X_train_pca = pca.fit_transform(X_train_std)
X_test_pca = pca.transform(X_test_std)
lr = LogisticRegression()
lr = lr.fit(X_train_pca, y_train)
plot_decision_regions(X_train_pca, y_train, classifier=lr)
plt.xlabel('PC 1')
plt.ylabel('PC 2')
plt.legend(loc='lower left')
plt.tight_layout()
# plt.savefig('images/05_04.png', dpi=300)
plt.show()
plot_decision_regions(X_test_pca, y_test, classifier=lr)
plt.xlabel('PC 1')
plt.ylabel('PC 2')
plt.legend(loc='lower left')
plt.tight_layout()
# plt.savefig('images/05_05.png', dpi=300)
plt.show()
pca = PCA(n_components=None)
X_train_pca = pca.fit_transform(X_train_std)
pca.explained_variance_ratio_
# # Supervised data compression via linear discriminant analysis
# ## Principal component analysis versus linear discriminant analysis
# ## The inner workings of linear discriminant analysis
# ## Computing the scatter matrices
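# For reference, the following code cells implement the standard LDA
# scatter-matrix definitions (formulas added here as a reminder):
#
# $$S_W = \sum_{i=1}^{c} \sum_{x \in D_i} (x - m_i)(x - m_i)^T, \qquad
# S_B = \sum_{i=1}^{c} n_i (m_i - m)(m_i - m)^T$$
#
# where $m_i$ is the mean vector and $n_i$ the sample count of class $i$,
# $m$ is the overall mean, and $c$ is the number of classes.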
# Calculate the mean vectors for each class:
np.set_printoptions(precision=4)
mean_vecs = []
for label in range(1, 4):
mean_vecs.append(np.mean(X_train_std[y_train == label], axis=0))
print('MV %s: %s\n' % (label, mean_vecs[label - 1]))
# Compute the within-class scatter matrix:
d = 13 # number of features
S_W = np.zeros((d, d))
for label, mv in zip(range(1, 4), mean_vecs):
class_scatter = np.zeros((d, d)) # scatter matrix for each class
for row in X_train_std[y_train == label]:
row, mv = row.reshape(d, 1), mv.reshape(d, 1) # make column vectors
class_scatter += (row - mv).dot((row - mv).T)
S_W += class_scatter # sum class scatter matrices
print('Within-class scatter matrix: %sx%s' % (S_W.shape[0], S_W.shape[1]))
# Better: covariance matrix since classes are not equally distributed:
print('Class label distribution: %s'
% np.bincount(y_train)[1:])
d = 13 # number of features
S_W = np.zeros((d, d))
for label, mv in zip(range(1, 4), mean_vecs):
class_scatter = np.cov(X_train_std[y_train == label].T)
S_W += class_scatter
print('Scaled within-class scatter matrix: %sx%s' % (S_W.shape[0],
S_W.shape[1]))
# Compute the between-class scatter matrix:
mean_overall = np.mean(X_train_std, axis=0)
d = 13 # number of features
S_B = np.zeros((d, d))
for i, mean_vec in enumerate(mean_vecs):
n = X_train[y_train == i + 1, :].shape[0]
mean_vec = mean_vec.reshape(d, 1) # make column vector
mean_overall = mean_overall.reshape(d, 1) # make column vector
S_B += n * (mean_vec - mean_overall).dot((mean_vec - mean_overall).T)
print('Between-class scatter matrix: %sx%s' % (S_B.shape[0], S_B.shape[1]))
# ## Selecting linear discriminants for the new feature subspace
# Solve the generalized eigenvalue problem for the matrix $S_W^{-1}S_B$:
eigen_vals, eigen_vecs = np.linalg.eig(np.linalg.inv(S_W).dot(S_B))
# **Note**:
#
# Above, I used the [`numpy.linalg.eig`](http://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.eig.html) function to decompose the matrix $S_W^{-1}S_B$ into its eigenvalues and eigenvectors.
# <pre>>>> eigen_vals, eigen_vecs = np.linalg.eig(np.linalg.inv(S_W).dot(S_B))</pre>
# This is not really a "mistake," but probably suboptimal. Since $S_W^{-1}S_B$ is not symmetric, `np.linalg.eig` may return complex eigenvalues here, which is why the real parts are taken further below. The equivalent symmetric generalized eigenvalue problem can instead be solved with [`scipy.linalg.eigh`](http://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.eigh.html), which is designed for [Hermitian matrices](https://en.wikipedia.org/wiki/Hermitian_matrix) and always returns real eigenvalues. (S.R.)
#
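# An added cross-check (a sketch, not in the book): this is the generalized
# eigenvalue problem $S_B v = \lambda S_W v$, and since $S_W$ is symmetric --
# and, assuming enough samples per class, positive definite -- the
# `scipy.linalg.eigh` routine imported as `eigh` at the top of this file can
# solve it directly and return real eigenvalues.
eigen_vals_gen, eigen_vecs_gen = eigh(S_B, S_W)
# eigh returns eigenvalues in ascending order; the sorted spectra should agree
# with the `np.linalg.eig` result above up to numerical precision.
print(np.allclose(sorted(eigen_vals.real, reverse=True), eigen_vals_gen[::-1]))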
# Sort eigenvectors in descending order of the eigenvalues:
# Make a list of (eigenvalue, eigenvector) tuples
eigen_pairs = [(np.abs(eigen_vals[i]), eigen_vecs[:, i])
for i in range(len(eigen_vals))]
# Sort the (eigenvalue, eigenvector) tuples from high to low
eigen_pairs = sorted(eigen_pairs, key=lambda k: k[0], reverse=True)
# Visually confirm that the list is correctly sorted by decreasing eigenvalues
print('Eigenvalues in descending order:\n')
for eigen_val in eigen_pairs:
print(eigen_val[0])
tot = sum(eigen_vals.real)
discr = [(i / tot) for i in sorted(eigen_vals.real, reverse=True)]
cum_discr = np.cumsum(discr)
plt.bar(range(1, 14), discr, alpha=0.5, align='center',
label='individual "discriminability"')
plt.step(range(1, 14), cum_discr, where='mid',
label='cumulative "discriminability"')
plt.ylabel('"discriminability" ratio')
plt.xlabel('Linear Discriminants')
plt.ylim([-0.1, 1.1])
plt.legend(loc='best')
plt.tight_layout()
# plt.savefig('images/05_07.png', dpi=300)
plt.show()
w = np.hstack((eigen_pairs[0][1][:, np.newaxis].real,
eigen_pairs[1][1][:, np.newaxis].real))
print('Matrix W:\n', w)
# ## Projecting samples onto the new feature space
X_train_lda = X_train_std.dot(w)
colors = ['r', 'b', 'g']
markers = ['s', 'x', 'o']
for l, c, m in zip(np.unique(y_train), colors, markers):
plt.scatter(X_train_lda[y_train == l, 0],
X_train_lda[y_train == l, 1] * (-1),
c=c, label=l, marker=m)
plt.xlabel('LD 1')
plt.ylabel('LD 2')
plt.legend(loc='lower right')
plt.tight_layout()
# plt.savefig('images/05_08.png', dpi=300)
plt.show()
# ## LDA via scikit-learn
lda = LDA(n_components=2)
X_train_lda = lda.fit_transform(X_train_std, y_train)
lr = LogisticRegression()
lr = lr.fit(X_train_lda, y_train)
plot_decision_regions(X_train_lda, y_train, classifier=lr)
plt.xlabel('LD 1')
plt.ylabel('LD 2')
plt.legend(loc='lower left')
plt.tight_layout()
# plt.savefig('images/05_09.png', dpi=300)
plt.show()
X_test_lda = lda.transform(X_test_std)
plot_decision_regions(X_test_lda, y_test, classifier=lr)
plt.xlabel('LD 1')
plt.ylabel('LD 2')
plt.legend(loc='lower left')
plt.tight_layout()
# plt.savefig('images/05_10.png', dpi=300)
plt.show()
# # Using kernel principal component analysis for nonlinear mappings
# ## Implementing a kernel principal component analysis in Python
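# For reference (formulas added here), the two key quantities computed inside
# the function below are the RBF kernel and the centered kernel matrix:
#
# $$\kappa(x_i, x_j) = \exp\left(-\gamma \lVert x_i - x_j \rVert^2\right), \qquad
# K' = K - 1_N K - K 1_N + 1_N K 1_N$$
#
# where $1_N$ is an $N \times N$ matrix in which every entry equals $1/N$.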
def rbf_kernel_pca(X, gamma, n_components):
"""
RBF kernel PCA implementation.
Parameters
------------
X: {NumPy ndarray}, shape = [n_samples, n_features]
gamma: float
Tuning parameter of the RBF kernel
n_components: int
Number of principal components to return
Returns
------------
X_pc: {NumPy ndarray}, shape = [n_samples, k_features]
Projected dataset
"""
# Calculate pairwise squared Euclidean distances
# in the MxN dimensional dataset.
sq_dists = pdist(X, 'sqeuclidean')
# Convert pairwise distances into a square matrix.
mat_sq_dists = squareform(sq_dists)
# Compute the symmetric kernel matrix.
K = exp(-gamma * mat_sq_dists)
# Center the kernel matrix.
N = K.shape[0]
one_n = np.ones((N, N)) / N
K = K - one_n.dot(K) - K.dot(one_n) + one_n.dot(K).dot(one_n)
# Obtaining eigenpairs from the centered kernel matrix
# scipy.linalg.eigh returns them in ascending order
eigvals, eigvecs = eigh(K)
eigvals, eigvecs = eigvals[::-1], eigvecs[:, ::-1]
# Collect the top k eigenvectors (projected samples)
    X_pc = np.column_stack([eigvecs[:, i]
                            for i in range(n_components)])
return X_pc
# ### Example 1: Separating half-moon shapes
X, y = make_moons(n_samples=100, random_state=123)
plt.scatter(X[y == 0, 0], X[y == 0, 1], color='red', marker='^', alpha=0.5)
plt.scatter(X[y == 1, 0], X[y == 1, 1], color='blue', marker='o', alpha=0.5)
plt.tight_layout()
# plt.savefig('images/05_12.png', dpi=300)
plt.show()
scikit_pca = PCA(n_components=2)
X_spca = scikit_pca.fit_transform(X)
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(7, 3))
ax[0].scatter(X_spca[y == 0, 0], X_spca[y == 0, 1],
color='red', marker='^', alpha=0.5)
ax[0].scatter(X_spca[y == 1, 0], X_spca[y == 1, 1],
color='blue', marker='o', alpha=0.5)
ax[1].scatter(X_spca[y == 0, 0], np.zeros((50, 1)) + 0.02,
color='red', marker='^', alpha=0.5)
ax[1].scatter(X_spca[y == 1, 0], np.zeros((50, 1)) - 0.02,
color='blue', marker='o', alpha=0.5)
ax[0].set_xlabel('PC1')
ax[0].set_ylabel('PC2')
ax[1].set_ylim([-1, 1])
ax[1].set_yticks([])
ax[1].set_xlabel('PC1')
plt.tight_layout()
# plt.savefig('images/05_13.png', dpi=300)
plt.show()
X_kpca = rbf_kernel_pca(X, gamma=15, n_components=2)
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(7, 3))
ax[0].scatter(X_kpca[y == 0, 0], X_kpca[y == 0, 1],
              color='red', marker='^', alpha=0.5)
ax[0].scatter(X_kpca[y == 1, 0], X_kpca[y == 1, 1],
              color='blue', marker='o', alpha=0.5)
ax[1].scatter(X_kpca[y == 0, 0], np.zeros((50, 1)) + 0.02,
              color='red', marker='^', alpha=0.5)
ax[1].scatter(X_kpca[y == 1, 0], np.zeros((50, 1)) - 0.02,
              color='blue', marker='o', alpha=0.5)
ax[0].set_xlabel('PC1')
ax[0].set_ylabel('PC2')
ax[1].set_ylim([-1, 1])
ax[1].set_yticks([])
ax[1].set_xlabel('PC1')
plt.tight_layout()
# plt.savefig('images/05_14.png', dpi=300)
plt.show()
# ### Example 2: Separating concentric circles
X, y = make_circles(n_samples=1000, random_state=123, noise=0.1, factor=0.2)
plt.scatter(X[y == 0, 0], X[y == 0, 1], color='red', marker='^', alpha=0.5)
plt.scatter(X[y == 1, 0], X[y == 1, 1], color='blue', marker='o', alpha=0.5)
plt.tight_layout()
# plt.savefig('images/05_15.png', dpi=300)
plt.show()
scikit_pca = PCA(n_components=2)
X_spca = scikit_pca.fit_transform(X)
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(7, 3))
ax[0].scatter(X_spca[y == 0, 0], X_spca[y == 0, 1],
color='red', marker='^', alpha=0.5)
ax[0].scatter(X_spca[y == 1, 0], X_spca[y == 1, 1],
color='blue', marker='o', alpha=0.5)
ax[1].scatter(X_spca[y == 0, 0], np.zeros((500, 1)) + 0.02,
color='red', marker='^', alpha=0.5)
ax[1].scatter(X_spca[y == 1, 0], np.zeros((500, 1)) - 0.02,
color='blue', marker='o', alpha=0.5)
ax[0].set_xlabel('PC1')
ax[0].set_ylabel('PC2')
ax[1].set_ylim([-1, 1])
ax[1].set_yticks([])
ax[1].set_xlabel('PC1')
plt.tight_layout()
# plt.savefig('images/05_16.png', dpi=300)
plt.show()
X_kpca = rbf_kernel_pca(X, gamma=15, n_components=2)
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(7, 3))
ax[0].scatter(X_kpca[y == 0, 0], X_kpca[y == 0, 1],
color='red', marker='^', alpha=0.5)
ax[0].scatter(X_kpca[y == 1, 0], X_kpca[y == 1, 1],
color='blue', marker='o', alpha=0.5)
ax[1].scatter(X_kpca[y == 0, 0], np.zeros((500, 1)) + 0.02,
color='red', marker='^', alpha=0.5)
ax[1].scatter(X_kpca[y == 1, 0], np.zeros((500, 1)) - 0.02,
color='blue', marker='o', alpha=0.5)
ax[0].set_xlabel('PC1')
ax[0].set_ylabel('PC2')
ax[1].set_ylim([-1, 1])
ax[1].set_yticks([])
ax[1].set_xlabel('PC1')
plt.tight_layout()
# plt.savefig('images/05_17.png', dpi=300)
plt.show()
# ## Projecting new data points
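# As implemented by `project_x` further below, a new point $x'$ is projected
# onto the $k$-th kernel principal component via (formula added for reference)
#
# $$x'_k = \sum_{i} \frac{a^{(k)}_i}{\lambda_k} \, \kappa(x', x_i),$$
#
# i.e. a kernel-weighted sum over the training samples, scaled by the
# corresponding eigenvalue $\lambda_k$.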
def rbf_kernel_pca(X, gamma, n_components):
"""
RBF kernel PCA implementation.
Parameters
------------
X: {NumPy ndarray}, shape = [n_samples, n_features]
gamma: float
Tuning parameter of the RBF kernel
n_components: int
Number of principal components to return
Returns
------------
alphas: {NumPy ndarray}, shape = [n_samples, k_features]
Projected dataset
lambdas: list
Eigenvalues
"""
# Calculate pairwise squared Euclidean distances
# in the MxN dimensional dataset.
sq_dists = pdist(X, 'sqeuclidean')
# Convert pairwise distances into a square matrix.
mat_sq_dists = squareform(sq_dists)
# Compute the symmetric kernel matrix.
K = exp(-gamma * mat_sq_dists)
# Center the kernel matrix.
N = K.shape[0]
one_n = np.ones((N, N)) / N
K = K - one_n.dot(K) - K.dot(one_n) + one_n.dot(K).dot(one_n)
# Obtaining eigenpairs from the centered kernel matrix
# scipy.linalg.eigh returns them in ascending order
eigvals, eigvecs = eigh(K)
eigvals, eigvecs = eigvals[::-1], eigvecs[:, ::-1]
# Collect the top k eigenvectors (projected samples)
    alphas = np.column_stack([eigvecs[:, i]
                              for i in range(n_components)])
# Collect the corresponding eigenvalues
lambdas = [eigvals[i] for i in range(n_components)]
return alphas, lambdas
X, y = make_moons(n_samples=100, random_state=123)
alphas, lambdas = rbf_kernel_pca(X, gamma=15, n_components=1)
x_new = X[25]
x_new
x_proj = alphas[25] # original projection
x_proj
def project_x(x_new, X, gamma, alphas, lambdas):
pair_dist = np.array([np.sum((x_new - row)**2) for row in X])
k = np.exp(-gamma * pair_dist)
return k.dot(alphas / lambdas)
# projection of the "new" datapoint
x_reproj = project_x(x_new, X, gamma=15, alphas=alphas, lambdas=lambdas)
x_reproj
plt.scatter(alphas[y == 0, 0], np.zeros((50)),
color='red', marker='^', alpha=0.5)
plt.scatter(alphas[y == 1, 0], np.zeros((50)),
color='blue', marker='o', alpha=0.5)
plt.scatter(x_proj, 0, color='black',
label='original projection of point X[25]', marker='^', s=100)
plt.scatter(x_reproj, 0, color='green',
label='remapped point X[25]', marker='x', s=500)
plt.legend(scatterpoints=1)
plt.tight_layout()
# plt.savefig('images/05_18.png', dpi=300)
plt.show()
# ## Kernel principal component analysis in scikit-learn
X, y = make_moons(n_samples=100, random_state=123)
scikit_kpca = KernelPCA(n_components=2, kernel='rbf', gamma=15)
X_skernpca = scikit_kpca.fit_transform(X)
plt.scatter(X_skernpca[y == 0, 0], X_skernpca[y == 0, 1],
color='red', marker='^', alpha=0.5)
plt.scatter(X_skernpca[y == 1, 0], X_skernpca[y == 1, 1],
color='blue', marker='o', alpha=0.5)
plt.xlabel('PC1')
plt.ylabel('PC2')
plt.tight_layout()
# plt.savefig('images/05_19.png', dpi=300)
plt.show()
# # Summary
# ...
# ---
#
# Readers may ignore the next cell.
| mit |
Djabbz/scikit-learn | examples/linear_model/plot_ols_ridge_variance.py | 387 | 2060 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Ordinary Least Squares and Ridge Regression Variance
=========================================================
Due to the few points in each dimension and the straight
line that linear regression uses to follow these points
as well as it can, noise on the observations will cause
great variance as shown in the first plot. Every line's slope
can vary quite a bit for each prediction due to the noise
induced in the observations.
Ridge regression is basically minimizing a penalised version
of the least-squares objective. The penalty `shrinks` the
values of the regression coefficients.
Despite the few data points in each dimension, the slope
of the prediction is much more stable and the variance
in the line itself is greatly reduced, in comparison to that
of standard linear regression.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
X_train = np.c_[.5, 1].T
y_train = [.5, 1]
X_test = np.c_[0, 2].T
np.random.seed(0)
classifiers = dict(ols=linear_model.LinearRegression(),
ridge=linear_model.Ridge(alpha=.1))
fignum = 1
for name, clf in classifiers.items():
fig = plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.title(name)
ax = plt.axes([.12, .12, .8, .8])
for _ in range(6):
this_X = .1 * np.random.normal(size=(2, 1)) + X_train
clf.fit(this_X, y_train)
ax.plot(X_test, clf.predict(X_test), color='.5')
ax.scatter(this_X, y_train, s=3, c='.5', marker='o', zorder=10)
clf.fit(X_train, y_train)
ax.plot(X_test, clf.predict(X_test), linewidth=2, color='blue')
ax.scatter(X_train, y_train, s=30, c='r', marker='+', zorder=10)
ax.set_xticks(())
ax.set_yticks(())
ax.set_ylim((0, 1.6))
ax.set_xlabel('X')
ax.set_ylabel('y')
ax.set_xlim(0, 2)
fignum += 1
plt.show()
| bsd-3-clause |
ContinuumIO/blaze | blaze/compute/tests/test_spark.py | 3 | 8190 | from __future__ import absolute_import, division, print_function
import pytest
pyspark = pytest.importorskip('pyspark')
import pandas as pd
from blaze import compute, symbol, summary, exp, by, join, merge
from toolz import identity
data = [['Alice', 100, 1],
['Bob', 200, 2],
['Alice', 50, 3]]
data2 = [['Alice', 'Austin'],
['Bob', 'Boston']]
df = pd.DataFrame(data, columns=['name', 'amount', 'id'])
# this only exists because we need to have a single session scoped spark
# context, otherwise these would simply be global variables
@pytest.fixture
def rdd(sc):
return sc.parallelize(data)
@pytest.fixture
def rdd2(sc):
return sc.parallelize(data2)
t = symbol('t', 'var * {name: string, amount: int, id: int}')
t2 = symbol('t2', 'var * {name: string, city: string}')
# Web Commons Graph Example data
data_idx = [['A', 1],
['B', 2],
['C', 3]]
data_arc = [[1, 3],
[2, 3],
[3, 1]]
t_idx = symbol('idx', 'var * {name: string, node_id: int32}')
t_arc = symbol('arc', 'var * {node_out: int32, node_id: int32}')
def test_symbol(rdd):
assert compute(t, rdd) == rdd
def test_projection(rdd):
assert compute(t['name'], rdd).collect() == [row[0] for row in data]
def test_multicols_projection(rdd):
result = compute(t[['amount', 'name']], rdd).collect()
expected = [(100, 'Alice'), (200, 'Bob'), (50, 'Alice')]
print(result)
print(expected)
assert result == expected
reduction_exprs = (t['amount'].sum(),
t['amount'].min(),
t['amount'].max(),
t['amount'].nunique(),
t['name'].nunique(),
t['amount'].count(),
(t['amount'] > 150).any(),
(t['amount'] > 150).all(),
t['amount'].mean(),
t['amount'].var(),
summary(a=t.amount.sum(), b=t.id.count()),
t['amount'].std())
@pytest.mark.parametrize('expr', reduction_exprs)
def test_reductions(expr, rdd):
result = compute(expr, rdd)
expected = compute(expr, data)
if not result == expected:
print(result)
print(expected)
if isinstance(result, float):
assert abs(result - expected) < 0.001
else:
assert result == expected
exprs = (t['amount'],
t['amount'] == 100,
t['amount'].truncate(150),
t[t['name'] == 'Alice'],
t[t['amount'] == 0],
t[t['amount'] > 150],
t['amount'] + t['id'],
t['amount'] % t['id'],
exp(t['amount']),
by(t['name'], total=t['amount'].sum()),
by(t['name'], total=(t['amount'] + 1).sum()),
(t['amount'] * 1).label('foo'),
t.map(lambda tup: tup[1] + tup[2], 'real'),
t[t.name.like('Alice')],
t['amount'].apply(identity, 'var * real', splittable=True),
t['amount'].map(lambda x: x + 1, 'int'))
exprs = list(zip(map(str, exprs), exprs))
def tuplify(x):
return tuple(x) if isinstance(x, list) else x
@pytest.mark.parametrize(['string', 'expr'], exprs)
def test_basic(rdd, string, expr):
result = set(map(tuplify, compute(expr, rdd).collect()))
expected = set(map(tuplify, compute(expr, data)))
assert result == expected
tbig = symbol(
'tbig', 'var * {name: string, sex: string[1], amount: int, id: int}')
big_exprs = [
by(tbig[['name', 'sex']], total=tbig['amount'].sum()),
by(tbig[['name', 'sex']], total=(tbig['id'] + tbig['amount']).sum())]
@pytest.mark.parametrize('expr', big_exprs)
def test_big_by(sc, expr):
data = [['Alice', 'F', 100, 1],
['Alice', 'F', 100, 3],
['Drew', 'F', 100, 4],
['Drew', 'M', 100, 5],
['Drew', 'M', 200, 5]]
rdd = sc.parallelize(data)
result = set(map(tuplify, compute(expr, rdd).collect()))
expected = set(map(tuplify, compute(expr, data)))
assert result == expected
def test_head(rdd):
assert list(compute(t.head(1), rdd)) == list(compute(t.head(1), data))
sort_exprs = [
t.sort('amount'),
t.sort('amount', ascending=True),
t.sort(t['amount'], ascending=True),
t.sort(-t['amount'].label('foo') + 1, ascending=True),
t.sort(['amount', 'id'])
]
@pytest.mark.parametrize('expr', sort_exprs)
def test_sort(rdd, expr):
result = compute(expr, rdd).collect()
expected = list(compute(expr, data))
assert result == expected
def test_distinct(rdd):
assert set(compute(t['name'].distinct(), rdd).collect()) == \
set(['Alice', 'Bob'])
@pytest.mark.xfail(
raises=NotImplementedError,
reason='cannot specify columns to distinct on yet',
)
def test_distinct_on(rdd):
compute(t.distinct('name'), rdd)
def test_join(rdd, rdd2):
joined = join(t, t2, 'name')
expected = [('Alice', 100, 1, 'Austin'),
('Bob', 200, 2, 'Boston'),
('Alice', 50, 3, 'Austin')]
result = compute(joined, {t: rdd, t2: rdd2}).collect()
assert all(i in expected for i in result)
def test_multi_column_join(sc):
left = [(1, 2, 3),
(2, 3, 4),
(1, 3, 5)]
right = [(1, 2, 30),
(1, 3, 50),
(1, 3, 150)]
rleft = sc.parallelize(left)
rright = sc.parallelize(right)
L = symbol('L', 'var * {x: int, y: int, z: int}')
R = symbol('R', 'var * {x: int, y: int, w: int}')
j = join(L, R, ['x', 'y'])
result = compute(j, {L: rleft, R: rright})
expected = [(1, 2, 3, 30),
(1, 3, 5, 50),
(1, 3, 5, 150)]
assert set(result.collect()) == set(expected)
def test_groupby(sc):
rddidx = sc.parallelize(data_idx)
rddarc = sc.parallelize(data_arc)
joined = join(t_arc, t_idx, "node_id")
t = by(joined['name'], count=joined['node_id'].count())
a = compute(t, {t_arc: rddarc, t_idx: rddidx})
in_degree = dict(a.collect())
assert in_degree == {'A': 1, 'C': 2}
def test_multi_level_rowfunc_works(rdd):
expr = t['amount'].map(lambda x: x + 1, 'int')
assert compute(expr, rdd).collect() == [x[1] + 1 for x in data]
@pytest.mark.xfail(raises=NotImplementedError, reason='how is this done in spark?')
def test_merge(rdd):
col = (t['amount'] * 2).label('new')
expr = merge(t['name'], col)
assert compute(expr, rdd).collect() == [
(row[0], row[1] * 2) for row in data]
def test_selection_out_of_order(rdd):
expr = t['name'][t['amount'] < 100]
assert compute(expr, rdd).collect() == ['Alice']
def test_recursive_rowfunc_is_used(rdd):
expr = by(t['name'], total=(2 * (t['amount'] + t['id'])).sum())
expected = [('Alice', 2 * (101 + 53)),
('Bob', 2 * (202))]
assert set(compute(expr, rdd).collect()) == set(expected)
def test_outer_join(sc):
left = [(1, 'Alice', 100),
(2, 'Bob', 200),
(4, 'Dennis', 400)]
left = sc.parallelize(left)
right = [('NYC', 1),
('Boston', 1),
('LA', 3),
('Moscow', 4)]
right = sc.parallelize(right)
L = symbol('L', 'var * {id: int, name: string, amount: real}')
R = symbol('R', 'var * {city: string, id: int}')
assert set(compute(join(L, R), {L: left, R: right}).collect()) == set(
[(1, 'Alice', 100, 'NYC'),
(1, 'Alice', 100, 'Boston'),
(4, 'Dennis', 400, 'Moscow')])
assert set(compute(join(L, R, how='left'), {L: left, R: right}).collect()) == set(
[(1, 'Alice', 100, 'NYC'),
(1, 'Alice', 100, 'Boston'),
(2, 'Bob', 200, None),
(4, 'Dennis', 400, 'Moscow')])
assert set(compute(join(L, R, how='right'), {L: left, R: right}).collect()) == set(
[(1, 'Alice', 100, 'NYC'),
(1, 'Alice', 100, 'Boston'),
(3, None, None, 'LA'),
(4, 'Dennis', 400, 'Moscow')])
# Full outer join
assert set(compute(join(L, R, how='outer'), {L: left, R: right}).collect()) == set(
[(1, 'Alice', 100, 'NYC'),
(1, 'Alice', 100, 'Boston'),
(2, 'Bob', 200, None),
(3, None, None, 'LA'),
(4, 'Dennis', 400, 'Moscow')])
| bsd-3-clause |
RayleighChen/SummerVac | cai/1.py | 1 | 1956 | import pandas as pd
import numpy as np
import math
from sklearn.linear_model import LinearRegression
train = pd.read_csv('C:/Users/caijiawen/Desktop/train.csv')
test = pd.read_csv('C:/Users/caijiawen/Desktop/testt.csv')
rate=0.00001
#train=pd.double(train)
train=train.astype(float)
test=test.astype(float)
n=pd.Series([100,100,100,10000,1000,1000,10,10,1000000,100000,10,1000,10000000])
w1,w2,w3,w4,w5,w6,w7,w8,w9,w10,w11,w12=0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01
b=0.01
for i in range(train.shape[0]):
for j in range (13):
train.iat[i,j]/=n[j]
for i in range(test.shape[0]):
for j in range (13):
test.iat[i,j]/=n[j]
for i in range (4000):
t=train['Y']-(b+train['1']*w1+train['2']*w2+train['3']*w3+train['4']*w4+train['5']*w5+train['6']*w6+train['7']*w7+train['8']*w8+train['9']*w9+train['10']*w10+train['11']*w11+train['12']*w12)
w1=w1-rate*np.sum(2*(t*(-train['1'])))
w2=w2-rate*np.sum(2*(t*(-train['2'])))
w3=w3-rate*np.sum(2*(t*(-train['3'])))
w4=w4-rate*np.sum(2*(t*(-train['4'])))
w5=w5-rate*np.sum(2*(t*(-train['5'])))
w6=w6-rate*np.sum(2*(t*(-train['6'])))
w7=w7-rate*np.sum(2*(t*(-train['7'])))
w8=w8-rate*np.sum(2*(t*(-train['8'])))
w9=w9-rate*np.sum(2*(t*(-train['9'])))
w10=w10-rate*np.sum(2*(t*(-train['10'])))
w11=w11-rate*np.sum(2*(t*(-train['11'])))
w12=w12-rate*np.sum(2*(t*(-train['12'])))
b=b-rate*np.sum(2*(t*(-1)))
ans=0
print(w1,w2,w3,w4,w5,w6,w7,w8,w9,w10,w11,w12)
for i in range(test.shape[0]):
t=(b+test.iat[i,1]*w1+test.iat[i,2]*w2+test.iat[i,3]*w3+test.iat[i,4]*w4+test.iat[i,5]*w5+test.iat[i,6]*w6+test.iat[i,7]*w7+test.iat[i,8]*w8+test.iat[i,9]*w9+test.iat[i,10]*w10+test.iat[i,11]*w11+test.iat[i,12]*w12)
t=t*100
if(test.iat[i,12]*10000000<100):
t=0
# print(test.iat[i,12],test.iat[i,0]*100)
ans+=((t-test.iat[i,0]*100)**2)
print('RMSE %.2f' % math.sqrt(ans/test.shape[0]))
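# Added, illustrative sketch: the loop above is hand-rolled batch gradient
# descent on squared error for a 12-feature linear model. Using the same
# rescaled 'train'/'test' frames and their column names ('Y' plus '1'..'12'),
# the already-imported (but otherwise unused) sklearn LinearRegression gives a
# closed-form fit to compare the hand-tuned weights against.
def sklearn_baseline(train, test):
    cols = [str(j) for j in range(1, 13)]
    model = LinearRegression()
    model.fit(train[cols], train['Y'])
    pred = model.predict(test[cols])
    # RMSE reported in the same units as the printout above (Y was scaled by 100)
    return math.sqrt((((pred - test['Y']) * 100) ** 2).mean())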
| gpl-2.0 |
ojengwa/sympy | sympy/plotting/tests/test_plot.py | 4 | 7541 | from sympy import (pi, sin, cos, Symbol, Integral, summation, sqrt, log,
oo, LambertW, I, meijerg, exp_polar, Max)
from sympy.plotting import (plot, plot_parametric, plot3d_parametric_line,
plot3d, plot3d_parametric_surface)
from sympy.plotting.plot import unset_show
from sympy.utilities.pytest import skip
from sympy.plotting.experimental_lambdify import lambdify
from sympy.external import import_module
from tempfile import NamedTemporaryFile
import warnings
import os
unset_show()
# XXX: We could implement this as a context manager instead
# That would need rewriting the plot_and_save() function
# entirely
class TmpFileManager:
tmp_files = []
@classmethod
def tmp_file(cls, name=''):
cls.tmp_files.append(NamedTemporaryFile(prefix=name, suffix='.png').name)
return cls.tmp_files[-1]
@classmethod
def cleanup(cls):
for f in cls.tmp_files:
    os.remove(f)
def plot_and_save(name):
tmp_file = TmpFileManager.tmp_file
x = Symbol('x')
y = Symbol('y')
z = Symbol('z')
###
# Examples from the 'introduction' notebook
###
p = plot(x)
p = plot(x*sin(x), x*cos(x))
p.extend(p)
p[0].line_color = lambda a: a
p[1].line_color = 'b'
p.title = 'Big title'
p.xlabel = 'the x axis'
p[1].label = 'straight line'
p.legend = True
p.aspect_ratio = (1, 1)
p.xlim = (-15, 20)
p.save(tmp_file('%s_basic_options_and_colors' % name))
p.extend(plot(x + 1))
p.append(plot(x + 3, x**2)[1])
p.save(tmp_file('%s_plot_extend_append' % name))
p[2] = plot(x**2, (x, -2, 3))
p.save(tmp_file('%s_plot_setitem' % name))
p = plot(sin(x), (x, -2*pi, 4*pi))
p.save(tmp_file('%s_line_explicit' % name))
p = plot(sin(x))
p.save(tmp_file('%s_line_default_range' % name))
p = plot((x**2, (x, -5, 5)), (x**3, (x, -3, 3)))
p.save(tmp_file('%s_line_multiple_range' % name))
#parametric 2d plots.
#Single plot with default range.
plot_parametric(sin(x), cos(x)).save(tmp_file())
#Single plot with range.
p = plot_parametric(sin(x), cos(x), (x, -5, 5))
p.save(tmp_file('%s_parametric_range' % name))
#Multiple plots with same range.
p = plot_parametric((sin(x), cos(x)), (x, sin(x)))
p.save(tmp_file('%s_parametric_multiple' % name))
#Multiple plots with different ranges.
p = plot_parametric((sin(x), cos(x), (x, -3, 3)), (x, sin(x), (x, -5, 5)))
p.save(tmp_file('%s_parametric_multiple_ranges' % name))
#depth of recursion specified.
p = plot_parametric(x, sin(x), depth=13)
p.save(tmp_file('%s_recursion_depth' % name))
#No adaptive sampling.
p = plot_parametric(cos(x), sin(x), adaptive=False, nb_of_points=500)
p.save(tmp_file('%s_adaptive' % name))
#3d parametric plots
p = plot3d_parametric_line(sin(x), cos(x), x)
p.save(tmp_file('%s_3d_line' % name))
p = plot3d_parametric_line(
(sin(x), cos(x), x, (x, -5, 5)), (cos(x), sin(x), x, (x, -3, 3)))
p.save(tmp_file('%s_3d_line_multiple' % name))
p = plot3d_parametric_line(sin(x), cos(x), x, nb_of_points=30)
p.save(tmp_file('%s_3d_line_points' % name))
# 3d surface single plot.
p = plot3d(x * y)
p.save(tmp_file('%s_surface' % name))
# Multiple 3D plots with same range.
p = plot3d(-x * y, x * y, (x, -5, 5))
p.save(tmp_file('%s_surface_multiple' % name))
# Multiple 3D plots with different ranges.
p = plot3d(
(x * y, (x, -3, 3), (y, -3, 3)), (-x * y, (x, -3, 3), (y, -3, 3)))
p.save(tmp_file('%s_surface_multiple_ranges' % name))
# Single Parametric 3D plot
p = plot3d_parametric_surface(sin(x + y), cos(x - y), x - y)
p.save(tmp_file('%s_parametric_surface' % name))
# Multiple Parametric 3D plots.
p = plot3d_parametric_surface(
(x*sin(z), x*cos(z), z, (x, -5, 5), (z, -5, 5)),
(sin(x + y), cos(x - y), x - y, (x, -5, 5), (y, -5, 5)))
p.save(tmp_file('%s_parametric_surface' % name))
###
# Examples from the 'colors' notebook
###
p = plot(sin(x))
p[0].line_color = lambda a: a
p.save(tmp_file('%s_colors_line_arity1' % name))
p[0].line_color = lambda a, b: b
p.save(tmp_file('%s_colors_line_arity2' % name))
p = plot(x*sin(x), x*cos(x), (x, 0, 10))
p[0].line_color = lambda a: a
p.save(tmp_file('%s_colors_param_line_arity1' % name))
p[0].line_color = lambda a, b: a
p.save(tmp_file('%s_colors_param_line_arity2a' % name))
p[0].line_color = lambda a, b: b
p.save(tmp_file('%s_colors_param_line_arity2b' % name))
p = plot3d_parametric_line(sin(x) + 0.1*sin(x)*cos(7*x),
cos(x) + 0.1*cos(x)*cos(7*x),
0.1*sin(7*x),
(x, 0, 2*pi))
p[0].line_color = lambda a: sin(4*a)
p.save(tmp_file('%s_colors_3d_line_arity1' % name))
p[0].line_color = lambda a, b: b
p.save(tmp_file('%s_colors_3d_line_arity2' % name))
p[0].line_color = lambda a, b, c: c
p.save(tmp_file('%s_colors_3d_line_arity3' % name))
p = plot3d(sin(x)*y, (x, 0, 6*pi), (y, -5, 5))
p[0].surface_color = lambda a: a
p.save(tmp_file('%s_colors_surface_arity1' % name))
p[0].surface_color = lambda a, b: b
p.save(tmp_file('%s_colors_surface_arity2' % name))
p[0].surface_color = lambda a, b, c: c
p.save(tmp_file('%s_colors_surface_arity3a' % name))
p[0].surface_color = lambda a, b, c: sqrt((a - 3*pi)**2 + b**2)
p.save(tmp_file('%s_colors_surface_arity3b' % name))
p = plot3d_parametric_surface(x * cos(4 * y), x * sin(4 * y), y,
(x, -1, 1), (y, -1, 1))
p[0].surface_color = lambda a: a
p.save(tmp_file('%s_colors_param_surf_arity1' % name))
p[0].surface_color = lambda a, b: a*b
p.save(tmp_file('%s_colors_param_surf_arity2' % name))
p[0].surface_color = lambda a, b, c: sqrt(a**2 + b**2 + c**2)
p.save(tmp_file('%s_colors_param_surf_arity3' % name))
###
# Examples from the 'advanced' notebook
###
i = Integral(log((sin(x)**2 + 1)*sqrt(x**2 + 1)), (x, 0, y))
p = plot(i, (y, 1, 5))
p.save(tmp_file('%s_advanced_integral' % name))
s = summation(1/x**y, (x, 1, oo))
p = plot(s, (y, 2, 10))
p.save(tmp_file('%s_advanced_inf_sum' % name))
p = plot(summation(1/x, (x, 1, y)), (y, 2, 10), show=False)
p[0].only_integers = True
p[0].steps = True
p.save(tmp_file('%s_advanced_fin_sum' % name))
###
# Test expressions that can not be translated to np and generate complex
# results.
###
plot(sin(x) + I*cos(x)).save(tmp_file())
plot(sqrt(sqrt(-x))).save(tmp_file())
plot(LambertW(x)).save(tmp_file())
plot(sqrt(LambertW(x))).save(tmp_file())
#Characteristic function of a StudentT distribution with nu=10
plot((meijerg(((1 / 2,), ()), ((5, 0, 1 / 2), ()), 5 * x**2 * exp_polar(-I*pi)/2)
+ meijerg(((1/2,), ()), ((5, 0, 1/2), ()),
5*x**2 * exp_polar(I*pi)/2)) / (48 * pi), (x, 1e-6, 1e-2)).save(tmp_file())
def test_matplotlib():
matplotlib = import_module('matplotlib', min_module_version='1.1.0', catch=(RuntimeError,))
if matplotlib:
try:
plot_and_save('test')
finally:
# clean up
TmpFileManager.cleanup()
else:
skip("Matplotlib not the default backend")
# Tests for exception handling in experimental_lambdify
def test_experimental_lambify():
x = Symbol('x')
lambdify([x], Max(x, 5))
assert Max(2, 5) == 5
assert Max(7, 5) == 7
| bsd-3-clause |
blbarker/spark-tk | regression-tests/sparktkregtests/testcases/dicom/dicom_extract_keyword_test.py | 13 | 7033 | # vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""tests dicom.extract_keywords functionality"""
import unittest
from sparktkregtests.lib import sparktk_test
import numpy
from lxml import etree
class DicomExtractKeywordsTest(sparktk_test.SparkTKTestCase):
def setUp(self):
"""import dicom data for testing"""
super(DicomExtractKeywordsTest, self).setUp()
self.dataset = self.get_file("dicom_uncompressed")
self.dicom = self.context.dicom.import_dcm(self.dataset)
self.xml_directory = "../../../datasets/dicom/dicom_uncompressed/xml/"
self.image_directory = "../../../datasets/dicom/dicom_uncompressed/imagedata/"
self.count = self.dicom.metadata.count()
def test_extract_one_column_basic(self):
"""test extract keyword with one col"""
self.dicom.extract_keywords(["PatientID"])
# ensure column was added
columns = self.dicom.metadata.column_names
if u'PatientID' not in columns:
raise Exception("PatientID not added to columns")
# compare expected results with extract_keywords result
expected_result = self._get_expected_column_data_from_xml(["PatientID"])
take_result = self.dicom.metadata.take(self.count, columns=['PatientID'])
numpy.testing.assert_equal(take_result, expected_result)
def test_extract_multiple_columns_basic(self):
"""test extract keywords with mult cols"""
keywords = ["PatientID", "SOPInstanceUID"]
self.dicom.extract_keywords(keywords)
# ensure columns were added
columns = self.dicom.metadata.column_names
if u'PatientID' not in columns:
raise Exception("PatientID not added to columns")
if u'SOPInstanceUID' not in columns:
raise Exception("SOPInstanceUID not added to columns")
# compare expected and actual result
expected_result = self._get_expected_column_data_from_xml(keywords)
take_result = self.dicom.metadata.take(self.count, columns=keywords)
numpy.testing.assert_equal(take_result, expected_result)
def test_extract_invalid_column(self):
"""test extract keyword with invalid column"""
self.dicom.extract_keywords(["invalid"])
# ensure column was added
columns = self.dicom.metadata.column_names
if u'invalid' not in columns:
raise Exception("Invalid column not added")
# compare expected and actual result
invalid_column = self.dicom.metadata.take(self.count, columns=[u'invalid'])
expected_result = [[None] for x in range(0, self.count)]
self.assertEqual(invalid_column, expected_result)
def test_extract_multiple_invalid_columns(self):
"""test extract keyword mult invalid cols"""
keywords = ["invalid", "another_invalid_col"]
self.dicom.extract_keywords(keywords)
# test that columns were added
columns = self.dicom.metadata.column_names
if u'invalid' not in columns:
raise Exception("invalid column not added to columns")
if u'another_invalid_col' not in columns:
raise Exception("another_invalid_col not added to columns")
# compare actual with expected result
invalid_columns = self.dicom.metadata.take(self.count, columns=keywords)
expected_result = [[None, None] for x in range(0, self.count)]
self.assertEqual(invalid_columns, expected_result)
def test_extract_invalid_valid_col_mix(self):
keywords = ["PatientID", "Invalid"]
self.dicom.extract_keywords(keywords)
# test that columns were added
columns = self.dicom.metadata.column_names
if u'PatientID' not in columns:
raise Exception("PatientID not added to columns")
if u'Invalid' not in columns:
raise Exception("Invalid not added to columns")
# compare actual with expected result
take_result = self.dicom.metadata.take(self.count, columns=keywords)
expected_result = self._get_expected_column_data_from_xml(keywords)
numpy.testing.assert_equal(take_result, expected_result)
def test_extract_invalid_type(self):
with self.assertRaisesRegexp(Exception, "should be either str or list"):
self.dicom.extract_keywords(1)
def test_extract_unicode_columns(self):
keywords = [u'PatientID']
self.dicom.extract_keywords(keywords)
# test that column was added
columns = self.dicom.metadata.column_names
if u'PatientID' not in columns:
raise Exception("PatientID not added to columns")
# compare actual with expected result
take_result = self.dicom.metadata.take(self.count, columns=keywords)
expected_result = self._get_expected_column_data_from_xml(keywords)
numpy.testing.assert_equal(take_result, expected_result)
def _get_expected_column_data_from_xml(self, tags):
# generate expected data by extracting the keywords ourselves
expected_column_data = []
# download to pandas for easy access
metadata_pandas = self.dicom.metadata.to_pandas()
# iterate through the metadata rows
for index, row in metadata_pandas.iterrows():
# convert metadata to ascii string
metadata = row["metadata"].encode("ascii", "ignore")
# create a lxml tree object from xml metadata
xml_root = etree.fromstring(metadata)
expected_row = []
for tag in tags:
# for lxml the search query means
# look for all DicomAttribute elements with
# attribute keyword equal to our keyword
# then get the value element underneath it and extract the
# inner text
tag_query = ".//DicomAttribute[@keyword='" + tag + "']/Value/text()"
query_result = xml_root.xpath(tag_query)
# if result is [] use None, otherwise format in unicode
result = query_result[0].decode("ascii", "ignore") if query_result else None
expected_row.append(result)
#expected_row.append(query_result)
expected_column_data.append(expected_row)
return expected_column_data
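# Added, illustrative only: a standalone sketch of the XPath pattern used in
# _get_expected_column_data_from_xml above, run against a hand-written
# DicomAttribute fragment (invented for illustration, not real dataset output).
def _xpath_keyword_demo():
    sample = (b"<NativeDicomModel>"
              b"<DicomAttribute keyword='PatientID'>"
              b"<Value>ABC123</Value>"
              b"</DicomAttribute>"
              b"</NativeDicomModel>")
    root = etree.fromstring(sample)
    # returns ['ABC123'] for the matching keyword, [] for a missing one
    return root.xpath(".//DicomAttribute[@keyword='PatientID']/Value/text()")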
if __name__ == "__main__":
unittest.main()
| apache-2.0 |
KonradBreitsprecher/espresso | samples/lj-demo.py | 1 | 19950 | #
# Copyright (C) 2013,2014,2015,2016 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import print_function, division
import espressomd
from espressomd import thermostat
from espressomd import visualization
import numpy as np
import matplotlib
matplotlib.use('WXAgg')
from matplotlib import pyplot
from threading import Thread
from traits.api import HasTraits, Button, Any, Range, List, Enum, Float
from traitsui.api import View, Group, Item, CheckListEditor, RangeEditor, EnumEditor
from espressomd.visualization_mayavi import mlab
try:
import midi
except:
try:
from pygame import midi
except:
from portmidi import midi
midi.init()
# if log flag is set, midi controller will change pressure logarithmically
pressure_log_flag = True
mayavi_autozoom = False # autozoom is buggy... works only for rotation
show_real_system_temperature = True
old_pressure = -1
# NPT variables
#############################################################
NPTGamma0 = 1.0
#NPTInitPistonMass = 1e-06
#NPTMinPistonMass = 1e-06
NPTMinPistonMass = 1e-04
NPTMaxPistonMass = 1.0
NPTInitPistonMass = NPTMinPistonMass
# System parameters
#############################################################
# 300 Particles
box_l = 7.5395
density = 0.7
#global_boxlen = box_l
#mainthread_boxlen = box_l
# Interaction parameters (repulsive Lennard Jones)
#############################################################
lj_eps = 1.0
lj_sig = 1.0
lj_cut = 1.12246
lj_cap = 20
# Integration parameters
#############################################################
system = espressomd.System(box_l=[1.0, 1.0, 1.0])
system.set_random_state_PRNG()
#system.seed = system.cell_system.get_state()['n_nodes'] * [1234]
system.time_step = 0.01
system.cell_system.skin = 0.4
system.thermostat.set_langevin(kT=1.0, gamma=1.0)
system.cell_system.set_n_square(use_verlet_lists=False)
# warmup integration (with capped LJ potential)
warm_steps = 100
warm_n_times = 30
# do the warmup until the particles have at least the distance min_dist
min_dist = 0.9
# integration
int_steps = 1
int_n_times = 5000000
#############################################################
# Setup System #
#############################################################
# Interaction setup
#############################################################
system.box_l = [box_l, box_l, box_l]
system.non_bonded_inter[0, 0].lennard_jones.set_params(
epsilon=lj_eps, sigma=lj_sig,
cutoff=lj_cut, shift="auto")
system.force_cap = lj_cap
# Particle setup
#############################################################
volume = box_l * box_l * box_l
n_part = int(volume * density)
for i in range(n_part):
system.part.add(id=i, pos=np.random.random(3) * system.box_l)
system.analysis.dist_to(0)
act_min_dist = system.analysis.min_dist()
system.cell_system.max_num_cells = 2744
mayavi = visualization.mayaviLive(system)
mayavi_rotation_angle = 45.
mayavi_rotation_angle_step = 5.
mayavi_zoom = 35.949120941773977
mayavi_zoom_step = 3.
plot_max_data_len = 20
#############################################################
# GUI Controls #
#############################################################
inputs, outputs = [], []
for i in range(midi.get_count()):
interf, name, input, output, opened = midi.get_device_info(i)
if input:
inputs.append((i, interf + " " + name))
if output:
outputs.append((i, interf + " " + name))
class Controls(HasTraits):
if len(inputs) == 1:
default_input = inputs
for i in inputs:
if not "Through Port" in i[1]:
default_input = i
break
default_input = default_input if len(inputs) > 0 else None
default_output = -1
for i in outputs:
if not "Through Port" in i[1]:
default_output = i
break
else:
through_port_output = i
default_output = default_output if len(
outputs) > 1 else through_port_output
input_device = List(value=default_input,
editor=CheckListEditor(values=inputs))
output_device = List(value=default_output,
editor=CheckListEditor(values=outputs))
max_temp = 2.
min_temp = 0.5
max_press = 10.
min_press = 5e-4
max_vol = 100000.
min_vol = 50.
max_n = 1000
min_n = 50
temperature = Range(min_temp, max_temp, 1., )
volume = Float(box_l**3.)
pressure = Float(1.)
number_of_particles = Range(min_n, max_n, n_part, )
ensemble = Enum('NVT', 'NPT')
midi_input = midi.Input(default_input[0]) if len(inputs) > 1 else None
midi_output = midi.Output(default_output[0]) if len(outputs) > 1 else None
MIDI_BASE = 224
MIDI_NUM_TEMPERATURE = MIDI_BASE + 0
MIDI_NUM_VOLUME = MIDI_BASE + 1
MIDI_NUM_PRESSURE = MIDI_BASE + 2
MIDI_NUM_NUMBEROFPARTICLES = MIDI_BASE + 3
MIDI_ROTATE = 0
MIDI_ZOOM = 176
_ui = Any
view = View(
Group(
Item('temperature', editor=RangeEditor(
low_name='min_temp', high_name='max_temp')),
Item('volume', editor=RangeEditor(
low_name='min_vol', high_name='max_vol')),
Item('pressure', editor=RangeEditor(
low_name='min_press', high_name='max_press')),
Item('number_of_particles', editor=RangeEditor(
low_name='min_n', high_name='max_n', is_float=False)),
Item('ensemble', style='custom'),
show_labels=True,
label='Parameters'
),
Group(
Item('input_device'),
Item('output_device'),
show_labels=True,
label='MIDI devices'
),
buttons=[],
title='Control'
)
def __init__(self, **traits):
super(Controls, self).__init__(**traits)
self._ui = self.edit_traits()
self.push_current_values()
def push_current_values(self):
"""send the current values to the MIDI controller"""
self._temperature_fired()
self._volume_fired()
self._pressure_fired()
self._number_of_particles_fired()
self._ensemble_fired()
def _input_device_fired(self):
if self.midi_input is not None:
self.midi_input.close()
self.midi_input = midi.Input(self.input_device[0])
def _output_device_fired(self):
if self.midi_output is not None:
self.midi_output.close()
self.midi_output = midi.Output(self.output_device[0])
self.push_current_values()
def _temperature_fired(self):
status = self.MIDI_NUM_TEMPERATURE
data1 = int((self.temperature - self.min_temp) /
(self.max_temp - self.min_temp) * 127)
data2 = data1
if self.midi_output is not None:
self.midi_output.write_short(status, data1, data2)
def _volume_fired(self):
status = self.MIDI_NUM_VOLUME
data1 = limit_range(int((system.box_l[0]**3. - self.min_vol) /
(self.max_vol - self.min_vol) * 127), minval=0, maxval=127)
data2 = data1
if self.midi_output is not None:
self.midi_output.write_short(status, data1, data2)
def _pressure_fired(self):
status = self.MIDI_NUM_PRESSURE
if pressure_log_flag:
data1 = limit_range(int(127 * (np.log(self.pressure) - np.log(self.min_press)) / (
np.log(self.max_press) - np.log(self.min_press))), minval=0, maxval=127)
else:
data1 = limit_range(int((self.pressure - self.min_press) /
(self.max_press - self.min_press) * 127), minval=0, maxval=127)
data2 = data1
if self.midi_output is not None:
self.midi_output.write_short(status, data1, data2)
def _number_of_particles_fired(self):
status = self.MIDI_NUM_NUMBEROFPARTICLES
data1 = int(self.number_of_particles / self.max_n * 127)
data2 = data1
if self.midi_output is not None:
self.midi_output.write_short(status, data1, data2)
def _ensemble_fired(self):
if self.midi_output is not None:
self.midi_output.write_short(144, 0, 127) # T
self.midi_output.write_short(
144, 1, 127 * (self.ensemble != 'NPT')) # V
self.midi_output.write_short(
144, 2, 127 * (self.ensemble == 'NPT')) # P
self.midi_output.write_short(144, 3, 127) # N
#############################################################
# Warmup Integration #
#############################################################
# set LJ cap
lj_cap = 20
system.force_cap = lj_cap
# # Warmup Integration Loop
# i = 0
# while (i < warm_n_times and act_min_dist < min_dist):
# system.integrator.run(warm_steps)
# # Warmup criterion
# act_min_dist = system.analysis.min_dist()
# i += 1
#
# # Increase LJ cap
# lj_cap = lj_cap + 10
# system.force_cap = lj_cap
# mayavi.update()
#############################################################
# Integration #
#############################################################
# remove force capping
#lj_cap = 0
# system.force_cap = lj_cap
# get initial observables
pressure = system.analysis.pressure()
temperature = (system.part[:].v**2).sum() / 3.0
# TODO: this is some terrible polynomial fit, replace it with a better expression
# equation of state
pyplot.subplot(131)
pyplot.semilogy()
pyplot.title("Phase diagram")
pyplot.xlabel("Temperature")
pyplot.ylabel("Pressure")
pyplot.xlim(0.5, 2.0)
pyplot.ylim(5e-5, 2e1)
xx = np.linspace(0.5, 0.7, 200)
pyplot.plot(xx, -6.726 * xx**4 + 16.92 * xx**3 -
15.85 * xx**2 + 6.563 * xx - 1.015, 'k-')
xx = np.linspace(0.7, 1.3, 600)
pyplot.plot(xx, -0.5002 * xx**4 + 2.233 * xx**3 -
3.207 * xx**2 + 1.917 * xx - 0.4151, 'k-')
xx = np.linspace(0.6, 2.2, 1500)
pyplot.plot(xx, 16.72 * xx**4 - 88.28 * xx**3 +
168 * xx**2 - 122.4 * xx + 29.79, 'k-')
cursor = pyplot.scatter(temperature, pressure['total'], 200, 'g')
#cursor2 = pyplot.scatter(-1, -1, 200, 'r')
pyplot.text(0.6, 10, 'solid')
pyplot.text(1, 1, 'liquid')
pyplot.text(1, 10**-3, 'gas')
pyplot.subplot(132)
pyplot.title("Temperature")
plot1, = pyplot.plot([0], [temperature])
pyplot.xlabel("Time")
pyplot.ylabel("Temperature")
pyplot.subplot(133)
pyplot.title("Pressure")
plot2, = pyplot.plot([0], [pressure['total']])
pyplot.xlabel("Time")
pyplot.ylabel("Pressure")
# pyplot.legend()
pyplot.show(block=False)
plt1_x_data = np.zeros(1)
plt1_y_data = np.zeros(1)
plt2_x_data = np.zeros(1)
plt2_y_data = np.zeros(1)
def limit_range(val, minval=0., maxval=1.):
if val > maxval:
ret_val = maxval
elif val < minval:
ret_val = minval
else:
ret_val = val
if isinstance(val, int):
return int(ret_val)
elif isinstance(val, float):
return float(ret_val)
else:
return ret_val
def pressure_from_midi_val(midi_val, pmin, pmax, log_flag=pressure_log_flag):
if log_flag:
return pmin * (float(pmax) / pmin)**(float(midi_val) / 127)
else:
return (midi_val * (pmax - pmin) / 127 + pmin)
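# Added illustration: with the Controls defaults above (min_press=5e-4,
# max_press=10) and pressure_log_flag=True, midi value 0 maps to 5e-4, 127 maps
# to 10, and a mid-range value of 64 maps to roughly 0.07 -- close to the
# geometric mean sqrt(min_press * max_press) -- which is the point of the
# logarithmic mapping.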
def main_loop():
global energies, plt1_x_data, plt1_y_data, plt2_x_data, plt2_y_data, old_pressure
system.integrator.run(steps=int_steps)
mayavi.update()
# make sure the parameters are valid
# not sure if this is necessary after using limit_range
if controls.volume == 0:
controls.volume = controls.min_vol
if controls.number_of_particles == 0:
controls.number_of_particles = 1
if controls.pressure == 0:
controls.pressure = controls.min_press
pressure = system.analysis.pressure()
# update the parameters set in the GUI
system.thermostat.set_langevin(kT=controls.temperature, gamma=1.0)
if controls.ensemble == 'NPT':
# reset Vkappa when target pressure has changed
if old_pressure != controls.pressure:
system.analysis.Vkappa('reset')
old_pressure = controls.pressure
newVkappa = system.analysis.Vkappa('read')['Vk1']
newVkappa = newVkappa if newVkappa > 0. else 4.0 / \
(NPTGamma0 * NPTGamma0 * NPTInitPistonMass)
pistonMass = limit_range(
4.0 / (NPTGamma0 * NPTGamma0 * newVkappa), NPTMinPistonMass, NPTMaxPistonMass)
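# Added note: the piston mass is re-derived every iteration from the measured
# volume-fluctuation estimate Vkappa via the 4.0 / (NPTGamma0**2 * Vkappa)
# heuristic, then clipped to [NPTMinPistonMass, NPTMaxPistonMass] before being
# passed to set_isotropic_npt below.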
system.integrator.set_isotropic_npt(
controls.pressure, pistonMass, cubic_box=True)
controls.volume = system.box_l[0]**3.
else:
system.integrator.set_nvt()
controls.pressure = pressure['total']
new_box = np.ones(3) * controls.volume**(1. / 3.)
if np.any(np.array(system.box_l) != new_box):
for i in range(len(system.part)):
system.part[i].pos *= new_box / system.box_l[0]
system.box_l = new_box
new_part = controls.number_of_particles
if new_part > len(system.part):
for i in range(len(system.part), new_part):
system.part.add(id=i, pos=np.random.random(3) * system.box_l)
elif new_part < len(system.part):
for i in range(new_part, len(system.part)):
system.part[i].remove()
# There should be no gaps in particle numbers
assert len(system.part) == system.part.highest_particle_id + 1
plt1_x_data = plot1.get_xdata()
plt1_y_data = plot1.get_ydata()
plt2_x_data = plot2.get_xdata()
plt2_y_data = plot2.get_ydata()
plt1_x_data = np.append(
plt1_x_data[-plot_max_data_len + 1:], system.time)
if show_real_system_temperature:
plt1_y_data = np.append(plt1_y_data[-plot_max_data_len + 1:], 2. / (
3. * len(system.part)) * system.analysis.energy()["kinetic"])
else:
plt1_y_data = np.append(
plt1_y_data[-plot_max_data_len + 1:], (system.part[:].v**2).sum())
plt2_x_data = np.append(
plt2_x_data[-plot_max_data_len + 1:], system.time)
plt2_y_data = np.append(
plt2_y_data[-plot_max_data_len + 1:], pressure['total'])
def main_thread():
for i in range(0, int_n_times):
main_loop()
def midi_thread():
global mayavi_rotation_angle, mayavi_zoom
while True:
try:
if controls.midi_input is not None and controls.midi_input.poll():
events = controls.midi_input.read(1000)
for event in events:
status, data1, data2, data3 = event[0]
if status == controls.MIDI_NUM_TEMPERATURE:
temperature = data2 * \
(controls.max_temp - controls.min_temp) / \
127 + controls.min_temp
controls.temperature = limit_range(
temperature, controls.min_temp, controls.max_temp)
elif status == controls.MIDI_NUM_VOLUME:
volume = data2 * \
(controls.max_vol - controls.min_vol) / \
127 + controls.min_vol
controls.volume = limit_range(
volume, controls.min_vol, controls.max_vol)
controls.ensemble = 'NVT'
elif status == controls.MIDI_NUM_PRESSURE:
pressure = pressure_from_midi_val(
data2, controls.min_press, controls.max_press)
controls.pressure = limit_range(
pressure, controls.min_press, controls.max_press)
controls.ensemble = 'NPT'
elif status == controls.MIDI_NUM_NUMBEROFPARTICLES:
npart = int(data2 * controls.max_n / 127)
controls.number_of_particles = limit_range(
npart, controls.min_n, controls.max_n)
elif status == controls.MIDI_ROTATE:
if data2 < 65:
# rotate clockwise
mayavi_rotation_angle += mayavi_rotation_angle_step * data2
elif data2 >= 65:
# rotate counterclockwise
mayavi_rotation_angle -= mayavi_rotation_angle_step * \
(data2 - 64)
elif status == controls.MIDI_ZOOM:
if data2 < 65:
# zoom in
mayavi_zoom -= mayavi_zoom_step * data2
elif data2 >= 65:
# zoom out
mayavi_zoom += mayavi_zoom_step * (data2 - 64)
# else:
# print("Unknown Status {0} with data1={1} and data2={2}".format(status, data1, data2))
except Exception as e:
print(e)
last_plotted = 0
def calculate_kinetic_energy():
tmp_kin_energy = 0.
for i in range(len(system.part)):
tmp_kin_energy += 1. / 2. * np.linalg.norm(system.part[i].v)**2.0
print("tmp_kin_energy={}".format(tmp_kin_energy))
print("system.analysis.energy()['kinetic']={}".format(
system.analysis.energy()["kinetic"]))
def rotate_scene():
global mayavi_rotation_angle
if mayavi_rotation_angle:
# mlab.yaw(mayavi_rotation_angle)
if mayavi_autozoom:
mlab.view(azimuth=mayavi_rotation_angle, distance='auto')
else:
current_view_vals = mlab.view()
mlab.view(azimuth=mayavi_rotation_angle,
elevation=current_view_vals[1], distance=current_view_vals[2], focalpoint=current_view_vals[3])
mayavi_rotation_angle %= 360.
def zoom_scene():
global mayavi_zoom
mlab.view(distance=mayavi_zoom)
def update_plot():
global last_plotted
# rotate_scene()
zoom_scene()
data_len = np.array([len(plt1_x_data), len(
plt1_y_data), len(plt2_x_data), len(plt2_y_data)]).min()
plot1.set_xdata(plt1_x_data[:data_len])
plot1.set_ydata(plt1_y_data[:data_len])
plot2.set_xdata(plt2_x_data[:data_len])
plot2.set_ydata(plt2_y_data[:data_len])
cursor.set_offsets([plt1_y_data[data_len - 1], plt2_y_data[data_len - 1]])
# cursor2.set_offsets([controls.temperature, controls.pressure])
current_time = plot1.get_xdata()[-1]
if last_plotted == current_time:
return
last_plotted = current_time
plot1.axes.set_xlim(plot1.get_xdata()[0], plot1.get_xdata()[-1])
plot1.axes.set_ylim(0.8 * plot1.get_ydata().min(),
1.2 * plot1.get_ydata().max())
plot2.axes.set_xlim(plot2.get_xdata()[0], plot2.get_xdata()[-1])
plot2.axes.set_ylim(0.8 * plot2.get_ydata().min(),
1.2 * plot2.get_ydata().max())
pyplot.draw()
t = Thread(target=main_thread)
t.daemon = True
mayavi.registerCallback(update_plot, interval=1000)
controls = Controls()
t.start()
if controls.midi_input is not None:
t2 = Thread(target=midi_thread)
t2.daemon = True
t2.start()
mayavi.start()
| gpl-3.0 |
ematvey/tensorflow-seq2seq-tutorials | model_new.py | 1 | 15893 | # Working with TF commit 24466c2e6d32621cd85f0a78d47df6eed2c5c5a6
import math
import numpy as np
import tensorflow as tf
import tensorflow.contrib.seq2seq as seq2seq
from tensorflow.contrib.layers import safe_embedding_lookup_sparse as embedding_lookup_unique
from tensorflow.contrib.rnn import LSTMCell, LSTMStateTuple, GRUCell
import helpers
class Seq2SeqModel():
"""Seq2Seq model usign blocks from new `tf.contrib.seq2seq`.
Requires TF 1.0.0-alpha"""
PAD = 0
EOS = 1
def __init__(self, encoder_cell, decoder_cell, vocab_size, embedding_size,
bidirectional=True,
attention=False,
debug=False):
self.debug = debug
self.bidirectional = bidirectional
self.attention = attention
self.vocab_size = vocab_size
self.embedding_size = embedding_size
self.encoder_cell = encoder_cell
self.decoder_cell = decoder_cell
self._make_graph()
@property
def decoder_hidden_units(self):
# @TODO: is this correct for LSTMStateTuple?
return self.decoder_cell.output_size
def _make_graph(self):
if self.debug:
self._init_debug_inputs()
else:
self._init_placeholders()
self._init_decoder_train_connectors()
self._init_embeddings()
if self.bidirectional:
self._init_bidirectional_encoder()
else:
self._init_simple_encoder()
self._init_decoder()
self._init_optimizer()
def _init_debug_inputs(self):
""" Everything is time-major """
x = [[5, 6, 7],
[7, 6, 0],
[0, 7, 0]]
xl = [2, 3, 1]
self.encoder_inputs = tf.constant(x, dtype=tf.int32, name='encoder_inputs')
self.encoder_inputs_length = tf.constant(xl, dtype=tf.int32, name='encoder_inputs_length')
self.decoder_targets = tf.constant(x, dtype=tf.int32, name='decoder_targets')
self.decoder_targets_length = tf.constant(xl, dtype=tf.int32, name='decoder_targets_length')
def _init_placeholders(self):
""" Everything is time-major """
self.encoder_inputs = tf.placeholder(
shape=(None, None),
dtype=tf.int32,
name='encoder_inputs',
)
self.encoder_inputs_length = tf.placeholder(
shape=(None,),
dtype=tf.int32,
name='encoder_inputs_length',
)
# required for training, not required for testing
self.decoder_targets = tf.placeholder(
shape=(None, None),
dtype=tf.int32,
name='decoder_targets'
)
self.decoder_targets_length = tf.placeholder(
shape=(None,),
dtype=tf.int32,
name='decoder_targets_length',
)
def _init_decoder_train_connectors(self):
"""
During training, `decoder_targets` are compared against the decoder
logits, so their shapes must be compatible.
Here we do a bit of plumbing to set this up.
"""
with tf.name_scope('DecoderTrainFeeds'):
sequence_size, batch_size = tf.unstack(tf.shape(self.decoder_targets))
EOS_SLICE = tf.ones([1, batch_size], dtype=tf.int32) * self.EOS
PAD_SLICE = tf.ones([1, batch_size], dtype=tf.int32) * self.PAD
self.decoder_train_inputs = tf.concat([EOS_SLICE, self.decoder_targets], axis=0)
self.decoder_train_length = self.decoder_targets_length + 1
decoder_train_targets = tf.concat([self.decoder_targets, PAD_SLICE], axis=0)
decoder_train_targets_seq_len, _ = tf.unstack(tf.shape(decoder_train_targets))
decoder_train_targets_eos_mask = tf.one_hot(self.decoder_train_length - 1,
decoder_train_targets_seq_len,
on_value=self.EOS, off_value=self.PAD,
dtype=tf.int32)
decoder_train_targets_eos_mask = tf.transpose(decoder_train_targets_eos_mask, [1, 0])
# hacky way using one_hot to put EOS symbol at the end of target sequence
decoder_train_targets = tf.add(decoder_train_targets,
decoder_train_targets_eos_mask)
self.decoder_train_targets = decoder_train_targets
self.loss_weights = tf.ones([
batch_size,
tf.reduce_max(self.decoder_train_length)
], dtype=tf.float32, name="loss_weights")
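# Added worked example (comment only, not executed): for a single time-major
# target sequence [3, 4] with length 2 and EOS=1, PAD=0, the plumbing above
# yields
#   decoder_train_inputs  = [[1], [3], [4]]   (EOS prepended)
#   decoder_train_targets = [[3], [4], [1]]   (EOS appended via the one_hot mask)
#   decoder_train_length  = [3]
# so the inputs are shifted one step relative to the targets, as teacher
# forcing requires.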
def _init_embeddings(self):
with tf.variable_scope("embedding") as scope:
# Uniform(-sqrt(3), sqrt(3)) has variance=1.
sqrt3 = math.sqrt(3)
initializer = tf.random_uniform_initializer(-sqrt3, sqrt3)
self.embedding_matrix = tf.get_variable(
name="embedding_matrix",
shape=[self.vocab_size, self.embedding_size],
initializer=initializer,
dtype=tf.float32)
self.encoder_inputs_embedded = tf.nn.embedding_lookup(
self.embedding_matrix, self.encoder_inputs)
self.decoder_train_inputs_embedded = tf.nn.embedding_lookup(
self.embedding_matrix, self.decoder_train_inputs)
def _init_simple_encoder(self):
with tf.variable_scope("Encoder") as scope:
(self.encoder_outputs, self.encoder_state) = (
tf.nn.dynamic_rnn(cell=self.encoder_cell,
inputs=self.encoder_inputs_embedded,
sequence_length=self.encoder_inputs_length,
time_major=True,
dtype=tf.float32)
)
def _init_bidirectional_encoder(self):
with tf.variable_scope("BidirectionalEncoder") as scope:
((encoder_fw_outputs,
encoder_bw_outputs),
(encoder_fw_state,
encoder_bw_state)) = (
tf.nn.bidirectional_dynamic_rnn(cell_fw=self.encoder_cell,
cell_bw=self.encoder_cell,
inputs=self.encoder_inputs_embedded,
sequence_length=self.encoder_inputs_length,
time_major=True,
dtype=tf.float32)
)
self.encoder_outputs = tf.concat((encoder_fw_outputs, encoder_bw_outputs), 2)
if isinstance(encoder_fw_state, LSTMStateTuple):
encoder_state_c = tf.concat(
(encoder_fw_state.c, encoder_bw_state.c), 1, name='bidirectional_concat_c')
encoder_state_h = tf.concat(
(encoder_fw_state.h, encoder_bw_state.h), 1, name='bidirectional_concat_h')
self.encoder_state = LSTMStateTuple(c=encoder_state_c, h=encoder_state_h)
elif isinstance(encoder_fw_state, tf.Tensor):
self.encoder_state = tf.concat((encoder_fw_state, encoder_bw_state), 1, name='bidirectional_concat')
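# Added note: since forward and backward outputs/states are concatenated here,
# the decoder cell must have twice the encoder's hidden size (see
# make_seq2seq_model below: LSTMCell(10) encoder vs. LSTMCell(20) decoder).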
def _init_decoder(self):
with tf.variable_scope("Decoder") as scope:
def output_fn(outputs):
return tf.contrib.layers.linear(outputs, self.vocab_size, scope=scope)
if not self.attention:
decoder_fn_train = seq2seq.simple_decoder_fn_train(encoder_state=self.encoder_state)
decoder_fn_inference = seq2seq.simple_decoder_fn_inference(
output_fn=output_fn,
encoder_state=self.encoder_state,
embeddings=self.embedding_matrix,
start_of_sequence_id=self.EOS,
end_of_sequence_id=self.EOS,
maximum_length=tf.reduce_max(self.encoder_inputs_length) + 3,
num_decoder_symbols=self.vocab_size,
)
else:
# attention_states: size [batch_size, max_time, num_units]
attention_states = tf.transpose(self.encoder_outputs, [1, 0, 2])
(attention_keys,
attention_values,
attention_score_fn,
attention_construct_fn) = seq2seq.prepare_attention(
attention_states=attention_states,
attention_option="bahdanau",
num_units=self.decoder_hidden_units,
)
decoder_fn_train = seq2seq.attention_decoder_fn_train(
encoder_state=self.encoder_state,
attention_keys=attention_keys,
attention_values=attention_values,
attention_score_fn=attention_score_fn,
attention_construct_fn=attention_construct_fn,
name='attention_decoder'
)
decoder_fn_inference = seq2seq.attention_decoder_fn_inference(
output_fn=output_fn,
encoder_state=self.encoder_state,
attention_keys=attention_keys,
attention_values=attention_values,
attention_score_fn=attention_score_fn,
attention_construct_fn=attention_construct_fn,
embeddings=self.embedding_matrix,
start_of_sequence_id=self.EOS,
end_of_sequence_id=self.EOS,
maximum_length=tf.reduce_max(self.encoder_inputs_length) + 3,
num_decoder_symbols=self.vocab_size,
)
(self.decoder_outputs_train,
self.decoder_state_train,
self.decoder_context_state_train) = (
seq2seq.dynamic_rnn_decoder(
cell=self.decoder_cell,
decoder_fn=decoder_fn_train,
inputs=self.decoder_train_inputs_embedded,
sequence_length=self.decoder_train_length,
time_major=True,
scope=scope,
)
)
self.decoder_logits_train = output_fn(self.decoder_outputs_train)
self.decoder_prediction_train = tf.argmax(self.decoder_logits_train, axis=-1, name='decoder_prediction_train')
scope.reuse_variables()
(self.decoder_logits_inference,
self.decoder_state_inference,
self.decoder_context_state_inference) = (
seq2seq.dynamic_rnn_decoder(
cell=self.decoder_cell,
decoder_fn=decoder_fn_inference,
time_major=True,
scope=scope,
)
)
self.decoder_prediction_inference = tf.argmax(self.decoder_logits_inference, axis=-1, name='decoder_prediction_inference')
def _init_optimizer(self):
logits = tf.transpose(self.decoder_logits_train, [1, 0, 2])
targets = tf.transpose(self.decoder_train_targets, [1, 0])
self.loss = seq2seq.sequence_loss(logits=logits, targets=targets,
weights=self.loss_weights)
self.train_op = tf.train.AdamOptimizer().minimize(self.loss)
def make_train_inputs(self, input_seq, target_seq):
inputs_, inputs_length_ = helpers.batch(input_seq)
targets_, targets_length_ = helpers.batch(target_seq)
return {
self.encoder_inputs: inputs_,
self.encoder_inputs_length: inputs_length_,
self.decoder_targets: targets_,
self.decoder_targets_length: targets_length_,
}
def make_inference_inputs(self, input_seq):
inputs_, inputs_length_ = helpers.batch(input_seq)
return {
self.encoder_inputs: inputs_,
self.encoder_inputs_length: inputs_length_,
}
def make_seq2seq_model(**kwargs):
args = dict(encoder_cell=LSTMCell(10),
decoder_cell=LSTMCell(20),
vocab_size=10,
embedding_size=10,
attention=True,
bidirectional=True,
debug=False)
args.update(kwargs)
return Seq2SeqModel(**args)
def train_on_copy_task(session, model,
length_from=3, length_to=8,
vocab_lower=2, vocab_upper=10,
batch_size=100,
max_batches=5000,
batches_in_epoch=1000,
verbose=True):
batches = helpers.random_sequences(length_from=length_from, length_to=length_to,
vocab_lower=vocab_lower, vocab_upper=vocab_upper,
batch_size=batch_size)
loss_track = []
try:
for batch in range(max_batches+1):
batch_data = next(batches)
fd = model.make_train_inputs(batch_data, batch_data)
_, l = session.run([model.train_op, model.loss], fd)
loss_track.append(l)
if verbose:
if batch == 0 or batch % batches_in_epoch == 0:
print('batch {}'.format(batch))
print(' minibatch loss: {}'.format(session.run(model.loss, fd)))
for i, (e_in, dt_pred) in enumerate(zip(
fd[model.encoder_inputs].T,
session.run(model.decoder_prediction_train, fd).T
)):
print(' sample {}:'.format(i + 1))
print(' enc input > {}'.format(e_in))
print(' dec train predicted > {}'.format(dt_pred))
if i >= 2:
break
print()
except KeyboardInterrupt:
print('training interrupted')
return loss_track
if __name__ == '__main__':
import sys
if 'fw-debug' in sys.argv:
tf.reset_default_graph()
with tf.Session() as session:
model = make_seq2seq_model(debug=True)
session.run(tf.global_variables_initializer())
session.run(model.decoder_prediction_train)
session.run(model.decoder_prediction_train)
elif 'fw-inf' in sys.argv:
tf.reset_default_graph()
with tf.Session() as session:
model = make_seq2seq_model()
session.run(tf.global_variables_initializer())
fd = model.make_inference_inputs([[5, 4, 6, 7], [6, 6]])
inf_out = session.run(model.decoder_prediction_inference, fd)
print(inf_out)
elif 'train' in sys.argv:
tracks = {}
tf.reset_default_graph()
with tf.Session() as session:
model = make_seq2seq_model(attention=True)
session.run(tf.global_variables_initializer())
loss_track_attention = train_on_copy_task(session, model)
tf.reset_default_graph()
with tf.Session() as session:
model = make_seq2seq_model(attention=False)
session.run(tf.global_variables_initializer())
loss_track_no_attention = train_on_copy_task(session, model)
import matplotlib.pyplot as plt
plt.plot(loss_track)
print('loss {:.4f} after {} examples (batch_size={})'.format(loss_track[-1], len(loss_track)*batch_size, batch_size))
else:
tf.reset_default_graph()
session = tf.InteractiveSession()
model = make_seq2seq_model(debug=False)
session.run(tf.global_variables_initializer())
fd = model.make_inference_inputs([[5, 4, 6, 7], [6, 6]])
inf_out = session.run(model.decoder_prediction_inference, fd)
| mit |
serazing/xscale | xscale/signal/tests/test_fitting.py | 1 | 5652 | # Python 2/3 compatibility
from __future__ import absolute_import, division, print_function
import xarray as xr
import numpy as np
import pandas as pd
import xscale.signal.fitting as xfit
def test_polyfit():
Nt, Nx, Ny = 100, 128, 128
rand = xr.DataArray(np.random.rand(Nt, Nx, Ny), dims=['time', 'x', 'y'])
slopes = 0.02 * xr.DataArray(np.cos(2 * np.pi * rand.x / Nx), dims=['x'])
truth = rand + slopes * rand.time
truth = truth.chunk(chunks={'time': 20, 'x': 50, 'y': 50})
linfit = xfit.polyfit(truth, dim='time').load()
xfit.polyfit(truth.to_dataset(name='truth'), dim='time').load()
assert np.allclose(linfit.sel(degree=1).mean(dim='y').data, slopes.data,
rtol=5e-2, atol=1e-3)
def test_linreg():
nt, nx, ny = 100, 128, 128
offset = 0.7 * xr.DataArray(np.ones((nt, nx, ny)), dims=['time', 'x', 'y'])
slopes = 0.02 * xr.DataArray(np.cos(2 * np.pi * offset.x / nx), dims=['x'])
truth = offset + slopes * offset.time
truth = truth.chunk(chunks={'time': 20, 'x': 50, 'y': 50})
xfit.polyfit(truth.to_dataset(name='truth'), dim='time').load()
slopes_fitted, offsets_fitted = xfit.linreg(truth, dim='time')
assert np.allclose(slopes, slopes_fitted.mean(dim='y').load())
assert np.allclose(offset, offsets_fitted.mean(dim='y').load())
def test_trend():
nt, nx, ny = 100, 128, 128
offset = 0.7 * xr.DataArray(np.ones((nt, nx, ny)), dims=['time', 'x', 'y'])
slopes = 0.02 * xr.DataArray(np.cos(2 * np.pi * offset.x / nx), dims=['x'])
truth = offset + slopes * offset.time
truth = truth.chunk(chunks={'time': 20, 'x': 50, 'y': 50})
trend_mean = xfit.trend(offset, dim='time', type='constant')
trend_linear = xfit.trend(truth, dim='time', type='linear')
assert np.allclose(offset, trend_mean.load())
assert np.allclose(truth, trend_linear.load())
def test_detrend():
nt, nx, ny = 100, 128, 128
offset = 0.7 * xr.DataArray(np.ones((nt, nx, ny)), dims=['time', 'x', 'y'])
slopes = 0.02 * xr.DataArray(np.cos(2 * np.pi * offset.x / nx), dims=['x'])
truth = offset + slopes * offset.time
truth = truth.chunk(chunks={'time': 20, 'x': 50, 'y': 50})
assert np.allclose(0 * offset, xfit.detrend(offset, dim='time',
type='constant').load())
assert np.allclose(0 * offset, xfit.detrend(truth, dim='time',
type='linear').load())
def test_sinfit():
Nt, Nx, Ny = 100, 128, 128
zeros = xr.DataArray(np.zeros((Nt, Nx, Ny)), dims=['time', 'x', 'y'])
zeros = zeros.assign_coords(time=pd.date_range(start='2011-01-01',
periods=100, freq='H'))
offset = 0.4
amp1, phi1 = 1.2, 0.
wave1 = amp1 * np.sin(2 * np.pi * zeros['time.hour'] / 24. +
phi1 * np.pi / 180.)
amp2, phi2 = 1.9, 60.
wave2 = amp2 * np.sin(2 * np.pi * zeros['time.hour'] / 12. +
phi2 * np.pi / 180.)
truth = offset + zeros + wave1 + wave2
truth = truth.chunk(chunks={'time': 20, 'x': 50, 'y': 50})
# Fit both waves
fit2w = xfit.sinfit(truth, dim='time', periods=[24, 12], unit='h').load()
assert np.isclose(fit2w['amplitude'].sel(periods=24).isel(x=10, y=10), amp1)
assert np.isclose(fit2w['phase'].sel(periods=24).isel(x=10, y=10), phi1,
atol=1e-4)
assert np.isclose(fit2w['amplitude'].sel(periods=12).isel(x=10, y=10), amp2)
assert np.isclose(fit2w['phase'].sel(periods=12).isel(x=10, y=10), phi2)
assert np.isclose(fit2w['offset'].isel(x=10, y=10), offset)
# Fit only one wave (wave2)
fit1w = xfit.sinfit(truth, dim='time', periods=12, unit='h').load()
# Compare with 5% relative tolerance (error induced by wave1)
assert np.isclose(fit1w['amplitude'].sel(periods=12).isel(x=10, y=10),
amp2, rtol=5e-2)
assert np.isclose(fit1w['phase'].sel(periods=12).isel(x=10, y=10),
phi2, rtol=5e-2)
# Fit only one dimensional data
xfit.sinfit(truth.isel(x=0, y=0), dim='time',
periods=[24, 12],
unit='h').load()
def test_sinval():
Nt, Nx, Ny = 100, 128, 128
offset = 0.4
periods = [24., 12.]
amp1, phi1 = 1.2, 0.
amp2, phi2 = 1.9, 60.
time = xr.DataArray(pd.date_range(start='2011-01-01',
periods=Nt,
freq='H'),
dims='time')
amp = xr.DataArray([amp1, amp2], dims='periods')
phi = xr.DataArray([phi1, phi2], dims='periods')
ones = xr.DataArray(np.ones((Nx, Ny)), dims=['x', 'y'])
var_dict = {'amplitude': amp * ones,
'phase': phi * ones,
'offset': offset * ones}
ds = xr.Dataset(var_dict).chunk(chunks={'x': 50, 'y': 50})
ds = ds.assign_coords(periods=periods)
ds['periods'].attrs['units'] = 'h'
xfit.sinval(ds, time)
#One mode reconstruction
xfit.sinval(ds.sel(periods=[24,]), time)
def test_order_and_stack():
rand = xr.DataArray(np.random.rand(100, 128, 128), dims=['time', 'x', 'y'])
rand = rand.chunk(chunks={'time': 20, 'x': 50, 'y': 50})
rand_stacked = xfit._order_and_stack(rand, 'y')
assert rand_stacked.dims[0] is 'y'
assert rand_stacked.dims[-1] is 'temp_dim'
assert rand_stacked.shape[-1] == 128 * 100
# Test the exception for 1d array
rand1d = rand.isel(time=0, x=0)
rand1d_stacked = xfit._order_and_stack(rand1d, 'y')
assert np.array_equal(rand1d_stacked, rand1d)
def test_unstack():
rand = xr.DataArray(np.random.rand(100, 128, 128), dims=['time', 'x', 'y'])
rand = rand.chunk(chunks={'time': 20, 'x': 50, 'y': 50})
rand_stacked = xfit._order_and_stack(rand, 'y')
rand_unstacked = xfit._unstack(rand_stacked.mean(dim='y'))
assert rand_unstacked.dims == ('time', 'x')
assert rand_unstacked.shape == (100, 128)
| apache-2.0 |
deokwooj/DDEA | webgui/influxdb/influxdb08/dataframe_client.py | 1 | 6156 | # -*- coding: utf-8 -*-
"""
DataFrame client for InfluxDB
"""
import math
import warnings
from .client import InfluxDBClient
class DataFrameClient(InfluxDBClient):
"""
The ``DataFrameClient`` object holds information necessary to connect
to InfluxDB. Requests can be made to InfluxDB directly through the client.
The client reads from and writes to pandas DataFrames.
"""
def __init__(self, *args, **kwargs):
super(DataFrameClient, self).__init__(*args, **kwargs)
try:
global pd
import pandas as pd
except ImportError as ex:
raise ImportError(
'DataFrameClient requires Pandas, "{ex}" problem importing'
.format(ex=str(ex))
)
self.EPOCH = pd.Timestamp('1970-01-01 00:00:00.000+00:00')
def write_points(self, data, *args, **kwargs):
"""
Write to multiple time series names.
:param data: A dictionary mapping series names to pandas DataFrames
:param time_precision: [Optional, default 's'] Either 's', 'm', 'ms'
or 'u'.
:param batch_size: [Optional] Value to write the points in batches
instead of all at one time. Useful for when doing data dumps from
one database to another or when doing a massive write operation
:type batch_size: int
"""
batch_size = kwargs.get('batch_size')
time_precision = kwargs.get('time_precision', 's')
if batch_size:
kwargs.pop('batch_size') # don't hand over to InfluxDBClient
for key, data_frame in data.items():
number_batches = int(math.ceil(
len(data_frame) / float(batch_size)))
for batch in range(number_batches):
start_index = batch * batch_size
end_index = (batch + 1) * batch_size
data = [self._convert_dataframe_to_json(
name=key,
dataframe=data_frame.ix[start_index:end_index].copy(),
time_precision=time_precision)]
InfluxDBClient.write_points(self, data, *args, **kwargs)
return True
else:
data = [self._convert_dataframe_to_json(
name=key, dataframe=dataframe, time_precision=time_precision)
for key, dataframe in data.items()]
return InfluxDBClient.write_points(self, data, *args, **kwargs)
def write_points_with_precision(self, data, time_precision='s'):
"""
DEPRECATED. Write to multiple time series names
"""
warnings.warn(
"write_points_with_precision is deprecated, and will be removed "
"in future versions. Please use "
"``DataFrameClient.write_points(time_precision='..')`` instead.",
FutureWarning)
return self.write_points(data, time_precision='s')
def query(self, query, time_precision='s', chunked=False):
"""
Querying data into DataFrames.
Returns a DataFrame for a single time series and a map for multiple
time series with the time series as value and its name as key.
:param time_precision: [Optional, default 's'] Either 's', 'm', 'ms'
or 'u'.
:param chunked: [Optional, default=False] True if the data shall be
retrieved in chunks, False otherwise.
"""
result = InfluxDBClient.query(self, query=query,
time_precision=time_precision,
chunked=chunked)
if len(result) == 0:
return result
elif len(result) == 1:
return self._to_dataframe(result[0], time_precision)
else:
return {time_series['name']: self._to_dataframe(time_series,
time_precision)
for time_series in result}
def _to_dataframe(self, json_result, time_precision):
dataframe = pd.DataFrame(data=json_result['points'],
columns=json_result['columns'])
if 'sequence_number' in dataframe.keys():
dataframe.sort(['time', 'sequence_number'], inplace=True)
else:
dataframe.sort(['time'], inplace=True)
pandas_time_unit = time_precision
if time_precision == 'm':
pandas_time_unit = 'ms'
elif time_precision == 'u':
pandas_time_unit = 'us'
dataframe.index = pd.to_datetime(list(dataframe['time']),
unit=pandas_time_unit,
utc=True)
del dataframe['time']
return dataframe
def _convert_dataframe_to_json(self, dataframe, name, time_precision='s'):
if not isinstance(dataframe, pd.DataFrame):
raise TypeError('Must be DataFrame, but type was: {}.'
.format(type(dataframe)))
if not (isinstance(dataframe.index, pd.tseries.period.PeriodIndex) or
isinstance(dataframe.index, pd.tseries.index.DatetimeIndex)):
raise TypeError('Must be DataFrame with DatetimeIndex or \
PeriodIndex.')
dataframe.index = dataframe.index.to_datetime()
if dataframe.index.tzinfo is None:
dataframe.index = dataframe.index.tz_localize('UTC')
dataframe['time'] = [self._datetime_to_epoch(dt, time_precision)
for dt in dataframe.index]
data = {'name': name,
'columns': [str(column) for column in dataframe.columns],
'points': list([list(x) for x in dataframe.values])}
return data
def _datetime_to_epoch(self, datetime, time_precision='s'):
seconds = (datetime - self.EPOCH).total_seconds()
if time_precision == 's':
return seconds
elif time_precision == 'm' or time_precision == 'ms':
return seconds * 1000
elif time_precision == 'u':
return seconds * 1000000
| gpl-2.0 |
davidgbe/scikit-learn | sklearn/neighbors/approximate.py | 71 | 22357 | """Approximate nearest neighbor search"""
# Author: Maheshakya Wijewardena <[email protected]>
# Joel Nothman <[email protected]>
import numpy as np
import warnings
from scipy import sparse
from .base import KNeighborsMixin, RadiusNeighborsMixin
from ..base import BaseEstimator
from ..utils.validation import check_array
from ..utils import check_random_state
from ..metrics.pairwise import pairwise_distances
from ..random_projection import GaussianRandomProjection
__all__ = ["LSHForest"]
HASH_DTYPE = '>u4'
MAX_HASH_SIZE = np.dtype(HASH_DTYPE).itemsize * 8
def _find_matching_indices(tree, bin_X, left_mask, right_mask):
"""Finds indices in sorted array of integers.
Most significant h bits in the binary representations of the
integers are matched with the items' most significant h bits.
"""
left_index = np.searchsorted(tree, bin_X & left_mask)
right_index = np.searchsorted(tree, bin_X | right_mask,
side='right')
return left_index, right_index
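# Added worked example: with 8-bit hashes for brevity and h = 3 matched bits,
# a query hash 0b10110000 gives
#   query & left_mask  (left_mask = 0b11100000) -> 0b10100000
#   query | right_mask (right_mask = 0b00011111) -> 0b10111111
# so the searchsorted window [left_index, right_index) spans exactly the sorted
# hashes that share the 3-bit prefix 101.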
def _find_longest_prefix_match(tree, bin_X, hash_size,
left_masks, right_masks):
"""Find the longest prefix match in tree for each query in bin_X
Most significant bits are considered as the prefix.
"""
hi = np.empty_like(bin_X, dtype=np.intp)
hi.fill(hash_size)
lo = np.zeros_like(bin_X, dtype=np.intp)
res = np.empty_like(bin_X, dtype=np.intp)
left_idx, right_idx = _find_matching_indices(tree, bin_X,
left_masks[hi],
right_masks[hi])
found = right_idx > left_idx
res[found] = lo[found] = hash_size
r = np.arange(bin_X.shape[0])
kept = r[lo < hi] # indices remaining in bin_X mask
while kept.shape[0]:
mid = (lo.take(kept) + hi.take(kept)) // 2
left_idx, right_idx = _find_matching_indices(tree,
bin_X.take(kept),
left_masks[mid],
right_masks[mid])
found = right_idx > left_idx
mid_found = mid[found]
lo[kept[found]] = mid_found + 1
res[kept[found]] = mid_found
hi[kept[~found]] = mid[~found]
kept = r[lo < hi]
return res
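# Illustrative sketch (an assumption about the intent, not original text): the
# function above is a binary search on the prefix length. For a given query,
# if some tree entry matches its 7 most significant bits but no entry matches
# 8 of them, lo/hi converge so that res for that query ends up as 7.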
class ProjectionToHashMixin(object):
"""Turn a transformed real-valued array into a hash"""
@staticmethod
def _to_hash(projected):
if projected.shape[1] % 8 != 0:
raise ValueError('Require reduced dimensionality to be a multiple '
'of 8 for hashing')
# XXX: perhaps non-copying operation better
out = np.packbits((projected > 0).astype(int)).view(dtype=HASH_DTYPE)
return out.reshape(projected.shape[0], -1)
def fit_transform(self, X, y=None):
self.fit(X)
return self.transform(X)
def transform(self, X, y=None):
return self._to_hash(super(ProjectionToHashMixin, self).transform(X))
class GaussianRandomProjectionHash(ProjectionToHashMixin,
GaussianRandomProjection):
"""Use GaussianRandomProjection to produce a cosine LSH fingerprint"""
def __init__(self,
n_components=8,
random_state=None):
super(GaussianRandomProjectionHash, self).__init__(
n_components=n_components,
random_state=random_state)
def _array_of_arrays(list_of_arrays):
"""Creates an array of array from list of arrays."""
out = np.empty(len(list_of_arrays), dtype=object)
out[:] = list_of_arrays
return out
class LSHForest(BaseEstimator, KNeighborsMixin, RadiusNeighborsMixin):
"""Performs approximate nearest neighbor search using LSH forest.
    LSH Forest: Locality Sensitive Hashing forest [1] is an approximate
    alternative to exact (vanilla) nearest neighbor search methods.
LSH forest data structure has been implemented using sorted
arrays and binary search and 32 bit fixed-length hashes.
Random projection is used as the hash family which approximates
cosine distance.
The cosine distance is defined as ``1 - cosine_similarity``: the lowest
value is 0 (identical point) but it is bounded above by 2 for the farthest
points. Its value does not depend on the norm of the vector points but
only on their relative angles.
Read more in the :ref:`User Guide <approximate_nearest_neighbors>`.
Parameters
----------
n_estimators : int (default = 10)
Number of trees in the LSH Forest.
min_hash_match : int (default = 4)
lowest hash length to be searched when candidate selection is
performed for nearest neighbors.
    n_candidates : int (default = 50)
Minimum number of candidates evaluated per estimator, assuming enough
items meet the `min_hash_match` constraint.
n_neighbors : int (default = 5)
Number of neighbors to be returned from query function when
it is not provided to the :meth:`kneighbors` method.
    radius : float, optional (default = 1.0)
        Radius from the data point to its neighbors. This is the parameter
        space to use by default for the :meth:`radius_neighbors` queries.
radius_cutoff_ratio : float, optional (default = 0.9)
        A value in the range [0, 1]. Radius neighbors will be searched until
the ratio between total neighbors within the radius and the total
candidates becomes less than this value unless it is terminated by
hash length reaching `min_hash_match`.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
hash_functions_ : list of GaussianRandomProjectionHash objects
Hash function g(p,x) for a tree is an array of 32 randomly generated
        float arrays with the same dimension as the data set. This array is
stored in GaussianRandomProjectionHash object and can be obtained
from ``components_`` attribute.
trees_ : array, shape (n_estimators, n_samples)
Each tree (corresponding to a hash function) contains an array of
sorted hashed values. The array representation may change in future
versions.
original_indices_ : array, shape (n_estimators, n_samples)
Original indices of sorted hashed values in the fitted index.
References
----------
.. [1] M. Bawa, T. Condie and P. Ganesan, "LSH Forest: Self-Tuning
Indexes for Similarity Search", WWW '05 Proceedings of the
14th international conference on World Wide Web, 651-660,
2005.
Examples
--------
>>> from sklearn.neighbors import LSHForest
>>> X_train = [[5, 5, 2], [21, 5, 5], [1, 1, 1], [8, 9, 1], [6, 10, 2]]
>>> X_test = [[9, 1, 6], [3, 1, 10], [7, 10, 3]]
>>> lshf = LSHForest()
>>> lshf.fit(X_train) # doctest: +NORMALIZE_WHITESPACE
LSHForest(min_hash_match=4, n_candidates=50, n_estimators=10,
n_neighbors=5, radius=1.0, radius_cutoff_ratio=0.9,
random_state=None)
>>> distances, indices = lshf.kneighbors(X_test, n_neighbors=2)
>>> distances # doctest: +ELLIPSIS
array([[ 0.069..., 0.149...],
[ 0.229..., 0.481...],
[ 0.004..., 0.014...]])
>>> indices
array([[1, 2],
[2, 0],
[4, 0]])
"""
def __init__(self, n_estimators=10, radius=1.0, n_candidates=50,
n_neighbors=5, min_hash_match=4, radius_cutoff_ratio=.9,
random_state=None):
self.n_estimators = n_estimators
self.radius = radius
self.random_state = random_state
self.n_candidates = n_candidates
self.n_neighbors = n_neighbors
self.min_hash_match = min_hash_match
self.radius_cutoff_ratio = radius_cutoff_ratio
def _compute_distances(self, query, candidates):
"""Computes the cosine distance.
Distance is from the query to points in the candidates array.
Returns argsort of distances in the candidates
array and sorted distances.
"""
if candidates.shape == (0,):
# needed since _fit_X[np.array([])] doesn't work if _fit_X sparse
return np.empty(0, dtype=np.int), np.empty(0, dtype=float)
if sparse.issparse(self._fit_X):
candidate_X = self._fit_X[candidates]
else:
candidate_X = self._fit_X.take(candidates, axis=0, mode='clip')
distances = pairwise_distances(query, candidate_X,
metric='cosine')[0]
distance_positions = np.argsort(distances)
distances = distances.take(distance_positions, mode='clip', axis=0)
return distance_positions, distances
def _generate_masks(self):
"""Creates left and right masks for all hash lengths."""
tri_size = MAX_HASH_SIZE + 1
# Called once on fitting, output is independent of hashes
left_mask = np.tril(np.ones((tri_size, tri_size), dtype=int))[:, 1:]
right_mask = left_mask[::-1, ::-1]
self._left_mask = np.packbits(left_mask).view(dtype=HASH_DTYPE)
self._right_mask = np.packbits(right_mask).view(dtype=HASH_DTYPE)
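    # Illustrative sketch (assumption, small-width analogue of the real 32-bit
    # masks): conceptually, for an h-bit hash, _left_mask[k] has the k most
    # significant bits set and _right_mask[k] has the remaining h - k least
    # significant bits set; e.g. for h = 8 and k = 3 the pair would be
    # 0b11100000 and 0b00011111.  _find_matching_indices uses them to clamp a
    # query hash to the smallest and largest values sharing its k-bit prefix.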
def _get_candidates(self, query, max_depth, bin_queries, n_neighbors):
"""Performs the Synchronous ascending phase.
Returns an array of candidates, their distance ranks and
distances.
"""
index_size = self._fit_X.shape[0]
# Number of candidates considered including duplicates
# XXX: not sure whether this is being calculated correctly wrt
# duplicates from different iterations through a single tree
n_candidates = 0
candidate_set = set()
min_candidates = self.n_candidates * self.n_estimators
while (max_depth > self.min_hash_match and
(n_candidates < min_candidates or
len(candidate_set) < n_neighbors)):
left_mask = self._left_mask[max_depth]
right_mask = self._right_mask[max_depth]
for i in range(self.n_estimators):
start, stop = _find_matching_indices(self.trees_[i],
bin_queries[i],
left_mask, right_mask)
n_candidates += stop - start
candidate_set.update(
self.original_indices_[i][start:stop].tolist())
max_depth -= 1
candidates = np.fromiter(candidate_set, count=len(candidate_set),
dtype=np.intp)
# For insufficient candidates, candidates are filled.
# Candidates are filled from unselected indices uniformly.
if candidates.shape[0] < n_neighbors:
warnings.warn(
"Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (n_neighbors, self.min_hash_match))
remaining = np.setdiff1d(np.arange(0, index_size), candidates)
to_fill = n_neighbors - candidates.shape[0]
candidates = np.concatenate((candidates, remaining[:to_fill]))
ranks, distances = self._compute_distances(query,
candidates.astype(int))
return (candidates[ranks[:n_neighbors]],
distances[:n_neighbors])
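    # Illustrative sketch (assumption, using the default parameters): with
    # n_estimators=10 and n_candidates=50, the loop above keeps shortening the
    # required prefix (max_depth, max_depth - 1, ...) until at least
    # 10 * 50 = 500 candidate hits (counting duplicates) have been gathered
    # across the trees and at least n_neighbors distinct indices found, or
    # max_depth falls to min_hash_match.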
def _get_radius_neighbors(self, query, max_depth, bin_queries, radius):
"""Finds radius neighbors from the candidates obtained.
Their distances from query are smaller than radius.
Returns radius neighbors and distances.
"""
ratio_within_radius = 1
threshold = 1 - self.radius_cutoff_ratio
total_candidates = np.array([], dtype=int)
total_neighbors = np.array([], dtype=int)
total_distances = np.array([], dtype=float)
while (max_depth > self.min_hash_match and
ratio_within_radius > threshold):
left_mask = self._left_mask[max_depth]
right_mask = self._right_mask[max_depth]
candidates = []
for i in range(self.n_estimators):
start, stop = _find_matching_indices(self.trees_[i],
bin_queries[i],
left_mask, right_mask)
candidates.extend(
self.original_indices_[i][start:stop].tolist())
candidates = np.setdiff1d(candidates, total_candidates)
total_candidates = np.append(total_candidates, candidates)
ranks, distances = self._compute_distances(query, candidates)
m = np.searchsorted(distances, radius, side='right')
positions = np.searchsorted(total_distances, distances[:m])
total_neighbors = np.insert(total_neighbors, positions,
candidates[ranks[:m]])
total_distances = np.insert(total_distances, positions,
distances[:m])
ratio_within_radius = (total_neighbors.shape[0] /
float(total_candidates.shape[0]))
max_depth = max_depth - 1
return total_neighbors, total_distances
def fit(self, X, y=None):
"""Fit the LSH forest on the data.
This creates binary hashes of input data points by getting the
dot product of input points and hash_function then
transforming the projection into a binary string array based
on the sign (positive/negative) of the projection.
A sorted array of binary hashes is created.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
self : object
Returns self.
"""
self._fit_X = check_array(X, accept_sparse='csr')
# Creates a g(p,x) for each tree
self.hash_functions_ = []
self.trees_ = []
self.original_indices_ = []
rng = check_random_state(self.random_state)
int_max = np.iinfo(np.int32).max
for i in range(self.n_estimators):
# This is g(p,x) for a particular tree.
# Builds a single tree. Hashing is done on an array of data points.
# `GaussianRandomProjection` is used for hashing.
# `n_components=hash size and n_features=n_dim.
hasher = GaussianRandomProjectionHash(MAX_HASH_SIZE,
rng.randint(0, int_max))
hashes = hasher.fit_transform(self._fit_X)[:, 0]
original_index = np.argsort(hashes)
bin_hashes = hashes[original_index]
self.original_indices_.append(original_index)
self.trees_.append(bin_hashes)
self.hash_functions_.append(hasher)
self._generate_masks()
return self
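    # Illustrative sketch (assumption, not part of the original source): each
    # tree hashes a sample x into a single 32-bit key by taking the signs of
    # MAX_HASH_SIZE Gaussian random projections and packing them MSB-first,
    # roughly
    #     bits = (hasher.components_.dot(x) > 0)          # 32 booleans
    #     key = np.packbits(bits.astype(np.uint8)).view('>u4')[0]
    # which is what GaussianRandomProjectionHash._to_hash does for a whole
    # batch at once.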
def _query(self, X):
"""Performs descending phase to find maximum depth."""
# Calculate hashes of shape (n_samples, n_estimators, [hash_size])
bin_queries = np.asarray([hasher.transform(X)[:, 0]
for hasher in self.hash_functions_])
bin_queries = np.rollaxis(bin_queries, 1)
# descend phase
depths = [_find_longest_prefix_match(tree, tree_queries, MAX_HASH_SIZE,
self._left_mask, self._right_mask)
for tree, tree_queries in zip(self.trees_,
np.rollaxis(bin_queries, 1))]
return bin_queries, np.max(depths, axis=0)
def kneighbors(self, X, n_neighbors=None, return_distance=True):
"""Returns n_neighbors of approximate nearest neighbors.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single query.
        n_neighbors : int, optional (default = None)
Number of neighbors required. If not provided, this will
return the number specified at the initialization.
        return_distance : boolean, optional (default = True)
Returns the distances of neighbors if set to True.
Returns
-------
dist : array, shape (n_samples, n_neighbors)
Array representing the cosine distances to each point,
only present if return_distance=True.
ind : array, shape (n_samples, n_neighbors)
Indices of the approximate nearest points in the population
matrix.
"""
if not hasattr(self, 'hash_functions_'):
raise ValueError("estimator should be fitted.")
if n_neighbors is None:
n_neighbors = self.n_neighbors
X = check_array(X, accept_sparse='csr')
neighbors, distances = [], []
bin_queries, max_depth = self._query(X)
for i in range(X.shape[0]):
neighs, dists = self._get_candidates(X[[i]], max_depth[i],
bin_queries[i],
n_neighbors)
neighbors.append(neighs)
distances.append(dists)
if return_distance:
return np.array(distances), np.array(neighbors)
else:
return np.array(neighbors)
def radius_neighbors(self, X, radius=None, return_distance=True):
"""Finds the neighbors within a given radius of a point or points.
Return the indices and distances of some points from the dataset
lying in a ball with size ``radius`` around the points of the query
array. Points lying on the boundary are included in the results.
The result points are *not* necessarily sorted by distance to their
query point.
LSH Forest being an approximate method, some true neighbors from the
indexed dataset might be missing from the results.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single query.
radius : float
Limiting distance of neighbors to return.
(default is the value passed to the constructor).
        return_distance : boolean, optional (default = True)
Returns the distances of neighbors if set to True.
Returns
-------
dist : array, shape (n_samples,) of arrays
Each element is an array representing the cosine distances
to some points found within ``radius`` of the respective query.
Only present if ``return_distance=True``.
ind : array, shape (n_samples,) of arrays
Each element is an array of indices for neighbors within ``radius``
of the respective query.
"""
if not hasattr(self, 'hash_functions_'):
raise ValueError("estimator should be fitted.")
if radius is None:
radius = self.radius
X = check_array(X, accept_sparse='csr')
neighbors, distances = [], []
bin_queries, max_depth = self._query(X)
for i in range(X.shape[0]):
neighs, dists = self._get_radius_neighbors(X[[i]], max_depth[i],
bin_queries[i], radius)
neighbors.append(neighs)
distances.append(dists)
if return_distance:
return _array_of_arrays(distances), _array_of_arrays(neighbors)
else:
return _array_of_arrays(neighbors)
def partial_fit(self, X, y=None):
"""
Inserts new data into the already fitted LSH Forest.
Cost is proportional to new total size, so additions
should be batched.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
New data point to be inserted into the LSH Forest.
"""
X = check_array(X, accept_sparse='csr')
if not hasattr(self, 'hash_functions_'):
return self.fit(X)
if X.shape[1] != self._fit_X.shape[1]:
raise ValueError("Number of features in X and"
" fitted array does not match.")
n_samples = X.shape[0]
n_indexed = self._fit_X.shape[0]
for i in range(self.n_estimators):
bin_X = self.hash_functions_[i].transform(X)[:, 0]
# gets the position to be added in the tree.
positions = self.trees_[i].searchsorted(bin_X)
# adds the hashed value into the tree.
self.trees_[i] = np.insert(self.trees_[i],
positions, bin_X)
# add the entry into the original_indices_.
self.original_indices_[i] = np.insert(self.original_indices_[i],
positions,
np.arange(n_indexed,
n_indexed +
n_samples))
# adds the entry into the input_array.
if sparse.issparse(X) or sparse.issparse(self._fit_X):
self._fit_X = sparse.vstack((self._fit_X, X))
else:
self._fit_X = np.row_stack((self._fit_X, X))
return self
| bsd-3-clause |
bikash/h2o-dev | h2o-py/tests/testdir_algos/glm/pyunit_link_functions_gammaGLM.py | 1 | 2123 | import sys
sys.path.insert(1, "../../../")
import h2o
import pandas as pd
import zipfile
import statsmodels.api as sm
def link_functions_gamma(ip,port):
# Connect to h2o
h2o.init(ip,port)
print("Read in prostate data.")
h2o_data = h2o.import_frame(path=h2o.locate("smalldata/prostate/prostate_complete.csv.zip"))
h2o_data.head()
sm_data = pd.read_csv(zipfile.ZipFile(h2o.locate("smalldata/prostate/prostate_complete.csv.zip")).open("prostate_complete.csv")).as_matrix()
sm_data_response = sm_data[:,5]
sm_data_features = sm_data[:,[1,2,3,4,6,7,8,9]]
print("Testing for family: GAMMA")
print("Set variables for h2o.")
myY = "DPROS"
myX = ["ID","AGE","RACE","GLEASON","DCAPS","PSA","VOL","CAPSULE"]
print("Create models with canonical link: INVERSE")
h2o_model_in = h2o.glm(x=h2o_data[myX], y=h2o_data[myY], family="gamma", link="inverse",alpha=[0.5], Lambda=[0])
sm_model_in = sm.GLM(endog=sm_data_response, exog=sm_data_features, family=sm.families.Gamma(sm.families.links.inverse_power)).fit()
print("Compare model deviances for link function inverse")
h2o_deviance_in = h2o_model_in._model_json['output']['residual_deviance'] / h2o_model_in._model_json['output']['null_deviance']
sm_deviance_in = sm_model_in.deviance / sm_model_in.null_deviance
assert h2o_deviance_in - sm_deviance_in < 0.01, "expected h2o to have an equivalent or better deviance measures"
print("Create models with canonical link: LOG")
h2o_model_log = h2o.glm(x=h2o_data[myX], y=h2o_data[myY], family="gamma", link="log",alpha=[0.5], Lambda=[0])
sm_model_log = sm.GLM(endog=sm_data_response, exog=sm_data_features, family=sm.families.Gamma(sm.families.links.log)).fit()
print("Compare model deviances for link function log")
h2o_deviance_log = h2o_model_log._model_json['output']['residual_deviance'] / h2o_model_log._model_json['output']['null_deviance']
sm_deviance_log = sm_model_log.deviance / sm_model_log.null_deviance
assert h2o_deviance_log - sm_deviance_log < 0.01, "expected h2o to have an equivalent or better deviance measures"
if __name__ == "__main__":
h2o.run_test(sys.argv, link_functions_gamma)
| apache-2.0 |
vdods/heisenberg | attic/lagrange-multiplier.py | 1 | 21339 | import sys
sys.path.append('library')
import cmath
import fourier
import heisenberg_dynamics
import itertools
import math
import matplotlib.pyplot as plt
import numpy as np
import scipy.optimize
import tensor
import vector_field
def vector (X):
return np.array(X)
def matrix (X):
row_count = len(X)
assert row_count > 0
column_count = len(X[0])
assert all(len(row) == column_count for row in X)
return np.array(X).reshape(row_count, column_count)
def squared_norm (X):
return sum(x**2 for x in X.flat)
def squared_L2_norm (X):
return sum(x**2 for x in X.flat) / len(X.flat)
def squared_complex_norm (z):
return z.real**2 + z.imag**2
def squared_complex_L2_norm (Z):
return sum(squared_complex_norm(z) for z in Z) / len(Z)
def generated_zeta (M):
M_inv = {m:i for i,m in enumerate(M)}
L = list(frozenset(m1-m2 for m1 in M for m2 in M))
L.sort()
L_inv = {l:i for i,l in enumerate(L)}
# T is the 3-tensor defined by T:(w \otimes z) = \bar{w}z, where w and z are
# complex numbers identified as points in \mathbb{R}^2.
T = np.zeros((2,2,2))
T[0,0,0] = 1.0
T[0,1,1] = 1.0
T[1,0,1] = 1.0
T[1,1,0] = -1.0
# Mult is the 3-tensor defining the quadratic function zeta_M.
Mult = np.zeros((2*len(L), 2*len(M), 2*len(M)))
for l in L:
if l == 0:
continue
i = L_inv[l]
for m in M:
if l+m not in M:
continue
j = M_inv[m]
k = M_inv[l+m]
Mult[2*i:2*(i+1),2*j:2*(j+1),2*k:2*(k+1)] += T*(l+m)/(2*l)
def zeta (R):
assert len(R) == 2*len(M), 'not enough input params.'
# return np.einsum('ijk,j,k', Mult, R, R)
return tensor.contract('ijk,j,k', Mult, R, R)
def D_zeta ((R,V)):
assert len(R) == 2*len(M), 'not enough input params.'
assert len(V) == 2*len(M), 'not enough input params.'
# Product rule (could probably simplify this due to some symmetry in Mult).
# return (zeta(R), np.einsum('ijk,j,k', Mult, V, R) + np.einsum('ijk,j,k', Mult, R, V))
return (zeta(R), tensor.contract('ijk,j,k', Mult, V, R) + tensor.contract('ijk,j,k', Mult, R, V))
return zeta,D_zeta,L
def as_real_vector (complex_vector):
elements = []
for z in complex_vector:
elements.append(z.real)
elements.append(z.imag)
return vector(elements)
def generated_Q (M, omega):
half_omega = 0.5*omega
# TODO: get rid of M_inv here
M_inv = {m:i for i,m in enumerate(M)}
def Q (R):
assert len(R.shape) == 1
assert len(R) == 2*len(M), 'not enough input params.'
return half_omega*sum(m*squared_norm(R[2*M_inv[m]:2*(M_inv[m]+1)]) for m in M)
def D_Q ((R,V)):
assert len(R.shape) == 1
assert len(V.shape) == 1
assert len(R) == 2*len(M), 'not enough input params.'
assert len(V) == 2*len(M), 'not enough input params.'
# return (Q(R), omega*sum(m*np.einsum('i,i', R[2*M_inv[m]:2*(M_inv[m]+1)], V[2*M_inv[m]:2*(M_inv[m]+1)]) for m in M))
return (Q(R), omega*sum(m*tensor.contract('i,i', R[2*M_inv[m]:2*(M_inv[m]+1)], V[2*M_inv[m]:2*(M_inv[m]+1)]) for m in M))
return Q, D_Q
def antiderivative_of (X, sample_times):
s = 0.0
retval = [s]
deltas = [sample_times[i+1]-sample_times[i] for i in range(len(sample_times)-1)]
for (x,delta) in itertools.izip(X,deltas):
s += x*delta
retval.append(s)
return vector(retval)
def derivative_of (X, sample_times):
deltas = [sample_times[i+1]-sample_times[i] for i in range(len(sample_times)-1)]
return vector([(X[i+1]-X[i])/delta for i,delta in enumerate(deltas)])
def definite_integral (sample_times):
assert sample_times[-1] - sample_times[0] > 0, 'Must be positively oriented, nonempty sampled interval.'
L = sample_times[-1] - sample_times[0]
integral_covector = np.ndarray((len(sample_times)-1,), dtype=float)
for i in range(len(sample_times)-1):
integral_covector[i] = sample_times[i+1]-sample_times[i]
def I (X):
# return np.einsum('j,j', integral_covector, X)
return tensor.contract('j,j', integral_covector, X)
def D_I ((X,V)):
return (I(X),I(V))
return I, D_I
def imag (z):
return z.imag
def D_imag ((z,v)):
return (imag(z),imag(v))
def imag_v (Z):
return vector([imag(z) for z in Z])
def D_imag_v ((Z,V)):
return (imag_v(Z),imag_v(V))
def realify (z):
return (z.real, z.imag)
def D_realify ((z,v)):
return (realify(z),realify(v))
def realify_v (Z):
return matrix([[z.real, z.imag] for z in Z]).reshape(2*len(Z))
def D_realify_v ((Z,V)):
return (realify_v(Z),realify_v(V))
def complexify (x):
assert len(x) == 2
return complex(x[0], x[1])
def D_complexify ((x,v)):
return (complexify(x),complexify(v))
def complexify_v (X):
assert len(X.flat) % 2 == 0
n = len(X.flat) / 2
return vector([complex(X.flat[2*i], X.flat[2*i+1]) for i in range(n)])
def D_complexify_v ((X,V)):
return (complexify_v(X),complexify_v(V))
def chi (U):
assert len(U) == 2
assert len(U[0]) == 2*len(U[1])
n = len(U[1])
return matrix([[U[0][2*i],U[0][2*i+1],U[1][i]] for i in range(n)])
def test_chi ():
Theta = np.linspace(0.0, 1.0, 11)
A = [(x,x**2) for x in Theta]
B = [2.0*x for x in Theta]
C = chi([A,B])
print C
def D_chi ((U,V)):
# chi is a linear map, so its differential is very simple.
assert len(U) == 2
assert len(V) == 2
X = chi((U[0],V[0]))
Y = chi((U[1],V[1]))
return (X,Y)
def eta (U):
assert len(U) == 2
assert U[0].shape == U[1].shape
n = U[0].shape[1]
retval = np.ndarray((U[0].shape[0],2*n), dtype=U[0].dtype)
retval[:,:n] = U[0]
retval[:,n:] = U[1]
return retval
def test_eta ():
U = [np.random.randn(*(3,5)) for _ in range(2)]
eta_U = eta(U)
print 'U[0] = {0}'.format(U[0])
print 'U[1] = {0}'.format(U[1])
print 'eta_U = {0}'.format(eta_U)
def D_eta ((U,V)):
# eta is a linear map, so its differential is very simple.
return (eta((U[0],V[0])),eta((U[1],V[1])))
def cartesian_product_of_functions_with_shared_domain (*functions):
def _ (x):
retval = tuple(f(x) for f in functions)
return retval
return _
def composition_of_functions (*functions):
def _ (x):
retval = x
for f in reversed(functions):
retval = f(retval)
return retval
return _
def sum_of_functions_with_shared_domain (*functions):
def _ (x):
return sum(f(x) for f in functions)
return _
def sum_of_tuples (*tuples):
return (sum(t) for t in itertools.izip(*tuples))
def direct_sum_of_functions_with_shared_domain (*functions):
return lambda x : sum_of_tuples(*[f(x) for f in functions])
# alpha = 2.0 / math.pi
alpha = 1.0
beta = 16.0 # This used to incorrectly be 1/16
def Lagrangian (pv):
mu = (pv[0]**2 + pv[1]**2)**2 + beta*pv[2]**2
# First term is kinetic energy, second is negative potential.
return 0.5*(pv[3]**2 + pv[4]**2) + alpha/math.sqrt(mu)
def D_Lagrangian ((pv, pv_prime)):
assert len(pv) == 6
assert len(pv_prime) == 6
r_squared = pv[0]**2 + pv[1]**2
mu = (r_squared)**2 + beta*pv[2]**2
mu_to_negative_three_halves = pow(mu,-1.5)
# TODO: factor this so it's fewer operations
return (Lagrangian(pv), \
-2.0*alpha*r_squared*pv[0]*mu_to_negative_three_halves*pv_prime[0] \
+ -2.0*alpha*r_squared*pv[1]*mu_to_negative_three_halves*pv_prime[1] \
+ -alpha*beta*pv[2]*mu_to_negative_three_halves*pv_prime[2] \
+ pv[3]*pv_prime[3] \
+ pv[4]*pv_prime[4])
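# In symbols (a sketch of the two functions above): with
# mu = (x^2 + y^2)^2 + beta*z^2,
#     L(q, q') = 0.5*(x'^2 + y'^2) + alpha / sqrt(mu)
# and D_Lagrangian returns (L(pv), dL(pv) . pv_prime), i.e. the gradient of L
# contracted with the variation pv_prime.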
def Lagrangian_v (PV):
return vector([Lagrangian(pv) for pv in PV])
def D_Lagrangian_v ((PV,PV_prime)):
# Each of PV and PV_prime are matrices of size 6 by N, where N is the number of samples
assert PV.shape == PV_prime.shape
assert PV.dtype == PV_prime.dtype
assert PV.shape[1] == 6
retval = (np.ndarray((PV.shape[0],), dtype=PV.dtype), np.ndarray((PV.shape[0],), dtype=PV.dtype))
for r,pv_pv_prime in enumerate(itertools.izip(PV,PV_prime)):
DL = D_Lagrangian(pv_pv_prime)
retval[0][r] = DL[0]
retval[1][r] = DL[1]
return retval
def Hamiltonian (pv):
mu = (pv[0]**2 + pv[1]**2)**2 + beta*pv[2]**2
# First term is kinetic energy, second is potential.
return 0.5*(pv[3]**2 + pv[4]**2) - alpha/math.sqrt(mu)
def Hamiltonian_v (PV):
return vector([Hamiltonian(pv) for pv in PV])
def cotangent_vector (tangent_vector):
assert len(tangent_vector) == 6
# x' = p_x - 0.5*y*p_z
# y' = p_y + 0.5*x*p_z
# z' = p_z
# This implies that
# p_x = x' + 0.5*y*z'
# p_y = y' - 0.5*x*z'
# p_z = z'
x = tangent_vector[0]
y = tangent_vector[1]
z = tangent_vector[2]
x_dot = tangent_vector[3]
y_dot = tangent_vector[4]
z_dot = tangent_vector[5]
return vector([x, y, z, x_dot+0.5*y*z_dot, y_dot-0.5*x*z_dot, z_dot])
#test_chi()
#test_eta()
def main ():
period = 273.5
omega = cmath.exp(2.0j*math.pi/period)
# Generate the modes M with symmetry.
sym_class = 2
k = 6
fold = 5
M = range(sym_class-fold*k,sym_class+fold*k+1,fold)
M_inv = {m:i for i,m in enumerate(M)}
zeta_M,D_zeta_M,L = generated_zeta(M)
L_inv = {l:i for i,l in enumerate(L)}
#Q_M = generated_Q(M, omega)
Q_M, D_Q_M = generated_Q(M, omega)
sample_count = 1000
sample_times = np.linspace(0.0, period, sample_count)
F_M = fourier.Transform(M, sample_times)
F_L = fourier.Transform(L, sample_times)
I, D_I = definite_integral(sample_times)
sample_times = sample_times[:-1]
linear = sample_times
#linear_term = lambda R : linear*Q_M(R)
def linear_term (R):
Q = Q_M(R)
return vector([Q*t for t in sample_times])
def D_linear_term ((R,V)):
d = D_Q_M((R,V))
return (linear*d[0], linear*d[1])
constant = vector([1 for _ in sample_times])
def constant_term (R):
Q = Q_M(R)
return vector([Q for _ in sample_times])
def D_constant_term ((R,V)):
d = D_Q_M((R,V))
return (constant*d[0], constant*d[1])
# Fourier transforms
#FT_M = lambda C : F_M.sampled_sum_of(C)
#FT_L = lambda C : F_L.sampled_sum_of(C)
def FT_M (C):
print 'calling FT_M'
retval = F_M.sampled_sum_of(C)
return retval.reshape(retval.size)
def FT_L (C):
print 'calling FT_L'
retval = F_L.sampled_sum_of(C)
return retval.reshape(retval.size)
# FT_M and FT_L are linear, so their differentials are very simple.
D_FT_M = lambda (C,V) : (FT_M(C),FT_M(V))
D_FT_L = lambda (C,V) : (FT_L(C),FT_L(V))
# Derivative with respect to Fourier coefficients
#FCD_M = lambda C : F_M.coefficients_of_derivative_of(C)
#FCD_L = lambda C : F_L.coefficients_of_derivative_of(C)
def FCD_M (C):
retval = F_M.coefficients_of_derivative_of(C)
return retval.reshape(retval.size)
def FCD_L (C):
retval = F_L.coefficients_of_derivative_of(C)
return retval.reshape(retval.size)
# FCD_M and FCD_L are linear, so their differentials are very simple.
D_FCD_M = lambda (C,V) : (FCD_M(C),FCD_M(V))
D_FCD_L = lambda (C,V) : (FCD_L(C),FCD_L(V))
position = \
composition_of_functions( \
chi, \
cartesian_product_of_functions_with_shared_domain( \
composition_of_functions(realify_v, FT_M, complexify_v), \
composition_of_functions( \
imag_v, \
sum_of_functions_with_shared_domain( \
composition_of_functions(FT_L, complexify_v, zeta_M), \
linear_term \
) \
) \
) \
)
velocity = \
composition_of_functions( \
chi, \
cartesian_product_of_functions_with_shared_domain( \
composition_of_functions(realify_v, FT_M, FCD_M, complexify_v), \
composition_of_functions( \
imag_v, \
sum_of_functions_with_shared_domain( \
composition_of_functions(FT_L, FCD_L, complexify_v, zeta_M), \
constant_term \
) \
) \
) \
)
use_Q_contribution = False
if not use_Q_contribution:
action = \
composition_of_functions( \
I, \
Lagrangian_v, \
eta, \
cartesian_product_of_functions_with_shared_domain(position, velocity) \
)
else:
action = \
sum_of_functions_with_shared_domain( \
composition_of_functions( \
I, \
Lagrangian_v, \
eta, \
cartesian_product_of_functions_with_shared_domain(position, velocity) \
), \
composition_of_functions( \
lambda x : 100000.0*x**2, \
imag, \
Q_M
) \
)
D_position = \
composition_of_functions( \
D_chi, \
cartesian_product_of_functions_with_shared_domain( \
composition_of_functions(D_realify_v, D_FT_M, D_complexify_v), \
composition_of_functions( \
D_imag_v, \
direct_sum_of_functions_with_shared_domain( \
composition_of_functions(D_FT_L, D_complexify_v, D_zeta_M), \
D_linear_term \
) \
) \
) \
)
D_velocity = \
composition_of_functions( \
D_chi, \
cartesian_product_of_functions_with_shared_domain( \
composition_of_functions(D_realify_v, D_FT_M, D_FCD_M, D_complexify_v), \
composition_of_functions( \
D_imag_v, \
direct_sum_of_functions_with_shared_domain( \
composition_of_functions(D_FT_L, D_FCD_L, D_complexify_v, D_zeta_M), \
D_constant_term \
) \
) \
) \
)
D_action = \
composition_of_functions( \
D_I, \
D_Lagrangian_v, \
D_eta, \
cartesian_product_of_functions_with_shared_domain(D_position, D_velocity) \
)
constraint = composition_of_functions(imag, Q_M)
D_constraint = composition_of_functions(D_imag, D_Q_M)
def standard_basis_vector (dim, index):
return vector([1 if i == index else 0 for i in range(dim)])
def D_action_total (R):
# This is super wasteful, but for now it's ok
dim = len(R)
return vector([D_action((R,standard_basis_vector(dim,i)))[1] for i in range(dim)])
def D_constraint_total (R):
# This is super wasteful, but for now it's ok
dim = len(R)
return vector([D_constraint((R,standard_basis_vector(dim,i)))[1] for i in range(dim)])
def Lambda (R_lagmult):
R = R_lagmult[:-1]
lagmult = R_lagmult[-1]
return action(R) + lagmult*constraint(R)
def D_Lambda_total (R_lagmult):
R = R_lagmult[:-1]
lagmult = R_lagmult[-1]
dLambda_dR = D_action_total(R) + lagmult*D_constraint_total(R)
dLambda_dlagmult = constraint(R)
retval = np.ndarray((len(R_lagmult),), dtype=float)
retval[:-1] = dLambda_dR
retval[-1] = dLambda_dlagmult
return retval
def objective_function (R_lagmult):
print 'objective_function({0})'.format(R_lagmult)
return squared_L2_norm(D_Lambda_total(R_lagmult))
#TODO: blah... actually need the total differential, not an evaluated differential.
# def coreys_5_fold_curve ():
# initial_condition = [1.0, 0.0, math.sqrt(3.0)/4.0, 0.0, 1.0, 0.0]
# period = 46.5
# omega = cmath.exp(2.0j*math.pi/period)
# sample_count = 10000
# (Xs,Ts) = vector_field.compute_flow_curve(heisenberg_dynamics.hamiltonian_vector_field, initial_condition, 0.0, period, sample_count)
# XY = vector([complex(x,y) for x,y,_,_,_,_ in Xs])
# plt.figure(1, figsize=(20,10))
# plt.subplot(1,2,1)
# plt.plot([xy.real for xy in XY], [xy.imag for xy in XY])
# sample_times = np.linspace(0.0, period, sample_count+1)
# Ft = fourier.Transform(M, sample_times)
# C = Ft.coefficients_of(XY)
# XY = Ft.sampled_sum_of(C)
# plt.subplot(1,2,2)
# plt.plot([xy.real for xy in XY], [xy.imag for xy in XY])
# plt.savefig('hippo.png')
# return realify_v(C)
R = realify_v(vector([complex(np.random.randn(), np.random.randn())/(m if m != 0 else 1) for m in M]))
# R = np.random.randn(2*len(M))
# for i,m in enumerate(M):
# #i = M_inv[m]
# if m != 0:
# R[2*i:2*(i+1)] /= abs(m)
# #C = colvec([complex(np.random.randn(), np.random.randn()) for m in M])
# R_lagmult = np.random.randn(2*len(M)+1) # The last component is lagmult.
# print 'len(R_lagmult) = {0}'.format(len(R_lagmult))
# for _ in range(2):
# # First find an initial condition which has constraint(R) near 0.
# R = scipy.optimize.fmin_powell(lambda R : constraint(R)**2, R, disp=True)
# # Optimize with respect to objective_function. This defines the constrained dynamics problem.
# R_lagmult[:-1] = R
# R_lagmult = scipy.optimize.fmin_powell(objective_function, R_lagmult, disp=True)
# R = R_lagmult[:-1]
# lagmult = R_lagmult[-1]
# R = coreys_5_fold_curve()
# R_lagmult = np.ndarray((len(R)+1,), dtype=float)
# R_lagmult[:-1] = R
# R_lagmult[-1] = 1.0 # Arbitrary
# print 'before optimization: objective_function(R_lagmult) = {0}'.format(objective_function(R_lagmult))
# R_lagmult = scipy.optimize.fmin_powell(objective_function, R_lagmult, disp=True)
# print 'after optimization: objective_function(R_lagmult) = {0}'.format(objective_function(R_lagmult))
# R = R_lagmult[:-1]
# print 'zip(M,complexify_v(R)) = {0}'.format(zip(M,complexify_v(R)))
# Attempt to optimize to find the argmin of the action functional.
#R = scipy.optimize.fmin_powell(action, R, ftol=1.0e-5, disp=True)
# A = action(R)
# print 'action = {0}'.format(A)
# print 'constraint = {0}'.format(constraint(R))
# print 'D_position(R)*e0 squared L2 norm = {0}'.format(squared_L2_norm(D_position((R,standard_basis_vector(len(R),0)))[1]))
# print 'D_velocity(R)*e0 squared L2 norm = {0}'.format(squared_L2_norm(D_velocity((R,standard_basis_vector(len(R),0)))[1]))
# print 'D_action(R)*e0 = {0}'.format(D_action((R,standard_basis_vector(len(R),0))))
# print 'D_constraint(R)*e0 = {0}'.format(D_constraint((R,standard_basis_vector(len(R),0))))
# D_action_total_R = D_action_total(R)
# print 'D_action_total(R) = {0}'.format(D_action_total_R)
# print 'D_action_total(R) squared L2 norm = {0}'.format(squared_L2_norm(D_action_total_R))
# D_constraint_total_R = D_constraint_total(R)
# print 'D_constraint_total(R) = {0}'.format(D_constraint_total_R)
# print 'D_constraint_total(R) squared L2 norm = {0}'.format(squared_L2_norm(D_constraint_total_R))
print 'alpha = {0}, beta = {1}'.format(alpha, beta)
print 'R = {0}'.format(R)
print 'M = {0}'.format(M)
print 'Q_M(R) = {0}'.format(Q_M(R))
print 'period = {0}'.format(period)
print 'sample_count = {0}'.format(sample_count)
P = position(R)
print 'P.shape = {0}'.format(P.shape)
dP = derivative_of(P, sample_times)
print 'dP.shape = {0}'.format(dP.shape)
V = velocity(R)
print 'V.shape = {0}'.format(V.shape)
# Sanity check that the discrete derivative dP is about equal to V.
print 'diagram commutation failure amount (should be near zero): {0}'.format(squared_L2_norm(dP - V[:-1,:]))
plt.figure(1, figsize=(10,15))
plt.subplot(3,2,1)
plt.title('image of (x(t),y(t))')
#plt.axes().set_aspect('equal')
plt.plot(P[:,0], P[:,1])
#plt.figure(3)
plt.subplot(3,2,2)
plt.title('z(t)')
plt.plot(sample_times, P[:,2])
plt.subplot(3,2,3)
plt.title('image of (x\'(t),y\'(t))')
#plt.axes().set_aspect('equal')
plt.plot(V[:,0], V[:,1])
#plt.figure(4)
plt.subplot(3,2,4)
plt.title('z\'(t)')
plt.plot(sample_times, V[:,2])
PV = eta([P, V])
H_of_PV = Hamiltonian_v(PV)
L_of_PV = Lagrangian_v(PV)
print 'len(sample_times) = {0}, len(H_of_PV) = {1}, len(L_of_PV) = {2}'.format(len(sample_times), len(H_of_PV), len(L_of_PV))
plt.subplot(3,2,5)
plt.title('H(t)')
plt.plot(sample_times, H_of_PV)
#plt.figure(6)
plt.subplot(3,2,6)
plt.title('L(t)')
plt.plot(sample_times, L_of_PV)
plt.savefig('ostrich.png')
#return R,M,period,sample_count
if __name__ == "__main__":
main()
| mit |
derdav3/tf-sparql | data/BioPortal/older try/linear_regression_select.py | 1 | 3154 | '''
Largely adapted from: https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/2_BasicModels/linear_regression.py
The goal is to show a linear dependency between the number of variables in the SELECT statement and execution time.
'''
from __future__ import print_function
from bio_select_variables import *
import tensorflow as tf
import numpy
import sys, re
import matplotlib.pyplot as plt
rng = numpy.random
# Parameters
learning_rate = 0.01
training_epochs = 1000
display_step = 50
# Training Data
train_X = numpy.array([])
train_Y = numpy.array([])
# # Testing example, as requested (Issue #2)
# test_X = numpy.asarray([6.83, 4.668, 8.9, 7.91, 5.7, 8.7, 3.1, 2.1])
# test_Y = numpy.asarray([1.84, 2.273, 3.2, 2.831, 2.92, 3.24, 1.35, 1.03])
n_samples = 0
# tf Graph Input
X = tf.placeholder("float")
Y = tf.placeholder("float")
# Set model weights
W = tf.Variable(rng.randn(), name="weight")
b = tf.Variable(rng.randn(), name="bias")
def load_data():
db = readout_feature()
global train_X
global train_Y
# db_split = re.findall('^(.*?)\n', db, re.DOTALL)
for entry in (line for i, line in enumerate(db) if i<=250):
# print(entry)
entry = re.split(r'[\t|\n]', entry)
train_X = numpy.append(train_X,float(entry[0]))
train_Y = numpy.append(train_Y,float(entry[1]))
return db
def linear_model():
# Construct a linear model
return tf.add(tf.mul(X, W), b)
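# Illustrative sketch (assumption; `sess` refers to the session opened inside
# train_linear_model, where the op returned above is bound to `pred`): once W
# and b are trained, predicting the runtime for a query with, say, 4 SELECT
# variables is a single run of the model,
#     sess.run(pred, feed_dict={X: 4.0})   # ~ W * 4.0 + b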
def train_linear_model(data):
pred = linear_model()
# Mean squared error
cost = tf.reduce_sum(tf.pow(pred-Y, 2))/(2*len(data))
# Gradient descent
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
# Launch the graph
with tf.Session() as sess:
# Initializing the variables
init = tf.global_variables_initializer()
sess.run(init)
# Fit all training data
for epoch in range(training_epochs):
for (x, y) in zip(train_X, train_Y):
sess.run(optimizer, feed_dict={X: x, Y: y})
# Display logs per epoch step
if (epoch+1) % display_step == 0:
c = sess.run(cost, feed_dict={X: train_X, Y:train_Y})
# print("Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(c), \
# "W=", sess.run(W), "b=", sess.run(b))
print("Optimization Finished!")
training_cost = sess.run(cost, feed_dict={X: train_X, Y: train_Y})
# print("Training cost=", training_cost, "W=", sess.run(W), "b=", sess.run(b), '\n')
# Graphic display
plt.plot(train_X, train_Y, 'ro', label='Original data')
plt.plot(train_X, sess.run(W) * train_X + sess.run(b), label='Fitted line')
plt.legend()
plt.show()
# print("Testing... (Mean square loss Comparison)")
# testing_cost = sess.run(
# tf.reduce_sum(tf.pow(pred - Y, 2)) / (2 * test_X.shape[0]),
# feed_dict={X: test_X, Y: test_Y}) # same function as cost above
# print("Testing cost=", testing_cost)
# print("Absolute mean square loss difference:", abs(
# training_cost - testing_cost))
# plt.plot(test_X, test_Y, 'bo', label='Testing data')
# plt.plot(train_X, sess.run(W) * train_X + sess.run(b), label='Fitted line')
# plt.legend()
# plt.show()
def main():
data = load_data()
train_linear_model(data)
if __name__ == '__main__':
main() | mit |
jreback/pandas | pandas/tests/indexes/datetimes/methods/test_factorize.py | 2 | 3629 | import numpy as np
from pandas import DatetimeIndex, Index, date_range, factorize
import pandas._testing as tm
class TestDatetimeIndexFactorize:
def test_factorize(self):
idx1 = DatetimeIndex(
["2014-01", "2014-01", "2014-02", "2014-02", "2014-03", "2014-03"]
)
exp_arr = np.array([0, 0, 1, 1, 2, 2], dtype=np.intp)
exp_idx = DatetimeIndex(["2014-01", "2014-02", "2014-03"])
arr, idx = idx1.factorize()
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
assert idx.freq == exp_idx.freq
arr, idx = idx1.factorize(sort=True)
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
assert idx.freq == exp_idx.freq
# tz must be preserved
idx1 = idx1.tz_localize("Asia/Tokyo")
exp_idx = exp_idx.tz_localize("Asia/Tokyo")
arr, idx = idx1.factorize()
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
assert idx.freq == exp_idx.freq
idx2 = DatetimeIndex(
["2014-03", "2014-03", "2014-02", "2014-01", "2014-03", "2014-01"]
)
exp_arr = np.array([2, 2, 1, 0, 2, 0], dtype=np.intp)
exp_idx = DatetimeIndex(["2014-01", "2014-02", "2014-03"])
arr, idx = idx2.factorize(sort=True)
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
assert idx.freq == exp_idx.freq
exp_arr = np.array([0, 0, 1, 2, 0, 2], dtype=np.intp)
exp_idx = DatetimeIndex(["2014-03", "2014-02", "2014-01"])
arr, idx = idx2.factorize()
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
assert idx.freq == exp_idx.freq
def test_factorize_preserves_freq(self):
# GH#38120 freq should be preserved
idx3 = date_range("2000-01", periods=4, freq="M", tz="Asia/Tokyo")
exp_arr = np.array([0, 1, 2, 3], dtype=np.intp)
arr, idx = idx3.factorize()
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, idx3)
assert idx.freq == idx3.freq
arr, idx = factorize(idx3)
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, idx3)
assert idx.freq == idx3.freq
def test_factorize_tz(self, tz_naive_fixture, index_or_series):
tz = tz_naive_fixture
# GH#13750
base = date_range("2016-11-05", freq="H", periods=100, tz=tz)
idx = base.repeat(5)
exp_arr = np.arange(100, dtype=np.intp).repeat(5)
obj = index_or_series(idx)
arr, res = obj.factorize()
tm.assert_numpy_array_equal(arr, exp_arr)
expected = base._with_freq(None)
tm.assert_index_equal(res, expected)
assert res.freq == expected.freq
def test_factorize_dst(self, index_or_series):
# GH#13750
idx = date_range("2016-11-06", freq="H", periods=12, tz="US/Eastern")
obj = index_or_series(idx)
arr, res = obj.factorize()
tm.assert_numpy_array_equal(arr, np.arange(12, dtype=np.intp))
tm.assert_index_equal(res, idx)
if index_or_series is Index:
assert res.freq == idx.freq
idx = date_range("2016-06-13", freq="H", periods=12, tz="US/Eastern")
obj = index_or_series(idx)
arr, res = obj.factorize()
tm.assert_numpy_array_equal(arr, np.arange(12, dtype=np.intp))
tm.assert_index_equal(res, idx)
if index_or_series is Index:
assert res.freq == idx.freq
| bsd-3-clause |
Azaruddinjailor/ml_lab_ecsc_306 | labwork/lab7/sci-learn/plot_pca_3d.py | 354 | 2432 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Principal components analysis (PCA)
=========================================================
These figures aid in illustrating how a point cloud
can be very flat in one direction--which is where PCA
comes in to choose a direction that is not flat.
"""
print(__doc__)
# Authors: Gael Varoquaux
# Jaques Grobler
# Kevin Hughes
# License: BSD 3 clause
from sklearn.decomposition import PCA
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
###############################################################################
# Create the data
e = np.exp(1)
np.random.seed(4)
def pdf(x):
return 0.5 * (stats.norm(scale=0.25 / e).pdf(x)
+ stats.norm(scale=4 / e).pdf(x))
y = np.random.normal(scale=0.5, size=(30000))
x = np.random.normal(scale=0.5, size=(30000))
z = np.random.normal(scale=0.1, size=len(x))
density = pdf(x) * pdf(y)
pdf_z = pdf(5 * z)
density *= pdf_z
a = x + y
b = 2 * y
c = a - b + z
norm = np.sqrt(a.var() + b.var())
a /= norm
b /= norm
###############################################################################
# Plot the figures
def plot_figs(fig_num, elev, azim):
fig = plt.figure(fig_num, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=elev, azim=azim)
ax.scatter(a[::10], b[::10], c[::10], c=density[::10], marker='+', alpha=.4)
Y = np.c_[a, b, c]
# Using SciPy's SVD, this would be:
# _, pca_score, V = scipy.linalg.svd(Y, full_matrices=False)
pca = PCA(n_components=3)
pca.fit(Y)
pca_score = pca.explained_variance_ratio_
V = pca.components_
x_pca_axis, y_pca_axis, z_pca_axis = V.T * pca_score / pca_score.min()
x_pca_axis, y_pca_axis, z_pca_axis = 3 * V.T
x_pca_plane = np.r_[x_pca_axis[:2], - x_pca_axis[1::-1]]
y_pca_plane = np.r_[y_pca_axis[:2], - y_pca_axis[1::-1]]
z_pca_plane = np.r_[z_pca_axis[:2], - z_pca_axis[1::-1]]
x_pca_plane.shape = (2, 2)
y_pca_plane.shape = (2, 2)
z_pca_plane.shape = (2, 2)
ax.plot_surface(x_pca_plane, y_pca_plane, z_pca_plane)
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
elev = -40
azim = -80
plot_figs(1, elev, azim)
elev = 30
azim = 20
plot_figs(2, elev, azim)
plt.show()
| apache-2.0 |
shyamalschandra/scikit-learn | examples/cluster/plot_feature_agglomeration_vs_univariate_selection.py | 87 | 3903 | """
==============================================
Feature agglomeration vs. univariate selection
==============================================
This example compares 2 dimensionality reduction strategies:
- univariate feature selection with Anova
- feature agglomeration with Ward hierarchical clustering
Both methods are compared in a regression problem using
a BayesianRidge as supervised estimator.
"""
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
print(__doc__)
import shutil
import tempfile
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg, ndimage
from sklearn.feature_extraction.image import grid_to_graph
from sklearn import feature_selection
from sklearn.cluster import FeatureAgglomeration
from sklearn.linear_model import BayesianRidge
from sklearn.pipeline import Pipeline
from sklearn.externals.joblib import Memory
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import KFold
###############################################################################
# Generate data
n_samples = 200
size = 40 # image size
roi_size = 15
snr = 5.
np.random.seed(0)
mask = np.ones([size, size], dtype=np.bool)
coef = np.zeros((size, size))
coef[0:roi_size, 0:roi_size] = -1.
coef[-roi_size:, -roi_size:] = 1.
X = np.random.randn(n_samples, size ** 2)
for x in X: # smooth data
x[:] = ndimage.gaussian_filter(x.reshape(size, size), sigma=1.0).ravel()
X -= X.mean(axis=0)
X /= X.std(axis=0)
y = np.dot(X, coef.ravel())
noise = np.random.randn(y.shape[0])
noise_coef = (linalg.norm(y, 2) / np.exp(snr / 20.)) / linalg.norm(noise, 2)
y += noise_coef * noise # add noise
###############################################################################
# Compute the coefs of a Bayesian Ridge with GridSearch
cv = KFold(2) # cross-validation generator for model selection
ridge = BayesianRidge()
cachedir = tempfile.mkdtemp()
mem = Memory(cachedir=cachedir, verbose=1)
# Ward agglomeration followed by BayesianRidge
connectivity = grid_to_graph(n_x=size, n_y=size)
ward = FeatureAgglomeration(n_clusters=10, connectivity=connectivity,
memory=mem)
clf = Pipeline([('ward', ward), ('ridge', ridge)])
# Select the optimal number of parcels with grid search
clf = GridSearchCV(clf, {'ward__n_clusters': [10, 20, 30]}, n_jobs=1, cv=cv)
clf.fit(X, y) # set the best parameters
coef_ = clf.best_estimator_.steps[-1][1].coef_
coef_ = clf.best_estimator_.steps[0][1].inverse_transform(coef_)
coef_agglomeration_ = coef_.reshape(size, size)
# Anova univariate feature selection followed by BayesianRidge
f_regression = mem.cache(feature_selection.f_regression) # caching function
anova = feature_selection.SelectPercentile(f_regression)
clf = Pipeline([('anova', anova), ('ridge', ridge)])
# Select the optimal percentage of features with grid search
clf = GridSearchCV(clf, {'anova__percentile': [5, 10, 20]}, cv=cv)
clf.fit(X, y) # set the best parameters
coef_ = clf.best_estimator_.steps[-1][1].coef_
coef_ = clf.best_estimator_.steps[0][1].inverse_transform(coef_.reshape(1, -1))
coef_selection_ = coef_.reshape(size, size)
###############################################################################
# Inverse the transformation to plot the results on an image
plt.close('all')
plt.figure(figsize=(7.3, 2.7))
plt.subplot(1, 3, 1)
plt.imshow(coef, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("True weights")
plt.subplot(1, 3, 2)
plt.imshow(coef_selection_, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("Feature Selection")
plt.subplot(1, 3, 3)
plt.imshow(coef_agglomeration_, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("Feature Agglomeration")
plt.subplots_adjust(0.04, 0.0, 0.98, 0.94, 0.16, 0.26)
plt.show()
# Attempt to remove the temporary cachedir, but don't worry if it fails
shutil.rmtree(cachedir, ignore_errors=True)
| bsd-3-clause |
suyashbire1/pyhton_scripts_mom6 | plot_twamomy_budget_complete.py | 1 | 16400 | import sys
import readParams_moreoptions as rdp1
import matplotlib.pyplot as plt
from mom_plot import m6plot
import numpy as np
from netCDF4 import MFDataset as mfdset, Dataset as dset
import time
def extract_twamomy_terms(geofil,vgeofil,fil,xstart,xend,ystart,yend,zs,ze,meanax,
alreadysaved=False,xyasindices=False,calledfrompv=False):
if not alreadysaved:
keepax = ()
for i in range(4):
if i not in meanax:
keepax += (i,)
fhvgeo = dset(vgeofil)
db = -fhvgeo.variables['g'][:]
dbi = np.append(db,0)
fhvgeo.close()
fhgeo = dset(geofil)
fh = mfdset(fil)
zi = rdp1.getdims(fh)[2][0]
dbl = -np.diff(zi)*9.8/1031
if xyasindices:
(xs,xe),(ys,ye) = (xstart,xend),(ystart,yend)
_,_,dimv = rdp1.getdimsbyindx(fh,xs,xe,ys,ye,
zs=zs,ze=ze,ts=0,te=None,xhxq='xh',yhyq='yq',zlzi='zl')
else:
(xs,xe),(ys,ye),dimv = rdp1.getlatlonindx(fh,wlon=xstart,elon=xend,
slat=ystart, nlat=yend,zs=zs,ze=ze,yhyq='yq')
D = rdp1.getgeombyindx(fhgeo,xs,xe,ys,ye)[0]
(ah,aq) = rdp1.getgeombyindx(fhgeo,xs,xe,ys,ye+1)[1]
Dforgetvtwaforxdiff = rdp1.getgeombyindx(fhgeo,xs-1,xe,ys,ye)[0]
Dforgetvtwaforydiff = rdp1.getgeombyindx(fhgeo,xs,xe,ys-1,ye+1)[0]
Dforgethuforxdiff = rdp1.getgeombyindx(fhgeo,xs-1,xe,ys,ye+1)[0]
dxt,dyt = rdp1.getgeombyindx(fhgeo,xs,xe,ys,ye+1)[2][6:8]
dycv = rdp1.getgeombyindx(fhgeo,xs,xe,ys,ye)[2][3]
dxbu = rdp1.getgeombyindx(fhgeo,xs-1,xe,ys,ye)[2][4]
nt_const = dimv[0].size
t0 = time.time()
print('Reading data using loop...')
v = fh.variables['v'][0:1,zs:ze,ys:ye,xs:xe]
nt = np.ones(v.shape)*nt_const
frhatv = fh.variables['frhatv'][0:1,zs:ze,ys:ye,xs:xe]
h_v = frhatv*D[np.newaxis,np.newaxis,:,:]
h_v = np.ma.masked_array(h_v,mask=(h_v<=1e-3).astype(int))
nt[h_v<=1e-3] -= 1
hvm = (h_v*v).filled(0)
hvvm = (h_v*v*v).filled(0)
h_vm = h_v.filled(0)
hvmforxdiff, h_vmforxdiff = getvtwaforxdiff(fh,fhgeo,Dforgetvtwaforxdiff,0,xs-1,xe,ys,ye,zs,ze)
hvmforydiff, h_vmforydiff, hvvmforydiff = getvtwaforydiff(fh,fhgeo,Dforgetvtwaforydiff,0,xs,xe,ys-1,ye+1,zs,ze)
cav = fh.variables['CAv'][0:1,zs:ze,ys:ye,xs:xe]
gkev = fh.variables['gKEv'][0:1,zs:ze,ys:ye,xs:xe]
rvxu = fh.variables['rvxu'][0:1,zs:ze,ys:ye,xs:xe]
hmfum = (h_v*(cav - gkev - rvxu)).filled(0)
pfvm = fh.variables['PFv'][0:1,zs:ze,ys:ye,xs:xe]
# pfvm = np.ma.masked_array(pfvm,mask=(h_v<=1e-3).astype(int))
# pfvm = pfvm.filled(0)
hdvdtviscm = (h_v*fh.variables['dv_dt_visc'][0:1,zs:ze,ys:ye,xs:xe]).filled(0)
hdiffvm = (h_v*fh.variables['diffv'][0:1,zs:ze,ys:ye,xs:xe]).filled(0)
dvdtdia = fh.variables['dvdt_dia'][0:1,zs:ze,ys:ye,xs:xe]
wd = fh.variables['wd'][0:1,zs:ze,ys:ye+1,xs:xe]
wd = np.diff(wd,axis=1)
wdm = wd
hvwbm = (v*(wd[:,:,0:-1,:]+wd[:,:,1:,:])/2 - h_v*dvdtdia).filled(0)
uh = fh.variables['uh'][0:1,zs:ze,ys:ye+1,xs-1:xe]
uh = np.ma.filled(uh.astype(float), 0)
uhx = np.diff(uh,axis = 3)/ah
huvxpTm = (v*(uhx[:,:,0:-1,:]+uhx[:,:,1:,:])/2 - h_v*rvxu).filled(0)
vh = fh.variables['vh'][0:1,zs:ze,ys-1:ye+1,xs:xe]
vh = np.ma.filled(vh.astype(float), 0)
vhy = np.diff(vh,axis = 2)/ah
hvvymTm = (v*(vhy[:,:,0:-1,:]+vhy[:,:,1:,:])/2 - h_v*gkev).filled(0)
u = fh.variables['u'][0:1,zs:ze,ys:ye+1,xs-1:xe]
u = 0.25*(u[:,:,0:-1,0:-1] + u[:,:,1:,0:-1] + u[:,:,0:-1,1:] +
u[:,:,1:,1:])
hum = (h_v*u).filled(0)
humforxdiff = gethuforxdiff(fh,fhgeo,Dforgethuforxdiff,0,xs-1,xe,ys,ye+1,zs,ze)
        wd = fh.variables['wd'][0:1,zs:ze,ys:ye+1,xs:xe]
hw = wd*dbi[:,np.newaxis,np.newaxis]
hw = 0.5*(hw[:,0:-1,:,:] + hw[:,1:,:,:])
hwm_v = 0.5*(hw[:,:,0:-1,:] + hw[:,:,1:,:])
emforydiff = fh.variables['e'][0:1,zs:ze,ys:ye+1,xs:xe]/nt_const
if 1 in keepax:
em = fh.variables['e'][0:1,zs:ze,ys:ye,xs:xe]/nt_const
for i in range(1,nt_const):
v = fh.variables['v'][i:i+1,zs:ze,ys:ye,xs:xe]
frhatv = fh.variables['frhatv'][i:i+1,zs:ze,ys:ye,xs:xe]
h_v = frhatv*D[np.newaxis,np.newaxis,:,:]
h_v = np.ma.masked_array(h_v,mask=(h_v<=1e-3).astype(int))
nt[h_v<=1e-3] -= 1
hvm += (h_v*v).filled(0)
h_vm += h_v.filled(0)
hvforxdiff, h_vforxdiff = getvtwaforxdiff(fh,fhgeo,Dforgetvtwaforxdiff,i,xs-1,xe,ys,ye,zs,ze)
hvforydiff, h_vforydiff, hvvforydiff = getvtwaforydiff(fh,fhgeo,Dforgetvtwaforydiff,i,xs,xe,ys-1,ye+1,zs,ze)
hvmforxdiff += hvforxdiff
h_vmforxdiff += h_vforxdiff
hvvmforydiff += hvvforydiff
hvmforydiff += hvforydiff
h_vmforydiff += h_vforydiff
cav = fh.variables['CAv'][i:i+1,zs:ze,ys:ye,xs:xe]
gkev = fh.variables['gKEv'][i:i+1,zs:ze,ys:ye,xs:xe]
rvxu = fh.variables['rvxu'][i:i+1,zs:ze,ys:ye,xs:xe]
hmfu = h_v*(cav - gkev - rvxu)
hmfum += hmfu.filled(0)
pfv = fh.variables['PFv'][i:i+1,zs:ze,ys:ye,xs:xe]
# pfv = np.ma.masked_array(pfv,mask=(h_v<=1e-3).astype(int))
# pfvm += pfv.filled(0)
pfvm += pfv
hdvdtvisc = h_v*fh.variables['dv_dt_visc'][i:i+1,zs:ze,ys:ye,xs:xe]
hdvdtviscm += hdvdtvisc.filled(0)
hdiffv = h_v*fh.variables['diffv'][i:i+1,zs:ze,ys:ye,xs:xe]
hdiffvm += hdiffv.filled(0)
dvdtdia = fh.variables['dvdt_dia'][i:i+1,zs:ze,ys:ye,xs:xe]
wd = fh.variables['wd'][i:i+1,zs:ze,ys:ye+1,xs:xe]
wd = np.diff(wd,axis=1)
wdm += wd
hvwb = (v*(wd[:,:,0:-1,:]+wd[:,:,1:,:])/2 - h_v*dvdtdia)
hvwb = np.ma.masked_array(hvwb,mask=(h_v<=1e-3).astype(int))
hvwbm += hvwb.filled(0)
            uh = fh.variables['uh'][i:i+1,zs:ze,ys:ye+1,xs-1:xe]
uh = np.ma.filled(uh.astype(float), 0)
uhx = np.diff(uh,axis = 3)/ah
huvxpT = (v*(uhx[:,:,0:-1,:]+uhx[:,:,1:,:])/2 - h_v*rvxu).filled(0)
huvxpTm += huvxpT
            vh = fh.variables['vh'][i:i+1,zs:ze,ys-1:ye+1,xs:xe]
vh = np.ma.filled(vh.astype(float), 0)
vhy = np.diff(vh,axis = 2)/ah
hvvymT = (v*(vhy[:,:,0:-1,:]+vhy[:,:,1:,:])/2 - h_v*gkev).filled(0)
            hvvymTm += hvvymT
            u = fh.variables['u'][i:i+1,zs:ze,ys:ye+1,xs-1:xe]
u = 0.25*(u[:,:,0:-1,0:-1] + u[:,:,1:,0:-1] + u[:,:,0:-1,1:] +
u[:,:,1:,1:])
hum += (h_v*u).filled(0)
humforxdiff += gethuforxdiff(fh,fhgeo,Dforgethuforxdiff,i,xs-1,xe,ys,ye+1,zs,ze)
wd = fh.variables['wd'][i:i+1,zs:ze,ys:ye+1,xs:xe]
hw = wd*dbi[:,np.newaxis,np.newaxis]
hw = 0.5*(hw[:,0:-1,:,:] + hw[:,1:,:,:])
hwm_v += 0.5*(hw[:,:,0:-1,:] + hw[:,:,1:,:])
emforydiff += fh.variables['e'][i:i+1,zs:ze,ys:ye+1,xs:xe]/nt_const
if 1 in keepax:
em += fh.variables['e'][i:i+1,zs:ze,ys:ye,xs:xe]/nt_const
sys.stdout.write('\r'+str(int((i+1)/nt_const*100))+'% done...')
sys.stdout.flush()
fhgeo.close()
print('Time taken for data reading: {}s'.format(time.time()-t0))
elm = 0.5*(em[:,0:-1,:,:]+em[:,1:,:,:])
elmforydiff = 0.5*(emforydiff[:,0:-1,:,:]+emforydiff[:,1:,:,:])
vtwa = hvm/h_vm
vtwaforxdiff = hvmforxdiff/h_vmforxdiff
vtwaforydiff = hvmforydiff/h_vmforydiff
vtwaforxdiff = np.concatenate((vtwaforxdiff,-vtwaforxdiff[:,:,:,-1:]),axis=3)
vtwax = np.diff(vtwaforxdiff,axis=3)/dxbu
vtwax = 0.5*(vtwax[:,:,:,0:-1] + vtwax[:,:,:,1:])
vtway = np.diff(vtwaforydiff,axis=2)/dyt
vtway = 0.5*(vtway[:,:,0:-1,:] + vtway[:,:,1:,:])
humx = np.diff(humforxdiff,axis=3)/dxt
humx = 0.5*(humx[:,:,0:-1,:] + humx[:,:,1:,:])
hvmy = np.diff(hvmforydiff,axis=2)/dyt
hvmy = 0.5*(hvmy[:,:,0:-1,:] + hvmy[:,:,1:,:])
huvxphvvym = huvxpTm + hvvymTm
hvvym = np.diff(hvvmforydiff,axis=2)/dyt
hvvym = 0.5*(hvvym[:,:,0:-1,:] + hvvym[:,:,1:,:])
huvxm = huvxphvvym - hvvym
vtwaforvdiff = np.concatenate((vtwa[:,[0],:,:],vtwa),axis=1)
vtwab = np.diff(vtwaforvdiff,axis=1)/db[:,np.newaxis,np.newaxis]
vtwab = np.concatenate((vtwab,np.zeros([vtwab.shape[0],1,vtwab.shape[2],vtwab.shape[3]])),axis=1)
vtwab = 0.5*(vtwab[:,0:-1,:,:] + vtwab[:,1:,:,:])
hwb_v = 0.5*(wdm[:,:,0:-1,:] + wdm[:,:,1:,:])
print('Calculating form drag using loop...')
t0 = time.time()
e = fh.variables['e'][0:1,zs:ze,ys:ye+1,xs:xe]
el = 0.5*(e[:,0:-1,:,:] + e[:,1:,:,:])
ed = e - emforydiff
edl = el - elmforydiff
edlsqm = (edl**2)
pfv = fh.variables['PFv'][0:1,zs:ze,ys:ye,xs:xe]
pfvd = pfv - pfvm/nt_const
geta = 9.8*ed[:,:1,:,:]/1031
getay = np.diff(geta,axis=2)/dycv
pfvd = np.concatenate((pfvd,np.zeros([pfvd.shape[0],1,pfvd.shape[2],pfvd.shape[3]])),axis=1)
pfvd = 0.5*(pfvd[:,0:-1,:,:] + pfvd[:,1:,:,:])
pfvd = np.concatenate((-getay,pfvd),axis=1)
ed = 0.5*(ed[:,:,0:-1,:] + ed[:,:,1:,:])
edpfvdm = ed*pfvd
for i in range(1,nt_const):
e = fh.variables['e'][i:i+1,zs:ze,ys:ye+1,xs:xe]
el = 0.5*(e[:,0:-1,:,:] + e[:,1:,:,:])
ed = e - emforydiff
edl = el - elmforydiff
edlsqm += (edl**2)
pfv = fh.variables['PFv'][i:i+1,zs:ze,ys:ye,xs:xe]
pfvd = pfv - pfvm/nt_const
geta = 9.8*ed[:,:1,:,:]/1031
getay = np.diff(geta,axis=2)/dycv
pfvd = np.concatenate((pfvd,np.zeros([pfvd.shape[0],1,pfvd.shape[2],pfvd.shape[3]])),axis=1)
pfvd = 0.5*(pfvd[:,0:-1,:,:] + pfvd[:,1:,:,:])
pfvd = np.concatenate((-getay,pfvd),axis=1)
ed = 0.5*(ed[:,:,0:-1,:] + ed[:,:,1:,:])
edpfvdm += ed*pfvd
sys.stdout.write('\r'+str(int((i+1)/nt_const*100))+'% done...')
sys.stdout.flush()
print('Time taken for data reading: {}s'.format(time.time()-t0))
fh.close()
edlsqmy = np.diff(edlsqm,axis=2)/dycv
advx = hum*vtwax/h_vm
advy = vtwa*vtway
advb = hwm_v*vtwab/h_vm
cor = hmfum/h_vm
pfvm = pfvm/nt_const
xdivep1 = huvxm/h_vm
xdivep2 = -advx
xdivep3 = -vtwa*humx/h_vm
xdivep = (xdivep1 + xdivep2 + xdivep3)
ydivep1 = hvvym/h_vm
ydivep2 = -advy
ydivep3 = -vtwa*hvmy/h_vm
ydivep4 = 0.5*edlsqmy*dbl[:,np.newaxis,np.newaxis]/h_vm
ydivep = (ydivep1 + ydivep2 + ydivep3 + ydivep4)
bdivep1 = hvwbm/h_vm
bdivep2 = -advb
bdivep3 = -vtwa*hwb_v/h_vm
bdivep4 = np.diff(edpfvdm,axis=1)/db[:,np.newaxis,np.newaxis]*dbl[:,np.newaxis,np.newaxis]/h_vm
bdivep = (bdivep1 + bdivep2 + bdivep3 + bdivep4)
Y1twa = hdiffvm/h_vm
Y2twa = hdvdtviscm/h_vm
terms = np.ma.concatenate(( -advx[:,:,:,:,np.newaxis],
-advy[:,:,:,:,np.newaxis],
-advb[:,:,:,:,np.newaxis],
cor[:,:,:,:,np.newaxis],
pfvm[:,:,:,:,np.newaxis],
-xdivep[:,:,:,:,np.newaxis],
-ydivep[:,:,:,:,np.newaxis],
-bdivep[:,:,:,:,np.newaxis],
Y1twa[:,:,:,:,np.newaxis],
Y2twa[:,:,:,:,np.newaxis]),
axis=4)
termsep = np.ma.concatenate(( xdivep1[:,:,:,:,np.newaxis],
xdivep3[:,:,:,:,np.newaxis],
ydivep1[:,:,:,:,np.newaxis],
ydivep3[:,:,:,:,np.newaxis],
ydivep4[:,:,:,:,np.newaxis],
bdivep1[:,:,:,:,np.newaxis],
bdivep3[:,:,:,:,np.newaxis],
bdivep4[:,:,:,:,np.newaxis]),
axis=4)
terms[np.isinf(terms)] = np.nan
termsep[np.isinf(termsep)] = np.nan
termsm = np.ma.apply_over_axes(np.nanmean, terms, meanax)
termsepm = np.ma.apply_over_axes(np.nanmean, termsep, meanax)
X = dimv[keepax[1]]
Y = dimv[keepax[0]]
if 1 in keepax:
em = np.ma.apply_over_axes(np.mean, em, meanax)
elm = np.ma.apply_over_axes(np.mean, elm, meanax)
Y = elm.squeeze()
X = np.meshgrid(X,dimv[1])[0]
P = termsm.squeeze()
P = np.ma.filled(P.astype(float), np.nan)
Pep = termsepm.squeeze()
Pep = np.ma.filled(Pep.astype(float), np.nan)
X = np.ma.filled(X.astype(float), np.nan)
Y = np.ma.filled(Y.astype(float), np.nan)
if not calledfrompv:
np.savez('twamomy_complete_terms', X=X,Y=Y,P=P,Pep=Pep)
else:
npzfile = np.load('twamomy_complete_terms.npz')
X = npzfile['X']
Y = npzfile['Y']
P = npzfile['P']
Pep = npzfile['Pep']
return (X,Y,P,Pep)
def getvtwaforxdiff(fh,fhgeo,D,i,xs,xe,ys,ye,zs,ze):
v = fh.variables['v'][i:i+1,zs:ze,ys:ye,xs:xe]
frhatv = fh.variables['frhatv'][i:i+1,zs:ze,ys:ye,xs:xe]
h_v = frhatv*D[np.newaxis,np.newaxis,:,:]
h_v = np.ma.masked_array(h_v,mask=(h_v<=1e-3).astype(int))
return ((h_v*v).filled(0), h_v.filled(0))
def getvtwaforydiff(fh,fhgeo,D,i,xs,xe,ys,ye,zs,ze):
v = fh.variables['v'][i:i+1,zs:ze,ys:ye,xs:xe]
frhatv = fh.variables['frhatv'][i:i+1,zs:ze,ys:ye,xs:xe]
h_v = frhatv*D[np.newaxis,np.newaxis,:,:]
h_v = np.ma.masked_array(h_v,mask=(h_v<=1e-3).astype(int))
return ((h_v*v).filled(0), h_v.filled(0), (h_v*v*v).filled(0))
def gethuforxdiff(fh,fhgeo,D,i,xs,xe,ys,ye,zs,ze):
u = fh.variables['u'][i:i+1,zs:ze,ys:ye,xs:xe]
frhatu = fh.variables['frhatu'][i:i+1,zs:ze,ys:ye,xs:xe]
h_u = frhatu*D[np.newaxis,np.newaxis,:,:]
h_u = np.ma.masked_array(h_u,mask=(h_u<=1e-3).astype(int))
return (h_u*u).filled(0)
def plot_twamomy(geofil,vgeofil,fil,xstart,xend,ystart,yend,zs,ze,meanax,
cmaxscalefactor=1,cmaxscalefactorforep=1, savfil=None,savfilep=None,alreadysaved=False):
X,Y,P,Pep = extract_twamomy_terms(geofil,vgeofil,fil,xstart,xend,ystart,yend,zs,ze,meanax,
alreadysaved)
cmax = np.nanmax(np.absolute(P))*cmaxscalefactor
plt.figure()
ti = ['(a)','(b)','(c)','(d)','(e)','(f)','(g)','(h)','(i)','(j)']
for i in range(P.shape[-1]):
ax = plt.subplot(5,2,i+1)
im = m6plot((X,Y,P[:,:,i]),ax,Zmax=cmax,titl=ti[i])
if i % 2:
ax.set_yticklabels([])
else:
plt.ylabel('z (m)')
if i > 7:
plt.xlabel('x from EB (Deg)')
else:
ax.set_xticklabels([])
if savfil:
plt.savefig(savfil+'.eps', dpi=300, facecolor='w', edgecolor='w',
format='eps', transparent=False, bbox_inches='tight')
else:
im = m6plot((X,Y,np.sum(P,axis=2)),Zmax=cmax)
plt.show()
cmax = np.nanmax(np.absolute(Pep))*cmaxscalefactorforep
plt.figure()
for i in range(Pep.shape[-1]):
ax = plt.subplot(4,2,i+1)
im = m6plot((X,Y,Pep[:,:,i]),ax,Zmax=cmax,titl=ti[i])
if i % 2:
ax.set_yticklabels([])
else:
plt.ylabel('z (m)')
if i > 5:
plt.xlabel('x from EB (Deg)')
else:
ax.set_xticklabels([])
if savfilep:
plt.savefig(savfilep+'.eps', dpi=300, facecolor='w', edgecolor='w',
format='eps', transparent=False, bbox_inches='tight')
else:
plt.show()
| gpl-3.0 |
barentsen/RosettaBot | entropy_crop.py | 1 | 4165 | """Smart cropping of an image by maximizing information entropy."""
from __future__ import division
import numpy as np
def image_entropy(img, img_min, img_max):
"""Calculate the entropy of an image.
Parameters
----------
img : numpy array
The image data.
img_min, img_max : float, float
Minimum and maximum to consider when computing the entropy.
Returns
-------
entropy : float
"""
histo, bins = np.histogram(img, bins=255, range=(img_min, img_max))
probabilities = histo / np.sum(histo)
entropy = -sum([p * np.log2(p) for p in probabilities if p != 0])
return entropy
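# Illustrative usage sketch (not part of the original module): a constant
# image has entropy 0, while a uniformly random 8-bit image approaches
# log2(255 bins) ~ 8 bits. Array sizes and the seed are arbitrary choices.
def _image_entropy_demo():
    rng = np.random.RandomState(0)
    flat = np.zeros((64, 64))                   # single-bin histogram -> 0
    noisy = rng.randint(0, 256, size=(64, 64))  # near-uniform histogram
    return image_entropy(flat, 0, 255), image_entropy(noisy, 0, 255)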
def entropy_crop(img, width, height, max_steps=10):
"""Crops an image such that information entropy is maximized.
This function is originally adapted from the FreeBSD-licensed `cropy`
package, see credits here: https://pypi.python.org/pypi/cropy/0.1
Parameters
----------
img : numpy array
The image data.
width, height : int, int
Desired dimensions of the cropped image.
max_steps : int
Maximum number of iterations (default: 10).
Returns
-------
cropped_img : numpy array
The cropped image data.
"""
img_min, img_max = np.min(img), np.max(img)
original_height, original_width = img.shape
right_x, bottom_y = original_width, original_height
left_x, top_y = 0, 0
# calculate slice size based on max steps
slice_size = int(round((original_width - width) / max_steps))
if slice_size == 0:
slice_size = 1
left_slice = None
right_slice = None
    # cut left or right slice of image based on min entropy value until target width is reached
# while there still are uninvestigated slices of the image (left and right)
while ((right_x - left_x - slice_size) > width):
if (left_slice is None):
left_slice = img[0: original_height + 1, left_x: left_x + slice_size + 1]
if (right_slice is None):
right_slice = img[0: original_height + 1, right_x - slice_size: right_x + 1]
if (image_entropy(left_slice, img_min, img_max) < image_entropy(right_slice, img_min, img_max)):
left_x = left_x + slice_size
left_slice = None
else:
right_x = right_x - slice_size
right_slice = None
top_slice = None
bottom_slice = None
# calculate slice size based on max steps
slice_size = int(round((original_height - height) / max_steps))
if slice_size == 0:
slice_size = 1
# cut upper or bottom slice of image based on min entropy value until
# target height is reached
# while there still are uninvestigated slices of the image (top and bottom)
while ((bottom_y - top_y - slice_size) > height):
if (top_slice is None):
top_slice = img[top_y: top_y + slice_size + 1, 0: original_width + 1]
if (bottom_slice is None):
bottom_slice = img[bottom_y - slice_size:bottom_y + 1, 0: original_width + 1]
if (image_entropy(top_slice, img_min, img_max) < image_entropy(bottom_slice, img_min, img_max)):
top_y = top_y + slice_size
top_slice = None
else:
bottom_y = bottom_y - slice_size
bottom_slice = None
    # keep rows top_y..top_y+height and columns left_x..left_x+width
    return img[top_y: top_y + height,
               left_x: left_x + width]
if __name__ == '__main__':
"""Example use"""
from matplotlib.image import imsave
from matplotlib import cm
from astropy.io import fits
from astropy import log
from astropy.visualization import scale_image
fts = fits.open('http://imagearchives.esac.esa.int/data_raw/ROSETTA/NAVCAM'
'/RO-C-NAVCAM-2-PRL-MTP007-V1.0/DATA/CAM1'
'/ROS_CAM1_20140922T060854F.FIT')
img = fts[0].data
width, height = 512, 256
image_cropped = entropy_crop(img, width, height)
image_scaled = scale_image(image_cropped, scale='linear',
min_percent=0.05, max_percent=99.95)
jpg_fn = 'test.jpg'
log.info('Writing {0}'.format(jpg_fn))
imsave(jpg_fn, image_scaled, cmap=cm.gray)
| mit |
benanne/morb | examples/example_mnist_persistent.py | 1 | 4886 | import morb
from morb import rbms, stats, updaters, trainers, monitors
import theano
import theano.tensor as T
import numpy as np
import gzip, cPickle
import matplotlib.pyplot as plt
plt.ion()
from utils import generate_data, get_context
# DEBUGGING
from theano import ProfileMode
# mode = theano.ProfileMode(optimizer='fast_run', linker=theano.gof.OpWiseCLinker())
# mode = theano.compile.DebugMode(check_py_code=False, require_matching_strides=False)
mode = None
# load data
print ">> Loading dataset..."
f = gzip.open('datasets/mnist.pkl.gz','rb')
train_set, valid_set, test_set = cPickle.load(f)
f.close()
train_set_x, train_set_y = train_set
valid_set_x, valid_set_y = valid_set
test_set_x, test_set_y = test_set
# TODO DEBUG
# train_set_x = train_set_x[:10000]
valid_set_x = valid_set_x[:1000]
n_visible = train_set_x.shape[1]
n_hidden = 500
mb_size = 20
k = 15
learning_rate = 0.1
epochs = 15
print ">> Constructing RBM..."
rbm = rbms.BinaryBinaryRBM(n_visible, n_hidden)
initial_vmap = { rbm.v: T.matrix('v') }
persistent_vmap = { rbm.h: theano.shared(np.zeros((mb_size, n_hidden), dtype=theano.config.floatX)) }
# try to calculate weight updates using CD stats
print ">> Constructing contrastive divergence updaters..."
s = stats.cd_stats(rbm, initial_vmap, visible_units=[rbm.v], hidden_units=[rbm.h], k=k, persistent_vmap=persistent_vmap, mean_field_for_stats=[rbm.v], mean_field_for_gibbs=[rbm.v])
umap = {}
for var in rbm.variables:
pu = var + (learning_rate / float(mb_size)) * updaters.CDUpdater(rbm, var, s)
umap[var] = pu
print ">> Compiling functions..."
t = trainers.MinibatchTrainer(rbm, umap)
m = monitors.reconstruction_mse(s, rbm.v)
m_data = s['data'][rbm.v]
m_model = s['model'][rbm.v]
e_data = rbm.energy(s['data']).mean()
e_model = rbm.energy(s['model']).mean()
# train = t.compile_function(initial_vmap, mb_size=32, monitors=[m], name='train', mode=mode)
train = t.compile_function(initial_vmap, mb_size=mb_size, monitors=[m, e_data, e_model], name='train', mode=mode)
evaluate = t.compile_function(initial_vmap, mb_size=mb_size, monitors=[m, m_data, m_model, e_data, e_model], name='evaluate', train=False, mode=mode)
def plot_data(d):
plt.figure(5)
plt.clf()
plt.imshow(d.reshape((28,28)), interpolation='gaussian')
plt.draw()
def sample_evolution(start, ns=100): # start = start data
sample = t.compile_function(initial_vmap, mb_size=1, monitors=[m_model], name='evaluate', train=False, mode=mode)
data = start
plot_data(data)
while True:
for k in range(ns):
for x in sample({ rbm.v: data }): # draw a new sample
data = x[0]
plot_data(data)
# TRAINING
print ">> Training for %d epochs..." % epochs
mses_train_so_far = []
mses_valid_so_far = []
edata_train_so_far = []
emodel_train_so_far = []
edata_so_far = []
emodel_so_far = []
for epoch in range(epochs):
monitoring_data_train = [(cost, energy_data, energy_model) for cost, energy_data, energy_model in train({ rbm.v: train_set_x })]
mses_train, edata_train_list, emodel_train_list = zip(*monitoring_data_train)
mse_train = np.mean(mses_train)
edata_train = np.mean(edata_train_list)
emodel_train = np.mean(emodel_train_list)
monitoring_data = [(cost, data, model, energy_data, energy_model) for cost, data, model, energy_data, energy_model in evaluate({ rbm.v: valid_set_x })]
mses_valid, vdata, vmodel, edata, emodel = zip(*monitoring_data)
mse_valid = np.mean(mses_valid)
edata_valid = np.mean(edata)
emodel_valid = np.mean(emodel)
# plotting
mses_train_so_far.append(mse_train)
mses_valid_so_far.append(mse_valid)
edata_so_far.append(edata_valid)
emodel_so_far.append(emodel_valid)
edata_train_so_far.append(edata_train)
emodel_train_so_far.append(emodel_train)
plt.figure(1)
plt.clf()
plt.plot(mses_train_so_far, label='train')
plt.plot(mses_valid_so_far, label='validation')
plt.title("MSE")
plt.legend()
plt.draw()
plt.figure(4)
plt.clf()
plt.plot(edata_so_far, label='validation / data')
plt.plot(emodel_so_far, label='validation / model')
plt.plot(edata_train_so_far, label='train / data')
plt.plot(emodel_train_so_far, label='train / model')
plt.title("energy")
plt.legend()
plt.draw()
# plot some samples
plt.figure(2)
plt.clf()
plt.imshow(vdata[0][0].reshape((28, 28)))
plt.draw()
plt.figure(3)
plt.clf()
plt.imshow(vmodel[0][0].reshape((28, 28)))
plt.draw()
print "Epoch %d" % epoch
print "training set: MSE = %.6f, data energy = %.2f, model energy = %.2f" % (mse_train, edata_train, emodel_train)
print "validation set: MSE = %.6f, data energy = %.2f, model energy = %.2f" % (mse_valid, edata_valid, emodel_valid)
| gpl-3.0 |
danx0r/gittrack | sched.py | 1 | 1834 | from pyschedule import Scenario, solvers, plotters
def plot(S) :
if solvers.mip.solve(S):
# %matplotlib inline
plotters.matplotlib.plot(S,task_colors=task_colors,fig_size=(10,5))
else:
print('no solution exists')
if __name__ == "__main__":
S = Scenario('bike_paint_shop', horizon=10)
Alice = S.Resource('Alice')
Bob = S.Resource('Bob')
green_paint, red_paint = S.Task('green_paint', length=2), S.Task('red_paint', length=2)
green_post, red_post = S.Task('green_post'), S.Task('red_post')
S += green_paint < green_post, red_paint + 1 <= red_post
# green_paint += Alice|Bob
# green_post += Alice|Bob
#
# red_paint += Alice|Bob
# red_post += Alice|Bob
S.clear_solution()
S.use_makespan_objective()
task_colors = { green_paint : '#A1D372',
green_post : '#A1D372',
red_paint : '#EB4845',
red_post : '#EB4845',
S['MakeSpan'] : '#7EA7D8'}
# First remove the old resource to task assignments
# green_paint -= Alice|Bob
# green_post -= Alice|Bob
# red_paint -= Alice|Bob
# red_post -= Alice|Bob
# Add new shared ones
green_resource = Alice|Bob
green_paint += green_resource
green_post += green_resource
red_resource = Alice|Bob
red_paint += red_resource
red_post += red_resource
Paint_Shop = S.Resource('Paint_Shop')
red_paint += Paint_Shop
green_paint += Paint_Shop
Lunch = S.Task('Lunch')
Lunch += {Alice, Bob}
S += Lunch > 3, Lunch < 5
task_colors[Lunch] = '#7EA7D8'
S += red_paint > 2
#Alice is a morning bird
S += Alice['length'][:3] >= 3
print(S)
# plot(S)
s = S.solution()
    print(s)
| bsd-3-clause |
cjratcliff/variational-dropout | nets.py | 1 | 8268 | from __future__ import division
from __future__ import print_function
import time
import tensorflow as tf
import numpy as np
from sklearn.model_selection import train_test_split
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.layers.core import Dropout
from layers import FCVarDropout, Conv2DVarDropout
from loss import sgvlb
from utils import get_minibatches_idx, clip
batch_size = 32
eps = 1e-8
class Net():
def fit(self,X,y,sess):
max_epochs = 20
# Split into training and validation sets
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.33, random_state=42)
for epoch in range(max_epochs):
start = time.time()
train_indices = get_minibatches_idx(len(X_train), batch_size, shuffle=True)
print("\nEpoch %d" % (epoch+1))
train_accs = []
for c,it in enumerate(train_indices):
batch_train_x = [X_train[i] for i in it]
batch_train_y = [y_train[i] for i in it]
feed_dict = {self.x: batch_train_x,
self.y: batch_train_y,
self.deterministic: False}
_,acc = sess.run([self.train_step,self.accuracy], feed_dict)
train_accs.append(acc)
#print(c,len(train_indices),acc)
print("Training accuracy: %.3f" % np.mean(train_accs))
val_pred = self.predict(X_val,sess)
y = np.argmax(y_val,axis=1)
val_acc = np.mean(np.equal(val_pred,y))
print("Val accuracy: %.3f" % val_acc)
print("Time taken: %.3fs" % (time.time() - start))
return
def predict(self,X,sess):
indices = get_minibatches_idx(len(X), batch_size, shuffle=False)
pred = []
for i in indices:
batch_x = [X[j] for j in i]
feed_dict = {self.x: batch_x,
self.deterministic: True}
pred_batch = sess.run(self.pred, feed_dict)
pred.append(pred_batch)
pred = np.concatenate(pred,axis=0)
pred = np.argmax(pred,axis=1)
pred = np.reshape(pred,(-1))
return pred
class LeNet(Net):
def __init__(self, img_size, num_channels, num_classes):
self.x = tf.placeholder(tf.float32, [None,img_size,img_size,num_channels], 'x')
self.y = tf.placeholder(tf.float32, [None,num_classes], 'y')
self.deterministic = tf.placeholder(tf.bool, name='d')
h = Conv2D(32, kernel_size=(3,3),
activation='relu',
input_shape=[None,img_size,img_size,num_channels])(self.x)
h = Conv2D(64, (3, 3), activation='relu')(h)
h = MaxPooling2D(pool_size=(2,2))(h)
h = Flatten()(h)
h = Dense(500, activation='relu')(h)
self.pred = Dense(num_classes, activation='softmax')(h)
pred = tf.clip_by_value(self.pred,eps,1-eps)
loss = -tf.reduce_sum(tf.log(pred)*self.y)
correct_prediction = tf.equal(tf.argmax(self.y, 1), tf.argmax(self.pred, 1))
self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32), name='accuracy')
optimizer = tf.train.AdamOptimizer()
self.train_step = optimizer.minimize(loss)
class LeNetVarDropout(Net):
def __init__(self, img_size, num_channels, num_classes):
self.x = tf.placeholder(tf.float32, [None,img_size,img_size,num_channels], 'x')
self.y = tf.placeholder(tf.float32, [None,num_classes], 'y')
self.deterministic = tf.placeholder(tf.bool, name='d')
d = self.deterministic
h = Conv2DVarDropout(num_channels, 32, (3,3), strides=(1,1))(self.x,d)
h = Conv2DVarDropout(32, 64, (3,3), strides=(1,1))(h,d)
h = MaxPooling2D(pool_size=(2,2))(h)
h = Flatten()(h)
if num_channels == 1:
#h = FCVarDropout(9216,500)(h,d)
h = Dense(500)(h)
elif num_channels == 3:
h = FCVarDropout(12544,500)(h,d)
else:
raise NotImplementedError
#self.pred = FCVarDropout(500,num_classes,tf.nn.softmax)(h,d)
self.pred = Dense(num_classes,activation='softmax')(h)
pred = tf.clip_by_value(self.pred,eps,1-eps)
W = tf.get_collection('W')
log_sigma2 = tf.get_collection('log_sigma2')
loss = sgvlb(pred, self.y, W, log_sigma2, batch_size, rw=1)
correct_prediction = tf.equal(tf.argmax(self.y, 1), tf.argmax(self.pred, 1))
self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32), name='accuracy')
optimizer = tf.train.AdamOptimizer()
self.train_step = optimizer.minimize(loss)
class VGG(Net):
def __init__(self, img_size, num_channels, num_classes, dropout_prob=0.0):
# Based on https://github.com/fchollet/keras/blob/master/keras/applications/vgg16.py
self.x = tf.placeholder(tf.float32, [None,img_size,img_size,num_channels], 'x')
self.y = tf.placeholder(tf.float32, [None,num_classes], 'y')
self.deterministic = tf.placeholder(tf.bool, name='d')
d = self.deterministic
phase = tf.logical_not(d)
def conv_bn(h, num_filters, phase):
h = Conv2D(num_filters, (3,3), padding='same')(h) # Linear
h = tf.contrib.layers.batch_norm(h, center=True, scale=False, is_training=phase)
return tf.nn.relu(h)
# Block 1
h = conv_bn(self.x,64,phase)
h = conv_bn(h,64,phase)
h = MaxPooling2D((2, 2), strides=(2,2))(h)
# Block 2
h = conv_bn(h,128,phase)
h = conv_bn(h,128,phase)
h = MaxPooling2D((2, 2), strides=(2,2))(h)
# Block 3
h = conv_bn(h,256,phase)
h = conv_bn(h,256,phase)
h = conv_bn(h,256,phase)
h = MaxPooling2D((2,2), strides=(2,2))(h)
# Block 4
h = conv_bn(h,512,phase)
h = conv_bn(h,512,phase)
h = conv_bn(h,512,phase)
h = MaxPooling2D((2,2), strides=(2,2))(h)
# Block 5
h = conv_bn(h,512,phase)
h = conv_bn(h,512,phase)
h = conv_bn(h,512,phase)
h = MaxPooling2D((2,2), strides=(2,2))(h)
h = Flatten()(h)
self.pred = Dense(num_classes, activation='softmax')(h)
pred = tf.clip_by_value(self.pred,eps,1-eps)
loss = -tf.reduce_sum(tf.log(pred)*self.y)
correct_prediction = tf.equal(tf.argmax(self.y, 1), tf.argmax(self.pred, 1))
self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32), name='accuracy')
optimizer = tf.train.AdamOptimizer(0.001)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
# Ensures that we execute the update_ops before performing the train_step
self.train_step = optimizer.minimize(loss)
class VGGVarDropout(Net):
def __init__(self, img_size, num_channels, num_classes):
# Based on https://github.com/fchollet/keras/blob/master/keras/applications/vgg16.py
self.x = tf.placeholder(tf.float32, [None,img_size,img_size,num_channels], 'x')
self.y = tf.placeholder(tf.float32, [None,num_classes], 'y')
self.deterministic = tf.placeholder(tf.bool, name='d')
d = self.deterministic
phase = tf.logical_not(d)
def conv_bn(h, filters_in, filters_out, d, phase):
h = Conv2DVarDropout(filters_in, filters_out, (3,3), padding='SAME', nonlinearity=tf.identity)(h,d) # Linear
h = tf.contrib.layers.batch_norm(h, center=True, scale=False, is_training=phase)
return tf.nn.relu(h)
# Block 1
h = conv_bn(self.x, num_channels, 64, d, phase)
h = conv_bn(h, 64, 64, d, phase)
h = MaxPooling2D((2, 2), strides=(2,2))(h)
# Block 2
h = conv_bn(h, 64, 128, d, phase)
h = conv_bn(h, 128, 128, d, phase)
h = MaxPooling2D((2, 2), strides=(2,2))(h)
# Block 3
h = conv_bn(h, 128, 256, d, phase)
h = conv_bn(h, 256, 256, d, phase)
h = conv_bn(h, 256, 256, d, phase)
h = MaxPooling2D((2,2), strides=(2,2))(h)
# Block 4
h = conv_bn(h, 256, 512, d, phase)
h = conv_bn(h, 512, 512, d, phase)
h = conv_bn(h, 512, 512, d, phase)
h = MaxPooling2D((2, 2), strides=(2, 2))(h)
# Block 5
h = conv_bn(h, 512, 512, d, phase)
h = conv_bn(h, 512, 512, d, phase)
h = conv_bn(h, 512, 512, d, phase)
h = MaxPooling2D((2, 2), strides=(2, 2))(h)
h = Flatten()(h)
self.pred = FCVarDropout(512, num_classes, tf.nn.softmax)(h,d)
pred = tf.clip_by_value(self.pred,eps,1-eps)
W = tf.get_collection('W')
log_sigma2 = tf.get_collection('log_sigma2')
loss = sgvlb(pred, self.y, W, log_sigma2, batch_size)
correct_prediction = tf.equal(tf.argmax(self.y, 1), tf.argmax(self.pred, 1))
self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32), name='accuracy')
optimizer = tf.train.AdamOptimizer(0.0001)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
# Ensures that we execute the update_ops before performing the train_step
self.train_step = optimizer.minimize(loss)
| gpl-3.0 |
ziky5/F4500_Python_pro_fyziky | lekce_07/fisher.py | 1 | 1459 | import pandas
import scipy.optimize
import matplotlib.pyplot as plt  # needed at module scope by FisherGraph.graph
def graphinit():
import matplotlib.pyplot as plt
fontsize=25
plt.rcParams['figure.autolayout'] = False
plt.rcParams['figure.figsize'] = 12, 7
plt.rcParams['axes.labelsize'] = 25
plt.rcParams['axes.titlesize'] = 25
plt.rcParams['font.size'] = 25
plt.rcParams['lines.linewidth'] = 2.0
plt.rcParams['lines.markersize'] = 12
plt.rcParams['legend.fontsize'] = 25
class Fisher:
def __init__(self,filename):
data = pandas.read_csv(filename)
        # relation: C = A * B
self.T = data['T']
self.C = data['A'] * data['B']
def f(self,x,C0,C1):
return C0 * x + C1
def fit(self):
self.popt, self.pcov = scipy.optimize.curve_fit(self.f, self.T, self.C)
print(self.popt)
class FisherGraph(Fisher):
def __init__(self,filename):
# initializace Fishera
Fisher.__init__(self,filename)
def graph(self,xlim,xlabel,ylabel,title):
y = self.f(self.T, self.popt[0], self.popt[1]) # y = C0 * x + C1
plt.clf()
plt.cla()
plt.plot(self.T, self.C, 'o', label='data')
plt.plot(self.T, y, label=r'$y = C_0 \, x + C_1$')
plt.legend(loc='best')
plt.xlim(xlim)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.title(title)
import sys
if __name__=="__main__":
ff=FisherGraph(sys.argv[1])
print(ff.fit()) | mit |
SiLab-Bonn/fe65_p2 | fe65p2/scans/pixel_register_test.py | 1 | 10967 | '''
This scan writes patterns of bits into the pixel registers
and reads them back. The occurring errors are counted and a
Shmoo plot is produced and written to a .pdf.
The supply voltage and the .bit file loaded can be changed
from here.
Global registers are checked as well and a second Shmoo
plot is printed in the same pdf.
'''
from fe65p2.scan_base import ScanBase
import time
import os
import sys
import bitarray
import logging
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from matplotlib.table import Table
logging.basicConfig(level=logging.INFO,
format="%(asctime)s - %(name)s - [%(levelname)-8s] (%(threadName)-10s) %(message)s")
local_configuration = {
"mask_steps": 4,
"columns": [True] * 16,
# DAC parameters
"PrmpVbpDac": 36,
"vthin1Dac": 255,
"vthin2Dac": 0,
"vffDac": 42,
"PrmpVbnFolDac": 51,
"vbnLccDac": 1,
"compVbnDac": 25,
"preCompVbnDac": 50
}
class proofread_scan(ScanBase):
scan_id = "proof_read_scan"
def __init__(self, dut_conf=None):
super(proofread_scan, self).__init__(dut_conf)
def scan(self, mask_steps=4, columns=[True] * 16, **kwargs):
# bitfiles = ["fe65p2_mio_40MHz.bit"]
# voltages = [2.0]
self.dut['global_conf']['PrmpVbpDac'] = kwargs['PrmpVbpDac']
self.dut['global_conf']['vthin1Dac'] = kwargs['vthin1Dac']
self.dut['global_conf']['vthin2Dac'] = kwargs['vthin2Dac']
self.dut['global_conf']['vffDac'] = 42
self.dut['global_conf']['PrmpVbnFolDac'] = kwargs['PrmpVbnFolDac']
self.dut['global_conf']['vbnLccDac'] = kwargs['vbnLccDac']
self.dut['global_conf']['compVbnDac'] = kwargs['compVbnDac']
self.dut['global_conf']['preCompVbnDac'] = 50
#scan_path = os.path.dirname(os.path.realpath(sys.argv[0]))
#path = scan_path.replace('fe65p2/scans','firmware/bits/goodSPI_bits/')
path = "/home/topcoup/Applications/fe65_p2/firmware/bits/goodSPI_bits/"
self.bitfiles = ["fe65p2_mio_3MHz.bit", "fe65p2_mio_4MHz.bit", "fe65p2_mio_6MHz.bit",
"fe65p2_mio_8MHz.bit", "fe65p2_mio_12MHz.bit", "fe65p2_mio_16MHz.bit",
"fe65p2_mio_24MHz.bit", "fe65p2_mio_32MHz.bit"]
self.voltages = [1.25, 1.2, 1.1, 1.0, 0.95, 0.90]
self.shmoo_errors = []
self.shmoo_global_errors = []
for bitfile in self.bitfiles:
logging.info("Loading " + bitfile)
setstatus = self.dut['intf']._sidev.DownloadXilinx(path + bitfile)
            if setstatus != 0:
                logging.error("Failed to load bitfile " + bitfile)
                break
for volt in self.voltages:
self.dut['control']['RESET'] = 1
self.dut['control'].write()
self.dut['control']['RESET'] = 0
self.dut['control'].write()
# to change the supply voltage
self.dut['VDDA'].set_current_limit(200, unit='mA')
self.dut['VDDA'].set_voltage(volt, unit='V')
self.dut['VDDA'].set_enable(True)
self.dut['VDDD'].set_voltage(volt, unit='V')
self.dut['VDDD'].set_enable(True)
self.dut['VAUX'].set_voltage(1.25, unit='V')
self.dut['VAUX'].set_enable(True)
# global reg
self.dut['global_conf']['PrmpVbpDac'] = kwargs['PrmpVbpDac']
self.dut['global_conf']['vthin1Dac'] = kwargs['vthin1Dac']
self.dut['global_conf']['vthin2Dac'] = kwargs['vthin2Dac']
self.dut['global_conf']['vffDac'] = 42
self.dut['global_conf']['PrmpVbnFolDac'] = 51
self.dut['global_conf']['vbnLccDac'] = 51
self.dut['global_conf']['compVbnDac'] = kwargs['compVbnDac']
self.dut['global_conf']['preCompVbnDac'] = 50
self.dut['global_conf']['OneSr'] = 1
self.dut['global_conf']['SPARE'] = 0 # added by me, default 0
self.dut['global_conf']['ColEn'] = 0 # added by me, default 0
# added by me, default 15
self.dut['global_conf']['ColSrEn'] = 15
# added by me, default 0
self.dut['global_conf']['Latency'] = 400
self.dut['global_conf']['ColSrEn'].setall(
True) # enable programming of all columns
self.dut.write_global()
self.dut.write_global() # need to write 2 times!
logging.info(self.dut.power_status()) # prints power supply
send = self.dut['global_conf'].tobytes()
rec = self.dut['global_conf'].get_data(size=19)
rec[18] = rec[18] & 0b1000000
glob_errors = [i for i in range(
len(send)) if send[i] != rec[i]]
if (len(glob_errors) > 0):
logging.warning("*** GLOBAL ERRORS " +
str(len(glob_errors)))
self.shmoo_global_errors.append(len(glob_errors))
# for j in range(len(glob_errors)):
# print "in position ", j, "value ", glob_errors[j] #if you want to know where is the error
# pixel reg
self.dut['pixel_conf'][0] = 1
self.dut.write_pixel()
self.dut['control']['RESET'] = 0b11
self.dut['control'].write()
lmask = ([0] * (mask_steps - 1)) + [1] # 1,0,0,0 pattern
lmask = lmask * ((64 * 64) / mask_steps + 1)
# 1,0,0,0 pattern for a total of 4096 bits
lmask = lmask[:64 * 64]
bv_mask = bitarray.bitarray(lmask) # convert in binary
errors = [] # used for screen output - debug
ERR = [] # pixel errors storage
err_count = 0
logging.info('Temperature: %s', str(
self.dut['ntc'].get_temperature('C')))
for i in range(0, 4):
self.dut['pixel_conf'][:] = bv_mask
self.dut.write_pixel()
self.dut.write_pixel()
time.sleep(0.5)
returned_data = ''.join(format(x, '08b')
for x in self.dut['pixel_conf'].get_data())
# the readout comes upside down
returned_data_reversed = returned_data[::-1]
pix_send = bv_mask
pix_rec = bitarray.bitarray(returned_data_reversed)
logging.debug('s ' + str(pix_send[:8]))
logging.debug('r ' + str(pix_rec[:8]))
errors.append([])
for bit in xrange(len(pix_send)):
if pix_send[bit] != pix_rec[bit]:
errors[i].append(bit)
ERR.append(bit)
err_count += 1
time.sleep(0.2)
# shift the bit pattern
bv_mask = bv_mask[1:] + bv_mask[:1]
self.shmoo_errors.append(err_count)
if len(errors[i]) > 0:
logging.warning("*** PIXEL ERRORS ***")
for i in range(0, len(errors)):
logging.warning("iteration " + str(i) + " errors " + str(
len(errors[i]))) # , " at ", ' '.join([str(x) for x in errors[i]])
def shmoo_plotting(self):
''' pixel register shmoo plot '''
plotname = "PixReg_" + str(time.strftime("%Y%m%d_%H%M%S_")) + ".pdf"
shmoopdf = PdfPages(plotname)
shmoonp = np.array(self.shmoo_errors)
data = shmoonp.reshape(len(self.voltages), -1, order='F')
fig, ax = plt.subplots()
plt.title('Pixel registers errors')
ax.set_axis_off()
fig.text(0.70, 0.05, 'SPI clock (MHz)', fontsize=14)
fig.text(0.02, 0.90, 'Supply voltage (V)', fontsize=14, rotation=90)
tb = Table(ax, bbox=[0.01, 0.01, 0.99, 0.99])
ncols = len(self.bitfiles)
nrows = len(self.voltages)
width, height = 1.0 / ncols, 1.0 / nrows
# Add cells
for (i, j), val in np.ndenumerate(data):
color = ''
if val == 0:
color = 'green'
            if 0 < val <= 10:
color = 'yellow'
if val > 10:
color = 'red'
tb.add_cell(i, j, width, height, text=str(val),
loc='center', facecolor=color)
# Row Labels...
for i in range(len(self.voltages)):
tb.add_cell(i, -1, width, height, text=str(self.voltages[i]) + 'V', loc='right',
edgecolor='none', facecolor='none')
# Column Labels...
for j in range(len(self.bitfiles)):
freq_label = self.bitfiles[j].replace(
'fe65p2_mio_', '').replace('MHz.bit', '')
tb.add_cell(nrows + 1, j, width, height / 2, text=freq_label + ' MHz', loc='center',
edgecolor='none', facecolor='none')
ax.add_table(tb)
shmoopdf.savefig()
''' global register shmoo plot '''
shmoo_glob_np = np.array(self.shmoo_global_errors)
data_g = shmoo_glob_np.reshape(len(self.voltages), -1, order='F')
fig_g, ax_g = plt.subplots()
ax_g.set_axis_off()
fig_g.text(0.70, 0.05, 'SPI clock (MHz)', fontsize=14)
fig_g.text(0.02, 0.90, 'Supply voltage (V)', fontsize=14, rotation=90)
tb_g = Table(ax_g, bbox=[0.01, 0.01, 0.99, 0.99])
plt.title('Global registers errors')
# Add cells
for (i, j), val_g in np.ndenumerate(data_g):
color = ''
if val_g == 0:
color = 'green'
if val_g > 0:
color = 'red'
tb_g.add_cell(i, j, width, height, text=str(val_g),
loc='center', facecolor=color)
# Row Labels...
for i in range(len(self.voltages)):
tb_g.add_cell(i, -1, width, height, text=str(self.voltages[i]) + 'V', loc='right',
edgecolor='none', facecolor='none')
# Column Labels...
for j in range(len(self.bitfiles)):
freq_label = self.bitfiles[j].replace(
'fe65p2_mio_', '').replace('MHz.bit', '')
tb_g.add_cell(nrows + 1, j, width, height / 2, text=freq_label + ' MHz', loc='center',
edgecolor='none', facecolor='none')
ax_g.add_table(tb_g)
shmoopdf.savefig()
shmoopdf.close()
if __name__ == "__main__":
scan = proofread_scan(
"/home/user/Desktop/carlo/fe65_p2/fe65p2/fe65p2.yaml")
scan.start(**local_configuration)
scan.shmoo_plotting()
| gpl-2.0 |
scenarios/tensorflow | tensorflow/contrib/learn/python/learn/tests/dataframe/arithmetic_transform_test.py | 18 | 2568 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for arithmetic transforms."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
# TODO: #6568 Remove this hack that makes dlopen() not crash.
if hasattr(sys, "getdlopenflags") and hasattr(sys, "setdlopenflags"):
import ctypes
sys.setdlopenflags(sys.getdlopenflags() | ctypes.RTLD_GLOBAL)
import numpy as np
from tensorflow.contrib.learn.python.learn.dataframe import tensorflow_dataframe as df
from tensorflow.python.platform import test
# pylint: disable=g-import-not-at-top
try:
import pandas as pd
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
class SumTestCase(test.TestCase):
"""Test class for `Sum` transform."""
def testSum(self):
if not HAS_PANDAS:
return
num_rows = 100
pandas_df = pd.DataFrame({
"a": np.arange(num_rows),
"b": np.arange(num_rows, 2 * num_rows)
})
frame = df.TensorFlowDataFrame.from_pandas(
pandas_df, shuffle=False, batch_size=num_rows)
frame["a+b"] = frame["a"] + frame["b"]
expected_sum = pandas_df["a"] + pandas_df["b"]
actual_sum = frame.run_one_batch()["a+b"]
np.testing.assert_array_equal(expected_sum, actual_sum)
class DifferenceTestCase(test.TestCase):
"""Test class for `Difference` transform."""
def testDifference(self):
if not HAS_PANDAS:
return
num_rows = 100
pandas_df = pd.DataFrame({
"a": np.arange(num_rows),
"b": np.arange(num_rows, 2 * num_rows)
})
frame = df.TensorFlowDataFrame.from_pandas(
pandas_df, shuffle=False, batch_size=num_rows)
frame["a-b"] = frame["a"] - frame["b"]
expected_diff = pandas_df["a"] - pandas_df["b"]
actual_diff = frame.run_one_batch()["a-b"]
np.testing.assert_array_equal(expected_diff, actual_diff)
if __name__ == "__main__":
test.main()
| apache-2.0 |
herilalaina/scikit-learn | examples/plot_kernel_ridge_regression.py | 15 | 6336 | """
=============================================
Comparison of kernel ridge regression and SVR
=============================================
Both kernel ridge regression (KRR) and SVR learn a non-linear function by
employing the kernel trick, i.e., they learn a linear function in the space
induced by the respective kernel which corresponds to a non-linear function in
the original space. They differ in the loss functions (ridge versus
epsilon-insensitive loss). In contrast to SVR, fitting a KRR can be done in
closed-form and is typically faster for medium-sized datasets. On the other
hand, the learned model is non-sparse and thus slower than SVR at
prediction-time.
This example illustrates both methods on an artificial dataset, which
consists of a sinusoidal target function and strong noise added to every fifth
datapoint. The first figure compares the learned model of KRR and SVR when both
complexity/regularization and bandwidth of the RBF kernel are optimized using
grid-search. The learned functions are very similar; however, fitting KRR is
approx. seven times faster than fitting SVR (both with grid-search). However,
prediction of 100000 target values is more than three times faster with SVR
since it has learned a sparse model using only approx. 1/3 of the 100 training
datapoints as support vectors.
The next figure compares the time for fitting and prediction of KRR and SVR for
different sizes of the training set. Fitting KRR is faster than SVR for medium-
sized training sets (less than 1000 samples); however, for larger training sets
SVR scales better. With regard to prediction time, SVR is faster than
KRR for all sizes of the training set because of the learned sparse
solution. Note that the degree of sparsity and thus the prediction time depends
on the parameters epsilon and C of the SVR.
"""
# Authors: Jan Hendrik Metzen <[email protected]>
# License: BSD 3 clause
from __future__ import division
import time
import numpy as np
from sklearn.svm import SVR
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import learning_curve
from sklearn.kernel_ridge import KernelRidge
import matplotlib.pyplot as plt
rng = np.random.RandomState(0)
# #############################################################################
# Generate sample data
X = 5 * rng.rand(10000, 1)
y = np.sin(X).ravel()
# Add noise to targets
y[::5] += 3 * (0.5 - rng.rand(X.shape[0] // 5))
X_plot = np.linspace(0, 5, 100000)[:, None]
# #############################################################################
# Fit regression model
train_size = 100
svr = GridSearchCV(SVR(kernel='rbf', gamma=0.1), cv=5,
param_grid={"C": [1e0, 1e1, 1e2, 1e3],
"gamma": np.logspace(-2, 2, 5)})
kr = GridSearchCV(KernelRidge(kernel='rbf', gamma=0.1), cv=5,
param_grid={"alpha": [1e0, 0.1, 1e-2, 1e-3],
"gamma": np.logspace(-2, 2, 5)})
t0 = time.time()
svr.fit(X[:train_size], y[:train_size])
svr_fit = time.time() - t0
print("SVR complexity and bandwidth selected and model fitted in %.3f s"
% svr_fit)
t0 = time.time()
kr.fit(X[:train_size], y[:train_size])
kr_fit = time.time() - t0
print("KRR complexity and bandwidth selected and model fitted in %.3f s"
% kr_fit)
sv_ratio = svr.best_estimator_.support_.shape[0] / train_size
print("Support vector ratio: %.3f" % sv_ratio)
t0 = time.time()
y_svr = svr.predict(X_plot)
svr_predict = time.time() - t0
print("SVR prediction for %d inputs in %.3f s"
% (X_plot.shape[0], svr_predict))
t0 = time.time()
y_kr = kr.predict(X_plot)
kr_predict = time.time() - t0
print("KRR prediction for %d inputs in %.3f s"
% (X_plot.shape[0], kr_predict))
# #############################################################################
# Look at the results
sv_ind = svr.best_estimator_.support_
plt.scatter(X[sv_ind], y[sv_ind], c='r', s=50, label='SVR support vectors',
zorder=2, edgecolors=(0, 0, 0))
plt.scatter(X[:100], y[:100], c='k', label='data', zorder=1,
edgecolors=(0, 0, 0))
plt.plot(X_plot, y_svr, c='r',
label='SVR (fit: %.3fs, predict: %.3fs)' % (svr_fit, svr_predict))
plt.plot(X_plot, y_kr, c='g',
label='KRR (fit: %.3fs, predict: %.3fs)' % (kr_fit, kr_predict))
plt.xlabel('data')
plt.ylabel('target')
plt.title('SVR versus Kernel Ridge')
plt.legend()
# Visualize training and prediction time
plt.figure()
# Generate sample data
X = 5 * rng.rand(10000, 1)
y = np.sin(X).ravel()
y[::5] += 3 * (0.5 - rng.rand(X.shape[0] // 5))
sizes = np.logspace(1, 4, 7, dtype=np.int)
for name, estimator in {"KRR": KernelRidge(kernel='rbf', alpha=0.1,
gamma=10),
"SVR": SVR(kernel='rbf', C=1e1, gamma=10)}.items():
train_time = []
test_time = []
for train_test_size in sizes:
t0 = time.time()
estimator.fit(X[:train_test_size], y[:train_test_size])
train_time.append(time.time() - t0)
t0 = time.time()
estimator.predict(X_plot[:1000])
test_time.append(time.time() - t0)
plt.plot(sizes, train_time, 'o-', color="r" if name == "SVR" else "g",
label="%s (train)" % name)
plt.plot(sizes, test_time, 'o--', color="r" if name == "SVR" else "g",
label="%s (test)" % name)
plt.xscale("log")
plt.yscale("log")
plt.xlabel("Train size")
plt.ylabel("Time (seconds)")
plt.title('Execution Time')
plt.legend(loc="best")
# Visualize learning curves
plt.figure()
svr = SVR(kernel='rbf', C=1e1, gamma=0.1)
kr = KernelRidge(kernel='rbf', alpha=0.1, gamma=0.1)
train_sizes, train_scores_svr, test_scores_svr = \
learning_curve(svr, X[:100], y[:100], train_sizes=np.linspace(0.1, 1, 10),
scoring="neg_mean_squared_error", cv=10)
train_sizes_abs, train_scores_kr, test_scores_kr = \
learning_curve(kr, X[:100], y[:100], train_sizes=np.linspace(0.1, 1, 10),
scoring="neg_mean_squared_error", cv=10)
plt.plot(train_sizes, -test_scores_svr.mean(1), 'o-', color="r",
label="SVR")
plt.plot(train_sizes, -test_scores_kr.mean(1), 'o-', color="g",
label="KRR")
plt.xlabel("Train size")
plt.ylabel("Mean Squared Error")
plt.title('Learning curves')
plt.legend(loc="best")
plt.show()
| bsd-3-clause |
DiCarloLab-Delft/PycQED_py3 | pycqed/instrument_drivers/meta_instrument/LutMans/flux_lutman_vcz.py | 1 | 44225 | from .base_lutman import Base_LutMan, get_wf_idx_from_name
import numpy as np
from copy import copy
from qcodes.instrument.parameter import ManualParameter, InstrumentRefParameter
from qcodes.utils import validators as vals
from pycqed.measurement.waveform_control_CC import waveform as wf
from pycqed.measurement.waveform_control_CC import waveforms_flux as wfl
from pycqed.measurement.waveform_control_CC import waveforms_vcz as wf_vcz
import PyQt5
from qcodes.plots.pyqtgraph import QtPlot
import matplotlib.pyplot as plt
from pycqed.analysis.tools.plotting import set_xlabel, set_ylabel
import time
import logging
log = logging.getLogger(__name__)
"""
The default schema of this LutMap allows for 4 different 2Q gates.
NW NE
\ /
Q
/ \
SW SE
First codeword is assigned to idling.
Codewords 2-5 are assigned to the two-qubit gates in clockwise order
(NE - SE - SW - NW)
Then we assign single qubit fluxing operations (parking and square)
Last codeword is reserved for custom waveforms defined by the user.
Args:
lutmap
Return:
valid (bool)
The schema for a lutmap is a dictionary with integer keys.
Every item in the dictionary must have the following keys:
"name" : str
"type" : one of valid_types
{'idle', 'cz', 'idle_z', 'square', 'custom'}
"which": str, optional used for two qubit flux pulses and one of
{"NE", "SE", "SW", "NW"}
"""
_def_lm = {
0: {"name": "i", "type": "idle"},
1: {"name": "cz_NE", "type": "idle_z", "which": "NE"},
2: {"name": "cz_SE", "type": "cz", "which": "SE"},
3: {"name": "cz_SW", "type": "cz", "which": "SW"},
4: {"name": "cz_NW", "type": "idle_z", "which": "NW"},
5: {"name": "park", "type": "square"},
6: {"name": "square", "type": "square"},
7: {"name": "custom_wf", "type": "custom"},
}
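# Minimal validity-check sketch for the lutmap schema described in the module
# docstring above. This helper is not part of the original module; the exact
# checks are an assumption based on that description and on ``_def_lm``.
def _lutmap_is_valid_sketch(lutmap: dict) -> bool:
    valid_types = {"idle", "cz", "idle_z", "square", "custom"}
    valid_which = {"NE", "SE", "SW", "NW"}
    for idx, waveform in lutmap.items():
        try:
            int(idx)  # keys are codeword numbers (stored as int or str)
        except (TypeError, ValueError):
            return False
        if "name" not in waveform or "type" not in waveform:
            return False
        if waveform["type"] not in valid_types:
            return False
        # "which" is only required for the two-qubit flux pulses
        if waveform["type"] in ("cz", "idle_z") and waveform.get("which") not in valid_which:
            return False
    return True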
class Base_Flux_LutMan(Base_LutMan):
"""
The default scheme of this LutMap allows for 4 different 2Q gates.
NW NE
\ /
Q
/ \
SW SE
"""
def render_wave(
self,
wave_name,
time_units="s",
reload_pulses: bool = True,
render_distorted_wave: bool = True,
QtPlot_win=None,
):
"""
Renders a waveform
"""
if reload_pulses:
self.generate_standard_waveforms()
x = np.arange(len(self._wave_dict[wave_name]))
y = self._wave_dict[wave_name]
if time_units == "lut_index":
xlab = ("Lookuptable index", "i")
elif time_units == "s":
x = x / self.sampling_rate()
xlab = ("Time", "s")
if QtPlot_win is None:
QtPlot_win = QtPlot(window_title=wave_name, figsize=(600, 400))
if render_distorted_wave:
if wave_name in self._wave_dict_dist.keys():
x2 = np.arange(len(self._wave_dict_dist[wave_name]))
if time_units == "s":
x2 = x2 / self.sampling_rate()
y2 = self._wave_dict_dist[wave_name]
QtPlot_win.add(
x=x2,
y=y2,
name=wave_name + " distorted",
symbol="o",
symbolSize=5,
xlabel=xlab[0],
xunit=xlab[1],
ylabel="Amplitude",
yunit="dac val.",
)
else:
log.warning("Wave not in distorted wave dict")
# Plotting the normal one second ensures it is on top.
QtPlot_win.add(
x=x,
y=y,
name=wave_name,
symbol="o",
symbolSize=5,
xlabel=xlab[0],
xunit=xlab[1],
ylabel="Amplitude",
yunit="V",
)
return QtPlot_win
class HDAWG_Flux_LutMan(Base_Flux_LutMan):
def __init__(self, name, **kw):
super().__init__(name, **kw)
self._wave_dict_dist = dict()
self.sampling_rate(2.4e9)
self._add_qubit_parameters()
self._add_cz_sim_parameters()
def set_default_lutmap(self):
"""Set the default lutmap for standard microwave drive pulses."""
self.LutMap(_def_lm.copy())
def generate_standard_waveforms(self):
"""
Generate all the standard waveforms and populates self._wave_dict
"""
self._wave_dict = {}
# N.B. the naming convention ._gen_{waveform_name} must be preserved
# as it is used in the load_waveform_onto_AWG_lookuptable method.
self._wave_dict["i"] = self._gen_i()
self._wave_dict["square"] = self._gen_square()
self._wave_dict["park"] = self._gen_park()
self._wave_dict["custom_wf"] = self._gen_custom_wf()
for _, waveform in self.LutMap().items():
wave_name = waveform["name"]
if waveform["type"] == "cz" or waveform["type"] == "idle_z":
which_gate = waveform["which"]
if waveform["type"] == "cz":
self._wave_dict[wave_name] = self._gen_cz(which_gate=which_gate)
elif waveform["type"] == "idle_z":
# The vcz pulse itself has all parameters necessary for the correction
self._wave_dict[wave_name] = self._gen_cz(which_gate=which_gate)
def _gen_i(self):
return np.zeros(int(self.idle_pulse_length() * self.sampling_rate()))
def _gen_square(self):
return wf.single_channel_block(
amp=self.sq_amp(),
length=self.sq_length(),
sampling_rate=self.sampling_rate(),
delay=self.sq_delay(),
)
def _gen_park(self):
if self.park_double_sided():
ones = np.ones(int(self.park_length() * self.sampling_rate() / 2))
pulse_pos = self.park_amp() * ones
return np.concatenate((pulse_pos, - pulse_pos))
else:
return self.park_amp() * np.ones(
int(self.park_length() * self.sampling_rate())
)
def _add_qubit_parameters(self):
"""
Adds parameters responsible for keeping track of qubit frequencies,
coupling strengths etc.
"""
self.add_parameter(
"q_polycoeffs_freq_01_det",
docstring="Coefficients of the polynomial used to convert "
"amplitude in V to detuning in Hz. \nN.B. it is important to "
"include both the AWG range and channel amplitude in the params.\n"
"N.B.2 Sign convention: positive detuning means frequency is "
"higher than current frequency, negative detuning means its "
"smaller.\n"
"In order to convert a set of cryoscope flux arc coefficients to "
" units of Volts they can be rescaled using [c0*sc**2, c1*sc, c2]"
" where sc is the desired scaling factor that includes the sq_amp "
"used and the range of the AWG (5 in amp mode).",
vals=vals.Arrays(),
# initial value is chosen to not raise errors
initial_value=np.array([-2e9, 0, 0]),
parameter_class=ManualParameter,
)
self.add_parameter(
"q_polycoeffs_anharm",
docstring="coefficients of the polynomial used to calculate "
"the anharmonicity (Hz) as a function of amplitude in V. "
"N.B. it is important to "
"include both the AWG range and channel amplitude in the params.\n",
vals=vals.Arrays(),
# initial value sets a flux independent anharmonicity of 300MHz
initial_value=np.array([0, 0, -300e6]),
parameter_class=ManualParameter,
)
self.add_parameter(
"q_freq_01",
vals=vals.Numbers(),
docstring="Current operating frequency of qubit",
# initial value is chosen to not raise errors
initial_value=6e9,
unit="Hz",
parameter_class=ManualParameter,
)
for this_cz in ["NE", "NW", "SW", "SE"]:
self.add_parameter(
"q_freq_10_%s" % this_cz,
vals=vals.Numbers(),
docstring="Current operating frequency of qubit"
" with which a CZ gate can be performed.",
# initial value is chosen to not raise errors
initial_value=6e9,
unit="Hz",
parameter_class=ManualParameter,
)
self.add_parameter(
"q_J2_%s" % this_cz,
vals=vals.Numbers(1e3, 500e6),
unit="Hz",
docstring="effective coupling between the 11 and 02 states.",
# initial value is chosen to not raise errors
initial_value=15e6,
parameter_class=ManualParameter,
)
def _add_waveform_parameters(self):
# CODEWORD 1: Idling
self.add_parameter(
"idle_pulse_length",
unit="s",
label="Idling pulse length",
initial_value=40e-9,
vals=vals.Numbers(0, 100e-6),
parameter_class=ManualParameter,
)
# CODEWORDS 1-4: CZ
# [2020-06-23] This dictionary is added here to be extended if a new or
# different flux waveform for cz is to be tested
# The cz waveform generators receive the `fluxlutman` and `which_gate`
# as arguments
self._cz_wf_generators_dict = {
"vcz_waveform": wf_vcz.vcz_waveform
}
for this_cz in ["NE", "NW", "SW", "SE"]:
self.add_parameter(
"cz_wf_generator_%s" % this_cz,
                initial_value="vcz_waveform",  # must be a key of _cz_wf_generators_dict
vals=vals.Strings(),
parameter_class=ManualParameter,
)
wf_vcz.add_vcz_parameters(self, which_gate=this_cz)
# CODEWORD 5: Parking
self.add_parameter(
"park_length",
unit="s",
label="Parking pulse duration",
initial_value=40e-9,
vals=vals.Numbers(0, 100e-6),
parameter_class=ManualParameter,
)
self.add_parameter(
"park_amp",
initial_value=0,
label="Parking pulse amp. pos.",
docstring="Parking pulse amplitude if `park_double_sided` is `False`, "
"or positive amplitude for Net-Zero",
unit="dac value",
vals=vals.Numbers(),
parameter_class=ManualParameter,
)
self.add_parameter(
"park_double_sided",
initial_value=False,
vals=vals.Bool(),
parameter_class=ManualParameter,
)
# CODEWORD 6: SQUARE
self.add_parameter(
"sq_amp",
initial_value=0.5,
# units is part of the total range of AWG8
label="Square pulse amplitude",
unit="dac value",
vals=vals.Numbers(),
parameter_class=ManualParameter,
)
self.add_parameter(
"sq_length",
unit="s",
label="Square pulse duration",
initial_value=40e-9,
vals=vals.Numbers(0, 100e-6),
parameter_class=ManualParameter,
)
self.add_parameter(
"sq_delay",
unit="s",
label="Square pulse delay",
initial_value=0e-9,
vals=vals.Numbers(0, 100e-6),
parameter_class=ManualParameter,
)
# CODEWORD 7: CUSTOM
self.add_parameter(
"custom_wf",
initial_value=np.array([]),
label="Custom waveform",
docstring=(
"Specifies a custom waveform, note that "
"`custom_wf_length` is used to cut of the waveform if"
"it is set."
),
parameter_class=ManualParameter,
vals=vals.Arrays(),
)
self.add_parameter(
"custom_wf_length",
unit="s",
label="Custom waveform length",
initial_value=np.inf,
docstring=(
"Used to determine at what sample the custom waveform "
"is forced to zero. This is used to facilitate easy "
"cryoscope measurements of custom waveforms."
),
parameter_class=ManualParameter,
vals=vals.Numbers(min_value=0),
)
def _gen_cz(self, which_gate, regenerate_cz=True):
gate_str = "cz_%s" % which_gate
wf_generator_name = self.get("cz_wf_generator_{}".format(which_gate))
wf_generator = self._cz_wf_generators_dict[wf_generator_name]
if regenerate_cz:
self._wave_dict[gate_str] = wf_generator(self, which_gate=which_gate)
cz_pulse = self._wave_dict[gate_str]
return cz_pulse
def calc_amp_to_eps(
self,
amp: float,
state_A: str = "01",
state_B: str = "02",
which_gate: str = "NE",
):
"""
Calculates detuning between two levels as a function of pulse
amplitude in Volt.
ε(V) = f_B (V) - f_A (V)
Args:
amp (float) : amplitude in Volt
state_A (str) : string of 2 numbers denoting the state. The numbers
correspond to the number of excitations in each qubits.
The LSQ (right) corresponds to the qubit being fluxed and
under control of this flux lutman.
state_B (str) :
N.B. this method assumes that the polycoeffs are with respect to the
amplitude in units of V, including rescaling due to the channel
amplitude and range settings of the AWG8.
See also `self.get_dac_val_to_amp_scalefactor`.
amp_Volts = amp_dac_val * channel_amp * channel_range
"""
polycoeffs_A = self.get_polycoeffs_state(state=state_A, which_gate=which_gate)
polycoeffs_B = self.get_polycoeffs_state(state=state_B, which_gate=which_gate)
polycoeffs = polycoeffs_B - polycoeffs_A
return np.polyval(polycoeffs, amp)
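    # Worked example using only the parameter initial values above (not
    # calibration data): with q_polycoeffs_freq_01_det = [-2e9, 0, 0],
    # q_polycoeffs_anharm = [0, 0, -300e6] and q_freq_01 = 6 GHz, the 01->02
    # polynomial difference is [-4e9, 0, 11.7e9] - [-2e9, 0, 6e9]
    # = [-2e9, 0, 5.7e9], so calc_amp_to_eps(0.1, "01", "02")
    # ~ -2e9 * 0.1**2 + 5.7e9 = 5.68 GHz.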
def calc_eps_to_dac(
self,
eps,
state_A: str = "01",
state_B: str = "02",
which_gate: str = "NE",
positive_branch=True,
):
"""
See `calc_eps_to_amp`
"""
return (
self.calc_eps_to_amp(eps, state_A, state_B, which_gate, positive_branch)
* self.get_amp_to_dac_val_scalefactor()
)
def calc_eps_to_amp(
self,
eps,
state_A: str = "01",
state_B: str = "02",
which_gate: str = "NE",
positive_branch=True,
):
"""
Calculates amplitude in Volt corresponding to an energy difference
between two states in Hz.
V(ε) = V(f_b - f_a)
N.B. this method assumes that the polycoeffs are with respect to the
amplitude in units of V, including rescaling due to the channel
amplitude and range settings of the AWG8.
See also `self.get_dac_val_to_amp_scalefactor`.
amp_Volts = amp_dac_val * channel_amp * channel_range
"""
# recursive allows dealing with an array of freqs
if isinstance(eps, (list, np.ndarray)):
return np.array(
[
self.calc_eps_to_amp(
eps=e,
state_A=state_A,
state_B=state_B,
which_gate=which_gate,
positive_branch=positive_branch,
)
for e in eps
]
)
polycoeffs_A = self.get_polycoeffs_state(state=state_A, which_gate=which_gate)
if state_B is not None:
polycoeffs_B = self.get_polycoeffs_state(
state=state_B, which_gate=which_gate
)
polycoeffs = polycoeffs_B - polycoeffs_A
else:
polycoeffs = copy(polycoeffs_A)
polycoeffs[-1] = 0
p = np.poly1d(polycoeffs)
sols = (p - eps).roots
# sols returns 2 solutions (for a 2nd order polynomial)
if positive_branch:
sol = np.max(sols)
else:
sol = np.min(sols)
# imaginary part is ignored, instead sticking to closest real value
# float is because of a typecasting bug in np 1.12 (solved in 1.14)
return float(np.real(sol))
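    # Branch-selection sketch (same default-parameter polynomial as in the
    # calc_amp_to_eps example, i.e. eps(V) = -2e9 * V**2 + 5.7e9): solving for
    # V gives V = +/- sqrt((5.7e9 - eps) / 2e9); positive_branch=True returns
    # the + root, positive_branch=False the - root.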
def calc_net_zero_length_ratio(self, which_gate: str = "NE"):
"""
        Determine the length ratio of the net-zero pulses based on the
parameter "czd_length_ratio".
If czd_length_ratio is set to auto, uses the interaction amplitudes
to determine the scaling of lengths. Note that this is a coarse
approximation.
"""
czd_length_ratio = self.get("czd_length_ratio_%s" % which_gate)
if czd_length_ratio != "auto":
return czd_length_ratio
else:
amp_J2_pos = self.calc_eps_to_amp(
0,
state_A="11",
state_B="02",
which_gate=which_gate,
positive_branch=True,
)
amp_J2_neg = self.calc_eps_to_amp(
0,
state_A="11",
state_B="02",
which_gate=which_gate,
positive_branch=False,
)
# lr chosen to satisfy (amp_pos*lr + amp_neg*(1-lr) = 0 )
lr = -amp_J2_neg / (amp_J2_pos - amp_J2_neg)
return lr
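    # Worked example for the "auto" branch (illustrative numbers, not device
    # values): if the 11<->02 crossing sits at amp_J2_pos = +0.3 V and
    # amp_J2_neg = -0.2 V, the condition amp_pos*lr + amp_neg*(1-lr) = 0 gives
    # lr = -amp_J2_neg / (amp_J2_pos - amp_J2_neg) = 0.2 / 0.5 = 0.4.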
def get_polycoeffs_state(self, state: str, which_gate: str = "NE"):
"""
Args:
state (str) : string of 2 numbers denoting the state. The numbers
correspond to the number of excitations in each qubits.
The LSQ (right) corresponds to the qubit being fluxed and
under control of this flux lutman.
        Gets the polynomial coefficients that are used to calculate the
energy levels of specific states.
Note that avoided crossings are not taken into account here.
N.B. The value of which_gate (and its default) only affect the
other qubits (here noted as MSQ)
"""
# Depending on the interaction (North or South) this qubit fluxes or not.
        # depending on whether it fluxes, it is LSQ or MSQ
# depending on that, we use q_polycoeffs_freq_01_det or q_polycoeffs_freq_NE_det
polycoeffs = np.zeros(3)
freq_10 = self.get("q_freq_10_%s" % which_gate)
if state == "00":
pass
elif state == "01":
polycoeffs += self.q_polycoeffs_freq_01_det()
polycoeffs[2] += self.q_freq_01()
elif state == "02":
polycoeffs += 2 * self.q_polycoeffs_freq_01_det()
polycoeffs += self.q_polycoeffs_anharm()
polycoeffs[2] += 2 * self.q_freq_01()
elif state == "10":
polycoeffs[2] += freq_10
elif state == "11":
polycoeffs += self.q_polycoeffs_freq_01_det()
polycoeffs[2] += self.q_freq_01() + freq_10
else:
raise ValueError("State {} not recognized".format(state))
return polycoeffs
def _get_awg_channel_amplitude(self):
AWG = self.AWG.get_instr()
awg_ch = self.cfg_awg_channel() - 1 # -1 is to account for starting at 1
awg_nr = awg_ch // 2
ch_pair = awg_ch % 2
channel_amp = AWG.get("awgs_{}_outputs_{}_amplitude".format(awg_nr, ch_pair))
return channel_amp
def _set_awg_channel_amplitude(self, val):
AWG = self.AWG.get_instr()
awg_ch = self.cfg_awg_channel() - 1 # -1 is to account for starting at 1
awg_nr = awg_ch // 2
ch_pair = awg_ch % 2
AWG.set("awgs_{}_outputs_{}_amplitude".format(awg_nr, ch_pair), val)
def _get_awg_channel_range(self):
AWG = self.AWG.get_instr()
awg_ch = self.cfg_awg_channel() - 1 # -1 is to account for starting at 1
# channel range of 5 corresponds to -2.5V to +2.5V
for i in range(5):
channel_range_pp = AWG.get("sigouts_{}_range".format(awg_ch))
if channel_range_pp is not None:
break
time.sleep(0.5)
return channel_range_pp
def _get_wf_name_from_cw(self, codeword: int):
for idx, waveform in self.LutMap().items():
if int(idx) == codeword:
return waveform["name"]
raise ValueError("Codeword {} not specified" " in LutMap".format(codeword))
def _get_cw_from_wf_name(self, wf_name: str):
for idx, waveform in self.LutMap().items():
if wf_name == waveform["name"]:
return int(idx)
raise ValueError("Waveform {} not specified" " in LutMap".format(wf_name))
def _gen_custom_wf(self):
base_wf = copy(self.custom_wf())
if self.custom_wf_length() != np.inf:
# cuts of the waveform at a certain length by setting
# all subsequent samples to 0.
max_sample = int(self.custom_wf_length() * self.sampling_rate())
base_wf[max_sample:] = 0
return base_wf
def calc_freq_to_amp(
self,
freq: float,
state: str = "01",
which_gate: str = "NE",
positive_branch=True,
):
"""
Calculates amplitude in Volt corresponding to the energy of a state
in Hz.
N.B. this method assumes that the polycoeffs are with respect to the
amplitude in units of V, including rescaling due to the channel
amplitude and range settings of the AWG8.
See also `self.get_dac_val_to_amp_scalefactor`.
amp_Volts = amp_dac_val * channel_amp * channel_range
"""
return self.calc_eps_to_amp(
eps=freq,
state_B=state,
state_A="00",
positive_branch=positive_branch,
which_gate=which_gate,
)
def _add_cfg_parameters(self):
self.add_parameter(
"cfg_awg_channel",
initial_value=1,
vals=vals.Ints(1, 8),
parameter_class=ManualParameter,
)
self.add_parameter(
"cfg_distort",
initial_value=True,
vals=vals.Bool(),
parameter_class=ManualParameter,
)
self.add_parameter(
"cfg_append_compensation",
docstring=(
"If True compensation pulses will be added to individual "
" waveforms creating very long waveforms for each codeword"
),
initial_value=True,
vals=vals.Bool(),
parameter_class=ManualParameter,
)
self.add_parameter(
"cfg_compensation_delay",
initial_value=3e-6,
unit="s",
vals=vals.Numbers(),
parameter_class=ManualParameter,
)
self.add_parameter(
"cfg_pre_pulse_delay",
unit="s",
label="Pre pulse delay",
docstring="This parameter is used for fine timing corrections, the"
" correction is applied in distort_waveform.",
initial_value=0e-9,
vals=vals.Numbers(0, 1e-6),
parameter_class=ManualParameter,
)
self.add_parameter(
"instr_distortion_kernel", parameter_class=InstrumentRefParameter
)
self.add_parameter(
"instr_partner_lutman", # FIXME: unused?
docstring="LutMan responsible for the corresponding"
"channel in the AWG8 channel pair. "
"Reference is used when uploading waveforms",
parameter_class=InstrumentRefParameter,
)
self.add_parameter(
"_awgs_fl_sequencer_program_expected_hash", # FIXME: un used?
docstring="crc32 hash of the awg8 sequencer program. "
"This parameter is used to dynamically determine "
"if the program needs to be uploaded. The initial_value is"
" None, indicating that the program needs to be uploaded."
" After the first program is uploaded, the value is set.",
initial_value=None,
vals=vals.Ints(),
parameter_class=ManualParameter,
)
self.add_parameter(
"cfg_max_wf_length",
parameter_class=ManualParameter,
initial_value=10e-6,
unit="s",
vals=vals.Numbers(0, 100e-6),
)
self.add_parameter(
"cfg_awg_channel_range",
docstring="peak peak value, channel range of 5 corresponds to -2.5V to +2.5V",
get_cmd=self._get_awg_channel_range,
unit="V_pp",
)
self.add_parameter(
"cfg_awg_channel_amplitude",
docstring="digital scale factor between 0 and 1",
get_cmd=self._get_awg_channel_amplitude,
set_cmd=self._set_awg_channel_amplitude,
unit="a.u.",
vals=vals.Numbers(0, 1),
)
def get_dac_val_to_amp_scalefactor(self):
"""
Returns the scale factor to transform an amplitude in 'dac value' to an
amplitude in 'V'.
"dac_value" refers to the value between -1 and +1 that is set in a
waveform.
N.B. the implementation is specific to this type of AWG
"""
if self.AWG() is None:
log.warning("No AWG present, returning unity scale factor.")
return 1
channel_amp = self.cfg_awg_channel_amplitude()
channel_range_pp = self.cfg_awg_channel_range()
# channel range of 5 corresponds to -2.5V to +2.5V
scalefactor = channel_amp * (channel_range_pp / 2)
return scalefactor
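    # Worked example (sketch, values are illustrative): with
    # cfg_awg_channel_amplitude() == 0.5 and a peak-peak channel range of
    # 5 V, the scale factor is 0.5 * (5 / 2) = 1.25 V per unit dac value,
    # so a dac value of 0.8 corresponds to 1.0 V at the output.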
def get_amp_to_dac_val_scalefactor(self):
if self.get_dac_val_to_amp_scalefactor() == 0:
# Give a warning and don't raise an error as things should not
# break because of this.
log.warning(
'AWG amp to dac scale factor is 0, check "{}" '
"output amplitudes".format(self.AWG())
)
return 1
return 1 / self.get_dac_val_to_amp_scalefactor()
def calc_amp_to_freq(self, amp: float, state: str = "01", which_gate: str = "NE"):
"""
Converts pulse amplitude in Volt to energy in Hz for a particular state
Args:
amp (float) : amplitude in Volt
state (str) : string of 2 numbers denoting the state. The numbers
                correspond to the number of excitations in each qubit.
The LSQ (right) corresponds to the qubit being fluxed and
under control of this flux lutman.
N.B. this method assumes that the polycoeffs are with respect to the
amplitude in units of V, including rescaling due to the channel
amplitude and range settings of the AWG8.
See also `self.get_dac_val_to_amp_scalefactor`.
        N.B. The value of which_gate (and its default) only affects the
other qubit frequencies (here noted as MSQ 10)
amp_Volts = amp_dac_val * channel_amp * channel_range
"""
polycoeffs = self.get_polycoeffs_state(state=state, which_gate=which_gate)
return np.polyval(polycoeffs, amp)
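    # Example (sketch, ``flux_lm`` is a hypothetical configured instance):
    # with polycoeffs = [c2, c1, c0] the returned frequency is the quadratic
    # c2 * amp**2 + c1 * amp + c0 (in Hz), evaluated elementwise when amp is
    # an array, e.g. flux_lm.calc_amp_to_freq(np.linspace(-0.5, 0.5, 11), "02").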
#################################
# Waveform loading methods #
#################################
def load_waveform_onto_AWG_lookuptable(
self, wave_id: str, regenerate_waveforms: bool = False
):
"""
Loads a specific waveform to the AWG
"""
        # Here we are duck typing to determine if the waveform name or the
# codeword was specified.
if type(wave_id) == str:
waveform_name = wave_id
codeword = get_wf_idx_from_name(wave_id, self.LutMap())
else:
waveform_name = self.LutMap()[wave_id]["name"]
codeword = wave_id
if regenerate_waveforms:
# only regenerate the one waveform that is desired
if "cz" in waveform_name:
# CZ gates contain information on which pair (NE, SE, SW, NW)
                # the gate is performed with; this is specified in which_gate.
gen_wf_func = getattr(self, "_gen_cz")
self._wave_dict[waveform_name] = gen_wf_func(
which_gate=waveform_name[3:]
)
else:
gen_wf_func = getattr(self, "_gen_{}".format(waveform_name))
self._wave_dict[waveform_name] = gen_wf_func()
waveform = self._wave_dict[waveform_name]
codeword_str = "wave_ch{}_cw{:03}".format(self.cfg_awg_channel(), codeword)
if self.cfg_append_compensation():
waveform = self.add_compensation_pulses(waveform)
if self.cfg_distort():
# This is where the fixed length waveform is
# set to cfg_max_wf_length
waveform = self.distort_waveform(waveform)
self._wave_dict_dist[waveform_name] = waveform
else:
# This is where the fixed length waveform is
# set to cfg_max_wf_length
waveform = self._append_zero_samples(waveform)
self._wave_dict_dist[waveform_name] = waveform
self.AWG.get_instr().set(codeword_str, waveform)
def load_waveforms_onto_AWG_lookuptable(
self, regenerate_waveforms: bool = True, stop_start: bool = True
):
"""
Loads all waveforms specified in the LutMap to an AWG for both this
LutMap and the partner LutMap.
Args:
regenerate_waveforms (bool): if True calls
generate_standard_waveforms before uploading.
stop_start (bool): if True stops and starts the AWG.
"""
AWG = self.AWG.get_instr()
if stop_start:
AWG.stop()
for idx, waveform in self.LutMap().items():
self.load_waveform_onto_AWG_lookuptable(
wave_id=idx, regenerate_waveforms=regenerate_waveforms
)
self.cfg_awg_channel_amplitude()
self.cfg_awg_channel_range()
if stop_start:
AWG.start()
def _append_zero_samples(self, waveform):
"""
Helper method to ensure waveforms have the desired length
"""
length_samples = roundup1024(
int(self.sampling_rate() * self.cfg_max_wf_length())
)
extra_samples = length_samples - len(waveform)
if extra_samples >= 0:
y_sig = np.concatenate([waveform, np.zeros(extra_samples)])
else:
y_sig = waveform[:extra_samples]
return y_sig
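    # Example (sketch, assuming a 2.4 GSa/s sampling rate): a 10 us
    # cfg_max_wf_length corresponds to 24000 samples, which roundup1024
    # pads up to the next multiple of 144, i.e. 24048 samples; shorter
    # waveforms are zero-padded to that length, longer ones are truncated.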
def add_compensation_pulses(self, waveform):
"""
Adds the inverse of the pulses at the end of a waveform to
ensure flux discharging.
"""
wf = np.array(waveform) # catches a rare bug when wf is a list
delay_samples = np.zeros(
int(self.sampling_rate() * self.cfg_compensation_delay())
)
comp_wf = np.concatenate([wf, delay_samples, -1 * wf])
return comp_wf
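    # Example (sketch): for wf = [0.1, 0.2, 0.1] and a compensation delay
    # of 3 samples the returned waveform is
    # [0.1, 0.2, 0.1, 0.0, 0.0, 0.0, -0.1, -0.2, -0.1], which integrates to
    # zero and therefore discharges the flux bias line.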
def distort_waveform(self, waveform, inverse=False):
"""
Modifies the ideal waveform to correct for distortions and correct
fine delays.
Distortions are corrected using the kernel object.
"""
k = self.instr_distortion_kernel.get_instr()
# Prepend zeros to delay waveform to correct for fine timing
delay_samples = int(self.cfg_pre_pulse_delay() * self.sampling_rate())
waveform = np.pad(waveform, (delay_samples, 0), "constant")
# duck typing the distort waveform method
if hasattr(k, "distort_waveform"):
distorted_waveform = k.distort_waveform(
waveform,
length_samples=int(
roundup1024(self.cfg_max_wf_length() * self.sampling_rate())
),
inverse=inverse,
)
else: # old kernel object does not have this method
if inverse:
raise NotImplementedError()
distorted_waveform = k.convolve_kernel(
[k.kernel(), waveform],
length_samples=int(self.cfg_max_wf_length() * self.sampling_rate()),
)
return distorted_waveform
#################################
# Plotting methods #
#################################
def plot_cz_trajectory(self, axs=None, show=True, which_gate="NE"):
"""
Plots the cz trajectory in frequency space.
"""
q_J2 = self.get("q_J2_%s" % which_gate)
if axs is None:
f, axs = plt.subplots(figsize=(5, 7), nrows=3, sharex=True)
dac_amps = self._wave_dict["cz_%s" % which_gate]
t = np.arange(0, len(dac_amps)) * 1 / self.sampling_rate()
CZ_amp = dac_amps * self.get_dac_val_to_amp_scalefactor()
CZ_eps = self.calc_amp_to_eps(CZ_amp, "11", "02", which_gate=which_gate)
CZ_theta = wfl.eps_to_theta(CZ_eps, q_J2)
axs[0].plot(t, np.rad2deg(CZ_theta), marker=".")
axs[0].fill_between(t, np.rad2deg(CZ_theta), color="C0", alpha=0.5)
set_ylabel(axs[0], r"$\theta$", "deg")
axs[1].plot(t, CZ_eps, marker=".")
axs[1].fill_between(t, CZ_eps, color="C0", alpha=0.5)
set_ylabel(axs[1], r"$\epsilon_{11-02}$", "Hz")
axs[2].plot(t, CZ_amp, marker=".")
axs[2].fill_between(t, CZ_amp, color="C0", alpha=0.1)
set_xlabel(axs[2], "Time", "s")
set_ylabel(axs[2], r"Amp.", "V")
# axs[2].set_ylim(-1, 1)
axs[2].axhline(0, lw=0.2, color="grey")
CZ_amp_pred = self.distort_waveform(CZ_amp)[: len(CZ_amp)]
axs[2].plot(t, CZ_amp_pred, marker=".")
axs[2].fill_between(t, CZ_amp_pred, color="C1", alpha=0.3)
if show:
plt.show()
return axs
def plot_level_diagram(self, ax=None, show=True, which_gate="NE"):
"""
Plots the level diagram as specified by the q_ parameters.
1. Plotting levels
2. Annotating feature of interest
3. Adding legend etc.
4. Add a twin x-axis to denote scale in dac amplitude
"""
if ax is None:
f, ax = plt.subplots()
# 1. Plotting levels
# maximum voltage of AWG in amp mode
amps = np.linspace(-2.5, 2.5, 101)
freqs = self.calc_amp_to_freq(amps, state="01", which_gate=which_gate)
ax.plot(amps, freqs, label="$f_{01}$")
ax.text(
0,
self.calc_amp_to_freq(0, state="01", which_gate=which_gate),
"01",
color="C0",
ha="left",
va="bottom",
clip_on=True,
)
freqs = self.calc_amp_to_freq(amps, state="02", which_gate=which_gate)
ax.plot(amps, freqs, label="$f_{02}$")
ax.text(
0,
self.calc_amp_to_freq(0, state="02", which_gate=which_gate),
"02",
color="C1",
ha="left",
va="bottom",
clip_on=True,
)
freqs = self.calc_amp_to_freq(amps, state="10", which_gate=which_gate)
ax.plot(amps, freqs, label="$f_{10}$")
ax.text(
0,
self.calc_amp_to_freq(0, state="10", which_gate=which_gate),
"10",
color="C2",
ha="left",
va="bottom",
clip_on=True,
)
freqs = self.calc_amp_to_freq(amps, state="11", which_gate=which_gate)
ax.plot(amps, freqs, label="$f_{11}$")
ax.text(
0,
self.calc_amp_to_freq(0, state="11", which_gate=which_gate),
"11",
color="C3",
ha="left",
va="bottom",
clip_on=True,
)
# 2. Annotating feature of interest
ax.axvline(0, 0, 1e10, linestyle="dotted", c="grey")
amp_J2 = self.calc_eps_to_amp(
0, state_A="11", state_B="02", which_gate=which_gate
)
amp_J1 = self.calc_eps_to_amp(
0, state_A="10", state_B="01", which_gate=which_gate
)
ax.axvline(amp_J2, ls="--", lw=1, c="C4")
ax.axvline(amp_J1, ls="--", lw=1, c="C6")
f_11_02 = self.calc_amp_to_freq(amp_J2, state="11", which_gate=which_gate)
ax.plot([amp_J2], [f_11_02], color="C4", marker="o", label="11-02")
ax.text(
amp_J2,
f_11_02,
"({:.4f},{:.2f})".format(amp_J2, f_11_02 * 1e-9),
color="C4",
ha="left",
va="bottom",
clip_on=True,
)
f_10_01 = self.calc_amp_to_freq(amp_J1, state="01", which_gate=which_gate)
ax.plot([amp_J1], [f_10_01], color="C5", marker="o", label="10-01")
ax.text(
amp_J1,
f_10_01,
"({:.4f},{:.2f})".format(amp_J1, f_10_01 * 1e-9),
color="C5",
ha="left",
va="bottom",
clip_on=True,
)
# 3. Adding legend etc.
title = "Calibration visualization\n{}\nchannel {}".format(
self.AWG(), self.cfg_awg_channel()
)
leg = ax.legend(title=title, loc=(1.05, 0.3))
leg._legend_box.align = "center"
set_xlabel(ax, "AWG amplitude", "V")
set_ylabel(ax, "Frequency", "Hz")
ax.set_xlim(-2.5, 2.5)
ax.set_ylim(
0, self.calc_amp_to_freq(0, state="02", which_gate=which_gate) * 1.1
)
# 4. Add a twin x-axis to denote scale in dac amplitude
dac_val_axis = ax.twiny()
dac_ax_lims = np.array(ax.get_xlim()) * self.get_amp_to_dac_val_scalefactor()
dac_val_axis.set_xlim(dac_ax_lims)
set_xlabel(dac_val_axis, "AWG amplitude", "dac")
dac_val_axis.axvspan(1, 1000, facecolor=".5", alpha=0.5)
dac_val_axis.axvspan(-1000, -1, facecolor=".5", alpha=0.5)
# get figure is here in case an axis object was passed as input
f = ax.get_figure()
f.subplots_adjust(right=0.7)
if show:
plt.show()
return ax
def plot_cz_waveforms(
self, qubits: list, which_gate_list: list, ax=None, show: bool = True
):
"""
        Plots the cz waveforms from several flux lutmans, mainly for
        verification, time alignment and debugging.
"""
if ax is None:
fig, ax = plt.subplots(1, 1)
flux_lm_list = [
self.find_instrument("flux_lm_{}".format(qubit)) for qubit in qubits
]
for flux_lm, which_gate, qubit in zip(flux_lm_list, which_gate_list, qubits):
flux_lm.generate_standard_waveforms()
waveform_name = "cz_{}".format(which_gate)
ax.plot(
flux_lm._wave_dict[waveform_name],
".-",
label=waveform_name + " " + qubit,
)
ax.legend()
fig = ax.get_figure()
if show:
fig.show()
return fig
#################################
# Simulation methods #
#################################
def _add_cz_sim_parameters(self):
for this_cz in ["NE", "NW", "SW", "SE"]:
self.add_parameter(
"bus_freq_%s" % this_cz,
docstring="[CZ simulation] Bus frequency.",
vals=vals.Numbers(0.1e9, 1000e9),
initial_value=7.77e9,
parameter_class=ManualParameter,
)
self.add_parameter(
"instr_sim_control_CZ_%s" % this_cz,
docstring="Noise and other parameters for CZ simulation.",
parameter_class=InstrumentRefParameter,
)
self.add_parameter(
"step_response",
initial_value=np.array([]),
label="Step response",
docstring=(
"Stores the normalized flux line step response. "
"Intended for use in cz simulations with noise."
),
parameter_class=ManualParameter,
vals=vals.Arrays(),
)
class QWG_Flux_LutMan(HDAWG_Flux_LutMan):
def __init__(self, name, **kw):
super().__init__(name, **kw)
self._wave_dict_dist = dict()
self.sampling_rate(1e9)
def get_dac_val_to_amp_scalefactor(self):
"""
Returns the scale factor to transform an amplitude in 'dac value' to an
amplitude in 'V'.
N.B. the implementation is specific to this type of AWG (QWG)
"""
AWG = self.AWG.get_instr()
awg_ch = self.cfg_awg_channel()
channel_amp = AWG.get("ch{}_amp".format(awg_ch))
scale_factor = channel_amp
return scale_factor
def load_waveforms_onto_AWG_lookuptable(
self, regenerate_waveforms: bool = True, stop_start: bool = True
):
# We inherit from the HDAWG LutMan but do not require the fancy
# loading because the QWG is a simple device!
return Base_Flux_LutMan.load_waveforms_onto_AWG_lookuptable(
self, regenerate_waveforms=regenerate_waveforms, stop_start=stop_start
)
def _get_awg_channel_amplitude(self):
AWG = self.AWG.get_instr()
awg_ch = self.cfg_awg_channel()
channel_amp = AWG.get("ch{}_amp".format(awg_ch))
return channel_amp
def _set_awg_channel_amplitude(self, val):
AWG = self.AWG.get_instr()
awg_ch = self.cfg_awg_channel()
channel_amp = AWG.set("ch{}_amp".format(awg_ch), val)
return channel_amp
def _add_cfg_parameters(self):
self.add_parameter(
"cfg_awg_channel",
initial_value=1,
vals=vals.Ints(1, 4),
parameter_class=ManualParameter,
)
self.add_parameter(
"cfg_distort",
initial_value=True,
vals=vals.Bool(),
parameter_class=ManualParameter,
)
self.add_parameter(
"cfg_append_compensation",
docstring=(
"If True compensation pulses will be added to individual "
" waveforms creating very long waveforms for each codeword"
),
initial_value=True,
vals=vals.Bool(),
parameter_class=ManualParameter,
)
self.add_parameter(
"cfg_compensation_delay",
parameter_class=ManualParameter,
initial_value=3e-6,
unit="s",
vals=vals.Numbers(),
)
self.add_parameter(
"cfg_pre_pulse_delay",
unit="s",
label="Pre pulse delay",
docstring="This parameter is used for fine timing corrections, the"
" correction is applied in distort_waveform.",
initial_value=0e-9,
vals=vals.Numbers(0, 1e-6),
parameter_class=ManualParameter,
)
self.add_parameter(
"instr_distortion_kernel", parameter_class=InstrumentRefParameter
)
self.add_parameter(
"cfg_max_wf_length",
parameter_class=ManualParameter,
initial_value=10e-6,
unit="s",
vals=vals.Numbers(0, 100e-6),
)
self.add_parameter(
"cfg_awg_channel_amplitude",
docstring="Output amplitude from 0 to 1.6 V",
get_cmd=self._get_awg_channel_amplitude,
set_cmd=self._set_awg_channel_amplitude,
unit="V",
vals=vals.Numbers(0, 1.6),
)
#########################################################################
# Convenience functions below
#########################################################################
def roundup1024(n):
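    # N.B. despite the name, this rounds n up to the nearest multiple of
    # 144, not 1024.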
return int(np.ceil(n / 144) * 144)
| mit |
tpltnt/SimpleCV | SimpleCV/LineScan.py | 1 | 36021 | from __future__ import print_function
from SimpleCV.base import *
import scipy.signal as sps
import scipy.optimize as spo
import numpy as np
import copy, operator
class LineScan(list):
"""
**SUMMARY**
A line scan is a one dimensional signal pulled from the intensity
of a series of a pixels in an image. LineScan allows you to do a series
of operations just like on an image class object. You can also treat the
line scan as a python list object. A linescan object is automatically
generated by calling ImageClass.getLineScan on an image. You can also
roll your own by declaring a LineScan object and passing the constructor
a 1xN list of values.
**EXAMPLE**
>>>> import matplotlib.pyplot as plt
>>>> img = Image('lenna')
>>>> s = img.getLineScan(y=128)
>>>> ss = s.smooth()
>>>> plt.plot(s)
>>>> plt.plot(ss)
>>>> plt.show()
"""
pointLoc = None
image = None
def __init__(self, args, **kwargs):
if isinstance(args, np.ndarray):
args = args.tolist()
list.__init__(self,args)
self.image = None
self.pt1 = None
self.pt2 = None
self.row = None
self.col = None
self.channel = -1
for key in kwargs:
if key == 'pointLocs':
if kwargs[key] is not None:
self.pointLoc = kwargs[key]
if key == 'image':
if kwargs[key] is not None:
                    self.image = kwargs[key]
if key == 'pt1':
if kwargs[key] is not None:
self.pt1 = kwargs[key]
if key == 'pt2':
if kwargs[key] is not None:
self.pt2 = kwargs[key]
if key == "x":
if kwargs[key] is not None:
self.col = kwargs[key]
if key == "y":
if kwargs[key] is not None:
self.row = kwargs[key]
if key == "channel":
if kwargs[key] is not None:
self.channel = kwargs[key]
if(self.pointLoc is None):
self.pointLoc = zip(range(0,len(self)),range(0,len(self)))
def __getitem__(self,key):
"""
**SUMMARY**
Returns a LineScan when sliced. Previously used to
        return a list. Now it is possible to use LineScan member
        functions on sub-lists.
"""
if type(key) is slice: #Or can use 'try:' for speed
return LineScan(list.__getitem__(self, key))
else:
return list.__getitem__(self,key)
def __getslice__(self, i, j):
"""
Deprecated since python 2.0, now using __getitem__
"""
return self.__getitem__(slice(i,j))
def __sub__(self,other):
if len(self) == len(other):
retVal = LineScan(map(operator.sub,self,other))
else:
print('Size mismatch')
return None
retVal._update(self)
return retVal
def __add__(self,other):
if len(self) == len(other):
retVal = LineScan(map(operator.add,self,other))
else:
print('Size mismatch')
return None
retVal._update(self)
return retVal
def __mul__(self,other):
if len(self) == len(other):
retVal = LineScan(map(operator.mul,self,other))
else:
print('Size mismatch')
return None
retVal._update(self)
return retVal
def __div__(self,other):
if len(self) == len(other):
try:
retVal = LineScan(map(operator.div,self,other))
except ZeroDivisionError:
print('Second LineScan contains zeros')
return None
else:
print('Size mismatch')
return None
retVal._update(self)
return retVal
def _update(self, linescan):
"""
** SUMMARY**
Updates LineScan's Instance Objects.
"""
self.image = linescan.image
self.pt1 = linescan.pt1
self.pt2 = linescan.pt2
self.row = linescan.row
self.col = linescan.col
self.channel = linescan.channel
self.pointLoc = linescan.pointLoc
def smooth(self,degree=3):
"""
**SUMMARY**
        Perform a simple Gaussian smoothing operation on the signal.
**PARAMETERS**
* *degree* - The degree of the fitting function. Higher degree means more smoothing.
**RETURNS**
A smoothed LineScan object.
**EXAMPLE**
>>>> import matplotlib.pyplot as plt
>>>> img = Image('lenna')
>>>> sl = img.getLineScan(y=128)
>>>> plt.plot(sl)
>>>> plt.plot(sl.smooth(7))
>>>> plt.show()
**NOTES**
Cribbed from http://www.swharden.com/blog/2008-11-17-linear-data-smoothing-in-python/
"""
window=degree*2-1
weight=np.array([1.0]*window)
weightGauss=[]
for i in range(window):
i=i-degree+1
frac=i/float(window)
gauss=1/(np.exp((4*(frac))**2))
weightGauss.append(gauss)
weight=np.array(weightGauss)*weight
smoothed=[0.0]*(len(self)-window)
for i in range(len(smoothed)):
smoothed[i]=sum(np.array(self[i:i+window])*weight)/sum(weight)
# recenter the signal so it sits nicely on top of the old
front = self[0:(degree-1)]
front += smoothed
front += self[-1*degree:]
retVal = LineScan(front,image=self.image,pointLoc=self.pointLoc,pt1=self.pt1,pt2=self.pt2)
retVal._update(self)
return retVal
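    # Example (sketch): degree=3 gives window = 2*3 - 1 = 5 and Gaussian
    # weights exp(-(4*i/5)**2) for i in [-2, ..., 2], roughly
    # [0.077, 0.527, 1.0, 0.527, 0.077] before normalisation by their sum;
    # the leading and trailing samples are copied through unsmoothed so the
    # output keeps the original length.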
def normalize(self):
"""
**SUMMARY**
Normalize the signal so the maximum value is scaled to one.
**RETURNS**
A normalized scanline object.
**EXAMPLE**
>>>> import matplotlib.pyplot as plt
>>>> img = Image('lenna')
>>>> sl = img.getLineScan(y=128)
>>>> plt.plot(sl)
>>>> plt.plot(sl.normalize())
>>>> plt.show()
"""
temp = np.array(self, dtype='float32')
temp = temp / np.max(temp)
retVal = LineScan(list(temp[:]),image=self.image,pointLoc=self.pointLoc,pt1=self.pt1,pt2=self.pt2)
retVal._update(self)
return retVal
def scale(self,value_range=(0,1)):
"""
**SUMMARY**
        Scale the signal so that its minimum and maximum values map
        to the bounds given in value_range. This is handy
if you want to compare the shape of two signals that
are scaled to different ranges.
**PARAMETERS**
* *value_range* - A tuple that provides the lower and upper bounds
for the output signal.
**RETURNS**
A scaled LineScan object.
**EXAMPLE**
>>>> import matplotlib.pyplot as plt
>>>> img = Image('lenna')
>>>> sl = img.getLineScan(y=128)
>>>> plt.plot(sl)
        >>>> plt.plot(sl.scale(value_range=(0,255)))
>>>> plt.show()
**SEE ALSO**
"""
temp = np.array(self, dtype='float32')
vmax = np.max(temp)
vmin = np.min(temp)
a = np.min(value_range)
b = np.max(value_range)
temp = (((b-a)/(vmax-vmin))*(temp-vmin))+a
retVal = LineScan(list(temp[:]),image=self.image,pointLoc=self.pointLoc,pt1=self.pt1,pt2=self.pt2)
retVal._update(self)
return retVal
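    # Worked example (sketch): for values spanning vmin=10 .. vmax=60 and
    # value_range=(0, 1), each sample x is mapped to (x - 10) / 50, so 10
    # becomes 0.0, 35 becomes 0.5 and 60 becomes 1.0.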
def minima(self):
"""
**SUMMARY**
        The function finds the global minima in the line scan.
**RETURNS**
Returns a list of tuples of the format:
(LineScanIndex,MinimaValue,(image_position_x,image_position_y))
**EXAMPLE**
>>>> import matplotlib.pyplot as plt
>>>> img = Image('lenna')
>>>> sl = img.getLineScan(y=128)
>>>> minima = sl.smooth().minima()
>>>> plt.plot(sl)
>>>> for m in minima:
>>>> plt.plot(m[0],m[1],'ro')
>>>> plt.show()
"""
# all of these functions should return
# value, index, pixel coordinate
# [(index,value,(pix_x,pix_y))...]
minvalue = np.min(self)
idxs = np.where(np.array(self)==minvalue)[0]
minvalue = np.ones((1,len(idxs)))*minvalue # make zipable
minvalue = minvalue[0]
pts = np.array(self.pointLoc)
pts = pts[idxs]
pts = [(p[0],p[1]) for p in pts] # un numpy this
return zip(idxs,minvalue,pts)
def maxima(self):
"""
**SUMMARY**
The function finds the global maxima in the line scan.
**RETURNS**
Returns a list of tuples of the format:
(LineScanIndex,MaximaValue,(image_position_x,image_position_y))
**EXAMPLE**
>>>> import matplotlib.pyplot as plt
>>>> img = Image('lenna')
>>>> sl = img.getLineScan(y=128)
>>>> maxima = sl.smooth().maxima()
>>>> plt.plot(sl)
>>>> for m in maxima:
>>>> plt.plot(m[0],m[1],'ro')
>>>> plt.show()
"""
# all of these functions should return
# value, index, pixel coordinate
# [(index,value,(pix_x,pix_y))...]
maxvalue = np.max(self)
idxs = np.where(np.array(self)==maxvalue)[0]
maxvalue = np.ones((1,len(idxs)))*maxvalue # make zipable
maxvalue = maxvalue[0]
pts = np.array(self.pointLoc)
pts = pts[idxs]
pts = [(p[0],p[1]) for p in pts] # un numpy
return zip(idxs,maxvalue,pts)
def derivative(self):
"""
**SUMMARY**
This function finds the discrete derivative of the signal.
        The discrete derivative is simply the difference between
        successive samples. A good use of this function is edge detection.
**RETURNS**
Returns the discrete derivative function as a LineScan object.
**EXAMPLE**
>>>> import matplotlib.pyplot as plt
>>>> img = Image('lenna')
>>>> sl = img.getLineScan(y=128)
>>>> plt.plot(sl)
>>>> plt.plot(sl.derivative())
>>>> plt.show()
"""
temp = np.array(self,dtype='float32')
d = [0]
d += list(temp[1:]-temp[0:-1])
retVal = LineScan(d,image=self.image,pointLoc=self.pointLoc,pt1=self.pt1,pt2=self.pt2)
#retVal.image = self.image
#retVal.pointLoc = self.pointLoc
return retVal
def localMaxima(self):
"""
**SUMMARY**
The function finds local maxima in the line scan. Local maxima
are defined as points that are greater than their neighbors to
the left and to the right.
**RETURNS**
Returns a list of tuples of the format:
(LineScanIndex,MaximaValue,(image_position_x,image_position_y))
**EXAMPLE**
>>>> import matplotlib.pyplot as plt
>>>> img = Image('lenna')
>>>> sl = img.getLineScan(y=128)
        >>>> maxima = sl.smooth().localMaxima()
>>>> plt.plot(sl)
>>>> for m in maxima:
>>>> plt.plot(m[0],m[1],'ro')
>>>> plt.show()
"""
temp = np.array(self)
idx = np.r_[True, temp[1:] > temp[:-1]] & np.r_[temp[:-1] > temp[1:], True]
idx = np.where(idx==True)[0]
values = temp[idx]
pts = np.array(self.pointLoc)
pts = pts[idx]
pts = [(p[0],p[1]) for p in pts] # un numpy
return zip(idx,values,pts)
def localMinima(self):
"""""
**SUMMARY**
        The function finds the local minima in the line scan. Local minima
are defined as points that are less than their neighbors to
the left and to the right.
**RETURNS**
Returns a list of tuples of the format:
(LineScanIndex,MinimaValue,(image_position_x,image_position_y))
**EXAMPLE**
>>>> import matplotlib.pyplot as plt
>>>> img = Image('lenna')
>>>> sl = img.getLineScan(y=128)
        >>>> minima = sl.smooth().localMinima()
>>>> plt.plot(sl)
>>>> for m in minima:
>>>> plt.plot(m[0],m[1],'ro')
>>>> plt.show()
"""
temp = np.array(self)
idx = np.r_[True, temp[1:] < temp[:-1]] & np.r_[temp[:-1] < temp[1:], True]
idx = np.where(idx==True)[0]
values = temp[idx]
pts = np.array(self.pointLoc)
pts = pts[idx]
pts = [(p[0],p[1]) for p in pts] # un numpy
return zip(idx,values,pts)
def resample(self,n=100):
"""
**SUMMARY**
Resample the signal to fit into n samples. This method is
handy if you would like to resize multiple signals so that
        they fit together nicely. Note that using n < len(LineScan)
can cause data loss.
**PARAMETERS**
* *n* - The number of samples to resample to.
**RETURNS**
A LineScan object of length n.
**EXAMPLE**
>>>> import matplotlib.pyplot as plt
>>>> img = Image('lenna')
>>>> sl = img.getLineScan(y=128)
>>>> plt.plot(sl)
>>>> plt.plot(sl.resample(100))
>>>> plt.show()
"""
signal = sps.resample(self,n)
pts = np.array(self.pointLoc)
# we assume the pixel points are linear
# so we can totally do this better manually
        x = np.linspace(pts[0,0],pts[-1,0],n)
        y = np.linspace(pts[0,1],pts[-1,1],n)
pts = zip(x,y)
retVal = LineScan(list(signal),image=self.image,pointLoc=self.pointLoc,pt1=self.pt1,pt2=self.pt2)
retVal._update(self)
return retVal
# this needs to be moved out to a cookbook or something
#def linear(xdata,m,b):
# return m*xdata+b
# need to add polyfit too
#http://docs.scipy.org/doc/numpy/reference/generated/numpy.polyfit.html
def fitToModel(self,f,p0=None):
"""
**SUMMARY**
        Fit the data to the provided model. The model can be any arbitrary
        function of the sample index and its parameters. Returns the model
        evaluated with the fitted parameters in place of the actual data.
**PARAMETERS**
* *f* - a function of the form f(x_values, p0,p1, ... pn) where
p is parameter for the model.
* *p0* - a list of the initial guess for the model parameters.
**RETURNS**
A LineScan object where the fitted model data replaces the
actual data.
**EXAMPLE**
>>>> def aLine(x,m,b):
>>>> return m*x+b
>>>> import matplotlib.pyplot as plt
>>>> img = Image('lenna')
>>>> sl = img.getLineScan(y=128)
>>>> fit = sl.fitToModel(aLine)
>>>> plt.plot(sl)
>>>> plt.plot(fit)
>>>> plt.show()
"""
yvals = np.array(self,dtype='float32')
xvals = range(0,len(yvals),1)
popt,pcov = spo.curve_fit(f,xvals,yvals,p0=p0)
yvals = f(xvals,*popt)
retVal = LineScan(list(yvals),image=self.image,pointLoc=self.pointLoc,pt1=self.pt1,pt2=self.pt2)
retVal._update(self)
return retVal
def getModelParameters(self,f,p0=None):
"""
**SUMMARY**
        Fit a model to the data and then return the fitted model parameters.
**PARAMETERS**
* *f* - a function of the form f(x_values, p0,p1, ... pn) where
p is parameter for the model.
* *p0* - a list of the initial guess for the model parameters.
**RETURNS**
The model parameters as a list. For example if you use a line
model y=mx+b the function returns the m and b values that fit
the data.
**EXAMPLE**
>>>> def aLine(x,m,b):
>>>> return m*x+b
>>>> import matplotlib.pyplot as plt
>>>> img = Image('lenna')
>>>> sl = img.getLineScan(y=128)
>>>> p = sl.getModelParameters(aLine)
>>>> print p
"""
yvals = np.array(self,dtype='float32')
xvals = range(0,len(yvals),1)
popt,pcov = spo.curve_fit(f,xvals,yvals,p0=p0)
return popt
def convolve(self,kernel):
"""
**SUMMARY**
        Convolve the line scan with a one dimensional kernel stored as
a list. This allows you to create an arbitrary filter for the signal.
**PARAMETERS**
* *kernel* - An Nx1 list or np.array that defines the kernel.
**RETURNS**
A LineScan feature with the kernel applied. We crop off
        the fiddly bits at the end and the beginning of the kernel
so everything lines up nicely.
**EXAMPLE**
>>>> import matplotlib.pyplot as plt
>>>> smooth_kernel = [0.1,0.2,0.4,0.2,0.1]
>>>> img = Image('lenna')
>>>> sl = img.getLineScan(y=128)
>>>> out = sl.convolve(smooth_kernel)
>>>> plt.plot(sl)
>>>> plt.plot(out)
>>>> plt.show()
**SEE ALSO**
"""
out = np.convolve(self,np.array(kernel,dtype='float32'),'same')
retVal = LineScan(out,image=self.image,pointLoc=self.pointLoc,pt1=self.pt1,pt2=self.pt2,channel=self.channel)
return retVal
def fft(self):
"""
**SUMMARY**
Perform a Fast Fourier Transform on the line scan and return
the FFT output and the frequency of each value.
**RETURNS**
        The FFT as a numpy array of complex numbers and a one dimensional
list of frequency values.
**EXAMPLE**
>>>> import matplotlib.pyplot as plt
>>>> img = Image('lenna')
>>>> sl = img.getLineScan(y=128)
>>>> fft,freq = sl.fft()
>>>> plt.plot(freq,fft.real,freq,fft.imag)
>>>> plt.show()
"""
signal = np.array(self,dtype='float32')
fft = np.fft.fft(signal)
freq = np.fft.fftfreq(len(signal))
return (fft,freq)
def ifft(self,fft):
"""
**SUMMARY**
Perform an inverse fast Fourier transform on the provided
        complex-valued signal and return the results as a
LineScan.
**PARAMETERS**
        * *fft* - A one dimensional numpy array of complex values
upon which we will perform the IFFT.
**RETURNS**
A LineScan object of the reconstructed signal.
**EXAMPLE**
>>>> img = Image('lenna')
>>>> sl = img.getLineScan(pt1=(0,0),pt2=(300,200))
>>>> fft,frq = sl.fft()
>>>> fft[30:] = 0 # low pass filter
>>>> sl2 = sl.ifft(fft)
>>>> import matplotlib.pyplot as plt
>>>> plt.plot(sl)
>>>> plt.plot(sl2)
"""
signal = np.fft.ifft(fft)
retVal = LineScan(signal.real)
retVal.image = self.image
retVal.pointLoc = self.pointLoc
return retVal
def createEmptyLUT(self,defaultVal=-1):
"""
**SUMMARY**
Create an empty look up table (LUT).
        The default value is what the LUT is initially filled with:
if defaultVal == 0
the array is all zeros.
if defaultVal > 0
the array is set to default value. Clipped to 255.
if defaultVal < 0
the array is set to the range [0,255]
if defaultVal is a tuple of two values:
        we stretch the range of 0 to 255 to match
the range provided.
**PARAMETERS**
* *defaultVal* - See above.
**RETURNS**
A LUT.
**EXAMPLE**
>>>> ls = img.getLineScan(x=10)
>>>> lut = ls.createEmptyLUT()
>>>> ls2 = ls.applyLUT(lut)
>>>> plt.plot(ls)
>>>> plt.plot(ls2)
>>>> plt.show()
"""
lut = None
if( isinstance(defaultVal,list) or
isinstance(defaultVal,tuple)):
start = np.clip(defaultVal[0],0,255)
stop = np.clip(defaultVal[1],0,255)
lut = np.around(np.linspace(start,stop,256),0)
lut = np.array(lut,dtype='uint8')
lut = lut.tolist()
elif( defaultVal == 0 ):
lut = np.zeros([1,256]).tolist()[0]
elif( defaultVal > 0 ):
defaultVal = np.clip(defaultVal,1,255)
lut = np.ones([1,256])*defaultVal
lut = np.array(lut,dtype='uint8')
lut = lut.tolist()[0]
elif( defaultVal < 0 ):
lut = np.linspace(0,256,256)
lut = np.array(lut,dtype='uint8')
lut = lut.tolist()
return lut
def fillLUT(self,lut,idxs,value=255):
"""
**SUMMARY**
Fill up an existing LUT (look up table) at the indexes specified
by idxs with the value specified by value. This is useful for picking
out specific values.
**PARAMETERS**
* *lut* - An existing LUT (just a list of 255 values).
* *idxs* - The indexes of the LUT to fill with the value.
This can also be a sample swatch of an image.
* *value* - the value to set the LUT[idx] to
**RETURNS**
An updated LUT.
**EXAMPLE**
>>>> ls = img.getLineScan(x=10)
>>>> lut = ls.createEmptyLUT()
>>>> swatch = img.crop(0,0,10,10)
>>>> ls.fillLUT(lut,swatch,255)
>>>> ls2 = ls.applyLUT(lut)
>>>> plt.plot(ls)
>>>> plt.plot(ls2)
>>>> plt.show()
"""
# for the love of god keep this small
# for some reason isInstance is being persnickety
if(idxs.__class__.__name__ == 'Image' ):
npg = idxs.getGrayNumpy()
npg = npg.reshape([npg.shape[0]*npg.shape[1]])
idxs = npg.tolist()
value = np.clip(value,0,255)
for idx in idxs:
if(idx >= 0 and idx < len(lut)):
lut[idx]=value
return lut
def threshold(self,threshold=128,invert=False):
"""
**SUMMARY**
        Do a 1D threshold operation. Values above the threshold
will be set to 255, values below the threshold will be
set to 0. If invert is true we do the opposite.
**PARAMETERS**
* *threshold* - The cutoff value for our threshold.
* *invert* - if invert is false values above the threshold
        are set to 255; if invert is True they are set to 0.
**RETURNS**
The thresholded linescan operation.
**EXAMPLE**
>>>> ls = img.getLineScan(x=10)
>>>> ls2 = ls.threshold()
>>>> plt.plot(ls)
>>>> plt.plot(ls2)
>>>> plt.show()
"""
out = []
high = 255
low = 0
if( invert ):
high = 0
low = 255
for pt in self:
if( pt < threshold ):
out.append(low)
else:
out.append(high)
retVal = LineScan(out,image=self.image,pointLoc=self.pointLoc,pt1=self.pt1,pt2=self.pt2)
retVal._update(self)
return retVal
def invert(self,max=255):
"""
**SUMMARY**
Do an 8bit invert of the signal. What was black is now
white, what was 255 is now zero.
**PARAMETERS**
* *max* - The maximum value of a pixel in the image, usually 255.
**RETURNS**
The inverted LineScan object.
**EXAMPLE**
>>>> ls = img.getLineScan(x=10)
>>>> ls2 = ls.invert()
>>>> plt.plot(ls)
>>>> plt.plot(ls2)
>>>> plt.show()
"""
out = []
for pt in self:
            out.append(max - pt)
retVal = LineScan(out,image=self.image,pointLoc=self.pointLoc,pt1=self.pt1,pt2=self.pt2)
retVal._update(self)
return retVal
def mean(self):
"""
**SUMMARY**
Computes the statistical mean of the signal.
**RETURNS**
The mean of the LineScan object.
**EXAMPLE**
>>>> ls = img.getLineScan(x=10)
>>>> avg = ls.mean()
>>>> plt.plot(ls)
>>>> plt.axhline(y = avg)
>>>> plt.show()
"""
return float(sum(self))/len(self)
def variance(self):
"""
**SUMMARY**
Computes the variance of the signal.
**RETURNS**
The variance of the LineScan object.
**EXAMPLE**
>>>> ls = img.getLineScan(x=10)
>>>> var = ls.variance()
>>>> var
"""
mean = float(sum(self))/len(self)
summation = 0
for num in self:
summation += (num - mean)**2
return summation/len(self)
def std(self):
"""
**SUMMARY**
Computes the standard deviation of the signal.
**RETURNS**
The standard deviation of the LineScan object.
**EXAMPLE**
>>>> ls = img.getLineScan(x=10)
>>>> avg = ls.mean()
>>>> std = ls.std()
>>>> plt.plot(ls)
>>>> plt.axhline(y = avg)
>>>> plt.axhline(y = avg - std, color ='r')
>>>> plt.axhline(y = avg + std, color ='r')
>>>> plt.show()
"""
mean = float(sum(self))/len(self)
summation = 0
for num in self:
summation += (num - mean)**2
return np.sqrt(summation/len(self))
def median(self,sz=5):
"""
**SUMMARY**
        Do a sliding median filter with a window size equal to sz.
**PARAMETERS**
* *sz* - the size of the median filter.
**RETURNS**
The linescan after being passed through the median filter.
**EXAMPLE**
>>>> ls = img.getLineScan(x=10)
>>>> ls2 = ls.median(7)
>>>> plt.plot(ls)
>>>> plt.plot(ls2)
>>>> plt.show()
"""
if( sz%2==0 ):
sz = sz+1
skip = int(np.floor(sz/2))
out = self[0:skip]
vsz = len(self)
for idx in range(skip,vsz-skip):
val = np.median(self[(idx-skip):(idx+skip)])
out.append(val)
for pt in self[-1*skip:]:
out.append(pt)
retVal = LineScan(out,image=self.image,pointLoc=self.pointLoc,pt1=self.pt1,pt2=self.pt2)
retVal._update(self)
return retVal
def findFirstIdxEqualTo(self,value=255):
"""
**SUMMARY**
Find the index of the first element of the linescan that has
a value equal to value. If nothing is found None is returned.
**PARAMETERS**
* *value* - the value to look for.
**RETURNS**
The first index where the value occurs or None if none is found.
**EXAMPLE**
>>>> ls = img.getLineScan(x=10)
        >>>> idx = ls.findFirstIdxEqualTo()
"""
vals = np.where(np.array(self)==value)[0]
retVal = None
if( len(vals) > 0 ):
retVal = vals[0]
return retVal
def findLastIdxEqualTo(self,value=255):
"""
**SUMMARY**
Find the index of the last element of the linescan that has
a value equal to value. If nothing is found None is returned.
**PARAMETERS**
* *value* - the value to look for.
**RETURNS**
The last index where the value occurs or None if none is found.
**EXAMPLE**
>>>> ls = img.getLineScan(x=10)
        >>>> idx = ls.findLastIdxEqualTo()
"""
vals = np.where(np.array(self)==value)[0]
retVal = None
if( len(vals) > 0 ):
retVal = vals[-1]
return retVal
def findFirstIdxGreaterThan(self,value=255):
"""
**SUMMARY**
Find the index of the first element of the linescan that has
        a value greater than or equal to value. If nothing is found None is returned.
**PARAMETERS**
* *value* - the value to look for.
**RETURNS**
The first index where the value occurs or None if none is found.
**EXAMPLE**
>>>> ls = img.getLineScan(x=10)
        >>>> idx = ls.findFirstIdxGreaterThan()
"""
vals = np.where(np.array(self)>=value)[0]
retVal = None
if( len(vals) > 0 ):
retVal = vals[0]
return retVal
def applyLUT(self,lut):
"""
**SUMMARY**
Apply a look up table to the signal.
**PARAMETERS**
        * *lut* - an array of length 256; each input sample value is
        replaced by lut[value]
**RETURNS**
A LineScan object with the LUT applied to the values.
**EXAMPLE**
>>>> ls = img.getLineScan(x=10)
>>>> lut = ls.createEmptyLUT()
>>>> ls2 = ls.applyLUT(lut)
>>>> plt.plot(ls)
>>>> plt.plot(ls2)
"""
out = []
for pt in self:
out.append(lut[pt])
retVal = LineScan(out,image=self.image,pointLoc=self.pointLoc,pt1=self.pt1,pt2=self.pt2)
retVal._update(self)
return retVal
def medianFilter(self, kernel_size=5):
"""
**SUMMARY**
        Apply a median filter to the data
**PARAMETERS**
* *kernel_size* - Size of the filter (should be odd int) - int
**RETURNS**
A LineScan object with the median filter applied to the values.
**EXAMPLE**
>>> ls = img.getLineScan(x=10)
>>> mf = ls.medianFilter()
>>> plt.plot(ls)
>>> plt.plot(mf)
"""
try:
from scipy.signal import medfilt
except ImportError:
warnings.warn("Scipy vesion >= 0.11 requierd.")
return None
if kernel_size % 2 == 0:
kernel_size-=1
print("Kernel Size should be odd. New kernel size =" , (kernel_size))
medfilt_array = medfilt(np.asarray(self[:]), kernel_size)
retVal = LineScan(medfilt_array.astype("uint8").tolist(), image=self.image,pointLoc=self.pointLoc,pt1=self.pt1,pt2=self.pt2, x=self.col, y=self.row)
retVal._update(self)
return retVal
def detrend(self):
"""
**SUMMARY**
        Detrend the data
**PARAMETERS**
**RETURNS**
        A LineScan object with detrended data.
**EXAMPLE**
>>> ls = img.getLineScan(x=10)
>>> dt = ls.detrend()
>>> plt.plot(ls)
>>> plt.plot(dt)
"""
try:
from scipy.signal import detrend as sdetrend
except ImportError:
warnings.warn("Scipy vesion >= 0.11 requierd.")
return None
detrend_arr = sdetrend(np.asarray(self[:]))
retVal = LineScan(detrend_arr.astype("uint8").tolist(), image=self.image,pointLoc=self.pointLoc,pt1=self.pt1,pt2=self.pt2, x=self.col, y=self.row)
retVal._update(self)
return retVal
def runningAverage(self, diameter=3, algo="uniform"):
"""
**SUMMARY**
        Finds the running average by either using a uniform kernel or a Gaussian kernel.
        The Gaussian kernel is calculated from the standard normal distribution formula.
**PARAMETERS**
* *diameter* - Size of the window (should be odd int) - int
* *algo* - "uniform" (default) / "gaussian" - used to decide the kernel - string
**RETURNS**
A LineScan object with the kernel of the provided algorithm applied.
**EXAMPLE**
>>> ls = img.getLineScan(x=10)
>>> ra = ls.runningAverage()
>>> rag = ls.runningAverage(15,algo="gaussian")
>>> plt.plot(ls)
>>> plt.plot(ra)
>>> plt.plot(rag)
>>> plt.show()
"""
if diameter%2 == 0:
warnings.warn("Diameter must be an odd integer")
return None
if algo=="uniform":
kernel=list(1/float(diameter)*np.ones(diameter))
elif algo=="gaussian":
kernel=list()
r=float(diameter)/2
for i in range(-int(r),int(r)+1):
kernel.append(np.exp(-i**2/(2*(r/3)**2))/(np.sqrt(2*np.pi)*(r/3)))
retVal = LineScan(map(int,self.convolve(kernel)))
retVal._update(self)
return retVal
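    # Example (sketch): diameter=3 with algo="uniform" convolves with
    # [1/3, 1/3, 1/3]; with algo="gaussian" the kernel samples
    # exp(-i**2 / (2*(r/3)**2)) / (sqrt(2*pi)*(r/3)) for i in [-1, 0, 1]
    # with r = 1.5, i.e. a normal density with sigma = r/3 = 0.5.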
def findPeaks(self, window = 30, delta = 3):
"""
**SUMMARY**
Finds the peaks in a LineScan.
**PARAMETERS**
* *window* - the size of the window in which the peak
should have the highest value to be considered as a peak.
        By default this is 30 as it gives appropriate results.
The lower this value the more the peaks are returned
* *delta* - the minimum difference between the peak and
all elements in the window
**RETURNS**
A list of (peak position, peak value) tuples.
**EXAMPLE**
>>> ls = img.getLineScan(x=10)
>>> peaks = ls.findPeaks()
>>> print peaks
>>> peaks10 = ls.findPeaks(window=10)
>>> print peaks10
"""
maximum = -np.Inf
width = int(window/2.0)
peaks = []
for index,val in enumerate(self):
#peak found
if val > maximum:
maximum = val
maxpos = index
#checking whether peak satisfies window and delta conditions
if max( self[max(0, index-width):index+width])+delta< maximum:
peaks.append((maxpos, maximum))
maximum = -np.Inf
return peaks
def findValleys(self,window = 30, delta = 3 ):
"""
**SUMMARY**
Finds the valleys in a LineScan.
**PARAMETERS**
* *window* - the size of the window in which the valley
        should have the lowest value to be considered as a valley.
        By default this is 30 as it gives appropriate results.
The lower this value the more the valleys are returned
* *delta* - the minimum difference between the valley and
all elements in the window
**RETURNS**
        A list of (valley position, valley value) tuples.
**EXAMPLE**
>>> ls = img.getLineScan(x=10)
>>> valleys = ls.findValleys()
>>> print valleys
>>> valleys10 = ls.findValleys(window=10)
>>> print valleys10
"""
minimum = np.Inf
width = int(window/2.0)
peaks = []
for index,val in enumerate(self):
#peak found
if val < minimum:
minimum = val
minpos = index
#checking whether peak satisfies window and delta conditions
if min( self[max(0, index-width):index+width])-delta > minimum:
peaks.append((minpos, minimum))
minimum = np.Inf
return peaks
def fitSpline(self,degree=2):
"""
**SUMMARY**
A function to generate a spline curve fitting over the points in LineScan with
order of precision given by the parameter degree
**PARAMETERS**
* *degree* - the precision of the generated spline
**RETURNS**
The spline as a LineScan fitting over the initial values of LineScan
**EXAMPLE**
>>> import matplotlib.pyplot as plt
>>> img = Image("lenna")
        >>> ls = img.getLineScan(pt1=(10,10),pt2=(20,20)).normalize()
>>> spline = ls.fitSpline()
>>> plt.plot(ls)
>>> plt.show()
>>> plt.plot(spline)
>>> plt.show()
**NOTES**
Implementation taken from http://www.scipy.org/Cookbook/Interpolation
"""
if degree > 4:
degree = 4 # No significant improvement with respect to time usage
if degree < 1:
warnings.warn('LineScan.fitSpline - degree needs to be >= 1')
return None
retVal = None
y = np.array(self)
x = np.arange(0,len(y),1)
dx = 1
newx = np.arange(0,len(y)-1,pow(0.1,degree))
cj = sps.cspline1d(y)
retVal = sps.cspline1d_eval(cj,newx,dx=dx,x0=x[0])
return retVal
| bsd-3-clause |
chaluemwut/fbserver | venv/lib/python2.7/site-packages/scipy/stats/_discrete_distns.py | 7 | 20397 | #
# Author: Travis Oliphant 2002-2011 with contributions from
# SciPy Developers 2004-2011
#
from __future__ import division, print_function, absolute_import
from scipy import special
from scipy.special import entr, gammaln as gamln
from numpy import floor, ceil, log, exp, sqrt, log1p, expm1, tanh, cosh, sinh
import numpy as np
import numpy.random as mtrand
from ._distn_infrastructure import (
rv_discrete, _lazywhere, _ncx2_pdf, _ncx2_cdf, get_distribution_names)
class binom_gen(rv_discrete):
"""A binomial discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `binom` is::
binom.pmf(k) = choose(n, k) * p**k * (1-p)**(n-k)
for ``k`` in ``{0, 1,..., n}``.
`binom` takes ``n`` and ``p`` as shape parameters.
%(example)s
"""
def _rvs(self, n, p):
return mtrand.binomial(n, p, self._size)
def _argcheck(self, n, p):
self.b = n
return (n >= 0) & (p >= 0) & (p <= 1)
def _logpmf(self, x, n, p):
k = floor(x)
combiln = (gamln(n+1) - (gamln(k+1) + gamln(n-k+1)))
return combiln + special.xlogy(k, p) + special.xlog1py(n-k, -p)
def _pmf(self, x, n, p):
return exp(self._logpmf(x, n, p))
def _cdf(self, x, n, p):
k = floor(x)
vals = special.bdtr(k, n, p)
return vals
def _sf(self, x, n, p):
k = floor(x)
return special.bdtrc(k, n, p)
def _ppf(self, q, n, p):
vals = ceil(special.bdtrik(q, n, p))
vals1 = np.maximum(vals - 1, 0)
temp = special.bdtr(vals1, n, p)
return np.where(temp >= q, vals1, vals)
def _stats(self, n, p, moments='mv'):
q = 1.0 - p
mu = n * p
var = n * p * q
g1, g2 = None, None
if 's' in moments:
g1 = (q - p) / sqrt(var)
if 'k' in moments:
g2 = (1.0 - 6*p*q) / var
return mu, var, g1, g2
def _entropy(self, n, p):
k = np.r_[0:n + 1]
vals = self._pmf(k, n, p)
return np.sum(entr(vals), axis=0)
binom = binom_gen(name='binom')
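# Quick numeric check of the pmf above (sketch): binom.pmf(2, n=5, p=0.5)
# equals choose(5, 2) * 0.5**2 * 0.5**3 = 10 / 32 = 0.3125.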
class bernoulli_gen(binom_gen):
"""A Bernoulli discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `bernoulli` is::
bernoulli.pmf(k) = 1-p if k = 0
= p if k = 1
for ``k`` in ``{0, 1}``.
`bernoulli` takes ``p`` as shape parameter.
%(example)s
"""
def _rvs(self, p):
return binom_gen._rvs(self, 1, p)
def _argcheck(self, p):
return (p >= 0) & (p <= 1)
def _logpmf(self, x, p):
return binom._logpmf(x, 1, p)
def _pmf(self, x, p):
return binom._pmf(x, 1, p)
def _cdf(self, x, p):
return binom._cdf(x, 1, p)
def _sf(self, x, p):
return binom._sf(x, 1, p)
def _ppf(self, q, p):
return binom._ppf(q, 1, p)
def _stats(self, p):
return binom._stats(1, p)
def _entropy(self, p):
return entr(p) + entr(1-p)
bernoulli = bernoulli_gen(b=1, name='bernoulli')
class nbinom_gen(rv_discrete):
"""A negative binomial discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `nbinom` is::
nbinom.pmf(k) = choose(k+n-1, n-1) * p**n * (1-p)**k
for ``k >= 0``.
`nbinom` takes ``n`` and ``p`` as shape parameters.
%(example)s
"""
def _rvs(self, n, p):
return mtrand.negative_binomial(n, p, self._size)
def _argcheck(self, n, p):
return (n > 0) & (p >= 0) & (p <= 1)
def _pmf(self, x, n, p):
return exp(self._logpmf(x, n, p))
def _logpmf(self, x, n, p):
coeff = gamln(n+x) - gamln(x+1) - gamln(n)
return coeff + n*log(p) + special.xlog1py(x, -p)
def _cdf(self, x, n, p):
k = floor(x)
return special.betainc(n, k+1, p)
def _sf_skip(self, x, n, p):
# skip because special.nbdtrc doesn't work for 0<n<1
k = floor(x)
return special.nbdtrc(k, n, p)
def _ppf(self, q, n, p):
vals = ceil(special.nbdtrik(q, n, p))
vals1 = (vals-1).clip(0.0, np.inf)
temp = self._cdf(vals1, n, p)
return np.where(temp >= q, vals1, vals)
def _stats(self, n, p):
Q = 1.0 / p
P = Q - 1.0
mu = n*P
var = n*P*Q
g1 = (Q+P)/sqrt(n*P*Q)
g2 = (1.0 + 6*P*Q) / (n*P*Q)
return mu, var, g1, g2
nbinom = nbinom_gen(name='nbinom')
class geom_gen(rv_discrete):
"""A geometric discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `geom` is::
geom.pmf(k) = (1-p)**(k-1)*p
for ``k >= 1``.
`geom` takes ``p`` as shape parameter.
%(example)s
"""
def _rvs(self, p):
return mtrand.geometric(p, size=self._size)
def _argcheck(self, p):
return (p <= 1) & (p >= 0)
def _pmf(self, k, p):
return np.power(1-p, k-1) * p
def _logpmf(self, k, p):
return (k-1) * log(1-p) + log(p)
def _cdf(self, x, p):
k = floor(x)
return -expm1(log1p(-p)*k)
def _sf(self, x, p):
return np.exp(self._logsf(x, p))
def _logsf(self, x, p):
k = floor(x)
return k*log1p(-p)
def _ppf(self, q, p):
vals = ceil(log(1.0-q)/log(1-p))
temp = self._cdf(vals-1, p)
return np.where((temp >= q) & (vals > 0), vals-1, vals)
def _stats(self, p):
mu = 1.0/p
qr = 1.0-p
var = qr / p / p
g1 = (2.0-p) / sqrt(qr)
g2 = np.polyval([1, -6, 6], p)/(1.0-p)
return mu, var, g1, g2
geom = geom_gen(a=1, name='geom', longname="A geometric")
class hypergeom_gen(rv_discrete):
"""A hypergeometric discrete random variable.
The hypergeometric distribution models drawing objects from a bin.
M is the total number of objects, n is total number of Type I objects.
The random variate represents the number of Type I objects in N drawn
without replacement from the total population.
%(before_notes)s
Notes
-----
The probability mass function is defined as::
pmf(k, M, n, N) = choose(n, k) * choose(M - n, N - k) / choose(M, N),
for max(0, N - (M-n)) <= k <= min(n, N)
Examples
--------
>>> from scipy.stats import hypergeom
>>> import matplotlib.pyplot as plt
Suppose we have a collection of 20 animals, of which 7 are dogs. Then if
we want to know the probability of finding a given number of dogs if we
choose at random 12 of the 20 animals, we can initialize a frozen
distribution and plot the probability mass function:
>>> [M, n, N] = [20, 7, 12]
>>> rv = hypergeom(M, n, N)
>>> x = np.arange(0, n+1)
>>> pmf_dogs = rv.pmf(x)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(x, pmf_dogs, 'bo')
>>> ax.vlines(x, 0, pmf_dogs, lw=2)
>>> ax.set_xlabel('# of dogs in our group of chosen animals')
>>> ax.set_ylabel('hypergeom PMF')
>>> plt.show()
Instead of using a frozen distribution we can also use `hypergeom`
methods directly. To for example obtain the cumulative distribution
function, use:
>>> prb = hypergeom.cdf(x, M, n, N)
And to generate random numbers:
>>> R = hypergeom.rvs(M, n, N, size=10)
"""
def _rvs(self, M, n, N):
return mtrand.hypergeometric(n, M-n, N, size=self._size)
def _argcheck(self, M, n, N):
cond = rv_discrete._argcheck(self, M, n, N)
cond &= (n <= M) & (N <= M)
self.a = max(N-(M-n), 0)
self.b = min(n, N)
return cond
def _logpmf(self, k, M, n, N):
tot, good = M, n
bad = tot - good
return gamln(good+1) - gamln(good-k+1) - gamln(k+1) + gamln(bad+1) \
- gamln(bad-N+k+1) - gamln(N-k+1) - gamln(tot+1) + gamln(tot-N+1) \
+ gamln(N+1)
def _pmf(self, k, M, n, N):
# same as the following but numerically more precise
# return comb(good, k) * comb(bad, N-k) / comb(tot, N)
return exp(self._logpmf(k, M, n, N))
def _stats(self, M, n, N):
# tot, good, sample_size = M, n, N
# "wikipedia".replace('N', 'M').replace('n', 'N').replace('K', 'n')
M, n, N = 1.*M, 1.*n, 1.*N
m = M - n
p = n/M
mu = N*p
var = m*n*N*(M - N)*1.0/(M*M*(M-1))
g1 = (m - n)*(M-2*N) / (M-2.0) * sqrt((M-1.0) / (m*n*N*(M-N)))
g2 = M*(M+1) - 6.*N*(M-N) - 6.*n*m
g2 *= (M-1)*M*M
g2 += 6.*n*N*(M-N)*m*(5.*M-6)
g2 /= n * N * (M-N) * m * (M-2.) * (M-3.)
return mu, var, g1, g2
def _entropy(self, M, n, N):
k = np.r_[N - (M - n):min(n, N) + 1]
vals = self.pmf(k, M, n, N)
return np.sum(entr(vals), axis=0)
def _sf(self, k, M, n, N):
"""More precise calculation, 1 - cdf doesn't cut it."""
# This for loop is needed because `k` can be an array. If that's the
# case, the sf() method makes M, n and N arrays of the same shape. We
# therefore unpack all inputs args, so we can do the manual
# integration.
res = []
for quant, tot, good, draw in zip(k, M, n, N):
# Manual integration over probability mass function. More accurate
# than integrate.quad.
k2 = np.arange(quant + 1, draw + 1)
res.append(np.sum(self._pmf(k2, tot, good, draw)))
return np.asarray(res)
hypergeom = hypergeom_gen(name='hypergeom')
# FIXME: Fails _cdfvec
class logser_gen(rv_discrete):
"""A Logarithmic (Log-Series, Series) discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `logser` is::
logser.pmf(k) = - p**k / (k*log(1-p))
for ``k >= 1``.
`logser` takes ``p`` as shape parameter.
%(example)s
"""
def _rvs(self, p):
# looks wrong for p>0.5, too few k=1
# trying to use generic is worse, no k=1 at all
return mtrand.logseries(p, size=self._size)
def _argcheck(self, p):
return (p > 0) & (p < 1)
def _pmf(self, k, p):
return -np.power(p, k) * 1.0 / k / log(1 - p)
def _stats(self, p):
r = log(1 - p)
mu = p / (p - 1.0) / r
mu2p = -p / r / (p - 1.0)**2
var = mu2p - mu*mu
mu3p = -p / r * (1.0+p) / (1.0 - p)**3
mu3 = mu3p - 3*mu*mu2p + 2*mu**3
g1 = mu3 / np.power(var, 1.5)
mu4p = -p / r * (
1.0 / (p-1)**2 - 6*p / (p - 1)**3 + 6*p*p / (p-1)**4)
mu4 = mu4p - 4*mu3p*mu + 6*mu2p*mu*mu - 3*mu**4
g2 = mu4 / var**2 - 3.0
return mu, var, g1, g2
logser = logser_gen(a=1, name='logser', longname='A logarithmic')
class poisson_gen(rv_discrete):
"""A Poisson discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `poisson` is::
poisson.pmf(k) = exp(-mu) * mu**k / k!
for ``k >= 0``.
`poisson` takes ``mu`` as shape parameter.
%(example)s
"""
def _rvs(self, mu):
return mtrand.poisson(mu, self._size)
def _logpmf(self, k, mu):
Pk = k*log(mu)-gamln(k+1) - mu
return Pk
def _pmf(self, k, mu):
return exp(self._logpmf(k, mu))
def _cdf(self, x, mu):
k = floor(x)
return special.pdtr(k, mu)
def _sf(self, x, mu):
k = floor(x)
return special.pdtrc(k, mu)
def _ppf(self, q, mu):
vals = ceil(special.pdtrik(q, mu))
vals1 = np.maximum(vals - 1, 0)
temp = special.pdtr(vals1, mu)
return np.where(temp >= q, vals1, vals)
def _stats(self, mu):
var = mu
tmp = np.asarray(mu)
g1 = sqrt(1.0 / tmp)
g2 = 1.0 / tmp
return mu, var, g1, g2
poisson = poisson_gen(name="poisson", longname='A Poisson')
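# Quick numeric check of the pmf above (sketch): poisson.pmf(0, mu=1.0)
# equals exp(-1) * 1**0 / 0! = exp(-1), about 0.3679.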
class planck_gen(rv_discrete):
"""A Planck discrete exponential random variable.
%(before_notes)s
Notes
-----
The probability mass function for `planck` is::
planck.pmf(k) = (1-exp(-lambda_))*exp(-lambda_*k)
for ``k*lambda_ >= 0``.
`planck` takes ``lambda_`` as shape parameter.
%(example)s
"""
def _argcheck(self, lambda_):
if (lambda_ > 0):
self.a = 0
self.b = np.inf
return 1
elif (lambda_ < 0):
self.a = -np.inf
self.b = 0
return 1
else:
return 0
def _pmf(self, k, lambda_):
fact = (1-exp(-lambda_))
return fact*exp(-lambda_*k)
def _cdf(self, x, lambda_):
k = floor(x)
return 1-exp(-lambda_*(k+1))
def _ppf(self, q, lambda_):
vals = ceil(-1.0/lambda_ * log1p(-q)-1)
vals1 = (vals-1).clip(self.a, np.inf)
temp = self._cdf(vals1, lambda_)
return np.where(temp >= q, vals1, vals)
def _stats(self, lambda_):
mu = 1/(exp(lambda_)-1)
var = exp(-lambda_)/(expm1(-lambda_))**2
g1 = 2*cosh(lambda_/2.0)
g2 = 4+2*cosh(lambda_)
return mu, var, g1, g2
def _entropy(self, lambda_):
l = lambda_
C = (1-exp(-l))
return l*exp(-l)/C - log(C)
planck = planck_gen(name='planck', longname='A discrete exponential ')
class boltzmann_gen(rv_discrete):
"""A Boltzmann (Truncated Discrete Exponential) random variable.
%(before_notes)s
Notes
-----
The probability mass function for `boltzmann` is::
boltzmann.pmf(k) = (1-exp(-lambda_)*exp(-lambda_*k)/(1-exp(-lambda_*N))
for ``k = 0,..., N-1``.
`boltzmann` takes ``lambda_`` and ``N`` as shape parameters.
%(example)s
"""
def _pmf(self, k, lambda_, N):
fact = (1-exp(-lambda_))/(1-exp(-lambda_*N))
return fact*exp(-lambda_*k)
def _cdf(self, x, lambda_, N):
k = floor(x)
return (1-exp(-lambda_*(k+1)))/(1-exp(-lambda_*N))
def _ppf(self, q, lambda_, N):
qnew = q*(1-exp(-lambda_*N))
vals = ceil(-1.0/lambda_ * log(1-qnew)-1)
vals1 = (vals-1).clip(0.0, np.inf)
temp = self._cdf(vals1, lambda_, N)
return np.where(temp >= q, vals1, vals)
def _stats(self, lambda_, N):
z = exp(-lambda_)
zN = exp(-lambda_*N)
mu = z/(1.0-z)-N*zN/(1-zN)
var = z/(1.0-z)**2 - N*N*zN/(1-zN)**2
trm = (1-zN)/(1-z)
trm2 = (z*trm**2 - N*N*zN)
g1 = z*(1+z)*trm**3 - N**3*zN*(1+zN)
g1 = g1 / trm2**(1.5)
g2 = z*(1+4*z+z*z)*trm**4 - N**4 * zN*(1+4*zN+zN*zN)
g2 = g2 / trm2 / trm2
return mu, var, g1, g2
boltzmann = boltzmann_gen(name='boltzmann',
longname='A truncated discrete exponential ')
class randint_gen(rv_discrete):
"""A uniform discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `randint` is::
randint.pmf(k) = 1./(high - low)
for ``k = low, ..., high - 1``.
`randint` takes ``low`` and ``high`` as shape parameters.
Note the difference to the numpy ``random_integers`` which
returns integers on a *closed* interval ``[low, high]``.
%(example)s
"""
def _argcheck(self, low, high):
self.a = low
self.b = high - 1
return (high > low)
def _pmf(self, k, low, high):
p = np.ones_like(k) / (high - low)
return np.where((k >= low) & (k < high), p, 0.)
def _cdf(self, x, low, high):
k = floor(x)
return (k - low + 1.) / (high - low)
def _ppf(self, q, low, high):
vals = ceil(q * (high - low) + low) - 1
vals1 = (vals - 1).clip(low, high)
temp = self._cdf(vals1, low, high)
return np.where(temp >= q, vals1, vals)
def _stats(self, low, high):
m2, m1 = np.asarray(high), np.asarray(low)
mu = (m2 + m1 - 1.0) / 2
d = m2 - m1
var = (d*d - 1) / 12.0
g1 = 0.0
g2 = -6.0/5.0 * (d*d + 1.0) / (d*d - 1.0)
return mu, var, g1, g2
def _rvs(self, low, high=None):
"""An array of *size* random integers >= ``low`` and < ``high``.
If ``high`` is ``None``, then range is >=0 and < low
"""
return mtrand.randint(low, high, self._size)
def _entropy(self, low, high):
return log(high - low)
randint = randint_gen(name='randint', longname='A discrete uniform '
'(random integer)')
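# Added illustration (not part of the original module): the support of
# `randint` is the half-open integer range [low, high), so for example
#
#     >>> randint.pmf([2, 6], low=2, high=6)
#     array([ 0.25,  0.  ])
#
# i.e. k = 6 has zero probability, in contrast to numpy's random_integers,
# which draws from the closed interval [low, high].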
# FIXME: problems sampling.
class zipf_gen(rv_discrete):
"""A Zipf discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `zipf` is::
zipf.pmf(k, a) = 1/(zeta(a) * k**a)
for ``k >= 1``.
`zipf` takes ``a`` as shape parameter.
%(example)s
"""
def _rvs(self, a):
return mtrand.zipf(a, size=self._size)
def _argcheck(self, a):
return a > 1
def _pmf(self, k, a):
Pk = 1.0 / special.zeta(a, 1) / k**a
return Pk
def _munp(self, n, a):
return _lazywhere(
a > n + 1, (a, n),
lambda a, n: special.zeta(a - n, 1) / special.zeta(a, 1),
np.inf)
zipf = zipf_gen(a=1, name='zipf', longname='A Zipf')
class dlaplace_gen(rv_discrete):
"""A Laplacian discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `dlaplace` is::
dlaplace.pmf(k) = tanh(a/2) * exp(-a*abs(k))
for ``a > 0``.
`dlaplace` takes ``a`` as shape parameter.
%(example)s
"""
def _pmf(self, k, a):
return tanh(a/2.0) * exp(-a * abs(k))
def _cdf(self, x, a):
k = floor(x)
f = lambda k, a: 1.0 - exp(-a * k) / (exp(a) + 1)
f2 = lambda k, a: exp(a * (k+1)) / (exp(a) + 1)
return _lazywhere(k >= 0, (k, a), f=f, f2=f2)
def _ppf(self, q, a):
const = 1 + exp(a)
vals = ceil(np.where(q < 1.0 / (1 + exp(-a)), log(q*const) / a - 1,
-log((1-q) * const) / a))
vals1 = vals - 1
return np.where(self._cdf(vals1, a) >= q, vals1, vals)
def _stats(self, a):
ea = exp(a)
mu2 = 2.*ea/(ea-1.)**2
mu4 = 2.*ea*(ea**2+10.*ea+1.) / (ea-1.)**4
return 0., mu2, 0., mu4/mu2**2 - 3.
def _entropy(self, a):
return a / sinh(a) - log(tanh(a/2.0))
dlaplace = dlaplace_gen(a=-np.inf,
name='dlaplace', longname='A discrete Laplacian')
class skellam_gen(rv_discrete):
"""A Skellam discrete random variable.
%(before_notes)s
Notes
-----
Probability distribution of the difference of two correlated or
uncorrelated Poisson random variables.
Let k1 and k2 be two Poisson-distributed r.v. with expected values
lam1 and lam2. Then, ``k1 - k2`` follows a Skellam distribution with
parameters ``mu1 = lam1 - rho*sqrt(lam1*lam2)`` and
``mu2 = lam2 - rho*sqrt(lam1*lam2)``, where rho is the correlation
coefficient between k1 and k2. If the two Poisson-distributed r.v.
are independent then ``rho = 0``.
Parameters mu1 and mu2 must be strictly positive.
For details see: http://en.wikipedia.org/wiki/Skellam_distribution
`skellam` takes ``mu1`` and ``mu2`` as shape parameters.
%(example)s
"""
def _rvs(self, mu1, mu2):
n = self._size
return mtrand.poisson(mu1, n) - mtrand.poisson(mu2, n)
def _pmf(self, x, mu1, mu2):
px = np.where(x < 0,
_ncx2_pdf(2*mu2, 2*(1-x), 2*mu1)*2,
_ncx2_pdf(2*mu1, 2*(1+x), 2*mu2)*2)
# ncx2.pdf() returns nan's for extremely low probabilities
return px
def _cdf(self, x, mu1, mu2):
x = floor(x)
px = np.where(x < 0,
_ncx2_cdf(2*mu2, -2*x, 2*mu1),
1-_ncx2_cdf(2*mu1, 2*(x+1), 2*mu2))
return px
def _stats(self, mu1, mu2):
mean = mu1 - mu2
var = mu1 + mu2
g1 = mean / sqrt((var)**3)
g2 = 1 / var
return mean, var, g1, g2
skellam = skellam_gen(a=-np.inf, name="skellam", longname='A Skellam')
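# Added sanity check (illustrative only): the Skellam mean and variance are
# mu1 - mu2 and mu1 + mu2 respectively, which can be verified by sampling:
#
#     >>> samples = skellam.rvs(mu1=3.0, mu2=1.0, size=100000)
#     >>> samples.mean(), samples.var()   # approximately (2.0, 4.0)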
# Collect names of classes and objects in this module.
pairs = list(globals().items())
_distn_names, _distn_gen_names = get_distribution_names(pairs, rv_discrete)
__all__ = _distn_names + _distn_gen_names
| apache-2.0 |
pelson/cartopy | lib/cartopy/examples/always_circular_stereo.py | 4 | 1645 | """
Custom Boundary Shape
---------------------
This example demonstrates how a custom shape geometry may be used
instead of the projection's default boundary.
In this instance, we define the boundary as a circle in axes coordinates.
This means that no matter the extent of the map itself, the boundary will
always be a circle.
"""
__tags__ = ['Lines and polygons']
import matplotlib.path as mpath
import matplotlib.pyplot as plt
import numpy as np
import cartopy.crs as ccrs
import cartopy.feature as cfeature
def main():
fig = plt.figure(figsize=[10, 5])
ax1 = fig.add_subplot(1, 2, 1, projection=ccrs.SouthPolarStereo())
ax2 = fig.add_subplot(1, 2, 2, projection=ccrs.SouthPolarStereo(),
sharex=ax1, sharey=ax1)
fig.subplots_adjust(bottom=0.05, top=0.95,
left=0.04, right=0.95, wspace=0.02)
# Limit the map to -60 degrees latitude and below.
ax1.set_extent([-180, 180, -90, -60], ccrs.PlateCarree())
ax1.add_feature(cfeature.LAND)
ax1.add_feature(cfeature.OCEAN)
ax1.gridlines()
ax2.gridlines()
ax2.add_feature(cfeature.LAND)
ax2.add_feature(cfeature.OCEAN)
# Compute a circle in axes coordinates, which we can use as a boundary
# for the map. We can pan/zoom as much as we like - the boundary will be
# permanently circular.
theta = np.linspace(0, 2*np.pi, 100)
center, radius = [0.5, 0.5], 0.5
verts = np.vstack([np.sin(theta), np.cos(theta)]).T
circle = mpath.Path(verts * radius + center)
ax2.set_boundary(circle, transform=ax2.transAxes)
plt.show()
if __name__ == '__main__':
main()
| lgpl-3.0 |
davidgroves/copperdog | roles/jupyter/files/jupyter_notebook_config.py | 1 | 20103 | # Configuration file for jupyter-notebook.
#------------------------------------------------------------------------------
# Configurable configuration
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# LoggingConfigurable configuration
#------------------------------------------------------------------------------
# A parent class for Configurables that log.
#
# Subclasses have a log trait, and the default behavior is to get the logger
# from the currently running Application.
#------------------------------------------------------------------------------
# SingletonConfigurable configuration
#------------------------------------------------------------------------------
# A configurable that only allows one instance.
#
# This class is for classes that should only have one instance of itself or
# *any* subclass. To create and retrieve such a class use the
# :meth:`SingletonConfigurable.instance` method.
#------------------------------------------------------------------------------
# Application configuration
#------------------------------------------------------------------------------
# This is an application.
# The date format used by logging formatters for %(asctime)s
# c.Application.log_datefmt = '%Y-%m-%d %H:%M:%S'
# The Logging format template
# c.Application.log_format = '[%(name)s]%(highlevel)s %(message)s'
# Set the log level by value or name.
# c.Application.log_level = 30
#------------------------------------------------------------------------------
# JupyterApp configuration
#------------------------------------------------------------------------------
# Base class for Jupyter applications
# Answer yes to any prompts.
# c.JupyterApp.answer_yes = False
# Full path of a config file.
# c.JupyterApp.config_file = ''
# Specify a config file to load.
# c.JupyterApp.config_file_name = ''
# Generate default config file.
# c.JupyterApp.generate_config = False
#------------------------------------------------------------------------------
# NotebookApp configuration
#------------------------------------------------------------------------------
# Set the Access-Control-Allow-Credentials: true header
# c.NotebookApp.allow_credentials = False
# Set the Access-Control-Allow-Origin header
#
# Use '*' to allow any origin to access your server.
#
# Takes precedence over allow_origin_pat.
# c.NotebookApp.allow_origin = ''
# Use a regular expression for the Access-Control-Allow-Origin header
#
# Requests from an origin matching the expression will get replies with:
#
# Access-Control-Allow-Origin: origin
#
# where `origin` is the origin of the request.
#
# Ignored if allow_origin is set.
# c.NotebookApp.allow_origin_pat = ''
# DEPRECATED use base_url
# c.NotebookApp.base_project_url = '/'
# The base URL for the notebook server.
#
# Leading and trailing slashes can be omitted, and will automatically be added.
c.NotebookApp.base_url = '/jupyter/'
# Specify what command to use to invoke a web browser when opening the notebook.
# If not specified, the default browser will be determined by the `webbrowser`
# standard library module, which allows setting of the BROWSER environment
# variable to override it.
# c.NotebookApp.browser = ''
# The full path to an SSL/TLS certificate file.
c.NotebookApp.certfile = '/etc/ssl/bach.copperdog.org.crt'
# The full path to a certificate authority certificate for SSL/TLS client
# authentication.
# c.NotebookApp.client_ca = ''
# The config manager class to use
# c.NotebookApp.config_manager_class = 'notebook.services.config.manager.ConfigManager'
# The notebook manager class to use.
# c.NotebookApp.contents_manager_class = 'notebook.services.contents.filemanager.FileContentsManager'
# The random bytes used to secure cookies. By default this is a new random
# number every time you start the Notebook. Set it to a value in a config file
# to enable logins to persist across server sessions.
#
# Note: Cookie secrets should be kept private, do not share config files with
# cookie_secret stored in plaintext (you can read the value from a file).
# c.NotebookApp.cookie_secret = b''
# The file where the cookie secret is stored.
# c.NotebookApp.cookie_secret_file = ''
# The default URL to redirect to from `/`
# c.NotebookApp.default_url = '/tree'
# Whether to enable MathJax for typesetting math/TeX
#
# MathJax is the javascript library Jupyter uses to render math/LaTeX. It is
# very large, so you may want to disable it if you have a slow internet
# connection, or for offline use of the notebook.
#
# When disabled, equations etc. will appear as their untransformed TeX source.
# c.NotebookApp.enable_mathjax = True
# extra paths to look for Javascript notebook extensions
# c.NotebookApp.extra_nbextensions_path = []
# Extra paths to search for serving static files.
#
# This allows adding javascript/css to be available from the notebook server
# machine, or overriding individual files in the IPython
# c.NotebookApp.extra_static_paths = []
# Extra paths to search for serving jinja templates.
#
# Can be used to override templates from notebook.templates.
# c.NotebookApp.extra_template_paths = []
#
# c.NotebookApp.file_to_run = ''
# Use minified JS file or not, mainly used during dev to avoid JS recompilation
# c.NotebookApp.ignore_minified_js = False
# The IP address the notebook server will listen on.
c.NotebookApp.ip = '*'
# Supply extra arguments that will be passed to Jinja environment.
# c.NotebookApp.jinja_environment_options = {}
# Extra variables to supply to jinja templates when rendering.
# c.NotebookApp.jinja_template_vars = {}
# The kernel manager class to use.
# c.NotebookApp.kernel_manager_class = 'notebook.services.kernels.kernelmanager.MappingKernelManager'
# The kernel spec manager class to use. Should be a subclass of
# `jupyter_client.kernelspec.KernelSpecManager`.
#
# The Api of KernelSpecManager is provisional and might change without warning
# between this version of Jupyter and the next stable one.
# c.NotebookApp.kernel_spec_manager_class = 'jupyter_client.kernelspec.KernelSpecManager'
# The full path to a private key file for usage with SSL/TLS.
c.NotebookApp.keyfile = '/etc/ssl/bach.copperdog.org.key'
# The login handler class to use.
# c.NotebookApp.login_handler_class = 'notebook.auth.login.LoginHandler'
# The logout handler class to use.
# c.NotebookApp.logout_handler_class = 'notebook.auth.logout.LogoutHandler'
# The url for MathJax.js.
# c.NotebookApp.mathjax_url = ''
# The directory to use for notebooks and kernels.
c.NotebookApp.notebook_dir = '/data/jupyter/'
# Whether to open in a browser after starting. The specific browser used is
# platform dependent and determined by the python standard library `webbrowser`
# module, unless it is overridden using the --browser (NotebookApp.browser)
# configuration option.
c.NotebookApp.open_browser = False
# Hashed password to use for web authentication.
#
# To generate, type in a python/IPython shell:
#
# from notebook.auth import passwd; passwd()
#
# The string should be of the form type:salt:hashed-password.
#c.NotebookApp.password = 'sha1:b32feb3f51c7:aa200891d28074c6dbd0a3435edc8854f496c1e4'
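# For example, a hash-generating session might look like the following
# (placeholder salt/hash shown -- generate your own):
#
#   In [1]: from notebook.auth import passwd
#   In [2]: passwd()
#   Enter password:
#   Verify password:
#   Out[2]: 'sha1:<salt>:<hashed-password>'
#
# Paste the returned string as the value of c.NotebookApp.password above.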
# The port the notebook server will listen on.
# c.NotebookApp.port = 8888
# The number of additional ports to try if the specified port is not available.
c.NotebookApp.port_retries = 0
# DISABLED: use %pylab or %matplotlib in the notebook to enable matplotlib.
# c.NotebookApp.pylab = 'disabled'
# Reraise exceptions encountered loading server extensions?
# c.NotebookApp.reraise_server_extension_failures = False
# Python modules to load as notebook server extensions. This is an experimental
# API, and may change in future releases.
# c.NotebookApp.server_extensions = []
# The session manager class to use.
# c.NotebookApp.session_manager_class = 'notebook.services.sessions.sessionmanager.SessionManager'
# Supply SSL options for the tornado HTTPServer. See the tornado docs for
# details.
# c.NotebookApp.ssl_options = {}
# Supply overrides for the tornado.web.Application that the Jupyter notebook
# uses.
# c.NotebookApp.tornado_settings = {}
# Whether to trust or not X-Scheme/X-Forwarded-Proto and X-Real-Ip/X-Forwarded-
# For headers sent by the upstream reverse proxy. Necessary if the proxy handles
# SSL
# c.NotebookApp.trust_xheaders = False
# DEPRECATED, use tornado_settings
# c.NotebookApp.webapp_settings = {}
# The base URL for websockets, if it differs from the HTTP server (hint: it
# almost certainly doesn't).
#
# Should be in the form of an HTTP origin: ws[s]://hostname[:port]
# c.NotebookApp.websocket_url = ''
#------------------------------------------------------------------------------
# ConnectionFileMixin configuration
#------------------------------------------------------------------------------
# Mixin for configurable classes that work with connection files
# JSON file in which to store connection info [default: kernel-<pid>.json]
#
# This file will contain the IP, ports, and authentication key needed to connect
# clients to this kernel. By default, this file will be created in the security
# dir of the current profile, but can be specified by absolute path.
# c.ConnectionFileMixin.connection_file = ''
# set the control (ROUTER) port [default: random]
# c.ConnectionFileMixin.control_port = 0
# set the heartbeat port [default: random]
# c.ConnectionFileMixin.hb_port = 0
# set the iopub (PUB) port [default: random]
# c.ConnectionFileMixin.iopub_port = 0
# Set the kernel's IP address [default localhost]. If the IP address is
# something other than localhost, then Consoles on other machines will be able
# to connect to the Kernel, so be careful!
# c.ConnectionFileMixin.ip = ''
# set the shell (ROUTER) port [default: random]
# c.ConnectionFileMixin.shell_port = 0
# set the stdin (ROUTER) port [default: random]
# c.ConnectionFileMixin.stdin_port = 0
#
# c.ConnectionFileMixin.transport = 'tcp'
#------------------------------------------------------------------------------
# KernelManager configuration
#------------------------------------------------------------------------------
# Manages a single kernel in a subprocess on this host.
#
# This version starts kernels with Popen.
# Should we autorestart the kernel if it dies.
# c.KernelManager.autorestart = False
# DEPRECATED: Use kernel_name instead.
#
# The Popen Command to launch the kernel. Override this if you have a custom
# kernel. If kernel_cmd is specified in a configuration file, Jupyter does not
# pass any arguments to the kernel, because it cannot make any assumptions about
# the arguments that the kernel understands. In particular, this means that the
# kernel does not receive the option --debug if it given on the Jupyter command
# line.
# c.KernelManager.kernel_cmd = []
#------------------------------------------------------------------------------
# Session configuration
#------------------------------------------------------------------------------
# Object for handling serialization and sending of messages.
#
# The Session object handles building messages and sending them with ZMQ sockets
# or ZMQStream objects. Objects can communicate with each other over the
# network via Session objects, and only need to work with the dict-based IPython
# message spec. The Session will handle serialization/deserialization, security,
# and metadata.
#
# Sessions support configurable serialization via packer/unpacker traits, and
# signing with HMAC digests via the key/keyfile traits.
#
# Parameters ----------
#
# debug : bool
# whether to trigger extra debugging statements
# packer/unpacker : str : 'json', 'pickle' or import_string
# importstrings for methods to serialize message parts. If just
# 'json' or 'pickle', predefined JSON and pickle packers will be used.
# Otherwise, the entire importstring must be used.
#
# The functions must accept at least valid JSON input, and output *bytes*.
#
# For example, to use msgpack:
# packer = 'msgpack.packb', unpacker='msgpack.unpackb'
# pack/unpack : callables
# You can also set the pack/unpack callables for serialization directly.
# session : bytes
# the ID of this Session object. The default is to generate a new UUID.
# username : unicode
# username added to message headers. The default is to ask the OS.
# key : bytes
# The key used to initialize an HMAC signature. If unset, messages
# will not be signed or checked.
# keyfile : filepath
# The file containing a key. If this is set, `key` will be initialized
# to the contents of the file.
# Threshold (in bytes) beyond which an object's buffer should be extracted to
# avoid pickling.
# c.Session.buffer_threshold = 1024
# Threshold (in bytes) beyond which a buffer should be sent without copying.
# c.Session.copy_threshold = 65536
# Debug output in the Session
# c.Session.debug = False
# The maximum number of digests to remember.
#
# The digest history will be culled when it exceeds this value.
# c.Session.digest_history_size = 65536
# The maximum number of items for a container to be introspected for custom
# serialization. Containers larger than this are pickled outright.
# c.Session.item_threshold = 64
# execution key, for signing messages.
# c.Session.key = b''
# path to file containing execution key.
# c.Session.keyfile = ''
# Metadata dictionary, which serves as the default top-level metadata dict for
# each message.
# c.Session.metadata = {}
# The name of the packer for serializing messages. Should be one of 'json',
# 'pickle', or an import name for a custom callable serializer.
# c.Session.packer = 'json'
# The UUID identifying this session.
# c.Session.session = ''
# The digest scheme used to construct the message signatures. Must have the form
# 'hmac-HASH'.
# c.Session.signature_scheme = 'hmac-sha256'
# The name of the unpacker for unserializing messages. Only used with custom
# functions for `packer`.
# c.Session.unpacker = 'json'
# Username for the Session. Default is your system username.
# c.Session.username = 'jupyter'
#------------------------------------------------------------------------------
# MultiKernelManager configuration
#------------------------------------------------------------------------------
# A class for managing multiple kernels.
# The name of the default kernel to start
# c.MultiKernelManager.default_kernel_name = 'python3'
# The kernel manager class. This is configurable to allow subclassing of the
# KernelManager for customized behavior.
# c.MultiKernelManager.kernel_manager_class = 'jupyter_client.ioloop.IOLoopKernelManager'
#------------------------------------------------------------------------------
# MappingKernelManager configuration
#------------------------------------------------------------------------------
# A KernelManager that handles notebook mapping and HTTP error handling
#
# c.MappingKernelManager.root_dir = ''
#------------------------------------------------------------------------------
# ContentsManager configuration
#------------------------------------------------------------------------------
# Base class for serving files and directories.
#
# This serves any text or binary file, as well as directories, with special
# handling for JSON notebook documents.
#
# Most APIs take a path argument, which is always an API-style unicode path, and
# always refers to a directory.
#
# - unicode, not url-escaped
# - '/'-separated
# - leading and trailing '/' will be stripped
# - if unspecified, path defaults to '',
# indicating the root path.
#
# c.ContentsManager.checkpoints = None
#
# c.ContentsManager.checkpoints_class = 'notebook.services.contents.checkpoints.Checkpoints'
#
# c.ContentsManager.checkpoints_kwargs = {}
# Glob patterns to hide in file and directory listings.
# c.ContentsManager.hide_globs = ['__pycache__', '*.pyc', '*.pyo', '.DS_Store', '*.so', '*.dylib', '*~']
# Python callable or importstring thereof
#
# To be called on a contents model prior to save.
#
# This can be used to process the structure, such as removing notebook outputs
# or other side effects that should not be saved.
#
# It will be called as (all arguments passed by keyword)::
#
# hook(path=path, model=model, contents_manager=self)
#
# - model: the model to be saved. Includes file contents.
# Modifying this dict will affect the file that is stored.
# - path: the API path of the save destination
# - contents_manager: this ContentsManager instance
# c.ContentsManager.pre_save_hook = None
# The base name used when creating untitled directories.
# c.ContentsManager.untitled_directory = 'Untitled Folder'
# The base name used when creating untitled files.
# c.ContentsManager.untitled_file = 'untitled'
# The base name used when creating untitled notebooks.
# c.ContentsManager.untitled_notebook = 'Untitled'
#------------------------------------------------------------------------------
# FileManagerMixin configuration
#------------------------------------------------------------------------------
# Mixin for ContentsAPI classes that interact with the filesystem.
#
# Provides facilities for reading, writing, and copying both notebooks and
# generic files.
#
# Shared by FileContentsManager and FileCheckpoints.
#
# Note: Classes using this mixin must provide the following attributes:
#
# root_dir : unicode
# A directory against which API-style paths are to be resolved.
#
# log : logging.Logger
# By default notebooks are saved to disk via a temporary file which, if
# successfully written, then replaces the old one. This procedure, namely
# 'atomic_writing', causes some bugs on file systems without operation order
# enforcement (like some networked filesystems). If set to False, the new
# notebook is written directly over the old one, which could fail (e.g. full
# filesystem or exceeded quota).
# c.FileManagerMixin.use_atomic_writing = True
#------------------------------------------------------------------------------
# FileContentsManager configuration
#------------------------------------------------------------------------------
# Python callable or importstring thereof
#
# to be called on the path of a file just saved.
#
# This can be used to process the file on disk, such as converting the notebook
# to a script or HTML via nbconvert.
#
# It will be called as (all arguments passed by keyword)::
#
# hook(os_path=os_path, model=model, contents_manager=instance)
#
# - path: the filesystem path to the file just written - model: the model
# representing the file - contents_manager: this ContentsManager instance
# c.FileContentsManager.post_save_hook = None
#
# c.FileContentsManager.root_dir = ''
# DEPRECATED, use post_save_hook
# c.FileContentsManager.save_script = False
#------------------------------------------------------------------------------
# NotebookNotary configuration
#------------------------------------------------------------------------------
# A class for computing and verifying notebook signatures.
# The hashing algorithm used to sign notebooks.
# c.NotebookNotary.algorithm = 'sha256'
# The number of notebook signatures to cache. When the number of signatures
# exceeds this value, the oldest 25% of signatures will be culled.
# c.NotebookNotary.cache_size = 65535
# The sqlite file in which to store notebook signatures. By default, this will
# be in your Jupyter runtime directory. You can set it to ':memory:' to disable
# sqlite writing to the filesystem.
# c.NotebookNotary.db_file = ''
# The secret key with which notebooks are signed.
# c.NotebookNotary.secret = b''
# The file where the secret key is stored.
# c.NotebookNotary.secret_file = ''
#------------------------------------------------------------------------------
# KernelSpecManager configuration
#------------------------------------------------------------------------------
# Whitelist of allowed kernel names.
#
# By default, all installed kernels are allowed.
# c.KernelSpecManager.whitelist = set()
| apache-2.0 |
bert9bert/statsmodels | examples/python/wls.py | 33 | 2675 |
## Weighted Least Squares
from __future__ import print_function
import numpy as np
from scipy import stats
import statsmodels.api as sm
import matplotlib.pyplot as plt
from statsmodels.sandbox.regression.predstd import wls_prediction_std
from statsmodels.iolib.table import (SimpleTable, default_txt_fmt)
np.random.seed(1024)
# ## WLS Estimation
#
# ### Artificial data: Heteroscedasticity 2 groups
#
# Model assumptions:
#
# * Misspecification: true model is quadratic, estimate only linear
# * Independent noise/error term
# * Two groups for error variance, low and high variance groups
nsample = 50
x = np.linspace(0, 20, nsample)
X = np.column_stack((x, (x - 5)**2))
X = sm.add_constant(X)
beta = [5., 0.5, -0.01]
sig = 0.5
w = np.ones(nsample)
w[nsample * 6/10:] = 3
y_true = np.dot(X, beta)
e = np.random.normal(size=nsample)
y = y_true + sig * w * e
X = X[:,[0,1]]
# ### WLS knowing the true variance ratio of heteroscedasticity
mod_wls = sm.WLS(y, X, weights=1./w)
res_wls = mod_wls.fit()
print(res_wls.summary())
# ## OLS vs. WLS
#
# Estimate an OLS model for comparison:
res_ols = sm.OLS(y, X).fit()
print(res_ols.params)
print(res_wls.params)
# Compare the WLS standard errors to heteroscedasticity corrected OLS standard errors:
se = np.vstack([[res_wls.bse], [res_ols.bse], [res_ols.HC0_se],
[res_ols.HC1_se], [res_ols.HC2_se], [res_ols.HC3_se]])
se = np.round(se,4)
colnames = ['x1', 'const']
rownames = ['WLS', 'OLS', 'OLS_HC0', 'OLS_HC1', 'OLS_HC2', 'OLS_HC3']
tabl = SimpleTable(se, colnames, rownames, txt_fmt=default_txt_fmt)
print(tabl)
# Calculate OLS prediction interval:
covb = res_ols.cov_params()
prediction_var = res_ols.mse_resid + (X * np.dot(covb,X.T).T).sum(1)
prediction_std = np.sqrt(prediction_var)
tppf = stats.t.ppf(0.975, res_ols.df_resid)
prstd_ols, iv_l_ols, iv_u_ols = wls_prediction_std(res_ols)
# Draw a plot to compare predicted values in WLS and OLS:
prstd, iv_l, iv_u = wls_prediction_std(res_wls)
fig, ax = plt.subplots()
ax.plot(x, y, 'o', label="Data")
ax.plot(x, y_true, 'b-', label="True")
# OLS
ax.plot(x, res_ols.fittedvalues, 'r--')
ax.plot(x, iv_u_ols, 'r--', label="OLS")
ax.plot(x, iv_l_ols, 'r--')
# WLS
ax.plot(x, res_wls.fittedvalues, 'g--.')
ax.plot(x, iv_u, 'g--', label="WLS")
ax.plot(x, iv_l, 'g--')
ax.legend(loc="best");
# ## Feasible Weighted Least Squares (2-stage FWLS)
resid1 = res_ols.resid[w==1.]
var1 = resid1.var(ddof=int(res_ols.df_model)+1)
resid2 = res_ols.resid[w!=1.]
var2 = resid2.var(ddof=int(res_ols.df_model)+1)
w_est = w.copy()
w_est[w!=1.] = np.sqrt(var2) / np.sqrt(var1)
res_fwls = sm.WLS(y, X, 1./w_est).fit()
print(res_fwls.summary())
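# Added sanity check (not part of the original example): compare the FWLS
# estimates against the first two entries of the true `beta` used to simulate
# the data. The fitted model omits the quadratic term, so some bias is expected.
print(beta[:2])
print(res_fwls.params)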
| bsd-3-clause |
nddsg/TreeDecomps | xplodnTree/tdec/tredec.edgelist_dimacs.py | 1 | 6413 | __version__="0.1.0"
# ToDo:
# [] process mult dimacs.trees to hrg
import sys
import traceback
import argparse
import os
import glob
import networkx as nx
import pandas as pd
from PHRG import graph_checks
import subprocess
import math
import graph_sampler as gs
global args
def get_parser ():
parser = argparse.ArgumentParser(description='Given an edgelist and PEO heuristic perform tree decomposition')
parser.add_argument('--orig', required=True, help='input the reference graph in edgelist format')
parser.add_argument('--version', action='version', version=__version__)
return parser
def dimacs_nddgo_tree(dimacsfnm_lst, heuristic):
# print heuristic,dimacsfnm_lst
for dimacsfname in dimacsfnm_lst:
nddgoout = ""
args = ["bin/mac/serial_wis -f {} -nice -{} -w {}.tree".format(dimacsfname, heuristic, dimacsfname)]
while not nddgoout:
popen = subprocess.Popen(args, stdout=subprocess.PIPE, shell=True)
popen.wait()
# output = popen.stdout.read()
out, err = popen.communicate()
nddgoout = out.split('\n')
print nddgoout
return dimacsfname+".tree"
def load_edgelist(gfname):
import pandas as pd
try:
edglst = pd.read_csv(gfname, comment='%', delimiter='\t')
# print edglst.shape
if edglst.shape[1]==1: edglst = pd.read_csv(gfname, comment='%', delimiter="\s+")
except Exception, e:
print "EXCEPTION:",str(e)
traceback.print_exc()
sys.exit(1)
if edglst.shape[1] == 3:
edglst.columns = ['src', 'trg', 'wt']
elif edglst.shape[1] == 4:
edglst.columns = ['src', 'trg', 'wt','ts']
else:
edglst.columns = ['src', 'trg']
g = nx.from_pandas_dataframe(edglst,source='src',target='trg')
g.name = os.path.basename(gfname)
return g
def nx_edges_to_nddgo_graph (G,n,m, sampling=False, peoh=""):
# print args['peoh']
ofname = 'datasets/{}_{}.dimacs'.format(G.name, peoh)
# print '...', ofname
if sampling:
edges = G.edges()
edges = [(int(e[0]), int(e[1])) for e in edges]
df = pd.DataFrame(edges)
df.sort_values(by=[0], inplace=True)
with open(ofname, 'w') as f:
f.write('c {}\n'.format(G.name))
f.write('p edge\t{}\t{}\n'.format(n,m))
# for e in df.iterrows():
output_edges = lambda x: f.write("e\t{}\t{}\n".format(x[0], x[1]))
df.apply(output_edges, axis=1)
# f.write("e\t{}\t{}\n".format(e[0]+1,e[1]+1))
if os.path.exists(ofname): print 'Wrote: ./{}'.format(ofname)
else:
edges = G.edges()
edges = [(int(e[0]), int(e[1])) for e in edges]
df = pd.DataFrame(edges)
df.sort_values(by=[0], inplace=True)
with open(ofname, 'w') as f:
f.write('c {}\n'.format(G.name))
f.write('p edge\t{}\t{}\n'.format(n,m))
# for e in df.iterrows():
output_edges = lambda x: f.write("e\t{}\t{}\n".format(x[0], x[1]))
df.apply(output_edges, axis=1)
# f.write("e\t{}\t{}\n".format(e[0]+1,e[1]+1))
if os.path.exists(ofname): print 'Wrote: ./{}'.format(ofname)
return [ofname]
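# For reference (added note): the DIMACS-style file written above has roughly
# the following layout, where the actual values depend on the input graph:
#
#   c <graph name>
#   p edge  <num nodes>  <num edges>
#   e  <src>  <trg>
#   e  <src>  <trg>
#   ...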
def nx_edges_to_nddgo_graph_sampling(graph, n, m, peo_h):
G = graph
if n is None and m is None: return
# n = G.number_of_nodes()
# m = G.number_of_edges()
nbr_nodes = 256
basefname = 'datasets/{}_{}'.format(G.name, peo_h)
K = int(math.ceil(.25*G.number_of_nodes()/nbr_nodes))
print "--", nbr_nodes, K, '--';
for j,Gprime in enumerate(gs.rwr_sample(G, K, nbr_nodes)):
# if gname is "":
# # nx.write_edgelist(Gprime, '/tmp/sampled_subgraph_200_{}.tsv'.format(j), delimiter="\t", data=False)
# gprime_lst.append(Gprime)
# else:
# # nx.write_edgelist(Gprime, '/tmp/{}{}.tsv'.format(gname, j), delimiter="\t", data=False)
# gprime_lst.append(Gprime)
# # print "... files written: /tmp/{}{}.tsv".format(gname, j)
edges = Gprime.edges()
edges = [(int(e[0]), int(e[1])) for e in edges]
df = pd.DataFrame(edges)
df.sort_values(by=[0], inplace=True)
ofname = basefname+"_{}.dimacs".format(j)
with open(ofname, 'w') as f:
f.write('c {}\n'.format(G.name))
f.write('p edge\t{}\t{}\n'.format(n,m))
# for e in df.iterrows():
output_edges = lambda x: f.write("e\t{}\t{}\n".format(x[0], x[1]))
df.apply(output_edges, axis=1)
# f.write("e\t{}\t{}\n".format(e[0]+1,e[1]+1))
if os.path.exists(ofname): print 'Wrote: {}'.format(ofname)
return basefname
def edgelist_dimacs_graph(orig_graph, peo_h):
fname = orig_graph
gname = os.path.basename(fname).split(".")
gname = sorted(gname,reverse=True, key=len)[0]
G = nx.read_edgelist(fname, comments="%", data=False, nodetype=int)
# print "...", G.number_of_nodes(), G.number_of_edges()
# from numpy import max
# print "...", max(G.nodes()) ## to handle larger 300K+ nodes with much larger labels
N = max(G.nodes())
M = G.number_of_edges()
# +++ Graph Checks
if G is None: sys.exit(1)
G.remove_edges_from(G.selfloop_edges())
giant_nodes = max(nx.connected_component_subgraphs(G), key=len)
G = nx.subgraph(G, giant_nodes)
graph_checks(G)
# --- graph checks
G.name = gname
# print "...", G.number_of_nodes(), G.number_of_edges()
#if G.number_of_nodes() > 500:
# return (nx_edges_to_nddgo_graph_sampling(G, n=N, m=M, peo_h=peo_h), gname)
#else:
return (nx_edges_to_nddgo_graph(G, n=N, m=M, peoh=peo_h), gname)
def print_treewidth (in_dimacs, var_elim):
nddgoout = ""
args = ["bin/mac/serial_wis -f {} -nice -{} -width".format(in_dimacs, var_elim)]
while not nddgoout:
popen = subprocess.Popen(args, stdout=subprocess.PIPE, shell=True)
popen.wait()
# output = popen.stdout.read()
out, err = popen.communicate()
nddgoout = out.split('\n')
print nddgoout
return nddgoout
def main ():
parser = get_parser()
args = vars(parser.parse_args())
dimacs_g, gname = edgelist_dimacs_graph(args['orig'],"")
if len(dimacs_g) == 1:
print "dimacs_g", dimacs_g
if __name__ == '__main__':
try:
main()
except Exception, e:
print str(e)
traceback.print_exc()
sys.exit(1)
sys.exit(0)
| mit |
morris254/polymode | Polymode/Plotter.py | 5 | 9920 | # _*_ coding=utf-8 _*_
#
#---------------------------------------------------------------------------------
#Copyright © 2009 Andrew Docherty
#
#This program is part of Polymode.
#Polymode is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program. If not, see <http://www.gnu.org/licenses/>.
#---------------------------------------------------------------------------------
"""
Functions for easy plotting of modes and modal properties
This is a light wrapper to matplotlib: http://matplotlib.sourceforge.net
"""
import logging
try:
import pylab
from pylab import *
except:
logging.error("Plot library unavailable, plots cannot be made")
pylab = None
from numpy import (pi,append,c_,cos,sin,newaxis,log,absolute,arange,exp,fft,real,imag,iterable,atleast_1d)
__spurious_warning_color__ = 'crimson'
def plot_v(coord, f, style='pcolor', cmap=None, color=None, alpha=1.0, aspect=None, plotstyle={}):
"""
Plot the 2-d data f using a coordinate object.
Parameters:
coord: The coordinate object associated to the data f
f: The 2-d array of data to be plotted
style: plotting style, one of 'pcolor'*, 'contour', 'line', 'vector', 'circ'
cmap: color map from pylab.cm
color: line color for the line-based plotters
alpha: the alpha transparancy of the plot
plotstyle: specific matplotlib styles
aspect: the plot aspect ratio
"""
if cmap==None: cmap = pylab.cm.jet
#Calculate the bases
rm,phim = coord.polar2d(interval=0)
xm,ym = coord.cartesian2d(interval=0)
irm,iphim = coord.polar2d(interval=1)
ixm,iym = coord.cartesian2d(interval=1)
#Close plot if spans 2*pi and plotting with contour plot
if (style.startswith('cont') or style.startswith('line')) and hasattr(coord,'arange'):
if abs(abs(diff(coord.arange))-2*pi)<1e-6:
xm = append(xm, xm[:,:1], axis=1)
ym = append(ym, ym[:,:1], axis=1)
f = append(f, f[:,:1], axis=1)
#The actual plotting commands
autoaspect = 'equal'
if style.startswith('cont'):
V=10
c=pylab.contourf(xm,ym,f.real,V,colors=color,cmap=cmap,alpha=alpha,linestyles=None)
ax = gca()
elif style.startswith('line'):
c=pylab.contour(xm,ym,f.real,colors=color,cmap=cmap,alpha=alpha)
ax = gca()
elif style.startswith('pcol'):
c=pylab.pcolor(ixm,iym,f.real,cmap=cmap,shading='flat',alpha=alpha)
ax = gca()
#Vector plot
elif style.startswith('vector'):
fx,fy = 0.1*f.real/abs(f).max()
c=pylab.quiver(xm,ym,fx,fy, pivot='middle', scale=2, color=color)
ax = gca()
#Plot circular polarization
elif style.startswith('circ'):
from matplotlib import patches
ax=gca()
#Could make these a little more configurable!
size = 0.4*min(coord.characteristic_length)
dp=0.02
arrowp = 0.2
width=0.5
phis = arange(dp,1+dp,dp)*2*pi
fx,fy = size*f/abs(f).max()
for ii in ndindex(fx.shape):
cx,cy = fx[ii], fy[ii]
xy = real([xm[ii] + cx*exp(1j*phis), ym[ii] + cy*exp(1j*phis)])
e = patches.Polygon(xy.T, fill=0, ec=color, **plotstyle)
ax.add_artist(e)
#Only add arrow if ellipse is large enough
if linalg.norm([cx,cy])>0.5*size:
dx,dy = real([cx*exp(1j*arrowp)-cx, cy*exp(1j*arrowp)-cy])
xyt = array([xm[ii]+real([cx-width*dy,cx+width*dy,cx+dx]),\
ym[ii]+real([cy+width*dx,cy-width*dx,cy+dy])])
arrow = patches.Polygon(xyt.T, fill=1, ec=color, fc=color, **plotstyle)
ax.add_artist(arrow)
ax.axis([xm.min(), xm.max(), ym.min(), ym.max()])
elif style.startswith('3d'):
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
ax = Axes3D(gcf())
ax.plot_surface(xm,ym,f.real, rstride=1, cstride=1, cmap=cmap)
elif style.startswith('polar'):
pylab.pcolor(irm,iphim,f.real,cmap=cmap,shading='flat',alpha=alpha)
ax = gca()
autoaspect = 'auto'
else:
raise NotImplementedError, "Plot type not implemented"
#Set aspect ratio for plot
ax.set_aspect(autoaspect if aspect is None else aspect)
draw()
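# Minimal usage sketch (added for illustration; assumes a `coord` object from
# this package and a matching 2-d array `f` of field values):
#
#   pylab.figure()
#   plot_v(coord, f, style='pcolor', cmap=pylab.cm.jet)
#   pylab.show()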
def plot_modes_in_grid(modes, plottype='Sz', wg=None, title=None, bar=False,
Nrows=None, axis_off=None, **plotargs):
"""
Plot all given modes on one figure.
Parameters:
- plottype: Standard mode.plot() type
- wg: if given the waveguide will be plotted
- title: how to title the plots, either 'full', 'neff', 'number' or ''
- bar: plot a colorbar on each plot
- axis_off: hide the axes
- Nx: resolution of plot
- cartesian: True/False to plot cartesian or polar
"""
#fig = pylab.figure()
fig = gcf()
plottype = plottype.replace(',',' ').split()
modes = np.atleast_1d(modes)
nmodes = len(modes)
ntypes = len(plottype)
#Set appropriate title
if title is None and nmodes*ntypes<6: title='full'
elif title is None: title=''
#The dimensions of the grid
if Nrows is None:
Nrows = floor(sqrt(nmodes*ntypes))
Ncols = ceil(nmodes*ntypes/Nrows)
axis_ticks = nmodes<4
ii = 0
for ii in range(nmodes):
for jj in range(ntypes):
ax = fig.add_subplot(Nrows,Ncols,ii*ntypes+jj+1)
#Only put ticks if there aren't too many modes
if not axis_ticks:
ax.set_xticks([])
ax.set_yticks([])
if axis_off:
ax.set_axis_off()
#if wg: wg.plot(fill=False)
if title == 'number': title_str = "(%d)" % ii
elif title == 'full': title_str = r"%(type)s, $n_{\mathrm{eff}}=%(tneff)s$"
elif title == 'neff': title_str = r"$%(tneff)s$"
else: title_str = title
modes[ii].plot(plottype[jj], wg=wg, title=title_str, **plotargs)
if wg:
wg.plot(fill=0)
#Colorize background if a spurious mode is detected
if modes[ii].is_spurious:
ax.set_axis_bgcolor(__spurious_warning_color__)
            if bar: pylab.colorbar()
return fig
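# Illustrative usage (assumes `modes` is a list of solved mode objects and
# `wg` a waveguide object from this package):
#
#   fig = plot_modes_in_grid(modes, plottype='Sz', wg=wg, title='neff')
#   pylab.show()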
def extract_data_from_modes(modes=[], datatype='', return_label=False):
"""
Return a list containing the requested modal paramters for each mode given
modes: list of modes
datatype: 'neff', 'loss', 'wl', 'ineff', 'Na', 'Nr'
return_label: if true also return a latex formatted data label
"""
#Deal with groups of modes
modes = flatten(modes)
if datatype.startswith('loss'):
y = [ m.loss for m in modes ]
lab = r'Loss, db/m'
elif datatype.startswith('neff'):
y = [ real(m.neff) for m in modes ]
lab = r'$Re(n_{\rm eff})$'
elif datatype.startswith('ineff'):
y = [ imag(m.neff) for m in modes ]
lab = r'$Im(n_{\rm eff})$'
elif datatype.startswith('disp'):
y = [ m.dispersion for m in modes ]
lab = r'Dispersion'
elif datatype.startswith('w'):
y = [ m.wl for m in modes ]
lab = r'Wavelength, $\mu$m'
elif datatype.startswith('nr'):
y = [ m.coord.Nr for m in modes ]
lab = r'Radial resolution, $N_r$'
elif datatype.startswith('na'):
y = [ m.coord.Naz for m in modes ]
lab = r'Azimuthal resolution, $N_\phi$'
elif datatype in modes[0].label:
y = [ float(m.label[datatype]) for m in modes ]
lab = "%s" % datatype
if return_label:
return y,lab
else:
return y
def plot_mode_properties(modes=[], ydata='loss', xdata='wl', style='', sort2d=False):
"""
Plot a graph of the specified modal properties
modes: list of modes to extract property
ydata: name of property on y axis
xdata: name of property on x axis
style: matplotlib linestyle to plot with,
see help in pylab.plot
xdata, ydata can be one of:
'neff': the real part of the mode effective index
'ineff': the imaginary part of the mode effective index
'loss': the loss in db/km for the mode
'dispersion': the estimated dispersion of the mode
'wavelength': the mode wavelength
'nr': the radial resolution of the calculation
'naz': the azimuthal resolution of the calculation
"""
ax = gca()
x,xlab = extract_data_from_modes(modes, xdata.lower(),True)
y,ylab = extract_data_from_modes(modes, ydata.lower(),True)
if sort2d:
xall = sort(unique(x))
x2d = []; y2d = []
done=False; jj=0
while not done:
done=True
x_mm = []; y_mm = []
for ii in range(len(xall)):
yii = find(x==xall[ii])
ys = sorted(array(y)[yii], reverse=True)
if jj<len(ys):
done=False
x_mm += [ xall[ii] ]
y_mm += [ ys[jj] ]
if not done:
plot(x_mm, y_mm)
x2d+=[x_mm]; y2d+=[y_mm]
jj+=1
else:
ax.plot(x,y,style)
ax.set_xlabel(xlab)
ax.set_ylabel(ylab)
return ax
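# Illustrative usage (assumes `modes` is a list of solved mode objects):
#
#   ax = plot_mode_properties(modes, ydata='loss', xdata='wl', style='o-')
#   pylab.show()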
| gpl-3.0 |
gnsiva/Amphitrite | msClasses/TwoDdata.py | 1 | 8274 | import numpy as np
import matplotlib.pyplot as plt
from lib import utils
import lib.SG as SG
from collections import OrderedDict
class TwoDdata():
def __init__(self):
self.xvals = np.array([])
self.yvals = np.array([])
self.rawyvals = []
self.gradient = []
self.gPeaks = OrderedDict()
self.normalisationType = 'none'
#===========================================================================
# Data manipulation
#===========================================================================
def readFile(self,filename,x_range=0,grain=1):
'''Reads in x y coordinate pairs from text file
        tab-separated, as in copy spectrum list in MassLynx
x_range - allows you to select lower and upper bounds
in the format of [lower,upper]
        grain - allows skipping of data points to speed up processing
a grain of 2 means that every second value will be used'''
raw_data = open(filename,'r').readlines()
count = 0
self.xvals = []
self.yvals = []
for x in raw_data:
count += 1
if count == grain:
temp = x.rstrip('\r\n')
vals = map(float, temp.split('\t'))
if not x_range:
self.xvals.append(vals[0])
self.yvals.append(vals[1])
else:
if vals[0] > x_range[0] and vals[0] < x_range[1]:
self.xvals.append(vals[0])
self.yvals.append(vals[1])
count = 0
self.xvals = np.array(self.xvals)
self.yvals = np.array(self.yvals)
# so that it isn't overwritten by smoothing
self.rawyvals = self.yvals.copy()
self._normalisePreset()
def normalisationBpi(self):
'''Normalise to base peak intensity (0-100)'''
self.yvals = self.yvals/self.yvals.max()*100
self.setNormalisationType(type='bpi')
def normalisationArea(self):
self.yvals = self.yvals/np.sum(self.yvals)
self.setNormalisationType(type='area')
def setNormalisationType(self,type):
self.normalisationType = type
def smoothingSG(self,window_len=3,smoothes=2,poly_order=1):
'''Should only really be used on equally spaced data
Actual window length used is 2*window_len+1 to avoid breakage'''
window_len = 2*window_len + 1
self.restoreRawYvals()
for i in xrange(smoothes):
self.yvals = SG.sg(self.yvals,window_size=window_len,order=poly_order)
self._normalisePreset()
def _normalisePreset(self):
if self.normalisationType == 'bpi':
self.normalisationBpi()
elif self.normalisationType == 'area':
self.normalisationArea()
elif self.normalisationType == 'none':
pass
def restoreRawYvals(self):
self.yvals = self.rawyvals.copy()
self._normalisePreset()
def limitAxisX(self,lims):
"""Reduce the range of values in the dataset. Reducing the xlimits here
automatically reduces the yaxis.
:parameter lims: List of limits in the form [lower,upper]
"""
lowerI = utils.closest(lims[0],self.xvals)
higherI = utils.closest(lims[1],self.xvals)
self.xvals = self.xvals[lowerI:higherI]
self.yvals = self.yvals[lowerI:higherI]
self.rawyvals = self.rawyvals[lowerI:higherI]
def getAxesWithoutNans(self):
"""The CCS calibration can cause some xvals
to become NaNs, this function returns x and yvals
truncated to remove the NaNs
"""
xvals = self.xvals[np.invert(np.isnan(self.xvals))]
yvals = self.yvals[np.invert(np.isnan(self.xvals))]
return xvals,yvals
#===========================================================================
# Calculations
#===========================================================================
def calculateWeightedMeanStandardDeviation(self):
yvals = self.yvals/self.yvals.max()*100
average,stdev = utils.weightedAverageAndStd(self.xvals,yvals)
return average,stdev
def calculateAreaUnderCurve(self):
"""Integrates data using trapezium method"""
xvals,yvals = self.getAxesWithoutNans()
yvals = yvals/yvals.max()*100
return np.trapz(yvals,xvals)
#===========================================================================
# Peak finding
#===========================================================================
def _calculateGradient(self):
"""when reconstructing the data, make data[0] the start value,
skip the first gradient value then append on
gradient[i] * (ys[i+1] - ys[i]) (actually gradient[i+1] ...)"""
self.gradient = [0]
for i,x in enumerate(self.xvals):
if i+2 <= len(self.xvals):
try:
gr = (float(self.yvals[i+1])-float(self.yvals[i]))/(float(self.xvals[i+1]) - float(x))
except:
print 'Gradient calculation: divide by 0 replaced by 0.000001'
gr = 0.000001
self.gradient.append(gr)
self.gradient = np.array(self.gradient)
def findPeaks(self,limit=0):
"""limit allows you to ignore slow peaks (remove noise)
percentage of BPI e.g. 5 % cutoff should be 5"""
# get gradient for peak picking
self._calculateGradient()
gPeaks = OrderedDict()
count = 0
for i,v in enumerate(self.gradient):
if i+1 < len(self.gradient):
if v > 0:
if self.gradient[i+1] <= 0:
gPeaks[count] = []
gPeaks[count].append(self.xvals[i])
gPeaks[count].append(self.yvals[i])
count += 1
if limit:
gPeaks_out = OrderedDict()
lim = max([gPeaks[x][1] for x in gPeaks.keys()]) * float(limit)/100
count = 0
for i,(k,v) in enumerate(gPeaks.items()):
if v[1] > lim:
gPeaks_out[count] = []
gPeaks_out[count].append(gPeaks[k][0])
gPeaks_out[count].append(gPeaks[k][1])
count += 1
self.gPeaks = gPeaks_out
else:
self.gPeaks = gPeaks
def addPeak(self,mz):
"""This function allows you to add additional peaks not found using the
findPeaks method"""
try:
keys = sorted(self.gPeaks.keys())
id = 1+keys[-1]
self.gPeaks[id] = {}
except:
id = 0
self.gPeaks = {}
self.gPeaks[id] = {}
self.gPeaks[id] = [[],[]]
self.gPeaks[id][0] = mz
index = utils.closest(mz,self.xvals)
self.gPeaks[id][1] = self.yvals[index]
#===========================================================================
# Plotting
#===========================================================================
def plot(self,ax,**kwargs):
"""Plot 2D data (e.g. MS and ATDs)
Can take matplotlib axes object, as well as any standard
inputs for matplotlib.pyplot.plot().
"""
ax = utils.checkAx(ax)
if not 'color' in kwargs:
kwargs['color'] = 'black'
if not 'lw' in kwargs:
kwargs['lw'] = 0.8
ax.plot(self.xvals,self.yvals,**kwargs)
ax.set_ylabel('Intensity')
ax.set_xlabel('$m/z$')
def plotgPeaks(self,ax,labels=0,**kwargs):
"""if labels=0 then only peak id's are displayed
otherwise x-values are shown as well
"""
if not 'color' in kwargs:
kwargs['color'] = 'gray'
if not 'alpha' in kwargs:
kwargs['alpha'] = 0.5
for i,k in enumerate(self.gPeaks):
#print k
#print self.gPeaks.keys()
ax.axvline(self.gPeaks[k][0], **kwargs)
if labels:
label = str(i)+':'+str(self.gPeaks[k][0])
else:
label = str(i)
ax.annotate(label,[self.gPeaks[k][0],self.gPeaks[k][1]])
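# Illustrative usage of the class above (file name and x-range values are
# hypothetical):
#
#   data = TwoDdata()
#   data.readFile('spectrum.txt', x_range=[2000, 6000])
#   data.smoothingSG(window_len=3, smoothes=2)
#   data.findPeaks(limit=5)
#   fig, ax = plt.subplots()
#   data.plot(ax)
#   data.plotgPeaks(ax, labels=1)
#   plt.show()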
| gpl-2.0 |
shahankhatch/scikit-learn | examples/svm/plot_separating_hyperplane.py | 294 | 1273 | """
=========================================
SVM: Maximum margin separating hyperplane
=========================================
Plot the maximum margin separating hyperplane within a two-class
separable dataset using a Support Vector Machine classifier with
linear kernel.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# we create 40 separable points
np.random.seed(0)
X = np.r_[np.random.randn(20, 2) - [2, 2], np.random.randn(20, 2) + [2, 2]]
Y = [0] * 20 + [1] * 20
# fit the model
clf = svm.SVC(kernel='linear')
clf.fit(X, Y)
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(-5, 5)
yy = a * xx - (clf.intercept_[0]) / w[1]
# plot the parallels to the separating hyperplane that pass through the
# support vectors
b = clf.support_vectors_[0]
yy_down = a * xx + (b[1] - a * b[0])
b = clf.support_vectors_[-1]
yy_up = a * xx + (b[1] - a * b[0])
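# Added illustration (not part of the original example): the width of the
# margin between the two dashed lines equals 2 / ||w||.
margin = 2 / np.sqrt(np.sum(clf.coef_ ** 2))
print("margin width: %.3f" % margin)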
# plot the line, the points, and the nearest vectors to the plane
plt.plot(xx, yy, 'k-')
plt.plot(xx, yy_down, 'k--')
plt.plot(xx, yy_up, 'k--')
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1],
s=80, facecolors='none')
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.axis('tight')
plt.show()
| bsd-3-clause |
Visual-analitics-2015/-VAST-Challenge-2015-Mini-Challenge-1 | web_viewer/dino_server.py | 1 | 9289 | import tornado.ioloop
import tornado.web
import os
import pandas as pd
import numpy as np
import json
import pickle as pk
class DayInfo(object):
def __init__(self,Name,Date,Visitors,Mvts,SampleRate):
self.Name = Name
self.Date = Date
self.Visitors = Visitors
self.TotalVisits = Visitors["id"].count()
self.VisitsDistribution = Visitors.groupby(Visitors.time.dt.hour)["id"].aggregate(lambda x:len(np.unique(x)))
self.Wait_Time_Distribution = self.get_Wait_Time_Distribution(Mvts,SampleRate)
self.Wait_Time = self.get_AverageWaitTime()
def get_Wait_Time_Distribution(self,Mvts,SampleRate):
dist = {}
VisitorsGroupByHour = self.Visitors.groupby(self.Visitors.time.dt.hour)
for key,group in VisitorsGroupByHour:
dist[key] = self.get_Wait_Time(group,Mvts,SampleRate)
return pd.Series(dist)
def get_AverageWaitTime(self):
AverageTime = 0
for index,time_wait in self.Wait_Time_Distribution.iteritems():
AverageTime = AverageTime + time_wait
if len(self.Wait_Time_Distribution) == 0:
return 0
else:
return AverageTime / len(self.Wait_Time_Distribution)
def get_Wait_Time(self,People,Mvts,SampleRate):
if len(People.index) > SampleRate:
PplSample = People.sample(SampleRate)
else:
PplSample = People
wait_time=0
counter = 0
for index, person in PplSample.iterrows():
t_i = person["time"]
try:
first_mvn = Mvts.loc[(Mvts["id"] == person["id"]) & (Mvts["time"] > t_i)]
lets_continue = True
except KeyError:
lets_continue = False
if lets_continue == True:
t_f = first_mvn["time"].min()
if isinstance(t_f, pd.tslib.Timestamp):
t_d = (t_f - t_i)
if isinstance(t_d, pd.tslib.Timedelta):
t_d = t_d / np.timedelta64(1,'m')
wait_time = wait_time + t_d
counter = counter+1
if counter == 0:
return 0
else:
return wait_time / counter
def get_Wait_Time_Rate(People,Mvts,SampleRate):
if len(People.index) > SampleRate:
PplSample = People.sample(SampleRate)
else:
PplSample = People
wait_time=0
counter = 0
for index, person in PplSample.iterrows():
t_i = person["time"]
try:
first_mvn = Mvts.loc[(Mvts["id"] == person["id"]) & (Mvts["time"] > t_i)]
lets_continue = True
except KeyError:
lets_continue = False
if lets_continue == True:
t_f = first_mvn["time"].min()
if isinstance(t_f, pd.tslib.Timestamp):
t_d = (t_f - t_i)
if isinstance(t_d, pd.tslib.Timedelta):
t_d = t_d / np.timedelta64(1,'m')
wait_time = wait_time + t_d
counter = counter+1
if counter == 0:
return 0
else:
return wait_time / counter
#Clase para trabajar Datos por Atraccion
class AttractionDayInfo(object):
def __init__(self,Attraction):
self.Attraction = Attraction
self.DaysInfo = {}
def setDaysInfo(self,dayData):
self.DaysInfo[dayData.Name]=dayData
#Clase para trabajar con cada atraccion
class Attraction(object):
def __init__(self,X,Y,Name,Number,Kind):
self.X = X
self.Y = Y
self.Name = Name
self.Number = Number
self.Kind = Kind
#Clase para trabajar con cada atraccion
class DayData(object):
def __init__(self,DayName,DayDate,DayData):
self.DayName = DayName
self.DayDate = DayDate
self.DayData = DayData
#Clase para mirar el comportamiento del parque por dia
class ParkBehavior(object):
def __init__(self,DayName,DayDate,DayBehaviour):
self.DayName = DayName
self.DayDate = DayDate
self.DayBehaviour = DayBehaviour
#Clase para trabajar con cada atraccion
class ParkOccupancyBehavior(object):
def __init__(self,TotalVisitors,TotalVisitorsPerHour,PplDuration,VisitedAttractionPerP,AttractionKindsDistribution):
self.TotalVisitors = TotalVisitors
self.TotalVisitorsPerHour = TotalVisitorsPerHour
self.TotalVisitorsPerZone = None
self.PplDuration = PplDuration
self.VisitedAttractionPerP = VisitedAttractionPerP
self.AttractionKindsDistribution = AttractionKindsDistribution
def set_Park_Ocupancy_Per_Zone(self,Bins,DayData):
cnts, xe, ye = np.histogram2d(DayData["X"], DayData["Y"], range =((0,100), (0,100)), normed = True, bins =Bins)
self.TotalVisitorsPerZone = Histogram2D(cnts,xe,ye)
def get_Park_Ocupancy_Per_Zone(self):
return self.TotalVisitorsPerZone
#Clase para trabajar con cada atraccion
class Histogram2D(object):
def __init__(self,data,y_axis,x_axis):
self.data = data
self.y_axis = y_axis
self.x_axis = x_axis
def obj_dict(obj):
try:
if isinstance(obj,pd.Series):
return obj.to_dict()
else:
return obj.__dict__
except:
return None
class MainHandler(tornado.web.RequestHandler):
def get(self):
self.render("dino_analytics.html")
class AttractionDataHandler(tornado.web.RequestHandler):
def get(self):
self.write({"attractions" : json.dumps(self.atb, default=obj_dict)})
def initialize(self, atb):
self.atb = atb
class RenewDataHandler(tornado.web.RequestHandler):
def get(self):
Dataset = self.Dataset
data_Attractions = self.data_Attractions
t_min = np.datetime64(int(self.get_argument("t_min")),"ms")
t_max = np.datetime64(int(self.get_argument("t_max")),"ms")
day = self.get_argument("day")
dataset = Dataset[day]
checkins = dataset.loc[(dataset["time"] >= t_min) & (dataset["time"] <= t_max) & (dataset["type"]=="check-in")]
movements = dataset.loc[dataset["type"]=="movement"]
answers = {}
for index,row in data_Attractions.iterrows():
print("Processing attaction "+str(index+1)+" of "+str(len(data_Attractions))+": "+row["Name"])
atr_Num = row["Number"]
checkin_data = checkins.loc[(checkins["X"] == row["X"]) & (checkins["Y"] == row["Y"])]
answ = {}
answ["Wait_Time"]=get_Wait_Time_Rate(checkin_data,movements,5)
answ["TotalVisitors"]=checkin_data["id"].count()
answers[atr_Num]=answ
self.write({"attractions_new" : json.dumps(answers, default=obj_dict)})
def initialize(self, Dataset, data_Attractions):
self.Dataset = Dataset
self.data_Attractions = data_Attractions
class LoadDataHandler(tornado.web.RequestHandler):
def get(self):
pkb = self.pkb
self.write({"park" : json.dumps(pkb, default=obj_dict)})
def initialize(self, pkb):
self.pkb = pkb
settings = {"template_path" : os.path.dirname(__file__),
"static_path" : os.path.join(os.path.dirname(__file__),"static"),
"debug" : True
}
if __name__ == "__main__":
path_behaviour = os.path.join(os.path.dirname(__file__), "static/data/OccupancyBehaviorPerDay.data")
park_behaviour = pk.load(open(path_behaviour,"rb"))
path_attraction = os.path.join(os.path.dirname(__file__), "static/data/AttractionsBehab.data")
attraction_behaviour = pk.load(open(path_attraction,"rb"))
print('loading...')
# df = pd.read_csv(path)
# df["time"] = pd.to_datetime(df.Timestamp, format="%Y-%m-%d %H:%M:%S")
# df2 = pd.read_csv(path2)
# df2["time"] = pd.to_datetime(df2.Timestamp, format="%Y-%m-%d %H:%M:%S")
data_Attractions = pd.read_csv(os.path.join(os.path.dirname(__file__), "static/data/Attraction_Data.csv"))
Dataset = {"Friday":pd.read_csv(os.path.join(os.path.dirname(__file__), "static/data/park-movement-Fri.csv")),"Saturday":pd.read_csv(os.path.join(os.path.dirname(__file__), "static/data/park-movement-Sat.csv")),"Sunday":pd.read_csv(os.path.join(os.path.dirname(__file__), "static/data/park-movement-Sun.csv"))}
#Dataset = {"Friday":pd.read_csv(os.path.join(os.path.dirname(__file__), "static/data/park-movement-Fri.csv"))}
for key, value in Dataset.iteritems():
print("Processing day "+str(key))
value["time"] = pd.to_datetime(value.Timestamp, format="%Y-%m-%d %H:%M:%S")
Dataset[key] = value
application = tornado.web.Application([
(r"/", MainHandler),
(r"/load_data", LoadDataHandler,{"pkb":park_behaviour}),
(r"/renew_data", RenewDataHandler,{"Dataset":Dataset,"data_Attractions":data_Attractions}),
(r"/attraction_data", AttractionDataHandler, {"atb":attraction_behaviour}),
(r"/static/(.*)", tornado.web.StaticFileHandler,
{"path": settings["static_path"]})
], **settings)
application.listen(8100)
print("ready")
tornado.ioloop.IOLoop.current().start()
| mit |
nvoron23/scikit-learn | sklearn/pipeline.py | 61 | 21271 | """
The :mod:`sklearn.pipeline` module implements utilities to build a composite
estimator, as a chain of transforms and estimators.
"""
# Author: Edouard Duchesnay
# Gael Varoquaux
# Virgile Fritsch
# Alexandre Gramfort
# Lars Buitinck
# Licence: BSD
from collections import defaultdict
from warnings import warn
import numpy as np
from scipy import sparse
from .base import BaseEstimator, TransformerMixin
from .externals.joblib import Parallel, delayed
from .externals import six
from .utils import tosequence
from .utils.metaestimators import if_delegate_has_method
from .externals.six import iteritems
__all__ = ['Pipeline', 'FeatureUnion']
class Pipeline(BaseEstimator):
"""Pipeline of transforms with a final estimator.
Sequentially apply a list of transforms and a final estimator.
Intermediate steps of the pipeline must be 'transforms', that is, they
must implement fit and transform methods.
The final estimator only needs to implement fit.
The purpose of the pipeline is to assemble several steps that can be
cross-validated together while setting different parameters.
For this, it enables setting parameters of the various steps using their
names and the parameter name separated by a '__', as in the example below.
Read more in the :ref:`User Guide <pipeline>`.
Parameters
----------
steps : list
List of (name, transform) tuples (implementing fit/transform) that are
chained, in the order in which they are chained, with the last object
an estimator.
Attributes
----------
named_steps : dict
Read-only attribute to access any step parameter by user given name.
Keys are step names and values are steps parameters.
Examples
--------
>>> from sklearn import svm
>>> from sklearn.datasets import samples_generator
>>> from sklearn.feature_selection import SelectKBest
>>> from sklearn.feature_selection import f_regression
>>> from sklearn.pipeline import Pipeline
>>> # generate some data to play with
>>> X, y = samples_generator.make_classification(
... n_informative=5, n_redundant=0, random_state=42)
>>> # ANOVA SVM-C
>>> anova_filter = SelectKBest(f_regression, k=5)
>>> clf = svm.SVC(kernel='linear')
>>> anova_svm = Pipeline([('anova', anova_filter), ('svc', clf)])
>>> # You can set the parameters using the names issued
>>> # For instance, fit using a k of 10 in the SelectKBest
>>> # and a parameter 'C' of the svm
>>> anova_svm.set_params(anova__k=10, svc__C=.1).fit(X, y)
... # doctest: +ELLIPSIS
Pipeline(steps=[...])
>>> prediction = anova_svm.predict(X)
>>> anova_svm.score(X, y) # doctest: +ELLIPSIS
0.77...
>>> # getting the selected features chosen by anova_filter
>>> anova_svm.named_steps['anova'].get_support()
... # doctest: +NORMALIZE_WHITESPACE
array([ True, True, True, False, False, True, False, True, True, True,
False, False, True, False, True, False, False, False, False,
True], dtype=bool)
"""
# BaseEstimator interface
def __init__(self, steps):
names, estimators = zip(*steps)
if len(dict(steps)) != len(steps):
raise ValueError("Provided step names are not unique: %s" % (names,))
# shallow copy of steps
self.steps = tosequence(steps)
transforms = estimators[:-1]
estimator = estimators[-1]
for t in transforms:
if (not (hasattr(t, "fit") or hasattr(t, "fit_transform")) or not
hasattr(t, "transform")):
raise TypeError("All intermediate steps of the chain should "
"be transforms and implement fit and transform"
" '%s' (type %s) doesn't)" % (t, type(t)))
if not hasattr(estimator, "fit"):
raise TypeError("Last step of chain should implement fit "
"'%s' (type %s) doesn't)"
% (estimator, type(estimator)))
@property
def _estimator_type(self):
return self.steps[-1][1]._estimator_type
def get_params(self, deep=True):
if not deep:
return super(Pipeline, self).get_params(deep=False)
else:
out = self.named_steps
for name, step in six.iteritems(self.named_steps):
for key, value in six.iteritems(step.get_params(deep=True)):
out['%s__%s' % (name, key)] = value
out.update(super(Pipeline, self).get_params(deep=False))
return out
@property
def named_steps(self):
return dict(self.steps)
@property
def _final_estimator(self):
return self.steps[-1][1]
# Estimator interface
def _pre_transform(self, X, y=None, **fit_params):
fit_params_steps = dict((step, {}) for step, _ in self.steps)
for pname, pval in six.iteritems(fit_params):
step, param = pname.split('__', 1)
fit_params_steps[step][param] = pval
Xt = X
for name, transform in self.steps[:-1]:
if hasattr(transform, "fit_transform"):
Xt = transform.fit_transform(Xt, y, **fit_params_steps[name])
else:
Xt = transform.fit(Xt, y, **fit_params_steps[name]) \
.transform(Xt)
return Xt, fit_params_steps[self.steps[-1][0]]
def fit(self, X, y=None, **fit_params):
"""Fit all the transforms one after the other and transform the
data, then fit the transformed data using the final estimator.
Parameters
----------
X : iterable
Training data. Must fulfill input requirements of first step of the
pipeline.
y : iterable, default=None
Training targets. Must fulfill label requirements for all steps of
the pipeline.
"""
Xt, fit_params = self._pre_transform(X, y, **fit_params)
self.steps[-1][-1].fit(Xt, y, **fit_params)
return self
def fit_transform(self, X, y=None, **fit_params):
"""Fit all the transforms one after the other and transform the
data, then use fit_transform on transformed data using the final
estimator.
Parameters
----------
X : iterable
Training data. Must fulfill input requirements of first step of the
pipeline.
y : iterable, default=None
Training targets. Must fulfill label requirements for all steps of
the pipeline.
"""
Xt, fit_params = self._pre_transform(X, y, **fit_params)
if hasattr(self.steps[-1][-1], 'fit_transform'):
return self.steps[-1][-1].fit_transform(Xt, y, **fit_params)
else:
return self.steps[-1][-1].fit(Xt, y, **fit_params).transform(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def predict(self, X):
"""Applies transforms to the data, and the predict method of the
final estimator. Valid only if the final estimator implements
predict.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step of
the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].predict(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def fit_predict(self, X, y=None, **fit_params):
"""Applies fit_predict of last step in pipeline after transforms.
Applies fit_transforms of a pipeline to the data, followed by the
fit_predict method of the final estimator in the pipeline. Valid
only if the final estimator implements fit_predict.
Parameters
----------
X : iterable
Training data. Must fulfill input requirements of first step of
the pipeline.
y : iterable, default=None
Training targets. Must fulfill label requirements for all steps
of the pipeline.
"""
Xt, fit_params = self._pre_transform(X, y, **fit_params)
return self.steps[-1][-1].fit_predict(Xt, y, **fit_params)
@if_delegate_has_method(delegate='_final_estimator')
def predict_proba(self, X):
"""Applies transforms to the data, and the predict_proba method of the
final estimator. Valid only if the final estimator implements
predict_proba.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step of
the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].predict_proba(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def decision_function(self, X):
"""Applies transforms to the data, and the decision_function method of
the final estimator. Valid only if the final estimator implements
decision_function.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step of
the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].decision_function(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def predict_log_proba(self, X):
"""Applies transforms to the data, and the predict_log_proba method of
the final estimator. Valid only if the final estimator implements
predict_log_proba.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step of
the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].predict_log_proba(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def transform(self, X):
"""Applies transforms to the data, and the transform method of the
final estimator. Valid only if the final estimator implements
transform.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step of
the pipeline.
"""
Xt = X
for name, transform in self.steps:
Xt = transform.transform(Xt)
return Xt
@if_delegate_has_method(delegate='_final_estimator')
def inverse_transform(self, X):
"""Applies inverse transform to the data.
Starts with the last step of the pipeline and applies ``inverse_transform`` in
inverse order of the pipeline steps.
Valid only if all steps of the pipeline implement inverse_transform.
Parameters
----------
X : iterable
Data to inverse transform. Must fulfill output requirements of the
last step of the pipeline.
"""
if X.ndim == 1:
warn("From version 0.19, a 1d X will not be reshaped in"
" pipeline.inverse_transform any more.", FutureWarning)
X = X[None, :]
Xt = X
for name, step in self.steps[::-1]:
Xt = step.inverse_transform(Xt)
return Xt
@if_delegate_has_method(delegate='_final_estimator')
def score(self, X, y=None):
"""Applies transforms to the data, and the score method of the
final estimator. Valid only if the final estimator implements
score.
Parameters
----------
X : iterable
Data to score. Must fulfill input requirements of first step of the
pipeline.
y : iterable, default=None
Targets used for scoring. Must fulfill label requirements for all steps of
the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].score(Xt, y)
@property
def classes_(self):
return self.steps[-1][-1].classes_
@property
def _pairwise(self):
# check if first estimator expects pairwise input
return getattr(self.steps[0][1], '_pairwise', False)
def _name_estimators(estimators):
"""Generate names for estimators."""
names = [type(estimator).__name__.lower() for estimator in estimators]
namecount = defaultdict(int)
for est, name in zip(estimators, names):
namecount[name] += 1
for k, v in list(six.iteritems(namecount)):
if v == 1:
del namecount[k]
for i in reversed(range(len(estimators))):
name = names[i]
if name in namecount:
names[i] += "-%d" % namecount[name]
namecount[name] -= 1
return list(zip(names, estimators))
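# Illustrative example (not part of the original module): passing three
# estimators of which two share a type, e.g. [PCA(), PCA(), SVC()], yields
# [('pca-1', PCA()), ('pca-2', PCA()), ('svc', SVC())] -- duplicated type
# names receive numeric suffixes while unique ones keep the bare lowercased
# class name.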
def make_pipeline(*steps):
"""Construct a Pipeline from the given estimators.
This is a shorthand for the Pipeline constructor; it does not require, and
does not permit, naming the estimators. Instead, they will be given names
automatically based on their types.
Examples
--------
>>> from sklearn.naive_bayes import GaussianNB
>>> from sklearn.preprocessing import StandardScaler
>>> make_pipeline(StandardScaler(), GaussianNB()) # doctest: +NORMALIZE_WHITESPACE
Pipeline(steps=[('standardscaler',
StandardScaler(copy=True, with_mean=True, with_std=True)),
('gaussiannb', GaussianNB())])
Returns
-------
p : Pipeline
"""
return Pipeline(_name_estimators(steps))
def _fit_one_transformer(transformer, X, y):
return transformer.fit(X, y)
def _transform_one(transformer, name, X, transformer_weights):
if transformer_weights is not None and name in transformer_weights:
        # if we have a weight for this transformer, multiply output
return transformer.transform(X) * transformer_weights[name]
return transformer.transform(X)
def _fit_transform_one(transformer, name, X, y, transformer_weights,
**fit_params):
if transformer_weights is not None and name in transformer_weights:
        # if we have a weight for this transformer, multiply output
if hasattr(transformer, 'fit_transform'):
X_transformed = transformer.fit_transform(X, y, **fit_params)
return X_transformed * transformer_weights[name], transformer
else:
X_transformed = transformer.fit(X, y, **fit_params).transform(X)
return X_transformed * transformer_weights[name], transformer
if hasattr(transformer, 'fit_transform'):
X_transformed = transformer.fit_transform(X, y, **fit_params)
return X_transformed, transformer
else:
X_transformed = transformer.fit(X, y, **fit_params).transform(X)
return X_transformed, transformer
class FeatureUnion(BaseEstimator, TransformerMixin):
"""Concatenates results of multiple transformer objects.
This estimator applies a list of transformer objects in parallel to the
input data, then concatenates the results. This is useful to combine
several feature extraction mechanisms into a single transformer.
Read more in the :ref:`User Guide <feature_union>`.
Parameters
----------
transformer_list: list of (string, transformer) tuples
List of transformer objects to be applied to the data. The first
half of each tuple is the name of the transformer.
n_jobs: int, optional
Number of jobs to run in parallel (default 1).
transformer_weights: dict, optional
Multiplicative weights for features per transformer.
Keys are transformer names, values the weights.
"""
def __init__(self, transformer_list, n_jobs=1, transformer_weights=None):
self.transformer_list = transformer_list
self.n_jobs = n_jobs
self.transformer_weights = transformer_weights
def get_feature_names(self):
"""Get feature names from all transformers.
Returns
-------
feature_names : list of strings
Names of the features produced by transform.
"""
feature_names = []
for name, trans in self.transformer_list:
if not hasattr(trans, 'get_feature_names'):
raise AttributeError("Transformer %s does not provide"
" get_feature_names." % str(name))
feature_names.extend([name + "__" + f for f in
trans.get_feature_names()])
return feature_names
def fit(self, X, y=None):
"""Fit all transformers using X.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data, used to fit transformers.
"""
transformers = Parallel(n_jobs=self.n_jobs)(
delayed(_fit_one_transformer)(trans, X, y)
for name, trans in self.transformer_list)
self._update_transformer_list(transformers)
return self
def fit_transform(self, X, y=None, **fit_params):
"""Fit all transformers using X, transform the data and concatenate
results.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data to be transformed.
Returns
-------
X_t : array-like or sparse matrix, shape (n_samples, sum_n_components)
hstack of results of transformers. sum_n_components is the
sum of n_components (output dimension) over transformers.
"""
result = Parallel(n_jobs=self.n_jobs)(
delayed(_fit_transform_one)(trans, name, X, y,
self.transformer_weights, **fit_params)
for name, trans in self.transformer_list)
Xs, transformers = zip(*result)
self._update_transformer_list(transformers)
if any(sparse.issparse(f) for f in Xs):
Xs = sparse.hstack(Xs).tocsr()
else:
Xs = np.hstack(Xs)
return Xs
def transform(self, X):
"""Transform X separately by each transformer, concatenate results.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data to be transformed.
Returns
-------
X_t : array-like or sparse matrix, shape (n_samples, sum_n_components)
hstack of results of transformers. sum_n_components is the
sum of n_components (output dimension) over transformers.
"""
Xs = Parallel(n_jobs=self.n_jobs)(
delayed(_transform_one)(trans, name, X, self.transformer_weights)
for name, trans in self.transformer_list)
if any(sparse.issparse(f) for f in Xs):
Xs = sparse.hstack(Xs).tocsr()
else:
Xs = np.hstack(Xs)
return Xs
def get_params(self, deep=True):
if not deep:
return super(FeatureUnion, self).get_params(deep=False)
else:
out = dict(self.transformer_list)
for name, trans in self.transformer_list:
for key, value in iteritems(trans.get_params(deep=True)):
out['%s__%s' % (name, key)] = value
out.update(super(FeatureUnion, self).get_params(deep=False))
return out
def _update_transformer_list(self, transformers):
self.transformer_list[:] = [
(name, new)
for ((name, old), new) in zip(self.transformer_list, transformers)
]
# XXX it would be nice to have a keyword-only n_jobs argument to this function,
# but that's not allowed in Python 2.x.
def make_union(*transformers):
"""Construct a FeatureUnion from the given transformers.
This is a shorthand for the FeatureUnion constructor; it does not require,
and does not permit, naming the transformers. Instead, they will be given
names automatically based on their types. It also does not allow weighting.
Examples
--------
>>> from sklearn.decomposition import PCA, TruncatedSVD
>>> make_union(PCA(), TruncatedSVD()) # doctest: +NORMALIZE_WHITESPACE
FeatureUnion(n_jobs=1,
transformer_list=[('pca', PCA(copy=True, n_components=None,
whiten=False)),
('truncatedsvd',
TruncatedSVD(algorithm='randomized',
n_components=2, n_iter=5,
random_state=None, tol=0.0))],
transformer_weights=None)
Returns
-------
f : FeatureUnion
"""
return FeatureUnion(_name_estimators(transformers))
| bsd-3-clause |
faneshion/MatchZoo | matchzoo/datasets/snli/load_data.py | 1 | 3067 | """SNLI data loader."""
import typing
from pathlib import Path
import pandas as pd
import keras
import matchzoo
_url = "https://nlp.stanford.edu/projects/snli/snli_1.0.zip"
def load_data(
stage: str = 'train',
task: str = 'classification',
target_label: str = 'entailment',
return_classes: bool = False
) -> typing.Union[matchzoo.DataPack, tuple]:
"""
Load SNLI data.
:param stage: One of `train`, `dev`, and `test`. (default: `train`)
    :param task: Could be one of `ranking`, `classification` or a
        :class:`matchzoo.engine.BaseTask` instance. (default: `classification`)
:param target_label: If `ranking`, chose one of `entailment`,
`contradiction`, `neutral`, and `-` as the positive label.
(default: `entailment`)
:param return_classes: `True` to return classes for classification task,
`False` otherwise.
    :return: A DataPack unless `task` is `classification` and `return_classes`
is `True`: a tuple of `(DataPack, classes)` in that case.
"""
if stage not in ('train', 'dev', 'test'):
        raise ValueError(f"{stage} is not a valid stage. "
                         f"Must be one of `train`, `dev`, and `test`.")
data_root = _download_data()
file_path = data_root.joinpath(f'snli_1.0_{stage}.txt')
data_pack = _read_data(file_path)
if task == 'ranking':
task = matchzoo.tasks.Ranking()
if task == 'classification':
task = matchzoo.tasks.Classification()
if isinstance(task, matchzoo.tasks.Ranking):
if target_label not in ['entailment', 'contradiction', 'neutral', '-']:
            raise ValueError(f"{target_label} is not a valid target label. "
                             f"Must be one of `entailment`, `contradiction`, "
                             f"`neutral` and `-`.")
binary = (data_pack.relation['label'] == target_label).astype(float)
data_pack.relation['label'] = binary
return data_pack
elif isinstance(task, matchzoo.tasks.Classification):
classes = ['entailment', 'contradiction', 'neutral', '-']
label = data_pack.relation['label'].apply(classes.index)
data_pack.relation['label'] = label
data_pack.one_hot_encode_label(num_classes=4, inplace=True)
if return_classes:
return data_pack, classes
else:
return data_pack
else:
        raise ValueError(f"{task} is not a valid task. "
                         f"Must be one of `Ranking` and `Classification`.")
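# Minimal usage sketch (illustrative only, not part of the original module;
# it assumes the SNLI archive can be downloaded into matchzoo.USER_DATA_DIR):
#   train_pack, classes = load_data('train', task='classification',
#                                   return_classes=True)
#   dev_pack = load_data('dev', task='ranking', target_label='entailment')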
def _download_data():
ref_path = keras.utils.data_utils.get_file(
'snli', _url, extract=True,
cache_dir=matchzoo.USER_DATA_DIR,
cache_subdir='snli'
)
return Path(ref_path).parent.joinpath('snli_1.0')
def _read_data(path):
table = pd.read_csv(path, sep='\t')
df = pd.DataFrame({
'text_left': table['sentence1'],
'text_right': table['sentence2'],
'label': table['gold_label']
})
df = df.dropna(axis=0, how='any').reset_index(drop=True)
return matchzoo.pack(df)
| apache-2.0 |
erh3cq/hyperspy | hyperspy/docstrings/plot.py | 3 | 9889 | # -*- coding: utf-8 -*-
# Copyright 2007-2020 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
"""Common docstring snippets for plot.
"""
BASE_PLOT_DOCSTRING_PARAMETERS = \
"""navigator : str, None, or :py:class:`~hyperspy.signal.BaseSignal` (or subclass)
Allowed string values are ``'auto'``, ``'slider'``, and ``'spectrum'``.
If ``'auto'``:
- If `navigation_dimension` > 0, a navigator is
provided to explore the data.
- If `navigation_dimension` is 1 and the signal is an image
the navigator is a sum spectrum obtained by integrating
over the signal axes (the image).
- If `navigation_dimension` is 1 and the signal is a spectrum
the navigator is an image obtained by stacking all the
spectra in the dataset horizontally.
- If `navigation_dimension` is > 1, the navigator is a sum
image obtained by integrating the data over the signal axes.
- Additionally, if `navigation_dimension` > 2, a window
with one slider per axis is raised to navigate the data.
- For example, if the dataset consists of 3 navigation axes `X`,
`Y`, `Z` and one signal axis, `E`, the default navigator will
be an image obtained by integrating the data over `E` at the
current `Z` index and a window with sliders for the `X`, `Y`,
and `Z` axes will be raised. Notice that changing the `Z`-axis
index changes the navigator in this case.
If ``'slider'``:
- If `navigation dimension` > 0 a window with one slider per
axis is raised to navigate the data.
If ``'spectrum'``:
- If `navigation_dimension` > 0 the navigator is always a
spectrum obtained by integrating the data over all other axes.
If ``None``, no navigator will be provided.
Alternatively a :py:class:`~hyperspy.signal.BaseSignal` (or subclass)
instance can be provided. The `signal_dimension` must be 1 (for a
        spectrum navigator) or 2 (for an image navigator) and
`navigation_shape` must be 0 (for a static navigator) or
`navigation_shape` + `signal_shape` must be equal to the
`navigator_shape` of the current object (for a dynamic navigator).
If the signal `dtype` is RGB or RGBA this parameter has no effect and
the value is always set to ``'slider'``.
axes_manager : None or :py:class:`~hyperspy.axes.AxesManager`
If None, the signal's `axes_manager` attribute is used.
plot_markers : bool, default True
Plot markers added using s.add_marker(marker, permanent=True).
Note, a large number of markers might lead to very slow plotting.
navigator_kwds : dict
Only for image navigator, additional keyword arguments for
:py:func:`matplotlib.pyplot.imshow`.
"""
BASE_PLOT_DOCSTRING = \
"""Plot the signal at the current coordinates.
For multidimensional datasets an optional figure,
the "navigator", with a cursor to navigate that data is
raised. In any case it is possible to navigate the data using
the sliders. Currently only signals with signal_dimension equal to
0, 1 and 2 can be plotted.
Parameters
----------
"""
PLOT1D_DOCSTRING = \
"""norm : str, optional
The function used to normalize the data prior to plotting.
Allowable strings are: ``'auto'``, ``'linear'``, ``'log'``.
(default value is ``'auto'``).
If ``'auto'``, intensity is plotted on a linear scale except when
``power_spectrum=True`` (only for complex signals).
autoscale : str
The string must contain any combination of the 'x' and 'v'
characters. If 'x' or 'v' (for values) are in the string, the
corresponding horizontal or vertical axis limits are set to their
maxima and the axis limits will reset when the data or the
navigation indices are changed. Default is 'v'.
"""
PLOT2D_DOCSTRING = \
"""colorbar : bool, optional
If true, a colorbar is plotted for non-RGB images.
autoscale : str
The string must contain any combination of the 'x', 'y' and 'v'
characters. If 'x' or 'y' are in the string, the corresponding
axis limits are set to cover the full range of the data at a given
position. If 'v' (for values) is in the string, the contrast of the
image will be set automatically according to `vmin` and `vmax` when
the data or navigation indices change. Default is 'v'.
saturated_pixels: scalar
The percentage of pixels that are left out of the bounds.
For example, the low and high bounds of a value of 1 are the 0.5%
and 99.5% percentiles. It must be in the [0, 100] range.
If None (default value), the value from the preferences is used.
.. deprecated:: 1.6.0
`saturated_pixels` will be removed in HyperSpy 2.0.0, it is replaced
by `vmin`, `vmax` and `autoscale`.
    norm : {"auto", "linear", "power", "log", "symlog" or a subclass of
        :py:class:`matplotlib.colors.Normalize`}
        Set the norm of the image to display. If "auto", a linear scale is
        used except when `power_spectrum=True` for complex data types.
        "symlog" can be used to display negative values on a log scale -
        read :py:class:`matplotlib.colors.SymLogNorm` and the `linthresh`
        and `linscale` parameters for more details.
vmin, vmax : {scalar, str}, optional
`vmin` and `vmax` are used to normalise the displayed data. It can
be a float or a string. If string, it should be formatted as 'xth',
        where 'x' must be a float in the [0, 100] range. 'x' is used to
compute the x-th percentile of the data. See
:py:func:`numpy.percentile` for more information.
gamma : float
Parameter used in the power-law normalisation when the parameter
norm="power". Read :py:class:`matplotlib.colors.PowerNorm` for more
details. Default value is 1.0.
linthresh : float
When used with norm="symlog", define the range within which the
plot is linear (to avoid having the plot go to infinity around
zero). Default value is 0.01.
linscale : float
This allows the linear range (-linthresh to linthresh) to be
stretched relative to the logarithmic range. Its value is the
number of powers of base to use for each half of the linear range.
See :py:class:`matplotlib.colors.SymLogNorm` for more details.
        Default value is 0.1.
scalebar : bool, optional
If True and the units and scale of the x and y axes are the same a
scale bar is plotted.
scalebar_color : str, optional
A valid MPL color string; will be used as the scalebar color.
axes_ticks : {None, bool}, optional
If True, plot the axes ticks. If None axes_ticks are only
plotted when the scale bar is not plotted. If False the axes ticks
are never plotted.
axes_off : {bool}
Default is False.
no_nans : bool, optional
If True, set nans to zero for plotting.
centre_colormap : {"auto", True, False}
If True the centre of the color scheme is set to zero. This is
specially useful when using diverging color schemes. If "auto"
(default), diverging color schemes are automatically centred.
min_aspect : float
Set the minimum aspect ratio of the image and the figure. To
keep the image in the aspect limit the pixels are made
rectangular.
"""
COMPLEX_DOCSTRING = \
"""power_spectrum : bool, default is False.
If True, plot the power spectrum instead of the actual signal, if
False, plot the real and imaginary parts of the complex signal.
representation : {'cartesian' or 'polar'}
Determines if the real and imaginary part of the complex data is plotted ('cartesian',
default), or if the amplitude and phase should be used ('polar').
same_axes : bool, default True
If True (default) plot the real and
imaginary parts (or amplitude and phase) in the same figure if
the signal is one-dimensional.
fft_shift : bool, default False
If True, shift the zero-frequency component.
See :py:func:`numpy.fft.fftshift` for more details.
"""
PLOT2D_KWARGS_DOCSTRING = \
"""**kwargs
Only when plotting an image: additional (optional) keyword
arguments for :py:func:`matplotlib.pyplot.imshow`.
"""
| gpl-3.0 |
mhallsmoore/qstrader | tests/unit/system/rebalance/test_weekly_rebalance.py | 1 | 1884 | import pandas as pd
import pytest
import pytz
from qstrader.system.rebalance.weekly import WeeklyRebalance
@pytest.mark.parametrize(
"start_date,end_date,weekday,pre_market,expected_dates,expected_time",
[
(
'2020-03-11', '2020-05-17', 'MON', False, [
'2020-03-16', '2020-03-23', '2020-03-30', '2020-04-06',
'2020-04-13', '2020-04-20', '2020-04-27', '2020-05-04',
'2020-05-11'
], '21:00:00'
),
(
'2019-12-26', '2020-02-07', 'WED', True, [
'2020-01-01', '2020-01-08', '2020-01-15', '2020-01-22',
'2020-01-29', '2020-02-05'
], '14:30:00'
)
]
)
def test_weekly_rebalance(
start_date, end_date, weekday, pre_market, expected_dates, expected_time
):
"""
Checks that the weekly rebalance provides the correct business
datetimes for the provided range.
"""
sd = pd.Timestamp(start_date, tz=pytz.UTC)
ed = pd.Timestamp(end_date, tz=pytz.UTC)
reb = WeeklyRebalance(
start_date=sd, end_date=ed, weekday=weekday, pre_market=pre_market
)
actual_datetimes = reb._generate_rebalances()
expected_datetimes = [
pd.Timestamp('%s %s' % (expected_date, expected_time), tz=pytz.UTC)
for expected_date in expected_dates
]
assert actual_datetimes == expected_datetimes
def test_check_weekday_raises_value_error():
"""
Checks that initialisation of WeeklyRebalance raises
a ValueError if the weekday string is in the incorrect
format.
"""
sd = pd.Timestamp('2020-01-01', tz=pytz.UTC)
ed = pd.Timestamp('2020-02-01', tz=pytz.UTC)
pre_market = True
weekday = 'SUN'
with pytest.raises(ValueError):
WeeklyRebalance(
start_date=sd, end_date=ed, weekday=weekday, pre_market=pre_market
)
| mit |
fredhohman/pymks | pymks/tests/test.py | 6 | 2732 | import numpy as np
def test_elastic_FE_simulation_3D():
from pymks.datasets.elastic_FE_simulation import ElasticFESimulation
nx = 5
    ii = (nx - 1) // 2
X = np.zeros((1, nx, nx, nx), dtype=int)
X[0, :, ii] = 1
model = ElasticFESimulation(elastic_modulus=(1., 10.),
poissons_ratio=(0., 0.))
model.run(X)
solution = [1., 0., 0., 0., 0., 0.]
assert np.allclose([np.mean(model.strain[0, ..., i]) for i in range(6)],
solution)
def test_elastic_FE_simulation_3D_BCs():
from pymks.datasets.elastic_FE_simulation import ElasticFESimulation
np.random.seed(8)
N = 4
X = np.random.randint(2, size=(1, N, N, N))
macro_strain = 0.1
sim = ElasticFESimulation((10.0, 1.0), (0.3, 0.3), macro_strain=0.1)
sim.run(X)
u = sim.displacement[0]
# Check the left/right offset
assert np.allclose(u[-1, ..., 0] - u[0, ..., 0], N * macro_strain)
# Check the left/right y-periodicity
assert np.allclose(u[0, ..., 1], u[-1, ..., 1])
def get_delta_data(nx, ny):
from pymks.datasets import make_elastic_FE_strain_delta
return make_elastic_FE_strain_delta(elastic_modulus=(1, 1.1),
poissons_ratio=(0.3, 0.3),
size=(nx, ny))
def get_random_data(nx, ny):
from pymks.datasets import make_elastic_FE_strain_random
np.random.seed(8)
return make_elastic_FE_strain_random(elastic_modulus=(1., 1.1),
poissons_ratio=(0.3, 0.3),
n_samples=1,
size=(nx, ny))
def roll_zip(*args):
return list(zip(*tuple(np.rollaxis(x, -1) for x in args)))
def test_cahn_hilliard():
from pymks.datasets.cahn_hilliard_simulation import CahnHilliardSimulation
from pymks.datasets import make_cahn_hilliard
from sklearn import metrics
from pymks import MKSRegressionModel
from pymks import ContinuousIndicatorBasis
mse = metrics.mean_squared_error
n_samples = 100
n_spaces = 20
dt = 1e-3
np.random.seed(0)
X, y = make_cahn_hilliard(n_samples=n_samples,
size=(n_spaces, n_spaces), dt=dt)
basis = ContinuousIndicatorBasis(10, [-1, 1])
model = MKSRegressionModel(basis)
model.fit(X, y)
X_test = np.array([np.random.random((n_spaces,
n_spaces)) for i in range(1)])
CHSim = CahnHilliardSimulation(dt=dt)
CHSim.run(X_test)
y_test = CHSim.response
y_pred = model.predict(X_test)
assert mse(y_test[0], y_pred[0]) < 0.03
if __name__ == '__main__':
test_MKS_elastic_delta()
| mit |
7630155/tushare | tushare/stock/shibor.py | 38 | 5010 | # -*- coding:utf-8 -*-
"""
Shanghai Interbank Offered Rate (Shibor) data interface
Created on 2014/07/31
@author: Jimmy Liu
@group : waditu
@contact: [email protected]
"""
import pandas as pd
import numpy as np
from tushare.stock import cons as ct
from tushare.util import dateu as du
def shibor_data(year=None):
"""
    Fetch the Shanghai Interbank Offered Rate (Shibor).
    Parameters
    ------
      year: year (int)
    Return
    ------
    date: date
    ON: overnight rate
    1W: 1-week rate
    2W: 2-week rate
    1M: 1-month rate
    3M: 3-month rate
    6M: 6-month rate
    9M: 9-month rate
    1Y: 1-year rate
"""
year = du.get_year() if year is None else year
lab = ct.SHIBOR_TYPE['Shibor']
lab = lab.encode('utf-8') if ct.PY3 else lab
try:
df = pd.read_excel(ct.SHIBOR_DATA_URL%(ct.P_TYPE['http'], ct.DOMAINS['shibor'],
ct.PAGES['dw'], 'Shibor',
year, lab,
year))
df.columns = ct.SHIBOR_COLS
df['date'] = df['date'].map(lambda x: x.date())
df['date'] = df['date'].astype(np.datetime64)
return df
except:
return None
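# Usage sketch (illustrative; requires network access to the Shibor site and
# returns None if the download fails):
#   df = shibor_data(2014)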
def shibor_quote_data(year=None):
"""
    Fetch Shibor bank quote data.
    Parameters
    ------
      year: year (int)
    Return
    ------
    date: date
    bank: quoting bank name
    ON: overnight rate
    ON_B: overnight bid
    ON_A: overnight ask
    1W_B: 1-week bid
    1W_A: 1-week ask
    2W_B: 2-week bid
    2W_A: 2-week ask
    1M_B: 1-month bid
    1M_A: 1-month ask
    3M_B: 3-month bid
    3M_A: 3-month ask
    6M_B: 6-month bid
    6M_A: 6-month ask
    9M_B: 9-month bid
    9M_A: 9-month ask
    1Y_B: 1-year bid
    1Y_A: 1-year ask
"""
year = du.get_year() if year is None else year
lab = ct.SHIBOR_TYPE['Quote']
lab = lab.encode('utf-8') if ct.PY3 else lab
try:
df = pd.read_excel(ct.SHIBOR_DATA_URL%(ct.P_TYPE['http'], ct.DOMAINS['shibor'],
ct.PAGES['dw'], 'Quote',
year, lab,
year), skiprows=[0])
df.columns = ct.QUOTE_COLS
df['date'] = df['date'].map(lambda x: x.date())
df['date'] = df['date'].astype(np.datetime64)
return df
except:
return None
def shibor_ma_data(year=None):
"""
    Fetch Shibor moving-average data.
    Parameters
    ------
      year: year (int)
    Return
    ------
    date: date
    remaining columns are the 5-, 10- and 20-day averages for each tenor
"""
year = du.get_year() if year is None else year
lab = ct.SHIBOR_TYPE['Tendency']
lab = lab.encode('utf-8') if ct.PY3 else lab
try:
df = pd.read_excel(ct.SHIBOR_DATA_URL%(ct.P_TYPE['http'], ct.DOMAINS['shibor'],
ct.PAGES['dw'], 'Shibor_Tendency',
year, lab,
year), skiprows=[0])
df.columns = ct.SHIBOR_MA_COLS
df['date'] = df['date'].map(lambda x: x.date())
df['date'] = df['date'].astype(np.datetime64)
return df
except:
return None
def lpr_data(year=None):
"""
    Fetch the Loan Prime Rate (LPR).
    Parameters
    ------
      year: year (int)
    Return
    ------
    date: date
    1Y: 1-year loan prime rate
"""
year = du.get_year() if year is None else year
lab = ct.SHIBOR_TYPE['LPR']
lab = lab.encode('utf-8') if ct.PY3 else lab
try:
df = pd.read_excel(ct.SHIBOR_DATA_URL%(ct.P_TYPE['http'], ct.DOMAINS['shibor'],
ct.PAGES['dw'], 'LPR',
year, lab,
year))
df.columns = ct.LPR_COLS
df['date'] = df['date'].map(lambda x: x.date())
df['date'] = df['date'].astype(np.datetime64)
return df
except:
return None
def lpr_ma_data(year=None):
"""
    Fetch Loan Prime Rate moving-average data.
    Parameters
    ------
      year: year (int)
    Return
    ------
    date: date
    1Y_5: 5-day average
    1Y_10: 10-day average
    1Y_20: 20-day average
"""
year = du.get_year() if year is None else year
lab = ct.SHIBOR_TYPE['LPR_Tendency']
lab = lab.encode('utf-8') if ct.PY3 else lab
try:
df = pd.read_excel(ct.SHIBOR_DATA_URL%(ct.P_TYPE['http'], ct.DOMAINS['shibor'],
ct.PAGES['dw'], 'LPR_Tendency',
year, lab,
year), skiprows=[0])
df.columns = ct.LPR_MA_COLS
df['date'] = df['date'].map(lambda x: x.date())
df['date'] = df['date'].astype(np.datetime64)
return df
except:
return None
| bsd-3-clause |
iandriver/RNA-sequence-tools | FPKM_Parsing/make_align_report_single.py | 2 | 2313 | import fnmatch
import os
import pandas as pd
import cPickle as pickle
from scipy import stats
import matplotlib as mpl
import matplotlib.pyplot as plt
from collections import OrderedDict
path = '/Volumes/Seq_data'
result_file_names = ['results_kidney_raw', 'results_liver_raw', 'results_lung_raw']
cell_list =[]
align_dict =OrderedDict()
align_dict['input_num'] = []
align_dict['mapped_num'] = []
align_dict['mult_num'] = []
align_dict['per_mapped'] = []
align_dict['mult_mapped_per'] = []
for rf in result_file_names:
path_to_file = os.path.join(path, rf)
for root, dirnames, filenames in os.walk(path_to_file):
for filename in fnmatch.filter(filenames, 'align_summary.txt'):
g_cell_name = root.split('/')[-1]
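            # Zero-pad the trailing sample number to three digits so that
            # cell names sort consistently (e.g. '..._7' becomes '..._007').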
num = g_cell_name.split('_')[-1]
if len(num) == 2:
cell_name = '_'.join(g_cell_name.split('_')[0:-1])+'_'+'0'+num
elif len(num) == 1:
cell_name = '_'.join(g_cell_name.split('_')[0:-1])+'_'+'00'+num
else:
cell_name = g_cell_name
cell_list.append(cell_name)
            f = open(os.path.join(root,'align_summary.txt'), 'r')
for l in f:
if "Input" in l:
input_num = int(l.split(':')[-1])
if "Mapped" in l:
mapped_1 = l.split(':')[-1]
mapped_num = int(mapped_1.split('(')[0].strip())
per_mapped_1 = mapped_1.split('(')[1]
per_mapped = per_mapped_1.split('%')[0]
if "of these" in l:
mult_1 = l.split(':')[-1]
mult_num = int(mult_1.split('(')[0].strip())
mult_per_1 = mult_1.split('(')[1]
mult_per = mult_per_1.split('%')[0]
align_dict['input_num'].append(input_num)
align_dict['mapped_num'].append(mapped_num)
align_dict['mult_num'].append(mult_num)
align_dict['per_mapped'].append(per_mapped)
align_dict['mult_mapped_per'].append(mult_per)
f.close()
align_df = pd.DataFrame(align_dict, index = cell_list)
align_df.to_csv(os.path.join(path,'counts_sheppard_all','results_sheppard_all_align.txt'), sep = '\t')
plt.hist(align_df['mapped_num'])
plt.show()
with open(os.path.join(path,'counts_sheppard_all','results_sheppard_all_align.p'), 'wb') as fp:
pickle.dump(align_df, fp)
| mit |
gef756/statsmodels | docs/source/conf.py | 27 | 11559 | # -*- coding: utf-8 -*-
#
# statsmodels documentation build configuration file, created by
# sphinx-quickstart on Sat Jan 22 11:17:58 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../sphinxext'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest',
'sphinx.ext.intersphinx', 'sphinx.ext.todo',
'sphinx.ext.pngmath', 'sphinx.ext.viewcode', 'sphinx.ext.autosummary',
'sphinx.ext.inheritance_diagram',
'matplotlib.sphinxext.plot_directive',
'matplotlib.sphinxext.only_directives',
'IPython.sphinxext.ipython_console_highlighting',
'IPython.sphinxext.ipython_directive',
'numpy_ext.numpydoc',
'github' # for GitHub links
]
import sphinx
if sphinx.__version__ == '1.1.3':
print ("WARNING: Not building inheritance diagrams on sphinx 1.1.3. "
"See https://github.com/statsmodels/statsmodels/issues/1002")
extensions.remove('sphinx.ext.inheritance_diagram')
# plot_directive is broken on old matplotlib
from matplotlib import __version__ as mpl_version
from distutils.version import LooseVersion
if LooseVersion(mpl_version) < LooseVersion('1.0.1'):
extensions.remove('matplotlib.sphinxext.plot_directive')
extensions.append('numpy_ext.plot_directive')
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'statsmodels'
copyright = u'2009-2013, Josef Perktold, Skipper Seabold, Jonathan Taylor, statsmodels-developers'
autosummary_generate = True
autoclass_content = 'class'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
from statsmodels.version import short_version, full_version
release = short_version
# The full version, including dev tag.
version = full_version
# set inheritance_graph_attrs
# you need graphviz installed to use this
# see: http://sphinx.pocoo.org/ext/inheritance.html
# and graphviz dot documentation http://www.graphviz.org/content/attrs
#NOTE: giving the empty string to size allows graphviz to figure out
# the size
inheritance_graph_attrs = dict(size='""', ratio="compress", fontsize=14,
rankdir="LR")
#inheritance_node_attrs = dict(shape='ellipse', fontsize=14, height=0.75,
# color='dodgerblue1', style='filled')
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['*/autosummary/class.rst', '*/autosummary/glmfamilies.rst']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'default'
if 'htmlhelp' in sys.argv:
#html_theme = 'statsmodels_htmlhelp' #doesn't look nice yet
html_theme = 'default'
    print('################# using statsmodels_htmlhelp ############')
else:
html_theme = 'statsmodels'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['../themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'images/statsmodels_hybi_banner.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'images/statsmodels_hybi_favico.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {'index' : ['indexsidebar.html','searchbox.html','sidelinks.html']}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'statsmodelsdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'statsmodels.tex', u'statsmodels Documentation',
u'Josef Perktold, Skipper Seabold', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# pngmath options
# http://sphinx-doc.org/ext/math.html#module-sphinx.ext.pngmath
pngmath_latex_preamble=r'\usepackage[active]{preview}' # + other custom stuff for inline math, such as non-default math fonts etc.
pngmath_use_preview=True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'statsmodels', u'statsmodels Documentation',
[u'Josef Perktold, Skipper Seabold, Jonathan Taylor'], 1)
]
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'statsmodels'
epub_author = u'Josef Perktold, Skipper Seabold'
epub_publisher = u'Josef Perktold, Skipper Seabold'
epub_copyright = u'2009-2013, Josef Perktold, Skipper Seabold, Jonathan Taylor, statsmodels-developers'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'numpy' : ('http://docs.scipy.org/doc/numpy/', None),
'python' : ('http://docs.python.org/3.2', None),
'pydagogue' : ('http://matthew-brett.github.io/pydagogue/', None),
'patsy' : ('http://patsy.readthedocs.org/en/latest/', None),
'pandas' : ('http://pandas.pydata.org/pandas-docs/dev/', None),
}
from os.path import dirname, abspath, join
plot_basedir = join(dirname(dirname(os.path.abspath(__file__))), 'source')
# ghissue config
github_project_url = "https://github.com/statsmodels/statsmodels"
# for the examples landing page
import json
example_context = json.load(open('examples/landing.json'))
html_context = {'examples': example_context }
| bsd-3-clause |
kjung/scikit-learn | sklearn/kernel_ridge.py | 37 | 6556 | """Module :mod:`sklearn.kernel_ridge` implements kernel ridge regression."""
# Authors: Mathieu Blondel <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# License: BSD 3 clause
import numpy as np
from .base import BaseEstimator, RegressorMixin
from .metrics.pairwise import pairwise_kernels
from .linear_model.ridge import _solve_cholesky_kernel
from .utils import check_X_y
from .utils.validation import check_is_fitted
class KernelRidge(BaseEstimator, RegressorMixin):
"""Kernel ridge regression.
Kernel ridge regression (KRR) combines ridge regression (linear least
squares with l2-norm regularization) with the kernel trick. It thus
learns a linear function in the space induced by the respective kernel and
the data. For non-linear kernels, this corresponds to a non-linear
function in the original space.
The form of the model learned by KRR is identical to support vector
regression (SVR). However, different loss functions are used: KRR uses
squared error loss while support vector regression uses epsilon-insensitive
loss, both combined with l2 regularization. In contrast to SVR, fitting a
KRR model can be done in closed-form and is typically faster for
medium-sized datasets. On the other hand, the learned model is non-sparse
and thus slower than SVR, which learns a sparse model for epsilon > 0, at
prediction-time.
This estimator has built-in support for multi-variate regression
(i.e., when y is a 2d-array of shape [n_samples, n_targets]).
Read more in the :ref:`User Guide <kernel_ridge>`.
Parameters
----------
alpha : {float, array-like}, shape = [n_targets]
Small positive values of alpha improve the conditioning of the problem
and reduce the variance of the estimates. Alpha corresponds to
``(2*C)^-1`` in other linear models such as LogisticRegression or
LinearSVC. If an array is passed, penalties are assumed to be specific
to the targets. Hence they must correspond in number.
kernel : string or callable, default="linear"
Kernel mapping used internally. A callable should accept two arguments
and the keyword arguments passed to this object as kernel_params, and
should return a floating point number.
gamma : float, default=None
Gamma parameter for the RBF, laplacian, polynomial, exponential chi2
and sigmoid kernels. Interpretation of the default value is left to
the kernel; see the documentation for sklearn.metrics.pairwise.
Ignored by other kernels.
degree : float, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
kernel_params : mapping of string to any, optional
Additional parameters (keyword arguments) for kernel function passed
as callable object.
Attributes
----------
dual_coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s) in kernel space
X_fit_ : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data, which is also required for prediction
References
----------
* Kevin P. Murphy
"Machine Learning: A Probabilistic Perspective", The MIT Press
chapter 14.4.3, pp. 492-493
See also
--------
Ridge
Linear ridge regression.
SVR
Support Vector Regression implemented using libsvm.
Examples
--------
>>> from sklearn.kernel_ridge import KernelRidge
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> rng = np.random.RandomState(0)
>>> y = rng.randn(n_samples)
>>> X = rng.randn(n_samples, n_features)
>>> clf = KernelRidge(alpha=1.0)
>>> clf.fit(X, y) # doctest: +NORMALIZE_WHITESPACE
KernelRidge(alpha=1.0, coef0=1, degree=3, gamma=None, kernel='linear',
kernel_params=None)
"""
def __init__(self, alpha=1, kernel="linear", gamma=None, degree=3, coef0=1,
kernel_params=None):
self.alpha = alpha
self.kernel = kernel
self.gamma = gamma
self.degree = degree
self.coef0 = coef0
self.kernel_params = kernel_params
def _get_kernel(self, X, Y=None):
if callable(self.kernel):
params = self.kernel_params or {}
else:
params = {"gamma": self.gamma,
"degree": self.degree,
"coef0": self.coef0}
return pairwise_kernels(X, Y, metric=self.kernel,
filter_params=True, **params)
@property
def _pairwise(self):
return self.kernel == "precomputed"
def fit(self, X, y=None, sample_weight=None):
"""Fit Kernel Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample, ignored if None is passed.
Returns
-------
self : returns an instance of self.
"""
# Convert data
X, y = check_X_y(X, y, accept_sparse=("csr", "csc"), multi_output=True,
y_numeric=True)
K = self._get_kernel(X)
alpha = np.atleast_1d(self.alpha)
ravel = False
if len(y.shape) == 1:
y = y.reshape(-1, 1)
ravel = True
copy = self.kernel == "precomputed"
self.dual_coef_ = _solve_cholesky_kernel(K, y, alpha,
sample_weight,
copy)
if ravel:
self.dual_coef_ = self.dual_coef_.ravel()
self.X_fit_ = X
return self
    def predict(self, X):
        """Predict using the kernel ridge model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Samples.
Returns
-------
C : array, shape = [n_samples] or [n_samples, n_targets]
Returns predicted values.
"""
check_is_fitted(self, ["X_fit_", "dual_coef_"])
K = self._get_kernel(X, self.X_fit_)
return np.dot(K, self.dual_coef_)
| bsd-3-clause |
jakejhansen/minesweeper_solver | evolutionary/hpcjobs/core/es-multi-threaded-legacy.py | 2 | 8193 | import multiprocessing as mp
import time
import gym
import IPython
import pickle
import numpy as np
from joblib import Parallel, delayed
from keras.layers import Dense
from keras.models import Input, Model, Sequential, clone_model
from keras.optimizers import Adam
import argparse
import matplotlib.pyplot as plt
import os
import pathlib
def fitness_rank_transform(rewards):
# Performs the fitness rank transformation used for CMA-ES.
# Reference: Natural Evolution Strategies [2014]
n = len(rewards)
sorted_indices = np.argsort(-rewards)
u = np.zeros(n)
for k in range(n):
u[sorted_indices[k]] = np.max([0, np.log(n/2+1)-np.log(k+1)])
u = u/np.sum(u)-1/n
return u
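# Example of the rank transform (hypothetical rewards, not from the original
# code): the highest reward receives the largest utility and, by construction,
# the utilities sum to zero.
#   u = fitness_rank_transform(np.array([1.0, 3.0, 2.0]))
#   # u[1] is the largest entry and np.isclose(u.sum(), 0.0) holds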
def pickle_save(obj, name, directory=None):
if directory is None:
directory = os.getcwd()
pathlib.Path(directory).mkdir(parents=True, exist_ok=True)
with open(directory + name + '.pkl', 'wb') as f:
pickle.dump(obj, f)
def pickle_load(name, directory=None):
    with open(directory + name + '.pkl', 'rb') as f:
return pickle.load(f)
class Evolver(object):
def __init__(self, model, envs, learning_rate=0.001, sigma=0.1, workers=mp.cpu_count()):
self.nWorkers = workers
self.model = model
self.envs = envs
self.weights = self.model.get_weights()
self.learning_rate = learning_rate
self.sigma = sigma
self.population_size = len(self.envs)
        self.results = {'generations': [], 'population_rewards': [],
                        'test_rewards': [], 'time': []}
def print_progress(self, gen=1, generations=1):
if self.print_every and (gen % self.print_every == 0 or gen == generations - 1):
print('Generation {:>4d} | Test reward {: >6.1f} | Mean pop reward {: >6.1f} | Time {:>4.2f} seconds'.format(
gen, self.results['test_rewards'][-1], np.mean(self.results['population_rewards'][-1]), self.results['time'][-1]))
def make_checkpoint(self, gen=1, generations=1):
if self.checkpoint_every and (gen % self.checkpoint_every == 0 or gen == generations - 1):
self.model.save_weights('weights.h5')
pickle_save(self.results, 'results')
def load_checkpoint(self, filename):
raise NotImplementedError
def plot_progress(self, gen=1, generations=1):
if self.plot_every and (gen % self.plot_every == 0 or gen == generations - 1):
fig = plt.figure()
plt.plot(self.results['generations'], np.mean(self.results['population_rewards'], 1))
plt.plot(self.results['generations'], self.results['test_rewards'])
plt.xlabel('Generation')
plt.ylabel('Reward')
plt.legend(['Mean population reward', 'Test reward'])
plt.tight_layout()
plt.savefig('progress.pdf')
plt.close(fig)
def evolve(self, generations, print_every=0, plot_every=0, checkpoint_every=50):
self.print_every = print_every
self.plot_every = plot_every
self.checkpoint_every = checkpoint_every
with mp.Pool(self.nWorkers) as p:
for gen in range(generations):
t_start = time.time()
# noise = []
# weights_try = []
# rewards = np.zeros(self.population_size)
# for i in range(self.population_size):
# x = []
# for w in self.weights:
# x.append(np.random.randn(*w.shape))
# noise.append(x)
# weights_try.append(self.permute_weights(noise[i]))
# Evaluate fitness
# TODO figure out how to give permuted weights
# TODO passed arguments have their old name (e.g. 'name' in self.model=name) FIX THIS
inputs = zip(self.envs, [self.model]*self.population_size, [True]*self.population_size)
output = p.map(self.fitnessfun, inputs)
rewards = [t[0] for t in output]
noise = [t[1] for t in output]
# [(noise1, reward1), (n2,r2), ...]
# noise = [noise1, noise2, ...]
# reward = ...
# rewards = []
# for i in range(self.population_size):
# self.model.set_weights(weights_try[i])
# rewards.append(fitnessfun(self.model, self.envs[i]))
fitnesses = fitness_rank_transform(np.array(rewards))
#fitnesses = (rewards - np.mean(rewards))/np.std(rewards)
#IPython.embed()
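                # ES update: shift each weight array along the fitness-weighted
                # sum of its sampled noise, scaled by
                # learning_rate / (population_size * sigma).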
for index, w in enumerate(self.weights):
A = np.array([n[index] for n in noise])
self.weights[index] = w + self.learning_rate/(self.population_size*self.sigma) * np.dot(A.T, fitnesses).T
self.model.set_weights(self.weights)
t = time.time()-t_start
test_reward = self.fitnessfun((self.envs[0], self.model, False))[0]
self.results['generations'].append(gen)
self.results['population_rewards'].append(rewards)
self.results['test_rewards'].append(test_reward)
self.results['time'].append(t)
# On cluster, extract plot data using sed like so
# sed -e 's/.*Reward \(.*\) | Time.*/\1/' deep/evo/CartPole-v1-\(4\)/output_008.txt > plotres.txt
self.print_progress(gen, generations)
self.make_checkpoint(gen, generations)
self.plot_progress(gen, generations)
self.make_checkpoint()
return self.results
def permute_weights(self, p):
weights = []
for index, i in enumerate(p):
jittered = self.sigma*i
weights.append(self.weights[index] + jittered)
return weights
def get_noise(self):
noise = []
for w in self.weights:
noise.append(np.random.randn(*w.shape))
return noise
def fitnessfun(self, tup):
env, model, do_permute = tup
noise = []
if do_permute:
noise = self.get_noise()
weights = self.permute_weights(noise)
model.set_weights(weights)
observation = env.reset()
o_shape = observation.shape
total_reward = 0
done = False
while not done:
action = model.predict(observation.reshape((1,)+o_shape))
observation, reward, done, info = env.step(np.argmax(action))
total_reward += reward
return (total_reward, noise)
def testfun(model, env, episodes):
o_shape = env.observation_space.shape
total_reward = []
for i in range(episodes):
total_reward.append(0)
observation = env.reset()
done = False
while not done:
action = model.predict(observation.reshape((1,)+o_shape))
observation, reward, done, info = env.step(np.argmax(action))
env.render()
total_reward[i] += reward
return total_reward
parser = argparse.ArgumentParser()
parser.add_argument('--nwrk', type=int, default=-1)
parser.add_argument('--nags', type=int, default=20)
parser.add_argument('--ngns', type=int, default=1000)
args = parser.parse_args()
envs = [gym.make('CartPole-v0') for i in range(args.nags)]
o_shape = envs[0].observation_space.shape
a_shape = envs[0].action_space.n
model = Sequential()
model.add(Dense(input_shape=o_shape, units=32))
model.add(Dense(units=128))
model.add(Dense(units=128))
model.add(Dense(units=a_shape))
model.compile(optimizer='adam', loss='mean_squared_error')
model.summary()
envs_simple = range(args.nags)
if __name__ == '__main__':
try:
mp.freeze_support()
e = Evolver(model=model, envs=envs, learning_rate=0.01, sigma=0.1, workers=args.nwrk)
results = e.evolve(args.ngns, print_every=1, plot_every=10)
model.load_weights('weights.h5')
testfun(model, envs[0], 10)
    except Exception:
        pass
| mit |
lin-credible/scikit-learn | sklearn/decomposition/tests/test_kernel_pca.py | 155 | 8058 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import (assert_array_almost_equal, assert_less,
assert_equal, assert_not_equal,
assert_raises)
from sklearn.decomposition import PCA, KernelPCA
from sklearn.datasets import make_circles
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.metrics.pairwise import rbf_kernel
def test_kernel_pca():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
def histogram(x, y, **kwargs):
# Histogram kernel implemented as a callable.
assert_equal(kwargs, {}) # no kernel_params that we didn't ask for
return np.minimum(x, y).sum()
for eigen_solver in ("auto", "dense", "arpack"):
for kernel in ("linear", "rbf", "poly", histogram):
# histogram kernel produces singular matrix inside linalg.solve
# XXX use a least-squares approximation?
inv = not callable(kernel)
# transform fit data
kpca = KernelPCA(4, kernel=kernel, eigen_solver=eigen_solver,
fit_inverse_transform=inv)
X_fit_transformed = kpca.fit_transform(X_fit)
X_fit_transformed2 = kpca.fit(X_fit).transform(X_fit)
assert_array_almost_equal(np.abs(X_fit_transformed),
np.abs(X_fit_transformed2))
# non-regression test: previously, gamma would be 0 by default,
# forcing all eigenvalues to 0 under the poly kernel
assert_not_equal(X_fit_transformed, [])
# transform new data
X_pred_transformed = kpca.transform(X_pred)
assert_equal(X_pred_transformed.shape[1],
X_fit_transformed.shape[1])
# inverse transform
if inv:
X_pred2 = kpca.inverse_transform(X_pred_transformed)
assert_equal(X_pred2.shape, X_pred.shape)
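# fit_inverse_transform needs samples in the original feature space, which a
# precomputed kernel cannot provide, so the combination below must raise a
# ValueError.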
def test_invalid_parameters():
assert_raises(ValueError, KernelPCA, 10, fit_inverse_transform=True,
kernel='precomputed')
def test_kernel_pca_sparse():
rng = np.random.RandomState(0)
X_fit = sp.csr_matrix(rng.random_sample((5, 4)))
X_pred = sp.csr_matrix(rng.random_sample((2, 4)))
for eigen_solver in ("auto", "arpack"):
for kernel in ("linear", "rbf", "poly"):
# transform fit data
kpca = KernelPCA(4, kernel=kernel, eigen_solver=eigen_solver,
fit_inverse_transform=False)
X_fit_transformed = kpca.fit_transform(X_fit)
X_fit_transformed2 = kpca.fit(X_fit).transform(X_fit)
assert_array_almost_equal(np.abs(X_fit_transformed),
np.abs(X_fit_transformed2))
# transform new data
X_pred_transformed = kpca.transform(X_pred)
assert_equal(X_pred_transformed.shape[1],
X_fit_transformed.shape[1])
# inverse transform
# X_pred2 = kpca.inverse_transform(X_pred_transformed)
# assert_equal(X_pred2.shape, X_pred.shape)
def test_kernel_pca_linear_kernel():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
# for a linear kernel, kernel PCA should find the same projection as PCA
# modulo the sign (direction)
# fit only the first four components: fifth is near zero eigenvalue, so
# can be trimmed due to roundoff error
assert_array_almost_equal(
np.abs(KernelPCA(4).fit(X_fit).transform(X_pred)),
np.abs(PCA(4).fit(X_fit).transform(X_pred)))
def test_kernel_pca_n_components():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
for eigen_solver in ("dense", "arpack"):
for c in [1, 2, 4]:
kpca = KernelPCA(n_components=c, eigen_solver=eigen_solver)
shape = kpca.fit(X_fit).transform(X_pred).shape
assert_equal(shape, (2, c))
def test_remove_zero_eig():
X = np.array([[1 - 1e-30, 1], [1, 1], [1, 1 - 1e-20]])
# n_components=None (default) => remove_zero_eig is True
kpca = KernelPCA()
Xt = kpca.fit_transform(X)
assert_equal(Xt.shape, (3, 0))
kpca = KernelPCA(n_components=2)
Xt = kpca.fit_transform(X)
assert_equal(Xt.shape, (3, 2))
kpca = KernelPCA(n_components=2, remove_zero_eig=True)
Xt = kpca.fit_transform(X)
assert_equal(Xt.shape, (3, 0))
def test_kernel_pca_precomputed():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
for eigen_solver in ("dense", "arpack"):
X_kpca = KernelPCA(4, eigen_solver=eigen_solver).\
fit(X_fit).transform(X_pred)
X_kpca2 = KernelPCA(
4, eigen_solver=eigen_solver, kernel='precomputed').fit(
np.dot(X_fit, X_fit.T)).transform(np.dot(X_pred, X_fit.T))
X_kpca_train = KernelPCA(
4, eigen_solver=eigen_solver,
kernel='precomputed').fit_transform(np.dot(X_fit, X_fit.T))
X_kpca_train2 = KernelPCA(
4, eigen_solver=eigen_solver, kernel='precomputed').fit(
np.dot(X_fit, X_fit.T)).transform(np.dot(X_fit, X_fit.T))
assert_array_almost_equal(np.abs(X_kpca),
np.abs(X_kpca2))
assert_array_almost_equal(np.abs(X_kpca_train),
np.abs(X_kpca_train2))
def test_kernel_pca_invalid_kernel():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((2, 4))
kpca = KernelPCA(kernel="tototiti")
assert_raises(ValueError, kpca.fit, X_fit)
def test_gridsearch_pipeline():
# Test if we can do a grid-search to find parameters to separate
# circles with a perceptron model.
X, y = make_circles(n_samples=400, factor=.3, noise=.05,
random_state=0)
kpca = KernelPCA(kernel="rbf", n_components=2)
pipeline = Pipeline([("kernel_pca", kpca), ("Perceptron", Perceptron())])
param_grid = dict(kernel_pca__gamma=2. ** np.arange(-2, 2))
grid_search = GridSearchCV(pipeline, cv=3, param_grid=param_grid)
grid_search.fit(X, y)
assert_equal(grid_search.best_score_, 1)
def test_gridsearch_pipeline_precomputed():
# Test if we can do a grid-search to find parameters to separate
# circles with a perceptron model using a precomputed kernel.
X, y = make_circles(n_samples=400, factor=.3, noise=.05,
random_state=0)
kpca = KernelPCA(kernel="precomputed", n_components=2)
pipeline = Pipeline([("kernel_pca", kpca), ("Perceptron", Perceptron())])
param_grid = dict(Perceptron__n_iter=np.arange(1, 5))
grid_search = GridSearchCV(pipeline, cv=3, param_grid=param_grid)
X_kernel = rbf_kernel(X, gamma=2.)
grid_search.fit(X_kernel, y)
assert_equal(grid_search.best_score_, 1)
def test_nested_circles():
# Test the linear separability of the first 2D KPCA transform
X, y = make_circles(n_samples=400, factor=.3, noise=.05,
random_state=0)
# 2D nested circles are not linearly separable
train_score = Perceptron().fit(X, y).score(X, y)
assert_less(train_score, 0.8)
# Project the circles data into the first 2 components of a RBF Kernel
# PCA model.
# Note that the gamma value is data dependent. If this test breaks
# and the gamma value has to be updated, the Kernel PCA example will
# have to be updated too.
kpca = KernelPCA(kernel="rbf", n_components=2,
fit_inverse_transform=True, gamma=2.)
X_kpca = kpca.fit_transform(X)
# The data is perfectly linearly separable in that space
train_score = Perceptron().fit(X_kpca, y).score(X_kpca, y)
assert_equal(train_score, 1.0)
| bsd-3-clause |
cwu2011/scikit-learn | examples/linear_model/plot_ols_3d.py | 350 | 2040 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Sparsity Example: Fitting only features 1 and 2
=========================================================
Features 1 and 2 of the diabetes-dataset are fitted and
plotted below. It illustrates that although feature 2
has a strong coefficient on the full model, it does not
give us much regarding `y` when compared to just feature 1
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets, linear_model
diabetes = datasets.load_diabetes()
indices = (0, 1)
X_train = diabetes.data[:-20, indices]
X_test = diabetes.data[-20:, indices]
y_train = diabetes.target[:-20]
y_test = diabetes.target[-20:]
ols = linear_model.LinearRegression()
ols.fit(X_train, y_train)
###############################################################################
# Plot the figure
def plot_figs(fig_num, elev, azim, X_train, clf):
fig = plt.figure(fig_num, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, elev=elev, azim=azim)
ax.scatter(X_train[:, 0], X_train[:, 1], y_train, c='k', marker='+')
ax.plot_surface(np.array([[-.1, -.1], [.15, .15]]),
np.array([[-.1, .15], [-.1, .15]]),
clf.predict(np.array([[-.1, -.1, .15, .15],
[-.1, .15, -.1, .15]]).T
).reshape((2, 2)),
alpha=.5)
ax.set_xlabel('X_1')
ax.set_ylabel('X_2')
ax.set_zlabel('Y')
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
#Generate the three different figures from different views
elev = 43.5
azim = -110
plot_figs(1, elev, azim, X_train, ols)
elev = -.5
azim = 0
plot_figs(2, elev, azim, X_train, ols)
elev = -.5
azim = 90
plot_figs(3, elev, azim, X_train, ols)
plt.show()
| bsd-3-clause |
vsaw/LIRD | lird.py | 1 | 17209 | #!/usr/bin/pyhton
import csv
import time
import numpy as np
from sklearn import svm
from sklearn import tree
from sklearn import neighbors
from sklearn import preprocessing
from sklearn import multiclass
from sklearn import cross_validation
from sklearn import metrics
from sklearn import ensemble
from sklearn.pipeline import Pipeline
from math import sqrt
import argparse
import exceptions
# Stores the parsed command line args
args = None
# use labelEncoder.transform() and .inverse_transform() to convert from
# letters to numbers and vice versa.
# The label encoder will be instantiated during data parsing.
labelEncoder = None
# This label will be applied by the RadiusNeighborsClassifier to detect outliers
# in the data set.
OUTLIER_LABEL = '?'
# A list of all supported SVM kernels and classifiers
SVM_KERNELS = ['linear-svc', 'linear-ovr', 'linear', 'poly', 'rbf', 'sigmoid']
def _parse_csv(filename, typeCast=int):
'''
Parse data from CSV and return (y,X) with y a array of labels and X array
of observation vectors. X entries are casted using typeCast.
Global variable labelEncoder will be instantiated
'''
data = csv.reader(open(filename))
X = []
y = []
for row in data:
y.append(row[0])
X.append([typeCast(r) for r in row[1:len(row)]])
# sklearn classifiers expect classes to be integers.
# use preprocessing to encode the labels accordingly
global labelEncoder
labelEncoder = preprocessing.LabelEncoder()
# Add an extra outlier label '?' to the list of labels so that methods like
# RadiusNeighborsClassifier that detect outliers can give them a
# indistinguishable label.
labelEncoder.fit(y + [OUTLIER_LABEL]);
y = labelEncoder.transform(y)
return (y, X)
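# Illustrative sketch (added for exposition): how the module-level labelEncoder
# maps letter labels to integer classes and back; the letters used here are
# arbitrary.
def _label_encoder_sketch():
    enc = preprocessing.LabelEncoder()
    enc.fit(['A', 'B', 'C', OUTLIER_LABEL])
    codes = enc.transform(['C', 'A'])        # integer classes, e.g. [3, 1]
    letters = enc.inverse_transform(codes)   # back to ['C', 'A']
    return codes, letters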
def __print_confusion_matrix(cm, labels):
'''
Print the confusion matrix in a nice way
This will leave out 0 entries and label the columns and rows
'''
print ' ',
for label in labels:
print ' %s' % label,
for i in range(cm.size):
if i % len(labels) == 0:
print ''
print labels[i / len(labels)],
if not cm.item(i) == 0:
print '%4d' % cm.item(i),
else:
print 4 * ' ',
def _evaluate_calssifier(clf, trainingSet, validationSet):
'''
Trains the given classifier clf with the training set and compares the
predicted results with the validation set.
Returns the trained classifier
'''
(tLabels, tVectors) = trainingSet
(vLabels, vVectors) = validationSet
t = time.time()
clf.fit(tVectors, tLabels)
trainingTime = time.time() - t
t = time.time()
pLabels = clf.predict(vVectors)
validationTime = time.time() - t
score = clf.score(vVectors, vLabels)
if (args.verbose > 1):
print ' Training %d elements in %d Seconds, Prediction in %d ' \
'Seconds' % (len(tVectors), trainingTime, validationTime)
if (args.verbose > 2):
errCount = sum([1 for (p, v) in zip(pLabels, vLabels) if p != v])
print ' %s errors out of %s validation vectors' % (errCount,
len(
vVectors))
label_names = labelEncoder.classes_
print ' Classification Report:'
print metrics.classification_report(vLabels, pLabels,
labelEncoder.transform(label_names), target_names=label_names)
print ' Confusion Matrix:'
cm = metrics.confusion_matrix(vLabels, pLabels,
labelEncoder.transform(label_names))
__print_confusion_matrix(cm, label_names)
print ''
elif (args.verbose > 1):
print ' %.2f%% Accuracy' % (score * 100)
return (score, trainingTime, validationTime)
def _evaluate_classifiers(classifiers, datasets):
'''
classifiers is expected to be a dictionary of classifiers and sets a dict
of (training,validation) data sets, each made up of (Labels, Vectors)
'''
quality = {}
for setKey in datasets.keys():
if (args.verbose > 1):
print 'Using Dataset %s:' % setKey
(trainingSet, validationSet) = datasets[setKey]
quality[setKey] = {}
for clfKey in classifiers.keys():
if (args.verbose > 1):
print ' %s:' % clfKey
res = _evaluate_calssifier(classifiers[clfKey],
trainingSet,
validationSet)
quality[setKey][clfKey] = res
if (args.verbose > 1):
print ' '
return quality
def __get_as_int_or_float(num):
# TODO Document
try:
if int(num) > 1:
return int(num)
except exceptions.ValueError:
return float(num)
def _prepare_data_set(trainingSetSize=16000, test_size=None, type=int,
scale=False, featureSelection=None):
'''
Load training and validation data in desired format
'''
train_size = __get_as_int_or_float(trainingSetSize)
if not test_size is None:
test_size = __get_as_int_or_float(test_size)
(labels, vectors) = _parse_csv('res/letter-recognition.data', type)
if (scale):
vectors = preprocessing.scale(np.array(vectors))
x_train, x_test, y_train, y_test = cross_validation.train_test_split(
vectors, labels, test_size=test_size, train_size=train_size)
return ((y_train, x_train), (y_test, x_test))
def _prepare_classifiers(cmd_class=['all']):
classifiers = {}
select = args.select_features
has_all = 'all' in cmd_class
if has_all or 'svm' in cmd_class:
# see http://scikit-learn.org/stable/modules/svm.html#classification
kernels = args.svm_kernels
if 'all' in kernels:
kernels = SVM_KERNELS
for k in kernels:
if k == 'linear-ovr':
classifiers[
'SVC kernel=linear OvR'] = multiclass.OneVsRestClassifier(
svm.SVC(kernel='linear'))
elif k == 'linear-svc':
classifiers['Linear SVC'] = svm.LinearSVC()
else:
if args.svm_gamma is None:
g = 0.0
if k == 'sigmoid':
# TODO: Document this magic number
# Maximum dot product of the vectors in our data set
g = 1.0 / 962.0
else:
g = args.svm_gamma
classifiers['SVC kernel=%s' % k] = svm.SVC(kernel=k, gamma=g)
if has_all or 'tree' in cmd_class:
# see http://scikit-learn.org/stable/modules/tree.html
if select == 'off' or select == 'both':
classifiers['Default Decision Trees'] = tree.DecisionTreeClassifier()
if select == 'on' or select == 'both':
classifiers['Default Decision Trees Feature selection Pipeline'] = \
Pipeline([('selection', ensemble.ExtraTreesClassifier()),
('classification',tree.DecisionTreeClassifier())])
for maxf in frange(0.85, 0.90, 0.024):
for c in ['entropy', 'gini']:
clf = tree.DecisionTreeClassifier(max_features=maxf, criterion=c)
if select == 'on' or select == 'both':
pipe = Pipeline([('selection', ensemble
.ExtraTreesClassifier()),
('classification', clf)])
classifiers['Max Features %.2f with %s Decision Trees ' \
'Feature selection Pipeline' % (
maxf, c)] = pipe
if select == 'off' or select == 'both':
classifiers['Max Features %.2f with %s Decision Trees' %
(maxf, c)] = clf
if has_all or 'random' in cmd_class:
for i in range(10):
classifiers['Random Tree #%d' % i] = tree \
.ExtraTreeClassifier()
if has_all or 'ensemble' in cmd_class:
min_trees = args.min_trees
max_trees = args.max_trees
step = 0
divisor = 10
while step < 1:
step = int((max_trees - min_trees)/divisor)
divisor -= 1
for trees in range(min_trees, max_trees+1, step):
clf1 = ensemble.RandomForestClassifier(bootstrap=False,
n_estimators=trees)
clf2 = ensemble.ExtraTreesClassifier(bootstrap=False,
n_estimators=trees)
if select == 'on' or select == 'both':
pipe1 = Pipeline([('selection', ensemble
.ExtraTreesClassifier()),
('classification', clf1)])
pipe2 = Pipeline([('selection', ensemble
.ExtraTreesClassifier()),
('classification', clf2)])
classifiers['%d Random Forest ' \
'Feature selection Pipeline' % trees] = pipe1
classifiers['%d Extra Random Trees ' \
'Feature selection Pipeline' % trees] = pipe2
if select == 'off' or select == 'both':
classifiers['%d Random Forest' % trees] = clf1
classifiers['%d Extra Random Trees' % trees] = clf2
if has_all or 'kNN' in cmd_class or 'rNN' in cmd_class:
# see http://scikit-learn.org/stable/auto_examples/neighbors/plot_classification.html#example-neighbors-plot-classification-py
selected_weights = args.NN_weights
if 'all' in args.NN_weights:
selected_weights = ['uniform', 'distance']
for weight in selected_weights:
if 'kNN' in cmd_class:
show_knn_warnings = args.verbose > 3
for k in args.kNN_neighbors:
classifiers['kNN %s k=%d' % (weight, int(k))] = \
neighbors.KNeighborsClassifier(k, weights=weight,
warn_on_equidistant=show_knn_warnings)
if 'rNN' in cmd_class:
# XXX: Buggy scikit does not handle the distance weight
#
# The following error message is being thrown by scikit when
# the distance weight is being used. This could not be fixed so
# it is being ignored instead.
#
# Traceback (most recent call last):
# File "lird.py", line 329, in <module>
# main()
# File "lird.py", line 316, in main
# quality = _evaluate_classifiers(classifiers, datasets)
# File "lird.py", line 130, in _evaluate_classifiers
# validationSet)
# File "lird.py", line 90, in _evaluate_calssifier
# pLabels = clf.predict(vVectors)
# File "/usr/lib/pymodules/python2.7/sklearn/neighbors/classification.py", line 307, in predict
# for (pl, w) in zip(pred_labels[inliers], weights)],
# File "/usr/lib/pymodules/python2.7/sklearn/utils/extmath.py", line 305, in weighted_mode
# w = np.zeros(a.shape, dtype=w.dtype) + w
# ValueError: operands could not be broadcast together with shapes (47) (194)
if weight == 'distance':
continue
# Assign the outlier class to outliers.
outlier_class = labelEncoder.transform([OUTLIER_LABEL])
for r in args.rNN_radius:
classifiers['rNN %s r=%f' % (weight, float(r))] = \
neighbors.RadiusNeighborsClassifier(radius=r,
weights=weight, outlier_label=outlier_class)
# Disabled because of horrible performance
# classifiers['NuSVC'] = svm.NuSVC()
return classifiers
def _prepare_data_sets(train_size, test_size, sets):
'''
Loads and names all desired datasets into a dirctionary
TODO proper cross validation see
http://scikit-learn.org/stable/modules/cross_validation.html#cross-validation
'''
datasets = {}
if 'all' in sets or 'orig' in sets:
datasets['Out of the Box Integer Data'] = \
_prepare_data_set(train_size, test_size)
if 'all' in sets or 'scaled' in sets:
datasets['Normalizerd Float Data'] = \
_prepare_data_set(train_size, test_size, float, True)
return datasets
def _parse_args():
'''
Parse the command line arguments to select test at runtime
'''
parser = argparse.ArgumentParser()
parser.add_argument('classifiers', nargs='*', default='all',
choices=['all', 'svm', 'kNN', 'rNN', 'tree', 'random', 'ensemble'])
group_common = parser.add_argument_group('Common Options')
group_common.add_argument('-v', '--verbose', type=int, action='store',
default=1, choices=range(1, 5),
help='Verbose Level, Default = %(default)d \
(Levels: 1=Classifier Score, 2=Timings, \
3=Confusion Matrix and Result Report, 4=Internal scikit messages)')
group_common.add_argument('--train-size', default=16000, action='store',
help='Amount of data used for training. Can be either an int \
representing the absolute number of samples or a float between 0 and \
1.0 representing the ratio of train samples. Default %(default)s')
group_common.add_argument('--test-size', default=None, action='store',
help='Amount of data used for testing, if not specified the rest of \
the data set will be used. Like the train-size this can either be a \
int or a float.')
group_common.add_argument('--data', action='store', default=['all'],
choices=['all', 'orig', 'scaled'],
help='Select the preprocessing of the data. Default %(default)s')
tree_group = parser.add_argument_group('Decision Tree and Ensemble Methods')
tree_group.add_argument('--min-trees', action='store',default=10,type=int,
help='Minimum number of trees used in ensemble methods. \
Default %(default)d')
tree_group.add_argument('--max-trees', action='store',default=50,type=int,
help='Maximum number of trees used in ensemble methods. \
Default %(default)d')
tree_group.add_argument('--select-features', action='store', default='on',
choices=['on', 'off', 'both'],
help='Dis/enable feature selection before training. \
Default %(default)s')
svm_group = parser.add_argument_group('Support Vector Machines')
svm_group.add_argument('--svm-kernels', action='store', default=['all'],
choices=SVM_KERNELS, nargs='*',
help='Select the kernels that should be trained for the SVM. \
Default: %(default)s')
svm_group.add_argument('--svm-gamma', action='store', default=None,
type=float,
help='Sets the gamma parameter for the SVM kernels. If ommitted it will\
be 0.0 for all kernels except the sigmoid kernel where g will be set\
to max(abs(<v,w>)) where v and w are vectors from the data set.')
nn_group = parser.add_argument_group('Nearest Neighbors')
nn_group.add_argument('--NN-weights', action='store', default=['all'],
choices=['all', 'uniform', 'distance'], nargs='*',
help='The weighting method to be used by the Nearest Neighbors. \
Default: %(default)s')
nn_group.add_argument('--kNN-neighbors', action='store', default=[5],
nargs='*', type=int,
help='How many Neighbors should be used for classification. If more \
than one number is given multiple runs will be done. \
Default %(default)s')
nn_group.add_argument('--rNN-radius', action='store', default=[3.5], \
nargs='*', type=float,
help='What radius should be used for classification. If more \
than one number is given multiple runs will be done. \
Default %(default)s')
global args
args = parser.parse_args()
def frange(start, stop, step):
'''
helper function to get a range generator for float values.
'''
while start < stop:
yield start
start += step
def main():
secs = time.time()
_parse_args()
datasets = _prepare_data_sets(args.train_size, args.test_size, args.data)
classifiers = _prepare_classifiers(args.classifiers)
quality = _evaluate_classifiers(classifiers, datasets)
#rank classifiers by score and print highscore list
for setKey in quality.keys():
print 'Score on Dataset: %s' % setKey
for clf, (score, trainingTime, validationTime) in sorted(quality[setKey].iteritems(),
key=lambda (k, v): v[0]):
print "%.2f%% in %d + %d secs: %s" % \
(100 * score, trainingTime, validationTime, clf)
print ''
secs = time.time() - secs;
print 'Total Time: %d seconds' % secs
return
if __name__ == "__main__":
main()
| bsd-3-clause |
yukoba/EvolutionStrategyLinearModel | src/test4.py | 1 | 1726 | import numpy as np
from sklearn.datasets import load_boston
(x, y) = load_boston(True)
ss_total = ((y - y.mean()) ** 2).sum()
def evaluate(p):
return 1 - ((np.inner(x, p[:-1]) + p[-1] - y) ** 2).sum() / ss_total
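# Illustrative note (added for exposition): evaluate() returns the coefficient
# of determination R^2 = 1 - SS_res / SS_total of the linear model
# y_hat = x.dot(p[:-1]) + p[-1], so a perfect fit scores 1.0 and predicting the
# mean of y scores 0.0.
def _evaluate_example():
    # An all-zero parameter vector (13 features + intercept) predicts 0 for
    # every sample, which scores 1 - sum(y**2) / ss_total (well below 0 here).
    return evaluate(np.zeros(14))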
iter_count = 10000
pop_size = 100
child_size = 30
params_len = 14
tau = 1.0 / np.sqrt(2.0 * params_len)
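# Self-adaptation rate for the log-normal mutation of the per-parameter step
# sizes; 1/sqrt(2*n) is a common evolution-strategy heuristic.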
with open("../result/4_%d.tsv" % iter_count, "wt") as fp:
for iter in range(100):
print(iter)
individuals = []
for i in range(pop_size):
params = np.random.randn(params_len)
strategies = np.full([params_len], 0.1)
individuals.append((params, strategies, evaluate(params)))
individuals = sorted(individuals, key=lambda ind: -ind[2])
best = individuals[0]
for _ in range(iter_count):
for _ in range(child_size):
                # Crossover
if np.random.rand() < 0.8:
ind0 = individuals[np.random.randint(pop_size)]
ind1 = individuals[np.random.randint(pop_size)]
r = np.random.randint(0, 2, [params_len])
parent = (ind0[0] * r + ind1[0] * (1 - r), ind0[1] * r + ind1[1] * (1 - r))
else:
parent = individuals[np.random.randint(pop_size)]
                # Mutation
strategies2 = parent[1] * np.exp(tau * np.random.randn(params_len))
params2 = parent[0] + strategies2 * np.random.randn(params_len)
individuals.append((params2, strategies2, evaluate(params2)))
individuals = sorted(individuals, key=lambda ind: -ind[2])[:pop_size]
best = individuals[0]
print(best[2], file=fp, flush=True)
| mit |
RyuYamamoto/ProbablisticRobotics2016 | main.py | 1 | 1717 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from map_display import *
from QLearning import*
import numpy as np
import matplotlib.pyplot as plt
import sys
import time
MAX_GOAL_TRIAL = 300
if __name__ == "__main__":
args = sys.argv
argc = len(args)
gui_flag = True
map_reward = np.loadtxt('data/map.csv', delimiter=',')
agentQL = AgentQLearning(map_reward,0.1,0.2,0.9)
if 1 < argc:
if args[1] == '--test':
gui_flag = False
else:
map_display = MapDisplay(map_reward)
_trial = 0
while True:
        agentQL.learn()  # action learning
if gui_flag is True:
map_display.draw(agentQL.state)
pygame.display.update()
for event in pygame.event.get():
if event.type==QUIT:
pygame.quit()
sys.exit()
if np.array_equal(agentQL.state, np.array(map_reward.shape)-1):
agentQL.goal_count = agentQL.goal_count + 1
print "Goal:", agentQL.goal_count
agentQL.reset()
_trial = _trial + 1
agentQL.trial.append(_trial)
if MAX_GOAL_TRIAL < agentQL.goal_count:
break
#time.sleep(0.02)
    # Visualize the learned policy
if gui_flag is True:
map_display.show_policy(agentQL.q)
pygame.display.update()
pygame.image.save(map_display.screen, "picture/policy.jpg")
    # Change in Q values over trials
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.plot(agentQL.trial, agentQL.q_value_list, label='Q Value')
ax.set_title("Q Value")
ax.legend(loc='best')
ax.set_xlabel('trial')
ax.set_ylabel('Q')
plt.show()
| mit |
rustychris/stompy | stompy/grid/exact_delaunay.py | 1 | 84123 | # A pure-python, exact delaunay triangulation.
# uses robust_predicates for in-circle tests, follows
# the algorithm of CGAL to the extent possible.
import logging
import pdb
logger = logging.getLogger()
import six
import numpy as np
import matplotlib.pyplot as plt
# do these work in py2?
from ..spatial import robust_predicates
from . import unstructured_grid
from ..utils import (circular_pairs, dist, point_segment_distance, set_keywords,
segment_segment_intersection)
if six.PY3:
def cmp(a,b):
return bool(a>b)-bool(a<b)
try:
from scipy import spatial
except ImportError:
spatial=None
class DuplicateNode(Exception):
pass
class BadConstraint(Exception):
def __init__(self,*a,**k):
super(BadConstraint,self).__init__(*a)
set_keywords(self,k)
class IntersectingConstraints(BadConstraint):
edge=None
class DuplicateConstraint(BadConstraint):
nodes=None
class ConstraintCollinearNode(IntersectingConstraints):
"""
Special case of intersections, when a constraint attempts to
run *through* an existing node
"""
node=None
def ordered(x1,x2,x3):
"""
given collinear points, return true if they are in order
along that line
"""
if x1[0]!=x2[0]:
i=0
else:
i=1
return (x1[i]<x2[i]) == (x2[i]<x3[i])
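# Illustrative example (added for exposition): ordered() only compares the
# varying coordinate, so the inputs are assumed to already be collinear.
def _ordered_example():
    a, b, c = [0.0, 0.0], [1.0, 1.0], [2.0, 2.0]
    assert ordered(a, b, c)      # b lies between a and c along the line
    assert not ordered(b, a, c)  # a does not lie between b and c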
def rel_ordered(x1,x2,x3,x4):
"""
given 4 collinear points, return true if the direction
from x1->x2 is the same as x3=>x4
requires x1!=x2, and x3!=x4
"""
if x1[0]!=x2[0]:
i=0 # choose a coordinate which is varying
else:
i=1
assert x1[i]!=x2[i]
assert x3[i]!=x4[i]
return (x1[i]<x2[i]) == (x3[i]<x4[i])
class Triangulation(unstructured_grid.UnstructuredGrid):
"""
Mimics the Triangulation_2 class of CGAL.
note that we make some additional assumptions on invariants -
nodes, cells and edges are ordered in a consistent way:
"""
INF_NODE=-666
INF_CELL=unstructured_grid.UnstructuredGrid.UNMESHED
max_sides=3
# local exception types
DuplicateNode=DuplicateNode
IntersectingConstraints=IntersectingConstraints
BadConstraint=BadConstraint
ConstraintCollinearNode=ConstraintCollinearNode
post_check=False # enables [expensive] checks after operations
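    # Extend the base edge dtype with a per-edge 'constrained' flag; constrained
    # edges are treated as fixed and are never flipped when restoring the
    # Delaunay criterion.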
edge_dtype=(unstructured_grid.UnstructuredGrid.edge_dtype +
[ ('constrained',np.bool8) ] )
def add_node(self,**kwargs):
# will eventually need some caching or indexing to make
# the locate faster. locate() happens first so that
# the mesh complies with invariants and doesn't have a dangling
# node
loc=self.locate(kwargs['x'])
n=super(Triangulation,self).add_node(**kwargs)
self.tri_insert(n,loc)
return n
def modify_node(self,n,_brute_force=False,**kwargs):
"""
_brute_force: if True, move node by delete/add, rather than trying
a short cut.
"""
if 'x' not in kwargs:
return super(Triangulation,self).modify_node(n,**kwargs)
old_rec=self.nodes[n]
# Brute force, removing and re-adding, is no good as the
# constraints are lost.
# A slightly more refined, but still brutish, approach, is to save
# the constraints, delete, add, add constraints.
# be sped up
# handle a common case where the node is only moved a small
# distance, such that we only have to do a small amount of
# work to fix up the triangulation
# if the new location is inside a cell adjacent to n, then
# we can [probably?] move the node
if self.dim()<2:
# the short cuts are only written for the 2D case.
_brute_force=True
if not _brute_force:
# check whether new node location is on the "right" side
# of all existing "opposite" edges (the edge of each cell
# which doesn't contain n.
shortcut=True
if shortcut:
my_cells=self.node_to_cells(n)
for c in my_cells:
c_nodes=self.cells['nodes'][c]
c_xy=self.nodes['x'][c_nodes]
pnts=[]
for i,c_node in enumerate(c_nodes):
if c_node==n:
pnts.append(kwargs['x'])
else:
pnts.append(c_xy[i])
if robust_predicates.orientation(*pnts) <=0:
shortcut=False
if shortcut:
# also check for this node being on the convex hull
# find the pair of edges, if they exist, which have
# n, and have the infinite cell to the left.
he_rev=he_fwd=None
for j in self.node_to_edges(n):
if self.edges['cells'][j,1]==self.INF_CELL:
he=self.halfedge(j,1)
elif self.edges['cells'][j,0]==self.INF_CELL:
he=self.halfedge(j,0)
else:
continue
if he.node_fwd()==n:
he_rev=he
elif he.node_rev()==n:
he_fwd=he
else:
assert False
# can't have just one.
assert (he_rev is None) == (he_fwd is None)
if he_rev is not None:
# need to check that the movement of this node does
# not invalidate the orientation with respect to
# neighboring edges of the convex hull.
# get the five consecutive points, where c is the
# node being moved. make sure that a-b-c and c-d-e
# are properly oriented
cons_idxs=[he_rev.rev().node_rev(),
he_rev.node_rev(),
n,
he_fwd.node_fwd(),
he_fwd.fwd().node_fwd()]
abcde=self.nodes['x'][cons_idxs]
abcde[2]=kwargs['x']
if robust_predicates.orientation(*abcde[:3])>0:
shortcut=False
elif robust_predicates.orientation(*abcde[2:])>0:
shortcut=False
elif robust_predicates.orientation(*abcde[1:4])>0:
shortcut=False
if shortcut:
# short cut should work:
retval=super(Triangulation,self).modify_node(n,**kwargs)
self.restore_delaunay(n)
# when refining the above tests, uncomment this to increase
# the amount of validation
# if self.check_convex_hull():
# pdb.set_trace()
return retval
# but adding the constraints back can fail, in which case we should
# roll back our state, and fire an exception.
constraints_to_replace=[]
for j in self.node_to_edges(n):
if self.edges['constrained'][j]:
constraints_to_replace.append( self.edges['nodes'][j].copy() )
old_x=self.nodes['x'][n].copy() # in case of rollback
self.delete_node(n)
for fld in old_rec.dtype.names:
if fld not in ['x','deleted'] and fld not in kwargs:
kwargs[fld]=old_rec[fld]
new_n=self.add_node(_index=n,**kwargs)
try:
for n1,n2 in constraints_to_replace:
self.add_constraint(n1,n2) # This can fail!
except self.IntersectingConstraints as exc:
self.log.warning("modify_node: intersecting constraints - rolling back")
self.delete_node(n)
kwargs['x']=old_x # move it back to where it started
new_n=self.add_node(_index=n,**kwargs)
for n1,n2 in constraints_to_replace:
self.add_constraint(n1,n2) # This should not fail
# but signal to the caller that the modify failed
raise
assert new_n==n
def add_edge(self,**kw):
""" add-on: cells default to INF_CELL, not -1.
"""
j=super(Triangulation,self).add_edge(**kw)
if 'cells' not in kw:
self.edges[j]['cells'][:]=self.INF_CELL
return j
def choose_start_cell(self,t=None):
""" choose a starting cell for trying to locate where a new vertex
should go. May return INF_CELL if there are no valid cells.
t: can specify a target point which may be used with a spatial index
to speed up the query.
"""
c=0
try:
while self.cells['deleted'][c]:
c+=1
return c
except IndexError:
return self.INF_CELL
IN_VERTEX=0
IN_EDGE=2
IN_FACE=3
OUTSIDE_CONVEX_HULL=4
OUTSIDE_AFFINE_HULL=5
def dim(self):
if len(self.cells) and not np.all(self.cells['deleted']):
return 2
elif len(self.edges) and not np.all(self.edges['deleted']):
return 1
elif len(self.nodes) and not np.all(self.nodes['deleted']):
return 0
else:
return -1
def angle_sort_adjacent_nodes(self,n,ref_nbr=None,topo=True):
if topo:
return self.topo_sort_adjacent_nodes(n,ref_nbr)
else:
            return super(Triangulation,self).angle_sort_adjacent_nodes(n,ref_nbr=ref_nbr)
def topo_sort_adjacent_nodes(self,n,ref_nbr=None):
""" like angle_sort_adjacent_nodes, but relying on topology, not geometry.
"""
nbrs=list(self.node_to_nodes(n))
if len(nbrs)<3:
snbrs=nbrs
else:
he_nbrs = [ self.nodes_to_halfedge(n,nbr)
for nbr in nbrs ]
map_next={}
for he in he_nbrs:
# this doesn't use angle_sort
c=he.cell_opp()
map_next[c] = (he.node_fwd(),he.cell())
trav0=trav=c
snbrs=[]
while 1:
#if len(snbrs)>20: # DBG
# pdb.set_trace()
node,cell = map_next[trav]
snbrs.append(node)
trav=cell
if trav==trav0:
break
if ref_nbr is not None:
i=list(snbrs).index(ref_nbr)
snbrs=np.roll(snbrs,-i)
return snbrs
def locate(self,t,c=None):
""" t: [x,y] point to locate
c: starting cell, if known
return loc=[face,loc_type,loc_index]
face: INF_CELL if t is not on or inside a finite cell
loc_type:
OUTSIDE_AFFINE_HULL: adding this vertex will increase the dimension of the triangulation.
empty triangulation: dim=-1
single vertex: dim=0
collinear edges: dim=1
faces: dim=2
loc_index set to current dimensionality
OUTSIDE_CONVEX_HULL: dimensionality may still be 1 or 2.
if the dimension is 1, then loc_index gives the nearest node
if the dimension is 2, then loc_index gives an adjacent half-edge
IN_VERTEX: t coincides with existing vertex,
if face is finite, then it's a cell containing the vertex, and loc_index
is the index of that vertex in the cell.
if face is INF_CELL, implies dimension<2, and loc_index gives existing node
IN_EDGE: t is collinear with existing edge.
if face is finite, it is a cell containing the edge.
loc_index is the index of the edge itself.
face may be INF_CELL, which implies dimension<2
IN_FACE: t is in the interior of a face. face is the containing cell. loc_index
is not used.
"""
c=c or self.choose_start_cell(t)
prev=None # previous face
# To identify the right orientation of the half-edge, remember
# the ordering of the nodes -- this is CCW ordering from the
# perspective of prev
last_nodes=None
last_edge=None # the edge between c and prev
# Checks for affine hull -
# 3rd element gives the current dimensionality of the affine hull
if self.Nnodes_valid()==0:
return (self.INF_CELL,self.OUTSIDE_AFFINE_HULL,-1)
elif self.Nedges_valid()==0:
return (self.INF_CELL,self.OUTSIDE_AFFINE_HULL,0)
elif self.Ncells_valid()==0:
return self.locate_1d(t,c)
while True:
if c==self.INF_CELL:
# // c must contain t in its interior
# lt = OUTSIDE_CONVEX_HULL;
# li = c->index(infinite_vertex());
# Changed to give adjacent edge, rather than
# confusing loc_index=4
# loc=(self.INF_CELL,self.OUTSIDE_CONVEX_HULL,last_edge)
# changed again, to give a half-edge
# flip the order because they were in the order with respect
# to the prev face, but now we jumped over last_edge
he=self.nodes_to_halfedge( last_nodes[1],last_nodes[0] )
loc=(self.INF_CELL,self.OUTSIDE_CONVEX_HULL,he)
return loc
p0=self.nodes['x'][self.cells['nodes'][c,0]]
p1=self.nodes['x'][self.cells['nodes'][c,1]]
p2=self.nodes['x'][self.cells['nodes'][c,2]]
prev = c
# Orientation o0, o1, o2;
# nodes are stored in CCW order for the cell.
# 1st edge connects first two nodes
# neighboring cells follow the edges
o0 = robust_predicates.orientation(p0,p1,t)
if o0 == -1: # CW
last_edge=self.cell_to_edges(c)[0]
last_nodes=self.cells['nodes'][c,[0,1]]
c=self.cell_to_cells(c)[0]
continue
o1 = robust_predicates.orientation(p1,p2,t)
if o1 == -1:
last_edge=self.cell_to_edges(c)[1]
last_nodes=self.cells['nodes'][c,[1,2]]
c=self.cell_to_cells(c)[1]
continue
o2 = robust_predicates.orientation(p2,p0,t)
if o2 == -1:
last_edge=self.cell_to_edges(c)[2]
last_nodes=self.cells['nodes'][c,[2,0]]
c=self.cell_to_cells(c)[2]
continue
# must be in or on a face --
break
# For simplicity, I'm skipping some optimizations which avoid re-checking
# the previous edge. see Triangulation_2.h:2616
# now t is in c or on its boundary
o_sum=(o0==0)+(o1==0)+(o2==0)
if o_sum==0:
loc=(c,self.IN_FACE,4)
elif o_sum==1:
if o0==0:
j=0
elif o1==0:
j=1
else:
j=2
# better to consistently return the edge index here, not
# just its index in the cell
loc=(c,self.IN_EDGE,self.cells['edges'][c,j])
elif o_sum==2:
if o0!=0:
loc=(c,self.IN_VERTEX,2)
elif o1!=0:
loc=(c,self.IN_VERTEX,0)
else:
loc=(c,self.IN_VERTEX,1)
else:
assert False
return loc
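    # Illustrative usage sketch (added for exposition, not part of the original
    # class); it assumes the default no-argument constructor works as in the
    # accompanying tests. Never called by the library code.
    @staticmethod
    def _locate_usage_sketch():
        dt = Triangulation()
        for xy in [[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]]:
            dt.add_node(x=xy)
        # A point strictly inside the single triangle reports IN_FACE, with the
        # containing cell as the first element of the returned tuple.
        face, loc_type, idx = dt.locate(np.array([0.25, 0.25]))
        assert loc_type == dt.IN_FACE
        return face, loc_type, idx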
def locate_1d(self,t,c):
# There are some edges, and t may fall within an edge, off the end,
# or off to the side.
j=six.next(self.valid_edge_iter())
p0=self.nodes['x'][ self.edges['nodes'][j,0] ]
p1=self.nodes['x'][ self.edges['nodes'][j,1] ]
o=robust_predicates.orientation(p0,p1,t)
if o!=0:
return (self.INF_CELL,self.OUTSIDE_AFFINE_HULL,1)
# t is collinear - need to find out whether it's in an edge
# or not
# choose a coordinate which varies along the line
if p0[0]!=p1[0]:
coord=0
else:
coord=1
if (t[coord]<p0[coord]) != (t[coord]<p1[coord]):
return (self.INF_CELL,self.IN_EDGE,j)
# do we need to go towards increasing or decreasing coord?
if (t[coord]<p0[coord]) and (t[coord]<p1[coord]):
direc=-1
else:
direc=1
while True:
# j indexes the edge we just tested.
# p0 and p1 are the endpoints of the edge
# 1. do we want a neighbor of n0 or n1?
if direc*cmp(p0[coord],p1[coord]) < 0: # want to go towards p1
n_adj=self.edges['nodes'][j,1]
else:
n_adj=self.edges['nodes'][j,0]
for jnext in self.node_to_edges(n_adj):
if jnext!=j:
j=jnext
break
else:
# walked off the end of the line -
# n_adj is the nearest to us
return (self.INF_CELL,self.OUTSIDE_CONVEX_HULL,n_adj)
p0=self.nodes['x'][ self.edges['nodes'][j,0] ]
p1=self.nodes['x'][ self.edges['nodes'][j,1] ]
if (t[coord]<p0[coord]) != (t[coord]<p1[coord]):
return (self.INF_CELL,self.IN_EDGE,j)
def tri_insert(self,n,loc):
# n: index for newly inserted node.
# note that loc must already be computed -
# types of inserts:
# on an edge, inside a face, outside the convex hull
# outside affine hull
loc_c,loc_type,loc_idx = loc
if loc_type==self.IN_FACE:
self.tri_insert_in_face(n,loc)
elif loc_type==self.IN_EDGE:
self.tri_insert_in_edge(n,loc)
elif loc_type==self.IN_VERTEX:
raise DuplicateNode()
elif loc_type==self.OUTSIDE_CONVEX_HULL:
self.tri_insert_outside_convex_hull(n,loc)
elif loc_type==self.OUTSIDE_AFFINE_HULL:
self.tri_insert_outside_affine_hull(n,loc)
# for some of those actions, this could be skipped
self.restore_delaunay(n)
def tri_insert_in_face(self,n,loc):
loc_f,loc_type,_ = loc
a,b,c=self.cells['nodes'][loc_f]
self.delete_cell(loc_f)
self.add_edge(nodes=[n,a])
self.add_edge(nodes=[n,b])
self.add_edge(nodes=[n,c])
self.add_cell(nodes=[n,a,b])
self.add_cell(nodes=[n,b,c])
self.add_cell(nodes=[n,c,a])
def tri_insert_in_edge(self,n,loc):
""" Takes care of splitting the edge and any adjacent cells
"""
loc_f,loc_type,loc_edge = loc
self.log.debug("Loc puts new vertex in edge %s"%loc_edge)
cells_to_split=[]
for c in self.edge_to_cells(loc_edge):
if c<0: continue
cells_to_split.append( self.cells[c].copy() )
self.log.debug("Deleting cell on insert %d"%c)
self.delete_cell(c)
# Modify the edge:
a,c=self.edges['nodes'][loc_edge]
b=n
self.delete_edge(loc_edge)
self.add_edge(nodes=[a,b])
self.add_edge(nodes=[b,c])
for cell_data in cells_to_split:
common=[n for n in cell_data['nodes']
if n!=a and n!=c][0]
jnew=self.add_edge(nodes=[b,common])
for replace in [a,c]:
nodes=list(cell_data['nodes'])
idx=nodes.index(replace)
nodes[idx]=b
self.add_cell(nodes=nodes)
def tri_insert_outside_convex_hull(self,n,loc):
dim=self.dim()
if dim==2:
self.tri_insert_outside_convex_hull_2d(n,loc)
elif dim==1:
self.tri_insert_outside_convex_hull_1d(n,loc)
else:
assert False
def tri_insert_outside_convex_hull_1d(self,n,loc):
self.log.debug("tri_insert_outside_convex_hull_1d")
n_adj=loc[2]
self.add_edge(nodes=[n,n_adj])
def tri_insert_outside_convex_hull_2d(self,n,loc):
# HERE:
# the CGAL code is a little funky because of the use of
# infinite vertices and the like.
# the plan here:
# a. change 'locate' to return halfedges instead of just an
# edge. otherwise we'd have to redo the orientation check here.
# b. traverse the half-edge forwards and backwards, accumulating
# lists of adjacent edges which also satisfy the CCW rule.
# c. create triangles with n and the given half-edge, as well as the
# accumulated adjacent edges
# the result then is that the convex hull is built out.
# Triangulation_2.h:1132
assert loc[0]==self.INF_CELL # sanity.
he0=loc[2] # adjacent half-edge
def check_halfedge(he):
nodes=[he.node_rev(),he.node_fwd(),n]
pnts=self.nodes['x'][nodes]
ccw=robust_predicates.orientation(pnts[0],pnts[1],pnts[2])
return ccw>0
assert check_halfedge(he0)
addl_fwd=[]
he=he0.fwd()
while check_halfedge(he):
addl_fwd.append(he)
he=he.fwd()
addl_rev=[]
he=he0.rev()
while check_halfedge(he):
addl_rev.append(he)
he=he.rev()
self.add_edge( nodes=[he0.node_rev(),n] )
self.add_edge( nodes=[he0.node_fwd(),n] )
self.add_cell( nodes=[he0.node_rev(),he0.node_fwd(),n] )
for he in addl_fwd:
self.add_edge( nodes=[he.node_fwd(),n] )
            # the second node *had* been he0.node_fwd(), but that
# was probably a typo.
self.add_cell( nodes=[he.node_rev(),he.node_fwd(),n] )
for he in addl_rev:
self.add_edge( nodes=[he.node_rev(),n] )
# same here.
self.add_cell( nodes=[he.node_rev(),he.node_fwd(),n] )
# 1. Check orientation. Since we get an unoriented edge j_adj,
# all we can do is assert that the points are not collinear.
# 2. loops through faces incident to infinite vertex (?)
# gathering a list of external edges which make a CCW triangle
# with the vertex to insert. stop on the first edge which fails this.
# This is done first traversing CCW, then again traversing CW
# 3. Make the new face with the given edge..
#
def tri_insert_outside_affine_hull(self,n,loc):
self.log.debug("Insert outside affine hull")
loc_face,loc_type,curr_dim = loc
if curr_dim==-1:
self.log.debug(" no nodes, no work")
elif curr_dim==0:
self.log.debug(" simply add edge")
for nbr in self.valid_node_iter():
if nbr != n:
self.add_edge(nodes=[n,nbr])
elif curr_dim==1:
self.log.debug(" add edges and cells")
# the strategy in Triangulation_2.h makes some confusing
# use of the infinite face - take a less elegant, more explicit
# approach here
orig_edges=list(self.valid_edge_iter())
for nbr in self.valid_node_iter():
if nbr != n:
self.add_edge(nodes=[n,nbr])
for j in orig_edges:
n1,n2=self.edges['nodes'][j]
self.add_cell( nodes=[n,n1,n2] )
else:
assert False
def add_cell(self,_force_invariants=True,**kwargs):
if _force_invariants:
nodes=kwargs['nodes']
# Make sure that topological invariants are maintained:
# nodes are ordered ccw.
# edges are populated
# used to assume/force the edges to be sequenced opposite nodes.
# but that is a triangulation-specific assumption, while we're using
# a general unstructured_grid base class. The base class makes
# an incompatible assumption, that the first edge connects the first
# two nodes.
pnts=self.nodes['x'][nodes]
ccw=robust_predicates.orientation(pnts[0],pnts[1],pnts[2])
assert ccw!=0
if ccw<0:
nodes=nodes[::-1]
kwargs['nodes']=nodes
j0=self.nodes_to_edge(nodes[0],nodes[1])
j1=self.nodes_to_edge(nodes[1],nodes[2])
j2=self.nodes_to_edge(nodes[2],nodes[0])
kwargs['edges']=[j0,j1,j2]
c=super(Triangulation,self).add_cell(**kwargs)
# update the link from edges back to cells
for ji,j in enumerate(self.cells['edges'][c]):
# used to attempt to enforce this:
# ji-th edge is the (ji+1)%3,(ji+2)%3 nodes of the cell
# but that's not compatible with checks in unstructured_grid
# but need to know if the edge is in that order or the
# opposite
if self.edges['nodes'][j,0] == self.cells['nodes'][c,ji]:
self.edges['cells'][j,0] = c
else:
self.edges['cells'][j,1] = c
return c
def flip_edge(self,j):
"""
rotate the given edge CCW. requires that triangular cells
exist on both sides of the edge
(that's not a hard and fast requirement, just makes it easier
        to implement. There *does* have to be a potential cell on either
side).
"""
c_left,c_right=self.edges['cells'][j,:]
self.log.debug("Flipping edge %d, with cells %d, %d nodes %d,%d"%(j,c_left,c_right,
self.edges['nodes'][j,0],
self.edges['nodes'][j,1]) )
assert c_left>=0 # could be relaxed, at the cost of some complexity here
assert c_right>=0
# could work harder to preserve extra info:
#c_left_data = self.cells[c_left].copy()
#c_right_data = self.cells[c_right].copy()
# This is dangerous! - deleting the cells means that topo_sort is no good,
# and that breaks half-edge ops.
# moving to happen a bit later -
# self.delete_cell(c_left)
# self.delete_cell(c_right)
he_left=unstructured_grid.HalfEdge(self,j,0)
he_right=unstructured_grid.HalfEdge(self,j,1)
na,nc = self.edges['nodes'][j]
nd=he_left.fwd().node_fwd()
nb=he_right.fwd().node_fwd()
# DBG
if 0:
for n,label in zip( [na,nb,nc,nd],
"abcd" ):
plt.text( self.nodes['x'][n,0],
self.nodes['x'][n,1],
label)
# keep the time where the cells are deleted to a minimum
self.delete_cell(c_left)
self.delete_cell(c_right)
self.modify_edge(j,nodes=[nb,nd])
new_left =self.add_cell(nodes=[na,nb,nd])
new_right=self.add_cell(nodes=[nc,nd,nb])
return new_left,new_right
def delete_node(self,n):
""" Triangulation version implies cascade, but also
patches up the triangulation
"""
assert n>=0
N=self.Nnodes_valid()
if N==1:
super(Triangulation,self).delete_node(n)
elif N==2:
j=self.node_to_edges(n)[0]
self.delete_edge(j)
super(Triangulation,self).delete_node(n)
elif self.dim()==1:
self.delete_node_1d(n)
else:
self.delete_node_2d(n)
def delete_node_1d(self,n):
# Triangulation_2.h hands this off to the triangulation data structure
# That code looks like:
assert self.dim() == 1
assert self.Nnodes_valid() > 2
# Two cases - either n is at the end of a line of nodes,
# or it's between two nodes.
nbrs=self.node_to_nodes(n)
if len(nbrs)==1: # easy, we're at the end
j=self.nodes_to_edge(n,nbrs[0])
self.delete_edge(j)
super(Triangulation,self).delete_node(n)
else:
assert len(nbrs)==2
j1=self.nodes_to_edge(n,nbrs[0])
j2=self.nodes_to_edge(n,nbrs[1])
self.delete_edge(j1)
self.delete_edge(j2)
super(Triangulation,self).delete_node(n)
self.add_edge( nodes=nbrs )
def test_delete_node_dim_down(self,n):
# see Triangulation_2.h : test_dim_down
# test the dimensionality of the resulting triangulation
# upon removing of vertex v
# it goes down to 1 iff
# 1) any finite face is incident to v
# 2) all vertices are collinear
assert self.dim() == 2
for c in self.valid_cell_iter():
if n not in self.cell_to_nodes(c):
# There is a triangle not involving n
# deleting n would retain a 2D triangulation
return False
pnts=[self.nodes['x'][i]
for i in self.valid_node_iter()
if i!=n]
a,b = pnts[:2]
for c in pnts[2:]:
if robust_predicates.orientation(a,b,c) != 0:
return False
return True
def delete_node_2d(self,n):
if self.test_delete_node_dim_down(n):
# deleting n yields a 1D triangulation - no faces
for c in self.valid_cell_iter():
self.delete_cell(c)
# copy
for j in list(self.node_to_edges(n)):
self.delete_edge(j)
super(Triangulation,self).delete_node(n)
return
# first, make a hole around n
deletee=n
# new way
nbrs=self.angle_sort_adjacent_nodes(deletee)
edges_to_delete=[]
hole_nodes=[]
for nbrA,nbrB in circular_pairs(nbrs):
hole_nodes.append(nbrA)
he=self.nodes_to_halfedge(nbrA,nbrB)
if (he is None) or (he.cell()<0) or (n not in self.cell_to_nodes(he.cell())):
hole_nodes.append('inf')
edges_to_delete.append( self.nodes_to_edge( [deletee,nbrA] ) )
for j in edges_to_delete:
self.delete_edge_cascade(j)
super(Triangulation,self).delete_node(deletee)
# Use the boundary completion approach described in Devillers 2011
# it's not terribly slow, and can be done with the existing
# helpers.
self.fill_hole(hole_nodes)
def fill_hole(self,hole_nodes):
# track potentially multiple holes
# a few place use list-specific semantics - not ndarray
hole_nodes=list(hole_nodes)
holes_nodes=[ hole_nodes ]
while len(holes_nodes):
hole_nodes=holes_nodes.pop()
while 'inf' in hole_nodes[:2]:
hole_nodes = hole_nodes[1:] + hole_nodes[:1]
a,b=hole_nodes[:2]
self.log.debug("Considering edge %d-%d"%(a,b) )
# inf nodes:
# can't test any geometry. seems like we can only have boundary
# faces if the hole included an inf node.
# so drop it from candidates here, but remember that we saw it
# first, sweep through the candidates to test CCW
has_inf=False
c_cand1=hole_nodes[2:]
c_cand2=[]
for c in c_cand1:
if c=='inf':
has_inf=True
elif robust_predicates.orientation( self.nodes['x'][a],
self.nodes['x'][b],
self.nodes['x'][c] ) > 0:
c_cand2.append(c)
self.log.debug("After CCW tests, %s are left"%c_cand2)
while len(c_cand2)>1:
c=c_cand2[0]
for d in c_cand2[1:]:
tst=robust_predicates.incircle( self.nodes['x'][a],
self.nodes['x'][b],
self.nodes['x'][c],
self.nodes['x'][d] )
if tst>0:
self.log.debug("%d was inside %d-%d-%d"%(d,a,b,c))
c_cand2.pop(0)
break
else:
# c passed all the tests
c_cand2=[c]
break
# if the hole nodes are already all convex, then they already
# form the new convex hull - n was on the hull and simply goes
# away
if has_inf and not c_cand2:
c_cand2=['inf']
c='inf' # was this missing??
else:
c=c_cand2[0]
self.log.debug("Decided on %s-%s-%s"%(a,b,c))
# n.b. add_cell_and_edges is probably what is responsible
# for the painless dealing with collinear boundaries.
if c!='inf':
self.add_cell_and_edges( nodes=[a,b,c] )
# what hole to put back on the queue?
if len(hole_nodes)==3:
# finished this hole.
self.log.debug("Hole is finished")
continue
elif c==hole_nodes[2]:
self.log.debug("Hole is trimmed from front")
hole_nodes[:3] = [a,c]
holes_nodes.append( hole_nodes )
elif c==hole_nodes[-1]:
self.log.debug("Hole is trimmed from back")
hole_nodes=hole_nodes[1:] # drop a
self.log.debug(" New hole is %s"%hole_nodes)
holes_nodes.append( hole_nodes )
else:
self.log.debug("Created two new holes")
idx=hole_nodes.index(c)
h1=hole_nodes[1:idx+1]
h2=hole_nodes[idx:] + hole_nodes[:1]
self.log.debug(" New hole: %s"%h1)
self.log.debug(" New hole: %s"%h2)
holes_nodes.append( h1 )
holes_nodes.append( h2 )
# Make a check for the delaunay criterion:
def check_global_delaunay(self):
bad_checks=[] # [ (cell,node),...]
for c in self.valid_cell_iter():
nodes=self.cells['nodes'][c]
pnts=self.nodes['x'][nodes]
# brute force - check them all.
for n in self.valid_node_iter():
if n in nodes:
continue
t=self.nodes['x'][n]
check=robust_predicates.incircle(pnts[0],pnts[1],pnts[2],t)
if check>0:
# how do we check for constraints here?
# maybe more edge-centric?
# tests of a cell on one side of an edge against a node on the
# other is reflexive.
#
# could go through the edges of c,
msg="Node %d is inside the circumcircle of cell %d (%d,%d,%d)"%(n,c,
nodes[0],nodes[1],nodes[2])
self.log.error(msg)
bad_checks.append( (c,n) )
return bad_checks
def check_local_delaunay(self):
""" Check both sides of each edge - can deal with constrained edges.
"""
bad_checks=[] # [ (cell,node),...]
for j in self.valid_edge_iter():
if self.edges['constrained'][j]:
continue
c1,c2 = self.edge_to_cells(j)
if c1<0 or c2<0:
continue
# always check the smaller index -
# might help with caching later on.
c=min(c1,c2)
c_opp=max(c1,c2)
nodes=self.cells['nodes'][c]
pnts=self.nodes['x'][nodes]
# brute force - check them all.
for n in self.cell_to_nodes(c_opp):
if n in nodes:
continue
t=self.nodes['x'][n]
check=robust_predicates.incircle(pnts[0],pnts[1],pnts[2],t)
if check>0:
msg="Node %d is inside the circumcircle of cell %d (%d,%d,%d)"%(n,c,
nodes[0],nodes[1],nodes[2])
self.log.error(msg)
bad_checks.append( (c,n) )
raise Exception('fail')
return bad_checks
def check_orientations(self):
"""
Checks all cells for proper CCW orientation,
return a list of cell indexes of failures.
"""
bad_cells=[]
for c in self.valid_cell_iter():
node_xy=self.nodes['x'][self.cells['nodes'][c]]
if robust_predicates.orientation(*node_xy) <= 0:
bad_cells.append(c)
return bad_cells
def check_convex_hull(self):
# find an edge on the convex hull, walk the hull and check
# all consecutive orientations
e2c=self.edge_to_cells()
for j in self.valid_edge_iter():
if e2c[j,0]==self.INF_CELL:
he=self.halfedge(j,0)
break
elif e2c[j,1]==self.INF_CELL:
he=self.halfedge(j,1)
break
else:
assert False
he0=he
bad_hull=[]
while 1:
a=he.node_rev()
b=he.node_fwd()
he=he.fwd()
c=he.node_fwd()
if robust_predicates.orientation(*self.nodes['x'][[a,b,c]])>0:
bad_hull.append( [a,b,c])
if he==he0:
break
return bad_hull
def restore_delaunay(self,n):
""" n: node that was just inserted and may have adjacent cells
which do not meet the Delaunay criterion
"""
# n is node for Vertex_handle v
if self.dim() <= 1:
return
# a vertex is shared by faces, but "stores" only one face.
# Face_handle f=v->face();
# This code iterates over the faces adjacent to v
# in ccw order.
# Face_handle next;
# int i;
# Face_handle start(f);
# do {
# i = f->index(v);
# next = f->neighbor(ccw(i)); // turn ccw around v
# propagating_flip(f,i);
# f=next;
# } while(next != start);
# Shaky on the details, but for starters, try marking the CCW sweep
# based on neighbor nodes.
nbr_nodes=self.angle_sort_adjacent_nodes(n)
N=len(nbr_nodes)
for i in range(N):
trav=nbr_nodes[i]
trav_next=nbr_nodes[(i+1)%N]
c=self.nodes_to_cell( [n,trav,trav_next],fail_hard=False)
if c is not None:
for i in [0,1,2]:
if self.cells['nodes'][c,i]==n:
break
else:
assert False
if c is not None:
self.propagating_flip(c,i)
if self.post_check:
bad=self.check_local_delaunay()
if bad:
raise self.GridException("Delaunay criterion violated")
def propagating_flip(self,c,i):
# this is taken from non_recursive_propagating_flip
# c: cell, akin to face_handle
# i: index of the originating vertex in cell c.
# track the stack based on the halfedge one place CW
# from the edge to be flipped.
edges=[] # std::stack<Edge> edges;
vp = self.cells['nodes'][c,i] # const Vertex_handle& vp = f->vertex(i);
p=self.nodes['x'][vp] # const Point& p = vp->point();
# maybe better to use half-edges here.
# ordering of edges is slightly different than CGAL.
# if i gives the vertex,
# edges.push(Edge(f,i)); # this is the edge *opposite* vp
# for our ordering, need edge i+1
edges.append( self.cell_to_halfedge(c,i) )
while edges: # (! edges.empty()){
#const Edge& e = edges.top()
he=edges[-1]
he_flip=he.fwd()
# not sure about this part:
if self.edges['constrained'][he_flip.j]:
edges.pop()
continue
nbr=he_flip.cell_opp()
if nbr>=0:
# assuming that ON_POSITIVE_SIDE would mean that p (the location of the
# originating vertex) is *inside* the CCW-defined circle of the neighbor
# and would thus mean that the delaunay criterion is not satisfied.
#if ON_POSITIVE_SIDE != side_of_oriented_circle(n, p, true):
nbr_points= self.nodes['x'][ self.cells['nodes'][nbr] ]
p_in_nbr = robust_predicates.incircle(nbr_points[0],
nbr_points[1],
nbr_points[2],
p )
#if side_of_oriented_circle(n, p, true) == ON_POSITIVE_SIDE:
if p_in_nbr > 0:
self.flip_edge(he_flip.j)
extra=he.rev().opposite()
edges.append(extra)
continue
edges.pop() # drops last item
continue
def find_intersected_elements(self,nA,nB):
"""
returns a history of the elements traversed.
this includes:
('node',<node index>)
('edge',<half edge>)
('cell',<cell index>)
note that traversing along an edge is not included - but any
pair of nodes in sequence implies an edge between them.
"""
assert nA!=nB
assert not self.nodes['deleted'][nA]
assert not self.nodes['deleted'][nB]
# traversal could encounter multiple types of elements
trav=('node',nA)
A=self.nodes['x'][nA]
B=self.nodes['x'][nB]
history=[trav]
if self.dim()==1:
assert trav[0]=='node'
n_nbrs=self.node_to_nodes(trav[1])
for n_nbr in n_nbrs:
if n_nbr==nB:
history.append( ('node',nB) )
return history
if ordered( A,
self.nodes['x'][n_nbr],
B ):
trav=('node',n_nbr)
history.append( trav )
he=self.nodes_to_halfedge(nA,n_nbr)
break
else:
assert False # should never get here
while trav!=('node',nB):
he=he.fwd()
trav=('node',he.node_fwd())
history.append(trav)
return history
else:
while trav!=('node',nB):
# DBG!
if len(history)>1 and history[0]==history[1]:
import pdb
pdb.set_trace()
if trav[0]=='node':
ntrav=trav[1]
for c in self.node_to_cells(ntrav):
cn=self.cell_to_nodes(c)
# print "At node %d, checking cell %d (%s)"%(ntrav,c,cn)
ci_trav=list(cn).index(ntrav) # index of ntrav in cell c
nD=cn[(ci_trav+1)%3]
nE=cn[(ci_trav+2)%3]
if nD==nB or nE==nB:
trav=('node',nB)
# print "Done"
break
D=self.nodes['x'][nD]
oD=robust_predicates.orientation( A,B,D )
if oD>0:
continue
N=self.nodes['x'][ntrav]
if oD==0 and ordered(N,D,B):
# fell exactly on the A-B segment, and is in the
# right direction
trav=('node',nD)
break
E=self.nodes['x'][nE]
oE=robust_predicates.orientation( A,B,E )
if oE<0:
continue
if oE==0 and ordered(N,E,B):
                        # fell exactly on the A-B segment, and is in the right
                        # direction
trav=('node',nE)
break
j=self.cell_to_edges(c)[ (ci_trav+1)%3 ]
j_nbrs=self.edge_to_cells(j)
# AB crosses an edge - record the edge, and the side we are
# approaching from:
history.append( ('cell',c) )
if j_nbrs[0]==c:
trav=('edge',self.halfedge(j,0))
# making sure I got the 0/1 correct
assert trav[1].cell()==c
break
elif j_nbrs[1]==c:
trav=('edge',self.halfedge(j,1))
# ditto
assert trav[1].cell()==c
break
assert False
elif trav[0]=='edge':
he=trav[1].opposite()
#jnodes=self.edges['nodes'][j]
# have to choose between the opposite two edges or their common
# node:
c_next=he.cell()
history.append( ('cell',c_next) )
nD=he.fwd().node_fwd()
# print "Entering cell %d with nodes %s"%(c_next,self.cell_to_nodes(c_next))
oD=robust_predicates.orientation( A,B, self.nodes['x'][nD] )
if oD==0:
trav=('node',nD)
elif oD>0:
# going to cross
trav=('edge',he.fwd())
else:
trav=('edge',he.rev())
else:
assert False
history.append(trav)
return history
def locate_for_traversal_outside(self,p,p_other,loc_face,loc_type,loc_index):
"""
Helper method for locate_for_traversal()
handle the case where p is outside the triangulation, so loc_type
is either OUTSIDE_AFFINE_HULL or OUTSIDE_CONVEX_HULL
returns
('edge',<half-edge>)
('node',<node>)
(None,None) -- the line between p and p_other doesn't intersect the triangulation
"""
dim=self.dim()
if dim<0:
# there are no nodes, no work to be done
return (None,None)
elif dim==0:
# a single node. either we'll intersect it, or not.
N=six.next(self.valid_node_iter()) # get the only valid node
pN=self.nodes['x'][N]
# p_other could be coincident with N:
if (pN[0]==p_other[0]) and (pN[1]==p_other[1]):
return ('node',N)
# or we have to test for pN falling on the line between p,p_other
oN=robust_predicates.orientation(p, pN, p_other)
# either the segment passes through the one node, or doesn't intersect
# at all:
if oN==0 and ordered(p, pN, p_other):
return ('node',N)
else:
return (None,None)
elif dim==1:
# This could be much smarter, but current use case has this as a rare
# occasion, so just brute force it. find a half-edge, make sure it points
# towards us, and go.
if loc_type==self.OUTSIDE_AFFINE_HULL:
# we know that p is not on the line, but p_other could be.
# get an edge:
j=six.next(self.valid_edge_iter())
he=self.halfedge(j,0)
# get a half-edge facing p:
oj=robust_predicates.orientation(p,
self.nodes['x'][he.node_rev()],
self.nodes['x'][he.node_fwd()])
assert oj!=0.0 # that would mean we're collinear
# if the left side of he is facing us,
if oj>0:
# good - the left side of he, from rev to fwd, is facing p.
pass
else:
# flip it.
he=he.opposite()
# first - check against p_other - it could be on the same side
# of the line, on the line, or on the other side of the line.
ojo=robust_predicates.orientation(p_other,
self.nodes['x'][he.node_rev()],
self.nodes['x'][he.node_fwd()])
if ojo>0:
# p_other is on the same side of the line as p
return (None,None)
elif ojo==0:
# still have to figure out whether p_other is in the line or
# off the end.
o_loc_face,o_loc_type,o_loc_index=self.locate(p_other)
# just saw that it was in line, so better not be outside affine hull
assert o_loc_type!=self.OUTSIDE_AFFINE_HULL
if o_loc_type==self.OUTSIDE_CONVEX_HULL:
# a point off the line to a point beyond the ends of the line -
# no intersection.
return (None,None)
else:
if o_loc_type==self.IN_VERTEX:
return ('node',o_loc_index)
elif o_loc_type==self.IN_EDGE:
# This had been just returning the index, but we should
                            # return a half-edge.
# Make sure it faces p:
he=self.halfedge(o_loc_index,0)
oj2=robust_predicates.orientation(p,
self.nodes['x'][he.node_rev()],
self.nodes['x'][he.node_fwd()])
assert oj2!=0.0 # that would mean we're collinear
# if the left side of he is facing us,
if oj2>0:
# good - the left side of he, from rev to fwd, is facing p.
pass
else:
# flip it.
he=he.opposite()
return ('edge',he)
# shouldn't be possible
assert False
else: # p_other is on the other side
o_rev=robust_predicates.orientation(p,
self.nodes['x'][he.node_rev()],
p_other)
if o_rev==0.0:
return ('node',he.node_rev())
if o_rev > 0:
# rev is to the right of the p--p_other line,
# so walk forward...
A=p ; B=p_other
else:
# flip it around to keep the loop logic the same.
# note that this results in one extra loop, since rev
# becomes fwd and we already know that rev is not
# far enough over. whatever.
A=p_other ; B=p
he=he.opposite()
while 1:
n_fwd=he.node_fwd()
o_fwd=robust_predicates.orientation(A,
self.nodes['x'][n_fwd],
B)
if o_fwd==0.0:
return ('node',n_fwd)
if o_fwd<0:
return ('edge',he) # had been he.j, but we should return half-edge
# must go further!
he_opp=he.opposite()
he=he.fwd()
if he == he_opp: # went round the end - no intersection.
return (None,None)
else: # OUTSIDE_CONVEX_HULL
# points are in a line, and we're on that line but off the end.
# in this case, loc_index gives a nearby node
# so either p_other is also on the line, and the answer
# is ('node',loc_index)
# or it's not on the line, and the answer is (None,None)
orient = robust_predicates.orientation(p,
                                                       self.nodes['x'][loc_index],
p_other)
if orient!=0.0:
return (None,None)
if ordered(p,self.nodes['x'][loc_index],p_other):
return ('node',loc_index)
else:
return (None,None)
elif dim==2:
# use that to get a half-edge facing p...
# had done this, but loc_index is already a half edge
# he_original = he = self.halfedge(loc_index,0)
he_original = he = loc_index
# make sure we got the one facing out
if he.cell()>=0:
he=he.opposite()
assert he.cell()<0
# brute force it
while 1:
                # does this edge, or one of its nodes, fit the bill?
n_rev=he.node_rev()
n_fwd=he.node_fwd()
o_j=robust_predicates.orientation(p,
self.nodes['x'][n_rev],
self.nodes['x'][n_fwd])
if o_j<0:
# this edge is facing away from p - not a candidate.
pass
else:
# note that we could be collinear, o_j==0.0.
o_rev=robust_predicates.orientation(p,self.nodes['x'][n_rev],p_other)
o_fwd=robust_predicates.orientation(p,self.nodes['x'][n_fwd],p_other)
if o_rev == 0.0:
if o_fwd == 0.0:
assert o_j==0.0
if ordered(p,self.nodes['x'][n_rev],self.nodes['x'][n_fwd]):
return ('node',n_rev)
else:
return ('node',n_fwd)
else:
return ('node',n_rev)
elif o_rev>0:
if o_fwd<0:
# found the edge!
return ('edge',he) # had been he.j
elif o_fwd==0:
return ('node',n_fwd)
else:
# the whole edge is on the wrong side of the segment
pass
else: # o_rev<0
pass
he=he.fwd()
if he==he_original:
# none satisfied the intersection
return (None,None)
def locate_for_traversal(self,p,p_other):
""" Given a point [x,y], reformat the result of
self.locate() to be compatible with the traversal
algorithm below. In cases where p is outside the
existing cells/edges/nodes, use the combination of p and p_other
to figure out the first element which would be hit.
"""
# Here - figure out which cell, edge or node corresponds to pB
loc_face,loc_type,loc_index=self.locate(p)
# not ready for ending point far away, outside
if loc_type in [self.OUTSIDE_AFFINE_HULL,self.OUTSIDE_CONVEX_HULL]:
return self.locate_for_traversal_outside(p,p_other,loc_face,loc_type,loc_index)
elif loc_type == self.IN_VERTEX:
if loc_face == self.INF_CELL:
feat=('node', loc_index)
else:
feat=('node', self.cells['nodes'][loc_face, loc_index])
elif loc_type == self.IN_EDGE:
# This should be a half-edge.
# The half-edge is chosen such that it either faces p_other, or
# if all four points are collinear, the ordering is rev -- p -- fwd -- p_other
# or rev -- p -- p_other -- fwd.
            he=self.halfedge(loc_index,0) # start with arbitrary orientation
p_rev,p_fwd = self.nodes['x'][ he.nodes() ]
o_p_other = robust_predicates.orientation(p_other, p_rev, p_fwd)
            if o_p_other==0.0:
# should this use rel_ordered instead?
if ordered(p_rev,p,p_other):
# good - we're looking along, from rev to fwd
pass
else:
he=he.opposite()
            elif o_p_other<0:
he=he.opposite()
else:
pass
feat=('edge', he)
elif loc_type == self.IN_FACE:
feat=('cell', loc_face)
else:
assert False # shouldn't happen
return feat
def gen_intersected_elements(self,nA=None,nB=None,pA=None,pB=None):
"""
This is a new take on find_intersected_elements, with changes:
1. Either nodes or arbitrary points can be given
2. Elements are returned as a generator, rather than compiled into a list
and returned all at once.
3. Traversing along an edge was implied in the output of find_intersected_elements,
but is explicitly included here as a node--half_edge--node sequence.
returns a history of the elements traversed.
this includes:
('node',<node index>)
('edge',<half edge>)
('cell',<cell index>)
Notes:
The starting and ending features are included. If points were given
instead of nodes, then the feature here may be a cell, edge or node.
        When the point is outside the convex hull or affine hull, then there is no
corresponding feature (since otherwise one would assume that the feature
is truly intersected). The first feature returned is simply the first feature
encountered along the path, necessarily an edge or node, not a face.
"""
# verify that it was called correctly
if (nA is not None) and (nB is not None):
assert nA!=nB
assert (nA is None) or (not self.nodes['deleted'][nA])
assert (nB is None) or (not self.nodes['deleted'][nB])
assert (nA is None) != (pA is None)
assert (nB is None) != (pB is None)
dim=self.dim()
if nA is not None:
A=self.nodes['x'][nA]
trav=('node',nA)
else:
A=pA # trav set below
if nB is not None:
B=self.nodes['x'][nB]
end=('node',nB)
else:
B=pB # trav set below
if nA is None:
trav=self.locate_for_traversal(A,B)
if trav[0] is None:
                return # there are no intersections
if nB is None:
end=self.locate_for_traversal(B,A)
# but the orientation of an edge has to be flipped
if end[0]=='edge':
end=(end[0],end[1].opposite())
# keep tracks of features crossed, including starting/ending
assert trav[0] is not None
history=[trav]
yield trav
if trav==end:
return
if dim==0:
# already yielded the one possible intersection
# but this case should be caught by the return just above
assert False
return
elif dim==1:
# in the case where p -- p_other crosses the 1-dimensional set of
# nodes, trav==end, and we already returned above.
# otherwise, we walk along the edges and nodes
if trav[0]=='node': # get a first half-edge going in the correct direction
n_nbrs=self.node_to_nodes(trav[1])
for n_nbr in n_nbrs:
if (ordered( A,
self.nodes['x'][n_nbr],
B ) or
np.all(B==self.nodes['x'][n_nbr])):
he=self.nodes_to_halfedge(nA,n_nbr)
break
else:
assert False
trav=('edge',he)
history.append(trav)
yield trav
else:
assert trav[0]=='edge'
he=trav[1]
while trav != end:
trav=('node',he.node_fwd())
history.append(trav)
yield trav
if trav==end:
break
he=he.fwd()
trav=('edge',he)
history.append(trav)
yield trav
return
else: # dim==2
while trav!=end:
if trav[0]=='node':
# Crossing through a node
ntrav=trav[1]
N=self.nodes['x'][ntrav]
for c in self.node_to_cells(ntrav):
cn=self.cell_to_nodes(c)
# print "At node %d, checking cell %d (%s)"%(ntrav,c,cn)
ci_trav=list(cn).index(ntrav) # index of ntrav in cell c
# the other two nodes of the cell
nD=cn[(ci_trav+1)%3]
nE=cn[(ci_trav+2)%3]
# maybe this can be folded in below
#if end[0]=='node' and (end[1] in [nD,nE]):
# # trav=('node',nB)
# trav=end
# break
# Here
D=self.nodes['x'][nD]
oD=robust_predicates.orientation( A,B,D )
if oD>0:
# D is to the right of E, and our target, A is to the right
# of both, so this cell is not good
continue
if oD==0 and np.dot(B-A,D-N)>0: # ordered(A,N,D):
# used to test for ordered(N,D,B), but B could be on the
# edge, at D, or beyond D. Test with A to know that the
# edge is going in the right direction, then check for where
# B might fall.
# HERE: This is a problem, though, because it's possible for
# A==N.
# What I really want is for A-B to be in the same direction
# as N-D.
# could test a dot product, but that invites some roundoff
# in sinister situations. The differencing is probably not
# a big deal - if we can represent the absolute values
# distinctly, then we can certainly represent their differences.
# the multiplication could lead to numbers which are too small
# to represent. Any of these issues require absurdly small
# values/offsets in the input nodes, and we have already
# established that these all lie on a line and are distinct.
#
# The possible positive orderings
# [A=N] -- D -- B
# A -- N -- D -- B
# [A=N] -- [D==B]
# [A=N] -- B -- D
#
# fell exactly on the A-B segment, and is in the
# right direction
# Announce the edge, which could be the end of the traversal
trav=('edge',self.nodes_to_halfedge(ntrav,nD))
history.append(trav)
yield trav
if trav==end:
return
# And on to the node:
trav=('node',nD)
break # and we've completed this step
E=self.nodes['x'][nE]
oE=robust_predicates.orientation( A,B,E )
if oE<0:
# A is to the left of E
continue
                        if oE==0 and np.dot(B-A,E-N)>0: # ordered(A,N,E):
# Same as above - establish that it goes in the correct direction.
# again, the dot product is mildly dangerous
# again - fell exactly on the segment A-B, it's in the right
# direction.
trav=('edge',self.nodes_to_halfedge(ntrav,nE))
history.append(trav)
yield trav
if trav==end:
return
trav=('node',nE)
break
# if we get to here, then A--B passes through the cell, and either
# we stop at this cell, or A--B crosses the opposite edge:
trav=('cell',c)
if trav==end:
# don't try to traverse the cell - we're done!
# trav will get appended below
break
else:
# announce the cell, and move on to the edge
history.append(trav)
yield trav
trav=None # avoid confusion, clear this out
# AB crosses an edge - record the edge, and the side we are
# approaching from:
j=self.cell_to_edges(c)[ (ci_trav+1)%3 ]
j_nbrs=self.edge_to_cells(j)
if j_nbrs[0]==c:
trav=('edge',self.halfedge(j,0))
elif j_nbrs[1]==c:
trav=('edge',self.halfedge(j,1))
else:
assert False
# making sure I got the 0/1 correct
assert trav[1].cell()==c
break
elif trav[0]=='edge':
# trav[1].cell() is the cell we just left
# this then is the half-edge facing the cell we're
# entering
he=trav[1].opposite()
c_next=he.cell()
trav=('cell',c_next)
if trav==end:
pass # done!
else:
# have to choose between the opposite two edges or their common
# node.
# record the cell we just passed through
history.append(trav)
yield trav
nD=he.fwd().node_fwd()
# print "Entering cell %d with nodes %s"%(c_next,self.cell_to_nodes(c_next))
oD=robust_predicates.orientation( A,B, self.nodes['x'][nD] )
if oD==0:
trav=('node',nD)
elif oD>0:
# going to cross the edge "on the right" (I think)
trav=('edge',he.fwd())
else:
# going to cross the edge "on the left"
trav=('edge',he.rev())
else:
assert False
history.append(trav)
yield trav
return
def add_constraint(self,nA,nB):
jAB=self.nodes_to_edge([nA,nB])
if jAB is not None:
# no work to do - topology already good.
if self.edges['constrained'][jAB]:
raise DuplicateConstraint(nodes=[nA,nB])
self.edges['constrained'][jAB]=True
return jAB
        # Find everything the new constrained edge nA--nB crosses.
int_elts=self.find_intersected_elements(nA,nB)
        # Now we need to record the two holes bordering the new edge:
left_nodes=[nA] # will be recorded CW
right_nodes=[nA] # will be recorded CCW
# Iterate over the crossed elements, checking that the new
# edge doesn't encounter any collinear nodes or other constrained
# edges. Build up the nodes of the holes at the same time.
dead_cells=[]
dead_edges=[]
for elt in int_elts[1:-1]:
if elt[0]=='node':
raise self.ConstraintCollinearNode("Constraint intersects a node",
node=elt[1])
if elt[0]=='cell':
dead_cells.append(elt[1])
if elt[0]=='edge':
if self.edges['constrained'][ elt[1].j ]:
raise IntersectingConstraints("Constraint intersects a constraint",
edge=elt[1].j )
next_left=elt[1].node_fwd()
if left_nodes[-1]!=next_left:
left_nodes.append(next_left)
next_right= elt[1].node_rev()
if right_nodes[-1]!=next_right:
right_nodes.append(next_right)
dead_edges.append(elt[1].j)
left_nodes.append(nB)
right_nodes.append(nB)
left_nodes = left_nodes[::-1]
# tricky business here
# but the delaunay business is only invoked on node operations - leaving
# the edge/cell operations free and clear to violate invariants
for c in dead_cells:
self.delete_cell(c)
for j in dead_edges:
self.delete_edge(j)
j=self.add_edge(nodes=[nA,nB],constrained=True)
# and then sew up the holes!
self.fill_hole( left_nodes )
self.fill_hole( right_nodes )
return j
def remove_constraint(self,nA=None,nB=None,j=None):
""" Assumes that there exists a constraint between nodes
nA and nB (or that the edge given by j is constrained).
The constrained flag is removed for the edge, and if
the Delaunay criterion is no longer satisfied edges are
flipped as needed.
"""
if j is None:
j=self.nodes_to_edge([nA,nB])
assert self.edges['constrained'][j]
self.edges['constrained'][j]=False
c1,c2=self.edge_to_cells(j)
if (c1>=0) and (c2>=0):
c=c1 # can we just propagate from one side?
for ni,n in enumerate(self.cell_to_nodes(c1)):
if n not in self.edges['nodes'][j]:
self.propagating_flip(c1,ni)
break
if self.post_check:
self.check_local_delaunay()
def node_to_constraints(self,n):
return [j
for j in self.node_to_edges(n)
if self.edges['constrained'][j]]
def init_from_grid(self,g,node_coordinate='x',set_valid=False,
valid_min_area=1e-2,on_intersection='exception'):
"""
Initialize from the nodes and edges of an existing grid, making
existing edges constrained
node_coordinate: supply the name of an alternate coordinate defined
        on the nodes. g.nodes[node_coordinate] should be an [Nnodes,2] field.
        set_valid: if True, add a 'valid' field for cells, and set to True
for cells of the triangulation that have finite area and fall
within the src grid g.
on_intersection:
'exception': intersecting edges in the input grid raise an error.
'insert': at intersecting edges construct and insert a new node.
"""
if set_valid:
self.add_cell_field('valid',np.zeros(self.Ncells(),np.bool8),
on_exists='pass')
# Seems that the indices will get misaligned if there are
# deleted nodes.
# TODO: add node index mapping code here.
assert np.all( ~g.nodes['deleted'] )
self.bulk_init(g.nodes[node_coordinate][~g.nodes['deleted']])
all_segs=[ g.edges['nodes'][j]
for j in g.valid_edge_iter() ]
while all_segs:
nodes=all_segs.pop(0)
if on_intersection=='exception':
self.add_constraint( *nodes )
else:
self.add_constraint_and_intersections( *nodes )
if set_valid:
from shapely import geometry
self.cells['valid']=~self.cells['deleted']
# Maybe unnecessary. Had some issues with 0 fill values here.
self.cells['_area']=np.nan
self.cells['_center']=np.nan
areas=self.cells_area()
self.cells['valid'][areas<=valid_min_area]=False
poly=g.boundary_polygon()
centroids=self.cells_centroid()
for c in np.nonzero(self.cells['valid'])[0]:
if not poly.contains( geometry.Point(centroids[c]) ):
self.cells['valid'][c]=False
def add_constraint_and_intersections(self,nA,nB,on_exists='exception'):
"""
Like add_constraint, but in the case of intersections with existing constraints
insert new nodes as needed and update existing and new constrained edges.
"""
all_segs=[ [nA,nB] ]
result_nodes=[nA]
result_edges=[]
while all_segs:
nA,nB=all_segs.pop(0)
try:
j=self.add_constraint(nA,nB)
except IntersectingConstraints as exc:
if isinstance(exc,ConstraintCollinearNode):
all_segs.insert(0, [nA,exc.node] )
all_segs.insert(1, [exc.node,nB] )
continue
else:
j_other=exc.edge
assert j_other is not None
segA=self.nodes['x'][self.edges['nodes'][j_other]]
segB=self.nodes['x'][[nA,nB]]
x_int,alphas=segment_segment_intersection(segA,segB)
# Getting an error where x_int is one of the endpoints of
# segA. This is while inserting a contour that ends on
# the boundary.
n_new=self.split_constraint(j=j_other,x=x_int)
if nB!=n_new:
all_segs.insert(0,[n_new,nB])
if nA!=n_new:
all_segs.insert(0,[nA,n_new])
continue
except DuplicateConstraint as exc:
if on_exists=='exception':
raise
elif on_exists=='ignore':
j=self.nodes_to_edge(nA,nB)
elif on_exists=='stop':
break
else:
assert False,"Bad value %s for on_exists"%on_exists
result_nodes.append(nB)
assert j is not None
result_edges.append(j)
return result_nodes,result_edges
def split_constraint(self,x,j):
nodes_other=self.edges['nodes'][j].copy()
j_data=unstructured_grid.rec_to_dict(self.edges[j].copy())
self.remove_constraint(j=j)
n_new=self.add_or_find_node(x=x)
js=[]
if nodes_other[0]!=n_new:
js.append( self.add_constraint(nodes_other[0],n_new) )
if n_new!=nodes_other[1]:
js.append( self.add_constraint(n_new,nodes_other[1]) )
for f in j_data:
if f in ['nodes','cells','deleted']: continue
self.edges[f][js]=j_data[f]
return n_new
def add_constrained_linestring(self,coords,
on_intersection='exception',
on_exists='exception',
closed=False):
"""
Optionally insert new nodes as needed along
the way.
on_intersection: when a constraint intersects an existing constraint,
'exception' => re-raise the exception
'insert' => insert a constructed node, and divide the new and old constraints.
        on_exists: when a constraint to be inserted already exists,
'exception' => re-raise the exception
'ignore' => keep going
'stop' => return
closed: Whether the first and last nodes are also connected
returns [list of nodes],[list of edges]
"""
nodes=[self.add_or_find_node(x=x)
for x in coords]
result_nodes=[nodes[0]]
result_edges=[]
if not closed:
ab_list=zip(nodes[:-1],nodes[1:])
else:
ab_list=zip(nodes,np.roll(nodes,-1))
for a,b in ab_list:
if on_intersection=='insert':
sub_nodes,sub_edges=self.add_constraint_and_intersections(a,b,
on_exists=on_exists)
result_nodes+=sub_nodes[1:]
result_edges+=sub_edges
if (on_exists=='stop') and (sub_nodes[-1]!=b):
print("Stopping early")
break
else:
try:
j=self.add_constraint(a,b)
except DuplicateConstraint as exc:
if on_exists=='exception':
raise
elif on_exists=='stop':
break
elif on_exists=='ignore':
j=self.nodes_to_edge(a,b)
result_nodes.append(b)
result_edges.append(j)
return result_nodes,result_edges
def bulk_init_slow(self,points):
raise Exception("No - it's really slow. Don't do this.")
def bulk_init(self,points): # ExactDelaunay
if spatial is None:
return self.bulk_init_slow(points)
# looks like centering this affects how many cells Delaunay
# finds. That's lame.
sdt = spatial.Delaunay(points-points.mean(axis=0))
self.nodes=np.zeros( len(points), self.node_dtype)
self.cells=np.zeros( sdt.vertices.shape[0], self.cell_dtype)
self.nodes['x']=points
self.cells['nodes']=sdt.vertices
# looks like it's CGAL style:
# neighbor[1] shares nodes[0] and nodes[2]
# vertices are CCW
for c in range(self.Ncells()):
for i,(a,b) in enumerate(circular_pairs(self.cells['nodes'][c])):
# first time - that would be i=0, and the first two nodes.
                # but neighbors are indexed by the opposite node, so the edge
                # connecting nodes[0]--nodes[1] corresponds with neighbor 2.
c_nbr=sdt.neighbors[c,(i+2)%3]
# c_nbr==-1 on convex hull.
# only worry about cases where c is larger.
if c<c_nbr:
continue
if c_nbr<0:
c_nbr=self.INF_CELL
j=self.add_edge(nodes=[a,b],
cells=[c,c_nbr])
# and record in the cell, too
self.cells['edges'][c,i]=j
if c_nbr!=self.INF_CELL:
nbr_nodes=self.cells['nodes'][c_nbr]
for i_nbr in [0,1,2]:
if nbr_nodes[i_nbr]==b and nbr_nodes[(i_nbr+1)%3]==a:
self.cells['edges'][c_nbr,i_nbr]=j
break
else:
assert False
def constrained_centers(self):
"""
For cells with no constrained edges, return the circumcenter.
        For cells with one or more constrained edges, return the centroid instead.
The details may evolve, but the purpose is to get a point which
is inside the domain and can be used like a circumcenter (i.e.
        approximately lies on the medial axis of the continuous boundary).
"""
ccs=self.cells_center(refresh=True) # circumcenters
centroids=self.cells_centroid()
e2c=self.edge_to_cells() # recalc=True)
cell_with_constraint=np.unique( e2c[ self.edges['constrained']] )
result=ccs.copy()
result[cell_with_constraint] = centroids[cell_with_constraint]
return result
# TODO: def constrained_radii(self):
# Calculate the usual circumradius, but for centers which were
# adjusted due to a constrained edge also check point-segment
# distances.
def point_clearance(self,x,hint=None):
"""
Return the distance from point x=[p_x,p_y] to the nearest
node or constrained segment of the triangulation.
hint: To speed up consecutive queries with spatial locality, pass
a dictionary, and a new dictionary will be returned as the second
item in a tuple. The initial dictionary can be empty, or 'c':int
to give a starting face of the triangulation.
"""
if hint is not None:
loc_face,loc_type,loc_index=self.locate(x,**hint)
else:
loc_face,loc_type,loc_index=self.locate(x)
assert loc_type in (self.IN_VERTEX, self.IN_EDGE, self.IN_FACE)
face_nodes=self.cells['nodes'][loc_face]
min_clearance=dist( self.nodes['x'][face_nodes], x ).min()
for j in self.cell_to_edges(loc_face):
if self.edges['constrained'][j]:
j_clearance=point_segment_distance(x, self.nodes['x'][self.edges['nodes'][j]] )
min_clearance=min(min_clearance,j_clearance)
if hint is not None:
return min_clearance,{'c':loc_face}
else:
return min_clearance
# Issues:
# Calls like edge_to_cells do not scale well right now. In particular,
# it would be better in this code to always specify the edge, so that
# a full scan isn't necessary.
| mit |
jayflo/scikit-learn | examples/cluster/plot_dbscan.py | 346 | 2479 | # -*- coding: utf-8 -*-
"""
===================================
Demo of DBSCAN clustering algorithm
===================================
Finds core samples of high density and expands clusters from them.
"""
print(__doc__)
import numpy as np
from sklearn.cluster import DBSCAN
from sklearn import metrics
from sklearn.datasets.samples_generator import make_blobs
from sklearn.preprocessing import StandardScaler
##############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, labels_true = make_blobs(n_samples=750, centers=centers, cluster_std=0.4,
random_state=0)
X = StandardScaler().fit_transform(X)
##############################################################################
# Compute DBSCAN
db = DBSCAN(eps=0.3, min_samples=10).fit(X)
core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
core_samples_mask[db.core_sample_indices_] = True
labels = db.labels_
# Number of clusters in labels, ignoring noise if present.
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
print('Estimated number of clusters: %d' % n_clusters_)
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels))
print("Completeness: %0.3f" % metrics.completeness_score(labels_true, labels))
print("V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels))
print("Adjusted Rand Index: %0.3f"
% metrics.adjusted_rand_score(labels_true, labels))
print("Adjusted Mutual Information: %0.3f"
% metrics.adjusted_mutual_info_score(labels_true, labels))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, labels))
##############################################################################
# Plot result
import matplotlib.pyplot as plt
# Black removed and is used for noise instead.
unique_labels = set(labels)
colors = plt.cm.Spectral(np.linspace(0, 1, len(unique_labels)))
for k, col in zip(unique_labels, colors):
if k == -1:
# Black used for noise.
col = 'k'
class_member_mask = (labels == k)
xy = X[class_member_mask & core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
xy = X[class_member_mask & ~core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
| bsd-3-clause |
flexdigit/GPIO | read_1day.py | 1 | 2760 | #!/usr/bin/python
import sqlite3
import matplotlib.pyplot as plt
import numpy as np
# Connect DB create cursor
connection = sqlite3.connect("Gasmeter.db")
cursor = connection.cursor()
# SQL-Query
sql = """select tstamp,
case cast (strftime('%H', tstamp) as integer)
when 00 then '0'
when 01 then '1'
when 02 then '2'
when 03 then '3'
when 04 then '4'
when 05 then '5'
when 06 then '6'
when 07 then '7'
when 08 then '8'
when 09 then '9'
when 10 then '10'
when 11 then '11'
when 12 then '12'
when 13 then '13'
when 14 then '14'
when 15 then '15'
when 16 then '16'
when 17 then '17'
when 18 then '18'
when 19 then '19'
when 20 then '20'
when 21 then '21'
when 22 then '22'
when 23 then '23'
when 24 then '24'
else 'fehler' end,
sum(tick) from gascounter where date(tstamp) = date('now', '-9 days')
GROUP BY strftime('%H', tstamp)
ORDER BY tstamp"""
# Values from today:
#select tstamp, sum(tick) from gascounter where date(tstamp) = date('now')
# from yesterday:
#select tstamp, sum(tick) from gascounter where date(tstamp) = date('now', '-1 days')
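# A hedged sketch (not in the original script) of parameterizing the day offset
# instead of editing the SQL by hand; note it yields zero-padded hour strings
# ('00'..'23') rather than the CASE-mapped values above:
#   days_back = 9
#   sql_daily = ("select tstamp, strftime('%H', tstamp), sum(tick) from gascounter "
#                "where date(tstamp) = date('now', ?) "
#                "group by strftime('%H', tstamp) order by tstamp")
#   cursor.execute(sql_daily, ('-%d days' % days_back,))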
# dispatch the SQL-Query
cursor.execute(sql)
# dsatz[0] is tstamp (date + time)
# dsatz[1] is hour of the day (0, 1, 2,...)
# dsatz[2] is sum of the ticks
date_list = []         # will be the list of dates
h_per_day_list = []    # will be the list of hours of the day
Gas_consume_list = []  # will be the list of gas consumption per hour
daily_amount = 0
for dsatz in cursor:
tmp = dsatz[0].split(" ")
# tmp[0] is date
# tmp[1] is time
#print str(dsatz[0]) + " " + str(dsatz[1]) + " " + str(dsatz[2])
#print str(tmp[0]) + " " + str(dsatz[1]) + " " + str(dsatz[2])
date_list.append(tmp[0])
h_per_day_list.append(dsatz[1])
Gas_consume_list.append(dsatz[2])
#print max(dsatz_2_liste)
# close DB connection
connection.close()
print "\nDay: ",date_list[0]
#for i in range(len(date_list)):
for i in range(len(date_list)):
print h_per_day_list[i], Gas_consume_list[i]/2.0
daily_amount += Gas_consume_list[i]/2.0
print "\nticks: ",daily_amount, "[ticks]"
daily_amount = daily_amount * 0.01
print "Daily consumption: ",daily_amount, "[m^3]"
"""
plot a diagram
"""
ind = np.arange(len(h_per_day_list))
x = h_per_day_list
y = Gas_consume_list
width = 0.8
p = plt.bar(ind, y, width, color='g')
plt.ylabel('Gas consumption [m^3]')
plt.title('Gas consumption per one day')
#plt.xticks(x)
#plt.legend( (p[0]), ('[m^3]') )
plt.show()
| gpl-2.0 |
mne-tools/mne-tools.github.io | 0.18/_downloads/45625b4ba4b19e5b7a60241d89d3f8ef/plot_simulate_evoked_data.py | 9 | 2746 | """
==============================
Generate simulated evoked data
==============================
"""
# Author: Daniel Strohmeier <[email protected]>
# Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.datasets import sample
from mne.time_frequency import fit_iir_model_raw
from mne.viz import plot_sparse_source_estimates
from mne.simulation import simulate_sparse_stc, simulate_evoked
print(__doc__)
###############################################################################
# Load real data as templates
data_path = sample.data_path()
raw = mne.io.read_raw_fif(data_path + '/MEG/sample/sample_audvis_raw.fif')
proj = mne.read_proj(data_path + '/MEG/sample/sample_audvis_ecg-proj.fif')
raw.info['projs'] += proj
raw.info['bads'] = ['MEG 2443', 'EEG 053'] # mark bad channels
fwd_fname = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'
ave_fname = data_path + '/MEG/sample/sample_audvis-no-filter-ave.fif'
cov_fname = data_path + '/MEG/sample/sample_audvis-cov.fif'
fwd = mne.read_forward_solution(fwd_fname)
fwd = mne.pick_types_forward(fwd, meg=True, eeg=True, exclude=raw.info['bads'])
cov = mne.read_cov(cov_fname)
info = mne.io.read_info(ave_fname)
label_names = ['Aud-lh', 'Aud-rh']
labels = [mne.read_label(data_path + '/MEG/sample/labels/%s.label' % ln)
for ln in label_names]
###############################################################################
# Generate source time courses from 2 dipoles and the corresponding evoked data
times = np.arange(300, dtype=np.float) / raw.info['sfreq'] - 0.1
rng = np.random.RandomState(42)
def data_fun(times):
"""Function to generate random source time courses"""
return (50e-9 * np.sin(30. * times) *
np.exp(- (times - 0.15 + 0.05 * rng.randn(1)) ** 2 / 0.01))
stc = simulate_sparse_stc(fwd['src'], n_dipoles=2, times=times,
random_state=42, labels=labels, data_fun=data_fun)
###############################################################################
# Generate noisy evoked data
picks = mne.pick_types(raw.info, meg=True, exclude='bads')
iir_filter = fit_iir_model_raw(raw, order=5, picks=picks, tmin=60, tmax=180)[1]
nave = 100 # simulate average of 100 epochs
evoked = simulate_evoked(fwd, stc, info, cov, nave=nave, use_cps=True,
iir_filter=iir_filter)
###############################################################################
# Plot
plot_sparse_source_estimates(fwd['src'], stc, bgcolor=(1, 1, 1),
opacity=0.5, high_resolution=True)
plt.figure()
plt.psd(evoked.data[0])
evoked.plot(time_unit='s')
| bsd-3-clause |
guziy/basemap | doc/users/figures/geos_partial.py | 3 | 1230 | from mpl_toolkits.basemap import Basemap
import numpy as np
import matplotlib.pyplot as plt
fig = plt.figure()
# global geostationary map centered on lon_0
lon_0=57.
# resolution = None means don't process the boundary datasets.
m1 = Basemap(projection='geos',lon_0=lon_0,resolution=None)
# add an axes with a black background
ax = fig.add_axes([0.1,0.1,0.8,0.8],axisbg='k')
# plot just upper right quadrant (corners determined from global map).
# keywords llcrnrx,llcrnry,urcrnrx,urcrnry used to define the lower
# left and upper right corners in map projection coordinates.
# llcrnrlat,llcrnrlon,urcrnrlon,urcrnrlat could be used to define
# lat/lon values of corners - but this won't work in cases such as this
# where one of the corners does not lie on the earth.
m = Basemap(projection='geos',lon_0=lon_0,resolution='l',\
llcrnrx=0.,llcrnry=0.,urcrnrx=m1.urcrnrx/2.,urcrnry=m1.urcrnry/2.)
m.drawcoastlines()
m.drawmapboundary(fill_color='aqua')
m.fillcontinents(color='coral',lake_color='aqua')
m.drawcountries()
# draw parallels and meridians.
m.drawparallels(np.arange(-90.,120.,30.))
m.drawmeridians(np.arange(0.,360.,60.))
m.drawmapboundary()
plt.title('Geostationary Map Showing A Quadrant of the Globe')
plt.show()
| gpl-2.0 |
ligo-cbc/pycbc | examples/distributions/spin_spatial_distr_example.py | 14 | 1973 | import numpy
import matplotlib.pyplot as plt
import pycbc.coordinates as co
from mpl_toolkits.mplot3d import Axes3D
from pycbc import distributions
# We can choose any bounds between 0 and pi for this distribution, but the bounds
# are given in units of pi, so we use 0 to 1.
theta_low = 0.
theta_high = 1.
# Units of pi for the bounds of the azimuthal angle which goes from 0 to 2 pi.
phi_low = 0.
phi_high = 2.
# Create a distribution object from distributions.py
# Here we are using the Uniform Solid Angle function which takes
# theta = polar_bounds(theta_lower_bound to a theta_upper_bound), and then
# phi = azimuthal_bound(phi_lower_bound to a phi_upper_bound).
uniform_solid_angle_distribution = distributions.UniformSolidAngle(
polar_bounds=(theta_low,theta_high),
azimuthal_bounds=(phi_low,phi_high))
# Now we can take a random variable sample from that distribution.
# In this case we want 10000 samples.
solid_angle_samples = uniform_solid_angle_distribution.rvs(size=10000)
# Make a spin magnitude of 1, since the solid angle is only 2-dimensional and we
# need a 3rd dimension for the 3D plot that we make later on.
spin_mag = numpy.ndarray(shape=(10000), dtype=float)
for i in range(0,10000):
spin_mag[i] = 1.
# Use pycbc.coordinates as co. Use spherical_to_cartesian function to
# convert from spherical polar coordinates to cartesian coordinates.
spinx, spiny, spinz = co.spherical_to_cartesian(spin_mag,
solid_angle_samples['phi'],
solid_angle_samples['theta'])
# Plot the spherical distribution of spins to make sure that they are
# distributed across the surface of a sphere.
fig = plt.figure(figsize=(10,10))
ax = fig.add_subplot(111, projection='3d')
ax.scatter(spinx, spiny, spinz, s=1)
ax.set_xlabel('Spin X Axis')
ax.set_ylabel('Spin Y Axis')
ax.set_zlabel('Spin Z Axis')
plt.show()
| gpl-3.0 |
kylerbrown/scikit-learn | sklearn/ensemble/gradient_boosting.py | 126 | 65552 | """Gradient Boosted Regression Trees
This module contains methods for fitting gradient boosted regression trees for
both classification and regression.
The module structure is the following:
- The ``BaseGradientBoosting`` base class implements a common ``fit`` method
for all the estimators in the module. Regression and classification
only differ in the concrete ``LossFunction`` used.
- ``GradientBoostingClassifier`` implements gradient boosting for
classification problems.
- ``GradientBoostingRegressor`` implements gradient boosting for
regression problems.
"""
# Authors: Peter Prettenhofer, Scott White, Gilles Louppe, Emanuele Olivetti,
# Arnaud Joly
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
from abc import ABCMeta, abstractmethod
from time import time
import numbers
import numpy as np
from scipy import stats
from .base import BaseEnsemble
from ..base import BaseEstimator
from ..base import ClassifierMixin
from ..base import RegressorMixin
from ..utils import check_random_state, check_array, check_X_y, column_or_1d
from ..utils import check_consistent_length, deprecated
from ..utils.extmath import logsumexp
from ..utils.fixes import expit, bincount
from ..utils.stats import _weighted_percentile
from ..utils.validation import check_is_fitted, NotFittedError
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..tree.tree import DecisionTreeRegressor
from ..tree._tree import DTYPE, TREE_LEAF
from ..tree._tree import PresortBestSplitter
from ..tree._tree import FriedmanMSE
from ._gradient_boosting import predict_stages
from ._gradient_boosting import predict_stage
from ._gradient_boosting import _random_sample_mask
class QuantileEstimator(BaseEstimator):
"""An estimator predicting the alpha-quantile of the training targets."""
def __init__(self, alpha=0.9):
if not 0 < alpha < 1.0:
raise ValueError("`alpha` must be in (0, 1.0) but was %r" % alpha)
self.alpha = alpha
def fit(self, X, y, sample_weight=None):
if sample_weight is None:
self.quantile = stats.scoreatpercentile(y, self.alpha * 100.0)
else:
self.quantile = _weighted_percentile(y, sample_weight, self.alpha * 100.0)
def predict(self, X):
check_is_fitted(self, 'quantile')
y = np.empty((X.shape[0], 1), dtype=np.float64)
y.fill(self.quantile)
return y
class MeanEstimator(BaseEstimator):
"""An estimator predicting the mean of the training targets."""
def fit(self, X, y, sample_weight=None):
if sample_weight is None:
self.mean = np.mean(y)
else:
self.mean = np.average(y, weights=sample_weight)
def predict(self, X):
check_is_fitted(self, 'mean')
y = np.empty((X.shape[0], 1), dtype=np.float64)
y.fill(self.mean)
return y
class LogOddsEstimator(BaseEstimator):
"""An estimator predicting the log odds ratio."""
scale = 1.0
def fit(self, X, y, sample_weight=None):
# pre-cond: pos, neg are encoded as 1, 0
if sample_weight is None:
pos = np.sum(y)
neg = y.shape[0] - pos
else:
pos = np.sum(sample_weight * y)
neg = np.sum(sample_weight * (1 - y))
if neg == 0 or pos == 0:
raise ValueError('y contains non binary labels.')
self.prior = self.scale * np.log(pos / neg)
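        # Worked example (illustrative, not from the original source): with 75
        # positive and 25 negative samples and scale=1.0,
        # prior = 1.0 * log(75/25) = log(3) ~= 1.0986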
def predict(self, X):
check_is_fitted(self, 'prior')
y = np.empty((X.shape[0], 1), dtype=np.float64)
y.fill(self.prior)
return y
class ScaledLogOddsEstimator(LogOddsEstimator):
"""Log odds ratio scaled by 0.5 -- for exponential loss. """
scale = 0.5
class PriorProbabilityEstimator(BaseEstimator):
"""An estimator predicting the probability of each
class in the training data.
"""
def fit(self, X, y, sample_weight=None):
if sample_weight is None:
sample_weight = np.ones_like(y, dtype=np.float64)
class_counts = bincount(y, weights=sample_weight)
self.priors = class_counts / class_counts.sum()
def predict(self, X):
check_is_fitted(self, 'priors')
y = np.empty((X.shape[0], self.priors.shape[0]), dtype=np.float64)
y[:] = self.priors
return y
class ZeroEstimator(BaseEstimator):
"""An estimator that simply predicts zero. """
def fit(self, X, y, sample_weight=None):
if np.issubdtype(y.dtype, int):
# classification
self.n_classes = np.unique(y).shape[0]
if self.n_classes == 2:
self.n_classes = 1
else:
# regression
self.n_classes = 1
def predict(self, X):
check_is_fitted(self, 'n_classes')
y = np.empty((X.shape[0], self.n_classes), dtype=np.float64)
y.fill(0.0)
return y
class LossFunction(six.with_metaclass(ABCMeta, object)):
"""Abstract base class for various loss functions.
Attributes
----------
K : int
The number of regression trees to be induced;
1 for regression and binary classification;
``n_classes`` for multi-class classification.
"""
is_multi_class = False
def __init__(self, n_classes):
self.K = n_classes
def init_estimator(self):
"""Default ``init`` estimator for loss function. """
raise NotImplementedError()
@abstractmethod
def __call__(self, y, pred, sample_weight=None):
"""Compute the loss of prediction ``pred`` and ``y``. """
@abstractmethod
def negative_gradient(self, y, y_pred, **kargs):
"""Compute the negative gradient.
Parameters
        ----------
y : np.ndarray, shape=(n,)
The target labels.
y_pred : np.ndarray, shape=(n,):
The predictions.
"""
def update_terminal_regions(self, tree, X, y, residual, y_pred,
sample_weight, sample_mask,
learning_rate=1.0, k=0):
"""Update the terminal regions (=leaves) of the given tree and
updates the current predictions of the model. Traverses tree
and invokes template method `_update_terminal_region`.
Parameters
----------
tree : tree.Tree
The tree object.
X : ndarray, shape=(n, m)
The data array.
y : ndarray, shape=(n,)
The target labels.
residual : ndarray, shape=(n,)
The residuals (usually the negative gradient).
y_pred : ndarray, shape=(n,)
The predictions.
sample_weight : ndarray, shape=(n,)
The weight of each sample.
sample_mask : ndarray, shape=(n,)
The sample mask to be used.
learning_rate : float, default=0.1
learning rate shrinks the contribution of each tree by
``learning_rate``.
k : int, default 0
The index of the estimator being updated.
"""
# compute leaf for each sample in ``X``.
terminal_regions = tree.apply(X)
# mask all which are not in sample mask.
masked_terminal_regions = terminal_regions.copy()
masked_terminal_regions[~sample_mask] = -1
# update each leaf (= perform line search)
for leaf in np.where(tree.children_left == TREE_LEAF)[0]:
self._update_terminal_region(tree, masked_terminal_regions,
leaf, X, y, residual,
y_pred[:, k], sample_weight)
# update predictions (both in-bag and out-of-bag)
y_pred[:, k] += (learning_rate
* tree.value[:, 0, 0].take(terminal_regions, axis=0))
@abstractmethod
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""Template method for updating terminal regions (=leaves). """
class RegressionLossFunction(six.with_metaclass(ABCMeta, LossFunction)):
"""Base class for regression loss functions. """
def __init__(self, n_classes):
if n_classes != 1:
raise ValueError("``n_classes`` must be 1 for regression but "
"was %r" % n_classes)
super(RegressionLossFunction, self).__init__(n_classes)
class LeastSquaresError(RegressionLossFunction):
"""Loss function for least squares (LS) estimation.
    Terminal regions need not be updated for least squares. """
def init_estimator(self):
return MeanEstimator()
def __call__(self, y, pred, sample_weight=None):
if sample_weight is None:
return np.mean((y - pred.ravel()) ** 2.0)
else:
return (1.0 / sample_weight.sum() *
np.sum(sample_weight * ((y - pred.ravel()) ** 2.0)))
def negative_gradient(self, y, pred, **kargs):
return y - pred.ravel()
def update_terminal_regions(self, tree, X, y, residual, y_pred,
sample_weight, sample_mask,
learning_rate=1.0, k=0):
"""Least squares does not need to update terminal regions.
But it has to update the predictions.
"""
# update predictions
y_pred[:, k] += learning_rate * tree.predict(X).ravel()
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
pass
class LeastAbsoluteError(RegressionLossFunction):
"""Loss function for least absolute deviation (LAD) regression. """
def init_estimator(self):
return QuantileEstimator(alpha=0.5)
def __call__(self, y, pred, sample_weight=None):
if sample_weight is None:
return np.abs(y - pred.ravel()).mean()
else:
return (1.0 / sample_weight.sum() *
np.sum(sample_weight * np.abs(y - pred.ravel())))
def negative_gradient(self, y, pred, **kargs):
"""1.0 if y - pred > 0.0 else -1.0"""
pred = pred.ravel()
return 2.0 * (y - pred > 0.0) - 1.0
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""LAD updates terminal regions to median estimates. """
terminal_region = np.where(terminal_regions == leaf)[0]
sample_weight = sample_weight.take(terminal_region, axis=0)
diff = y.take(terminal_region, axis=0) - pred.take(terminal_region, axis=0)
tree.value[leaf, 0, 0] = _weighted_percentile(diff, sample_weight, percentile=50)
class HuberLossFunction(RegressionLossFunction):
"""Huber loss function for robust regression.
M-Regression proposed in Friedman 2001.
References
----------
J. Friedman, Greedy Function Approximation: A Gradient Boosting
Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
"""
def __init__(self, n_classes, alpha=0.9):
super(HuberLossFunction, self).__init__(n_classes)
self.alpha = alpha
self.gamma = None
def init_estimator(self):
return QuantileEstimator(alpha=0.5)
def __call__(self, y, pred, sample_weight=None):
pred = pred.ravel()
diff = y - pred
gamma = self.gamma
if gamma is None:
if sample_weight is None:
gamma = stats.scoreatpercentile(np.abs(diff), self.alpha * 100)
else:
gamma = _weighted_percentile(np.abs(diff), sample_weight, self.alpha * 100)
gamma_mask = np.abs(diff) <= gamma
if sample_weight is None:
sq_loss = np.sum(0.5 * diff[gamma_mask] ** 2.0)
lin_loss = np.sum(gamma * (np.abs(diff[~gamma_mask]) - gamma / 2.0))
loss = (sq_loss + lin_loss) / y.shape[0]
else:
sq_loss = np.sum(0.5 * sample_weight[gamma_mask] * diff[gamma_mask] ** 2.0)
lin_loss = np.sum(gamma * sample_weight[~gamma_mask] *
(np.abs(diff[~gamma_mask]) - gamma / 2.0))
loss = (sq_loss + lin_loss) / sample_weight.sum()
return loss
def negative_gradient(self, y, pred, sample_weight=None, **kargs):
pred = pred.ravel()
diff = y - pred
if sample_weight is None:
gamma = stats.scoreatpercentile(np.abs(diff), self.alpha * 100)
else:
gamma = _weighted_percentile(np.abs(diff), sample_weight, self.alpha * 100)
gamma_mask = np.abs(diff) <= gamma
residual = np.zeros((y.shape[0],), dtype=np.float64)
residual[gamma_mask] = diff[gamma_mask]
residual[~gamma_mask] = gamma * np.sign(diff[~gamma_mask])
self.gamma = gamma
return residual
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
terminal_region = np.where(terminal_regions == leaf)[0]
sample_weight = sample_weight.take(terminal_region, axis=0)
gamma = self.gamma
diff = (y.take(terminal_region, axis=0)
- pred.take(terminal_region, axis=0))
median = _weighted_percentile(diff, sample_weight, percentile=50)
diff_minus_median = diff - median
tree.value[leaf, 0] = median + np.mean(
np.sign(diff_minus_median) *
np.minimum(np.abs(diff_minus_median), gamma))
class QuantileLossFunction(RegressionLossFunction):
"""Loss function for quantile regression.
    Quantile regression allows estimating the percentiles
of the conditional distribution of the target.
"""
def __init__(self, n_classes, alpha=0.9):
super(QuantileLossFunction, self).__init__(n_classes)
assert 0 < alpha < 1.0
self.alpha = alpha
self.percentile = alpha * 100.0
def init_estimator(self):
return QuantileEstimator(self.alpha)
def __call__(self, y, pred, sample_weight=None):
pred = pred.ravel()
diff = y - pred
alpha = self.alpha
mask = y > pred
if sample_weight is None:
            loss = (alpha * diff[mask].sum() -
(1.0 - alpha) * diff[~mask].sum()) / y.shape[0]
else:
            loss = ((alpha * np.sum(sample_weight[mask] * diff[mask]) -
(1.0 - alpha) * np.sum(sample_weight[~mask] * diff[~mask])) /
sample_weight.sum())
return loss
def negative_gradient(self, y, pred, **kargs):
alpha = self.alpha
pred = pred.ravel()
mask = y > pred
return (alpha * mask) - ((1.0 - alpha) * ~mask)
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
terminal_region = np.where(terminal_regions == leaf)[0]
diff = (y.take(terminal_region, axis=0)
- pred.take(terminal_region, axis=0))
sample_weight = sample_weight.take(terminal_region, axis=0)
val = _weighted_percentile(diff, sample_weight, self.percentile)
tree.value[leaf, 0] = val
class ClassificationLossFunction(six.with_metaclass(ABCMeta, LossFunction)):
"""Base class for classification loss functions. """
def _score_to_proba(self, score):
"""Template method to convert scores to probabilities.
        If the loss does not support probabilities, a TypeError is raised.
"""
raise TypeError('%s does not support predict_proba' % type(self).__name__)
@abstractmethod
def _score_to_decision(self, score):
"""Template method to convert scores to decisions.
Returns int arrays.
"""
class BinomialDeviance(ClassificationLossFunction):
"""Binomial deviance loss function for binary classification.
Binary classification is a special case; here, we only need to
fit one tree instead of ``n_classes`` trees.
"""
def __init__(self, n_classes):
if n_classes != 2:
raise ValueError("{0:s} requires 2 classes.".format(
self.__class__.__name__))
# we only need to fit one tree for binary clf.
super(BinomialDeviance, self).__init__(1)
def init_estimator(self):
return LogOddsEstimator()
def __call__(self, y, pred, sample_weight=None):
"""Compute the deviance (= 2 * negative log-likelihood). """
# logaddexp(0, v) == log(1.0 + exp(v))
pred = pred.ravel()
if sample_weight is None:
return -2.0 * np.mean((y * pred) - np.logaddexp(0.0, pred))
else:
return (-2.0 / sample_weight.sum() *
np.sum(sample_weight * ((y * pred) - np.logaddexp(0.0, pred))))
def negative_gradient(self, y, pred, **kargs):
"""Compute the residual (= negative gradient). """
return y - expit(pred.ravel())
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""Make a single Newton-Raphson step.
our node estimate is given by:
sum(w * (y - prob)) / sum(w * prob * (1 - prob))
we take advantage that: y - prob = residual
"""
terminal_region = np.where(terminal_regions == leaf)[0]
residual = residual.take(terminal_region, axis=0)
y = y.take(terminal_region, axis=0)
sample_weight = sample_weight.take(terminal_region, axis=0)
numerator = np.sum(sample_weight * residual)
denominator = np.sum(sample_weight * (y - residual) * (1 - y + residual))
if denominator == 0.0:
tree.value[leaf, 0, 0] = 0.0
else:
tree.value[leaf, 0, 0] = numerator / denominator
def _score_to_proba(self, score):
proba = np.ones((score.shape[0], 2), dtype=np.float64)
proba[:, 1] = expit(score.ravel())
proba[:, 0] -= proba[:, 1]
return proba
def _score_to_decision(self, score):
proba = self._score_to_proba(score)
return np.argmax(proba, axis=1)
class MultinomialDeviance(ClassificationLossFunction):
"""Multinomial deviance loss function for multi-class classification.
For multi-class classification we need to fit ``n_classes`` trees at
each stage.
"""
is_multi_class = True
def __init__(self, n_classes):
if n_classes < 3:
raise ValueError("{0:s} requires more than 2 classes.".format(
self.__class__.__name__))
super(MultinomialDeviance, self).__init__(n_classes)
def init_estimator(self):
return PriorProbabilityEstimator()
def __call__(self, y, pred, sample_weight=None):
# create one-hot label encoding
Y = np.zeros((y.shape[0], self.K), dtype=np.float64)
for k in range(self.K):
Y[:, k] = y == k
if sample_weight is None:
return np.sum(-1 * (Y * pred).sum(axis=1) +
logsumexp(pred, axis=1))
else:
            return np.sum(sample_weight * (-1 * (Y * pred).sum(axis=1) +
                                           logsumexp(pred, axis=1)))
def negative_gradient(self, y, pred, k=0, **kwargs):
"""Compute negative gradient for the ``k``-th class. """
return y - np.nan_to_num(np.exp(pred[:, k] -
logsumexp(pred, axis=1)))
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""Make a single Newton-Raphson step. """
terminal_region = np.where(terminal_regions == leaf)[0]
residual = residual.take(terminal_region, axis=0)
y = y.take(terminal_region, axis=0)
sample_weight = sample_weight.take(terminal_region, axis=0)
numerator = np.sum(sample_weight * residual)
numerator *= (self.K - 1) / self.K
denominator = np.sum(sample_weight * (y - residual) *
(1.0 - y + residual))
if denominator == 0.0:
tree.value[leaf, 0, 0] = 0.0
else:
tree.value[leaf, 0, 0] = numerator / denominator
def _score_to_proba(self, score):
return np.nan_to_num(
np.exp(score - (logsumexp(score, axis=1)[:, np.newaxis])))
def _score_to_decision(self, score):
proba = self._score_to_proba(score)
return np.argmax(proba, axis=1)
class ExponentialLoss(ClassificationLossFunction):
"""Exponential loss function for binary classification.
Same loss as AdaBoost.
References
----------
Greg Ridgeway, Generalized Boosted Models: A guide to the gbm package, 2007
"""
def __init__(self, n_classes):
if n_classes != 2:
raise ValueError("{0:s} requires 2 classes.".format(
self.__class__.__name__))
# we only need to fit one tree for binary clf.
super(ExponentialLoss, self).__init__(1)
def init_estimator(self):
return ScaledLogOddsEstimator()
def __call__(self, y, pred, sample_weight=None):
pred = pred.ravel()
if sample_weight is None:
return np.mean(np.exp(-(2. * y - 1.) * pred))
else:
return (1.0 / sample_weight.sum() *
np.sum(sample_weight * np.exp(-(2 * y - 1) * pred)))
def negative_gradient(self, y, pred, **kargs):
y_ = -(2. * y - 1.)
return y_ * np.exp(y_ * pred.ravel())
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
terminal_region = np.where(terminal_regions == leaf)[0]
pred = pred.take(terminal_region, axis=0)
y = y.take(terminal_region, axis=0)
sample_weight = sample_weight.take(terminal_region, axis=0)
y_ = 2. * y - 1.
numerator = np.sum(y_ * sample_weight * np.exp(-y_ * pred))
denominator = np.sum(sample_weight * np.exp(-y_ * pred))
if denominator == 0.0:
tree.value[leaf, 0, 0] = 0.0
else:
tree.value[leaf, 0, 0] = numerator / denominator
def _score_to_proba(self, score):
proba = np.ones((score.shape[0], 2), dtype=np.float64)
proba[:, 1] = expit(2.0 * score.ravel())
proba[:, 0] -= proba[:, 1]
return proba
def _score_to_decision(self, score):
return (score.ravel() >= 0.0).astype(np.int)
LOSS_FUNCTIONS = {'ls': LeastSquaresError,
'lad': LeastAbsoluteError,
'huber': HuberLossFunction,
'quantile': QuantileLossFunction,
'deviance': None, # for both, multinomial and binomial
'exponential': ExponentialLoss,
}
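# Note: 'deviance' is resolved to ``BinomialDeviance`` or ``MultinomialDeviance``
# at fit time in ``BaseGradientBoosting._check_params``, depending on the number
# of classes, which is why it has no concrete entry of its own here.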
INIT_ESTIMATORS = {'zero': ZeroEstimator}
class VerboseReporter(object):
"""Reports verbose output to stdout.
    If ``verbose==1`` output is printed once in a while (when iteration mod
    verbose_mod is zero); if larger than 1 then output is printed for
    each update.
"""
def __init__(self, verbose):
self.verbose = verbose
def init(self, est, begin_at_stage=0):
# header fields and line format str
header_fields = ['Iter', 'Train Loss']
verbose_fmt = ['{iter:>10d}', '{train_score:>16.4f}']
# do oob?
if est.subsample < 1:
header_fields.append('OOB Improve')
verbose_fmt.append('{oob_impr:>16.4f}')
header_fields.append('Remaining Time')
verbose_fmt.append('{remaining_time:>16s}')
# print the header line
print(('%10s ' + '%16s ' *
(len(header_fields) - 1)) % tuple(header_fields))
self.verbose_fmt = ' '.join(verbose_fmt)
# plot verbose info each time i % verbose_mod == 0
self.verbose_mod = 1
self.start_time = time()
self.begin_at_stage = begin_at_stage
def update(self, j, est):
"""Update reporter with new iteration. """
do_oob = est.subsample < 1
# we need to take into account if we fit additional estimators.
i = j - self.begin_at_stage # iteration relative to the start iter
if (i + 1) % self.verbose_mod == 0:
oob_impr = est.oob_improvement_[j] if do_oob else 0
remaining_time = ((est.n_estimators - (j + 1)) *
(time() - self.start_time) / float(i + 1))
if remaining_time > 60:
remaining_time = '{0:.2f}m'.format(remaining_time / 60.0)
else:
remaining_time = '{0:.2f}s'.format(remaining_time)
print(self.verbose_fmt.format(iter=j + 1,
train_score=est.train_score_[j],
oob_impr=oob_impr,
remaining_time=remaining_time))
if self.verbose == 1 and ((i + 1) // (self.verbose_mod * 10) > 0):
# adjust verbose frequency (powers of 10)
self.verbose_mod *= 10
class BaseGradientBoosting(six.with_metaclass(ABCMeta, BaseEnsemble,
_LearntSelectorMixin)):
"""Abstract base class for Gradient Boosting. """
@abstractmethod
def __init__(self, loss, learning_rate, n_estimators, min_samples_split,
min_samples_leaf, min_weight_fraction_leaf,
max_depth, init, subsample, max_features,
random_state, alpha=0.9, verbose=0, max_leaf_nodes=None,
warm_start=False):
self.n_estimators = n_estimators
self.learning_rate = learning_rate
self.loss = loss
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.subsample = subsample
self.max_features = max_features
self.max_depth = max_depth
self.init = init
self.random_state = random_state
self.alpha = alpha
self.verbose = verbose
self.max_leaf_nodes = max_leaf_nodes
self.warm_start = warm_start
self.estimators_ = np.empty((0, 0), dtype=np.object)
def _fit_stage(self, i, X, y, y_pred, sample_weight, sample_mask,
criterion, splitter, random_state):
"""Fit another stage of ``n_classes_`` trees to the boosting model. """
assert sample_mask.dtype == np.bool
loss = self.loss_
original_y = y
for k in range(loss.K):
if loss.is_multi_class:
y = np.array(original_y == k, dtype=np.float64)
residual = loss.negative_gradient(y, y_pred, k=k,
sample_weight=sample_weight)
# induce regression tree on residuals
tree = DecisionTreeRegressor(
criterion=criterion,
splitter=splitter,
max_depth=self.max_depth,
min_samples_split=self.min_samples_split,
min_samples_leaf=self.min_samples_leaf,
min_weight_fraction_leaf=self.min_weight_fraction_leaf,
max_features=self.max_features,
max_leaf_nodes=self.max_leaf_nodes,
random_state=random_state)
if self.subsample < 1.0:
# no inplace multiplication!
sample_weight = sample_weight * sample_mask.astype(np.float64)
tree.fit(X, residual, sample_weight=sample_weight,
check_input=False)
# update tree leaves
loss.update_terminal_regions(tree.tree_, X, y, residual, y_pred,
sample_weight, sample_mask,
self.learning_rate, k=k)
# add tree to ensemble
self.estimators_[i, k] = tree
return y_pred
def _check_params(self):
"""Check validity of parameters and raise ValueError if not valid. """
if self.n_estimators <= 0:
raise ValueError("n_estimators must be greater than 0 but "
"was %r" % self.n_estimators)
if self.learning_rate <= 0.0:
raise ValueError("learning_rate must be greater than 0 but "
"was %r" % self.learning_rate)
if (self.loss not in self._SUPPORTED_LOSS
or self.loss not in LOSS_FUNCTIONS):
raise ValueError("Loss '{0:s}' not supported. ".format(self.loss))
if self.loss == 'deviance':
loss_class = (MultinomialDeviance
if len(self.classes_) > 2
else BinomialDeviance)
else:
loss_class = LOSS_FUNCTIONS[self.loss]
if self.loss in ('huber', 'quantile'):
self.loss_ = loss_class(self.n_classes_, self.alpha)
else:
self.loss_ = loss_class(self.n_classes_)
if not (0.0 < self.subsample <= 1.0):
raise ValueError("subsample must be in (0,1] but "
"was %r" % self.subsample)
if self.init is not None:
if isinstance(self.init, six.string_types):
if self.init not in INIT_ESTIMATORS:
raise ValueError('init="%s" is not supported' % self.init)
else:
if (not hasattr(self.init, 'fit')
or not hasattr(self.init, 'predict')):
raise ValueError("init=%r must be valid BaseEstimator "
"and support both fit and "
"predict" % self.init)
if not (0.0 < self.alpha < 1.0):
raise ValueError("alpha must be in (0.0, 1.0) but "
"was %r" % self.alpha)
if isinstance(self.max_features, six.string_types):
if self.max_features == "auto":
# if is_classification
if self.n_classes_ > 1:
max_features = max(1, int(np.sqrt(self.n_features)))
else:
# is regression
max_features = self.n_features
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features)))
else:
raise ValueError("Invalid value for max_features: %r. "
"Allowed string values are 'auto', 'sqrt' "
"or 'log2'." % self.max_features)
elif self.max_features is None:
max_features = self.n_features
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
if 0. < self.max_features <= 1.:
max_features = max(int(self.max_features * self.n_features), 1)
else:
raise ValueError("max_features must be in (0, n_features]")
self.max_features_ = max_features
def _init_state(self):
"""Initialize model state and allocate model state data structures. """
if self.init is None:
self.init_ = self.loss_.init_estimator()
elif isinstance(self.init, six.string_types):
self.init_ = INIT_ESTIMATORS[self.init]()
else:
self.init_ = self.init
self.estimators_ = np.empty((self.n_estimators, self.loss_.K),
dtype=np.object)
self.train_score_ = np.zeros((self.n_estimators,), dtype=np.float64)
# do oob?
if self.subsample < 1.0:
self.oob_improvement_ = np.zeros((self.n_estimators),
dtype=np.float64)
def _clear_state(self):
"""Clear the state of the gradient boosting model. """
if hasattr(self, 'estimators_'):
self.estimators_ = np.empty((0, 0), dtype=np.object)
if hasattr(self, 'train_score_'):
del self.train_score_
if hasattr(self, 'oob_improvement_'):
del self.oob_improvement_
if hasattr(self, 'init_'):
del self.init_
def _resize_state(self):
"""Add additional ``n_estimators`` entries to all attributes. """
# self.n_estimators is the number of additional est to fit
total_n_estimators = self.n_estimators
if total_n_estimators < self.estimators_.shape[0]:
            raise ValueError('resize with smaller n_estimators %d < %d' %
                             (total_n_estimators, self.estimators_.shape[0]))
self.estimators_.resize((total_n_estimators, self.loss_.K))
self.train_score_.resize(total_n_estimators)
if (self.subsample < 1 or hasattr(self, 'oob_improvement_')):
# if do oob resize arrays or create new if not available
if hasattr(self, 'oob_improvement_'):
self.oob_improvement_.resize(total_n_estimators)
else:
self.oob_improvement_ = np.zeros((total_n_estimators,),
dtype=np.float64)
def _is_initialized(self):
return len(getattr(self, 'estimators_', [])) > 0
def fit(self, X, y, sample_weight=None, monitor=None):
"""Fit the gradient boosting model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape = [n_samples]
Target values (integers in classification, real numbers in
regression)
For classification, labels must correspond to classes.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
monitor : callable, optional
The monitor is called after each iteration with the current
iteration, a reference to the estimator and the local variables of
``_fit_stages`` as keyword arguments ``callable(i, self,
locals())``. If the callable returns ``True`` the fitting procedure
is stopped. The monitor can be used for various things such as
            computing held-out estimates, early stopping, model introspection,
            and snapshotting.
Returns
-------
self : object
Returns self.
"""
# if not warmstart - clear the estimator state
if not self.warm_start:
self._clear_state()
# Check input
X, y = check_X_y(X, y, dtype=DTYPE)
n_samples, self.n_features = X.shape
if sample_weight is None:
sample_weight = np.ones(n_samples, dtype=np.float32)
else:
sample_weight = column_or_1d(sample_weight, warn=True)
check_consistent_length(X, y, sample_weight)
y = self._validate_y(y)
random_state = check_random_state(self.random_state)
self._check_params()
if not self._is_initialized():
# init state
self._init_state()
# fit initial model - FIXME make sample_weight optional
self.init_.fit(X, y, sample_weight)
# init predictions
y_pred = self.init_.predict(X)
begin_at_stage = 0
else:
# add more estimators to fitted model
# invariant: warm_start = True
if self.n_estimators < self.estimators_.shape[0]:
raise ValueError('n_estimators=%d must be larger or equal to '
'estimators_.shape[0]=%d when '
'warm_start==True'
% (self.n_estimators,
self.estimators_.shape[0]))
begin_at_stage = self.estimators_.shape[0]
y_pred = self._decision_function(X)
self._resize_state()
# fit the boosting stages
n_stages = self._fit_stages(X, y, y_pred, sample_weight, random_state,
begin_at_stage, monitor)
# change shape of arrays after fit (early-stopping or additional ests)
if n_stages != self.estimators_.shape[0]:
self.estimators_ = self.estimators_[:n_stages]
self.train_score_ = self.train_score_[:n_stages]
if hasattr(self, 'oob_improvement_'):
self.oob_improvement_ = self.oob_improvement_[:n_stages]
return self
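    # Illustrative sketch (not part of the library code): the ``monitor``
    # callable documented above receives ``(i, self, locals())`` and can
    # implement simple early stopping by returning True.  A hypothetical
    # example that stops after 50 stages:
    #
    #   >>> def monitor(i, est, locals_):
    #   ...     return i >= 49
    #   >>> # est.fit(X_train, y_train, monitor=monitor)   # doctest: +SKIP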
def _fit_stages(self, X, y, y_pred, sample_weight, random_state,
begin_at_stage=0, monitor=None):
"""Iteratively fits the stages.
For each stage it computes the progress (OOB, train score)
and delegates to ``_fit_stage``.
Returns the number of stages fit; might differ from ``n_estimators``
due to early stopping.
"""
n_samples = X.shape[0]
do_oob = self.subsample < 1.0
sample_mask = np.ones((n_samples, ), dtype=np.bool)
n_inbag = max(1, int(self.subsample * n_samples))
loss_ = self.loss_
# Set min_weight_leaf from min_weight_fraction_leaf
if self.min_weight_fraction_leaf != 0. and sample_weight is not None:
min_weight_leaf = (self.min_weight_fraction_leaf *
np.sum(sample_weight))
else:
min_weight_leaf = 0.
# init criterion and splitter
criterion = FriedmanMSE(1)
splitter = PresortBestSplitter(criterion,
self.max_features_,
self.min_samples_leaf,
min_weight_leaf,
random_state)
if self.verbose:
verbose_reporter = VerboseReporter(self.verbose)
verbose_reporter.init(self, begin_at_stage)
# perform boosting iterations
i = begin_at_stage
for i in range(begin_at_stage, self.n_estimators):
# subsampling
if do_oob:
sample_mask = _random_sample_mask(n_samples, n_inbag,
random_state)
# OOB score before adding this stage
old_oob_score = loss_(y[~sample_mask],
y_pred[~sample_mask],
sample_weight[~sample_mask])
# fit next stage of trees
y_pred = self._fit_stage(i, X, y, y_pred, sample_weight,
sample_mask, criterion, splitter,
random_state)
# track deviance (= loss)
if do_oob:
self.train_score_[i] = loss_(y[sample_mask],
y_pred[sample_mask],
sample_weight[sample_mask])
self.oob_improvement_[i] = (
old_oob_score - loss_(y[~sample_mask],
y_pred[~sample_mask],
sample_weight[~sample_mask]))
else:
# no need to fancy index w/ no subsampling
self.train_score_[i] = loss_(y, y_pred, sample_weight)
if self.verbose > 0:
verbose_reporter.update(i, self)
if monitor is not None:
early_stopping = monitor(i, self, locals())
if early_stopping:
break
return i + 1
def _make_estimator(self, append=True):
# we don't need _make_estimator
raise NotImplementedError()
def _init_decision_function(self, X):
"""Check input and compute prediction of ``init``. """
if self.estimators_ is None or len(self.estimators_) == 0:
            raise NotFittedError("Estimator not fitted, call `fit`"
                                 " before making predictions.")
if X.shape[1] != self.n_features:
raise ValueError("X.shape[1] should be {0:d}, not {1:d}.".format(
self.n_features, X.shape[1]))
score = self.init_.predict(X).astype(np.float64)
return score
def _decision_function(self, X):
# for use in inner loop, not raveling the output in single-class case,
# not doing input validation.
score = self._init_decision_function(X)
predict_stages(self.estimators_, X, self.learning_rate, score)
return score
@deprecated(" and will be removed in 0.19")
def decision_function(self, X):
"""Compute the decision function of ``X``.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : array, shape = [n_samples, n_classes] or [n_samples]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification produce an array of shape
[n_samples].
"""
X = check_array(X, dtype=DTYPE, order="C")
score = self._decision_function(X)
if score.shape[1] == 1:
return score.ravel()
return score
def _staged_decision_function(self, X):
"""Compute decision function of ``X`` for each iteration.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification are special cases with
``k == 1``, otherwise ``k==n_classes``.
"""
X = check_array(X, dtype=DTYPE, order="C")
score = self._init_decision_function(X)
for i in range(self.estimators_.shape[0]):
predict_stage(self.estimators_, i, X, self.learning_rate, score)
yield score.copy()
@deprecated(" and will be removed in 0.19")
def staged_decision_function(self, X):
"""Compute decision function of ``X`` for each iteration.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification are special cases with
``k == 1``, otherwise ``k==n_classes``.
"""
for dec in self._staged_decision_function(X):
# no yield from in Python2.X
yield dec
@property
def feature_importances_(self):
"""Return the feature importances (the higher, the more important the
feature).
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise NotFittedError("Estimator not fitted, call `fit` before"
" `feature_importances_`.")
total_sum = np.zeros((self.n_features, ), dtype=np.float64)
for stage in self.estimators_:
stage_sum = sum(tree.feature_importances_
for tree in stage) / len(stage)
total_sum += stage_sum
importances = total_sum / len(self.estimators_)
return importances
def _validate_y(self, y):
self.n_classes_ = 1
if y.dtype.kind == 'O':
y = y.astype(np.float64)
# Default implementation
return y
class GradientBoostingClassifier(BaseGradientBoosting, ClassifierMixin):
"""Gradient Boosting for classification.
GB builds an additive model in a
forward stage-wise fashion; it allows for the optimization of
arbitrary differentiable loss functions. In each stage ``n_classes_``
regression trees are fit on the negative gradient of the
binomial or multinomial deviance loss function. Binary classification
is a special case where only a single regression tree is induced.
Read more in the :ref:`User Guide <gradient_boosting>`.
Parameters
----------
loss : {'deviance', 'exponential'}, optional (default='deviance')
loss function to be optimized. 'deviance' refers to
deviance (= logistic regression) for classification
with probabilistic outputs. For loss 'exponential' gradient
boosting recovers the AdaBoost algorithm.
learning_rate : float, optional (default=0.1)
learning rate shrinks the contribution of each tree by `learning_rate`.
There is a trade-off between learning_rate and n_estimators.
n_estimators : int (default=100)
The number of boosting stages to perform. Gradient boosting
is fairly robust to over-fitting so a large number usually
results in better performance.
max_depth : integer, optional (default=3)
maximum depth of the individual regression estimators. The maximum
depth limits the number of nodes in the tree. Tune this parameter
for best performance; the best value depends on the interaction
of the input variables.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
subsample : float, optional (default=1.0)
The fraction of samples to be used for fitting the individual base
learners. If smaller than 1.0 this results in Stochastic Gradient
Boosting. `subsample` interacts with the parameter `n_estimators`.
Choosing `subsample < 1.0` leads to a reduction of variance
and an increase in bias.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Choosing `max_features < n_features` leads to a reduction of variance
and an increase in bias.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
init : BaseEstimator, None, optional (default=None)
An estimator object that is used to compute the initial
predictions. ``init`` has to provide ``fit`` and ``predict``.
If None it uses ``loss.init_estimator``.
verbose : int, default: 0
Enable verbose output. If 1 then it prints progress and performance
once in a while (the more trees the lower the frequency). If greater
than 1 then it prints progress and performance for every tree.
warm_start : bool, default: False
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just erase the
previous solution.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
feature_importances_ : array, shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_improvement_ : array, shape = [n_estimators]
The improvement in loss (= deviance) on the out-of-bag samples
relative to the previous iteration.
``oob_improvement_[0]`` is the improvement in
loss of the first stage over the ``init`` estimator.
train_score_ : array, shape = [n_estimators]
The i-th score ``train_score_[i]`` is the deviance (= loss) of the
model at iteration ``i`` on the in-bag sample.
If ``subsample == 1`` this is the deviance on the training data.
loss_ : LossFunction
The concrete ``LossFunction`` object.
init : BaseEstimator
The estimator that provides the initial predictions.
Set via the ``init`` argument or ``loss.init_estimator``.
estimators_ : ndarray of DecisionTreeRegressor, shape = [n_estimators, loss_.K]
The collection of fitted sub-estimators. ``loss_.K`` is 1 for binary
classification, otherwise n_classes.
See also
--------
sklearn.tree.DecisionTreeClassifier, RandomForestClassifier
AdaBoostClassifier
References
----------
J. Friedman, Greedy Function Approximation: A Gradient Boosting
Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
J. Friedman, Stochastic Gradient Boosting, 1999
T. Hastie, R. Tibshirani and J. Friedman.
Elements of Statistical Learning Ed. 2, Springer, 2009.
"""
_SUPPORTED_LOSS = ('deviance', 'exponential')
def __init__(self, loss='deviance', learning_rate=0.1, n_estimators=100,
subsample=1.0, min_samples_split=2,
min_samples_leaf=1, min_weight_fraction_leaf=0.,
max_depth=3, init=None, random_state=None,
max_features=None, verbose=0,
max_leaf_nodes=None, warm_start=False):
super(GradientBoostingClassifier, self).__init__(
loss=loss, learning_rate=learning_rate, n_estimators=n_estimators,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_depth=max_depth, init=init, subsample=subsample,
max_features=max_features,
random_state=random_state, verbose=verbose,
max_leaf_nodes=max_leaf_nodes, warm_start=warm_start)
def _validate_y(self, y):
self.classes_, y = np.unique(y, return_inverse=True)
self.n_classes_ = len(self.classes_)
return y
def decision_function(self, X):
"""Compute the decision function of ``X``.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : array, shape = [n_samples, n_classes] or [n_samples]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification produce an array of shape
[n_samples].
"""
X = check_array(X, dtype=DTYPE, order="C")
score = self._decision_function(X)
if score.shape[1] == 1:
return score.ravel()
return score
def staged_decision_function(self, X):
"""Compute decision function of ``X`` for each iteration.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification are special cases with
``k == 1``, otherwise ``k==n_classes``.
"""
for dec in self._staged_decision_function(X):
# no yield from in Python2.X
yield dec
def predict(self, X):
"""Predict class for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
        y : array of shape = [n_samples]
            The predicted values.
"""
score = self.decision_function(X)
decisions = self.loss_._score_to_decision(score)
return self.classes_.take(decisions, axis=0)
def staged_predict(self, X):
"""Predict class at each stage for X.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : generator of array of shape = [n_samples]
The predicted value of the input samples.
"""
for score in self._staged_decision_function(X):
decisions = self.loss_._score_to_decision(score)
yield self.classes_.take(decisions, axis=0)
def predict_proba(self, X):
"""Predict class probabilities for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Raises
------
AttributeError
If the ``loss`` does not support probabilities.
Returns
-------
        p : array of shape = [n_samples, n_classes]
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
score = self.decision_function(X)
try:
return self.loss_._score_to_proba(score)
except NotFittedError:
raise
except AttributeError:
raise AttributeError('loss=%r does not support predict_proba' %
self.loss)
def predict_log_proba(self, X):
"""Predict class log-probabilities for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Raises
------
AttributeError
If the ``loss`` does not support probabilities.
Returns
-------
        p : array of shape = [n_samples, n_classes]
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
return np.log(proba)
def staged_predict_proba(self, X):
"""Predict class probabilities at each stage for X.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
        y : generator of array of shape = [n_samples, n_classes]
            The predicted class probabilities of the input samples.
"""
try:
for score in self._staged_decision_function(X):
yield self.loss_._score_to_proba(score)
except NotFittedError:
raise
except AttributeError:
raise AttributeError('loss=%r does not support predict_proba' %
self.loss)
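# Illustrative usage sketch (not part of the library code; assumes the toy
# dataset helper ``make_hastie_10_2`` from ``sklearn.datasets``):
#
#   >>> from sklearn.datasets import make_hastie_10_2
#   >>> X, y = make_hastie_10_2(random_state=0)
#   >>> clf = GradientBoostingClassifier(n_estimators=100, learning_rate=1.0,
#   ...                                  max_depth=1, random_state=0)
#   >>> clf = clf.fit(X[:2000], y[:2000])
#   >>> clf.score(X[2000:], y[2000:])          # doctest: +SKIP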
class GradientBoostingRegressor(BaseGradientBoosting, RegressorMixin):
"""Gradient Boosting for regression.
GB builds an additive model in a forward stage-wise fashion;
it allows for the optimization of arbitrary differentiable loss functions.
In each stage a regression tree is fit on the negative gradient of the
given loss function.
Read more in the :ref:`User Guide <gradient_boosting>`.
Parameters
----------
loss : {'ls', 'lad', 'huber', 'quantile'}, optional (default='ls')
loss function to be optimized. 'ls' refers to least squares
regression. 'lad' (least absolute deviation) is a highly robust
loss function solely based on order information of the input
variables. 'huber' is a combination of the two. 'quantile'
allows quantile regression (use `alpha` to specify the quantile).
learning_rate : float, optional (default=0.1)
learning rate shrinks the contribution of each tree by `learning_rate`.
There is a trade-off between learning_rate and n_estimators.
n_estimators : int (default=100)
The number of boosting stages to perform. Gradient boosting
is fairly robust to over-fitting so a large number usually
results in better performance.
max_depth : integer, optional (default=3)
maximum depth of the individual regression estimators. The maximum
depth limits the number of nodes in the tree. Tune this parameter
for best performance; the best value depends on the interaction
of the input variables.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
subsample : float, optional (default=1.0)
The fraction of samples to be used for fitting the individual base
learners. If smaller than 1.0 this results in Stochastic Gradient
Boosting. `subsample` interacts with the parameter `n_estimators`.
Choosing `subsample < 1.0` leads to a reduction of variance
and an increase in bias.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Choosing `max_features < n_features` leads to a reduction of variance
and an increase in bias.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
alpha : float (default=0.9)
The alpha-quantile of the huber loss function and the quantile
loss function. Only if ``loss='huber'`` or ``loss='quantile'``.
init : BaseEstimator, None, optional (default=None)
An estimator object that is used to compute the initial
predictions. ``init`` has to provide ``fit`` and ``predict``.
If None it uses ``loss.init_estimator``.
verbose : int, default: 0
Enable verbose output. If 1 then it prints progress and performance
once in a while (the more trees the lower the frequency). If greater
than 1 then it prints progress and performance for every tree.
warm_start : bool, default: False
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just erase the
previous solution.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
feature_importances_ : array, shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_improvement_ : array, shape = [n_estimators]
The improvement in loss (= deviance) on the out-of-bag samples
relative to the previous iteration.
``oob_improvement_[0]`` is the improvement in
loss of the first stage over the ``init`` estimator.
train_score_ : array, shape = [n_estimators]
The i-th score ``train_score_[i]`` is the deviance (= loss) of the
model at iteration ``i`` on the in-bag sample.
If ``subsample == 1`` this is the deviance on the training data.
loss_ : LossFunction
The concrete ``LossFunction`` object.
    init : BaseEstimator
The estimator that provides the initial predictions.
Set via the ``init`` argument or ``loss.init_estimator``.
estimators_ : ndarray of DecisionTreeRegressor, shape = [n_estimators, 1]
The collection of fitted sub-estimators.
See also
--------
DecisionTreeRegressor, RandomForestRegressor
References
----------
J. Friedman, Greedy Function Approximation: A Gradient Boosting
Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
J. Friedman, Stochastic Gradient Boosting, 1999
T. Hastie, R. Tibshirani and J. Friedman.
Elements of Statistical Learning Ed. 2, Springer, 2009.
"""
_SUPPORTED_LOSS = ('ls', 'lad', 'huber', 'quantile')
def __init__(self, loss='ls', learning_rate=0.1, n_estimators=100,
subsample=1.0, min_samples_split=2,
min_samples_leaf=1, min_weight_fraction_leaf=0.,
max_depth=3, init=None, random_state=None,
max_features=None, alpha=0.9, verbose=0, max_leaf_nodes=None,
warm_start=False):
super(GradientBoostingRegressor, self).__init__(
loss=loss, learning_rate=learning_rate, n_estimators=n_estimators,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_depth=max_depth, init=init, subsample=subsample,
max_features=max_features,
random_state=random_state, alpha=alpha, verbose=verbose,
max_leaf_nodes=max_leaf_nodes, warm_start=warm_start)
def predict(self, X):
"""Predict regression target for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : array of shape = [n_samples]
The predicted values.
"""
X = check_array(X, dtype=DTYPE, order="C")
return self._decision_function(X).ravel()
def staged_predict(self, X):
"""Predict regression target at each stage for X.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : generator of array of shape = [n_samples]
The predicted value of the input samples.
"""
for y in self._staged_decision_function(X):
yield y.ravel()
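# Illustrative usage sketch (not part of the library code; assumes the toy
# dataset helper ``make_friedman1`` from ``sklearn.datasets``):
#
#   >>> from sklearn.datasets import make_friedman1
#   >>> X, y = make_friedman1(n_samples=1200, random_state=0, noise=1.0)
#   >>> est = GradientBoostingRegressor(n_estimators=100, learning_rate=0.1,
#   ...                                 max_depth=1, random_state=0, loss='ls')
#   >>> est = est.fit(X[:200], y[:200])
#   >>> est.predict(X[200:201])                # doctest: +SKIP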
| bsd-3-clause |
mhue/scikit-learn | examples/tree/plot_tree_regression_multioutput.py | 206 | 1800 | """
===================================================================
Multi-output Decision Tree Regression
===================================================================
An example to illustrate multi-output regression with a decision tree.
A :ref:`decision tree <tree>`
is used to predict simultaneously the noisy x and y observations of a circle
given a single underlying feature. As a result, it learns local linear
regressions approximating the circle.
We can see that if the maximum depth of the tree (controlled by the
`max_depth` parameter) is set too high, the decision trees learn overly fine
details of the training data and learn from the noise, i.e. they overfit.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
# Create a random dataset
rng = np.random.RandomState(1)
X = np.sort(200 * rng.rand(100, 1) - 100, axis=0)
y = np.array([np.pi * np.sin(X).ravel(), np.pi * np.cos(X).ravel()]).T
y[::5, :] += (0.5 - rng.rand(20, 2))
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=2)
regr_2 = DecisionTreeRegressor(max_depth=5)
regr_3 = DecisionTreeRegressor(max_depth=8)
regr_1.fit(X, y)
regr_2.fit(X, y)
regr_3.fit(X, y)
# Predict
X_test = np.arange(-100.0, 100.0, 0.01)[:, np.newaxis]
y_1 = regr_1.predict(X_test)
y_2 = regr_2.predict(X_test)
y_3 = regr_3.predict(X_test)
# Plot the results
plt.figure()
plt.scatter(y[:, 0], y[:, 1], c="k", label="data")
plt.scatter(y_1[:, 0], y_1[:, 1], c="g", label="max_depth=2")
plt.scatter(y_2[:, 0], y_2[:, 1], c="r", label="max_depth=5")
plt.scatter(y_3[:, 0], y_3[:, 1], c="b", label="max_depth=8")
plt.xlim([-6, 6])
plt.ylim([-6, 6])
plt.xlabel("data")
plt.ylabel("target")
plt.title("Multi-output Decision Tree Regression")
plt.legend()
plt.show()
| bsd-3-clause |
amolkahat/pandas | pandas/tests/tslibs/test_api.py | 5 | 1034 | # -*- coding: utf-8 -*-
"""Tests that the tslibs API is locked down"""
from pandas._libs import tslibs
def test_namespace():
submodules = ['ccalendar',
'conversion',
'fields',
'frequencies',
'nattype',
'np_datetime',
'offsets',
'parsing',
'period',
'resolution',
'strptime',
'timedeltas',
'timestamps',
'timezones']
api = ['NaT',
'iNaT',
'OutOfBoundsDatetime',
'Period',
'IncompatibleFrequency',
'Timedelta',
'Timestamp',
'delta_to_nanoseconds',
'ints_to_pytimedelta',
'localize_pydatetime',
'normalize_date',
'tz_convert_single']
expected = set(submodules + api)
names = [x for x in dir(tslibs) if not x.startswith('__')]
assert set(names) == expected
| bsd-3-clause |
russel1237/scikit-learn | examples/model_selection/plot_precision_recall.py | 249 | 6150 | """
================
Precision-Recall
================
Example of Precision-Recall metric to evaluate classifier output quality.
In information retrieval, precision is a measure of result relevancy, while
recall is a measure of how many truly relevant results are returned. A high
area under the curve represents both high recall and high precision, where high
precision relates to a low false positive rate, and high recall relates to a
low false negative rate. High scores for both show that the classifier is
returning accurate results (high precision), as well as returning a majority of
all positive results (high recall).
A system with high recall but low precision returns many results, but most of
its predicted labels are incorrect when compared to the training labels. A
system with high precision but low recall is just the opposite, returning very
few results, but most of its predicted labels are correct when compared to the
training labels. An ideal system with high precision and high recall will
return many results, with all results labeled correctly.
Precision (:math:`P`) is defined as the number of true positives (:math:`T_p`)
over the number of true positives plus the number of false positives
(:math:`F_p`).
:math:`P = \\frac{T_p}{T_p+F_p}`
Recall (:math:`R`) is defined as the number of true positives (:math:`T_p`)
over the number of true positives plus the number of false negatives
(:math:`F_n`).
:math:`R = \\frac{T_p}{T_p + F_n}`
These quantities are also related to the (:math:`F_1`) score, which is defined
as the harmonic mean of precision and recall.
:math:`F1 = 2\\frac{P \\times R}{P+R}`
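As a quick worked example (illustrative numbers, not taken from the plot
below): with :math:`T_p = 8`, :math:`F_p = 2` and :math:`F_n = 4` we get
:math:`P = 8 / 10 = 0.8`, :math:`R = 8 / 12 \\approx 0.67` and
:math:`F1 = 2 \\times 0.8 \\times 0.67 / (0.8 + 0.67) \\approx 0.73`.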
It is important to note that the precision may not decrease with recall. The
definition of precision (:math:`\\frac{T_p}{T_p + F_p}`) shows that lowering
the threshold of a classifier may increase the denominator, by increasing the
number of results returned. If the threshold was previously set too high, the
new results may all be true positives, which will increase precision. If the
previous threshold was about right or too low, further lowering the threshold
will introduce false positives, decreasing precision.
Recall is defined as :math:`\\frac{T_p}{T_p+F_n}`, where :math:`T_p+F_n` does
not depend on the classifier threshold. This means that lowering the classifier
threshold may increase recall, by increasing the number of true positive
results. It is also possible that lowering the threshold may leave recall
unchanged, while the precision fluctuates.
The relationship between recall and precision can be observed in the
stairstep area of the plot - at the edges of these steps a small change
in the threshold considerably reduces precision, with only a minor gain in
recall. See the corner at recall = .59, precision = .8 for an example of this
phenomenon.
Precision-recall curves are typically used in binary classification to study
the output of a classifier. In order to extend Precision-recall curve and
average precision to multi-class or multi-label classification, it is necessary
to binarize the output. One curve can be drawn per label, but one can also draw
a precision-recall curve by considering each element of the label indicator
matrix as a binary prediction (micro-averaging).
.. note::
See also :func:`sklearn.metrics.average_precision_score`,
:func:`sklearn.metrics.recall_score`,
:func:`sklearn.metrics.precision_score`,
:func:`sklearn.metrics.f1_score`
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn import svm, datasets
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import average_precision_score
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import label_binarize
from sklearn.multiclass import OneVsRestClassifier
# import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
# Binarize the output
y = label_binarize(y, classes=[0, 1, 2])
n_classes = y.shape[1]
# Add noisy features
random_state = np.random.RandomState(0)
n_samples, n_features = X.shape
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]
# Split into training and test
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=random_state)
# Run classifier
classifier = OneVsRestClassifier(svm.SVC(kernel='linear', probability=True,
random_state=random_state))
y_score = classifier.fit(X_train, y_train).decision_function(X_test)
# Compute Precision-Recall and plot curve
precision = dict()
recall = dict()
average_precision = dict()
for i in range(n_classes):
precision[i], recall[i], _ = precision_recall_curve(y_test[:, i],
y_score[:, i])
average_precision[i] = average_precision_score(y_test[:, i], y_score[:, i])
# Compute micro-average Precision-Recall curve and its area
precision["micro"], recall["micro"], _ = precision_recall_curve(y_test.ravel(),
y_score.ravel())
average_precision["micro"] = average_precision_score(y_test, y_score,
average="micro")
# Plot Precision-Recall curve
plt.clf()
plt.plot(recall[0], precision[0], label='Precision-Recall curve')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.ylim([0.0, 1.05])
plt.xlim([0.0, 1.0])
plt.title('Precision-Recall example: AUC={0:0.2f}'.format(average_precision[0]))
plt.legend(loc="lower left")
plt.show()
# Plot Precision-Recall curve for each class
plt.clf()
plt.plot(recall["micro"], precision["micro"],
label='micro-average Precision-recall curve (area = {0:0.2f})'
''.format(average_precision["micro"]))
for i in range(n_classes):
plt.plot(recall[i], precision[i],
label='Precision-recall curve of class {0} (area = {1:0.2f})'
''.format(i, average_precision[i]))
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.title('Extension of Precision-Recall curve to multi-class')
plt.legend(loc="lower right")
plt.show()
| bsd-3-clause |
yyjiang/scikit-learn | sklearn/utils/extmath.py | 142 | 21102 | """
Extended math utilities.
"""
# Authors: Gael Varoquaux
# Alexandre Gramfort
# Alexandre T. Passos
# Olivier Grisel
# Lars Buitinck
# Stefan van der Walt
# Kyle Kastner
# License: BSD 3 clause
from __future__ import division
from functools import partial
import warnings
import numpy as np
from scipy import linalg
from scipy.sparse import issparse
from . import check_random_state
from .fixes import np_version
from ._logistic_sigmoid import _log_logistic_sigmoid
from ..externals.six.moves import xrange
from .sparsefuncs_fast import csr_row_norms
from .validation import check_array, NonBLASDotWarning
def norm(x):
"""Compute the Euclidean or Frobenius norm of x.
Returns the Euclidean norm when x is a vector, the Frobenius norm when x
is a matrix (2-d array). More precise than sqrt(squared_norm(x)).
"""
x = np.asarray(x)
nrm2, = linalg.get_blas_funcs(['nrm2'], [x])
return nrm2(x)
# Newer NumPy has a ravel that needs less copying.
if np_version < (1, 7, 1):
_ravel = np.ravel
else:
_ravel = partial(np.ravel, order='K')
def squared_norm(x):
"""Squared Euclidean or Frobenius norm of x.
Returns the Euclidean norm when x is a vector, the Frobenius norm when x
is a matrix (2-d array). Faster than norm(x) ** 2.
"""
x = _ravel(x)
return np.dot(x, x)
def row_norms(X, squared=False):
"""Row-wise (squared) Euclidean norm of X.
Equivalent to np.sqrt((X * X).sum(axis=1)), but also supports CSR sparse
matrices and does not create an X.shape-sized temporary.
Performs no input validation.
"""
if issparse(X):
norms = csr_row_norms(X)
else:
norms = np.einsum('ij,ij->i', X, X)
if not squared:
np.sqrt(norms, norms)
return norms
def fast_logdet(A):
"""Compute log(det(A)) for A symmetric
    Equivalent to np.log(np.linalg.det(A)) but more robust.
It returns -Inf if det(A) is non positive or is not defined.
"""
sign, ld = np.linalg.slogdet(A)
if not sign > 0:
return -np.inf
return ld
def _impose_f_order(X):
"""Helper Function"""
# important to access flags instead of calling np.isfortran,
# this catches corner cases.
if X.flags.c_contiguous:
return check_array(X.T, copy=False, order='F'), True
else:
return check_array(X, copy=False, order='F'), False
def _fast_dot(A, B):
if B.shape[0] != A.shape[A.ndim - 1]: # check adopted from '_dotblas.c'
raise ValueError
if A.dtype != B.dtype or any(x.dtype not in (np.float32, np.float64)
for x in [A, B]):
warnings.warn('Data must be of same type. Supported types '
'are 32 and 64 bit float. '
'Falling back to np.dot.', NonBLASDotWarning)
raise ValueError
if min(A.shape) == 1 or min(B.shape) == 1 or A.ndim != 2 or B.ndim != 2:
raise ValueError
# scipy 0.9 compliant API
dot = linalg.get_blas_funcs(['gemm'], (A, B))[0]
A, trans_a = _impose_f_order(A)
B, trans_b = _impose_f_order(B)
return dot(alpha=1.0, a=A, b=B, trans_a=trans_a, trans_b=trans_b)
def _have_blas_gemm():
try:
linalg.get_blas_funcs(['gemm'])
return True
except (AttributeError, ValueError):
warnings.warn('Could not import BLAS, falling back to np.dot')
return False
# Only use fast_dot for older NumPy; newer ones have tackled the speed issue.
if np_version < (1, 7, 2) and _have_blas_gemm():
def fast_dot(A, B):
"""Compute fast dot products directly calling BLAS.
This function calls BLAS directly while warranting Fortran contiguity.
This helps avoiding extra copies `np.dot` would have created.
For details see section `Linear Algebra on large Arrays`:
http://wiki.scipy.org/PerformanceTips
Parameters
----------
A, B: instance of np.ndarray
Input arrays. Arrays are supposed to be of the same dtype and to
have exactly 2 dimensions. Currently only floats are supported.
In case these requirements aren't met np.dot(A, B) is returned
instead. To activate the related warning issued in this case
execute the following lines of code:
>> import warnings
>> from sklearn.utils.validation import NonBLASDotWarning
>> warnings.simplefilter('always', NonBLASDotWarning)
"""
try:
return _fast_dot(A, B)
except ValueError:
# Maltyped or malformed data.
return np.dot(A, B)
else:
fast_dot = np.dot
def density(w, **kwargs):
"""Compute density of a sparse vector
Return a value between 0 and 1
"""
if hasattr(w, "toarray"):
d = float(w.nnz) / (w.shape[0] * w.shape[1])
else:
d = 0 if w is None else float((w != 0).sum()) / w.size
return d
def safe_sparse_dot(a, b, dense_output=False):
"""Dot product that handle the sparse matrix case correctly
Uses BLAS GEMM as replacement for numpy.dot where possible
to avoid unnecessary copies.
"""
if issparse(a) or issparse(b):
ret = a * b
if dense_output and hasattr(ret, "toarray"):
ret = ret.toarray()
return ret
else:
return fast_dot(a, b)
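# Illustrative sketch (not part of the library code): ``safe_sparse_dot``
# accepts any mix of dense arrays and scipy sparse matrices, e.g.
#
#   >>> import numpy as np
#   >>> from scipy import sparse
#   >>> A = sparse.csr_matrix(np.eye(3))
#   >>> b = np.arange(3.0).reshape(3, 1)
#   >>> safe_sparse_dot(A, b, dense_output=True)   # doctest: +SKIP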
def randomized_range_finder(A, size, n_iter, random_state=None):
"""Computes an orthonormal matrix whose range approximates the range of A.
Parameters
----------
A: 2D array
The input data matrix
size: integer
Size of the return array
n_iter: integer
Number of power iterations used to stabilize the result
random_state: RandomState or an int seed (0 by default)
A random number generator instance
Returns
-------
Q: 2D array
A (size x size) projection matrix, the range of which
approximates well the range of the input matrix A.
Notes
-----
Follows Algorithm 4.3 of
Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
    Halko, et al., 2009 (arXiv:0909.4061) http://arxiv.org/pdf/0909.4061
"""
random_state = check_random_state(random_state)
# generating random gaussian vectors r with shape: (A.shape[1], size)
R = random_state.normal(size=(A.shape[1], size))
# sampling the range of A using by linear projection of r
Y = safe_sparse_dot(A, R)
del R
# perform power iterations with Y to further 'imprint' the top
# singular vectors of A in Y
for i in xrange(n_iter):
Y = safe_sparse_dot(A, safe_sparse_dot(A.T, Y))
# extracting an orthonormal basis of the A range samples
Q, R = linalg.qr(Y, mode='economic')
return Q
def randomized_svd(M, n_components, n_oversamples=10, n_iter=0,
transpose='auto', flip_sign=True, random_state=0):
"""Computes a truncated randomized SVD
Parameters
----------
M: ndarray or sparse matrix
Matrix to decompose
n_components: int
Number of singular values and vectors to extract.
n_oversamples: int (default is 10)
Additional number of random vectors to sample the range of M so as
to ensure proper conditioning. The total number of random vectors
used to find the range of M is n_components + n_oversamples.
n_iter: int (default is 0)
Number of power iterations (can be used to deal with very noisy
problems).
transpose: True, False or 'auto' (default)
Whether the algorithm should be applied to M.T instead of M. The
result should approximately be the same. The 'auto' mode will
trigger the transposition if M.shape[1] > M.shape[0] since this
        implementation of randomized SVD tends to be a little faster in that
        case.
flip_sign: boolean, (True by default)
The output of a singular value decomposition is only unique up to a
permutation of the signs of the singular vectors. If `flip_sign` is
set to `True`, the sign ambiguity is resolved by making the largest
loadings for each component in the left singular vectors positive.
random_state: RandomState or an int seed (0 by default)
        A random number generator instance to make behavior deterministic.
Notes
-----
This algorithm finds a (usually very good) approximate truncated
singular value decomposition using randomization to speed up the
computations. It is particularly fast on large matrices on which
you wish to extract only a small number of components.
References
----------
* Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
Halko, et al., 2009 http://arxiv.org/abs/arXiv:0909.4061
* A randomized algorithm for the decomposition of matrices
Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert
"""
random_state = check_random_state(random_state)
n_random = n_components + n_oversamples
n_samples, n_features = M.shape
if transpose == 'auto' and n_samples > n_features:
transpose = True
if transpose:
# this implementation is a bit faster with smaller shape[1]
M = M.T
Q = randomized_range_finder(M, n_random, n_iter, random_state)
# project M to the (k + p) dimensional space using the basis vectors
B = safe_sparse_dot(Q.T, M)
# compute the SVD on the thin matrix: (k + p) wide
Uhat, s, V = linalg.svd(B, full_matrices=False)
del B
U = np.dot(Q, Uhat)
if flip_sign:
U, V = svd_flip(U, V)
if transpose:
# transpose back the results according to the input convention
return V[:n_components, :].T, s[:n_components], U[:, :n_components].T
else:
return U[:, :n_components], s[:n_components], V[:n_components, :]
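# Illustrative sketch (not part of the library code): ``randomized_svd`` can be
# sanity-checked against the exact SVD on a small random matrix, e.g.
#
#   >>> import numpy as np
#   >>> rng = np.random.RandomState(42)
#   >>> M = rng.randn(50, 20)
#   >>> U, s, V = randomized_svd(M, n_components=5, n_iter=3, random_state=42)
#   >>> s_exact = np.linalg.svd(M, compute_uv=False)[:5]
#   >>> np.allclose(s, s_exact, atol=1e-1)     # loose tolerance: approximate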
def logsumexp(arr, axis=0):
"""Computes the sum of arr assuming arr is in the log domain.
Returns log(sum(exp(arr))) while minimizing the possibility of
over/underflow.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.extmath import logsumexp
>>> a = np.arange(10)
>>> np.log(np.sum(np.exp(a)))
9.4586297444267107
>>> logsumexp(a)
9.4586297444267107
"""
arr = np.rollaxis(arr, axis)
# Use the max to normalize, as with the log this is what accumulates
# the less errors
vmax = arr.max(axis=0)
out = np.log(np.sum(np.exp(arr - vmax), axis=0))
out += vmax
return out
def weighted_mode(a, w, axis=0):
"""Returns an array of the weighted modal (most common) value in a
If there is more than one such value, only the first is returned.
The bin-count for the modal bins is also returned.
This is an extension of the algorithm in scipy.stats.mode.
Parameters
----------
a : array_like
n-dimensional array of which to find mode(s).
w : array_like
n-dimensional array of weights for each value
axis : int, optional
Axis along which to operate. Default is 0, i.e. the first axis.
Returns
-------
vals : ndarray
Array of modal values.
score : ndarray
Array of weighted counts for each mode.
Examples
--------
>>> from sklearn.utils.extmath import weighted_mode
>>> x = [4, 1, 4, 2, 4, 2]
>>> weights = [1, 1, 1, 1, 1, 1]
>>> weighted_mode(x, weights)
(array([ 4.]), array([ 3.]))
The value 4 appears three times: with uniform weights, the result is
simply the mode of the distribution.
>>> weights = [1, 3, 0.5, 1.5, 1, 2] # deweight the 4's
>>> weighted_mode(x, weights)
(array([ 2.]), array([ 3.5]))
The value 2 has the highest score: it appears twice with weights of
1.5 and 2: the sum of these is 3.
See Also
--------
scipy.stats.mode
"""
if axis is None:
a = np.ravel(a)
w = np.ravel(w)
axis = 0
else:
a = np.asarray(a)
w = np.asarray(w)
axis = axis
if a.shape != w.shape:
w = np.zeros(a.shape, dtype=w.dtype) + w
scores = np.unique(np.ravel(a)) # get ALL unique values
testshape = list(a.shape)
testshape[axis] = 1
oldmostfreq = np.zeros(testshape)
oldcounts = np.zeros(testshape)
for score in scores:
template = np.zeros(a.shape)
ind = (a == score)
template[ind] = w[ind]
counts = np.expand_dims(np.sum(template, axis), axis)
mostfrequent = np.where(counts > oldcounts, score, oldmostfreq)
oldcounts = np.maximum(counts, oldcounts)
oldmostfreq = mostfrequent
return mostfrequent, oldcounts
def pinvh(a, cond=None, rcond=None, lower=True):
"""Compute the (Moore-Penrose) pseudo-inverse of a hermetian matrix.
Calculate a generalized inverse of a symmetric matrix using its
eigenvalue decomposition and including all 'large' eigenvalues.
Parameters
----------
a : array, shape (N, N)
        Real symmetric or complex Hermitian matrix to be pseudo-inverted
cond : float or None, default None
Cutoff for 'small' eigenvalues.
Singular values smaller than rcond * largest_eigenvalue are considered
zero.
If None or -1, suitable machine precision is used.
rcond : float or None, default None (deprecated)
Cutoff for 'small' eigenvalues.
Singular values smaller than rcond * largest_eigenvalue are considered
zero.
If None or -1, suitable machine precision is used.
lower : boolean
Whether the pertinent array data is taken from the lower or upper
triangle of a. (Default: lower)
Returns
-------
B : array, shape (N, N)
Raises
------
LinAlgError
If eigenvalue does not converge
Examples
--------
>>> import numpy as np
>>> a = np.random.randn(9, 6)
>>> a = np.dot(a, a.T)
>>> B = pinvh(a)
>>> np.allclose(a, np.dot(a, np.dot(B, a)))
True
>>> np.allclose(B, np.dot(B, np.dot(a, B)))
True
"""
a = np.asarray_chkfinite(a)
s, u = linalg.eigh(a, lower=lower)
if rcond is not None:
cond = rcond
if cond in [None, -1]:
t = u.dtype.char.lower()
factor = {'f': 1E3, 'd': 1E6}
cond = factor[t] * np.finfo(t).eps
# unlike svd case, eigh can lead to negative eigenvalues
above_cutoff = (abs(s) > cond * np.max(abs(s)))
psigma_diag = np.zeros_like(s)
psigma_diag[above_cutoff] = 1.0 / s[above_cutoff]
return np.dot(u * psigma_diag, np.conjugate(u).T)
def cartesian(arrays, out=None):
"""Generate a cartesian product of input arrays.
Parameters
----------
arrays : list of array-like
1-D arrays to form the cartesian product of.
out : ndarray
Array to place the cartesian product in.
Returns
-------
out : ndarray
2-D array of shape (M, len(arrays)) containing cartesian products
formed of input arrays.
Examples
--------
>>> cartesian(([1, 2, 3], [4, 5], [6, 7]))
array([[1, 4, 6],
[1, 4, 7],
[1, 5, 6],
[1, 5, 7],
[2, 4, 6],
[2, 4, 7],
[2, 5, 6],
[2, 5, 7],
[3, 4, 6],
[3, 4, 7],
[3, 5, 6],
[3, 5, 7]])
"""
arrays = [np.asarray(x) for x in arrays]
shape = (len(x) for x in arrays)
dtype = arrays[0].dtype
ix = np.indices(shape)
ix = ix.reshape(len(arrays), -1).T
if out is None:
out = np.empty_like(ix, dtype=dtype)
for n, arr in enumerate(arrays):
out[:, n] = arrays[n][ix[:, n]]
return out
def svd_flip(u, v, u_based_decision=True):
"""Sign correction to ensure deterministic output from SVD.
Adjusts the columns of u and the rows of v such that the loadings in the
columns in u that are largest in absolute value are always positive.
Parameters
----------
u, v : ndarray
u and v are the output of `linalg.svd` or
`sklearn.utils.extmath.randomized_svd`, with matching inner dimensions
so one can compute `np.dot(u * s, v)`.
u_based_decision : boolean, (default=True)
If True, use the columns of u as the basis for sign flipping. Otherwise,
use the rows of v. The choice of which variable to base the decision on
is generally algorithm dependent.
Returns
-------
u_adjusted, v_adjusted : arrays with the same dimensions as the input.
"""
if u_based_decision:
# columns of u, rows of v
max_abs_cols = np.argmax(np.abs(u), axis=0)
signs = np.sign(u[max_abs_cols, xrange(u.shape[1])])
u *= signs
v *= signs[:, np.newaxis]
else:
# rows of v, columns of u
max_abs_rows = np.argmax(np.abs(v), axis=1)
signs = np.sign(v[xrange(v.shape[0]), max_abs_rows])
u *= signs
v *= signs[:, np.newaxis]
return u, v
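# A rough doctest-style sketch for ``svd_flip`` (assuming numpy is available as
# ``np``, as elsewhere in this module): flipping matched columns of u and rows
# of v leaves the reconstruction unchanged while making the signs deterministic.
#
#   >>> A = np.array([[1., 2.], [3., 4.], [5., 6.]])
#   >>> U, s, V = np.linalg.svd(A, full_matrices=False)
#   >>> U2, V2 = svd_flip(U.copy(), V.copy())
#   >>> np.allclose(np.dot(U2 * s, V2), A)
#   True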
def log_logistic(X, out=None):
"""Compute the log of the logistic function, ``log(1 / (1 + e ** -x))``.
This implementation is numerically stable because it splits positive and
negative values::
-log(1 + exp(-x_i)) if x_i > 0
x_i - log(1 + exp(x_i)) if x_i <= 0
For the ordinary logistic function, use ``sklearn.utils.fixes.expit``.
Parameters
----------
X: array-like, shape (M, N)
Argument to the logistic function
out: array-like, shape: (M, N), optional:
Preallocated output array.
Returns
-------
out: array, shape (M, N)
Log of the logistic function evaluated at every point in x
Notes
-----
See the blog post describing this implementation:
http://fa.bianp.net/blog/2013/numerical-optimizers-for-logistic-regression/
"""
is_1d = X.ndim == 1
X = check_array(X, dtype=np.float)
n_samples, n_features = X.shape
if out is None:
out = np.empty_like(X)
_log_logistic_sigmoid(n_samples, n_features, X, out)
if is_1d:
return np.squeeze(out)
return out
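# A small illustrative check (hypothetical values): the split formulation above
# avoids the overflow that a naive ``np.log(1. / (1. + np.exp(-x)))`` hits for
# large negative inputs.
#
#   >>> out = log_logistic(np.array([-1000., 0., 1000.]))
#   >>> np.allclose(out, [-1000., -np.log(2), 0.])
#   True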
def safe_min(X):
"""Returns the minimum value of a dense or a CSR/CSC matrix.
    Adapted from http://stackoverflow.com/q/13426580
"""
if issparse(X):
if len(X.data) == 0:
return 0
m = X.data.min()
return m if X.getnnz() == X.size else min(m, 0)
else:
return X.min()
def make_nonnegative(X, min_value=0):
"""Ensure `X.min()` >= `min_value`."""
min_ = safe_min(X)
if min_ < min_value:
if issparse(X):
raise ValueError("Cannot make the data matrix"
" nonnegative because it is sparse."
" Adding a value to every entry would"
" make it no longer sparse.")
X = X + (min_value - min_)
return X
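# A short sketch for ``make_nonnegative`` on dense input (values are made up):
# the whole array is shifted so its minimum equals ``min_value``, whereas sparse
# input with negative entries raises the ValueError above.
#
#   >>> make_nonnegative(np.array([[-2., 1.], [0., 3.]]))
#   array([[ 0.,  3.],
#          [ 2.,  5.]])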
def _batch_mean_variance_update(X, old_mean, old_variance, old_sample_count):
"""Calculate an average mean update and a Youngs and Cramer variance update.
From the paper "Algorithms for computing the sample variance: analysis and
recommendations", by Chan, Golub, and LeVeque.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data to use for variance update
old_mean : array-like, shape: (n_features,)
old_variance : array-like, shape: (n_features,)
old_sample_count : int
Returns
-------
updated_mean : array, shape (n_features,)
updated_variance : array, shape (n_features,)
updated_sample_count : int
References
----------
    T. Chan, G. Golub, R. LeVeque. Algorithms for computing the sample variance:
    analysis and recommendations, The American Statistician, Vol. 37, No. 3, pp. 242-247
"""
new_sum = X.sum(axis=0)
new_variance = X.var(axis=0) * X.shape[0]
old_sum = old_mean * old_sample_count
n_samples = X.shape[0]
updated_sample_count = old_sample_count + n_samples
partial_variance = old_sample_count / (n_samples * updated_sample_count) * (
n_samples / old_sample_count * old_sum - new_sum) ** 2
unnormalized_variance = old_variance * old_sample_count + new_variance + \
partial_variance
return ((old_sum + new_sum) / updated_sample_count,
unnormalized_variance / updated_sample_count,
updated_sample_count)
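# A hedged consistency check (illustrative batches only): updating the
# statistics of a first batch with a second batch should reproduce the mean and
# variance of the concatenated data. The count is passed as a float to keep the
# divisions exact regardless of integer-division semantics.
#
#   >>> X1 = np.arange(6, dtype=np.float64).reshape(3, 2)
#   >>> X2 = np.arange(6, 14, dtype=np.float64).reshape(4, 2)
#   >>> mean, var, n = _batch_mean_variance_update(
#   ...     X2, X1.mean(axis=0), X1.var(axis=0), float(X1.shape[0]))
#   >>> X = np.vstack([X1, X2])
#   >>> np.allclose(mean, X.mean(axis=0)) and np.allclose(var, X.var(axis=0))
#   True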
def _deterministic_vector_sign_flip(u):
"""Modify the sign of vectors for reproducibility
Flips the sign of elements of all the vectors (rows of u) such that
the absolute maximum element of each vector is positive.
Parameters
----------
u : ndarray
Array with vectors as its rows.
Returns
-------
u_flipped : ndarray with same shape as u
Array with the sign flipped vectors as its rows.
"""
max_abs_rows = np.argmax(np.abs(u), axis=1)
signs = np.sign(u[range(u.shape[0]), max_abs_rows])
u *= signs[:, np.newaxis]
return u
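# A tiny example (made-up vectors): the row whose largest-magnitude entry is
# negative gets its sign flipped, the other row is left untouched.
#
#   >>> _deterministic_vector_sign_flip(np.array([[1., -3.], [2., 1.]]))
#   array([[-1.,  3.],
#          [ 2.,  1.]])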
| bsd-3-clause |
valexandersaulys/airbnb_kaggle_contest | venv/lib/python3.4/site-packages/pandas/io/tests/test_json_norm.py | 15 | 7823 | import nose
from pandas import DataFrame
import numpy as np
import pandas.util.testing as tm
from pandas.io.json import json_normalize, nested_to_record
def _assert_equal_data(left, right):
if not left.columns.equals(right.columns):
left = left.reindex(columns=right.columns)
tm.assert_frame_equal(left, right)
class TestJSONNormalize(tm.TestCase):
def setUp(self):
self.state_data = [
{'counties': [{'name': 'Dade', 'population': 12345},
{'name': 'Broward', 'population': 40000},
{'name': 'Palm Beach', 'population': 60000}],
'info': {'governor': 'Rick Scott'},
'shortname': 'FL',
'state': 'Florida'},
{'counties': [{'name': 'Summit', 'population': 1234},
{'name': 'Cuyahoga', 'population': 1337}],
'info': {'governor': 'John Kasich'},
'shortname': 'OH',
'state': 'Ohio'}]
def test_simple_records(self):
recs = [{'a': 1, 'b': 2, 'c': 3},
{'a': 4, 'b': 5, 'c': 6},
{'a': 7, 'b': 8, 'c': 9},
{'a': 10, 'b': 11, 'c': 12}]
result = json_normalize(recs)
expected = DataFrame(recs)
tm.assert_frame_equal(result, expected)
def test_simple_normalize(self):
result = json_normalize(self.state_data[0], 'counties')
expected = DataFrame(self.state_data[0]['counties'])
tm.assert_frame_equal(result, expected)
result = json_normalize(self.state_data, 'counties')
expected = []
for rec in self.state_data:
expected.extend(rec['counties'])
expected = DataFrame(expected)
tm.assert_frame_equal(result, expected)
result = json_normalize(self.state_data, 'counties', meta='state')
expected['state'] = np.array(['Florida', 'Ohio']).repeat([3, 2])
tm.assert_frame_equal(result, expected)
def test_more_deeply_nested(self):
data = [{'country': 'USA',
'states': [{'name': 'California',
'cities': [{'name': 'San Francisco',
'pop': 12345},
{'name': 'Los Angeles',
'pop': 12346}]
},
{'name': 'Ohio',
'cities': [{'name': 'Columbus',
'pop': 1234},
{'name': 'Cleveland',
'pop': 1236}]}
]
},
{'country': 'Germany',
'states': [{'name': 'Bayern',
'cities': [{'name': 'Munich', 'pop': 12347}]
},
{'name': 'Nordrhein-Westfalen',
'cities': [{'name': 'Duesseldorf', 'pop': 1238},
{'name': 'Koeln', 'pop': 1239}]}
]
}
]
result = json_normalize(data, ['states', 'cities'],
meta=['country', ['states', 'name']])
# meta_prefix={'states': 'state_'})
ex_data = {'country': ['USA'] * 4 + ['Germany'] * 3,
'states.name': ['California', 'California', 'Ohio', 'Ohio',
'Bayern', 'Nordrhein-Westfalen',
'Nordrhein-Westfalen'],
'name': ['San Francisco', 'Los Angeles', 'Columbus',
'Cleveland', 'Munich', 'Duesseldorf', 'Koeln'],
'pop': [12345, 12346, 1234, 1236, 12347, 1238, 1239]}
expected = DataFrame(ex_data, columns=result.columns)
tm.assert_frame_equal(result, expected)
def test_shallow_nested(self):
data = [{'state': 'Florida',
'shortname': 'FL',
'info': {
'governor': 'Rick Scott'
},
'counties': [{'name': 'Dade', 'population': 12345},
{'name': 'Broward', 'population': 40000},
{'name': 'Palm Beach', 'population': 60000}]},
{'state': 'Ohio',
'shortname': 'OH',
'info': {
'governor': 'John Kasich'
},
'counties': [{'name': 'Summit', 'population': 1234},
{'name': 'Cuyahoga', 'population': 1337}]}]
result = json_normalize(data, 'counties',
['state', 'shortname',
['info', 'governor']])
ex_data = {'name': ['Dade', 'Broward', 'Palm Beach', 'Summit',
'Cuyahoga'],
'state': ['Florida'] * 3 + ['Ohio'] * 2,
'shortname': ['FL', 'FL', 'FL', 'OH', 'OH'],
'info.governor': ['Rick Scott'] * 3 + ['John Kasich'] * 2,
'population': [12345, 40000, 60000, 1234, 1337]}
expected = DataFrame(ex_data, columns=result.columns)
tm.assert_frame_equal(result, expected)
def test_meta_name_conflict(self):
data = [{'foo': 'hello',
'bar': 'there',
'data': [{'foo': 'something', 'bar': 'else'},
{'foo': 'something2', 'bar': 'else2'}]}]
self.assertRaises(ValueError, json_normalize, data,
'data', meta=['foo', 'bar'])
result = json_normalize(data, 'data', meta=['foo', 'bar'],
meta_prefix='meta')
for val in ['metafoo', 'metabar', 'foo', 'bar']:
self.assertTrue(val in result)
def test_record_prefix(self):
result = json_normalize(self.state_data[0], 'counties')
expected = DataFrame(self.state_data[0]['counties'])
tm.assert_frame_equal(result, expected)
result = json_normalize(self.state_data, 'counties',
meta='state',
record_prefix='county_')
expected = []
for rec in self.state_data:
expected.extend(rec['counties'])
expected = DataFrame(expected)
expected = expected.rename(columns=lambda x: 'county_' + x)
expected['state'] = np.array(['Florida', 'Ohio']).repeat([3, 2])
tm.assert_frame_equal(result, expected)
class TestNestedToRecord(tm.TestCase):
def test_flat_stays_flat(self):
recs = [dict(flat1=1,flat2=2),
dict(flat1=3,flat2=4),
]
result = nested_to_record(recs)
expected = recs
self.assertEqual(result, expected)
def test_one_level_deep_flattens(self):
data = dict(flat1=1,
dict1=dict(c=1,d=2))
result = nested_to_record(data)
expected = {'dict1.c': 1,
'dict1.d': 2,
'flat1': 1}
self.assertEqual(result,expected)
def test_nested_flattens(self):
data = dict(flat1=1,
dict1=dict(c=1,d=2),
nested=dict(e=dict(c=1,d=2),
d=2))
result = nested_to_record(data)
expected = {'dict1.c': 1,
'dict1.d': 2,
'flat1': 1,
'nested.d': 2,
'nested.e.c': 1,
'nested.e.d': 2}
self.assertEqual(result,expected)
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb',
'--pdb-failure', '-s'], exit=False)
| gpl-2.0 |
gfrd/gfrd | samples/pushpull/plot.py | 1 | 2295 | #!/usr/bin/env python
import sys
import string
import numpy
import scipy.io
from matplotlib.pylab import *
from fractionS import *
N_A = 6.0221367e23
E2 = 5
V = 1e-15
def plot_theory( K ):
N = 1000
minE1 = 0.1
maxE1 = 100.
e1array = numpy.mgrid[minE1:maxE1:(maxE1-minE1)/N]
farray = [ fraction_Sp( E1, E2, K ) for E1 in e1array ]
farray = numpy.array( farray )
#print farray
semilogx( e1array/E2, farray, label='K = %f' % K )
def file_mean( filename, skip ):
ycolumns = [2,]
#ycolumns = [2,6]
#ycolumns = [3,5]
#ycolumns = [2,6,3,5]
data = load( filename )
x = data[:,0]
y = data[:,ycolumns[0]]
start = x.searchsorted( skip )
if len(x)<=start:
return None
x = x[start:]
y = y[start:]
#print x[-1]
xdiff = x[1:] - x[:-1]
yscaled = y[:-1] * xdiff
yscaledmean = yscaled.sum() / ( x[-1] - x[0] )
print yscaledmean, y.mean()
#return y.mean()
return yscaledmean
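# The quantity returned above is a time-weighted average (a piecewise-constant
# integral divided by the elapsed time). A minimal numpy illustration of the
# same idea, with made-up samples:
#
#   x = numpy.array([0., 1., 3.]) # sample times
#   y = numpy.array([2., 4., 6.]) # sampled values
#   time_weighted = (y[:-1] * (x[1:] - x[:-1])).sum() / (x[-1] - x[0]) # -> 10/3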
import glob
import os
S_tot = 300.0
model = 'pushpull'
Keq_str = '0.05'
#Keq_str = '5'
#koff_ratio_str = '0.1'
#koff_ratio_str = '0.5'
koff_ratio_str = '0.9'
#koff_ratio_str = '0'
N_P = 10
V = '1e-14'
T = '300'
#mode = 'normal'
#mode = 'localized'
mode = 'single'
skip = float(T) *0.9
dir = sys.argv[1]
outdir = sys.argv[2]
#pattern = sys.argv[2]
#globpattern = pattern.replace('ALL','*') + '_*.dat'
for N_K in range( 40 ):
globpattern = \
string.join( ( model, Keq_str, koff_ratio_str, str(N_K),
str(N_P), V, mode, '*' ), '_' )
print globpattern
filelist = glob.glob( dir + os.sep + globpattern )
if not filelist:
continue
data = []
for file in filelist:
print file
res = file_mean( file, skip )
if res:
data.append( res )
data = numpy.array( data )
print data
data /= S_tot
mean = data.mean()
std_err = data.std()/math.sqrt(len(data))
print mean, std_err
errorbar( float(N_K)/N_P, mean, yerr=std_err, fmt='+' )
plot_theory( float( Keq_str ) )
figtitle = string.join( ( model, Keq_str, koff_ratio_str, 'ALL',
str(N_P), V, mode ),
'_' )
title( figtitle )
show()
#savefig( outdir + '/' + figtitle + '.png', dpi=80 )
| gpl-2.0 |
Tuisto59/Peregrination | DERNIERE VERSION BETA/peregrination.py | 1 | 18331 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
(c) Copyright Yoan BOUZIN
This file is part of Pérégrination v1.0.
Pérégrination v2.0 is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Pérégrination v1.0 is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Pérégrination v2.0. If not, see <http://www.gnu.org/licenses/>.
Source of the Belgium and France shapefiles:
http://www.gadm.org/country
"""
##########
# IMPORT #
##########
#pure python
from operator import itemgetter
import collections
import csv
import re
import base64
import webbrowser
# get the correct encoding
import locale
print_encoding = locale.getdefaultlocale()[1]
#external library
import numpy as np
from matplotlib import cm
from matplotlib import colors
import folium
#############
# FONCTIONS #
#############
def import_town_gps_coord(town_file):
"""
return dictionary of town with latitude and longitude
input :
town_file (file) :
The file generated by Heredis and SQLite Manager. See the documentation to how have the file
output :
dico_town (dictionary) :
- key (string) : town_name
- value (2-element tuple) : (latitude,longitude)
"""
lol = csv.reader(open(town_file, 'r'),delimiter=",")
dico_town = {rows[0]:[float(rows[1]),float(rows[2])] for rows in lol}
return dico_town
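# Hedged sketch of the expected CSV layout (file name and towns are made up):
# one town per row as "name,latitude,longitude".
#
#   towns.csv:
#       Paris,48.8566,2.3522
#       Lyon,45.7640,4.8357
#
#   >>> import_town_gps_coord('towns.csv')
#   {'Paris': [48.8566, 2.3522], 'Lyon': [45.764, 4.8357]}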
def convert_to_trajectory_GEDCOM(ascdt,town_list,dico_ID,function):
"""
Convert the dictionnary into a list of trajectory
Adapted for GEDCOM importation
input :
ascdt (dictionnary) : the object returned by import_ascendance() function
town_list (dictionnary) : the object returned by import_town_gps_coord()
output :
        traj : tuple of 7 elements :
            -0 : latitude of the cityA (city of the father/mother)
            -1 : longitude of the cityA (city of the father/mother)
            -2 : latitude of the cityB (city of the sosa)
            -3 : longitude of the cityB (city of the sosa)
            -4 : cityA name
            -5 : cityB name
            -6 : generation of the parents
        coo : tuple of 5 elements containing
            -0 : latitude of the cityA (city of the father/mother)
            -1 : longitude of the cityA (city of the father/mother)
            -2 : latitude of the cityB (city of the sosa)
            -3 : longitude of the cityB (city of the sosa)
            -4 : generation of the parents
"""
list_traj = list()
list_coord = list()
for i in ascdt.keys():
        cityB = ascdt[i][3] #town of the studied individual (arrival point)
        p = ascdt[i][9] #@id@ of the father
        m = ascdt[i][10] #@id@ of the mother
for ID in p,m:
#get method : if True , they are ID ind dict, if False, no ID : equal to '' or ID but notpresent because descendent genealogy
if ascdt.get(ID):
g= ascdt[ID][-1]
cityA = ascdt[ID][3]
if cityA != '' :
if cityB != '' :
if cityA != cityB:
function(text='Génération '.decode("iso8859_15")+str(g)+' : '+ascdt[ID][1].decode("iso8859_15")+' ==> '+ascdt[i][1].decode("iso8859_15"))
traj = (town_list[cityA][0],town_list[cityA][1],town_list[cityB][0],town_list[cityB][1],cityA, cityB,g)
coo = (town_list[cityA][0], town_list[cityA][1],town_list[cityB][0],town_list[cityB][1],g)
list_traj += [traj]
list_coord += [coo]
#test !! in the case all the city are identical, few city, descendance ??
if cityA == cityB:
#function(text='Génération '.decode("iso8859_15")+str(g)+' : '+ascdt[ID][1].decode("iso8859_15")+' ==> '+ascdt[i][1].decode("iso8859_15"))
traj = (town_list[cityA][0],town_list[cityA][1],town_list[cityB][0],town_list[cityB][1],cityA, cityB,g)
coo = (town_list[cityA][0], town_list[cityA][1],town_list[cityB][0],town_list[cityB][1],g)
list_traj += [traj]
list_coord += [coo]
#the father/mother of 'i' don't have location, trying to look higher in the pedigree to found a place
if cityA == '' and cityB != '':
# get @id@ of the grand parents of 'i' (the parents 'ID' of 'i' must exist to this point)
liste_ID_before = list()
if ascdt[ID][9] != '':
liste_ID_before += [ascdt[ID][9]]
if ascdt[ID][10] != '':
liste_ID_before += [ascdt[ID][10]]
# if the liste_ID_before is empty, they are no grand parents, continue
# this control are in the case they are no parents in the generation g+1
if len(liste_ID_before) == 0:
continue
else:
#we have grand parents, go control their birth places
city = False
liste_ID_after = list()
while city == False:
#iterate through the liste_ID_before and get the parents-@id@ for each
for id_i in liste_ID_before:
if ascdt[id_i][9] != '':
#Store only if the father and child don't have any Place
if ascdt[id_i][3] == '' and ascdt[ascdt[id_i][9]][3] == '':
#store g_N-father ID
liste_ID_after+=[ascdt[id_i][9]]
if ascdt[id_i][10] != '':
#Store only if the mother and child don't have any Place
if ascdt[id_i][3] == '' and ascdt[ascdt[id_i][10]][3] == '':
#store g_N-mother ID
liste_ID_after+=[ascdt[id_i][10]]
#check point, for the first turn it's not blocked
if len(liste_ID_before) == 0:
break
#analyse each Places for each founded ID
else:
for prts_i in liste_ID_before:
if prts_i != '':
g= ascdt[prts_i][-1]
cityA = ascdt[prts_i][3]
if cityA != '' :
if cityA != cityB:
function('Génération '.decode("iso8859_15")+str(g)+' : '+ascdt[prts_i][1].decode("iso8859_15")+' ==> '+ascdt[i][1].decode("iso8859_15"))
traj = (town_list[cityA][0],town_list[cityA][1],town_list[cityB][0],town_list[cityB][1],cityA, cityB,g)
coo = (town_list[cityA][0], town_list[cityA][1],town_list[cityB][0],town_list[cityB][1],g)
list_traj += [traj]
list_coord += [coo]
#second check point, if they are no parents-@id@ in the generation g+1, break the loop
if len(liste_ID_after) == 0:
break
else:
#replace the liste_ID_before by the liste_ID_after and replace liste_ID_after by an empty list
liste_ID_before = liste_ID_after
liste_ID_after = []
return list(set(list_traj)), list(set(list_coord))
def find_min_max_coordinate(list_coord):
"""
find the minimum and maximum coordinate to trace the map
and add a value to have a margin on the map
input :
list_coord (list) : the 2nd object returned by convert_to_trajectory_ascdt()
output :
- x_min (float) : the minimum longitude
- y_min (float) : the minimum latitude
- x_max (float) : the maximum longitude
- y_max (float) : the maximum latitude
- g_max (integer) : the maximum number of generation
"""
array = np.asarray(list_coord)
minimums = array.min(axis=0)
y1_min, x1_min, y2_min, x2_min, g_min = minimums
x_min = min(x1_min, x2_min)
y_min = min(y1_min, y2_min)
maximums = array.max(axis=0)
y1_max, x1_max, y2_max, x2_max, g_max = maximums
x_max = max(x1_max, x2_max)
y_max = max(y1_max, y2_max)
#after found the min and max I had an extra value to have a margin in the map
x_min = x_min-0.5
y_min = y_min-0.5
x_max = x_max+0.5
y_max= y_max+0.5
g_max = int(g_max)
return y_min, x_min, y_max, x_max, g_max
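# Illustrative call (made-up coordinates): the bounds are padded by 0.5 degree
# on every side and the generation count is returned as an int, roughly:
#
#   find_min_max_coordinate([(48.85, 2.35, 50.85, 4.35, 1),
#                            (45.76, 4.84, 48.85, 2.35, 2)])
#   # -> approximately (45.26, 1.85, 51.35, 5.34, 2)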
def find_nth_character(str1, substr, n):
"""
return the index of the nth querring subtring
input :
str1 (string) : the string
substr (string) : the target string
n (integer) : the nth occurence you looking for
return :
pos (integer) : index of the substring at the nth position
"""
pos = -1
for x in xrange(n):
pos = str1.find(substr, pos+1)
if pos == -1:
return None
return pos
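# Example: the 2nd '-' of 'a-b-c' sits at index 3, and None is returned when
# there are fewer occurrences than requested.
#
#   >>> find_nth_character('a-b-c', '-', 2)
#   3
#   >>> find_nth_character('a-b-c', '-', 3) is None
#   True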
def codec(strA,strB):
"""
Find the correct combination of encoding to concatenate two string
Related to create_annonation_text_gedcom
input : strA, strB : string to concatenate
output : concatenation of strA and strB
"""
try:
strAB = strA.encode('iso8859_15')+strB
return strAB
except:
try:
strAB = strA+strB.encode('iso8859_15')
return strAB
except:
try:
A = strA.decode('iso8859_15')
B = strB.decode('iso8859_15')
strAB = A + B
return strAB
except:
A = strA.encode('iso8859_15')
B = strB.encode('iso8859_15')
strAB = A + B
return strAB
def multiple_wedding_gedcom(line):
"""
    Separates the information of each marriage into an independent group.
    In the case of multiple marriages, Hérédis concatenates the event data
    on the same line and uses a bulleted list. This function gathers the data
    on the husband / wife, the wedding date and the wedding town.
input :
line (list) : the line with the multiple wedding
output :
        list_of_result (list of 3-element tuples)
name, date, town
Using RegEx :
- to catch only family name
((?:(?:particules) )?[capital_letter special_capital_letter symbol]+\b)
particules : list of particule are :
d'|de|des|la|DE|VAN|LE
capital_letter : all the capital letter of the alphabet
ABCDEFGHIJKLMNOPQRSTUVWXYZ
special_capital_letter : special letter for foreign familly names
ÀÁÂÄÃÅĄĆČĖĘÈÉÊËÌÍÎÏĮŁŃÒÓÔÖÕØÙÚÛÜŲŪŸÝŻŹÑßÇŒÆČŠŽ∂ð
other_symbol : generally used for composed familly names or to show variation
, . / ( ) -
        - to catch only town
(?!\s)[\w\s-]+(?<!\s)
- to catch only years
[0-9]{4}
"""
list_of_result = list()
splitted_names = line[4].split('\x95')
splitted_dates = line[5].split('\x95')
splitted_towns = line[6].split('\x95')
data = zip(splitted_names, splitted_dates, splitted_towns)
for ndt in data:
n, d, t = ndt
names_result = " ".join(re.findall(ur"((?:(?:d'|de|des|la|DE|VAN|LE) )?[A-ZÀÁÂÄÃÅĄĆČĖĘÈÉÊËÌÍÎÏĮŁŃÒÓÔÖÕØÙÚÛÜŲŪŸÝŻŹÑßÇŒÆČŠŽ∂ð,.'-\(\)/]+\b)",unicode(n.decode('iso8859-15')),re.UNICODE))
date_result = "".join(re.findall(r"[0-9]{4}",d))
if t:
if t[-1].isspace():
town_result = t[1:len(t)-1]
elif t[0].isspace():
town_result = t[1:]
else:
town_result = ''
#town_result = "".join(re.findall(ur"(?!\s)[\w\s-]+(?<!\s)",unicode(t.decode('iso8859-15')),re.UNICODE))
if names_result == '' and date_result == '' and town_result == '':
continue
else:
#list_of_result += [(names_result,date_result,town_result)] #avant
list_of_result += [(names_result,date_result,town_result)] #test town_result with no encode
return list_of_result
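# Hedged usage sketch (the row below is a made-up Hérédis-style export line):
# the spouse, date and town columns are each split on the '\x95' bullet and
# regrouped event by event.
#
#   fake_line = ['', '', '', '',
#                'Jean DUPONT\x95Marie MARTIN',  # spouses column
#                'le 12 mai 1850\x951862',       # dates column
#                ' Paris \x95 Lyon ']            # towns column
#   # multiple_wedding_gedcom(fake_line) would return one (name, year, town)
#   # tuple per wedding, e.g. something like
#   # [('DUPONT', '1850', 'Paris'), ('MARTIN', '1862', 'Lyon')]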
def generate_map_gedcom(typ,y_min, x_min, y_max, x_max,g_max,list_traj,dico_annotation, popup_trajectory, filename, shapefile):
"""
Generate Open Street Map HTML page
"""
xmean = np.mean([x_min,x_max])
ymean = np.mean([y_min,y_max])
if typ == 1:
list_traj = sorted(list_traj, key=itemgetter(6), reverse=True)
elif typ == 2:
list_traj = sorted(list_traj, key=itemgetter(6), reverse=True)
my_map1 = folium.Map(location=[ymean,xmean],tiles='Stamen Terrain',zoom_start=6)
my_map2 = folium.Map(location=[ymean,xmean],zoom_start=6)
for my_map in my_map1, my_map2:
town_set = set()
dico_traj_size = dict()
#create step color map
hexa_colors = list()
for g in range(1, g_max+1):
cm_object = cm.Paired(1.*g/g_max)
rgb = cm_object[:3]
hexa = colors.rgb2hex(rgb)
hexa_colors.append(hexa)
#make legend
#colormap = folium.colormap.linear.Paired.scale(1, g_max).to_step(g_max)
colormap = folium.colormap.StepColormap(hexa_colors, index=None, vmin=1.0, vmax=float(g_max), caption= u"Générations")
my_map.add_child(colormap)
#make shapefile (only if its in group mode)
if shapefile:
my_map.choropleth(geo_path='data_tmp.json',fill_color='red')
nb_polyline = dict()
#dico to store the town description and avoid overlap of marker when
dico_pop = dict()
dico_size = dict()
for data in list_traj:
y1,x1,y2,x2,m1,m2,g= data
#polyline
if ((y1,x1),(y2,x2)) not in nb_polyline.keys():
nb_polyline[(y1,x1),(y2,x2)] = 1
else:
nb_polyline[(y1,x1),(y2,x2)] += 1
for y, x, m, g in (y1,x1,m1,g),(y2,x2,m2,g):
try:
pop = m.decode('iso8859_15')+"\n"+dico_annotation[m][0].decode('iso8859_15')
except UnicodeEncodeError:
pop = m.decode('iso8859_15')+"\n"+dico_annotation[m][0]
if (y,x) not in dico_pop.keys():
pop_ad = set()
pop_ad.add(pop)
dico_pop[(y,x)] = pop_ad
else:
pop_ad = dico_pop[(y,x)]
pop_ad.add(pop)
dico_pop[(y,x)] = pop_ad
#generation polyline marker size
if (y,x) not in dico_size.keys():
dico_size[(y,x)]=[g]
else:
if g not in dico_size[(y,x)]:
dico_size[(y,x)] += [g]
for data in list_traj:
#data
y1,x1,y2,x2,m1,m2,g= data
#get color
cm_object = cm.Paired(1.*g/g_max)
rgb = cm_object[:3]
hexa = colors.rgb2hex(rgb)
#trajectory
#avoid when trajectory have the same start/end location
#it's happened when subdivision are not found and the level is "town" location level
if (y1,x1) != (y2,x2):
for key in (y1,x1), (y2,x2):
sorted_g = sorted(dico_size[(key)])
size = (sorted_g.index(g) + 1) * 10
folium.PolyLine([key,key], color=hexa, weight=size, opacity=0.9).add_to(my_map)
folium.PolyLine([(y1,x1),(y2,x2)], popup=popup_trajectory[(m1,m2)].decode('iso8859_15') , color=hexa, weight=nb_polyline[(y1,x1),(y2,x2)]*5, opacity=0.9).add_to(my_map)
nb_polyline[(y1,x1),(y2,x2)] -= 1
else:
sorted_g = sorted(dico_size[(y1,x1)])
size = (sorted_g.index(g) + 1) * 10
folium.PolyLine([(y1,x1),(y1,x1)], color=hexa, weight=size, opacity=0.9).add_to(my_map)
#marker
if (y1,x1) not in town_set:
folium.Marker([y1, x1], popup=' '.join(list(dico_pop[(y1,x1)]))).add_to(my_map)
town_set.add((y1,x1))
if (y2,x2) not in town_set:
folium.Marker([y2, x2], popup=' '.join(list(dico_pop[(y2,x2)]))).add_to(my_map)
town_set.add((y2, x2))
for key, (text, y, x) in dico_annotation.items():
if (y,x) not in town_set:
icon = folium.Icon(color=u'black')
town_set.add(key)
try:
folium.Marker([y,x], popup=key.decode('iso8859_15')+"\n"+text.decode('iso8859_15'), icon=icon).add_to(my_map)
except UnicodeEncodeError:
folium.Marker([y,x], popup=key+"\n"+text, icon=icon).add_to(my_map)
filename1 = filename.replace(' ','_').replace('.ged','')+'_map_'+str(typ)+'_1.html'
filename2 = filename.replace(' ','_').replace('.ged','')+'_map_'+str(typ)+'_2.html'
my_map1.save(filename1)
my_map2.save(filename2)
webbrowser.open(filename1)
webbrowser.open(filename2)
| gpl-2.0 |
uglyboxer/linear_neuron | net-p3/lib/python3.5/site-packages/mpl_toolkits/axes_grid1/axes_grid.py | 7 | 31905 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import matplotlib.cbook as cbook
import matplotlib.pyplot as plt
import matplotlib.axes as maxes
#import matplotlib.colorbar as mcolorbar
from . import colorbar as mcolorbar
import matplotlib as mpl
import matplotlib.patches as mpatches
import matplotlib.lines as mlines
import matplotlib.ticker as ticker
from matplotlib.gridspec import SubplotSpec
from .axes_divider import Size, SubplotDivider, LocatableAxes, Divider
def _extend_axes_pad(value):
# Check whether a list/tuple/array or scalar has been passed
ret = value
if not hasattr(ret, "__getitem__"):
ret = (value, value)
return ret
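# Small illustration: a scalar pad is duplicated into a (horizontal, vertical)
# pair, while a tuple-like value is passed through unchanged.
#
#   >>> _extend_axes_pad(0.05)
#   (0.05, 0.05)
#   >>> _extend_axes_pad((0.1, 0.3))
#   (0.1, 0.3)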
def _tick_only(ax, bottom_on, left_on):
bottom_off = not bottom_on
left_off = not left_on
# [l.set_visible(bottom_off) for l in ax.get_xticklabels()]
# [l.set_visible(left_off) for l in ax.get_yticklabels()]
# ax.xaxis.label.set_visible(bottom_off)
# ax.yaxis.label.set_visible(left_off)
ax.axis["bottom"].toggle(ticklabels=bottom_off, label=bottom_off)
ax.axis["left"].toggle(ticklabels=left_off, label=left_off)
class Colorbar(mcolorbar.Colorbar):
def _config_axes_deprecated(self, X, Y):
'''
Make an axes patch and outline.
'''
ax = self.ax
ax.set_frame_on(False)
ax.set_navigate(False)
xy = self._outline(X, Y)
ax.update_datalim(xy)
ax.set_xlim(*ax.dataLim.intervalx)
ax.set_ylim(*ax.dataLim.intervaly)
self.outline = mlines.Line2D(xy[:, 0], xy[:, 1],
color=mpl.rcParams['axes.edgecolor'],
linewidth=mpl.rcParams['axes.linewidth'])
ax.add_artist(self.outline)
self.outline.set_clip_box(None)
self.outline.set_clip_path(None)
c = mpl.rcParams['axes.facecolor']
self.patch = mpatches.Polygon(xy, edgecolor=c,
facecolor=c,
linewidth=0.01,
zorder=-1)
ax.add_artist(self.patch)
ticks, ticklabels, offset_string = self._ticker()
if self.orientation == 'vertical':
ax.set_yticks(ticks)
ax.set_yticklabels(ticklabels)
ax.yaxis.get_major_formatter().set_offset_string(offset_string)
else:
ax.set_xticks(ticks)
ax.set_xticklabels(ticklabels)
ax.xaxis.get_major_formatter().set_offset_string(offset_string)
class CbarAxesBase(object):
def colorbar(self, mappable, **kwargs):
locator = kwargs.pop("locator", None)
if locator is None:
if "ticks" not in kwargs:
kwargs["ticks"] = ticker.MaxNLocator(5)
if locator is not None:
if "ticks" in kwargs:
raise ValueError("Either *locator* or *ticks* need" +
" to be given, not both")
else:
kwargs["ticks"] = locator
self.hold(True)
if self.orientation in ["top", "bottom"]:
orientation = "horizontal"
else:
orientation = "vertical"
cb = Colorbar(self, mappable, orientation=orientation, **kwargs)
self._config_axes()
def on_changed(m):
#print 'calling on changed', m.get_cmap().name
cb.set_cmap(m.get_cmap())
cb.set_clim(m.get_clim())
cb.update_bruteforce(m)
self.cbid = mappable.callbacksSM.connect('changed', on_changed)
mappable.colorbar = cb
self.locator = cb.cbar_axis.get_major_locator()
return cb
def _config_axes(self):
'''
Make an axes patch and outline.
'''
ax = self
ax.set_navigate(False)
ax.axis[:].toggle(all=False)
b = self._default_label_on
ax.axis[self.orientation].toggle(all=b)
# for axis in ax.axis.values():
# axis.major_ticks.set_visible(False)
# axis.minor_ticks.set_visible(False)
# axis.major_ticklabels.set_visible(False)
# axis.minor_ticklabels.set_visible(False)
# axis.label.set_visible(False)
# axis = ax.axis[self.orientation]
# axis.major_ticks.set_visible(True)
# axis.minor_ticks.set_visible(True)
#axis.major_ticklabels.set_size(
# int(axis.major_ticklabels.get_size()*.9))
#axis.major_tick_pad = 3
# axis.major_ticklabels.set_visible(b)
# axis.minor_ticklabels.set_visible(b)
# axis.label.set_visible(b)
def toggle_label(self, b):
self._default_label_on = b
axis = self.axis[self.orientation]
axis.toggle(ticklabels=b, label=b)
#axis.major_ticklabels.set_visible(b)
#axis.minor_ticklabels.set_visible(b)
#axis.label.set_visible(b)
class CbarAxes(CbarAxesBase, LocatableAxes):
def __init__(self, *kl, **kwargs):
orientation = kwargs.pop("orientation", None)
if orientation is None:
raise ValueError("orientation must be specified")
self.orientation = orientation
self._default_label_on = True
self.locator = None
super(LocatableAxes, self).__init__(*kl, **kwargs)
def cla(self):
super(LocatableAxes, self).cla()
self._config_axes()
class Grid(object):
"""
A class that creates a grid of Axes. In matplotlib, the axes
location (and size) is specified in the normalized figure
    coordinates. This may not be ideal for images that need to be
    displayed with a given aspect ratio. For example, displaying
    images of the same size with some fixed padding between them cannot
    be easily done in matplotlib. AxesGrid is used in such cases.
"""
_defaultLocatableAxesClass = LocatableAxes
def __init__(self, fig,
rect,
nrows_ncols,
ngrids=None,
direction="row",
axes_pad=0.02,
add_all=True,
share_all=False,
share_x=True,
share_y=True,
#aspect=True,
label_mode="L",
axes_class=None,
):
"""
Build an :class:`Grid` instance with a grid nrows*ncols
:class:`~matplotlib.axes.Axes` in
:class:`~matplotlib.figure.Figure` *fig* with
*rect=[left, bottom, width, height]* (in
:class:`~matplotlib.figure.Figure` coordinates) or
the subplot position code (e.g., "121").
Optional keyword arguments:
================ ======== =========================================
Keyword Default Description
================ ======== =========================================
direction "row" [ "row" | "column" ]
axes_pad 0.02 float| pad between axes given in inches
or tuple-like of floats,
(horizontal padding, vertical padding)
add_all True [ True | False ]
share_all False [ True | False ]
share_x True [ True | False ]
share_y True [ True | False ]
label_mode "L" [ "L" | "1" | "all" ]
axes_class None a type object which must be a subclass
of :class:`~matplotlib.axes.Axes`
================ ======== =========================================
"""
self._nrows, self._ncols = nrows_ncols
if ngrids is None:
ngrids = self._nrows * self._ncols
else:
if (ngrids > self._nrows * self._ncols) or (ngrids <= 0):
raise Exception("")
self.ngrids = ngrids
self._init_axes_pad(axes_pad)
if direction not in ["column", "row"]:
raise Exception("")
self._direction = direction
if axes_class is None:
axes_class = self._defaultLocatableAxesClass
axes_class_args = {}
else:
if (type(axes_class)) == type and \
issubclass(axes_class,
self._defaultLocatableAxesClass.Axes):
axes_class_args = {}
else:
axes_class, axes_class_args = axes_class
self.axes_all = []
self.axes_column = [[] for _ in range(self._ncols)]
self.axes_row = [[] for _ in range(self._nrows)]
h = []
v = []
if cbook.is_string_like(rect) or cbook.is_numlike(rect):
self._divider = SubplotDivider(fig, rect, horizontal=h, vertical=v,
aspect=False)
elif isinstance(rect, SubplotSpec):
self._divider = SubplotDivider(fig, rect, horizontal=h, vertical=v,
aspect=False)
elif len(rect) == 3:
kw = dict(horizontal=h, vertical=v, aspect=False)
self._divider = SubplotDivider(fig, *rect, **kw)
elif len(rect) == 4:
self._divider = Divider(fig, rect, horizontal=h, vertical=v,
aspect=False)
else:
raise Exception("")
rect = self._divider.get_position()
# reference axes
self._column_refax = [None for _ in range(self._ncols)]
self._row_refax = [None for _ in range(self._nrows)]
self._refax = None
for i in range(self.ngrids):
col, row = self._get_col_row(i)
if share_all:
sharex = self._refax
sharey = self._refax
else:
if share_x:
sharex = self._column_refax[col]
else:
sharex = None
if share_y:
sharey = self._row_refax[row]
else:
sharey = None
ax = axes_class(fig, rect, sharex=sharex, sharey=sharey,
**axes_class_args)
if share_all:
if self._refax is None:
self._refax = ax
else:
if sharex is None:
self._column_refax[col] = ax
if sharey is None:
self._row_refax[row] = ax
self.axes_all.append(ax)
self.axes_column[col].append(ax)
self.axes_row[row].append(ax)
self.axes_llc = self.axes_column[0][-1]
self._update_locators()
if add_all:
for ax in self.axes_all:
fig.add_axes(ax)
self.set_label_mode(label_mode)
def _init_axes_pad(self, axes_pad):
axes_pad = _extend_axes_pad(axes_pad)
self._axes_pad = axes_pad
self._horiz_pad_size = Size.Fixed(axes_pad[0])
self._vert_pad_size = Size.Fixed(axes_pad[1])
def _update_locators(self):
h = []
h_ax_pos = []
for _ in self._column_refax:
#if h: h.append(Size.Fixed(self._axes_pad))
if h:
h.append(self._horiz_pad_size)
h_ax_pos.append(len(h))
sz = Size.Scaled(1)
h.append(sz)
v = []
v_ax_pos = []
for _ in self._row_refax[::-1]:
#if v: v.append(Size.Fixed(self._axes_pad))
if v:
v.append(self._vert_pad_size)
v_ax_pos.append(len(v))
sz = Size.Scaled(1)
v.append(sz)
for i in range(self.ngrids):
col, row = self._get_col_row(i)
locator = self._divider.new_locator(nx=h_ax_pos[col],
ny=v_ax_pos[self._nrows - 1 - row])
self.axes_all[i].set_axes_locator(locator)
self._divider.set_horizontal(h)
self._divider.set_vertical(v)
def _get_col_row(self, n):
if self._direction == "column":
col, row = divmod(n, self._nrows)
else:
row, col = divmod(n, self._ncols)
return col, row
# Good to propagate __len__ if we have __getitem__
def __len__(self):
return len(self.axes_all)
def __getitem__(self, i):
return self.axes_all[i]
def get_geometry(self):
"""
get geometry of the grid. Returns a tuple of two integer,
representing number of rows and number of columns.
"""
return self._nrows, self._ncols
def set_axes_pad(self, axes_pad):
"set axes_pad"
self._axes_pad = axes_pad
# These two lines actually differ from ones in _init_axes_pad
self._horiz_pad_size.fixed_size = axes_pad[0]
self._vert_pad_size.fixed_size = axes_pad[1]
def get_axes_pad(self):
"""
get axes_pad
Returns
-------
tuple
Padding in inches, (horizontal pad, vertical pad)
"""
return self._axes_pad
def set_aspect(self, aspect):
"set aspect"
self._divider.set_aspect(aspect)
def get_aspect(self):
"get aspect"
return self._divider.get_aspect()
def set_label_mode(self, mode):
"set label_mode"
if mode == "all":
for ax in self.axes_all:
_tick_only(ax, False, False)
elif mode == "L":
# left-most axes
for ax in self.axes_column[0][:-1]:
_tick_only(ax, bottom_on=True, left_on=False)
# lower-left axes
ax = self.axes_column[0][-1]
_tick_only(ax, bottom_on=False, left_on=False)
for col in self.axes_column[1:]:
# axes with no labels
for ax in col[:-1]:
_tick_only(ax, bottom_on=True, left_on=True)
# bottom
ax = col[-1]
_tick_only(ax, bottom_on=False, left_on=True)
elif mode == "1":
for ax in self.axes_all:
_tick_only(ax, bottom_on=True, left_on=True)
ax = self.axes_llc
_tick_only(ax, bottom_on=False, left_on=False)
def get_divider(self):
return self._divider
def set_axes_locator(self, locator):
self._divider.set_locator(locator)
def get_axes_locator(self):
return self._divider.get_locator()
def get_vsize_hsize(self):
return self._divider.get_vsize_hsize()
# from axes_size import AddList
# vsize = AddList(self._divider.get_vertical())
# hsize = AddList(self._divider.get_horizontal())
# return vsize, hsize
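# A minimal, hedged usage sketch for ``Grid`` (the plotted data is arbitrary;
# fuller demos live in the ``if 0:`` blocks at the end of this module):
#
#   fig = plt.figure()
#   grid = Grid(fig, 111, nrows_ncols=(2, 3), axes_pad=0.1, label_mode="L")
#   for ax in grid:
#       ax.plot([0, 1], [0, 1])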
class ImageGrid(Grid):
"""
A class that creates a grid of Axes. In matplotlib, the axes
location (and size) is specified in the normalized figure
    coordinates. This may not be ideal for images that need to be
    displayed with a given aspect ratio. For example, displaying
    images of the same size with some fixed padding between them cannot
    be easily done in matplotlib. ImageGrid is used in such cases.
"""
_defaultCbarAxesClass = CbarAxes
def __init__(self, fig,
rect,
nrows_ncols,
ngrids=None,
direction="row",
axes_pad=0.02,
add_all=True,
share_all=False,
aspect=True,
label_mode="L",
cbar_mode=None,
cbar_location="right",
cbar_pad=None,
cbar_size="5%",
cbar_set_cax=True,
axes_class=None,
):
"""
Build an :class:`ImageGrid` instance with a grid nrows*ncols
:class:`~matplotlib.axes.Axes` in
:class:`~matplotlib.figure.Figure` *fig* with
*rect=[left, bottom, width, height]* (in
:class:`~matplotlib.figure.Figure` coordinates) or
the subplot position code (e.g., "121").
Optional keyword arguments:
================ ======== =========================================
Keyword Default Description
================ ======== =========================================
direction "row" [ "row" | "column" ]
axes_pad 0.02 float| pad between axes given in inches
or tuple-like of floats,
(horizontal padding, vertical padding)
add_all True [ True | False ]
share_all False [ True | False ]
aspect True [ True | False ]
label_mode "L" [ "L" | "1" | "all" ]
cbar_mode None [ "each" | "single" | "edge" ]
cbar_location "right" [ "left" | "right" | "bottom" | "top" ]
cbar_pad None
cbar_size "5%"
cbar_set_cax True [ True | False ]
axes_class None a type object which must be a subclass
of axes_grid's subclass of
:class:`~matplotlib.axes.Axes`
================ ======== =========================================
*cbar_set_cax* : if True, each axes in the grid has a cax
          attribute that is bound to the associated cbar_axes.
"""
self._nrows, self._ncols = nrows_ncols
if ngrids is None:
ngrids = self._nrows * self._ncols
else:
if (ngrids > self._nrows * self._ncols) or (ngrids <= 0):
raise Exception("")
self.ngrids = ngrids
axes_pad = _extend_axes_pad(axes_pad)
self._axes_pad = axes_pad
self._colorbar_mode = cbar_mode
self._colorbar_location = cbar_location
if cbar_pad is None:
# horizontal or vertical arrangement?
if cbar_location in ("left", "right"):
self._colorbar_pad = axes_pad[0]
else:
self._colorbar_pad = axes_pad[1]
else:
self._colorbar_pad = cbar_pad
self._colorbar_size = cbar_size
self._init_axes_pad(axes_pad)
if direction not in ["column", "row"]:
raise Exception("")
self._direction = direction
if axes_class is None:
axes_class = self._defaultLocatableAxesClass
axes_class_args = {}
else:
if isinstance(axes_class, maxes.Axes):
axes_class_args = {}
else:
axes_class, axes_class_args = axes_class
self.axes_all = []
self.axes_column = [[] for _ in range(self._ncols)]
self.axes_row = [[] for _ in range(self._nrows)]
self.cbar_axes = []
h = []
v = []
if cbook.is_string_like(rect) or cbook.is_numlike(rect):
self._divider = SubplotDivider(fig, rect, horizontal=h, vertical=v,
aspect=aspect)
elif isinstance(rect, SubplotSpec):
self._divider = SubplotDivider(fig, rect, horizontal=h, vertical=v,
aspect=aspect)
elif len(rect) == 3:
kw = dict(horizontal=h, vertical=v, aspect=aspect)
self._divider = SubplotDivider(fig, *rect, **kw)
elif len(rect) == 4:
self._divider = Divider(fig, rect, horizontal=h, vertical=v,
aspect=aspect)
else:
raise Exception("")
rect = self._divider.get_position()
# reference axes
self._column_refax = [None for _ in range(self._ncols)]
self._row_refax = [None for _ in range(self._nrows)]
self._refax = None
for i in range(self.ngrids):
col, row = self._get_col_row(i)
if share_all:
if self.axes_all:
sharex = self.axes_all[0]
sharey = self.axes_all[0]
else:
sharex = None
sharey = None
else:
sharex = self._column_refax[col]
sharey = self._row_refax[row]
ax = axes_class(fig, rect, sharex=sharex, sharey=sharey,
**axes_class_args)
self.axes_all.append(ax)
self.axes_column[col].append(ax)
self.axes_row[row].append(ax)
if share_all:
if self._refax is None:
self._refax = ax
if sharex is None:
self._column_refax[col] = ax
if sharey is None:
self._row_refax[row] = ax
cax = self._defaultCbarAxesClass(fig, rect,
orientation=self._colorbar_location)
self.cbar_axes.append(cax)
self.axes_llc = self.axes_column[0][-1]
self._update_locators()
if add_all:
for ax in self.axes_all+self.cbar_axes:
fig.add_axes(ax)
if cbar_set_cax:
if self._colorbar_mode == "single":
for ax in self.axes_all:
ax.cax = self.cbar_axes[0]
else:
for ax, cax in zip(self.axes_all, self.cbar_axes):
ax.cax = cax
self.set_label_mode(label_mode)
def _update_locators(self):
h = []
v = []
h_ax_pos = []
h_cb_pos = []
if (self._colorbar_mode == "single" and
self._colorbar_location in ('left', 'bottom')):
if self._colorbar_location == "left":
#sz = Size.Fraction(Size.AxesX(self.axes_llc), self._nrows)
sz = Size.Fraction(self._nrows, Size.AxesX(self.axes_llc))
h.append(Size.from_any(self._colorbar_size, sz))
h.append(Size.from_any(self._colorbar_pad, sz))
locator = self._divider.new_locator(nx=0, ny=0, ny1=-1)
elif self._colorbar_location == "bottom":
#sz = Size.Fraction(Size.AxesY(self.axes_llc), self._ncols)
sz = Size.Fraction(self._ncols, Size.AxesY(self.axes_llc))
v.append(Size.from_any(self._colorbar_size, sz))
v.append(Size.from_any(self._colorbar_pad, sz))
locator = self._divider.new_locator(nx=0, nx1=-1, ny=0)
for i in range(self.ngrids):
self.cbar_axes[i].set_visible(False)
self.cbar_axes[0].set_axes_locator(locator)
self.cbar_axes[0].set_visible(True)
for col, ax in enumerate(self.axes_row[0]):
if h:
h.append(self._horiz_pad_size) # Size.Fixed(self._axes_pad))
if ax:
sz = Size.AxesX(ax, aspect="axes", ref_ax=self.axes_all[0])
else:
sz = Size.AxesX(self.axes_all[0],
aspect="axes", ref_ax=self.axes_all[0])
if (self._colorbar_mode == "each" or
(self._colorbar_mode == 'edge' and
col == 0)) and self._colorbar_location == "left":
h_cb_pos.append(len(h))
h.append(Size.from_any(self._colorbar_size, sz))
h.append(Size.from_any(self._colorbar_pad, sz))
h_ax_pos.append(len(h))
h.append(sz)
if ((self._colorbar_mode == "each" or
(self._colorbar_mode == 'edge' and
col == self._ncols - 1)) and
self._colorbar_location == "right"):
h.append(Size.from_any(self._colorbar_pad, sz))
h_cb_pos.append(len(h))
h.append(Size.from_any(self._colorbar_size, sz))
v_ax_pos = []
v_cb_pos = []
for row, ax in enumerate(self.axes_column[0][::-1]):
if v:
v.append(self._vert_pad_size) # Size.Fixed(self._axes_pad))
if ax:
sz = Size.AxesY(ax, aspect="axes", ref_ax=self.axes_all[0])
else:
sz = Size.AxesY(self.axes_all[0],
aspect="axes", ref_ax=self.axes_all[0])
if (self._colorbar_mode == "each" or
(self._colorbar_mode == 'edge' and
row == 0)) and self._colorbar_location == "bottom":
v_cb_pos.append(len(v))
v.append(Size.from_any(self._colorbar_size, sz))
v.append(Size.from_any(self._colorbar_pad, sz))
v_ax_pos.append(len(v))
v.append(sz)
if ((self._colorbar_mode == "each" or
(self._colorbar_mode == 'edge' and
row == self._nrows - 1)) and
self._colorbar_location == "top"):
v.append(Size.from_any(self._colorbar_pad, sz))
v_cb_pos.append(len(v))
v.append(Size.from_any(self._colorbar_size, sz))
for i in range(self.ngrids):
col, row = self._get_col_row(i)
#locator = self._divider.new_locator(nx=4*col,
# ny=2*(self._nrows - row - 1))
locator = self._divider.new_locator(nx=h_ax_pos[col],
ny=v_ax_pos[self._nrows-1-row])
self.axes_all[i].set_axes_locator(locator)
if self._colorbar_mode == "each":
if self._colorbar_location in ("right", "left"):
locator = self._divider.new_locator(
nx=h_cb_pos[col], ny=v_ax_pos[self._nrows - 1 - row])
elif self._colorbar_location in ("top", "bottom"):
locator = self._divider.new_locator(
nx=h_ax_pos[col], ny=v_cb_pos[self._nrows - 1 - row])
self.cbar_axes[i].set_axes_locator(locator)
elif self._colorbar_mode == 'edge':
if ((self._colorbar_location == 'left' and col == 0) or
(self._colorbar_location == 'right'
and col == self._ncols-1)):
locator = self._divider.new_locator(
nx=h_cb_pos[0], ny=v_ax_pos[self._nrows -1 - row])
self.cbar_axes[row].set_axes_locator(locator)
elif ((self._colorbar_location == 'bottom' and
row == self._nrows - 1) or
(self._colorbar_location == 'top' and row == 0)):
locator = self._divider.new_locator(nx=h_ax_pos[col],
ny=v_cb_pos[0])
self.cbar_axes[col].set_axes_locator(locator)
if self._colorbar_mode == "single":
if self._colorbar_location == "right":
#sz = Size.Fraction(Size.AxesX(self.axes_llc), self._nrows)
sz = Size.Fraction(self._nrows, Size.AxesX(self.axes_llc))
h.append(Size.from_any(self._colorbar_pad, sz))
h.append(Size.from_any(self._colorbar_size, sz))
locator = self._divider.new_locator(nx=-2, ny=0, ny1=-1)
elif self._colorbar_location == "top":
#sz = Size.Fraction(Size.AxesY(self.axes_llc), self._ncols)
sz = Size.Fraction(self._ncols, Size.AxesY(self.axes_llc))
v.append(Size.from_any(self._colorbar_pad, sz))
v.append(Size.from_any(self._colorbar_size, sz))
locator = self._divider.new_locator(nx=0, nx1=-1, ny=-2)
if self._colorbar_location in ("right", "top"):
for i in range(self.ngrids):
self.cbar_axes[i].set_visible(False)
self.cbar_axes[0].set_axes_locator(locator)
self.cbar_axes[0].set_visible(True)
elif self._colorbar_mode == "each":
for i in range(self.ngrids):
self.cbar_axes[i].set_visible(True)
elif self._colorbar_mode == "edge":
if self._colorbar_location in ('right', 'left'):
count = self._nrows
else:
count = self._ncols
for i in range(count):
self.cbar_axes[i].set_visible(True)
for j in range(i + 1, self.ngrids):
self.cbar_axes[j].set_visible(False)
else:
for i in range(self.ngrids):
self.cbar_axes[i].set_visible(False)
self.cbar_axes[i].set_position([1., 1., 0.001, 0.001],
which="active")
self._divider.set_horizontal(h)
self._divider.set_vertical(v)
AxesGrid = ImageGrid
#if __name__ == "__main__":
if 0:
F = plt.figure(1, (7, 6))
F.clf()
F.subplots_adjust(left=0.15, right=0.9)
grid = Grid(F, 111, # similar to subplot(111)
nrows_ncols=(2, 2),
direction="row",
axes_pad = 0.05,
add_all=True,
label_mode = "1",
)
#if __name__ == "__main__":
if 0:
from .axes_divider import get_demo_image
F = plt.figure(1, (9, 3.5))
F.clf()
F.subplots_adjust(left=0.05, right=0.98)
grid = ImageGrid(F, 131, # similar to subplot(111)
nrows_ncols=(2, 2),
direction="row",
axes_pad = 0.05,
add_all=True,
label_mode = "1",
)
Z, extent = get_demo_image()
plt.ioff()
for i in range(4):
im = grid[i].imshow(Z, extent=extent, interpolation="nearest")
# This only affects axes in
# first column and second row as share_all = False.
grid.axes_llc.set_xticks([-2, 0, 2])
grid.axes_llc.set_yticks([-2, 0, 2])
plt.ion()
grid = ImageGrid(F, 132, # similar to subplot(111)
nrows_ncols=(2, 2),
direction="row",
axes_pad = 0.0,
add_all=True,
share_all=True,
label_mode = "1",
cbar_mode="single",
)
Z, extent = get_demo_image()
plt.ioff()
for i in range(4):
im = grid[i].imshow(Z, extent=extent, interpolation="nearest")
plt.colorbar(im, cax=grid.cbar_axes[0])
plt.setp(grid.cbar_axes[0].get_yticklabels(), visible=False)
# This affects all axes as share_all = True.
grid.axes_llc.set_xticks([-2, 0, 2])
grid.axes_llc.set_yticks([-2, 0, 2])
plt.ion()
grid = ImageGrid(F, 133, # similar to subplot(122)
nrows_ncols=(2, 2),
direction="row",
axes_pad = 0.1,
add_all=True,
label_mode = "1",
share_all = True,
cbar_location="top",
cbar_mode="each",
cbar_size="7%",
cbar_pad="2%",
)
plt.ioff()
for i in range(4):
im = grid[i].imshow(Z, extent=extent, interpolation="nearest")
plt.colorbar(im, cax=grid.cbar_axes[i],
orientation="horizontal")
grid.cbar_axes[i].xaxis.set_ticks_position("top")
plt.setp(grid.cbar_axes[i].get_xticklabels(), visible=False)
# This affects all axes as share_all = True.
grid.axes_llc.set_xticks([-2, 0, 2])
grid.axes_llc.set_yticks([-2, 0, 2])
plt.ion()
plt.draw()
| mit |
Delosari/dazer | bin/lib/Astro_Libraries/spectrum_fitting/import_functions.py | 1 | 34109 | import os
import sys
import numpy as np
import ConfigParser
from errno import ENOENT
from numpy import loadtxt
from pandas import read_excel, read_csv
from collections import OrderedDict
from scipy.interpolate import interp1d
from distutils.util import strtobool
from astropy.io import fits as astropyfits
# Function to create folders
def make_folder(folder_path):
#TODO This one is only valid for 2.7
#TODO add this one to your collection
try:
os.makedirs(folder_path)
except OSError:
if not os.path.isdir(folder_path):
raise
return
# Function to delete files
def silent_remove(filename_list):
for filename in filename_list:
try:
os.remove(filename)
except OSError as e: # this would be "except OSError, e:" before Python 2.6
if e.errno != ENOENT: # errno.ENOENT = no such file or directory
raise # re-raise exception if a different error occurred
# Sample data for FIT3D compilation
def example_data(data_folder):
arguments_dict = OrderedDict()
arguments_dict['script'] = 'auto_ssp_elines_rnd.py' # 0
arguments_dict['input_spec'] = 'NGC5947.spec_5.txt' # 1
arguments_dict['SSPs_lib'] = 'ssp_lib.fits,' + 'ssp_lib.fits' # 2
arguments_dict['output_file'] = 'auto_ssp.NGC5947.cen.only.out' # 3
arguments_dict['mask_file'] = 'mask_elines.txt' # 4
arguments_dict['conf_file'] = 'auto_ssp_V500_several_Hb.config' # 5
arguments_dict['plot_tag'] = 1 # 6
arguments_dict['min'] = -1 # 7
arguments_dict['max'] = 40 # 8
arguments_dict['wmin'] = '3850' # 9
arguments_dict['wmax'] = '6800' # 10
arguments_dict['z_elines_mask'] = 'emission_lines.txt' # 11
arguments_dict['input_z'] = 0.02 # 12
arguments_dict['delta_z'] = 0.001 # 13
arguments_dict['min_z'] = 0.015 # 14
arguments_dict['max_z'] = 0.025 # 15
arguments_dict['input_sigma'] = 2.0 # 16
arguments_dict['delta_sigma'] = 0.5 # 17
arguments_dict['min_sigma'] = 1 # 18
arguments_dict['max_sigma'] = 9 # 19
arguments_dict['input_Av'] = 0.5 # 20
arguments_dict['delta_Av'] = 0.1 # 21
arguments_dict['min_Av'] = 0.0 # 22
arguments_dict['max_Av'] = 1.6 # 23
return arguments_dict
# Function to check for nan entries
def check_missing_flux_values(flux):
# Evaluate the nan array
nan_idcs = np.isnan(flux)
nan_count = np.sum(nan_idcs)
    # Warn if any flux entries are missing
if nan_count > 0:
print '--WARNING: missing flux entries'
return
# Function to import configuration data
def parseObjData(file_address, sectionName, objData):
parser = ConfigParser.SafeConfigParser()
parser.optionxform = str
if os.path.isfile(file_address):
parser.read(file_address)
if not parser.has_section(sectionName):
parser.add_section(sectionName)
for key in objData.keys():
value = objData[key]
if value is not None:
if isinstance(value, list) or isinstance(value, np.ndarray):
value = ','.join(str(x) for x in value)
else:
value = str(value)
else:
value = ''
parser.set(sectionName, key, value)
with open(file_address, 'w') as f:
parser.write(f)
return
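# Hedged usage sketch (file name and keys are hypothetical): list or array
# values are flattened to comma-separated strings before being written.
#
#   objData = {'z_obj': 0.0045, 'input_lines': ['H1_4861A', 'O3_5007A']}
#   parseObjData('sample_objects.ini', 'object_1', objData)
#   # afterwards sample_objects.ini contains:
#   # [object_1]
#   # z_obj = 0.0045
#   # input_lines = H1_4861A,O3_5007A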
# Function to save data to configuration file section
def parseDataFile(file_address, section, data, type_data=None, key_suffix = ''):
# Check if file exists
if os.path.isfile(file_address):
cfg = ConfigParser.ConfigParser()
cfg.optionxform = str
cfg.read(file_address)
else:
exit('--WARNING: Default configuration could not be found exiting program\n-Missing file: {}'.format(file_address))
# Check section is in conf.ini else create it
if not cfg.has_section(section):
cfg.add_section(section)
# Change format to safe data in dictionary
for key in data:
value = data[key]
if type_data is not None:
# TODO add a protocol to infer best format to save data
if type_data is 'lists':
value = list(value)
value = ','.join(str(x) for x in value)
# try:
# confDict[option] = np.array(map(float, raw_list.split(',')))
# except:
# confDict[option] = np.array(map(str, raw_list.split(',')))
cfg.set(section, key + key_suffix, value)
with open(file_address, 'w') as f:
cfg.write(f)
return
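# Similar hedged sketch for parseDataFile (paths and keys are hypothetical);
# unlike parseObjData, the target file must already exist or the program exits.
#
#   parseDataFile('conf.ini', 'results', {'flux_Hbeta': [1.0, 0.05]},
#                 type_data='lists', key_suffix='_array')
#   # -> stores "flux_Hbeta_array = 1.0,0.05" under the [results] section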
# Class with tools to import SSPs libraries
class SspSynthesisImporter:
def __init__(self):
# ------------Configuration of Fit3D
self.sspSyn_commands_params = [
'script', # 0 python script name
'input_spec', # 1 input galactic spectrum name
'SSPs_lib', # 2 fits-table to use with python
'output_file', # 3 Reference name for output files
'mask_file', # 4 File with the spectrum region masks
'conf_file', # 5 Configuration file for the masks
'plot_tag', # 6 tag to launch the plotting
'min', # 7 Min flux for ploting
'max', # 8 Max flux for ploting
'wmin', # 9 Minimum wavelength for plotting
'wmax', # 10 Maximum wavelength for plotting
'z_elines_mask', # 11 Emission lines file
'input_z', # 12 Input redshift
'delta_z', # 13 Increments for redshift
'min_z', # 14 Minimum redshift
'max_z', # 15 Maximum redshift
'input_sigma', # 16 Input velocity dispersion
'delta_sigma', # 17 Increments for velocity dispersion
'min_sigma', # 18 Minimum velocity dispersion
'max_sigma', # 19 Maximum velocity dispersion
'input_Av', # 20 Input reddening
'delta_Av', # 21 Increments for reddening
'min_Av', # 22 Minimum reddening
'max_Av', # 23 Maximum reddening
]
# The first 4 lines in the configuration file describe the input
self.sspSyn_config_params = [['input_z', 'delta_z', 'min_z', 'max_z', 'DV', 'RV', 'DS', 'RS', 'MIN_W', 'MAX_W'],
# 12-16
['input_sigma', 'delta_sigma', 'min_sigma', 'max_sigma'],
# 17-20
['input_Av', 'delta_Av', 'min_Av', 'max_Av'],
# 21-24
['N_Systems'], # Number of SSP bases
['START_W', 'END_W', 'MASK_FILE', 'CONFIG_FILE', 'NPOLY', 'MASK_FILE_POLY',
'N_MIN_E', 'N_MAX_E'], # Bases config
['MIN_DELTA_CHISQ', 'MAX_NITER', 'CUT_MEDIAN_FLUX'],
['start_w_peak', 'end_w_peak'],
['wavelength_to_norm', 'width_AA', 'new_back_templates.fits']]
# Bases float indeces
self.idcs_floats = np.array([0, 1, 4, 6, 7])
# Emision lines mask columns headers
self.eline_mask_header = ['start_wave', 'end_wave', 'mask_file', 'mask_config_file', 'n_poly', 'mask_file_poly',
'n_min_e', 'n_max_e']
# Number of montercarlo iterations
self.n_mc = 30
# Initial value for the chiSq_min
self.chiSq_min = 1e12
return
def load_FIT3D_data(self, conf_file, data_folder=None):
# Check if we are executing from the folder file
data_folder = os.getcwd() + '/' if data_folder is None else data_folder
# Read parameters from command line
command_dict = self.load_FIT3D_command_params(data_folder=data_folder)
config_dict = self.load_FIT3D_config_file(conf_file)
# Update the fit configuration giving preference to the values from the command line
config_dict.update(command_dict)
# Load observational data and masks
config_dict = self.load_FIT3D_observational_fits(data_folder, config_dict)
# Prepare output files
output_root = config_dict['output_file'][:config_dict['output_file'].rfind('.')]
config_dict['single_output_file'] = '{rootname}_{file_code}.{ext}'.format(rootname=output_root,
file_code='single', ext='txt')
config_dict['coeffs_output_file'] = '{rootname}_{file_code}.{ext}'.format(rootname=output_root,
file_code='coeffs', ext='txt')
config_dict['spectrum_output_file'] = '{rootname}_{file_code}.{ext}'.format(rootname=output_root,
file_code='spec', ext='txt')
config_dict['em_lines_output_file'] = '{rootname}_{file_code}.{ext}'.format(rootname=output_root,
file_code='elines', ext='txt')
        # Delete these output files if they had been generated from a previous run #USEFULL_Function
silent_remove([config_dict['output_file'], config_dict['single_output_file'], config_dict['coeffs_output_file'],
config_dict['spectrum_output_file'], config_dict['em_lines_output_file']])
# Store folder with the data and configuration folder
config_dict['data_folder'] = data_folder
config_dict['conf_file'] = conf_file
config_dict['data_type'] = 'FIT3D'
return config_dict
def load_FIT3D_command_params(self, data_folder):
# Empty dictionary to store the data from the commands from the command line
command_dict = OrderedDict()
# Extract line command arguments
self.args_list = sys.argv
# Check if the minimum parameters have been introduced (WARNING: Need to convert these to the right units)
if len(self.args_list) > 7:
command_dict = OrderedDict(zip(self.sspSyn_commands_params[:len(self.args_list)], self.args_list))
else:
print '--Error: The input command must include all these arguments:'
print ', '.join(self.sspSyn_commands_params[:7])
# Currently run test example if not enought data is provided
print '---Using example data'
command_dict = example_data(data_folder=data_folder)
return command_dict
def load_FIT3D_config_file(self, config_file_address):
# Empty dictionary to store the data from the config file
fit_conf_dict = {}
# Read the configuration text file
with open(config_file_address) as conf_file:
conf_lines = conf_file.readlines()
# Read redshift, sigma and Av params rows
for i in range(3):
param_values = np.array(conf_lines[i].split(), dtype=float)
fit_conf_dict.update(zip(self.sspSyn_config_params[i], param_values))
# Read masks rows: 'START_W_n','END_W_n','MASK_FILE_n' ...
nLineMasks = int(conf_lines[3])
fit_conf_dict['nLineMasks'] = int(conf_lines[3])
for i in range(4, 4 + fit_conf_dict['nLineMasks']):
bases_key = 'base_{}'.format(i - 4)
param_values = np.array(conf_lines[i].split())
            # Convert numerical entries to float
param_values[0] = float(param_values[0])
param_values[1] = float(param_values[1])
param_values[4] = float(param_values[4])
param_values[6] = float(param_values[6])
param_values[7] = float(param_values[7])
fit_conf_dict[bases_key] = param_values
# Add ChiSq row (converting to float)
param_values = np.array(conf_lines[4 + nLineMasks].split(), dtype=float)
fit_conf_dict.update(zip(self.sspSyn_config_params[5], param_values))
# Add peak wavelength row (converting to float)
param_values = np.array(conf_lines[5 + nLineMasks].split(), dtype=float)
fit_conf_dict.update(zip(self.sspSyn_config_params[6], param_values))
# Normalizing row (if available) (converting to float)
if len(conf_lines) == 7 + nLineMasks:
param_values = np.array(conf_lines[6 + nLineMasks].split(), dtype=float)
fit_conf_dict.update(zip(self.sspSyn_config_params[7], param_values))
else:
fit_conf_dict['wave_norm'] = None
fit_conf_dict['w_wave_norm'] = None
fit_conf_dict['new_back_file'] = None
return fit_conf_dict
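    # Illustrative sketch (hypothetical values) of the plain-text configuration
    # parsed above, following the row groups in self.sspSyn_config_params:
    #   0.02 0.001 0.0 0.05 ...   -> input_z, delta_z, min_z, max_z, DV, RV, DS, RS, MIN_W, MAX_W
    #   50 5 10 300               -> input_sigma, delta_sigma, min_sigma, max_sigma
    #   0.5 0.1 0.0 1.6           -> input_Av, delta_Av, min_Av, max_Av
    #   2                         -> N_Systems (one bases/mask row follows per system)
    #   3800 6850 mask.txt ...    -> START_W, END_W, MASK_FILE, CONFIG_FILE, NPOLY, ...
    #   0.001 100 0.1             -> MIN_DELTA_CHISQ, MAX_NITER, CUT_MEDIAN_FLUX
    #   6550 6600                 -> start_w_peak, end_w_peak
    #   5400 20 new_back.fits     -> optional normalization row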
def load_FIT3D_mask(self, config_dict, obs_flux_resam):
obs_wave = config_dict['obs_wave']
# --------------Generating spectrum mask
# Load spectrum masks
mask_xmin, mask_xmax = loadtxt(config_dict['data_folder'] + config_dict['mask_file'], unpack=True)
# Load emission lines reference to generate artificial mask
emLine_wave = loadtxt(config_dict['data_folder'] + config_dict['z_elines_mask'], usecols=([0]), unpack=True)
emLine_mask_xmin = emLine_wave * (1 + config_dict['input_z']) - 4.0 * config_dict['input_sigma']
emLine_mask_xmax = emLine_wave * (1 + config_dict['input_z']) + 4.0 * config_dict['input_sigma']
        # First check non-zero entries
idx_mask_zero = (obs_flux_resam != 0)
# Pixels within the spectrum mask
idx_spec_mask = np.ones(len(obs_wave), dtype=bool)
for i in range(len(mask_xmin)):
idx_cur_spec_mask = (obs_wave > mask_xmin[i]) & (obs_wave < mask_xmax[i])
idx_spec_mask = idx_spec_mask & ~idx_cur_spec_mask
# Pixels within the emline mask
idx_emline_mask = np.ones(len(obs_wave), dtype=bool)
for i in range(len(emLine_wave)):
idx_cur_emline_mask = (obs_wave > emLine_mask_xmin[i]) & (obs_wave < emLine_mask_xmax[i])
idx_emline_mask = idx_emline_mask & ~idx_cur_emline_mask
# Recover wavelength limits for the masks
wmin_str, wmax_str = config_dict['wmin'].split(','), config_dict['wmax'].split(',')
wmin = float(wmin_str[0]) if len(wmin_str) == 2 else float(config_dict['wmin'])
wmax = float(wmax_str[0]) if len(wmax_str) == 2 else float(config_dict['wmax'])
idx_mask_wmin, idx_mask_wmax = (obs_wave > wmin), (obs_wave < wmax)
        # Combine individual indices into a global mask
print idx_mask_zero.shape
print idx_spec_mask.shape
print idx_emline_mask.shape
print idx_mask_wmax.shape
total_masks = idx_mask_zero & idx_spec_mask & idx_emline_mask & idx_mask_wmin & idx_mask_wmax
return total_masks
def load_FIT3D_observational_fits(self, data_folder, config_dict):
# --------------Read observational data
obs_data = loadtxt(data_folder + config_dict['input_spec'])
obs_wave = obs_data[:, 1]
obs_flux = obs_data[:, 2]
obs_fluxVar = obs_data[:, 3]
# Issues with spectra: nan entries
check_missing_flux_values(obs_flux)
# Get the error from the library fits
if obs_fluxVar is not None:
obs_flux_err = np.sqrt(abs(obs_fluxVar))
# Else calculate it from the spectrum
else:
obs_flux_err = np.sqrt(abs(obs_flux) / 10)
# Remove big error entries
median_err = np.median(obs_flux_err)
idx_big_err = (obs_flux_err > 1.5 * median_err)
obs_fluxErrAdj = np.copy(obs_flux_err)
obs_fluxErrAdj[idx_big_err] = 1.5 * median_err
# --------------Store data
config_dict['obs_wave'] = obs_wave
config_dict['obs_flux'] = obs_flux
config_dict['obs_flux_err'] = obs_flux_err
config_dict['obs_fluxErrAdj'] = obs_fluxErrAdj
config_dict['nObsPix'] = len(obs_flux)
return config_dict
def import_Fit3D_ssplibrary(self, ssp_file_address):
# Dictionary to store the data
ssp_lib_dict = {}
fluxBases, hdrBases = astropyfits.getdata(ssp_file_address, 0, header=True)
fluxBases = np.asfortranarray(fluxBases)
nBases, nPixelsBases = fluxBases.shape
crpix, cdelt, crval = hdrBases['CRPIX1'], hdrBases['CDELT1'], hdrBases['CRVAL1']
        pixArray = np.arange(0, nPixelsBases) # WARNING should this arange start at one?
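        # FITS WCS pixel coordinates are 1-based, which is why the dispersion
        # solution below uses (pixArray + 1 - crpix); starting pixArray at zero
        # is therefore consistent with the standard CRVAL/CDELT/CRPIX convention.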
basesWavelength = (crval + cdelt * (pixArray + 1 - crpix))
# Extract age and metallicity from the bases names
Z_vector, age_vector = np.empty(nBases), np.empty(nBases)
for i in range(nBases):
header_code = 'NAME{}'.format(i)
            # Read metallicity and age from the headers list
base_keyname = hdrBases[header_code]
age_str = base_keyname[9:base_keyname.find('_z')]
metal_str = base_keyname[base_keyname.find('_z') + 2:base_keyname.rfind('.')]
age_factor = 1000.0 if 'Myr' in age_str else 1
age_vector[i] = float(age_str[:-3]) / age_factor
Z_vector[i] = float('0.' + metal_str)
        # Store library data in a dictionary
ssp_lib_dict['crpix_bases'] = crpix
ssp_lib_dict['cdelt_bases'] = cdelt
ssp_lib_dict['crval_bases'] = crval
ssp_lib_dict['basesWave'] = basesWavelength
ssp_lib_dict['nBases'] = nBases
ssp_lib_dict['nPixBases_max'] = nPixelsBases
ssp_lib_dict['fluxBases'] = fluxBases
ssp_lib_dict['hdrBases'] = hdrBases
ssp_lib_dict['ageBases'] = age_vector
ssp_lib_dict['zBases'] = Z_vector
# ssp_lib_dict['bases_one_array'] = ones(nBases)
return ssp_lib_dict
def import_STARLIGHT_ssplibrary(self, bases_folder, libraries_file_list):
print '\n--Importing STARLIGHT library'
print '---Bases file: {}'.format(libraries_file_list)
print '---Bases folder: {}'.format(bases_folder)
# Dictionary to store the data
ssp_lib_dict = {}
columns_names = ['file_name', 'age_yr', 'z_star', 'bases_nickname', 'f_star', 'YAV_flag', 'alpha/Fe']
bases_df = read_csv(libraries_file_list, delim_whitespace=True, names=columns_names, skiprows=1)
# Initial pass to check the biggest size
nBases = len(bases_df.index)
max_nPixelsBases = 0
        # Empty containers to store the data
waveBases_orig = []
fluxBases_orig = []
Z_vector, age_vector = np.empty(nBases), np.empty(nBases)
for i in range(nBases):
bases_file = bases_folder + bases_df.iloc[i]['file_name']
wave_base_i, flux_base_i = loadtxt(bases_file, unpack=True)
            # Original wavelength range and fluxes from the bases; they may have different wavelength ranges
            waveBases_orig.append(wave_base_i)  # Lists are used because the bases may not share the same length
            fluxBases_orig.append(flux_base_i)
# Interpolate the bases to observed wavelength resolution (1 angstrom per pixel is the current rule)
age_vector[i] = bases_df.iloc[i]['age_yr']
Z_vector[i] = bases_df.iloc[i]['z_star']
        ssp_lib_dict['basesWave'] = waveBases_orig  # List of arrays: bases may not share the same wavelength grid
ssp_lib_dict['nBases'] = nBases
ssp_lib_dict['nPixBases_max'] = max_nPixelsBases
        ssp_lib_dict['fluxBases'] = fluxBases_orig  # List of arrays: bases may not share the same length
ssp_lib_dict['ageBases'] = age_vector
ssp_lib_dict['zBases'] = Z_vector
# ssp_lib_dict['bases_one_array'] = ones(nBases)
print '--Library imported'
return ssp_lib_dict
# Class with SpecSyzer dataloading tools
class ImportModelData(SspSynthesisImporter):
def __init__(self, confFolder):
# Class with tools to import starlight bases
SspSynthesisImporter.__init__(self)
# Load default configuration file
self.config = self.load_confFile(confFolder, 'config.ini')
# Define default folders
self.dataFolder = os.path.join(os.path.expanduser('~'), self.config['inference_folder'])
self.inputsFolder = os.path.join(self.dataFolder, self.config['input_data_folder'])
self.outputsFolder = os.path.join(self.dataFolder, self.config['output_data_folder'])
        self.externalDataFolder = os.path.join(confFolder, self.config['external_data_folder'])  # TODO this declaration is not portable across operating systems; try pathlib
self.linesFormatDf = os.path.join(confFolder, self.config['external_data_folder'])
self.configFolder = os.path.join(confFolder, 'config.ini')
self.linesDb = read_excel(os.path.join(self.externalDataFolder, self.config['linesData_file']), sheet_name=0, header=0, index_col=0)
def load_confFile(self, root_folder, confFile):
# Configuration file address
file_address = '{}/{}'.format(root_folder, confFile)
# Check if file exists
if os.path.isfile(file_address):
cfg = ConfigParser.ConfigParser()
cfg.optionxform = str
cfg.read(file_address)
else:
            exit('--WARNING: Default configuration could not be found, exiting program')
# Loop through configuration file sections and merge into a dictionary
confDict = dict(cfg.items('conf_entries'))
confDict['sections'] = cfg.sections()
for i in range(1, len(cfg.sections())):
section = cfg.sections()[i]
confDict[section] = cfg.options(section)
for option in cfg.options(section):
if (option in confDict['string_conf']) or ('_folder' in option) or ('_file' in option):
confDict[option] = cfg.get(section, option)
elif '_check' in option:
confDict[option] = cfg.getboolean(section, option)
elif (option in confDict['list_conf']) or ('_parameters' in option) or ('_prior' in option) or ('_list' in option) or ('_coeffs' in option):
raw_list = cfg.get(section, option)
# Special entry
                    if option == 'input_lines':
                        if raw_list == 'all':
confDict[option] = raw_list
else:
confDict[option] = np.array(map(str, raw_list.split(',')))
# By default try to read as a list of floats else strings
else:
try:
confDict[option] = np.array(map(float, raw_list.split(',')))
except:
confDict[option] = np.array(map(str, raw_list.split(',')))
# By default read as a float
else:
confDict[option] = cfg.getfloat(section, option)
# Include configuration file in the dictionary
confDict['confAddress'] = file_address
return confDict
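    # Illustrative config.ini sketch (the section names below are hypothetical;
    # the option names are the ones actually read elsewhere in this class).
    # Options ending in '_folder'/'_file' are kept as strings, '_check' as
    # booleans, '_list'/'_parameters'/'_prior'/'_coeffs' as comma-separated
    # arrays, and anything else is read as a float:
    #   [conf_entries]
    #   string_conf = ...
    #   list_conf = ...
    #   [file_structure]
    #   inference_folder = inference_output
    #   input_data_folder = inputs
    #   output_data_folder = outputs
    #   external_data_folder = literature_data
    #   linesData_file = lines_data.xlsx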
def load_obsData(self, obsFile=None, objName=None):
# TODO this should go into the master configuration
list_parameters = ['input_lines', 'Av_prefit','sigma_star_prefit', 'coeffsPop_prefit', 'coeffsPopErr_prefit', 'wavelengh_limits', 'norm_interval'] #also all 'param_prior'
boolean_parameters = ['Normalized_by_Hbeta']
string_parameters = ['address_lines_log', 'address_spectrum', 'address_obs_mask', 'obsFile', 'objName']
# ----Load the obj data
if obsFile is not None:
cfg = ConfigParser.SafeConfigParser()
cfg.optionxform = str
cfg.read(obsFile)
            # If no section is provided we assume the file only has one and it gives us the properties of the observation
if objName is None:
                objName = cfg.sections()[0]  # take the name of the first (only) section
# Dictionary with the observation data
obj_data = dict(cfg.items(objName))
obj_data['obsFile'] = obsFile
obj_data['objName'] = objName
#Recover data from previous fits
results_section = objName + '_results'
if cfg.has_section(results_section):
prefit_data = dict(cfg.items(results_section))
obj_data.update(prefit_data)
else:
# Dictionary with the observation data # TODO This does not work so well
obj_data = locals()
# Convert to the right format # TODO Add security warnings for wrong data
for key in obj_data.keys():
# Empty variable
if obj_data[key] == '':
obj_data[key] = None
# None variable
elif obj_data[key] is None:
obj_data[key] = None
            # Arrays (the last condition, a comma in the value, overrides the parameter lists)
elif ',' in obj_data[key]:
if (key in list_parameters) or ('_prior' in key) or ('_true' in key) or (',' in obj_data[key]):
if key in ['input_lines']:
if obj_data[key] == 'all':
obj_data[key] = 'all'
else:
obj_data[key] = np.array(map(str, obj_data[key].split(',')))
else:
newArray = []
textArrays = obj_data[key].split(',')
for item in textArrays:
convertValue = float(item) if item != 'None' else np.nan
newArray.append(convertValue)
obj_data[key] = np.array(newArray)
# Boolean
elif (key in boolean_parameters) or ('_check' in key):
obj_data[key] = strtobool(obj_data[key]) == 1
            # Remaining entries are floats unless they are listed as strings
elif key not in string_parameters:
obj_data[key] = float(obj_data[key])
# #Unrecognize object function
# else:
# print 'WARNING: Parameter {} in {} not recognize. Exiting code'.format(key, obsFile)
# exit()
        # ----Load the obj spectrum #TODO read this one using pandas so you can check if there is a third column for the error
obj_data['obs_wavelength'], obj_data['obs_flux'] = loadtxt(obj_data['address_spectrum'], usecols=(0, 1), unpack=True)
# ----Load obj lines log # TODO update code to use address_lines_log
obj_data['obj_lines_file'] = obj_data['address_lines_log']
return obj_data
def import_optical_depth_coeff_table(self, file_address):
Data_dict = OrderedDict()
opticalDepthCoeffs_df = read_csv(file_address, delim_whitespace=True, header=0)
opticalDepthCoeffs = {}
for column in opticalDepthCoeffs_df.columns:
opticalDepthCoeffs[column] = opticalDepthCoeffs_df[column].values
return opticalDepthCoeffs
def load_ssp_library(self, ssp_lib_type, data_folder=None, data_file=None, wavelengh_limits=None, resample_inc=None, norm_interval=None):
# TODO In here we need to add a test sample library
# Store stellar base type
sspLib_dict = {'data_type': ssp_lib_type}
# Import the base type
if ssp_lib_type == 'FIT3D':
# Check if more files are being introduced
if ',' in data_file:
                ssp_lib1, ssp_lib2 = data_file.split(',')  # Currently we are only using the first one (the big one)
else:
ssp_lib1 = data_file
sspLib_dict = self.import_Fit3D_ssplibrary(data_folder + ssp_lib1)
elif ssp_lib_type == 'starlight':
sspLib_dict = self.import_STARLIGHT_ssplibrary(data_folder, data_file)
# Store stellar base type
sspLib_dict['data_type'] = ssp_lib_type
        # Trim, resample and normalize the ssp library if required
if wavelengh_limits or resample_inc or norm_interval:
self.treat_input_spectrum(sspLib_dict, sspLib_dict['basesWave'], sspLib_dict['fluxBases'], wavelengh_limits,
resample_inc, norm_interval)
return sspLib_dict
def treat_input_spectrum(self, output_dict, spec_wave, spec_flux, wavelengh_limits=None, resample_inc=None, norm_interval=None):
        # TODO we should remove the nBases requirement with an approach that just reads the number of dimensions
# Store input values
output_dict['wavelengh_limits'] = wavelengh_limits
output_dict['resample_inc'] = resample_inc
output_dict['norm_interval'] = norm_interval
# Special case using 0, -1 indexing
if wavelengh_limits is not None:
            if (wavelengh_limits[0] != 0) and (wavelengh_limits[-1] != -1):  # check the -1 sentinel on the last element
inputWaveLimits = wavelengh_limits
else:
inputWaveLimits = wavelengh_limits
if wavelengh_limits[0] == 0:
inputWaveLimits[0] = int(np.ceil(spec_wave[0]) + 1)
if wavelengh_limits[-1] == -1:
inputWaveLimits[-1] = int(np.floor(spec_wave[-1]) - 1)
# Resampling the spectra
if resample_inc is not None:
wave_resam = np.arange(inputWaveLimits[0], inputWaveLimits[-1], resample_inc, dtype=float)
            # Loop through the fluxes (in the case of the bases it is assumed they may have different wavelength ranges)
if isinstance(spec_flux, list):
flux_resam = np.empty((output_dict['nBases'], len(wave_resam)))
for i in range(output_dict['nBases']):
flux_resam[i, :] = interp1d(spec_wave[i], spec_flux[i], bounds_error=True)(wave_resam)
# In case only one dimension
elif spec_flux.ndim == 1:
flux_resam = interp1d(spec_wave, spec_flux, bounds_error=True)(wave_resam)
output_dict['wave_resam'] = wave_resam
output_dict['flux_resam'] = flux_resam
else:
output_dict['wave_resam'] = spec_wave
output_dict['flux_resam'] = spec_flux
# Normalizing the spectra
if norm_interval is not None:
            # Loop through the fluxes (in the case of the bases it is assumed they may have different wavelength ranges)
if isinstance(spec_flux, list):
normFlux_coeff = np.empty(output_dict['nBases'])
flux_norm = np.empty((output_dict['nBases'], len(wave_resam)))
for i in range(output_dict['nBases']):
idx_Wavenorm_min, idx_Wavenorm_max = np.searchsorted(spec_wave[i], norm_interval)
normFlux_coeff[i] = np.mean(spec_flux[i][idx_Wavenorm_min:idx_Wavenorm_max])
flux_norm[i] = output_dict['flux_resam'][i] / normFlux_coeff[i]
elif spec_flux.ndim == 1:
idx_Wavenorm_min, idx_Wavenorm_max = np.searchsorted(spec_wave, norm_interval)
normFlux_coeff = np.mean(spec_flux[idx_Wavenorm_min:idx_Wavenorm_max])
flux_norm = output_dict['flux_resam'] / normFlux_coeff
output_dict['flux_norm'] = flux_norm
output_dict['normFlux_coeff'] = normFlux_coeff
else:
output_dict['flux_norm'] = output_dict['flux_resam']
output_dict['normFlux_coeff'] = 1.0
return
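    # Illustrative usage sketch (hypothetical values): resample a spectrum to
    # 1 Angstrom per pixel between 4000 and 6900 A and normalize it over the
    # 5100-5150 A interval:
    #   self.treat_input_spectrum(out_dict, wave, flux,
    #                             wavelengh_limits=[4000, 6900],
    #                             resample_inc=1, norm_interval=(5100, 5150))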
def generate_object_mask(self, linesDf, wavelength, linelabels):
# TODO This will not work for a redshifted lines log
idcs_lineMasks = linesDf.index.isin(linelabels)
idcs_spectrumMasks = ~linesDf.index.isin(linelabels)
        # Matrix mask for integrating the emission lines
n_lineMasks = idcs_lineMasks.sum()
self.boolean_matrix = np.zeros((n_lineMasks, wavelength.size), dtype=bool)
        # Array with the line wavelength resolution, filled with a default value (because there are lines beyond the continuum range)
self.lineRes = np.ones(n_lineMasks) * (wavelength[1] - wavelength[0])
# Total mask for valid regions in the spectrum
n_objMasks = idcs_spectrumMasks.sum()
self.int_mask = np.ones(wavelength.size, dtype=bool)
self.object_mask = np.ones(wavelength.size, dtype=bool)
# Loop through the emission lines
wmin, wmax = linesDf['w3'].loc[idcs_lineMasks].values, linesDf['w4'].loc[idcs_lineMasks].values
idxMin, idxMax = np.searchsorted(wavelength, [wmin, wmax])
for i in range(n_lineMasks):
            if not np.isnan(wmin[i]) and not np.isnan(wmax[i]) and (wmax[i] < wavelength[-1]): # We need this for lines beyond continuum range #TODO propose better
w2, w3 = wavelength[idxMin[i]], wavelength[idxMax[i]]
idx_currentMask = (wavelength >= w2) & (wavelength <= w3)
self.boolean_matrix[i, :] = idx_currentMask
self.int_mask = self.int_mask & ~idx_currentMask
self.lineRes[i] = wavelength[idxMax[i]] - wavelength[idxMax[i] - 1]
# Loop through the object masks
wmin, wmax = linesDf['w3'].loc[idcs_spectrumMasks].values, linesDf['w4'].loc[idcs_spectrumMasks].values
idxMin, idxMax = np.searchsorted(wavelength, [wmin, wmax])
for i in range(n_objMasks):
if not np.isnan(wmin[i]) and not np.isnan(wmax[i]) and (wmax[i] < wavelength[-1]):
w2, w3 = wavelength[idxMin[i]], wavelength[idxMax[i]]
idx_currentMask = (wavelength >= w2) & (wavelength <= w3)
self.int_mask = self.int_mask & ~idx_currentMask
self.object_mask = self.object_mask & ~idx_currentMask
return
| mit |
ueshin/apache-spark | python/pyspark/pandas/tests/test_dataframe_spark_io.py | 14 | 19999 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from distutils.version import LooseVersion
import unittest
import glob
import os
import numpy as np
import pandas as pd
import pyarrow as pa
from pyspark import pandas as ps
from pyspark.testing.pandasutils import PandasOnSparkTestCase, TestUtils
class DataFrameSparkIOTest(PandasOnSparkTestCase, TestUtils):
"""Test cases for big data I/O using Spark."""
@property
def test_column_order(self):
return ["i32", "i64", "f", "bhello"]
@property
def test_pdf(self):
pdf = pd.DataFrame(
{
"i32": np.arange(20, dtype=np.int32) % 3,
"i64": np.arange(20, dtype=np.int64) % 5,
"f": np.arange(20, dtype=np.float64),
"bhello": np.random.choice(["hello", "yo", "people"], size=20).astype("O"),
},
columns=self.test_column_order,
index=np.random.rand(20),
)
return pdf
def test_parquet_read(self):
with self.temp_dir() as tmp:
data = self.test_pdf
self.spark.createDataFrame(data, "i32 int, i64 long, f double, bhello string").coalesce(
1
).write.parquet(tmp, mode="overwrite")
def check(columns, expected):
if LooseVersion("0.21.1") <= LooseVersion(pd.__version__):
expected = pd.read_parquet(tmp, columns=columns)
actual = ps.read_parquet(tmp, columns=columns)
self.assertPandasEqual(expected, actual.to_pandas())
check(None, data)
check(["i32", "i64"], data[["i32", "i64"]])
check(["i64", "i32"], data[["i64", "i32"]])
if LooseVersion(pa.__version__) < LooseVersion("1.0.0"):
# TODO: `pd.read_parquet()` changed the behavior due to PyArrow 1.0.0.
# We might want to adjust the behavior. Let's see how pandas handles it.
check(("i32", "i64"), data[["i32", "i64"]])
check(["a", "b", "i32", "i64"], data[["i32", "i64"]])
check([], pd.DataFrame([]))
check(["a"], pd.DataFrame([]))
check("i32", pd.DataFrame([]))
check("float", data[["f"]])
# check with pyspark patch.
if LooseVersion("0.21.1") <= LooseVersion(pd.__version__):
expected = pd.read_parquet(tmp)
else:
expected = data
actual = ps.read_parquet(tmp)
self.assertPandasEqual(expected, actual.to_pandas())
# When index columns are known
pdf = self.test_pdf
expected = ps.DataFrame(pdf)
expected_idx = expected.set_index("bhello")[["f", "i32", "i64"]]
actual_idx = ps.read_parquet(tmp, index_col="bhello")[["f", "i32", "i64"]]
self.assert_eq(
actual_idx.sort_values(by="f").to_spark().toPandas(),
expected_idx.sort_values(by="f").to_spark().toPandas(),
)
def test_parquet_read_with_pandas_metadata(self):
with self.temp_dir() as tmp:
expected1 = self.test_pdf
path1 = "{}/file1.parquet".format(tmp)
expected1.to_parquet(path1)
self.assert_eq(ps.read_parquet(path1, pandas_metadata=True), expected1)
expected2 = expected1.reset_index()
path2 = "{}/file2.parquet".format(tmp)
expected2.to_parquet(path2)
self.assert_eq(ps.read_parquet(path2, pandas_metadata=True), expected2)
expected3 = expected2.set_index("index", append=True)
path3 = "{}/file3.parquet".format(tmp)
expected3.to_parquet(path3)
self.assert_eq(ps.read_parquet(path3, pandas_metadata=True), expected3)
def test_parquet_write(self):
with self.temp_dir() as tmp:
pdf = self.test_pdf
expected = ps.DataFrame(pdf)
# Write out partitioned by one column
expected.to_parquet(tmp, mode="overwrite", partition_cols="i32")
# Reset column order, as once the data is written out, Spark rearranges partition
# columns to appear first.
actual = ps.read_parquet(tmp)
self.assertFalse((actual.columns == self.test_column_order).all())
actual = actual[self.test_column_order]
self.assert_eq(
actual.sort_values(by="f").to_spark().toPandas(),
expected.sort_values(by="f").to_spark().toPandas(),
)
# Write out partitioned by two columns
expected.to_parquet(tmp, mode="overwrite", partition_cols=["i32", "bhello"])
# Reset column order, as once the data is written out, Spark rearranges partition
# columns to appear first.
actual = ps.read_parquet(tmp)
self.assertFalse((actual.columns == self.test_column_order).all())
actual = actual[self.test_column_order]
self.assert_eq(
actual.sort_values(by="f").to_spark().toPandas(),
expected.sort_values(by="f").to_spark().toPandas(),
)
def test_table(self):
with self.table("test_table"):
pdf = self.test_pdf
expected = ps.DataFrame(pdf)
# Write out partitioned by one column
expected.spark.to_table("test_table", mode="overwrite", partition_cols="i32")
# Reset column order, as once the data is written out, Spark rearranges partition
# columns to appear first.
actual = ps.read_table("test_table")
self.assertFalse((actual.columns == self.test_column_order).all())
actual = actual[self.test_column_order]
self.assert_eq(
actual.sort_values(by="f").to_spark().toPandas(),
expected.sort_values(by="f").to_spark().toPandas(),
)
# Write out partitioned by two columns
expected.to_table("test_table", mode="overwrite", partition_cols=["i32", "bhello"])
# Reset column order, as once the data is written out, Spark rearranges partition
# columns to appear first.
actual = ps.read_table("test_table")
self.assertFalse((actual.columns == self.test_column_order).all())
actual = actual[self.test_column_order]
self.assert_eq(
actual.sort_values(by="f").to_spark().toPandas(),
expected.sort_values(by="f").to_spark().toPandas(),
)
# When index columns are known
expected_idx = expected.set_index("bhello")[["f", "i32", "i64"]]
actual_idx = ps.read_table("test_table", index_col="bhello")[["f", "i32", "i64"]]
self.assert_eq(
actual_idx.sort_values(by="f").to_spark().toPandas(),
expected_idx.sort_values(by="f").to_spark().toPandas(),
)
expected_idx = expected.set_index(["bhello"])[["f", "i32", "i64"]]
actual_idx = ps.read_table("test_table", index_col=["bhello"])[["f", "i32", "i64"]]
self.assert_eq(
actual_idx.sort_values(by="f").to_spark().toPandas(),
expected_idx.sort_values(by="f").to_spark().toPandas(),
)
expected_idx = expected.set_index(["i32", "bhello"])[["f", "i64"]]
actual_idx = ps.read_table("test_table", index_col=["i32", "bhello"])[["f", "i64"]]
self.assert_eq(
actual_idx.sort_values(by="f").to_spark().toPandas(),
expected_idx.sort_values(by="f").to_spark().toPandas(),
)
def test_spark_io(self):
with self.temp_dir() as tmp:
pdf = self.test_pdf
expected = ps.DataFrame(pdf)
# Write out partitioned by one column
expected.to_spark_io(tmp, format="json", mode="overwrite", partition_cols="i32")
# Reset column order, as once the data is written out, Spark rearranges partition
# columns to appear first.
actual = ps.read_spark_io(tmp, format="json")
self.assertFalse((actual.columns == self.test_column_order).all())
actual = actual[self.test_column_order]
self.assert_eq(
actual.sort_values(by="f").to_spark().toPandas(),
expected.sort_values(by="f").to_spark().toPandas(),
)
# Write out partitioned by two columns
expected.to_spark_io(
tmp, format="json", mode="overwrite", partition_cols=["i32", "bhello"]
)
# Reset column order, as once the data is written out, Spark rearranges partition
# columns to appear first.
actual = ps.read_spark_io(path=tmp, format="json")
self.assertFalse((actual.columns == self.test_column_order).all())
actual = actual[self.test_column_order]
self.assert_eq(
actual.sort_values(by="f").to_spark().toPandas(),
expected.sort_values(by="f").to_spark().toPandas(),
)
# When index columns are known
pdf = self.test_pdf
expected = ps.DataFrame(pdf)
col_order = ["f", "i32", "i64"]
expected_idx = expected.set_index("bhello")[col_order]
actual_idx = ps.read_spark_io(tmp, format="json", index_col="bhello")[col_order]
self.assert_eq(
actual_idx.sort_values(by="f").to_spark().toPandas(),
expected_idx.sort_values(by="f").to_spark().toPandas(),
)
@unittest.skip("openpyxl")
def test_read_excel(self):
with self.temp_dir() as tmp:
path1 = "{}/file1.xlsx".format(tmp)
self.test_pdf[["i32"]].to_excel(path1)
self.assert_eq(ps.read_excel(open(path1, "rb")), pd.read_excel(open(path1, "rb")))
self.assert_eq(
ps.read_excel(open(path1, "rb"), index_col=0),
pd.read_excel(open(path1, "rb"), index_col=0),
)
self.assert_eq(
ps.read_excel(open(path1, "rb"), index_col=0, squeeze=True),
pd.read_excel(open(path1, "rb"), index_col=0, squeeze=True),
)
self.assert_eq(ps.read_excel(path1), pd.read_excel(path1))
self.assert_eq(ps.read_excel(path1, index_col=0), pd.read_excel(path1, index_col=0))
self.assert_eq(
ps.read_excel(path1, index_col=0, squeeze=True),
pd.read_excel(path1, index_col=0, squeeze=True),
)
self.assert_eq(ps.read_excel(tmp), pd.read_excel(path1))
path2 = "{}/file2.xlsx".format(tmp)
self.test_pdf[["i32"]].to_excel(path2)
self.assert_eq(
ps.read_excel(tmp, index_col=0).sort_index(),
pd.concat(
[pd.read_excel(path1, index_col=0), pd.read_excel(path2, index_col=0)]
).sort_index(),
)
self.assert_eq(
ps.read_excel(tmp, index_col=0, squeeze=True).sort_index(),
pd.concat(
[
pd.read_excel(path1, index_col=0, squeeze=True),
pd.read_excel(path2, index_col=0, squeeze=True),
]
).sort_index(),
)
with self.temp_dir() as tmp:
path1 = "{}/file1.xlsx".format(tmp)
with pd.ExcelWriter(path1) as writer:
self.test_pdf.to_excel(writer, sheet_name="Sheet_name_1")
self.test_pdf[["i32"]].to_excel(writer, sheet_name="Sheet_name_2")
sheet_names = [["Sheet_name_1", "Sheet_name_2"], None]
pdfs1 = pd.read_excel(open(path1, "rb"), sheet_name=None, index_col=0)
pdfs1_squeezed = pd.read_excel(
open(path1, "rb"), sheet_name=None, index_col=0, squeeze=True
)
for sheet_name in sheet_names:
psdfs = ps.read_excel(open(path1, "rb"), sheet_name=sheet_name, index_col=0)
self.assert_eq(psdfs["Sheet_name_1"], pdfs1["Sheet_name_1"])
self.assert_eq(psdfs["Sheet_name_2"], pdfs1["Sheet_name_2"])
psdfs = ps.read_excel(
open(path1, "rb"), sheet_name=sheet_name, index_col=0, squeeze=True
)
self.assert_eq(psdfs["Sheet_name_1"], pdfs1_squeezed["Sheet_name_1"])
self.assert_eq(psdfs["Sheet_name_2"], pdfs1_squeezed["Sheet_name_2"])
self.assert_eq(
ps.read_excel(tmp, index_col=0, sheet_name="Sheet_name_2"),
pdfs1["Sheet_name_2"],
)
for sheet_name in sheet_names:
psdfs = ps.read_excel(tmp, sheet_name=sheet_name, index_col=0)
self.assert_eq(psdfs["Sheet_name_1"], pdfs1["Sheet_name_1"])
self.assert_eq(psdfs["Sheet_name_2"], pdfs1["Sheet_name_2"])
psdfs = ps.read_excel(tmp, sheet_name=sheet_name, index_col=0, squeeze=True)
self.assert_eq(psdfs["Sheet_name_1"], pdfs1_squeezed["Sheet_name_1"])
self.assert_eq(psdfs["Sheet_name_2"], pdfs1_squeezed["Sheet_name_2"])
path2 = "{}/file2.xlsx".format(tmp)
with pd.ExcelWriter(path2) as writer:
self.test_pdf.to_excel(writer, sheet_name="Sheet_name_1")
self.test_pdf[["i32"]].to_excel(writer, sheet_name="Sheet_name_2")
pdfs2 = pd.read_excel(path2, sheet_name=None, index_col=0)
pdfs2_squeezed = pd.read_excel(path2, sheet_name=None, index_col=0, squeeze=True)
self.assert_eq(
ps.read_excel(tmp, sheet_name="Sheet_name_2", index_col=0).sort_index(),
pd.concat([pdfs1["Sheet_name_2"], pdfs2["Sheet_name_2"]]).sort_index(),
)
self.assert_eq(
ps.read_excel(
tmp, sheet_name="Sheet_name_2", index_col=0, squeeze=True
).sort_index(),
pd.concat(
[pdfs1_squeezed["Sheet_name_2"], pdfs2_squeezed["Sheet_name_2"]]
).sort_index(),
)
for sheet_name in sheet_names:
psdfs = ps.read_excel(tmp, sheet_name=sheet_name, index_col=0)
self.assert_eq(
psdfs["Sheet_name_1"].sort_index(),
pd.concat([pdfs1["Sheet_name_1"], pdfs2["Sheet_name_1"]]).sort_index(),
)
self.assert_eq(
psdfs["Sheet_name_2"].sort_index(),
pd.concat([pdfs1["Sheet_name_2"], pdfs2["Sheet_name_2"]]).sort_index(),
)
psdfs = ps.read_excel(tmp, sheet_name=sheet_name, index_col=0, squeeze=True)
self.assert_eq(
psdfs["Sheet_name_1"].sort_index(),
pd.concat(
[pdfs1_squeezed["Sheet_name_1"], pdfs2_squeezed["Sheet_name_1"]]
).sort_index(),
)
self.assert_eq(
psdfs["Sheet_name_2"].sort_index(),
pd.concat(
[pdfs1_squeezed["Sheet_name_2"], pdfs2_squeezed["Sheet_name_2"]]
).sort_index(),
)
def test_read_orc(self):
with self.temp_dir() as tmp:
path = "{}/file1.orc".format(tmp)
data = self.test_pdf
self.spark.createDataFrame(data, "i32 int, i64 long, f double, bhello string").coalesce(
1
).write.orc(path, mode="overwrite")
            # `spark.write.orc` creates a directory containing distributed orc files.
            # But pandas can only read from a file, not a directory. Therefore, we need an orc file path.
orc_file_path = glob.glob(os.path.join(path, "*.orc"))[0]
expected = data.reset_index()[data.columns]
actual = ps.read_orc(path)
self.assertPandasEqual(expected, actual.to_pandas())
# columns
columns = ["i32", "i64"]
expected = data.reset_index()[columns]
actual = ps.read_orc(path, columns=columns)
self.assertPandasEqual(expected, actual.to_pandas())
# index_col
expected = data.set_index("i32")
actual = ps.read_orc(path, index_col="i32")
self.assert_eq(actual, expected)
expected = data.set_index(["i32", "f"])
actual = ps.read_orc(path, index_col=["i32", "f"])
self.assert_eq(actual, expected)
# index_col with columns
expected = data.set_index("i32")[["i64", "bhello"]]
actual = ps.read_orc(path, index_col=["i32"], columns=["i64", "bhello"])
self.assert_eq(actual, expected)
expected = data.set_index(["i32", "f"])[["bhello", "i64"]]
actual = ps.read_orc(path, index_col=["i32", "f"], columns=["bhello", "i64"])
self.assert_eq(actual, expected)
msg = "Unknown column name 'i'"
with self.assertRaises(ValueError, msg=msg):
ps.read_orc(path, columns="i32")
msg = "Unknown column name 'i34'"
with self.assertRaises(ValueError, msg=msg):
ps.read_orc(path, columns=["i34", "i64"])
def test_orc_write(self):
with self.temp_dir() as tmp:
pdf = self.test_pdf
expected = ps.DataFrame(pdf)
# Write out partitioned by one column
expected.to_orc(tmp, mode="overwrite", partition_cols="i32")
# Reset column order, as once the data is written out, Spark rearranges partition
# columns to appear first.
actual = ps.read_orc(tmp)
self.assertFalse((actual.columns == self.test_column_order).all())
actual = actual[self.test_column_order]
self.assert_eq(
actual.sort_values(by="f").to_spark().toPandas(),
expected.sort_values(by="f").to_spark().toPandas(),
)
# Write out partitioned by two columns
expected.to_orc(tmp, mode="overwrite", partition_cols=["i32", "bhello"])
# Reset column order, as once the data is written out, Spark rearranges partition
# columns to appear first.
actual = ps.read_orc(tmp)
self.assertFalse((actual.columns == self.test_column_order).all())
actual = actual[self.test_column_order]
self.assert_eq(
actual.sort_values(by="f").to_spark().toPandas(),
expected.sort_values(by="f").to_spark().toPandas(),
)
if __name__ == "__main__":
from pyspark.pandas.tests.test_dataframe_spark_io import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 |
iproduct/course-social-robotics | 11-dnn-keras/venv/Lib/site-packages/pandas/tests/indexes/multi/test_drop.py | 1 | 5847 | import warnings
import numpy as np
import pytest
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import Index, MultiIndex
import pandas._testing as tm
def test_drop(idx):
dropped = idx.drop([("foo", "two"), ("qux", "one")])
index = MultiIndex.from_tuples([("foo", "two"), ("qux", "one")])
dropped2 = idx.drop(index)
expected = idx[[0, 2, 3, 5]]
tm.assert_index_equal(dropped, expected)
tm.assert_index_equal(dropped2, expected)
dropped = idx.drop(["bar"])
expected = idx[[0, 1, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
dropped = idx.drop("foo")
expected = idx[[2, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
index = MultiIndex.from_tuples([("bar", "two")])
with pytest.raises(KeyError, match=r"^10$"):
idx.drop([("bar", "two")])
with pytest.raises(KeyError, match=r"^10$"):
idx.drop(index)
with pytest.raises(KeyError, match=r"^'two'$"):
idx.drop(["foo", "two"])
# partially correct argument
mixed_index = MultiIndex.from_tuples([("qux", "one"), ("bar", "two")])
with pytest.raises(KeyError, match=r"^10$"):
idx.drop(mixed_index)
# error='ignore'
dropped = idx.drop(index, errors="ignore")
expected = idx[[0, 1, 2, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
dropped = idx.drop(mixed_index, errors="ignore")
expected = idx[[0, 1, 2, 3, 5]]
tm.assert_index_equal(dropped, expected)
dropped = idx.drop(["foo", "two"], errors="ignore")
expected = idx[[2, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
# mixed partial / full drop
dropped = idx.drop(["foo", ("qux", "one")])
expected = idx[[2, 3, 5]]
tm.assert_index_equal(dropped, expected)
# mixed partial / full drop / error='ignore'
mixed_index = ["foo", ("qux", "one"), "two"]
with pytest.raises(KeyError, match=r"^'two'$"):
idx.drop(mixed_index)
dropped = idx.drop(mixed_index, errors="ignore")
expected = idx[[2, 3, 5]]
tm.assert_index_equal(dropped, expected)
def test_droplevel_with_names(idx):
index = idx[idx.get_loc("foo")]
dropped = index.droplevel(0)
assert dropped.name == "second"
index = MultiIndex(
levels=[Index(range(4)), Index(range(4)), Index(range(4))],
codes=[
np.array([0, 0, 1, 2, 2, 2, 3, 3]),
np.array([0, 1, 0, 0, 0, 1, 0, 1]),
np.array([1, 0, 1, 1, 0, 0, 1, 0]),
],
names=["one", "two", "three"],
)
dropped = index.droplevel(0)
assert dropped.names == ("two", "three")
dropped = index.droplevel("two")
expected = index.droplevel(1)
assert dropped.equals(expected)
def test_droplevel_list():
index = MultiIndex(
levels=[Index(range(4)), Index(range(4)), Index(range(4))],
codes=[
np.array([0, 0, 1, 2, 2, 2, 3, 3]),
np.array([0, 1, 0, 0, 0, 1, 0, 1]),
np.array([1, 0, 1, 1, 0, 0, 1, 0]),
],
names=["one", "two", "three"],
)
dropped = index[:2].droplevel(["three", "one"])
expected = index[:2].droplevel(2).droplevel(0)
assert dropped.equals(expected)
dropped = index[:2].droplevel([])
expected = index[:2]
assert dropped.equals(expected)
msg = (
"Cannot remove 3 levels from an index with 3 levels: "
"at least one level must be left"
)
with pytest.raises(ValueError, match=msg):
index[:2].droplevel(["one", "two", "three"])
with pytest.raises(KeyError, match="'Level four not found'"):
index[:2].droplevel(["one", "four"])
def test_drop_not_lexsorted():
# GH 12078
# define the lexsorted version of the multi-index
tuples = [("a", ""), ("b1", "c1"), ("b2", "c2")]
lexsorted_mi = MultiIndex.from_tuples(tuples, names=["b", "c"])
assert lexsorted_mi.is_lexsorted()
# and the not-lexsorted version
df = pd.DataFrame(
columns=["a", "b", "c", "d"], data=[[1, "b1", "c1", 3], [1, "b2", "c2", 4]]
)
df = df.pivot_table(index="a", columns=["b", "c"], values="d")
df = df.reset_index()
not_lexsorted_mi = df.columns
assert not not_lexsorted_mi.is_lexsorted()
# compare the results
tm.assert_index_equal(lexsorted_mi, not_lexsorted_mi)
with tm.assert_produces_warning(PerformanceWarning):
tm.assert_index_equal(lexsorted_mi.drop("a"), not_lexsorted_mi.drop("a"))
def test_drop_with_nan_in_index(nulls_fixture):
# GH#18853
mi = MultiIndex.from_tuples([("blah", nulls_fixture)], names=["name", "date"])
msg = r"labels \[Timestamp\('2001-01-01 00:00:00'\)\] not found in level"
with pytest.raises(KeyError, match=msg):
mi.drop(pd.Timestamp("2001"), level="date")
def test_drop_with_non_monotonic_duplicates():
# GH#33494
mi = MultiIndex.from_tuples([(1, 2), (2, 3), (1, 2)])
with warnings.catch_warnings():
warnings.simplefilter("ignore", PerformanceWarning)
result = mi.drop((1, 2))
expected = MultiIndex.from_tuples([(2, 3)])
tm.assert_index_equal(result, expected)
def test_single_level_drop_partially_missing_elements():
# GH 37820
mi = MultiIndex.from_tuples([(1, 2), (2, 2), (3, 2)])
msg = r"labels \[4\] not found in level"
with pytest.raises(KeyError, match=msg):
mi.drop(4, level=0)
with pytest.raises(KeyError, match=msg):
mi.drop([1, 4], level=0)
msg = r"labels \[nan\] not found in level"
with pytest.raises(KeyError, match=msg):
mi.drop([np.nan], level=0)
with pytest.raises(KeyError, match=msg):
mi.drop([np.nan, 1, 2, 3], level=0)
mi = MultiIndex.from_tuples([(np.nan, 1), (1, 2)])
msg = r"labels \['a'\] not found in level"
with pytest.raises(KeyError, match=msg):
mi.drop([np.nan, 1, "a"], level=0)
| gpl-2.0 |
Jegp/multimodalrnnproject | unimodal_video.py | 1 | 2625 | from __future__ import print_function
import numpy
import numpy as np
from keras.preprocessing import sequence
from keras.datasets import imdb
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.layers.embeddings import Embedding
from keras.layers.recurrent import LSTM
from keras.callbacks import EarlyStopping, ModelCheckpoint
from sklearn.externals import joblib
from keras.utils.np_utils import to_categorical
from keras.utils.vis_utils import model_to_dot
from precision import precision, recall, fmeasure
from hyperopt import Trials, STATUS_OK, tpe
from hyperas import optim
from hyperas.distributions import choice, uniform, conditional
import matplotlib.pyplot as plt
def data():
X_train, X_test, y_train, y_test = joblib.load('305010.pkl')[0]
X_train = numpy.array(X_train)
X_train = X_train.reshape((X_train.shape[0], 1, X_train.shape[1]))
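    # Keras LSTM layers expect inputs shaped (samples, timesteps, features);
    # each sample is fed here as a single timestep.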
y_train = to_categorical(y_train)
X_test = numpy.array(X_test)
X_test = X_test.reshape((X_test.shape[0], 1, X_test.shape[1]))
y_test = to_categorical(y_test)
max_features = X_train.shape[2]
maxlen = X_train.shape[0]
return X_train, y_train, X_test, y_test, max_features, maxlen
def model(X_train, y_train, X_test, y_test, max_features, maxlen):
model = Sequential()
model.add(LSTM(210,
input_shape = (1, max_features),
return_sequences = True))
model.add(LSTM(120,
return_sequences = False))
    # Dropout layer to avoid overfitting by randomly dropping units
model.add(Dropout(0.651797))
# Regular dense nn with sigmoid activation function
model.add(Dense(y_train.shape[1], activation = 'softmax'))
## Compile model
model.compile(
loss='categorical_crossentropy'
, optimizer='rmsprop'
, metrics = ['accuracy', precision, recall, fmeasure] # Collect accuracy metric
)
## Print model as dot
dot = model_to_dot(model)
dot.write_raw("model_video.dot")
## Fit model
history = model.fit(X_train, y_train,
batch_size=256,
epochs=50,
validation_data=(X_test, y_test),
callbacks=[])
# summarize history for accuracy
print(history.history.keys())
plt.plot(history.history['val_acc'])
plt.plot(history.history['val_loss'])
    plt.title('Video model')
plt.xlabel('Epoch')
plt.legend(['Accuracy', 'Loss'], loc='upper right')
plt.savefig('model_video.png')
if __name__ == '__main__':
X_train, y_train, X_test, y_test, m, n = data()
model(X_train, y_train, X_test, y_test, m, n)
| gpl-3.0 |
ruymanengithub/vison | vison/inject/CHINJ02.py | 1 | 20117 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
VIS Ground Calibration
TEST: CHINJ02
Charge injection calibration (part 2)
Injection vs. IDL (injection threshold)
Created on Tue Aug 29 17:36:00 2017
:author: Ruyman Azzollini
"""
# IMPORT STUFF
import numpy as np
from pdb import set_trace as stop
import os
import copy
from collections import OrderedDict
import pandas as pd
from vison.datamodel import cdp
from vison.pipe.task import HKKeys
from vison.support import context, utils
from vison.datamodel import scriptic as sc
from vison.datamodel import inputs
from .InjTask import InjTask
from vison.image import performance
from . import CH02aux
from vison.inject import lib as ilib
from .InjTask import InjTask, _get_CCDhalf
# END IMPORT
isthere = os.path.exists
# HKKeys = ['CCD1_OD_T', 'CCD2_OD_T', 'CCD3_OD_T', 'COMM_RD_T',
# 'CCD2_IG1_T', 'CCD3_IG1_T', 'CCD1_TEMP_T', 'CCD2_TEMP_T', 'CCD3_TEMP_T',
# 'CCD1_IG1_T', 'COMM_IG2_T', 'FPGA_PCB_TEMP_T', 'CCD1_OD_B',
# 'CCD2_OD_B', 'CCD3_OD_B', 'COMM_RD_B', 'CCD2_IG1_B', 'CCD3_IG1_B', 'CCD1_TEMP_B',
# 'CCD2_TEMP_B', 'CCD3_TEMP_B', 'CCD1_IG1_B', 'COMM_IG2_B']
IG1comm = 6.
IG2comm = 4.
CHINJ02_commvalues = dict(program='CALCAMP', test='CHINJ02',
flushes=7, siflsh=1, siflsh_p=500,
inisweep=1,
vstart=0, vend=2086,
chinj=1, chinj_on=30, chinj_of=100,
id_wid=60,
exptime=0., shuttr=0, e_shuttr=0,
mirr_on=0,
wave=4,
motr_on=0,
source='flat',
comments='')
class CHINJ02_inputs(inputs.Inputs):
manifesto = inputs.CommonTaskInputs.copy()
manifesto.update(OrderedDict(sorted([
('IDLs', ([list], 'Injection Drain Low Voltages List: [min, max].')),
('dIDL', ([float], 'Injection Drain Voltage Step.')),
('IDH', ([float], 'Injection Drain High Voltage.')),
('IG1', ([float], 'Injection Gate 1 Voltage.')),
('IG2', ([float], 'Injection Gate 2 Voltage.')),
('id_delays', ([list], 'Injection Drain Delays.')),
('toi_chinj', ([int], 'TOI Charge Injection.')),
])))
class CHINJ02(InjTask):
""" """
inputsclass = CHINJ02_inputs
def __init__(self, inputs, log=None, drill=False, debug=False, cleanafter=False):
""" """
self.subtasks = [('check', self.check_data),
('prep', self.prepare_images),
('basic', self.basic_analysis),
('meta', self.meta_analysis)]
super(CHINJ02, self).__init__(inputs=inputs, log=log, drill=drill,
debug=debug, cleanafter=cleanafter)
self.name = 'CHINJ02'
self.type = 'Simple'
self.HKKeys = HKKeys
self.CDP_lib = CH02aux.get_CDP_lib()
self.figdict = CH02aux.get_CH02figs()
self.inputs['subpaths'] = dict(figs='figs',
ccdpickles='ccdpickles',
products='products')
def set_inpdefaults(self, **kwargs):
""" """
toi_chinj = 500
self.inpdefaults = dict(
IDLs=[10., 13.],
dIDL=0.25,
IG1=IG1comm,
IG2=IG2comm,
IDH=18.,
id_delays=[toi_chinj * 2.5, toi_chinj * 1.5],
toi_chinj=toi_chinj
)
def set_perfdefaults(self, **kwargs):
super(CHINJ02, self).set_perfdefaults(**kwargs)
Flu_lims, FluGrad_lims = self.get_FluenceAndGradient_limits()
self.perfdefaults['Flu_lims'] = Flu_lims.copy()
self.perfdefaults['FluGrad_lims'] = FluGrad_lims.copy()
def build_scriptdict(self, diffvalues=dict(), elvis=context.elvis):
"""
Builds CHINJ02 script structure dictionary.
#:param IDLs: list of 2 ints, [V], [min,max] values of IDL (Inject. Drain Low).
#:param IDH: int, [V], Injection Drain High.
#:param id_delays: list of 2 ints, [us], injection drain delays.
#:param toi_chinj: int, [us], TOI-charge injection.
:param diffvalues: dict, opt, differential values.
"""
IDLs = self.inputs['IDLs']
dIDL = self.inputs['dIDL']
IDH = self.inputs['IDH']
IG1 = self.inputs['IG1']
IG2 = self.inputs['IG2']
id_delays = self.inputs['id_delays']
toi_chinj = self.inputs['toi_chinj']
assert len(IDLs) == 2
assert len(id_delays) == 2
NIDL = (IDLs[1] - IDLs[0]) / dIDL + 1
IDLv = np.arange(NIDL) * dIDL + IDLs[0]
CHINJ02_sdict = dict()
# First Injection Drain Delay
colcounter = 1
for i, IDL in enumerate(IDLv):
colkey = 'col%03i' % (i + 1,)
CHINJ02_sdict[colkey] = dict(frames=1, IDL=IDL, IDH=IDH,
IG1_1_T=IG1, IG1_2_T=IG1, IG1_3_T=IG1,
IG1_1_B=IG1, IG1_2_B=IG1, IG1_3_B=IG1,
IG2_T=IG2, IG2_B=IG2,
id_dly=id_delays[0], toi_ch=toi_chinj)
colcounter += 1
# Second Injection Drain Delay
colstart = colcounter
for j, IDL in enumerate(IDLv):
colkey = 'col%03i' % (colstart + j,)
CHINJ02_sdict[colkey] = dict(frames=1, IDL=IDL, IDH=IDH,
IG1_1_T=IG1, IG1_2_T=IG1, IG1_3_T=IG1,
IG1_1_B=IG1, IG1_2_B=IG1, IG1_3_B=IG1,
IG2_T=IG2, IG2_B=IG2,
id_dly=id_delays[1], toi_ch=toi_chinj)
Ncols = len(list(CHINJ02_sdict.keys()))
CHINJ02_sdict['Ncols'] = Ncols
commvalues = copy.deepcopy(sc.script_dictionary[elvis]['defaults'])
commvalues.update(CHINJ02_commvalues)
if len(diffvalues) == 0:
try:
diffvalues = self.inputs['diffvalues']
except BaseException:
diffvalues = diffvalues = dict()
CHINJ02_sdict = sc.update_structdict(
CHINJ02_sdict, commvalues, diffvalues)
return CHINJ02_sdict
def filterexposures(self, structure, explog, OBSID_lims):
""" """
wavedkeys = ['motr_siz']
return super(CHINJ02, self).filterexposures(structure, explog, OBSID_lims, colorblind=True,
wavedkeys=wavedkeys)
def prepare_images(self):
super(CHINJ02, self).prepare_images(doExtract=True,
doBadPixels=True,
doMask=True, # ON TESTS!
doOffset=True,
doBias=False,
doFF=False)
def meta_analysis(self):
"""
Finds the Injection Threshold for each CCD half.
**METACODE**
::
f.e.CCD:
f.e.Q:
                    load injection vs. IDL curve
find&save injection threshold on curve
report injection threshold as a table
"""
if self.report is not None:
self.report.add_Section(
keyword='meta', Title='CHINJ02 Analysis ("Meta")', level=0)
self.report.add_Text(['Model:',
'\\begin{equation}',
'I=b+\\frac{A}{1+e^{K(IDL-X_T)}}',
'\end{equation}'])
DDindices = copy.deepcopy(self.dd.indices)
nObs, nCCD, nQuad = DDindices.shape[0:3]
Quads = DDindices.get_vals('Quad')
CCDs = DDindices.get_vals('CCD')
prodspath = self.inputs['subpaths']['products']
function, module = utils.get_function_module()
CDP_header = self.CDP_header.copy()
CDP_header.update(dict(function=function, module=module))
CDP_header['DATE'] = self.get_time_tag()
toi_ch = self.dd.mx['toi_ch'][0, 0]
assert np.all(np.isclose(self.dd.mx['toi_ch'][:], toi_ch))
# ANALYSIS TABLE
CCDhalves = ['top', 'bottom']
NPraw = nObs
NPfit = nCCD * nQuad
MCH02raw_dd = OrderedDict()
MCH02raw_dd['meta'] = OrderedDict()
_Quads_dict = dict(bottom=['E', 'F'],
top=['G', 'H'])
MCH02fit_dd = OrderedDict()
MCH02fit_dd['meta'] = OrderedDict()
# INJECTION CURVES
inj_curves_cdp = cdp.CDP()
inj_curves_cdp.header = CDP_header.copy()
inj_curves_cdp.path = prodspath
inj_curves_cdp.data = OrderedDict()
inj_curves_cdp.data['labelkeys'] = ['data', 'bestfit']
xdummy = np.arange(10, dtype='float32')
ydummy = np.zeros(10, dtype='float32')
for jCCD, CCDk in enumerate(CCDs):
inj_curves_cdp.data[CCDk] = OrderedDict()
for kQ, Q in enumerate(Quads):
inj_curves_cdp.data[CCDk][Q] = OrderedDict(x=OrderedDict(),
y=OrderedDict())
for tag in ['data', 'bestfit']:
inj_curves_cdp.data[CCDk][Q]['x'][tag] = xdummy.copy()
inj_curves_cdp.data[CCDk][Q]['y'][tag] = ydummy.copy()
for CCDhalf in CCDhalves:
MCH02raw_dd[CCDhalf] = OrderedDict()
MCH02raw_dd['meta'][CCDhalf] = OrderedDict()
MCH02raw_dd[CCDhalf]['IDL'] = np.zeros(NPraw, dtype='float32') + np.nan
for jCCD, CCDk in enumerate(CCDs):
for Q in _Quads_dict[CCDhalf]:
MCH02raw_dd[CCDhalf]['INJ_%s%s' %
(jCCD + 1, Q)] = np.zeros(NPraw, dtype='float32') + np.nan
MFCH02_dd = OrderedDict()
MFCH02_dd['CCD'] = np.zeros(NPfit, dtype='int32')
MFCH02_dd['Q'] = np.zeros(NPfit, dtype='int32')
MFCH02_dd['ID_DLY'] = np.zeros(NPfit, dtype='float32') + np.nan
fitkeys = ['BGD', 'A', 'K', 'XT']
for fitkey in fitkeys:
MFCH02_dd[fitkey] = np.zeros(NPfit, dtype='float32') + np.nan
MCH02_dd = OrderedDict()
MCH02_dd['CCD'] = np.zeros(NPfit, dtype='int32')
MCH02_dd['Q'] = np.zeros(NPfit, dtype='int32')
MCH02_dd['ID_DLY'] = np.zeros(NPfit, dtype='float32') + np.nan
mkeys = ['BGD_ADU', 'A_ADU', 'K', 'IDL_THRESH']
for mkey in mkeys:
MCH02_dd[mkey] = np.zeros(NPfit, dtype='float32') + np.nan
# First we fill in the table of "raw" results (injections vs. IDL)
for CCDhalf in CCDhalves:
_Quads = _Quads_dict[CCDhalf]
if CCDhalf == 'bottom':
id_dly_opt = toi_ch * 2.5
elif CCDhalf == 'top':
id_dly_opt = toi_ch * 1.5
for jCCD, CCDk in enumerate(CCDs):
selix = np.where((self.dd.mx['chinj'][:, jCCD] == 1) &
(np.isclose(self.dd.mx['id_dly'][:, jCCD], id_dly_opt)))
IDL = self.dd.mx['IDL'][selix, jCCD].flatten().copy()
for Q in _Quads:
kQ = Quads.index(Q)
med_inj = self.dd.mx['chinj_p50'][selix, jCCD, kQ].flatten().copy()
MCH02raw_dd[CCDhalf]['IDL'] = IDL.copy()
MCH02raw_dd[CCDhalf]['INJ_%s%s' % (jCCD + 1, Q)] = med_inj.copy()
inj_curves_cdp.data[CCDk][Q]['x']['data'] = IDL.copy()
inj_curves_cdp.data[CCDk][Q]['y']['data'] = med_inj.copy()
MCH02raw_dd['meta'][CCDhalf]['toi_ch'] = toi_ch
MCH02raw_dd['meta'][CCDhalf]['id_dly'] = id_dly_opt
MCH02raw_dd['meta'][CCDhalf]['IDH'] = self.dd.mx['IDH'][selix, jCCD].flatten()[0]
IG1key = 'IG1_1_%s' % CCDhalf[0].upper()
MCH02raw_dd['meta'][CCDhalf]['IG1'] = self.dd.mx[IG1key][selix, jCCD].flatten()[0]
IG2key = 'IG2_%s' % CCDhalf[0].upper()
MCH02raw_dd['meta'][CCDhalf]['IG2'] = self.dd.mx[IG2key][selix, jCCD].flatten()[0]
# Now we fill in the results from the fits
for jCCD, CCDk in enumerate(CCDs):
for kQ, Q in enumerate(Quads):
ix = jCCD * nQuad + kQ
if Q in ['E', 'F']:
id_dly_opt = toi_ch * 2.5
                elif Q in ['G', 'H']:
id_dly_opt = toi_ch * 1.5
selix = np.where((self.dd.mx['chinj'][:, jCCD] == 1) &
(np.isclose(self.dd.mx['id_dly'][:, jCCD], id_dly_opt)))
IDL = self.dd.mx['IDL'][selix, jCCD].flatten().copy()
med_inj = self.dd.mx['chinj_p50'][selix, jCCD, kQ].flatten().copy()
doPlot = False
debug = False
# if (jCCD==1) and (Q=='G'):
# doPlot=True
# debug=True
res = ilib.fit_Inj_vs_IDL(IDL, med_inj, doPlot=doPlot,
debug=debug)
didfit = res['didfit']
MFCH02_dd['CCD'][ix] = jCCD + 1
MFCH02_dd['Q'][ix] = kQ + 1
MCH02_dd['CCD'][ix] = jCCD + 1
MCH02_dd['Q'][ix] = kQ + 1
if didfit:
# fit parameters
MFCH02_dd['ID_DLY'][ix] = id_dly_opt
MCH02_dd['ID_DLY'][ix] = id_dly_opt
for fitkey in fitkeys:
MFCH02_dd[fitkey][ix] = res[fitkey]
xbf = res['IDL_BF'].copy()
ybf = res['NORMINJ_BF'] * 2.**16
# Derived parameters
# mkeys = ['BGD','A_ADU']
bgd = res['BGD']
a = res['A']
xT = res['XT']
MCH02_dd['BGD_ADU'][ix] = bgd * 2**16 # ADU
MCH02_dd['A_ADU'][ix] = a * 2**16 # ADU
MCH02_dd['IDL_THRESH'][ix] = xT
else:
xbf = IDL.copy()
ybf = np.zeros_like(xbf)
inj_curves_cdp.data[CCDk][Q]['x']['bestfit'] = xbf.copy()
inj_curves_cdp.data[CCDk][Q]['y']['bestfit'] = ybf.copy()
# PLOT
fdict_meta_plot = self.figdict['CH02_meta'][1]
fdict_meta_plot['data'] = inj_curves_cdp.data.copy()
if self.report is not None:
self.addFigures_ST(figkeys=['CH02_meta'],
dobuilddata=False)
# REPORT (RAW) RESULTS AS TABLE CDPs
for CCDhalf in CCDhalves:
MCH02rawhalf_dddf = OrderedDict(ANALYSIS=pd.DataFrame.from_dict(MCH02raw_dd[CCDhalf]))
MCH02rawhalf_cdp = self.CDP_lib['METARAW']
MCH02rawhalf_cdp.path = prodspath
MCH02rawhalf_cdp.rootname += '_%s' % CCDhalf
MCH02rawhalf_cdp.ingest_inputs(
data=MCH02rawhalf_dddf.copy(),
meta=MCH02raw_dd['meta'][CCDhalf].copy(),
header=CDP_header.copy()
)
MCH02rawhalf_cdp.init_wb_and_fillAll(
header_title='CHINJ02: META-ANALYSIS RAW [%s]' % CCDhalf)
self.save_CDP(MCH02rawhalf_cdp)
self.pack_CDP_to_dd(MCH02rawhalf_cdp, 'META_RAW_%s' % CCDhalf)
if self.report is not None:
def ff(x): return '%.2f' % x
selcolumns = ['IDL']
for jCCD, CCDk in enumerate(CCDs):
for Q in _Quads_dict[CCDhalf]:
selcolumns.append('INJ_%s%s' % (jCCD + 1, Q))
ext_formatters = [ff] * len(selcolumns)
caption = 'CHINJ02: META-ANALYSIS (RAW) TABLE. CCD-half = %s. ' +\
                    'IDH = %.2f V, IG1 = %.2f V, IG2 = %.2f V, toi\_ch = %i us, ' +\
'id\_dly=%.1f us.'
caption = caption % (CCDhalf,
MCH02rawhalf_cdp.meta['IDH'],
MCH02rawhalf_cdp.meta['IG1'],
MCH02rawhalf_cdp.meta['IG2'],
MCH02rawhalf_cdp.meta['toi_ch'],
MCH02rawhalf_cdp.meta['id_dly'])
Mtex = MCH02rawhalf_cdp.get_textable(sheet='ANALYSIS',
columns=selcolumns,
caption=caption,
fitwidth=True,
tiny=True,
formatters=ext_formatters,
index=False)
self.report.add_Text(Mtex)
# REPORT FIT RESULTS AS TABLE CDPs
MFCH02_dddf = OrderedDict(ANALYSIS=pd.DataFrame.from_dict(MFCH02_dd))
MFCH02_cdp = self.CDP_lib['METAFIT']
MFCH02_cdp.path = prodspath
MFCH02_cdp.ingest_inputs(
data=MFCH02_dddf.copy(),
meta=dict(),
header=CDP_header.copy()
)
MFCH02_cdp.init_wb_and_fillAll(header_title='CHINJ02: MODEL FIT')
self.save_CDP(MFCH02_cdp)
self.pack_CDP_to_dd(MFCH02_cdp, 'METAFIT_CDP')
if self.report is not None:
def fccd(x): return CCDs[x - 1]
def fq(x): return Quads[x - 1]
def ff(x): return '%.3f' % x
selcolumns = ['CCD', 'Q', 'BGD', 'K', 'A', 'XT']
ext_formatters = [fccd, fq] + [ff, ff, ff, ff]
caption = 'CHINJ02: Model parameters. Notice that the model fits injection values divided by $2^{16}$. ' +\
'BGD [adim.], A [adim.], K [adim.], XT [V]'
MFtex = MFCH02_cdp.get_textable(sheet='ANALYSIS',
columns=selcolumns,
caption=caption,
fitwidth=True,
tiny=True,
formatters=ext_formatters,
index=False)
self.report.add_Text(MFtex)
# REPORT DERIVED FIT RESULTS AS TABLE CDP
MCH02_dddf = OrderedDict(ANALYSIS=pd.DataFrame.from_dict(MCH02_dd))
MCH02_cdp = self.CDP_lib['META']
MCH02_cdp.path = prodspath
MCH02_cdp.ingest_inputs(
data=MCH02_dddf.copy(),
meta=dict(),
header=CDP_header.copy()
)
MCH02_cdp.init_wb_and_fillAll(header_title='CHINJ02: META-ANALYSIS')
self.save_CDP(MCH02_cdp)
self.pack_CDP_to_dd(MCH02_cdp, 'META_CDP')
if self.report is not None:
def fccd(x): return CCDs[x - 1]
def fq(x): return Quads[x - 1]
def ff(x): return '%.3f' % x
selcolumns = ['CCD', 'Q', 'BGD_ADU', 'A_ADU', 'IDL_THRESH']
ext_formatters = [fccd, fq] + [ff, ff, ff]
caption = 'CHINJ02: Model - derived values. ' +\
'BGD\_ADU: background level in ADUs; ' +\
'A\_ADU: injection level in ADUs; ' +\
'IDL\_THRESH: threshold voltage.'
Mtex = MCH02_cdp.get_textable(sheet='ANALYSIS',
columns=selcolumns,
caption=caption,
fitwidth=True,
tiny=True,
formatters=ext_formatters,
index=False)
self.report.add_Text(Mtex)
self.canbecleaned = True
| gpl-3.0 |
yochow/autotest | new_tko/tko/graphing_utils.py | 1 | 32535 | import base64, os, tempfile, operator, pickle, datetime, django.db
import os.path, getpass
from math import sqrt
# When you import matplotlib, it tries to write some temp files for better
# performance, and it does that to the directory in MPLCONFIGDIR, or, if that
# doesn't exist, the home directory. Problem is, the home directory is not
# writable when running under Apache, and matplotlib's not smart enough to
# handle that. It does appear smart enough to handle the files going
# away after they are written, though.
temp_dir = os.path.join(tempfile.gettempdir(),
'.matplotlib-%s' % getpass.getuser())
if not os.path.exists(temp_dir):
os.mkdir(temp_dir)
os.environ['MPLCONFIGDIR'] = temp_dir
import matplotlib
matplotlib.use('Agg')
import matplotlib.figure, matplotlib.backends.backend_agg
import StringIO, colorsys, PIL.Image, PIL.ImageChops
from autotest_lib.frontend.afe import readonly_connection
from autotest_lib.frontend.afe.model_logic import ValidationError
from autotest_lib.frontend.afe.simplejson import encoder
from autotest_lib.client.common_lib import global_config
from new_tko.tko import models, tko_rpc_utils
_FIGURE_DPI = 100
_FIGURE_WIDTH_IN = 10
_FIGURE_BOTTOM_PADDING_IN = 2 # for x-axis labels
_SINGLE_PLOT_HEIGHT = 6
_MULTIPLE_PLOT_HEIGHT_PER_PLOT = 4
_MULTIPLE_PLOT_MARKER_TYPE = 'o'
_MULTIPLE_PLOT_MARKER_SIZE = 4
_SINGLE_PLOT_STYLE = 'bs-' # blue squares with lines connecting
_SINGLE_PLOT_ERROR_BAR_COLOR = 'r'
_LEGEND_FONT_SIZE = 'xx-small'
_LEGEND_HANDLE_LENGTH = 0.03
_LEGEND_NUM_POINTS = 3
_LEGEND_MARKER_TYPE = 'o'
_LINE_XTICK_LABELS_SIZE = 'x-small'
_BAR_XTICK_LABELS_SIZE = 8
_json_encoder = encoder.JSONEncoder()
class NoDataError(Exception):
"""\
Exception to raise if the graphing query returned an empty resultset.
"""
def _colors(n):
"""\
Generator function for creating n colors. The return value is a tuple
representing the RGB of the color.
"""
for i in xrange(n):
yield colorsys.hsv_to_rgb(float(i) / n, 1.0, 1.0)
def _resort(kernel_labels, list_to_sort):
"""\
Resorts a list, using a list of kernel strings as the keys. Returns the
resorted list.
"""
labels = [tko_rpc_utils.KernelString(label) for label in kernel_labels]
resorted_pairs = sorted(zip(labels, list_to_sort))
# We only want the resorted list; we are not interested in the kernel
# strings.
return [pair[1] for pair in resorted_pairs]
def _quote(string):
return "%s%s%s" % ("'", string.replace("'", r"\'"), "'")
_HTML_TEMPLATE = """\
<html><head></head><body>
<img src="data:image/png;base64,%s" usemap="#%s"
border="0" alt="graph">
<map name="%s">%s</map>
</body></html>"""
_AREA_TEMPLATE = """\
<area shape="rect" coords="%i,%i,%i,%i" title="%s"
href="#"
onclick="%s(%s); return false;">"""
class MetricsPlot(object):
def __init__(self, query_dict, plot_type, inverted_series, normalize_to,
drilldown_callback):
"""
query_dict: dictionary containing the main query and the drilldown
queries. The main query returns a row for each x value. The first
column contains the x-axis label. Subsequent columns contain data
for each series, named by the column names. A column named
'errors-<x>' will be interpreted as errors for the series named <x>.
plot_type: 'Line' or 'Bar', depending on the plot type the user wants
inverted_series: list of series that should be plotted on an inverted
y-axis
normalize_to:
None - do not normalize
'first' - normalize against the first data point
'x__%s' - normalize against the x-axis value %s
'series__%s' - normalize against the series %s
drilldown_callback: name of drilldown callback method.
"""
self.query_dict = query_dict
if plot_type == 'Line':
self.is_line = True
elif plot_type == 'Bar':
self.is_line = False
else:
raise ValidationError({'plot' : 'Plot must be either Line or Bar'})
self.plot_type = plot_type
self.inverted_series = inverted_series
self.normalize_to = normalize_to
if self.normalize_to is None:
self.normalize_to = ''
self.drilldown_callback = drilldown_callback
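# Illustrative sketch (not part of the original module): one way a MetricsPlot
# might be constructed. The query strings and callback name below are made-up
# placeholders, not real queries from this codebase. Note how the drilldown
# query key follows the '__<series>__' convention described in the docstring.
def _example_metrics_plot():
    example_queries = {
        '__main__': 'SELECT kernel, throughput, errors-throughput FROM ...',
        '__throughput__': 'SELECT ...  -- drilldown query for this series',
    }
    return MetricsPlot(query_dict=example_queries,
                       plot_type='Line',
                       inverted_series=[],
                       normalize_to='first',
                       drilldown_callback='showMetricsDrilldown')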
class QualificationHistogram(object):
def __init__(self, query, filter_string, interval, drilldown_callback):
"""
query: the main query to retrieve the pass rate information. The first
column contains the hostnames of all the machines that satisfied the
global filter. The second column (titled 'total') contains the total
number of tests that ran on that machine and satisfied the global
filter. The third column (titled 'good') contains the number of
those tests that passed on that machine.
filter_string: filter to apply to the common global filter to show the
Table View drilldown of a histogram bucket
interval: interval for each bucket. E.g., 10 means that buckets should
be 0-10%, 10%-20%, ...
"""
self.query = query
self.filter_string = filter_string
self.interval = interval
self.drilldown_callback = drilldown_callback
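# Illustrative sketch (not part of the original module): constructing a
# QualificationHistogram with 10%-wide buckets. The query text and callback
# name are made-up placeholders; only the column layout (hostname, total,
# good) matters.
def _example_qual_histogram():
    query = ('SELECT hostname, COUNT(*) AS total, '
             'SUM(status = "GOOD") AS good FROM ... GROUP BY hostname')
    return QualificationHistogram(query, filter_string='',
                                  interval=10,
                                  drilldown_callback='showQualDrilldown')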
def _create_figure(height_inches):
"""\
Creates an instance of matplotlib.figure.Figure, given the height in inches.
Returns the figure and the height in pixels.
"""
fig = matplotlib.figure.Figure(
figsize=(_FIGURE_WIDTH_IN, height_inches + _FIGURE_BOTTOM_PADDING_IN),
dpi=_FIGURE_DPI, facecolor='white')
fig.subplots_adjust(bottom=float(_FIGURE_BOTTOM_PADDING_IN) / height_inches)
return (fig, fig.get_figheight() * _FIGURE_DPI)
def _create_line(plots, labels, plot_info):
"""\
Given all the data for the metrics, create a line plot.
plots: list of dicts containing the plot data. Each dict contains:
x: list of x-values for the plot
y: list of corresponding y-values
errors: errors for each data point, or None if no error information
available
label: plot title
labels: list of x-tick labels
plot_info: a MetricsPlot
"""
# when we're doing any kind of normalization, all series get put into a
# single plot
single = bool(plot_info.normalize_to)
area_data = []
lines = []
if single:
plot_height = _SINGLE_PLOT_HEIGHT
else:
plot_height = _MULTIPLE_PLOT_HEIGHT_PER_PLOT * len(plots)
figure, height = _create_figure(plot_height)
if single:
subplot = figure.add_subplot(1, 1, 1)
# Plot all the data
for plot_index, (plot, color) in enumerate(zip(plots, _colors(len(plots)))):
needs_invert = (plot['label'] in plot_info.inverted_series)
# Add a new subplot, if user wants multiple subplots
# Also handle axis inversion for subplots here
if not single:
subplot = figure.add_subplot(len(plots), 1, plot_index + 1)
subplot.set_title(plot['label'])
if needs_invert:
# for separate plots, just invert the y-axis
subplot.set_ylim(1, 0)
elif needs_invert:
# for a shared plot (normalized data), need to invert the y values
# manually, since all plots share a y-axis
plot['y'] = [-y for y in plot['y']]
# Plot the series
subplot.set_xticks(range(0, len(labels)))
subplot.set_xlim(-1, len(labels))
if single:
lines += subplot.plot(plot['x'], plot['y'], label=plot['label'],
marker=_MULTIPLE_PLOT_MARKER_TYPE,
markersize=_MULTIPLE_PLOT_MARKER_SIZE)
error_bar_color = lines[-1].get_color()
else:
lines += subplot.plot(plot['x'], plot['y'], _SINGLE_PLOT_STYLE,
label=plot['label'])
error_bar_color = _SINGLE_PLOT_ERROR_BAR_COLOR
if plot['errors']:
subplot.errorbar(plot['x'], plot['y'], linestyle='None',
yerr=plot['errors'], color=error_bar_color)
subplot.set_xticklabels([])
# Construct the information for the drilldowns.
# We need to do this in a separate loop so that all the data is in
# matplotlib before we start calling transform(); otherwise, it will return
# incorrect data because it hasn't finished adjusting axis limits.
for line in lines:
# Get the pixel coordinates of each point on the figure
x = line.get_xdata()
y = line.get_ydata()
label = line.get_label()
icoords = line.get_transform().transform(zip(x,y))
# Get the appropriate drilldown query
drill = plot_info.query_dict['__' + label + '__']
# Set the title attributes (hover-over tool-tips)
x_labels = [labels[x_val] for x_val in x]
titles = ['%s - %s: %f' % (label, x_label, y_val)
for x_label, y_val in zip(x_labels, y)]
# Get the appropriate parameters for the drilldown query
params = [dict(query=drill, series=line.get_label(), param=x_label)
for x_label in x_labels]
area_data += [dict(left=ix - 5, top=height - iy - 5,
right=ix + 5, bottom=height - iy + 5,
title= title,
callback=plot_info.drilldown_callback,
callback_arguments=param_dict)
for (ix, iy), title, param_dict
in zip(icoords, titles, params)]
subplot.set_xticklabels(labels, rotation=90, size=_LINE_XTICK_LABELS_SIZE)
# Show the legend if there are not multiple subplots
if single:
font_properties = matplotlib.font_manager.FontProperties(
size=_LEGEND_FONT_SIZE)
legend = figure.legend(lines, [plot['label'] for plot in plots],
prop=font_properties,
handlelen=_LEGEND_HANDLE_LENGTH,
numpoints=_LEGEND_NUM_POINTS)
        # Workaround: explicitly set a marker on each legend line; otherwise
        # matplotlib does not keep the line markers in the legend.
for line in legend.get_lines():
line.set_marker(_LEGEND_MARKER_TYPE)
return (figure, area_data)
def _get_adjusted_bar(x, bar_width, series_index, num_plots):
"""\
Adjust the list 'x' to take the multiple series into account. Each series
should be shifted such that the middle series lies at the appropriate x-axis
tick with the other bars around it. For example, if we had four series
(i.e. four bars per x value), we want to shift the left edges of the bars as
such:
Bar 1: -2 * width
Bar 2: -width
Bar 3: none
Bar 4: width
"""
adjust = (-0.5 * num_plots - 1 + series_index) * bar_width
return [x_val + adjust for x_val in x]
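# Worked example (added for clarity, not in the original module): with four
# series and a bar width of 0.1, the left edges produced for x == [0] are
# shifted by -2*width, -width, 0 and +width, matching the docstring above:
#   [_get_adjusted_bar([0], 0.1, i, 4) for i in (1, 2, 3, 4)]
#   -> [[-0.2], [-0.1], [0.0], [0.1]]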
# TODO(showard): merge much of this function with _create_line by extracting and
# parameterizing methods
def _create_bar(plots, labels, plot_info):
"""\
    Given all the data for the metrics, create a bar plot.
plots: list of dicts containing the plot data.
x: list of x-values for the plot
y: list of corresponding y-values
errors: errors for each data point, or None if no error information
available
label: plot title
labels: list of x-tick labels
plot_info: a MetricsPlot
"""
area_data = []
bars = []
figure, height = _create_figure(_SINGLE_PLOT_HEIGHT)
# Set up the plot
subplot = figure.add_subplot(1, 1, 1)
subplot.set_xticks(range(0, len(labels)))
subplot.set_xlim(-1, len(labels))
subplot.set_xticklabels(labels, rotation=90, size=_BAR_XTICK_LABELS_SIZE)
# draw a bold line at y=0, making it easier to tell if bars are dipping
# below the axis or not.
subplot.axhline(linewidth=2, color='black')
# width here is the width for each bar in the plot. Matplotlib default is
# 0.8.
width = 0.8 / len(plots)
# Plot the data
for plot_index, (plot, color) in enumerate(zip(plots, _colors(len(plots)))):
# Invert the y-axis if needed
if plot['label'] in plot_info.inverted_series:
plot['y'] = [-y for y in plot['y']]
adjusted_x = _get_adjusted_bar(plot['x'], width, plot_index + 1,
len(plots))
bar_data = subplot.bar(adjusted_x, plot['y'],
width=width, yerr=plot['errors'],
facecolor=color,
label=plot['label'])
bars.append(bar_data[0])
# Construct the information for the drilldowns.
# See comment in _create_line for why we need a separate loop to do this.
for plot_index, plot in enumerate(plots):
adjusted_x = _get_adjusted_bar(plot['x'], width, plot_index + 1,
len(plots))
# Let matplotlib plot the data, so that we can get the data-to-image
# coordinate transforms
line = subplot.plot(adjusted_x, plot['y'], linestyle='None')[0]
label = plot['label']
upper_left_coords = line.get_transform().transform(zip(adjusted_x,
plot['y']))
bottom_right_coords = line.get_transform().transform(
[(x + width, 0) for x in adjusted_x])
# Get the drilldown query
drill = plot_info.query_dict['__' + label + '__']
# Set the title attributes
x_labels = [labels[x] for x in plot['x']]
titles = ['%s - %s: %f' % (plot['label'], label, y)
for label, y in zip(x_labels, plot['y'])]
params = [dict(query=drill, series=plot['label'], param=x_label)
for x_label in x_labels]
area_data += [dict(left=ulx, top=height - uly,
right=brx, bottom=height - bry,
title=title,
callback=plot_info.drilldown_callback,
callback_arguments=param_dict)
for (ulx, uly), (brx, bry), title, param_dict
in zip(upper_left_coords, bottom_right_coords, titles,
params)]
figure.legend(bars, [plot['label'] for plot in plots])
return (figure, area_data)
def _normalize(data_values, data_errors, base_values, base_errors):
"""\
Normalize the data against a baseline.
data_values: y-values for the to-be-normalized data
data_errors: standard deviations for the to-be-normalized data
    base_values: list of values to normalize against
base_errors: list of standard deviations for those base values
"""
values = []
for value, base in zip(data_values, base_values):
try:
values.append(100 * (value - base) / base)
except ZeroDivisionError:
# Base is 0.0 so just simplify:
# If value < base: append -100.0;
# If value == base: append 0.0 (obvious); and
# If value > base: append 100.0.
values.append(100 * float(cmp(value, base)))
# Based on error for f(x,y) = 100 * (x - y) / y
if data_errors:
if not base_errors:
base_errors = [0] * len(data_errors)
errors = []
for data, error, base_value, base_error in zip(
data_values, data_errors, base_values, base_errors):
try:
errors.append(sqrt(error**2 * (100 / base_value)**2
+ base_error**2 * (100 * data / base_value**2)**2
+ error * base_error * (100 / base_value**2)**2))
except ZeroDivisionError:
# Again, base is 0.0 so do the simple thing.
errors.append(100 * abs(error))
else:
errors = None
return (values, errors)
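# Worked example (added for clarity, not in the original module): normalizing
# [10.0, 12.0, 8.0] against a constant baseline of 10.0 expresses each point
# as a percent change from that baseline:
#   _normalize([10.0, 12.0, 8.0], None, [10.0, 10.0, 10.0], None)
#   -> ([0.0, 20.0, -20.0], None)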
def _create_png(figure):
"""\
Given the matplotlib figure, generate the PNG data for it.
"""
# Draw the image
canvas = matplotlib.backends.backend_agg.FigureCanvasAgg(figure)
canvas.draw()
size = canvas.get_renderer().get_canvas_width_height()
image_as_string = canvas.tostring_rgb()
image = PIL.Image.fromstring('RGB', size, image_as_string, 'raw', 'RGB', 0,
1)
image_background = PIL.Image.new(image.mode, image.size,
figure.get_facecolor())
# Crop the image to remove surrounding whitespace
non_whitespace = PIL.ImageChops.difference(image, image_background)
bounding_box = non_whitespace.getbbox()
image = image.crop(bounding_box)
image_data = StringIO.StringIO()
image.save(image_data, format='PNG')
return image_data.getvalue(), bounding_box
def _create_image_html(figure, area_data, plot_info):
"""\
Given the figure and drilldown data, construct the HTML that will render the
graph as a PNG image, and attach the image map to that image.
figure: figure containing the drawn plot(s)
area_data: list of parameters for each area of the image map. See the
definition of the template string '_AREA_TEMPLATE'
plot_info: a MetricsPlot or QualHistogram
"""
png, bbox = _create_png(figure)
# Construct the list of image map areas
areas = [_AREA_TEMPLATE %
(data['left'] - bbox[0], data['top'] - bbox[1],
data['right'] - bbox[0], data['bottom'] - bbox[1],
data['title'], data['callback'],
_json_encoder.encode(data['callback_arguments'])
.replace('"', '"'))
for data in area_data]
map_name = plot_info.drilldown_callback + '_map'
return _HTML_TEMPLATE % (base64.b64encode(png), map_name, map_name,
'\n'.join(areas))
def _find_plot_by_label(plots, label):
for index, plot in enumerate(plots):
if plot['label'] == label:
return index
raise ValueError('no plot labeled "%s" found' % label)
def _normalize_to_series(plots, base_series):
base_series_index = _find_plot_by_label(plots, base_series)
base_plot = plots[base_series_index]
base_xs = base_plot['x']
base_values = base_plot['y']
base_errors = base_plot['errors']
del plots[base_series_index]
for plot in plots:
old_xs, old_values, old_errors = plot['x'], plot['y'], plot['errors']
new_xs, new_values, new_errors = [], [], []
new_base_values, new_base_errors = [], []
# Select only points in the to-be-normalized data that have a
# corresponding baseline value
for index, x_value in enumerate(old_xs):
try:
base_index = base_xs.index(x_value)
except ValueError:
continue
new_xs.append(x_value)
new_values.append(old_values[index])
new_base_values.append(base_values[base_index])
if old_errors:
new_errors.append(old_errors[index])
new_base_errors.append(base_errors[base_index])
if not new_xs:
raise NoDataError('No normalizable data for series ' +
plot['label'])
plot['x'] = new_xs
plot['y'] = new_values
if old_errors:
plot['errors'] = new_errors
plot['y'], plot['errors'] = _normalize(plot['y'], plot['errors'],
new_base_values,
new_base_errors)
def _create_metrics_plot_helper(plot_info, extra_text=None):
"""
Create a metrics plot of the given plot data.
plot_info: a MetricsPlot object.
    extra_text: text to show at the upper-left of the graph
TODO(showard): move some/all of this logic into methods on MetricsPlot
"""
query = plot_info.query_dict['__main__']
cursor = readonly_connection.connection().cursor()
cursor.execute(query)
if not cursor.rowcount:
raise NoDataError('query did not return any data')
rows = cursor.fetchall()
# "transpose" rows, so columns[0] is all the values from the first column,
# etc.
columns = zip(*rows)
plots = []
labels = [str(label) for label in columns[0]]
needs_resort = (cursor.description[0][0] == 'kernel')
# Collect all the data for the plot
col = 1
while col < len(cursor.description):
y = columns[col]
label = cursor.description[col][0]
col += 1
if (col < len(cursor.description) and
'errors-' + label == cursor.description[col][0]):
errors = columns[col]
col += 1
else:
errors = None
if needs_resort:
y = _resort(labels, y)
if errors:
errors = _resort(labels, errors)
x = [index for index, value in enumerate(y) if value is not None]
if not x:
raise NoDataError('No data for series ' + label)
y = [y[i] for i in x]
if errors:
errors = [errors[i] for i in x]
plots.append({
'label': label,
'x': x,
'y': y,
'errors': errors
})
if needs_resort:
labels = _resort(labels, labels)
# Normalize the data if necessary
normalize_to = plot_info.normalize_to
if normalize_to == 'first' or normalize_to.startswith('x__'):
if normalize_to != 'first':
baseline = normalize_to[3:]
try:
baseline_index = labels.index(baseline)
except ValueError:
raise ValidationError({
'Normalize' : 'Invalid baseline %s' % baseline
})
for plot in plots:
if normalize_to == 'first':
plot_index = 0
else:
try:
plot_index = plot['x'].index(baseline_index)
# if the value is not found, then we cannot normalize
except ValueError:
raise ValidationError({
'Normalize' : ('%s does not have a value for %s'
% (plot['label'], normalize_to[3:]))
})
base_values = [plot['y'][plot_index]] * len(plot['y'])
            base_errors = None
            if plot['errors']:
                base_errors = [plot['errors'][plot_index]] * len(plot['errors'])
            plot['y'], plot['errors'] = _normalize(plot['y'], plot['errors'],
                                                   base_values,
                                                   base_errors)
elif normalize_to.startswith('series__'):
base_series = normalize_to[8:]
_normalize_to_series(plots, base_series)
# Call the appropriate function to draw the line or bar plot
if plot_info.is_line:
figure, area_data = _create_line(plots, labels, plot_info)
else:
figure, area_data = _create_bar(plots, labels, plot_info)
# TODO(showard): extract these magic numbers to named constants
if extra_text:
text_y = .95 - .0075 * len(plots)
figure.text(.1, text_y, extra_text, size='xx-small')
return (figure, area_data)
def create_metrics_plot(query_dict, plot_type, inverted_series, normalize_to,
drilldown_callback, extra_text=None):
plot_info = MetricsPlot(query_dict, plot_type, inverted_series,
normalize_to, drilldown_callback)
figure, area_data = _create_metrics_plot_helper(plot_info, extra_text)
return _create_image_html(figure, area_data, plot_info)
def _get_hostnames_in_bucket(hist_data, bucket):
"""\
Get all the hostnames that constitute a particular bucket in the histogram.
hist_data: list containing tuples of (hostname, pass_rate)
bucket: tuple containing the (low, high) values of the target bucket
"""
return [hostname for hostname, pass_rate in hist_data
if bucket[0] <= pass_rate < bucket[1]]
def _create_qual_histogram_helper(plot_info, extra_text=None):
"""\
Create a machine qualification histogram of the given data.
plot_info: a QualificationHistogram
extra_text: text to show at the upper-left of the graph
TODO(showard): move much or all of this into methods on
QualificationHistogram
"""
cursor = readonly_connection.connection().cursor()
cursor.execute(plot_info.query)
if not cursor.rowcount:
raise NoDataError('query did not return any data')
# Lists to store the plot data.
    # hist_data stores tuples of (hostname, pass_rate) for machines that have
# pass rates between 0 and 100%, exclusive.
# no_tests is a list of machines that have run none of the selected tests
# no_pass is a list of machines with 0% pass rate
# perfect is a list of machines with a 100% pass rate
hist_data = []
no_tests = []
no_pass = []
perfect = []
# Construct the lists of data to plot
for hostname, total, good in cursor.fetchall():
if total == 0:
no_tests.append(hostname)
continue
if good == 0:
no_pass.append(hostname)
elif good == total:
perfect.append(hostname)
else:
percentage = 100.0 * good / total
hist_data.append((hostname, percentage))
interval = plot_info.interval
bins = range(0, 100, interval)
if bins[-1] != 100:
bins.append(bins[-1] + interval)
figure, height = _create_figure(_SINGLE_PLOT_HEIGHT)
subplot = figure.add_subplot(1, 1, 1)
# Plot the data and get all the bars plotted
_,_, bars = subplot.hist([data[1] for data in hist_data],
bins=bins, align='left')
bars += subplot.bar([-interval], len(no_pass),
width=interval, align='center')
bars += subplot.bar([bins[-1]], len(perfect),
width=interval, align='center')
bars += subplot.bar([-3 * interval], len(no_tests),
width=interval, align='center')
buckets = [(bin, min(bin + interval, 100)) for bin in bins[:-1]]
# set the x-axis range to cover all the normal bins plus the three "special"
    # ones - N/A (3 intervals left), 0% (1 interval left), and 100% (far right)
subplot.set_xlim(-4 * interval, bins[-1] + interval)
subplot.set_xticks([-3 * interval, -interval] + bins + [100 + interval])
subplot.set_xticklabels(['N/A', '0%'] +
['%d%% - <%d%%' % bucket for bucket in buckets] +
['100%'], rotation=90, size='small')
# Find the coordinates on the image for each bar
x = []
y = []
for bar in bars:
x.append(bar.get_x())
y.append(bar.get_height())
f = subplot.plot(x, y, linestyle='None')[0]
upper_left_coords = f.get_transform().transform(zip(x, y))
bottom_right_coords = f.get_transform().transform(
[(x_val + interval, 0) for x_val in x])
# Set the title attributes
titles = ['%d%% - <%d%%: %d machines' % (bucket[0], bucket[1], y_val)
for bucket, y_val in zip(buckets, y)]
titles.append('0%%: %d machines' % len(no_pass))
titles.append('100%%: %d machines' % len(perfect))
titles.append('N/A: %d machines' % len(no_tests))
# Get the hostnames for each bucket in the histogram
names_list = [_get_hostnames_in_bucket(hist_data, bucket)
for bucket in buckets]
names_list += [no_pass, perfect]
if plot_info.filter_string:
plot_info.filter_string += ' AND '
# Construct the list of drilldown parameters to be passed when the user
# clicks on the bar.
params = []
for names in names_list:
if names:
hostnames = ','.join(_quote(hostname) for hostname in names)
hostname_filter = 'hostname IN (%s)' % hostnames
full_filter = plot_info.filter_string + hostname_filter
params.append({'type': 'normal',
'filterString': full_filter})
else:
params.append({'type': 'empty'})
params.append({'type': 'not_applicable',
'hosts': '<br />'.join(no_tests)})
area_data = [dict(left=ulx, top=height - uly,
right=brx, bottom=height - bry,
title=title, callback=plot_info.drilldown_callback,
callback_arguments=param_dict)
for (ulx, uly), (brx, bry), title, param_dict
in zip(upper_left_coords, bottom_right_coords, titles, params)]
# TODO(showard): extract these magic numbers to named constants
if extra_text:
figure.text(.1, .95, extra_text, size='xx-small')
return (figure, area_data)
def create_qual_histogram(query, filter_string, interval, drilldown_callback,
extra_text=None):
plot_info = QualificationHistogram(query, filter_string, interval,
drilldown_callback)
figure, area_data = _create_qual_histogram_helper(plot_info, extra_text)
return _create_image_html(figure, area_data, plot_info)
def create_embedded_plot(model, update_time):
"""\
Given an EmbeddedGraphingQuery object, generate the PNG image for it.
model: EmbeddedGraphingQuery object
update_time: 'Last updated' time
"""
params = pickle.loads(model.params)
extra_text = 'Last updated: %s' % update_time
if model.graph_type == 'metrics':
plot_info = MetricsPlot(query_dict=params['queries'],
plot_type=params['plot'],
inverted_series=params['invert'],
normalize_to=None,
drilldown_callback='')
figure, areas_unused = _create_metrics_plot_helper(plot_info,
extra_text)
elif model.graph_type == 'qual':
plot_info = QualificationHistogram(
query=params['query'], filter_string=params['filter_string'],
interval=params['interval'], drilldown_callback='')
figure, areas_unused = _create_qual_histogram_helper(plot_info,
extra_text)
else:
raise ValueError('Invalid graph_type %s' % model.graph_type)
image, bounding_box_unused = _create_png(figure)
return image
_cache_timeout = global_config.global_config.get_config_value(
'TKO', 'graph_cache_creation_timeout_minutes')
def handle_plot_request(id, max_age):
"""\
Given the embedding id of a graph, generate a PNG of the embedded graph
associated with that id.
id: id of the embedded graph
max_age: maximum age, in minutes, that a cached version should be held
"""
model = models.EmbeddedGraphingQuery.objects.get(id=id)
# Check if the cached image needs to be updated
now = datetime.datetime.now()
update_time = model.last_updated + datetime.timedelta(minutes=int(max_age))
if now > update_time:
cursor = django.db.connection.cursor()
# We want this query to update the refresh_time only once, even if
# multiple threads are running it at the same time. That is, only the
# first thread will win the race, and it will be the one to update the
# cached image; all other threads will show that they updated 0 rows
query = """
UPDATE embedded_graphing_queries
SET refresh_time = NOW()
WHERE id = %s AND (
refresh_time IS NULL OR
refresh_time + INTERVAL %s MINUTE < NOW()
)
"""
cursor.execute(query, (id, _cache_timeout))
# Only refresh the cached image if we were successful in updating the
# refresh time
if cursor.rowcount:
model.cached_png = create_embedded_plot(model, now.ctime())
model.last_updated = now
model.refresh_time = None
model.save()
return model.cached_png
| gpl-2.0 |
zorroblue/scikit-learn | examples/semi_supervised/plot_label_propagation_structure.py | 23 | 2433 | """
==============================================
Label Propagation learning a complex structure
==============================================
Example of LabelPropagation learning a complex internal structure
to demonstrate "manifold learning". The outer circle should be
labeled "red" and the inner circle "blue". Because both label groups
lie inside their own distinct shape, we can see that the labels
propagate correctly around the circle.
"""
print(__doc__)
# Authors: Clay Woolam <[email protected]>
# Andreas Mueller <[email protected]>
# License: BSD
import numpy as np
import matplotlib.pyplot as plt
from sklearn.semi_supervised import label_propagation
from sklearn.datasets import make_circles
# generate ring with inner box
n_samples = 200
X, y = make_circles(n_samples=n_samples, shuffle=False)
outer, inner = 0, 1
labels = -np.ones(n_samples)
labels[0] = outer
labels[-1] = inner
# #############################################################################
# Learn with LabelSpreading
label_spread = label_propagation.LabelSpreading(kernel='knn', alpha=0.8)
label_spread.fit(X, labels)
# #############################################################################
# Plot output labels
output_labels = label_spread.transduction_
plt.figure(figsize=(8.5, 4))
plt.subplot(1, 2, 1)
plt.scatter(X[labels == outer, 0], X[labels == outer, 1], color='navy',
marker='s', lw=0, label="outer labeled", s=10)
plt.scatter(X[labels == inner, 0], X[labels == inner, 1], color='c',
marker='s', lw=0, label='inner labeled', s=10)
plt.scatter(X[labels == -1, 0], X[labels == -1, 1], color='darkorange',
marker='.', label='unlabeled')
plt.legend(scatterpoints=1, shadow=False, loc='upper right')
plt.title("Raw data (2 classes=outer and inner)")
plt.subplot(1, 2, 2)
output_label_array = np.asarray(output_labels)
outer_numbers = np.where(output_label_array == outer)[0]
inner_numbers = np.where(output_label_array == inner)[0]
plt.scatter(X[outer_numbers, 0], X[outer_numbers, 1], color='navy',
marker='s', lw=0, s=10, label="outer learned")
plt.scatter(X[inner_numbers, 0], X[inner_numbers, 1], color='c',
marker='s', lw=0, s=10, label="inner learned")
plt.legend(scatterpoints=1, shadow=False, loc='upper right')
plt.title("Labels learned with Label Spreading (KNN)")
plt.subplots_adjust(left=0.07, bottom=0.07, right=0.93, top=0.92)
plt.show()
| bsd-3-clause |
phobson/statsmodels | statsmodels/tsa/statespace/tests/test_tools.py | 1 | 15218 | """
Tests for tools
Author: Chad Fulton
License: Simplified-BSD
"""
from __future__ import division, absolute_import, print_function
import numpy as np
import pandas as pd
from scipy.linalg import solve_discrete_lyapunov
from statsmodels.tsa.statespace import tools
from statsmodels.tsa.api import acovf
# from .results import results_sarimax
from numpy.testing import (
assert_allclose, assert_equal, assert_array_equal, assert_almost_equal,
assert_raises
)
class TestCompanionMatrix(object):
cases = [
(2, np.array([[0,1],[0,0]])),
([1,-1,-2], np.array([[1,1],
[2,0]])),
([1,-1,-2,-3], np.array([[1,1,0],
[2,0,1],
[3,0,0]])),
([1,-np.array([[1,2],[3,4]]),-np.array([[5,6],[7,8]])],
np.array([[1,2,5,6],
[3,4,7,8],
[1,0,0,0],
[0,1,0,0]]).T)
]
def test_cases(self):
for polynomial, result in self.cases:
assert_equal(tools.companion_matrix(polynomial), result)
class TestDiff(object):
x = np.arange(10)
cases = [
# diff = 1
([1,2,3], 1, None, 1, [1, 1]),
# diff = 2
(x, 2, None, 1, [0]*8),
# diff = 1, seasonal_diff=1, k_seasons=4
(x, 1, 1, 4, [0]*5),
(x**2, 1, 1, 4, [8]*5),
(x**3, 1, 1, 4, [60, 84, 108, 132, 156]),
# diff = 1, seasonal_diff=2, k_seasons=2
(x, 1, 2, 2, [0]*5),
(x**2, 1, 2, 2, [0]*5),
(x**3, 1, 2, 2, [24]*5),
(x**4, 1, 2, 2, [240, 336, 432, 528, 624]),
]
def test_cases(self):
# Basic cases
for series, diff, seasonal_diff, k_seasons, result in self.cases:
# Test numpy array
x = tools.diff(series, diff, seasonal_diff, k_seasons)
assert_almost_equal(x, result)
# Test as Pandas Series
series = pd.Series(series)
# Rewrite to test as n-dimensional array
series = np.c_[series, series]
result = np.c_[result, result]
# Test Numpy array
x = tools.diff(series, diff, seasonal_diff, k_seasons)
assert_almost_equal(x, result)
# Test as Pandas Dataframe
series = pd.DataFrame(series)
x = tools.diff(series, diff, seasonal_diff, k_seasons)
assert_almost_equal(x, result)
class TestSolveDiscreteLyapunov(object):
def solve_dicrete_lyapunov_direct(self, a, q, complex_step=False):
# This is the discrete Lyapunov solver as "real function of real
# variables": the difference between this and the usual, complex,
# version is that in the Kronecker product the second argument is
# *not* conjugated here.
if not complex_step:
lhs = np.kron(a, a.conj())
lhs = np.eye(lhs.shape[0]) - lhs
x = np.linalg.solve(lhs, q.flatten())
else:
lhs = np.kron(a, a)
lhs = np.eye(lhs.shape[0]) - lhs
x = np.linalg.solve(lhs, q.flatten())
return np.reshape(x, q.shape)
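    # Added note (not in the original tests): the direct solver above uses the
    # vectorization identity for the discrete Lyapunov equation
    # A X A^H - X + Q = 0, i.e. vec(X) = (I - kron(A, conj(A)))^{-1} vec(Q);
    # with complex_step=True the conjugation is dropped, so the equation is
    # treated as a real function of real variables. In the scalar case
    # a = 0.5, q = 10 both branches reduce to x = q / (1 - a**2) = 13.33...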
def test_univariate(self):
# Real case
a = np.array([[0.5]])
q = np.array([[10.]])
actual = tools.solve_discrete_lyapunov(a, q)
desired = solve_discrete_lyapunov(a, q)
assert_allclose(actual, desired)
# Complex case (where the Lyapunov equation is taken as a complex
# function)
a = np.array([[0.5+1j]])
q = np.array([[10.]])
actual = tools.solve_discrete_lyapunov(a, q)
desired = solve_discrete_lyapunov(a, q)
assert_allclose(actual, desired)
# Complex case (where the Lyapunov equation is taken as a real
# function)
a = np.array([[0.5+1j]])
q = np.array([[10.]])
actual = tools.solve_discrete_lyapunov(a, q, complex_step=True)
desired = self.solve_dicrete_lyapunov_direct(a, q, complex_step=True)
assert_allclose(actual, desired)
def test_multivariate(self):
# Real case
a = tools.companion_matrix([1, -0.4, 0.5])
q = np.diag([10., 5.])
actual = tools.solve_discrete_lyapunov(a, q)
desired = solve_discrete_lyapunov(a, q)
assert_allclose(actual, desired)
# Complex case (where the Lyapunov equation is taken as a complex
# function)
a = tools.companion_matrix([1, -0.4+0.1j, 0.5])
q = np.diag([10., 5.])
actual = tools.solve_discrete_lyapunov(a, q, complex_step=False)
desired = self.solve_dicrete_lyapunov_direct(a, q, complex_step=False)
assert_allclose(actual, desired)
# Complex case (where the Lyapunov equation is taken as a real
# function)
a = tools.companion_matrix([1, -0.4+0.1j, 0.5])
q = np.diag([10., 5.])
actual = tools.solve_discrete_lyapunov(a, q, complex_step=True)
desired = self.solve_dicrete_lyapunov_direct(a, q, complex_step=True)
assert_allclose(actual, desired)
class TestIsInvertible(object):
cases = [
([1, -0.5], True),
([1, 1-1e-9], True),
([1, 1], False),
([1, 0.9,0.1], True),
(np.array([1,0.9,0.1]), True),
(pd.Series([1,0.9,0.1]), True)
]
def test_cases(self):
for polynomial, invertible in self.cases:
assert_equal(tools.is_invertible(polynomial), invertible)
class TestConstrainStationaryUnivariate(object):
cases = [
(np.array([2.]), -2./((1+2.**2)**0.5))
]
def test_cases(self):
for unconstrained, constrained in self.cases:
result = tools.constrain_stationary_univariate(unconstrained)
assert_equal(result, constrained)
class TestUnconstrainStationaryUnivariate(object):
cases = [
(np.array([-2./((1+2.**2)**0.5)]), np.array([2.]))
]
def test_cases(self):
for constrained, unconstrained in self.cases:
result = tools.unconstrain_stationary_univariate(constrained)
assert_allclose(result, unconstrained)
class TestStationaryUnivariate(object):
# Test that the constraint and unconstraint functions are inverses
constrained_cases = [
np.array([0]), np.array([0.1]), np.array([-0.5]), np.array([0.999])]
unconstrained_cases = [
np.array([10.]), np.array([-40.42]), np.array([0.123])]
def test_cases(self):
for constrained in self.constrained_cases:
unconstrained = tools.unconstrain_stationary_univariate(constrained)
reconstrained = tools.constrain_stationary_univariate(unconstrained)
assert_allclose(reconstrained, constrained)
for unconstrained in self.unconstrained_cases:
constrained = tools.constrain_stationary_univariate(unconstrained)
reunconstrained = tools.unconstrain_stationary_univariate(constrained)
assert_allclose(reunconstrained, unconstrained)
class TestValidateMatrixShape(object):
# name, shape, nrows, ncols, nobs
valid = [
('TEST', (5,2), 5, 2, None),
('TEST', (5,2), 5, 2, 10),
('TEST', (5,2,10), 5, 2, 10),
]
invalid = [
('TEST', (5,), 5, None, None),
('TEST', (5,1,1,1), 5, 1, None),
('TEST', (5,2), 10, 2, None),
('TEST', (5,2), 5, 1, None),
('TEST', (5,2,10), 5, 2, None),
('TEST', (5,2,10), 5, 2, 5),
]
def test_valid_cases(self):
for args in self.valid:
# Just testing that no exception is raised
tools.validate_matrix_shape(*args)
def test_invalid_cases(self):
for args in self.invalid:
assert_raises(
ValueError, tools.validate_matrix_shape, *args
)
class TestValidateVectorShape(object):
# name, shape, nrows, ncols, nobs
valid = [
('TEST', (5,), 5, None),
('TEST', (5,), 5, 10),
('TEST', (5,10), 5, 10),
]
invalid = [
('TEST', (5,2,10), 5, 10),
('TEST', (5,), 10, None),
('TEST', (5,10), 5, None),
('TEST', (5,10), 5, 5),
]
def test_valid_cases(self):
for args in self.valid:
# Just testing that no exception is raised
tools.validate_vector_shape(*args)
def test_invalid_cases(self):
for args in self.invalid:
assert_raises(
ValueError, tools.validate_vector_shape, *args
)
def test_multivariate_acovf():
_acovf = tools._compute_multivariate_acovf_from_coefficients
# Test for a VAR(1) process. From Lutkepohl (2007), pages 27-28.
# See (2.1.14) for Phi_1, (2.1.33) for Sigma_u, and (2.1.34) for Gamma_0
Sigma_u = np.array([[2.25, 0, 0],
[0, 1.0, 0.5],
[0, 0.5, 0.74]])
Phi_1 = np.array([[0.5, 0, 0],
[0.1, 0.1, 0.3],
[0, 0.2, 0.3]])
Gamma_0 = np.array([[3.0, 0.161, 0.019],
[0.161, 1.172, 0.674],
[0.019, 0.674, 0.954]])
assert_allclose(_acovf([Phi_1], Sigma_u)[0], Gamma_0, atol=1e-3)
# Test for a VAR(2) process. From Lutkepohl (2007), pages 28-29
# See (2.1.40) for Phi_1, Phi_2, (2.1.14) for Sigma_u, and (2.1.42) for
# Gamma_0, Gamma_1
Sigma_u = np.diag([0.09, 0.04])
Phi_1 = np.array([[0.5, 0.1],
[0.4, 0.5]])
Phi_2 = np.array([[0, 0],
[0.25, 0]])
Gamma_0 = np.array([[0.131, 0.066],
[0.066, 0.181]])
Gamma_1 = np.array([[0.072, 0.051],
[0.104, 0.143]])
Gamma_2 = np.array([[0.046, 0.040],
[0.113, 0.108]])
Gamma_3 = np.array([[0.035, 0.031],
[0.093, 0.083]])
assert_allclose(
_acovf([Phi_1, Phi_2], Sigma_u, maxlag=0),
[Gamma_0], atol=1e-3)
assert_allclose(
_acovf([Phi_1, Phi_2], Sigma_u, maxlag=1),
[Gamma_0, Gamma_1], atol=1e-3)
assert_allclose(
_acovf([Phi_1, Phi_2], Sigma_u),
[Gamma_0, Gamma_1], atol=1e-3)
assert_allclose(
_acovf([Phi_1, Phi_2], Sigma_u, maxlag=2),
[Gamma_0, Gamma_1, Gamma_2], atol=1e-3)
assert_allclose(
_acovf([Phi_1, Phi_2], Sigma_u, maxlag=3),
[Gamma_0, Gamma_1, Gamma_2, Gamma_3], atol=1e-3)
# Test sample acovf in the univariate case against sm.tsa.acovf
x = np.arange(20)*1.0
assert_allclose(
np.squeeze(tools._compute_multivariate_sample_acovf(x, maxlag=4)),
acovf(x)[:5])
def test_multivariate_pacf():
    # Test the sample multivariate pacf: a linear trend and white noise should
    # give lag-one partial autocorrelations of approximately 1 and 0, respectively
np.random.seed(1234)
x = np.arange(10000)
y = np.random.normal(size=10000)
# Note: could make this test more precise with higher nobs, but no need to
assert_allclose(
tools._compute_multivariate_sample_pacf(np.c_[x, y], maxlag=1)[0],
np.diag([1, 0]), atol=1e-2)
class TestConstrainStationaryMultivariate(object):
cases = [
# This is the same test as the univariate case above, except notice
# the sign difference; this is an array input / output
(np.array([[2.]]), np.eye(1), np.array([[2./((1+2.**2)**0.5)]])),
# Same as above, but now a list input / output
([np.array([[2.]])], np.eye(1), [np.array([[2./((1+2.**2)**0.5)]])])
]
eigval_cases = [
[np.array([[0]])],
[np.array([[100]]), np.array([[50]])],
[np.array([[30, 1], [-23, 15]]), np.array([[10, .3], [.5, -30]])],
]
def test_cases(self):
# Test against known results
for unconstrained, error_variance, constrained in self.cases:
result = tools.constrain_stationary_multivariate(
unconstrained, error_variance)
assert_allclose(result[0], constrained)
# Test that the constrained results correspond to companion matrices
# with eigenvalues less than 1 in modulus
for unconstrained in self.eigval_cases:
if type(unconstrained) == list:
cov = np.eye(unconstrained[0].shape[0])
else:
cov = np.eye(unconstrained.shape[0])
constrained, _ = tools.constrain_stationary_multivariate(unconstrained, cov)
companion = tools.companion_matrix(
[1] + [-constrained[i] for i in range(len(constrained))]
).T
assert_equal(np.max(np.abs(np.linalg.eigvals(companion))) < 1, True)
class TestUnconstrainStationaryMultivariate(object):
cases = [
# This is the same test as the univariate case above, except notice
# the sign difference; this is an array input / output
(np.array([[2./((1+2.**2)**0.5)]]), np.eye(1), np.array([[2.]])),
# Same as above, but now a list input / output
([np.array([[2./((1+2.**2)**0.5)]])], np.eye(1), [np.array([[2.]])])
]
def test_cases(self):
for constrained, error_variance, unconstrained in self.cases:
result = tools.unconstrain_stationary_multivariate(
constrained, error_variance)
assert_allclose(result[0], unconstrained)
class TestStationaryMultivariate(object):
# Test that the constraint and unconstraint functions are inverses
constrained_cases = [
np.array([[0]]), np.array([[0.1]]), np.array([[-0.5]]), np.array([[0.999]]),
[np.array([[0]])],
np.array([[0.8, -0.2]]),
[np.array([[0.8]]), np.array([[-0.2]])],
[np.array([[0.3, 0.01], [-0.23, 0.15]]), np.array([[0.1, 0.03], [0.05, -0.3]])],
np.array([[0.3, 0.01, 0.1, 0.03], [-0.23, 0.15, 0.05, -0.3]])
]
unconstrained_cases = [
np.array([[0]]), np.array([[-40.42]]), np.array([[0.123]]),
[np.array([[0]])],
np.array([[100, 50]]),
[np.array([[100]]), np.array([[50]])],
[np.array([[30, 1], [-23, 15]]), np.array([[10, .3], [.5, -30]])],
np.array([[30, 1, 10, .3], [-23, 15, .5, -30]])
]
def test_cases(self):
for constrained in self.constrained_cases:
if type(constrained) == list:
cov = np.eye(constrained[0].shape[0])
else:
cov = np.eye(constrained.shape[0])
unconstrained, _ = tools.unconstrain_stationary_multivariate(constrained, cov)
reconstrained, _ = tools.constrain_stationary_multivariate(unconstrained, cov)
assert_allclose(reconstrained, constrained)
for unconstrained in self.unconstrained_cases:
if type(unconstrained) == list:
cov = np.eye(unconstrained[0].shape[0])
else:
cov = np.eye(unconstrained.shape[0])
constrained, _ = tools.constrain_stationary_multivariate(unconstrained, cov)
reunconstrained, _ = tools.unconstrain_stationary_multivariate(constrained, cov)
# Note: low tolerance comes from last example in unconstrained_cases,
# but is not a real problem
assert_allclose(reunconstrained, unconstrained, atol=1e-4)
| bsd-3-clause |
imaculate/scikit-learn | sklearn/metrics/cluster/tests/test_bicluster.py | 394 | 1770 | """Testing for bicluster metrics module"""
import numpy as np
from sklearn.utils.testing import assert_equal, assert_almost_equal
from sklearn.metrics.cluster.bicluster import _jaccard
from sklearn.metrics import consensus_score
def test_jaccard():
a1 = np.array([True, True, False, False])
a2 = np.array([True, True, True, True])
a3 = np.array([False, True, True, False])
a4 = np.array([False, False, True, True])
assert_equal(_jaccard(a1, a1, a1, a1), 1)
assert_equal(_jaccard(a1, a1, a2, a2), 0.25)
assert_equal(_jaccard(a1, a1, a3, a3), 1.0 / 7)
assert_equal(_jaccard(a1, a1, a4, a4), 0)
def test_consensus_score():
a = [[True, True, False, False],
[False, False, True, True]]
b = a[::-1]
assert_equal(consensus_score((a, a), (a, a)), 1)
assert_equal(consensus_score((a, a), (b, b)), 1)
assert_equal(consensus_score((a, b), (a, b)), 1)
assert_equal(consensus_score((a, b), (b, a)), 1)
assert_equal(consensus_score((a, a), (b, a)), 0)
assert_equal(consensus_score((a, a), (a, b)), 0)
assert_equal(consensus_score((b, b), (a, b)), 0)
assert_equal(consensus_score((b, b), (b, a)), 0)
def test_consensus_score_issue2445():
''' Different number of biclusters in A and B'''
a_rows = np.array([[True, True, False, False],
[False, False, True, True],
[False, False, False, True]])
a_cols = np.array([[True, True, False, False],
[False, False, True, True],
[False, False, False, True]])
idx = [0, 2]
s = consensus_score((a_rows, a_cols), (a_rows[idx], a_cols[idx]))
# B contains 2 of the 3 biclusters in A, so score should be 2/3
assert_almost_equal(s, 2.0/3.0)
| bsd-3-clause |
ankurhanda/nicp | nicp/nicp_evaluation/scripts/evaluate_ate.py | 22 | 8437 | #!/usr/bin/python
# Software License Agreement (BSD License)
#
# Copyright (c) 2013, Juergen Sturm, TUM
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of TUM nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Requirements:
# sudo apt-get install python-argparse
"""
This script computes the absolute trajectory error from the ground truth
trajectory and the estimated trajectory.
"""
import sys
import numpy
import argparse
import associate
def align(model,data):
"""Align two trajectories using the method of Horn (closed-form).
Input:
model -- first trajectory (3xn)
data -- second trajectory (3xn)
Output:
rot -- rotation matrix (3x3)
trans -- translation vector (3x1)
trans_error -- translational error per point (1xn)
"""
numpy.set_printoptions(precision=3,suppress=True)
model_zerocentered = model - model.mean(1)
data_zerocentered = data - data.mean(1)
W = numpy.zeros( (3,3) )
for column in range(model.shape[1]):
W += numpy.outer(model_zerocentered[:,column],data_zerocentered[:,column])
U,d,Vh = numpy.linalg.linalg.svd(W.transpose())
S = numpy.matrix(numpy.identity( 3 ))
if(numpy.linalg.det(U) * numpy.linalg.det(Vh)<0):
S[2,2] = -1
rot = U*S*Vh
trans = data.mean(1) - rot * model.mean(1)
model_aligned = rot * model + trans
alignment_error = model_aligned - data
trans_error = numpy.sqrt(numpy.sum(numpy.multiply(alignment_error,alignment_error),0)).A[0]
return rot,trans,trans_error
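# Illustrative self-check (not part of the original script): align() should
# recover a known rigid transform applied to a synthetic trajectory and leave
# a numerically zero residual. The transform below is made up for the demo.
def _align_self_check():
    numpy.random.seed(0)
    model = numpy.matrix(numpy.random.rand(3, 10))
    angle = 0.3
    true_rot = numpy.matrix([[numpy.cos(angle), -numpy.sin(angle), 0.0],
                             [numpy.sin(angle), numpy.cos(angle), 0.0],
                             [0.0, 0.0, 1.0]])
    true_trans = numpy.matrix([[1.0], [-2.0], [0.5]])
    data = true_rot * model + true_trans
    rot, trans, trans_error = align(model, data)
    # residual translational error should be ~0 for an exact rigid transform
    return numpy.allclose(trans_error, 0.0)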
def plot_traj(ax,stamps,traj,style,color,label):
"""
Plot a trajectory using matplotlib.
Input:
ax -- the plot
stamps -- time stamps (1xn)
traj -- trajectory (3xn)
style -- line style
color -- line color
label -- plot legend
"""
stamps.sort()
interval = numpy.median([s-t for s,t in zip(stamps[1:],stamps[:-1])])
x = []
y = []
last = stamps[0]
for i in range(len(stamps)):
if stamps[i]-last < 2*interval:
x.append(traj[i][0])
y.append(traj[i][1])
elif len(x)>0:
ax.plot(x,y,style,color=color,label=label)
label=""
x=[]
y=[]
last= stamps[i]
if len(x)>0:
ax.plot(x,y,style,color=color,label=label)
if __name__=="__main__":
# parse command line
parser = argparse.ArgumentParser(description='''
This script computes the absolute trajectory error from the ground truth trajectory and the estimated trajectory.
''')
parser.add_argument('first_file', help='ground truth trajectory (format: timestamp tx ty tz qx qy qz qw)')
parser.add_argument('second_file', help='estimated trajectory (format: timestamp tx ty tz qx qy qz qw)')
parser.add_argument('--offset', help='time offset added to the timestamps of the second file (default: 0.0)',default=0.0)
parser.add_argument('--scale', help='scaling factor for the second trajectory (default: 1.0)',default=1.0)
parser.add_argument('--max_difference', help='maximally allowed time difference for matching entries (default: 0.02)',default=0.02)
parser.add_argument('--save', help='save aligned second trajectory to disk (format: stamp2 x2 y2 z2)')
parser.add_argument('--save_associations', help='save associated first and aligned second trajectory to disk (format: stamp1 x1 y1 z1 stamp2 x2 y2 z2)')
parser.add_argument('--plot', help='plot the first and the aligned second trajectory to an image (format: png)')
parser.add_argument('--verbose', help='print all evaluation data (otherwise, only the RMSE absolute translational error in meters after alignment will be printed)', action='store_true')
args = parser.parse_args()
first_list = associate.read_file_list(args.first_file)
second_list = associate.read_file_list(args.second_file)
matches = associate.associate(first_list, second_list,float(args.offset),float(args.max_difference))
if len(matches)<2:
sys.exit("Couldn't find matching timestamp pairs between groundtruth and estimated trajectory! Did you choose the correct sequence?")
first_xyz = numpy.matrix([[float(value) for value in first_list[a][0:3]] for a,b in matches]).transpose()
second_xyz = numpy.matrix([[float(value)*float(args.scale) for value in second_list[b][0:3]] for a,b in matches]).transpose()
rot,trans,trans_error = align(second_xyz,first_xyz)
second_xyz_aligned = rot * second_xyz + trans
first_stamps = first_list.keys()
first_stamps.sort()
first_xyz_full = numpy.matrix([[float(value) for value in first_list[b][0:3]] for b in first_stamps]).transpose()
second_stamps = second_list.keys()
second_stamps.sort()
second_xyz_full = numpy.matrix([[float(value)*float(args.scale) for value in second_list[b][0:3]] for b in second_stamps]).transpose()
second_xyz_full_aligned = rot * second_xyz_full + trans
if args.verbose:
print "compared_pose_pairs %d pairs"%(len(trans_error))
print "absolute_translational_error.rmse %f m"%numpy.sqrt(numpy.dot(trans_error,trans_error) / len(trans_error))
print "absolute_translational_error.mean %f m"%numpy.mean(trans_error)
print "absolute_translational_error.median %f m"%numpy.median(trans_error)
print "absolute_translational_error.std %f m"%numpy.std(trans_error)
print "absolute_translational_error.min %f m"%numpy.min(trans_error)
print "absolute_translational_error.max %f m"%numpy.max(trans_error)
else:
print "%f"%numpy.sqrt(numpy.dot(trans_error,trans_error) / len(trans_error))
if args.save_associations:
file = open(args.save_associations,"w")
file.write("\n".join(["%f %f %f %f %f %f %f %f"%(a,x1,y1,z1,b,x2,y2,z2) for (a,b),(x1,y1,z1),(x2,y2,z2) in zip(matches,first_xyz.transpose().A,second_xyz_aligned.transpose().A)]))
file.close()
if args.save:
file = open(args.save,"w")
file.write("\n".join(["%f "%stamp+" ".join(["%f"%d for d in line]) for stamp,line in zip(second_stamps,second_xyz_full_aligned.transpose().A)]))
file.close()
if args.plot:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.pylab as pylab
from matplotlib.patches import Ellipse
fig = plt.figure()
ax = fig.add_subplot(111)
plot_traj(ax,first_stamps,first_xyz_full.transpose().A,'-',"black","ground truth")
plot_traj(ax,second_stamps,second_xyz_full_aligned.transpose().A,'-',"blue","estimated")
label="difference"
for (a,b),(x1,y1,z1),(x2,y2,z2) in zip(matches,first_xyz.transpose().A,second_xyz_aligned.transpose().A):
ax.plot([x1,x2],[y1,y2],'-',color="red",label=label)
label=""
ax.legend()
ax.set_xlabel('x [m]')
ax.set_ylabel('y [m]')
plt.savefig(args.plot,dpi=90)
| gpl-3.0 |
mitschabaude/nanopores | scripts/pughpore/randomwalk/test/create_compare_both_intro.py | 1 | 4076 | # -*- coding: utf-8 -*-
from matplotlib import gridspec
import math
import matplotlib
import nanopores as nano
import nanopores.geometries.pughpore as pughpore
import matplotlib.pyplot as plt
from matplotlib.ticker import FormatStrFormatter
import numpy as np
import os
import sys
import nanopores.tools.fields as f
HOME = os.path.expanduser("~")
PAPERDIR = os.path.join(HOME, "papers", "paper-howorka")
FIGDIR = os.path.join(PAPERDIR, "figures", "")
DATADIR = os.path.join(HOME,"Dropbox", "nanopores", "fields")
f.set_dir(DATADIR)
number=False
geop = nano.Params(pughpore.params)
hpore=geop.hpore
fieldsname='eventsnew_both_1_'
params=dict(avgbind1=23e6,avgbind2=3e4,P_bind1=0.035,P_bind2=3e-1,z0=hpore/2.+0.)
drop, th = f.get("events_pugh_experiment", "drop", "t")
th = [1e0*time for time in th]
#cmap=matplotlib.cm.get_cmap('viridis')
data=f.get_fields(fieldsname,**params)
figname = fieldsname+'_%.1e_%.1e_%.1e_%.1e'%(params["avgbind1"],params["avgbind2"],params["P_bind1"],params["P_bind2"])+str(params["z0"])
t = data["t"]
a = data["a"]
ood = data["ood"]
lendata=len(t)
fac=1.
if max(t)<1e-2:
fac=1e3
t = [x*1e3 for x in t]
P_bind1=params["P_bind1"]
P_bind2=params["P_bind2"]
avgbind1=params["avgbind1"]*1e-6
avgbind2=params["avgbind2"]*1e-6
color2='#0080ff'
color1='#00cbba'
color3='#00ff55'
plt.figure(figsize=(7,5),dpi=80)
gs = gridspec.GridSpec(2,3,width_ratios=[4,2,1],height_ratios=[1,2.5])
gs.update(wspace=0.,hspace=0.)
minperc=0.
maxperc=40.
#plt1=plt.subplot(gs[1,0])
plt1=plt.subplot()
for k in range(lendata):
if t[k]<0.0015:
type1 = plt1.scatter([t[k]],[a[k]],color=color3,s=8)
elif t[k]<2.:
type2 = plt1.scatter([t[k]],[a[k]],color=color1,s=8)
else:
type3 = plt1.scatter([t[k]],[a[k]],color=color2,s=8)
# if ood[k]==0:
# type1 = plt1.scatter([t[k]],[a[k]],color=color2,s=8)
# else:
# type0 = plt1.scatter([t[k]],[a[k]],color=color3,s=8)
experiment = plt1.scatter(th,drop,color='#888888',s=8)
plt.legend([experiment,type1,type2,type3],['Experiment','Did not bind','Short binding','Long binding'],scatterpoints=4,loc=(0.01,0.01),frameon=False)
xfmt=FormatStrFormatter('%g')
plt1.set_xlim([.2*min(t),max(max(t),max(th))*2.])
plt1.set_ylim([minperc,maxperc])
plt1.set_xscale('log')
plt1.xaxis.set_major_formatter(xfmt)
plt1.invert_yaxis()
plt1.set_ylabel(r'A/I$_0$ [%]',fontsize=15)
if fac==1.:
# if P_bind1!=0.:
# plt1.text(avgbind1*.5,27.,'Long binding',fontsize=9,horizontalalignment='center')
# k=1.0
# plt1.add_patch(matplotlib.patches.Rectangle((avgbind1*10**(-k*2),0.),avgbind1*(10**(k)-10**(-k)),maxperc,facecolor=cmap(.7),alpha=.15))
# if P_bind2!=0.:
# plt1.text(avgbind2*.5,27.,'Short binding',fontsize=9,horizontalalignment='center')
# k=1.0
# plt1.add_patch(matplotlib.patches.Rectangle((avgbind2*10**(-k),0.),avgbind2*(10**(k)-10**(-k)),maxperc,facecolor=cmap(.4),alpha=.15))
plt1.set_xlabel(r'$\tau_{off}$ [ms]',fontsize=15)
else:
plt1.set_xlabel(ur'$\tau_{off}$ [µs]',fontsize=15)
#plt2=plt.subplot(gs[1,1])
#for k in range(lendata):
# if ood[k]==0:
# type1 = plt2.scatter([t[k]],[a[k]],color=color2,s=8)
# else:
# type0 = plt2.scatter([t[k]],[a[k]],color=color3,s=8)
#plt2.invert_yaxis()
#plt2.set_ylim([maxperc,minperc])
#plt2.set_xlim([-2e-2*max(t),max(t)*(1.+2e-2)])
#plt2.axes.get_yaxis().set_visible(False)
#plt2.axes.get_xaxis().major.locator.set_params(nbins=6)
#
#plt3=plt.subplot(gs[1,2])
#n, bins, patches = plt3.hist(np.array(a),15,normed=1,orientation='horizontal',color=color1,alpha=.5)
#plt3.invert_yaxis()
#plt3.set_xlim([0.,max(n)*1.2])
#plt3.set_ylim([maxperc,minperc])
#plt3.axes.get_xaxis().set_visible(False)
#plt3.axes.get_yaxis().set_visible(False)
#
#
#
#plt4=plt.subplot(gs[0,1])
#n, bins, patches = plt4.hist(np.array(t),20,normed=1,color=color1,alpha=.5)
#plt4.set_xlim([-2e-2*max(t),max(t)*(1.+2e-2)])
#plt4.axes.get_xaxis().set_visible(False)
#plt4.axes.get_yaxis().set_visible(False)
plt.tight_layout()
plt.show()
#plt.savefig('events_both_compare_intro.pdf')
| mit |
anderspitman/scikit-bio | skbio/stats/ordination/tests/test_redundancy_analysis.py | 1 | 6482 | # ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
import numpy as np
import numpy.testing as npt
import pandas as pd
from unittest import TestCase, main
from skbio import OrdinationResults
from skbio.stats.ordination import rda
from skbio.util import get_data_path, assert_ordination_results_equal
class TestRDAErrors(TestCase):
def setUp(self):
pass
def test_shape(self):
for n, p, n_, m in [(3, 4, 2, 1), (3, 4, 3, 10)]:
Y = pd.DataFrame(np.random.randn(n, p))
X = pd.DataFrame(np.random.randn(n_, m))
yield npt.assert_raises, ValueError, rda, Y, X, None, None
class TestRDAResults(TestCase):
# STATUS: L&L only shows results with scaling 1, and they agree
# with vegan's (module multiplying by a constant). I can also
# compute scaling 2, agreeing with vegan, but there are no written
# results in L&L.
def setUp(self):
"""Data from table 11.3 in Legendre & Legendre 1998."""
self.sample_ids = ['Site0', 'Site1', 'Site2', 'Site3', 'Site4',
'Site5', 'Site6', 'Site7', 'Site8', 'Site9']
self.feature_ids = ['Species0', 'Species1', 'Species2', 'Species3',
'Species4', 'Species5']
self.env_ids = map(str, range(4))
self.pc_ids = ['RDA1', 'RDA2', 'RDA3', 'RDA4', 'RDA5', 'RDA6', 'RDA7']
self.Y = pd.DataFrame(
np.loadtxt(get_data_path('example2_Y')),
index=self.sample_ids, columns=self.feature_ids)
self.X = pd.DataFrame(
np.loadtxt(get_data_path('example2_X')),
index=self.sample_ids, columns=self.env_ids)
def test_scaling1(self):
scores = rda(self.Y, self.X, scaling=1)
biplot_scores = pd.DataFrame(np.loadtxt(
get_data_path('example2_biplot_scaling1')))
sample_constraints = pd.DataFrame(np.loadtxt(
get_data_path('example2_sample_constraints_scaling1')))
# Load data as computed with vegan 2.0-8
vegan_features = pd.DataFrame(
np.loadtxt(get_data_path(
'example2_species_scaling1_from_vegan')),
index=self.feature_ids,
columns=self.pc_ids)
vegan_samples = pd.DataFrame(
np.loadtxt(get_data_path(
'example2_site_scaling1_from_vegan')),
index=self.sample_ids,
columns=self.pc_ids)
sample_constraints = pd.DataFrame(
np.loadtxt(get_data_path(
'example2_sample_constraints_scaling1')),
index=self.sample_ids,
columns=self.pc_ids)
biplot_scores = pd.DataFrame(
np.loadtxt(get_data_path(
'example2_biplot_scaling1')))
# These are wrong. See issue #1002
proportion_explained = pd.Series([0.44275783, 0.25614586,
0.15280354, 0.10497021,
0.02873375, 0.00987052,
0.00471828],
index=self.pc_ids)
# These are wrong. See issue #1002
eigvals = pd.Series([25.897954, 14.982578, 8.937841, 6.139956,
1.680705, 0.577350, 0.275984],
index=self.pc_ids)
exp = OrdinationResults(
'RDA', 'Redundancy Analysis',
samples=vegan_samples,
features=vegan_features,
sample_constraints=sample_constraints,
biplot_scores=biplot_scores,
proportion_explained=proportion_explained,
eigvals=eigvals)
assert_ordination_results_equal(scores, exp,
ignore_directionality=True,
ignore_biplot_scores_labels=True,
decimal=6)
def test_scaling2(self):
scores = rda(self.Y, self.X, scaling=2)
biplot_scores = pd.DataFrame(np.loadtxt(
get_data_path('example2_biplot_scaling2')))
sample_constraints = pd.DataFrame(np.loadtxt(
get_data_path('example2_sample_constraints_scaling2')))
# Load data as computed with vegan 2.0-8
vegan_features = pd.DataFrame(
np.loadtxt(get_data_path(
'example2_species_scaling2_from_vegan')),
index=self.feature_ids,
columns=self.pc_ids)
vegan_samples = pd.DataFrame(
np.loadtxt(get_data_path(
'example2_site_scaling2_from_vegan')),
index=self.sample_ids,
columns=self.pc_ids)
sample_constraints = pd.DataFrame(
np.loadtxt(get_data_path(
'example2_sample_constraints_scaling2')),
index=self.sample_ids,
columns=self.pc_ids)
biplot_scores = pd.DataFrame(
np.loadtxt(get_data_path(
'example2_biplot_scaling2')))
# These are wrong. See issue #1002
proportion_explained = pd.Series([0.44275783, 0.25614586,
0.15280354, 0.10497021,
0.02873375, 0.00987052,
0.00471828],
index=self.pc_ids)
# These are wrong. See issue #1002
eigvals = pd.Series([25.897954, 14.982578, 8.937841, 6.139956,
1.680705, 0.577350, 0.275984],
index=self.pc_ids)
exp = OrdinationResults(
'RDA', 'Redundancy Analysis',
samples=vegan_samples,
features=vegan_features,
sample_constraints=sample_constraints,
biplot_scores=biplot_scores,
proportion_explained=proportion_explained,
eigvals=eigvals)
assert_ordination_results_equal(scores, exp,
ignore_directionality=True,
ignore_biplot_scores_labels=True,
decimal=6)
if __name__ == '__main__':
main()
| bsd-3-clause |
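For orientation, the `rda` API exercised by the test file above can be driven directly. The sketch below uses synthetic data; all names are illustrative and the call signature is taken only from the tests themselves, not verified against other scikit-bio versions.

import numpy as np
import pandas as pd
from skbio.stats.ordination import rda

rng = np.random.RandomState(0)
sites = ['Site%d' % i for i in range(10)]
# community (response) table and environmental (constraint) table share the same index
Y = pd.DataFrame(rng.rand(10, 6), index=sites,
                 columns=['Species%d' % i for i in range(6)])
X = pd.DataFrame(rng.rand(10, 4), index=sites,
                 columns=['Env%d' % i for i in range(4)])
ordination = rda(Y, X, scaling=1)   # returns an OrdinationResults instance
print(ordination.eigvals)           # eigenvalues of the constrained axes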
readevalprint/zipline | tests/test_data_util.py | 4 | 3092 | #
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from collections import deque
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from zipline.utils.data import RollingPanel
class TestRollingPanel(unittest.TestCase):
def test_basics(self):
items = ['foo', 'bar', 'baz']
minor = ['A', 'B', 'C', 'D']
window = 10
rp = RollingPanel(window, items, minor, cap_multiple=2)
dates = pd.date_range('2000-01-01', periods=30, tz='utc')
major_deque = deque()
frames = {}
for i in range(30):
frame = pd.DataFrame(np.random.randn(3, 4), index=items,
columns=minor)
date = dates[i]
rp.add_frame(date, frame)
frames[date] = frame
major_deque.append(date)
if i >= window:
major_deque.popleft()
result = rp.get_current()
expected = pd.Panel(frames, items=list(major_deque),
major_axis=items, minor_axis=minor)
tm.assert_panel_equal(result, expected.swapaxes(0, 1))
def f(option='clever', n=500, copy=False):
items = range(5)
minor = range(20)
window = 100
periods = n
dates = pd.date_range('2000-01-01', periods=periods, tz='utc')
frames = {}
if option == 'clever':
rp = RollingPanel(window, items, minor, cap_multiple=2)
major_deque = deque()
dummy = pd.DataFrame(np.random.randn(len(items), len(minor)),
index=items, columns=minor)
for i in range(periods):
frame = dummy * (1 + 0.001 * i)
date = dates[i]
rp.add_frame(date, frame)
frames[date] = frame
major_deque.append(date)
if i >= window:
del frames[major_deque.popleft()]
result = rp.get_current()
if copy:
result = result.copy()
else:
major_deque = deque()
dummy = pd.DataFrame(np.random.randn(len(items), len(minor)),
index=items, columns=minor)
for i in range(periods):
frame = dummy * (1 + 0.001 * i)
date = dates[i]
frames[date] = frame
major_deque.append(date)
if i >= window:
del frames[major_deque.popleft()]
result = pd.Panel(frames, items=list(major_deque),
major_axis=items, minor_axis=minor)
| apache-2.0 |
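The `RollingPanel` pattern benchmarked above reduces to a short loop. This sketch reuses only the calls that appear in the test (zipline's pre-1.0 API, which still returns a pandas Panel):

import numpy as np
import pandas as pd
from zipline.utils.data import RollingPanel

items, minor, window = ['foo', 'bar'], ['A', 'B', 'C'], 5
rp = RollingPanel(window, items, minor, cap_multiple=2)
for date in pd.date_range('2000-01-01', periods=12, tz='utc'):
    frame = pd.DataFrame(np.random.randn(len(items), len(minor)),
                         index=items, columns=minor)
    rp.add_frame(date, frame)    # push the newest frame; older ones fall out of the window
panel = rp.get_current()         # Panel restricted to the trailing window of dates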
MatthieuBizien/scikit-learn | sklearn/feature_extraction/tests/test_feature_hasher.py | 28 | 3652 | from __future__ import unicode_literals
import numpy as np
from sklearn.feature_extraction import FeatureHasher
from nose.tools import assert_raises, assert_true
from numpy.testing import assert_array_equal, assert_equal
def test_feature_hasher_dicts():
h = FeatureHasher(n_features=16)
assert_equal("dict", h.input_type)
raw_X = [{"foo": "bar", "dada": 42, "tzara": 37},
{"foo": "baz", "gaga": u"string1"}]
X1 = FeatureHasher(n_features=16).transform(raw_X)
gen = (iter(d.items()) for d in raw_X)
X2 = FeatureHasher(n_features=16, input_type="pair").transform(gen)
assert_array_equal(X1.toarray(), X2.toarray())
def test_feature_hasher_strings():
# mix byte and Unicode strings; note that "foo" is a duplicate in row 0
raw_X = [["foo", "bar", "baz", "foo".encode("ascii")],
["bar".encode("ascii"), "baz", "quux"]]
for lg_n_features in (7, 9, 11, 16, 22):
n_features = 2 ** lg_n_features
it = (x for x in raw_X) # iterable
h = FeatureHasher(n_features, non_negative=True, input_type="string")
X = h.transform(it)
assert_equal(X.shape[0], len(raw_X))
assert_equal(X.shape[1], n_features)
assert_true(np.all(X.data > 0))
assert_equal(X[0].sum(), 4)
assert_equal(X[1].sum(), 3)
assert_equal(X.nnz, 6)
def test_feature_hasher_pairs():
raw_X = (iter(d.items()) for d in [{"foo": 1, "bar": 2},
{"baz": 3, "quux": 4, "foo": -1}])
h = FeatureHasher(n_features=16, input_type="pair")
x1, x2 = h.transform(raw_X).toarray()
x1_nz = sorted(np.abs(x1[x1 != 0]))
x2_nz = sorted(np.abs(x2[x2 != 0]))
assert_equal([1, 2], x1_nz)
assert_equal([1, 3, 4], x2_nz)
def test_feature_hasher_pairs_with_string_values():
raw_X = (iter(d.items()) for d in [{"foo": 1, "bar": "a"},
{"baz": u"abc", "quux": 4, "foo": -1}])
h = FeatureHasher(n_features=16, input_type="pair")
x1, x2 = h.transform(raw_X).toarray()
x1_nz = sorted(np.abs(x1[x1 != 0]))
x2_nz = sorted(np.abs(x2[x2 != 0]))
assert_equal([1, 1], x1_nz)
assert_equal([1, 1, 4], x2_nz)
raw_X = (iter(d.items()) for d in [{"bax": "abc"},
{"bax": "abc"}])
x1, x2 = h.transform(raw_X).toarray()
x1_nz = np.abs(x1[x1 != 0])
x2_nz = np.abs(x2[x2 != 0])
assert_equal([1], x1_nz)
assert_equal([1], x2_nz)
assert_equal(x1, x2)
def test_hash_empty_input():
n_features = 16
raw_X = [[], (), iter(range(0))]
h = FeatureHasher(n_features=n_features, input_type="string")
X = h.transform(raw_X)
assert_array_equal(X.A, np.zeros((len(raw_X), n_features)))
def test_hasher_invalid_input():
assert_raises(ValueError, FeatureHasher, input_type="gobbledygook")
assert_raises(ValueError, FeatureHasher, n_features=-1)
assert_raises(ValueError, FeatureHasher, n_features=0)
assert_raises(TypeError, FeatureHasher, n_features='ham')
h = FeatureHasher(n_features=np.uint16(2 ** 6))
assert_raises(ValueError, h.transform, [])
assert_raises(Exception, h.transform, [[5.5]])
assert_raises(Exception, h.transform, [[None]])
def test_hasher_set_params():
# Test delayed input validation in fit (useful for grid search).
hasher = FeatureHasher()
hasher.set_params(n_features=np.inf)
assert_raises(TypeError, hasher.fit)
def test_hasher_zeros():
# Assert that no zeros are materialized in the output.
X = FeatureHasher().transform([{'foo': 0}])
assert_equal(X.data.shape, (0,))
| bsd-3-clause |
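The hashing-trick behaviour pinned down by these tests is easiest to see on a two-row example. This is a sketch of the standard scikit-learn `FeatureHasher` API; the outputs are described only qualitatively.

from sklearn.feature_extraction import FeatureHasher

h = FeatureHasher(n_features=16, input_type="dict")
X = h.transform([{"foo": 1, "bar": 2}, {"baz": 3, "quux": 4}])
print(X.shape)       # (2, 16) -- a scipy.sparse CSR matrix
print(X.toarray())   # mostly zeros; entries may be negative because of the hash sign bit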
HolgerPeters/scikit-learn | sklearn/decomposition/tests/test_truncated_svd.py | 66 | 8261 | """Test truncated SVD transformer."""
import numpy as np
import scipy.sparse as sp
from sklearn.decomposition import TruncatedSVD
from sklearn.utils import check_random_state
from sklearn.utils.testing import (assert_array_almost_equal, assert_equal,
assert_raises, assert_greater,
assert_array_less)
# Make an X that looks somewhat like a small tf-idf matrix.
# XXX newer versions of SciPy have scipy.sparse.rand for this.
shape = 60, 55
n_samples, n_features = shape
rng = check_random_state(42)
X = rng.randint(-100, 20, np.product(shape)).reshape(shape)
X = sp.csr_matrix(np.maximum(X, 0), dtype=np.float64)
X.data[:] = 1 + np.log(X.data)
Xdense = X.A
def test_algorithms():
svd_a = TruncatedSVD(30, algorithm="arpack")
svd_r = TruncatedSVD(30, algorithm="randomized", random_state=42)
Xa = svd_a.fit_transform(X)[:, :6]
Xr = svd_r.fit_transform(X)[:, :6]
assert_array_almost_equal(Xa, Xr, decimal=5)
comp_a = np.abs(svd_a.components_)
comp_r = np.abs(svd_r.components_)
# All elements are equal, but some elements are more equal than others.
assert_array_almost_equal(comp_a[:9], comp_r[:9])
assert_array_almost_equal(comp_a[9:], comp_r[9:], decimal=2)
def test_attributes():
for n_components in (10, 25, 41):
tsvd = TruncatedSVD(n_components).fit(X)
assert_equal(tsvd.n_components, n_components)
assert_equal(tsvd.components_.shape, (n_components, n_features))
def test_too_many_components():
for algorithm in ["arpack", "randomized"]:
for n_components in (n_features, n_features + 1):
tsvd = TruncatedSVD(n_components=n_components, algorithm=algorithm)
assert_raises(ValueError, tsvd.fit, X)
def test_sparse_formats():
for fmt in ("array", "csr", "csc", "coo", "lil"):
Xfmt = Xdense if fmt == "dense" else getattr(X, "to" + fmt)()
tsvd = TruncatedSVD(n_components=11)
Xtrans = tsvd.fit_transform(Xfmt)
assert_equal(Xtrans.shape, (n_samples, 11))
Xtrans = tsvd.transform(Xfmt)
assert_equal(Xtrans.shape, (n_samples, 11))
def test_inverse_transform():
for algo in ("arpack", "randomized"):
# We need a lot of components for the reconstruction to be "almost
# equal" in all positions. XXX Test means or sums instead?
tsvd = TruncatedSVD(n_components=52, random_state=42, algorithm=algo)
Xt = tsvd.fit_transform(X)
Xinv = tsvd.inverse_transform(Xt)
assert_array_almost_equal(Xinv, Xdense, decimal=1)
def test_integers():
Xint = X.astype(np.int64)
tsvd = TruncatedSVD(n_components=6)
Xtrans = tsvd.fit_transform(Xint)
assert_equal(Xtrans.shape, (n_samples, tsvd.n_components))
def test_explained_variance():
# Test sparse data
svd_a_10_sp = TruncatedSVD(10, algorithm="arpack")
svd_r_10_sp = TruncatedSVD(10, algorithm="randomized", random_state=42)
svd_a_20_sp = TruncatedSVD(20, algorithm="arpack")
svd_r_20_sp = TruncatedSVD(20, algorithm="randomized", random_state=42)
X_trans_a_10_sp = svd_a_10_sp.fit_transform(X)
X_trans_r_10_sp = svd_r_10_sp.fit_transform(X)
X_trans_a_20_sp = svd_a_20_sp.fit_transform(X)
X_trans_r_20_sp = svd_r_20_sp.fit_transform(X)
# Test dense data
svd_a_10_de = TruncatedSVD(10, algorithm="arpack")
svd_r_10_de = TruncatedSVD(10, algorithm="randomized", random_state=42)
svd_a_20_de = TruncatedSVD(20, algorithm="arpack")
svd_r_20_de = TruncatedSVD(20, algorithm="randomized", random_state=42)
X_trans_a_10_de = svd_a_10_de.fit_transform(X.toarray())
X_trans_r_10_de = svd_r_10_de.fit_transform(X.toarray())
X_trans_a_20_de = svd_a_20_de.fit_transform(X.toarray())
X_trans_r_20_de = svd_r_20_de.fit_transform(X.toarray())
# helper arrays for tests below
svds = (svd_a_10_sp, svd_r_10_sp, svd_a_20_sp, svd_r_20_sp, svd_a_10_de,
svd_r_10_de, svd_a_20_de, svd_r_20_de)
svds_trans = (
(svd_a_10_sp, X_trans_a_10_sp),
(svd_r_10_sp, X_trans_r_10_sp),
(svd_a_20_sp, X_trans_a_20_sp),
(svd_r_20_sp, X_trans_r_20_sp),
(svd_a_10_de, X_trans_a_10_de),
(svd_r_10_de, X_trans_r_10_de),
(svd_a_20_de, X_trans_a_20_de),
(svd_r_20_de, X_trans_r_20_de),
)
svds_10_v_20 = (
(svd_a_10_sp, svd_a_20_sp),
(svd_r_10_sp, svd_r_20_sp),
(svd_a_10_de, svd_a_20_de),
(svd_r_10_de, svd_r_20_de),
)
svds_sparse_v_dense = (
(svd_a_10_sp, svd_a_10_de),
(svd_a_20_sp, svd_a_20_de),
(svd_r_10_sp, svd_r_10_de),
(svd_r_20_sp, svd_r_20_de),
)
# Assert the 1st component is equal
for svd_10, svd_20 in svds_10_v_20:
assert_array_almost_equal(
svd_10.explained_variance_ratio_,
svd_20.explained_variance_ratio_[:10],
decimal=5,
)
# Assert that 20 components has higher explained variance than 10
for svd_10, svd_20 in svds_10_v_20:
assert_greater(
svd_20.explained_variance_ratio_.sum(),
svd_10.explained_variance_ratio_.sum(),
)
# Assert that all the values are greater than 0
for svd in svds:
assert_array_less(0.0, svd.explained_variance_ratio_)
# Assert that total explained variance is less than 1
for svd in svds:
assert_array_less(svd.explained_variance_ratio_.sum(), 1.0)
# Compare sparse vs. dense
for svd_sparse, svd_dense in svds_sparse_v_dense:
assert_array_almost_equal(svd_sparse.explained_variance_ratio_,
svd_dense.explained_variance_ratio_)
# Test that explained_variance is correct
for svd, transformed in svds_trans:
total_variance = np.var(X.toarray(), axis=0).sum()
variances = np.var(transformed, axis=0)
true_explained_variance_ratio = variances / total_variance
assert_array_almost_equal(
svd.explained_variance_ratio_,
true_explained_variance_ratio,
)
def test_singular_values():
# Check that the TruncatedSVD output has the correct singular values
rng = np.random.RandomState(0)
n_samples = 100
n_features = 80
X = rng.randn(n_samples, n_features)
apca = TruncatedSVD(n_components=2, algorithm='arpack',
random_state=rng).fit(X)
rpca = TruncatedSVD(n_components=2, algorithm='arpack',
random_state=rng).fit(X)
assert_array_almost_equal(apca.singular_values_, rpca.singular_values_, 12)
# Compare to the Frobenius norm
X_apca = apca.transform(X)
X_rpca = rpca.transform(X)
assert_array_almost_equal(np.sum(apca.singular_values_**2.0),
np.linalg.norm(X_apca, "fro")**2.0, 12)
assert_array_almost_equal(np.sum(rpca.singular_values_**2.0),
np.linalg.norm(X_rpca, "fro")**2.0, 12)
# Compare to the 2-norms of the score vectors
assert_array_almost_equal(apca.singular_values_,
np.sqrt(np.sum(X_apca**2.0, axis=0)), 12)
assert_array_almost_equal(rpca.singular_values_,
np.sqrt(np.sum(X_rpca**2.0, axis=0)), 12)
# Set the singular values and see what we get back
rng = np.random.RandomState(0)
n_samples = 100
n_features = 110
X = rng.randn(n_samples, n_features)
apca = TruncatedSVD(n_components=3, algorithm='arpack',
random_state=rng)
rpca = TruncatedSVD(n_components=3, algorithm='randomized',
random_state=rng)
X_apca = apca.fit_transform(X)
X_rpca = rpca.fit_transform(X)
X_apca /= np.sqrt(np.sum(X_apca**2.0, axis=0))
X_rpca /= np.sqrt(np.sum(X_rpca**2.0, axis=0))
X_apca[:, 0] *= 3.142
X_apca[:, 1] *= 2.718
X_rpca[:, 0] *= 3.142
X_rpca[:, 1] *= 2.718
X_hat_apca = np.dot(X_apca, apca.components_)
X_hat_rpca = np.dot(X_rpca, rpca.components_)
apca.fit(X_hat_apca)
rpca.fit(X_hat_rpca)
assert_array_almost_equal(apca.singular_values_, [3.142, 2.718, 1.0], 14)
assert_array_almost_equal(rpca.singular_values_, [3.142, 2.718, 1.0], 14)
| bsd-3-clause |
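A compact TruncatedSVD round trip, mirroring what the tests above assert (transformed shape, explained variance bounded by 1, approximate inverse transform). Treat it as a sketch rather than a reference result.

import numpy as np
from sklearn.decomposition import TruncatedSVD

X = np.random.RandomState(0).rand(60, 55)
svd = TruncatedSVD(n_components=10, algorithm="randomized", random_state=42)
Xt = svd.fit_transform(X)                    # shape (60, 10)
print(svd.explained_variance_ratio_.sum())   # strictly below 1.0; grows with n_components
X_back = svd.inverse_transform(Xt)           # approximate reconstruction, shape (60, 55)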
kushalbhola/MyStuff | Practice/PythonApplication/env/Lib/site-packages/pandas/io/formats/format.py | 1 | 56873 | """
Internal module for formatting output data in csv, html,
and latex files. This module also applies to display formatting.
"""
from functools import partial
from io import StringIO
import re
from shutil import get_terminal_size
from unicodedata import east_asian_width
import numpy as np
from pandas._config.config import get_option, set_option
from pandas._libs import lib
from pandas._libs.tslib import format_array_from_datetime
from pandas._libs.tslibs import NaT, Timedelta, Timestamp, iNaT
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_complex_dtype,
is_datetime64_dtype,
is_datetime64tz_dtype,
is_extension_array_dtype,
is_float,
is_float_dtype,
is_integer,
is_integer_dtype,
is_list_like,
is_numeric_dtype,
is_scalar,
is_timedelta64_dtype,
)
from pandas.core.dtypes.generic import (
ABCIndexClass,
ABCMultiIndex,
ABCSeries,
ABCSparseArray,
)
from pandas.core.dtypes.missing import isna, notna
from pandas.core.base import PandasObject
import pandas.core.common as com
from pandas.core.index import Index, ensure_index
from pandas.core.indexes.datetimes import DatetimeIndex
from pandas.io.common import _expand_user, _stringify_path
from pandas.io.formats.printing import adjoin, justify, pprint_thing
common_docstring = """
Parameters
----------
buf : StringIO-like, optional
Buffer to write to.
columns : sequence, optional, default None
The subset of columns to write. Writes all columns by default.
col_space : %(col_space_type)s, optional
%(col_space)s.
header : bool, optional
%(header)s.
index : bool, optional, default True
Whether to print index (row) labels.
na_rep : str, optional, default 'NaN'
String representation of NAN to use.
formatters : list or dict of one-param. functions, optional
Formatter functions to apply to columns' elements by position or
name.
The result of each function must be a unicode string.
List must be of length equal to the number of columns.
float_format : one-parameter function, optional, default None
Formatter function to apply to columns' elements if they are
floats. The result of this function must be a unicode string.
sparsify : bool, optional, default True
Set to False for a DataFrame with a hierarchical index to print
every multiindex key at each row.
index_names : bool, optional, default True
Prints the names of the indexes.
justify : str, default None
How to justify the column labels. If None uses the option from
the print configuration (controlled by set_option), 'right' out
of the box. Valid values are
* left
* right
* center
* justify
* justify-all
* start
* end
* inherit
* match-parent
* initial
* unset.
max_rows : int, optional
Maximum number of rows to display in the console.
min_rows : int, optional
The number of rows to display in the console in a truncated repr
(when number of rows is above `max_rows`).
max_cols : int, optional
Maximum number of columns to display in the console.
show_dimensions : bool, default False
Display DataFrame dimensions (number of rows by number of columns).
decimal : str, default '.'
Character recognized as decimal separator, e.g. ',' in Europe.
.. versionadded:: 0.18.0
"""
_VALID_JUSTIFY_PARAMETERS = (
"left",
"right",
"center",
"justify",
"justify-all",
"start",
"end",
"inherit",
"match-parent",
"initial",
"unset",
)
return_docstring = """
Returns
-------
str (or unicode, depending on data and options)
String representation of the dataframe.
"""
class CategoricalFormatter:
def __init__(self, categorical, buf=None, length=True, na_rep="NaN", footer=True):
self.categorical = categorical
self.buf = buf if buf is not None else StringIO("")
self.na_rep = na_rep
self.length = length
self.footer = footer
def _get_footer(self):
footer = ""
if self.length:
if footer:
footer += ", "
footer += "Length: {length}".format(length=len(self.categorical))
level_info = self.categorical._repr_categories_info()
# Levels are added in a newline
if footer:
footer += "\n"
footer += level_info
return str(footer)
def _get_formatted_values(self):
return format_array(
self.categorical._internal_get_values(),
None,
float_format=None,
na_rep=self.na_rep,
)
def to_string(self):
categorical = self.categorical
if len(categorical) == 0:
if self.footer:
return self._get_footer()
else:
return ""
fmt_values = self._get_formatted_values()
result = ["{i}".format(i=i) for i in fmt_values]
result = [i.strip() for i in result]
result = ", ".join(result)
result = ["[" + result + "]"]
if self.footer:
footer = self._get_footer()
if footer:
result.append(footer)
return str("\n".join(result))
class SeriesFormatter:
def __init__(
self,
series,
buf=None,
length=True,
header=True,
index=True,
na_rep="NaN",
name=False,
float_format=None,
dtype=True,
max_rows=None,
min_rows=None,
):
self.series = series
self.buf = buf if buf is not None else StringIO()
self.name = name
self.na_rep = na_rep
self.header = header
self.length = length
self.index = index
self.max_rows = max_rows
self.min_rows = min_rows
if float_format is None:
float_format = get_option("display.float_format")
self.float_format = float_format
self.dtype = dtype
self.adj = _get_adjustment()
self._chk_truncate()
def _chk_truncate(self):
from pandas.core.reshape.concat import concat
min_rows = self.min_rows
max_rows = self.max_rows
# truncation determined by max_rows, actual truncated number of rows
# used below by min_rows
truncate_v = max_rows and (len(self.series) > max_rows)
series = self.series
if truncate_v:
if min_rows:
# if min_rows is set (not None or 0), set max_rows to minimum
# of both
max_rows = min(min_rows, max_rows)
if max_rows == 1:
row_num = max_rows
series = series.iloc[:max_rows]
else:
row_num = max_rows // 2
series = concat((series.iloc[:row_num], series.iloc[-row_num:]))
self.tr_row_num = row_num
else:
self.tr_row_num = None
self.tr_series = series
self.truncate_v = truncate_v
def _get_footer(self):
name = self.series.name
footer = ""
if getattr(self.series.index, "freq", None) is not None:
footer += "Freq: {freq}".format(freq=self.series.index.freqstr)
if self.name is not False and name is not None:
if footer:
footer += ", "
series_name = pprint_thing(name, escape_chars=("\t", "\r", "\n"))
footer += (
("Name: {sname}".format(sname=series_name)) if name is not None else ""
)
if self.length is True or (self.length == "truncate" and self.truncate_v):
if footer:
footer += ", "
footer += "Length: {length}".format(length=len(self.series))
if self.dtype is not False and self.dtype is not None:
name = getattr(self.tr_series.dtype, "name", None)
if name:
if footer:
footer += ", "
footer += "dtype: {typ}".format(typ=pprint_thing(name))
# level infos are added to the end and in a new line, like it is done
# for Categoricals
if is_categorical_dtype(self.tr_series.dtype):
level_info = self.tr_series._values._repr_categories_info()
if footer:
footer += "\n"
footer += level_info
return str(footer)
def _get_formatted_index(self):
index = self.tr_series.index
is_multi = isinstance(index, ABCMultiIndex)
if is_multi:
have_header = any(name for name in index.names)
fmt_index = index.format(names=True)
else:
have_header = index.name is not None
fmt_index = index.format(name=True)
return fmt_index, have_header
def _get_formatted_values(self):
values_to_format = self.tr_series._formatting_values()
return format_array(
values_to_format, None, float_format=self.float_format, na_rep=self.na_rep
)
def to_string(self):
series = self.tr_series
footer = self._get_footer()
if len(series) == 0:
return "{name}([], {footer})".format(
name=self.series.__class__.__name__, footer=footer
)
fmt_index, have_header = self._get_formatted_index()
fmt_values = self._get_formatted_values()
if self.truncate_v:
n_header_rows = 0
row_num = self.tr_row_num
width = self.adj.len(fmt_values[row_num - 1])
if width > 3:
dot_str = "..."
else:
dot_str = ".."
# Series uses mode=center because it has single value columns
# DataFrame uses mode=left
dot_str = self.adj.justify([dot_str], width, mode="center")[0]
fmt_values.insert(row_num + n_header_rows, dot_str)
fmt_index.insert(row_num + 1, "")
if self.index:
result = self.adj.adjoin(3, *[fmt_index[1:], fmt_values])
else:
result = self.adj.adjoin(3, fmt_values)
if self.header and have_header:
result = fmt_index[0] + "\n" + result
if footer:
result += "\n" + footer
return str("".join(result))
class TextAdjustment:
def __init__(self):
self.encoding = get_option("display.encoding")
def len(self, text):
return len(text)
def justify(self, texts, max_len, mode="right"):
return justify(texts, max_len, mode=mode)
def adjoin(self, space, *lists, **kwargs):
return adjoin(space, *lists, strlen=self.len, justfunc=self.justify, **kwargs)
class EastAsianTextAdjustment(TextAdjustment):
def __init__(self):
super().__init__()
if get_option("display.unicode.ambiguous_as_wide"):
self.ambiguous_width = 2
else:
self.ambiguous_width = 1
# Definition of East Asian Width
# http://unicode.org/reports/tr11/
# Ambiguous width can be changed by option
self._EAW_MAP = {"Na": 1, "N": 1, "W": 2, "F": 2, "H": 1}
def len(self, text):
"""
Calculate display width considering unicode East Asian Width
"""
if not isinstance(text, str):
return len(text)
return sum(
self._EAW_MAP.get(east_asian_width(c), self.ambiguous_width) for c in text
)
def justify(self, texts, max_len, mode="right"):
# re-calculate padding space per str considering East Asian Width
def _get_pad(t):
return max_len - self.len(t) + len(t)
if mode == "left":
return [x.ljust(_get_pad(x)) for x in texts]
elif mode == "center":
return [x.center(_get_pad(x)) for x in texts]
else:
return [x.rjust(_get_pad(x)) for x in texts]
def _get_adjustment():
use_east_asian_width = get_option("display.unicode.east_asian_width")
if use_east_asian_width:
return EastAsianTextAdjustment()
else:
return TextAdjustment()
class TableFormatter:
show_dimensions = None
@property
def should_show_dimensions(self):
return self.show_dimensions is True or (
self.show_dimensions == "truncate" and self.is_truncated
)
def _get_formatter(self, i):
if isinstance(self.formatters, (list, tuple)):
if is_integer(i):
return self.formatters[i]
else:
return None
else:
if is_integer(i) and i not in self.columns:
i = self.columns[i]
return self.formatters.get(i, None)
class DataFrameFormatter(TableFormatter):
"""
Render a DataFrame
self.to_string() : console-friendly tabular output
self.to_html() : html table
self.to_latex() : LaTeX tabular environment table
"""
__doc__ = __doc__ if __doc__ else ""
__doc__ += common_docstring + return_docstring
def __init__(
self,
frame,
buf=None,
columns=None,
col_space=None,
header=True,
index=True,
na_rep="NaN",
formatters=None,
justify=None,
float_format=None,
sparsify=None,
index_names=True,
line_width=None,
max_rows=None,
min_rows=None,
max_cols=None,
show_dimensions=False,
decimal=".",
table_id=None,
render_links=False,
**kwds
):
self.frame = frame
if buf is not None:
self.buf = _expand_user(_stringify_path(buf))
else:
self.buf = StringIO()
self.show_index_names = index_names
if sparsify is None:
sparsify = get_option("display.multi_sparse")
self.sparsify = sparsify
self.float_format = float_format
self.formatters = formatters if formatters is not None else {}
self.na_rep = na_rep
self.decimal = decimal
self.col_space = col_space
self.header = header
self.index = index
self.line_width = line_width
self.max_rows = max_rows
self.min_rows = min_rows
self.max_cols = max_cols
self.max_rows_displayed = min(max_rows or len(self.frame), len(self.frame))
self.show_dimensions = show_dimensions
self.table_id = table_id
self.render_links = render_links
if justify is None:
self.justify = get_option("display.colheader_justify")
else:
self.justify = justify
self.kwds = kwds
if columns is not None:
self.columns = ensure_index(columns)
self.frame = self.frame[self.columns]
else:
self.columns = frame.columns
self._chk_truncate()
self.adj = _get_adjustment()
def _chk_truncate(self):
"""
Checks whether the frame should be truncated. If so, slices
the frame up.
"""
from pandas.core.reshape.concat import concat
# Cut the data to the information actually printed
max_cols = self.max_cols
max_rows = self.max_rows
if max_cols == 0 or max_rows == 0: # assume we are in the terminal
(w, h) = get_terminal_size()
self.w = w
self.h = h
if self.max_rows == 0:
dot_row = 1
prompt_row = 1
if self.show_dimensions:
show_dimension_rows = 3
n_add_rows = self.header + dot_row + show_dimension_rows + prompt_row
# rows available to fill with actual data
max_rows_adj = self.h - n_add_rows
self.max_rows_adj = max_rows_adj
# Format only rows and columns that could potentially fit the
# screen
if max_cols == 0 and len(self.frame.columns) > w:
max_cols = w
if max_rows == 0 and len(self.frame) > h:
max_rows = h
if not hasattr(self, "max_rows_adj"):
if max_rows:
if (len(self.frame) > max_rows) and self.min_rows:
# if truncated, set max_rows showed to min_rows
max_rows = min(self.min_rows, max_rows)
self.max_rows_adj = max_rows
if not hasattr(self, "max_cols_adj"):
self.max_cols_adj = max_cols
max_cols_adj = self.max_cols_adj
max_rows_adj = self.max_rows_adj
truncate_h = max_cols_adj and (len(self.columns) > max_cols_adj)
truncate_v = max_rows_adj and (len(self.frame) > max_rows_adj)
frame = self.frame
if truncate_h:
if max_cols_adj == 0:
col_num = len(frame.columns)
elif max_cols_adj == 1:
frame = frame.iloc[:, :max_cols]
col_num = max_cols
else:
col_num = max_cols_adj // 2
frame = concat(
(frame.iloc[:, :col_num], frame.iloc[:, -col_num:]), axis=1
)
self.tr_col_num = col_num
if truncate_v:
if max_rows_adj == 1:
row_num = max_rows
frame = frame.iloc[:max_rows, :]
else:
row_num = max_rows_adj // 2
frame = concat((frame.iloc[:row_num, :], frame.iloc[-row_num:, :]))
self.tr_row_num = row_num
else:
self.tr_row_num = None
self.tr_frame = frame
self.truncate_h = truncate_h
self.truncate_v = truncate_v
self.is_truncated = self.truncate_h or self.truncate_v
def _to_str_columns(self):
"""
Render a DataFrame to a list of columns (as lists of strings).
"""
frame = self.tr_frame
# may include level names also
str_index = self._get_formatted_index(frame)
if not is_list_like(self.header) and not self.header:
stringified = []
for i, c in enumerate(frame):
fmt_values = self._format_col(i)
fmt_values = _make_fixed_width(
fmt_values,
self.justify,
minimum=(self.col_space or 0),
adj=self.adj,
)
stringified.append(fmt_values)
else:
if is_list_like(self.header):
if len(self.header) != len(self.columns):
raise ValueError(
(
"Writing {ncols} cols but got {nalias} "
"aliases".format(
ncols=len(self.columns), nalias=len(self.header)
)
)
)
str_columns = [[label] for label in self.header]
else:
str_columns = self._get_formatted_column_labels(frame)
if self.show_row_idx_names:
for x in str_columns:
x.append("")
stringified = []
for i, c in enumerate(frame):
cheader = str_columns[i]
header_colwidth = max(
self.col_space or 0, *(self.adj.len(x) for x in cheader)
)
fmt_values = self._format_col(i)
fmt_values = _make_fixed_width(
fmt_values, self.justify, minimum=header_colwidth, adj=self.adj
)
max_len = max(max(self.adj.len(x) for x in fmt_values), header_colwidth)
cheader = self.adj.justify(cheader, max_len, mode=self.justify)
stringified.append(cheader + fmt_values)
strcols = stringified
if self.index:
strcols.insert(0, str_index)
# Add ... to signal truncated
truncate_h = self.truncate_h
truncate_v = self.truncate_v
if truncate_h:
col_num = self.tr_col_num
strcols.insert(self.tr_col_num + 1, [" ..."] * (len(str_index)))
if truncate_v:
n_header_rows = len(str_index) - len(frame)
row_num = self.tr_row_num
for ix, col in enumerate(strcols):
# infer from above row
cwidth = self.adj.len(strcols[ix][row_num])
is_dot_col = False
if truncate_h:
is_dot_col = ix == col_num + 1
if cwidth > 3 or is_dot_col:
my_str = "..."
else:
my_str = ".."
if ix == 0:
dot_mode = "left"
elif is_dot_col:
cwidth = 4
dot_mode = "right"
else:
dot_mode = "right"
dot_str = self.adj.justify([my_str], cwidth, mode=dot_mode)[0]
strcols[ix].insert(row_num + n_header_rows, dot_str)
return strcols
def to_string(self):
"""
Render a DataFrame to a console-friendly tabular output.
"""
from pandas import Series
frame = self.frame
if len(frame.columns) == 0 or len(frame.index) == 0:
info_line = "Empty {name}\nColumns: {col}\nIndex: {idx}".format(
name=type(self.frame).__name__,
col=pprint_thing(frame.columns),
idx=pprint_thing(frame.index),
)
text = info_line
else:
strcols = self._to_str_columns()
if self.line_width is None: # no need to wrap around just print
# the whole frame
text = self.adj.adjoin(1, *strcols)
elif (
not isinstance(self.max_cols, int) or self.max_cols > 0
): # need to wrap around
text = self._join_multiline(*strcols)
else: # max_cols == 0. Try to fit frame to terminal
text = self.adj.adjoin(1, *strcols).split("\n")
max_len = Series(text).str.len().max()
# plus truncate dot col
dif = max_len - self.w
# '+ 1' to avoid too wide repr (GH PR #17023)
adj_dif = dif + 1
col_lens = Series([Series(ele).apply(len).max() for ele in strcols])
n_cols = len(col_lens)
counter = 0
while adj_dif > 0 and n_cols > 1:
counter += 1
mid = int(round(n_cols / 2.0))
mid_ix = col_lens.index[mid]
col_len = col_lens[mid_ix]
# adjoin adds one
adj_dif -= col_len + 1
col_lens = col_lens.drop(mid_ix)
n_cols = len(col_lens)
# subtract index column
max_cols_adj = n_cols - self.index
# GH-21180. Ensure that we print at least two.
max_cols_adj = max(max_cols_adj, 2)
self.max_cols_adj = max_cols_adj
# Call again _chk_truncate to cut frame appropriately
# and then generate string representation
self._chk_truncate()
strcols = self._to_str_columns()
text = self.adj.adjoin(1, *strcols)
self.buf.writelines(text)
if self.should_show_dimensions:
self.buf.write(
"\n\n[{nrows} rows x {ncols} columns]".format(
nrows=len(frame), ncols=len(frame.columns)
)
)
def _join_multiline(self, *strcols):
lwidth = self.line_width
adjoin_width = 1
strcols = list(strcols)
if self.index:
idx = strcols.pop(0)
lwidth -= np.array([self.adj.len(x) for x in idx]).max() + adjoin_width
col_widths = [
np.array([self.adj.len(x) for x in col]).max() if len(col) > 0 else 0
for col in strcols
]
col_bins = _binify(col_widths, lwidth)
nbins = len(col_bins)
if self.truncate_v:
nrows = self.max_rows_adj + 1
else:
nrows = len(self.frame)
str_lst = []
st = 0
for i, ed in enumerate(col_bins):
row = strcols[st:ed]
if self.index:
row.insert(0, idx)
if nbins > 1:
if ed <= len(strcols) and i < nbins - 1:
row.append([" \\"] + [" "] * (nrows - 1))
else:
row.append([" "] * nrows)
str_lst.append(self.adj.adjoin(adjoin_width, *row))
st = ed
return "\n\n".join(str_lst)
def to_latex(
self,
column_format=None,
longtable=False,
encoding=None,
multicolumn=False,
multicolumn_format=None,
multirow=False,
):
"""
Render a DataFrame to a LaTeX tabular/longtable environment output.
"""
from pandas.io.formats.latex import LatexFormatter
latex_renderer = LatexFormatter(
self,
column_format=column_format,
longtable=longtable,
multicolumn=multicolumn,
multicolumn_format=multicolumn_format,
multirow=multirow,
)
if encoding is None:
encoding = "utf-8"
if hasattr(self.buf, "write"):
latex_renderer.write_result(self.buf)
elif isinstance(self.buf, str):
import codecs
with codecs.open(self.buf, "w", encoding=encoding) as f:
latex_renderer.write_result(f)
else:
raise TypeError("buf is not a file name and it has no write " "method")
def _format_col(self, i):
frame = self.tr_frame
formatter = self._get_formatter(i)
values_to_format = frame.iloc[:, i]._formatting_values()
return format_array(
values_to_format,
formatter,
float_format=self.float_format,
na_rep=self.na_rep,
space=self.col_space,
decimal=self.decimal,
)
def to_html(self, classes=None, notebook=False, border=None):
"""
Render a DataFrame to a html table.
Parameters
----------
classes : str or list-like
classes to include in the `class` attribute of the opening
``<table>`` tag, in addition to the default "dataframe".
notebook : {True, False}, optional, default False
Whether the generated HTML is for IPython Notebook.
border : int
A ``border=border`` attribute is included in the opening
``<table>`` tag. Default ``pd.options.display.html.border``.
.. versionadded:: 0.19.0
"""
from pandas.io.formats.html import HTMLFormatter, NotebookFormatter
Klass = NotebookFormatter if notebook else HTMLFormatter
html = Klass(self, classes=classes, border=border).render()
if hasattr(self.buf, "write"):
buffer_put_lines(self.buf, html)
elif isinstance(self.buf, str):
with open(self.buf, "w") as f:
buffer_put_lines(f, html)
else:
raise TypeError("buf is not a file name and it has no write " " method")
def _get_formatted_column_labels(self, frame):
from pandas.core.index import _sparsify
columns = frame.columns
if isinstance(columns, ABCMultiIndex):
fmt_columns = columns.format(sparsify=False, adjoin=False)
fmt_columns = list(zip(*fmt_columns))
dtypes = self.frame.dtypes._values
# if we have a Float level, they don't use leading space at all
restrict_formatting = any(l.is_floating for l in columns.levels)
need_leadsp = dict(zip(fmt_columns, map(is_numeric_dtype, dtypes)))
def space_format(x, y):
if (
y not in self.formatters
and need_leadsp[x]
and not restrict_formatting
):
return " " + y
return y
str_columns = list(
zip(*[[space_format(x, y) for y in x] for x in fmt_columns])
)
if self.sparsify and len(str_columns):
str_columns = _sparsify(str_columns)
str_columns = [list(x) for x in zip(*str_columns)]
else:
fmt_columns = columns.format()
dtypes = self.frame.dtypes
need_leadsp = dict(zip(fmt_columns, map(is_numeric_dtype, dtypes)))
str_columns = [
[" " + x if not self._get_formatter(i) and need_leadsp[x] else x]
for i, (col, x) in enumerate(zip(columns, fmt_columns))
]
# self.str_columns = str_columns
return str_columns
@property
def has_index_names(self):
return _has_names(self.frame.index)
@property
def has_column_names(self):
return _has_names(self.frame.columns)
@property
def show_row_idx_names(self):
return all((self.has_index_names, self.index, self.show_index_names))
@property
def show_col_idx_names(self):
return all((self.has_column_names, self.show_index_names, self.header))
def _get_formatted_index(self, frame):
# Note: this is only used by to_string() and to_latex(), not by
# to_html().
index = frame.index
columns = frame.columns
fmt = self._get_formatter("__index__")
if isinstance(index, ABCMultiIndex):
fmt_index = index.format(
sparsify=self.sparsify,
adjoin=False,
names=self.show_row_idx_names,
formatter=fmt,
)
else:
fmt_index = [index.format(name=self.show_row_idx_names, formatter=fmt)]
fmt_index = [
tuple(
_make_fixed_width(
list(x), justify="left", minimum=(self.col_space or 0), adj=self.adj
)
)
for x in fmt_index
]
adjoined = self.adj.adjoin(1, *fmt_index).split("\n")
# empty space for columns
if self.show_col_idx_names:
col_header = ["{x}".format(x=x) for x in self._get_column_name_list()]
else:
col_header = [""] * columns.nlevels
if self.header:
return col_header + adjoined
else:
return adjoined
def _get_column_name_list(self):
names = []
columns = self.frame.columns
if isinstance(columns, ABCMultiIndex):
names.extend("" if name is None else name for name in columns.names)
else:
names.append("" if columns.name is None else columns.name)
return names
# ----------------------------------------------------------------------
# Array formatters
def format_array(
values,
formatter,
float_format=None,
na_rep="NaN",
digits=None,
space=None,
justify="right",
decimal=".",
leading_space=None,
):
"""
Format an array for printing.
Parameters
----------
values
formatter
float_format
na_rep
digits
space
justify
decimal
leading_space : bool, optional
Whether the array should be formatted with a leading space.
When the array is a column of a Series or DataFrame, we do want
the leading space to pad between columns.
When formatting an Index subclass
(e.g. IntervalIndex._format_native_types), we don't want the
leading space since it should be left-aligned.
Returns
-------
List[str]
"""
if is_datetime64_dtype(values.dtype):
fmt_klass = Datetime64Formatter
elif is_datetime64tz_dtype(values):
fmt_klass = Datetime64TZFormatter
elif is_timedelta64_dtype(values.dtype):
fmt_klass = Timedelta64Formatter
elif is_extension_array_dtype(values.dtype):
fmt_klass = ExtensionArrayFormatter
elif is_float_dtype(values.dtype) or is_complex_dtype(values.dtype):
fmt_klass = FloatArrayFormatter
elif is_integer_dtype(values.dtype):
fmt_klass = IntArrayFormatter
else:
fmt_klass = GenericArrayFormatter
if space is None:
space = get_option("display.column_space")
if float_format is None:
float_format = get_option("display.float_format")
if digits is None:
digits = get_option("display.precision")
fmt_obj = fmt_klass(
values,
digits=digits,
na_rep=na_rep,
float_format=float_format,
formatter=formatter,
space=space,
justify=justify,
decimal=decimal,
leading_space=leading_space,
)
return fmt_obj.get_result()
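# Illustrative note (not part of the original module): format_array is the single
# dispatch point for the *ArrayFormatter classes defined below. For example, a
# float column roughly follows
#     format_array(np.array([1.0, np.nan, 1234.5678]), None, digits=4, na_rep='NaN')
# which routes to FloatArrayFormatter and returns a list of right-justified strings;
# the exact padding and rounding depend on the display.* options.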
class GenericArrayFormatter:
def __init__(
self,
values,
digits=7,
formatter=None,
na_rep="NaN",
space=12,
float_format=None,
justify="right",
decimal=".",
quoting=None,
fixed_width=True,
leading_space=None,
):
self.values = values
self.digits = digits
self.na_rep = na_rep
self.space = space
self.formatter = formatter
self.float_format = float_format
self.justify = justify
self.decimal = decimal
self.quoting = quoting
self.fixed_width = fixed_width
self.leading_space = leading_space
def get_result(self):
fmt_values = self._format_strings()
return _make_fixed_width(fmt_values, self.justify)
def _format_strings(self):
if self.float_format is None:
float_format = get_option("display.float_format")
if float_format is None:
fmt_str = "{{x: .{prec:d}g}}".format(
prec=get_option("display.precision")
)
float_format = lambda x: fmt_str.format(x=x)
else:
float_format = self.float_format
formatter = (
self.formatter
if self.formatter is not None
else (lambda x: pprint_thing(x, escape_chars=("\t", "\r", "\n")))
)
def _format(x):
if self.na_rep is not None and is_scalar(x) and isna(x):
try:
# try block for np.isnat specifically
# determine na_rep if x is None or NaT-like
if x is None:
return "None"
elif x is NaT or np.isnat(x):
return "NaT"
except (TypeError, ValueError):
# np.isnat only handles datetime or timedelta objects
pass
return self.na_rep
elif isinstance(x, PandasObject):
return "{x}".format(x=x)
else:
# object dtype
return "{x}".format(x=formatter(x))
vals = self.values
if isinstance(vals, Index):
vals = vals._values
elif isinstance(vals, ABCSparseArray):
vals = vals.values
is_float_type = lib.map_infer(vals, is_float) & notna(vals)
leading_space = self.leading_space
if leading_space is None:
leading_space = is_float_type.any()
fmt_values = []
for i, v in enumerate(vals):
if not is_float_type[i] and leading_space:
fmt_values.append(" {v}".format(v=_format(v)))
elif is_float_type[i]:
fmt_values.append(float_format(v))
else:
if leading_space is False:
# False specifically, so that the default is
# to include a space if we get here.
tpl = "{v}"
else:
tpl = " {v}"
fmt_values.append(tpl.format(v=_format(v)))
return fmt_values
class FloatArrayFormatter(GenericArrayFormatter):
"""
"""
def __init__(self, *args, **kwargs):
GenericArrayFormatter.__init__(self, *args, **kwargs)
# float_format is expected to be a string
# formatter should be used to pass a function
if self.float_format is not None and self.formatter is None:
# GH21625, GH22270
self.fixed_width = False
if callable(self.float_format):
self.formatter = self.float_format
self.float_format = None
def _value_formatter(self, float_format=None, threshold=None):
"""Returns a function to be applied on each value to format it
"""
# the float_format parameter supersedes self.float_format
if float_format is None:
float_format = self.float_format
# we are going to compose different functions, to first convert to
# a string, then replace the decimal symbol, and finally chop according
# to the threshold
# when there is no float_format, we use str instead of '%g'
# because str(0.0) = '0.0' while '%g' % 0.0 = '0'
if float_format:
def base_formatter(v):
return float_format(value=v) if notna(v) else self.na_rep
else:
def base_formatter(v):
return str(v) if notna(v) else self.na_rep
if self.decimal != ".":
def decimal_formatter(v):
return base_formatter(v).replace(".", self.decimal, 1)
else:
decimal_formatter = base_formatter
if threshold is None:
return decimal_formatter
def formatter(value):
if notna(value):
if abs(value) > threshold:
return decimal_formatter(value)
else:
return decimal_formatter(0.0)
else:
return self.na_rep
return formatter
def get_result_as_array(self):
"""
Returns the float values converted into strings using
the parameters given at initialisation, as a numpy array
"""
if self.formatter is not None:
return np.array([self.formatter(x) for x in self.values])
if self.fixed_width:
threshold = get_option("display.chop_threshold")
else:
threshold = None
# if we have a fixed_width, we'll need to try different float_format
def format_values_with(float_format):
formatter = self._value_formatter(float_format, threshold)
# default formatter leaves a space to the left when formatting
# floats, must be consistent for left-justifying NaNs (GH #25061)
if self.justify == "left":
na_rep = " " + self.na_rep
else:
na_rep = self.na_rep
# separate the wheat from the chaff
values = self.values
is_complex = is_complex_dtype(values)
mask = isna(values)
if hasattr(values, "to_dense"): # sparse numpy ndarray
values = values.to_dense()
values = np.array(values, dtype="object")
values[mask] = na_rep
imask = (~mask).ravel()
values.flat[imask] = np.array(
[formatter(val) for val in values.ravel()[imask]]
)
if self.fixed_width:
if is_complex:
return _trim_zeros_complex(values, na_rep)
else:
return _trim_zeros_float(values, na_rep)
return values
# There is a special default string when we are fixed-width
# The default is otherwise to use str instead of a formatting string
if self.float_format is None:
if self.fixed_width:
float_format = partial(
"{value: .{digits:d}f}".format, digits=self.digits
)
else:
float_format = self.float_format
else:
float_format = lambda value: self.float_format % value
formatted_values = format_values_with(float_format)
if not self.fixed_width:
return formatted_values
# we need to convert to engineering format if some values are too small
# and would appear as 0, or if some values are too big and take too
# much space
if len(formatted_values) > 0:
maxlen = max(len(x) for x in formatted_values)
too_long = maxlen > self.digits + 6
else:
too_long = False
with np.errstate(invalid="ignore"):
abs_vals = np.abs(self.values)
# this is pretty arbitrary for now
# large values: more than 8 characters including decimal symbol
# and first digit, hence > 1e6
has_large_values = (abs_vals > 1e6).any()
has_small_values = (
(abs_vals < 10 ** (-self.digits)) & (abs_vals > 0)
).any()
if has_small_values or (too_long and has_large_values):
float_format = partial("{value: .{digits:d}e}".format, digits=self.digits)
formatted_values = format_values_with(float_format)
return formatted_values
def _format_strings(self):
# shortcut
if self.formatter is not None:
return [self.formatter(x) for x in self.values]
return list(self.get_result_as_array())
class IntArrayFormatter(GenericArrayFormatter):
def _format_strings(self):
formatter = self.formatter or (lambda x: "{x: d}".format(x=x))
fmt_values = [formatter(x) for x in self.values]
return fmt_values
class Datetime64Formatter(GenericArrayFormatter):
def __init__(self, values, nat_rep="NaT", date_format=None, **kwargs):
super().__init__(values, **kwargs)
self.nat_rep = nat_rep
self.date_format = date_format
def _format_strings(self):
""" we by definition have DO NOT have a TZ """
values = self.values
if not isinstance(values, DatetimeIndex):
values = DatetimeIndex(values)
if self.formatter is not None and callable(self.formatter):
return [self.formatter(x) for x in values]
fmt_values = format_array_from_datetime(
values.asi8.ravel(),
format=_get_format_datetime64_from_values(values, self.date_format),
na_rep=self.nat_rep,
).reshape(values.shape)
return fmt_values.tolist()
class ExtensionArrayFormatter(GenericArrayFormatter):
def _format_strings(self):
values = self.values
if isinstance(values, (ABCIndexClass, ABCSeries)):
values = values._values
formatter = values._formatter(boxed=True)
if is_categorical_dtype(values.dtype):
# Categorical is special for now, so that we can preserve tzinfo
array = values._internal_get_values()
else:
array = np.asarray(values)
fmt_values = format_array(
array,
formatter,
float_format=self.float_format,
na_rep=self.na_rep,
digits=self.digits,
space=self.space,
justify=self.justify,
leading_space=self.leading_space,
)
return fmt_values
def format_percentiles(percentiles):
"""
Outputs rounded and formatted percentiles.
Parameters
----------
percentiles : list-like, containing floats from interval [0,1]
Returns
-------
formatted : list of strings
Notes
-----
Rounding precision is chosen so that: (1) if any two elements of
``percentiles`` differ, they remain different after rounding
(2) no entry is *rounded* to 0% or 100%.
Any non-integer is always rounded to at least 1 decimal place.
Examples
--------
Keeps all entries different after rounding:
>>> format_percentiles([0.01999, 0.02001, 0.5, 0.666666, 0.9999])
['1.999%', '2.001%', '50%', '66.667%', '99.99%']
No element is rounded to 0% or 100% (unless already equal to it).
Duplicates are allowed:
>>> format_percentiles([0, 0.5, 0.02001, 0.5, 0.666666, 0.9999])
['0%', '50%', '2.0%', '50%', '66.67%', '99.99%']
"""
percentiles = np.asarray(percentiles)
# It checks for np.NaN as well
with np.errstate(invalid="ignore"):
if (
not is_numeric_dtype(percentiles)
or not np.all(percentiles >= 0)
or not np.all(percentiles <= 1)
):
raise ValueError("percentiles should all be in the interval [0,1]")
percentiles = 100 * percentiles
int_idx = np.isclose(percentiles.astype(int), percentiles)
if np.all(int_idx):
out = percentiles.astype(int).astype(str)
return [i + "%" for i in out]
unique_pcts = np.unique(percentiles)
to_begin = unique_pcts[0] if unique_pcts[0] > 0 else None
to_end = 100 - unique_pcts[-1] if unique_pcts[-1] < 100 else None
# Least precision that keeps percentiles unique after rounding
prec = -np.floor(
np.log10(np.min(np.ediff1d(unique_pcts, to_begin=to_begin, to_end=to_end)))
).astype(int)
prec = max(1, prec)
out = np.empty_like(percentiles, dtype=object)
out[int_idx] = percentiles[int_idx].astype(int).astype(str)
out[~int_idx] = percentiles[~int_idx].round(prec).astype(str)
return [i + "%" for i in out]
def _is_dates_only(values):
# return a boolean if we are only dates (and don't have a timezone)
assert values.ndim == 1
values = DatetimeIndex(values)
if values.tz is not None:
return False
values_int = values.asi8
consider_values = values_int != iNaT
one_day_nanos = 86400 * 1e9
even_days = (
np.logical_and(consider_values, values_int % int(one_day_nanos) != 0).sum() == 0
)
if even_days:
return True
return False
def _format_datetime64(x, tz=None, nat_rep="NaT"):
if x is None or (is_scalar(x) and isna(x)):
return nat_rep
if tz is not None or not isinstance(x, Timestamp):
if getattr(x, "tzinfo", None) is not None:
x = Timestamp(x).tz_convert(tz)
else:
x = Timestamp(x).tz_localize(tz)
return str(x)
def _format_datetime64_dateonly(x, nat_rep="NaT", date_format=None):
if x is None or (is_scalar(x) and isna(x)):
return nat_rep
if not isinstance(x, Timestamp):
x = Timestamp(x)
if date_format:
return x.strftime(date_format)
else:
return x._date_repr
def _get_format_datetime64(is_dates_only, nat_rep="NaT", date_format=None):
if is_dates_only:
return lambda x, tz=None: _format_datetime64_dateonly(
x, nat_rep=nat_rep, date_format=date_format
)
else:
return lambda x, tz=None: _format_datetime64(x, tz=tz, nat_rep=nat_rep)
def _get_format_datetime64_from_values(values, date_format):
""" given values and a date_format, return a string format """
if isinstance(values, np.ndarray) and values.ndim > 1:
# We don't actually care about the order of values, and DatetimeIndex
# only accepts 1D values
values = values.ravel()
is_dates_only = _is_dates_only(values)
if is_dates_only:
return date_format or "%Y-%m-%d"
return date_format
class Datetime64TZFormatter(Datetime64Formatter):
def _format_strings(self):
""" we by definition have a TZ """
values = self.values.astype(object)
is_dates_only = _is_dates_only(values)
formatter = self.formatter or _get_format_datetime64(
is_dates_only, date_format=self.date_format
)
fmt_values = [formatter(x) for x in values]
return fmt_values
class Timedelta64Formatter(GenericArrayFormatter):
def __init__(self, values, nat_rep="NaT", box=False, **kwargs):
super().__init__(values, **kwargs)
self.nat_rep = nat_rep
self.box = box
def _format_strings(self):
formatter = self.formatter or _get_format_timedelta64(
self.values, nat_rep=self.nat_rep, box=self.box
)
fmt_values = np.array([formatter(x) for x in self.values])
return fmt_values
def _get_format_timedelta64(values, nat_rep="NaT", box=False):
"""
Return a formatter function for a range of timedeltas.
These will all have the same format argument
If box, then show the return in quotes
"""
values_int = values.astype(np.int64)
consider_values = values_int != iNaT
one_day_nanos = 86400 * 1e9
even_days = (
np.logical_and(consider_values, values_int % one_day_nanos != 0).sum() == 0
)
all_sub_day = (
np.logical_and(consider_values, np.abs(values_int) >= one_day_nanos).sum() == 0
)
if even_days:
format = None
elif all_sub_day:
format = "sub_day"
else:
format = "long"
def _formatter(x):
if x is None or (is_scalar(x) and isna(x)):
return nat_rep
if not isinstance(x, Timedelta):
x = Timedelta(x)
result = x._repr_base(format=format)
if box:
result = "'{res}'".format(res=result)
return result
return _formatter
def _make_fixed_width(strings, justify="right", minimum=None, adj=None):
if len(strings) == 0 or justify == "all":
return strings
if adj is None:
adj = _get_adjustment()
max_len = max(adj.len(x) for x in strings)
if minimum is not None:
max_len = max(minimum, max_len)
conf_max = get_option("display.max_colwidth")
if conf_max is not None and max_len > conf_max:
max_len = conf_max
def just(x):
if conf_max is not None:
if (conf_max > 3) & (adj.len(x) > max_len):
x = x[: max_len - 3] + "..."
return x
strings = [just(x) for x in strings]
result = adj.justify(strings, max_len, mode=justify)
return result
def _trim_zeros_complex(str_complexes, na_rep="NaN"):
"""
Separates the real and imaginary parts from the complex number, and
executes the _trim_zeros_float method on each of those.
"""
return [
"".join(_trim_zeros_float(re.split(r"([j+-])", x), na_rep))
for x in str_complexes
]
def _trim_zeros_float(str_floats, na_rep="NaN"):
"""
Trims trailing zeros after the decimal point, leaving at least one if need be.
"""
trimmed = str_floats
def _is_number(x):
return x != na_rep and not x.endswith("inf")
def _cond(values):
finite = [x for x in values if _is_number(x)]
return (
len(finite) > 0
and all(x.endswith("0") for x in finite)
and not (any(("e" in x) or ("E" in x) for x in finite))
)
while _cond(trimmed):
trimmed = [x[:-1] if _is_number(x) else x for x in trimmed]
# leave one 0 after the decimal points if need be.
return [x + "0" if x.endswith(".") and _is_number(x) else x for x in trimmed]
def _has_names(index):
if isinstance(index, ABCMultiIndex):
return com._any_not_none(*index.names)
else:
return index.name is not None
class EngFormatter:
"""
Formats float values according to engineering format.
Based on matplotlib.ticker.EngFormatter
"""
# The SI engineering prefixes
ENG_PREFIXES = {
-24: "y",
-21: "z",
-18: "a",
-15: "f",
-12: "p",
-9: "n",
-6: "u",
-3: "m",
0: "",
3: "k",
6: "M",
9: "G",
12: "T",
15: "P",
18: "E",
21: "Z",
24: "Y",
}
def __init__(self, accuracy=None, use_eng_prefix=False):
self.accuracy = accuracy
self.use_eng_prefix = use_eng_prefix
def __call__(self, num):
""" Formats a number in engineering notation, appending a letter
representing the power of 1000 of the original number. Some examples:
>>> format_eng(0) # for self.accuracy = 0
' 0'
>>> format_eng(1000000) # for self.accuracy = 1,
# self.use_eng_prefix = True
' 1.0M'
>>> format_eng("-1e-6") # for self.accuracy = 2
# self.use_eng_prefix = False
'-1.00E-06'
@param num: the value to represent
@type num: either a numeric value or a string that can be converted to
a numeric value (as per decimal.Decimal constructor)
@return: engineering formatted string
"""
import decimal
import math
dnum = decimal.Decimal(str(num))
if decimal.Decimal.is_nan(dnum):
return "NaN"
if decimal.Decimal.is_infinite(dnum):
return "inf"
sign = 1
if dnum < 0: # pragma: no cover
sign = -1
dnum = -dnum
if dnum != 0:
pow10 = decimal.Decimal(int(math.floor(dnum.log10() / 3) * 3))
else:
pow10 = decimal.Decimal(0)
pow10 = pow10.min(max(self.ENG_PREFIXES.keys()))
pow10 = pow10.max(min(self.ENG_PREFIXES.keys()))
int_pow10 = int(pow10)
if self.use_eng_prefix:
prefix = self.ENG_PREFIXES[int_pow10]
else:
if int_pow10 < 0:
prefix = "E-{pow10:02d}".format(pow10=-int_pow10)
else:
prefix = "E+{pow10:02d}".format(pow10=int_pow10)
mant = sign * dnum / (10 ** pow10)
if self.accuracy is None: # pragma: no cover
format_str = "{mant: g}{prefix}"
else:
format_str = "{{mant: .{acc:d}f}}{{prefix}}".format(acc=self.accuracy)
formatted = format_str.format(mant=mant, prefix=prefix)
return formatted
def set_eng_float_format(accuracy=3, use_eng_prefix=False):
"""
Alter default behavior on how float is formatted in DataFrame.
Format float in engineering format. By accuracy, we mean the number of
decimal digits after the floating point.
See also EngFormatter.
"""
set_option("display.float_format", EngFormatter(accuracy, use_eng_prefix))
set_option("display.column_space", max(12, accuracy + 9))
def _binify(cols, line_width):
adjoin_width = 1
bins = []
curr_width = 0
i_last_column = len(cols) - 1
for i, w in enumerate(cols):
w_adjoined = w + adjoin_width
curr_width += w_adjoined
if i_last_column == i:
wrap = curr_width + 1 > line_width and i > 0
else:
wrap = curr_width + 2 > line_width and i > 0
if wrap:
bins.append(i)
curr_width = w_adjoined
bins.append(len(cols))
return bins
def get_level_lengths(levels, sentinel=""):
"""For each index in each level the function returns lengths of indexes.
Parameters
----------
levels : list of lists
List of values for each level.
sentinel : string, optional
Value which states that no new index starts there.
Returns
-------
Returns list of maps. For each level returns map of indexes (key is index
in row and value is length of index).
"""
if len(levels) == 0:
return []
control = [True] * len(levels[0])
result = []
for level in levels:
last_index = 0
lengths = {}
for i, key in enumerate(level):
if control[i] and key == sentinel:
pass
else:
control[i] = False
lengths[last_index] = i - last_index
last_index = i
lengths[last_index] = len(level) - last_index
result.append(lengths)
return result
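# Illustrative sketch (not in the original source): for a two-level index
# rendered with sentinel "" as
#     levels = [["a", "", "", "b", ""], ["x", "y", "x", "x", "y"]]
# the first level yields {0: 3, 3: 2} ("a" spans three rows, "b" spans two),
# and the second level yields a span of 1 at every row, because each cell
# starts a new index there. The helper name below is hypothetical.
def _demo_get_level_lengths():
    levels = [["a", "", "", "b", ""], ["x", "y", "x", "x", "y"]]
    return get_level_lengths(levels, sentinel="")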
def buffer_put_lines(buf, lines):
"""
Appends lines to a buffer.
Parameters
----------
buf
The buffer to write to
lines
The lines to append.
"""
if any(isinstance(x, str) for x in lines):
lines = [str(x) for x in lines]
buf.write("\n".join(lines))
| apache-2.0 |
jorge2703/scikit-learn | sklearn/tests/test_cross_validation.py | 19 | 44125 | """Test the cross_validation module"""
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import coo_matrix
from scipy.sparse import csr_matrix
from scipy import stats
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from sklearn import cross_validation as cval
from sklearn.datasets import make_regression
from sklearn.datasets import load_boston
from sklearn.datasets import load_digits
from sklearn.datasets import load_iris
from sklearn.datasets import make_multilabel_classification
from sklearn.metrics import explained_variance_score
from sklearn.metrics import make_scorer
from sklearn.metrics import precision_score
from sklearn.externals import six
from sklearn.externals.six.moves import zip
from sklearn.linear_model import Ridge
from sklearn.multiclass import OneVsRestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.cluster import KMeans
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
class MockClassifier(object):
"""Dummy classifier to test the cross-validation"""
def __init__(self, a=0, allow_nd=False):
self.a = a
self.allow_nd = allow_nd
def fit(self, X, Y=None, sample_weight=None, class_prior=None,
sparse_sample_weight=None, sparse_param=None, dummy_int=None,
dummy_str=None, dummy_obj=None, callback=None):
"""The dummy arguments are to test that this fit function can
accept non-array arguments through cross-validation, such as:
- int
- str (this is actually array-like)
- object
- function
"""
self.dummy_int = dummy_int
self.dummy_str = dummy_str
self.dummy_obj = dummy_obj
if callback is not None:
callback(self)
if self.allow_nd:
X = X.reshape(len(X), -1)
if X.ndim >= 3 and not self.allow_nd:
raise ValueError('X cannot be d')
if sample_weight is not None:
assert_true(sample_weight.shape[0] == X.shape[0],
'MockClassifier extra fit_param sample_weight.shape[0]'
' is {0}, should be {1}'.format(sample_weight.shape[0],
X.shape[0]))
if class_prior is not None:
assert_true(class_prior.shape[0] == len(np.unique(y)),
'MockClassifier extra fit_param class_prior.shape[0]'
' is {0}, should be {1}'.format(class_prior.shape[0],
len(np.unique(y))))
if sparse_sample_weight is not None:
fmt = ('MockClassifier extra fit_param sparse_sample_weight'
'.shape[0] is {0}, should be {1}')
assert_true(sparse_sample_weight.shape[0] == X.shape[0],
fmt.format(sparse_sample_weight.shape[0], X.shape[0]))
if sparse_param is not None:
fmt = ('MockClassifier extra fit_param sparse_param.shape '
'is ({0}, {1}), should be ({2}, {3})')
assert_true(sparse_param.shape == P_sparse.shape,
fmt.format(sparse_param.shape[0],
sparse_param.shape[1],
P_sparse.shape[0], P_sparse.shape[1]))
return self
def predict(self, T):
if self.allow_nd:
T = T.reshape(len(T), -1)
return T[:, 0]
def score(self, X=None, Y=None):
return 1. / (1 + np.abs(self.a))
def get_params(self, deep=False):
return {'a': self.a, 'allow_nd': self.allow_nd}
X = np.ones((10, 2))
X_sparse = coo_matrix(X)
W_sparse = coo_matrix((np.array([1]), (np.array([1]), np.array([0]))),
shape=(10, 1))
P_sparse = coo_matrix(np.eye(5))
y = np.arange(10) // 2
##############################################################################
# Tests
def check_valid_split(train, test, n_samples=None):
# Use python sets to get more informative assertion failure messages
train, test = set(train), set(test)
# Train and test split should not overlap
assert_equal(train.intersection(test), set())
if n_samples is not None:
        # Check that the union of train and test split covers all the indices
assert_equal(train.union(test), set(range(n_samples)))
def check_cv_coverage(cv, expected_n_iter=None, n_samples=None):
    # Check that all the samples appear at least once in a test fold
if expected_n_iter is not None:
assert_equal(len(cv), expected_n_iter)
else:
expected_n_iter = len(cv)
collected_test_samples = set()
iterations = 0
for train, test in cv:
check_valid_split(train, test, n_samples=n_samples)
iterations += 1
collected_test_samples.update(test)
# Check that the accumulated test samples cover the whole dataset
assert_equal(iterations, expected_n_iter)
if n_samples is not None:
assert_equal(collected_test_samples, set(range(n_samples)))
def test_kfold_valueerrors():
    # Check that errors are raised if there are not enough samples
assert_raises(ValueError, cval.KFold, 3, 4)
# Check that a warning is raised if the least populated class has too few
# members.
y = [3, 3, -1, -1, 2]
cv = assert_warns_message(Warning, "The least populated class",
cval.StratifiedKFold, y, 3)
# Check that despite the warning the folds are still computed even
    # though all the classes are not necessarily represented on each
    # side of the split at each split
check_cv_coverage(cv, expected_n_iter=3, n_samples=len(y))
# Error when number of folds is <= 1
assert_raises(ValueError, cval.KFold, 2, 0)
assert_raises(ValueError, cval.KFold, 2, 1)
assert_raises(ValueError, cval.StratifiedKFold, y, 0)
assert_raises(ValueError, cval.StratifiedKFold, y, 1)
# When n is not integer:
assert_raises(ValueError, cval.KFold, 2.5, 2)
# When n_folds is not integer:
assert_raises(ValueError, cval.KFold, 5, 1.5)
assert_raises(ValueError, cval.StratifiedKFold, y, 1.5)
def test_kfold_indices():
# Check all indices are returned in the test folds
kf = cval.KFold(300, 3)
check_cv_coverage(kf, expected_n_iter=3, n_samples=300)
# Check all indices are returned in the test folds even when equal-sized
# folds are not possible
kf = cval.KFold(17, 3)
check_cv_coverage(kf, expected_n_iter=3, n_samples=17)
def test_kfold_no_shuffle():
# Manually check that KFold preserves the data ordering on toy datasets
splits = iter(cval.KFold(4, 2))
train, test = next(splits)
assert_array_equal(test, [0, 1])
assert_array_equal(train, [2, 3])
train, test = next(splits)
assert_array_equal(test, [2, 3])
assert_array_equal(train, [0, 1])
splits = iter(cval.KFold(5, 2))
train, test = next(splits)
assert_array_equal(test, [0, 1, 2])
assert_array_equal(train, [3, 4])
train, test = next(splits)
assert_array_equal(test, [3, 4])
assert_array_equal(train, [0, 1, 2])
def test_stratified_kfold_no_shuffle():
# Manually check that StratifiedKFold preserves the data ordering as much
# as possible on toy datasets in order to avoid hiding sample dependencies
# when possible
splits = iter(cval.StratifiedKFold([1, 1, 0, 0], 2))
train, test = next(splits)
assert_array_equal(test, [0, 2])
assert_array_equal(train, [1, 3])
train, test = next(splits)
assert_array_equal(test, [1, 3])
assert_array_equal(train, [0, 2])
splits = iter(cval.StratifiedKFold([1, 1, 1, 0, 0, 0, 0], 2))
train, test = next(splits)
assert_array_equal(test, [0, 1, 3, 4])
assert_array_equal(train, [2, 5, 6])
train, test = next(splits)
assert_array_equal(test, [2, 5, 6])
assert_array_equal(train, [0, 1, 3, 4])
def test_stratified_kfold_ratios():
# Check that stratified kfold preserves label ratios in individual splits
# Repeat with shuffling turned off and on
n_samples = 1000
labels = np.array([4] * int(0.10 * n_samples) +
[0] * int(0.89 * n_samples) +
[1] * int(0.01 * n_samples))
for shuffle in [False, True]:
for train, test in cval.StratifiedKFold(labels, 5, shuffle=shuffle):
assert_almost_equal(np.sum(labels[train] == 4) / len(train), 0.10,
2)
assert_almost_equal(np.sum(labels[train] == 0) / len(train), 0.89,
2)
assert_almost_equal(np.sum(labels[train] == 1) / len(train), 0.01,
2)
assert_almost_equal(np.sum(labels[test] == 4) / len(test), 0.10, 2)
assert_almost_equal(np.sum(labels[test] == 0) / len(test), 0.89, 2)
assert_almost_equal(np.sum(labels[test] == 1) / len(test), 0.01, 2)
def test_kfold_balance():
# Check that KFold returns folds with balanced sizes
for kf in [cval.KFold(i, 5) for i in range(11, 17)]:
sizes = []
for _, test in kf:
sizes.append(len(test))
assert_true((np.max(sizes) - np.min(sizes)) <= 1)
assert_equal(np.sum(sizes), kf.n)
def test_stratifiedkfold_balance():
# Check that KFold returns folds with balanced sizes (only when
# stratification is possible)
# Repeat with shuffling turned off and on
labels = [0] * 3 + [1] * 14
for shuffle in [False, True]:
for skf in [cval.StratifiedKFold(labels[:i], 3, shuffle=shuffle)
for i in range(11, 17)]:
sizes = []
for _, test in skf:
sizes.append(len(test))
assert_true((np.max(sizes) - np.min(sizes)) <= 1)
assert_equal(np.sum(sizes), skf.n)
def test_shuffle_kfold():
# Check the indices are shuffled properly, and that all indices are
# returned in the different test folds
kf = cval.KFold(300, 3, shuffle=True, random_state=0)
ind = np.arange(300)
all_folds = None
for train, test in kf:
sorted_array = np.arange(100)
assert_true(np.any(sorted_array != ind[train]))
sorted_array = np.arange(101, 200)
assert_true(np.any(sorted_array != ind[train]))
sorted_array = np.arange(201, 300)
assert_true(np.any(sorted_array != ind[train]))
if all_folds is None:
all_folds = ind[test].copy()
else:
all_folds = np.concatenate((all_folds, ind[test]))
all_folds.sort()
assert_array_equal(all_folds, ind)
def test_shuffle_stratifiedkfold():
# Check that shuffling is happening when requested, and for proper
# sample coverage
labels = [0] * 20 + [1] * 20
kf0 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=0))
kf1 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=1))
for (_, test0), (_, test1) in zip(kf0, kf1):
assert_true(set(test0) != set(test1))
check_cv_coverage(kf0, expected_n_iter=5, n_samples=40)
def test_kfold_can_detect_dependent_samples_on_digits(): # see #2372
# The digits samples are dependent: they are apparently grouped by authors
# although we don't have any information on the groups segment locations
    # for this data. We can highlight this fact by computing k-fold cross-
    # validation with and without shuffling: we observe that the shuffling case
    # wrongly makes the IID assumption and is therefore too optimistic: it
    # estimates a much higher accuracy (around 0.96) than the non-shuffling
    # variant (around 0.86).
digits = load_digits()
X, y = digits.data[:800], digits.target[:800]
model = SVC(C=10, gamma=0.005)
n = len(y)
cv = cval.KFold(n, 5, shuffle=False)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(0.88, mean_score)
assert_greater(mean_score, 0.85)
# Shuffling the data artificially breaks the dependency and hides the
# overfitting of the model with regards to the writing style of the authors
# by yielding a seriously overestimated score:
cv = cval.KFold(n, 5, shuffle=True, random_state=0)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(mean_score, 0.95)
cv = cval.KFold(n, 5, shuffle=True, random_state=1)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(mean_score, 0.95)
# Similarly, StratifiedKFold should try to shuffle the data as little
# as possible (while respecting the balanced class constraints)
# and thus be able to detect the dependency by not overestimating
# the CV score either. As the digits dataset is approximately balanced
# the estimated mean score is close to the score measured with
# non-shuffled KFold
cv = cval.StratifiedKFold(y, 5)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(0.88, mean_score)
assert_greater(mean_score, 0.85)
def test_shuffle_split():
ss1 = cval.ShuffleSplit(10, test_size=0.2, random_state=0)
ss2 = cval.ShuffleSplit(10, test_size=2, random_state=0)
ss3 = cval.ShuffleSplit(10, test_size=np.int32(2), random_state=0)
for typ in six.integer_types:
ss4 = cval.ShuffleSplit(10, test_size=typ(2), random_state=0)
for t1, t2, t3, t4 in zip(ss1, ss2, ss3, ss4):
assert_array_equal(t1[0], t2[0])
assert_array_equal(t2[0], t3[0])
assert_array_equal(t3[0], t4[0])
assert_array_equal(t1[1], t2[1])
assert_array_equal(t2[1], t3[1])
assert_array_equal(t3[1], t4[1])
def test_stratified_shuffle_split_init():
y = np.asarray([0, 1, 1, 1, 2, 2, 2])
# Check that error is raised if there is a class with only one sample
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.2)
# Check that error is raised if the test set size is smaller than n_classes
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 2)
# Check that error is raised if the train set size is smaller than
# n_classes
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 3, 2)
y = np.asarray([0, 0, 0, 1, 1, 1, 2, 2, 2])
    # Check that errors are raised if there are not enough samples
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.5, 0.6)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 8, 0.6)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.6, 8)
# Train size or test size too small
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, train_size=2)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, test_size=2)
def test_stratified_shuffle_split_iter():
ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]),
np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
np.array([-1] * 800 + [1] * 50)
]
for y in ys:
sss = cval.StratifiedShuffleSplit(y, 6, test_size=0.33,
random_state=0)
for train, test in sss:
assert_array_equal(np.unique(y[train]), np.unique(y[test]))
# Checks if folds keep classes proportions
p_train = (np.bincount(np.unique(y[train], return_inverse=True)[1])
/ float(len(y[train])))
p_test = (np.bincount(np.unique(y[test], return_inverse=True)[1])
/ float(len(y[test])))
assert_array_almost_equal(p_train, p_test, 1)
assert_equal(y[train].size + y[test].size, y.size)
assert_array_equal(np.intersect1d(train, test), [])
def test_stratified_shuffle_split_even():
    # Test the StratifiedShuffleSplit, indices are drawn with an
    # equal chance
n_folds = 5
n_iter = 1000
def assert_counts_are_ok(idx_counts, p):
# Here we test that the distribution of the counts
# per index is close enough to a binomial
threshold = 0.05 / n_splits
bf = stats.binom(n_splits, p)
for count in idx_counts:
p = bf.pmf(count)
assert_true(p > threshold,
"An index is not drawn with chance corresponding "
"to even draws")
for n_samples in (6, 22):
labels = np.array((n_samples // 2) * [0, 1])
splits = cval.StratifiedShuffleSplit(labels, n_iter=n_iter,
test_size=1. / n_folds,
random_state=0)
train_counts = [0] * n_samples
test_counts = [0] * n_samples
n_splits = 0
for train, test in splits:
n_splits += 1
for counter, ids in [(train_counts, train), (test_counts, test)]:
for id in ids:
counter[id] += 1
assert_equal(n_splits, n_iter)
assert_equal(len(train), splits.n_train)
assert_equal(len(test), splits.n_test)
assert_equal(len(set(train).intersection(test)), 0)
label_counts = np.unique(labels)
assert_equal(splits.test_size, 1.0 / n_folds)
assert_equal(splits.n_train + splits.n_test, len(labels))
assert_equal(len(label_counts), 2)
ex_test_p = float(splits.n_test) / n_samples
ex_train_p = float(splits.n_train) / n_samples
assert_counts_are_ok(train_counts, ex_train_p)
assert_counts_are_ok(test_counts, ex_test_p)
def test_predefinedsplit_with_kfold_split():
# Check that PredefinedSplit can reproduce a split generated by Kfold.
folds = -1 * np.ones(10)
kf_train = []
kf_test = []
for i, (train_ind, test_ind) in enumerate(cval.KFold(10, 5, shuffle=True)):
kf_train.append(train_ind)
kf_test.append(test_ind)
folds[test_ind] = i
ps_train = []
ps_test = []
ps = cval.PredefinedSplit(folds)
for train_ind, test_ind in ps:
ps_train.append(train_ind)
ps_test.append(test_ind)
assert_array_equal(ps_train, kf_train)
assert_array_equal(ps_test, kf_test)
def test_label_shuffle_split():
ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]),
np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
]
for y in ys:
n_iter = 6
test_size = 1./3
slo = cval.LabelShuffleSplit(y, n_iter, test_size=test_size,
random_state=0)
# Make sure the repr works
repr(slo)
# Test that the length is correct
assert_equal(len(slo), n_iter)
y_unique = np.unique(y)
for train, test in slo:
# First test: no train label is in the test set and vice versa
y_train_unique = np.unique(y[train])
y_test_unique = np.unique(y[test])
assert_false(np.any(np.in1d(y[train], y_test_unique)))
assert_false(np.any(np.in1d(y[test], y_train_unique)))
# Second test: train and test add up to all the data
assert_equal(y[train].size + y[test].size, y.size)
# Third test: train and test are disjoint
assert_array_equal(np.intersect1d(train, test), [])
# Fourth test: # unique train and test labels are correct,
# +- 1 for rounding error
assert_true(abs(len(y_test_unique) -
round(test_size * len(y_unique))) <= 1)
assert_true(abs(len(y_train_unique) -
round((1.0 - test_size) * len(y_unique))) <= 1)
def test_leave_label_out_changing_labels():
# Check that LeaveOneLabelOut and LeavePLabelOut work normally if
# the labels variable is changed before calling __iter__
labels = np.array([0, 1, 2, 1, 1, 2, 0, 0])
labels_changing = np.array(labels, copy=True)
lolo = cval.LeaveOneLabelOut(labels)
lolo_changing = cval.LeaveOneLabelOut(labels_changing)
lplo = cval.LeavePLabelOut(labels, p=2)
lplo_changing = cval.LeavePLabelOut(labels_changing, p=2)
labels_changing[:] = 0
for llo, llo_changing in [(lolo, lolo_changing), (lplo, lplo_changing)]:
for (train, test), (train_chan, test_chan) in zip(llo, llo_changing):
assert_array_equal(train, train_chan)
assert_array_equal(test, test_chan)
def test_cross_val_score():
clf = MockClassifier()
for a in range(-10, 10):
clf.a = a
# Smoke test
scores = cval.cross_val_score(clf, X, y)
assert_array_equal(scores, clf.score(X, y))
# test with multioutput y
scores = cval.cross_val_score(clf, X_sparse, X)
assert_array_equal(scores, clf.score(X_sparse, X))
scores = cval.cross_val_score(clf, X_sparse, y)
assert_array_equal(scores, clf.score(X_sparse, y))
# test with multioutput y
scores = cval.cross_val_score(clf, X_sparse, X)
assert_array_equal(scores, clf.score(X_sparse, X))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
scores = cval.cross_val_score(clf, X.tolist(), y.tolist())
clf = CheckingClassifier(check_y=list_check)
scores = cval.cross_val_score(clf, X, y.tolist())
assert_raises(ValueError, cval.cross_val_score, clf, X, y,
scoring="sklearn")
# test with 3d X and
X_3d = X[:, :, np.newaxis]
clf = MockClassifier(allow_nd=True)
scores = cval.cross_val_score(clf, X_3d, y)
clf = MockClassifier(allow_nd=False)
assert_raises(ValueError, cval.cross_val_score, clf, X_3d, y)
def test_cross_val_score_pandas():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cval.cross_val_score(clf, X_df, y_ser)
def test_cross_val_score_mask():
# test that cross_val_score works with boolean masks
svm = SVC(kernel="linear")
iris = load_iris()
X, y = iris.data, iris.target
cv_indices = cval.KFold(len(y), 5)
scores_indices = cval.cross_val_score(svm, X, y, cv=cv_indices)
cv_indices = cval.KFold(len(y), 5)
cv_masks = []
for train, test in cv_indices:
mask_train = np.zeros(len(y), dtype=np.bool)
mask_test = np.zeros(len(y), dtype=np.bool)
mask_train[train] = 1
mask_test[test] = 1
cv_masks.append((train, test))
scores_masks = cval.cross_val_score(svm, X, y, cv=cv_masks)
assert_array_equal(scores_indices, scores_masks)
def test_cross_val_score_precomputed():
# test for svm with precomputed kernel
svm = SVC(kernel="precomputed")
iris = load_iris()
X, y = iris.data, iris.target
linear_kernel = np.dot(X, X.T)
score_precomputed = cval.cross_val_score(svm, linear_kernel, y)
svm = SVC(kernel="linear")
score_linear = cval.cross_val_score(svm, X, y)
assert_array_equal(score_precomputed, score_linear)
# Error raised for non-square X
svm = SVC(kernel="precomputed")
assert_raises(ValueError, cval.cross_val_score, svm, X, y)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cval.cross_val_score, svm,
linear_kernel.tolist(), y)
def test_cross_val_score_fit_params():
clf = MockClassifier()
n_samples = X.shape[0]
n_classes = len(np.unique(y))
DUMMY_INT = 42
DUMMY_STR = '42'
DUMMY_OBJ = object()
def assert_fit_params(clf):
# Function to test that the values are passed correctly to the
# classifier arguments for non-array type
assert_equal(clf.dummy_int, DUMMY_INT)
assert_equal(clf.dummy_str, DUMMY_STR)
assert_equal(clf.dummy_obj, DUMMY_OBJ)
fit_params = {'sample_weight': np.ones(n_samples),
'class_prior': np.ones(n_classes) / n_classes,
'sparse_sample_weight': W_sparse,
'sparse_param': P_sparse,
'dummy_int': DUMMY_INT,
'dummy_str': DUMMY_STR,
'dummy_obj': DUMMY_OBJ,
'callback': assert_fit_params}
cval.cross_val_score(clf, X, y, fit_params=fit_params)
def test_cross_val_score_score_func():
clf = MockClassifier()
_score_func_args = []
def score_func(y_test, y_predict):
_score_func_args.append((y_test, y_predict))
return 1.0
with warnings.catch_warnings(record=True):
scoring = make_scorer(score_func)
score = cval.cross_val_score(clf, X, y, scoring=scoring)
assert_array_equal(score, [1.0, 1.0, 1.0])
assert len(_score_func_args) == 3
def test_cross_val_score_errors():
class BrokenEstimator:
pass
assert_raises(TypeError, cval.cross_val_score, BrokenEstimator(), X)
def test_train_test_split_errors():
assert_raises(ValueError, cval.train_test_split)
assert_raises(ValueError, cval.train_test_split, range(3), train_size=1.1)
assert_raises(ValueError, cval.train_test_split, range(3), test_size=0.6,
train_size=0.6)
assert_raises(ValueError, cval.train_test_split, range(3),
test_size=np.float32(0.6), train_size=np.float32(0.6))
assert_raises(ValueError, cval.train_test_split, range(3),
test_size="wrong_type")
assert_raises(ValueError, cval.train_test_split, range(3), test_size=2,
train_size=4)
assert_raises(TypeError, cval.train_test_split, range(3),
some_argument=1.1)
assert_raises(ValueError, cval.train_test_split, range(3), range(42))
def test_train_test_split():
X = np.arange(100).reshape((10, 10))
X_s = coo_matrix(X)
y = np.arange(10)
# simple test
split = cval.train_test_split(X, y, test_size=None, train_size=.5)
X_train, X_test, y_train, y_test = split
assert_equal(len(y_test), len(y_train))
# test correspondence of X and y
assert_array_equal(X_train[:, 0], y_train * 10)
assert_array_equal(X_test[:, 0], y_test * 10)
# conversion of lists to arrays (deprecated?)
with warnings.catch_warnings(record=True):
split = cval.train_test_split(X, X_s, y.tolist(), allow_lists=False)
X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
assert_array_equal(X_train, X_s_train.toarray())
assert_array_equal(X_test, X_s_test.toarray())
# don't convert lists to anything else by default
split = cval.train_test_split(X, X_s, y.tolist())
X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
assert_true(isinstance(y_train, list))
assert_true(isinstance(y_test, list))
# allow nd-arrays
X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
split = cval.train_test_split(X_4d, y_3d)
assert_equal(split[0].shape, (7, 5, 3, 2))
assert_equal(split[1].shape, (3, 5, 3, 2))
assert_equal(split[2].shape, (7, 7, 11))
assert_equal(split[3].shape, (3, 7, 11))
# test stratification option
y = np.array([1, 1, 1, 1, 2, 2, 2, 2])
for test_size, exp_test_size in zip([2, 4, 0.25, 0.5, 0.75],
[2, 4, 2, 4, 6]):
train, test = cval.train_test_split(y,
test_size=test_size,
stratify=y,
random_state=0)
assert_equal(len(test), exp_test_size)
assert_equal(len(test) + len(train), len(y))
# check the 1:1 ratio of ones and twos in the data is preserved
assert_equal(np.sum(train == 1), np.sum(train == 2))
def train_test_split_pandas():
    # check train_test_split doesn't destroy pandas dataframe
types = [MockDataFrame]
try:
from pandas import DataFrame
types.append(DataFrame)
except ImportError:
pass
for InputFeatureType in types:
# X dataframe
X_df = InputFeatureType(X)
X_train, X_test = cval.train_test_split(X_df)
assert_true(isinstance(X_train, InputFeatureType))
assert_true(isinstance(X_test, InputFeatureType))
def train_test_split_mock_pandas():
# X mock dataframe
X_df = MockDataFrame(X)
X_train, X_test = cval.train_test_split(X_df)
assert_true(isinstance(X_train, MockDataFrame))
assert_true(isinstance(X_test, MockDataFrame))
X_train_arr, X_test_arr = cval.train_test_split(X_df, allow_lists=False)
assert_true(isinstance(X_train_arr, np.ndarray))
assert_true(isinstance(X_test_arr, np.ndarray))
def test_cross_val_score_with_score_func_classification():
iris = load_iris()
clf = SVC(kernel='linear')
# Default score (should be the accuracy score)
scores = cval.cross_val_score(clf, iris.data, iris.target, cv=5)
assert_array_almost_equal(scores, [0.97, 1., 0.97, 0.97, 1.], 2)
# Correct classification score (aka. zero / one score) - should be the
# same as the default estimator score
zo_scores = cval.cross_val_score(clf, iris.data, iris.target,
scoring="accuracy", cv=5)
assert_array_almost_equal(zo_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
# F1 score (class are balanced so f1_score should be equal to zero/one
# score
f1_scores = cval.cross_val_score(clf, iris.data, iris.target,
scoring="f1_weighted", cv=5)
assert_array_almost_equal(f1_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
def test_cross_val_score_with_score_func_regression():
X, y = make_regression(n_samples=30, n_features=20, n_informative=5,
random_state=0)
reg = Ridge()
# Default score of the Ridge regression estimator
scores = cval.cross_val_score(reg, X, y, cv=5)
assert_array_almost_equal(scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
# R2 score (aka. determination coefficient) - should be the
# same as the default estimator score
r2_scores = cval.cross_val_score(reg, X, y, scoring="r2", cv=5)
assert_array_almost_equal(r2_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
# Mean squared error; this is a loss function, so "scores" are negative
mse_scores = cval.cross_val_score(reg, X, y, cv=5,
scoring="mean_squared_error")
expected_mse = np.array([-763.07, -553.16, -274.38, -273.26, -1681.99])
assert_array_almost_equal(mse_scores, expected_mse, 2)
# Explained variance
scoring = make_scorer(explained_variance_score)
ev_scores = cval.cross_val_score(reg, X, y, cv=5, scoring=scoring)
assert_array_almost_equal(ev_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
def test_permutation_score():
iris = load_iris()
X = iris.data
X_sparse = coo_matrix(X)
y = iris.target
svm = SVC(kernel='linear')
cv = cval.StratifiedKFold(y, 2)
score, scores, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_greater(score, 0.9)
assert_almost_equal(pvalue, 0.0, 1)
score_label, _, pvalue_label = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy",
labels=np.ones(y.size), random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# check that we obtain the same results with a sparse representation
svm_sparse = SVC(kernel='linear')
cv_sparse = cval.StratifiedKFold(y, 2)
score_label, _, pvalue_label = cval.permutation_test_score(
svm_sparse, X_sparse, y, n_permutations=30, cv=cv_sparse,
scoring="accuracy", labels=np.ones(y.size), random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# test with custom scoring object
def custom_score(y_true, y_pred):
return (((y_true == y_pred).sum() - (y_true != y_pred).sum())
/ y_true.shape[0])
scorer = make_scorer(custom_score)
score, _, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=100, scoring=scorer, cv=cv, random_state=0)
assert_almost_equal(score, .93, 2)
assert_almost_equal(pvalue, 0.01, 3)
# set random y
y = np.mod(np.arange(len(y)), 3)
score, scores, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_less(score, 0.5)
assert_greater(pvalue, 0.2)
def test_cross_val_generator_with_indices():
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
# explicitly passing indices value is deprecated
loo = cval.LeaveOneOut(4)
lpo = cval.LeavePOut(4, 2)
kf = cval.KFold(4, 2)
skf = cval.StratifiedKFold(y, 2)
lolo = cval.LeaveOneLabelOut(labels)
lopo = cval.LeavePLabelOut(labels, 2)
ps = cval.PredefinedSplit([1, 1, 2, 2])
ss = cval.ShuffleSplit(2)
for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]:
for train, test in cv:
assert_not_equal(np.asarray(train).dtype.kind, 'b')
assert_not_equal(np.asarray(train).dtype.kind, 'b')
X[train], X[test]
y[train], y[test]
@ignore_warnings
def test_cross_val_generator_with_default_indices():
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
loo = cval.LeaveOneOut(4)
lpo = cval.LeavePOut(4, 2)
kf = cval.KFold(4, 2)
skf = cval.StratifiedKFold(y, 2)
lolo = cval.LeaveOneLabelOut(labels)
lopo = cval.LeavePLabelOut(labels, 2)
ss = cval.ShuffleSplit(2)
ps = cval.PredefinedSplit([1, 1, 2, 2])
for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]:
for train, test in cv:
assert_not_equal(np.asarray(train).dtype.kind, 'b')
assert_not_equal(np.asarray(train).dtype.kind, 'b')
X[train], X[test]
y[train], y[test]
def test_shufflesplit_errors():
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=2.0)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=1.0)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=0.1,
train_size=0.95)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=11)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=10)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=8, train_size=3)
assert_raises(ValueError, cval.ShuffleSplit, 10, train_size=1j)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=None,
train_size=None)
def test_shufflesplit_reproducible():
# Check that iterating twice on the ShuffleSplit gives the same
# sequence of train-test when the random_state is given
ss = cval.ShuffleSplit(10, random_state=21)
assert_array_equal(list(a for a, b in ss), list(a for a, b in ss))
def test_safe_split_with_precomputed_kernel():
clf = SVC()
clfp = SVC(kernel="precomputed")
iris = load_iris()
X, y = iris.data, iris.target
K = np.dot(X, X.T)
cv = cval.ShuffleSplit(X.shape[0], test_size=0.25, random_state=0)
tr, te = list(cv)[0]
X_tr, y_tr = cval._safe_split(clf, X, y, tr)
K_tr, y_tr2 = cval._safe_split(clfp, K, y, tr)
assert_array_almost_equal(K_tr, np.dot(X_tr, X_tr.T))
X_te, y_te = cval._safe_split(clf, X, y, te, tr)
K_te, y_te2 = cval._safe_split(clfp, K, y, te, tr)
assert_array_almost_equal(K_te, np.dot(X_te, X_tr.T))
def test_cross_val_score_allow_nans():
# Check that cross_val_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
cval.cross_val_score(p, X, y, cv=5)
def test_train_test_split_allow_nans():
# Check that train_test_split allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
cval.train_test_split(X, y, test_size=0.2, random_state=42)
def test_permutation_test_score_allow_nans():
# Check that permutation_test_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
cval.permutation_test_score(p, X, y, cv=5)
def test_check_cv_return_types():
X = np.ones((9, 2))
cv = cval.check_cv(3, X, classifier=False)
assert_true(isinstance(cv, cval.KFold))
y_binary = np.array([0, 1, 0, 1, 0, 0, 1, 1, 1])
cv = cval.check_cv(3, X, y_binary, classifier=True)
assert_true(isinstance(cv, cval.StratifiedKFold))
y_multiclass = np.array([0, 1, 0, 1, 2, 1, 2, 0, 2])
cv = cval.check_cv(3, X, y_multiclass, classifier=True)
assert_true(isinstance(cv, cval.StratifiedKFold))
X = np.ones((5, 2))
y_multilabel = [[1, 0, 1], [1, 1, 0], [0, 0, 0], [0, 1, 1], [1, 0, 0]]
cv = cval.check_cv(3, X, y_multilabel, classifier=True)
assert_true(isinstance(cv, cval.KFold))
y_multioutput = np.array([[1, 2], [0, 3], [0, 0], [3, 1], [2, 0]])
cv = cval.check_cv(3, X, y_multioutput, classifier=True)
assert_true(isinstance(cv, cval.KFold))
def test_cross_val_score_multilabel():
X = np.array([[-3, 4], [2, 4], [3, 3], [0, 2], [-3, 1],
[-2, 1], [0, 0], [-2, -1], [-1, -2], [1, -2]])
y = np.array([[1, 1], [0, 1], [0, 1], [0, 1], [1, 1],
[0, 1], [1, 0], [1, 1], [1, 0], [0, 0]])
clf = KNeighborsClassifier(n_neighbors=1)
scoring_micro = make_scorer(precision_score, average='micro')
scoring_macro = make_scorer(precision_score, average='macro')
scoring_samples = make_scorer(precision_score, average='samples')
score_micro = cval.cross_val_score(clf, X, y, scoring=scoring_micro, cv=5)
score_macro = cval.cross_val_score(clf, X, y, scoring=scoring_macro, cv=5)
score_samples = cval.cross_val_score(clf, X, y,
scoring=scoring_samples, cv=5)
assert_almost_equal(score_micro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 3])
assert_almost_equal(score_macro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
assert_almost_equal(score_samples, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
def test_cross_val_predict():
boston = load_boston()
X, y = boston.data, boston.target
cv = cval.KFold(len(boston.target))
est = Ridge()
# Naive loop (should be same as cross_val_predict):
preds2 = np.zeros_like(y)
for train, test in cv:
est.fit(X[train], y[train])
preds2[test] = est.predict(X[test])
preds = cval.cross_val_predict(est, X, y, cv=cv)
assert_array_almost_equal(preds, preds2)
preds = cval.cross_val_predict(est, X, y)
assert_equal(len(preds), len(y))
cv = cval.LeaveOneOut(len(y))
preds = cval.cross_val_predict(est, X, y, cv=cv)
assert_equal(len(preds), len(y))
Xsp = X.copy()
Xsp *= (Xsp > np.median(Xsp))
Xsp = coo_matrix(Xsp)
preds = cval.cross_val_predict(est, Xsp, y)
assert_array_almost_equal(len(preds), len(y))
preds = cval.cross_val_predict(KMeans(), X)
assert_equal(len(preds), len(y))
def bad_cv():
for i in range(4):
yield np.array([0, 1, 2, 3]), np.array([4, 5, 6, 7, 8])
assert_raises(ValueError, cval.cross_val_predict, est, X, y, cv=bad_cv())
def test_cross_val_predict_input_types():
clf = Ridge()
# Smoke test
predictions = cval.cross_val_predict(clf, X, y)
assert_equal(predictions.shape, (10,))
# test with multioutput y
predictions = cval.cross_val_predict(clf, X_sparse, X)
assert_equal(predictions.shape, (10, 2))
predictions = cval.cross_val_predict(clf, X_sparse, y)
assert_array_equal(predictions.shape, (10,))
# test with multioutput y
predictions = cval.cross_val_predict(clf, X_sparse, X)
assert_array_equal(predictions.shape, (10, 2))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
predictions = cval.cross_val_predict(clf, X.tolist(), y.tolist())
clf = CheckingClassifier(check_y=list_check)
predictions = cval.cross_val_predict(clf, X, y.tolist())
# test with 3d X and
X_3d = X[:, :, np.newaxis]
check_3d = lambda x: x.ndim == 3
clf = CheckingClassifier(check_X=check_3d)
predictions = cval.cross_val_predict(clf, X_3d, y)
assert_array_equal(predictions.shape, (10,))
def test_cross_val_predict_pandas():
    # check cross_val_predict doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cval.cross_val_predict(clf, X_df, y_ser)
def test_sparse_fit_params():
iris = load_iris()
X, y = iris.data, iris.target
clf = MockClassifier()
fit_params = {'sparse_sample_weight': coo_matrix(np.eye(X.shape[0]))}
a = cval.cross_val_score(clf, X, y, fit_params=fit_params)
assert_array_equal(a, np.ones(3))
def test_check_is_partition():
p = np.arange(100)
assert_true(cval._check_is_partition(p, 100))
assert_false(cval._check_is_partition(np.delete(p, 23), 100))
p[0] = 23
assert_false(cval._check_is_partition(p, 100))
def test_cross_val_predict_sparse_prediction():
# check that cross_val_predict gives same result for sparse and dense input
X, y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=False,
return_indicator=True,
random_state=1)
X_sparse = csr_matrix(X)
y_sparse = csr_matrix(y)
classif = OneVsRestClassifier(SVC(kernel='linear'))
preds = cval.cross_val_predict(classif, X, y, cv=10)
preds_sparse = cval.cross_val_predict(classif, X_sparse, y_sparse, cv=10)
preds_sparse = preds_sparse.toarray()
assert_array_almost_equal(preds_sparse, preds)
| bsd-3-clause |
thiagopnts/servo | tests/heartbeats/process_logs.py | 139 | 16143 | #!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import argparse
import matplotlib.pyplot as plt
import numpy as np
import os
from os import path
import sys
import warnings
HB_LOG_IDX_START_TIME = 7
HB_LOG_IDX_END_TIME = HB_LOG_IDX_START_TIME + 1
HB_LOG_IDX_START_ENERGY = 14
HB_LOG_IDX_END_ENERGY = HB_LOG_IDX_START_ENERGY + 1
ENERGY_PROFILER_NAME = 'ApplicationHeartbeat'
SUMMARY_OUTPUT = "summary.txt"
SUMMARY_TIME_IDX = 8
SUMMARY_ENERGY_IDX = SUMMARY_TIME_IDX + 1
SUMMARY_POWER_IDX = SUMMARY_ENERGY_IDX + 1
def autolabel(rects, ax):
"""Attach some text labels.
"""
for rect in rects:
ax.text(rect.get_x() + rect.get_width() / 2., 1.05 * rect.get_height(), '', ha='center', va='bottom')
def plot_raw_totals(config, plot_data, max_time, max_time_std, max_energy, max_energy_std, output_dir, normalize):
"""Plot the raw totals for a configuration.
Keyword arguments:
config -- configuration name
plot_data -- (profiler name, total_time, total_time_std, total_energy, total_energy_std)
max_time, max_time_std, max_energy, max_energy_std -- single values
normalize -- True/False
"""
plot_data = sorted(plot_data)
keys = [p for (p, tt, tts, te, tes) in plot_data]
total_times = [tt for (p, tt, tts, te, tes) in plot_data]
total_times_std = [tts for (p, tt, tts, te, tes) in plot_data]
total_energies = [te for (p, tt, tts, te, tes) in plot_data]
total_energies_std = [tes for (p, tt, tts, te, tes) in plot_data]
fig, ax1 = plt.subplots()
ind = np.arange(len(keys)) # the x locations for the groups
width = 0.35 # the width of the bars
# add some text for labels, title and axes ticks
ax1.set_title('Time/Energy Data for Configuration ' + config)
ax1.set_xticks(ind + width)
ax1.set_xticklabels(keys, rotation=45)
fig.set_tight_layout(True)
fig.set_size_inches(len(plot_data) / 1.5, 8)
ax2 = ax1.twinx()
# Normalize
if normalize:
total_times_std /= np.sum(total_times)
total_times /= np.sum(total_times)
total_energies_std /= np.sum(total_energies)
total_energies /= np.sum(total_energies)
ax1.set_ylabel('Time (Normalized)')
ax2.set_ylabel('Energy (Normalized)')
else:
        # convert times from ns to ms
total_times_std /= np.array(1000000.0)
total_times /= np.array(1000000.0)
total_energies_std /= np.array(1000000.0)
total_energies /= np.array(1000000.0)
ax1.set_ylabel('Time (ms)')
ax2.set_ylabel('Energy (Joules)')
rects1 = ax1.bar(ind, total_times, width, color='r', yerr=total_times_std)
rects2 = ax2.bar(ind + width, total_energies, width, color='y', yerr=total_energies_std)
ax1.legend([rects1[0], rects2[0]], ['Time', 'Energy'])
# set axis
x1, x2, y1, y2 = plt.axis()
if normalize:
ax1.set_ylim(ymin=0, ymax=1)
ax2.set_ylim(ymin=0, ymax=1)
else:
ax1.set_ylim(ymin=0, ymax=((max_time + max_time_std) * 1.25 / 1000000.0))
ax2.set_ylim(ymin=0, ymax=((max_energy + max_energy_std) * 1.25 / 1000000.0))
autolabel(rects1, ax1)
autolabel(rects2, ax2)
# plt.show()
plt.savefig(path.join(output_dir, config + ".png"))
plt.close(fig)
def create_raw_total_data(config_data):
"""Get the raw data to plot for a configuration
Return: [(profiler, time_mean, time_stddev, energy_mean, energy_stddev)]
Keyword arguments:
config_data -- (trial, trial_data)
"""
# We can't assume that the same number of heartbeats are always issued across trials
# key: profiler name; value: list of timing sums for each trial
profiler_total_times = {}
# key: profiler name; value: list of energy sums for each trial
profiler_total_energies = {}
for (t, td) in config_data:
for (profiler, ts, te, es, ee) in td:
# sum the total times and energies for each profiler in this trial
total_time = np.sum(te - ts)
total_energy = np.sum(ee - es)
# add to list to be averaged later
time_list = profiler_total_times.get(profiler, [])
time_list.append(total_time)
profiler_total_times[profiler] = time_list
energy_list = profiler_total_energies.get(profiler, [])
energy_list.append(total_energy)
profiler_total_energies[profiler] = energy_list
# Get mean and stddev for time and energy totals
return [(profiler,
np.mean(profiler_total_times[profiler]),
np.std(profiler_total_times[profiler]),
np.mean(profiler_total_energies[profiler]),
np.std(profiler_total_energies[profiler]))
for profiler in profiler_total_times.keys()]
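# Shape sketch of the value returned above (illustrative, not normative):
#   [("ApplicationHeartbeat", time_mean, time_std, energy_mean, energy_std),
#    ("SomeOtherProfiler",    time_mean, time_std, energy_mean, energy_std),
#    ...]
# where the means/stddevs are taken over the per-trial totals collected in
# profiler_total_times and profiler_total_energies.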
def plot_all_raw_totals(config_list, output_dir):
"""Plot column charts of the raw total time/energy spent in each profiler category.
Keyword arguments:
config_list -- [(config, result of process_config_dir(...))]
output_dir -- where to write plots to
"""
raw_total_norm_out_dir = path.join(output_dir, 'raw_totals_normalized')
os.makedirs(raw_total_norm_out_dir)
raw_total_out_dir = path.join(output_dir, 'raw_totals')
os.makedirs(raw_total_out_dir)
# (name, (profiler, (time_mean, time_stddev, energy_mean, energy_stddev)))
raw_totals_data = [(config, create_raw_total_data(config_data)) for (config, config_data) in config_list]
mean_times = []
mean_times_std = []
mean_energies = []
mean_energies_std = []
for profiler_tup in [config_tup[1] for config_tup in raw_totals_data]:
for (p, tt, tts, te, tes) in profiler_tup:
mean_times.append(tt)
mean_times_std.append(tts)
mean_energies.append(te)
mean_energies_std.append(tes)
# get consistent max time/energy values across plots
max_t = np.max(mean_times)
max_t_std = np.max(mean_times_std)
max_e = np.max(mean_energies)
max_e_std = np.max(mean_energies_std)
[plot_raw_totals(data[0], data[1], max_t, max_t_std, max_e, max_e_std, raw_total_norm_out_dir, True)
for data in raw_totals_data]
[plot_raw_totals(data[0], data[1], max_t, max_t_std, max_e, max_e_std, raw_total_out_dir, False)
for data in raw_totals_data]
def plot_trial_time_series(config, trial, trial_data, max_end_time, max_power, output_dir):
"""Plot time series for a single trial.
Keyword arguments:
config -- the config name
trial -- the trial name
trial_data -- [(profiler, [start times], [end times], [start energies], [end energies])]
max_end_time -- single value to use as max X axis value (for consistency across trials)
output_dir -- the output directory
"""
# TODO: Some profilers may have parallel tasks - need to identify this on plots
max_end_time = max_end_time / 1000000.0
trial_data = sorted(trial_data)
fig, ax1 = plt.subplots()
keys = [p for (p, ts, te, es, ee) in trial_data]
# add some text for labels, title and axes ticks
ax1.set_title('Profiler Activity for ' + config + ', ' + trial)
ax1.set_xlabel('Time (ms)')
ax1.grid(True)
width = 8 # the width of the bars
ax1.set_yticks(10 * np.arange(1, len(keys) + 2))
ax1.set_yticklabels(keys)
ax1.set_ylim(ymin=0, ymax=((len(trial_data) + 1) * 10))
ax1.set_xlim(xmin=0, xmax=max_end_time)
fig.set_tight_layout(True)
fig.set_size_inches(16, len(trial_data) / 3)
i = 10
for (p, ts, te, es, ee) in trial_data:
xranges = [(ts[j] / 1000000.0, (te[j] - ts[j]) / 1000000.0) for j in xrange(len(ts))]
ax1.broken_barh(xranges, (i - 0.5 * width, width))
i += 10
# place a vbar at the final time for this trial
last_profiler_times = map(np.nanmax, filter(lambda x: len(x) > 0, [te for (p, ts, te, es, ee) in trial_data]))
plt.axvline(np.max(last_profiler_times) / 1000000.0, color='black')
power_times = []
power_values = []
for (p, ts, te, es, ee) in trial_data:
if p == ENERGY_PROFILER_NAME:
power_times = te / 1000000.0
power_values = (ee - es) / ((te - ts) / 1000.0)
ax2 = ax1.twinx()
ax2.set_xlim(xmin=0, xmax=max_end_time)
ax2.set_ylim(ymin=0, ymax=max_power)
ax2.set_ylabel('Power (Watts)')
ax2.plot(power_times, power_values, color='r')
# plt.show()
plt.savefig(path.join(output_dir, "ts_" + config + "_" + trial + ".png"))
plt.close(fig)
def hb_energy_times_to_power(es, ee, ts, te):
"""Compute power from start and end energy and times.
Return: power values
"""
return (ee - es) / ((te - ts) / 1000.0)
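# Illustrative sketch (not part of the original script): assuming the heartbeat
# logs report energy in microjoules and time in nanoseconds (consistent with the
# unit conversions and axis labels used above), the expression works out to
# microjoules per microsecond, i.e. watts. For example:
#   hb_energy_times_to_power(np.array([0]), np.array([2000000]),
#                            np.array([0]), np.array([1000000000]))
#   -> array([ 2.])   # 2 J over 1 s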
def plot_all_time_series(config_list, output_dir):
"""Plot column charts of the raw total time/energy spent in each profiler category.
Keyword arguments:
config_list -- [(config, result of process_config_dir(...))]
output_dir -- where to write plots to
"""
time_series_out_dir = path.join(output_dir, 'time_series')
os.makedirs(time_series_out_dir)
max_end_times = []
max_power_values = []
for (c, cd) in config_list:
for (t, td) in cd:
trial_max_end_times = map(np.nanmax, filter(lambda x: len(x) > 0, [te for (p, ts, te, es, ee) in td]))
max_end_times.append(np.nanmax(trial_max_end_times))
for (p, ts, te, es, ee) in td:
# We only care about the energy profiler (others aren't reliable for instant power anyway)
if p == ENERGY_PROFILER_NAME and len(te) > 0:
max_power_values.append(np.nanmax(hb_energy_times_to_power(es, ee, ts, te)))
max_time = np.nanmax(max_end_times)
max_power = np.nanmax(np.array(max_power_values)) * 1.2 # leave a little space at the top
for (config, config_data) in config_list:
[plot_trial_time_series(config, trial, trial_data, max_time, max_power, time_series_out_dir)
for (trial, trial_data) in config_data]
def read_heartbeat_log(profiler_hb_log):
"""Read a heartbeat log file.
Return: (profiler name, [start times], [end times], [start energies], [end energies], [instant powers])
Keyword arguments:
profiler_hb_log -- the file to read
"""
with warnings.catch_warnings():
try:
warnings.simplefilter("ignore")
time_start, time_end, energy_start, energy_end = \
np.loadtxt(profiler_hb_log,
dtype=np.dtype('uint64'),
skiprows=1,
usecols=(HB_LOG_IDX_START_TIME,
HB_LOG_IDX_END_TIME,
HB_LOG_IDX_START_ENERGY,
HB_LOG_IDX_END_ENERGY),
unpack=True,
ndmin=1)
except ValueError:
time_start, time_end, energy_start, energy_end = [], [], [], []
name = path.split(profiler_hb_log)[1].split('-')[1].split('.')[0]
return (name,
np.atleast_1d(time_start),
np.atleast_1d(time_end),
np.atleast_1d(energy_start),
np.atleast_1d(energy_end))
def process_trial_dir(trial_dir):
"""Process trial directory.
Return: [(profiler name, [start times], [end times], [start energies], [end energies])]
Time and energy are normalized to 0 start values.
Keyword arguments:
trial_dir -- the directory for this trial
"""
log_data = map(lambda h: read_heartbeat_log(path.join(trial_dir, h)),
filter(lambda f: f.endswith(".log"), os.listdir(trial_dir)))
# Find the earliest timestamps and energy readings
min_t = np.nanmin(map(np.nanmin, filter(lambda x: len(x) > 0, [ts for (profiler, ts, te, es, ee) in log_data])))
min_e = np.nanmin(map(np.nanmin, filter(lambda x: len(x) > 0, [es for (profiler, ts, te, es, ee) in log_data])))
# Normalize timing/energy data to start values of 0
return [(profiler, ts - min_t, te - min_t, es - min_e, ee - min_e) for (profiler, ts, te, es, ee) in log_data]
def process_config_dir(config_dir):
"""Process a configuration directory.
Return: [(trial, [(profiler name, [start times], [end times], [start energies], [end energies])])]
Keyword arguments:
config_dir -- the directory for this configuration - contains subdirectories for each trial
"""
return [(trial_dir, process_trial_dir(path.join(config_dir, trial_dir))) for trial_dir in os.listdir(config_dir)]
def process_logs(log_dir):
"""Process log directory.
Return: [(config, [(trial, [(profiler name, [start times], [end times], [start energies], [end energies])])])]
Keyword arguments:
log_dir -- the log directory to process - contains subdirectories for each configuration
"""
return [((config_dir.split('_')[1], process_config_dir(path.join(log_dir, config_dir))))
for config_dir in os.listdir(log_dir)]
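# Directory layout implied by process_logs and find_best_executions (an
# illustrative sketch; names other than summary.txt are placeholders):
#   <log_dir>/
#       <prefix>_<config>/                # configuration name taken after '_'
#           <trial>/
#               <prefix>-<profiler>.log   # per-profiler heartbeat logs
#               summary.txt               # parsed by find_best_executions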
def find_best_executions(log_dir):
"""Get the best time, energy, and power from the characterization summaries.
Return: ((config, trial, min_time), (config, trial, min_energy), (config, trial, min_power))
Keyword arguments:
    log_dir -- the log directory to scan; contains per-configuration and per-trial summary files.
"""
DEFAULT = ('', '', 1000000000.0)
min_time = DEFAULT
min_energy = DEFAULT
min_power = DEFAULT
for config_dir in os.listdir(log_dir):
for trial_dir in os.listdir(path.join(log_dir, config_dir)):
with open(path.join(log_dir, config_dir, trial_dir, SUMMARY_OUTPUT), "r") as s:
lines = s.readlines()
time = float(lines[SUMMARY_TIME_IDX].split(':')[1])
energy = int(lines[SUMMARY_ENERGY_IDX].split(':')[1])
power = float(lines[SUMMARY_POWER_IDX].split(':')[1])
if time < min_time[2]:
min_time = (config_dir, trial_dir, time)
if energy < min_energy[2]:
min_energy = (config_dir, trial_dir, energy)
                if power < min_power[2]:
min_power = (config_dir, trial_dir, power)
return (min_time, min_energy, min_power)
def main():
"""This script processes the log files from the "characterize.py" script and produces visualizations.
"""
# Default log directory
directory = 'heartbeat_logs'
# Default output directory
output_dir = 'plots'
# Default android
android = False
# Parsing the input of the script
parser = argparse.ArgumentParser(description="Process Heartbeat log files from characterization")
parser.add_argument("-d", "--directory",
default=directory,
help="Heartbeat log directory \"-d heartbeat_logs\"")
parser.add_argument("-o", "--output",
default=output_dir,
help="Specify the log output directory, for example \"-o plots\"")
parser.add_argument("--android",
action="store_true",
dest="android",
default=False,
help="Specify if processing results from Android")
args = parser.parse_args()
if args.directory:
directory = args.directory
if args.output:
output_dir = args.output
if args.android:
android = args.android
if not os.path.exists(directory):
print "Input directory does not exist: " + directory
sys.exit(1)
if os.path.exists(output_dir):
print "Output directory already exists: " + output_dir
sys.exit(1)
res = process_logs(directory)
if not android:
best = find_best_executions(directory)
print 'Best time:', best[0]
print 'Best energy:', best[1]
print 'Best power:', best[2]
os.makedirs(output_dir)
plot_all_raw_totals(res, output_dir)
plot_all_time_series(res, output_dir)
if __name__ == "__main__":
main()
| mpl-2.0 |
tawsifkhan/scikit-learn | examples/svm/plot_separating_hyperplane.py | 294 | 1273 | """
=========================================
SVM: Maximum margin separating hyperplane
=========================================
Plot the maximum margin separating hyperplane within a two-class
separable dataset using a Support Vector Machine classifier with
linear kernel.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# we create 40 separable points
np.random.seed(0)
X = np.r_[np.random.randn(20, 2) - [2, 2], np.random.randn(20, 2) + [2, 2]]
Y = [0] * 20 + [1] * 20
# fit the model
clf = svm.SVC(kernel='linear')
clf.fit(X, Y)
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(-5, 5)
yy = a * xx - (clf.intercept_[0]) / w[1]
# plot the parallels to the separating hyperplane that pass through the
# support vectors
b = clf.support_vectors_[0]
yy_down = a * xx + (b[1] - a * b[0])
b = clf.support_vectors_[-1]
yy_up = a * xx + (b[1] - a * b[0])
# plot the line, the points, and the nearest vectors to the plane
plt.plot(xx, yy, 'k-')
plt.plot(xx, yy_down, 'k--')
plt.plot(xx, yy_up, 'k--')
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1],
s=80, facecolors='none')
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.axis('tight')
plt.show()
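# Additional illustration (not part of the original example): the width of the
# margin drawn above is 2 / ||w|| for the fitted linear SVM.
margin = 2.0 / np.linalg.norm(clf.coef_[0])
print("margin width: %.3f" % margin)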
| bsd-3-clause |
AtsushiHashimoto/exp_idc | tools/spectral_gap.py | 1 | 11524 | #!/usr/bin/env python
# coding: utf-8
import tempfile
import subprocess
import warnings
from os.path import dirname
from sklearn.base import BaseEstimator, ClusterMixin
from sklearn.utils import check_random_state, check_symmetric
from sklearn.utils.validation import check_array
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.neighbors import kneighbors_graph
from sklearn.cluster.k_means_ import k_means
from sklearn.cluster.spectral import discretize
from sklearn.manifold.spectral_embedding_ import _graph_is_connected,_set_diag
from sklearn.utils.graph import graph_laplacian
from sklearn.utils.arpack import eigsh
from sklearn.utils.extmath import _deterministic_vector_sign_flip
import numpy as np
from scipy import sparse
class SpectralClusteringSG(BaseEstimator, ClusterMixin):
# src_pat ex.) "^.*/X_(\d{3}).csv$"
def __init__(self,
max_clusters,\
n_init=10, gamma=1., n_neighbors=10,\
eigen_tol=0.0, degree=3, coef0=1,
kernel_params = None,\
eigen_solver = 'arpack',\
random_state = 0, \
affinity = 'precomputed',\
assign_labels = 'discretize'):
self.max_clusters = max_clusters
self.n_init = n_init
self.gamma = gamma
self.n_neighbors = n_neighbors
self.eigen_tol = eigen_tol
self.degree = degree
self.coef0=coef0
self.kernel_params = kernel_params
self.eigen_solver = eigen_solver
self.random_state = random_state
self.affinity = affinity
self.assign_labels = assign_labels
def fit(self,X,y=None):
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'],
dtype=np.float64)
if X.shape[0] == X.shape[1] and self.affinity != "precomputed":
warnings.warn("The spectral clustering API has changed. ``fit``"
"now constructs an affinity matrix from data. To use"
" a custom affinity matrix, "
"set ``affinity=precomputed``.")
if self.affinity == 'nearest_neighbors':
connectivity = kneighbors_graph(X, n_neighbors=self.n_neighbors, include_self=True)
self.affinity_matrix_ = 0.5 * (connectivity + connectivity.T)
elif self.affinity == 'precomputed':
self.affinity_matrix_ = X
else:
params = self.kernel_params
if params is None:
params = {}
if not callable(self.affinity):
params['gamma'] = self.gamma
params['degree'] = self.degree
params['coef0'] = self.coef0
self.affinity_matrix_ = pairwise_kernels(X, metric=self.affinity,
filter_params=True,
**params)
random_state = check_random_state(self.random_state)
self.labels_ = self.spectral_clustering_sg(self.affinity_matrix_,
max_clusters=self.max_clusters,
eigen_solver=self.eigen_solver,
random_state=random_state,
n_init=self.n_init,
eigen_tol=self.eigen_tol,
assign_labels=self.assign_labels)
return self
@property
def _pairwise(self):
return self.affinity == "precomputed"
def estimate_num_of_clusters(self,lambdas_list):
dif = list()
for i in range(1, len(lambdas_list)-1):
lambda_K0 = lambdas_list[i]
lambda_K1 = lambdas_list[i+1]
dif.append(lambda_K0 - lambda_K1)
return dif.index(max(dif))+2
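    # Illustration of the eigengap heuristic implemented above (a sketch, not
    # part of the original code): the estimate is placed where consecutive
    # eigenvalues drop the most. For a spectrum such as
    #     [1.0, 0.98, 0.95, 0.40, 0.35, 0.30]
    # the largest consecutive drop is between the 3rd and 4th values, so
    #     SpectralClusteringSG(max_clusters=6).estimate_num_of_clusters(
    #         [1.0, 0.98, 0.95, 0.40, 0.35, 0.30])
    # returns 3. The gap between the first two eigenvalues is deliberately
    # skipped, so at least 2 clusters are always reported.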
def spectral_clustering_sg(self, affinity, max_clusters=8,
eigen_solver=None, random_state=None, n_init=10,
eigen_tol=0.0, assign_labels='kmeans'):
if assign_labels not in ('kmeans', 'discretize'):
raise ValueError("The 'assign_labels' parameter should be "
"'kmeans' or 'discretize', but '%s' was given"
% assign_labels)
random_state = check_random_state(random_state)
n_components = max_clusters
maps, lambdas = self.spectral_embedding(affinity, n_components=n_components,
eigen_solver=eigen_solver,
random_state=random_state,
eigen_tol=eigen_tol, drop_first=False)
        # determine n_clusters from the spectral gap
n_clusters = self.estimate_num_of_clusters(lambdas)
if assign_labels == 'kmeans':
_, labels, _ = k_means(maps, n_clusters, random_state=0,
n_init=n_init)
else:
labels = discretize(maps, random_state=random_state)
return labels
    def spectral_embedding(self, adjacency, n_components=8, eigen_solver=None,
                           random_state=None, eigen_tol=0.0, drop_first=True):
        """Spectral embedding that also returns the eigenvalues.

        See the original implementation at
        https://github.com/scikit-learn/scikit-learn/blob/14031f6/sklearn/manifold/spectral_embedding_.py#L133

        Customization 1: return ``lambdas`` together with the embedded matrix.
        Customization 2: ``norm_laplacian`` is always True.
        """
norm_laplacian=True
adjacency = check_symmetric(adjacency)
try:
from pyamg import smoothed_aggregation_solver
except ImportError:
if eigen_solver == "amg":
raise ValueError("The eigen_solver was set to 'amg', but pyamg is "
"not available.")
if eigen_solver is None:
eigen_solver = 'arpack'
elif eigen_solver not in ('arpack', 'lobpcg', 'amg'):
raise ValueError("Unknown value for eigen_solver: '%s'."
"Should be 'amg', 'arpack', or 'lobpcg'"
% eigen_solver)
random_state = check_random_state(random_state)
n_nodes = adjacency.shape[0]
# Whether to drop the first eigenvector
if drop_first:
n_components = n_components + 1
if not _graph_is_connected(adjacency):
warnings.warn("Graph is not fully connected, spectral embedding"
" may not work as expected.")
laplacian, dd = graph_laplacian(adjacency,
normed=norm_laplacian, return_diag=True)
if (eigen_solver == 'arpack'
or eigen_solver != 'lobpcg' and
(not sparse.isspmatrix(laplacian)
or n_nodes < 5 * n_components)):
# lobpcg used with eigen_solver='amg' has bugs for low number of nodes
# for details see the source code in scipy:
# https://github.com/scipy/scipy/blob/v0.11.0/scipy/sparse/linalg/eigen
# /lobpcg/lobpcg.py#L237
# or matlab:
# http://www.mathworks.com/matlabcentral/fileexchange/48-lobpcg-m
laplacian = _set_diag(laplacian, 1, norm_laplacian)
# Here we'll use shift-invert mode for fast eigenvalues
# (see http://docs.scipy.org/doc/scipy/reference/tutorial/arpack.html
# for a short explanation of what this means)
# Because the normalized Laplacian has eigenvalues between 0 and 2,
# I - L has eigenvalues between -1 and 1. ARPACK is most efficient
# when finding eigenvalues of largest magnitude (keyword which='LM')
# and when these eigenvalues are very large compared to the rest.
# For very large, very sparse graphs, I - L can have many, many
# eigenvalues very near 1.0. This leads to slow convergence. So
# instead, we'll use ARPACK's shift-invert mode, asking for the
# eigenvalues near 1.0. This effectively spreads-out the spectrum
# near 1.0 and leads to much faster convergence: potentially an
# orders-of-magnitude speedup over simply using keyword which='LA'
# in standard mode.
try:
# We are computing the opposite of the laplacian inplace so as
# to spare a memory allocation of a possibly very large array
laplacian *= -1
lambdas, diffusion_map = eigsh(laplacian, k=n_components,
sigma=1.0, which='LM',
tol=eigen_tol)
embedding = diffusion_map.T[n_components::-1] * dd
except RuntimeError:
# When submatrices are exactly singular, an LU decomposition
# in arpack fails. We fallback to lobpcg
eigen_solver = "lobpcg"
# Revert the laplacian to its opposite to have lobpcg work
laplacian *= -1
if eigen_solver == 'amg':
# Use AMG to get a preconditioner and speed up the eigenvalue
# problem.
if not sparse.issparse(laplacian):
warnings.warn("AMG works better for sparse matrices")
# lobpcg needs double precision floats
laplacian = check_array(laplacian, dtype=np.float64,
accept_sparse=True)
laplacian = _set_diag(laplacian, 1, norm_laplacian)
ml = smoothed_aggregation_solver(check_array(laplacian, 'csr'))
M = ml.aspreconditioner()
X = random_state.rand(laplacian.shape[0], n_components + 1)
X[:, 0] = dd.ravel()
lambdas, diffusion_map = lobpcg(laplacian, X, M=M, tol=1.e-12,
largest=False)
embedding = diffusion_map.T * dd
if embedding.shape[0] == 1:
raise ValueError
elif eigen_solver == "lobpcg":
# lobpcg needs double precision floats
laplacian = check_array(laplacian, dtype=np.float64,
accept_sparse=True)
if n_nodes < 5 * n_components + 1:
# see note above under arpack why lobpcg has problems with small
# number of nodes
# lobpcg will fallback to eigh, so we short circuit it
if sparse.isspmatrix(laplacian):
laplacian = laplacian.toarray()
lambdas, diffusion_map = eigh(laplacian)
embedding = diffusion_map.T[:n_components] * dd
else:
                laplacian = _set_diag(laplacian, 1, norm_laplacian)
# We increase the number of eigenvectors requested, as lobpcg
# doesn't behave well in low dimension
X = random_state.rand(laplacian.shape[0], n_components + 1)
X[:, 0] = dd.ravel()
lambdas, diffusion_map = lobpcg(laplacian, X, tol=1e-15,
largest=False, maxiter=2000)
embedding = diffusion_map.T[:n_components] * dd
if embedding.shape[0] == 1:
raise ValueError
embedding = _deterministic_vector_sign_flip(embedding)
if drop_first:
return embedding[1:n_components].T, lambdas
else:
return embedding[:n_components].T, lambdas
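if __name__ == '__main__':
    # Minimal usage sketch, not part of the original file: it builds a toy
    # dataset of two well-separated Gaussian blobs and a precomputed RBF
    # affinity via ``pairwise_kernels`` (already imported above). The point
    # counts, ``gamma``, and ``max_clusters`` values are illustrative
    # assumptions, not settings taken from the original experiments.
    rng = np.random.RandomState(0)
    points = np.vstack([rng.randn(30, 2), rng.randn(30, 2) + 5.0])
    affinity = pairwise_kernels(points, metric='rbf', gamma=1.0)
    model = SpectralClusteringSG(max_clusters=8, affinity='precomputed')
    labels = model.fit(affinity).labels_
    print("estimated labels:", labels)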
| bsd-2-clause |