repo_name | path | copies | size | content | license
---|---|---|---|---|---
hainm/scikit-learn | benchmarks/bench_sample_without_replacement.py | 397 | 8008 | """
Benchmarks for sampling without replacement of integers.
"""
from __future__ import division
from __future__ import print_function
import gc
import sys
import optparse
from datetime import datetime
import operator
import matplotlib.pyplot as plt
import numpy as np
import random
from sklearn.externals.six.moves import xrange
from sklearn.utils.random import sample_without_replacement
def compute_time(t_start, delta):
mu_second = 0.0 + 10 ** 6 # number of microseconds in a second
return delta.seconds + delta.microseconds / mu_second
def bench_sample(sampling, n_population, n_samples):
gc.collect()
# start time
t_start = datetime.now()
sampling(n_population, n_samples)
delta = (datetime.now() - t_start)
# stop time
time = compute_time(t_start, delta)
return time
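# Illustrative usage sketch (not part of the original benchmark): time a
# single draw with `bench_sample`, using the `sample_without_replacement`
# utility imported above. The population / sample sizes are arbitrary
# example values.
def _example_single_timing():
    elapsed = bench_sample(
        lambda n_population, n_samples: sample_without_replacement(
            n_population, n_samples, method="auto"),
        n_population=100000,
        n_samples=1000)
    print("sampled 1000 of 100000 in %f seconds" % elapsed)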
if __name__ == "__main__":
###########################################################################
# Option parser
###########################################################################
op = optparse.OptionParser()
op.add_option("--n-times",
dest="n_times", default=5, type=int,
help="Benchmark results are average over n_times experiments")
op.add_option("--n-population",
dest="n_population", default=100000, type=int,
help="Size of the population to sample from.")
op.add_option("--n-step",
dest="n_steps", default=5, type=int,
help="Number of step interval between 0 and n_population.")
default_algorithms = "custom-tracking-selection,custom-auto," \
"custom-reservoir-sampling,custom-pool,"\
"python-core-sample,numpy-permutation"
op.add_option("--algorithm",
dest="selected_algorithm",
default=default_algorithms,
type=str,
help="Comma-separated list of transformer to benchmark. "
"Default: %default. \nAvailable: %default")
# op.add_option("--random-seed",
# dest="random_seed", default=13, type=int,
# help="Seed used by the random number generators.")
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
selected_algorithm = opts.selected_algorithm.split(',')
for key in selected_algorithm:
if key not in default_algorithms.split(','):
raise ValueError("Unknown sampling algorithm \"%s\" not in (%s)."
% (key, default_algorithms))
###########################################################################
# List sampling algorithm
###########################################################################
# We assume that each sampling algorithm has the following signature:
# sample(n_population, n_sample)
#
sampling_algorithm = {}
###########################################################################
# Set Python core input
sampling_algorithm["python-core-sample"] = \
lambda n_population, n_sample: \
random.sample(xrange(n_population), n_sample)
###########################################################################
# Set custom automatic method selection
sampling_algorithm["custom-auto"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="auto",
random_state=random_state)
###########################################################################
# Set custom tracking based method
sampling_algorithm["custom-tracking-selection"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="tracking_selection",
random_state=random_state)
###########################################################################
# Set custom reservoir based method
sampling_algorithm["custom-reservoir-sampling"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="reservoir_sampling",
random_state=random_state)
###########################################################################
# Set custom pool based method
sampling_algorithm["custom-pool"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="pool",
random_state=random_state)
###########################################################################
# Numpy permutation based
sampling_algorithm["numpy-permutation"] = \
lambda n_population, n_sample: \
np.random.permutation(n_population)[:n_sample]
###########################################################################
# Remove unspecified algorithm
sampling_algorithm = dict((key, value)
for key, value in sampling_algorithm.items()
if key in selected_algorithm)
###########################################################################
# Perform benchmark
###########################################################################
time = {}
n_samples = np.linspace(start=0, stop=opts.n_population,
num=opts.n_steps).astype(np.int)
ratio = n_samples / opts.n_population
print('Benchmarks')
print("===========================")
for name in sorted(sampling_algorithm):
print("Perform benchmarks for %s..." % name, end="")
time[name] = np.zeros(shape=(opts.n_steps, opts.n_times))
for step in xrange(opts.n_steps):
for it in xrange(opts.n_times):
time[name][step, it] = bench_sample(sampling_algorithm[name],
opts.n_population,
n_samples[step])
print("done")
print("Averaging results...", end="")
for name in sampling_algorithm:
time[name] = np.mean(time[name], axis=1)
print("done\n")
# Print results
###########################################################################
print("Script arguments")
print("===========================")
arguments = vars(opts)
print("%s \t | %s " % ("Arguments".ljust(16),
"Value".center(12),))
print(25 * "-" + ("|" + "-" * 14) * 1)
for key, value in arguments.items():
print("%s \t | %s " % (str(key).ljust(16),
str(value).strip().center(12)))
print("")
print("Sampling algorithm performance:")
print("===============================")
print("Results are averaged over %s repetition(s)." % opts.n_times)
print("")
fig = plt.figure('scikit-learn sample w/o replacement benchmark results')
plt.title("n_population = %s, n_times = %s" %
(opts.n_population, opts.n_times))
ax = fig.add_subplot(111)
for name in sampling_algorithm:
ax.plot(ratio, time[name], label=name)
ax.set_xlabel('ratio of n_sample / n_population')
ax.set_ylabel('Time (s)')
ax.legend()
# Sort legend labels
handles, labels = ax.get_legend_handles_labels()
hl = sorted(zip(handles, labels), key=operator.itemgetter(1))
handles2, labels2 = zip(*hl)
ax.legend(handles2, labels2, loc=0)
plt.show()
| bsd-3-clause |
ua-snap/downscale | old/bin/hur_cru_ts31_to_cl20_downscaling.py | 3 | 22421 | # # #
# Current implementation of the cru ts31 (ts32) delta downscaling procedure
#
# Author: Michael Lindgren ([email protected])
# # #
import numpy as np
def write_gtiff( output_arr, template_meta, output_filename, compress=True ):
'''
DESCRIPTION:
------------
output a GeoTiff given a numpy ndarray, rasterio-style
metadata dictionary, and an output_filename.
If a multiband file is to be processed, the Longitude
dimension is expected to be the right-most.
--> dimensions should be (band, latitude, longitude)
ARGUMENTS:
----------
output_arr = [numpy.ndarray] with longitude as the right-most dimension
template_meta = [dict] rasterio-style raster meta dictionary. Typically
found in a template raster by: rasterio.open( fn ).meta
output_filename = [str] path to and name of the output GeoTiff to be
created. Currently only 'GTiff' is supported.
compress = [bool] if True (default) LZW-compression is applied to the
output GeoTiff. If False, no compression is applied.
* this can also be added (along with many other gdal creation options)
to the template meta as a key value pair template_meta.update( compress='lzw' ).
See Rasterio documentation for more details. This is just a common one that is
supported here.
RETURNS:
--------
string path to the new output_filename created
'''
import os
import warnings
if 'transform' in template_meta.keys():
_ = template_meta.pop( 'transform' )
if not output_filename.endswith( '.tif' ):
warnings.warn( 'output_filename does not end with ".tif", it has been fixed for you.' )
output_filename = os.path.splitext( output_filename )[0] + '.tif'
if output_arr.ndim == 2:
# add in a new dimension - can get you into trouble with very large rasters...
output_arr = output_arr[ np.newaxis, ... ]
elif output_arr.ndim < 2:
raise ValueError( 'output_arr must have at least 2 dimensions' )
nbands, nrows, ncols = output_arr.shape
if template_meta[ 'count' ] != nbands:
raise ValueError( 'template_meta[ "count" ] must match output_arr bands' )
if compress == True and 'compress' not in template_meta.keys():
template_meta.update( compress='lzw' )
with rasterio.open( output_filename, 'w', **template_meta ) as out:
for band in range( 1, nbands+1 ):
out.write( output_arr[ band-1, ... ], band )
return output_filename
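# Minimal usage sketch (an assumption, not part of the original module): write
# a single-band array to disk re-using the metadata of an existing raster as a
# template. The file names are placeholders; rasterio must be importable.
def _example_write_gtiff(template_fn='template.tif', out_fn='example_out.tif'):
    import rasterio
    with rasterio.open(template_fn) as template:
        meta = template.meta
        arr = template.read(1).astype('float32')
    # write_gtiff expects template_meta['count'] to match the number of bands
    meta.update(count=1, dtype='float32')
    return write_gtiff(arr, meta, out_fn, compress=True)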
def shiftgrid(lon0,datain,lonsin,start=True,cyclic=360.0):
import numpy as np
"""
Shift global lat/lon grid east or west.
.. tabularcolumns:: |l|L|
============== ====================================================
Arguments Description
============== ====================================================
lon0 starting longitude for shifted grid
(ending longitude if start=False). lon0 must be on
input grid (within the range of lonsin).
datain original data with longitude the right-most
dimension.
lonsin original longitudes.
============== ====================================================
.. tabularcolumns:: |l|L|
============== ====================================================
Keywords Description
============== ====================================================
start if True, lon0 represents the starting longitude
of the new grid. if False, lon0 is the ending
longitude. Default True.
cyclic width of periodic domain (default 360)
============== ====================================================
returns ``dataout,lonsout`` (data and longitudes on shifted grid).
"""
if np.fabs(lonsin[-1]-lonsin[0]-cyclic) > 1.e-4:
# Use all data instead of raising ValueError('cyclic point not included')
start_idx = 0
else:
# If cyclic, remove the duplicate point
start_idx = 1
if lon0 < lonsin[0] or lon0 > lonsin[-1]:
raise ValueError('lon0 outside of range of lonsin')
i0 = np.argmin(np.fabs(lonsin-lon0))
i0_shift = len(lonsin)-i0
if np.ma.isMA(datain):
dataout = np.ma.zeros(datain.shape,datain.dtype)
else:
dataout = np.zeros(datain.shape,datain.dtype)
if np.ma.isMA(lonsin):
lonsout = np.ma.zeros(lonsin.shape,lonsin.dtype)
else:
lonsout = np.zeros(lonsin.shape,lonsin.dtype)
if start:
lonsout[0:i0_shift] = lonsin[i0:]
else:
lonsout[0:i0_shift] = lonsin[i0:]-cyclic
dataout[...,0:i0_shift] = datain[...,i0:]
if start:
lonsout[i0_shift:] = lonsin[start_idx:i0+start_idx]+cyclic
else:
lonsout[i0_shift:] = lonsin[start_idx:i0+start_idx]
dataout[...,i0_shift:] = datain[...,start_idx:i0+start_idx]
return dataout,lonsout
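# Small self-contained sketch (illustrative only) of what `shiftgrid` does:
# rotate a pacific-centered (0..359) longitude grid to a greenwich-centered
# (-180..179) grid, the same call pattern used further below with lon0=180.
def _example_shiftgrid():
    lons = np.arange(0., 360., 1.)   # 0, 1, ..., 359
    data = np.tile(lons, (3, 1))     # 3 latitude rows, values equal longitude
    dataout, lonsout = shiftgrid(180., data, lons, start=False)
    # lonsout now runs -180 .. 179 and the data columns are rolled to match
    assert lonsout[0] == -180. and lonsout[-1] == 179.
    return dataout, lonsout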
def bounds_to_extent( bounds ):
'''
take input rasterio bounds object and return an extent
'''
l,b,r,t = bounds
return [ (l,b), (r,b), (r,t), (l,t), (l,b) ]
def padded_bounds( rst, npixels, crs ):
'''
expand the bounds of a raster by npixels in each direction and
return the new, expanded bounds.
rst: rasterio raster object
npixels: tuple of 4 (left(-),bottom(-),right(+),top(+)) number of pixels to
expand in each direction. for 5 pixels in each direction it would look like
this: (-5, -5, 5, 5) or just in the right and top directions like this:
(0,0,5,5).
crs: epsg code or proj4string defining the geospatial reference
system
RETURNS:
list of the expanded bounds [left, bottom, right, top]
'''
import rasterio, os, sys
from shapely.geometry import Polygon
resolution = rst.res[0]
new_bounds = [ bound+(expand*resolution) for bound, expand in zip( rst.bounds, npixels ) ]
# new_ext = bounds_to_extent( new_bounds )
return new_bounds
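# Usage sketch (illustrative only, placeholder filename): expand a raster's
# bounds by 5 pixels on every side, as described in the docstring above.
def _example_padded_bounds(raster_fn='some_raster.tif'):
    import rasterio
    with rasterio.open(raster_fn) as rst:
        return padded_bounds(rst, (-5, -5, 5, 5), rst.crs)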
def xyz_to_grid( x, y, z, grid, method='cubic', output_dtype=np.float32 ):
'''
interpolate points to a grid. simple wrapper around
scipy.interpolate.griddata. Points and grid must be
in the same coordinate system
x = 1-D np.array of x coordinates / x,y,z must be same length
y = 1-D np.array of y coordinates / x,y,z must be same length
z = 1-D np.array of z coordinates / x,y,z must be same length
grid = tuple of meshgrid as made using numpy.meshgrid()
order (xi, yi)
method = one of 'cubic', 'nearest', 'linear'
'''
import numpy as np
from scipy.interpolate import griddata
zi = griddata( (x, y), z, grid, method=method )
zi = np.flipud( zi.astype( output_dtype ) )
return zi
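# Tiny self-contained sketch (illustrative only) of `xyz_to_grid`: interpolate
# a handful of scattered points onto a small regular grid.
def _example_xyz_to_grid():
    x = np.array([0.0, 1.0, 0.0, 1.0, 0.5])
    y = np.array([0.0, 0.0, 1.0, 1.0, 0.5])
    z = x + y  # a simple plane, so the result is easy to eyeball
    xi, yi = np.meshgrid(np.linspace(0, 1, 5), np.linspace(0, 1, 5))
    # returns a 5x5 float32 array, flipped top-to-bottom like the function does
    return xyz_to_grid(x, y, z, grid=(xi, yi), method='linear')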
def generate_anomalies( df, meshgrid_tuple, lons_pcll, template_raster_fn, src_transform, src_crs, src_nodata, output_filename, *args, **kwargs ):
'''
run the interpolation to a grid, and reprojection / resampling to the Alaska / Canada rasters
extent, resolution, origin (template_raster).
This function is intended to be used to run a pathos.multiprocessing Pool's map function
across a list of pre-computed arguments.
RETURNS:
[str] path to the output filename generated
'''
template_raster = rasterio.open( template_raster_fn )
template_meta = template_raster.meta
if 'transform' in template_meta.keys():
template_meta.pop( 'transform' )
template_meta.update( compress='lzw', crs={'init':'epsg:3338'} )
interp_arr = xyz_to_grid( np.array(df['lon'].tolist()), \
np.array(df['lat'].tolist()), \
np.array(df['anom'].tolist()), grid=meshgrid_tuple, method='cubic' )
src_nodata = -9999.0 # nodata
interp_arr[ np.isnan( interp_arr ) ] = src_nodata
dat, lons = shiftgrid( 180., interp_arr, lons_pcll, start=False )
output_arr = np.empty_like( template_raster.read( 1 ) )
reproject( dat, output_arr, src_transform=src_transform, src_crs=src_crs, src_nodata=src_nodata, \
dst_transform=template_meta['affine'], dst_crs=template_meta['crs'],\
dst_nodata=None, resampling=RESAMPLING.cubic_spline, num_threads=1, SOURCE_EXTRA=1000 )
# mask it with the internal mask in the template raster, where 0 is oob.
output_arr = np.ma.masked_where( template_raster.read_masks( 1 ) == 0, output_arr )
output_arr.fill_value = template_meta[ 'nodata' ]
output_arr = output_arr.filled()
return write_gtiff( output_arr, template_meta, output_filename, compress=True )
def fn_month_grouper( x ):
'''
take a filename and return the month element of the naming convention
'''
return os.path.splitext(os.path.basename(x))[0].split( '_' )[5]
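# Example of the filename convention assumed by `fn_month_grouper` (matching
# how the anomaly filenames are built further below): splitting the basename
# on '_' puts the two-digit month at index 5. The filename is hypothetical.
def _example_fn_month_grouper():
    import os
    fn = 'hur_pct_cru_ts31_anom_01_1901.tif'
    month = os.path.splitext(os.path.basename(fn))[0].split('_')[5]
    assert month == '01'
    return month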
def convert_to_hur( tas_arr, vap_arr ):
esa_arr = 6.112 * np.exp( 17.62 * tas_arr/ (243.12 + tas_arr) )
# esa_arr = 6.112 * np.exp( 22.46 * tas_arr / (272.62 + tas_arr) )
return vap_arr/esa_arr * 100
def convert_to_vap( tas_arr, hur_arr ):
''' create vapor pressure from the CRU tas / hur (inverse of convert_to_hur) '''
esa_arr = 6.112 * np.exp( 17.62 * tas_arr/ (243.12 + tas_arr) )
# esa_arr = 6.112 * np.exp( 22.46 * tas_arr / (272.62 + tas_arr) )
return (hur_arr*esa_arr)/100
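# Consistency sketch (illustrative only): `convert_to_hur` and `convert_to_vap`
# are inverses of one another for a fixed temperature, since both use the same
# Magnus-type saturation vapor pressure (esa) term.
def _example_hur_vap_roundtrip():
    tas = np.array([-10.0, 0.0, 15.0])   # temperature, deg C
    vap = np.array([2.0, 4.0, 10.0])     # vapor pressure, hPa
    hur = convert_to_hur(tas, vap)       # relative humidity, percent
    assert np.allclose(convert_to_vap(tas, hur), vap)
    return hur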
def downscale_cru_historical( file_list, cru_cl20_arr, output_path, downscaling_operation ):
'''
take a list of cru_historical anomalies filenames, groupby month,
then downscale with the cru_cl20 climatology as a numpy 2d ndarray
that is also on the same grid as the anomalies files.
(intended to be the akcan 1km/2km extent).
operation can be one of 'mult', 'add', 'div' and represents the
downscaling operation to be used to scale the anomalies on top of the baseline.
this is based on how the anomalies were initially calculated.
RETURNS:
output path location of the new downscaled files.
'''
from functools import partial
def f( anomaly_fn, baseline_arr, output_path, downscaling_operation ):
def add( cru, anom ):
return cru + anom
def mult( cru, anom ):
return cru * anom
def div( cru, anom ):
# return cru / anom
# this one may not be useful, but the placeholder is here
raise NotImplementedError
cru_ts31 = rasterio.open( anomaly_fn )
meta = cru_ts31.meta
meta.update( compress='lzw', crs={'init':'epsg:3338'} )
cru_ts31 = cru_ts31.read( 1 )
operation_switch = { 'add':add, 'mult':mult, 'div':div }
# this is hardwired stuff for this fairly hardwired script.
output_filename = os.path.basename( anomaly_fn ).replace( 'anom', 'downscaled' )
output_filename = os.path.join( output_path, output_filename )
# both files need to be masked here since we use a RIDICULOUS oob value...
# for both tas and cld, values less than -200 are out of the range of acceptable values and it
# grabs the -3.4... mask values. so lets mask using this
baseline_arr = np.ma.masked_where( baseline_arr < -200, baseline_arr )
cru_ts31 = np.ma.masked_where( cru_ts31 < -200, cru_ts31 )
output_arr = operation_switch[ downscaling_operation ]( baseline_arr, cru_ts31 )
output_arr[ np.isinf(output_arr) ] = meta[ 'nodata' ]
# remove errant >100% humidity values from the mix -- Steph McAfee indicated that making all 99.0 is ok
output_arr[ (output_arr > 100) & (output_arr < 500) ] = 99 # <500 is there to keep from grabbing the mask.
if 'transform' in meta.keys():
meta.pop( 'transform' )
with rasterio.open( output_filename, 'w', **meta ) as out:
out.write( output_arr, 1 )
return output_filename
partial_f = partial( f, baseline_arr=cru_cl20_arr, output_path=output_path, downscaling_operation=downscaling_operation )
cru_ts31 = file_list.apply( lambda fn: partial_f( anomaly_fn=fn ) )
return output_path
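# Sketch of the intended pairing between anomaly type and downscaling
# operation (see the anomaly computation further below): 'relative' anomalies
# (monthly value / climatology) are applied with 'mult', while 'absolute'
# anomalies (monthly value - climatology) are applied with 'add'. The numbers
# here are illustrative only.
def _example_downscaling_operations():
    climatology, monthly, baseline = 80.0, 88.0, 75.0  # e.g. hur in percent
    relative_anom = monthly / climatology              # pair with 'mult'
    absolute_anom = monthly - climatology              # pair with 'add'
    downscaled_mult = baseline * relative_anom
    downscaled_add = baseline + absolute_anom
    return downscaled_mult, downscaled_add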
if __name__ == '__main__':
import rasterio, xray, os, glob, affine
from rasterio.warp import reproject, RESAMPLING
import geopandas as gpd
import pandas as pd
import numpy as np
from collections import OrderedDict
from shapely.geometry import Point
from pathos import multiprocessing as mp
import argparse
# parse the commandline arguments
parser = argparse.ArgumentParser( description='downscale CRU TS3.1 relative humidity (derived from tas and vap) anomalies onto the CRU CL2.0 climatology' )
parser.add_argument( "-hhi", "--cru_ts31_vap", action='store', dest='cru_ts31_vap', type=str, help="path to historical CRU TS3.1 vap input NetCDF file" )
parser.add_argument( "-thi", "--cru_ts31_tas", action='store', dest='cru_ts31_tas', type=str, help="path to historical CRU TS3.1 tas input NetCDF file" )
parser.add_argument( "-ci", "--cl20_path", action='store', dest='cl20_path', type=str, help="path to historical CRU TS2.0 Climatology input directory in single-band GTiff Format" )
parser.add_argument( "-tr", "--template_raster_fn", action='store', dest='template_raster_fn', type=str, help="path to ALFRESCO Formatted template raster to match outputs to." )
parser.add_argument( "-base", "--base_path", action='store', dest='base_path', type=str, help="string path to the folder to put the output files into" )
parser.add_argument( "-bt", "--year_begin", action='store', dest='year_begin', type=int, help="string in format YYYY of the beginning year in the series" )
parser.add_argument( "-et", "--year_end", action='store', dest='year_end', type=int, help="string in format YYYY of the ending year in the series" )
parser.add_argument( "-cbt", "--climatology_begin_time", nargs='?', const='196101', action='store', dest='climatology_begin', type=str, help="string in format YYYY of the beginning year of the climatology period" )
parser.add_argument( "-cet", "--climatology_end_time", nargs='?', const='199012', action='store', dest='climatology_end', type=str, help="string in format YYYY of the ending year of the climatology period" )
parser.add_argument( "-nc", "--ncores", nargs='?', const=2, action='store', dest='ncores', type=int, help="integer valueof number of cores to use. default:2" )
parser.add_argument( "-at", "--anomalies_calc_type", nargs='?', const='absolute', action='store', dest='anomalies_calc_type', type=str, help="string of 'proportional' or 'absolute' to inform of anomalies calculation type to perform." )
parser.add_argument( "-m", "--metric", nargs='?', const='metric', action='store', dest='metric', type=str, help="string of whatever the metric type is of the outputs to put in the filename." )
parser.add_argument( "-dso", "--downscaling_operation", action='store', dest='downscaling_operation', type=str, help="string of 'add', 'mult', 'div', which refers to the type or downscaling operation to use." )
parser.add_argument( "-v", "--variable", action='store', dest='variable', type=str, help="string of the abbreviation used to identify the variable (i.e. cld)." )
# parse args
args = parser.parse_args()
# unpack args
ncores = args.ncores
base_path = args.base_path
cru_ts31_vap = args.cru_ts31_vap
cru_ts31_tas = args.cru_ts31_tas
cl20_path = args.cl20_path
template_raster_fn = args.template_raster_fn
anomalies_calc_type = args.anomalies_calc_type
downscaling_operation = args.downscaling_operation
climatology_begin = args.climatology_begin
climatology_end = args.climatology_end
year_begin = args.year_begin
year_end = args.year_end
variable = args.variable
metric = args.metric
# make some output directories if they are not there already to dump
# our output files
anomalies_path = os.path.join( base_path, variable, 'anom' )
if not os.path.exists( anomalies_path ):
os.makedirs( anomalies_path )
downscaled_path = os.path.join( base_path, variable, 'downscaled' )
if not os.path.exists( downscaled_path ):
os.makedirs( downscaled_path )
# open with xray
cru_ts31_vap = xray.open_dataset( cru_ts31_vap )
cru_ts31_tas = xray.open_dataset( cru_ts31_tas )
# unpack the data
cru_ts31_vap = cru_ts31_vap.vap
cru_ts31_tas = cru_ts31_tas.tmp # varname is tmp in the CRU-universe
# create relative humidity from the CRU TS3.x tas / vap
# now it is a standard downscale from here.
cru_ts31 = convert_to_hur( cru_ts31_tas, cru_ts31_vap )
# open template raster
template_raster = rasterio.open( template_raster_fn )
template_meta = template_raster.meta
template_meta.update( crs={'init':'epsg:3338'} )
# make a mask with values of 0=nodata and 1=data
template_raster_mask = template_raster.read_masks( 1 )
template_raster_mask[ template_raster_mask == 255 ] = 1
# run the downscaling for the newly generated relative humidity from Vapor Pressure data
clim_ds = cru_ts31.loc[ {'time':slice(climatology_begin,climatology_end)} ]
climatology = clim_ds.groupby( 'time.month' ).mean( 'time' ) # [ MODIFIED: HUR from VAP Specific ]
if anomalies_calc_type == 'relative':
anomalies = cru_ts31.groupby( 'time.month' ) / climatology
if anomalies_calc_type == 'absolute':
anomalies = cru_ts31.groupby( 'time.month' ) - climatology
# rotate the anomalies to pacific centered latlong -- the input is in greenwich centered latlong
dat_pcll, lons_pcll = shiftgrid( 0., anomalies, anomalies.lon.data )
# # generate an expanded extent (from the template_raster) to interpolate across
template_raster = rasterio.open( template_raster_fn )
# output_resolution = (1000.0, 1000.0) # hardwired, but we are building this for IEM which requires 1km
template_meta = template_raster.meta
# # interpolate to a new grid
# get longitudes and latitudes using meshgrid
lo, la = [ i.ravel() for i in np.meshgrid( lons_pcll, anomalies.lat ) ] # mesh the lons/lats
# convert into GeoDataFrame and drop all the NaNs
df_list = [ pd.DataFrame({ 'anom':i.ravel(), 'lat':la, 'lon':lo }).dropna( axis=0, how='any' ) for i in dat_pcll ]
xi, yi = np.meshgrid( lons_pcll, anomalies.lat.data )
# meshgrid_tuple = np.meshgrid( lons_pcll, anomalies.lat.data )
# argument setup
src_transform = affine.Affine( 0.5, 0.0, -180.0, 0.0, -0.5, 90.0 )
src_crs = {'init':'epsg:4326'}
src_nodata = -9999.0
# output_filenames setup
years = np.arange( int(year_begin), int(year_end)+1, 1 ).astype( str ).tolist()
months = [ i if len(i)==2 else '0'+i for i in np.arange( 1, 12+1, 1 ).astype( str ).tolist() ]
month_year = [ (month, year) for year in years for month in months ]
output_filenames = [ os.path.join( anomalies_path, '_'.join([ variable,metric,'cru_ts31_anom',month,year])+'.tif' )
for month, year in month_year ]
# build a list of keyword args to pass to the pool of workers.
args_list = [ {'df':df, 'meshgrid_tuple':(xi, yi), 'lons_pcll':lons_pcll, \
'template_raster_fn':template_raster_fn, 'src_transform':src_transform, \
'src_crs':src_crs, 'src_nodata':src_nodata, 'output_filename':fn } \
for df, fn in zip( df_list, output_filenames ) ]
# interpolate / reproject / resample the anomalies to match template_raster_fn
if __name__ == '__main__':
pool = mp.Pool( processes=ncores )
out = pool.map( lambda args: generate_anomalies( **args ), args_list )
# out = map( lambda args: generate_anomalies( **args ), args_list )
pool.close()
# To Complete the CRU TS3.1 Downscaling we need the following:
# read in the pre-processed CL2.0 Cloud Climatology
l = sorted( glob.glob( os.path.join( cl20_path, '*.tif' ) ) ) # this could catch you.
cl20_dict = { month:rasterio.open( fn ).read( 1 ) for month, fn in zip( months, l ) }
# group the data by months
out = pd.Series( out )
out_months = out.apply( fn_month_grouper )
months_grouped = out.groupby( out_months )
# unpack groups for parallelization and make a list of tuples of arguments to pass to the downscale function
mg = [(i,j) for i,j in months_grouped ]
args_list = [ ( i[1], cl20_dict[i[0]], downscaled_path, downscaling_operation ) for i in mg ]
# downscale / write to disk
pool = mp.Pool( processes=ncores )
out = pool.map( lambda args: downscale_cru_historical( *args ), args_list )
# out = map( lambda args: downscale_cru_historical( *args ), args_list )
pool.close()
# # # # # HOW TO RUN THE APPLICATION # # # # # # #
# # input args -- argparse it
# import os
# os.chdir( '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/CODE/tem_ar5_inputs/downscale_cmip5/bin' )
# ncores = '10'
# base_path = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/TEM_Data/cru_october_final/cru_ts31'
# cru_ts31_vap = '/Data/Base_Data/Climate/World/CRU_grids/CRU_TS31/cru_ts_3_10.1901.2009.vap.dat.nc'
# cru_ts31_tas = '/Data/Base_Data/Climate/World/CRU_grids/CRU_TS31/cru_ts_3_10.1901.2009.tmp.nc'
# cl20_path = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/TEM_Data/cru_october_final/cru_cl20/hur/akcan' # hur
# template_raster_fn = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/TEM_Data/templates/tas_mean_C_AR5_GFDL-CM3_historical_01_1860.tif'
# anomalies_calc_type = 'relative' # 'absolute'
# downscaling_operation = 'mult' # 'add'
# climatology_begin = '1961'
# climatology_end = '1990'
# year_begin = '1901'
# year_end = '2009'
# variable = 'hur'
# metric = 'pct'
# args_tuples = [ ('hhi', cru_ts31_vap), ('thi', cru_ts31_tas), ('ci', cl20_path), ('tr', template_raster_fn),
# ('base', base_path), ('bt', year_begin), ('et', year_end),
# ('cbt', climatology_begin), ('cet', climatology_end),
# ('nc', ncores), ('at', anomalies_calc_type), ('m', metric),
# ('dso', downscaling_operation), ('v', variable) ]
# args = ''.join([ ' -'+flag+' '+value for flag, value in args_tuples ])
# os.system( 'ipython2.7 -i -- hur_cru_ts31_to_cl20_downscaling.py ' + args )
# # # CRU TS 3.2.3 example run -- test for working with more up-to-date files # # #
# import os
# os.chdir( '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/CODE/tem_ar5_inputs/downscale_cmip5/bin' )
# ncores = '10'
# base_path = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/TEM_Data/cru_ts323'
# cru_ts31_vap = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/TEM_Data/cru_ts323/cru_ts3.23.1901.2014.vap.dat.nc'
# cru_ts31_tas = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/TEM_Data/cru_ts323/cru_ts3.23.1901.2014.tmp.dat.nc'
# cl20_path = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/TEM_Data/cru_october_final/cru_cl20/hur/akcan' # hur
# template_raster_fn = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/TEM_Data/templates/tas_mean_C_AR5_GFDL-CM3_historical_01_1860.tif'
# anomalies_calc_type = 'relative' # 'absolute'
# downscaling_operation = 'mult' # 'add'
# climatology_begin = '1961'
# climatology_end = '1990'
# year_begin = '1901'
# year_end = '2014'
# variable = 'hur'
# metric = 'pct'
# args_tuples = [ ('hhi', cru_ts31_vap), ('thi', cru_ts31_tas), ('ci', cl20_path), ('tr', template_raster_fn),
# ('base', base_path), ('bt', year_begin), ('et', year_end),
# ('cbt', climatology_begin), ('cet', climatology_end),
# ('nc', ncores), ('at', anomalies_calc_type), ('m', metric),
# ('dso', downscaling_operation), ('v', variable) ]
# args = ''.join([ ' -'+flag+' '+value for flag, value in args_tuples ])
# os.system( 'ipython2.7 -i -- hur_cru_ts31_to_cl20_downscaling.py ' + args )
| mit |
r-mart/scikit-learn | sklearn/metrics/tests/test_ranking.py | 127 | 40813 | from __future__ import division, print_function
import numpy as np
from itertools import product
import warnings
from scipy.sparse import csr_matrix
from sklearn import datasets
from sklearn import svm
from sklearn import ensemble
from sklearn.datasets import make_multilabel_classification
from sklearn.random_projection import sparse_random_matrix
from sklearn.utils.validation import check_array, check_consistent_length
from sklearn.utils.validation import check_random_state
from sklearn.utils.testing import assert_raises, clean_warning_registry
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.metrics import auc
from sklearn.metrics import average_precision_score
from sklearn.metrics import coverage_error
from sklearn.metrics import label_ranking_average_precision_score
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import label_ranking_loss
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
from sklearn.metrics.base import UndefinedMetricWarning
###############################################################################
# Utilities for testing
def make_prediction(dataset=None, binary=False):
"""Make some classification predictions on a toy dataset using a SVC
If binary is True restrict to a binary classification problem instead of a
multiclass classification problem
"""
if dataset is None:
# import some data to play with
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
if binary:
# restrict to a binary classification task
X, y = X[y < 2], y[y < 2]
n_samples, n_features = X.shape
p = np.arange(n_samples)
rng = check_random_state(37)
rng.shuffle(p)
X, y = X[p], y[p]
half = int(n_samples / 2)
# add noisy features to make the problem harder and avoid perfect results
rng = np.random.RandomState(0)
X = np.c_[X, rng.randn(n_samples, 200 * n_features)]
# run classifier, get class probabilities and label predictions
clf = svm.SVC(kernel='linear', probability=True, random_state=0)
probas_pred = clf.fit(X[:half], y[:half]).predict_proba(X[half:])
if binary:
# only interested in probabilities of the positive case
# XXX: do we really want a special API for the binary case?
probas_pred = probas_pred[:, 1]
y_pred = clf.predict(X[half:])
y_true = y[half:]
return y_true, y_pred, probas_pred
###############################################################################
# Tests
def _auc(y_true, y_score):
"""Alternative implementation to check for correctness of
`roc_auc_score`."""
pos_label = np.unique(y_true)[1]
# Count the number of times positive samples are correctly ranked above
# negative samples.
pos = y_score[y_true == pos_label]
neg = y_score[y_true != pos_label]
diff_matrix = pos.reshape(1, -1) - neg.reshape(-1, 1)
n_correct = np.sum(diff_matrix > 0)
return n_correct / float(len(pos) * len(neg))
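# A small worked example (illustrative, not part of the original tests) of the
# pairwise-ranking definition used in `_auc` above: with y_true = [0, 0, 1] and
# y_score = [0.1, 0.4, 0.35], only one of the two (positive, negative) pairs is
# ranked correctly, so the AUC is 0.5.
def _auc_toy_example():
    y_true = np.array([0, 0, 1])
    y_score = np.array([0.1, 0.4, 0.35])
    assert _auc(y_true, y_score) == 0.5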
def _average_precision(y_true, y_score):
"""Alternative implementation to check for correctness of
`average_precision_score`."""
pos_label = np.unique(y_true)[1]
n_pos = np.sum(y_true == pos_label)
order = np.argsort(y_score)[::-1]
y_score = y_score[order]
y_true = y_true[order]
score = 0
for i in range(len(y_score)):
if y_true[i] == pos_label:
# Compute precision up to document i
# i.e, percentage of relevant documents up to document i.
prec = 0
for j in range(0, i + 1):
if y_true[j] == pos_label:
prec += 1.0
prec /= (i + 1.0)
score += prec
return score / n_pos
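# A matching worked example (illustrative only) for `_average_precision`: with
# y_true = [0, 1, 1] and y_score = [0.1, 0.8, 0.4], the positives occupy ranks
# 1 and 2, giving precisions 1/1 and 2/2, so the average precision is 1.0.
def _average_precision_toy_example():
    y_true = np.array([0, 1, 1])
    y_score = np.array([0.1, 0.8, 0.4])
    assert _average_precision(y_true, y_score) == 1.0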
def test_roc_curve():
# Test Area under Receiver Operating Characteristic (ROC) curve
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred)
roc_auc = auc(fpr, tpr)
expected_auc = _auc(y_true, probas_pred)
assert_array_almost_equal(roc_auc, expected_auc, decimal=2)
assert_almost_equal(roc_auc, roc_auc_score(y_true, probas_pred))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_end_points():
# Make sure that roc_curve returns a curve starting at 0 and ending at
# 1, even in corner cases
rng = np.random.RandomState(0)
y_true = np.array([0] * 50 + [1] * 50)
y_pred = rng.randint(3, size=100)
fpr, tpr, thr = roc_curve(y_true, y_pred)
assert_equal(fpr[0], 0)
assert_equal(fpr[-1], 1)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thr.shape)
def test_roc_returns_consistency():
# Test whether the returned threshold matches up with tpr
# make small toy dataset
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred)
# use the given thresholds to determine the tpr
tpr_correct = []
for t in thresholds:
tp = np.sum((probas_pred >= t) & y_true)
p = np.sum(y_true)
tpr_correct.append(1.0 * tp / p)
# compare tpr and tpr_correct to see if the thresholds' order was correct
assert_array_almost_equal(tpr, tpr_correct, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_nonrepeating_thresholds():
# Test to ensure that we don't return spurious repeating thresholds.
# Duplicated thresholds can arise due to machine precision issues.
dataset = datasets.load_digits()
X = dataset['data']
y = dataset['target']
# This random forest classifier can only return probabilities
# significant to two decimal places
clf = ensemble.RandomForestClassifier(n_estimators=100, random_state=0)
# How well can the classifier predict whether a digit is less than 5?
# This task contributes floating point roundoff errors to the probabilities
train, test = slice(None, None, 2), slice(1, None, 2)
probas_pred = clf.fit(X[train], y[train]).predict_proba(X[test])
y_score = probas_pred[:, :5].sum(axis=1) # roundoff errors begin here
y_true = [yy < 5 for yy in y[test]]
# Check for repeating values in the thresholds
fpr, tpr, thresholds = roc_curve(y_true, y_score)
assert_equal(thresholds.size, np.unique(np.round(thresholds, 2)).size)
def test_roc_curve_multi():
# roc_curve not applicable for multi-class problems
y_true, _, probas_pred = make_prediction(binary=False)
assert_raises(ValueError, roc_curve, y_true, probas_pred)
def test_roc_curve_confidence():
# roc_curve for confidence scores
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred - 0.5)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.90, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_hard():
# roc_curve for hard decisions
y_true, pred, probas_pred = make_prediction(binary=True)
# always predict one
trivial_pred = np.ones(y_true.shape)
fpr, tpr, thresholds = roc_curve(y_true, trivial_pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.50, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# always predict zero
trivial_pred = np.zeros(y_true.shape)
fpr, tpr, thresholds = roc_curve(y_true, trivial_pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.50, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# hard decisions
fpr, tpr, thresholds = roc_curve(y_true, pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.78, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_one_label():
y_true = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
y_pred = [0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
# assert there are warnings
w = UndefinedMetricWarning
fpr, tpr, thresholds = assert_warns(w, roc_curve, y_true, y_pred)
# all true labels, all fpr should be nan
assert_array_equal(fpr,
np.nan * np.ones(len(thresholds)))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# assert there are warnings
fpr, tpr, thresholds = assert_warns(w, roc_curve,
[1 - x for x in y_true],
y_pred)
# all negative labels, all tpr should be nan
assert_array_equal(tpr,
np.nan * np.ones(len(thresholds)))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_toydata():
# Binary classification
y_true = [0, 1]
y_score = [0, 1]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [1, 1])
assert_almost_equal(roc_auc, 1.)
y_true = [0, 1]
y_score = [1, 0]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1, 1])
assert_array_almost_equal(fpr, [0, 0, 1])
assert_almost_equal(roc_auc, 0.)
y_true = [1, 0]
y_score = [1, 1]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [0, 1])
assert_almost_equal(roc_auc, 0.5)
y_true = [1, 0]
y_score = [1, 0]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [1, 1])
assert_almost_equal(roc_auc, 1.)
y_true = [1, 0]
y_score = [0.5, 0.5]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [0, 1])
assert_almost_equal(roc_auc, .5)
y_true = [0, 0]
y_score = [0.25, 0.75]
tpr, fpr, _ = roc_curve(y_true, y_score)
assert_raises(ValueError, roc_auc_score, y_true, y_score)
assert_array_almost_equal(tpr, [0., 0.5, 1.])
assert_array_almost_equal(fpr, [np.nan, np.nan, np.nan])
y_true = [1, 1]
y_score = [0.25, 0.75]
tpr, fpr, _ = roc_curve(y_true, y_score)
assert_raises(ValueError, roc_auc_score, y_true, y_score)
assert_array_almost_equal(tpr, [np.nan, np.nan])
assert_array_almost_equal(fpr, [0.5, 1.])
# Multi-label classification task
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [0, 1]])
assert_raises(ValueError, roc_auc_score, y_true, y_score, average="macro")
assert_raises(ValueError, roc_auc_score, y_true, y_score,
average="weighted")
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 1.)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 1.)
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_raises(ValueError, roc_auc_score, y_true, y_score, average="macro")
assert_raises(ValueError, roc_auc_score, y_true, y_score,
average="weighted")
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0.5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0.5)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0.5, 0.5], [0.5, 0.5]])
assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), .5)
def test_auc():
# Test Area Under Curve (AUC) computation
x = [0, 1]
y = [0, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [1, 0]
y = [0, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [1, 0, 0]
y = [0, 1, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [0, 1]
y = [1, 1]
assert_array_almost_equal(auc(x, y), 1)
x = [0, 0.5, 1]
y = [0, 0.5, 1]
assert_array_almost_equal(auc(x, y), 0.5)
def test_auc_duplicate_values():
# Test Area Under Curve (AUC) computation with duplicate values
# auc() was previously sorting the x and y arrays according to the indices
# from numpy.argsort(x), which was reordering the tied 0's in this example
# and resulting in an incorrect area computation. This test detects the
# error.
x = [-2.0, 0.0, 0.0, 0.0, 1.0]
y1 = [2.0, 0.0, 0.5, 1.0, 1.0]
y2 = [2.0, 1.0, 0.0, 0.5, 1.0]
y3 = [2.0, 1.0, 0.5, 0.0, 1.0]
for y in (y1, y2, y3):
assert_array_almost_equal(auc(x, y, reorder=True), 3.0)
def test_auc_errors():
# Incompatible shapes
assert_raises(ValueError, auc, [0.0, 0.5, 1.0], [0.1, 0.2])
# Too few x values
assert_raises(ValueError, auc, [0.0], [0.1])
# x is not in order
assert_raises(ValueError, auc, [1.0, 0.0, 0.5], [0.0, 0.0, 0.0])
def test_auc_score_non_binary_class():
# Test that roc_auc_score function returns an error when trying
# to compute AUC for non-binary class values.
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains only one class value
y_true = np.zeros(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = -np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
roc_auc_score, y_true, y_pred)
clean_warning_registry()
with warnings.catch_warnings(record=True):
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains only one class value
y_true = np.zeros(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = -np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
roc_auc_score, y_true, y_pred)
def test_precision_recall_curve():
y_true, _, probas_pred = make_prediction(binary=True)
_test_precision_recall_curve(y_true, probas_pred)
# Use {-1, 1} for labels; make sure original labels aren't modified
y_true[np.where(y_true == 0)] = -1
y_true_copy = y_true.copy()
_test_precision_recall_curve(y_true, probas_pred)
assert_array_equal(y_true_copy, y_true)
labels = [1, 0, 0, 1]
predict_probas = [1, 2, 3, 4]
p, r, t = precision_recall_curve(labels, predict_probas)
assert_array_almost_equal(p, np.array([0.5, 0.33333333, 0.5, 1., 1.]))
assert_array_almost_equal(r, np.array([1., 0.5, 0.5, 0.5, 0.]))
assert_array_almost_equal(t, np.array([1, 2, 3, 4]))
assert_equal(p.size, r.size)
assert_equal(p.size, t.size + 1)
def test_precision_recall_curve_pos_label():
y_true, _, probas_pred = make_prediction(binary=False)
pos_label = 2
p, r, thresholds = precision_recall_curve(y_true,
probas_pred[:, pos_label],
pos_label=pos_label)
p2, r2, thresholds2 = precision_recall_curve(y_true == pos_label,
probas_pred[:, pos_label])
assert_array_almost_equal(p, p2)
assert_array_almost_equal(r, r2)
assert_array_almost_equal(thresholds, thresholds2)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
def _test_precision_recall_curve(y_true, probas_pred):
# Test Precision-Recall and area under PR curve
p, r, thresholds = precision_recall_curve(y_true, probas_pred)
precision_recall_auc = auc(r, p)
assert_array_almost_equal(precision_recall_auc, 0.85, 2)
assert_array_almost_equal(precision_recall_auc,
average_precision_score(y_true, probas_pred))
assert_almost_equal(_average_precision(y_true, probas_pred),
precision_recall_auc, 1)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
# Smoke test in the case of proba having only one value
p, r, thresholds = precision_recall_curve(y_true,
np.zeros_like(probas_pred))
precision_recall_auc = auc(r, p)
assert_array_almost_equal(precision_recall_auc, 0.75, 3)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
def test_precision_recall_curve_errors():
# Contains non-binary labels
assert_raises(ValueError, precision_recall_curve,
[0, 1, 2], [[0.0], [1.0], [1.0]])
def test_precision_recall_curve_toydata():
with np.errstate(all="raise"):
# Binary classification
y_true = [0, 1]
y_score = [0, 1]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [1, 1])
assert_array_almost_equal(r, [1, 0])
assert_almost_equal(auc_prc, 1.)
y_true = [0, 1]
y_score = [1, 0]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 0., 1.])
assert_array_almost_equal(r, [1., 0., 0.])
assert_almost_equal(auc_prc, 0.25)
y_true = [1, 0]
y_score = [1, 1]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 1])
assert_array_almost_equal(r, [1., 0])
assert_almost_equal(auc_prc, .75)
y_true = [1, 0]
y_score = [1, 0]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [1, 1])
assert_array_almost_equal(r, [1, 0])
assert_almost_equal(auc_prc, 1.)
y_true = [1, 0]
y_score = [0.5, 0.5]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 1])
assert_array_almost_equal(r, [1, 0.])
assert_almost_equal(auc_prc, .75)
y_true = [0, 0]
y_score = [0.25, 0.75]
assert_raises(Exception, precision_recall_curve, y_true, y_score)
assert_raises(Exception, average_precision_score, y_true, y_score)
y_true = [1, 1]
y_score = [0.25, 0.75]
p, r, _ = precision_recall_curve(y_true, y_score)
assert_almost_equal(average_precision_score(y_true, y_score), 1.)
assert_array_almost_equal(p, [1., 1., 1.])
assert_array_almost_equal(r, [1, 0.5, 0.])
# Multi-label classification task
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [0, 1]])
assert_raises(Exception, average_precision_score, y_true, y_score,
average="macro")
assert_raises(Exception, average_precision_score, y_true, y_score,
average="weighted")
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 1.)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 1.)
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_raises(Exception, average_precision_score, y_true, y_score,
average="macro")
assert_raises(Exception, average_precision_score, y_true, y_score,
average="weighted")
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.625)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.625)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_almost_equal(average_precision_score(y_true, y_score,
average="macro"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="weighted"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.25)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0.5, 0.5], [0.5, 0.5]])
assert_almost_equal(average_precision_score(y_true, y_score,
average="macro"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="weighted"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.75)
def test_score_scale_invariance():
# Test that average_precision_score and roc_auc_score are invariant by
# the scaling or shifting of probabilities
y_true, _, probas_pred = make_prediction(binary=True)
roc_auc = roc_auc_score(y_true, probas_pred)
roc_auc_scaled = roc_auc_score(y_true, 100 * probas_pred)
roc_auc_shifted = roc_auc_score(y_true, probas_pred - 10)
assert_equal(roc_auc, roc_auc_scaled)
assert_equal(roc_auc, roc_auc_shifted)
pr_auc = average_precision_score(y_true, probas_pred)
pr_auc_scaled = average_precision_score(y_true, 100 * probas_pred)
pr_auc_shifted = average_precision_score(y_true, probas_pred - 10)
assert_equal(pr_auc, pr_auc_scaled)
assert_equal(pr_auc, pr_auc_shifted)
def check_lrap_toy(lrap_score):
# Check on several small examples that it works
assert_almost_equal(lrap_score([[0, 1]], [[0.25, 0.75]]), 1)
assert_almost_equal(lrap_score([[0, 1]], [[0.75, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[1, 1]], [[0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.25, 0.5, 0.75]]), 1 / 2)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.25, 0.5, 0.75]]), 1 / 3)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.25, 0.5, 0.75]]),
(2 / 3 + 1 / 1) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.25, 0.5, 0.75]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.75, 0.5, 0.25]]), 1 / 3)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.75, 0.5, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.75, 0.5, 0.25]]),
(1 / 2 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.75, 0.5, 0.25]]),
(1 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.5, 0.75, 0.25]]), 1 / 3)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.5, 0.75, 0.25]]),
(1 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.5, 0.75, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.5, 0.75, 0.25]]),
(1 / 2 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.5, 0.75, 0.25]]), 1)
# Tie handling
assert_almost_equal(lrap_score([[1, 0]], [[0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1]], [[0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[1, 1]], [[0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.25, 0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.25, 0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 1 / 3)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.25, 0.5, 0.5]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.25, 0.5, 0.5]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.5, 0.5, 0.5]]), 2 / 3)
assert_almost_equal(lrap_score([[1, 1, 1, 0]], [[0.5, 0.5, 0.5, 0.5]]),
3 / 4)
def check_zero_or_all_relevant_labels(lrap_score):
random_state = check_random_state(0)
for n_labels in range(2, 5):
y_score = random_state.uniform(size=(1, n_labels))
y_score_ties = np.zeros_like(y_score)
# No relevant labels
y_true = np.zeros((1, n_labels))
assert_equal(lrap_score(y_true, y_score), 1.)
assert_equal(lrap_score(y_true, y_score_ties), 1.)
# Only relevant labels
y_true = np.ones((1, n_labels))
assert_equal(lrap_score(y_true, y_score), 1.)
assert_equal(lrap_score(y_true, y_score_ties), 1.)
# Degenerate case: only one label
assert_almost_equal(lrap_score([[1], [0], [1], [0]],
[[0.5], [0.5], [0.5], [0.5]]), 1.)
def check_lrap_error_raised(lrap_score):
# Raise value error if not appropriate format
assert_raises(ValueError, lrap_score,
[0, 1, 0], [0.25, 0.3, 0.2])
assert_raises(ValueError, lrap_score, [0, 1, 2],
[[0.25, 0.75, 0.0], [0.7, 0.3, 0.0], [0.8, 0.2, 0.0]])
assert_raises(ValueError, lrap_score, [(0), (1), (2)],
[[0.25, 0.75, 0.0], [0.7, 0.3, 0.0], [0.8, 0.2, 0.0]])
# Check that y_true.shape != y_score.shape raises the proper exception
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [0, 1])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0, 1]])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0], [1]])
assert_raises(ValueError, lrap_score, [[0, 1]], [[0, 1], [0, 1]])
assert_raises(ValueError, lrap_score, [[0], [1]], [[0, 1], [0, 1]])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0], [1]])
def check_lrap_only_ties(lrap_score):
# Check tie handling in score
# Basic check with only ties and increasing label space
for n_labels in range(2, 10):
y_score = np.ones((1, n_labels))
# Check for growing number of consecutive relevant
for n_relevant in range(1, n_labels):
# Check for a bunch of positions
for pos in range(n_labels - n_relevant):
y_true = np.zeros((1, n_labels))
y_true[0, pos:pos + n_relevant] = 1
assert_almost_equal(lrap_score(y_true, y_score),
n_relevant / n_labels)
def check_lrap_without_tie_and_increasing_score(lrap_score):
# Check that label ranking average precision works for various inputs
# Basic check with increasing label space size and decreasing score
for n_labels in range(2, 10):
y_score = n_labels - (np.arange(n_labels).reshape((1, n_labels)) + 1)
# First and last
y_true = np.zeros((1, n_labels))
y_true[0, 0] = 1
y_true[0, -1] = 1
assert_almost_equal(lrap_score(y_true, y_score),
(2 / n_labels + 1) / 2)
# Check for growing number of consecutive relevant label
for n_relevant in range(1, n_labels):
# Check for a bunch of position
for pos in range(n_labels - n_relevant):
y_true = np.zeros((1, n_labels))
y_true[0, pos:pos + n_relevant] = 1
assert_almost_equal(lrap_score(y_true, y_score),
sum((r + 1) / ((pos + r + 1) * n_relevant)
for r in range(n_relevant)))
def _my_lrap(y_true, y_score):
"""Simple implementation of label ranking average precision"""
check_consistent_length(y_true, y_score)
y_true = check_array(y_true)
y_score = check_array(y_score)
n_samples, n_labels = y_true.shape
score = np.empty((n_samples, ))
for i in range(n_samples):
# The best rank corresponds to 1. Ranks higher than 1 are worse.
# The best inverse ranking corresponds to n_labels.
unique_rank, inv_rank = np.unique(y_score[i], return_inverse=True)
n_ranks = unique_rank.size
rank = n_ranks - inv_rank
# Ranks need to be corrected to take ties into account,
# e.g. two labels tied at rank 1 both get a corrected rank of 2.
corr_rank = np.bincount(rank, minlength=n_ranks + 1).cumsum()
rank = corr_rank[rank]
relevant = y_true[i].nonzero()[0]
if relevant.size == 0 or relevant.size == n_labels:
score[i] = 1
continue
score[i] = 0.
for label in relevant:
# Let's count the number of relevant labels with a better rank
# (smaller rank).
n_ranked_above = sum(rank[r] <= rank[label] for r in relevant)
# Weight by the rank of the actual label
score[i] += n_ranked_above / rank[label]
score[i] /= relevant.size
return score.mean()
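# Small illustrative check (not part of the original test suite): the naive
# implementation above agrees with sklearn's function on one of the toy cases
# exercised in `check_lrap_toy`.
def _my_lrap_toy_example():
    y_true = [[1, 0, 1]]
    y_score = [[0.25, 0.5, 0.75]]
    expected = (2 / 3 + 1 / 1) / 2
    assert_almost_equal(_my_lrap(y_true, y_score), expected)
    assert_almost_equal(
        label_ranking_average_precision_score(y_true, y_score), expected)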
def check_alternative_lrap_implementation(lrap_score, n_classes=5,
n_samples=20, random_state=0):
_, y_true = make_multilabel_classification(n_features=1,
allow_unlabeled=False,
random_state=random_state,
n_classes=n_classes,
n_samples=n_samples)
# Score with ties
y_score = sparse_random_matrix(n_components=y_true.shape[0],
n_features=y_true.shape[1],
random_state=random_state)
if hasattr(y_score, "toarray"):
y_score = y_score.toarray()
score_lrap = label_ranking_average_precision_score(y_true, y_score)
score_my_lrap = _my_lrap(y_true, y_score)
assert_almost_equal(score_lrap, score_my_lrap)
# Uniform score
random_state = check_random_state(random_state)
y_score = random_state.uniform(size=(n_samples, n_classes))
score_lrap = label_ranking_average_precision_score(y_true, y_score)
score_my_lrap = _my_lrap(y_true, y_score)
assert_almost_equal(score_lrap, score_my_lrap)
def test_label_ranking_avp():
for fn in [label_ranking_average_precision_score, _my_lrap]:
yield check_lrap_toy, fn
yield check_lrap_without_tie_and_increasing_score, fn
yield check_lrap_only_ties, fn
yield check_zero_or_all_relevant_labels, fn
yield check_lrap_error_raised, label_ranking_average_precision_score
for n_samples, n_classes, random_state in product((1, 2, 8, 20),
(2, 5, 10),
range(1)):
yield (check_alternative_lrap_implementation,
label_ranking_average_precision_score,
n_classes, n_samples, random_state)
def test_coverage_error():
# Toy case
assert_almost_equal(coverage_error([[0, 1]], [[0.25, 0.75]]), 1)
assert_almost_equal(coverage_error([[0, 1]], [[0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1]], [[0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[0, 0]], [[0.75, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.25, 0.5, 0.75]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.25, 0.5, 0.75]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.25, 0.5, 0.75]]), 2)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.75, 0.5, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.75, 0.5, 0.25]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.75, 0.5, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.5, 0.75, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.5, 0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.5, 0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.5, 0.75, 0.25]]), 3)
    # Non-trivial case
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0]],
[[0.1, 10., -3], [0, 1, 3]]),
(1 + 3) / 2.)
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [0, 1, 3], [0, 2, 0]]),
(1 + 3 + 3) / 3.)
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [3, 1, 3], [0, 2, 0]]),
(1 + 3 + 3) / 3.)
def test_coverage_tie_handling():
assert_almost_equal(coverage_error([[0, 0]], [[0.5, 0.5]]), 0)
assert_almost_equal(coverage_error([[1, 0]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[1, 1]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.25, 0.5, 0.5]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 3)
def test_label_ranking_loss():
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.25, 0.75]]), 0)
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.75, 0.25]]), 1)
assert_almost_equal(label_ranking_loss([[0, 0, 1]], [[0.25, 0.5, 0.75]]),
0)
assert_almost_equal(label_ranking_loss([[0, 1, 0]], [[0.25, 0.5, 0.75]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 1]], [[0.25, 0.5, 0.75]]),
0)
assert_almost_equal(label_ranking_loss([[1, 0, 0]], [[0.25, 0.5, 0.75]]),
2 / 2)
assert_almost_equal(label_ranking_loss([[1, 0, 1]], [[0.25, 0.5, 0.75]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[1, 1, 0]], [[0.25, 0.5, 0.75]]),
2 / 2)
# Undefined metrics - the ranking doesn't matter
assert_almost_equal(label_ranking_loss([[0, 0]], [[0.75, 0.25]]), 0)
assert_almost_equal(label_ranking_loss([[1, 1]], [[0.75, 0.25]]), 0)
assert_almost_equal(label_ranking_loss([[0, 0]], [[0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[1, 1]], [[0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[0, 0, 0]], [[0.5, 0.75, 0.25]]),
0)
assert_almost_equal(label_ranking_loss([[1, 1, 1]], [[0.5, 0.75, 0.25]]),
0)
assert_almost_equal(label_ranking_loss([[0, 0, 0]], [[0.25, 0.5, 0.5]]),
0)
assert_almost_equal(label_ranking_loss([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 0)
    # Non-trivial case
assert_almost_equal(label_ranking_loss([[0, 1, 0], [1, 1, 0]],
[[0.1, 10., -3], [0, 1, 3]]),
(0 + 2 / 2) / 2.)
assert_almost_equal(label_ranking_loss(
[[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [0, 1, 3], [0, 2, 0]]),
(0 + 2 / 2 + 1 / 2) / 3.)
assert_almost_equal(label_ranking_loss(
[[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [3, 1, 3], [0, 2, 0]]),
(0 + 2 / 2 + 1 / 2) / 3.)
# Sparse csr matrices
assert_almost_equal(label_ranking_loss(
csr_matrix(np.array([[0, 1, 0], [1, 1, 0]])),
[[0.1, 10, -3], [3, 1, 3]]),
(0 + 2 / 2) / 2.)
def test_ranking_appropriate_input_shape():
    # Check that y_true.shape != y_score.shape raises the proper exception
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [0, 1])
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [[0, 1]])
assert_raises(ValueError, label_ranking_loss,
[[0, 1], [0, 1]], [[0], [1]])
assert_raises(ValueError, label_ranking_loss, [[0, 1]], [[0, 1], [0, 1]])
assert_raises(ValueError, label_ranking_loss,
[[0], [1]], [[0, 1], [0, 1]])
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [[0], [1]])
def test_ranking_loss_ties_handling():
# Tie handling
assert_almost_equal(label_ranking_loss([[1, 0]], [[0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[0, 0, 1]], [[0.25, 0.5, 0.5]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 0]], [[0.25, 0.5, 0.5]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[1, 0, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[1, 1, 0]], [[0.25, 0.5, 0.5]]), 1)
| bsd-3-clause |
cqychen/quants | quants/loaddata/skyeye_ods_invest_refer_sh_margins_detail.py | 1 | 2670 | #coding=utf8
import tushare as ts;
import pymysql;
import time as dt
from datashape.coretypes import string
from pandas.io.sql import SQLDatabase
import sqlalchemy
import datetime
from sqlalchemy import create_engine
from pandas.io import sql
import threading
import pandas as pd;
import sys
sys.path.append('../') # add the directory containing the shared configuration module
from common_function import *
def create_table(table_name):
cmd='''
create table if not exists %s
(
        opDate VARCHAR (63) comment 'margin trading date'
        ,stockCode varchar (63) comment 'stock code'
        ,securityAbbr varchar (63) comment 'abbreviation of the underlying security'
        ,rzye BIGINT comment 'margin financing balance for the day (CNY)'
        ,rzmre BIGINT comment 'margin financing purchases for the day (CNY)'
        ,rzche BIGINT comment 'margin financing repayments for the day (CNY)'
        ,rqyl BIGINT comment 'outstanding securities-lending (short) volume for the day'
        ,rqmcl BIGINT comment 'securities-lending (short) sales for the day'
        ,rqchl BIGINT comment 'securities-lending (short) repayments for the day'
,rqchl BIGINT comment '本日融券偿还量'
,PRIMARY KEY(stockCode,`opDate`)
,index(stockCode)
)DEFAULT CHARSET=utf8
'''%table_name
print (cmd)
run_mysql_cmd(cmd,conn)
def load_data_stock(stock_code):
'''
    :param stock_code: stock code whose margin-detail data will be loaded into MySQL
:return:
'''
    start_date = get_date_add_days(get_max_date_sh_margins_detail(stock_code), 1) # get the day after the latest date already loaded for this stock
    rs = ts.sh_margin_details(start=start_date, end=end_date, symbol=stock_code) # fetch the data from tushare
pd.DataFrame.to_sql(rs, table_name, con=conn, flavor='mysql', if_exists='append', index=False)
def load_data():
stock_code = get_stock_info().index
total_num = len(stock_code);
tempnum = 1;
for tmp_stock_code in stock_code:
tempnum = tempnum + 1
print(tempnum,tmp_stock_code)
load_data_stock(tmp_stock_code)
if __name__ == '__main__':
    #-------------------- basic setup ---------------------------------
    print("-------------- loading SH margin trading details -----------------------------")
startTime=dt.time()
iphost,user,passwd=get_mysql_conn()
db='ods_data'
charset='utf8'
table_name='ods_invest_refer_sh_margins_detail'
conn = pymysql.connect(user=user, passwd=passwd,host=iphost, db=db,charset=charset)
end_date= dt.strftime('%Y-%m-%d',dt.localtime(dt.time()))
    #-------------------- script execution starts --------------------------------
create_table(table_name=table_name)
load_data()
endTime=dt.time()
print("---------------脚本运行完毕,共计耗费时间%sS------------------"%(endTime-startTime))
| epl-1.0 |
jundongl/scikit-feature | skfeature/example/test_JMI.py | 3 | 1482 | import scipy.io
from sklearn.metrics import accuracy_score
from sklearn import cross_validation
from sklearn import svm
from skfeature.function.information_theoretical_based import JMI
def main():
# load data
mat = scipy.io.loadmat('../data/colon.mat')
X = mat['X'] # data
X = X.astype(float)
y = mat['Y'] # label
y = y[:, 0]
n_samples, n_features = X.shape # number of samples and number of features
# split data into 10 folds
ss = cross_validation.KFold(n_samples, n_folds=10, shuffle=True)
# perform evaluation on classification task
num_fea = 10 # number of selected features
clf = svm.LinearSVC() # linear SVM
correct = 0
for train, test in ss:
# obtain the index of each feature on the training set
idx,_,_ = JMI.jmi(X[train], y[train], n_selected_features=num_fea)
# obtain the dataset on the selected features
features = X[:, idx[0:num_fea]]
# train a classification model with the selected features on the training dataset
clf.fit(features[train], y[train])
# predict the class labels of test data
y_predict = clf.predict(features[test])
# obtain the classification accuracy on the test data
acc = accuracy_score(y[test], y_predict)
correct = correct + acc
# output the average classification accuracy over all 10 folds
print 'Accuracy:', float(correct)/10
if __name__ == '__main__':
main()
| gpl-2.0 |
jtwhite79/pyemu | pyemu/utils/gw_utils.py | 1 | 110032 | """MODFLOW support utilities"""
import os
from datetime import datetime
import shutil
import warnings
import numpy as np
import pandas as pd
import re
pd.options.display.max_colwidth = 100
from pyemu.pst.pst_utils import (
SFMT,
IFMT,
FFMT,
pst_config,
parse_tpl_file,
try_process_output_file,
)
from pyemu.utils.os_utils import run
from pyemu.utils.helpers import _write_df_tpl
from ..pyemu_warnings import PyemuWarning
PP_FMT = {
"name": SFMT,
"x": FFMT,
"y": FFMT,
"zone": IFMT,
"tpl": SFMT,
"parval1": FFMT,
}
PP_NAMES = ["name", "x", "y", "zone", "parval1"]
def modflow_pval_to_template_file(pval_file, tpl_file=None):
"""write a template file for a modflow parameter value file.
Args:
pval_file (`str`): the path and name of the existing modflow pval file
tpl_file (`str`, optional): template file to write. If None, use
`pval_file` +".tpl". Default is None
Note:
Uses names in the first column in the pval file as par names.
Returns:
**pandas.DataFrame**: a dataFrame with control file parameter information
"""
if tpl_file is None:
tpl_file = pval_file + ".tpl"
pval_df = pd.read_csv(
pval_file,
delim_whitespace=True,
header=None,
skiprows=2,
names=["parnme", "parval1"],
)
pval_df.index = pval_df.parnme
pval_df.loc[:, "tpl"] = pval_df.parnme.apply(lambda x: " ~ {0:15s} ~".format(x))
with open(tpl_file, "w") as f:
f.write("ptf ~\n#pval template file from pyemu\n")
f.write("{0:10d} #NP\n".format(pval_df.shape[0]))
f.write(
pval_df.loc[:, ["parnme", "tpl"]].to_string(
col_space=0,
formatters=[SFMT, SFMT],
index=False,
header=False,
justify="left",
)
)
return pval_df
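# Illustrative usage sketch (not part of the original module); the file name
# "model.pval" below is hypothetical:
#
#   import pyemu
#   df = pyemu.gw_utils.modflow_pval_to_template_file("model.pval")
#   # writes "model.pval.tpl" and returns a dataframe of parameter names/values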
def modflow_hob_to_instruction_file(hob_file, ins_file=None):
"""write an instruction file for a modflow head observation file
Args:
hob_file (`str`): the path and name of the existing modflow hob file
ins_file (`str`, optional): the name of the instruction file to write.
If `None`, `hob_file` +".ins" is used. Default is `None`.
Returns:
**pandas.DataFrame**: a dataFrame with control file observation information
"""
hob_df = pd.read_csv(
hob_file,
delim_whitespace=True,
skiprows=1,
header=None,
names=["simval", "obsval", "obsnme"],
)
hob_df.loc[:, "obsnme"] = hob_df.obsnme.apply(str.lower)
hob_df.loc[:, "ins_line"] = hob_df.obsnme.apply(lambda x: "l1 !{0:s}!".format(x))
hob_df.loc[0, "ins_line"] = hob_df.loc[0, "ins_line"].replace("l1", "l2")
if ins_file is None:
ins_file = hob_file + ".ins"
f_ins = open(ins_file, "w")
f_ins.write("pif ~\n")
f_ins.write(
hob_df.loc[:, ["ins_line"]].to_string(
col_space=0,
columns=["ins_line"],
header=False,
index=False,
formatters=[SFMT],
)
+ "\n"
)
hob_df.loc[:, "weight"] = 1.0
hob_df.loc[:, "obgnme"] = "obgnme"
f_ins.close()
return hob_df
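# Illustrative usage sketch (not part of the original module); "model.hob.out"
# is a hypothetical MODFLOW HOB output file name:
#
#   import pyemu
#   hob_df = pyemu.gw_utils.modflow_hob_to_instruction_file("model.hob.out")
#   # writes "model.hob.out.ins" and returns observation names and values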
def modflow_hydmod_to_instruction_file(hydmod_file, ins_file=None):
"""write an instruction file for a modflow hydmod file
Args:
hydmod_file (`str`): the path and name of the existing modflow hob file
ins_file (`str`, optional): the name of the instruction file to write.
If `None`, `hydmod_file` +".ins" is used. Default is `None`.
Returns:
**pandas.DataFrame**: a dataFrame with control file observation information
Note:
calls `pyemu.gw_utils.modflow_read_hydmod_file()`
"""
hydmod_df, hydmod_outfile = modflow_read_hydmod_file(hydmod_file)
hydmod_df.loc[:, "ins_line"] = hydmod_df.obsnme.apply(
lambda x: "l1 w !{0:s}!".format(x)
)
if ins_file is None:
ins_file = hydmod_outfile + ".ins"
with open(ins_file, "w") as f_ins:
f_ins.write("pif ~\nl1\n")
f_ins.write(
hydmod_df.loc[:, ["ins_line"]].to_string(
col_space=0,
columns=["ins_line"],
header=False,
index=False,
formatters=[SFMT],
)
+ "\n"
)
hydmod_df.loc[:, "weight"] = 1.0
hydmod_df.loc[:, "obgnme"] = "obgnme"
df = try_process_output_file(hydmod_outfile + ".ins")
if df is not None:
df.loc[:, "obsnme"] = df.index.values
df.loc[:, "obgnme"] = df.obsnme.apply(lambda x: x[:-9])
df.to_csv("_setup_" + os.path.split(hydmod_outfile)[-1] + ".csv", index=False)
return df
return hydmod_df
def modflow_read_hydmod_file(hydmod_file, hydmod_outfile=None):
"""read a binary hydmod file and return a dataframe of the results
Args:
hydmod_file (`str`): The path and name of the existing modflow hydmod binary file
hydmod_outfile (`str`, optional): output file to write. If `None`, use `hydmod_file` +".dat".
Default is `None`.
Returns:
**pandas.DataFrame**: a dataFrame with hymod_file values
"""
try:
import flopy.utils as fu
except Exception as e:
print("flopy is not installed - cannot read {0}\n{1}".format(hydmod_file, e))
return
obs = fu.HydmodObs(hydmod_file)
hyd_df = obs.get_dataframe()
hyd_df.columns = [i[2:] if i.lower() != "totim" else i for i in hyd_df.columns]
# hyd_df.loc[:,"datetime"] = hyd_df.index
hyd_df["totim"] = hyd_df.index.map(lambda x: x.strftime("%Y%m%d"))
hyd_df.rename(columns={"totim": "datestamp"}, inplace=True)
# reshape into a single column
hyd_df = pd.melt(hyd_df, id_vars="datestamp")
hyd_df.rename(columns={"value": "obsval"}, inplace=True)
hyd_df["obsnme"] = [
i.lower() + "_" + j.lower() for i, j in zip(hyd_df.variable, hyd_df.datestamp)
]
vc = hyd_df.obsnme.value_counts().sort_values()
vc = list(vc.loc[vc > 1].index.values)
if len(vc) > 0:
hyd_df.to_csv("hyd_df.duplciates.csv")
obs.get_dataframe().to_csv("hyd_org.duplicates.csv")
raise Exception("duplicates in obsnme:{0}".format(vc))
# assert hyd_df.obsnme.value_counts().max() == 1,"duplicates in obsnme"
if not hydmod_outfile:
hydmod_outfile = hydmod_file + ".dat"
hyd_df.to_csv(hydmod_outfile, columns=["obsnme", "obsval"], sep=" ", index=False)
# hyd_df = hyd_df[['obsnme','obsval']]
return hyd_df[["obsnme", "obsval"]], hydmod_outfile
def setup_mtlist_budget_obs(
list_filename,
gw_filename="mtlist_gw.dat",
sw_filename="mtlist_sw.dat",
start_datetime="1-1-1970",
gw_prefix="gw",
sw_prefix="sw",
save_setup_file=False,
):
"""setup observations of gw (and optionally sw) mass budgets from mt3dusgs list file.
Args:
list_filename (`str`): path and name of existing modflow list file
gw_filename (`str`, optional): output filename that will contain the gw budget
observations. Default is "mtlist_gw.dat"
sw_filename (`str`, optional): output filename that will contain the sw budget
observations. Default is "mtlist_sw.dat"
start_datetime (`str`, optional): an str that can be parsed into a `pandas.TimeStamp`.
used to give budget observations meaningful names. Default is "1-1-1970".
gw_prefix (`str`, optional): a prefix to add to the GW budget observations.
Useful if processing more than one list file as part of the forward run process.
Default is 'gw'.
sw_prefix (`str`, optional): a prefix to add to the SW budget observations. Useful
if processing more than one list file as part of the forward run process.
Default is 'sw'.
save_setup_file (`bool`, optional): a flag to save "_setup_"+ `list_filename` +".csv" file
that contains useful control file information. Default is `False`.
Returns:
tuple containing
- **str**: the command to add to the forward run script
- **str**: the names of the instruction files that were created
- **pandas.DataFrame**: a dataframe with information for constructing a control file
Note:
writes an instruction file and also a _setup_.csv to use when constructing a pest
control file
The instruction files are named `out_filename` +".ins"
It is recommended to use the default value for `gw_filename` or `sw_filename`.
This is the companion function of `gw_utils.apply_mtlist_budget_obs()`.
"""
gw, sw = apply_mtlist_budget_obs(
list_filename, gw_filename, sw_filename, start_datetime
)
gw_ins = gw_filename + ".ins"
_write_mtlist_ins(gw_ins, gw, gw_prefix)
ins_files = [gw_ins]
df_gw = try_process_output_file(gw_ins, gw_filename)
if df_gw is None:
raise Exception("error processing groundwater instruction file")
if sw is not None:
sw_ins = sw_filename + ".ins"
_write_mtlist_ins(sw_ins, sw, sw_prefix)
ins_files.append(sw_ins)
df_sw = try_process_output_file(sw_ins, sw_filename)
if df_sw is None:
raise Exception("error processing surface water instruction file")
df_gw = df_gw.append(df_sw)
df_gw.loc[:, "obsnme"] = df_gw.index.values
if save_setup_file:
df_gw.to_csv("_setup_" + os.path.split(list_filename)[-1] + ".csv", index=False)
frun_line = "pyemu.gw_utils.apply_mtlist_budget_obs('{0}')".format(list_filename)
return frun_line, ins_files, df_gw
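# Illustrative usage sketch (not part of the original module); "mt3d.list" is a
# hypothetical MT3D-USGS list file name:
#
#   import pyemu
#   frun_line, ins_files, df = pyemu.gw_utils.setup_mtlist_budget_obs(
#       "mt3d.list", start_datetime="1-1-1970")
#   # frun_line is then added to the forward run script so that
#   # apply_mtlist_budget_obs() is re-run at model run time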
def _write_mtlist_ins(ins_filename, df, prefix):
"""write an instruction file for a MT3D-USGS list file"""
try:
dt_str = df.index.map(lambda x: x.strftime("%Y%m%d"))
except:
dt_str = df.index.map(lambda x: "{0:08.1f}".format(x).strip())
with open(ins_filename, "w") as f:
f.write("pif ~\nl1\n")
for dt in dt_str:
f.write("l1 ")
for col in df.columns.str.translate(
{ord(s): None for s in ["(", ")", "/", "="]}
):
if prefix == "":
obsnme = "{0}_{1}".format(col, dt)
else:
obsnme = "{0}_{1}_{2}".format(prefix, col, dt)
f.write(" w !{0}!".format(obsnme))
f.write("\n")
def apply_mtlist_budget_obs(
list_filename,
gw_filename="mtlist_gw.dat",
sw_filename="mtlist_sw.dat",
start_datetime="1-1-1970",
):
"""process an MT3D-USGS list file to extract mass budget entries.
Args:
list_filename (`str`): the path and name of an existing MT3D-USGS list file
gw_filename (`str`, optional): the name of the output file with gw mass
budget information. Default is "mtlist_gw.dat"
sw_filename (`str`): the name of the output file with sw mass budget information.
Default is "mtlist_sw.dat"
        start_datetime (`str`): a str that can be cast to a pandas.TimeStamp. Used to give
observations a meaningful name
Returns:
2-element tuple containing
- **pandas.DataFrame**: the gw mass budget dataframe
- **pandas.DataFrame**: (optional) the sw mass budget dataframe.
If the SFT process is not active, this returned value is `None`.
Note:
This is the companion function of `gw_utils.setup_mtlist_budget_obs()`.
"""
try:
import flopy
except Exception as e:
raise Exception("error import flopy: {0}".format(str(e)))
mt = flopy.utils.MtListBudget(list_filename)
gw, sw = mt.parse(start_datetime=start_datetime, diff=True)
gw = gw.drop(
[
col
for col in gw.columns
for drop_col in ["kper", "kstp", "tkstp"]
if (col.lower().startswith(drop_col))
],
axis=1,
)
gw.to_csv(gw_filename, sep=" ", index_label="datetime", date_format="%Y%m%d")
if sw is not None:
sw = sw.drop(
[
col
for col in sw.columns
for drop_col in ["kper", "kstp", "tkstp"]
if (col.lower().startswith(drop_col))
],
axis=1,
)
sw.to_csv(sw_filename, sep=" ", index_label="datetime", date_format="%Y%m%d")
return gw, sw
def setup_mflist_budget_obs(
list_filename,
flx_filename="flux.dat",
vol_filename="vol.dat",
start_datetime="1-1'1970",
prefix="",
save_setup_file=False,
specify_times=None,
):
"""setup observations of budget volume and flux from modflow list file.
Args:
list_filename (`str`): path and name of the existing modflow list file
flx_filename (`str`, optional): output filename that will contain the budget flux
observations. Default is "flux.dat"
vol_filename (`str`, optional): output filename that will contain the budget volume
observations. Default is "vol.dat"
start_datetime (`str`, optional): a string that can be parsed into a pandas.TimeStamp.
This is used to give budget observations meaningful names. Default is "1-1-1970".
prefix (`str`, optional): a prefix to add to the water budget observations. Useful if
processing more than one list file as part of the forward run process. Default is ''.
save_setup_file (`bool`): a flag to save "_setup_"+ `list_filename` +".csv" file that contains useful
control file information
specify_times (`np.ndarray`-like, optional): An array of times to
extract from the budget dataframes returned by the flopy
MfListBudget(list_filename).get_dataframe() method. This can be
useful to ensure consistent observation times for PEST.
            Array needs to be alignable with the index of the dataframe
            returned by the flopy method; care should be taken to ensure that
            this is the case. If passed, it will be written to
"budget_times.config" file as strings to be read by the companion
`apply_mflist_budget_obs()` method at run time.
Returns:
**pandas.DataFrame**: a dataframe with information for constructing a control file.
Note:
This method writes instruction files and also a _setup_.csv to use when constructing a pest
control file. The instruction files are named <flux_file>.ins and <vol_file>.ins, respectively
It is recommended to use the default values for flux_file and vol_file.
This is the companion function of `gw_utils.apply_mflist_budget_obs()`.
"""
flx, vol = apply_mflist_budget_obs(
list_filename, flx_filename, vol_filename, start_datetime, times=specify_times
)
_write_mflist_ins(flx_filename + ".ins", flx, prefix + "flx")
_write_mflist_ins(vol_filename + ".ins", vol, prefix + "vol")
df = try_process_output_file(flx_filename + ".ins")
if df is None:
raise Exception("error processing flux instruction file")
df2 = try_process_output_file(vol_filename + ".ins")
if df2 is None:
raise Exception("error processing volume instruction file")
df = df.append(df2)
df.loc[:, "obsnme"] = df.index.values
if save_setup_file:
df.to_csv("_setup_" + os.path.split(list_filename)[-1] + ".csv", index=False)
if specify_times is not None:
np.savetxt(
os.path.join(os.path.dirname(flx_filename), "budget_times.config"),
specify_times,
fmt="%s",
)
return df
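# Illustrative usage sketch (not part of the original module); "model.list" is
# a hypothetical MODFLOW list file name:
#
#   import pyemu
#   df = pyemu.gw_utils.setup_mflist_budget_obs("model.list",
#                                               start_datetime="1-1-1970")
#   # writes "flux.dat.ins" and "vol.dat.ins"; apply_mflist_budget_obs() must be
#   # called in the forward run to refresh "flux.dat" and "vol.dat"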
def apply_mflist_budget_obs(
list_filename,
flx_filename="flux.dat",
vol_filename="vol.dat",
start_datetime="1-1-1970",
times=None,
):
"""process a MODFLOW list file to extract flux and volume water budget
entries.
Args:
list_filename (`str`): path and name of the existing modflow list file
flx_filename (`str`, optional): output filename that will contain the
budget flux observations. Default is "flux.dat"
vol_filename (`str`, optional): output filename that will contain the
budget volume observations. Default is "vol.dat"
start_datetime (`str`, optional): a string that can be parsed into a
pandas.TimeStamp. This is used to give budget observations
meaningful names. Default is "1-1-1970".
times (`np.ndarray`-like or `str`, optional): An array of times to
extract from the budget dataframes returned by the flopy
MfListBudget(list_filename).get_dataframe() method. This can be
useful to ensure consistent observation times for PEST.
If type `str`, will assume `times=filename` and attempt to read
single vector (no header or index) from file, parsing datetime
            using pandas. Array needs to be alignable with the index of the dataframe
            returned by the flopy method; care should be taken to ensure that
            this is the case. If set up with `setup_mflist_budget_obs()`
            specifying the `specify_times` argument, `times` should be set to
"budget_times.config".
Note:
This is the companion function of `gw_utils.setup_mflist_budget_obs()`.
Returns:
tuple containing
- **pandas.DataFrame**: a dataframe with flux budget information
- **pandas.DataFrame**: a dataframe with cumulative budget information
"""
try:
import flopy
except Exception as e:
raise Exception("error import flopy: {0}".format(str(e)))
mlf = flopy.utils.MfListBudget(list_filename)
flx, vol = mlf.get_dataframes(start_datetime=start_datetime, diff=True)
if times is not None:
if isinstance(times, str):
if vol.index.tzinfo:
parse_date = {"t": [0]}
names = [None]
else:
parse_date = False
names = ["t"]
times = pd.read_csv(
times, header=None, names=names, parse_dates=parse_date
)["t"].values
flx = flx.loc[times]
vol = vol.loc[times]
flx.to_csv(flx_filename, sep=" ", index_label="datetime", date_format="%Y%m%d")
vol.to_csv(vol_filename, sep=" ", index_label="datetime", date_format="%Y%m%d")
return flx, vol
def _write_mflist_ins(ins_filename, df, prefix):
"""write an instruction file for a MODFLOW list file"""
dt_str = df.index.map(lambda x: x.strftime("%Y%m%d"))
with open(ins_filename, "w") as f:
f.write("pif ~\nl1\n")
for dt in dt_str:
f.write("l1 ")
for col in df.columns:
obsnme = "{0}_{1}_{2}".format(prefix, col, dt)
f.write(" w !{0}!".format(obsnme))
f.write("\n")
def setup_hds_timeseries(
bin_file,
kij_dict,
prefix=None,
include_path=False,
model=None,
postprocess_inact=None,
text=None,
fill=None,
precision="single",
):
"""a function to setup a forward process to extract time-series style values
from a binary modflow binary file (or equivalent format - hds, ucn, sub, cbb, etc).
Args:
bin_file (`str`): path and name of existing modflow binary file - headsave, cell budget and MT3D UCN supported.
kij_dict (`dict`): dictionary of site_name: [k,i,j] pairs. For example: `{"wel1":[0,1,1]}`.
prefix (`str`, optional): string to prepend to site_name when forming observation names. Default is None
include_path (`bool`, optional): flag to setup the binary file processing in directory where the hds_file
is located (if different from where python is running). This is useful for setting up
the process in separate directory for where python is running.
model (`flopy.mbase`, optional): a `flopy.basemodel` instance. If passed, the observation names will
have the datetime of the observation appended to them (using the flopy `start_datetime` attribute.
If None, the observation names will have the zero-based stress period appended to them. Default is None.
postprocess_inact (`float`, optional): Inactive value in heads/ucn file e.g. mt.btn.cinit. If `None`, no
inactive value processing happens. Default is `None`.
text (`str`): the text record entry in the binary file (e.g. "constant_head").
Used to indicate that the binary file is a MODFLOW cell-by-cell budget file.
            If None, a headsave or MT3D unformatted concentration file
            is assumed. Default is None
fill (`float`): fill value for NaNs in the extracted timeseries dataframe. If
`None`, no filling is done, which may yield model run failures as the resulting
processed timeseries CSV file (produced at runtime) may have missing values and
            can't be processed with the corresponding instruction file. Default is `None`.
precision (`str`): the precision of the binary file. Can be "single" or "double".
Default is "single".
Returns:
tuple containing
- **str**: the forward run command to execute the binary file process during model runs.
- **pandas.DataFrame**: a dataframe of observation information for use in the pest control file
Note:
This function writes hds_timeseries.config that must be in the same
dir where `apply_hds_timeseries()` is called during the forward run
Assumes model time units are days
This is the companion function of `gw_utils.apply_hds_timeseries()`.
"""
try:
import flopy
except Exception as e:
print("error importing flopy, returning {0}".format(str(e)))
return
assert os.path.exists(bin_file), "binary file not found"
iscbc = False
if text is not None:
text = text.upper()
try:
# hack: if model is passed and its None, it trips up CellBudgetFile...
if model is not None:
bf = flopy.utils.CellBudgetFile(
bin_file, precision=precision, model=model
)
iscbc = True
else:
bf = flopy.utils.CellBudgetFile(bin_file, precision=precision)
iscbc = True
except Exception as e:
try:
if model is not None:
bf = flopy.utils.HeadFile(
bin_file, precision=precision, model=model, text=text
)
else:
bf = flopy.utils.HeadFile(bin_file, precision=precision, text=text)
except Exception as e1:
raise Exception(
"error instantiating binary file as either CellBudgetFile:{0} or as HeadFile with text arg: {1}".format(
str(e), str(e1)
)
)
if iscbc:
tl = [t.decode().strip() for t in bf.textlist]
if text not in tl:
raise Exception(
"'text' {0} not found in CellBudgetFile.textlist:{1}".format(
text, tl
)
)
elif bin_file.lower().endswith(".ucn"):
try:
bf = flopy.utils.UcnFile(bin_file, precision=precision)
except Exception as e:
raise Exception("error instantiating UcnFile:{0}".format(str(e)))
else:
try:
bf = flopy.utils.HeadFile(bin_file, precision=precision)
except Exception as e:
raise Exception("error instantiating HeadFile:{0}".format(str(e)))
if text is None:
text = "none"
nlay, nrow, ncol = bf.nlay, bf.nrow, bf.ncol
# if include_path:
# pth = os.path.join(*[p for p in os.path.split(hds_file)[:-1]])
# config_file = os.path.join(pth,"{0}_timeseries.config".format(hds_file))
# else:
config_file = "{0}_timeseries.config".format(bin_file)
print("writing config file to {0}".format(config_file))
if fill is None:
fill = "none"
f_config = open(config_file, "w")
if model is not None:
if model.dis.itmuni != 4:
warnings.warn(
"setup_hds_timeseries only supports 'days' time units...", PyemuWarning
)
f_config.write(
"{0},{1},d,{2},{3},{4},{5}\n".format(
os.path.split(bin_file)[-1],
model.start_datetime,
text,
fill,
precision,
iscbc,
)
)
start = pd.to_datetime(model.start_datetime)
else:
f_config.write(
"{0},none,none,{1},{2},{3},{4}\n".format(
os.path.split(bin_file)[-1], text, fill, precision, iscbc
)
)
f_config.write("site,k,i,j\n")
dfs = []
for site, (k, i, j) in kij_dict.items():
assert k >= 0 and k < nlay, k
assert i >= 0 and i < nrow, i
assert j >= 0 and j < ncol, j
site = site.lower().replace(" ", "")
if iscbc:
ts = bf.get_ts((k, i, j), text=text)
# print(ts)
df = pd.DataFrame(data=ts, columns=["totim", site])
else:
df = pd.DataFrame(data=bf.get_ts((k, i, j)), columns=["totim", site])
if model is not None:
dts = start + pd.to_timedelta(df.totim, unit="d")
df.loc[:, "totim"] = dts
# print(df)
f_config.write("{0},{1},{2},{3}\n".format(site, k, i, j))
df.index = df.pop("totim")
dfs.append(df)
f_config.close()
df = pd.concat(dfs, axis=1).T
df.to_csv(bin_file + "_timeseries.processed", sep=" ")
if model is not None:
t_str = df.columns.map(lambda x: x.strftime("%Y%m%d"))
else:
t_str = df.columns.map(lambda x: "{0:08.2f}".format(x))
ins_file = bin_file + "_timeseries.processed.ins"
print("writing instruction file to {0}".format(ins_file))
with open(ins_file, "w") as f:
f.write("pif ~\n")
f.write("l1 \n")
for site in df.index:
# for t in t_str:
f.write("l1 w ")
# for site in df.columns:
for t in t_str:
if prefix is not None:
obsnme = "{0}_{1}_{2}".format(prefix, site, t)
else:
obsnme = "{0}_{1}".format(site, t)
f.write(" !{0}!".format(obsnme))
f.write("\n")
if postprocess_inact is not None:
_setup_postprocess_hds_timeseries(
bin_file, df, config_file, prefix=prefix, model=model
)
bd = "."
if include_path:
bd = os.getcwd()
pth = os.path.join(*[p for p in os.path.split(bin_file)[:-1]])
os.chdir(pth)
config_file = os.path.split(config_file)[-1]
try:
df = apply_hds_timeseries(config_file, postprocess_inact=postprocess_inact)
except Exception as e:
os.chdir(bd)
raise Exception("error in apply_hds_timeseries(): {0}".format(str(e)))
os.chdir(bd)
df = try_process_output_file(ins_file)
if df is None:
raise Exception("error processing {0} instruction file".format(ins_file))
df.loc[:, "weight"] = 0.0
if prefix is not None:
df.loc[:, "obgnme"] = df.index.map(lambda x: "_".join(x.split("_")[:2]))
else:
df.loc[:, "obgnme"] = df.index.map(lambda x: x.split("_")[0])
frun_line = "pyemu.gw_utils.apply_hds_timeseries('{0}',{1})\n".format(
config_file, postprocess_inact
)
return frun_line, df
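# Illustrative usage sketch (not part of the original module); the binary file
# name and the site/cell indices below are hypothetical:
#
#   import pyemu
#   kij_dict = {"wel1": [0, 1, 1], "wel2": [0, 10, 12]}  # site: [k, i, j]
#   frun_line, df = pyemu.gw_utils.setup_hds_timeseries(
#       "model.hds", kij_dict, prefix="hds", fill=-999.0)
#   # writes "model.hds_timeseries.config" and an instruction file; frun_line is
#   # added to the forward run script to rewrite "model.hds_timeseries.processed"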
def apply_hds_timeseries(config_file=None, postprocess_inact=None):
"""process a modflow binary file using a previously written
configuration file
Args:
config_file (`str`, optional): configuration file written by `pyemu.gw_utils.setup_hds_timeseries`.
If `None`, looks for `hds_timeseries.config`
postprocess_inact (`float`, optional): Inactive value in heads/ucn file e.g. mt.btn.cinit. If `None`, no
inactive value processing happens. Default is `None`.
Note:
This is the companion function of `gw_utils.setup_hds_timeseries()`.
"""
import flopy
if config_file is None:
config_file = "hds_timeseries.config"
assert os.path.exists(config_file), config_file
with open(config_file, "r") as f:
line = f.readline()
(
bf_file,
start_datetime,
time_units,
text,
fill,
precision,
_iscbc,
) = line.strip().split(",")
if len(line.strip().split(",")) == 6:
(
bf_file,
start_datetime,
time_units,
text,
fill,
precision,
) = line.strip().split(",")
_iscbc = "false"
else:
(
bf_file,
start_datetime,
time_units,
text,
fill,
precision,
_iscbc,
) = line.strip().split(",")
site_df = pd.read_csv(f)
text = text.upper()
if _iscbc.lower().strip() == "false":
iscbc = False
elif _iscbc.lower().strip() == "true":
iscbc = True
else:
raise Exception(
"apply_hds_timeseries() error: unrecognized 'iscbc' string in config file: {0}".format(
_iscbc
)
)
assert os.path.exists(bf_file), "head save file not found"
if iscbc:
try:
bf = flopy.utils.CellBudgetFile(bf_file, precision=precision)
except Exception as e:
raise Exception("error instantiating CellBudgetFile:{0}".format(str(e)))
elif bf_file.lower().endswith(".ucn"):
try:
bf = flopy.utils.UcnFile(bf_file, precision=precision)
except Exception as e:
raise Exception("error instantiating UcnFile:{0}".format(str(e)))
else:
try:
if text != "NONE":
bf = flopy.utils.HeadFile(bf_file, text=text, precision=precision)
else:
bf = flopy.utils.HeadFile(bf_file, precision=precision)
except Exception as e:
raise Exception("error instantiating HeadFile:{0}".format(str(e)))
nlay, nrow, ncol = bf.nlay, bf.nrow, bf.ncol
dfs = []
for site, k, i, j in zip(site_df.site, site_df.k, site_df.i, site_df.j):
assert k >= 0 and k < nlay
assert i >= 0 and i < nrow
assert j >= 0 and j < ncol
if iscbc:
df = pd.DataFrame(
data=bf.get_ts((k, i, j), text=text), columns=["totim", site]
)
else:
df = pd.DataFrame(data=bf.get_ts((k, i, j)), columns=["totim", site])
df.index = df.pop("totim")
dfs.append(df)
df = pd.concat(dfs, axis=1).T
if df.shape != df.dropna().shape:
warnings.warn("NANs in processed timeseries file", PyemuWarning)
if fill.upper() != "NONE":
fill = float(fill)
df.fillna(fill, inplace=True)
# print(df)
df.to_csv(bf_file + "_timeseries.processed", sep=" ")
if postprocess_inact is not None:
_apply_postprocess_hds_timeseries(config_file, postprocess_inact)
return df
def _setup_postprocess_hds_timeseries(
hds_file, df, config_file, prefix=None, model=None
):
"""Dirty function to setup post processing concentrations in inactive/dry cells"""
warnings.warn(
"Setting up post processing of hds or ucn timeseries obs. "
"Prepending 'pp' to obs name may cause length to exceed 20 chars",
PyemuWarning,
)
if model is not None:
t_str = df.columns.map(lambda x: x.strftime("%Y%m%d"))
else:
t_str = df.columns.map(lambda x: "{0:08.2f}".format(x))
if prefix is not None:
prefix = "pp{0}".format(prefix)
else:
prefix = "pp"
ins_file = hds_file + "_timeseries.post_processed.ins"
print("writing instruction file to {0}".format(ins_file))
with open(ins_file, "w") as f:
f.write("pif ~\n")
f.write("l1 \n")
for site in df.index:
f.write("l1 w ")
# for site in df.columns:
for t in t_str:
obsnme = "{0}{1}_{2}".format(prefix, site, t)
f.write(" !{0}!".format(obsnme))
f.write("\n")
frun_line = "pyemu.gw_utils._apply_postprocess_hds_timeseries('{0}')\n".format(
config_file
)
return frun_line
def _apply_postprocess_hds_timeseries(config_file=None, cinact=1e30):
"""private function to post processing binary files"""
import flopy
if config_file is None:
config_file = "hds_timeseries.config"
assert os.path.exists(config_file), config_file
with open(config_file, "r") as f:
line = f.readline()
(
hds_file,
start_datetime,
time_units,
text,
fill,
precision,
_iscbc,
) = line.strip().split(",")
if len(line.strip().split(",")) == 6:
(
hds_file,
start_datetime,
time_units,
text,
fill,
precision,
) = line.strip().split(",")
_iscbc = "false"
else:
(
hds_file,
start_datetime,
time_units,
text,
fill,
precision,
_iscbc,
) = line.strip().split(",")
site_df = pd.read_csv(f)
# print(site_df)
text = text.upper()
assert os.path.exists(hds_file), "head save file not found"
if hds_file.lower().endswith(".ucn"):
try:
hds = flopy.utils.UcnFile(hds_file, precision=precision)
except Exception as e:
raise Exception("error instantiating UcnFile:{0}".format(str(e)))
else:
try:
if text != "NONE":
hds = flopy.utils.HeadFile(hds_file, text=text, precision=precision)
else:
hds = flopy.utils.HeadFile(hds_file, precision=precision)
except Exception as e:
raise Exception("error instantiating HeadFile:{0}".format(str(e)))
nlay, nrow, ncol = hds.nlay, hds.nrow, hds.ncol
dfs = []
for site, k, i, j in zip(site_df.site, site_df.k, site_df.i, site_df.j):
assert k >= 0 and k < nlay
assert i >= 0 and i < nrow
assert j >= 0 and j < ncol
if text.upper() != "NONE":
df = pd.DataFrame(data=hds.get_ts((k, i, j)), columns=["totim", site])
else:
df = pd.DataFrame(data=hds.get_ts((k, i, j)), columns=["totim", site])
df.index = df.pop("totim")
inact_obs = df[site].apply(lambda x: np.isclose(x, cinact))
if inact_obs.sum() > 0:
assert k + 1 < nlay, "Inactive observation in lowest layer"
df_lower = pd.DataFrame(
data=hds.get_ts((k + 1, i, j)), columns=["totim", site]
)
df_lower.index = df_lower.pop("totim")
df.loc[inact_obs] = df_lower.loc[inact_obs]
print(
"{0} observation(s) post-processed for site {1} at kij ({2},{3},{4})".format(
inact_obs.sum(), site, k, i, j
)
)
dfs.append(df)
df = pd.concat(dfs, axis=1).T
# print(df)
df.to_csv(hds_file + "_timeseries.post_processed", sep=" ")
return df
def setup_hds_obs(
hds_file,
kperk_pairs=None,
skip=None,
prefix="hds",
text="head",
precision="single",
include_path=False,
):
"""a function to setup using all values from a layer-stress period
pair for observations.
Args:
hds_file (`str`): path and name of an existing MODFLOW head-save file.
If the hds_file endswith 'ucn', then the file is treated as a UcnFile type.
kperk_pairs ([(int,int)]): a list of len two tuples which are pairs of kper
(zero-based stress period index) and k (zero-based layer index) to
setup observations for. If None, then all layers and stress period records
            found in the file will be used. Caution: a very large number of observations may be produced!
skip (variable, optional): a value or function used to determine which values
            to skip when setting up observations. If np.isscalar(skip)
            is True, then values equal to skip will not be used.
            skip can also be a np.ndarray with dimensions equal to the model;
            observations are set up only for cells with non-zero values in the array.
            If skip is neither an ndarray nor a scalar, it will be treated as a lambda function that
returns np.NaN if the value should be skipped.
prefix (`str`): the prefix to use for the observation names. default is "hds".
text (`str`): the text tag the flopy HeadFile instance. Default is "head"
        precision (`str`): the precision string for the flopy HeadFile instance. Default is "single"
include_path (`bool`, optional): flag to setup the binary file processing in directory where the hds_file
is located (if different from where python is running). This is useful for setting up
the process in separate directory for where python is running.
Returns:
tuple containing
- **str**: the forward run script line needed to execute the headsave file observation
operation
- **pandas.DataFrame**: a dataframe of pest control file information
Note:
Writes an instruction file and a _setup_ csv used construct a control file.
This is the companion function to `gw_utils.apply_hds_obs()`.
"""
try:
import flopy
except Exception as e:
print("error importing flopy, returning {0}".format(str(e)))
return
assert os.path.exists(hds_file), "head save file not found"
if hds_file.lower().endswith(".ucn"):
try:
hds = flopy.utils.UcnFile(hds_file)
except Exception as e:
raise Exception("error instantiating UcnFile:{0}".format(str(e)))
elif text.lower() == "headu":
try:
hds = flopy.utils.HeadUFile(hds_file, text=text, precision=precision)
except Exception as e:
raise Exception("error instantiating HeadFile:{0}".format(str(e)))
else:
try:
hds = flopy.utils.HeadFile(hds_file, text=text, precision=precision)
except Exception as e:
raise Exception("error instantiating HeadFile:{0}".format(str(e)))
if kperk_pairs is None:
kperk_pairs = []
for kstp, kper in hds.kstpkper:
kperk_pairs.extend([(kper - 1, k) for k in range(hds.nlay)])
if len(kperk_pairs) == 2:
try:
if len(kperk_pairs[0]) == 2:
pass
except:
kperk_pairs = [kperk_pairs]
# if start_datetime is not None:
# start_datetime = pd.to_datetime(start_datetime)
# dts = start_datetime + pd.to_timedelta(hds.times,unit='d')
data = {}
kpers = [kper - 1 for kstp, kper in hds.kstpkper]
for kperk_pair in kperk_pairs:
kper, k = kperk_pair
assert kper in kpers, "kper not in hds:{0}".format(kper)
assert k in range(hds.nlay), "k not in hds:{0}".format(k)
kstp = last_kstp_from_kper(hds, kper)
d = hds.get_data(kstpkper=(kstp, kper))[k]
data["{0}_{1}".format(kper, k)] = d.flatten()
# data[(kper,k)] = d.flatten()
idx, iidx, jidx = [], [], []
for _ in range(len(data)):
for i in range(hds.nrow):
iidx.extend([i for _ in range(hds.ncol)])
jidx.extend([j for j in range(hds.ncol)])
idx.extend(["i{0:04d}_j{1:04d}".format(i, j) for j in range(hds.ncol)])
idx = idx[: hds.nrow * hds.ncol]
df = pd.DataFrame(data, index=idx)
data_cols = list(df.columns)
data_cols.sort()
# df.loc[:,"iidx"] = iidx
# df.loc[:,"jidx"] = jidx
if skip is not None:
for col in data_cols:
if np.isscalar(skip):
df.loc[df.loc[:, col] == skip, col] = np.NaN
elif isinstance(skip, np.ndarray):
assert (
skip.ndim >= 2
), "skip passed as {}D array, At least 2D (<= 4D) array required".format(
skip.ndim
)
assert skip.shape[-2:] == (
hds.nrow,
hds.ncol,
), "Array dimensions of arg. skip needs to match model dimensions ({0},{1}). ({2},{3}) passed".format(
hds.nrow, hds.ncol, skip.shape[-2], skip.shape[-1]
)
if skip.ndim == 2:
print(
"2D array passed for skip, assuming constant for all layers and kper"
)
skip = np.tile(skip, (len(kpers), hds.nlay, 1, 1))
if skip.ndim == 3:
print("3D array passed for skip, assuming constant for all kper")
skip = np.tile(skip, (len(kpers), 1, 1, 1))
kper, k = [int(c) for c in col.split("_")]
df.loc[
df.index.map(
lambda x: skip[
kper,
k,
int(x.split("_")[0].strip("i")),
int(x.split("_")[1].strip("j")),
]
== 0
),
col,
] = np.NaN
else:
df.loc[:, col] = df.loc[:, col].apply(skip)
# melt to long form
df = df.melt(var_name="kperk", value_name="obsval")
# set row and col identifies
df.loc[:, "iidx"] = iidx
df.loc[:, "jidx"] = jidx
# drop nans from skip
df = df.dropna()
# set some additional identifiers
df.loc[:, "kper"] = df.kperk.apply(lambda x: int(x.split("_")[0]))
df.loc[:, "kidx"] = df.pop("kperk").apply(lambda x: int(x.split("_")[1]))
# form obs names
# def get_kper_str(kper):
# if start_datetime is not None:
# return dts[int(kper)].strftime("%Y%m%d")
# else:
# return "kper{0:04.0f}".format(kper)
fmt = prefix + "_{0:02.0f}_{1:03.0f}_{2:03.0f}_{3:03.0f}"
# df.loc[:,"obsnme"] = df.apply(lambda x: fmt.format(x.kidx,x.iidx,x.jidx,
# get_kper_str(x.kper)),axis=1)
df.loc[:, "obsnme"] = df.apply(
lambda x: fmt.format(x.kidx, x.iidx, x.jidx, x.kper), axis=1
)
df.loc[:, "ins_str"] = df.obsnme.apply(lambda x: "l1 w !{0}!".format(x))
df.loc[:, "obgnme"] = prefix
# write the instruction file
with open(hds_file + ".dat.ins", "w") as f:
f.write("pif ~\nl1\n")
df.ins_str.to_string(f, index=False, header=False)
# write the corresponding output file
df.loc[:, ["obsnme", "obsval"]].to_csv(hds_file + ".dat", sep=" ", index=False)
hds_path = os.path.dirname(hds_file)
setup_file = os.path.join(
hds_path, "_setup_{0}.csv".format(os.path.split(hds_file)[-1])
)
df.to_csv(setup_file)
if not include_path:
hds_file = os.path.split(hds_file)[-1]
fwd_run_line = (
"pyemu.gw_utils.apply_hds_obs('{0}',precision='{1}',text='{2}')\n".format(
hds_file, precision, text
)
)
df.index = df.obsnme
return fwd_run_line, df
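# Illustrative usage sketch (not part of the original module); "model.hds" and
# the kperk_pairs below are hypothetical:
#
#   import pyemu
#   frun_line, df = pyemu.gw_utils.setup_hds_obs(
#       "model.hds", kperk_pairs=[(0, 0), (1, 0)], skip=-999.0, prefix="hds")
#   # writes "model.hds.dat.ins" and a "_setup_" csv; frun_line calls
#   # apply_hds_obs() during the forward run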
def last_kstp_from_kper(hds, kper):
"""function to find the last time step (kstp) for a
    given stress period (kper) in a modflow head save file.
Args:
hds (`flopy.utils.HeadFile`): head save file
kper (`int`): the zero-index stress period number
Returns:
**int**: the zero-based last time step during stress period
kper in the head save file
"""
# find the last kstp with this kper
kstp = -1
for kkstp, kkper in hds.kstpkper:
if kkper == kper + 1 and kkstp > kstp:
kstp = kkstp
if kstp == -1:
raise Exception("kstp not found for kper {0}".format(kper))
kstp -= 1
return kstp
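# Illustrative sketch (not part of the original module) of how last_kstp_from_kper()
# is typically used; "model.hds" is a hypothetical head save file name:
#
#   import flopy
#   hds = flopy.utils.HeadFile("model.hds")
#   kstp = last_kstp_from_kper(hds, kper=0)
#   data = hds.get_data(kstpkper=(kstp, 0))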
def apply_hds_obs(hds_file, inact_abs_val=1.0e20, precision="single", text="head"):
"""process a modflow head save file. A companion function to
`gw_utils.setup_hds_obs()` that is called during the forward run process
Args:
hds_file (`str`): a modflow head save filename. if hds_file ends with 'ucn',
then the file is treated as a UcnFile type.
        inact_abs_val (`float`, optional): the value that marks the minimum and maximum
active value. values in the headsave file greater than `inact_abs_val` or less
than -`inact_abs_val` are reset to `inact_abs_val`
Returns:
**pandas.DataFrame**: a dataframe with extracted simulated values.
Note:
This is the companion function to `gw_utils.setup_hds_obs()`.
"""
try:
import flopy
except Exception as e:
raise Exception("apply_hds_obs(): error importing flopy: {0}".format(str(e)))
from .. import pst_utils
assert os.path.exists(hds_file)
out_file = hds_file + ".dat"
ins_file = out_file + ".ins"
assert os.path.exists(ins_file)
df = pd.DataFrame({"obsnme": pst_utils.parse_ins_file(ins_file)})
df.index = df.obsnme
    # populate metadata
items = ["k", "i", "j", "kper"]
for i, item in enumerate(items):
df.loc[:, item] = df.obsnme.apply(lambda x: int(x.split("_")[i + 1]))
if hds_file.lower().endswith("ucn"):
hds = flopy.utils.UcnFile(hds_file)
elif text.lower() == "headu":
hds = flopy.utils.HeadUFile(hds_file)
else:
hds = flopy.utils.HeadFile(hds_file, precision=precision, text=text)
kpers = df.kper.unique()
df.loc[:, "obsval"] = np.NaN
for kper in kpers:
kstp = last_kstp_from_kper(hds, kper)
data = hds.get_data(kstpkper=(kstp, kper))
# jwhite 15jan2018 fix for really large values that are getting some
# trash added to them...
if text.lower() != "headu":
data[np.isnan(data)] = 0.0
data[data > np.abs(inact_abs_val)] = np.abs(inact_abs_val)
data[data < -np.abs(inact_abs_val)] = -np.abs(inact_abs_val)
df_kper = df.loc[df.kper == kper, :]
df.loc[df_kper.index, "obsval"] = data[df_kper.k, df_kper.i, df_kper.j]
else:
df_kper = df.loc[df.kper == kper, :]
for k, d in enumerate(data):
d[np.isnan(d)] = 0.0
d[d > np.abs(inact_abs_val)] = np.abs(inact_abs_val)
d[d < -np.abs(inact_abs_val)] = -np.abs(inact_abs_val)
df_kperk = df_kper.loc[df_kper.k == k, :]
df.loc[df_kperk.index, "obsval"] = d[df_kperk.i]
assert df.dropna().shape[0] == df.shape[0]
df.loc[:, ["obsnme", "obsval"]].to_csv(out_file, index=False, sep=" ")
return df
def setup_sft_obs(sft_file, ins_file=None, start_datetime=None, times=None, ncomp=1):
"""writes a post-processor and instruction file for a mt3d-usgs sft output file
Args:
sft_file (`str`): path and name of an existing sft output file (ASCII)
ins_file (`str`, optional): the name of the instruction file to create.
If None, the name is `sft_file`+".ins". Default is `None`.
start_datetime (`str`): a pandas.to_datetime() compatible str. If not None,
then the resulting observation names have the datetime
suffix. If None, the suffix is the output totim. Default
is `None`.
times ([`float`]): a list of times to make observations for. If None, all times
found in the file are used. Default is None.
ncomp (`int`): number of components in transport model. Default is 1.
Returns:
**pandas.DataFrame**: a dataframe with observation names and values for the sft simulated
concentrations.
Note:
This is the companion function to `gw_utils.apply_sft_obs()`.
"""
df = pd.read_csv(sft_file, skiprows=1, delim_whitespace=True)
df.columns = [c.lower().replace("-", "_") for c in df.columns]
if times is None:
times = df.time.unique()
missing = []
utimes = df.time.unique()
for t in times:
if t not in utimes:
missing.append(str(t))
if len(missing) > 0:
print(df.time)
raise Exception("the following times are missing:{0}".format(",".join(missing)))
with open("sft_obs.config", "w") as f:
f.write(sft_file + "\n")
[f.write("{0:15.6E}\n".format(t)) for t in times]
df = apply_sft_obs()
utimes = df.time.unique()
for t in times:
assert t in utimes, "time {0} missing in processed dataframe".format(t)
idx = df.time.apply(lambda x: x in times)
if start_datetime is not None:
start_datetime = pd.to_datetime(start_datetime)
df.loc[:, "time_str"] = pd.to_timedelta(df.time, unit="d") + start_datetime
df.loc[:, "time_str"] = df.time_str.apply(
lambda x: datetime.strftime(x, "%Y%m%d")
)
else:
df.loc[:, "time_str"] = df.time.apply(lambda x: "{0:08.2f}".format(x))
df.loc[:, "ins_str"] = "l1\n"
# check for multiple components
df_times = df.loc[idx, :]
df.loc[:, "icomp"] = 1
icomp_idx = list(df.columns).index("icomp")
for t in times:
df_time = df.loc[df.time == t, :].copy()
vc = df_time.sfr_node.value_counts()
ncomp = vc.max()
assert np.all(vc.values == ncomp)
nstrm = df_time.shape[0] / ncomp
for icomp in range(ncomp):
s = int(nstrm * (icomp))
e = int(nstrm * (icomp + 1))
idxs = df_time.iloc[s:e, :].index
# df_time.iloc[nstrm*(icomp):nstrm*(icomp+1),icomp_idx.loc["icomp"] = int(icomp+1)
df_time.loc[idxs, "icomp"] = int(icomp + 1)
# df.loc[df_time.index,"ins_str"] = df_time.apply(lambda x: "l1 w w !sfrc{0}_{1}_{2}! !swgw{0}_{1}_{2}! !gwcn{0}_{1}_{2}!\n".\
# format(x.sfr_node,x.icomp,x.time_str),axis=1)
df.loc[df_time.index, "ins_str"] = df_time.apply(
lambda x: "l1 w w !sfrc{0}_{1}_{2}!\n".format(
x.sfr_node, x.icomp, x.time_str
),
axis=1,
)
df.index = np.arange(df.shape[0])
if ins_file is None:
ins_file = sft_file + ".processed.ins"
with open(ins_file, "w") as f:
f.write("pif ~\nl1\n")
[f.write(i) for i in df.ins_str]
# df = try_process_ins_file(ins_file,sft_file+".processed")
df = try_process_output_file(ins_file, sft_file + ".processed")
return df
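# Illustrative usage sketch (not part of the original module); "model.sft.out"
# is a hypothetical MT3D-USGS SFT output file name:
#
#   import pyemu
#   df = pyemu.gw_utils.setup_sft_obs("model.sft.out", start_datetime="1-1-1970")
#   # writes "sft_obs.config" and "model.sft.out.processed.ins"; apply_sft_obs()
#   # re-reads the config file at run time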
def apply_sft_obs():
"""process an mt3d-usgs sft ASCII output file using a previous-written
config file
Returns:
**pandas.DataFrame**: a dataframe of extracted simulated outputs
Note:
This is the companion function to `gw_utils.setup_sft_obs()`.
"""
# this is for dealing with the missing 'e' problem
def try_cast(x):
try:
return float(x)
except:
return 0.0
times = []
with open("sft_obs.config") as f:
sft_file = f.readline().strip()
for line in f:
times.append(float(line.strip()))
df = pd.read_csv(sft_file, skiprows=1, delim_whitespace=True) # ,nrows=10000000)
df.columns = [c.lower().replace("-", "_") for c in df.columns]
df = df.loc[df.time.apply(lambda x: x in times), :]
# print(df.dtypes)
# normalize
for c in df.columns:
# print(c)
if not "node" in c:
df.loc[:, c] = df.loc[:, c].apply(try_cast)
# print(df.loc[df.loc[:,c].apply(lambda x : type(x) == str),:])
if df.dtypes[c] == float:
df.loc[df.loc[:, c] < 1e-30, c] = 0.0
df.loc[df.loc[:, c] > 1e30, c] = 1.0e30
df.loc[:, "sfr_node"] = df.sfr_node.apply(np.int)
df.to_csv(sft_file + ".processed", sep=" ", index=False)
return df
def setup_sfr_seg_parameters(
nam_file, model_ws=".", par_cols=None, tie_hcond=True, include_temporal_pars=None
):
"""Setup multiplier parameters for SFR segment data.
Args:
nam_file (`str`): MODFLOw name file. DIS, BAS, and SFR must be
available as pathed in the nam_file. Optionally, `nam_file` can be
an existing `flopy.modflow.Modflow`.
model_ws (`str`): model workspace for flopy to load the MODFLOW model from
        par_cols ([`str`]): a list of segment data entries to parameterize
tie_hcond (`bool`): flag to use same mult par for hcond1 and hcond2 for a
given segment. Default is `True`.
include_temporal_pars ([`str`]): list of spatially-global multipliers to set up for
each stress period. Default is None
Returns:
**pandas.DataFrame**: a dataframe with useful parameter setup information
Note:
This function handles the standard input case, not all the cryptic SFR options. Loads the
dis, bas, and sfr files with flopy using model_ws.
This is the companion function to `gw_utils.apply_sfr_seg_parameters()` .
        The number (and numbering) of segment data entries must be consistent across
all stress periods.
Writes `nam_file` +"_backup_.sfr" as the backup of the original sfr file
Skips values = 0.0 since multipliers don't work for these
"""
try:
import flopy
except Exception as e:
return
if par_cols is None:
par_cols = ["flow", "runoff", "hcond1", "pptsw"]
if tie_hcond:
if "hcond1" not in par_cols or "hcond2" not in par_cols:
tie_hcond = False
if isinstance(nam_file, flopy.modflow.mf.Modflow) and nam_file.sfr is not None:
m = nam_file
nam_file = m.namefile
model_ws = m.model_ws
else:
# load MODFLOW model # is this needed? could we just pass the model if it has already been read in?
m = flopy.modflow.Modflow.load(
nam_file, load_only=["sfr"], model_ws=model_ws, check=False, forgive=False
)
if include_temporal_pars:
if include_temporal_pars is True:
tmp_par_cols = {col: range(m.dis.nper) for col in par_cols}
elif isinstance(include_temporal_pars, str):
tmp_par_cols = {include_temporal_pars: range(m.dis.nper)}
elif isinstance(include_temporal_pars, list):
tmp_par_cols = {col: range(m.dis.nper) for col in include_temporal_pars}
elif isinstance(include_temporal_pars, dict):
tmp_par_cols = include_temporal_pars
include_temporal_pars = True
else:
tmp_par_cols = {}
include_temporal_pars = False
# make backup copy of sfr file
shutil.copy(
os.path.join(model_ws, m.sfr.file_name[0]),
os.path.join(model_ws, nam_file + "_backup_.sfr"),
)
# get the segment data (dict)
segment_data = m.sfr.segment_data
shape = segment_data[list(segment_data.keys())[0]].shape
# check
for kper, seg_data in m.sfr.segment_data.items():
assert (
seg_data.shape == shape
), "cannot use: seg data must have the same number of entires for all kpers"
seg_data_col_order = list(seg_data.dtype.names)
# convert segment_data dictionary to multi index df - this could get ugly
reform = {
(k, c): segment_data[k][c]
for k in segment_data.keys()
for c in segment_data[k].dtype.names
}
seg_data_all_kper = pd.DataFrame.from_dict(reform)
seg_data_all_kper.columns.names = ["kper", "col"]
# extract the first seg data kper to a dataframe
seg_data = seg_data_all_kper[0].copy() # pd.DataFrame.from_records(seg_data)
# make sure all par cols are found and search of any data in kpers
missing = []
cols = par_cols.copy()
for par_col in set(par_cols + list(tmp_par_cols.keys())):
if par_col not in seg_data.columns:
if par_col in cols:
missing.append(cols.pop(cols.index(par_col)))
if par_col in tmp_par_cols.keys():
_ = tmp_par_cols.pop(par_col)
# look across all kper in multiindex df to check for values entry - fill with absmax should capture entries
else:
seg_data.loc[:, par_col] = (
seg_data_all_kper.loc[:, (slice(None), par_col)]
.abs()
.max(level=1, axis=1)
)
if len(missing) > 0:
warnings.warn(
"the following par_cols were not found in segment data: {0}".format(
",".join(missing)
),
PyemuWarning,
)
if len(missing) >= len(par_cols):
warnings.warn(
"None of the passed par_cols ({0}) were found in segment data.".format(
",".join(par_cols)
),
PyemuWarning,
)
seg_data = seg_data[seg_data_col_order] # reset column orders to inital
seg_data_org = seg_data.copy()
seg_data.to_csv(os.path.join(model_ws, "sfr_seg_pars.dat"), sep=",")
# the data cols not to parameterize
# better than a column indexer as pandas can change column orders
idx_cols = ["nseg", "icalc", "outseg", "iupseg", "iprior", "nstrpts"]
notpar_cols = [c for c in seg_data.columns if c not in cols + idx_cols]
# process par cols
tpl_str, pvals = [], []
if include_temporal_pars:
tmp_pnames, tmp_tpl_str = [], []
tmp_df = pd.DataFrame(
data={c: 1.0 for c in tmp_par_cols.keys()},
index=list(m.sfr.segment_data.keys()),
)
tmp_df.sort_index(inplace=True)
tmp_df.to_csv(os.path.join(model_ws, "sfr_seg_temporal_pars.dat"))
for par_col in set(cols + list(tmp_par_cols.keys())):
print(par_col)
prefix = par_col
if tie_hcond and par_col == "hcond2":
prefix = "hcond1"
if seg_data.loc[:, par_col].sum() == 0.0:
print("all zeros for {0}...skipping...".format(par_col))
# seg_data.loc[:,par_col] = 1
# all zero so no need to set up
if par_col in cols:
# - add to notpar
notpar_cols.append(cols.pop(cols.index(par_col)))
if par_col in tmp_par_cols.keys():
_ = tmp_par_cols.pop(par_col)
if par_col in cols:
seg_data.loc[:, par_col] = seg_data.apply(
lambda x: "~ {0}_{1:04d} ~".format(prefix, int(x.nseg))
if float(x[par_col]) != 0.0
else "1.0",
axis=1,
)
org_vals = seg_data_org.loc[seg_data_org.loc[:, par_col] != 0.0, par_col]
pnames = seg_data.loc[org_vals.index, par_col]
pvals.extend(list(org_vals.values))
tpl_str.extend(list(pnames.values))
if par_col in tmp_par_cols.keys():
parnme = tmp_df.index.map(
lambda x: "{0}_{1:04d}_tmp".format(par_col, int(x))
if x in tmp_par_cols[par_col]
else 1.0
)
sel = parnme != 1.0
tmp_df.loc[sel, par_col] = parnme[sel].map(lambda x: "~ {0} ~".format(x))
tmp_tpl_str.extend(list(tmp_df.loc[sel, par_col].values))
tmp_pnames.extend(list(parnme[sel].values))
pnames = [t.replace("~", "").strip() for t in tpl_str]
df = pd.DataFrame(
{"parnme": pnames, "org_value": pvals, "tpl_str": tpl_str}, index=pnames
)
df.drop_duplicates(inplace=True)
if df.empty:
warnings.warn(
"No spatial sfr segment parameters have been set up, "
"either none of {0} were found or all were zero.".format(
",".join(par_cols)
),
PyemuWarning,
)
# return df
# set not par cols to 1.0
seg_data.loc[:, notpar_cols] = "1.0"
# write the template file
_write_df_tpl(os.path.join(model_ws, "sfr_seg_pars.dat.tpl"), seg_data, sep=",")
# make sure the tpl file exists and has the same num of pars
parnme = parse_tpl_file(os.path.join(model_ws, "sfr_seg_pars.dat.tpl"))
assert len(parnme) == df.shape[0]
# set some useful par info
df["pargp"] = df.parnme.apply(lambda x: x.split("_")[0])
if include_temporal_pars:
_write_df_tpl(
filename=os.path.join(model_ws, "sfr_seg_temporal_pars.dat.tpl"), df=tmp_df
)
pargp = [pname.split("_")[0] + "_tmp" for pname in tmp_pnames]
tmp_df = pd.DataFrame(
data={"parnme": tmp_pnames, "pargp": pargp}, index=tmp_pnames
)
if not tmp_df.empty:
tmp_df.loc[:, "org_value"] = 1.0
tmp_df.loc[:, "tpl_str"] = tmp_tpl_str
df = df.append(tmp_df[df.columns])
if df.empty:
warnings.warn(
"No sfr segment parameters have been set up, "
"either none of {0} were found or all were zero.".format(
",".join(set(par_cols + list(tmp_par_cols.keys())))
),
PyemuWarning,
)
return df
# write the config file used by apply_sfr_pars()
with open(os.path.join(model_ws, "sfr_seg_pars.config"), "w") as f:
f.write("nam_file {0}\n".format(nam_file))
f.write("model_ws {0}\n".format(model_ws))
f.write("mult_file sfr_seg_pars.dat\n")
f.write("sfr_filename {0}\n".format(m.sfr.file_name[0]))
if include_temporal_pars:
f.write("time_mult_file sfr_seg_temporal_pars.dat\n")
# set some useful par info
df.loc[:, "parubnd"] = 1.25
df.loc[:, "parlbnd"] = 0.75
hpars = df.loc[df.pargp.apply(lambda x: x.startswith("hcond")), "parnme"]
df.loc[hpars, "parubnd"] = 100.0
df.loc[hpars, "parlbnd"] = 0.01
return df
def setup_sfr_reach_parameters(nam_file, model_ws=".", par_cols=["strhc1"]):
"""Setup multiplier parameters for reach data, when the reachinput option is specified in SFR.
Args:
nam_file (`str`): MODFLOW name file. DIS, BAS, and SFR must be
available as pathed in the nam_file. Optionally, `nam_file` can be
an existing `flopy.modflow.Modflow`.
model_ws (`str`): model workspace for flopy to load the MODFLOW model from
par_cols ([`str`]): a list of reach data entries to parameterize. Default is ["strhc1"].
Returns:
**pandas.DataFrame**: a dataframe with useful parameter setup information
Note:
Similar to `gw_utils.setup_sfr_seg_parameters()`, this method will apply multiplier parameters to the sfr reach data
Can load the dis, bas, and sfr files with flopy using model_ws. Or can pass a model object
(SFR loading can be slow)
This is the companion function of `gw_utils.apply_sfr_reach_parameters()`
Skips values = 0.0 since multipliers don't work for these
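Example (a minimal, hypothetical sketch; the name file and workspace below are
placeholders)::
    from pyemu.utils import gw_utils
    reach_df = gw_utils.setup_sfr_reach_parameters(
        "freyberg.nam", model_ws="template", par_cols=["strhc1"])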
"""
try:
import flopy
except Exception as e:
return
if par_cols is None:
par_cols = ["strhc1"]
if isinstance(nam_file, flopy.modflow.mf.Modflow) and nam_file.sfr is not None:
# flopy MODFLOW model has been passed and has SFR loaded
m = nam_file
nam_file = m.namefile
model_ws = m.model_ws
else:
# if model has not been passed or SFR not loaded # load MODFLOW model
m = flopy.modflow.Modflow.load(
nam_file, load_only=["sfr"], model_ws=model_ws, check=False, forgive=False
)
# get reachdata as dataframe
reach_data = pd.DataFrame.from_records(m.sfr.reach_data)
# write inital reach_data as csv
reach_data_orig = reach_data.copy()
reach_data.to_csv(os.path.join(m.model_ws, "sfr_reach_pars.dat"), sep=",")
# generate template file with pars in par_cols
# process par cols
tpl_str, pvals = [], []
# par_cols=["strhc1"]
idx_cols = ["node", "k", "i", "j", "iseg", "ireach", "reachID", "outreach"]
# the data cols not to parameterize
notpar_cols = [c for c in reach_data.columns if c not in par_cols + idx_cols]
# make sure all par cols are found in the reach data
missing = []
cols = par_cols.copy()
for par_col in par_cols:
if par_col not in reach_data.columns:
missing.append(par_col)
cols.remove(par_col)
if len(missing) > 0:
warnings.warn(
"the following par_cols were not found in reach data: {0}".format(
",".join(missing)
),
PyemuWarning,
)
if len(missing) >= len(par_cols):
warnings.warn(
"None of the passed par_cols ({0}) were found in reach data.".format(
",".join(par_cols)
),
PyemuWarning,
)
for par_col in cols:
if par_col == "strhc1":
prefix = "strk" # shorten par
else:
prefix = par_col
reach_data.loc[:, par_col] = reach_data.apply(
lambda x: "~ {0}_{1:04d} ~".format(prefix, int(x.reachID))
if float(x[par_col]) != 0.0
else "1.0",
axis=1,
)
org_vals = reach_data_orig.loc[reach_data_orig.loc[:, par_col] != 0.0, par_col]
pnames = reach_data.loc[org_vals.index, par_col]
pvals.extend(list(org_vals.values))
tpl_str.extend(list(pnames.values))
pnames = [t.replace("~", "").strip() for t in tpl_str]
df = pd.DataFrame(
{"parnme": pnames, "org_value": pvals, "tpl_str": tpl_str}, index=pnames
)
df.drop_duplicates(inplace=True)
if df.empty:
warnings.warn(
"No sfr reach parameters have been set up, either none of {0} were found or all were zero.".format(
",".join(par_cols)
),
PyemuWarning,
)
else:
# set not par cols to 1.0
reach_data.loc[:, notpar_cols] = "1.0"
# write the template file
_write_df_tpl(
os.path.join(model_ws, "sfr_reach_pars.dat.tpl"), reach_data, sep=","
)
# write the config file used by apply_sfr_pars()
with open(os.path.join(model_ws, "sfr_reach_pars.config"), "w") as f:
f.write("nam_file {0}\n".format(nam_file))
f.write("model_ws {0}\n".format(model_ws))
f.write("mult_file sfr_reach_pars.dat\n")
f.write("sfr_filename {0}".format(m.sfr.file_name[0]))
# make sure the tpl file exists and has the same num of pars
parnme = parse_tpl_file(os.path.join(model_ws, "sfr_reach_pars.dat.tpl"))
assert len(parnme) == df.shape[0]
# set some useful par info
df.loc[:, "pargp"] = df.parnme.apply(lambda x: x.split("_")[0])
df.loc[:, "parubnd"] = 1.25
df.loc[:, "parlbnd"] = 0.75
hpars = df.loc[df.pargp.apply(lambda x: x.startswith("strk")), "parnme"]
df.loc[hpars, "parubnd"] = 100.0
df.loc[hpars, "parlbnd"] = 0.01
return df
def apply_sfr_seg_parameters(seg_pars=True, reach_pars=False):
"""apply the SFR segment multiplier parameters.
Args:
seg_pars (`bool`, optional): flag to apply segment-based parameters.
Default is True
reach_pars (`bool`, optional): flag to apply reach-based parameters.
Default is False
Returns:
**flopy.modflow.ModflowSfr**: the modified SFR package instance
Note:
Expects "sfr_seg_pars.config" to exist
Expects `nam_file` +"_backup_.sfr" to exist
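Example (a minimal sketch of forward-run usage; assumes the current working
directory contains "sfr_seg_pars.config" and the `nam_file` +"_backup_.sfr"
file written by the setup function)::
    from pyemu.utils import gw_utils
    sfr = gw_utils.apply_sfr_seg_parameters(seg_pars=True, reach_pars=False)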
"""
if not seg_pars and not reach_pars:
raise Exception(
"gw_utils.apply_sfr_pars() error: both seg_pars and reach_pars are False"
)
# if seg_pars and reach_pars:
# raise Exception("gw_utils.apply_sfr_pars() error: both seg_pars and reach_pars are True")
import flopy
bak_sfr_file, pars = None, None
if seg_pars:
assert os.path.exists("sfr_seg_pars.config")
with open("sfr_seg_pars.config", "r") as f:
pars = {}
for line in f:
line = line.strip().split()
pars[line[0]] = line[1]
bak_sfr_file = pars["nam_file"] + "_backup_.sfr"
# m = flopy.modflow.Modflow.load(pars["nam_file"],model_ws=pars["model_ws"],load_only=["sfr"],check=False)
m = flopy.modflow.Modflow.load(pars["nam_file"], load_only=[], check=False)
sfr = flopy.modflow.ModflowSfr2.load(os.path.join(bak_sfr_file), m)
sfrfile = pars["sfr_filename"]
mlt_df = pd.read_csv(pars["mult_file"], delim_whitespace=False, index_col=0)
# time_mlt_df = None
# if "time_mult_file" in pars:
# time_mult_file = pars["time_mult_file"]
# time_mlt_df = pd.read_csv(pars["time_mult_file"], delim_whitespace=False,index_col=0)
idx_cols = ["nseg", "icalc", "outseg", "iupseg", "iprior", "nstrpts"]
present_cols = [c for c in idx_cols if c in mlt_df.columns]
mlt_cols = mlt_df.columns.drop(present_cols)
for key, val in m.sfr.segment_data.items():
df = pd.DataFrame.from_records(val)
df.loc[:, mlt_cols] *= mlt_df.loc[:, mlt_cols]
val = df.to_records(index=False)
sfr.segment_data[key] = val
if reach_pars:
assert os.path.exists("sfr_reach_pars.config")
with open("sfr_reach_pars.config", "r") as f:
r_pars = {}
for line in f:
line = line.strip().split()
r_pars[line[0]] = line[1]
if bak_sfr_file is None: # will be the case if seg_pars is False
bak_sfr_file = r_pars["nam_file"] + "_backup_.sfr"
# m = flopy.modflow.Modflow.load(pars["nam_file"],model_ws=pars["model_ws"],load_only=["sfr"],check=False)
m = flopy.modflow.Modflow.load(
r_pars["nam_file"], load_only=[], check=False
)
sfr = flopy.modflow.ModflowSfr2.load(os.path.join(bak_sfr_file), m)
sfrfile = r_pars["sfr_filename"]
r_mlt_df = pd.read_csv(r_pars["mult_file"], sep=",", index_col=0)
r_idx_cols = ["node", "k", "i", "j", "iseg", "ireach", "reachID", "outreach"]
r_mlt_cols = r_mlt_df.columns.drop(r_idx_cols)
r_df = pd.DataFrame.from_records(m.sfr.reach_data)
r_df.loc[:, r_mlt_cols] *= r_mlt_df.loc[:, r_mlt_cols]
sfr.reach_data = r_df.to_records(index=False)
# m.remove_package("sfr")
if pars is not None and "time_mult_file" in pars:
time_mult_file = pars["time_mult_file"]
time_mlt_df = pd.read_csv(time_mult_file, delim_whitespace=False, index_col=0)
for kper, sdata in m.sfr.segment_data.items():
assert kper in time_mlt_df.index, (
"gw_utils.apply_sfr_seg_parameters() error: kper "
+ "{0} not in time_mlt_df index".format(kper)
)
for col in time_mlt_df.columns:
sdata[col] *= time_mlt_df.loc[kper, col]
sfr.write_file(filename=sfrfile)
return sfr
def apply_sfr_parameters(seg_pars=True, reach_pars=False):
"""thin wrapper around `gw_utils.apply_sfr_seg_parameters()`
Args:
seg_pars (`bool`, optional): flag to apply segment-based parameters.
Default is True
reach_pars (`bool`, optional): flag to apply reach-based parameters.
Default is False
Returns:
**flopy.modflow.ModflowSfr**: the modified SFR package instance
Note:
Expects "sfr_seg_pars.config" to exist
Expects `nam_file` +"_backup_.sfr" to exist
"""
sfr = apply_sfr_seg_parameters(seg_pars=seg_pars, reach_pars=reach_pars)
return sfr
def setup_sfr_obs(
sfr_out_file, seg_group_dict=None, ins_file=None, model=None, include_path=False
):
"""setup observations using the sfr ASCII output file. Sets up
the ability to aggregate flows for groups of segments. Applies
only to flow to aquifer and flow out.
Args:
sfr_out_file (`str`): the name and path to an existing SFR output file
seg_group_dict (`dict`): a dictionary of SFR segments to aggregate together for a single obs.
the key value in the dict is the base observation name. If None, all segments
are used as individual observations. Default is None
model (`flopy.mbase`): a flopy model. If passed, the observation names will have
the datetime of the observation appended to them. If None, the observation names
will have the stress period appended to them. Default is None.
include_path (`bool`): flag to prepend sfr_out_file path to sfr_obs.config. Useful for setting up the
process in a separate directory from where python is running.
Returns:
**pandas.DataFrame**: dataframe of observation name, simulated value and group.
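Example (a minimal, hypothetical sketch; the SFR output file name and segment
grouping below are placeholders)::
    from pyemu.utils import gw_utils
    obs_df = gw_utils.setup_sfr_obs(
        "freyberg.sfr.out", seg_group_dict={"gage_1": [3, 4, 5]})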
Note:
This is the companion function of `gw_utils.apply_sfr_obs()`.
This function writes "sfr_obs.config" which must be kept in the dir where
"gw_utils.apply_sfr_obs()" is being called during the forward run
"""
sfr_dict = load_sfr_out(sfr_out_file)
kpers = list(sfr_dict.keys())
kpers.sort()
if seg_group_dict is None:
seg_group_dict = {"seg{0:04d}".format(s): s for s in sfr_dict[kpers[0]].segment}
else:
warnings.warn(
"Flow out (flout) of grouped segments will be aggregated... ", PyemuWarning
)
sfr_segs = set(sfr_dict[list(sfr_dict.keys())[0]].segment)
keys = ["sfr_out_file"]
if include_path:
values = [os.path.split(sfr_out_file)[-1]]
else:
values = [sfr_out_file]
for oname, segs in seg_group_dict.items():
if np.isscalar(segs):
segs_set = {segs}
segs = [segs]
else:
segs_set = set(segs)
diff = segs_set.difference(sfr_segs)
if len(diff) > 0:
raise Exception(
"the following segs listed with oname {0} were not found: {1}".format(
oname, ",".join([str(s) for s in diff])
)
)
for seg in segs:
keys.append(oname)
values.append(seg)
df_key = pd.DataFrame({"obs_base": keys, "segment": values})
if include_path:
pth = os.path.join(*[p for p in os.path.split(sfr_out_file)[:-1]])
config_file = os.path.join(pth, "sfr_obs.config")
else:
config_file = "sfr_obs.config"
print("writing 'sfr_obs.config' to {0}".format(config_file))
df_key.to_csv(config_file)
bd = "."
if include_path:
bd = os.getcwd()
os.chdir(pth)
try:
df = apply_sfr_obs()
except Exception as e:
os.chdir(bd)
raise Exception("error in apply_sfr_obs(): {0}".format(str(e)))
os.chdir(bd)
if model is not None:
dts = (
pd.to_datetime(model.start_datetime)
+ pd.to_timedelta(np.cumsum(model.dis.perlen.array), unit="d")
).date
df.loc[:, "datetime"] = df.kper.apply(lambda x: dts[x])
df.loc[:, "time_str"] = df.datetime.apply(lambda x: x.strftime("%Y%m%d"))
else:
df.loc[:, "time_str"] = df.kper.apply(lambda x: "{0:04d}".format(x))
df.loc[:, "flaqx_obsnme"] = df.apply(
lambda x: "{0}_{1}_{2}".format("fa", x.obs_base, x.time_str), axis=1
)
df.loc[:, "flout_obsnme"] = df.apply(
lambda x: "{0}_{1}_{2}".format("fo", x.obs_base, x.time_str), axis=1
)
if ins_file is None:
ins_file = sfr_out_file + ".processed.ins"
with open(ins_file, "w") as f:
f.write("pif ~\nl1\n")
for fla, flo in zip(df.flaqx_obsnme, df.flout_obsnme):
f.write("l1 w w !{0}! !{1}!\n".format(fla, flo))
df = None
pth = os.path.split(ins_file)[:-1]
pth = os.path.join(*pth)
if pth == "":
pth = "."
bd = os.getcwd()
os.chdir(pth)
df = try_process_output_file(
os.path.split(ins_file)[-1], os.path.split(sfr_out_file + ".processed")[-1]
)
os.chdir(bd)
if df is not None:
df.loc[:, "obsnme"] = df.index.values
df.loc[:, "obgnme"] = df.obsnme.apply(
lambda x: "flaqx" if x.startswith("fa") else "flout"
)
return df
def apply_sfr_obs():
"""apply the sfr observation process
Args:
None
Returns:
**pandas.DataFrame**: a dataframe of aggregated sfr segment flow to aquifer and flow out
Note:
This is the companion function of `gw_utils.setup_sfr_obs()`.
Requires `sfr_obs.config`.
Writes `sfr_out_file`+".processed", where `sfr_out_file` is defined in "sfr_obs.config"
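Example (forward-run usage; assumes "sfr_obs.config" written by
`gw_utils.setup_sfr_obs()` is in the current working directory)::
    from pyemu.utils import gw_utils
    agg_df = gw_utils.apply_sfr_obs()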
"""
assert os.path.exists("sfr_obs.config")
df_key = pd.read_csv("sfr_obs.config", index_col=0)
assert df_key.iloc[0, 0] == "sfr_out_file", df_key.iloc[0, :]
sfr_out_file = df_key.iloc[0, 1]
df_key = df_key.iloc[1:, :]
df_key.loc[:, "segment"] = df_key.segment.astype(int)
df_key.index = df_key.segment
seg_group_dict = df_key.groupby(df_key.obs_base).groups
sfr_kper = load_sfr_out(sfr_out_file)
kpers = list(sfr_kper.keys())
kpers.sort()
# results = {o:[] for o in seg_group_dict.keys()}
results = []
for kper in kpers:
df = sfr_kper[kper]
for obs_base, segs in seg_group_dict.items():
agg = df.loc[
segs.values, :
].sum() # still agg flout where seg groups are passed!
# print(obs_base,agg)
results.append([kper, obs_base, agg["flaqx"], agg["flout"]])
df = pd.DataFrame(data=results, columns=["kper", "obs_base", "flaqx", "flout"])
df.sort_values(by=["kper", "obs_base"], inplace=True)
df.to_csv(sfr_out_file + ".processed", sep=" ", index=False)
return df
def load_sfr_out(sfr_out_file, selection=None):
"""load an ASCII SFR output file into a dictionary of kper: dataframes.
Args:
sfr_out_file (`str`): SFR ASCII output file
selection (`pandas.DataFrame`): a dataframe of `reach` and `segment` pairs to
load. If `None`, all reach-segment pairs are loaded. Default is `None`.
Returns:
**dict**: dictionary of {kper:`pandas.DataFrame`} of SFR output.
Note:
Aggregates flow to aquifer for segments and returns flow out at the
downstream end of each segment.
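Example (a minimal, hypothetical sketch; the output file name and the
segment/reach pairs below are placeholders)::
    import pandas as pd
    from pyemu.utils import gw_utils
    sel = pd.DataFrame({"segment": [1, 2], "reach": [1, 3]})
    sfr_dict = gw_utils.load_sfr_out("freyberg.sfr.out", selection=sel)
    df_kper0 = sfr_dict[0]  # dataframe for the first stress period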
"""
assert os.path.exists(sfr_out_file), "couldn't find sfr out file {0}".format(
sfr_out_file
)
tag = " stream listing"
lcount = 0
sfr_dict = {}
if selection is None:
pass
elif isinstance(selection, str):
assert (
selection == "all"
), "If string passed as selection only 'all' allowed: " "{}".format(selection)
else:
assert isinstance(
selection, pd.DataFrame
), "'selection' needs to be a pandas DataFrame. " "Type {} passed.".format(
type(selection)
)
assert np.all(
[sr in selection.columns for sr in ["segment", "reach"]]
), "Either 'segment' or 'reach' not in selection columns"
with open(sfr_out_file) as f:
while True:
line = f.readline().lower()
lcount += 1
if line == "":
break
if line.startswith(tag):
raw = line.strip().split()
kper = int(raw[3]) - 1
kstp = int(raw[5]) - 1
[f.readline() for _ in range(4)] # skip to where the data starts
lcount += 4
dlines = []
while True:
dline = f.readline()
lcount += 1
if dline.strip() == "":
break
draw = dline.strip().split()
dlines.append(draw)
df = pd.DataFrame(data=np.array(dlines)).iloc[:, [3, 4, 6, 7]]
df.columns = ["segment", "reach", "flaqx", "flout"]
df["segment"] = df.segment.astype(int)
df["reach"] = df.reach.astype(int)
df["flaqx"] = df.flaqx.astype(float)
df["flout"] = df.flout.astype(float)
df.index = [
"{0:03d}_{1:03d}".format(s, r)
for s, r in np.array([df.segment.values, df.reach.values]).T
]
# df.index = df.apply(
# lambda x: "{0:03d}_{1:03d}".format(
# int(x.segment), int(x.reach)), axis=1)
if selection is None: # setup for all segs, aggregate
gp = df.groupby(df.segment)
bot_reaches = (
gp[["reach"]]
.max()
.apply(
lambda x: "{0:03d}_{1:03d}".format(
int(x.name), int(x.reach)
),
axis=1,
)
)
# only sum distributed output # take flow out of seg
df2 = pd.DataFrame(
{
"flaqx": gp.flaqx.sum(),
"flout": df.loc[bot_reaches, "flout"].values,
},
index=gp.groups.keys(),
)
# df = df.groupby(df.segment).sum()
df2["segment"] = df2.index
elif isinstance(selection, str) and selection == "all":
df2 = df
else:
seg_reach_id = selection.apply(
lambda x: "{0:03d}_{1:03d}".format(
int(x.segment), int(x.reach)
),
axis=1,
).values
for sr in seg_reach_id:
if sr not in df.index:
s, r = [x.lstrip("0") for x in sr.split("_")]
warnings.warn(
"Requested segment reach pair ({0},{1}) "
"is not in sfr output. Dropping...".format(
int(r), int(s)
),
PyemuWarning,
)
seg_reach_id = np.delete(
seg_reach_id, np.where(seg_reach_id == sr), axis=0
)
df2 = df.loc[seg_reach_id].copy()
if kper in sfr_dict.keys():
print(
"multiple entries found for kper {0}, "
"replacing...".format(kper)
)
sfr_dict[kper] = df2
return sfr_dict
def setup_sfr_reach_obs(
sfr_out_file, seg_reach=None, ins_file=None, model=None, include_path=False
):
"""setup observations using the sfr ASCII output file. Sets up
sfr point observations using segment and reach numbers.
Args:
sfr_out_file (`str`): the path and name of an existing SFR output file
seg_reach (varies): a dict, or list of SFR [segment,reach] pairs identifying
locations of interest. If `dict`, the key value in the dict is the base
observation name. If None, all reaches are used as individual observations.
Default is None - THIS MAY SET UP A LOT OF OBS!
model (`flopy.mbase`): a flopy model. If passed, the observation names will
have the datetime of the observation appended to them. If None, the
observation names will have the stress period appended to them. Default is None.
include_path (`bool`): a flag to prepend sfr_out_file path to sfr_obs.config. Useful
for setting up the process in a separate directory from where python is running.
Returns:
`pd.DataFrame`: a dataframe of observation names, values, and groups
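Example (a minimal, hypothetical sketch; the output file name and the
segment/reach pairs below are placeholders)::
    from pyemu.utils import gw_utils
    obs_df = gw_utils.setup_sfr_reach_obs(
        "freyberg.sfr.out",
        seg_reach={"headwater": [1, 1], "outlet": [9, 12]})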
Note:
This is the companion function of `gw_utils.apply_sfr_reach_obs()`.
This function writes "sfr_reach_obs.config" which must be kept in the dir where
"apply_sfr_reach_obs()" is being called during the forward run
"""
if seg_reach is None:
warnings.warn("Obs will be set up for every reach", PyemuWarning)
seg_reach = "all"
elif isinstance(seg_reach, list) or isinstance(seg_reach, np.ndarray):
if np.ndim(seg_reach) == 1:
seg_reach = [seg_reach]
assert (
np.shape(seg_reach)[1] == 2
), "variable seg_reach expected shape (n,2), received {0}".format(
np.shape(seg_reach)
)
seg_reach = pd.DataFrame(seg_reach, columns=["segment", "reach"])
seg_reach.index = seg_reach.apply(
lambda x: "s{0:03d}r{1:03d}".format(int(x.segment), int(x.reach)), axis=1
)
elif isinstance(seg_reach, dict):
seg_reach = pd.DataFrame.from_dict(
seg_reach, orient="index", columns=["segment", "reach"]
)
else:
assert isinstance(
seg_reach, pd.DataFrame
), "'seg_reach' needs to be a pandas DataFrame. Type {} passed.".format(
type(seg_reach)
)
assert np.all(
[sr in seg_reach.columns for sr in ["segment", "reach"]]
), "Either 'segment' or 'reach' not in selection columns"
sfr_dict = load_sfr_out(sfr_out_file, selection=seg_reach)
kpers = list(sfr_dict.keys())
kpers.sort()
if isinstance(seg_reach, str) and seg_reach == "all":
seg_reach = sfr_dict[kpers[0]][["segment", "reach"]]
seg_reach.index = seg_reach.apply(
lambda x: "s{0:03d}r{1:03d}".format(int(x.segment), int(x.reach)), axis=1
)
keys = ["sfr_out_file"]
if include_path:
values = [os.path.split(sfr_out_file)[-1]]
else:
values = [sfr_out_file]
diff = seg_reach.loc[
seg_reach.apply(
lambda x: "{0:03d}_{1:03d}".format(int(x.segment), int(x.reach))
not in sfr_dict[list(sfr_dict.keys())[0]].index,
axis=1,
)
]
if len(diff) > 0:
for ob in diff.itertuples():
warnings.warn(
"seg,reach pair listed with oname {0} was not found: {1}".format(
ob.Index, "({},{})".format(ob.segment, ob.reach)
),
PyemuWarning,
)
seg_reach = seg_reach.drop(diff.index)
seg_reach["obs_base"] = seg_reach.index
df_key = pd.DataFrame({"obs_base": keys, "segment": 0, "reach": values})
df_key = pd.concat([df_key, seg_reach], sort=True).reset_index(drop=True)
if include_path:
pth = os.path.join(*[p for p in os.path.split(sfr_out_file)[:-1]])
config_file = os.path.join(pth, "sfr_reach_obs.config")
else:
config_file = "sfr_reach_obs.config"
print("writing 'sfr_reach_obs.config' to {0}".format(config_file))
df_key.to_csv(config_file)
bd = "."
if include_path:
bd = os.getcwd()
os.chdir(pth)
try:
df = apply_sfr_reach_obs()
except Exception as e:
os.chdir(bd)
raise Exception("error in apply_sfr_reach_obs(): {0}".format(str(e)))
os.chdir(bd)
if model is not None:
dts = (
pd.to_datetime(model.start_datetime)
+ pd.to_timedelta(np.cumsum(model.dis.perlen.array), unit="d")
).date
df.loc[:, "datetime"] = df.kper.apply(lambda x: dts[x])
df.loc[:, "time_str"] = df.datetime.apply(lambda x: x.strftime("%Y%m%d"))
else:
df.loc[:, "time_str"] = df.kper.apply(lambda x: "{0:04d}".format(x))
df.loc[:, "flaqx_obsnme"] = df.apply(
lambda x: "{0}_{1}_{2}".format("fa", x.obs_base, x.time_str), axis=1
)
df.loc[:, "flout_obsnme"] = df.apply(
lambda x: "{0}_{1}_{2}".format("fo", x.obs_base, x.time_str), axis=1
)
if ins_file is None:
ins_file = sfr_out_file + ".reach_processed.ins"
with open(ins_file, "w") as f:
f.write("pif ~\nl1\n")
for fla, flo in zip(df.flaqx_obsnme, df.flout_obsnme):
f.write("l1 w w !{0}! !{1}!\n".format(fla, flo))
df = None
pth = os.path.split(ins_file)[:-1]
pth = os.path.join(*pth)
if pth == "":
pth = "."
bd = os.getcwd()
os.chdir(pth)
try:
df = try_process_output_file(
os.path.split(ins_file)[-1], os.path.split(sfr_out_file + ".processed")[-1]
)
except Exception as e:
pass
os.chdir(bd)
if df is not None:
df.loc[:, "obsnme"] = df.index.values
df.loc[:, "obgnme"] = df.obsnme.apply(
lambda x: "flaqx" if x.startswith("fa") else "flout"
)
return df
def apply_sfr_reach_obs():
"""apply the sfr reach observation process.
Returns:
`pd.DataFrame`: a dataframe of sfr flow to aquifer and flow out at segment,reach locations
Note:
This is the companion function of `gw_utils.setup_sfr_reach_obs()`.
Requires sfr_reach_obs.config.
Writes <sfr_out_file>.reach_processed, where <sfr_out_file> is defined in
"sfr_reach_obs.config"
"""
assert os.path.exists("sfr_reach_obs.config")
df_key = pd.read_csv("sfr_reach_obs.config", index_col=0)
assert df_key.iloc[0, 0] == "sfr_out_file", df_key.iloc[0, :]
sfr_out_file = df_key.iloc[0].reach
df_key = df_key.iloc[1:, :].copy()
df_key.loc[:, "segment"] = df_key.segment.astype(int)
df_key.loc[:, "reach"] = df_key.reach.astype(int)
df_key = df_key.set_index("obs_base")
sfr_kper = load_sfr_out(sfr_out_file, df_key)
kpers = list(sfr_kper.keys())
kpers.sort()
results = []
for kper in kpers:
df = sfr_kper[kper]
for sr in df_key.itertuples():
ob = df.loc["{0:03d}_{1:03d}".format(sr.segment, sr.reach), :]
results.append([kper, sr.Index, ob["flaqx"], ob["flout"]])
df = pd.DataFrame(data=results, columns=["kper", "obs_base", "flaqx", "flout"])
df.sort_values(by=["kper", "obs_base"], inplace=True)
df.to_csv(sfr_out_file + ".reach_processed", sep=" ", index=False)
return df
def modflow_sfr_gag_to_instruction_file(
gage_output_file, ins_file=None, parse_filename=False
):
"""writes an instruction file for an SFR gage output file to read Flow only at all times
Args:
gage_output_file (`str`): the gage output filename (ASCII).
ins_file (`str`, optional): the name of the instruction file to
create. If None, the name is `gage_output_file` +".ins".
Default is None
parse_filename (`bool`): if True, get the gage_num parameter by
parsing the gage output file filename if False, get the gage
number from the file itself
Returns:
tuple containing
- **pandas.DataFrame**: a dataframe with obsnme and obsval for the sfr simulated flows.
- **str**: file name of instructions file relating to gage output.
- **str**: file name of processed gage output for all times
Note:
Sets up observations for gage outputs only for the Flow column.
If `parse_filename` is True, only text up to the first '.' is used as the gage_num
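Example (a minimal, hypothetical sketch; the gage output file name is a
placeholder)::
    from pyemu.utils import gw_utils
    df, ins_file, out_file = gw_utils.modflow_sfr_gag_to_instruction_file(
        "sfr_gage_1.go")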
"""
if ins_file is None:
ins_file = gage_output_file + ".ins"
# navigate the file to be sure the header makes sense
indat = [line.strip() for line in open(gage_output_file, "r").readlines()]
header = [i for i in indat if i.startswith('"')]
# yank out the gage number to identify the observation names
if parse_filename:
gage_num = os.path.basename(gage_output_file).split(".")[0]
else:
gage_num = re.sub(
"[^0-9]", "", indat[0].lower().split("gage no.")[-1].strip().split()[0]
)
# get the column names
cols = (
[i.lower() for i in header if "data" in i.lower()][0]
.lower()
.replace('"', "")
.replace("data:", "")
.split()
)
# make sure "Flow" is included in the columns
if "flow" not in cols:
raise Exception('Requested field "Flow" not in gage output columns')
# find which column is for "Flow"
flowidx = np.where(np.array(cols) == "flow")[0][0]
# write out the instruction file lines
inslines = [
"l1 " + (flowidx + 1) * "w " + "!g{0}_{1:d}!".format(gage_num, j)
for j in range(len(indat) - len(header))
]
inslines[0] = inslines[0].replace("l1", "l{0:d}".format(len(header) + 1))
# write the instruction file
with open(ins_file, "w") as ofp:
ofp.write("pif ~\n")
[ofp.write("{0}\n".format(line)) for line in inslines]
df = try_process_output_file(ins_file, gage_output_file)
return df, ins_file, gage_output_file
def setup_gage_obs(gage_file, ins_file=None, start_datetime=None, times=None):
"""setup a forward run post processor routine for the modflow gage file
Args:
gage_file (`str`): the gage output file (ASCII)
ins_file (`str`, optional): the name of the instruction file to create. If None, the name
is `gage_file`+".processed.ins". Default is `None`
start_datetime (`str`): a `pandas.to_datetime()` compatible `str`. If not `None`,
then the resulting observation names have the datetime suffix. If `None`,
the suffix is the output totim. Default is `None`.
times ([`float`]): a container of times to make observations for. If None,
all times are used. Default is None.
Returns:
tuple containing
- **pandas.DataFrame**: a dataframe with observation name and simulated values for the
values in the gage file.
- **str**: file name of instructions file that was created relating to gage output.
- **str**: file name of processed gage output (processed according to times passed above.)
Note:
Sets up observations for gage outputs (all columns).
This is the companion function of `gw_utils.apply_gage_obs()`
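Example (a minimal, hypothetical sketch; the gage file name, start date, and
output times below are placeholders)::
    from pyemu.utils import gw_utils
    df, ins_file, obs_file = gw_utils.setup_gage_obs(
        "gage_1.go", start_datetime="1-1-2020", times=[365.25, 730.5])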
"""
with open(gage_file, "r") as f:
line1 = f.readline()
gage_num = int(
re.sub("[^0-9]", "", line1.split("GAGE No.")[-1].strip().split()[0])
)
gage_type = line1.split("GAGE No.")[-1].strip().split()[1].lower()
obj_num = int(line1.replace('"', "").strip().split()[-1])
line2 = f.readline()
df = pd.read_csv(
f, delim_whitespace=True, names=line2.replace('"', "").split()[1:]
)
df.columns = [
c.lower().replace("-", "_").replace(".", "_").strip("_") for c in df.columns
]
# get unique observation ids
obs_ids = {
col: "" for col in df.columns[1:]
} # empty dictionary for observation ids
for col in df.columns[1:]: # exclude column 1 (TIME)
colspl = col.split("_")
if len(colspl) > 1:
# obs name built out of "g"(for gage) "s" or "l"(for gage type) 2 chars from column name - date added later
obs_ids[col] = "g{0}{1}{2}".format(
gage_type[0], colspl[0][0], colspl[-1][0]
)
else:
obs_ids[col] = "g{0}{1}".format(gage_type[0], col[0:2])
with open(
"_gage_obs_ids.csv", "w"
) as f: # write file relating obs names to meaningful keys!
[f.write("{0},{1}\n".format(key, obs)) for key, obs in obs_ids.items()]
# find passed times in df
if times is None:
times = df.time.unique()
missing = []
utimes = df.time.unique()
for t in times:
if not np.isclose(t, utimes).any():
missing.append(str(t))
if len(missing) > 0:
print(df.time)
raise Exception("the following times are missing:{0}".format(",".join(missing)))
# write output times to config file
with open("gage_obs.config", "w") as f:
f.write(gage_file + "\n")
[f.write("{0:15.10E}\n".format(t)) for t in times]
# extract data for times: returns dataframe and saves a processed df - read by pest
df, obs_file = apply_gage_obs(return_obs_file=True)
utimes = df.time.unique()
for t in times:
assert np.isclose(
t, utimes
).any(), "time {0} missing in processed dataframe".format(t)
idx = df.time.apply(
lambda x: np.isclose(x, times).any()
) # boolean selector of desired times in df
if start_datetime is not None:
# convert times to usable observation times
start_datetime = pd.to_datetime(start_datetime)
df.loc[:, "time_str"] = pd.to_timedelta(df.time, unit="d") + start_datetime
df.loc[:, "time_str"] = df.time_str.apply(
lambda x: datetime.strftime(x, "%Y%m%d")
)
else:
df.loc[:, "time_str"] = df.time.apply(lambda x: "{0:08.2f}".format(x))
# set up instructions (line feed for lines without obs, i.e. times not requested)
df.loc[:, "ins_str"] = "l1\n"
df_times = df.loc[idx, :] # Slice by desired times
# TODO include GAGE No. in obs name (if permissible)
df.loc[df_times.index, "ins_str"] = df_times.apply(
lambda x: "l1 w {}\n".format(
" w ".join(
["!{0}{1}!".format(obs, x.time_str) for key, obs in obs_ids.items()]
)
),
axis=1,
)
df.index = np.arange(df.shape[0])
if ins_file is None:
ins_file = gage_file + ".processed.ins"
with open(ins_file, "w") as f:
f.write("pif ~\nl1\n")
[f.write(i) for i in df.ins_str]
df = try_process_output_file(ins_file, gage_file + ".processed")
return df, ins_file, obs_file
def apply_gage_obs(return_obs_file=False):
"""apply the modflow gage obs post-processor
Args:
return_obs_file (`bool`): flag to return the processed
observation file. Default is `False`.
Note:
This is the companion function of `gw_utils.setup_gage_obs()`
"""
times = []
with open("gage_obs.config") as f:
gage_file = f.readline().strip()
for line in f:
times.append(float(line.strip()))
obs_file = gage_file + ".processed"
with open(gage_file, "r") as f:
line1 = f.readline()
gage_num = int(
re.sub("[^0-9]", "", line1.split("GAGE No.")[-1].strip().split()[0])
)
gage_type = line1.split("GAGE No.")[-1].strip().split()[1].lower()
obj_num = int(line1.replace('"', "").strip().split()[-1])
line2 = f.readline()
df = pd.read_csv(
f, delim_whitespace=True, names=line2.replace('"', "").split()[1:]
)
df.columns = [c.lower().replace("-", "_").replace(".", "_") for c in df.columns]
df = df.loc[df.time.apply(lambda x: np.isclose(x, times).any()), :]
df.to_csv(obs_file, sep=" ", index=False)
if return_obs_file:
return df, obs_file
else:
return df
def apply_hfb_pars(par_file="hfb6_pars.csv"):
"""a function to apply HFB multiplier parameters.
Args:
par_file (`str`): the HFB parameter info file.
Default is `hfb6_pars.csv`
Note:
This is the companion function to
`gw_utils.write_hfb_zone_multipliers_template()`
This is to account for the horrible HFB6 format that differs from other
BCs, making this a special case.
Requires "hfb6_pars.csv"
Should be added to the forward_run.py script
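Example (forward-run usage; assumes "hfb6_pars.csv" and the multiplier file it
references were written by `gw_utils.write_hfb_zone_multipliers_template()`)::
    from pyemu.utils import gw_utils
    gw_utils.apply_hfb_pars(par_file="hfb6_pars.csv")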
"""
hfb_pars = pd.read_csv(par_file)
hfb_mults_contents = open(hfb_pars.mlt_file.values[0], "r").readlines()
skiprows = (
sum([1 if i.strip().startswith("#") else 0 for i in hfb_mults_contents]) + 1
)
header = hfb_mults_contents[:skiprows]
# read in the multipliers
names = ["lay", "irow1", "icol1", "irow2", "icol2", "hydchr"]
hfb_mults = pd.read_csv(
hfb_pars.mlt_file.values[0],
skiprows=skiprows,
delim_whitespace=True,
names=names,
).dropna()
# read in the original file
hfb_org = pd.read_csv(
hfb_pars.org_file.values[0],
skiprows=skiprows,
delim_whitespace=True,
names=names,
).dropna()
# multiply it out
hfb_org.hydchr *= hfb_mults.hydchr
for cn in names[:-1]:
hfb_mults[cn] = hfb_mults[cn].astype(int)
hfb_org[cn] = hfb_org[cn].astype(int)
# write the results
with open(hfb_pars.model_file.values[0], "w", newline="") as ofp:
[ofp.write("{0}\n".format(line.strip())) for line in header]
ofp.flush()
hfb_org[["lay", "irow1", "icol1", "irow2", "icol2", "hydchr"]].to_csv(
ofp, sep=" ", header=None, index=None
)
def write_hfb_zone_multipliers_template(m):
"""write a template file for an hfb using multipliers per zone (double yuck!)
Args:
m (`flopy.modflow.Modflow`): a model instance with an HFB package
Returns:
tuple containing
- **dict**: a dictionary with original unique HFB conductivity values and their
corresponding parameter names
- **str**: the template filename that was created
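Example (a minimal, hypothetical sketch; assumes "freyberg.nam" in the
"template" workspace has an HFB6 package)::
    import flopy
    from pyemu.utils import gw_utils
    m = flopy.modflow.Modflow.load("freyberg.nam", model_ws="template")
    hfb_mults, tpl_file = gw_utils.write_hfb_zone_multipliers_template(m)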
"""
if m.hfb6 is None:
raise Exception("no HFB package found")
# find the model file
hfb_file = os.path.join(m.model_ws, m.hfb6.file_name[0])
# this will use multipliers, so need to copy down the original
if not os.path.exists(os.path.join(m.model_ws, "hfb6_org")):
os.mkdir(os.path.join(m.model_ws, "hfb6_org"))
# copy down the original file
shutil.copy2(
os.path.join(m.model_ws, m.hfb6.file_name[0]),
os.path.join(m.model_ws, "hfb6_org", m.hfb6.file_name[0]),
)
if not os.path.exists(os.path.join(m.model_ws, "hfb6_mlt")):
os.mkdir(os.path.join(m.model_ws, "hfb6_mlt"))
# read in the model file
hfb_file_contents = open(hfb_file, "r").readlines()
# navigate the header
skiprows = (
sum([1 if i.strip().startswith("#") else 0 for i in hfb_file_contents]) + 1
)
header = hfb_file_contents[:skiprows]
# read in the data
names = ["lay", "irow1", "icol1", "irow2", "icol2", "hydchr"]
hfb_in = pd.read_csv(
hfb_file, skiprows=skiprows, delim_whitespace=True, names=names
).dropna()
for cn in names[:-1]:
hfb_in[cn] = hfb_in[cn].astype(int)
# set up a multiplier for each unique conductivity value
unique_cond = hfb_in.hydchr.unique()
hfb_mults = dict(
zip(unique_cond, ["hbz_{0:04d}".format(i) for i in range(len(unique_cond))])
)
# set up the TPL line for each parameter and assign
hfb_in["tpl"] = "blank"
for cn, cg in hfb_in.groupby("hydchr"):
hfb_in.loc[hfb_in.hydchr == cn, "tpl"] = "~{0:^10s}~".format(hfb_mults[cn])
assert "blank" not in hfb_in.tpl
# write out the TPL file
tpl_file = os.path.join(m.model_ws, "hfb6.mlt.tpl")
with open(tpl_file, "w", newline="") as ofp:
ofp.write("ptf ~\n")
[ofp.write("{0}\n".format(line.strip())) for line in header]
ofp.flush()
hfb_in[["lay", "irow1", "icol1", "irow2", "icol2", "tpl"]].to_csv(
ofp, sep=" ", quotechar=" ", header=None, index=None, mode="a"
)
# make a lookup for lining up the necessary files to
# perform multiplication with the helpers.apply_hfb_pars() function
# which must be added to the forward run script
with open(os.path.join(m.model_ws, "hfb6_pars.csv"), "w") as ofp:
ofp.write("org_file,mlt_file,model_file\n")
ofp.write(
"{0},{1},{2}\n".format(
os.path.join(m.model_ws, "hfb6_org", m.hfb6.file_name[0]),
os.path.join(
m.model_ws,
"hfb6_mlt",
os.path.basename(tpl_file).replace(".tpl", ""),
),
hfb_file,
)
)
return hfb_mults, tpl_file
def write_hfb_template(m):
"""write a template file for an hfb (yuck!)
Args:
m (`flopy.modflow.Modflow`): a model instance with an HFB package
Returns:
tuple containing
- **str**: name of the template file that was created
- **pandas.DataFrame**: a dataframe with useful control file info for the
HFB parameters
"""
assert m.hfb6 is not None
hfb_file = os.path.join(m.model_ws, m.hfb6.file_name[0])
assert os.path.exists(hfb_file), "couldn't find hfb_file {0}".format(hfb_file)
f_in = open(hfb_file, "r")
tpl_file = hfb_file + ".tpl"
f_tpl = open(tpl_file, "w")
f_tpl.write("ptf ~\n")
parnme, parval1, xs, ys = [], [], [], []
iis, jjs, kks = [], [], []
xc = m.sr.xcentergrid
yc = m.sr.ycentergrid
while True:
line = f_in.readline()
if line == "":
break
f_tpl.write(line)
if not line.startswith("#"):
raw = line.strip().split()
nphfb = int(raw[0])
mxfb = int(raw[1])
nhfbnp = int(raw[2])
if nphfb > 0 or mxfb > 0:
raise Exception("not supporting terrible HFB pars")
for i in range(nhfbnp):
line = f_in.readline()
if line == "":
raise Exception("EOF")
raw = line.strip().split()
k = int(raw[0]) - 1
i = int(raw[1]) - 1
j = int(raw[2]) - 1
pn = "hb{0:02}{1:04d}{2:04}".format(k, i, j)
pv = float(raw[5])
raw[5] = "~ {0} ~".format(pn)
line = " ".join(raw) + "\n"
f_tpl.write(line)
parnme.append(pn)
parval1.append(pv)
xs.append(xc[i, j])
ys.append(yc[i, j])
iis.append(i)
jjs.append(j)
kks.append(k)
break
f_tpl.close()
f_in.close()
df = pd.DataFrame(
{
"parnme": parnme,
"parval1": parval1,
"x": xs,
"y": ys,
"i": iis,
"j": jjs,
"k": kks,
},
index=parnme,
)
df.loc[:, "pargp"] = "hfb_hydfac"
df.loc[:, "parubnd"] = df.parval1.max() * 10.0
df.loc[:, "parlbnd"] = df.parval1.min() * 0.1
return tpl_file, df
class GsfReader:
"""
a helper class to read a standard modflow-usg gsf file
Args:
gsffilename (`str`): filename
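Example (a minimal, hypothetical sketch; "model.gsf" is a placeholder for an
existing MODFLOW-USG gsf file)::
    gsf = GsfReader("model.gsf")
    node_df = gsf.get_node_data()
    coords = gsf.get_node_coordinates(zcoord=True, zero_based=True)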
"""
def __init__(self, gsffilename):
with open(gsffilename, "r") as f:
self.read_data = f.readlines()
self.nnode, self.nlay, self.iz, self.ic = [
int(n) for n in self.read_data[1].split()
]
self.nvertex = int(self.read_data[2])
def get_vertex_coordinates(self):
"""
Returns:
Dictionary containing list of x, y and z coordinates for each vertex
"""
# vdata = self.read_data[3:self.nvertex+3]
vertex_coords = {}
for vert in range(self.nvertex):
x, y, z = self.read_data[3 + vert].split()
vertex_coords[vert + 1] = [float(x), float(y), float(z)]
return vertex_coords
def get_node_data(self):
"""
Returns:
nodedf: a pd.DataFrame containing Node information; Node, X, Y, Z, layer, numverts, vertidx
"""
node_data = []
for node in range(self.nnode):
nid, x, y, z, lay, numverts = self.read_data[
self.nvertex + 3 + node
].split()[:6]
# vertidx = {'ivertex': [int(n) for n in self.read_data[self.nvertex+3 + node].split()[6:]]}
vertidx = [
int(n) for n in self.read_data[self.nvertex + 3 + node].split()[6:]
]
node_data.append(
[
int(nid),
float(x),
float(y),
float(z),
int(lay),
int(numverts),
vertidx,
]
)
nodedf = pd.DataFrame(
node_data, columns=["node", "x", "y", "z", "layer", "numverts", "vertidx"]
)
return nodedf
def get_node_coordinates(self, zcoord=False, zero_based=False):
"""
Args:
zcoord (`bool`): flag to add z coord to coordinates. Default is False
zero_based (`bool`): flag to subtract one from the node numbers in the returned
node_coords dict. This is needed to support PstFrom. Default is False
Returns:
node_coords: Dictionary containing x and y coordinates for each node
"""
node_coords = {}
for node in range(self.nnode):
nid, x, y, z, lay, numverts = self.read_data[
self.nvertex + 3 + node
].split()[:6]
nid = int(nid)
if zero_based:
nid -= 1
node_coords[nid] = [float(x), float(y)]
if zcoord:
node_coords[nid] += [float(z)]
return node_coords
| bsd-3-clause |
MadsJensen/agency_connectivity | tf_functions.py | 1 | 5293 | """
Functions for TF analysis.
@author: mje
@email: mads [] cnru.dk
"""
import mne
from mne.time_frequency import (psd_multitaper, tfr_multitaper, tfr_morlet,
cwt_morlet)
from mne.viz import iter_topography
import matplotlib.pyplot as plt
import numpy as np
def calc_psd_epochs(epochs, plot=False):
"""Calculate PSD for epoch.
Parameters
----------
epochs : list of epochs
plot : bool
To show plot of the psds.
The average for each condition is shown.
Returns
-------
psds_vol : numpy array
The psds for the voluntary condition.
psds_invol : numpy array
The psds for the involuntary condition.
freqs : numpy array
The frequencies at which the PSDs were computed.
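Examples
--------
A minimal, hypothetical sketch; assumes `epochs` is an `mne.Epochs` object
containing "voluntary" and "involuntary" conditions::
    psds_vol, psds_inv, freqs = calc_psd_epochs(epochs, plot=False)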
"""
tmin, tmax = -0.5, 0.5
fmin, fmax = 2, 90
# n_fft = 2048 # the FFT size (n_fft). Ideally a power of 2
psds_vol, freqs = psd_multitaper(epochs["voluntary"],
tmin=tmin, tmax=tmax,
fmin=fmin, fmax=fmax)
psds_inv, freqs = psd_multitaper(epochs["involuntary"],
tmin=tmin, tmax=tmax,
fmin=fmin, fmax=fmax)
psds_vol = 20 * np.log10(psds_vol) # scale to dB
psds_inv = 20 * np.log10(psds_inv) # scale to dB
if plot:
def my_callback(ax, ch_idx):
"""Executed once you click on one of the channels in the plot."""
ax.plot(freqs, psds_vol_plot[ch_idx], color='red',
label="voluntary")
ax.plot(freqs, psds_inv_plot[ch_idx], color='blue',
label="involuntary")
ax.set_xlabel = 'Frequency (Hz)'
ax.set_ylabel = 'Power (dB)'
ax.legend()
psds_vol_plot = psds_vol.copy().mean(axis=0)
psds_inv_plot = psds_inv.copy().mean(axis=0)
for ax, idx in iter_topography(epochs.info,
fig_facecolor='k',
axis_facecolor='k',
axis_spinecolor='k',
on_pick=my_callback):
ax.plot(psds_vol_plot[idx], color='red', label="voluntary")
ax.plot(psds_inv_plot[idx], color='blue', label="involuntary")
plt.legend()
plt.gcf().suptitle('Power spectral densities')
plt.show()
return psds_vol, psds_inv, freqs
def multitaper_analysis(epochs):
"""
Parameters
----------
epochs : list of epochs
Returns
-------
result : numpy array
The result of the multitaper analysis.
"""
frequencies = np.arange(6., 90., 2.)
n_cycles = frequencies / 2.
time_bandwidth = 4 # Same time-smoothing as (1), 7 tapers.
power, plv = tfr_multitaper(epochs, freqs=frequencies, n_cycles=n_cycles,
time_bandwidth=time_bandwidth, return_itc=True)
return power, plv
def morlet_analysis(epochs, n_cycles=4):
"""
Parameters
----------
epochs : list of epochs
Returns
-------
result : numpy array
The result of the multitaper analysis.
"""
frequencies = np.arange(6., 30., 2.)
# n_cycles = frequencies / 2.
power, plv = tfr_morlet(epochs, freqs=frequencies, n_cycles=n_cycles,
return_itc=True,
verbose=True)
return power, plv
def single_trial_tf(epochs, frequencies, n_cycles=4.):
"""
Parameters
----------
epochs : Epochs object
The epochs to calculate TF analysis on.
frequencies : numpy array
n_cycles : int
The number of cycles for the Morlet wavelets.
Returns
-------
results : numpy array
"""
results = []
for j in range(len(epochs)):
tfr = cwt_morlet(epochs.get_data()[j],
sfreq=epochs.info["sfreq"],
freqs=frequencies,
use_fft=True,
n_cycles=n_cycles,
# decim=2,
zero_mean=False)
results.append(tfr)
return results
def calc_spatial_resolution(freqs, n_cycles):
"""Calculate the spatial resolution for a Morlet wavelet.
The formula is: (freqs / n_cycles) * 2.
Parameters
----------
freqs : numpy array
The frequencies to be calculated.
n_cycles : int or numpy array
The number of cycles used. Can be integer for the same cycle for all
frequencies, or a numpy array for individual cycles per frequency.
Returns
-------
result : numpy array
The results
"""
return (freqs / float(n_cycles)) * 2
def calc_wavelet_duration(freqs, n_cycles):
"""Calculate the wavelet duration for a Morlet wavelet in ms.
The formula is: (cycle / frequencies / pi)*1000
Parameters
----------
freqs : numpy array
The frequencies to be calculated.
n_cycles : int or numpy array
The number of cycles used. Can be integer for the same cycle for all
frequencies, or a numpy array for individual cycles per frequency.
Returns
-------
result : numpy array
The results
"""
return (float(n_cycles) / freqs / np.pi) * 1000
| bsd-3-clause |
sassoftware/saspy | saspy/sasiocom.py | 1 | 37140 | #
# Copyright SAS Institute
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
import csv
import io
import numbers
import os
import shlex
import sys
import warnings
try:
from win32com.client import dynamic
except ImportError:
pass
try:
import pandas as pd
except ImportError:
pass
class SASConfigCOM(object):
"""
This object is not intended to be used directly. Instantiate a SASSession
object instead.
"""
NO_OVERRIDE = ['kernel', 'sb']
def __init__(self, **kwargs):
self._kernel = kwargs.get('kernel')
session = kwargs['sb']
sascfg = session.sascfg.SAScfg
name = session.sascfg.name
cfg = getattr(sascfg, name)
opts = getattr(sascfg, 'SAS_config_options', {})
outs = getattr(sascfg, 'SAS_output_options', {})
self.host = cfg.get('iomhost')
self.port = cfg.get('iomport')
self.user = cfg.get('omruser')
self.pw = cfg.get('omrpw')
self.authkey = cfg.get('authkey')
self.class_id = cfg.get('class_id', '440196d4-90f0-11d0-9f41-00a024bb830c')
self.provider = cfg.get('provider')
self.encoding = cfg.get('encoding', '')
self.output = outs.get('output', 'html5')
self.verbose = opts.get('verbose', True)
self.verbose = kwargs.get('verbose', self.verbose)
self._lock = opts.get('lock_down', True)
self._prompt = session.sascfg._prompt
if self.authkey is not None:
self._set_authinfo()
for key, value in filter(lambda x: x[0] not in self.NO_OVERRIDE, kwargs.items()):
self._try_override(key, value)
def _set_authinfo(self):
"""
Attempt to set the session user's credentials based on provided
key to read from ~/.authinfo file. See .authinfo documentation
here: https://documentation.sas.com/api/docsets/authinfo/9.4/content/authinfo.pdf.
This method supports a subset of the .authinfo spec, in accordance with
other IO access methods. This method will only parse `user` and `password`
arguments, but does support spaces in values if the value is quoted. Use
python's `shlex` library to parse these values.
"""
if os.name == 'nt':
authfile = os.path.expanduser(os.path.join('~', '_authinfo'))
else:
authfile = os.path.expanduser(os.path.join('~', '.authinfo'))
try:
with open(authfile, 'r') as f:
# Take first matching line found
parsed = (shlex.split(x, posix=False) for x in f.readlines())
authline = next(filter(lambda x: x[0] == self.authkey, parsed), None)
except OSError:
print('Error trying to read {}'.format(authfile))
authline = None
if authline is None:
print('Key {} not found in authinfo file: {}'.format(self.authkey, authfile))
elif len(authline) < 5:
print('Incomplete authinfo credentials in {}; key: {}'.format(authfile, self.authkey))
else:
# Override user/pw if previously set
# `authline` is in the following format:
# AUTHKEY username USERNAME password PASSWORD
self.user = authline[2]
self.pw = authline[4]
def _try_override(self, attr, value):
"""
Attempt to override a configuration file option if `self._lock` is
False. Otherwise, warn the user.
:param attr: Configuration attribute.
:param value: Configuration value.
"""
if self._lock is False:
setattr(self, attr, value)
else:
err = "Param '{}' was ignored due to configuration restriction".format(attr)
print(err, file=sys.stderr)
class SASSessionCOM(object):
"""
Initiate a connection to a SAS server and provide access for Windows
clients without the Java dependency. Utilizes available COM objects for
client communication with the IOM interface.
It may be possible to communicate with local SAS instances as well,
although this functionality is untested. A slight change may be
required to the `_startsas` method to support local instances.
"""
SAS_APP = 'SASApp'
HTML_RESULT_FILE = 'saspy_results.html'
# SASObjectManager.Protocols Enum values
PROTOCOL_COM = 0
PROTOCOL_IOM = 2
# SAS Date/Time/Datetime formats
FMT_DEFAULT_DATE_NAME = 'E8601DA'
FMT_DEFAULT_DATE_LENGTH = 10
FMT_DEFAULT_DATE_PRECISION = 0
FMT_DEFAULT_TIME_NAME = 'E8601TM'
FMT_DEFAULT_TIME_LENGTH = 15
FMT_DEFAULT_TIME_PRECISION = 6
FMT_DEFAULT_DATETIME_NAME = 'E8601DT'
FMT_DEFAULT_DATETIME_LENGTH = 26
FMT_DEFAULT_DATETIME_PRECISION = 6
# Pandas data types
PD_NUM_TYPE = ('i', 'u', 'f', 'c')
PD_STR_TYPE = ('S', 'U', 'V')
PD_DT_TYPE = ('M')
PD_BOOL_TYPE = ('b')
# ADODB RecordSet CursorTypeEnum values
CURSOR_UNSPECIFIED = -1
CURSOR_FORWARD = 0
CURSOR_KEYSET = 1
CURSOR_DYNAMIC = 2
CURSOR_STATIC = 3
# ADODB RecordSet LockTypeEnum values
LOCK_UNSPECIFIED = -1
LOCK_READONLY = 1
LOCK_PESSIMISTIC = 2
LOCK_OPTIMISTIC = 3
LOCK_BATCH_OPTIMISTIC = 4
# ADODB RecordSet CommandTypeEnum values
CMD_UNSPECIFIED = -1
CMD_TEXT = 1
CMD_TABLE = 2
CMD_STORED_PROC = 4
CMD_UNKNOWN = 8
CMD_FILE = 256
CMD_TABLE_DIRECT = 512
# ADODB Connection SchemaEnum values
SCHEMA_COLUMNS = 4
SCHEMA_TABLES = 20
# ADODB ObjectStateEnum values
STATE_CLOSED = 0
STATE_OPEN = 1
# FileService StreamOpenMode values
STREAM_READ = 1
STREAM_WRITE = 2
def __init__(self, **kwargs):
self._log = ''
self.sascfg = SASConfigCOM(**kwargs)
self._sb = kwargs.get('sb')
self.pid = self._startsas()
def __del__(self):
if self.adodb.State == self.STATE_OPEN:
self._endsas()
def _startsas(self) -> str:
"""
Create a workspace and open a connection with SAS.
:return [str]:
"""
if getattr(self, 'workspace', None) is not None:
# Do not create a new connection
return self.workspace.UniqueIdentifier
factory = dynamic.Dispatch('SASObjectManager.ObjectFactoryMulti2')
server = dynamic.Dispatch('SASObjectManager.ServerDef')
self.keeper = dynamic.Dispatch('SASObjectManager.ObjectKeeper')
self.adodb = dynamic.Dispatch('ADODB.Connection')
if self.sascfg.host is None:
# Create a local connection.
server.MachineDNSName = '127.0.0.1'
server.Port = 0
server.Protocol = self.PROTOCOL_COM
user = None
password = None
else:
# Create a remote connection. The following are required:
# 1. host
# 2. port
# 3. class_id
server.MachineDNSName = self.sascfg.host
server.Port = self.sascfg.port
server.Protocol = self.PROTOCOL_IOM
server.ClassIdentifier = self.sascfg.class_id
if self.sascfg.user is not None:
user = self.sascfg.user
else:
user = self.sascfg._prompt('Username: ')
if self.sascfg.pw is not None:
password = self.sascfg.pw
else:
password = self.sascfg._prompt('Password: ', pw=True)
self.workspace = factory.CreateObjectByServer(self.SAS_APP, True,
server, user, password)
self.keeper.AddObject(1, 'WorkspaceObject', self.workspace)
self.adodb.Open('Provider={}; Data Source=iom-id://{}'.format(
self.sascfg.provider, self.workspace.UniqueIdentifier))
ll = self.submit("options svgtitle='svgtitle'; options validvarname=any validmemname=extend pagesize=max nosyntaxcheck; ods graphics on;", "text")
if self.sascfg.verbose:
print("SAS Connection established. Workspace UniqueIdentifier is "+str(self.workspace.UniqueIdentifier)+"\n")
return self.workspace.UniqueIdentifier
def _endsas(self):
"""
Close a connection with SAS.
"""
self.adodb.Close()
self.keeper.RemoveObject(self.workspace)
self.workspace.Close()
if self.sascfg.verbose:
print("SAS Connection terminated. Workspace UniqueIdentifier was "+str(self.pid))
def _getlst(self, buf: int=2048) -> str:
"""
Flush listing.
:option buf [int]: Download buffer. Default 2048.
:return [str]:
"""
flushed = self.workspace.LanguageService.FlushList(buf)
result = flushed
while flushed:
flushed = self.workspace.LanguageService.FlushList(buf)
result += flushed
return result
def _getlog(self, buf: int=2048) -> str:
"""
Flush log.
:option buf [int]: Download buffer. Default 2048.
:return [str]:
"""
flushed = self.workspace.LanguageService.FlushLog(buf)
result = flushed
while flushed:
flushed = self.workspace.LanguageService.FlushLog(buf)
result += flushed
# Store flush result in running log
self._log += result
if result.count('ERROR:') > 0:
warnings.warn("Noticed 'ERROR:' in LOG, you ought to take a look and see if there was a problem")
self._sb.check_error_log = True
return result
def _getfile(self, fname: str, buf: int=2048, decode: bool=False) -> str:
"""
Use object file service to download a file from the provider.
:param fname [str]: Filename.
:option buf [int]: Download buffer. Default 2048.
:option decode [bool]: Decode the byte stream.
:return [str]:
"""
fobj = self.workspace.FileService.AssignFileref('outfile', 'DISK', fname, '', '')
# Use binary stream to support text and image transfers. The binary
# stream interface does not require a max line length, which allows
# support of arbitrarily wide tables.
stream = fobj[0].OpenBinaryStream(self.STREAM_READ)
flushed = stream.Read(buf)
result = bytes(flushed)
while flushed:
flushed = stream.Read(buf)
result += bytes(flushed)
stream.Close()
self.workspace.FileService.DeassignFileref(fobj[0].FilerefName)
if decode is True:
result = result.decode(self.sascfg.encoding, errors='replace')
return result
def _gethtmlfn(self) -> str:
"""
Return the path of the output HTML file. This is the combination of
the `workpath` attribute and `HTML_RESULT_FILE` constant.
:return [str]:
"""
return self._sb.workpath + self.HTML_RESULT_FILE
def _reset(self):
"""
Reset the LanguageService interface to its initial state with respect
to token scanning. Use it to release the LanguageService from an error
state associated with the execution of invalid syntax or incomplete
program source. This primarily occurs when a statement is submitted
without a trailing semicolon.
"""
self.workspace.LanguageService.Reset()
def _tablepath(self, table: str, libref: str=None) -> str:
"""
Define a sas dataset path based on a table name and optional libref
name. Will return a two-level or one-level path string based on the
provided arguments. One-level names are of this form: `table`, while
two-level names are of this form: `libref.table`. If libref is not
defined, SAS will implicitly define the library to WORK or USER. The
USER library needs to have been defined previously in SAS, otherwise
WORK is the default option. If the `libref` parameter is any value
that evaluates to `False`, the one-level path is returned.
:param table [str]: SAS data set name.
:option libref [str]: Optional library name.
:return [str]:
"""
if not libref:
path = "'{}'n".format(table.strip())
else:
path = "{}.'{}'n".format(libref, table.strip())
return path
def _schema(self, table: str, libref: str=None) -> dict:
"""
Request a table schema for a given `libref.table`.
:param table [str]: Table name
:option libref [str]: Library name.
:return [dict]:
"""
#tablepath = self._tablepath(table, libref=libref)
if not libref:
tablepath = table
else:
tablepath = "{}.{}".format(libref, table)
criteria = [None, None, tablepath]
schema = self.adodb.OpenSchema(self.SCHEMA_COLUMNS, criteria)
schema.MoveFirst()
metadata = {}
while not schema.EOF:
col_info = {x.Name: x.Value for x in schema.Fields}
if col_info['FORMAT_NAME'] in self._sb.sas_date_fmts:
col_info['CONVERT'] = lambda x: self._sb.SAS_EPOCH + datetime.timedelta(days=x) if x else x
elif col_info['FORMAT_NAME'] in self._sb.sas_datetime_fmts:
col_info['CONVERT'] = lambda x: self._sb.SAS_EPOCH + datetime.timedelta(seconds=x) if x else x
# elif FIXME TIME FORMATS
else:
col_info['CONVERT'] = lambda x: x
metadata[col_info['COLUMN_NAME']] = col_info
schema.MoveNext()
schema.Close()
return metadata
def _prompt(self, key: str, hide: bool=False) -> tuple:
"""
Ask the user for input about a given key.
:param key [str]: Key name.
:option hide [bool]: Hide user keyboard input.
:return [tuple]:
"""
input_ok = False
while input_ok is False:
val = self.sascfg._prompt('Enter value for macro variable {} '.format(key), pw=hide)
if val is None:
raise RuntimeError("No value for prompted macro variable provided.")
if val:
input_ok = True
else:
print('Input not valid.')
return (key, val)
def _asubmit(self, code: str, results: str='html'):
"""
Submit any SAS code. Does not return a result.
        :param code [str]: SAS statements to execute.
        :option results [str]: Result format. Options: HTML, TEXT. Default HTML.
        """
# Support html ods
if results.lower() == 'html':
ods_open = """
ods listing close;
ods {} (id=saspy_internal) options(bitmap_mode='inline')
file="{}"
device=svg
style={};
ods graphics on / outputfmt=png;
""".format(self.sascfg.output, self._gethtmlfn(), self._sb.HTML_Style)
ods_close = """
ods {} (id=saspy_internal) close;
ods listing;
""".format(self.sascfg.output)
else:
ods_open = ''
ods_close = ''
# Submit program
full_code = ods_open + code + ods_close
self.workspace.LanguageService.Submit(full_code)
def submit(self, code: str, results: str='html', prompt: dict=None, **kwargs) -> dict:
"""
Submit any SAS code. Returns log and listing as dictionary with keys
LOG and LST.
:param code [str]: SAS statements to execute.
:option results [str]: Result format. Options: HTML, TEXT. Default HTML.
:option prompt [dict]: Create macro variables from prompted keys.
"""
RESET = """;*';*";*/;quit;run;"""
prompt = prompt if prompt is not None else {}
printto = kwargs.pop('undo', False)
macro_declare = ''
for key, value in prompt.items():
macro_declare += '%let {} = {};\n'.format(*self._prompt(key, value))
# Submit program
self._asubmit(RESET + macro_declare + code + RESET, results)
# Retrieve listing and log
log = self._getlog()
if results.lower() == 'html':
# Make the following replacements in HTML listing:
# 1. Swap \x0c for \n
# 2. Change body class selector
# 3. Increase font size
listing = self._getfile(self._gethtmlfn(), decode=True) \
.replace(chr(12), chr(10)) \
.replace('<body class="c body">', '<body class="l body">') \
.replace('font-size: x-small;', 'font-size: normal;')
else:
listing = self._getlst()
        # Invalid syntax will put the interface into an error state. Reset
# the LanguageService to prevent further errors.
# FIXME: In the future, may only want to reset on ERROR. However, this
# operation seems pretty lightweight, so calling `_reset()` on all
# submits is not a burden.
self._reset()
if printto:
self._asubmit("\nproc printto;run;\n", 'text')
log += self._getlog()
self._sb._lastlog = log
return {'LOG': log, 'LST': listing}
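    # A minimal usage sketch for `submit` (illustrative; assumes this access
    # method instance is reachable as `io` on an active session):
    #   res = io.submit("proc print data=sashelp.class; run;", results='text')
    #   print(res['LOG'])
    #   print(res['LST'])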
def saslog(self) -> str:
"""
Return the full SAS log.
:return [str]:
"""
return self._log
def exist(self, table: str, libref: str=None) -> bool:
"""
Determine if a `libref.table` exists.
:param table [str]: Table name
:option libref [str]: Library name.
:return [bool]:
"""
#tablepath = self._tablepath(table, libref=libref)
#criteria = [None, None, tablepath]
#schema = self.adodb.OpenSchema(self.SCHEMA_COLUMNS, criteria)
#exists = not schema.BOF
#schema.Close()
#return exists
code = 'data _null_; e = exist("'
        if libref:
code += libref+"."
code += "'"+table.strip()+"'n"+'"'+");\n"
code += 'v = exist("'
        if libref:
code += libref+"."
code += "'"+table.strip()+"'n"+'"'+", 'VIEW');\n if e or v then e = 1;\n"
code += "te='TABLE_EXISTS='; put te e;run;\n"
ll = self.submit(code, "text")
l2 = ll['LOG'].rpartition("TABLE_EXISTS= ")
l2 = l2[2].partition("\n")
exists = int(l2[0])
return bool(exists)
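    # The generated existence check resembles the following (illustrative,
    # for table 'class' in libref 'sashelp'):
    #   data _null_; e = exist("sashelp.'class'n");
    #   v = exist("sashelp.'class'n", 'VIEW'); if e or v then e = 1;
    #   te='TABLE_EXISTS='; put te e;run;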
def read_sasdata(self, table: str, libref: str=None, dsopts: dict=None) -> tuple:
"""
Read any SAS dataset and return as a tuple of header, rows
:param table [str]: Table name
:option libref [str]: Library name.
:option dsopts [dict]: Dataset options.
:return [tuple]:
"""
TARGET = '_saspy_sd2df'
EXPORT = """
data {tgt};
set {tbl} {dopt};
run;
"""
dsopts = self._sb._dsopts(dsopts) if dsopts is not None else ''
tablepath = self._tablepath(table, libref=libref)
recordset = dynamic.Dispatch('ADODB.RecordSet')
# Create an intermediate dataset with `dsopts` applied
export = EXPORT.format(tgt=TARGET, tbl=tablepath, dopt=dsopts)
self.workspace.LanguageService.Submit(export)
meta = self._schema(TARGET)
# Connect RecordSet object to ADODB connection with params:
# Cursor: Forward Only
# Lock: Read Only
# Command: Table Direct
recordset.Open(TARGET, self.adodb, self.CURSOR_FORWARD,
self.LOCK_READONLY, self.CMD_TABLE_DIRECT)
recordset.MoveFirst()
header = [x.Name for x in recordset.Fields]
rows = []
while not recordset.EOF:
rows.append([meta[x.Name]['CONVERT'](x.Value) for x in recordset.Fields])
recordset.MoveNext()
recordset.Close()
return (header, rows, meta)
def read_csv(self, filepath: str, table: str, libref: str=None, nosub: bool=False, opts: dict=None):
"""
Submit an import job to the SAS workspace.
:param filepath [str]: File URI.
:param table [str]: Table name.
:option libref [str]: Library name.
        :option nosub [bool]: Return the SAS code instead of executing it.
:option opts [dict]: SAS PROC IMPORT options.
"""
opts = opts if opts is not None else {}
filepath = 'url ' + filepath if filepath.lower().startswith('http') else filepath
tablepath = self._tablepath(table, libref=libref)
proc_code = """
filename csv_file "{}";
proc import datafile=csv_file out={} dbms=csv replace;
{}
run;
""".format(filepath.replace('"', '""'), tablepath, self._sb._impopts(opts))
if nosub is True:
return proc_code
else:
return self.submit(proc_code, 'text')
def write_csv(self, filepath: str, table: str, libref: str=None, nosub: bool=True, dsopts: dict=None, opts: dict=None):
"""
Submit an export job to the SAS workspace.
:param filepath [str]: File URI.
:param table [str]: Table name.
:option libref [str]: Library name.
        :option nosub [bool]: Return the SAS code instead of executing it.
        :option opts [dict]: SAS PROC EXPORT options.
:option dsopts [dict]: SAS dataset options.
"""
opts = opts if opts is not None else {}
dsopts = dsopts if dsopts is not None else {}
tablepath = self._tablepath(table, libref=libref)
proc_code = """
filename csv_file "{}";
proc export data={} {} outfile=csv_file dbms=csv replace;
{}
run;
""".format(filepath.replace('"', '""'), tablepath, self._sb._dsopts(dsopts), self._sb._expopts(opts))
if nosub is True:
return proc_code
else:
return self.submit(proc_code, 'text')['LOG']
def dataframe2sasdata(self, df: '<Pandas Data Frame object>', table: str ='a',
libref: str ="", keep_outer_quotes: bool=False,
embedded_newlines: bool=True,
LF: str = '\x01', CR: str = '\x02',
colsep: str = '\x03', colrep: str = ' ',
datetimes: dict={}, outfmts: dict={}, labels: dict={},
outdsopts: dict={}, encode_errors = None, char_lengths = None,
**kwargs):
"""
Create a SAS dataset from a pandas data frame.
:param df [pd.DataFrame]: Pandas data frame containing data to write.
:param table [str]: Table name.
:option libref [str]: Library name. Default work.
        None of these options are used by this access method; they are accepted for compatibility with other access methods.
        keep_outer_quotes - for character columns, have SAS keep any outer quotes instead of stripping them off.
        embedded_newlines - if any char columns have embedded CR or LF, set this to True to get them imported into the SAS data set
        LF - if embedded_newlines=True, the character to use for LF when transferring the data; defaults to '\x01'
        CR - if embedded_newlines=True, the character to use for CR when transferring the data; defaults to '\x02'
        colsep - the column separator character used for streaming the delimited data to SAS; defaults to '\x03'
        colrep - the char to convert to for any embedded colsep, LF, CR chars in the data; defaults to ' '
datetimes - not implemented yet in this access method
outfmts - not implemented yet in this access method
labels - not implemented yet in this access method
outdsopts - not implemented yet in this access method
encode_errors - not implemented yet in this access method
char_lengths - not implemented yet in this access method
"""
DATETIME_NAME = 'DATETIME26.6'
DATETIME_FMT = '%Y-%m-%dT%H:%M:%S.%f'
if self.sascfg.verbose:
if keep_outer_quotes != False:
print("'keep_outer_quotes=' is not used with this access method. option ignored.")
if embedded_newlines != True:
print("'embedded_newlines=' is not used with this access method. option ignored.")
if LF != '\x01' or CR != '\x02' or colsep != '\x03':
print("'LF=, CR= and colsep=' are not used with this access method. option(s) ignored.")
if datetimes != {}:
print("'datetimes=' is not used with this access method. option ignored.")
if outfmts != {}:
print("'outfmts=' is not used with this access method. option ignored.")
if labels != {}:
print("'labels=' is not used with this access method. option ignored.")
if outdsopts != {}:
print("'outdsopts=' is not used with this access method. option ignored.")
if encode_errors:
print("'encode_errors=' is not used with this access method. option ignored.")
if char_lengths:
print("'char_lengths=' is not used with this access method. option ignored.")
tablepath = self._tablepath(table, libref=libref)
if type(df.index) != pd.RangeIndex:
warnings.warn("Note that Indexes are not transferred over as columns. Only actual coulmns are transferred")
columns = []
formats = {}
for i, name in enumerate(df.columns):
if df[name].dtypes.kind in self.PD_NUM_TYPE:
# Numeric type
definition = "'{}'n num".format(name)
formats[name] = lambda x: str(x) if pd.isnull(x) is False else 'NULL'
elif df[name].dtypes.kind in self.PD_STR_TYPE:
# Character type
# NOTE: If a character string contains a single `'`, replace
# it with `''`. This is the SAS equivalent to `\'`.
length = df[name].map(len).max()
definition = "'{}'n char({})".format(name, length)
formats[name] = lambda x: "'{}'".format(x.replace("'", "''")) if pd.isnull(x) is False else 'NULL'
elif df[name].dtypes.kind in self.PD_DT_TYPE:
# Datetime type
definition = "'{}'n num informat={} format={}".format(name, DATETIME_NAME, DATETIME_NAME)
formats[name] = lambda x: "'{:{}}'DT".format(x, DATETIME_FMT) if pd.isnull(x) is False else 'NULL'
else:
# Default to character type
# NOTE: If a character string contains a single `'`, replace
# it with `''`. This is the SAS equivalent to `\'`.
length = df[name].map(str).map(len).max()
definition = "'{}'n char({})".format(name, length)
formats[name] = lambda x: "'{}'".format(x.replace("'", "''")) if pd.isnull(x) is False else 'NULL'
columns.append(definition)
sql_values = []
for index, row in df.iterrows():
vals = []
for i, col in enumerate(row):
func = formats[df.columns[i]]
vals.append(func(col))
sql_values.append('values({})'.format(', '.join(vals)))
sql_create = 'create table {} ({});'.format(tablepath, ', '.join(columns))
sql_insert = 'insert into {} {};'.format(tablepath, '\n'.join(sql_values))
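        # Illustrative shape of the generated SQL for a frame with one
        # character and one numeric column (hypothetical table/libref):
        #   create table work.'cars'n ('make'n char(6), 'mpg'n num);
        #   insert into work.'cars'n values('Acura', 31) values('Audi', 26);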
self.adodb.Execute(sql_create)
self.adodb.Execute(sql_insert)
return None
def sasdata2dataframe(self, table: str, libref: str=None, dsopts: dict=None, method: str='', **kwargs) -> 'pd.DataFrame':
"""
Create a pandas data frame from a SAS dataset.
:param table [str]: Table name.
:option libref [str]: Library name.
:option dsopts [dict]: Dataset options.
:option method [str]: Download method.
:option tempkeep [bool]: Download the csv file if using the csv method.
:option tempfile [str]: File path for the saved output file.
:return [pd.DataFrame]:
"""
        # Strip options that are unused by this access method from kwargs
        # so they can't be passed to pandas later.
rowsep = kwargs.pop('rowsep', ' ')
colsep = kwargs.pop('colsep', ' ')
rowrep = kwargs.pop('rowrep', ' ')
colrep = kwargs.pop('colrep', ' ')
if method.upper() == 'DISK':
print("This access method doesn't support the DISK method. Try CSV or MEMORY")
return None
if method.upper() == 'CSV':
df = self.sasdata2dataframeCSV(table, libref, dsopts=dsopts, **kwargs)
else:
my_fmts = kwargs.pop('my_fmts', False)
k_dts = kwargs.pop('dtype', None)
if self.sascfg.verbose:
if my_fmts != False:
print("'my_fmts=' is not supported in this access method. option ignored.")
if k_dts is not None:
print("'dtype=' is only used with the CSV version of this method. option ignored.")
header, rows, meta = self.read_sasdata(table, libref, dsopts=dsopts)
df = pd.DataFrame.from_records(rows, columns=header, **kwargs)
for col in meta.keys():
if meta[col]['FORMAT_NAME'] in self._sb.sas_date_fmts + self._sb.sas_datetime_fmts:
df[col] = pd.to_datetime(df[col], errors='coerce')
elif meta[col]['DATA_TYPE'] == 5:
df[col] = pd.to_numeric(df[col], errors='coerce')
return df
def sasdata2dataframeCSV(self, table: str, libref: str ='', dsopts: dict = None,
tempfile: str=None, tempkeep: bool=False, **kwargs) -> 'pd.DataFrame':
"""
Create a pandas data frame from a SAS dataset.
:param table [str]: Table name.
:option libref [str]: Library name.
:option dsopts [dict]: Dataset options.
        :option opts [dict]: Dictionary containing any of the following PROC EXPORT options: delimiter, putnames.
:option tempkeep [bool]: Download the csv file if using the csv method.
:option tempfile [str]: File path for the saved output file.
:return [pd.DataFrame]:
"""
FORMAT_STRING = '{column} {format}{length}.{precision}'
EXPORT = """
data _saspy_sd2df;
format {fmt};
set {tbl};
run;
proc export data=_saspy_sd2df {dopt}
outfile="{out}"
dbms=csv replace;
{exopts}
run;
"""
k_dts = kwargs.get('dtype', None)
my_fmts = kwargs.pop('my_fmts', False)
if self.sascfg.verbose:
if my_fmts != False:
print("'my_fmts=' is not supported in this access method. option ignored.")
sas_csv = '{}saspy_sd2df.csv'.format(self._sb.workpath)
dopts = self._sb._dsopts(dsopts) if dsopts is not None else ''
tablepath = self._tablepath(table, libref=libref)
expopts = self._sb._expopts(kwargs.pop('opts', {}))
# Convert any date format to one pandas can understand (ISO-8601).
# Save a reference of the column name in a list so pandas can parse
# the column during construction.
datecols = []
fmtlist = []
meta = self._schema(table, libref)
for name, col in meta.items():
if col['FORMAT_NAME'] in self._sb.sas_date_fmts:
datecols.append(name)
col_format = self.FMT_DEFAULT_DATE_NAME
col_length = self.FMT_DEFAULT_DATE_LENGTH
col_precis = self.FMT_DEFAULT_DATE_PRECISION
elif col['FORMAT_NAME'] in self._sb.sas_datetime_fmts:
datecols.append(name)
col_format = self.FMT_DEFAULT_DATETIME_NAME
col_length = self.FMT_DEFAULT_DATETIME_LENGTH
col_precis = self.FMT_DEFAULT_DATETIME_PRECISION
# elif FIXME TIME FORMATS
else:
col_format = col['FORMAT_NAME']
col_length = col['FORMAT_LENGTH']
col_precis = col['FORMAT_DECIMAL']
if col['FORMAT_NAME']:
full_format = FORMAT_STRING.format(
column=col['COLUMN_NAME'],
format=col_format,
length=col_length,
precision=col_precis)
fmtlist.append(full_format)
export = EXPORT.format(fmt=' '.join(fmtlist),
tbl=tablepath,
dopt=dopts,
exopts=expopts,
out=sas_csv)
# Use `LanguageService.Submit` instead of `submit` for a slight
# performance bump. We don't need the log or listing here so skip
# the wrapper function.
self.workspace.LanguageService.Submit(export)
outstring = self._getfile(sas_csv, decode=True)
# Write temp file if requested by user
        if tempkeep is True and tempfile is not None:
            with open(tempfile, 'w') as f:
                f.write(outstring)
df = pd.read_csv(io.StringIO(outstring), parse_dates=datecols, **kwargs)
if k_dts is None: # don't override these if user provided their own dtypes
for col in meta.keys():
if meta[col]['FORMAT_NAME'] in self._sb.sas_date_fmts + self._sb.sas_datetime_fmts:
df[col] = pd.to_datetime(df[col], errors='coerce')
return df
def upload(self, local: str, remote: str, overwrite: bool=True, permission: str='', **kwargs):
"""
Upload a file to the SAS server.
:param local [str]: Local filename.
        :param remote [str]: Remote filename.
:option overwrite [bool]: Overwrite the file if it exists.
:option permission [str]: See SAS filename statement documentation.
"""
perms = "PERMISSION='{}'".format(permission) if permission else ''
valid = self._sb.file_info(remote, quiet=True)
if valid == {}:
# Parameter `remote` references a directory. Default to using the
# filename in `local` path.
remote_file = remote + self._sb.hostsep + os.path.basename(local)
elif valid is not None and overwrite is False:
# Parameter `remote` references a file that exists but we cannot
# overwrite it.
# TODO: Raise exception here instead of returning dict
return {'Success': False,
'LOG': 'File {} exists and overwrite was set to False. Upload was stopped.'.format(remote)}
else:
remote_file = remote
with open(local, 'rb') as f:
fobj = self.workspace.FileService.AssignFileref('infile', 'DISK', remote_file, perms, '')
stream = fobj[0].OpenBinaryStream(self.STREAM_WRITE)
stream.Write(f.read())
stream.Close()
self.workspace.FileService.DeassignFileref(fobj[0].FilerefName)
return {'Success': True,
'LOG': 'File successfully written using FileService.'}
def download(self, local: str, remote: str, overwrite: bool=True, **kwargs):
"""
Download a file from the SAS server.
:param local [str]: Local filename.
        :param remote [str]: Remote filename.
:option overwrite [bool]: Overwrite the file if it exists.
"""
valid = self._sb.file_info(remote, quiet=True)
if valid is None:
# Parameter `remote` references an invalid file path.
# TODO: Raise exception here instead of returning dict
return {'Success': False,
'LOG': 'File {} does not exist.'.format(remote)}
elif valid == {}:
# Parameter `remote` references a directory.
# TODO: Raise exception here instead of returning dict
return {'Success': False,
'LOG': 'File {} is a directory.'.format(remote)}
if os.path.isdir(local) is True:
# Parameter `local` references a directory. Default to using the
# filename in `remote` path.
local_file = os.path.join(local, remote.rpartition(self._sb.hostsep)[2])
else:
local_file = local
with open(local_file, 'wb') as f:
f.write(self._getfile(remote))
return {'Success': True,
'LOG': 'File successfully read using FileService.'}
| apache-2.0 |
ycaihua/scikit-learn | sklearn/decomposition/tests/test_incremental_pca.py | 23 | 8317 | """Tests for Incremental PCA."""
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn import datasets
from sklearn.decomposition import PCA, IncrementalPCA
iris = datasets.load_iris()
def test_incremental_pca():
"""Incremental PCA on dense arrays."""
X = iris.data
batch_size = X.shape[0] // 3
ipca = IncrementalPCA(n_components=2, batch_size=batch_size)
pca = PCA(n_components=2)
pca.fit_transform(X)
X_transformed = ipca.fit_transform(X)
np.testing.assert_equal(X_transformed.shape, (X.shape[0], 2))
assert_almost_equal(ipca.explained_variance_ratio_.sum(),
pca.explained_variance_ratio_.sum(), 1)
for n_components in [1, 2, X.shape[1]]:
ipca = IncrementalPCA(n_components, batch_size=batch_size)
ipca.fit(X)
cov = ipca.get_covariance()
precision = ipca.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(X.shape[1]))
def test_incremental_pca_check_projection():
"""Test that the projection of data is correct."""
rng = np.random.RandomState(1999)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
# Get the reconstruction of the generated data X
# Note that Xt has the same "components" as X, just separated
# This is what we want to ensure is recreated correctly
Yt = IncrementalPCA(n_components=2).fit(X).transform(Xt)
# Normalize
Yt /= np.sqrt((Yt ** 2).sum())
# Make sure that the first element of Yt is ~1, this means
# the reconstruction worked as expected
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_incremental_pca_inverse():
"""Test that the projection of data can be inverted."""
rng = np.random.RandomState(1999)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
ipca = IncrementalPCA(n_components=2, batch_size=10).fit(X)
Y = ipca.transform(X)
Y_inverse = ipca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
def test_incremental_pca_validation():
"""Test that n_components is >=1 and <= n_features."""
X = [[0, 1], [1, 0]]
for n_components in [-1, 0, .99, 3]:
assert_raises(ValueError, IncrementalPCA(n_components,
batch_size=10).fit, X)
def test_incremental_pca_set_params():
"""Test that components_ sign is stable over batch sizes."""
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 20
X = rng.randn(n_samples, n_features)
X2 = rng.randn(n_samples, n_features)
X3 = rng.randn(n_samples, n_features)
ipca = IncrementalPCA(n_components=20)
ipca.fit(X)
# Decreasing number of components
ipca.set_params(n_components=10)
assert_raises(ValueError, ipca.partial_fit, X2)
# Increasing number of components
ipca.set_params(n_components=15)
assert_raises(ValueError, ipca.partial_fit, X3)
# Returning to original setting
ipca.set_params(n_components=20)
ipca.partial_fit(X)
def test_incremental_pca_num_features_change():
"""Test that changing n_components will raise an error."""
rng = np.random.RandomState(1999)
n_samples = 100
X = rng.randn(n_samples, 20)
X2 = rng.randn(n_samples, 50)
ipca = IncrementalPCA(n_components=None)
ipca.fit(X)
assert_raises(ValueError, ipca.partial_fit, X2)
def test_incremental_pca_batch_signs():
"""Test that components_ sign is stable over batch sizes."""
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features)
all_components = []
batch_sizes = np.arange(10, 20)
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
all_components.append(ipca.components_)
for i, j in zip(all_components[:-1], all_components[1:]):
assert_almost_equal(np.sign(i), np.sign(j), decimal=6)
def test_incremental_pca_batch_values():
"""Test that components_ values are stable over batch sizes."""
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features)
all_components = []
batch_sizes = np.arange(20, 40, 3)
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
all_components.append(ipca.components_)
for i, j in zip(all_components[:-1], all_components[1:]):
assert_almost_equal(i, j, decimal=1)
def test_incremental_pca_partial_fit():
"""Test that fit and partial_fit get equivalent results."""
rng = np.random.RandomState(1999)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
batch_size = 10
ipca = IncrementalPCA(n_components=2, batch_size=batch_size).fit(X)
pipca = IncrementalPCA(n_components=2, batch_size=batch_size)
# Add one to make sure endpoint is included
batch_itr = np.arange(0, n + 1, batch_size)
for i, j in zip(batch_itr[:-1], batch_itr[1:]):
pipca.partial_fit(X[i:j, :])
assert_almost_equal(ipca.components_, pipca.components_, decimal=3)
def test_incremental_pca_against_pca_iris():
"""Test that IncrementalPCA and PCA are approximate (to a sign flip)."""
X = iris.data
Y_pca = PCA(n_components=2).fit_transform(X)
Y_ipca = IncrementalPCA(n_components=2, batch_size=25).fit_transform(X)
assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_incremental_pca_against_pca_random_data():
"""Test that IncrementalPCA and PCA are approximate (to a sign flip)."""
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features) + 5 * rng.rand(1, n_features)
Y_pca = PCA(n_components=3).fit_transform(X)
Y_ipca = IncrementalPCA(n_components=3, batch_size=25).fit_transform(X)
assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_explained_variances():
"""Test that PCA and IncrementalPCA calculations match"""
X = datasets.make_low_rank_matrix(1000, 100, tail_strength=0.,
effective_rank=10, random_state=1999)
prec = 3
n_samples, n_features = X.shape
for nc in [None, 99]:
pca = PCA(n_components=nc).fit(X)
ipca = IncrementalPCA(n_components=nc, batch_size=100).fit(X)
assert_almost_equal(pca.explained_variance_, ipca.explained_variance_,
decimal=prec)
assert_almost_equal(pca.explained_variance_ratio_,
ipca.explained_variance_ratio_, decimal=prec)
assert_almost_equal(pca.noise_variance_, ipca.noise_variance_,
decimal=prec)
def test_whitening():
"""Test that PCA and IncrementalPCA transforms match to sign flip."""
X = datasets.make_low_rank_matrix(1000, 10, tail_strength=0.,
effective_rank=2, random_state=1999)
prec = 3
n_samples, n_features = X.shape
for nc in [None, 9]:
pca = PCA(whiten=True, n_components=nc).fit(X)
ipca = IncrementalPCA(whiten=True, n_components=nc,
batch_size=250).fit(X)
Xt_pca = pca.transform(X)
Xt_ipca = ipca.transform(X)
assert_almost_equal(np.abs(Xt_pca), np.abs(Xt_ipca), decimal=prec)
Xinv_ipca = ipca.inverse_transform(Xt_ipca)
Xinv_pca = pca.inverse_transform(Xt_pca)
assert_almost_equal(X, Xinv_ipca, decimal=prec)
assert_almost_equal(X, Xinv_pca, decimal=prec)
assert_almost_equal(Xinv_pca, Xinv_ipca, decimal=prec)
| bsd-3-clause |
yangw1234/BigDL | pyspark/bigdl/optim/optimizer.py | 2 | 40389 | #
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import multiprocessing
import os
import sys
from distutils.dir_util import mkpath
from py4j.java_gateway import JavaObject
from pyspark.rdd import RDD
from bigdl.util.common import DOUBLEMAX
from bigdl.util.common import JTensor
from bigdl.util.common import JavaValue
from bigdl.util.common import callBigDlFunc
from bigdl.util.common import callJavaFunc
from bigdl.util.common import get_node_and_core_number
from bigdl.util.common import init_engine
from bigdl.util.common import to_list
from bigdl.dataset.dataset import *
if sys.version >= '3':
long = int
unicode = str
class Top1Accuracy(JavaValue):
"""
    Calculate the percentage that output's max probability index equals target.
>>> top1 = Top1Accuracy()
creating: createTop1Accuracy
"""
def __init__(self, bigdl_type="float"):
JavaValue.__init__(self, None, bigdl_type)
class TreeNNAccuracy(JavaValue):
"""
    Calculate the percentage that output's max probability index equals target.
>>> top1 = TreeNNAccuracy()
creating: createTreeNNAccuracy
"""
def __init__(self, bigdl_type="float"):
JavaValue.__init__(self, None, bigdl_type)
class Top5Accuracy(JavaValue):
"""
    Calculate the percentage that the target is among the output's top 5 probability indices.
>>> top5 = Top5Accuracy()
creating: createTop5Accuracy
"""
def __init__(self, bigdl_type="float"):
JavaValue.__init__(self, None, bigdl_type)
class Loss(JavaValue):
"""
    This evaluation method calculates the loss of the output with respect to the target.
>>> from bigdl.nn.criterion import ClassNLLCriterion
>>> loss = Loss()
creating: createClassNLLCriterion
creating: createLoss
>>> loss = Loss(ClassNLLCriterion())
creating: createClassNLLCriterion
creating: createLoss
"""
def __init__(self, cri=None, bigdl_type="float"):
from bigdl.nn.criterion import ClassNLLCriterion
if cri is None:
cri = ClassNLLCriterion()
JavaValue.__init__(self, None, bigdl_type, cri)
class HitRatio(JavaValue):
"""
    Hit Ratio (HR) used in recommendation applications.
HR intuitively measures whether the test item is present on the top-k list.
>>> hr10 = HitRatio(k = 10)
creating: createHitRatio
"""
def __init__(self, k = 10, neg_num = 100, bigdl_type="float"):
"""
Create hit ratio validation method.
:param k: top k
:param neg_num: number of negative items.
"""
JavaValue.__init__(self, None, bigdl_type, k, neg_num)
class NDCG(JavaValue):
"""
Normalized Discounted Cumulative Gain(NDCG).
NDCG accounts for the position of the hit by assigning higher scores to hits at top ranks.
>>> ndcg = NDCG(k = 10)
creating: createNDCG
"""
def __init__(self, k = 10, neg_num = 100, bigdl_type="float"):
"""
Create NDCG validation method.
:param k: top k
:param neg_num: number of negative items.
"""
JavaValue.__init__(self, None, bigdl_type, k, neg_num)
class MAE(JavaValue):
"""
This evaluation method calculates the mean absolute error of output with respect to target.
>>> mae = MAE()
creating: createMAE
"""
def __init__(self, bigdl_type="float"):
JavaValue.__init__(self, None, bigdl_type)
class MaxIteration(JavaValue):
"""
A trigger specifies a timespot or several timespots during training,
and a corresponding action will be taken when the timespot(s) is reached.
MaxIteration is a trigger that triggers an action when training reaches
the number of iterations specified by "max".
Usually used as end_trigger when creating an Optimizer.
>>> maxIteration = MaxIteration(20)
creating: createMaxIteration
"""
def __init__(self, max, bigdl_type="float"):
"""
Create a MaxIteration trigger.
:param max: max
"""
JavaValue.__init__(self, None, bigdl_type, max)
class MaxEpoch(JavaValue):
"""
A trigger specifies a timespot or several timespots during training,
and a corresponding action will be taken when the timespot(s) is reached.
MaxEpoch is a trigger that triggers an action when training reaches
the number of epochs specified by "max_epoch".
Usually used as end_trigger when creating an Optimizer.
>>> maxEpoch = MaxEpoch(2)
creating: createMaxEpoch
"""
def __init__(self, max_epoch, bigdl_type="float"):
"""
Create a MaxEpoch trigger.
:param max_epoch: max_epoch
"""
JavaValue.__init__(self, None, bigdl_type, max_epoch)
class EveryEpoch(JavaValue):
"""
A trigger specifies a timespot or several timespots during training,
and a corresponding action will be taken when the timespot(s) is reached.
EveryEpoch is a trigger that triggers an action when each epoch finishs.
Could be used as trigger in setvalidation and setcheckpoint in Optimizer,
and also in TrainSummary.set_summary_trigger.
>>> everyEpoch = EveryEpoch()
creating: createEveryEpoch
"""
def __init__(self, bigdl_type="float"):
"""
Create a EveryEpoch trigger.
"""
JavaValue.__init__(self, None, bigdl_type)
class SeveralIteration(JavaValue):
"""
A trigger specifies a timespot or several timespots during training,
and a corresponding action will be taken when the timespot(s) is reached.
SeveralIteration is a trigger that triggers an action every "n"
iterations.
Could be used as trigger in setvalidation and setcheckpoint in Optimizer,
and also in TrainSummary.set_summary_trigger.
>>> serveralIteration = SeveralIteration(2)
creating: createSeveralIteration
"""
def __init__(self, interval, bigdl_type="float"):
"""
Create a SeveralIteration trigger.
:param interval: interval is the "n" where an action is triggeredevery "n" iterations
"""
JavaValue.__init__(self, None, bigdl_type, interval)
class MaxScore(JavaValue):
"""
    A trigger that triggers an action when the validation score is larger than the "max" score
>>> maxScore = MaxScore(0.4)
creating: createMaxScore
"""
def __init__(self, max, bigdl_type="float"):
"""
Create a MaxScore trigger.
:param max: max score
"""
JavaValue.__init__(self, None, bigdl_type, max)
class MinLoss(JavaValue):
"""
    A trigger that triggers an action when the training loss is less than the "min" loss
>>> minLoss = MinLoss(0.1)
creating: createMinLoss
"""
def __init__(self, min, bigdl_type="float"):
"""
Create a MinLoss trigger.
:param min: min loss
"""
JavaValue.__init__(self, None, bigdl_type, min)
class Poly(JavaValue):
"""
A learning rate decay policy, where the effective learning rate
follows a polynomial decay, to be zero by the max_iteration.
    Calculation: base_lr * (1 - iter/max_iteration) ^ power
    :param power: coefficient of decay, refer to calculation formula
:param max_iteration: max iteration when lr becomes zero
>>> poly = Poly(0.5, 2)
creating: createPoly
"""
def __init__(self, power, max_iteration, bigdl_type="float"):
JavaValue.__init__(self, None, bigdl_type, power, max_iteration)
class Exponential(JavaValue):
"""
    [[Exponential]] is a learning rate schedule, which rescales the learning rate by
    lr_{n + 1} = lr * decayRate `^` (iter / decayStep)
    :param decay_step the interval for lr decay
:param decay_rate decay rate
:param stair_case if true, iter / decayStep is an integer division
and the decayed learning rate follows a staircase function.
>>> exponential = Exponential(100, 0.1)
creating: createExponential
"""
def __init__(self, decay_step, decay_rate, stair_case=False, bigdl_type="float"):
JavaValue.__init__(self, None, bigdl_type, decay_step, decay_rate, stair_case)
class Step(JavaValue):
"""
A learning rate decay policy, where the effective learning rate is
calculated as base_lr * gamma ^ (floor(iter / step_size))
:param step_size:
:param gamma:
>>> step = Step(2, 0.3)
creating: createStep
"""
def __init__(self, step_size, gamma, bigdl_type="float"):
JavaValue.__init__(self, None, bigdl_type, step_size, gamma)
class Default(JavaValue):
"""
    The default learning rate schedule. For each iteration, the effective
    learning rate is calculated as base_lr / (1 + iter * learningrate_decay),
    using the learning rate decay configured in the optimization method.
>>> step = Default()
creating: createDefault
"""
def __init__(self, bigdl_type="float"):
JavaValue.__init__(self, None, bigdl_type)
class Plateau(JavaValue):
"""
Plateau is the learning rate schedule when a metric has stopped improving.
Models often benefit from reducing the learning rate by a factor of 2-10
once learning stagnates. It monitors a quantity and if no improvement
is seen for a 'patience' number of epochs, the learning rate is reduced.
:param monitor quantity to be monitored, can be Loss or score
:param factor factor by which the learning rate will be reduced. new_lr = lr * factor
:param patience number of epochs with no improvement after which learning rate will be reduced.
:param mode one of {min, max}.
In min mode, lr will be reduced when the quantity monitored has stopped decreasing;
in max mode it will be reduced when the quantity monitored has stopped increasing
:param epsilon threshold for measuring the new optimum, to only focus on significant changes.
:param cooldown number of epochs to wait before resuming normal operation
after lr has been reduced.
:param min_lr lower bound on the learning rate.
>>> plateau = Plateau("score")
creating: createPlateau
"""
def __init__(self,
monitor,
factor=0.1,
patience=10,
mode="min",
epsilon=1e-4,
cooldown=0,
min_lr=0.0,
bigdl_type="float"):
JavaValue.__init__(self, None, bigdl_type, monitor, factor, patience, mode, epsilon,
cooldown, min_lr)
class Warmup(JavaValue):
"""
A learning rate gradual increase policy, where the effective learning rate
increase delta after each iteration.
Calculation: base_lr + delta * iteration
:param delta: increase amount after each iteration
>>> warmup = Warmup(0.05)
creating: createWarmup
"""
def __init__(self, delta, bigdl_type="float"):
JavaValue.__init__(self, None, bigdl_type, delta)
class SequentialSchedule(JavaValue):
"""
Stack several learning rate schedulers.
:param iterationPerEpoch: iteration numbers per epoch
>>> sequentialSchedule = SequentialSchedule(5)
creating: createSequentialSchedule
>>> poly = Poly(0.5, 2)
creating: createPoly
>>> test = sequentialSchedule.add(poly, 5)
"""
def __init__(self, iteration_per_epoch, bigdl_type="float"):
JavaValue.__init__(self, None, bigdl_type, iteration_per_epoch)
def add(self, scheduler, max_iteration, bigdl_type="float"):
"""
Add a learning rate scheduler to the contained `schedules`
:param scheduler: learning rate scheduler to be add
:param max_iteration: iteration numbers this scheduler will run
"""
return callBigDlFunc(bigdl_type, "addScheduler", self.value, scheduler, max_iteration)
class OptimMethod(JavaValue):
def __init__(self, jvalue, bigdl_type, *args):
if (jvalue):
assert(type(jvalue) == JavaObject)
self.value = jvalue
else:
self.value = callBigDlFunc(
bigdl_type, JavaValue.jvm_class_constructor(self), *args)
self.bigdl_type = bigdl_type
@staticmethod
def load(path, bigdl_type="float"):
"""
load optim method
:param path: file path
"""
return callBigDlFunc(bigdl_type, "loadOptimMethod", path)
def save(self, path, overWrite):
"""
save OptimMethod
:param path path
:param overWrite whether to overwrite
"""
method=self.value
return callBigDlFunc(self.bigdl_type, "saveOptimMethod", method, path, overWrite)
class SGD(OptimMethod):
"""
A plain implementation of SGD
:param learningrate learning rate
:param learningrate_decay learning rate decay
:param weightdecay weight decay
:param momentum momentum
:param dampening dampening for momentum
:param nesterov enables Nesterov momentum
:param learningrates 1D tensor of individual learning rates
:param weightdecays 1D tensor of individual weight decays
>>> sgd = SGD()
creating: createDefault
creating: createSGD
"""
def __init__(self,
learningrate=1e-3,
learningrate_decay=0.0,
weightdecay=0.0,
momentum=0.0,
dampening=DOUBLEMAX,
nesterov=False,
leaningrate_schedule=None,
learningrates=None,
weightdecays=None,
bigdl_type="float"):
super(SGD, self).__init__(None, bigdl_type, learningrate, learningrate_decay, weightdecay,
momentum, dampening, nesterov,
leaningrate_schedule if (leaningrate_schedule) else Default(),
JTensor.from_ndarray(learningrates), JTensor.from_ndarray(weightdecays))
class Adagrad(OptimMethod):
"""
An implementation of Adagrad. See the original paper:
http://jmlr.org/papers/volume12/duchi11a/duchi11a.pdf
:param learningrate learning rate
:param learningrate_decay learning rate decay
:param weightdecay weight decay
>>> adagrad = Adagrad()
creating: createAdagrad
"""
def __init__(self,
learningrate=1e-3,
learningrate_decay=0.0,
weightdecay=0.0,
bigdl_type="float"):
super(Adagrad, self).__init__(None, bigdl_type, learningrate, learningrate_decay, weightdecay)
class LBFGS(OptimMethod):
"""
This implementation of L-BFGS relies on a user-provided line
search function (state.lineSearch). If this function is not
provided, then a simple learningRate is used to produce fixed
size steps. Fixed size steps are much less costly than line
searches, and can be useful for stochastic problems.
The learning rate is used even when a line search is provided.
This is also useful for large-scale stochastic problems, where
opfunc is a noisy approximation of f(x). In that case, the learning
rate allows a reduction of confidence in the step size.
:param max_iter Maximum number of iterations allowed
:param max_eval Maximum number of function evaluations
:param tolfun Termination tolerance on the first-order optimality
:param tolx Termination tol on progress in terms of func/param changes
:param ncorrection
:param learningrate
:param verbose
:param linesearch A line search function
:param linesearch_options If no line search provided, then a fixed step size is used
>>> lbfgs = LBFGS()
creating: createLBFGS
"""
def __init__(self,
max_iter=20,
max_eval=DOUBLEMAX,
tolfun=1e-5,
tolx=1e-9,
ncorrection=100,
learningrate=1.0,
verbose=False,
linesearch=None,
linesearch_options=None,
bigdl_type="float"):
if linesearch or linesearch_options:
raise ValueError('linesearch and linesearch_options must be None in LBFGS')
super(LBFGS, self).__init__(None, bigdl_type, max_iter, max_eval, tolfun, tolx,
ncorrection, learningrate, verbose, linesearch, linesearch_options)
class Adadelta(OptimMethod):
"""
Adadelta implementation for SGD: http://arxiv.org/abs/1212.5701
:param decayrate interpolation parameter rho
:param epsilon for numerical stability
>>> adagrad = Adadelta()
creating: createAdadelta
"""
def __init__(self,
decayrate = 0.9,
epsilon = 1e-10,
bigdl_type="float"):
super(Adadelta, self).__init__(None, bigdl_type, decayrate, epsilon)
class Adam(OptimMethod):
"""
An implementation of Adam http://arxiv.org/pdf/1412.6980.pdf
:param learningrate learning rate
:param learningrate_decay learning rate decay
:param beta1 first moment coefficient
:param beta2 second moment coefficient
:param epsilon for numerical stability
>>> adam = Adam()
creating: createAdam
"""
def __init__(self,
learningrate = 1e-3,
learningrate_decay = 0.0,
beta1 = 0.9,
beta2 = 0.999,
epsilon = 1e-8,
bigdl_type="float"):
super(Adam, self).__init__(None, bigdl_type, learningrate, learningrate_decay,
beta1, beta2, epsilon)
class ParallelAdam(OptimMethod):
"""
An implementation of Adam http://arxiv.org/pdf/1412.6980.pdf
:param learningrate learning rate
:param learningrate_decay learning rate decay
:param beta1 first moment coefficient
:param beta2 second moment coefficient
:param epsilon for numerical stability
>>> init_engine()
>>> pAdam = ParallelAdam()
creating: createParallelAdam
"""
def __init__(self,
learningrate = 1e-3,
learningrate_decay = 0.0,
beta1 = 0.9,
beta2 = 0.999,
epsilon = 1e-8,
parallel_num = -1,
bigdl_type="float"):
if parallel_num == -1:
parallel_num = get_node_and_core_number()[1]
super(ParallelAdam, self).__init__(None, bigdl_type, learningrate, learningrate_decay,
beta1, beta2, epsilon, parallel_num)
class Ftrl(OptimMethod):
"""
An implementation of Ftrl https://www.eecs.tufts.edu/~dsculley/papers/ad-click-prediction.pdf.
Support L1 penalty, L2 penalty and shrinkage-type L2 penalty.
:param learningrate learning rate
:param learningrate_power double, must be less or equal to zero. Default is -0.5.
:param initial_accumulator_value double, the starting value for accumulators,
    requires zero or positive values.
:param l1_regularization_strength double, must be greater or equal to zero. Default is zero.
:param l2_regularization_strength double, must be greater or equal to zero. Default is zero.
:param l2_shrinkage_regularization_strength double, must be greater or equal to zero.
Default is zero. This differs from l2RegularizationStrength above. L2 above is a
stabilization penalty, whereas this one is a magnitude penalty.
>>> ftrl = Ftrl()
creating: createFtrl
>>> ftrl2 = Ftrl(1e-2, -0.1, 0.2, 0.3, 0.4, 0.5)
creating: createFtrl
"""
def __init__(self,
learningrate = 1e-3,
learningrate_power = -0.5,
initial_accumulator_value = 0.1,
l1_regularization_strength = 0.0,
l2_regularization_strength = 0.0,
l2_shrinkage_regularization_strength = 0.0,
bigdl_type="float"):
super(Ftrl, self).__init__(None, bigdl_type, learningrate, learningrate_power,
initial_accumulator_value,
l1_regularization_strength,
l2_regularization_strength,
l2_shrinkage_regularization_strength)
class Adamax(OptimMethod):
"""
An implementation of Adamax http://arxiv.org/pdf/1412.6980.pdf
:param learningrate learning rate
:param beta1 first moment coefficient
:param beta2 second moment coefficient
:param epsilon for numerical stability
>>> adagrad = Adamax()
creating: createAdamax
"""
def __init__(self,
learningrate = 0.002,
beta1 = 0.9,
beta2 = 0.999,
epsilon = 1e-38,
bigdl_type="float"):
super(Adamax, self).__init__(None, bigdl_type, learningrate, beta1, beta2, epsilon)
class RMSprop(OptimMethod):
"""
An implementation of RMSprop
:param learningrate learning rate
:param learningrate_decay learning rate decay
:param decayrate decay rate, also called rho
:param epsilon for numerical stability
>>> adagrad = RMSprop()
creating: createRMSprop
"""
def __init__(self,
learningrate = 1e-2,
learningrate_decay = 0.0,
decayrate = 0.99,
epsilon = 1e-8,
bigdl_type="float"):
super(RMSprop, self).__init__(None, bigdl_type, learningrate, learningrate_decay, decayrate, epsilon)
class MultiStep(JavaValue):
"""
    Similar to Step, but it allows non-uniform steps defined by step_sizes.
    :param step_sizes: the series of step sizes used for lr decay
:param gamma: coefficient of decay
>>> step = MultiStep([2, 5], 0.3)
creating: createMultiStep
"""
def __init__(self, step_sizes, gamma, bigdl_type="float"):
JavaValue.__init__(self, None, bigdl_type, step_sizes, gamma)
class BaseOptimizer(JavaValue):
def set_model(self, model):
"""
Set model.
:param model: new model
"""
self.value.setModel(model.value)
def set_criterion(self, criterion):
"""
set new criterion, for optimizer reuse
:param criterion: new criterion
:return:
"""
callBigDlFunc(self.bigdl_type, "setCriterion", self.value,
criterion)
def set_checkpoint(self, checkpoint_trigger,
checkpoint_path, isOverWrite=True):
"""
Configure checkpoint settings.
:param checkpoint_trigger: the interval to write snapshots
:param checkpoint_path: the path to write snapshots into
:param isOverWrite: whether to overwrite existing snapshots in path.default is True
"""
if not os.path.exists(checkpoint_path):
mkpath(checkpoint_path)
callBigDlFunc(self.bigdl_type, "setCheckPoint", self.value,
checkpoint_trigger, checkpoint_path, isOverWrite)
def set_gradclip_const(self, min_value, max_value):
"""
Configure constant clipping settings.
:param min_value: the minimum value to clip by
:param max_value: the maxmimum value to clip by
"""
callBigDlFunc(self.bigdl_type, "setConstantClip", self.value, min_value, max_value)
def set_gradclip_l2norm(self, clip_norm):
"""
Configure L2 norm clipping settings.
:param clip_norm: gradient L2-Norm threshold
"""
callBigDlFunc(self.bigdl_type, "setL2NormClip", self.value, clip_norm)
def disable_gradclip(self):
"""
disable clipping.
"""
callBigDlFunc(self.bigdl_type, "disableClip", self.value)
# return a module
def optimize(self):
"""
Do an optimization.
"""
jmodel = callJavaFunc(self.value.optimize)
from bigdl.nn.layer import Layer
return Layer.of(jmodel)
def set_train_summary(self, summary):
"""
Set train summary. A TrainSummary object contains information
necessary for the optimizer to know how often the logs are recorded,
where to store the logs and how to retrieve them, etc. For details,
refer to the docs of TrainSummary.
:param summary: a TrainSummary object
"""
callBigDlFunc(self.bigdl_type, "setTrainSummary", self.value,
summary)
return self
def set_val_summary(self, summary):
"""
Set validation summary. A ValidationSummary object contains information
necessary for the optimizer to know how often the logs are recorded,
where to store the logs and how to retrieve them, etc. For details,
refer to the docs of ValidationSummary.
:param summary: a ValidationSummary object
"""
callBigDlFunc(self.bigdl_type, "setValSummary", self.value,
summary)
return self
def prepare_input(self):
"""
        Load input. Notebook users can call this method to separate data loading
        from optimizer creation time.
"""
print("Loading input ...")
self.value.prepareInput()
def set_end_when(self, end_when):
"""
When to stop, passed in a [[Trigger]]
"""
self.value.setEndWhen(end_when.value)
return self
class Optimizer(BaseOptimizer):
    # NOTE: This is a deprecated constructor; use the `create` method instead.
def __init__(self,
model,
training_rdd,
criterion,
end_trigger,
batch_size,
optim_method=None,
bigdl_type="float"):
"""
Create a distributed optimizer.
:param model: the neural net model
:param training_rdd: the training dataset
:param criterion: the loss function
:param optim_method: the algorithm to use for optimization,
e.g. SGD, Adagrad, etc. If optim_method is None, the default algorithm is SGD.
:param end_trigger: when to end the optimization
:param batch_size: training batch size
"""
self.pvalue = DistriOptimizer(model,
training_rdd,
criterion,
end_trigger,
batch_size,
optim_method,
bigdl_type)
self.value = self.pvalue.value
self.bigdl_type = self.pvalue.bigdl_type
@staticmethod
def create(model,
training_set,
criterion,
end_trigger=None,
batch_size=32,
optim_method=None,
cores=None,
bigdl_type="float"):
"""
Create an optimizer.
        Depending on the input type, the returned optimizer can be a local optimizer \
        or a distributed optimizer.
:param model: the neural net model
:param training_set: (features, label) for local mode. RDD[Sample] for distributed mode.
:param criterion: the loss function
:param optim_method: the algorithm to use for optimization,
e.g. SGD, Adagrad, etc. If optim_method is None, the default algorithm is SGD.
        :param end_trigger: when to end the optimization. default value is MaxEpoch(1)
        :param batch_size: training batch size
        :param cores: This is for the local optimizer only and uses the total physical cores as the default value
"""
if not end_trigger:
end_trigger = MaxEpoch(1)
if not optim_method:
optim_method = SGD()
if isinstance(training_set, RDD) or isinstance(training_set, DataSet):
return DistriOptimizer(model=model,
training_rdd=training_set,
criterion=criterion,
end_trigger=end_trigger,
batch_size=batch_size,
optim_method=optim_method,
bigdl_type=bigdl_type)
elif isinstance(training_set, tuple) and len(training_set) == 2:
x, y = training_set
return LocalOptimizer(X=x,
Y=y,
model=model,
criterion=criterion,
end_trigger=end_trigger,
batch_size=batch_size,
optim_method=optim_method,
cores=cores,
bigdl_type="float")
else:
raise Exception("Not supported training set: %s" % type(training_set))
def set_validation(self, batch_size, val_rdd, trigger, val_method=None):
"""
Configure validation settings.
:param batch_size: validation batch size
:param val_rdd: validation dataset
:param trigger: validation interval
        :param val_method: the ValidationMethod to use, e.g. "Top1Accuracy", "Top5Accuracy", "Loss"
"""
if val_method is None:
val_method = [Top1Accuracy()]
func_name = "setValidation"
if isinstance(val_rdd, DataSet):
func_name = "setValidationFromDataSet"
callBigDlFunc(self.bigdl_type, func_name, self.value, batch_size,
trigger, val_rdd, to_list(val_method))
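    # Illustrative validation setup (assumes `val_samples` is an RDD[Sample]):
    #   optimizer.set_validation(batch_size=128, val_rdd=val_samples,
    #                            trigger=EveryEpoch(),
    #                            val_method=[Top1Accuracy(), Loss()])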
def set_traindata(self, training_rdd, batch_size):
"""
Set new training dataset, for optimizer reuse
:param training_rdd: the training dataset
:param batch_size: training batch size
:return:
"""
callBigDlFunc(self.bigdl_type, "setTrainData", self.value,
training_rdd, batch_size)
class DistriOptimizer(Optimizer):
def __init__(self,
model,
training_rdd,
criterion,
end_trigger,
batch_size,
optim_method=None,
bigdl_type="float"):
"""
Create an optimizer.
:param model: the neural net model
        :param training_rdd: the training dataset
:param criterion: the loss function
:param optim_method: the algorithm to use for optimization,
e.g. SGD, Adagrad, etc. If optim_method is None, the default algorithm is SGD.
:param end_trigger: when to end the optimization
:param batch_size: training batch size
"""
if not optim_method:
optim_methods = {model.name(): SGD()}
elif isinstance(optim_method, OptimMethod):
optim_methods = {model.name(): optim_method}
elif isinstance(optim_method, JavaObject):
optim_methods = {model.name(): OptimMethod(optim_method, bigdl_type)}
else:
optim_methods = optim_method
if isinstance(training_rdd, RDD):
JavaValue.__init__(self, None, bigdl_type, model.value,
training_rdd, criterion,
optim_methods, end_trigger, batch_size)
elif isinstance(training_rdd, DataSet):
self.bigdl_type = bigdl_type
self.value = callBigDlFunc(self.bigdl_type, "createDistriOptimizerFromDataSet",
model.value, training_rdd, criterion,
optim_methods, end_trigger, batch_size)
class LocalOptimizer(BaseOptimizer):
"""
Create an optimizer.
:param model: the neural net model
:param X: the training features which is an ndarray or list of ndarray
:param Y: the training label which is an ndarray
:param criterion: the loss function
:param optim_method: the algorithm to use for optimization,
e.g. SGD, Adagrad, etc. If optim_method is None, the default algorithm is SGD.
:param end_trigger: when to end the optimization
:param batch_size: training batch size
:param cores: by default is the total physical cores.
"""
def __init__(self,
X,
Y,
model,
criterion,
end_trigger,
batch_size,
optim_method=None,
cores=None,
bigdl_type="float"):
if not optim_method:
optim_methods = {model.name(): SGD()}
elif isinstance(optim_method, OptimMethod):
optim_methods = {model.name(): optim_method}
elif isinstance(optim_method, JavaObject):
optim_methods = {model.name(): OptimMethod(optim_method, bigdl_type)}
else:
optim_methods = optim_method
if cores is None:
cores = multiprocessing.cpu_count()
JavaValue.__init__(self, None, bigdl_type,
[JTensor.from_ndarray(X) for X in to_list(X)],
JTensor.from_ndarray(Y),
model.value,
criterion,
optim_methods, end_trigger, batch_size, cores)
def set_validation(self, batch_size, X_val, Y_val, trigger, val_method=None):
"""
Configure validation settings.
:param batch_size: validation batch size
:param X_val: features of validation dataset
:param Y_val: label of validation dataset
:param trigger: validation interval
        :param val_method: the ValidationMethod to use, e.g. "Top1Accuracy", "Top5Accuracy", "Loss"
"""
if val_method is None:
val_method = [Top1Accuracy()]
callBigDlFunc(self.bigdl_type, "setValidation", self.value, batch_size,
trigger, [JTensor.from_ndarray(X) for X in to_list(X_val)],
JTensor.from_ndarray(Y_val), to_list(val_method))
class TrainSummary(JavaValue):
"""
    A logging facility which allows users to trace how indicators (e.g.
    learning rate, training loss, throughput, etc.) change with iterations/time
    in an optimization process. TrainSummary is for training indicators only
    (check ValidationSummary for validation indicators). It contains the
    information the optimizer needs to know where to store the logs, how to
    retrieve the logs, and so on. The logs are written in tensorflow-compatible
    format so that they can be visualized directly using tensorboard. The
    logs can also be retrieved as ndarrays and visualized using python libraries
    such as matplotlib (in a notebook, etc.).
    Use optimizer.set_train_summary to enable the train logger.
"""
def __init__(self, log_dir, app_name, bigdl_type="float"):
"""
Create a TrainSummary. Logs will be saved to log_dir/app_name/train.
:param log_dir: the root dir to store the logs
:param app_name: the application name
"""
JavaValue.__init__(self, None, bigdl_type, log_dir, app_name)
def read_scalar(self, tag):
"""
        Retrieve train logs by type. Return an array of records in the format
        (step, value, wallClockTime). "Step" is the iteration count by default.
        :param tag: the type of the logs. Supported tags are: "LearningRate", "Loss", "Throughput"
"""
return callBigDlFunc(self.bigdl_type, "summaryReadScalar", self.value,
tag)
def set_summary_trigger(self, name, trigger):
"""
Set the interval of recording for each indicator.
        :param name: tag name. Supported tag names are "LearningRate", "Loss",
            "Throughput", "Parameters". "Parameters" is an umbrella tag that
            includes weight, bias, gradWeight, gradBias, and some running status
            (e.g. runningMean and runningVar in BatchNormalization). If you
            didn't set any triggers, we will by default record Loss and Throughput
            in each iteration, while *NOT* recording LearningRate and Parameters,
            as recording parameters may introduce substantial overhead when the
            model is very big, and LearningRate is not a public attribute for all
            OptimMethod.
:param trigger: trigger
"""
return callBigDlFunc(self.bigdl_type, "summarySetTrigger", self.value,
name, trigger)
class ValidationSummary(JavaValue):
"""
    A logging facility which allows users to trace how indicators (e.g.
    validation loss, top1 accuracy, top5 accuracy, etc.) change with
    iterations/time in an optimization process. ValidationSummary is for
    validation indicators only (check TrainSummary for train indicators).
    It contains the information the optimizer needs to know where to
    store the logs, how to retrieve the logs, and so on. The logs are
    written in tensorflow-compatible format so that they can be visualized
    directly using tensorboard. The logs can also be retrieved as ndarrays
    and visualized using python libraries such as matplotlib
    (in a notebook, etc.).
    Use optimizer.set_val_summary to enable the validation logger.
"""
def __init__(self, log_dir, app_name, bigdl_type="float"):
"""
        Create a ValidationSummary. Logs will be saved to
        log_dir/app_name/validation. By default, all ValidationMethods set into
        the optimizer will be recorded and the recording interval is the same
        as the trigger of ValidationMethod in the optimizer.
:param log_dir: the root dir to store the logs
:param app_name: the application name
"""
JavaValue.__init__(self, None, bigdl_type, log_dir, app_name)
def read_scalar(self, tag):
"""
Retrieve validation logs by type. Return an array of records in the
format (step,value,wallClockTime). - "Step" is the iteration count
by default.
:param tag: the type of the logs. The tag should match the name ofthe ValidationMethod set into the optimizer. e.g."Top1AccuracyLoss","Top1Accuracy" or "Top5Accuracy".
"""
return callBigDlFunc(self.bigdl_type, "summaryReadScalar", self.value,
tag)
class L1L2Regularizer(JavaValue):
"""
Apply both L1 and L2 regularization
:param l1 l1 regularization rate
:param l2 l2 regularization rate
"""
def __init__(self, l1, l2, bigdl_type="float"):
JavaValue.__init__(self, None, bigdl_type, l1, l2)
class ActivityRegularization(JavaValue):
"""
    Apply L1 and L2 penalties to the activity (output) of a layer.
:param l1 l1 regularization rate
:param l2 l2 regularization rate
"""
def __init__(self, l1, l2, bigdl_type="float"):
JavaValue.__init__(self, None, bigdl_type, l1, l2)
class L1Regularizer(JavaValue):
"""
Apply L1 regularization
:param l1 l1 regularization rate
"""
def __init__(self, l1, bigdl_type="float"):
JavaValue.__init__(self, None, bigdl_type, l1)
class L2Regularizer(JavaValue):
"""
Apply L2 regularization
:param l2 l2 regularization rate
"""
def __init__(self, l2, bigdl_type="float"):
JavaValue.__init__(self, None, bigdl_type, l2)
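# Illustrative sketch: constructing the regularizers defined above. Note that
# each constructor creates a corresponding Java object, so a running
# BigDL/Spark environment is assumed.
def _regularizer_examples():  # pragma: no cover
    return [L1L2Regularizer(l1=0.01, l2=0.01),
            L1Regularizer(l1=0.01),
            L2Regularizer(l2=0.01)]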
def _test():
import doctest
from pyspark import SparkContext
from bigdl.optim import optimizer
from bigdl.util.common import init_engine
from bigdl.util.common import create_spark_conf
globs = optimizer.__dict__.copy()
sc = SparkContext(master="local[4]", appName="test optimizer",
conf=create_spark_conf())
init_engine()
globs['sc'] = sc
(failure_count, test_count) = doctest.testmod(globs=globs,
optionflags=doctest.ELLIPSIS)
if failure_count:
exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 |
idlead/scikit-learn | sklearn/metrics/cluster/bicluster.py | 359 | 2797 | from __future__ import division
import numpy as np
from sklearn.utils.linear_assignment_ import linear_assignment
from sklearn.utils.validation import check_consistent_length, check_array
__all__ = ["consensus_score"]
def _check_rows_and_columns(a, b):
"""Unpacks the row and column arrays and checks their shape."""
check_consistent_length(*a)
check_consistent_length(*b)
checks = lambda x: check_array(x, ensure_2d=False)
a_rows, a_cols = map(checks, a)
b_rows, b_cols = map(checks, b)
return a_rows, a_cols, b_rows, b_cols
def _jaccard(a_rows, a_cols, b_rows, b_cols):
"""Jaccard coefficient on the elements of the two biclusters."""
intersection = ((a_rows * b_rows).sum() *
(a_cols * b_cols).sum())
a_size = a_rows.sum() * a_cols.sum()
b_size = b_rows.sum() * b_cols.sum()
return intersection / (a_size + b_size - intersection)
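# Illustrative worked example for the Jaccard coefficient above (not part of
# the public API): the intersection is 1 and both bicluster sizes are 2, so
# the returned value is 1 / (2 + 2 - 1) == 1/3.
def _jaccard_example():  # pragma: no cover
    a_rows = np.array([1, 1, 0])
    a_cols = np.array([1, 0])
    b_rows = np.array([1, 0, 0])
    b_cols = np.array([1, 1])
    return _jaccard(a_rows, a_cols, b_rows, b_cols)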
def _pairwise_similarity(a, b, similarity):
"""Computes pairwise similarity matrix.
result[i, j] is the Jaccard coefficient of a's bicluster i and b's
bicluster j.
"""
a_rows, a_cols, b_rows, b_cols = _check_rows_and_columns(a, b)
n_a = a_rows.shape[0]
n_b = b_rows.shape[0]
result = np.array(list(list(similarity(a_rows[i], a_cols[i],
b_rows[j], b_cols[j])
for j in range(n_b))
for i in range(n_a)))
return result
def consensus_score(a, b, similarity="jaccard"):
"""The similarity of two sets of biclusters.
Similarity between individual biclusters is computed. Then the
best matching between sets is found using the Hungarian algorithm.
The final score is the sum of similarities divided by the size of
the larger set.
Read more in the :ref:`User Guide <biclustering>`.
Parameters
----------
a : (rows, columns)
Tuple of row and column indicators for a set of biclusters.
b : (rows, columns)
Another set of biclusters like ``a``.
similarity : string or function, optional, default: "jaccard"
May be the string "jaccard" to use the Jaccard coefficient, or
any function that takes four arguments, each of which is a 1d
indicator vector: (a_rows, a_columns, b_rows, b_columns).
References
----------
* Hochreiter, Bodenhofer, et. al., 2010. `FABIA: factor analysis
for bicluster acquisition
<https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2881408/>`__.
"""
if similarity == "jaccard":
similarity = _jaccard
matrix = _pairwise_similarity(a, b, similarity)
indices = linear_assignment(1. - matrix)
n_a = len(a[0])
n_b = len(b[0])
return matrix[indices[:, 0], indices[:, 1]].sum() / max(n_a, n_b)
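# Illustrative usage sketch: two identical sets, each containing a single
# bicluster, give a perfect consensus score of 1.0.
def _consensus_score_example():  # pragma: no cover
    rows = np.array([[True, True, False]])
    cols = np.array([[True, False]])
    return consensus_score((rows, cols), (rows, cols))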
| bsd-3-clause |
kikocorreoso/mplutils | mplutils/axes.py | 1 | 8516 | # -*- coding: utf-8 -*-
"""
Created on Sun Feb 21 23:43:37 2016
@author: kiko
"""
from __future__ import division, absolute_import
from .settings import RICH_DISPLAY
import numpy as np
if RICH_DISPLAY:
from IPython.display import display
def axes_set_better_defaults(ax,
axes_color = '#777777',
grid = False,
show = False):
"""
Enter an Axes instance and it will change the defaults to an opinionated
version of how a simple plot should be.
Parameters:
-----------
ax : matplotlib.axes.Axes or matplotlib.axes.Subplot instance
axes_color : str
A string indicating a valid matplotlib color.
grid : bool
If `True` the grid of the axes will be shown, if `False` (default)
        the grid, if active, will be suppressed.
show : bool
if `True` the figure will be shown.
If you are working in a rich display environment like the IPython
qtconsole or the Jupyter notebook it will use
`IPython.display.display` to show the figure.
If you are working otherwise it will call the `show` of the
`Figure` instance.
"""
ax.set_axis_bgcolor((1, 1, 1))
ax.grid(grid)
for key in ax.spines.keys():
if ax.spines[key].get_visible():
ax.spines[key].set_color(axes_color)
ax.tick_params(axis = 'x', colors = axes_color)
ax.tick_params(axis = 'y', colors = axes_color)
ax.figure.set_facecolor('white')
ax.figure.canvas.draw()
if show:
if RICH_DISPLAY:
display(ax.figure)
else:
ax.figure.show()
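# Minimal usage sketch for the function above: create an Axes with matplotlib,
# plot some data and apply the opinionated defaults.
def _axes_set_better_defaults_example():  # pragma: no cover
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    ax.plot([0, 1, 2, 3], [0, 1, 4, 9])
    axes_set_better_defaults(ax, axes_color='#333333', grid=True, show=False)
    return ax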
# http://matplotlib.org/examples/pylab_examples/spine_placement_demo.html
def axes_set_axis_position(ax,
spines = ['bottom', 'left'],
pan = 0,
show = False):
"""
    Enter an Axes instance and, depending on the options, it will display
    only the axis spines you selected.
Parameters:
-----------
ax : matplotlib.axes.Axes or matplotlib.axes.Subplot instance
spines : str or iterable
A string or an iterable of strings with the following valid options:
'bottom' : To active the bottom x-axis.
'top' : To active the top x-axis.
'left' : To active the left y-axis.
'right' : To active the right y-axis.
pan : int or iterable
        An integer value or an iterable of integer values indicating the value
        to pan the axis. It has to have the same length and the same order
        as the spines input.
show : bool
if `True` the figure will be shown.
If you are working in a rich display environment like the IPython
qtconsole or the Jupyter notebook it will use
`IPython.display.display` to show the figure.
If you are working otherwise it will call the `show` of the
`Figure` instance.
"""
if np.isscalar(spines):
spines = (spines,)
len_spines = 1
else:
len_spines = len(spines)
if np.isscalar(pan):
pan = np.repeat(pan, len_spines)
len_pan = 1
else:
len_pan = len(pan)
if len_pan > 1 and len_pan != len_spines:
        raise ValueError('Length of `spines` and `pan` mismatch. `pan` '
                         'should be a scalar or should have the same '
                         'length as `spines`.')
i = 0
for loc, spine in ax.spines.items():
if loc in spines:
spine.set_position(('outward', pan[i])) # outward by `pan` points
spine.set_smart_bounds(True)
i += 1
else:
#spine.set_color('none') # don't draw spine
spine.set_visible(False)
# turn off ticks where there is no spine
if 'left' in spines:
ax.yaxis.set_ticks_position('left')
ax.tick_params(labelleft = True)
if 'right' in spines:
ax.yaxis.set_ticks_position('right')
ax.tick_params(labelright = True)
if 'left' in spines and 'right' in spines:
ax.yaxis.set_ticks_position('both')
ax.tick_params(labelleft = True, labelright = True)
if 'left' not in spines and 'right' not in spines:
ax.yaxis.set_ticks([])
if 'bottom' in spines:
ax.xaxis.set_ticks_position('bottom')
ax.tick_params(labelbottom = True)
if 'top' in spines:
ax.xaxis.set_ticks_position('top')
ax.tick_params(labeltop = True)
if 'bottom' in spines and 'top' in spines:
ax.xaxis.set_ticks_position('both')
ax.tick_params(labelbottom = True, labeltop = True)
if 'bottom' not in spines and 'top' not in spines:
ax.xaxis.set_ticks([])
ax.figure.canvas.draw()
if show:
if RICH_DISPLAY:
display(ax.figure)
else:
ax.figure.show()
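# Minimal usage sketch for the function above: keep only the bottom and left
# spines and pan both outward by 10 points.
def _axes_set_axis_position_example():  # pragma: no cover
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    ax.plot([-2, -1, 0, 1, 2], [4, 1, 0, 1, 4])
    axes_set_axis_position(ax, spines=['bottom', 'left'], pan=10, show=False)
    return ax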
def axes_set_origin(ax,
x = 0,
y = 0,
xticks_position = 'bottom',
yticks_position = 'left',
xticks_visible = True,
yticks_visible = True,
show = False):
"""
    Function to locate the x-axis and the y-axis at the positions you want.
Parameters:
-----------
ax : matplotlib.axes.Axes or matplotlib.axes.Subplot instance
x : int or float
Value indicating the position on the y-axis where you want the x-axis
to be located.
y : int or float
Value indicating the position on the x-axis where you want the y-axis
to be located.
xticks_position : str
Default value is 'bottom' if you want the ticks to be located below
the x-axis. 'top' if you want the ticks to be located above the x-axis.
yticks_position : str
Default value is 'left' if you want the ticks to be located on the left
side of the y-axis. 'right' if you want the ticks to be located on the
right side of the y-axis.
xticks_visible : bool
Default value is True if you want ticks visible on the x-axis. False
if you don't want to see the ticks on the x-axis.
yticks_visible : bool
Default value is True if you want ticks visible on the y-axis. False
if you don't want to see the ticks on the y-axis.
show : bool
if `True` the figure will be shown.
If you are working in a rich display environment like the IPython
qtconsole or the Jupyter notebook it will use
`IPython.display.display` to show the figure.
If you are working otherwise it will call the `show` of the
`Figure` instance.
"""
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.xaxis.set_ticks_position(xticks_position)
ax.spines['bottom'].set_position(('data', x))
ax.yaxis.set_ticks_position(yticks_position)
ax.spines['left'].set_position(('data', y))
if not xticks_visible:
ax.set_xticks([])
if not yticks_visible:
ax.set_yticks([])
ax.figure.canvas.draw()
if show:
if RICH_DISPLAY:
display(ax.figure)
else:
ax.figure.show()
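# Minimal usage sketch for the function above: draw both axes through the data
# origin, which is handy for plots of functions around (0, 0).
def _axes_set_origin_example():  # pragma: no cover
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    x = np.linspace(-np.pi, np.pi, 101)
    ax.plot(x, np.sin(x))
    axes_set_origin(ax, x=0, y=0, show=False)
    return ax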
def axes_set_aspect_ratio(ax, ratio = 'equal', show = True):
"""
    Function that accepts an Axes instance and updates it, setting the
    aspect ratio of the axes to the defined quantity.
Parameters:
-----------
ax : matplotlib.axes.Axes or matplotlib.axes.Subplot instance
ratio : str or int/float
The value can be a string with the following values:
'equal' : (default) same scaling from data to plot units for x and y
'auto' : automatic; fill position rectangle with data
Or a:
number (int or float) : a circle will be stretched such that the
        height is num times the width. aspect=1 is the same as
aspect='equal'.
show : bool
if `True` the figure will be shown.
If you are working in a rich display environment like the IPython
qtconsole or the Jupyter notebook it will use
`IPython.display.display` to show the figure.
If you are working otherwise it will call the `show` of the
`Figure` instance.
"""
ax.set_aspect(ratio, adjustable = None)
if show:
if RICH_DISPLAY:
display(ax.figure)
else:
ax.figure.show() | mit |
abimannans/scikit-learn | sklearn/linear_model/__init__.py | 270 | 3096 | """
The :mod:`sklearn.linear_model` module implements generalized linear models. It
includes Ridge regression, Bayesian Regression, Lasso and Elastic Net
estimators computed with Least Angle Regression and coordinate descent. It also
implements Stochastic Gradient Descent related algorithms.
"""
# See http://scikit-learn.sourceforge.net/modules/sgd.html and
# http://scikit-learn.sourceforge.net/modules/linear_model.html for
# complete documentation.
from .base import LinearRegression
from .bayes import BayesianRidge, ARDRegression
from .least_angle import (Lars, LassoLars, lars_path, LarsCV, LassoLarsCV,
LassoLarsIC)
from .coordinate_descent import (Lasso, ElasticNet, LassoCV, ElasticNetCV,
lasso_path, enet_path, MultiTaskLasso,
MultiTaskElasticNet, MultiTaskElasticNetCV,
MultiTaskLassoCV)
from .sgd_fast import Hinge, Log, ModifiedHuber, SquaredLoss, Huber
from .stochastic_gradient import SGDClassifier, SGDRegressor
from .ridge import (Ridge, RidgeCV, RidgeClassifier, RidgeClassifierCV,
ridge_regression)
from .logistic import (LogisticRegression, LogisticRegressionCV,
logistic_regression_path)
from .omp import (orthogonal_mp, orthogonal_mp_gram, OrthogonalMatchingPursuit,
OrthogonalMatchingPursuitCV)
from .passive_aggressive import PassiveAggressiveClassifier
from .passive_aggressive import PassiveAggressiveRegressor
from .perceptron import Perceptron
from .randomized_l1 import (RandomizedLasso, RandomizedLogisticRegression,
lasso_stability_path)
from .ransac import RANSACRegressor
from .theil_sen import TheilSenRegressor
__all__ = ['ARDRegression',
'BayesianRidge',
'ElasticNet',
'ElasticNetCV',
'Hinge',
'Huber',
'Lars',
'LarsCV',
'Lasso',
'LassoCV',
'LassoLars',
'LassoLarsCV',
'LassoLarsIC',
'LinearRegression',
'Log',
'LogisticRegression',
'LogisticRegressionCV',
'ModifiedHuber',
'MultiTaskElasticNet',
'MultiTaskElasticNetCV',
'MultiTaskLasso',
'MultiTaskLassoCV',
'OrthogonalMatchingPursuit',
'OrthogonalMatchingPursuitCV',
'PassiveAggressiveClassifier',
'PassiveAggressiveRegressor',
'Perceptron',
'RandomizedLasso',
'RandomizedLogisticRegression',
'Ridge',
'RidgeCV',
'RidgeClassifier',
'RidgeClassifierCV',
'SGDClassifier',
'SGDRegressor',
'SquaredLoss',
'TheilSenRegressor',
'enet_path',
'lars_path',
'lasso_path',
'lasso_stability_path',
'logistic_regression_path',
'orthogonal_mp',
'orthogonal_mp_gram',
'ridge_regression',
'RANSACRegressor']
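# Illustrative usage sketch (not part of this package's public API): a tiny
# Ridge regression fit using one of the estimators exported above.
def _ridge_usage_example():  # pragma: no cover
    reg = Ridge(alpha=1.0)
    reg.fit([[0, 0], [1, 1], [2, 2]], [0, 1, 2])
    return reg.predict([[3, 3]])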
| bsd-3-clause |
DiamondLightSource/auto_tomo_calibration-experimental | measure_resolution/lmfit-py/examples/confidence_interval.py | 4 | 2924 | # -*- coding: utf-8 -*-
"""
Created on Sun Apr 15 19:47:45 2012
@author: Tillsten
"""
import numpy as np
from lmfit import Parameters, Minimizer, conf_interval, report_fit, report_ci
from numpy import linspace, zeros, sin, exp, random, sqrt, pi, sign
from scipy.optimize import leastsq
try:
import matplotlib.pyplot as plt
import pylab
HASPYLAB = True
except ImportError:
HASPYLAB = False
p_true = Parameters()
p_true.add('amp', value=14.0)
p_true.add('period', value=5.33)
p_true.add('shift', value=0.123)
p_true.add('decay', value=0.010)
def residual(pars, x, data=None):
amp = pars['amp'].value
per = pars['period'].value
shift = pars['shift'].value
decay = pars['decay'].value
if abs(shift) > pi/2:
shift = shift - sign(shift)*pi
model = amp*sin(shift + x/per) * exp(-x*x*decay*decay)
if data is None:
return model
return (model - data)
n = 2500
xmin = 0.
xmax = 250.0
noise = random.normal(scale=0.7215, size=n)
x = linspace(xmin, xmax, n)
data = residual(p_true, x) + noise
fit_params = Parameters()
fit_params.add('amp', value=13.0)
fit_params.add('period', value=2)
fit_params.add('shift', value=0.0)
fit_params.add('decay', value=0.02)
mini = Minimizer(residual, fit_params, fcn_args=(x,),
fcn_kws={'data':data})
out = mini.leastsq()
fit = residual(out.params, x)
print( ' N fev = ', out.nfev)
print( out.chisqr, out.redchi, out.nfree)
report_fit(out.params)
#ci=calc_ci(out)
ci, tr = conf_interval(mini, out, trace=True)
report_ci(ci)
if HASPYLAB:
names=out.params.keys()
i=0
gs=pylab.GridSpec(4,4)
sx={}
sy={}
for fixed in names:
j=0
for free in names:
if j in sx and i in sy:
ax=pylab.subplot(gs[i,j],sharex=sx[j],sharey=sy[i])
elif i in sy:
ax=pylab.subplot(gs[i,j],sharey=sy[i])
sx[j]=ax
elif j in sx:
ax=pylab.subplot(gs[i,j],sharex=sx[j])
sy[i]=ax
else:
ax=pylab.subplot(gs[i,j])
sy[i]=ax
sx[j]=ax
if i<3:
pylab.setp( ax.get_xticklabels(), visible=False)
else:
ax.set_xlabel(free)
if j>0:
pylab.setp( ax.get_yticklabels(), visible=False)
else:
ax.set_ylabel(fixed)
res=tr[fixed]
prob=res['prob']
f=prob<0.96
x,y=res[free], res[fixed]
ax.scatter(x[f],y[f],
c=1-prob[f],s=200*(1-prob[f]+0.5))
ax.autoscale(1,1)
j=j+1
i=i+1
pylab.show()
| apache-2.0 |
lizardsystem/threedilib | threedilib/modeling/convert.py | 1 | 8275 | # (c) Nelen & Schuurmans. GPL licensed, see LICENSE.rst.
# -*- coding: utf-8 -*-
"""
Convert shapefiles with z coordinates. Choose from the following formats:
'inp' to create an inp file, 'img' to create an image with a plot of the
feature, or 'shp' to output a shapefile with the minimum height of a
feature stored in an extra attribute.
"""
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
from __future__ import division
import argparse
import math
import os
import shutil
import tempfile
from matplotlib.backends import backend_agg
from matplotlib import figure
from osgeo import gdal
from osgeo import ogr
from PIL import Image
ogr.UseExceptions()
def get_parser():
""" Return argument parser. """
parser = argparse.ArgumentParser(
description=__doc__,
)
parser.add_argument('source_path',
metavar='SOURCE',
help=('Path to source shapefile.'))
parser.add_argument('target_path',
metavar='TARGET',
help=('Path to target file.'))
parser.add_argument('-of', '--output-format',
metavar='FORMAT',
choices=['inp', 'img', 'shp'],
default='shp',
help=("Path to output."))
return parser
class InputFileWriter(object):
""" Writer for input files. """
def __init__(self, path):
"""
Init the counters and tmpdirs
"""
self.path = path
self.node_count = 0
self.link_count = 0
def __enter__(self):
""" Setup tempfiles. """
self.temp_directory = tempfile.mkdtemp()
self.node_file = open(
os.path.join(self.temp_directory, 'nodes'), 'a+',
)
self.link_file = open(
os.path.join(self.temp_directory, 'links'), 'a+',
)
return self
def __exit__(self, type, value, traceback):
""" Write 'inputfile' at path. """
with open(self.path, 'w') as input_file:
self.node_file.seek(0)
input_file.write(self.node_file.read())
input_file.write('-1\n')
self.link_file.seek(0)
input_file.write(self.link_file.read())
self.node_file.close()
self.link_file.close()
shutil.rmtree(self.temp_directory)
def _write_node(self, node):
""" Write a node. """
self.node_count += 1
self.node_file.write('{} {} {} {}\n'.format(
self.node_count, node[0], node[1], -node[2] # Depth, not height!
))
def _write_link(self):
""" Write a link between previous node and next node."""
self.link_count += 1
self.link_file.write('{} {} {}\n'.format(
self.link_count, self.node_count, self.node_count + 1,
))
def _add_wkb_line_string(self, wkb_line_string):
""" Add linestring as nodes and links. """
nodes = [wkb_line_string.GetPoint(i)
for i in range(wkb_line_string.GetPointCount())]
# Add nodes and links up to the last node
for i in range(len(nodes) - 1):
self._write_node(nodes[i])
self._write_link()
# Add last node, link already covered.
self._write_node(nodes[-1])
def add_feature(self, feature):
""" Add feature as nodes and links. """
geometry = feature.geometry()
geometry_type = geometry.GetGeometryType()
if geometry_type == ogr.wkbLineString25D:
self._add_wkb_line_string(geometry)
elif geometry_type == ogr.wkbMultiLineString25D:
for wkb_line_string in geometry:
self._add_wkb_line_string(wkb_line_string)
class ImageWriter(object):
""" Writer for images. """
def __init__(self, path):
self.count = 0
self.path = path
def __enter__(self):
return self
def _add_wkb_line_string(self, wkb_line_string, label):
""" Plot linestring as separate image. """
# Get data
x, y, z = zip(*[wkb_line_string.GetPoint(i)
for i in range(wkb_line_string.GetPointCount())])
# Determine distance along line
l = [0]
for i in range(len(z) - 1):
l.append(l[-1] + math.sqrt(
(x[i + 1] - x[i]) ** 2 + (y[i + 1] - y[i]) ** 2,
))
# Plot in matplotlib
fig = figure.Figure()
axes = fig.add_axes([0.1, 0.1, 0.8, 0.8])
axes.plot(l, z, label=label)
axes.legend(loc='best', frameon=False)
# Write to image
backend_agg.FigureCanvasAgg(fig)
buf, size = fig.canvas.print_to_buffer()
image = Image.fromstring('RGBA', size, buf)
root, ext = os.path.splitext(self.path)
image.save(root + '{:00.0f}'.format(self.count) + ext)
self.count += 1
def add_feature(self, feature):
""" Currently saves every feature in a separate image. """
# Plotlabel
label = '\n'.join([': '.join(str(v) for v in item)
for item in feature.items().items()])
# Plot according to geometry type
geometry = feature.geometry()
geometry_type = geometry.GetGeometryType()
if geometry_type == ogr.wkbLineString25D:
self._add_wkb_line_string(geometry, label=label)
elif geometry_type == ogr.wkbMultiLineString25D:
for wkb_line_string in geometry:
self._add_wkb_line_string(wkb_line_string, label=label)
def __exit__(self, type, value, traceback):
pass
class ShapefileWriter(object):
""" Writer for shapefiles. """
ATTRIBUTE = b'kruinhoogt'
def __init__(self, path):
self.count = 0
self.path = path
self.datasource = None
self.layer = None
def __enter__(self):
return self
def create_datasource(self, feature):
""" Create a datasource based on feature. """
root, ext = os.path.splitext(os.path.basename(self.path))
driver = ogr.GetDriverByName(b'ESRI Shapefile')
datasource = driver.CreateDataSource(self.path)
layer = datasource.CreateLayer(root)
for i in range(feature.GetFieldCount()):
layer.CreateField(feature.GetFieldDefnRef(i))
field_defn = ogr.FieldDefn(self.ATTRIBUTE, ogr.OFTReal)
layer.CreateField(field_defn)
self.datasource = datasource
self.layer = layer
def add_feature(self, feature):
""" Currently saves every feature in a separate image. """
if self.layer is None:
self.create_datasource(feature)
layer_defn = self.layer.GetLayerDefn()
# elevation
geometry = feature.geometry().Clone()
geometry_type = geometry.GetGeometryType()
if geometry_type == ogr.wkbLineString25D:
elevation = min([p[2] for p in geometry.GetPoints()])
else:
# multilinestring
elevation = min([p[2]
for g in geometry
for p in g.GetPoints()])
geometry.FlattenTo2D()
new_feature = ogr.Feature(layer_defn)
new_feature.SetGeometry(geometry)
for k, v in feature.items().items():
new_feature[k] = v
new_feature[self.ATTRIBUTE] = elevation
self.layer.CreateFeature(new_feature)
def __exit__(self, type, value, traceback):
pass
def convert(source_path, target_path, output_format):
""" Convert shapefile to inp file."""
source_dataset = ogr.Open(str(source_path))
writers = dict(
inp=InputFileWriter,
img=ImageWriter,
shp=ShapefileWriter,
)
with writers[output_format](target_path) as writer:
for source_layer in source_dataset:
total = source_layer.GetFeatureCount()
for count, source_feature in enumerate(source_layer, 1):
writer.add_feature(source_feature)
gdal.TermProgress_nocb(count / total)
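# Illustrative sketch of calling convert() directly instead of through the
# command line interface; the file paths below are hypothetical.
def _convert_example():  # pragma: no cover
    convert(source_path='lines_with_z.shp',
            target_path='lines_with_z.inp',
            output_format='inp')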
def main():
""" Call convert() with commandline args. """
convert(**vars(get_parser().parse_args()))
if __name__ == '__main__':
exit(main())
| gpl-3.0 |
JackKelly/neuralnilm_prototype | scripts/e115.py | 2 | 3654 | from __future__ import print_function, division
import matplotlib
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import Net, RealApplianceSource, BLSTMLayer, SubsampleLayer, DimshuffleLayer
from lasagne.nonlinearities import sigmoid, rectify
from lasagne.objectives import crossentropy, mse
from lasagne.init import Uniform, Normal
from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer
from lasagne.updates import adagrad, nesterov_momentum
from functools import partial
import os
from neuralnilm.source import standardise
from neuralnilm.experiment import run_experiment
from neuralnilm.net import TrainingError
import __main__
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
SAVE_PLOT_INTERVAL = 250
GRADIENT_STEPS = 100
"""
e103
Discovered that bottom layer is hardly changing. So will try
just a single lstm layer
e104
standard init
lower learning rate
e106
lower learning rate to 0.001
e108
is e107 but with batch size of 5
e109
Normal(1) for LSTM
e110
* Back to Uniform(5) for LSTM
* Using nntools eb17bd923ef9ff2cacde2e92d7323b4e51bb5f1f
RESULTS: Seems to run fine again!
e111
* Try with nntools head
* peepholes=False
RESULTS: appears to be working well. Haven't seen a NaN,
even with training rate of 0.1
e112
* n_seq_per_batch = 50
e114
* Trying looking at layer by layer training again.
* Start with single LSTM layer
e115
* Learning rate = 1
"""
def exp_a(name):
source = RealApplianceSource(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television'
# 'dish washer',
# ['washer dryer', 'washing machine']
],
max_appliance_powers=[300, 500, 200], #, 2500, 2400],
on_power_thresholds=[20, 20, 20], #, 20, 20],
max_input_power=1000,
min_on_durations=[60, 60, 60], #, 1800, 1800],
window=("2013-06-01", "2014-07-01"),
seq_length=1000,
output_one_appliance=False,
boolean_targets=False,
min_off_duration=60,
train_buildings=[1],
validation_buildings=[1],
skip_probability=0,
n_seq_per_batch=50
)
net = Net(
experiment_name=name,
source=source,
save_plot_interval=SAVE_PLOT_INTERVAL,
loss_function=crossentropy,
updates=partial(nesterov_momentum, learning_rate=1.0),
layers_config=[
{
'type': LSTMLayer,
'num_units': 50,
'W_in_to_cell': Uniform(5),
'gradient_steps': GRADIENT_STEPS,
'peepholes': False
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': sigmoid
}
]
)
return net
def init_experiment(experiment):
full_exp_name = NAME + experiment
func_call = 'exp_{:s}(full_exp_name)'.format(experiment)
print("***********************************")
print("Preparing", full_exp_name, "...")
net = eval(func_call)
return net
def main():
for experiment in list('a'):
full_exp_name = NAME + experiment
path = os.path.join(PATH, full_exp_name)
try:
net = init_experiment(experiment)
run_experiment(net, path, epochs=5000)
except KeyboardInterrupt:
break
except TrainingError as e:
print("EXCEPTION:", e)
if __name__ == "__main__":
main()
| mit |
sinhrks/scikit-learn | examples/decomposition/plot_pca_vs_lda.py | 176 | 2027 | """
=======================================================
Comparison of LDA and PCA 2D projection of Iris dataset
=======================================================
The Iris dataset represents 3 kinds of Iris flowers (Setosa, Versicolour
and Virginica) with 4 attributes: sepal length, sepal width, petal length
and petal width.
Principal Component Analysis (PCA) applied to this data identifies the
combination of attributes (principal components, or directions in the
feature space) that account for the most variance in the data. Here we
plot the different samples on the first 2 principal components.
Linear Discriminant Analysis (LDA) tries to identify attributes that
account for the most variance *between classes*. In particular,
LDA, in contrast to PCA, is a supervised method, using known class labels.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
iris = datasets.load_iris()
X = iris.data
y = iris.target
target_names = iris.target_names
pca = PCA(n_components=2)
X_r = pca.fit(X).transform(X)
lda = LinearDiscriminantAnalysis(n_components=2)
X_r2 = lda.fit(X, y).transform(X)
# Percentage of variance explained for each component
print('explained variance ratio (first two components): %s'
% str(pca.explained_variance_ratio_))
plt.figure()
colors = ['navy', 'turquoise', 'darkorange']
lw = 2
for color, i, target_name in zip(colors, [0, 1, 2], target_names):
plt.scatter(X_r[y == i, 0], X_r[y == i, 1], color=color, alpha=.8, lw=lw,
label=target_name)
plt.legend(loc='best', shadow=False, scatterpoints=1)
plt.title('PCA of IRIS dataset')
plt.figure()
for color, i, target_name in zip(colors, [0, 1, 2], target_names):
plt.scatter(X_r2[y == i, 0], X_r2[y == i, 1], alpha=.8, color=color,
label=target_name)
plt.legend(loc='best', shadow=False, scatterpoints=1)
plt.title('LDA of IRIS dataset')
plt.show()
| bsd-3-clause |
Pymatteo/QtNMR | build/exe.win32-3.4/scipy/cluster/tests/test_hierarchy.py | 7 | 34863 | #! /usr/bin/env python
#
# Author: Damian Eads
# Date: April 17, 2008
#
# Copyright (C) 2008 Damian Eads
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import (TestCase, run_module_suite, dec, assert_raises,
assert_allclose, assert_equal, assert_)
from scipy.lib.six import xrange, u
import scipy.cluster.hierarchy
from scipy.cluster.hierarchy import (
linkage, from_mlab_linkage, to_mlab_linkage, num_obs_linkage, inconsistent,
cophenet, fclusterdata, fcluster, is_isomorphic, single, leaders,
correspond, is_monotonic, maxdists, maxinconsts, maxRstat,
is_valid_linkage, is_valid_im, to_tree, leaves_list, dendrogram)
from scipy.spatial.distance import pdist
import hierarchy_test_data
# Matplotlib is not a scipy dependency but is optionally used in dendrogram, so
# check if it's available
try:
# import matplotlib
import matplotlib
# and set the backend to be Agg (no gui)
matplotlib.use('Agg')
# before importing pyplot
import matplotlib.pyplot as plt
have_matplotlib = True
except:
have_matplotlib = False
class TestLinkage(object):
def test_linkage_empty_distance_matrix(self):
# Tests linkage(Y) where Y is a 0x4 linkage matrix. Exception expected.
y = np.zeros((0,))
assert_raises(ValueError, linkage, y)
################### linkage
def test_linkage_tdist(self):
for method in ['single', 'complete', 'average', 'weighted', u('single')]:
yield self.check_linkage_tdist, method
def check_linkage_tdist(self, method):
# Tests linkage(Y, method) on the tdist data set.
Z = linkage(hierarchy_test_data.ytdist, method)
expectedZ = getattr(hierarchy_test_data, 'linkage_ytdist_' + method)
assert_allclose(Z, expectedZ, atol=1e-10)
################### linkage on Q
def test_linkage_X(self):
for method in ['centroid', 'median', 'ward']:
yield self.check_linkage_q, method
def check_linkage_q(self, method):
# Tests linkage(Y, method) on the Q data set.
Z = linkage(hierarchy_test_data.X, method)
expectedZ = getattr(hierarchy_test_data, 'linkage_X_' + method)
assert_allclose(Z, expectedZ, atol=1e-06)
class TestInconsistent(object):
def test_inconsistent_tdist(self):
for depth in hierarchy_test_data.inconsistent_ytdist:
yield self.check_inconsistent_tdist, depth
def check_inconsistent_tdist(self, depth):
Z = hierarchy_test_data.linkage_ytdist_single
assert_allclose(inconsistent(Z, depth),
hierarchy_test_data.inconsistent_ytdist[depth])
class TestCopheneticDistance(object):
def test_linkage_cophenet_tdist_Z(self):
# Tests cophenet(Z) on tdist data set.
expectedM = np.array([268, 295, 255, 255, 295, 295, 268, 268, 295, 295,
295, 138, 219, 295, 295])
Z = hierarchy_test_data.linkage_ytdist_single
M = cophenet(Z)
assert_allclose(M, expectedM, atol=1e-10)
def test_linkage_cophenet_tdist_Z_Y(self):
# Tests cophenet(Z, Y) on tdist data set.
Z = hierarchy_test_data.linkage_ytdist_single
(c, M) = cophenet(Z, hierarchy_test_data.ytdist)
expectedM = np.array([268, 295, 255, 255, 295, 295, 268, 268, 295, 295,
295, 138, 219, 295, 295])
expectedc = 0.639931296433393415057366837573
assert_allclose(c, expectedc, atol=1e-10)
assert_allclose(M, expectedM, atol=1e-10)
class TestMLabLinkageConversion(object):
def test_mlab_linkage_conversion_empty(self):
# Tests from/to_mlab_linkage on empty linkage array.
X = np.asarray([])
assert_equal(from_mlab_linkage([]), X)
assert_equal(to_mlab_linkage([]), X)
def test_mlab_linkage_conversion_single_row(self):
# Tests from/to_mlab_linkage on linkage array with single row.
Z = np.asarray([[0., 1., 3., 2.]])
Zm = [[1, 2, 3]]
assert_equal(from_mlab_linkage(Zm), Z)
assert_equal(to_mlab_linkage(Z), Zm)
def test_mlab_linkage_conversion_multiple_rows(self):
# Tests from/to_mlab_linkage on linkage array with multiple rows.
Zm = np.asarray([[3, 6, 138], [4, 5, 219],
[1, 8, 255], [2, 9, 268], [7, 10, 295]])
Z = np.array([[2., 5., 138., 2.],
[3., 4., 219., 2.],
[0., 7., 255., 3.],
[1., 8., 268., 4.],
[6., 9., 295., 6.]],
dtype=np.double)
assert_equal(from_mlab_linkage(Zm), Z)
assert_equal(to_mlab_linkage(Z), Zm)
class TestFcluster(object):
def test_fclusterdata(self):
for t in hierarchy_test_data.fcluster_inconsistent:
yield self.check_fclusterdata, t, 'inconsistent'
for t in hierarchy_test_data.fcluster_distance:
yield self.check_fclusterdata, t, 'distance'
for t in hierarchy_test_data.fcluster_maxclust:
yield self.check_fclusterdata, t, 'maxclust'
def check_fclusterdata(self, t, criterion):
# Tests fclusterdata(X, criterion=criterion, t=t) on a random 3-cluster data set.
expectedT = getattr(hierarchy_test_data, 'fcluster_' + criterion)[t]
X = hierarchy_test_data.Q_X
T = fclusterdata(X, criterion=criterion, t=t)
assert_(is_isomorphic(T, expectedT))
def test_fcluster(self):
for t in hierarchy_test_data.fcluster_inconsistent:
yield self.check_fcluster, t, 'inconsistent'
for t in hierarchy_test_data.fcluster_distance:
yield self.check_fcluster, t, 'distance'
for t in hierarchy_test_data.fcluster_maxclust:
yield self.check_fcluster, t, 'maxclust'
def check_fcluster(self, t, criterion):
# Tests fcluster(Z, criterion=criterion, t=t) on a random 3-cluster data set.
expectedT = getattr(hierarchy_test_data, 'fcluster_' + criterion)[t]
Z = single(hierarchy_test_data.Q_X)
T = fcluster(Z, criterion=criterion, t=t)
assert_(is_isomorphic(T, expectedT))
def test_fcluster_monocrit(self):
for t in hierarchy_test_data.fcluster_distance:
yield self.check_fcluster_monocrit, t
for t in hierarchy_test_data.fcluster_maxclust:
yield self.check_fcluster_maxclust_monocrit, t
def check_fcluster_monocrit(self, t):
expectedT = hierarchy_test_data.fcluster_distance[t]
Z = single(hierarchy_test_data.Q_X)
T = fcluster(Z, t, criterion='monocrit', monocrit=maxdists(Z))
assert_(is_isomorphic(T, expectedT))
def check_fcluster_maxclust_monocrit(self, t):
expectedT = hierarchy_test_data.fcluster_maxclust[t]
Z = single(hierarchy_test_data.Q_X)
T = fcluster(Z, t, criterion='maxclust_monocrit', monocrit=maxdists(Z))
assert_(is_isomorphic(T, expectedT))
class TestLeaders(object):
def test_leaders_single(self):
# Tests leaders using a flat clustering generated by single linkage.
X = hierarchy_test_data.Q_X
Y = pdist(X)
Z = linkage(Y)
T = fcluster(Z, criterion='maxclust', t=3)
Lright = (np.array([53, 55, 56]), np.array([2, 3, 1]))
L = leaders(Z, T)
assert_equal(L, Lright)
class TestIsIsomorphic(object):
def test_is_isomorphic_1(self):
# Tests is_isomorphic on test case #1 (one flat cluster, different labellings)
a = [1, 1, 1]
b = [2, 2, 2]
assert_(is_isomorphic(a, b))
assert_(is_isomorphic(b, a))
def test_is_isomorphic_2(self):
# Tests is_isomorphic on test case #2 (two flat clusters, different labelings)
a = [1, 7, 1]
b = [2, 3, 2]
assert_(is_isomorphic(a, b))
assert_(is_isomorphic(b, a))
def test_is_isomorphic_3(self):
# Tests is_isomorphic on test case #3 (no flat clusters)
a = []
b = []
assert_(is_isomorphic(a, b))
def test_is_isomorphic_4A(self):
# Tests is_isomorphic on test case #4A (3 flat clusters, different labelings, isomorphic)
a = [1, 2, 3]
b = [1, 3, 2]
assert_(is_isomorphic(a, b))
assert_(is_isomorphic(b, a))
def test_is_isomorphic_4B(self):
# Tests is_isomorphic on test case #4B (3 flat clusters, different labelings, nonisomorphic)
a = [1, 2, 3, 3]
b = [1, 3, 2, 3]
assert_(is_isomorphic(a, b) == False)
assert_(is_isomorphic(b, a) == False)
def test_is_isomorphic_4C(self):
# Tests is_isomorphic on test case #4C (3 flat clusters, different labelings, isomorphic)
a = [7, 2, 3]
b = [6, 3, 2]
assert_(is_isomorphic(a, b))
assert_(is_isomorphic(b, a))
def test_is_isomorphic_5(self):
# Tests is_isomorphic on test case #5 (1000 observations, 2/3/5 random
# clusters, random permutation of the labeling).
for nc in [2, 3, 5]:
yield self.help_is_isomorphic_randperm, 1000, nc
def test_is_isomorphic_6(self):
# Tests is_isomorphic on test case #5A (1000 observations, 2/3/5 random
# clusters, random permutation of the labeling, slightly
# nonisomorphic.)
for nc in [2, 3, 5]:
yield self.help_is_isomorphic_randperm, 1000, nc, True, 5
def help_is_isomorphic_randperm(self, nobs, nclusters, noniso=False, nerrors=0):
for k in range(3):
a = np.int_(np.random.rand(nobs) * nclusters)
b = np.zeros(a.size, dtype=np.int_)
P = np.random.permutation(nclusters)
for i in xrange(0, a.shape[0]):
b[i] = P[a[i]]
if noniso:
Q = np.random.permutation(nobs)
b[Q[0:nerrors]] += 1
b[Q[0:nerrors]] %= nclusters
assert_(is_isomorphic(a, b) == (not noniso))
assert_(is_isomorphic(b, a) == (not noniso))
class TestIsValidLinkage(object):
def test_is_valid_linkage_various_size(self):
for nrow, ncol, valid in [(2, 5, False), (2, 3, False),
(1, 4, True), (2, 4, True)]:
yield self.check_is_valid_linkage_various_size, nrow, ncol, valid
def check_is_valid_linkage_various_size(self, nrow, ncol, valid):
# Tests is_valid_linkage(Z) with linkage matrics of various sizes
Z = np.asarray([[0, 1, 3.0, 2, 5],
[3, 2, 4.0, 3, 3]], dtype=np.double)
Z = Z[:nrow, :ncol]
assert_(is_valid_linkage(Z) == valid)
if not valid:
assert_raises(ValueError, is_valid_linkage, Z, throw=True)
def test_is_valid_linkage_int_type(self):
# Tests is_valid_linkage(Z) with integer type.
Z = np.asarray([[0, 1, 3.0, 2],
[3, 2, 4.0, 3]], dtype=np.int)
assert_(is_valid_linkage(Z) == False)
assert_raises(TypeError, is_valid_linkage, Z, throw=True)
def test_is_valid_linkage_empty(self):
# Tests is_valid_linkage(Z) with empty linkage.
Z = np.zeros((0, 4), dtype=np.double)
assert_(is_valid_linkage(Z) == False)
assert_raises(ValueError, is_valid_linkage, Z, throw=True)
def test_is_valid_linkage_4_and_up(self):
# Tests is_valid_linkage(Z) on linkage on observation sets between
# sizes 4 and 15 (step size 3).
for i in xrange(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
assert_(is_valid_linkage(Z) == True)
def test_is_valid_linkage_4_and_up_neg_index_left(self):
# Tests is_valid_linkage(Z) on linkage on observation sets between
# sizes 4 and 15 (step size 3) with negative indices (left).
for i in xrange(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
Z[i//2,0] = -2
assert_(is_valid_linkage(Z) == False)
assert_raises(ValueError, is_valid_linkage, Z, throw=True)
def test_is_valid_linkage_4_and_up_neg_index_right(self):
# Tests is_valid_linkage(Z) on linkage on observation sets between
# sizes 4 and 15 (step size 3) with negative indices (right).
for i in xrange(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
Z[i//2,1] = -2
assert_(is_valid_linkage(Z) == False)
assert_raises(ValueError, is_valid_linkage, Z, throw=True)
def test_is_valid_linkage_4_and_up_neg_dist(self):
# Tests is_valid_linkage(Z) on linkage on observation sets between
# sizes 4 and 15 (step size 3) with negative distances.
for i in xrange(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
Z[i//2,2] = -0.5
assert_(is_valid_linkage(Z) == False)
assert_raises(ValueError, is_valid_linkage, Z, throw=True)
def test_is_valid_linkage_4_and_up_neg_counts(self):
# Tests is_valid_linkage(Z) on linkage on observation sets between
# sizes 4 and 15 (step size 3) with negative counts.
for i in xrange(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
Z[i//2,3] = -2
assert_(is_valid_linkage(Z) == False)
assert_raises(ValueError, is_valid_linkage, Z, throw=True)
class TestIsValidInconsistent(object):
def test_is_valid_im_int_type(self):
# Tests is_valid_im(R) with integer type.
R = np.asarray([[0, 1, 3.0, 2],
[3, 2, 4.0, 3]], dtype=np.int)
assert_(is_valid_im(R) == False)
assert_raises(TypeError, is_valid_im, R, throw=True)
def test_is_valid_im_various_size(self):
for nrow, ncol, valid in [(2, 5, False), (2, 3, False),
(1, 4, True), (2, 4, True)]:
yield self.check_is_valid_im_various_size, nrow, ncol, valid
def check_is_valid_im_various_size(self, nrow, ncol, valid):
# Tests is_valid_im(R) with linkage matrics of various sizes
R = np.asarray([[0, 1, 3.0, 2, 5],
[3, 2, 4.0, 3, 3]], dtype=np.double)
R = R[:nrow, :ncol]
assert_(is_valid_im(R) == valid)
if not valid:
assert_raises(ValueError, is_valid_im, R, throw=True)
def test_is_valid_im_empty(self):
# Tests is_valid_im(R) with empty inconsistency matrix.
R = np.zeros((0, 4), dtype=np.double)
assert_(is_valid_im(R) == False)
assert_raises(ValueError, is_valid_im, R, throw=True)
def test_is_valid_im_4_and_up(self):
# Tests is_valid_im(R) on im on observation sets between sizes 4 and 15
# (step size 3).
for i in xrange(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
R = inconsistent(Z)
assert_(is_valid_im(R) == True)
def test_is_valid_im_4_and_up_neg_index_left(self):
# Tests is_valid_im(R) on im on observation sets between sizes 4 and 15
# (step size 3) with negative link height means.
for i in xrange(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
R = inconsistent(Z)
R[i//2,0] = -2.0
assert_(is_valid_im(R) == False)
assert_raises(ValueError, is_valid_im, R, throw=True)
def test_is_valid_im_4_and_up_neg_index_right(self):
# Tests is_valid_im(R) on im on observation sets between sizes 4 and 15
# (step size 3) with negative link height standard deviations.
for i in xrange(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
R = inconsistent(Z)
R[i//2,1] = -2.0
assert_(is_valid_im(R) == False)
assert_raises(ValueError, is_valid_im, R, throw=True)
def test_is_valid_im_4_and_up_neg_dist(self):
# Tests is_valid_im(R) on im on observation sets between sizes 4 and 15
# (step size 3) with negative link counts.
for i in xrange(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
R = inconsistent(Z)
R[i//2,2] = -0.5
assert_(is_valid_im(R) == False)
assert_raises(ValueError, is_valid_im, R, throw=True)
class TestNumObsLinkage(TestCase):
def test_num_obs_linkage_empty(self):
# Tests num_obs_linkage(Z) with empty linkage.
Z = np.zeros((0, 4), dtype=np.double)
self.assertRaises(ValueError, num_obs_linkage, Z)
def test_num_obs_linkage_1x4(self):
# Tests num_obs_linkage(Z) on linkage over 2 observations.
Z = np.asarray([[0, 1, 3.0, 2]], dtype=np.double)
self.assertTrue(num_obs_linkage(Z) == 2)
def test_num_obs_linkage_2x4(self):
# Tests num_obs_linkage(Z) on linkage over 3 observations.
Z = np.asarray([[0, 1, 3.0, 2],
[3, 2, 4.0, 3]], dtype=np.double)
self.assertTrue(num_obs_linkage(Z) == 3)
def test_num_obs_linkage_4_and_up(self):
# Tests num_obs_linkage(Z) on linkage on observation sets between sizes
# 4 and 15 (step size 3).
for i in xrange(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
self.assertTrue(num_obs_linkage(Z) == i)
class TestLeavesList(object):
def test_leaves_list_1x4(self):
# Tests leaves_list(Z) on a 1x4 linkage.
Z = np.asarray([[0, 1, 3.0, 2]], dtype=np.double)
to_tree(Z)
assert_equal(leaves_list(Z), [0, 1])
def test_leaves_list_2x4(self):
# Tests leaves_list(Z) on a 2x4 linkage.
Z = np.asarray([[0, 1, 3.0, 2],
[3, 2, 4.0, 3]], dtype=np.double)
to_tree(Z)
assert_equal(leaves_list(Z), [0, 1, 2])
def test_leaves_list_Q(self):
for method in ['single', 'complete', 'average', 'weighted', 'centroid',
'median', 'ward']:
yield self.check_leaves_list_Q, method
def check_leaves_list_Q(self, method):
# Tests leaves_list(Z) on the Q data set
X = hierarchy_test_data.Q_X
Z = linkage(X, method)
node = to_tree(Z)
assert_equal(node.pre_order(), leaves_list(Z))
def test_Q_subtree_pre_order(self):
# Tests that pre_order() works when called on sub-trees.
X = hierarchy_test_data.Q_X
Z = linkage(X, 'single')
node = to_tree(Z)
assert_equal(node.pre_order(), (node.get_left().pre_order()
+ node.get_right().pre_order()))
class TestCorrespond(TestCase):
def test_correspond_empty(self):
# Tests correspond(Z, y) with empty linkage and condensed distance matrix.
y = np.zeros((0,))
Z = np.zeros((0,4))
self.assertRaises(ValueError, correspond, Z, y)
def test_correspond_2_and_up(self):
# Tests correspond(Z, y) on linkage and CDMs over observation sets of
# different sizes.
for i in xrange(2, 4):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
self.assertTrue(correspond(Z, y))
for i in xrange(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
self.assertTrue(correspond(Z, y))
def test_correspond_4_and_up(self):
# Tests correspond(Z, y) on linkage and CDMs over observation sets of
        # different sizes. Correspondence should be false.
for (i, j) in (list(zip(list(range(2, 4)), list(range(3, 5)))) +
list(zip(list(range(3, 5)), list(range(2, 4))))):
y = np.random.rand(i*(i-1)//2)
y2 = np.random.rand(j*(j-1)//2)
Z = linkage(y)
Z2 = linkage(y2)
self.assertTrue(correspond(Z, y2) == False)
self.assertTrue(correspond(Z2, y) == False)
def test_correspond_4_and_up_2(self):
# Tests correspond(Z, y) on linkage and CDMs over observation sets of
        # different sizes. Correspondence should be false.
for (i, j) in (list(zip(list(range(2, 7)), list(range(16, 21)))) +
                       list(zip(list(range(16, 21)), list(range(2, 7))))):
y = np.random.rand(i*(i-1)//2)
y2 = np.random.rand(j*(j-1)//2)
Z = linkage(y)
Z2 = linkage(y2)
self.assertTrue(correspond(Z, y2) == False)
self.assertTrue(correspond(Z2, y) == False)
def test_num_obs_linkage_multi_matrix(self):
# Tests num_obs_linkage with observation matrices of multiple sizes.
for n in xrange(2, 10):
X = np.random.rand(n, 4)
Y = pdist(X)
Z = linkage(Y)
self.assertTrue(num_obs_linkage(Z) == n)
class TestIsMonotonic(TestCase):
def test_is_monotonic_empty(self):
# Tests is_monotonic(Z) on an empty linkage.
Z = np.zeros((0, 4))
self.assertRaises(ValueError, is_monotonic, Z)
def test_is_monotonic_1x4(self):
# Tests is_monotonic(Z) on 1x4 linkage. Expecting True.
Z = np.asarray([[0, 1, 0.3, 2]], dtype=np.double)
self.assertTrue(is_monotonic(Z) == True)
def test_is_monotonic_2x4_T(self):
# Tests is_monotonic(Z) on 2x4 linkage. Expecting True.
Z = np.asarray([[0, 1, 0.3, 2],
[2, 3, 0.4, 3]], dtype=np.double)
self.assertTrue(is_monotonic(Z) == True)
def test_is_monotonic_2x4_F(self):
# Tests is_monotonic(Z) on 2x4 linkage. Expecting False.
Z = np.asarray([[0, 1, 0.4, 2],
[2, 3, 0.3, 3]], dtype=np.double)
self.assertTrue(is_monotonic(Z) == False)
def test_is_monotonic_3x4_T(self):
# Tests is_monotonic(Z) on 3x4 linkage. Expecting True.
Z = np.asarray([[0, 1, 0.3, 2],
[2, 3, 0.4, 2],
[4, 5, 0.6, 4]], dtype=np.double)
self.assertTrue(is_monotonic(Z) == True)
def test_is_monotonic_3x4_F1(self):
# Tests is_monotonic(Z) on 3x4 linkage (case 1). Expecting False.
Z = np.asarray([[0, 1, 0.3, 2],
[2, 3, 0.2, 2],
[4, 5, 0.6, 4]], dtype=np.double)
self.assertTrue(is_monotonic(Z) == False)
def test_is_monotonic_3x4_F2(self):
# Tests is_monotonic(Z) on 3x4 linkage (case 2). Expecting False.
Z = np.asarray([[0, 1, 0.8, 2],
[2, 3, 0.4, 2],
[4, 5, 0.6, 4]], dtype=np.double)
self.assertTrue(is_monotonic(Z) == False)
def test_is_monotonic_3x4_F3(self):
# Tests is_monotonic(Z) on 3x4 linkage (case 3). Expecting False
Z = np.asarray([[0, 1, 0.3, 2],
[2, 3, 0.4, 2],
[4, 5, 0.2, 4]], dtype=np.double)
self.assertTrue(is_monotonic(Z) == False)
def test_is_monotonic_tdist_linkage1(self):
# Tests is_monotonic(Z) on clustering generated by single linkage on
# tdist data set. Expecting True.
Z = linkage(hierarchy_test_data.ytdist, 'single')
self.assertTrue(is_monotonic(Z) == True)
def test_is_monotonic_tdist_linkage2(self):
# Tests is_monotonic(Z) on clustering generated by single linkage on
# tdist data set. Perturbing. Expecting False.
Z = linkage(hierarchy_test_data.ytdist, 'single')
Z[2,2] = 0.0
self.assertTrue(is_monotonic(Z) == False)
def test_is_monotonic_Q_linkage(self):
# Tests is_monotonic(Z) on clustering generated by single linkage on
# Q data set. Expecting True.
X = hierarchy_test_data.Q_X
Z = linkage(X, 'single')
self.assertTrue(is_monotonic(Z) == True)
class TestMaxDists(object):
def test_maxdists_empty_linkage(self):
# Tests maxdists(Z) on empty linkage. Expecting exception.
Z = np.zeros((0, 4), dtype=np.double)
assert_raises(ValueError, maxdists, Z)
def test_maxdists_one_cluster_linkage(self):
# Tests maxdists(Z) on linkage with one cluster.
Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
MD = maxdists(Z)
expectedMD = calculate_maximum_distances(Z)
assert_allclose(MD, expectedMD, atol=1e-15)
def test_maxdists_Q_linkage(self):
for method in ['single', 'complete', 'ward', 'centroid', 'median']:
yield self.check_maxdists_Q_linkage, method
def check_maxdists_Q_linkage(self, method):
# Tests maxdists(Z) on the Q data set
X = hierarchy_test_data.Q_X
Z = linkage(X, method)
MD = maxdists(Z)
expectedMD = calculate_maximum_distances(Z)
assert_allclose(MD, expectedMD, atol=1e-15)
class TestMaxInconsts(object):
def test_maxinconsts_empty_linkage(self):
# Tests maxinconsts(Z, R) on empty linkage. Expecting exception.
Z = np.zeros((0, 4), dtype=np.double)
R = np.zeros((0, 4), dtype=np.double)
assert_raises(ValueError, maxinconsts, Z, R)
def test_maxinconsts_difrow_linkage(self):
# Tests maxinconsts(Z, R) on linkage and inconsistency matrices with
# different numbers of clusters. Expecting exception.
Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
R = np.random.rand(2, 4)
assert_raises(ValueError, maxinconsts, Z, R)
def test_maxinconsts_one_cluster_linkage(self):
# Tests maxinconsts(Z, R) on linkage with one cluster.
Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
R = np.asarray([[0, 0, 0, 0.3]], dtype=np.double)
MD = maxinconsts(Z, R)
expectedMD = calculate_maximum_inconsistencies(Z, R)
assert_allclose(MD, expectedMD, atol=1e-15)
def test_maxinconsts_Q_linkage(self):
for method in ['single', 'complete', 'ward', 'centroid', 'median']:
yield self.check_maxinconsts_Q_linkage, method
def check_maxinconsts_Q_linkage(self, method):
# Tests maxinconsts(Z, R) on the Q data set
X = hierarchy_test_data.Q_X
Z = linkage(X, method)
R = inconsistent(Z)
MD = maxinconsts(Z, R)
expectedMD = calculate_maximum_inconsistencies(Z, R)
assert_allclose(MD, expectedMD, atol=1e-15)
class TestMaxRStat(object):
def test_maxRstat_invalid_index(self):
for i in [3.3, -1, 4]:
yield self.check_maxRstat_invalid_index, i
def check_maxRstat_invalid_index(self, i):
# Tests maxRstat(Z, R, i). Expecting exception.
Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
R = np.asarray([[0, 0, 0, 0.3]], dtype=np.double)
if isinstance(i, int):
assert_raises(ValueError, maxRstat, Z, R, i)
else:
assert_raises(TypeError, maxRstat, Z, R, i)
def test_maxRstat_empty_linkage(self):
for i in range(4):
yield self.check_maxRstat_empty_linkage, i
def check_maxRstat_empty_linkage(self, i):
# Tests maxRstat(Z, R, i) on empty linkage. Expecting exception.
Z = np.zeros((0, 4), dtype=np.double)
R = np.zeros((0, 4), dtype=np.double)
assert_raises(ValueError, maxRstat, Z, R, i)
def test_maxRstat_difrow_linkage(self):
for i in range(4):
yield self.check_maxRstat_difrow_linkage, i
def check_maxRstat_difrow_linkage(self, i):
# Tests maxRstat(Z, R, i) on linkage and inconsistency matrices with
# different numbers of clusters. Expecting exception.
Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
R = np.random.rand(2, 4)
assert_raises(ValueError, maxRstat, Z, R, i)
def test_maxRstat_one_cluster_linkage(self):
for i in range(4):
yield self.check_maxRstat_one_cluster_linkage, i
def check_maxRstat_one_cluster_linkage(self, i):
# Tests maxRstat(Z, R, i) on linkage with one cluster.
Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
R = np.asarray([[0, 0, 0, 0.3]], dtype=np.double)
MD = maxRstat(Z, R, 1)
expectedMD = calculate_maximum_inconsistencies(Z, R, 1)
assert_allclose(MD, expectedMD, atol=1e-15)
def test_maxRstat_Q_linkage(self):
for method in ['single', 'complete', 'ward', 'centroid', 'median']:
for i in range(4):
yield self.check_maxRstat_Q_linkage, method, i
def check_maxRstat_Q_linkage(self, method, i):
# Tests maxRstat(Z, R, i) on the Q data set
X = hierarchy_test_data.Q_X
Z = linkage(X, method)
R = inconsistent(Z)
MD = maxRstat(Z, R, 1)
expectedMD = calculate_maximum_inconsistencies(Z, R, 1)
assert_allclose(MD, expectedMD, atol=1e-15)
class TestDendrogram(object):
def test_dendrogram_single_linkage_tdist(self):
# Tests dendrogram calculation on single linkage of the tdist data set.
Z = linkage(hierarchy_test_data.ytdist, 'single')
R = dendrogram(Z, no_plot=True)
leaves = R["leaves"]
assert_equal(leaves, [2, 5, 1, 0, 3, 4])
def test_valid_orientation(self):
Z = linkage(hierarchy_test_data.ytdist, 'single')
assert_raises(ValueError, dendrogram, Z, orientation="foo")
@dec.skipif(not have_matplotlib)
def test_dendrogram_plot(self):
for orientation in ['top', 'bottom', 'left', 'right']:
yield self.check_dendrogram_plot, orientation
def check_dendrogram_plot(self, orientation):
# Tests dendrogram plotting.
Z = linkage(hierarchy_test_data.ytdist, 'single')
expected = {'color_list': ['g', 'b', 'b', 'b', 'b'],
'dcoord': [[0.0, 138.0, 138.0, 0.0],
[0.0, 219.0, 219.0, 0.0],
[0.0, 255.0, 255.0, 219.0],
[0.0, 268.0, 268.0, 255.0],
[138.0, 295.0, 295.0, 268.0]],
'icoord': [[5.0, 5.0, 15.0, 15.0],
[45.0, 45.0, 55.0, 55.0],
[35.0, 35.0, 50.0, 50.0],
[25.0, 25.0, 42.5, 42.5],
[10.0, 10.0, 33.75, 33.75]],
'ivl': ['2', '5', '1', '0', '3', '4'],
'leaves': [2, 5, 1, 0, 3, 4]}
fig = plt.figure()
ax = fig.add_subplot(111)
# test that dendrogram accepts ax keyword
R1 = dendrogram(Z, ax=ax, orientation=orientation)
plt.close()
assert_equal(R1, expected)
# test plotting to gca (will import pylab)
R2 = dendrogram(Z, orientation=orientation)
plt.close()
assert_equal(R2, expected)
@dec.skipif(not have_matplotlib)
def test_dendrogram_truncate_mode(self):
Z = linkage(hierarchy_test_data.ytdist, 'single')
R = dendrogram(Z, 2, 'lastp', show_contracted=True)
plt.close()
assert_equal(R, {'color_list': ['b'],
'dcoord': [[0.0, 295.0, 295.0, 0.0]],
'icoord': [[5.0, 5.0, 15.0, 15.0]],
'ivl': ['(2)', '(4)'],
'leaves': [6, 9]})
R = dendrogram(Z, 2, 'mtica', show_contracted=True)
plt.close()
assert_equal(R, {'color_list': ['g', 'b', 'b', 'b'],
'dcoord': [[0.0, 138.0, 138.0, 0.0],
[0.0, 255.0, 255.0, 0.0],
[0.0, 268.0, 268.0, 255.0],
[138.0, 295.0, 295.0, 268.0]],
'icoord': [[5.0, 5.0, 15.0, 15.0],
[35.0, 35.0, 45.0, 45.0],
[25.0, 25.0, 40.0, 40.0],
[10.0, 10.0, 32.5, 32.5]],
'ivl': ['2', '5', '1', '0', '(2)'],
'leaves': [2, 5, 1, 0, 7]})
def calculate_maximum_distances(Z):
# Used for testing correctness of maxdists.
n = Z.shape[0] + 1
B = np.zeros((n-1,))
q = np.zeros((3,))
for i in xrange(0, n - 1):
q[:] = 0.0
left = Z[i, 0]
right = Z[i, 1]
if left >= n:
q[0] = B[int(left) - n]
if right >= n:
q[1] = B[int(right) - n]
q[2] = Z[i, 2]
B[i] = q.max()
return B
def calculate_maximum_inconsistencies(Z, R, k=3):
# Used for testing correctness of maxinconsts.
n = Z.shape[0] + 1
B = np.zeros((n-1,))
q = np.zeros((3,))
for i in xrange(0, n - 1):
q[:] = 0.0
left = Z[i, 0]
right = Z[i, 1]
if left >= n:
q[0] = B[int(left) - n]
if right >= n:
q[1] = B[int(right) - n]
q[2] = R[i, k]
B[i] = q.max()
return B
def test_euclidean_linkage_value_error():
for method in scipy.cluster.hierarchy._cpy_euclid_methods:
assert_raises(ValueError,
linkage, [[1, 1], [1, 1]], method=method, metric='cityblock')
def test_2x2_linkage():
Z1 = linkage([1], method='single', metric='euclidean')
Z2 = linkage([[0, 1], [0, 0]], method='single', metric='euclidean')
assert_allclose(Z1, Z2)
if __name__ == "__main__":
run_module_suite()
| gpl-3.0 |
lbishal/scikit-learn | sklearn/feature_extraction/dict_vectorizer.py | 234 | 12267 | # Authors: Lars Buitinck
# Dan Blanchard <[email protected]>
# License: BSD 3 clause
from array import array
from collections import Mapping
from operator import itemgetter
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.six.moves import xrange
from ..utils import check_array, tosequence
from ..utils.fixes import frombuffer_empty
def _tosequence(X):
"""Turn X into a sequence or ndarray, avoiding a copy if possible."""
if isinstance(X, Mapping): # single sample
return [X]
else:
return tosequence(X)
class DictVectorizer(BaseEstimator, TransformerMixin):
"""Transforms lists of feature-value mappings to vectors.
This transformer turns lists of mappings (dict-like objects) of feature
names to feature values into Numpy arrays or scipy.sparse matrices for use
with scikit-learn estimators.
When feature values are strings, this transformer will do a binary one-hot
(aka one-of-K) coding: one boolean-valued feature is constructed for each
of the possible string values that the feature can take on. For instance,
a feature "f" that can take on the values "ham" and "spam" will become two
features in the output, one signifying "f=ham", the other "f=spam".
Features that do not occur in a sample (mapping) will have a zero value
in the resulting array/matrix.
Read more in the :ref:`User Guide <dict_feature_extraction>`.
Parameters
----------
dtype : callable, optional
The type of feature values. Passed to Numpy array/scipy.sparse matrix
constructors as the dtype argument.
separator: string, optional
Separator string used when constructing new features for one-hot
coding.
sparse: boolean, optional.
Whether transform should produce scipy.sparse matrices.
True by default.
sort: boolean, optional.
Whether ``feature_names_`` and ``vocabulary_`` should be sorted when fitting.
True by default.
Attributes
----------
vocabulary_ : dict
A dictionary mapping feature names to feature indices.
feature_names_ : list
A list of length n_features containing the feature names (e.g., "f=ham"
and "f=spam").
Examples
--------
>>> from sklearn.feature_extraction import DictVectorizer
>>> v = DictVectorizer(sparse=False)
>>> D = [{'foo': 1, 'bar': 2}, {'foo': 3, 'baz': 1}]
>>> X = v.fit_transform(D)
>>> X
array([[ 2., 0., 1.],
[ 0., 1., 3.]])
>>> v.inverse_transform(X) == \
[{'bar': 2.0, 'foo': 1.0}, {'baz': 1.0, 'foo': 3.0}]
True
>>> v.transform({'foo': 4, 'unseen_feature': 3})
array([[ 0., 0., 4.]])
See also
--------
FeatureHasher : performs vectorization using only a hash function.
sklearn.preprocessing.OneHotEncoder : handles nominal/categorical features
encoded as columns of integers.
"""
def __init__(self, dtype=np.float64, separator="=", sparse=True,
sort=True):
self.dtype = dtype
self.separator = separator
self.sparse = sparse
self.sort = sort
def fit(self, X, y=None):
"""Learn a list of feature name -> indices mappings.
Parameters
----------
X : Mapping or iterable over Mappings
Dict(s) or Mapping(s) from feature names (arbitrary Python
objects) to feature values (strings or convertible to dtype).
y : (ignored)
Returns
-------
self
"""
feature_names = []
vocab = {}
for x in X:
for f, v in six.iteritems(x):
if isinstance(v, six.string_types):
f = "%s%s%s" % (f, self.separator, v)
if f not in vocab:
feature_names.append(f)
vocab[f] = len(vocab)
if self.sort:
feature_names.sort()
vocab = dict((f, i) for i, f in enumerate(feature_names))
self.feature_names_ = feature_names
self.vocabulary_ = vocab
return self
def _transform(self, X, fitting):
# Sanity check: Python's array has no way of explicitly requesting the
# signed 32-bit integers that scipy.sparse needs, so we use the next
# best thing: typecode "i" (int). However, if that gives larger or
# smaller integers than 32-bit ones, np.frombuffer screws up.
assert array("i").itemsize == 4, (
"sizeof(int) != 4 on your platform; please report this at"
" https://github.com/scikit-learn/scikit-learn/issues and"
" include the output from platform.platform() in your bug report")
dtype = self.dtype
if fitting:
feature_names = []
vocab = {}
else:
feature_names = self.feature_names_
vocab = self.vocabulary_
# Process everything as sparse regardless of setting
X = [X] if isinstance(X, Mapping) else X
indices = array("i")
indptr = array("i", [0])
# XXX we could change values to an array.array as well, but it
# would require (heuristic) conversion of dtype to typecode...
values = []
# collect all the possible feature names and build sparse matrix at
# same time
for x in X:
for f, v in six.iteritems(x):
if isinstance(v, six.string_types):
f = "%s%s%s" % (f, self.separator, v)
v = 1
if f in vocab:
indices.append(vocab[f])
values.append(dtype(v))
else:
if fitting:
feature_names.append(f)
vocab[f] = len(vocab)
indices.append(vocab[f])
values.append(dtype(v))
indptr.append(len(indices))
if len(indptr) == 1:
raise ValueError("Sample sequence X is empty.")
indices = frombuffer_empty(indices, dtype=np.intc)
indptr = np.frombuffer(indptr, dtype=np.intc)
shape = (len(indptr) - 1, len(vocab))
result_matrix = sp.csr_matrix((values, indices, indptr),
shape=shape, dtype=dtype)
# Sort everything if asked
if fitting and self.sort:
feature_names.sort()
map_index = np.empty(len(feature_names), dtype=np.int32)
for new_val, f in enumerate(feature_names):
map_index[new_val] = vocab[f]
vocab[f] = new_val
result_matrix = result_matrix[:, map_index]
if self.sparse:
result_matrix.sort_indices()
else:
result_matrix = result_matrix.toarray()
if fitting:
self.feature_names_ = feature_names
self.vocabulary_ = vocab
return result_matrix
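    # Illustrative sketch (added comment, not part of the original source) of
    # the CSR triplet built above: for D = [{'a': 1, 'b': 2}, {'a': 3}] with
    # vocab {'a': 0, 'b': 1}, the loops accumulate
    #     values  = [1, 2, 3]
    #     indices = [0, 1, 0]
    #     indptr  = [0, 2, 3]
    # so row i of the matrix is the slice indptr[i]:indptr[i+1] of
    # (values, indices). The same triplet reproduces the dense result:
    #
    # >>> import scipy.sparse as sp
    # >>> sp.csr_matrix(([1, 2, 3], [0, 1, 0], [0, 2, 3]), shape=(2, 2)).toarray()
    # array([[1, 2],
    #        [3, 0]])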
def fit_transform(self, X, y=None):
"""Learn a list of feature name -> indices mappings and transform X.
Like fit(X) followed by transform(X), but does not require
materializing X in memory.
Parameters
----------
X : Mapping or iterable over Mappings
Dict(s) or Mapping(s) from feature names (arbitrary Python
objects) to feature values (strings or convertible to dtype).
y : (ignored)
Returns
-------
Xa : {array, sparse matrix}
Feature vectors; always 2-d.
"""
return self._transform(X, fitting=True)
def inverse_transform(self, X, dict_type=dict):
"""Transform array or sparse matrix X back to feature mappings.
X must have been produced by this DictVectorizer's transform or
fit_transform method; it may only have passed through transformers
that preserve the number of features and their order.
In the case of one-hot/one-of-K coding, the constructed feature
names and values are returned rather than the original ones.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Sample matrix.
dict_type : callable, optional
Constructor for feature mappings. Must conform to the
collections.Mapping API.
Returns
-------
D : list of dict_type objects, length = n_samples
Feature mappings for the samples in X.
"""
# COO matrix is not subscriptable
X = check_array(X, accept_sparse=['csr', 'csc'])
n_samples = X.shape[0]
names = self.feature_names_
dicts = [dict_type() for _ in xrange(n_samples)]
if sp.issparse(X):
for i, j in zip(*X.nonzero()):
dicts[i][names[j]] = X[i, j]
else:
for i, d in enumerate(dicts):
for j, v in enumerate(X[i, :]):
if v != 0:
d[names[j]] = X[i, j]
return dicts
def transform(self, X, y=None):
"""Transform feature->value dicts to array or sparse matrix.
Named features not encountered during fit or fit_transform will be
silently ignored.
Parameters
----------
X : Mapping or iterable over Mappings, length = n_samples
Dict(s) or Mapping(s) from feature names (arbitrary Python
objects) to feature values (strings or convertible to dtype).
y : (ignored)
Returns
-------
Xa : {array, sparse matrix}
Feature vectors; always 2-d.
"""
if self.sparse:
return self._transform(X, fitting=False)
else:
dtype = self.dtype
vocab = self.vocabulary_
X = _tosequence(X)
Xa = np.zeros((len(X), len(vocab)), dtype=dtype)
for i, x in enumerate(X):
for f, v in six.iteritems(x):
if isinstance(v, six.string_types):
f = "%s%s%s" % (f, self.separator, v)
v = 1
try:
Xa[i, vocab[f]] = dtype(v)
except KeyError:
pass
return Xa
def get_feature_names(self):
"""Returns a list of feature names, ordered by their indices.
If one-of-K coding is applied to categorical features, this will
include the constructed feature names but not the original ones.
"""
return self.feature_names_
def restrict(self, support, indices=False):
"""Restrict the features to those in support using feature selection.
This function modifies the estimator in-place.
Parameters
----------
support : array-like
Boolean mask or list of indices (as returned by the get_support
member of feature selectors).
indices : boolean, optional
Whether support is a list of indices.
Returns
-------
self
Examples
--------
>>> from sklearn.feature_extraction import DictVectorizer
>>> from sklearn.feature_selection import SelectKBest, chi2
>>> v = DictVectorizer()
>>> D = [{'foo': 1, 'bar': 2}, {'foo': 3, 'baz': 1}]
>>> X = v.fit_transform(D)
>>> support = SelectKBest(chi2, k=2).fit(X, [0, 1])
>>> v.get_feature_names()
['bar', 'baz', 'foo']
>>> v.restrict(support.get_support()) # doctest: +ELLIPSIS
DictVectorizer(dtype=..., separator='=', sort=True,
sparse=True)
>>> v.get_feature_names()
['bar', 'foo']
"""
if not indices:
support = np.where(support)[0]
names = self.feature_names_
new_vocab = {}
for i in support:
new_vocab[names[i]] = len(new_vocab)
self.vocabulary_ = new_vocab
self.feature_names_ = [f for f, i in sorted(six.iteritems(new_vocab),
key=itemgetter(1))]
return self
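# Hedged usage sketch (added; not part of the original module): string-valued
# features are one-hot encoded as described in the class docstring, so a
# feature "f" taking the values "ham"/"spam" becomes two indicator columns.
#
# >>> v = DictVectorizer(sparse=False)
# >>> v.fit_transform([{'f': 'ham', 'len': 4}, {'f': 'spam', 'len': 5}])
# array([[ 1.,  0.,  4.],
#        [ 0.,  1.,  5.]])
# >>> v.get_feature_names()
# ['f=ham', 'f=spam', 'len']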
| bsd-3-clause |
Universal-Model-Converter/UMC3.0a | data/Python/x86/Lib/site-packages/scipy/misc/common.py | 2 | 14711 | """
Functions which are common and require SciPy Base and Level 1 SciPy
(special, linalg)
"""
from __future__ import division, print_function, absolute_import
from scipy.lib.six.moves import xrange
from numpy import exp, log, asarray, arange, newaxis, hstack, product, array, \
where, zeros, extract, place, pi, sqrt, eye, poly1d, dot, \
r_, rollaxis, sum, fromstring
__all__ = ['logsumexp', 'factorial','factorial2','factorialk','comb',
'central_diff_weights', 'derivative', 'pade', 'lena', 'ascent', 'face']
# XXX: the factorial functions could move to scipy.special, and the others
# to numpy perhaps?
def logsumexp(a, axis=None, b=None):
"""Compute the log of the sum of exponentials of input elements.
Parameters
----------
a : array_like
Input array.
axis : int, optional
Axis over which the sum is taken. By default `axis` is None,
and all elements are summed.
.. versionadded:: 0.11.0
b : array-like, optional
Scaling factor for exp(`a`) must be of the same shape as `a` or
broadcastable to `a`.
.. versionadded:: 0.12.0
Returns
-------
res : ndarray
The result, ``np.log(np.sum(np.exp(a)))`` calculated in a numerically
more stable way. If `b` is given then ``np.log(np.sum(b*np.exp(a)))``
is returned.
See Also
--------
numpy.logaddexp, numpy.logaddexp2
Notes
-----
Numpy has a logaddexp function which is very similar to `logsumexp`, but
only handles two arguments. `logaddexp.reduce` is similar to this
function, but may be less stable.
Examples
--------
>>> from scipy.misc import logsumexp
>>> a = np.arange(10)
>>> np.log(np.sum(np.exp(a)))
9.4586297444267107
>>> logsumexp(a)
9.4586297444267107
With weights
>>> a = np.arange(10)
>>> b = np.arange(10, 0, -1)
>>> logsumexp(a, b=b)
9.9170178533034665
>>> np.log(np.sum(b*np.exp(a)))
9.9170178533034647
"""
a = asarray(a)
if axis is None:
a = a.ravel()
else:
a = rollaxis(a, axis)
a_max = a.max(axis=0)
if b is not None:
b = asarray(b)
if axis is None:
b = b.ravel()
else:
b = rollaxis(b, axis)
out = log(sum(b * exp(a - a_max), axis=0))
else:
out = log(sum(exp(a - a_max), axis=0))
out += a_max
return out
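# Added numerical note (not in the original source, assuming ``import numpy
# as np``): subtracting ``a_max`` above is what keeps the computation finite
# for large inputs, where the naive formula overflows:
#
# >>> a = np.array([1000., 1000.])
# >>> np.log(np.sum(np.exp(a)))                    # exp(1000) overflows to inf
# inf
# >>> 1000. + np.log(np.sum(np.exp(a - 1000.)))    # the log-sum-exp shift
# 1000.6931471805599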
def factorial(n,exact=0):
"""
The factorial function, n! = special.gamma(n+1).
If exact is 0, then floating point precision is used, otherwise
exact long integer is computed.
- Array argument accepted only for exact=0 case.
- If n<0, the return value is 0.
Parameters
----------
n : int or array_like of ints
Calculate ``n!``. Arrays are only supported with `exact` set
to False. If ``n < 0``, the return value is 0.
exact : bool, optional
The result can be approximated rapidly using the gamma-formula
above. If `exact` is set to True, calculate the
answer exactly using integer arithmetic. Default is False.
Returns
-------
nf : float or int
Factorial of `n`, as an integer or a float depending on `exact`.
Examples
--------
>>> arr = np.array([3,4,5])
>>> sc.factorial(arr, exact=False)
array([ 6., 24., 120.])
>>> sc.factorial(5, exact=True)
120L
"""
if exact:
if n < 0:
return 0
val = 1
for k in xrange(1,n+1):
val *= k
return val
else:
from scipy import special
n = asarray(n)
sv = special.errprint(0)
vals = special.gamma(n+1)
sv = special.errprint(sv)
return where(n>=0,vals,0)
def factorial2(n, exact=False):
"""
Double factorial.
This is the factorial with every second value skipped, i.e.,
``7!! = 7 * 5 * 3 * 1``. It can be approximated numerically as::
      n!! = special.gamma(n/2+1)*2**((n+1)/2)/sqrt(pi)    n odd
          = 2**(n/2) * (n/2)!                             n even
Parameters
----------
n : int or array_like
Calculate ``n!!``. Arrays are only supported with `exact` set
to False. If ``n < 0``, the return value is 0.
exact : bool, optional
The result can be approximated rapidly using the gamma-formula
above (default). If `exact` is set to True, calculate the
answer exactly using integer arithmetic.
Returns
-------
nff : float or int
Double factorial of `n`, as an int or a float depending on
`exact`.
Examples
--------
>>> factorial2(7, exact=False)
array(105.00000000000001)
>>> factorial2(7, exact=True)
105L
"""
if exact:
if n < -1:
return 0
if n <= 0:
return 1
val = 1
for k in xrange(n,0,-2):
val *= k
return val
else:
from scipy import special
n = asarray(n)
vals = zeros(n.shape,'d')
cond1 = (n % 2) & (n >= -1)
cond2 = (1-(n % 2)) & (n >= -1)
oddn = extract(cond1,n)
evenn = extract(cond2,n)
nd2o = oddn / 2.0
nd2e = evenn / 2.0
place(vals,cond1,special.gamma(nd2o+1)/sqrt(pi)*pow(2.0,nd2o+0.5))
place(vals,cond2,special.gamma(nd2e+1) * pow(2.0,nd2e))
return vals
def factorialk(n,k,exact=1):
"""
    Multifactorial of order k: n(!!...!) with k exclamation marks,
    i.e. the product n * (n - k) * (n - 2*k) * ...
Parameters
----------
n : int, array_like
Calculate multifactorial. Arrays are only supported with exact
        set to False. If `n` < 0, the return value is 0.
    k : int
        Order of the multifactorial.
exact : bool, optional
If exact is set to True, calculate the answer exactly using
integer arithmetic.
Returns
-------
val : int
Multi factorial of `n`.
Raises
------
NotImplementedError
Raises when exact is False
Examples
--------
>>> sc.factorialk(5, 1, exact=True)
120L
>>> sc.factorialk(5, 3, exact=True)
10L
"""
if exact:
if n < 1-k:
return 0
if n<=0:
return 1
val = 1
for j in xrange(n,0,-k):
val = val*j
return val
else:
raise NotImplementedError
def comb(N,k,exact=0):
"""
The number of combinations of N things taken k at a time.
This is often expressed as "N choose k".
Parameters
----------
N : int, ndarray
Number of things.
k : int, ndarray
Number of elements taken.
exact : int, optional
If `exact` is 0, then floating point precision is used, otherwise
exact long integer is computed.
Returns
-------
val : int, ndarray
The total number of combinations.
Notes
-----
- Array arguments accepted only for exact=0 case.
- If k > N, N < 0, or k < 0, then a 0 is returned.
Examples
--------
>>> k = np.array([3, 4])
>>> n = np.array([10, 10])
>>> sc.comb(n, k, exact=False)
array([ 120., 210.])
>>> sc.comb(10, 3, exact=True)
120L
"""
if exact:
if (k > N) or (N < 0) or (k < 0):
return 0
val = 1
for j in xrange(min(k, N-k)):
val = (val*(N-j))//(j+1)
return val
else:
from scipy import special
k,N = asarray(k), asarray(N)
lgam = special.gammaln
cond = (k <= N) & (N >= 0) & (k >= 0)
sv = special.errprint(0)
vals = exp(lgam(N+1) - lgam(N-k+1) - lgam(k+1))
sv = special.errprint(sv)
return where(cond, vals, 0.0)
def central_diff_weights(Np, ndiv=1):
"""
Return weights for an Np-point central derivative.
Assumes equally-spaced function points.
If weights are in the vector w, then
derivative is w[0] * f(x-ho*dx) + ... + w[-1] * f(x+h0*dx)
Parameters
----------
Np : int
Number of points for the central derivative.
ndiv : int, optional
Number of divisions. Default is 1.
Notes
-----
Can be inaccurate for large number of points.
"""
if Np < ndiv + 1:
raise ValueError("Number of points must be at least the derivative order + 1.")
if Np % 2 == 0:
raise ValueError("The number of points must be odd.")
from scipy import linalg
ho = Np >> 1
x = arange(-ho,ho+1.0)
x = x[:,newaxis]
X = x**0.0
for k in range(1,Np):
X = hstack([X,x**k])
w = product(arange(1,ndiv+1),axis=0)*linalg.inv(X)[ndiv]
return w
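# Worked example (added; not in the original docstring): the familiar 3-point
# stencils drop out directly, e.g. [-1/2, 0, 1/2] for the first derivative
# and [1, -2, 1] for the second derivative:
#
# >>> central_diff_weights(3)
# array([-0.5,  0. ,  0.5])
# >>> central_diff_weights(3, 2)
# array([ 1., -2.,  1.])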
def derivative(func, x0, dx=1.0, n=1, args=(), order=3):
"""
Find the n-th derivative of a function at a point.
Given a function, use a central difference formula with spacing `dx` to
compute the `n`-th derivative at `x0`.
Parameters
----------
func : function
Input function.
x0 : float
The point at which `n`-th derivative is found.
dx : int, optional
Spacing.
n : int, optional
Order of the derivative. Default is 1.
args : tuple, optional
Arguments
order : int, optional
Number of points to use, must be odd.
Notes
-----
Decreasing the step size too small can result in round-off error.
Examples
--------
>>> def x2(x):
... return x*x
...
>>> derivative(x2, 2)
4.0
"""
if order < n + 1:
raise ValueError("'order' (the number of points used to compute the derivative), "
"must be at least the derivative order 'n' + 1.")
if order % 2 == 0:
raise ValueError("'order' (the number of points used to compute the derivative) "
"must be odd.")
# pre-computed for n=1 and 2 and low-order for speed.
if n==1:
if order == 3:
weights = array([-1,0,1])/2.0
elif order == 5:
weights = array([1,-8,0,8,-1])/12.0
elif order == 7:
weights = array([-1,9,-45,0,45,-9,1])/60.0
elif order == 9:
weights = array([3,-32,168,-672,0,672,-168,32,-3])/840.0
else:
weights = central_diff_weights(order,1)
elif n==2:
if order == 3:
weights = array([1,-2.0,1])
elif order == 5:
weights = array([-1,16,-30,16,-1])/12.0
elif order == 7:
weights = array([2,-27,270,-490,270,-27,2])/180.0
elif order == 9:
weights = array([-9,128,-1008,8064,-14350,8064,-1008,128,-9])/5040.0
else:
weights = central_diff_weights(order,2)
else:
weights = central_diff_weights(order, n)
val = 0.0
ho = order >> 1
for k in range(order):
val += weights[k]*func(x0+(k-ho)*dx,*args)
return val / product((dx,)*n,axis=0)
def pade(an, m):
"""
Return Pade approximation to a polynomial as the ratio of two polynomials.
Parameters
----------
an : (N,) array_like
Taylor series coefficients.
m : int
The order of the returned approximating polynomials.
Returns
-------
p, q : Polynomial class
The pade approximation of the polynomial defined by `an` is
`p(x)/q(x)`.
Examples
--------
>>> from scipy import misc
>>> e_exp = [1.0, 1.0, 1.0/2.0, 1.0/6.0, 1.0/24.0, 1.0/120.0]
>>> p, q = misc.pade(e_exp, 2)
>>> e_exp.reverse()
>>> e_poly = np.poly1d(e_exp)
Compare ``e_poly(x)`` and the pade approximation ``p(x)/q(x)``
>>> e_poly(1)
2.7166666666666668
>>> p(1)/q(1)
2.7179487179487181
"""
from scipy import linalg
an = asarray(an)
N = len(an) - 1
n = N - m
if n < 0:
raise ValueError("Order of q <m> must be smaller than len(an)-1.")
Akj = eye(N+1, n+1)
Bkj = zeros((N+1, m), 'd')
for row in range(1, m+1):
Bkj[row,:row] = -(an[:row])[::-1]
for row in range(m+1, N+1):
Bkj[row,:] = -(an[row-m:row])[::-1]
C = hstack((Akj, Bkj))
pq = linalg.solve(C, an)
p = pq[:n+1]
q = r_[1.0, pq[n+1:]]
return poly1d(p[::-1]), poly1d(q[::-1])
def lena():
"""
Get classic image processing example image, Lena, at 8-bit grayscale
bit-depth, 512 x 512 size.
Parameters
----------
None
Returns
-------
lena : ndarray
Lena image
Examples
--------
>>> import scipy.misc
>>> lena = scipy.misc.lena()
>>> lena.shape
(512, 512)
>>> lena.max()
245
>>> lena.dtype
dtype('int32')
>>> import matplotlib.pyplot as plt
>>> plt.gray()
>>> plt.imshow(lena)
>>> plt.show()
"""
import pickle, os
fname = os.path.join(os.path.dirname(__file__),'lena.dat')
f = open(fname,'rb')
lena = array(pickle.load(f))
f.close()
return lena
def ascent():
"""
Get an 8-bit grayscale bit-depth, 512 x 512 derived image for easy use in demos
The image is derived from accent-to-the-top.jpg at
http://www.public-domain-image.com/people-public-domain-images-pictures/
Parameters
----------
None
Returns
-------
ascent : ndarray
convenient image to use for testing and demonstration
Examples
--------
>>> import scipy.misc
>>> ascent = scipy.misc.ascent()
>>> ascent.shape
(512, 512)
>>> ascent.max()
255
>>> import matplotlib.pyplot as plt
>>> plt.gray()
>>> plt.imshow(ascent)
>>> plt.show()
"""
import pickle, os
fname = os.path.join(os.path.dirname(__file__),'ascent.dat')
f = open(fname,'rb')
ascent = array(pickle.load(f))
f.close()
return ascent
def face(gray=False):
"""
Get a 1024 x 768, color image of a raccoon face.
raccoon-procyon-lotor.jpg at http://www.public-domain-image.com
Parameters
----------
gray : bool, optional
        If True then return an 8-bit gray-scale image, otherwise return a color image.
Returns
-------
face : ndarray
image of a racoon face
Examples
--------
>>> import scipy.misc
>>> face = scipy.misc.face()
>>> face.shape
(768, 1024, 3)
>>> face.max()
230
>>> face.dtype
dtype('uint8')
>>> import matplotlib.pyplot as plt
>>> plt.gray()
>>> plt.imshow(face)
>>> plt.show()
"""
import bz2, os
    with open(os.path.join(os.path.dirname(__file__), 'face.dat'), 'rb') as f:
        rawdata = f.read()
data = bz2.decompress(rawdata)
face = fromstring(data, dtype='uint8')
face.shape = (768, 1024, 3)
if gray is True:
face = (0.21 * face[:,:,0] + 0.71 * face[:,:,1] + 0.07 * face[:,:,2]).astype('uint8')
return face
| mit |
kiyoto/statsmodels | statsmodels/sandbox/panel/mixed.py | 31 | 21019 | """
Mixed effects models
Author: Jonathan Taylor
Author: Josef Perktold
License: BSD-3
Notes
------
It's pretty slow if the model is misspecified; in my first example, convergence
in loglike was not reached within 2000 iterations. Stop criteria based on
convergence of the parameters were added instead.
With correctly specified model, convergence is fast, in 6 iterations in
example.
"""
from __future__ import print_function
import numpy as np
import numpy.linalg as L
from statsmodels.base.model import LikelihoodModelResults
from statsmodels.tools.decorators import cache_readonly
class Unit(object):
"""
Individual experimental unit for
EM implementation of (repeated measures)
mixed effects model.
\'Maximum Likelihood Computations with Repeated Measures:
Application of the EM Algorithm\'
Nan Laird; Nicholas Lange; Daniel Stram
Journal of the American Statistical Association,
Vol. 82, No. 397. (Mar., 1987), pp. 97-105.
Parameters
----------
endog : ndarray, (nobs,)
response, endogenous variable
exog_fe : ndarray, (nobs, k_vars_fe)
explanatory variables as regressors or fixed effects,
should include exog_re to correct mean of random
coefficients, see Notes
exog_re : ndarray, (nobs, k_vars_re)
explanatory variables or random effects or coefficients
Notes
-----
If the exog_re variables are not included in exog_fe, then the
mean of the random constants or coefficients are not centered.
    The covariance matrix of the random parameter estimates is not
    centered in this case. (That's how it looks to me. JP)
"""
def __init__(self, endog, exog_fe, exog_re):
self.Y = endog
self.X = exog_fe
self.Z = exog_re
self.n = endog.shape[0]
def _compute_S(self, D, sigma):
"""covariance of observations (nobs_i, nobs_i) (JP check)
Display (3.3) from Laird, Lange, Stram (see help(Unit))
"""
self.S = (np.identity(self.n) * sigma**2 +
np.dot(self.Z, np.dot(D, self.Z.T)))
def _compute_W(self):
"""inverse covariance of observations (nobs_i, nobs_i) (JP check)
Display (3.2) from Laird, Lange, Stram (see help(Unit))
"""
self.W = L.inv(self.S)
def compute_P(self, Sinv):
"""projection matrix (nobs_i, nobs_i) (M in regression ?) (JP check, guessing)
Display (3.10) from Laird, Lange, Stram (see help(Unit))
W - W X Sinv X' W'
"""
t = np.dot(self.W, self.X)
self.P = self.W - np.dot(np.dot(t, Sinv), t.T)
def _compute_r(self, alpha):
"""residual after removing fixed effects
Display (3.5) from Laird, Lange, Stram (see help(Unit))
"""
self.r = self.Y - np.dot(self.X, alpha)
def _compute_b(self, D):
"""coefficients for random effects/coefficients
Display (3.4) from Laird, Lange, Stram (see help(Unit))
D Z' W r
"""
self.b = np.dot(D, np.dot(np.dot(self.Z.T, self.W), self.r))
def fit(self, a, D, sigma):
"""
Compute unit specific parameters in
Laird, Lange, Stram (see help(Unit)).
Displays (3.2)-(3.5).
"""
self._compute_S(D, sigma) #random effect plus error covariance
self._compute_W() #inv(S)
self._compute_r(a) #residual after removing fixed effects/exogs
self._compute_b(D) #? coefficients on random exog, Z ?
def compute_xtwy(self):
"""
Utility function to compute X^tWY (transposed ?) for Unit instance.
"""
return np.dot(np.dot(self.W, self.Y), self.X) #is this transposed ?
def compute_xtwx(self):
"""
Utility function to compute X^tWX for Unit instance.
"""
return np.dot(np.dot(self.X.T, self.W), self.X)
def cov_random(self, D, Sinv=None):
"""
Approximate covariance of estimates of random effects. Just after
Display (3.10) in Laird, Lange, Stram (see help(Unit)).
D - D' Z' P Z D
Notes
-----
In example where the mean of the random coefficient is not zero, this
is not a covariance but a non-centered moment. (proof by example)
"""
if Sinv is not None:
self.compute_P(Sinv)
t = np.dot(self.Z, D)
return D - np.dot(np.dot(t.T, self.P), t)
def logL(self, a, ML=False):
"""
Individual contributions to the log-likelihood, tries to return REML
contribution by default though this requires estimated
fixed effect a to be passed as an argument.
no constant with pi included
a is not used if ML=true (should be a=None in signature)
If ML is false, then the residuals are calculated for the given fixed
effects parameters a.
"""
if ML:
return (np.log(L.det(self.W)) - (self.r * np.dot(self.W, self.r)).sum()) / 2.
else:
if a is None:
raise ValueError('need fixed effect a for REML contribution to log-likelihood')
r = self.Y - np.dot(self.X, a)
return (np.log(L.det(self.W)) - (r * np.dot(self.W, r)).sum()) / 2.
def deviance(self, ML=False):
'''deviance defined as 2 times the negative loglikelihood
'''
return - 2 * self.logL(ML=ML)
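# Minimal sketch (added; the data and names below are illustrative only, not
# taken from the original example files): a Unit bundles (endog, exog_fe,
# exog_re) for one subject, and Unit.fit evaluates displays (3.2)-(3.5) for a
# given fixed-effects vector ``a``, random-effects covariance ``D`` and noise
# scale ``sigma``.
#
# >>> rng = np.random.RandomState(0)
# >>> X = np.column_stack([np.ones(5), np.arange(5.)])   # fixed-effects design
# >>> Z = np.ones((5, 1))                                # random intercept
# >>> y = X.dot([1., 0.5]) + rng.standard_normal(5)
# >>> u = Unit(y, X, Z)
# >>> u.fit(np.array([1., 0.5]), np.eye(1), 1.0)
# >>> u.b.shape                 # random coefficient(s) estimated for this unit
# (1,)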
class OneWayMixed(object):
"""
Model for
EM implementation of (repeated measures)
mixed effects model.
\'Maximum Likelihood Computations with Repeated Measures:
Application of the EM Algorithm\'
Nan Laird; Nicholas Lange; Daniel Stram
Journal of the American Statistical Association,
Vol. 82, No. 397. (Mar., 1987), pp. 97-105.
Parameters
----------
units : list of units
the data for the individual units should be attached to the units
response, fixed and random : formula expression, called as argument to Formula
*available results and alias*
    (subject to renaming, and conversion to cached attributes)
params() -> self.a : coefficient for fixed effects or exog
cov_params() -> self.Sinv : covariance estimate of fixed effects/exog
bse() : standard deviation of params
cov_random -> self.D : estimate of random effects covariance
params_random_units -> [self.units[...].b] : random coefficient for each unit
*attributes*
(others)
self.m : number of units
self.p : k_vars_fixed
self.q : k_vars_random
self.N : nobs (total)
Notes
-----
Fit returns a result instance, but not all results that use the inherited
methods have been checked.
Parameters need to change: drop formula and we require a naming convention for
the units (currently Y,X,Z). - endog, exog_fe, endog_re ?
logL does not include constant, e.g. sqrt(pi)
llf is for MLE not for REML
convergence criteria for iteration
Currently convergence in the iterative solver is reached if either the loglikelihood
*or* the fixed effects parameter don't change above tolerance.
In some examples, the fixed effects parameters converged to 1e-5 within 150 iterations
while the log likelihood did not converge within 2000 iterations. This might be
the case if the fixed effects parameters are well estimated, but there are still
changes in the random effects. If params_rtol and params_atol are set at a higher
level, then the random effects might not be estimated to a very high precision.
The above was with a misspecified model, without a constant. With a
correctly specified model convergence is fast, within a few iterations
(6 in example).
"""
def __init__(self, units):
self.units = units
self.m = len(self.units)
self.n_units = self.m
self.N = sum(unit.X.shape[0] for unit in self.units)
self.nobs = self.N #alias for now
# Determine size of fixed effects
d = self.units[0].X
self.p = d.shape[1] # d.shape = p
self.k_exog_fe = self.p #alias for now
self.a = np.zeros(self.p, np.float64)
# Determine size of D, and sensible initial estimates
# of sigma and D
d = self.units[0].Z
self.q = d.shape[1] # Z.shape = q
self.k_exog_re = self.q #alias for now
self.D = np.zeros((self.q,)*2, np.float64)
self.sigma = 1.
self.dev = np.inf #initialize for iterations, move it?
def _compute_a(self):
"""fixed effects parameters
Display (3.1) of
Laird, Lange, Stram (see help(Mixed)).
"""
for unit in self.units:
unit.fit(self.a, self.D, self.sigma)
S = sum([unit.compute_xtwx() for unit in self.units])
Y = sum([unit.compute_xtwy() for unit in self.units])
self.Sinv = L.pinv(S)
self.a = np.dot(self.Sinv, Y)
def _compute_sigma(self, ML=False):
"""
Estimate sigma. If ML is True, return the ML estimate of sigma,
else return the REML estimate.
If ML, this is (3.6) in Laird, Lange, Stram (see help(Mixed)),
otherwise it corresponds to (3.8).
sigma is the standard deviation of the noise (residual)
"""
sigmasq = 0.
for unit in self.units:
if ML:
W = unit.W
else:
unit.compute_P(self.Sinv)
W = unit.P
t = unit.r - np.dot(unit.Z, unit.b)
sigmasq += np.power(t, 2).sum()
sigmasq += self.sigma**2 * np.trace(np.identity(unit.n) -
self.sigma**2 * W)
self.sigma = np.sqrt(sigmasq / self.N)
def _compute_D(self, ML=False):
"""
Estimate random effects covariance D.
If ML is True, return the ML estimate of sigma,
else return the REML estimate.
If ML, this is (3.7) in Laird, Lange, Stram (see help(Mixed)),
otherwise it corresponds to (3.9).
"""
D = 0.
for unit in self.units:
if ML:
W = unit.W
else:
unit.compute_P(self.Sinv)
W = unit.P
D += np.multiply.outer(unit.b, unit.b)
t = np.dot(unit.Z, self.D)
D += self.D - np.dot(np.dot(t.T, W), t)
self.D = D / self.m
def cov_fixed(self):
"""
Approximate covariance of estimates of fixed effects.
Just after Display (3.10) in Laird, Lange, Stram (see help(Mixed)).
"""
return self.Sinv
#----------- alias (JP) move to results class ?
def cov_random(self):
"""
Estimate random effects covariance D.
If ML is True, return the ML estimate of sigma, else return the REML estimate.
see _compute_D, alias for self.D
"""
return self.D
@property
def params(self):
'''
estimated coefficients for exogeneous variables or fixed effects
see _compute_a, alias for self.a
'''
return self.a
@property
def params_random_units(self):
'''random coefficients for each unit
'''
return np.array([unit.b for unit in self.units])
def cov_params(self):
'''
estimated covariance for coefficients for exogeneous variables or fixed effects
see cov_fixed, and Sinv in _compute_a
'''
return self.cov_fixed()
@property
def bse(self):
'''
standard errors of estimated coefficients for exogeneous variables (fixed)
'''
return np.sqrt(np.diag(self.cov_params()))
#----------- end alias
def deviance(self, ML=False):
'''deviance defined as 2 times the negative loglikelihood
'''
return -2 * self.logL(ML=ML)
def logL(self, ML=False):
"""
Return log-likelihood, REML by default.
"""
#I don't know what the difference between REML and ML is here.
logL = 0.
for unit in self.units:
logL += unit.logL(a=self.a, ML=ML)
if not ML:
logL += np.log(L.det(self.Sinv)) / 2
return logL
def initialize(self):
S = sum([np.dot(unit.X.T, unit.X) for unit in self.units])
Y = sum([np.dot(unit.X.T, unit.Y) for unit in self.units])
self.a = L.lstsq(S, Y)[0]
D = 0
t = 0
sigmasq = 0
for unit in self.units:
unit.r = unit.Y - np.dot(unit.X, self.a)
if self.q > 1:
unit.b = L.lstsq(unit.Z, unit.r)[0]
else:
Z = unit.Z.reshape((unit.Z.shape[0], 1))
unit.b = L.lstsq(Z, unit.r)[0]
sigmasq += (np.power(unit.Y, 2).sum() -
(self.a * np.dot(unit.X.T, unit.Y)).sum() -
(unit.b * np.dot(unit.Z.T, unit.r)).sum())
D += np.multiply.outer(unit.b, unit.b)
t += L.pinv(np.dot(unit.Z.T, unit.Z))
#TODO: JP added df_resid check
self.df_resid = (self.N - (self.m - 1) * self.q - self.p)
sigmasq /= (self.N - (self.m - 1) * self.q - self.p)
self.sigma = np.sqrt(sigmasq)
self.D = (D - sigmasq * t) / self.m
def cont(self, ML=False, rtol=1.0e-05, params_rtol=1e-5, params_atol=1e-4):
'''convergence check for iterative estimation
'''
self.dev, old = self.deviance(ML=ML), self.dev
#self.history.append(np.hstack((self.dev, self.a)))
self.history['llf'].append(self.dev)
self.history['params'].append(self.a.copy())
self.history['D'].append(self.D.copy())
if np.fabs((self.dev - old) / self.dev) < rtol: #why is there times `*`?
#print np.fabs((self.dev - old)), self.dev, old
self.termination = 'llf'
return False
#break if parameters converged
#TODO: check termination conditions, OR or AND
if np.all(np.abs(self.a - self._a_old) < (params_rtol * self.a + params_atol)):
self.termination = 'params'
return False
self._a_old = self.a.copy()
return True
def fit(self, maxiter=100, ML=False, rtol=1.0e-05, params_rtol=1e-6, params_atol=1e-6):
#initialize for convergence criteria
self._a_old = np.inf * self.a
self.history = {'llf':[], 'params':[], 'D':[]}
for i in range(maxiter):
self._compute_a() #a, Sinv : params, cov_params of fixed exog
self._compute_sigma(ML=ML) #sigma MLE or REML of sigma ?
self._compute_D(ML=ML) #D : covariance of random effects, MLE or REML
if not self.cont(ML=ML, rtol=rtol, params_rtol=params_rtol,
params_atol=params_atol):
break
else: #if end of loop is reached without break
self.termination = 'maxiter'
print('Warning: maximum number of iterations reached')
self.iterations = i
results = OneWayMixedResults(self)
#compatibility functions for fixed effects/exog
results.scale = 1
results.normalized_cov_params = self.cov_params()
return results
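# Hedged usage sketch (added; see examples/ex_mixed_lls_1.py for the real
# example). The estimator is driven entirely by the list of Unit instances;
# the simulated data below is illustrative only:
#
# >>> rng = np.random.RandomState(12)
# >>> units = []
# >>> for _ in range(20):
# ...     X = np.column_stack([np.ones(10), rng.standard_normal(10)])
# ...     Z = np.ones((10, 1))                      # random intercept per unit
# ...     y = X.dot([1., 2.]) + rng.standard_normal() + 0.5 * rng.standard_normal(10)
# ...     units.append(Unit(y, X, Z))
# >>> m = OneWayMixed(units)
# >>> m.initialize()
# >>> res = m.fit(maxiter=50)
# >>> res.params.shape                              # fixed-effects estimates
# (2,)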
class OneWayMixedResults(LikelihoodModelResults):
'''Results class for OneWayMixed models
'''
def __init__(self, model):
#TODO: check, change initialization to more standard pattern
self.model = model
self.params = model.params
#need to overwrite this because we don't have a standard
#model.loglike yet
#TODO: what todo about REML loglike, logL is not normalized
@cache_readonly
def llf(self):
return self.model.logL(ML=True)
@property
def params_random_units(self):
return self.model.params_random_units
def cov_random(self):
return self.model.cov_random()
def mean_random(self, idx='lastexog'):
if idx == 'lastexog':
meanr = self.params[-self.model.k_exog_re:]
elif isinstance(idx, list):
if not len(idx) == self.model.k_exog_re:
raise ValueError('length of idx different from k_exog_re')
else:
meanr = self.params[idx]
else:
meanr = np.zeros(self.model.k_exog_re)
return meanr
def std_random(self):
return np.sqrt(np.diag(self.cov_random()))
def plot_random_univariate(self, bins=None, use_loc=True):
'''create plot of marginal distribution of random effects
Parameters
----------
bins : int or bin edges
option for bins in matplotlibs hist method. Current default is not
very sophisticated. All distributions use the same setting for
bins.
use_loc : bool
If True, then the distribution with mean given by the fixed
effect is used.
Returns
-------
fig : matplotlib figure instance
figure with subplots
Notes
-----
What can make this fancier?
Bin edges will not make sense if loc or scale differ across random
effect distributions.
'''
#outsource this
import matplotlib.pyplot as plt
from scipy.stats import norm as normal
fig = plt.figure()
k = self.model.k_exog_re
if k > 3:
rows, cols = int(np.ceil(k * 0.5)), 2
else:
rows, cols = k, 1
if bins is None:
#bins = self.model.n_units // 20 #TODO: just roughly, check
# bins = np.sqrt(self.model.n_units)
bins = 5 + 2 * self.model.n_units**(1./3.)
if use_loc:
loc = self.mean_random()
else:
loc = [0]*k
scale = self.std_random()
for ii in range(k):
            ax = fig.add_subplot(rows, cols, ii + 1)  # subplot numbering is 1-based
freq, bins_, _ = ax.hist(loc[ii] + self.params_random_units[:,ii],
bins=bins, normed=True)
points = np.linspace(bins_[0], bins_[-1], 200)
#ax.plot(points, normal.pdf(points, loc=loc, scale=scale))
#loc of sample is approx. zero, with Z appended to X
#alternative, add fixed to mean
ax.set_title('Random Effect %d Marginal Distribution' % ii)
ax.plot(points,
normal.pdf(points, loc=loc[ii], scale=scale[ii]),
'r')
return fig
def plot_scatter_pairs(self, idx1, idx2, title=None, ax=None):
'''create scatter plot of two random effects
Parameters
----------
idx1, idx2 : int
indices of the two random effects to display, corresponding to
columns of exog_re
title : None or string
If None, then a default title is added
ax : None or matplotlib axis instance
If None, then a figure with one axis is created and returned.
If ax is not None, then the scatter plot is created on it, and
this axis instance is returned.
Returns
-------
ax_or_fig : axis or figure instance
see ax parameter
Notes
-----
Still needs ellipse from estimated parameters
'''
import matplotlib.pyplot as plt
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax_or_fig = fig
re1 = self.params_random_units[:,idx1]
re2 = self.params_random_units[:,idx2]
ax.plot(re1, re2, 'o', alpha=0.75)
if title is None:
title = 'Random Effects %d and %d' % (idx1, idx2)
ax.set_title(title)
ax_or_fig = ax
return ax_or_fig
def plot_scatter_all_pairs(self, title=None):
from statsmodels.graphics.plot_grids import scatter_ellipse
if self.model.k_exog_re < 2:
raise ValueError('less than two variables available')
return scatter_ellipse(self.params_random_units,
ell_kwds={'color':'r'})
#ell_kwds not implemented yet
# #note I have written this already as helper function, get it
# import matplotlib.pyplot as plt
# #from scipy.stats import norm as normal
# fig = plt.figure()
# k = self.model.k_exog_re
# n_plots = k * (k - 1) // 2
# if n_plots > 3:
# rows, cols = int(np.ceil(n_plots * 0.5)), 2
# else:
# rows, cols = n_plots, 1
#
# count = 1
# for ii in range(k):
# for jj in range(ii):
# ax = fig.add_subplot(rows, cols, count)
# self.plot_scatter_pairs(ii, jj, title=None, ax=ax)
# count += 1
#
# return fig
if __name__ == '__main__':
#see examples/ex_mixed_lls_1.py
pass
| bsd-3-clause |
ElDeveloper/scikit-learn | doc/sphinxext/numpy_ext/docscrape_sphinx.py | 408 | 8061 | import re
import inspect
import textwrap
import pydoc
from .docscrape import NumpyDocString
from .docscrape import FunctionDoc
from .docscrape import ClassDoc
class SphinxDocString(NumpyDocString):
def __init__(self, docstring, config=None):
config = {} if config is None else config
self.use_plots = config.get('use_plots', False)
NumpyDocString.__init__(self, docstring, config=config)
# string conversion routines
def _str_header(self, name, symbol='`'):
return ['.. rubric:: ' + name, '']
def _str_field_list(self, name):
return [':' + name + ':']
def _str_indent(self, doc, indent=4):
out = []
for line in doc:
out += [' ' * indent + line]
return out
def _str_signature(self):
return ['']
if self['Signature']:
return ['``%s``' % self['Signature']] + ['']
else:
return ['']
def _str_summary(self):
return self['Summary'] + ['']
def _str_extended_summary(self):
return self['Extended Summary'] + ['']
def _str_param_list(self, name):
out = []
if self[name]:
out += self._str_field_list(name)
out += ['']
for param, param_type, desc in self[name]:
out += self._str_indent(['**%s** : %s' % (param.strip(),
param_type)])
out += ['']
out += self._str_indent(desc, 8)
out += ['']
return out
@property
def _obj(self):
if hasattr(self, '_cls'):
return self._cls
elif hasattr(self, '_f'):
return self._f
return None
def _str_member_list(self, name):
"""
Generate a member listing, autosummary:: table where possible,
and a table where not.
"""
out = []
if self[name]:
out += ['.. rubric:: %s' % name, '']
prefix = getattr(self, '_name', '')
if prefix:
prefix = '~%s.' % prefix
autosum = []
others = []
for param, param_type, desc in self[name]:
param = param.strip()
if not self._obj or hasattr(self._obj, param):
autosum += [" %s%s" % (prefix, param)]
else:
others.append((param, param_type, desc))
if autosum:
# GAEL: Toctree commented out below because it creates
# hundreds of sphinx warnings
# out += ['.. autosummary::', ' :toctree:', '']
out += ['.. autosummary::', '']
out += autosum
if others:
maxlen_0 = max([len(x[0]) for x in others])
maxlen_1 = max([len(x[1]) for x in others])
hdr = "=" * maxlen_0 + " " + "=" * maxlen_1 + " " + "=" * 10
fmt = '%%%ds %%%ds ' % (maxlen_0, maxlen_1)
n_indent = maxlen_0 + maxlen_1 + 4
out += [hdr]
for param, param_type, desc in others:
out += [fmt % (param.strip(), param_type)]
out += self._str_indent(desc, n_indent)
out += [hdr]
out += ['']
return out
def _str_section(self, name):
out = []
if self[name]:
out += self._str_header(name)
out += ['']
content = textwrap.dedent("\n".join(self[name])).split("\n")
out += content
out += ['']
return out
def _str_see_also(self, func_role):
out = []
if self['See Also']:
see_also = super(SphinxDocString, self)._str_see_also(func_role)
out = ['.. seealso::', '']
out += self._str_indent(see_also[2:])
return out
def _str_warnings(self):
out = []
if self['Warnings']:
out = ['.. warning::', '']
out += self._str_indent(self['Warnings'])
return out
def _str_index(self):
idx = self['index']
out = []
if len(idx) == 0:
return out
out += ['.. index:: %s' % idx.get('default', '')]
for section, references in idx.iteritems():
if section == 'default':
continue
elif section == 'refguide':
out += [' single: %s' % (', '.join(references))]
else:
out += [' %s: %s' % (section, ','.join(references))]
return out
def _str_references(self):
out = []
if self['References']:
out += self._str_header('References')
if isinstance(self['References'], str):
self['References'] = [self['References']]
out.extend(self['References'])
out += ['']
# Latex collects all references to a separate bibliography,
# so we need to insert links to it
import sphinx # local import to avoid test dependency
if sphinx.__version__ >= "0.6":
out += ['.. only:: latex', '']
else:
out += ['.. latexonly::', '']
items = []
for line in self['References']:
m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I)
if m:
items.append(m.group(1))
out += [' ' + ", ".join(["[%s]_" % item for item in items]), '']
return out
def _str_examples(self):
examples_str = "\n".join(self['Examples'])
if (self.use_plots and 'import matplotlib' in examples_str
and 'plot::' not in examples_str):
out = []
out += self._str_header('Examples')
out += ['.. plot::', '']
out += self._str_indent(self['Examples'])
out += ['']
return out
else:
return self._str_section('Examples')
def __str__(self, indent=0, func_role="obj"):
out = []
out += self._str_signature()
out += self._str_index() + ['']
out += self._str_summary()
out += self._str_extended_summary()
for param_list in ('Parameters', 'Returns', 'Raises', 'Attributes'):
out += self._str_param_list(param_list)
out += self._str_warnings()
out += self._str_see_also(func_role)
out += self._str_section('Notes')
out += self._str_references()
out += self._str_examples()
for param_list in ('Methods',):
out += self._str_member_list(param_list)
out = self._str_indent(out, indent)
return '\n'.join(out)
class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
def __init__(self, obj, doc=None, config={}):
self.use_plots = config.get('use_plots', False)
FunctionDoc.__init__(self, obj, doc=doc, config=config)
class SphinxClassDoc(SphinxDocString, ClassDoc):
def __init__(self, obj, doc=None, func_doc=None, config={}):
self.use_plots = config.get('use_plots', False)
ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config)
class SphinxObjDoc(SphinxDocString):
def __init__(self, obj, doc=None, config=None):
self._f = obj
SphinxDocString.__init__(self, doc, config=config)
def get_doc_object(obj, what=None, doc=None, config={}):
if what is None:
if inspect.isclass(obj):
what = 'class'
elif inspect.ismodule(obj):
what = 'module'
elif callable(obj):
what = 'function'
else:
what = 'object'
if what == 'class':
return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc,
config=config)
elif what in ('function', 'method'):
return SphinxFunctionDoc(obj, doc=doc, config=config)
else:
if doc is None:
doc = pydoc.getdoc(obj)
return SphinxObjDoc(obj, doc, config=config)
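# Hedged usage note (added comment): rendering an object's numpydoc-style
# docstring into the reST consumed by Sphinx amounts to str() of the wrapper
# returned by get_doc_object; ``np.mean`` below is only an illustrative
# target and assumes ``import numpy as np``.
#
# >>> doc = get_doc_object(np.mean)
# >>> rest = str(doc)            # reST with rubric / field-list sections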
| bsd-3-clause |
Garrett-R/scikit-learn | examples/bicluster/plot_spectral_biclustering.py | 403 | 2011 | """
=============================================
A demo of the Spectral Biclustering algorithm
=============================================
This example demonstrates how to generate a checkerboard dataset and
bicluster it using the Spectral Biclustering algorithm.
The data is generated with the ``make_checkerboard`` function, then
shuffled and passed to the Spectral Biclustering algorithm. The rows
and columns of the shuffled matrix are rearranged to show the
biclusters found by the algorithm.
The outer product of the row and column label vectors shows a
representation of the checkerboard structure.
"""
print(__doc__)
# Author: Kemal Eren <[email protected]>
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import make_checkerboard
from sklearn.datasets import samples_generator as sg
from sklearn.cluster.bicluster import SpectralBiclustering
from sklearn.metrics import consensus_score
n_clusters = (4, 3)
data, rows, columns = make_checkerboard(
shape=(300, 300), n_clusters=n_clusters, noise=10,
shuffle=False, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Original dataset")
data, row_idx, col_idx = sg._shuffle(data, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Shuffled dataset")
model = SpectralBiclustering(n_clusters=n_clusters, method='log',
random_state=0)
model.fit(data)
score = consensus_score(model.biclusters_,
(rows[:, row_idx], columns[:, col_idx]))
print("consensus score: {:.1f}".format(score))
fit_data = data[np.argsort(model.row_labels_)]
fit_data = fit_data[:, np.argsort(model.column_labels_)]
plt.matshow(fit_data, cmap=plt.cm.Blues)
plt.title("After biclustering; rearranged to show biclusters")
plt.matshow(np.outer(np.sort(model.row_labels_) + 1,
np.sort(model.column_labels_) + 1),
cmap=plt.cm.Blues)
plt.title("Checkerboard structure of rearranged data")
plt.show()
| bsd-3-clause |
iismd17/scikit-learn | sklearn/feature_selection/tests/test_chi2.py | 221 | 2398 | """
Tests for chi2, currently the only feature selection function designed
specifically to work with sparse matrices.
"""
import numpy as np
from scipy.sparse import coo_matrix, csr_matrix
import scipy.stats
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.feature_selection.univariate_selection import _chisquare
from nose.tools import assert_raises
from numpy.testing import assert_equal, assert_array_almost_equal
# Feature 0 is highly informative for class 1;
# feature 1 is the same everywhere;
# feature 2 is a bit informative for class 2.
X = [[2, 1, 2],
[9, 1, 1],
[6, 1, 2],
[0, 1, 2]]
y = [0, 1, 2, 2]
def mkchi2(k):
"""Make k-best chi2 selector"""
return SelectKBest(chi2, k=k)
def test_chi2():
# Test Chi2 feature extraction
chi2 = mkchi2(k=1).fit(X, y)
chi2 = mkchi2(k=1).fit(X, y)
assert_equal(chi2.get_support(indices=True), [0])
assert_equal(chi2.transform(X), np.array(X)[:, [0]])
chi2 = mkchi2(k=2).fit(X, y)
assert_equal(sorted(chi2.get_support(indices=True)), [0, 2])
Xsp = csr_matrix(X, dtype=np.float)
chi2 = mkchi2(k=2).fit(Xsp, y)
assert_equal(sorted(chi2.get_support(indices=True)), [0, 2])
Xtrans = chi2.transform(Xsp)
assert_equal(Xtrans.shape, [Xsp.shape[0], 2])
# == doesn't work on scipy.sparse matrices
Xtrans = Xtrans.toarray()
Xtrans2 = mkchi2(k=2).fit_transform(Xsp, y).toarray()
assert_equal(Xtrans, Xtrans2)
def test_chi2_coo():
# Check that chi2 works with a COO matrix
# (as returned by CountVectorizer, DictVectorizer)
Xcoo = coo_matrix(X)
mkchi2(k=2).fit_transform(Xcoo, y)
# if we got here without an exception, we're safe
def test_chi2_negative():
# Check for proper error on negative numbers in the input X.
X, y = [[0, 1], [-1e-20, 1]], [0, 1]
for X in (X, np.array(X), csr_matrix(X)):
assert_raises(ValueError, chi2, X, y)
def test_chisquare():
# Test replacement for scipy.stats.chisquare against the original.
obs = np.array([[2., 2.],
[1., 1.]])
exp = np.array([[1.5, 1.5],
[1.5, 1.5]])
# call SciPy first because our version overwrites obs
chi_scp, p_scp = scipy.stats.chisquare(obs, exp)
chi_our, p_our = _chisquare(obs, exp)
assert_array_almost_equal(chi_scp, chi_our)
assert_array_almost_equal(p_scp, p_our)
| bsd-3-clause |
UNR-AERIAL/scikit-learn | sklearn/cluster/birch.py | 207 | 22706 | # Authors: Manoj Kumar <[email protected]>
# Alexandre Gramfort <[email protected]>
# Joel Nothman <[email protected]>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy import sparse
from math import sqrt
from ..metrics.pairwise import euclidean_distances
from ..base import TransformerMixin, ClusterMixin, BaseEstimator
from ..externals.six.moves import xrange
from ..utils import check_array
from ..utils.extmath import row_norms, safe_sparse_dot
from ..utils.validation import NotFittedError, check_is_fitted
from .hierarchical import AgglomerativeClustering
def _iterate_sparse_X(X):
"""This little hack returns a densified row when iterating over a sparse
matrix, insted of constructing a sparse matrix for every row that is
expensive.
"""
n_samples = X.shape[0]
X_indices = X.indices
X_data = X.data
X_indptr = X.indptr
for i in xrange(n_samples):
row = np.zeros(X.shape[1])
startptr, endptr = X_indptr[i], X_indptr[i + 1]
nonzero_indices = X_indices[startptr:endptr]
row[nonzero_indices] = X_data[startptr:endptr]
yield row
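# Small illustration (added comment) of what the generator yields: each row of
# the sparse matrix comes back as a dense 1-D array, one row at a time.
#
# >>> X = sparse.csr_matrix(np.array([[0., 2.], [3., 0.]]))
# >>> list(_iterate_sparse_X(X))
# [array([ 0.,  2.]), array([ 3.,  0.])]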
def _split_node(node, threshold, branching_factor):
"""The node has to be split if there is no place for a new subcluster
in the node.
1. Two empty nodes and two empty subclusters are initialized.
2. The pair of distant subclusters are found.
3. The properties of the empty subclusters and nodes are updated
according to the nearest distance between the subclusters to the
pair of distant subclusters.
4. The two nodes are set as children to the two subclusters.
"""
new_subcluster1 = _CFSubcluster()
new_subcluster2 = _CFSubcluster()
new_node1 = _CFNode(
threshold, branching_factor, is_leaf=node.is_leaf,
n_features=node.n_features)
new_node2 = _CFNode(
threshold, branching_factor, is_leaf=node.is_leaf,
n_features=node.n_features)
new_subcluster1.child_ = new_node1
new_subcluster2.child_ = new_node2
if node.is_leaf:
if node.prev_leaf_ is not None:
node.prev_leaf_.next_leaf_ = new_node1
new_node1.prev_leaf_ = node.prev_leaf_
new_node1.next_leaf_ = new_node2
new_node2.prev_leaf_ = new_node1
new_node2.next_leaf_ = node.next_leaf_
if node.next_leaf_ is not None:
node.next_leaf_.prev_leaf_ = new_node2
dist = euclidean_distances(
node.centroids_, Y_norm_squared=node.squared_norm_, squared=True)
n_clusters = dist.shape[0]
farthest_idx = np.unravel_index(
dist.argmax(), (n_clusters, n_clusters))
node1_dist, node2_dist = dist[[farthest_idx]]
node1_closer = node1_dist < node2_dist
for idx, subcluster in enumerate(node.subclusters_):
if node1_closer[idx]:
new_node1.append_subcluster(subcluster)
new_subcluster1.update(subcluster)
else:
new_node2.append_subcluster(subcluster)
new_subcluster2.update(subcluster)
return new_subcluster1, new_subcluster2
class _CFNode(object):
"""Each node in a CFTree is called a CFNode.
The CFNode can have a maximum of branching_factor
number of CFSubclusters.
Parameters
----------
threshold : float
Threshold needed for a new subcluster to enter a CFSubcluster.
branching_factor : int
Maximum number of CF subclusters in each node.
is_leaf : bool
We need to know if the CFNode is a leaf or not, in order to
retrieve the final subclusters.
n_features : int
The number of features.
Attributes
----------
subclusters_ : array-like
list of subclusters for a particular CFNode.
prev_leaf_ : _CFNode
prev_leaf. Useful only if is_leaf is True.
next_leaf_ : _CFNode
        next_leaf. Useful only if is_leaf is True.
init_centroids_ : ndarray, shape (branching_factor + 1, n_features)
manipulate ``init_centroids_`` throughout rather than centroids_ since
the centroids are just a view of the ``init_centroids_`` .
init_sq_norm_ : ndarray, shape (branching_factor + 1,)
manipulate init_sq_norm_ throughout. similar to ``init_centroids_``.
centroids_ : ndarray
view of ``init_centroids_``.
squared_norm_ : ndarray
view of ``init_sq_norm_``.
"""
def __init__(self, threshold, branching_factor, is_leaf, n_features):
self.threshold = threshold
self.branching_factor = branching_factor
self.is_leaf = is_leaf
self.n_features = n_features
# The list of subclusters, centroids and squared norms
# to manipulate throughout.
self.subclusters_ = []
self.init_centroids_ = np.zeros((branching_factor + 1, n_features))
self.init_sq_norm_ = np.zeros((branching_factor + 1))
self.squared_norm_ = []
self.prev_leaf_ = None
self.next_leaf_ = None
def append_subcluster(self, subcluster):
n_samples = len(self.subclusters_)
self.subclusters_.append(subcluster)
self.init_centroids_[n_samples] = subcluster.centroid_
self.init_sq_norm_[n_samples] = subcluster.sq_norm_
# Keep centroids and squared norm as views. In this way
# if we change init_centroids and init_sq_norm_, it is
# sufficient,
self.centroids_ = self.init_centroids_[:n_samples + 1, :]
self.squared_norm_ = self.init_sq_norm_[:n_samples + 1]
def update_split_subclusters(self, subcluster,
new_subcluster1, new_subcluster2):
"""Remove a subcluster from a node and update it with the
split subclusters.
"""
ind = self.subclusters_.index(subcluster)
self.subclusters_[ind] = new_subcluster1
self.init_centroids_[ind] = new_subcluster1.centroid_
self.init_sq_norm_[ind] = new_subcluster1.sq_norm_
self.append_subcluster(new_subcluster2)
def insert_cf_subcluster(self, subcluster):
"""Insert a new subcluster into the node."""
if not self.subclusters_:
self.append_subcluster(subcluster)
return False
threshold = self.threshold
branching_factor = self.branching_factor
# We need to find the closest subcluster among all the
# subclusters so that we can insert our new subcluster.
dist_matrix = np.dot(self.centroids_, subcluster.centroid_)
dist_matrix *= -2.
dist_matrix += self.squared_norm_
closest_index = np.argmin(dist_matrix)
closest_subcluster = self.subclusters_[closest_index]
# If the subcluster has a child, we need a recursive strategy.
if closest_subcluster.child_ is not None:
split_child = closest_subcluster.child_.insert_cf_subcluster(
subcluster)
if not split_child:
# If it is determined that the child need not be split, we
# can just update the closest_subcluster
closest_subcluster.update(subcluster)
self.init_centroids_[closest_index] = \
self.subclusters_[closest_index].centroid_
self.init_sq_norm_[closest_index] = \
self.subclusters_[closest_index].sq_norm_
return False
            # Things are not so good: we need to redistribute the subclusters
            # in our child node, and add a new subcluster in the parent
            # subcluster to accommodate the new child.
else:
new_subcluster1, new_subcluster2 = _split_node(
closest_subcluster.child_, threshold, branching_factor)
self.update_split_subclusters(
closest_subcluster, new_subcluster1, new_subcluster2)
if len(self.subclusters_) > self.branching_factor:
return True
return False
# good to go!
else:
merged = closest_subcluster.merge_subcluster(
subcluster, self.threshold)
if merged:
self.init_centroids_[closest_index] = \
closest_subcluster.centroid_
self.init_sq_norm_[closest_index] = \
closest_subcluster.sq_norm_
return False
# not close to any other subclusters, and we still
# have space, so add.
elif len(self.subclusters_) < self.branching_factor:
self.append_subcluster(subcluster)
return False
# We do not have enough space nor is it closer to an
# other subcluster. We need to split.
else:
self.append_subcluster(subcluster)
return True
class _CFSubcluster(object):
"""Each subcluster in a CFNode is called a CFSubcluster.
A CFSubcluster can have a CFNode has its child.
Parameters
----------
linear_sum : ndarray, shape (n_features,), optional
Sample. This is kept optional to allow initialization of empty
subclusters.
Attributes
----------
n_samples_ : int
Number of samples that belong to each subcluster.
linear_sum_ : ndarray
Linear sum of all the samples in a subcluster. Prevents holding
all sample data in memory.
squared_sum_ : float
Sum of the squared l2 norms of all samples belonging to a subcluster.
centroid_ : ndarray
Centroid of the subcluster. Prevent recomputing of centroids when
``CFNode.centroids_`` is called.
child_ : _CFNode
Child Node of the subcluster. Once a given _CFNode is set as the child
of the _CFNode, it is set to ``self.child_``.
sq_norm_ : ndarray
Squared norm of the subcluster. Used to prevent recomputing when
pairwise minimum distances are computed.
"""
def __init__(self, linear_sum=None):
if linear_sum is None:
self.n_samples_ = 0
self.squared_sum_ = 0.0
self.linear_sum_ = 0
else:
self.n_samples_ = 1
self.centroid_ = self.linear_sum_ = linear_sum
self.squared_sum_ = self.sq_norm_ = np.dot(
self.linear_sum_, self.linear_sum_)
self.child_ = None
def update(self, subcluster):
self.n_samples_ += subcluster.n_samples_
self.linear_sum_ += subcluster.linear_sum_
self.squared_sum_ += subcluster.squared_sum_
self.centroid_ = self.linear_sum_ / self.n_samples_
self.sq_norm_ = np.dot(self.centroid_, self.centroid_)
def merge_subcluster(self, nominee_cluster, threshold):
"""Check if a cluster is worthy enough to be merged. If
yes then merge.
"""
new_ss = self.squared_sum_ + nominee_cluster.squared_sum_
new_ls = self.linear_sum_ + nominee_cluster.linear_sum_
new_n = self.n_samples_ + nominee_cluster.n_samples_
new_centroid = (1 / new_n) * new_ls
new_norm = np.dot(new_centroid, new_centroid)
dot_product = (-2 * new_n) * new_norm
sq_radius = (new_ss + dot_product) / new_n + new_norm
if sq_radius <= threshold ** 2:
(self.n_samples_, self.linear_sum_, self.squared_sum_,
self.centroid_, self.sq_norm_) = \
new_n, new_ls, new_ss, new_centroid, new_norm
return True
return False
@property
def radius(self):
"""Return radius of the subcluster"""
dot_product = -2 * np.dot(self.linear_sum_, self.centroid_)
return sqrt(
((self.squared_sum_ + dot_product) / self.n_samples_) +
self.sq_norm_)
class Birch(BaseEstimator, TransformerMixin, ClusterMixin):
"""Implements the Birch clustering algorithm.
Every new sample is inserted into the root of the Clustering Feature
Tree. It is then clubbed together with the subcluster that has the
centroid closest to the new sample. This is done recursively till it
    ends up at the leaf subcluster of the tree that has the closest centroid.
Read more in the :ref:`User Guide <birch>`.
Parameters
----------
threshold : float, default 0.5
The radius of the subcluster obtained by merging a new sample and the
        closest subcluster should be less than the threshold. Otherwise a new
subcluster is started.
branching_factor : int, default 50
        Maximum number of CF subclusters in each node. If a new sample enters
        such that the number of subclusters exceeds the branching_factor then
the node has to be split. The corresponding parent also has to be
split and if the number of subclusters in the parent is greater than
the branching factor, then it has to be split recursively.
    n_clusters : int, instance of sklearn.cluster model, default 3
        Number of clusters after the final clustering step, which treats the
        subclusters from the leaves as new samples. If set to None, this final
clustering step is not performed and the subclusters are returned
as they are. If a model is provided, the model is fit treating
the subclusters as new samples and the initial data is mapped to the
label of the closest subcluster. If an int is provided, the model
fit is AgglomerativeClustering with n_clusters set to the int.
compute_labels : bool, default True
Whether or not to compute labels for each fit.
copy : bool, default True
Whether or not to make a copy of the given data. If set to False,
the initial data will be overwritten.
Attributes
----------
root_ : _CFNode
Root of the CFTree.
dummy_leaf_ : _CFNode
Start pointer to all the leaves.
subcluster_centers_ : ndarray,
Centroids of all subclusters read directly from the leaves.
subcluster_labels_ : ndarray,
Labels assigned to the centroids of the subclusters after
they are clustered globally.
labels_ : ndarray, shape (n_samples,)
Array of labels assigned to the input data.
        If partial_fit is used instead of fit, they are assigned to the
last batch of data.
Examples
--------
>>> from sklearn.cluster import Birch
>>> X = [[0, 1], [0.3, 1], [-0.3, 1], [0, -1], [0.3, -1], [-0.3, -1]]
>>> brc = Birch(branching_factor=50, n_clusters=None, threshold=0.5,
... compute_labels=True)
>>> brc.fit(X)
Birch(branching_factor=50, compute_labels=True, copy=True, n_clusters=None,
threshold=0.5)
>>> brc.predict(X)
array([0, 0, 0, 1, 1, 1])
References
----------
* Tian Zhang, Raghu Ramakrishnan, Maron Livny
BIRCH: An efficient data clustering method for large databases.
http://www.cs.sfu.ca/CourseCentral/459/han/papers/zhang96.pdf
* Roberto Perdisci
JBirch - Java implementation of BIRCH clustering algorithm
https://code.google.com/p/jbirch/
"""
def __init__(self, threshold=0.5, branching_factor=50, n_clusters=3,
compute_labels=True, copy=True):
self.threshold = threshold
self.branching_factor = branching_factor
self.n_clusters = n_clusters
self.compute_labels = compute_labels
self.copy = copy
def fit(self, X, y=None):
"""
Build a CF Tree for the input data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data.
"""
self.fit_, self.partial_fit_ = True, False
return self._fit(X)
def _fit(self, X):
X = check_array(X, accept_sparse='csr', copy=self.copy)
threshold = self.threshold
branching_factor = self.branching_factor
if branching_factor <= 1:
raise ValueError("Branching_factor should be greater than one.")
n_samples, n_features = X.shape
# If partial_fit is called for the first time or fit is called, we
# start a new tree.
partial_fit = getattr(self, 'partial_fit_')
has_root = getattr(self, 'root_', None)
if getattr(self, 'fit_') or (partial_fit and not has_root):
# The first root is the leaf. Manipulate this object throughout.
self.root_ = _CFNode(threshold, branching_factor, is_leaf=True,
n_features=n_features)
# To enable getting back subclusters.
self.dummy_leaf_ = _CFNode(threshold, branching_factor,
is_leaf=True, n_features=n_features)
self.dummy_leaf_.next_leaf_ = self.root_
self.root_.prev_leaf_ = self.dummy_leaf_
        # Cannot vectorize. Enough to convince one to use Cython.
if not sparse.issparse(X):
iter_func = iter
else:
iter_func = _iterate_sparse_X
for sample in iter_func(X):
subcluster = _CFSubcluster(linear_sum=sample)
split = self.root_.insert_cf_subcluster(subcluster)
if split:
new_subcluster1, new_subcluster2 = _split_node(
self.root_, threshold, branching_factor)
del self.root_
self.root_ = _CFNode(threshold, branching_factor,
is_leaf=False,
n_features=n_features)
self.root_.append_subcluster(new_subcluster1)
self.root_.append_subcluster(new_subcluster2)
centroids = np.concatenate([
leaf.centroids_ for leaf in self._get_leaves()])
self.subcluster_centers_ = centroids
self._global_clustering(X)
return self
def _get_leaves(self):
"""
Retrieve the leaves of the CF Node.
Returns
-------
leaves: array-like
List of the leaf nodes.
"""
leaf_ptr = self.dummy_leaf_.next_leaf_
leaves = []
while leaf_ptr is not None:
leaves.append(leaf_ptr)
leaf_ptr = leaf_ptr.next_leaf_
return leaves
def partial_fit(self, X=None, y=None):
"""
Online learning. Prevents rebuilding of CFTree from scratch.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features), None
Input data. If X is not provided, only the global clustering
step is done.
"""
self.partial_fit_, self.fit_ = True, False
if X is None:
# Perform just the final global clustering step.
self._global_clustering()
return self
else:
self._check_fit(X)
return self._fit(X)
def _check_fit(self, X):
is_fitted = hasattr(self, 'subcluster_centers_')
# Called by partial_fit, before fitting.
has_partial_fit = hasattr(self, 'partial_fit_')
# Should raise an error if one does not fit before predicting.
if not (is_fitted or has_partial_fit):
raise NotFittedError("Fit training data before predicting")
if is_fitted and X.shape[1] != self.subcluster_centers_.shape[1]:
raise ValueError(
"Training data and predicted data do "
"not have same number of features.")
def predict(self, X):
"""
Predict data using the ``centroids_`` of subclusters.
Avoid computation of the row norms of X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data.
Returns
-------
labels: ndarray, shape(n_samples)
Labelled data.
"""
X = check_array(X, accept_sparse='csr')
self._check_fit(X)
reduced_distance = safe_sparse_dot(X, self.subcluster_centers_.T)
reduced_distance *= -2
reduced_distance += self._subcluster_norms
return self.subcluster_labels_[np.argmin(reduced_distance, axis=1)]
def transform(self, X, y=None):
"""
Transform X into subcluster centroids dimension.
Each dimension represents the distance from the sample point to each
cluster centroid.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data.
Returns
-------
X_trans : {array-like, sparse matrix}, shape (n_samples, n_clusters)
Transformed data.
"""
check_is_fitted(self, 'subcluster_centers_')
return euclidean_distances(X, self.subcluster_centers_)
def _global_clustering(self, X=None):
"""
Global clustering for the subclusters obtained after fitting
"""
clusterer = self.n_clusters
centroids = self.subcluster_centers_
compute_labels = (X is not None) and self.compute_labels
# Preprocessing for the global clustering.
not_enough_centroids = False
if isinstance(clusterer, int):
clusterer = AgglomerativeClustering(
n_clusters=self.n_clusters)
# There is no need to perform the global clustering step.
if len(centroids) < self.n_clusters:
not_enough_centroids = True
elif (clusterer is not None and not
hasattr(clusterer, 'fit_predict')):
raise ValueError("n_clusters should be an instance of "
"ClusterMixin or an int")
# To use in predict to avoid recalculation.
self._subcluster_norms = row_norms(
self.subcluster_centers_, squared=True)
if clusterer is None or not_enough_centroids:
self.subcluster_labels_ = np.arange(len(centroids))
if not_enough_centroids:
warnings.warn(
"Number of subclusters found (%d) by Birch is less "
"than (%d). Decrease the threshold."
% (len(centroids), self.n_clusters))
else:
# The global clustering step that clusters the subclusters of
# the leaves. It assumes the centroids of the subclusters as
# samples and finds the final centroids.
self.subcluster_labels_ = clusterer.fit_predict(
self.subcluster_centers_)
if compute_labels:
self.labels_ = self.predict(X)
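# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module).  It only runs
# when this file is executed directly, never on import, and the data and
# parameter values below are made up for demonstration.
if __name__ == "__main__":
    import numpy as np
    from sklearn.cluster import Birch

    rng = np.random.RandomState(42)
    batch1 = rng.normal(0.0, 0.3, size=(100, 2))
    batch2 = rng.normal(5.0, 0.3, size=(100, 2))

    # ``threshold`` bounds the radius of a merged subcluster, see
    # ``_CFSubcluster.merge_subcluster`` above.
    brc = Birch(threshold=0.3, branching_factor=50, n_clusters=2)
    brc.fit(np.vstack([batch1, batch2]))
    print("Leaf subclusters after fit: %d" % brc.subcluster_centers_.shape[0])

    # ``partial_fit`` keeps the CF tree and inserts the new batch into it.
    brc.partial_fit(rng.normal(5.0, 0.3, size=(50, 2)))
    print("Leaf subclusters after partial_fit: %d"
          % brc.subcluster_centers_.shape[0])
    print("Labels on the last batch: %s" % np.unique(brc.labels_))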
| bsd-3-clause |
Titan-C/scikit-learn | examples/cluster/plot_ward_structured_vs_unstructured.py | 1 | 3369 | """
===========================================================
Hierarchical clustering: structured vs unstructured ward
===========================================================
This example builds a swiss roll dataset and runs
hierarchical clustering on the position of the data points.
For more information, see :ref:`hierarchical_clustering`.
In a first step, the hierarchical clustering is performed without connectivity
constraints on the structure and is solely based on distance, whereas in
a second step the clustering is restricted to the k-Nearest Neighbors
graph: it's a hierarchical clustering with structure prior.
Some of the clusters learned without connectivity constraints do not
respect the structure of the swiss roll and extend across different folds of
the manifolds. In contrast, when imposing connectivity constraints,
the clusters form a nice parcellation of the swiss roll.
"""
# Authors : Vincent Michel, 2010
# Alexandre Gramfort, 2010
# Gael Varoquaux, 2010
# License: BSD 3 clause
print(__doc__)
import time as time
import numpy as np
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d.axes3d as p3
from sklearn.cluster import AgglomerativeClustering
from sklearn.datasets.samples_generator import make_swiss_roll
# #############################################################################
# Generate data (swiss roll dataset)
n_samples = 1500
noise = 0.05
X, _ = make_swiss_roll(n_samples, noise)
# Make it thinner
X[:, 1] *= .5
# #############################################################################
# Compute clustering
print("Compute unstructured hierarchical clustering...")
st = time.time()
ward = AgglomerativeClustering(n_clusters=6, linkage='ward').fit(X)
elapsed_time = time.time() - st
label = ward.labels_
print("Elapsed time: %.2fs" % elapsed_time)
print("Number of points: %i" % label.size)
# #############################################################################
# Plot result
fig = plt.figure()
ax = p3.Axes3D(fig)
ax.view_init(7, -80)
for l in np.unique(label):
ax.plot3D(X[label == l, 0], X[label == l, 1], X[label == l, 2],
              'o', color=plt.cm.jet(float(l) / np.max(label + 1)))
plt.title('Without connectivity constraints (time %.2fs)' % elapsed_time)
# #############################################################################
# Define the structure A of the data. Here a 10 nearest neighbors
from sklearn.neighbors import kneighbors_graph
connectivity = kneighbors_graph(X, n_neighbors=10, include_self=False)
# #############################################################################
# Compute clustering
print("Compute structured hierarchical clustering...")
st = time.time()
ward = AgglomerativeClustering(n_clusters=6, connectivity=connectivity,
linkage='ward').fit(X)
elapsed_time = time.time() - st
label = ward.labels_
print("Elapsed time: %.2fs" % elapsed_time)
print("Number of points: %i" % label.size)
# #############################################################################
# Plot result
fig = plt.figure()
ax = p3.Axes3D(fig)
ax.view_init(7, -80)
for l in np.unique(label):
ax.plot3D(X[label == l, 0], X[label == l, 1], X[label == l, 2],
'o', color=plt.cm.jet(float(l) / np.max(label + 1)))
plt.title('With connectivity constraints (time %.2fs)' % elapsed_time)
plt.show()
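# #############################################################################
# Added sketch (not in the original example): a quick numerical complement to
# the plots above.  ``label`` currently holds the structured solution and
# ``np`` was imported at the top of the script.
print("Cluster sizes (structured ward): %s" % np.bincount(label))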
| bsd-3-clause |
petebachant/PXL | pxl/tests/test_fdiff.py | 1 | 1436 | from __future__ import division, print_function
from .. import fdiff
from ..fdiff import *
import matplotlib.pyplot as plt
import pandas as pd
import os
import numpy as np
from uncertainties import unumpy
plot = False
def test_second_order_diff():
"""Test `second_order_diff`."""
# Create a non-equally spaced x vector
x = np.append(np.linspace(0, np.pi, 100),
np.linspace(np.pi + 0.01, 2*np.pi, 400))
u = np.sin(x)
dudx = second_order_diff(u, x)
assert dudx.shape == u.shape
# Assert that this function is almost identical to cos(x)
np.testing.assert_allclose(dudx, np.cos(x), rtol=1e-3)
if plot:
plt.plot(x, dudx, "-o", lw=2, alpha=0.5)
plt.plot(x, np.cos(x), "--^", lw=2, alpha=0.5)
plt.show()
def test_second_order_diff_uncertainties():
"""Test that `second_order_diff` works with uncertainties."""
# Create a non-equally spaced x vector
x = np.append(np.linspace(0, np.pi, 50),
np.linspace(np.pi + 0.01, 2*np.pi, 100))
x_unc = unumpy.uarray(x, np.ones(len(x))*1e-3)
u = unumpy.uarray(np.sin(x), np.ones(len(x))*1e-2)
dudx = second_order_diff(u, x)
print(dudx[:5])
print(dudx[-5:])
if plot:
plt.errorbar(x, unumpy.nominal_values(dudx), yerr=unumpy.std_devs(dudx),
fmt="-o", lw=2, alpha=0.5)
plt.plot(x, np.cos(x), "--^", lw=2, alpha=0.5)
plt.show()
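# Added sanity sketch, in the spirit of the tests above (not part of the
# original suite): a second-order scheme should reproduce the derivative of a
# linear ramp essentially exactly.
def test_second_order_diff_linear():
    """`second_order_diff` of a linear function is its constant slope."""
    x = np.linspace(0.0, 10.0, 200)
    u = 3.0 * x + 1.0
    dudx = second_order_diff(u, x)
    np.testing.assert_allclose(dudx, 3.0 * np.ones_like(x), rtol=1e-8)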
| gpl-3.0 |
JT5D/scikit-learn | examples/linear_model/lasso_dense_vs_sparse_data.py | 13 | 1862 | """
==============================
Lasso on dense and sparse data
==============================
We show that linear_model.Lasso provides the same results for dense and sparse
data and that in the case of sparse data the speed is improved.
"""
print(__doc__)
from time import time
from scipy import sparse
from scipy import linalg
from sklearn.datasets.samples_generator import make_regression
from sklearn.linear_model import Lasso
###############################################################################
# The two Lasso implementations on Dense data
print("--- Dense matrices")
X, y = make_regression(n_samples=200, n_features=5000, random_state=0)
X_sp = sparse.coo_matrix(X)
alpha = 1
sparse_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=1000)
dense_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=1000)
t0 = time()
sparse_lasso.fit(X_sp, y)
print("Sparse Lasso done in %fs" % (time() - t0))
t0 = time()
dense_lasso.fit(X, y)
print("Dense Lasso done in %fs" % (time() - t0))
print("Distance between coefficients : %s"
% linalg.norm(sparse_lasso.coef_ - dense_lasso.coef_))
###############################################################################
# The two Lasso implementations on Sparse data
print("--- Sparse matrices")
Xs = X.copy()
Xs[Xs < 2.5] = 0.0
Xs = sparse.coo_matrix(Xs)
Xs = Xs.tocsc()
print("Matrix density : %s %%" % (Xs.nnz / float(X.size) * 100))
alpha = 0.1
sparse_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=10000)
dense_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=10000)
t0 = time()
sparse_lasso.fit(Xs, y)
print("Sparse Lasso done in %fs" % (time() - t0))
t0 = time()
dense_lasso.fit(Xs.todense(), y)
print("Dense Lasso done in %fs" % (time() - t0))
print("Distance between coefficients : %s"
% linalg.norm(sparse_lasso.coef_ - dense_lasso.coef_))
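###############################################################################
# Added sketch: both fits should not only be close in coefficient space but
# also equally sparse; counting non-zero coefficients makes that explicit.
print("Sparse Lasso non-zero coefficients: %d" % (sparse_lasso.coef_ != 0).sum())
print("Dense Lasso non-zero coefficients: %d" % (dense_lasso.coef_ != 0).sum())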
| bsd-3-clause |
loli/sklearn-ensembletrees | examples/plot_rfe_with_cross_validation.py | 24 | 1384 | """
===================================================
Recursive feature elimination with cross-validation
===================================================
A recursive feature elimination example with automatic tuning of the
number of features selected with cross-validation.
"""
print(__doc__)
from sklearn.svm import SVC
from sklearn.cross_validation import StratifiedKFold
from sklearn.feature_selection import RFECV
from sklearn.datasets import make_classification
# Build a classification task using 3 informative features
X, y = make_classification(n_samples=1000, n_features=25, n_informative=3,
n_redundant=2, n_repeated=0, n_classes=8,
n_clusters_per_class=1, random_state=0)
# Create the RFE object and compute a cross-validated score.
svc = SVC(kernel="linear")
# The "accuracy" scoring is proportional to the number of correct
# classifications
rfecv = RFECV(estimator=svc, step=1, cv=StratifiedKFold(y, 2),
scoring='accuracy')
rfecv.fit(X, y)
print("Optimal number of features : %d" % rfecv.n_features_)
# Plot number of features VS. cross-validation scores
import matplotlib.pyplot as plt
plt.figure()
plt.xlabel("Number of features selected")
plt.ylabel("Cross validation score (nb of correct classifications)")
plt.plot(range(1, len(rfecv.grid_scores_) + 1), rfecv.grid_scores_)
plt.show()
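# Added sketch (not in the original example): the fitted selector also exposes
# which features survived -- ``support_`` is a boolean mask over the columns
# and ``transform`` returns the reduced design matrix.
print("Selected feature mask : %s" % rfecv.support_)
print("Shape of reduced data : %s x %s" % rfecv.transform(X).shape)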
| bsd-3-clause |
jm-begon/scikit-learn | sklearn/cluster/spectral.py | 233 | 18153 | # -*- coding: utf-8 -*-
"""Algorithms for spectral clustering"""
# Author: Gael Varoquaux [email protected]
# Brian Cheung
# Wei LI <[email protected]>
# License: BSD 3 clause
import warnings
import numpy as np
from ..base import BaseEstimator, ClusterMixin
from ..utils import check_random_state, as_float_array
from ..utils.validation import check_array
from ..utils.extmath import norm
from ..metrics.pairwise import pairwise_kernels
from ..neighbors import kneighbors_graph
from ..manifold import spectral_embedding
from .k_means_ import k_means
def discretize(vectors, copy=True, max_svd_restarts=30, n_iter_max=20,
random_state=None):
"""Search for a partition matrix (clustering) which is closest to the
eigenvector embedding.
Parameters
----------
vectors : array-like, shape: (n_samples, n_clusters)
The embedding space of the samples.
copy : boolean, optional, default: True
Whether to copy vectors, or perform in-place normalization.
max_svd_restarts : int, optional, default: 30
Maximum number of attempts to restart SVD if convergence fails
    n_iter_max : int, optional, default: 20
Maximum number of iterations to attempt in rotation and partition
matrix search if machine precision convergence is not reached
random_state: int seed, RandomState instance, or None (default)
        A pseudo random number generator used for the initialization
        of the rotation matrix.
Returns
-------
labels : array of integers, shape: n_samples
The labels of the clusters.
References
----------
- Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
Notes
-----
The eigenvector embedding is used to iteratively search for the
closest discrete partition. First, the eigenvector embedding is
normalized to the space of partition matrices. An optimal discrete
partition matrix closest to this normalized embedding multiplied by
an initial rotation is calculated. Fixing this discrete partition
matrix, an optimal rotation matrix is calculated. These two
calculations are performed until convergence. The discrete partition
matrix is returned as the clustering solution. Used in spectral
clustering, this method tends to be faster and more robust to random
initialization than k-means.
"""
from scipy.sparse import csc_matrix
from scipy.linalg import LinAlgError
random_state = check_random_state(random_state)
vectors = as_float_array(vectors, copy=copy)
eps = np.finfo(float).eps
n_samples, n_components = vectors.shape
# Normalize the eigenvectors to an equal length of a vector of ones.
# Reorient the eigenvectors to point in the negative direction with respect
# to the first element. This may have to do with constraining the
# eigenvectors to lie in a specific quadrant to make the discretization
# search easier.
norm_ones = np.sqrt(n_samples)
for i in range(vectors.shape[1]):
vectors[:, i] = (vectors[:, i] / norm(vectors[:, i])) \
* norm_ones
if vectors[0, i] != 0:
vectors[:, i] = -1 * vectors[:, i] * np.sign(vectors[0, i])
# Normalize the rows of the eigenvectors. Samples should lie on the unit
# hypersphere centered at the origin. This transforms the samples in the
# embedding space to the space of partition matrices.
vectors = vectors / np.sqrt((vectors ** 2).sum(axis=1))[:, np.newaxis]
svd_restarts = 0
has_converged = False
# If there is an exception we try to randomize and rerun SVD again
# do this max_svd_restarts times.
while (svd_restarts < max_svd_restarts) and not has_converged:
# Initialize first column of rotation matrix with a row of the
# eigenvectors
rotation = np.zeros((n_components, n_components))
rotation[:, 0] = vectors[random_state.randint(n_samples), :].T
# To initialize the rest of the rotation matrix, find the rows
# of the eigenvectors that are as orthogonal to each other as
# possible
c = np.zeros(n_samples)
for j in range(1, n_components):
# Accumulate c to ensure row is as orthogonal as possible to
# previous picks as well as current one
c += np.abs(np.dot(vectors, rotation[:, j - 1]))
rotation[:, j] = vectors[c.argmin(), :].T
last_objective_value = 0.0
n_iter = 0
while not has_converged:
n_iter += 1
t_discrete = np.dot(vectors, rotation)
labels = t_discrete.argmax(axis=1)
vectors_discrete = csc_matrix(
(np.ones(len(labels)), (np.arange(0, n_samples), labels)),
shape=(n_samples, n_components))
t_svd = vectors_discrete.T * vectors
try:
                U, S, Vh = np.linalg.svd(t_svd)
            except LinAlgError:
                svd_restarts += 1
                print("SVD did not converge, randomizing and trying again")
break
ncut_value = 2.0 * (n_samples - S.sum())
if ((abs(ncut_value - last_objective_value) < eps) or
(n_iter > n_iter_max)):
has_converged = True
else:
# otherwise calculate rotation and continue
last_objective_value = ncut_value
rotation = np.dot(Vh.T, U.T)
if not has_converged:
raise LinAlgError('SVD did not converge')
return labels
def spectral_clustering(affinity, n_clusters=8, n_components=None,
eigen_solver=None, random_state=None, n_init=10,
eigen_tol=0.0, assign_labels='kmeans'):
"""Apply clustering to a projection to the normalized laplacian.
In practice Spectral Clustering is very useful when the structure of
the individual clusters is highly non-convex or more generally when
a measure of the center and spread of the cluster is not a suitable
description of the complete cluster. For instance when clusters are
nested circles on the 2D plan.
If affinity is the adjacency matrix of a graph, this method can be
used to find normalized graph cuts.
Read more in the :ref:`User Guide <spectral_clustering>`.
Parameters
-----------
affinity : array-like or sparse matrix, shape: (n_samples, n_samples)
The affinity matrix describing the relationship of the samples to
embed. **Must be symmetric**.
Possible examples:
- adjacency matrix of a graph,
- heat kernel of the pairwise distance matrix of the samples,
- symmetric k-nearest neighbours connectivity matrix of the samples.
n_clusters : integer, optional
Number of clusters to extract.
n_components : integer, optional, default is n_clusters
Number of eigen vectors to use for the spectral embedding
eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
The eigenvalue decomposition strategy to use. AMG requires pyamg
to be installed. It can be faster on very large, sparse problems,
but may also lead to instabilities
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used for the initialization
of the lobpcg eigen vectors decomposition when eigen_solver == 'amg'
and by the K-Means initialization.
n_init : int, optional, default: 10
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
eigen_tol : float, optional, default: 0.0
Stopping criterion for eigendecomposition of the Laplacian matrix
when using arpack eigen_solver.
assign_labels : {'kmeans', 'discretize'}, default: 'kmeans'
The strategy to use to assign labels in the embedding
space. There are two ways to assign labels after the laplacian
embedding. k-means can be applied and is a popular choice. But it can
also be sensitive to initialization. Discretization is another
approach which is less sensitive to random initialization. See
the 'Multiclass spectral clustering' paper referenced below for
more details on the discretization approach.
Returns
-------
labels : array of integers, shape: n_samples
The labels of the clusters.
References
----------
- Normalized cuts and image segmentation, 2000
Jianbo Shi, Jitendra Malik
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324
- A Tutorial on Spectral Clustering, 2007
Ulrike von Luxburg
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323
- Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
Notes
------
    The graph should contain only one connected component; otherwise
the results make little sense.
This algorithm solves the normalized cut for k=2: it is a
normalized spectral clustering.
"""
if assign_labels not in ('kmeans', 'discretize'):
raise ValueError("The 'assign_labels' parameter should be "
"'kmeans' or 'discretize', but '%s' was given"
% assign_labels)
random_state = check_random_state(random_state)
n_components = n_clusters if n_components is None else n_components
maps = spectral_embedding(affinity, n_components=n_components,
eigen_solver=eigen_solver,
random_state=random_state,
eigen_tol=eigen_tol, drop_first=False)
if assign_labels == 'kmeans':
_, labels, _ = k_means(maps, n_clusters, random_state=random_state,
n_init=n_init)
else:
labels = discretize(maps, random_state=random_state)
return labels
class SpectralClustering(BaseEstimator, ClusterMixin):
"""Apply clustering to a projection to the normalized laplacian.
In practice Spectral Clustering is very useful when the structure of
the individual clusters is highly non-convex or more generally when
a measure of the center and spread of the cluster is not a suitable
description of the complete cluster. For instance when clusters are
nested circles on the 2D plan.
If affinity is the adjacency matrix of a graph, this method can be
used to find normalized graph cuts.
When calling ``fit``, an affinity matrix is constructed using either
    a kernel function such as the Gaussian (aka RBF) kernel of the euclidean
    distance ``d(X, X)``::
np.exp(-gamma * d(X,X) ** 2)
or a k-nearest neighbors connectivity matrix.
Alternatively, using ``precomputed``, a user-provided affinity
matrix can be used.
Read more in the :ref:`User Guide <spectral_clustering>`.
Parameters
-----------
n_clusters : integer, optional
The dimension of the projection subspace.
affinity : string, array-like or callable, default 'rbf'
If a string, this may be one of 'nearest_neighbors', 'precomputed',
'rbf' or one of the kernels supported by
`sklearn.metrics.pairwise_kernels`.
Only kernels that produce similarity scores (non-negative values that
increase with similarity) should be used. This property is not checked
by the clustering algorithm.
gamma : float
Scaling factor of RBF, polynomial, exponential chi^2 and
sigmoid affinity kernel. Ignored for
``affinity='nearest_neighbors'``.
degree : float, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
n_neighbors : integer
Number of neighbors to use when constructing the affinity matrix using
the nearest neighbors method. Ignored for ``affinity='rbf'``.
eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
The eigenvalue decomposition strategy to use. AMG requires pyamg
to be installed. It can be faster on very large, sparse problems,
but may also lead to instabilities
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used for the initialization
of the lobpcg eigen vectors decomposition when eigen_solver == 'amg'
and by the K-Means initialization.
n_init : int, optional, default: 10
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
eigen_tol : float, optional, default: 0.0
Stopping criterion for eigendecomposition of the Laplacian matrix
when using arpack eigen_solver.
assign_labels : {'kmeans', 'discretize'}, default: 'kmeans'
The strategy to use to assign labels in the embedding
space. There are two ways to assign labels after the laplacian
embedding. k-means can be applied and is a popular choice. But it can
also be sensitive to initialization. Discretization is another approach
which is less sensitive to random initialization.
kernel_params : dictionary of string to any, optional
Parameters (keyword arguments) and values for kernel passed as
callable object. Ignored by other kernels.
Attributes
----------
affinity_matrix_ : array-like, shape (n_samples, n_samples)
        Affinity matrix used for clustering. Available only after calling
``fit``.
labels_ :
Labels of each point
Notes
-----
If you have an affinity matrix, such as a distance matrix,
for which 0 means identical elements, and high values means
very dissimilar elements, it can be transformed in a
similarity matrix that is well suited for the algorithm by
applying the Gaussian (RBF, heat) kernel::
np.exp(- X ** 2 / (2. * delta ** 2))
Another alternative is to take a symmetric version of the k
nearest neighbors connectivity matrix of the points.
If the pyamg package is installed, it is used: this greatly
speeds up computation.
References
----------
- Normalized cuts and image segmentation, 2000
Jianbo Shi, Jitendra Malik
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324
- A Tutorial on Spectral Clustering, 2007
Ulrike von Luxburg
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323
- Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
"""
def __init__(self, n_clusters=8, eigen_solver=None, random_state=None,
n_init=10, gamma=1., affinity='rbf', n_neighbors=10,
eigen_tol=0.0, assign_labels='kmeans', degree=3, coef0=1,
kernel_params=None):
self.n_clusters = n_clusters
self.eigen_solver = eigen_solver
self.random_state = random_state
self.n_init = n_init
self.gamma = gamma
self.affinity = affinity
self.n_neighbors = n_neighbors
self.eigen_tol = eigen_tol
self.assign_labels = assign_labels
self.degree = degree
self.coef0 = coef0
self.kernel_params = kernel_params
def fit(self, X, y=None):
"""Creates an affinity matrix for X using the selected affinity,
then applies spectral clustering to this affinity matrix.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
OR, if affinity==`precomputed`, a precomputed affinity
matrix of shape (n_samples, n_samples)
"""
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'],
dtype=np.float64)
if X.shape[0] == X.shape[1] and self.affinity != "precomputed":
warnings.warn("The spectral clustering API has changed. ``fit``"
"now constructs an affinity matrix from data. To use"
" a custom affinity matrix, "
"set ``affinity=precomputed``.")
if self.affinity == 'nearest_neighbors':
            connectivity = kneighbors_graph(X, n_neighbors=self.n_neighbors,
                                            include_self=True)
self.affinity_matrix_ = 0.5 * (connectivity + connectivity.T)
elif self.affinity == 'precomputed':
self.affinity_matrix_ = X
else:
params = self.kernel_params
if params is None:
params = {}
if not callable(self.affinity):
params['gamma'] = self.gamma
params['degree'] = self.degree
params['coef0'] = self.coef0
self.affinity_matrix_ = pairwise_kernels(X, metric=self.affinity,
filter_params=True,
**params)
random_state = check_random_state(self.random_state)
self.labels_ = spectral_clustering(self.affinity_matrix_,
n_clusters=self.n_clusters,
eigen_solver=self.eigen_solver,
random_state=random_state,
n_init=self.n_init,
eigen_tol=self.eigen_tol,
assign_labels=self.assign_labels)
return self
@property
def _pairwise(self):
return self.affinity == "precomputed"
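# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module); it only runs
# when the file is executed directly.  Two concentric circles are the classic
# case where spectral clustering succeeds while k-means on raw coordinates
# fails.
if __name__ == "__main__":
    from sklearn.datasets import make_circles

    X_demo, _ = make_circles(n_samples=400, factor=0.3, noise=0.05,
                             random_state=0)
    model = SpectralClustering(n_clusters=2, affinity="nearest_neighbors",
                               n_neighbors=10, assign_labels="discretize",
                               random_state=0)
    demo_labels = model.fit(X_demo).labels_
    print("Cluster sizes: %s" % np.bincount(demo_labels))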
| bsd-3-clause |
edfall/vnpy | vn.datayes/storage.py | 29 | 18623 | import os
import json
import pymongo
import pandas as pd
from datetime import datetime, timedelta
from api import Config, PyApi
from api import BaseDataContainer, History, Bar
from errors import (VNPAST_ConfigError, VNPAST_RequestError,
VNPAST_DataConstructorError, VNPAST_DatabaseError)
class DBConfig(Config):
"""
Json-like config object; inherits from Config()
Contains all kinds of settings relating to database settings.
privates
--------
Inherited from api.Config, plus:
* client: pymongo.MongoClient object, the connection
that is to be used for this session.
* body: dictionary; the main content of config.
- client: pymongo.MongoClient(), refers to self.client.
- dbs: dictionary, is a mapping from database alias
		to another dictionary, which includes configurations
and themselves(i.e. pymongo.database entity)
Concretely, dbs has the structure like:
{
alias1 : {
'self': client[dbName1],
'index': dbIndex1,
'collNames': collectionNameType1
},
alias2 : {
'self': client[dbName2],
'index': dbIndex2,
'collNames': collectionNameType2
}, ...
}
where alias#: string;
dbs.alias#.self: pymongo.database;
dbs.alias#.index: string;
dbs.alias#.collNames: string;
- dbNames: list; a list of database alias.
"""
head = 'DB config'
client = pymongo.MongoClient()
body = {
'client': client,
'dbs': {
'EQU_M1': {
'self': client['DATAYES_EQUITY_M1'],
'index': 'dateTime',
'collNames': 'secID'
},
'EQU_D1': {
'self': client['DATAYES_EQUITY_D1'],
'index': 'date',
'collNames': 'equTicker'
},
'FUT_D1': {
'self': client['DATAYES_FUTURE_D1'],
'index': 'date',
'collNames': 'futTicker'
},
'OPT_D1': {
'self': client['DATAYES_OPTION_D1'],
'index': 'date',
'collNames': 'optTicker'
},
'FUD_D1': {
'self': client['DATAYES_FUND_D1'],
'index': 'date',
'collNames': 'fudTicker'
},
'IDX_D1': {
'self': client['DATAYES_INDEX_D1'],
'index': 'date',
'collNames': 'idxTicker'
}
},
'dbNames': ['EQU_M1', 'EQU_D1', 'FUT_D1',
'OPT_D1', 'FUD_D1', 'IDX_D1']
}
def __init__(self, head=None, token=None, body=None):
"""
Inherited constructor.
parameters
----------
* head: string; the name of config file. Default is None.
* token: string; user's token.
* body: dictionary; the main content of config
"""
super(DBConfig, self).__init__(head, token, body)
def view(self):
""" Reloaded Prettify printing method. """
config_view = {
'dbConfig_head' : self.head,
'dbConfig_body' : str(self.body),
}
print json.dumps(config_view,
indent=4,
sort_keys=True)
#----------------------------------------------------------------------
# MongoDB Controller class
class MongodController(object):
"""
The MongoDB controller interface.
MongodController is initialized with a DBConfig configuration
	object and a PyApi object, which has already been constructed with
	its own Config json. The default version of the constructor actually
	does nothing special about the database. Yet if the user executes the shell
	script prepare.sh to prepare the connection, MongodController will
	first gather the symbols that are going to become collection names
	in the corresponding databases. This is done one database after another;
	the user can skip unneeded databases by editing the scripts.
Then, it ensures the index of each collection due to the 'index' value
in DBConfig.body.dbs. Concretely, for D1 bars, the index will be 'date',
and for intraday bars, it will be 'dateTime'; both take the form of
datetime.datetime timestamp.
download() and update() methods of controller dynamically construct
and maintain the databases, requesting data via PyApi. Once the database
is constructed, MongodController can access required data via its fetch()
method.
privates
--------
* _config: DBConfig object; a container of all useful settings for the
databases.
* _api: PyApi object; is responsible for making requests.
* _client: pymongo.MongoClient object; the connection to MongoDB.
* _dbs: dictionary; a mapping from database names to another dictionary,
which includes configurations of the database and the pymongo.database
	entity. Inherited from _config.body['dbs']. Note that the keys of
	self._dbs are mere strings; only self._dbs[key]['self'] refers to the
pymongo.Database object.
* _dbNames: list; a list of names of databases.
* _collNames: dictionary; mapping from self._db[key]['collNames'] attribute
to the names of collections(i.e. tickers) within.
- example: _collNames['equTicker'] = ['000001', '000002', ...]
* _connected: boolean; whether the MongoClient was connected to or not.
* _mapTickersToSecIDs: dictionary; mapping from stock tickers to
its security ID.
example
-------
>> myApi = PyApi(Config())
>> mydbs = DBConfig()
>> controller = MongodController(mydbs, myApi)
>> controller._get_coll_names()
>> controller._ensure_index()
>> controller.download_equity_D1(20130101, 20150801)
>> controller.update_equity_D1()
"""
_config = DBConfig()
_api = None
_client = None
_dbs = None
_dbNames = []
_collNames = dict()
_connected = False
_mapTickersToSecIDs = dict()
def __init__(self, config, api):
"""
Constructor.
parameters
----------
* config: DBConfig object; specifies database configs.
* api: PyApi object.
"""
self._api = api # Set Datayes PyApi.
if config.body:
try:
self._config = config.body
self._client = config.body['client']
self._dbs = config.body['dbs']
self._dbNames = config.body['dbNames']
self._connected = True
except KeyError:
msg = '[MONGOD]: Unable to configure database; ' + \
'config file is incomplete.'
raise VNPAST_ConfigError(msg)
except Exception,e:
msg = '[MONGOD]: Unable to configure database; ' + str(e)
raise VNPAST_ConfigError(msg)
if self._connected:
#self._get_coll_names()
#self._ensure_index()
pass
def view(self):
"""
NOT IMPLEMENTED
"""
return
#----------------------------------------------------------------------
# Get collection names methods.
"""
Decorator;
	Targeting path dName: if the file exists, read data from it;
	if not, execute the wrapped getter, which returns json-like data,
	and store the data at the dName path.
parameters
----------
* dName: string; the specific path of file that __md looks at.
"""
def __md(dName):
def _md(get):
def handle(*args, **kwargs):
try:
if os.path.isfile(dName):
# if directory exists, read from it.
jsonFile = open(dName,'r')
data = json.loads(jsonFile.read())
jsonFile.close()
else:
# if not, get data via *get method,
# then write to the file.
data = get(*args, **kwargs)
jsonFile = open(dName, 'w+')
jsonFile.write(json.dumps(data))
jsonFile.close()
#print data
return data
except Exception,e:
raise e
return handle
return _md
@__md('names/equTicker.json')
def _allEquTickers(self):
"""get all equity tickers, decorated by @__md()."""
data = self._api.get_equity_D1()
allEquTickers = list(data.body['ticker'])
return allEquTickers
@__md('names/secID.json')
def _allSecIds(self):
"""get all security IDs, decorated by @__md()."""
data = self._api.get_equity_D1()
allTickers = list(data.body['ticker'])
exchangeCDs = list(data.body['exchangeCD'])
allSecIds = [allTickers[k]+'.'+exchangeCDs[k] for k in range(
len(allTickers))]
return allSecIds
@__md('names/futTicker.json')
def _allFutTickers(self):
"""get all future tickers, decorated by @__md()."""
data = self._api.get_future_D1()
allFutTickers = list(data.body['ticker'])
return allFutTickers
@__md('names/optTicker.json')
def _allOptTickers(self):
"""get all option tickers, decorated by @__md()."""
data = self._api.get_option_D1()
allOptTickers = list(data.body['ticker'])
return allOptTickers
@__md('names/fudTicker.json')
def _allFudTickers(self):
"""get all fund tickers, decorated by @__md()."""
data = self._api.get_fund_D1()
allFudTickers = list(data.body['ticker'])
return allFudTickers
@__md('names/idxTicker.json')
def _allIdxTickers(self):
"""get all index tickers, decorated by @__md()."""
data = self._api.get_index_D1()
allIdxTickers = list(data.body['ticker'])
return allIdxTickers
@__md('names/bndTicker.json')
def _allBndTickers(self):
"""get all bond tickers, decorated by @__md()."""
data = self._api.get_bond_D1()
allBndTickers = list(data.body['ticker'])
return allBndTickers
def _get_coll_names(self):
"""
		get all instruments' names and store them in self._collNames.
"""
try:
if not os.path.exists('names'):
os.makedirs('names')
self._collNames['equTicker'] = self._allEquTickers()
self._collNames['fudTicker'] = self._allFudTickers()
self._collNames['secID'] = self._allSecIds()
self._collNames['futTicker'] = self._allFutTickers()
self._collNames['optTicker'] = self._allOptTickers()
self._collNames['idxTicker'] = self._allIdxTickers()
print '[MONGOD]: Collection names gotten.'
return 1
except AssertionError:
warning = '[MONGOD]: Warning, collection names ' + \
'is an empty list.'
print warning
except Exception, e:
msg = '[MONGOD]: Unable to set collection names; ' + \
str(e)
raise VNPAST_DatabaseError(msg)
#----------------------------------------------------------------------
# Ensure collection index method.
def _ensure_index(self):
"""
Ensure indices for all databases and collections.
first access self._dbs config to get index column names;
then get collection names from self._collNames and loop
over all collections.
"""
if self._collNames and self._dbs:
try:
for dbName in self._dbs:
# Iterate over database configurations.
db = self._dbs[dbName]
dbSelf = db['self']
index = db['index']
collNames = self._collNames[db['collNames']]
# db['self'] is the pymongo.Database object.
for name in collNames:
coll = dbSelf[name]
coll.ensure_index([(index,
pymongo.DESCENDING)], unique=True)
print '[MONGOD]: MongoDB index set.'
return 1
except KeyError:
msg = '[MONGOD]: Unable to set collection indices; ' + \
'infomation in Config.body["dbs"] is incomplete.'
raise VNPAST_DatabaseError(msg)
except Exception, e:
msg = '[MONGOD]: Unable to set collection indices; ' + str(e)
raise VNPAST_DatabaseError(msg)
#----------------------------------------------------------------------
# Download method.
def download_equity_D1(self, start, end, sessionNum=30):
"""
"""
try:
db = self._dbs['EQU_D1']['self']
self._api.get_equity_D1_mongod(db, start, end, sessionNum)
except Exception, e:
msg = '[MONGOD]: Unable to download data; ' + str(e)
raise VNPAST_DatabaseError(msg)
def download_equity_M1(self, tasks, startYr=2012, endYr=2015):
"""
"""
try:
# map equity tickers to security IDs.
if self._mapTickersToSecIDs:
maps = self._mapTickersToSecIDs
else:
				assert os.path.isfile('./names/secID.json')
				jsonFile = open('./names/secID.json', 'r')
allSecIds = json.loads(jsonFile.read())
jsonFile.close()
allTickers = [s.split('.')[0] for s in allSecIds]
maps = dict(zip(allTickers, allSecIds))
self._mapTickersToSecIDs = maps
tasks_ = [maps[task] for task in tasks]
db = self._dbs['EQU_M1']['self']
self._api.get_equity_M1_interMonth(db, id=1,
startYr = startYr,
endYr = endYr,
tasks = tasks_)
except AssertionError:
msg = '[MONGOD]: Cannot map tickers to secIDs; ' + \
'secID.json does not exist.'
raise VNPAST_DatabaseError(msg)
except Exception, e:
msg = '[MONGOD]: Unable to download data; ' + str(e)
raise VNPAST_DatabaseError(msg)
def download_bond_D1(self, start, end, sessionNum=30):
"""
"""
pass
def download_future_D1(self, start, end, sessionNum=30):
"""
"""
try:
db = self._dbs['FUT_D1']['self']
self._api.get_future_D1_mongod(db, start, end, sessionNum)
except Exception, e:
msg = '[MONGOD]: Unable to download data; ' + str(e)
raise VNPAST_DatabaseError(msg)
def download_option_D1(self, start, end, sessionNum=30):
"""
"""
try:
db = self._dbs['OPT_D1']['self']
self._api.get_option_D1_mongod(db, start, end, sessionNum)
except Exception, e:
msg = '[MONGOD]: Unable to download data; ' + str(e)
raise VNPAST_DatabaseError(msg)
def download_index_D1(self, start, end, sessionNum=30):
"""
"""
try:
db = self._dbs['IDX_D1']['self']
self._api.get_index_D1_mongod(db, start, end, sessionNum)
except Exception, e:
msg = '[MONGOD]: Unable to download data; ' + str(e)
raise VNPAST_DatabaseError(msg)
def download_fund_D1(self, start, end, sessionNum=30):
"""
"""
try:
db = self._dbs['FUD_D1']['self']
self._api.get_fund_D1_mongod(db, start, end, sessionNum)
except Exception, e:
msg = '[MONGOD]: Unable to download data; ' + str(e)
raise VNPAST_DatabaseError(msg)
#----------------------------------------------------------------------
# Update methods.
def __update(self, key, target1, target2, sessionNum):
"""
Basic update method.
Looks into the database specified by 'key', find the latest
record in the collection of it. Then update the collections
till last trading date.
parameters
----------
* key: string; a database alias (refer to the database config)
e.g., 'EQU_D1'.
* target1: method; pointer to the function with which controller
obtain all tickers in the database. Concretely, target1 are
self._all#Tickers methods.
* target2: method; pointer to the api overlord requesting functions
i.e. self._api.get_###_mongod methods.
* sessionNum: integer; the number of threads.
"""
try:
# get databases and tickers
db = self._dbs[key]['self']
index = self._dbs[key]['index']
allTickers = target1()
coll = db[allTickers[0]]
# find the latest timestamp in collection.
latest = coll.find_one(
sort=[(index, pymongo.DESCENDING)])[index]
start = datetime.strftime(
latest + timedelta(days=1),'%Y%m%d')
end = datetime.strftime(datetime.now(), '%Y%m%d')
# then download.
target2(db, start, end, sessionNum)
return db
except Exception, e:
msg = '[MONGOD]: Unable to update data; ' + str(e)
raise VNPAST_DatabaseError(msg)
def update_equity_D1(self, sessionNum=30):
"""
"""
db = self.__update(key = 'EQU_D1',
target1 = self._allEquTickers,
target2 = self._api.get_equity_D1_mongod,
sessionNum = sessionNum)
return db
def update_future_D1(self, sessionNum=30):
"""
"""
db = self.__update(key = 'FUT_D1',
target1 = self._allFutTickers,
target2 = self._api.get_future_D1_mongod,
sessionNum = sessionNum)
return db
def update_option_D1(self, sessionNum=30):
"""
"""
db = self.__update(key = 'OPT_D1',
target1 = self._allOptTickers,
target2 = self._api.get_option_D1_mongod,
sessionNum = sessionNum)
return db
def update_index_D1(self, sessionNum=30):
"""
"""
db = self.__update(key = 'IDX_D1',
target1 = self._allIdxTickers,
target2 = self._api.get_index_D1_mongod,
sessionNum = sessionNum)
return db
def update_fund_D1(self, sessionNum=30):
"""
"""
db = self.__update(key = 'FUD_D1',
target1 = self._allFudTickers,
target2 = self._api.get_fund_D1_mongod,
sessionNum = sessionNum)
return db
#----------------------------------------------------------------------#
# stuff that will be deprecated
def update_equity_D1_(self, sessionNum=30):
"""
"""
try:
# set databases and tickers
db = self._dbs['EQU_D1']['self']
index = self._dbs['EQU_D1']['index']
allEquTickers = self._allEquTickers()
coll = db[allEquTickers[0]]
# find the latest timestamp in collection.
latest = coll.find_one(
sort=[(index, pymongo.DESCENDING)])[index]
start = datetime.strftime(latest + timedelta(days=1),'%Y%m%d')
end = datetime.strftime(datetime.now(), '%Y%m%d')
# then download.
self._api.get_equity_D1_mongod(db, start, end, sessionNum)
except Exception, e:
msg = '[MONGOD]: Unable to update data; ' + str(e)
raise VNPAST_DatabaseError(msg)
def update_equity_M1(self):
"""
"""
pass
#----------------------------------------------------------------------
# Fetch method.
def fetch(self, dbName, ticker, start, end, output='list'):
"""
"""
# check inputs' validity.
if output not in ['df', 'list', 'json']:
raise ValueError('[MONGOD]: Unsupported output type.')
if dbName not in self._dbNames:
raise ValueError('[MONGOD]: Unable to locate database name.')
db = self._dbs[dbName]
dbSelf = db['self']
dbIndex = db['index']
try:
			coll = dbSelf[ticker]
if len(start)==8 and len(end)==8:
# yyyymmdd, len()=8
start = datetime.strptime(start, '%Y%m%d')
end = datetime.strptime(end, '%Y%m%d')
elif len(start)==14 and len(end)==14:
# yyyymmdd HH:MM, len()=14
start = datetime.strptime(start, '%Y%m%d %H:%M')
end = datetime.strptime(end, '%Y%m%d %H:%M')
else:
pass
docs = []
# find in MongoDB.
for doc in coll.find(filter={dbIndex: {'$lte': end,
'$gte': start}}, projection={'_id': False}):
docs.append(doc)
if output == 'list':
return docs[::-1]
except Exception, e:
msg = '[MONGOD]: Error encountered when fetching data' + \
'from MongoDB; '+ str(e)
return -1
if __name__ == '__main__':
dc = DBConfig()
api = PyApi(Config())
mc = MongodController(dc, api)
mc.update_index_D1()
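# Hypothetical follow-up sketch (added for illustration): once the daily index
# database has been built or updated as above, records are read back through
# fetch().  The ticker and date range below are made up.
if __name__ == '__main__':
	bars = mc.fetch('IDX_D1', '000001', '20150101', '20150201', output='list')
	if bars != -1 and bars:
		print 'First fetched record: ' + str(bars[0])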
| mit |
tomlof/scikit-learn | examples/manifold/plot_lle_digits.py | 138 | 8594 | """
=============================================================================
Manifold learning on handwritten digits: Locally Linear Embedding, Isomap...
=============================================================================
An illustration of various embeddings on the digits dataset.
The RandomTreesEmbedding, from the :mod:`sklearn.ensemble` module, is not
technically a manifold embedding method, as it learns a high-dimensional
representation on which we apply a dimensionality reduction method.
However, it is often useful to cast a dataset into a representation in
which the classes are linearly-separable.
t-SNE will be initialized with the embedding that is generated by PCA in
this example, which is not the default setting. It ensures global stability
of the embedding, i.e., the embedding does not depend on random
initialization.
"""
# Authors: Fabian Pedregosa <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Gael Varoquaux
# License: BSD 3 clause (C) INRIA 2011
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import offsetbox
from sklearn import (manifold, datasets, decomposition, ensemble,
discriminant_analysis, random_projection)
digits = datasets.load_digits(n_class=6)
X = digits.data
y = digits.target
n_samples, n_features = X.shape
n_neighbors = 30
#----------------------------------------------------------------------
# Scale and visualize the embedding vectors
def plot_embedding(X, title=None):
x_min, x_max = np.min(X, 0), np.max(X, 0)
X = (X - x_min) / (x_max - x_min)
plt.figure()
ax = plt.subplot(111)
for i in range(X.shape[0]):
plt.text(X[i, 0], X[i, 1], str(digits.target[i]),
color=plt.cm.Set1(y[i] / 10.),
fontdict={'weight': 'bold', 'size': 9})
if hasattr(offsetbox, 'AnnotationBbox'):
# only print thumbnails with matplotlib > 1.0
shown_images = np.array([[1., 1.]]) # just something big
for i in range(digits.data.shape[0]):
dist = np.sum((X[i] - shown_images) ** 2, 1)
if np.min(dist) < 4e-3:
# don't show points that are too close
continue
shown_images = np.r_[shown_images, [X[i]]]
imagebox = offsetbox.AnnotationBbox(
offsetbox.OffsetImage(digits.images[i], cmap=plt.cm.gray_r),
X[i])
ax.add_artist(imagebox)
plt.xticks([]), plt.yticks([])
if title is not None:
plt.title(title)
#----------------------------------------------------------------------
# Plot images of the digits
n_img_per_row = 20
img = np.zeros((10 * n_img_per_row, 10 * n_img_per_row))
for i in range(n_img_per_row):
ix = 10 * i + 1
for j in range(n_img_per_row):
iy = 10 * j + 1
img[ix:ix + 8, iy:iy + 8] = X[i * n_img_per_row + j].reshape((8, 8))
plt.imshow(img, cmap=plt.cm.binary)
plt.xticks([])
plt.yticks([])
plt.title('A selection from the 64-dimensional digits dataset')
#----------------------------------------------------------------------
# Random 2D projection using a random unitary matrix
print("Computing random projection")
rp = random_projection.SparseRandomProjection(n_components=2, random_state=42)
X_projected = rp.fit_transform(X)
plot_embedding(X_projected, "Random Projection of the digits")
#----------------------------------------------------------------------
# Projection on to the first 2 principal components
print("Computing PCA projection")
t0 = time()
X_pca = decomposition.TruncatedSVD(n_components=2).fit_transform(X)
plot_embedding(X_pca,
"Principal Components projection of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Projection on to the first 2 linear discriminant components
print("Computing Linear Discriminant Analysis projection")
X2 = X.copy()
X2.flat[::X.shape[1] + 1] += 0.01 # Make X invertible
t0 = time()
X_lda = discriminant_analysis.LinearDiscriminantAnalysis(n_components=2).fit_transform(X2, y)
plot_embedding(X_lda,
"Linear Discriminant projection of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Isomap projection of the digits dataset
print("Computing Isomap embedding")
t0 = time()
X_iso = manifold.Isomap(n_neighbors, n_components=2).fit_transform(X)
print("Done.")
plot_embedding(X_iso,
"Isomap projection of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Locally linear embedding of the digits dataset
print("Computing LLE embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='standard')
t0 = time()
X_lle = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_lle,
"Locally Linear Embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Modified Locally linear embedding of the digits dataset
print("Computing modified LLE embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='modified')
t0 = time()
X_mlle = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_mlle,
"Modified Locally Linear Embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# HLLE embedding of the digits dataset
print("Computing Hessian LLE embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='hessian')
t0 = time()
X_hlle = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_hlle,
"Hessian Locally Linear Embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# LTSA embedding of the digits dataset
print("Computing LTSA embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='ltsa')
t0 = time()
X_ltsa = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_ltsa,
"Local Tangent Space Alignment of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# MDS embedding of the digits dataset
print("Computing MDS embedding")
clf = manifold.MDS(n_components=2, n_init=1, max_iter=100)
t0 = time()
X_mds = clf.fit_transform(X)
print("Done. Stress: %f" % clf.stress_)
plot_embedding(X_mds,
"MDS embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Random Trees embedding of the digits dataset
print("Computing Totally Random Trees embedding")
hasher = ensemble.RandomTreesEmbedding(n_estimators=200, random_state=0,
max_depth=5)
t0 = time()
X_transformed = hasher.fit_transform(X)
pca = decomposition.TruncatedSVD(n_components=2)
X_reduced = pca.fit_transform(X_transformed)
plot_embedding(X_reduced,
"Random forest embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Spectral embedding of the digits dataset
print("Computing Spectral embedding")
embedder = manifold.SpectralEmbedding(n_components=2, random_state=0,
eigen_solver="arpack")
t0 = time()
X_se = embedder.fit_transform(X)
plot_embedding(X_se,
"Spectral embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# t-SNE embedding of the digits dataset
print("Computing t-SNE embedding")
tsne = manifold.TSNE(n_components=2, init='pca', random_state=0)
t0 = time()
X_tsne = tsne.fit_transform(X)
plot_embedding(X_tsne,
"t-SNE embedding of the digits (time %.2fs)" %
(time() - t0))
plt.show()
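#----------------------------------------------------------------------
# Added sketch: a rough quantitative complement to the visual comparison
# above -- run k-means on a few of the 2D embeddings and score the recovered
# clusters against the true digit labels (illustrative only).
from sklearn.cluster import KMeans
from sklearn.metrics import adjusted_rand_score
for name, embedding in [("PCA", X_pca), ("Isomap", X_iso), ("t-SNE", X_tsne)]:
    km = KMeans(n_clusters=6, random_state=0).fit(embedding)
    print("Adjusted Rand index of k-means on %s embedding: %.3f"
          % (name, adjusted_rand_score(y, km.labels_)))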
| bsd-3-clause |
craigcitro/pydatalab | solutionbox/code_free_ml/test_mltoolbox/test_transform.py | 2 | 8503 | from __future__ import absolute_import
from __future__ import print_function
import json
import os
import pandas as pd
from PIL import Image
import shutil
from six.moves.urllib.request import urlopen
import subprocess
import tempfile
import unittest
import uuid
import tensorflow as tf
from tensorflow.python.lib.io import file_io
import google.datalab as dl
import google.datalab.bigquery as bq
import google.datalab.storage as storage
CODE_PATH = os.path.abspath(os.path.join(
os.path.dirname(__file__), '..', 'mltoolbox', 'code_free_ml'))
# Some tests put files in GCS or use BigQuery. If HAS_CREDENTIALS is false,
# those tests will not run.
HAS_CREDENTIALS = True
try:
dl.Context.default().project_id
except Exception:
HAS_CREDENTIALS = False
class TestTransformRawData(unittest.TestCase):
"""Tests for applying a saved model"""
@classmethod
def setUpClass(cls):
# Set up dirs.
cls.working_dir = tempfile.mkdtemp()
cls.source_dir = os.path.join(cls.working_dir, 'source')
cls.analysis_dir = os.path.join(cls.working_dir, 'analysis')
cls.output_dir = os.path.join(cls.working_dir, 'output')
file_io.create_dir(cls.source_dir)
# Make test image files.
img1_file = os.path.join(cls.source_dir, 'img1.jpg')
image1 = Image.new('RGBA', size=(300, 300), color=(155, 0, 0))
image1.save(img1_file)
img2_file = os.path.join(cls.source_dir, 'img2.jpg')
image2 = Image.new('RGBA', size=(50, 50), color=(125, 240, 0))
image2.save(img2_file)
img3_file = os.path.join(cls.source_dir, 'img3.jpg')
image3 = Image.new('RGBA', size=(800, 600), color=(33, 55, 77))
image3.save(img3_file)
# Download inception checkpoint. Note that gs url doesn't work because
# we may not have gcloud signed in when running the test.
url = ('https://storage.googleapis.com/cloud-ml-data/img/' +
'flower_photos/inception_v3_2016_08_28.ckpt')
checkpoint_path = os.path.join(cls.working_dir, "checkpoint")
response = urlopen(url)
with open(checkpoint_path, 'wb') as f:
f.write(response.read())
# Make csv input file
cls.csv_input_filepath = os.path.join(cls.source_dir, 'input.csv')
file_io.write_string_to_file(
cls.csv_input_filepath,
'1,1,Monday,23.0,%s\n' % img1_file +
'2,0,Friday,18.0,%s\n' % img2_file +
'3,0,Sunday,12.0,%s\n' % img3_file)
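    # CSV columns are key_col, target_col, cat_col, num_col, img_col,
    # matching the schema defined just below.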
# Call analyze.py to create analysis results.
schema = [{'name': 'key_col', 'type': 'INTEGER'},
{'name': 'target_col', 'type': 'FLOAT'},
{'name': 'cat_col', 'type': 'STRING'},
{'name': 'num_col', 'type': 'FLOAT'},
{'name': 'img_col', 'type': 'STRING'}]
schema_file = os.path.join(cls.source_dir, 'schema.json')
file_io.write_string_to_file(schema_file, json.dumps(schema))
features = {'key_col': {'transform': 'key'},
'target_col': {'transform': 'target'},
'cat_col': {'transform': 'one_hot'},
'num_col': {'transform': 'identity'},
'img_col': {'transform': 'image_to_vec', 'checkpoint': checkpoint_path}}
features_file = os.path.join(cls.source_dir, 'features.json')
file_io.write_string_to_file(features_file, json.dumps(features))
cmd = ['python ' + os.path.join(CODE_PATH, 'analyze.py'),
'--output=' + cls.analysis_dir,
'--csv=' + cls.csv_input_filepath,
'--schema=' + schema_file,
'--features=' + features_file]
subprocess.check_call(' '.join(cmd), shell=True)
@classmethod
def tearDownClass(cls):
shutil.rmtree(cls.working_dir)
def test_local_csv_transform(self):
"""Test transfrom from local csv files."""
cmd = ['python ' + os.path.join(CODE_PATH, 'transform.py'),
'--csv=' + self.csv_input_filepath,
'--analysis=' + self.analysis_dir,
'--prefix=features',
'--output=' + self.output_dir]
print('cmd ', ' '.join(cmd))
subprocess.check_call(' '.join(cmd), shell=True)
# Read the tf record file. There should only be one file.
record_filepath = os.path.join(self.output_dir,
'features-00000-of-00001.tfrecord.gz')
options = tf.python_io.TFRecordOptions(
compression_type=tf.python_io.TFRecordCompressionType.GZIP)
serialized_examples = list(tf.python_io.tf_record_iterator(record_filepath, options=options))
self.assertEqual(len(serialized_examples), 3)
# Find the example with key=1 in the file.
first_example = None
for ex in serialized_examples:
example = tf.train.Example()
example.ParseFromString(ex)
if example.features.feature['key_col'].int64_list.value[0] == 1:
first_example = example
self.assertIsNotNone(first_example)
transformed_number = first_example.features.feature['num_col'].float_list.value[0]
self.assertAlmostEqual(transformed_number, 23.0)
# transformed category = row number in the vocab file.
transformed_category = first_example.features.feature['cat_col'].int64_list.value[0]
vocab = pd.read_csv(
os.path.join(self.analysis_dir, 'vocab_cat_col.csv'),
header=None,
names=['label', 'count'],
dtype=str)
    original_category = vocab.iloc[transformed_category]['label']
    self.assertEqual(original_category, 'Monday')
image_bytes = first_example.features.feature['img_col'].float_list.value
self.assertEqual(len(image_bytes), 2048)
self.assertTrue(any(x != 0.0 for x in image_bytes))
@unittest.skipIf(not HAS_CREDENTIALS, 'GCS access missing')
def test_local_bigquery_transform(self):
"""Test transfrom locally, but the data comes from bigquery."""
# Make a BQ table, and insert 1 row.
try:
bucket_name = 'temp_pydatalab_test_%s' % uuid.uuid4().hex
bucket_root = 'gs://%s' % bucket_name
bucket = storage.Bucket(bucket_name)
bucket.create()
project_id = dl.Context.default().project_id
dataset_name = 'test_transform_raw_data_%s' % uuid.uuid4().hex
table_name = 'tmp_table'
dataset = bq.Dataset((project_id, dataset_name)).create()
table = bq.Table((project_id, dataset_name, table_name))
table.create([{'name': 'key_col', 'type': 'INTEGER'},
{'name': 'target_col', 'type': 'FLOAT'},
{'name': 'cat_col', 'type': 'STRING'},
{'name': 'num_col', 'type': 'FLOAT'},
{'name': 'img_col', 'type': 'STRING'}])
img1_file = os.path.join(self.source_dir, 'img1.jpg')
dest_file = os.path.join(bucket_root, 'img1.jpg')
file_io.copy(img1_file, dest_file)
data = [
{
'key_col': 1,
'target_col': 1.0,
'cat_col': 'Monday',
'num_col': 23.0,
'img_col': dest_file,
},
]
table.insert(data=data)
cmd = ['python ' + os.path.join(CODE_PATH, 'transform.py'),
'--bigquery=%s.%s.%s' % (project_id, dataset_name, table_name),
'--analysis=' + self.analysis_dir,
'--prefix=features',
'--project-id=' + project_id,
'--output=' + self.output_dir]
print('cmd ', ' '.join(cmd))
subprocess.check_call(' '.join(cmd), shell=True)
# Read the tf record file. There should only be one file.
record_filepath = os.path.join(self.output_dir,
'features-00000-of-00001.tfrecord.gz')
options = tf.python_io.TFRecordOptions(
compression_type=tf.python_io.TFRecordCompressionType.GZIP)
serialized_examples = list(tf.python_io.tf_record_iterator(record_filepath, options=options))
self.assertEqual(len(serialized_examples), 1)
example = tf.train.Example()
example.ParseFromString(serialized_examples[0])
transformed_number = example.features.feature['num_col'].float_list.value[0]
self.assertAlmostEqual(transformed_number, 23.0)
transformed_category = example.features.feature['cat_col'].int64_list.value[0]
self.assertEqual(transformed_category, 2)
image_bytes = example.features.feature['img_col'].float_list.value
self.assertEqual(len(image_bytes), 2048)
self.assertTrue(any(x != 0.0 for x in image_bytes))
finally:
dataset.delete(delete_contents=True)
for obj in bucket.objects():
obj.delete()
bucket.delete()
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
barjacks/pythonrecherche | 07 Selenium, more beautifulsoup/zh_wie_neu.py | 1 | 1387 |
# coding: utf-8
from bs4 import BeautifulSoup
import pandas as pd
import requests
import time
import progressbar
bar = progressbar.ProgressBar()
lst = []
lst_pass = []
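# Scrape report pages 1697-12999 from zueriwieneu.ch; pages that only show the
# generic landing-page title are treated as missing and collected in lst_pass.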
for elem in bar(range(1697, 13000)):
url = "https://www.zueriwieneu.ch/report/" + str(elem)
response = requests.get(url)
züri_soup = BeautifulSoup(response.text, 'html.parser')
if züri_soup.find('h1').text != 'Melden Sie Schäden an der Infrastruktur von Zürich':
Mini_dict = {
'Kategorie' : züri_soup.find('h1').text,
'Meldedatum' : züri_soup.find('div', {'class':'problem-header clearfix'}).find('p').text.strip(),
'Meldung' : züri_soup.find('div', {'class':'problem-header clearfix'}).find_all('p')[1],
'Antwortdatum' : züri_soup.find('ul', {'class':'item-list item-list--updates'}).find_all('p')[0].text,
'Antwort' : züri_soup.find('ul', {'class':'item-list item-list--updates'}).find_all('p')[1].text,
'URL' : url,
'Lat' : float(züri_soup.find('div', {'id':'js-map-data'}).get('data-latitude')),
'Long': float(züri_soup.find('div', {'id':'js-map-data'}).get('data-longitude'))
}
lst.append(Mini_dict)
else:
lst_pass.append(url)
date = time.strftime("%Y-%m-%d_%H:%M:%S")
pd.DataFrame(lst).to_csv(date+'züriwieneu.csv')
| mit |
mne-tools/mne-tools.github.io | 0.21/_downloads/33b5e3cff5c172d72c79c6eec192b031/plot_label_from_stc.py | 20 | 4093 | """
=================================================
Generate a functional label from source estimates
=================================================
Threshold source estimates and produce a functional label. The label
is typically the region of interest that contains high values.
Here we compare the average time course in the anatomical label obtained
by FreeSurfer segmentation and the average time course from the
functional label. As expected the time course in the functional
label yields higher values.
"""
# Author: Luke Bloy <[email protected]>
# Alex Gramfort <[email protected]>
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.minimum_norm import read_inverse_operator, apply_inverse
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
fname_evoked = data_path + '/MEG/sample/sample_audvis-ave.fif'
subjects_dir = data_path + '/subjects'
subject = 'sample'
snr = 3.0
lambda2 = 1.0 / snr ** 2
method = "dSPM" # use dSPM method (could also be MNE or sLORETA)
# Compute a label/ROI based on the peak power between 80 and 120 ms.
# The label bankssts-lh is used for the comparison.
aparc_label_name = 'bankssts-lh'
tmin, tmax = 0.080, 0.120
# Load data
evoked = mne.read_evokeds(fname_evoked, condition=0, baseline=(None, 0))
inverse_operator = read_inverse_operator(fname_inv)
src = inverse_operator['src'] # get the source space
# Compute inverse solution
stc = apply_inverse(evoked, inverse_operator, lambda2, method,
pick_ori='normal')
# Make an STC in the time interval of interest and take the mean
stc_mean = stc.copy().crop(tmin, tmax).mean()
# use the stc_mean to generate a functional label
# region growing is halted at 60% of the peak value within the
# anatomical label / ROI specified by aparc_label_name
label = mne.read_labels_from_annot(subject, parc='aparc',
subjects_dir=subjects_dir,
regexp=aparc_label_name)[0]
stc_mean_label = stc_mean.in_label(label)
data = np.abs(stc_mean_label.data)
stc_mean_label.data[data < 0.6 * np.max(data)] = 0.
# 8.5% of original source space vertices were omitted during forward
# calculation, suppress the warning here with verbose='error'
func_labels, _ = mne.stc_to_label(stc_mean_label, src=src, smooth=True,
subjects_dir=subjects_dir, connected=True,
verbose='error')
# take first as func_labels are ordered based on maximum values in stc
func_label = func_labels[0]
# load the anatomical ROI for comparison
anat_label = mne.read_labels_from_annot(subject, parc='aparc',
subjects_dir=subjects_dir,
regexp=aparc_label_name)[0]
# extract the anatomical time course for each label
stc_anat_label = stc.in_label(anat_label)
pca_anat = stc.extract_label_time_course(anat_label, src, mode='pca_flip')[0]
stc_func_label = stc.in_label(func_label)
pca_func = stc.extract_label_time_course(func_label, src, mode='pca_flip')[0]
# flip the pca so that the max power between tmin and tmax is positive
pca_anat *= np.sign(pca_anat[np.argmax(np.abs(pca_anat))])
pca_func *= np.sign(pca_func[np.argmax(np.abs(pca_anat))])
###############################################################################
# plot the time courses....
plt.figure()
plt.plot(1e3 * stc_anat_label.times, pca_anat, 'k',
label='Anatomical %s' % aparc_label_name)
plt.plot(1e3 * stc_func_label.times, pca_func, 'b',
label='Functional %s' % aparc_label_name)
plt.legend()
plt.show()
###############################################################################
# plot brain in 3D with PySurfer if available
brain = stc_mean.plot(hemi='lh', subjects_dir=subjects_dir)
brain.show_view('lateral')
# show both labels
brain.add_label(anat_label, borders=True, color='k')
brain.add_label(func_label, borders=True, color='b')
| bsd-3-clause |
glouppe/scikit-learn | sklearn/manifold/isomap.py | 229 | 7169 | """Isomap for manifold learning"""
# Author: Jake Vanderplas -- <[email protected]>
# License: BSD 3 clause (C) 2011
import numpy as np
from ..base import BaseEstimator, TransformerMixin
from ..neighbors import NearestNeighbors, kneighbors_graph
from ..utils import check_array
from ..utils.graph import graph_shortest_path
from ..decomposition import KernelPCA
from ..preprocessing import KernelCenterer
class Isomap(BaseEstimator, TransformerMixin):
"""Isomap Embedding
Non-linear dimensionality reduction through Isometric Mapping
Read more in the :ref:`User Guide <isomap>`.
Parameters
----------
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold
eigen_solver : ['auto'|'arpack'|'dense']
'auto' : Attempt to choose the most efficient solver
for the given problem.
'arpack' : Use Arnoldi decomposition to find the eigenvalues
and eigenvectors.
'dense' : Use a direct solver (i.e. LAPACK)
for the eigenvalue decomposition.
tol : float
Convergence tolerance passed to arpack or lobpcg.
not used if eigen_solver == 'dense'.
max_iter : integer
Maximum number of iterations for the arpack solver.
not used if eigen_solver == 'dense'.
path_method : string ['auto'|'FW'|'D']
Method to use in finding shortest path.
'auto' : attempt to choose the best algorithm automatically.
'FW' : Floyd-Warshall algorithm.
'D' : Dijkstra's algorithm.
neighbors_algorithm : string ['auto'|'brute'|'kd_tree'|'ball_tree']
Algorithm to use for nearest neighbors search,
passed to neighbors.NearestNeighbors instance.
Attributes
----------
embedding_ : array-like, shape (n_samples, n_components)
Stores the embedding vectors.
kernel_pca_ : object
`KernelPCA` object used to implement the embedding.
training_data_ : array-like, shape (n_samples, n_features)
Stores the training data.
nbrs_ : sklearn.neighbors.NearestNeighbors instance
Stores nearest neighbors instance, including BallTree or KDtree
if applicable.
dist_matrix_ : array-like, shape (n_samples, n_samples)
Stores the geodesic distance matrix of training data.
References
----------
.. [1] Tenenbaum, J.B.; De Silva, V.; & Langford, J.C. A global geometric
framework for nonlinear dimensionality reduction. Science 290 (5500)
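    Examples
    --------
    A minimal usage sketch; the tiny toy dataset below is illustrative only:
    >>> import numpy as np
    >>> from sklearn.manifold import Isomap
    >>> X = np.array([[0., 0.], [1., 1.], [2., 2.],
    ...               [3., 3.], [4., 4.], [5., 5.]])
    >>> embedding = Isomap(n_neighbors=3, n_components=1)
    >>> X_transformed = embedding.fit_transform(X)
    >>> X_transformed.shape
    (6, 1)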
"""
def __init__(self, n_neighbors=5, n_components=2, eigen_solver='auto',
tol=0, max_iter=None, path_method='auto',
neighbors_algorithm='auto'):
self.n_neighbors = n_neighbors
self.n_components = n_components
self.eigen_solver = eigen_solver
self.tol = tol
self.max_iter = max_iter
self.path_method = path_method
self.neighbors_algorithm = neighbors_algorithm
self.nbrs_ = NearestNeighbors(n_neighbors=n_neighbors,
algorithm=neighbors_algorithm)
def _fit_transform(self, X):
X = check_array(X)
self.nbrs_.fit(X)
self.training_data_ = self.nbrs_._fit_X
self.kernel_pca_ = KernelPCA(n_components=self.n_components,
kernel="precomputed",
eigen_solver=self.eigen_solver,
tol=self.tol, max_iter=self.max_iter)
kng = kneighbors_graph(self.nbrs_, self.n_neighbors,
mode='distance')
self.dist_matrix_ = graph_shortest_path(kng,
method=self.path_method,
directed=False)
G = self.dist_matrix_ ** 2
G *= -0.5
self.embedding_ = self.kernel_pca_.fit_transform(G)
def reconstruction_error(self):
"""Compute the reconstruction error for the embedding.
Returns
-------
reconstruction_error : float
Notes
        -----
The cost function of an isomap embedding is
``E = frobenius_norm[K(D) - K(D_fit)] / n_samples``
Where D is the matrix of distances for the input data X,
D_fit is the matrix of distances for the output embedding X_fit,
and K is the isomap kernel:
``K(D) = -0.5 * (I - 1/n_samples) * D^2 * (I - 1/n_samples)``
"""
G = -0.5 * self.dist_matrix_ ** 2
G_center = KernelCenterer().fit_transform(G)
evals = self.kernel_pca_.lambdas_
return np.sqrt(np.sum(G_center ** 2) - np.sum(evals ** 2)) / G.shape[0]
def fit(self, X, y=None):
"""Compute the embedding vectors for data X
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, precomputed tree, or NearestNeighbors
object.
Returns
-------
self : returns an instance of self.
"""
self._fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Fit the model from data in X and transform X.
Parameters
----------
X: {array-like, sparse matrix, BallTree, KDTree}
            Training vector, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new: array-like, shape (n_samples, n_components)
"""
self._fit_transform(X)
return self.embedding_
def transform(self, X):
"""Transform X.
This is implemented by linking the points X into the graph of geodesic
distances of the training data. First the `n_neighbors` nearest
neighbors of X are found in the training data, and from these the
shortest geodesic distances from each point in X to each point in
the training data are computed in order to construct the kernel.
The embedding of X is the projection of this kernel onto the
embedding vectors of the training set.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Returns
-------
X_new: array-like, shape (n_samples, n_components)
"""
X = check_array(X)
distances, indices = self.nbrs_.kneighbors(X, return_distance=True)
#Create the graph of shortest distances from X to self.training_data_
# via the nearest neighbors of X.
#This can be done as a single array operation, but it potentially
# takes a lot of memory. To avoid that, use a loop:
G_X = np.zeros((X.shape[0], self.training_data_.shape[0]))
for i in range(X.shape[0]):
G_X[i] = np.min((self.dist_matrix_[indices[i]]
+ distances[i][:, None]), 0)
G_X **= 2
G_X *= -0.5
return self.kernel_pca_.transform(G_X)
| bsd-3-clause |
freeman-lab/dask | dask/tests/test_core.py | 7 | 3541 | from dask.utils import raises
from dask.core import (istask, get, get_dependencies, flatten, subs,
preorder_traversal)
def contains(a, b):
"""
>>> contains({'x': 1, 'y': 2}, {'x': 1})
True
>>> contains({'x': 1, 'y': 2}, {'z': 3})
False
"""
return all(a.get(k) == v for k, v in b.items())
def inc(x):
return x + 1
def add(x, y):
return x + y
def test_istask():
assert istask((inc, 1))
assert not istask(1)
assert not istask((1, 2))
d = {':x': 1,
':y': (inc, ':x'),
':z': (add, ':x', ':y')}
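# A small task graph used by the tests below: ':y' depends on ':x', and ':z'
# depends on both, so get(d, ':z') should evaluate to 3.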
def test_preorder_traversal():
t = (add, 1, 2)
assert list(preorder_traversal(t)) == [add, 1, 2]
t = (add, (add, 1, 2), (add, 3, 4))
assert list(preorder_traversal(t)) == [add, add, 1, 2, add, 3, 4]
t = (add, (sum, [1, 2]), 3)
assert list(preorder_traversal(t)) == [add, sum, list, 1, 2, 3]
def test_get():
assert get(d, ':x') == 1
assert get(d, ':y') == 2
assert get(d, ':z') == 3
assert get(d, 'pass-through') == 'pass-through'
def test_memoized_get():
try:
import toolz
except ImportError:
return
cache = dict()
getm = toolz.memoize(get, cache=cache, key=lambda args, kwargs: args[1:])
result = getm(d, ':z', get=getm)
assert result == 3
assert contains(cache, {(':x',): 1,
(':y',): 2,
(':z',): 3})
def test_data_not_in_dict_is_ok():
d = {'x': 1, 'y': (add, 'x', 10)}
assert get(d, 'y') == 11
def test_get_with_list():
d = {'x': 1, 'y': 2, 'z': (sum, ['x', 'y'])}
assert get(d, ['x', 'y']) == [1, 2]
assert get(d, 'z') == 3
def test_get_with_nested_list():
d = {'x': 1, 'y': 2, 'z': (sum, ['x', 'y'])}
assert get(d, [['x'], 'y']) == [[1], 2]
assert get(d, 'z') == 3
def test_get_works_with_unhashables_in_values():
f = lambda x, y: x + len(y)
d = {'x': 1, 'y': (f, 'x', set([1]))}
assert get(d, 'y') == 2
def test_get_laziness():
def isconcrete(arg):
return isinstance(arg, list)
d = {'x': 1, 'y': 2, 'z': (isconcrete, ['x', 'y'])}
assert get(d, ['x', 'y']) == [1, 2]
assert get(d, 'z') == False
def test_get_dependencies_nested():
dsk = {'x': 1, 'y': 2,
'z': (add, (inc, [['x']]), 'y')}
assert get_dependencies(dsk, 'z') == set(['x', 'y'])
def test_get_dependencies_empty():
dsk = {'x': (inc,)}
assert get_dependencies(dsk, 'x') == set()
def test_nested_tasks():
d = {'x': 1,
'y': (inc, 'x'),
'z': (add, (inc, 'x'), 'y')}
assert get(d, 'z') == 4
def test_get_stack_limit():
d = dict(('x%s' % (i+1), (inc, 'x%s' % i)) for i in range(10000))
d['x0'] = 0
assert get(d, 'x10000') == 10000
# introduce cycle
d['x5000'] = (inc, 'x5001')
assert raises(RuntimeError, lambda: get(d, 'x10000'))
assert get(d, 'x4999') == 4999
def test_flatten():
assert list(flatten(())) == []
assert list(flatten('foo')) == ['foo']
def test_subs():
assert subs((sum, [1, 'x']), 'x', 2) == (sum, [1, 2])
assert subs((sum, [1, ['x']]), 'x', 2) == (sum, [1, [2]])
def test_subs_with_unfriendly_eq():
try:
import numpy as np
except:
return
else:
task = (np.sum, np.array([1, 2]))
assert (subs(task, (4, 5), 1) == task) is True
def test_subs_with_surprisingly_friendly_eq():
try:
import pandas as pd
except:
return
else:
df = pd.DataFrame()
assert subs(df, 'x', 1) is df
| bsd-3-clause |
latticelabs/Mitty | mitty/benchmarking/misalignment_plot.py | 1 | 9184 | """Prepare a binned matrix of misalignments and plot it in different ways"""
import click
import pysam
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.path import Path
import matplotlib.patches as patches
from matplotlib.colors import LogNorm
import numpy as np
def we_have_too_many_bins(bins):
return sum([len(bb) for bb in bins]) > 5000 # This is our threshold for too many bins to compute
def autoscale_bin_size(chrom_lens, bin_cnt=100.0):
return int(sum(chrom_lens) / bin_cnt)
def compute_misalignment_matrix_from_bam(bam_fp, bin_size=None, i_know_what_i_am_doing=False):
"""Create a matrix of binned mis-alignments
:param bam_fp: input BAM
:param bin_size: size of bin in mega bases
:param i_know_what_i_am_doing: Set this to override the runtime warning of too many bins
"""
def binnify(_pos, _bins):
for n in range(1, len(_bins)):
if _pos < _bins[n]:
return n - 1
return len(_bins) - 1 # Should not get here
chrom_lens = [hdr['LN'] for hdr in bam_fp.header['SQ']]
bin_size = bin_size * 1e6 if bin_size is not None else autoscale_bin_size(chrom_lens)
bins = [np.array(range(0, hdr['LN'], bin_size) + [hdr['LN']], dtype=int) for hdr in bam_fp.header['SQ']]
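  # One array of bin edges per chromosome; the final edge is the chromosome
  # length itself, so the last bin may be narrower than bin_size.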
if not i_know_what_i_am_doing and we_have_too_many_bins(bins):
raise RuntimeWarning('The number of bins will be very large. '
'If you are sure you want to do this, '
'use the --i-know-what-i-am-doing flag.')
bin_centers = [(bb[:-1] + bb[1:]) / 2.0 for bb in bins]
# Rows = source (correct pos) Cols = destination (aligned pos)
matrices = [[np.zeros(shape=(len(bins[j]) - 1, len(bins[i]) - 1), dtype='uint32') for i in range(len(bins))] for j in range(len(bins))]
# TAG TYPE VALUE
# XR i Aligned chromosome
# XP i Aligned pos
for r in bam_fp:
c_chrom, c_pos, a_chrom, a_pos = r.reference_id, r.pos, r.get_tag('XR'), r.get_tag('XP')
c_pos_binned, a_pos_binned = binnify(c_pos, bins[c_chrom]), binnify(a_pos, bins[a_chrom])
matrices[c_chrom][a_chrom][c_pos_binned, a_pos_binned] += 1
return chrom_lens, bins, bin_centers, matrices
def plot_genome_as_a_circle(ax, chrom_lens, chrom_gap=np.pi / 50, chrom_radius=1.0, chrom_thick=5, r_max=1.05):
"""Plot the chromosomes on a circle."""
total_len = sum(chrom_lens)
radians_per_base = (2.0 * np.pi - len(chrom_lens) * chrom_gap) / total_len # With allowance for chrom gaps
theta_stops, x_ticks, x_tick_labels = [], [], []
delta_radian = 0.01
start_radian = 0
for ch_no, l in enumerate(chrom_lens):
end_radian = start_radian + l * radians_per_base
theta = np.arange(start_radian, end_radian, delta_radian)
theta_stops.append((start_radian, end_radian))
ax.plot(theta, [chrom_radius * 1.01] * theta.size, lw=chrom_thick, zorder=-1) # , color=[.3, .3, .3])
x_ticks.append((start_radian + end_radian)/2)
x_tick_labels.append(str(ch_no + 1))
start_radian = end_radian + chrom_gap
plt.setp(ax.get_yticklabels(), visible=False)
ax.grid(False)
plt.setp(ax, xticks=x_ticks, xticklabels=x_tick_labels)
ax.set_rmax(r_max)
return theta_stops
def plot_read_mis_alignments_on_a_circle(ax, chrom_lens, bins, bin_centers, matrices, theta_stops,
chrom_radius=1.0, scaling_factor=0.01):
scaling_factor *= 0.01
# http://matplotlib.org/users/path_tutorial.html
codes = [
Path.MOVETO,
Path.CURVE4,
Path.CURVE4,
Path.CURVE4,
]
for i in range(len(bins)):
for j in range(len(bins)):
mat = matrices[i][j]
range_bp_origin, range_bp_dest = float(chrom_lens[i]), float(chrom_lens[j])
offset_origin, offset_dest = theta_stops[i][0], theta_stops[j][0]
range_origin, range_dest = theta_stops[i][1] - theta_stops[i][0], theta_stops[j][1] - theta_stops[j][0]
scale_origin, scale_dest = range_origin / range_bp_origin, range_dest / range_bp_dest
c_origin, c_dest = offset_origin + bin_centers[i] * scale_origin, offset_dest + bin_centers[j] * scale_dest
this_origin, this_dest = np.tile(c_origin, c_dest.shape[0]), np.repeat(c_dest, c_origin.shape[0])
mat_flat = mat.ravel()
idx, = mat_flat.nonzero()
for ii in idx:
t0, t1 = this_origin[ii], this_dest[ii]
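        # Depth of the connecting Bezier curve scales with the angular distance
        # between the two bins (capped at the full radius, floored at 5% of it).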
this_radius = max(min(1.0, abs(t1 - t0) / np.pi), 0.05) * chrom_radius
vertices = [
(t0, chrom_radius), # P0
(t0, chrom_radius - this_radius), # P1
(t1, chrom_radius - this_radius), # P2
(t1, chrom_radius), # P3
]
path = Path(vertices, codes)
patch = patches.PathPatch(path, facecolor='none', lw=scaling_factor * mat_flat[ii])
ax.add_patch(patch)
def circle_plot(chrom_lens, bins, bin_centers, matrices, scaling_factor):
"""Plot the confusion matrix as a circle plot."""
fig = plt.figure()
ax = fig.add_subplot(111, polar=True)
theta_stops = plot_genome_as_a_circle(ax, chrom_lens)
plot_read_mis_alignments_on_a_circle(ax, chrom_lens, bins, bin_centers, matrices, theta_stops, chrom_radius=1.0, scaling_factor=scaling_factor)
def plot_genome_as_a_square(ax, bins, chrom_gap=1000, chrom_thick=5):
"""Plot the chromosomes on a matrix."""
start_pos, linear_stops, x_ticks, x_tick_labels = chrom_gap, [], [], []
for ch_no, b in enumerate(bins):
linear_stops.append([start_pos, start_pos + b[-1]])
ax.plot([x + start_pos for x in b], [0 for _ in b], color='k' if ch_no % 2 else 'gray', lw=chrom_thick, zorder=-1)
ax.plot([0 for _ in b], [x + start_pos for x in b], color='k' if ch_no % 2 else 'gray', lw=chrom_thick, zorder=-1)
x_ticks.append((start_pos + start_pos + b[-1]) / 2)
x_tick_labels.append(str(ch_no + 1))
start_pos += b[-1] + chrom_gap
#plt.setp(ax.get_yticklabels(), visible=False)
ax.grid(False)
plt.setp(ax, xticks=x_ticks, xticklabels=x_tick_labels, yticks=x_ticks, yticklabels=x_tick_labels)
return linear_stops
def plot_read_mis_alignments_as_a_matrix(ax, chrom_lens, bins, bin_centers, matrices, linear_stops,
scaling_factor=1.0, plot_grid=True):
for i in range(len(bins)):
for j in range(len(bins)):
mat = matrices[i][j]
range_bp_x, range_bp_y = float(chrom_lens[i]), float(chrom_lens[j])
offset_x, offset_y = linear_stops[i][0], linear_stops[j][0]
range_x, range_y = linear_stops[i][1] - linear_stops[i][0], linear_stops[j][1] - linear_stops[j][0]
scale_x, scale_y = range_x / range_bp_x, range_y / range_bp_y
cx, cy = offset_x + bin_centers[i] * scale_x, offset_y + bin_centers[j] * scale_y
this_x, this_y = np.tile(cx, cy.shape[0]), np.repeat(cy, cx.shape[0])
if plot_grid: ax.plot(this_x, this_y, '.', color=(0.8, 0.8, 0.8), ms=2, zorder=-1)
mat_flat = mat.ravel()
idx, = mat_flat.nonzero()
if idx.size > 0:
ax.scatter(this_x[idx], this_y[idx], mat_flat[idx] * scaling_factor, facecolors='none')
def matrix_plot(chrom_lens, bins, bin_centers, matrices, scaling_factor, plot_grid=True):
"""Plot the confusion matrix as a ... matrix."""
fig = plt.figure()
ax = fig.add_subplot(111)
linear_stops = plot_genome_as_a_square(ax, bins, chrom_gap=max(chrom_lens) * 0.1)
plot_read_mis_alignments_as_a_matrix(ax, chrom_lens, bins, bin_centers, matrices, linear_stops,
scaling_factor=scaling_factor, plot_grid=plot_grid)
plt.setp(ax, aspect=1, xlabel='Correct', ylabel='Aligned')
def is_grid_too_dense(bins):
return sum([len(bb) for bb in bins]) > 100 # This is our threshold for too dense a grid to show
def auto_scale_scaling_factor(matrices, scale=1000.0):
return scale / max([matrices[i][j].max() for i in range(len(matrices)) for j in range(len(matrices[i]))])
@click.command()
@click.argument('badbam', type=click.Path(exists=True))
@click.option('--circle', type=click.Path(), help='Name of figure file for circle plot')
@click.option('--matrix', type=click.Path(), help='Name of figure file for matrix plot')
@click.option('--bin-size', type=float, default=None, help='Bin size in Mb. Omit to auto-scale')
@click.option('--scaling-factor', type=float, default=None, help='Scale size of disks/lines in plot. Omit to auto-scale')
@click.option('--i-know-what-i-am-doing', is_flag=True, help='Override bin density safety')
def cli(badbam, circle, matrix, bin_size, scaling_factor, i_know_what_i_am_doing):
"""Prepare a binned matrix of mis-alignments and plot it in different ways"""
chrom_lens, bins, bin_centers, matrices = \
compute_misalignment_matrix_from_bam(pysam.AlignmentFile(badbam, 'rb'),
bin_size=bin_size, i_know_what_i_am_doing=i_know_what_i_am_doing)
scaling_factor = scaling_factor or auto_scale_scaling_factor(matrices)
if circle is not None:
circle_plot(chrom_lens, bins, bin_centers, matrices, scaling_factor)
plt.savefig(circle)
if matrix is not None:
matrix_plot(chrom_lens, bins, bin_centers, matrices, scaling_factor,
plot_grid=not is_grid_too_dense(bins))
plt.savefig(matrix)
if __name__ == '__main__':
cli() | gpl-2.0 |
idf/scipy_util | scipy_util/image/color_kmeans.py | 1 | 2314 | # USAGE
# python color_kmeans.py --image images/jp.png --clusters 3
# Author: Adrian Rosebrock
# Website: www.pyimagesearch.com
# import the necessary packages
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
import numpy as np
import argparse
import cv2
def centroid_histogram(clt):
# grab the number of different clusters and create a histogram
# based on the number of pixels assigned to each cluster
numLabels = np.arange(0, len(np.unique(clt.labels_)) + 1)
(hist, _) = np.histogram(clt.labels_, bins = numLabels)
# normalize the histogram, such that it sums to one
hist = hist.astype("float")
hist /= hist.sum()
# return the histogram
return hist
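# With, e.g., 3 clusters the histogram might look like [0.55, 0.30, 0.15]
# (illustrative values); each entry is that cluster's share of the pixels.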
def plot_colors(hist, centroids):
# initialize the bar chart representing the relative frequency
# of each of the colors
bar = np.zeros((50, 300, 3), dtype = "uint8")
startX = 0
# loop over the percentage of each cluster and the color of
# each cluster
for (percent, color) in zip(hist, centroids):
# plot the relative percentage of each cluster
endX = startX + (percent * 300)
cv2.rectangle(bar, (int(startX), 0), (int(endX), 50),
color.astype("uint8").tolist(), -1)
startX = endX
# return the bar chart
return bar
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True, help="Path to the image")
ap.add_argument("-c", "--clusters", required=True, type=int,
help="# of clusters")
args = vars(ap.parse_args())
# load the image and convert it from BGR to RGB so that
# we can display it with matplotlib
image = cv2.imread(args["image"])
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# show our image
plt.figure()
plt.axis("off")
plt.imshow(image)
# reshape the image to be a list of pixels
image = image.reshape((image.shape[0]*image.shape[1], 3))
# cluster the pixel intensities
clt = KMeans(n_clusters=args["clusters"])
clt.fit(image)
# build a histogram of clusters and then create a figure
# representing the number of pixels labeled to each color
hist = centroid_histogram(clt)
bar = plot_colors(hist, clt.cluster_centers_)
# show our color bar
plt.figure()
plt.axis("off")
plt.imshow(bar)
plt.show() | bsd-3-clause |
robogen/CMS-Mining | RunScripts/es_mainreduce.py | 1 | 20005 | from elasticsearch import Elasticsearch
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from matplotlib.dates import AutoDateLocator, AutoDateFormatter
import numpy as np
import datetime as dt
import math
import json
with open('sites.json', 'r+') as txt:
sitesArray = json.load(txt)
with open('cms.json', 'r+') as txt:
cmsLocate = json.load(txt)
with open("config", "r+") as txt:
contents = list(map(str.rstrip, txt))
def conAtlasTime(time):
return (dt.datetime.strptime(time, '%Y-%m-%dT%X')).replace(tzinfo=dt.timezone.utc).timestamp()
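# e.g. conAtlasTime("2016-07-17T00:00:00") == 1468713600.0 (UTC epoch seconds)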
def utcDate(time):
return dt.datetime.fromtimestamp(time, dt.timezone.utc)
esAtlas = Elasticsearch([{
'host': contents[2], 'port': contents[3]
}], timeout=50)
esHCC = Elasticsearch([{
'host': contents[0], 'port': contents[1]
}], timeout=50)
scrollPreserve="3m"
startDate = "2016-07-17T00:00:00"
endDate = "2016-07-25T00:00:00"
tenMin = np.multiply(10,60)
loc = {}
loc["location"] = np.array([])
def atlasLatency(srcSite, destSite):
queryAtlas={"query" :
{"bool": {
"must": [
{"match" :
{"_type" : "latency" }
},
{"match" :
{"srcSite" : srcSite }
},
{"match" :
{"destSite" : destSite }
},
{"range" : {
"timestamp" : {
#"gt" : int(conAtlasTime(startDate)),
#"lt" : int(conAtlasTime(endDate))
"gt" : startDate,
"lt" : endDate
}
}}
]
}
}
}
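    # Stream every matching document with scan+scroll rather than a single
    # bounded result page.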
scannerAtlas = esAtlas.search(index="network_weather_2-*",
body=queryAtlas,
search_type="scan",
scroll=scrollPreserve)
scrollIdAtlas = scannerAtlas['_scroll_id']
atlasTotalRec = scannerAtlas["hits"]["total"]
arrRet = np.array([])
if atlasTotalRec == 0:
return None
else:
while atlasTotalRec > 0:
responseAtlas = esAtlas.scroll(scroll_id=scrollIdAtlas,
scroll=scrollPreserve)
for hit in responseAtlas["hits"]["hits"]:
tempRay = None # Initialize
if hit["_source"]["src"] == hit["_source"]["MA"]: # means MA is the src site
tempRay = np.array([#hit["_source"]["timestamp"],
#hit["_source"]["timestamp"]
conAtlasTime(hit["_source"]["timestamp"]),
conAtlasTime(hit["_source"]["timestamp"])
- np.multiply(4, np.multiply(60, 60)),
float(hit["_source"]["delay_mean"]),
float(hit["_source"]["delay_median"]),
float(hit["_source"]["delay_sd"]),
float(0.0)])
elif hit["_source"]["dest"] == hit["_source"]["MA"]: # means MA is the dest site
tempRay = np.array([#hit["_source"]["timestamp"],
#hit["_source"]["timestamp"]
conAtlasTime(hit["_source"]["timestamp"]),
conAtlasTime(hit["_source"]["timestamp"])
- np.multiply(4, np.multiply(60, 60)),
float(hit["_source"]["delay_mean"]),
float(hit["_source"]["delay_median"]),
float(hit["_source"]["delay_sd"]),
float(1.0)])
else:
raise NameError('MA is not the src or dest')
if arrRet.size == 0:
arrRet = np.reshape(tempRay, (1,6))
else:
arrRet = np.vstack((arrRet, tempRay))
atlasTotalRec -= len(responseAtlas['hits']['hits'])
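        # Sort the rows in place by the first column (the timestamp) via a
        # structured view of the 2-D float array.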
arrRet.view('f8,f8,f8,f8,f8,f8').sort(order=['f0'], axis=0)
return arrRet
def atlasPacketLoss(srcSite, destSite):
queryAtlas={"query" :
{"bool": {
"must": [
{"match" :
{"_type" : "packet_loss_rate"}
},
{"match" :
{"srcSite" : srcSite }
},
{"match" :
{"destSite" : destSite }
},
{"range" : {
"timestamp" : {
#"gt" : int(conAtlasTime(startDate)),
#"lt" : int(conAtlasTime(endDate))
"gt" : startDate,
"lt" : endDate
}
}}
]
}
}
}
scannerAtlas = esAtlas.search(index="network_weather_2-*",
body=queryAtlas,
search_type="scan",
scroll=scrollPreserve)
scrollIdAtlas = scannerAtlas['_scroll_id']
atlasTotalRec = scannerAtlas["hits"]["total"]
arrRet = np.array([])
if atlasTotalRec == 0:
return None
else:
while atlasTotalRec > 0:
responseAtlas = esAtlas.scroll(scroll_id=scrollIdAtlas,
scroll=scrollPreserve)
for hit in responseAtlas["hits"]["hits"]:
tempRay = None # Initialize
if hit["_source"]["src"] == hit["_source"]["MA"]: # means MA is the src site
tempRay = np.array([#hit["_source"]["timestamp"],
#hit["_source"]["timestamp"]
conAtlasTime(hit["_source"]["timestamp"]),
conAtlasTime(hit["_source"]["timestamp"])
- np.multiply(4, np.multiply(60, 60)),
float(hit["_source"]["packet_loss"]),
float(0.0)])
elif hit["_source"]["dest"] == hit["_source"]["MA"]: # means MA is the dest site
tempRay = np.array([#hit["_source"]["timestamp"],
#hit["_source"]["timestamp"]
conAtlasTime(hit["_source"]["timestamp"]),
conAtlasTime(hit["_source"]["timestamp"])
- np.multiply(4, np.multiply(60, 60)),
float(hit["_source"]["packet_loss"]),
float(1.0)])
else:
raise NameError('MA is not src or dest')
if arrRet.size == 0:
arrRet = np.reshape(tempRay, (1, 4))
else:
arrRet = np.vstack((arrRet, tempRay))
atlasTotalRec -= len(responseAtlas['hits']['hits'])
arrRet.view('f8,f8,f8,f8').sort(order=['f0'], axis=0)
return arrRet
def atlasThroughput(srcSite, destSite):
queryAtlas={"query" :
{"bool": {
"must": [
{"match" :
{"_type" : "throughput"}
},
{"match" :
{"srcSite" : srcSite }
},
{"match" :
{"destSite" : destSite }
},
{"range" : {
"timestamp" : {
#"gt" : int(conAtlasTime(startDate)),
#"lt" : int(conAtlasTime(endDate))
"gt" : startDate,
"lt" : endDate
}
}}
]
}
}
}
scannerAtlas = esAtlas.search(index="network_weather_2-*",
body=queryAtlas,
search_type="scan",
scroll=scrollPreserve)
scrollIdAtlas = scannerAtlas['_scroll_id']
atlasTotalRec = scannerAtlas["hits"]["total"]
arrRet = np.array([])
if atlasTotalRec == 0:
return None
else:
while atlasTotalRec > 0:
responseAtlas = esAtlas.scroll(scroll_id=scrollIdAtlas,
scroll=scrollPreserve)
for hit in responseAtlas["hits"]["hits"]:
tempRay = None #Initialize in local context
if hit["_source"]["src"] == hit["_source"]["MA"]: # Means MA is the src site
tempRay = np.array([#hit["_source"]["timestamp"],
#hit["_source"]["timestamp"]
conAtlasTime(hit["_source"]["timestamp"]),
conAtlasTime(hit["_source"]["timestamp"])
- np.multiply(4, np.multiply(60, 60)),
float(hit["_source"]["throughput"]),
float(0.0)])
elif hit["_source"]["dest"] == hit["_source"]["MA"]: #Means MA is the dest site
tempRay = np.array([#hit["_source"]["timestamp"],
#hit["_source"]["timestamp"]
conAtlasTime(hit["_source"]["timestamp"]),
conAtlasTime(hit["_source"]["timestamp"])
- np.multiply(4, np.multiply(60, 60)),
float(hit["_source"]["throughput"]),
float(1.0)])
else:
raise NameError('MA is not src or dest')
if arrRet.size == 0:
arrRet = np.reshape(tempRay, (1, 4))
else:
arrRet = np.vstack((arrRet, tempRay))
atlasTotalRec -= len(responseAtlas['hits']['hits'])
arrRet.view('f8,f8,f8,f8').sort(order=['f0'], axis=0)
return arrRet
def hccQuery(site):
queryHCC={"query" :
{"bool": {
"must": [
{"match" :
{"CMS_JobType" : "Processing"}
},
{"range" :
{"EventRate" : {"gte" : "0"}}
},
#{"match" :
# {"TaskType" : "Production"}
#},
{"range" : {
"CompletionDate" : {
"gt" : int(conAtlasTime(startDate)),
"lt" : int(conAtlasTime(endDate))
}
}},
{"match" :
{"DataLocationsCount" : 1}
},
{"match" :
{"Site" : site }
},
{"match" :
{"InputData" : "Offsite"}
}
]
}
}
}
scannerHCC = esHCC.search(index="cms-*",
doc_type="job",
body=queryHCC,
search_type="scan",
scroll=scrollPreserve)
scrollIdHCC = scannerHCC['_scroll_id']
countHitsHCC = scannerHCC["hits"]["total"]
arrRet = {}
if countHitsHCC == 0:
return None
else:
while countHitsHCC > 0:
responseHCC = esHCC.scroll(scroll_id=scrollIdHCC,
scroll=scrollPreserve)
for hit in responseHCC["hits"]["hits"]:
location = hit["_source"]["DataLocations"]
if str(location[0]).lower() in cmsLocate["locations"]:
tempHolder = np.array([hit["_source"]["CpuEff"],
#hit["_source"]["EventRate"],
hit["_source"]["ChirpCMSSWEventRate"],
hit["_source"]["JobCurrentStartDate"],
hit["_source"]["JobFinishedHookDone"],
hit["_source"]["CpuTimeHr"],
hit["_source"]["WallClockHr"],
hit["_source"]["RequestCpus"],
hit["_source"]["MemoryMB"],
hit["_source"]["QueueHrs"],
hit["_source"]["RequestMemory"],
hit["_source"]["CoreHr"],
hit["_source"]["CpuBadput"],
hit["_source"]["KEvents"]])
if not str(location[0]) in loc["location"]:
loc["location"] = np.append(loc["location"],
str(location[0]))
arrRet[str(location[0])] = np.reshape(tempHolder, (1,13))
else:
arrRet[str(location[0])] = np.vstack((arrRet[str(location[0])],tempHolder))
countHitsHCC -= len(responseHCC['hits']['hits'])
for hit in arrRet:
#print(arrRet)
#tempRay = arrRet[str(hit)]
#arrRet[str(hit)] = tempRay[tempRay[:,2].argsort()]
#arrRet[str(hit)] = sorted(arrRet[str(hit)], key=lambda x : x[2])
arrRet[str(hit)].view('f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8').sort(order=['f2'], axis=0)
return arrRet
with PdfPages('CMS_Plots.pdf') as pp:
d = pp.infodict()
d['Title'] = 'CMS Grid Plots'
d['Author'] = u'Jerrod T. Dixon\xe4nen'
d['Subject'] = 'Plot of network affects on grid jobs'
d['Keywords'] = 'PdfPages matplotlib CMS grid'
d['CreationDate'] = dt.datetime.today()
d['ModDate'] = dt.datetime.today()
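    # For each CMS site: pull its offsite processing jobs, then, for every
    # remote data location seen, overlay the perfSONAR packet-loss and latency
    # measurements for that site pair.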
for hit in cmsLocate["locations"]:
loc["location"] = np.array([])
hccResult = hccQuery(hit)
for note in loc["location"]:
atlasT = None #atlasThroughput(sitesArray[hit], sitesArray[note.lower()])
atlasP = atlasPacketLoss(sitesArray[hit], sitesArray[note.lower()])
atlasL = atlasLatency(sitesArray[hit], sitesArray[note.lower()])
tempArr = hccResult[note]
arrCpu = np.array([]);
arrEvent = np.array([]);
arrStart = np.array([]);
arrEnd = np.array([]);
for tpl in tempArr:
arrCpu = np.append(arrCpu, tpl[0]);
arrEvent = np.append(arrEvent, tpl[1]);
arrStart = np.append(arrStart, utcDate(tpl[2]));
arrEnd = np.append(arrEnd, utcDate(tpl[3]));
figH, axH = plt.subplots(2, sharex=True)
axH[1].xaxis.set_major_formatter(AutoDateFormatter(locator=AutoDateLocator(),
defaultfmt="%m-%d %H:%M"))
figH.autofmt_xdate(bottom=0.2, rotation=30, ha='right')
axH[0].plot(arrStart, arrCpu, 'bs')
axH[0].hlines(arrCpu,
arrStart,
arrEnd)
axH[0].set_ylabel("CpuEff")
axH[0].set_title(str("2016 From " + hit + " To " + note))
axH[1].plot(arrStart, arrEvent, 'bs')
axH[1].hlines(arrEvent,
arrStart,
arrEnd)
axH[1].set_ylabel("EventRate")
pp.savefig(figH)
plt.close(figH)
#axA[2].xaxis.set_major_formatter(AutoDateFormatter(locator=AutoDateLocator(),
# defaultfmt="%m-%d %H:%M"))
if not type(atlasP) == type(None):
#tDate = np.array([])
#tDatef = np.array([])
#tPut = np.array([])
pDate = np.array([])
pDatef = np.array([])
pLoss = np.array([])
#for tpl in atlasT:
# tDate = np.append(tDate, tpl[0])
# tDatef = np.append(tDatef, tpl[1])
# tPut = np.append(tPut, tpl[2])
for tpl in atlasP:
pDate = np.append(pDate, tpl[0])
pDatef = np.append(pDatef, tpl[1])
pLoss = np.append(pLoss, tpl[2])
figA, axA = plt.subplots(2, sharex=True)
axA[0].set_title(str("2016 From " + \
hit + " (" + \
sitesArray[hit] + \
")" + " To " + \
note + " (" + sitesArray[note.lower()] + ")"))
figA.autofmt_xdate(bottom=0.2, rotation=30, ha='right')
axA[0].plot(pDate, pLoss, 'bs')
axA[0].set_ylabel("Packet Loss")
axA[0].hlines(pLoss,
pDatef,
pDate)
#axA[1].set_ylabel("Throughput")
#axA[1].plot(tDate, tPut, 'bs')
#axA[1].hlines(tPut,
# tDatef,
# tDate)
pp.savefig(figA)
plt.close(figA)
if not type(atlasL) == type(None):
lDate = np.array([])
lDatef = np.array([])
lMean = np.array([])
lMedian = np.array([])
lStd = np.array([])
for tpl in atlasL:
lDate = np.append(lDate, tpl[0])
lDatef = np.append(lDatef, tpl[1])
lMean = np.append(lMean, tpl[2])
lMedian = np.append(lMedian, tpl[3])
lStd = np.append(lStd, tpl[4])
figL, axL = plt.subplots(3, sharex=True)
axL[0].set_title(str("2016 Latency From " + \
hit + " (" + \
sitesArray[hit] + \
")" + " To " + \
note + " (" + sitesArray[note.lower()] + ")"))
figL.autofmt_xdate(bottom=0.2, rotation=30, ha='right')
axL[0].set_ylabel("Mean")
axL[0].plot(lDate, lMean, 'bs', label="delay_mean")
axL[0].hlines(lMean,
lDatef,
lDate)
axL[1].set_ylabel("Median")
axL[1].plot(lDate, lMedian, 'rs', label="delay_median")
axL[1].hlines(lMedian,
lDatef,
lDate)
axL[2].set_ylabel("Std. Dev")
axL[2].plot(lDate, lStd, 'g^', label="delay_sd")
axL[2].hlines(lStd,
lDatef,
lDate)
pp.savefig(figL)
plt.close(figL)
| mit |
psci2195/espresso-ffans | samples/lb_profile.py | 1 | 2856 | # Copyright (C) 2010-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Simulate the flow of a lattice-Boltzmann fluid past a cylinder,
obtain the velocity profile in polar coordinates and compare it
to the analytical solution.
"""
import numpy as np
import matplotlib.pyplot as plt
import espressomd
required_features = ["CUDA", "LB_BOUNDARIES_GPU"]
espressomd.assert_features(required_features)
import espressomd.lb
import espressomd.observables
import espressomd.shapes
import espressomd.lbboundaries
import espressomd.accumulators
system = espressomd.System(box_l=[10.0, 10.0, 5.0])
system.time_step = 0.01
system.cell_system.skin = 0.4
n_steps = 500
lb_fluid = espressomd.lb.LBFluidGPU(
agrid=1.0, dens=1.0, visc=1.0, tau=0.01, ext_force_density=[0, 0, 0.15], kT=1.0, seed=32)
system.actors.add(lb_fluid)
system.thermostat.set_lb(LB_fluid=lb_fluid, seed=23)
fluid_obs = espressomd.observables.CylindricalLBVelocityProfile(
center=[5.0, 5.0, 0.0],
axis='z',
n_r_bins=100,
n_phi_bins=1,
n_z_bins=1,
min_r=0.0,
max_r=4.0,
min_phi=-np.pi,
max_phi=np.pi,
min_z=0.0,
max_z=10.0,
sampling_delta_x=0.05,
sampling_delta_y=0.05,
sampling_delta_z=1.0)
cylinder_shape = espressomd.shapes.Cylinder(
center=[5.0, 5.0, 5.0],
axis=[0, 0, 1],
direction=-1,
radius=4.0,
length=20.0)
cylinder_boundary = espressomd.lbboundaries.LBBoundary(shape=cylinder_shape)
system.lbboundaries.add(cylinder_boundary)
system.integrator.run(n_steps)
accumulator = espressomd.accumulators.MeanVarianceCalculator(obs=fluid_obs)
system.auto_update_accumulators.add(accumulator)
system.integrator.run(n_steps)
lb_fluid_profile = accumulator.get_mean()
lb_fluid_profile = np.reshape(lb_fluid_profile, (100, 1, 1, 3))
def poiseuille_flow(r, R, ext_force_density):
return ext_force_density * 1. / 4 * (R**2.0 - r**2.0)
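# Note: this analytic profile assumes unit dynamic viscosity, matching the
# visc=1.0 used for the LB fluid above.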
# Please note that due to symmetry and interpolation, a plateau is seen
# near r=0.
n_bins = len(lb_fluid_profile[:, 0, 0, 2])
r_max = 4.0
r = np.linspace(0.0, r_max, n_bins)
plt.plot(r, lb_fluid_profile[:, 0, 0, 2], label='LB profile')
plt.plot(r, poiseuille_flow(r, r_max, 0.15), label='analytical solution')
plt.legend()
plt.show()
| gpl-3.0 |
adamgreenhall/scikit-learn | examples/linear_model/plot_logistic_path.py | 349 | 1195 | #!/usr/bin/env python
"""
=================================
Path with L1- Logistic Regression
=================================
Computes path on IRIS dataset.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
from datetime import datetime
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn import datasets
from sklearn.svm import l1_min_c
iris = datasets.load_iris()
X = iris.data
y = iris.target
X = X[y != 2]
y = y[y != 2]
X -= np.mean(X, 0)
###############################################################################
# Demo path functions
cs = l1_min_c(X, y, loss='log') * np.logspace(0, 3)
print("Computing regularization path ...")
start = datetime.now()
clf = linear_model.LogisticRegression(C=1.0, penalty='l1', tol=1e-6)
coefs_ = []
for c in cs:
clf.set_params(C=c)
clf.fit(X, y)
coefs_.append(clf.coef_.ravel().copy())
print("This took ", datetime.now() - start)
coefs_ = np.array(coefs_)
plt.plot(np.log10(cs), coefs_)
ymin, ymax = plt.ylim()
plt.xlabel('log(C)')
plt.ylabel('Coefficients')
plt.title('Logistic Regression Path')
plt.axis('tight')
plt.show()
| bsd-3-clause |
pypot/scikit-learn | sklearn/preprocessing/tests/test_label.py | 48 | 18419 | import numpy as np
from scipy.sparse import issparse
from scipy.sparse import coo_matrix
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn.preprocessing.label import LabelBinarizer
from sklearn.preprocessing.label import MultiLabelBinarizer
from sklearn.preprocessing.label import LabelEncoder
from sklearn.preprocessing.label import label_binarize
from sklearn.preprocessing.label import _inverse_binarize_thresholding
from sklearn.preprocessing.label import _inverse_binarize_multiclass
from sklearn import datasets
iris = datasets.load_iris()
def toarray(a):
if hasattr(a, "toarray"):
a = a.toarray()
return a
def test_label_binarizer():
lb = LabelBinarizer()
# one-class case defaults to negative label
inp = ["pos", "pos", "pos", "pos"]
expected = np.array([[0, 0, 0, 0]]).T
got = lb.fit_transform(inp)
assert_array_equal(lb.classes_, ["pos"])
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
# two-class case
inp = ["neg", "pos", "pos", "neg"]
expected = np.array([[0, 1, 1, 0]]).T
got = lb.fit_transform(inp)
assert_array_equal(lb.classes_, ["neg", "pos"])
assert_array_equal(expected, got)
to_invert = np.array([[1, 0],
[0, 1],
[0, 1],
[1, 0]])
assert_array_equal(lb.inverse_transform(to_invert), inp)
# multi-class case
inp = ["spam", "ham", "eggs", "ham", "0"]
expected = np.array([[0, 0, 0, 1],
[0, 0, 1, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[1, 0, 0, 0]])
got = lb.fit_transform(inp)
assert_array_equal(lb.classes_, ['0', 'eggs', 'ham', 'spam'])
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
def test_label_binarizer_unseen_labels():
lb = LabelBinarizer()
expected = np.array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
got = lb.fit_transform(['b', 'd', 'e'])
assert_array_equal(expected, got)
expected = np.array([[0, 0, 0],
[1, 0, 0],
[0, 0, 0],
[0, 1, 0],
[0, 0, 1],
[0, 0, 0]])
got = lb.transform(['a', 'b', 'c', 'd', 'e', 'f'])
assert_array_equal(expected, got)
@ignore_warnings
def test_label_binarizer_column_y():
# first for binary classification vs multi-label with 1 possible class
# lists are multi-label, array is multi-class :-/
inp_list = [[1], [2], [1]]
inp_array = np.array(inp_list)
multilabel_indicator = np.array([[1, 0], [0, 1], [1, 0]])
binaryclass_array = np.array([[0], [1], [0]])
lb_1 = LabelBinarizer()
out_1 = lb_1.fit_transform(inp_list)
lb_2 = LabelBinarizer()
out_2 = lb_2.fit_transform(inp_array)
assert_array_equal(out_1, multilabel_indicator)
assert_array_equal(out_2, binaryclass_array)
# second for multiclass classification vs multi-label with multiple
# classes
inp_list = [[1], [2], [1], [3]]
inp_array = np.array(inp_list)
# the indicator matrix output is the same in this case
indicator = np.array([[1, 0, 0], [0, 1, 0], [1, 0, 0], [0, 0, 1]])
lb_1 = LabelBinarizer()
out_1 = lb_1.fit_transform(inp_list)
lb_2 = LabelBinarizer()
out_2 = lb_2.fit_transform(inp_array)
assert_array_equal(out_1, out_2)
assert_array_equal(out_2, indicator)
def test_label_binarizer_set_label_encoding():
lb = LabelBinarizer(neg_label=-2, pos_label=0)
# two-class case with pos_label=0
inp = np.array([0, 1, 1, 0])
expected = np.array([[-2, 0, 0, -2]]).T
got = lb.fit_transform(inp)
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
lb = LabelBinarizer(neg_label=-2, pos_label=2)
# multi-class case
inp = np.array([3, 2, 1, 2, 0])
expected = np.array([[-2, -2, -2, +2],
[-2, -2, +2, -2],
[-2, +2, -2, -2],
[-2, -2, +2, -2],
[+2, -2, -2, -2]])
got = lb.fit_transform(inp)
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
@ignore_warnings
def test_label_binarizer_errors():
# Check that invalid arguments yield ValueError
one_class = np.array([0, 0, 0, 0])
lb = LabelBinarizer().fit(one_class)
multi_label = [(2, 3), (0,), (0, 2)]
assert_raises(ValueError, lb.transform, multi_label)
lb = LabelBinarizer()
assert_raises(ValueError, lb.transform, [])
assert_raises(ValueError, lb.inverse_transform, [])
assert_raises(ValueError, LabelBinarizer, neg_label=2, pos_label=1)
assert_raises(ValueError, LabelBinarizer, neg_label=2, pos_label=2)
assert_raises(ValueError, LabelBinarizer, neg_label=1, pos_label=2,
sparse_output=True)
# Fail on y_type
assert_raises(ValueError, _inverse_binarize_thresholding,
y=csr_matrix([[1, 2], [2, 1]]), output_type="foo",
classes=[1, 2], threshold=0)
# Fail on the number of classes
assert_raises(ValueError, _inverse_binarize_thresholding,
y=csr_matrix([[1, 2], [2, 1]]), output_type="foo",
classes=[1, 2, 3], threshold=0)
# Fail on the dimension of 'binary'
assert_raises(ValueError, _inverse_binarize_thresholding,
y=np.array([[1, 2, 3], [2, 1, 3]]), output_type="binary",
classes=[1, 2, 3], threshold=0)
# Fail on multioutput data
assert_raises(ValueError, LabelBinarizer().fit, np.array([[1, 3], [2, 1]]))
assert_raises(ValueError, label_binarize, np.array([[1, 3], [2, 1]]),
[1, 2, 3])
def test_label_encoder():
# Test LabelEncoder's transform and inverse_transform methods
le = LabelEncoder()
le.fit([1, 1, 4, 5, -1, 0])
assert_array_equal(le.classes_, [-1, 0, 1, 4, 5])
assert_array_equal(le.transform([0, 1, 4, 4, 5, -1, -1]),
[1, 2, 3, 3, 4, 0, 0])
assert_array_equal(le.inverse_transform([1, 2, 3, 3, 4, 0, 0]),
[0, 1, 4, 4, 5, -1, -1])
assert_raises(ValueError, le.transform, [0, 6])
def test_label_encoder_fit_transform():
# Test fit_transform
le = LabelEncoder()
ret = le.fit_transform([1, 1, 4, 5, -1, 0])
assert_array_equal(ret, [2, 2, 3, 4, 0, 1])
le = LabelEncoder()
ret = le.fit_transform(["paris", "paris", "tokyo", "amsterdam"])
assert_array_equal(ret, [1, 1, 2, 0])
def test_label_encoder_errors():
# Check that invalid arguments yield ValueError
le = LabelEncoder()
assert_raises(ValueError, le.transform, [])
assert_raises(ValueError, le.inverse_transform, [])
def test_sparse_output_multilabel_binarizer():
# test input as iterable of iterables
inputs = [
lambda: [(2, 3), (1,), (1, 2)],
lambda: (set([2, 3]), set([1]), set([1, 2])),
lambda: iter([iter((2, 3)), iter((1,)), set([1, 2])]),
]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 1, 0]])
inverse = inputs[0]()
for sparse_output in [True, False]:
for inp in inputs:
            # With fit_transform
mlb = MultiLabelBinarizer(sparse_output=sparse_output)
got = mlb.fit_transform(inp())
assert_equal(issparse(got), sparse_output)
if sparse_output:
got = got.toarray()
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
# With fit
mlb = MultiLabelBinarizer(sparse_output=sparse_output)
got = mlb.fit(inp()).transform(inp())
assert_equal(issparse(got), sparse_output)
if sparse_output:
got = got.toarray()
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
assert_raises(ValueError, mlb.inverse_transform,
csr_matrix(np.array([[0, 1, 1],
[2, 0, 0],
[1, 1, 0]])))
def test_multilabel_binarizer():
# test input as iterable of iterables
inputs = [
lambda: [(2, 3), (1,), (1, 2)],
lambda: (set([2, 3]), set([1]), set([1, 2])),
lambda: iter([iter((2, 3)), iter((1,)), set([1, 2])]),
]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 1, 0]])
inverse = inputs[0]()
for inp in inputs:
        # With fit_transform
mlb = MultiLabelBinarizer()
got = mlb.fit_transform(inp())
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
# With fit
mlb = MultiLabelBinarizer()
got = mlb.fit(inp()).transform(inp())
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
def test_multilabel_binarizer_empty_sample():
mlb = MultiLabelBinarizer()
y = [[1, 2], [1], []]
Y = np.array([[1, 1],
[1, 0],
[0, 0]])
assert_array_equal(mlb.fit_transform(y), Y)
def test_multilabel_binarizer_unknown_class():
mlb = MultiLabelBinarizer()
y = [[1, 2]]
assert_raises(KeyError, mlb.fit(y).transform, [[0]])
mlb = MultiLabelBinarizer(classes=[1, 2])
assert_raises(KeyError, mlb.fit_transform, [[0]])
def test_multilabel_binarizer_given_classes():
inp = [(2, 3), (1,), (1, 2)]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 0, 1]])
# fit_transform()
mlb = MultiLabelBinarizer(classes=[1, 3, 2])
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, [1, 3, 2])
# fit().transform()
mlb = MultiLabelBinarizer(classes=[1, 3, 2])
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, [1, 3, 2])
# ensure works with extra class
mlb = MultiLabelBinarizer(classes=[4, 1, 3, 2])
assert_array_equal(mlb.fit_transform(inp),
np.hstack(([[0], [0], [0]], indicator_mat)))
assert_array_equal(mlb.classes_, [4, 1, 3, 2])
# ensure fit is no-op as iterable is not consumed
inp = iter(inp)
mlb = MultiLabelBinarizer(classes=[1, 3, 2])
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
def test_multilabel_binarizer_same_length_sequence():
# Ensure sequences of the same length are not interpreted as a 2-d array
inp = [[1], [0], [2]]
indicator_mat = np.array([[0, 1, 0],
[1, 0, 0],
[0, 0, 1]])
# fit_transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
# fit().transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
def test_multilabel_binarizer_non_integer_labels():
tuple_classes = np.empty(3, dtype=object)
tuple_classes[:] = [(1,), (2,), (3,)]
inputs = [
([('2', '3'), ('1',), ('1', '2')], ['1', '2', '3']),
([('b', 'c'), ('a',), ('a', 'b')], ['a', 'b', 'c']),
([((2,), (3,)), ((1,),), ((1,), (2,))], tuple_classes),
]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 1, 0]])
for inp, classes in inputs:
# fit_transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, classes)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
# fit().transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, classes)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
mlb = MultiLabelBinarizer()
assert_raises(TypeError, mlb.fit_transform, [({}), ({}, {'a': 'b'})])
def test_multilabel_binarizer_non_unique():
inp = [(1, 1, 1, 0)]
indicator_mat = np.array([[1, 1]])
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
def test_multilabel_binarizer_inverse_validation():
inp = [(1, 1, 1, 0)]
mlb = MultiLabelBinarizer()
mlb.fit_transform(inp)
# Not binary
assert_raises(ValueError, mlb.inverse_transform, np.array([[1, 3]]))
# The following binary cases are fine, however
mlb.inverse_transform(np.array([[0, 0]]))
mlb.inverse_transform(np.array([[1, 1]]))
mlb.inverse_transform(np.array([[1, 0]]))
# Wrong shape
assert_raises(ValueError, mlb.inverse_transform, np.array([[1]]))
assert_raises(ValueError, mlb.inverse_transform, np.array([[1, 1, 1]]))
def test_label_binarize_with_class_order():
out = label_binarize([1, 6], classes=[1, 2, 4, 6])
expected = np.array([[1, 0, 0, 0], [0, 0, 0, 1]])
assert_array_equal(out, expected)
# Modified class order
out = label_binarize([1, 6], classes=[1, 6, 4, 2])
expected = np.array([[1, 0, 0, 0], [0, 1, 0, 0]])
assert_array_equal(out, expected)
out = label_binarize([0, 1, 2, 3], classes=[3, 2, 0, 1])
expected = np.array([[0, 0, 1, 0],
[0, 0, 0, 1],
[0, 1, 0, 0],
[1, 0, 0, 0]])
assert_array_equal(out, expected)
def check_binarized_results(y, classes, pos_label, neg_label, expected):
for sparse_output in [True, False]:
if ((pos_label == 0 or neg_label != 0) and sparse_output):
assert_raises(ValueError, label_binarize, y, classes,
neg_label=neg_label, pos_label=pos_label,
sparse_output=sparse_output)
continue
# check label_binarize
binarized = label_binarize(y, classes, neg_label=neg_label,
pos_label=pos_label,
sparse_output=sparse_output)
assert_array_equal(toarray(binarized), expected)
assert_equal(issparse(binarized), sparse_output)
# check inverse
y_type = type_of_target(y)
if y_type == "multiclass":
inversed = _inverse_binarize_multiclass(binarized, classes=classes)
else:
inversed = _inverse_binarize_thresholding(binarized,
output_type=y_type,
classes=classes,
threshold=((neg_label +
pos_label) /
2.))
assert_array_equal(toarray(inversed), toarray(y))
# Check label binarizer
lb = LabelBinarizer(neg_label=neg_label, pos_label=pos_label,
sparse_output=sparse_output)
binarized = lb.fit_transform(y)
assert_array_equal(toarray(binarized), expected)
assert_equal(issparse(binarized), sparse_output)
inverse_output = lb.inverse_transform(binarized)
assert_array_equal(toarray(inverse_output), toarray(y))
assert_equal(issparse(inverse_output), issparse(y))
def test_label_binarize_binary():
y = [0, 1, 0]
classes = [0, 1]
pos_label = 2
neg_label = -1
expected = np.array([[2, -1], [-1, 2], [2, -1]])[:, 1].reshape((-1, 1))
yield check_binarized_results, y, classes, pos_label, neg_label, expected
# Binary case where sparse_output = True will not result in a ValueError
y = [0, 1, 0]
classes = [0, 1]
pos_label = 3
neg_label = 0
expected = np.array([[3, 0], [0, 3], [3, 0]])[:, 1].reshape((-1, 1))
yield check_binarized_results, y, classes, pos_label, neg_label, expected
def test_label_binarize_multiclass():
y = [0, 1, 2]
classes = [0, 1, 2]
pos_label = 2
neg_label = 0
expected = 2 * np.eye(3)
yield check_binarized_results, y, classes, pos_label, neg_label, expected
assert_raises(ValueError, label_binarize, y, classes, neg_label=-1,
pos_label=pos_label, sparse_output=True)
def test_label_binarize_multilabel():
y_ind = np.array([[0, 1, 0], [1, 1, 1], [0, 0, 0]])
classes = [0, 1, 2]
pos_label = 2
neg_label = 0
expected = pos_label * y_ind
y_sparse = [sparse_matrix(y_ind)
for sparse_matrix in [coo_matrix, csc_matrix, csr_matrix,
dok_matrix, lil_matrix]]
for y in [y_ind] + y_sparse:
yield (check_binarized_results, y, classes, pos_label, neg_label,
expected)
assert_raises(ValueError, label_binarize, y, classes, neg_label=-1,
pos_label=pos_label, sparse_output=True)
def test_invalid_input_label_binarize():
assert_raises(ValueError, label_binarize, [0, 2], classes=[0, 2],
pos_label=0, neg_label=1)
def test_inverse_binarize_multiclass():
got = _inverse_binarize_multiclass(csr_matrix([[0, 1, 0],
[-1, 0, -1],
[0, 0, 0]]),
np.arange(3))
assert_array_equal(got, np.array([1, 1, 0]))
| bsd-3-clause |
czhengsci/veidt | veidt/utils/data_selection.py | 1 | 4503 | # coding: utf-8
# Copyright (c) Materials Virtual Lab
# Distributed under the terms of the BSD License.
from __future__ import division, print_function, unicode_literals, \
absolute_import
import random
import numpy as np
import pandas as pd
from copy import copy
from pymatgen import Structure
class MonteCarloSampler(object):
"""
    Sample a subset of the dataset that satisfies some criterion, using
    simulated annealing.
    For example, one may need to select a subset such that a small fraction
    of the data already covers a large part of the feature space,
    i.e., maximizes the pairwise distances.
"""
def __init__(self, datasets, num_samples, cost_function):
"""
Sample a subset with size num_samples from datasets
to minimize the cost function.
Args:
datasets (numpy.array): The total datasets.
num_samples (int): Number of samples from the data.
            cost_function (function): Function that takes in
                a subset of the data and calculates a cost.
"""
self.datasets = datasets
self.num_samples = num_samples
self.cost_function = cost_function
self.num_total = len(datasets)
self.num_remain = self.num_total - num_samples
self.index_selected = list(np.random.choice(
self.num_total, num_samples, replace=False))
self._get_remain_index()
self.cost = self.compute_cost(self.datasets[self.index_selected, :])
self.accepted = 0
self.rejected = 0
self.cost_history = []
self.cost_history.append(self.cost)
def _get_remain_index(self):
self.index_remain = sorted(list(set(range(self.num_total)) -
set(self.index_selected)))
def compute_cost(self, data_subset):
"""
Compute the cost of data subsets.
Args:
data_subset (numpy.array): Data subset.
"""
return self.cost_function(data_subset)
def sample(self, num_attempts, t_init, t_final):
"""
        Metropolis sampler. For every sampling attempt, one selected data
        entry is swapped with one from the data reservoir and the resulting
        change in cost (energy difference) dE is evaluated. If dE < 0, the
        swap is accepted. If dE > 0, it is accepted with probability
        exp(-dE / T), where T is an artificial temperature. We can start with
        a relatively large T and then reduce it as the sampling proceeds.
Args:
num_attempts (int): Number of sampling attempts.
t_init (float): Initial temperature.
t_final (float): Final temperature.
"""
temperatures = np.linspace(t_init, t_final, num_attempts)
for i in range(num_attempts):
temperature = temperatures[i]
index = random.choice(self.index_selected)
index_remain = random.choice(self.index_remain)
self.update(index, index_remain, temperature)
self.cost_history.append(self.cost)
def update(self, index, index_remain, temperature):
"""
Implement the data swap, if it is accepted.
Args:
index (int): The index of selected feature matrix
used for swapping.
index_remain (int): The index of remaining feature matrix
used for swapping.
temperature (float): Artificial temperature.
"""
new_selected = copy(self.index_selected)
new_selected.remove(index)
new_selected.append(index_remain)
cost_after_swap = self.compute_cost(self.datasets[new_selected, :])
d_cost = cost_after_swap - self.cost
accept = self.decision(d_cost, temperature)
if accept:
self.index_selected = copy(new_selected)
self._get_remain_index()
self.cost = cost_after_swap
else:
pass
def decision(self, d_cost, temperature):
"""
Decision on accepting the data swap.
Args:
            d_cost (float): Change in cost for the proposed move.
temperature (float): Temperature.
"""
if d_cost < 0:
self.accepted += 1
return True
else:
p = np.exp(-d_cost / temperature)
p2 = np.random.rand(1)
if p2 < p:
self.accepted += 1
return True
else:
self.rejected += 1
return False | bsd-3-clause |
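# Illustrative usage sketch for the MonteCarloSampler class above. It is not
# part of the original module: the data, the number of samples and the cost
# function below are hypothetical choices. The cost returns the negative of
# the smallest pairwise distance, so minimizing it spreads the selected
# points over the feature space.
import numpy as np
from scipy.spatial.distance import pdist
# from veidt.utils.data_selection import MonteCarloSampler  # if installed as a package

def neg_min_pairwise_distance(subset):
    # subset is a (num_samples, num_features) slice of the dataset
    return -np.min(pdist(subset))

rng = np.random.RandomState(42)
data = rng.rand(200, 3)  # 200 points in a 3-dimensional feature space
sampler = MonteCarloSampler(data, num_samples=20,
                            cost_function=neg_min_pairwise_distance)
sampler.sample(num_attempts=500, t_init=1.0, t_final=1e-3)
print(sampler.index_selected)      # indices of the chosen subset
print(sampler.cost_history[-1])    # cost after the last attempted move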
to266/hyperspy | hyperspy/drawing/marker.py | 2 | 4900 | # -*- coding: utf-8 -*-
# Copyright 2007-2016 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
import matplotlib.pyplot as plt
from hyperspy.events import Event, Events
class MarkerBase(object):
"""Marker that can be added to the signal figure
Attributes
----------
marker_properties : dictionary
        Accepts a dictionary of valid (i.e. recognized by mpl.plot)
        line properties. In addition it understands
the keyword `type` that can take the following values:
{'line', 'text'}
"""
def __init__(self):
# Data attributes
self.data = None
self.axes_manager = None
self.ax = None
self.auto_update = True
# Properties
self.marker = None
self._marker_properties = {}
# Events
self.events = Events()
self.events.closed = Event("""
Event triggered when a marker is closed.
Arguments
---------
marker : Marker
The marker that was closed.
""", arguments=['obj'])
self._closing = False
@property
def marker_properties(self):
return self._marker_properties
@marker_properties.setter
def marker_properties(self, kwargs):
for key, item in kwargs.items():
if item is None and key in self._marker_properties:
del self._marker_properties[key]
else:
self._marker_properties[key] = item
if self.marker is not None:
plt.setp(self.marker, **self.marker_properties)
try:
# self.ax.figure.canvas.draw()
self.ax.hspy_fig._draw_animated()
except:
pass
def set_marker_properties(self, **kwargs):
"""
        Set the marker_properties attribute using keyword
arguments.
"""
self.marker_properties = kwargs
def set_data(self, x1=None, y1=None,
x2=None, y2=None, text=None, size=None):
"""
Set data to the structured array. Each field of data should have
        the same dimensions as the navigation axes. The other fields are
overwritten.
"""
self.data = np.array((np.array(x1), np.array(y1),
np.array(x2), np.array(y2),
np.array(text), np.array(size)),
dtype=[('x1', object), ('y1', object),
('x2', object), ('y2', object),
('text', object), ('size', object)])
self._is_marker_static()
def add_data(self, **kwargs):
"""
Add data to the structured array. Each field of data should have
        the same dimensions as the navigation axes. The other fields are
not changed.
"""
if self.data is None:
self.set_data(**kwargs)
else:
for key in kwargs.keys():
self.data[key][()] = np.array(kwargs[key])
self._is_marker_static()
def isiterable(self, obj):
return not isinstance(obj, (str, bytes)) and hasattr(obj, '__iter__')
def _is_marker_static(self):
test = [self.isiterable(self.data[key].item()[()]) is False
for key in self.data.dtype.names]
if np.alltrue(test):
self.auto_update = False
else:
self.auto_update = True
def get_data_position(self, ind):
data = self.data
if data[ind].item()[()] is None:
return None
elif self.isiterable(data[ind].item()[()]) and self.auto_update:
indices = self.axes_manager.indices[::-1]
return data[ind].item()[indices]
else:
return data[ind].item()[()]
def close(self):
if self._closing:
return
self._closing = True
try:
self.marker.remove()
self.events.closed.trigger(obj=self)
for f in self.events.closed.connected:
self.events.closed.disconnect(f)
# m.ax.figure.canvas.draw()
self.ax.hspy_fig._draw_animated()
except:
pass
| gpl-3.0 |
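# Illustrative sketch for the MarkerBase class above (not part of HyperSpy).
# It shows how set_data packs the marker coordinates into a 0-d structured
# array and switches auto_update off when every field is a scalar. Only the
# static case is shown, because resolving a position per navigation index
# would require a real AxesManager.
# from hyperspy.drawing.marker import MarkerBase  # if HyperSpy is installed
m = MarkerBase()
m.set_data(x1=1.5, y1=2.5, text='peak', size=20)
print(m.data.dtype.names)         # ('x1', 'y1', 'x2', 'y2', 'text', 'size')
print(m.auto_update)              # False: every field is a scalar (static marker)
print(m.get_data_position('x1'))  # 1.5
print(m.get_data_position('x2'))  # None, because x2 was never set
# Passing arrays instead, e.g. m.set_data(x1=np.arange(10), y1=np.zeros(10)),
# keeps auto_update True so the marker follows the navigation indices.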
JohanComparat/pySU | spm/bin_spiders/spiders_last_burst_vs_radius.py | 1 | 5578 | import astropy.cosmology as co
aa=co.Planck15
import astropy.io.fits as fits
import astropy.units as u
from astropy.coordinates import angles
#import AngularSeparation
from astropy import coordinates as coord
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as p
import numpy as n
import os
import sys
import ClusterScalingRelations as clsr
from scipy.interpolate import interp1d
import StellarMass as sm
smhmr = sm.StellarMass()
scl = clsr.ClusterScalingRelations_Mantz2016()
cat = fits.open(os.path.join(os.environ['DATA_DIR'], 'spiders', 'cluster', 'validatedclusters_catalogue_2016-07-04-DR14_version_round1-v4_Xmass-v1.fits.gz'))[1].data
spm = fits.open(os.path.join(os.environ['DATA_DIR'], 'spiders', 'cluster', 'validatedclusters_catalogue_2016-07-04-DR14_version_round1-v4_Xmass-v1_spm.fits'))[1].data
volume_rough = aa.comoving_volume(0.5)*2200.*n.pi/129600
volume = volume_rough.value
# get cluster center
# distance to center
# rescale to r200c_deg
# get the latest min(ages) of the ssp
# compute SFR
# now looks at individual galaxies
# and gets the highest SFR for each galaxy
# youngest age
highest_sfrs = []
youngest_ages = []
sep_r200c = []
for cc in cat:
center = coord.ICRS(ra=cc['RA_OPT']*u.degree, dec=cc['DEC_OPT']*u.degree)
gal = (spm['CLUS_ID']==cc['CLUS_ID'])
#all_members = coord.ICRS()
#separations = center.separation(all_members)/(cc['R200C_DEG']*u.degree)).value
for id_cc, (pla, mjd, fib) in enumerate(zip(cc['ALLPLATE'][:len(gal.nonzero()[0])], cc['ALLMJD'][:len(gal.nonzero()[0])], cc['ALLFIBERID'][:len(gal.nonzero()[0])])):
sel = (gal) & (spm['PLATE']==pla) & (spm['MJD']==mjd) & (spm['FIBERID']==fib)
if len(sel.nonzero()[0])>0 :
n_cp = spm['Chabrier_MILES_nComponentsSSP'][sel].astype('int')[0]
if n_cp > 0 :
all_ages = n.array([ spm['Chabrier_MILES_age_ssp_'+str(ii)][sel][0] for ii in n.arange(n_cp) ])
all_masses = n.array([ spm['Chabrier_MILES_stellar_mass_ssp_'+str(ii)][sel][0] for ii in n.arange(n_cp) ])
sfr_inst = all_masses / all_ages
youngest_ages.append(n.min(all_ages))
highest_sfrs.append(n.max(sfr_inst))
position = coord.ICRS(cc['ALLRA'][id_cc]*u.degree, cc['ALLDEC'][id_cc]*u.degree)
sep_r200c.append( (center.separation(position)/(cc['R200C_DEG']*u.degree)).value )
highest_sfrs = n.array(highest_sfrs)
youngest_ages = n.array(youngest_ages)
sep_r200c = n.array(sep_r200c)
p.figure(1, (5,5))
p.title('SPIDERS')
p.plot(sep_r200c, highest_sfrs, 'r+')
p.xlabel('r/r200c')
p.ylabel('SFR [Msun/yr]')
#p.xscale('log')
p.yscale('log')
p.xlim((0.08,1.5))
p.grid()
p.savefig(os.path.join(os.environ['DATA_DIR'], 'spiders', 'cluster', 'disteance-2-center-SFR.png'))
p.clf()
dx = ( n.max(sep_r200c) - n.min(sep_r200c) ) /3.
r_b = n.arange(n.min(sep_r200c), n.max(sep_r200c) + dx, dx)
p.figure(1, (5,5))
for ii,bb in enumerate(r_b[:-1]):
sub = (sep_r200c>bb)&(sep_r200c<r_b[ii+1])
p.hist(highest_sfrs[sub], label=str(n.round(bb,3))+"<"+str(n.round(r_b[ii+1],3)), cumulative=True, normed=True, histtype='step')
p.ylabel('normed cumulative distribution')
p.xlabel('SFR [Msun/yr]')
p.xscale('log')
p.ylim((-0.01, 1.01))
p.grid()
p.legend(frameon=False, loc=0)
p.savefig(os.path.join(os.environ['DATA_DIR'], 'spiders', 'cluster', 'disteance-2-center-SFR-histograms.png'))
p.clf()
p.figure(1, (5,5))
p.title('SPIDERS')
p.plot(sep_r200c, youngest_ages, 'r+')
p.xlabel('r/r200c')
p.ylabel('age [yr]')
p.xscale('log')
p.yscale('log')
p.xlim((0.1,5))
p.grid()
p.savefig(os.path.join(os.environ['DATA_DIR'], 'spiders', 'cluster', 'disteance-2-center-AGE.png'))
p.clf()
p.figure(1, (5,5))
p.title('SPIDERS DR14 galaxies')
p.plot(spm['Z'], spm["Chabrier_MILES_stellar_mass"], 'b,', label='targets')
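# NOTE: z and y (the cluster members' redshifts and stellar masses) are never
# defined in this script as it stands, so the next line raises a NameError
# unless those arrays are collected in the loop over the catalogue above.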
p.plot(z, y, 'r,', label='cluster members')
p.xlabel('redshift')
p.ylabel('stellar mass [Msun]')
#p.xscale('log')
p.yscale('log')
p.xlim((0,0.7))
p.ylim((1e9,1e12))
p.grid()
p.legend(frameon=False, loc=0)
p.savefig(os.path.join(os.environ['DATA_DIR'], 'spiders', 'cluster', 'redshift-mass.png'))
p.clf()
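# NOTE: the remainder of this script relies on m2x, x and m200c, none of which
# are defined above; it appears to be carried over from another analysis and
# will raise a NameError if executed as-is.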
logm2x = n.hstack((m2x))
bins=n.arange(-7, 0.5, 0.1)
basis = (n.isnan(logm2x)==False)&(logm2x != -n.inf)&(logm2x != n.inf)
arbitrary_factor =5.
p.figure(1, (5,5))
ok = (basis)&(x>1e44)
out = n.log10(n.histogram(logm2x[ok], bins=bins)[0])
p.plot((bins[1:]+bins[:-1])/2., n.log10(out/arbitrary_factor), label='LX>44')
ok = (basis)&(x>10**44.5)
out = n.log10(n.histogram(logm2x[ok], bins=bins)[0])
p.plot((bins[1:]+bins[:-1])/2., n.log10(out/arbitrary_factor), label='LX>44.5')
ok = (basis)&(x>1e45)
out = n.log10(n.histogram(logm2x[ok], bins=bins)[0])
p.plot((bins[1:]+bins[:-1])/2., n.log10(out/arbitrary_factor), label='LX>45')
ok = (basis)&(m200c>10**14)
out = n.log10(n.histogram(logm2x[ok], bins=bins)[0])
p.plot((bins[1:]+bins[:-1])/2., n.log10(out/arbitrary_factor), label='M200c>14', ls='dashed')
ok = (basis)&(m200c>10**15)
out = n.log10(n.histogram(logm2x[ok], bins=bins)[0])
p.plot((bins[1:]+bins[:-1])/2., n.log10(out/arbitrary_factor), label='M200c>15', ls='dashed')
xs = n.arange(-7, 0.01, 0.01)
logfsat= lambda logxi, a, b, logN0, exponent : n.log10( 10**logN0 * (10**logxi)**a)# * n.e**(-b*(10**logxi)**exponent))
p.plot(xs, logfsat(xs, -0.81, 5.81, -2.25, -2.54), label='-0.81')
p.plot(xs, logfsat(xs, -0.18, 5.81, -1.2, -.54), label='-0.18')
p.xlabel('log10(SMHMR(stellar mass) / HaloMass(Lx ray))')
p.ylabel('histogram')
#p.xscale('log')
#p.yscale('log')
p.ylim((-1.5, 0.5))
p.xlim((-4,0))
p.grid()
p.legend(frameon=False, loc=0)
p.savefig(os.path.join(os.environ['DATA_DIR'], 'spiders', 'cluster', 'LX-mass-histogram.png'))
p.clf()
| cc0-1.0 |
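# Toy illustration (not part of the original script) of the per-galaxy
# reduction performed in the loop above: each SSP component contributes an
# instantaneous SFR equal to its stellar mass divided by its age, and the
# script keeps the largest SFR together with the youngest component age.
import numpy as n
all_ages = n.array([1e8, 5e8, 2e9])      # yr, hypothetical SSP ages
all_masses = n.array([5e8, 8e9, 2e10])   # Msun, hypothetical SSP masses
sfr_inst = all_masses / all_ages         # Msun/yr for each component
print(n.max(sfr_inst), n.min(all_ages))  # -> 16.0 100000000.0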
ryanpbrewster/SciVis-2015 | examples/sdf_example.py | 1 | 2689 | """
This example is from http://darksky.slac.stanford.edu/scivis2015/examples.html
"""
from sdfpy import load_sdf
from thingking import loadtxt
prefix = "../data/"
# Load N-body particles from a = 1.0 dataset. Particles have positions with
# units of proper kpc, and velocities with units of km/s.
particles = load_sdf(prefix+"ds14_scivis_0128_e4_dt04_1.0000")
# Load the a=1 Rockstar hlist file. The header of the file lists the useful
# units/information.
scale, id, desc_scale, desc_id, num_prog, pid, upid, desc_pid, phantom, \
sam_mvir, mvir, rvir, rs, vrms, mmp, scale_of_last_MM, vmax, x, y, z, \
vx, vy, vz, Jx, Jy, Jz, Spin, Breadth_first_ID, Depth_first_ID, \
Tree_root_ID, Orig_halo_ID, Snap_num, Next_coprogenitor_depthfirst_ID, \
Last_progenitor_depthfirst_ID, Rs_Klypin, M_all, M200b, M200c, M500c, \
M2500c, Xoff, Voff, Spin_Bullock, b_to_a, c_to_a, A_x, A_y, A_z, \
b_to_a_500c, c_to_a_500c, A_x_500c, A_y_500c, A_z_500c, T_over_U, \
M_pe_Behroozi, M_pe_Diemer, Macc, Mpeak, Vacc, Vpeak, Halfmass_Scale, \
Acc_Rate_Inst, Acc_Rate_100Myr, Acc_Rate_Tdyn = \
loadtxt(prefix+"rockstar/hlists/hlist_1.00000.list", unpack=True)
# Now we want to convert the proper kpc of the particle position to comoving
# Mpc/h, a common unit used in computational cosmology in general, but
# specifically is used as the output unit in the merger tree halo list loaded
# in above. First we get the Hubble parameter, here stored as 'h_100' in the
# SDF parameters. Then we load the simulation width, L0, which is also in
# proper kpc. Finally we load the scale factor, a, which for this particular
# snapshot is equal to 1 since we are loading the final snapshot from the
# simulation.
h_100 = particles.parameters['h_100']
width = particles.parameters['L0']
cosmo_a = particles.parameters['a']
kpc_to_Mpc = 1./1000
sl = slice(0,None)
# Define a simple function to convert proper to comoving Mpc/h.
convert_to_cMpc = lambda proper: (proper + width/2.) * h_100 * kpc_to_Mpc / cosmo_a
# Plot all the particles, adding a bit of alpha so that we see the density of
# points.
import matplotlib.pylab as pl
pl.figure(figsize=[10,10])
pl.scatter(convert_to_cMpc(particles['x'][sl]),
convert_to_cMpc(particles['y'][sl]), color='b', s=1.0, alpha=0.05)
# Plot all the halos in red.
pl.scatter(x, y, color='r', alpha=0.1)
# Add some labels
pl.xlabel('x [cMpc/h]')
pl.ylabel('y [cMpc/h]')
pl.savefig("halos_and_particles.png", bbox_inches='tight')
# Could now consider coloring halos by any of the various quantities above.
# Perhaps mvir would be nice to show the virial Mass of the halo, or we could
# scale the points by the virial radius, rvir.
| mit |
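# Worked example of the proper-kpc to comoving-Mpc/h conversion used above.
# It is illustrative only: the parameter values are made up rather than read
# from the ds14 SDF headers.
h_100 = 0.7          # hypothetical Hubble parameter H0 / (100 km/s/Mpc)
width = 100000.0     # hypothetical box width in proper kpc
cosmo_a = 1.0        # scale factor of the snapshot
kpc_to_Mpc = 1. / 1000

def to_comoving_Mpc_h(proper_kpc):
    # shift the origin to the box corner, convert kpc -> Mpc, multiply by h
    # to get Mpc/h, and divide by the scale factor to make it comoving
    return (proper_kpc + width / 2.) * h_100 * kpc_to_Mpc / cosmo_a

print(to_comoving_Mpc_h(0.0))          # box centre -> 35.0 cMpc/h
print(to_comoving_Mpc_h(-width / 2.))  # box edge   -> 0.0 cMpc/h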
valexandersaulys/prudential_insurance_kaggle | venv/lib/python2.7/site-packages/pandas/__init__.py | 9 | 2140 | # pylint: disable-msg=W0614,W0401,W0611,W0622
__docformat__ = 'restructuredtext'
try:
from pandas import hashtable, tslib, lib
except ImportError as e: # pragma: no cover
module = str(e).lstrip('cannot import name ') # hack but overkill to use re
raise ImportError("C extension: {0} not built. If you want to import "
"pandas from the source directory, you may need to run "
"'python setup.py build_ext --inplace' to build the C "
"extensions first.".format(module))
from datetime import datetime
import numpy as np
# XXX: HACK for NumPy 1.5.1 to suppress warnings
try:
np.seterr(all='ignore')
except Exception: # pragma: no cover
pass
# numpy versioning
from distutils.version import LooseVersion
_np_version = np.version.short_version
_np_version_under1p8 = LooseVersion(_np_version) < '1.8'
_np_version_under1p9 = LooseVersion(_np_version) < '1.9'
from pandas.info import __doc__
if LooseVersion(_np_version) < '1.7.0':
raise ImportError('pandas {0} is incompatible with numpy < 1.7.0, '
'your numpy version is {1}. Please upgrade numpy to'
' >= 1.7.0 to use pandas version {0}'.format(__version__,
_np_version))
# let init-time option registration happen
import pandas.core.config_init
from pandas.core.api import *
from pandas.sparse.api import *
from pandas.stats.api import *
from pandas.tseries.api import *
from pandas.io.api import *
from pandas.computation.api import *
from pandas.tools.merge import merge, concat, ordered_merge
from pandas.tools.pivot import pivot_table, crosstab
from pandas.tools.plotting import scatter_matrix, plot_params
from pandas.tools.tile import cut, qcut
from pandas.tools.util import to_numeric
from pandas.core.reshape import melt
from pandas.util.print_versions import show_versions
import pandas.util.testing
# use the closest tagged version if possible
from ._version import get_versions
v = get_versions()
__version__ = v.get('closest-tag',v['version'])
del get_versions, v
| gpl-2.0 |
harisbal/pandas | pandas/tests/arrays/categorical/test_missing.py | 1 | 3078 | # -*- coding: utf-8 -*-
import collections
import numpy as np
import pytest
from pandas.compat import lrange
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas import Categorical, Index, isna
import pandas.util.testing as tm
class TestCategoricalMissing(object):
def test_na_flags_int_categories(self):
# #1457
categories = lrange(10)
labels = np.random.randint(0, 10, 20)
labels[::5] = -1
cat = Categorical(labels, categories, fastpath=True)
repr(cat)
tm.assert_numpy_array_equal(isna(cat), labels == -1)
def test_nan_handling(self):
# Nans are represented as -1 in codes
c = Categorical(["a", "b", np.nan, "a"])
tm.assert_index_equal(c.categories, Index(["a", "b"]))
tm.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0],
dtype=np.int8))
c[1] = np.nan
tm.assert_index_equal(c.categories, Index(["a", "b"]))
tm.assert_numpy_array_equal(c._codes, np.array([0, -1, -1, 0],
dtype=np.int8))
# Adding nan to categories should make assigned nan point to the
# category!
c = Categorical(["a", "b", np.nan, "a"])
tm.assert_index_equal(c.categories, Index(["a", "b"]))
tm.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0],
dtype=np.int8))
def test_set_dtype_nans(self):
c = Categorical(['a', 'b', np.nan])
result = c._set_dtype(CategoricalDtype(['a', 'c']))
tm.assert_numpy_array_equal(result.codes, np.array([0, -1, -1],
dtype='int8'))
def test_set_item_nan(self):
cat = Categorical([1, 2, 3])
cat[1] = np.nan
exp = Categorical([1, np.nan, 3], categories=[1, 2, 3])
tm.assert_categorical_equal(cat, exp)
@pytest.mark.parametrize('fillna_kwargs, msg', [
(dict(value=1, method='ffill'),
"Cannot specify both 'value' and 'method'."),
(dict(),
"Must specify a fill 'value' or 'method'."),
(dict(method='bad'),
"Invalid fill method. Expecting .* bad"),
])
def test_fillna_raises(self, fillna_kwargs, msg):
# https://github.com/pandas-dev/pandas/issues/19682
cat = Categorical([1, 2, 3])
with tm.assert_raises_regex(ValueError, msg):
cat.fillna(**fillna_kwargs)
@pytest.mark.parametrize("named", [True, False])
def test_fillna_iterable_category(self, named):
# https://github.com/pandas-dev/pandas/issues/21097
if named:
Point = collections.namedtuple("Point", "x y")
else:
Point = lambda *args: args # tuple
cat = Categorical([Point(0, 0), Point(0, 1), None])
result = cat.fillna(Point(0, 0))
expected = Categorical([Point(0, 0), Point(0, 1), Point(0, 0)])
tm.assert_categorical_equal(result, expected)
| bsd-3-clause |
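# Small illustration (not one of the tests above): pandas stores a missing
# value in a Categorical as the sentinel code -1, and NaN never becomes one
# of the categories.
import numpy as np
from pandas import Categorical
cat = Categorical(["a", "b", np.nan])
print(cat.codes.tolist())       # [0, 1, -1]
print(list(cat.categories))     # ['a', 'b']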
justincassidy/scikit-learn | examples/covariance/plot_outlier_detection.py | 235 | 3891 | """
==========================================
Outlier detection with several methods.
==========================================
When the amount of contamination is known, this example illustrates two
different ways of performing :ref:`outlier_detection`:
- based on a robust estimator of covariance, which is assuming that the
data are Gaussian distributed and performs better than the One-Class SVM
in that case.
- using the One-Class SVM and its ability to capture the shape of the
data set, hence performing better when the data is strongly
non-Gaussian, i.e. with two well-separated clusters;
The ground truth about inliers and outliers is given by the colors of the points,
while the orange-filled area indicates which points are reported as inliers
by each method.
Here, we assume that we know the fraction of outliers in the datasets.
Thus rather than using the 'predict' method of the objects, we set the
threshold on the decision_function to separate out the corresponding
fraction.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from scipy import stats
from sklearn import svm
from sklearn.covariance import EllipticEnvelope
# Example settings
n_samples = 200
outliers_fraction = 0.25
clusters_separation = [0, 1, 2]
# define two outlier detection tools to be compared
classifiers = {
"One-Class SVM": svm.OneClassSVM(nu=0.95 * outliers_fraction + 0.05,
kernel="rbf", gamma=0.1),
"robust covariance estimator": EllipticEnvelope(contamination=.1)}
# Compare given classifiers under given settings
xx, yy = np.meshgrid(np.linspace(-7, 7, 500), np.linspace(-7, 7, 500))
n_inliers = int((1. - outliers_fraction) * n_samples)
n_outliers = int(outliers_fraction * n_samples)
ground_truth = np.ones(n_samples, dtype=int)
ground_truth[-n_outliers:] = 0
# Fit the problem with varying cluster separation
for i, offset in enumerate(clusters_separation):
np.random.seed(42)
# Data generation
    X1 = 0.3 * np.random.randn(n_inliers // 2, 2) - offset
    X2 = 0.3 * np.random.randn(n_inliers // 2, 2) + offset
X = np.r_[X1, X2]
# Add outliers
X = np.r_[X, np.random.uniform(low=-6, high=6, size=(n_outliers, 2))]
# Fit the model with the One-Class SVM
plt.figure(figsize=(10, 5))
for i, (clf_name, clf) in enumerate(classifiers.items()):
# fit the data and tag outliers
clf.fit(X)
y_pred = clf.decision_function(X).ravel()
threshold = stats.scoreatpercentile(y_pred,
100 * outliers_fraction)
y_pred = y_pred > threshold
n_errors = (y_pred != ground_truth).sum()
# plot the levels lines and the points
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
subplot = plt.subplot(1, 2, i + 1)
subplot.set_title("Outlier detection")
subplot.contourf(xx, yy, Z, levels=np.linspace(Z.min(), threshold, 7),
cmap=plt.cm.Blues_r)
a = subplot.contour(xx, yy, Z, levels=[threshold],
linewidths=2, colors='red')
subplot.contourf(xx, yy, Z, levels=[threshold, Z.max()],
colors='orange')
b = subplot.scatter(X[:-n_outliers, 0], X[:-n_outliers, 1], c='white')
c = subplot.scatter(X[-n_outliers:, 0], X[-n_outliers:, 1], c='black')
subplot.axis('tight')
subplot.legend(
[a.collections[0], b, c],
['learned decision function', 'true inliers', 'true outliers'],
prop=matplotlib.font_manager.FontProperties(size=11))
subplot.set_xlabel("%d. %s (errors: %d)" % (i + 1, clf_name, n_errors))
subplot.set_xlim((-7, 7))
subplot.set_ylim((-7, 7))
plt.subplots_adjust(0.04, 0.1, 0.96, 0.94, 0.1, 0.26)
plt.show()
| bsd-3-clause |
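# Minimal sketch of the thresholding idea described in the docstring above
# (illustrative, not part of the original example): instead of calling
# predict, keep the known fraction of points with the lowest
# decision_function values as outliers.
import numpy as np
from scipy import stats
from sklearn.covariance import EllipticEnvelope

rng = np.random.RandomState(0)
X = np.r_[0.3 * rng.randn(90, 2), rng.uniform(-6, 6, size=(10, 2))]
outliers_fraction = 0.1

clf = EllipticEnvelope(contamination=outliers_fraction).fit(X)
scores = clf.decision_function(X).ravel()
threshold = stats.scoreatpercentile(scores, 100 * outliers_fraction)
is_inlier = scores > threshold
print(is_inlier.sum())  # roughly 90 points are kept as inliers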
b-carter/numpy | numpy/core/tests/test_multiarray.py | 1 | 260744 | from __future__ import division, absolute_import, print_function
import collections
import tempfile
import sys
import shutil
import warnings
import operator
import io
import itertools
import functools
import ctypes
import os
import gc
from contextlib import contextmanager
if sys.version_info[0] >= 3:
import builtins
else:
import __builtin__ as builtins
from decimal import Decimal
import numpy as np
from numpy.compat import strchar, unicode
from .test_print import in_foreign_locale
from numpy.core.multiarray_tests import (
test_neighborhood_iterator, test_neighborhood_iterator_oob,
test_pydatamem_seteventhook_start, test_pydatamem_seteventhook_end,
test_inplace_increment, get_buffer_info, test_as_c_array,
)
from numpy.testing import (
run_module_suite, assert_, assert_raises, assert_warns,
assert_equal, assert_almost_equal, assert_array_equal, assert_raises_regex,
assert_array_almost_equal, assert_allclose, IS_PYPY, HAS_REFCOUNT,
assert_array_less, runstring, dec, SkipTest, temppath, suppress_warnings
)
# Need to test an object that does not fully implement math interface
from datetime import timedelta, datetime
if sys.version_info[:2] > (3, 2):
# In Python 3.3 the representation of empty shape, strides and sub-offsets
# is an empty tuple instead of None.
# http://docs.python.org/dev/whatsnew/3.3.html#api-changes
EMPTY = ()
else:
EMPTY = None
def _aligned_zeros(shape, dtype=float, order="C", align=None):
"""Allocate a new ndarray with aligned memory."""
dtype = np.dtype(dtype)
if dtype == np.dtype(object):
# Can't do this, fall back to standard allocation (which
# should always be sufficiently aligned)
if align is not None:
raise ValueError("object array alignment not supported")
return np.zeros(shape, dtype=dtype, order=order)
if align is None:
align = dtype.alignment
if not hasattr(shape, '__len__'):
shape = (shape,)
size = functools.reduce(operator.mul, shape) * dtype.itemsize
buf = np.empty(size + align + 1, np.uint8)
offset = buf.__array_interface__['data'][0] % align
if offset != 0:
offset = align - offset
# Note: slices producing 0-size arrays do not necessarily change
# data pointer --- so we use and allocate size+1
buf = buf[offset:offset+size+1][:-1]
data = np.ndarray(shape, dtype, buf, order=order)
data.fill(0)
return data
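def _aligned_zeros_example():
    # Illustrative usage of the helper above (not one of the original tests):
    # the returned array is zero-filled and its data pointer is a multiple of
    # the requested alignment.
    buf = _aligned_zeros((4, 4), dtype=np.float64, align=64)
    assert buf.__array_interface__['data'][0] % 64 == 0
    assert buf.shape == (4, 4)
    assert not buf.any()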
class TestFlags(object):
def setup(self):
self.a = np.arange(10)
def test_writeable(self):
mydict = locals()
self.a.flags.writeable = False
assert_raises(ValueError, runstring, 'self.a[0] = 3', mydict)
assert_raises(ValueError, runstring, 'self.a[0:1].itemset(3)', mydict)
self.a.flags.writeable = True
self.a[0] = 5
self.a[0] = 0
def test_otherflags(self):
assert_equal(self.a.flags.carray, True)
assert_equal(self.a.flags.farray, False)
assert_equal(self.a.flags.behaved, True)
assert_equal(self.a.flags.fnc, False)
assert_equal(self.a.flags.forc, True)
assert_equal(self.a.flags.owndata, True)
assert_equal(self.a.flags.writeable, True)
assert_equal(self.a.flags.aligned, True)
assert_equal(self.a.flags.updateifcopy, False)
def test_string_align(self):
a = np.zeros(4, dtype=np.dtype('|S4'))
assert_(a.flags.aligned)
# not power of two are accessed byte-wise and thus considered aligned
a = np.zeros(5, dtype=np.dtype('|S4'))
assert_(a.flags.aligned)
def test_void_align(self):
a = np.zeros(4, dtype=np.dtype([("a", "i4"), ("b", "i4")]))
assert_(a.flags.aligned)
class TestHash(object):
# see #3793
def test_int(self):
for st, ut, s in [(np.int8, np.uint8, 8),
(np.int16, np.uint16, 16),
(np.int32, np.uint32, 32),
(np.int64, np.uint64, 64)]:
for i in range(1, s):
assert_equal(hash(st(-2**i)), hash(-2**i),
err_msg="%r: -2**%d" % (st, i))
assert_equal(hash(st(2**(i - 1))), hash(2**(i - 1)),
err_msg="%r: 2**%d" % (st, i - 1))
assert_equal(hash(st(2**i - 1)), hash(2**i - 1),
err_msg="%r: 2**%d - 1" % (st, i))
i = max(i - 1, 1)
assert_equal(hash(ut(2**(i - 1))), hash(2**(i - 1)),
err_msg="%r: 2**%d" % (ut, i - 1))
assert_equal(hash(ut(2**i - 1)), hash(2**i - 1),
err_msg="%r: 2**%d - 1" % (ut, i))
class TestAttributes(object):
def setup(self):
self.one = np.arange(10)
self.two = np.arange(20).reshape(4, 5)
self.three = np.arange(60, dtype=np.float64).reshape(2, 5, 6)
def test_attributes(self):
assert_equal(self.one.shape, (10,))
assert_equal(self.two.shape, (4, 5))
assert_equal(self.three.shape, (2, 5, 6))
self.three.shape = (10, 3, 2)
assert_equal(self.three.shape, (10, 3, 2))
self.three.shape = (2, 5, 6)
assert_equal(self.one.strides, (self.one.itemsize,))
num = self.two.itemsize
assert_equal(self.two.strides, (5*num, num))
num = self.three.itemsize
assert_equal(self.three.strides, (30*num, 6*num, num))
assert_equal(self.one.ndim, 1)
assert_equal(self.two.ndim, 2)
assert_equal(self.three.ndim, 3)
num = self.two.itemsize
assert_equal(self.two.size, 20)
assert_equal(self.two.nbytes, 20*num)
assert_equal(self.two.itemsize, self.two.dtype.itemsize)
assert_equal(self.two.base, np.arange(20))
def test_dtypeattr(self):
assert_equal(self.one.dtype, np.dtype(np.int_))
assert_equal(self.three.dtype, np.dtype(np.float_))
assert_equal(self.one.dtype.char, 'l')
assert_equal(self.three.dtype.char, 'd')
assert_(self.three.dtype.str[0] in '<>')
assert_equal(self.one.dtype.str[1], 'i')
assert_equal(self.three.dtype.str[1], 'f')
def test_int_subclassing(self):
# Regression test for https://github.com/numpy/numpy/pull/3526
numpy_int = np.int_(0)
if sys.version_info[0] >= 3:
# On Py3k int_ should not inherit from int, because it's not
# fixed-width anymore
assert_equal(isinstance(numpy_int, int), False)
else:
# Otherwise, it should inherit from int...
assert_equal(isinstance(numpy_int, int), True)
# ... and fast-path checks on C-API level should also work
from numpy.core.multiarray_tests import test_int_subclass
assert_equal(test_int_subclass(numpy_int), True)
def test_stridesattr(self):
x = self.one
def make_array(size, offset, strides):
return np.ndarray(size, buffer=x, dtype=int,
offset=offset*x.itemsize,
strides=strides*x.itemsize)
assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1]))
assert_raises(ValueError, make_array, 4, 4, -2)
assert_raises(ValueError, make_array, 4, 2, -1)
assert_raises(ValueError, make_array, 8, 3, 1)
assert_equal(make_array(8, 3, 0), np.array([3]*8))
# Check behavior reported in gh-2503:
assert_raises(ValueError, make_array, (2, 3), 5, np.array([-2, -3]))
make_array(0, 0, 10)
def test_set_stridesattr(self):
x = self.one
def make_array(size, offset, strides):
try:
r = np.ndarray([size], dtype=int, buffer=x,
offset=offset*x.itemsize)
except Exception as e:
raise RuntimeError(e)
r.strides = strides = strides*x.itemsize
return r
assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1]))
assert_equal(make_array(7, 3, 1), np.array([3, 4, 5, 6, 7, 8, 9]))
assert_raises(ValueError, make_array, 4, 4, -2)
assert_raises(ValueError, make_array, 4, 2, -1)
assert_raises(RuntimeError, make_array, 8, 3, 1)
# Check that the true extent of the array is used.
# Test relies on as_strided base not exposing a buffer.
x = np.lib.stride_tricks.as_strided(np.arange(1), (10, 10), (0, 0))
def set_strides(arr, strides):
arr.strides = strides
assert_raises(ValueError, set_strides, x, (10*x.itemsize, x.itemsize))
# Test for offset calculations:
x = np.lib.stride_tricks.as_strided(np.arange(10, dtype=np.int8)[-1],
shape=(10,), strides=(-1,))
assert_raises(ValueError, set_strides, x[::-1], -1)
a = x[::-1]
a.strides = 1
a[::2].strides = 2
def test_fill(self):
for t in "?bhilqpBHILQPfdgFDGO":
x = np.empty((3, 2, 1), t)
y = np.empty((3, 2, 1), t)
x.fill(1)
y[...] = 1
assert_equal(x, y)
def test_fill_max_uint64(self):
x = np.empty((3, 2, 1), dtype=np.uint64)
y = np.empty((3, 2, 1), dtype=np.uint64)
value = 2**64 - 1
y[...] = value
x.fill(value)
assert_array_equal(x, y)
def test_fill_struct_array(self):
# Filling from a scalar
x = np.array([(0, 0.0), (1, 1.0)], dtype='i4,f8')
x.fill(x[0])
assert_equal(x['f1'][1], x['f1'][0])
# Filling from a tuple that can be converted
# to a scalar
x = np.zeros(2, dtype=[('a', 'f8'), ('b', 'i4')])
x.fill((3.5, -2))
assert_array_equal(x['a'], [3.5, 3.5])
assert_array_equal(x['b'], [-2, -2])
class TestArrayConstruction(object):
def test_array(self):
d = np.ones(6)
r = np.array([d, d])
assert_equal(r, np.ones((2, 6)))
d = np.ones(6)
tgt = np.ones((2, 6))
r = np.array([d, d])
assert_equal(r, tgt)
tgt[1] = 2
r = np.array([d, d + 1])
assert_equal(r, tgt)
d = np.ones(6)
r = np.array([[d, d]])
assert_equal(r, np.ones((1, 2, 6)))
d = np.ones(6)
r = np.array([[d, d], [d, d]])
assert_equal(r, np.ones((2, 2, 6)))
d = np.ones((6, 6))
r = np.array([d, d])
assert_equal(r, np.ones((2, 6, 6)))
d = np.ones((6, ))
r = np.array([[d, d + 1], d + 2])
assert_equal(len(r), 2)
assert_equal(r[0], [d, d + 1])
assert_equal(r[1], d + 2)
tgt = np.ones((2, 3), dtype=bool)
tgt[0, 2] = False
tgt[1, 0:2] = False
r = np.array([[True, True, False], [False, False, True]])
assert_equal(r, tgt)
r = np.array([[True, False], [True, False], [False, True]])
assert_equal(r, tgt.T)
def test_array_empty(self):
assert_raises(TypeError, np.array)
def test_array_copy_false(self):
d = np.array([1, 2, 3])
e = np.array(d, copy=False)
d[1] = 3
assert_array_equal(e, [1, 3, 3])
e = np.array(d, copy=False, order='F')
d[1] = 4
assert_array_equal(e, [1, 4, 3])
e[2] = 7
assert_array_equal(d, [1, 4, 7])
def test_array_copy_true(self):
d = np.array([[1,2,3], [1, 2, 3]])
e = np.array(d, copy=True)
d[0, 1] = 3
e[0, 2] = -7
assert_array_equal(e, [[1, 2, -7], [1, 2, 3]])
assert_array_equal(d, [[1, 3, 3], [1, 2, 3]])
e = np.array(d, copy=True, order='F')
d[0, 1] = 5
e[0, 2] = 7
assert_array_equal(e, [[1, 3, 7], [1, 2, 3]])
assert_array_equal(d, [[1, 5, 3], [1,2,3]])
def test_array_cont(self):
d = np.ones(10)[::2]
assert_(np.ascontiguousarray(d).flags.c_contiguous)
assert_(np.ascontiguousarray(d).flags.f_contiguous)
assert_(np.asfortranarray(d).flags.c_contiguous)
assert_(np.asfortranarray(d).flags.f_contiguous)
d = np.ones((10, 10))[::2,::2]
assert_(np.ascontiguousarray(d).flags.c_contiguous)
assert_(np.asfortranarray(d).flags.f_contiguous)
class TestAssignment(object):
def test_assignment_broadcasting(self):
a = np.arange(6).reshape(2, 3)
# Broadcasting the input to the output
a[...] = np.arange(3)
assert_equal(a, [[0, 1, 2], [0, 1, 2]])
a[...] = np.arange(2).reshape(2, 1)
assert_equal(a, [[0, 0, 0], [1, 1, 1]])
# For compatibility with <= 1.5, a limited version of broadcasting
# the output to the input.
#
# This behavior is inconsistent with NumPy broadcasting
# in general, because it only uses one of the two broadcasting
# rules (adding a new "1" dimension to the left of the shape),
# applied to the output instead of an input. In NumPy 2.0, this kind
# of broadcasting assignment will likely be disallowed.
a[...] = np.arange(6)[::-1].reshape(1, 2, 3)
assert_equal(a, [[5, 4, 3], [2, 1, 0]])
# The other type of broadcasting would require a reduction operation.
def assign(a, b):
a[...] = b
assert_raises(ValueError, assign, a, np.arange(12).reshape(2, 2, 3))
def test_assignment_errors(self):
# Address issue #2276
class C:
pass
a = np.zeros(1)
def assign(v):
a[0] = v
assert_raises((AttributeError, TypeError), assign, C())
assert_raises(ValueError, assign, [1])
def test_unicode_assignment(self):
# gh-5049
from numpy.core.numeric import set_string_function
@contextmanager
def inject_str(s):
""" replace ndarray.__str__ temporarily """
set_string_function(lambda x: s, repr=False)
try:
yield
finally:
set_string_function(None, repr=False)
a1d = np.array([u'test'])
a0d = np.array(u'done')
with inject_str(u'bad'):
a1d[0] = a0d # previously this would invoke __str__
assert_equal(a1d[0], u'done')
# this would crash for the same reason
np.array([np.array(u'\xe5\xe4\xf6')])
def test_stringlike_empty_list(self):
# gh-8902
u = np.array([u'done'])
b = np.array([b'done'])
class bad_sequence(object):
def __getitem__(self): pass
def __len__(self): raise RuntimeError
assert_raises(ValueError, operator.setitem, u, 0, [])
assert_raises(ValueError, operator.setitem, b, 0, [])
assert_raises(ValueError, operator.setitem, u, 0, bad_sequence())
assert_raises(ValueError, operator.setitem, b, 0, bad_sequence())
def test_longdouble_assignment(self):
# only relevant if longdouble is larger than float
# we're looking for loss of precision
# gh-8902
tinyb = np.nextafter(np.longdouble(0), 1)
tinya = np.nextafter(np.longdouble(0), -1)
tiny1d = np.array([tinya])
assert_equal(tiny1d[0], tinya)
# scalar = scalar
tiny1d[0] = tinyb
assert_equal(tiny1d[0], tinyb)
# 0d = scalar
tiny1d[0, ...] = tinya
assert_equal(tiny1d[0], tinya)
# 0d = 0d
tiny1d[0, ...] = tinyb[...]
assert_equal(tiny1d[0], tinyb)
# scalar = 0d
tiny1d[0] = tinyb[...]
assert_equal(tiny1d[0], tinyb)
arr = np.array([np.array(tinya)])
assert_equal(arr[0], tinya)
class TestDtypedescr(object):
def test_construction(self):
d1 = np.dtype('i4')
assert_equal(d1, np.dtype(np.int32))
d2 = np.dtype('f8')
assert_equal(d2, np.dtype(np.float64))
def test_byteorders(self):
assert_(np.dtype('<i4') != np.dtype('>i4'))
assert_(np.dtype([('a', '<i4')]) != np.dtype([('a', '>i4')]))
class TestZeroRank(object):
def setup(self):
self.d = np.array(0), np.array('x', object)
def test_ellipsis_subscript(self):
a, b = self.d
assert_equal(a[...], 0)
assert_equal(b[...], 'x')
assert_(a[...].base is a) # `a[...] is a` in numpy <1.9.
assert_(b[...].base is b) # `b[...] is b` in numpy <1.9.
def test_empty_subscript(self):
a, b = self.d
assert_equal(a[()], 0)
assert_equal(b[()], 'x')
assert_(type(a[()]) is a.dtype.type)
assert_(type(b[()]) is str)
def test_invalid_subscript(self):
a, b = self.d
assert_raises(IndexError, lambda x: x[0], a)
assert_raises(IndexError, lambda x: x[0], b)
assert_raises(IndexError, lambda x: x[np.array([], int)], a)
assert_raises(IndexError, lambda x: x[np.array([], int)], b)
def test_ellipsis_subscript_assignment(self):
a, b = self.d
a[...] = 42
assert_equal(a, 42)
b[...] = ''
assert_equal(b.item(), '')
def test_empty_subscript_assignment(self):
a, b = self.d
a[()] = 42
assert_equal(a, 42)
b[()] = ''
assert_equal(b.item(), '')
def test_invalid_subscript_assignment(self):
a, b = self.d
def assign(x, i, v):
x[i] = v
assert_raises(IndexError, assign, a, 0, 42)
assert_raises(IndexError, assign, b, 0, '')
assert_raises(ValueError, assign, a, (), '')
def test_newaxis(self):
a, b = self.d
assert_equal(a[np.newaxis].shape, (1,))
assert_equal(a[..., np.newaxis].shape, (1,))
assert_equal(a[np.newaxis, ...].shape, (1,))
assert_equal(a[..., np.newaxis].shape, (1,))
assert_equal(a[np.newaxis, ..., np.newaxis].shape, (1, 1))
assert_equal(a[..., np.newaxis, np.newaxis].shape, (1, 1))
assert_equal(a[np.newaxis, np.newaxis, ...].shape, (1, 1))
assert_equal(a[(np.newaxis,)*10].shape, (1,)*10)
def test_invalid_newaxis(self):
a, b = self.d
def subscript(x, i):
x[i]
assert_raises(IndexError, subscript, a, (np.newaxis, 0))
assert_raises(IndexError, subscript, a, (np.newaxis,)*50)
def test_constructor(self):
x = np.ndarray(())
x[()] = 5
assert_equal(x[()], 5)
y = np.ndarray((), buffer=x)
y[()] = 6
assert_equal(x[()], 6)
def test_output(self):
x = np.array(2)
assert_raises(ValueError, np.add, x, [1], x)
class TestScalarIndexing(object):
def setup(self):
self.d = np.array([0, 1])[0]
def test_ellipsis_subscript(self):
a = self.d
assert_equal(a[...], 0)
assert_equal(a[...].shape, ())
def test_empty_subscript(self):
a = self.d
assert_equal(a[()], 0)
assert_equal(a[()].shape, ())
def test_invalid_subscript(self):
a = self.d
assert_raises(IndexError, lambda x: x[0], a)
assert_raises(IndexError, lambda x: x[np.array([], int)], a)
def test_invalid_subscript_assignment(self):
a = self.d
def assign(x, i, v):
x[i] = v
assert_raises(TypeError, assign, a, 0, 42)
def test_newaxis(self):
a = self.d
assert_equal(a[np.newaxis].shape, (1,))
assert_equal(a[..., np.newaxis].shape, (1,))
assert_equal(a[np.newaxis, ...].shape, (1,))
assert_equal(a[..., np.newaxis].shape, (1,))
assert_equal(a[np.newaxis, ..., np.newaxis].shape, (1, 1))
assert_equal(a[..., np.newaxis, np.newaxis].shape, (1, 1))
assert_equal(a[np.newaxis, np.newaxis, ...].shape, (1, 1))
assert_equal(a[(np.newaxis,)*10].shape, (1,)*10)
def test_invalid_newaxis(self):
a = self.d
def subscript(x, i):
x[i]
assert_raises(IndexError, subscript, a, (np.newaxis, 0))
assert_raises(IndexError, subscript, a, (np.newaxis,)*50)
def test_overlapping_assignment(self):
# With positive strides
a = np.arange(4)
a[:-1] = a[1:]
assert_equal(a, [1, 2, 3, 3])
a = np.arange(4)
a[1:] = a[:-1]
assert_equal(a, [0, 0, 1, 2])
# With positive and negative strides
a = np.arange(4)
a[:] = a[::-1]
assert_equal(a, [3, 2, 1, 0])
a = np.arange(6).reshape(2, 3)
a[::-1,:] = a[:, ::-1]
assert_equal(a, [[5, 4, 3], [2, 1, 0]])
a = np.arange(6).reshape(2, 3)
a[::-1, ::-1] = a[:, ::-1]
assert_equal(a, [[3, 4, 5], [0, 1, 2]])
# With just one element overlapping
a = np.arange(5)
a[:3] = a[2:]
assert_equal(a, [2, 3, 4, 3, 4])
a = np.arange(5)
a[2:] = a[:3]
assert_equal(a, [0, 1, 0, 1, 2])
a = np.arange(5)
a[2::-1] = a[2:]
assert_equal(a, [4, 3, 2, 3, 4])
a = np.arange(5)
a[2:] = a[2::-1]
assert_equal(a, [0, 1, 2, 1, 0])
a = np.arange(5)
a[2::-1] = a[:1:-1]
assert_equal(a, [2, 3, 4, 3, 4])
a = np.arange(5)
a[:1:-1] = a[2::-1]
assert_equal(a, [0, 1, 0, 1, 2])
class TestCreation(object):
def test_from_attribute(self):
class x(object):
def __array__(self, dtype=None):
pass
assert_raises(ValueError, np.array, x())
def test_from_string(self):
types = np.typecodes['AllInteger'] + np.typecodes['Float']
nstr = ['123', '123']
result = np.array([123, 123], dtype=int)
for type in types:
msg = 'String conversion for %s' % type
assert_equal(np.array(nstr, dtype=type), result, err_msg=msg)
def test_void(self):
arr = np.array([], dtype='V')
assert_equal(arr.dtype.kind, 'V')
def test_too_big_error(self):
        # 46341 is the smallest integer greater than sqrt(2**31 - 1).
# 3037000500 is the smallest integer greater than sqrt(2**63 - 1).
# We want to make sure that the square byte array with those dimensions
# is too big on 32 or 64 bit systems respectively.
if np.iinfo('intp').max == 2**31 - 1:
shape = (46341, 46341)
elif np.iinfo('intp').max == 2**63 - 1:
shape = (3037000500, 3037000500)
else:
return
assert_raises(ValueError, np.empty, shape, dtype=np.int8)
assert_raises(ValueError, np.zeros, shape, dtype=np.int8)
assert_raises(ValueError, np.ones, shape, dtype=np.int8)
def test_zeros(self):
types = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
for dt in types:
d = np.zeros((13,), dtype=dt)
assert_equal(np.count_nonzero(d), 0)
# true for ieee floats
assert_equal(d.sum(), 0)
assert_(not d.any())
d = np.zeros(2, dtype='(2,4)i4')
assert_equal(np.count_nonzero(d), 0)
assert_equal(d.sum(), 0)
assert_(not d.any())
d = np.zeros(2, dtype='4i4')
assert_equal(np.count_nonzero(d), 0)
assert_equal(d.sum(), 0)
assert_(not d.any())
d = np.zeros(2, dtype='(2,4)i4, (2,4)i4')
assert_equal(np.count_nonzero(d), 0)
@dec.slow
def test_zeros_big(self):
# test big array as they might be allocated different by the system
types = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
for dt in types:
d = np.zeros((30 * 1024**2,), dtype=dt)
assert_(not d.any())
# This test can fail on 32-bit systems due to insufficient
# contiguous memory. Deallocating the previous array increases the
# chance of success.
del(d)
def test_zeros_obj(self):
# test initialization from PyLong(0)
d = np.zeros((13,), dtype=object)
assert_array_equal(d, [0] * 13)
assert_equal(np.count_nonzero(d), 0)
def test_zeros_obj_obj(self):
d = np.zeros(10, dtype=[('k', object, 2)])
assert_array_equal(d['k'], 0)
def test_zeros_like_like_zeros(self):
# test zeros_like returns the same as zeros
for c in np.typecodes['All']:
if c == 'V':
continue
d = np.zeros((3,3), dtype=c)
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
# explicitly check some special cases
d = np.zeros((3,3), dtype='S5')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='U5')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='<i4')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='>i4')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='<M8[s]')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='>M8[s]')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='f4,f4')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
def test_empty_unicode(self):
# don't throw decode errors on garbage memory
for i in range(5, 100, 5):
d = np.empty(i, dtype='U')
str(d)
def test_sequence_non_homogenous(self):
assert_equal(np.array([4, 2**80]).dtype, object)
assert_equal(np.array([4, 2**80, 4]).dtype, object)
assert_equal(np.array([2**80, 4]).dtype, object)
assert_equal(np.array([2**80] * 3).dtype, object)
assert_equal(np.array([[1, 1],[1j, 1j]]).dtype, complex)
assert_equal(np.array([[1j, 1j],[1, 1]]).dtype, complex)
assert_equal(np.array([[1, 1, 1],[1, 1j, 1.], [1, 1, 1]]).dtype, complex)
@dec.skipif(sys.version_info[0] >= 3)
def test_sequence_long(self):
assert_equal(np.array([long(4), long(4)]).dtype, np.long)
assert_equal(np.array([long(4), 2**80]).dtype, object)
assert_equal(np.array([long(4), 2**80, long(4)]).dtype, object)
assert_equal(np.array([2**80, long(4)]).dtype, object)
def test_non_sequence_sequence(self):
"""Should not segfault.
Class Fail breaks the sequence protocol for new style classes, i.e.,
        those derived from object. Class Map is a mapping type indicated by
        raising a KeyError. At some point we may raise a warning instead
of an error in the Fail case.
"""
class Fail(object):
def __len__(self):
return 1
def __getitem__(self, index):
raise ValueError()
class Map(object):
def __len__(self):
return 1
def __getitem__(self, index):
raise KeyError()
a = np.array([Map()])
assert_(a.shape == (1,))
assert_(a.dtype == np.dtype(object))
assert_raises(ValueError, np.array, [Fail()])
def test_no_len_object_type(self):
# gh-5100, want object array from iterable object without len()
class Point2:
def __init__(self):
pass
def __getitem__(self, ind):
if ind in [0, 1]:
return ind
else:
raise IndexError()
d = np.array([Point2(), Point2(), Point2()])
assert_equal(d.dtype, np.dtype(object))
def test_false_len_sequence(self):
# gh-7264, segfault for this example
class C:
def __getitem__(self, i):
raise IndexError
def __len__(self):
return 42
assert_raises(ValueError, np.array, C()) # segfault?
def test_failed_len_sequence(self):
# gh-7393
class A(object):
def __init__(self, data):
self._data = data
def __getitem__(self, item):
return type(self)(self._data[item])
def __len__(self):
return len(self._data)
# len(d) should give 3, but len(d[0]) will fail
d = A([1,2,3])
assert_equal(len(np.array(d)), 3)
def test_array_too_big(self):
# Test that array creation succeeds for arrays addressable by intp
# on the byte level and fails for too large arrays.
buf = np.zeros(100)
max_bytes = np.iinfo(np.intp).max
for dtype in ["intp", "S20", "b"]:
dtype = np.dtype(dtype)
itemsize = dtype.itemsize
np.ndarray(buffer=buf, strides=(0,),
shape=(max_bytes//itemsize,), dtype=dtype)
assert_raises(ValueError, np.ndarray, buffer=buf, strides=(0,),
shape=(max_bytes//itemsize + 1,), dtype=dtype)
class TestStructured(object):
def test_subarray_field_access(self):
a = np.zeros((3, 5), dtype=[('a', ('i4', (2, 2)))])
a['a'] = np.arange(60).reshape(3, 5, 2, 2)
# Since the subarray is always in C-order, a transpose
# does not swap the subarray:
assert_array_equal(a.T['a'], a['a'].transpose(1, 0, 2, 3))
# In Fortran order, the subarray gets appended
# like in all other cases, not prepended as a special case
b = a.copy(order='F')
assert_equal(a['a'].shape, b['a'].shape)
assert_equal(a.T['a'].shape, a.T.copy()['a'].shape)
def test_subarray_comparison(self):
# Check that comparisons between record arrays with
# multi-dimensional field types work properly
a = np.rec.fromrecords(
[([1, 2, 3], 'a', [[1, 2], [3, 4]]), ([3, 3, 3], 'b', [[0, 0], [0, 0]])],
dtype=[('a', ('f4', 3)), ('b', object), ('c', ('i4', (2, 2)))])
b = a.copy()
assert_equal(a == b, [True, True])
assert_equal(a != b, [False, False])
b[1].b = 'c'
assert_equal(a == b, [True, False])
assert_equal(a != b, [False, True])
for i in range(3):
b[0].a = a[0].a
b[0].a[i] = 5
assert_equal(a == b, [False, False])
assert_equal(a != b, [True, True])
for i in range(2):
for j in range(2):
b = a.copy()
b[0].c[i, j] = 10
assert_equal(a == b, [False, True])
assert_equal(a != b, [True, False])
# Check that broadcasting with a subarray works
a = np.array([[(0,)], [(1,)]], dtype=[('a', 'f8')])
b = np.array([(0,), (0,), (1,)], dtype=[('a', 'f8')])
assert_equal(a == b, [[True, True, False], [False, False, True]])
assert_equal(b == a, [[True, True, False], [False, False, True]])
a = np.array([[(0,)], [(1,)]], dtype=[('a', 'f8', (1,))])
b = np.array([(0,), (0,), (1,)], dtype=[('a', 'f8', (1,))])
assert_equal(a == b, [[True, True, False], [False, False, True]])
assert_equal(b == a, [[True, True, False], [False, False, True]])
a = np.array([[([0, 0],)], [([1, 1],)]], dtype=[('a', 'f8', (2,))])
b = np.array([([0, 0],), ([0, 1],), ([1, 1],)], dtype=[('a', 'f8', (2,))])
assert_equal(a == b, [[True, False, False], [False, False, True]])
assert_equal(b == a, [[True, False, False], [False, False, True]])
# Check that broadcasting Fortran-style arrays with a subarray work
a = np.array([[([0, 0],)], [([1, 1],)]], dtype=[('a', 'f8', (2,))], order='F')
b = np.array([([0, 0],), ([0, 1],), ([1, 1],)], dtype=[('a', 'f8', (2,))])
assert_equal(a == b, [[True, False, False], [False, False, True]])
assert_equal(b == a, [[True, False, False], [False, False, True]])
# Check that incompatible sub-array shapes don't result to broadcasting
x = np.zeros((1,), dtype=[('a', ('f4', (1, 2))), ('b', 'i1')])
y = np.zeros((1,), dtype=[('a', ('f4', (2,))), ('b', 'i1')])
# This comparison invokes deprecated behaviour, and will probably
# start raising an error eventually. What we really care about in this
# test is just that it doesn't return True.
with suppress_warnings() as sup:
sup.filter(FutureWarning, "elementwise == comparison failed")
assert_equal(x == y, False)
x = np.zeros((1,), dtype=[('a', ('f4', (2, 1))), ('b', 'i1')])
y = np.zeros((1,), dtype=[('a', ('f4', (2,))), ('b', 'i1')])
# This comparison invokes deprecated behaviour, and will probably
# start raising an error eventually. What we really care about in this
# test is just that it doesn't return True.
with suppress_warnings() as sup:
sup.filter(FutureWarning, "elementwise == comparison failed")
assert_equal(x == y, False)
# Check that structured arrays that are different only in
# byte-order work
a = np.array([(5, 42), (10, 1)], dtype=[('a', '>i8'), ('b', '<f8')])
b = np.array([(5, 43), (10, 1)], dtype=[('a', '<i8'), ('b', '>f8')])
assert_equal(a == b, [False, True])
def test_casting(self):
# Check that casting a structured array to change its byte order
# works
a = np.array([(1,)], dtype=[('a', '<i4')])
assert_(np.can_cast(a.dtype, [('a', '>i4')], casting='unsafe'))
b = a.astype([('a', '>i4')])
assert_equal(b, a.byteswap().newbyteorder())
assert_equal(a['a'][0], b['a'][0])
# Check that equality comparison works on structured arrays if
# they are 'equiv'-castable
a = np.array([(5, 42), (10, 1)], dtype=[('a', '>i4'), ('b', '<f8')])
b = np.array([(42, 5), (1, 10)], dtype=[('b', '>f8'), ('a', '<i4')])
assert_(np.can_cast(a.dtype, b.dtype, casting='equiv'))
assert_equal(a == b, [True, True])
# Check that 'equiv' casting can reorder fields and change byte
# order
# New in 1.12: This behavior changes in 1.13, test for dep warning
assert_(np.can_cast(a.dtype, b.dtype, casting='equiv'))
with assert_warns(FutureWarning):
c = a.astype(b.dtype, casting='equiv')
assert_equal(a == c, [True, True])
# Check that 'safe' casting can change byte order and up-cast
# fields
t = [('a', '<i8'), ('b', '>f8')]
assert_(np.can_cast(a.dtype, t, casting='safe'))
c = a.astype(t, casting='safe')
assert_equal((c == np.array([(5, 42), (10, 1)], dtype=t)),
[True, True])
# Check that 'same_kind' casting can change byte order and
# change field widths within a "kind"
t = [('a', '<i4'), ('b', '>f4')]
assert_(np.can_cast(a.dtype, t, casting='same_kind'))
c = a.astype(t, casting='same_kind')
assert_equal((c == np.array([(5, 42), (10, 1)], dtype=t)),
[True, True])
# Check that casting fails if the casting rule should fail on
# any of the fields
t = [('a', '>i8'), ('b', '<f4')]
assert_(not np.can_cast(a.dtype, t, casting='safe'))
assert_raises(TypeError, a.astype, t, casting='safe')
t = [('a', '>i2'), ('b', '<f8')]
assert_(not np.can_cast(a.dtype, t, casting='equiv'))
assert_raises(TypeError, a.astype, t, casting='equiv')
t = [('a', '>i8'), ('b', '<i2')]
assert_(not np.can_cast(a.dtype, t, casting='same_kind'))
assert_raises(TypeError, a.astype, t, casting='same_kind')
assert_(not np.can_cast(a.dtype, b.dtype, casting='no'))
assert_raises(TypeError, a.astype, b.dtype, casting='no')
# Check that non-'unsafe' casting can't change the set of field names
for casting in ['no', 'safe', 'equiv', 'same_kind']:
t = [('a', '>i4')]
assert_(not np.can_cast(a.dtype, t, casting=casting))
t = [('a', '>i4'), ('b', '<f8'), ('c', 'i4')]
assert_(not np.can_cast(a.dtype, t, casting=casting))
def test_objview(self):
# https://github.com/numpy/numpy/issues/3286
a = np.array([], dtype=[('a', 'f'), ('b', 'f'), ('c', 'O')])
a[['a', 'b']] # TypeError?
# https://github.com/numpy/numpy/issues/3253
dat2 = np.zeros(3, [('A', 'i'), ('B', '|O')])
dat2[['B', 'A']] # TypeError?
def test_setfield(self):
# https://github.com/numpy/numpy/issues/3126
struct_dt = np.dtype([('elem', 'i4', 5),])
        dt = np.dtype([('field', 'i4', 10), ('struct', struct_dt)])
x = np.zeros(1, dt)
x[0]['field'] = np.ones(10, dtype='i4')
x[0]['struct'] = np.ones(1, dtype=struct_dt)
assert_equal(x[0]['field'], np.ones(10, dtype='i4'))
def test_setfield_object(self):
# make sure object field assignment with ndarray value
# on void scalar mimics setitem behavior
b = np.zeros(1, dtype=[('x', 'O')])
# next line should work identically to b['x'][0] = np.arange(3)
b[0]['x'] = np.arange(3)
assert_equal(b[0]['x'], np.arange(3))
# check that broadcasting check still works
c = np.zeros(1, dtype=[('x', 'O', 5)])
def testassign():
c[0]['x'] = np.arange(3)
assert_raises(ValueError, testassign)
def test_zero_width_string(self):
# Test for PR #6430 / issues #473, #4955, #2585
dt = np.dtype([('I', int), ('S', 'S0')])
x = np.zeros(4, dtype=dt)
assert_equal(x['S'], [b'', b'', b'', b''])
assert_equal(x['S'].itemsize, 0)
x['S'] = ['a', 'b', 'c', 'd']
assert_equal(x['S'], [b'', b'', b'', b''])
assert_equal(x['I'], [0, 0, 0, 0])
# Variation on test case from #4955
x['S'][x['I'] == 0] = 'hello'
assert_equal(x['S'], [b'', b'', b'', b''])
assert_equal(x['I'], [0, 0, 0, 0])
# Variation on test case from #2585
x['S'] = 'A'
assert_equal(x['S'], [b'', b'', b'', b''])
assert_equal(x['I'], [0, 0, 0, 0])
# Allow zero-width dtypes in ndarray constructor
y = np.ndarray(4, dtype=x['S'].dtype)
assert_equal(y.itemsize, 0)
assert_equal(x['S'], y)
# More tests for indexing an array with zero-width fields
assert_equal(np.zeros(4, dtype=[('a', 'S0,S0'),
('b', 'u1')])['a'].itemsize, 0)
assert_equal(np.empty(3, dtype='S0,S0').itemsize, 0)
assert_equal(np.zeros(4, dtype='S0,u1')['f0'].itemsize, 0)
xx = x['S'].reshape((2, 2))
assert_equal(xx.itemsize, 0)
assert_equal(xx, [[b'', b''], [b'', b'']])
# check for no uninitialized memory due to viewing S0 array
assert_equal(xx[:].dtype, xx.dtype)
assert_array_equal(eval(repr(xx), dict(array=np.array)), xx)
b = io.BytesIO()
np.save(b, xx)
b.seek(0)
yy = np.load(b)
assert_equal(yy.itemsize, 0)
assert_equal(xx, yy)
with temppath(suffix='.npy') as tmp:
np.save(tmp, xx)
yy = np.load(tmp)
assert_equal(yy.itemsize, 0)
assert_equal(xx, yy)
def test_base_attr(self):
a = np.zeros(3, dtype='i4,f4')
b = a[0]
assert_(b.base is a)
class TestBool(object):
def test_test_interning(self):
a0 = np.bool_(0)
b0 = np.bool_(False)
assert_(a0 is b0)
a1 = np.bool_(1)
b1 = np.bool_(True)
assert_(a1 is b1)
assert_(np.array([True])[0] is a1)
assert_(np.array(True)[()] is a1)
def test_sum(self):
d = np.ones(101, dtype=bool)
assert_equal(d.sum(), d.size)
assert_equal(d[::2].sum(), d[::2].size)
assert_equal(d[::-2].sum(), d[::-2].size)
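        # frombuffer below yields booleans backed by 0xff bytes; sum() must
        # treat any nonzero byte as True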
d = np.frombuffer(b'\xff\xff' * 100, dtype=bool)
assert_equal(d.sum(), d.size)
assert_equal(d[::2].sum(), d[::2].size)
assert_equal(d[::-2].sum(), d[::-2].size)
def check_count_nonzero(self, power, length):
powers = [2 ** i for i in range(length)]
for i in range(2**power):
l = [(i & x) != 0 for x in powers]
a = np.array(l, dtype=bool)
c = builtins.sum(l)
assert_equal(np.count_nonzero(a), c)
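            # rewrite the underlying bytes to values other than 0 and 1;
            # count_nonzero must still count every nonzero byte as True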
av = a.view(np.uint8)
av *= 3
assert_equal(np.count_nonzero(a), c)
av *= 4
assert_equal(np.count_nonzero(a), c)
av[av != 0] = 0xFF
assert_equal(np.count_nonzero(a), c)
def test_count_nonzero(self):
# check all 12 bit combinations in a length 17 array
# covers most cases of the 16 byte unrolled code
self.check_count_nonzero(12, 17)
@dec.slow
def test_count_nonzero_all(self):
# check all combinations in a length 17 array
# covers all cases of the 16 byte unrolled code
self.check_count_nonzero(17, 17)
def test_count_nonzero_unaligned(self):
# prevent mistakes as e.g. gh-4060
for o in range(7):
a = np.zeros((18,), dtype=bool)[o+1:]
a[:o] = True
assert_equal(np.count_nonzero(a), builtins.sum(a.tolist()))
a = np.ones((18,), dtype=bool)[o+1:]
a[:o] = False
assert_equal(np.count_nonzero(a), builtins.sum(a.tolist()))
class TestMethods(object):
def test_compress(self):
tgt = [[5, 6, 7, 8, 9]]
arr = np.arange(10).reshape(2, 5)
out = arr.compress([0, 1], axis=0)
assert_equal(out, tgt)
tgt = [[1, 3], [6, 8]]
out = arr.compress([0, 1, 0, 1, 0], axis=1)
assert_equal(out, tgt)
tgt = [[1], [6]]
arr = np.arange(10).reshape(2, 5)
out = arr.compress([0, 1], axis=1)
assert_equal(out, tgt)
arr = np.arange(10).reshape(2, 5)
out = arr.compress([0, 1])
assert_equal(out, 1)
def test_choose(self):
x = 2*np.ones((3,), dtype=int)
y = 3*np.ones((3,), dtype=int)
x2 = 2*np.ones((2, 3), dtype=int)
y2 = 3*np.ones((2, 3), dtype=int)
ind = np.array([0, 0, 1])
A = ind.choose((x, y))
assert_equal(A, [2, 2, 3])
A = ind.choose((x2, y2))
assert_equal(A, [[2, 2, 3], [2, 2, 3]])
A = ind.choose((x, y2))
assert_equal(A, [[2, 2, 3], [2, 2, 3]])
def test_prod(self):
ba = [1, 2, 10, 11, 6, 5, 4]
ba2 = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]]
for ctype in [np.int16, np.uint16, np.int32, np.uint32,
np.float32, np.float64, np.complex64, np.complex128]:
a = np.array(ba, ctype)
a2 = np.array(ba2, ctype)
if ctype in ['1', 'b']:
assert_raises(ArithmeticError, a.prod)
assert_raises(ArithmeticError, a2.prod, axis=1)
else:
assert_equal(a.prod(axis=0), 26400)
assert_array_equal(a2.prod(axis=0),
np.array([50, 36, 84, 180], ctype))
assert_array_equal(a2.prod(axis=-1),
np.array([24, 1890, 600], ctype))
def test_repeat(self):
m = np.array([1, 2, 3, 4, 5, 6])
m_rect = m.reshape((2, 3))
A = m.repeat([1, 3, 2, 1, 1, 2])
assert_equal(A, [1, 2, 2, 2, 3,
3, 4, 5, 6, 6])
A = m.repeat(2)
assert_equal(A, [1, 1, 2, 2, 3, 3,
4, 4, 5, 5, 6, 6])
A = m_rect.repeat([2, 1], axis=0)
assert_equal(A, [[1, 2, 3],
[1, 2, 3],
[4, 5, 6]])
A = m_rect.repeat([1, 3, 2], axis=1)
assert_equal(A, [[1, 2, 2, 2, 3, 3],
[4, 5, 5, 5, 6, 6]])
A = m_rect.repeat(2, axis=0)
assert_equal(A, [[1, 2, 3],
[1, 2, 3],
[4, 5, 6],
[4, 5, 6]])
A = m_rect.repeat(2, axis=1)
assert_equal(A, [[1, 1, 2, 2, 3, 3],
[4, 4, 5, 5, 6, 6]])
def test_reshape(self):
arr = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]])
tgt = [[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]]
assert_equal(arr.reshape(2, 6), tgt)
tgt = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]
assert_equal(arr.reshape(3, 4), tgt)
tgt = [[1, 10, 8, 6], [4, 2, 11, 9], [7, 5, 3, 12]]
assert_equal(arr.reshape((3, 4), order='F'), tgt)
tgt = [[1, 4, 7, 10], [2, 5, 8, 11], [3, 6, 9, 12]]
assert_equal(arr.T.reshape((3, 4), order='C'), tgt)
def test_round(self):
def check_round(arr, expected, *round_args):
assert_equal(arr.round(*round_args), expected)
# With output array
out = np.zeros_like(arr)
res = arr.round(*round_args, out=out)
assert_equal(out, expected)
assert_equal(out, res)
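        # note: np.round rounds half to even, so 1.5 -> 2 and 4.5 -> 4 below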
check_round(np.array([1.2, 1.5]), [1, 2])
check_round(np.array(1.5), 2)
check_round(np.array([12.2, 15.5]), [10, 20], -1)
check_round(np.array([12.15, 15.51]), [12.2, 15.5], 1)
# Complex rounding
check_round(np.array([4.5 + 1.5j]), [4 + 2j])
check_round(np.array([12.5 + 15.5j]), [10 + 20j], -1)
def test_squeeze(self):
a = np.array([[[1], [2], [3]]])
assert_equal(a.squeeze(), [1, 2, 3])
assert_equal(a.squeeze(axis=(0,)), [[1], [2], [3]])
assert_raises(ValueError, a.squeeze, axis=(1,))
assert_equal(a.squeeze(axis=(2,)), [[1, 2, 3]])
def test_transpose(self):
a = np.array([[1, 2], [3, 4]])
assert_equal(a.transpose(), [[1, 3], [2, 4]])
assert_raises(ValueError, lambda: a.transpose(0))
assert_raises(ValueError, lambda: a.transpose(0, 0))
assert_raises(ValueError, lambda: a.transpose(0, 1, 2))
def test_sort(self):
# test ordering for floats and complex containing nans. It is only
# necessary to check the less-than comparison, so sorts that
# only follow the insertion sort path are sufficient. We only
# test doubles and complex doubles as the logic is the same.
# check doubles
msg = "Test real sort order with nans"
a = np.array([np.nan, 1, 0])
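        # nans sort to the end, so the sorted result is the input reversed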
b = np.sort(a)
assert_equal(b, a[::-1], msg)
# check complex
msg = "Test complex sort order with nans"
a = np.zeros(9, dtype=np.complex128)
a.real += [np.nan, np.nan, np.nan, 1, 0, 1, 1, 0, 0]
a.imag += [np.nan, 1, 0, np.nan, np.nan, 1, 0, 1, 0]
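        # the complex sort order is [R + Rj, R + nanj, nan + Rj, nan + nanj];
        # the entries above are in descending order, so sorting reverses them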
b = np.sort(a)
assert_equal(b, a[::-1], msg)
# all c scalar sorts use the same code with different types
# so it suffices to run a quick check with one type. The number
# of sorted items must be greater than ~50 to check the actual
# algorithm because quick and merge sort fall over to insertion
# sort for small arrays.
a = np.arange(101)
b = a[::-1].copy()
for kind in ['q', 'm', 'h']:
msg = "scalar sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test complex sorts. These use the same code as the scalars
# but the compare function differs.
ai = a*1j + 1
bi = b*1j + 1
for kind in ['q', 'm', 'h']:
msg = "complex sort, real part == 1, kind=%s" % kind
c = ai.copy()
c.sort(kind=kind)
assert_equal(c, ai, msg)
c = bi.copy()
c.sort(kind=kind)
assert_equal(c, ai, msg)
ai = a + 1j
bi = b + 1j
for kind in ['q', 'm', 'h']:
msg = "complex sort, imag part == 1, kind=%s" % kind
c = ai.copy()
c.sort(kind=kind)
assert_equal(c, ai, msg)
c = bi.copy()
c.sort(kind=kind)
assert_equal(c, ai, msg)
# test sorting of complex arrays requiring byte-swapping, gh-5441
for endianess in '<>':
for dt in np.typecodes['Complex']:
arr = np.array([1+3.j, 2+2.j, 3+1.j], dtype=endianess + dt)
c = arr.copy()
c.sort()
msg = 'byte-swapped complex sort, dtype={0}'.format(dt)
assert_equal(c, arr, msg)
# test string sorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)])
b = a[::-1].copy()
for kind in ['q', 'm', 'h']:
msg = "string sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test unicode sorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)], dtype=np.unicode)
b = a[::-1].copy()
for kind in ['q', 'm', 'h']:
msg = "unicode sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test object array sorts.
a = np.empty((101,), dtype=object)
a[:] = list(range(101))
b = a[::-1]
for kind in ['q', 'h', 'm']:
msg = "object sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test record array sorts.
dt = np.dtype([('f', float), ('i', int)])
a = np.array([(i, i) for i in range(101)], dtype=dt)
b = a[::-1]
for kind in ['q', 'h', 'm']:
            msg = "record array sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test datetime64 sorts.
a = np.arange(0, 101, dtype='datetime64[D]')
b = a[::-1]
for kind in ['q', 'h', 'm']:
msg = "datetime64 sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test timedelta64 sorts.
a = np.arange(0, 101, dtype='timedelta64[D]')
b = a[::-1]
for kind in ['q', 'h', 'm']:
msg = "timedelta64 sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# check axis handling. This should be the same for all type
# specific sorts, so we only check it for one type and one kind
a = np.array([[3, 2], [1, 0]])
b = np.array([[1, 0], [3, 2]])
c = np.array([[2, 3], [0, 1]])
d = a.copy()
d.sort(axis=0)
assert_equal(d, b, "test sort with axis=0")
d = a.copy()
d.sort(axis=1)
assert_equal(d, c, "test sort with axis=1")
d = a.copy()
d.sort()
assert_equal(d, c, "test sort with default axis")
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array sort with axis={0}'.format(axis)
assert_equal(np.sort(a, axis=axis), a, msg)
msg = 'test empty array sort with axis=None'
assert_equal(np.sort(a, axis=None), a.ravel(), msg)
# test generic class with bogus ordering,
# should not segfault.
class Boom(object):
def __lt__(self, other):
return True
a = np.array([Boom()]*100, dtype=object)
for kind in ['q', 'm', 'h']:
msg = "bogus comparison object sort, kind=%s" % kind
            c = a.copy()
            c.sort(kind=kind)
def test_void_sort(self):
# gh-8210 - previously segfaulted
for i in range(4):
arr = np.empty(1000, 'V4')
arr[::-1].sort()
dt = np.dtype([('val', 'i4', (1,))])
for i in range(4):
arr = np.empty(1000, dt)
arr[::-1].sort()
def test_sort_raises(self):
#gh-9404
arr = np.array([0, datetime.now(), 1], dtype=object)
for kind in ['q', 'm', 'h']:
assert_raises(TypeError, arr.sort, kind=kind)
#gh-3879
class Raiser(object):
def raises_anything(*args, **kwargs):
raise TypeError("SOMETHING ERRORED")
__eq__ = __ne__ = __lt__ = __gt__ = __ge__ = __le__ = raises_anything
arr = np.array([[Raiser(), n] for n in range(10)]).reshape(-1)
np.random.shuffle(arr)
for kind in ['q', 'm', 'h']:
assert_raises(TypeError, arr.sort, kind=kind)
def test_sort_degraded(self):
# test degraded dataset would take minutes to run with normal qsort
d = np.arange(1000000)
do = d.copy()
x = d
# create a median of 3 killer where each median is the sorted second
# last element of the quicksort partition
while x.size > 3:
mid = x.size // 2
x[mid], x[-2] = x[-2], x[mid]
x = x[:-2]
assert_equal(np.sort(d), do)
assert_equal(d[np.argsort(d)], do)
def test_copy(self):
def assert_fortran(arr):
assert_(arr.flags.fortran)
assert_(arr.flags.f_contiguous)
assert_(not arr.flags.c_contiguous)
def assert_c(arr):
assert_(not arr.flags.fortran)
assert_(not arr.flags.f_contiguous)
assert_(arr.flags.c_contiguous)
a = np.empty((2, 2), order='F')
# Test copying a Fortran array
assert_c(a.copy())
assert_c(a.copy('C'))
assert_fortran(a.copy('F'))
assert_fortran(a.copy('A'))
# Now test starting with a C array.
a = np.empty((2, 2), order='C')
assert_c(a.copy())
assert_c(a.copy('C'))
assert_fortran(a.copy('F'))
assert_c(a.copy('A'))
def test_sort_order(self):
# Test sorting an array with fields
x1 = np.array([21, 32, 14])
x2 = np.array(['my', 'first', 'name'])
x3 = np.array([3.1, 4.5, 6.2])
r = np.rec.fromarrays([x1, x2, x3], names='id,word,number')
r.sort(order=['id'])
assert_equal(r.id, np.array([14, 21, 32]))
assert_equal(r.word, np.array(['name', 'my', 'first']))
assert_equal(r.number, np.array([6.2, 3.1, 4.5]))
r.sort(order=['word'])
assert_equal(r.id, np.array([32, 21, 14]))
assert_equal(r.word, np.array(['first', 'my', 'name']))
assert_equal(r.number, np.array([4.5, 3.1, 6.2]))
r.sort(order=['number'])
assert_equal(r.id, np.array([21, 32, 14]))
assert_equal(r.word, np.array(['my', 'first', 'name']))
assert_equal(r.number, np.array([3.1, 4.5, 6.2]))
assert_raises_regex(ValueError, 'duplicate',
lambda: r.sort(order=['id', 'id']))
if sys.byteorder == 'little':
strtype = '>i2'
else:
strtype = '<i2'
mydtype = [('name', strchar + '5'), ('col2', strtype)]
r = np.array([('a', 1), ('b', 255), ('c', 3), ('d', 258)],
dtype=mydtype)
r.sort(order='col2')
assert_equal(r['col2'], [1, 3, 255, 258])
assert_equal(r, np.array([('a', 1), ('c', 3), ('b', 255), ('d', 258)],
dtype=mydtype))
def test_argsort(self):
# all c scalar argsorts use the same code with different types
# so it suffices to run a quick check with one type. The number
# of sorted items must be greater than ~50 to check the actual
# algorithm because quick and merge sort fall over to insertion
# sort for small arrays.
a = np.arange(101)
b = a[::-1].copy()
for kind in ['q', 'm', 'h']:
msg = "scalar argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), a, msg)
assert_equal(b.copy().argsort(kind=kind), b, msg)
# test complex argsorts. These use the same code as the scalars
# but the compare function differs.
ai = a*1j + 1
bi = b*1j + 1
for kind in ['q', 'm', 'h']:
msg = "complex argsort, kind=%s" % kind
assert_equal(ai.copy().argsort(kind=kind), a, msg)
assert_equal(bi.copy().argsort(kind=kind), b, msg)
ai = a + 1j
bi = b + 1j
for kind in ['q', 'm', 'h']:
msg = "complex argsort, kind=%s" % kind
assert_equal(ai.copy().argsort(kind=kind), a, msg)
assert_equal(bi.copy().argsort(kind=kind), b, msg)
# test argsort of complex arrays requiring byte-swapping, gh-5441
for endianess in '<>':
for dt in np.typecodes['Complex']:
arr = np.array([1+3.j, 2+2.j, 3+1.j], dtype=endianess + dt)
msg = 'byte-swapped complex argsort, dtype={0}'.format(dt)
assert_equal(arr.argsort(),
np.arange(len(arr), dtype=np.intp), msg)
# test string argsorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)])
b = a[::-1].copy()
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h']:
msg = "string argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test unicode argsorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)], dtype=np.unicode)
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h']:
msg = "unicode argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test object array argsorts.
a = np.empty((101,), dtype=object)
a[:] = list(range(101))
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h']:
msg = "object argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test structured array argsorts.
dt = np.dtype([('f', float), ('i', int)])
a = np.array([(i, i) for i in range(101)], dtype=dt)
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h']:
msg = "structured array argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test datetime64 argsorts.
a = np.arange(0, 101, dtype='datetime64[D]')
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'h', 'm']:
msg = "datetime64 argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test timedelta64 argsorts.
a = np.arange(0, 101, dtype='timedelta64[D]')
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'h', 'm']:
msg = "timedelta64 argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# check axis handling. This should be the same for all type
# specific argsorts, so we only check it for one type and one kind
a = np.array([[3, 2], [1, 0]])
b = np.array([[1, 1], [0, 0]])
c = np.array([[1, 0], [1, 0]])
assert_equal(a.copy().argsort(axis=0), b)
assert_equal(a.copy().argsort(axis=1), c)
assert_equal(a.copy().argsort(), c)
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array argsort with axis={0}'.format(axis)
assert_equal(np.argsort(a, axis=axis),
np.zeros_like(a, dtype=np.intp), msg)
msg = 'test empty array argsort with axis=None'
assert_equal(np.argsort(a, axis=None),
np.zeros_like(a.ravel(), dtype=np.intp), msg)
# check that stable argsorts are stable
r = np.arange(100)
# scalars
a = np.zeros(100)
assert_equal(a.argsort(kind='m'), r)
# complex
a = np.zeros(100, dtype=complex)
assert_equal(a.argsort(kind='m'), r)
# string
a = np.array(['aaaaaaaaa' for i in range(100)])
assert_equal(a.argsort(kind='m'), r)
# unicode
a = np.array(['aaaaaaaaa' for i in range(100)], dtype=np.unicode)
assert_equal(a.argsort(kind='m'), r)
def test_sort_unicode_kind(self):
d = np.arange(10)
k = b'\xc3\xa4'.decode("UTF8")
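        # a non-ASCII kind string is not a valid sort kind and must raise cleanly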
assert_raises(ValueError, d.sort, kind=k)
assert_raises(ValueError, d.argsort, kind=k)
def test_searchsorted(self):
# test for floats and complex containing nans. The logic is the
# same for all float types so only test double types for now.
# The search sorted routines use the compare functions for the
# array type, so this checks if that is consistent with the sort
# order.
# check double
a = np.array([0, 1, np.nan])
msg = "Test real searchsorted with nans, side='l'"
b = a.searchsorted(a, side='l')
assert_equal(b, np.arange(3), msg)
msg = "Test real searchsorted with nans, side='r'"
b = a.searchsorted(a, side='r')
assert_equal(b, np.arange(1, 4), msg)
# check double complex
a = np.zeros(9, dtype=np.complex128)
a.real += [0, 0, 1, 1, 0, 1, np.nan, np.nan, np.nan]
a.imag += [0, 1, 0, 1, np.nan, np.nan, 0, 1, np.nan]
msg = "Test complex searchsorted with nans, side='l'"
b = a.searchsorted(a, side='l')
assert_equal(b, np.arange(9), msg)
msg = "Test complex searchsorted with nans, side='r'"
b = a.searchsorted(a, side='r')
assert_equal(b, np.arange(1, 10), msg)
msg = "Test searchsorted with little endian, side='l'"
a = np.array([0, 128], dtype='<i4')
b = a.searchsorted(np.array(128, dtype='<i4'))
assert_equal(b, 1, msg)
msg = "Test searchsorted with big endian, side='l'"
a = np.array([0, 128], dtype='>i4')
b = a.searchsorted(np.array(128, dtype='>i4'))
assert_equal(b, 1, msg)
# Check 0 elements
a = np.ones(0)
b = a.searchsorted([0, 1, 2], 'l')
assert_equal(b, [0, 0, 0])
b = a.searchsorted([0, 1, 2], 'r')
assert_equal(b, [0, 0, 0])
        # Check 1 element
        a = np.ones(1)
b = a.searchsorted([0, 1, 2], 'l')
assert_equal(b, [0, 0, 1])
b = a.searchsorted([0, 1, 2], 'r')
assert_equal(b, [0, 1, 1])
# Check all elements equal
a = np.ones(2)
b = a.searchsorted([0, 1, 2], 'l')
assert_equal(b, [0, 0, 2])
b = a.searchsorted([0, 1, 2], 'r')
assert_equal(b, [0, 2, 2])
# Test searching unaligned array
a = np.arange(10)
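        # view the buffer at a one-byte offset to get an array whose data
        # pointer is misaligned for its dtype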
aligned = np.empty(a.itemsize * a.size + 1, 'uint8')
unaligned = aligned[1:].view(a.dtype)
unaligned[:] = a
# Test searching unaligned array
b = unaligned.searchsorted(a, 'l')
assert_equal(b, a)
b = unaligned.searchsorted(a, 'r')
assert_equal(b, a + 1)
# Test searching for unaligned keys
b = a.searchsorted(unaligned, 'l')
assert_equal(b, a)
b = a.searchsorted(unaligned, 'r')
assert_equal(b, a + 1)
# Test smart resetting of binsearch indices
a = np.arange(5)
b = a.searchsorted([6, 5, 4], 'l')
assert_equal(b, [5, 5, 4])
b = a.searchsorted([6, 5, 4], 'r')
assert_equal(b, [5, 5, 5])
# Test all type specific binary search functions
types = ''.join((np.typecodes['AllInteger'], np.typecodes['AllFloat'],
np.typecodes['Datetime'], '?O'))
for dt in types:
if dt == 'M':
dt = 'M8[D]'
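            # booleans have only two distinct values, so a length-2 sorted
            # array suffices for them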
if dt == '?':
a = np.arange(2, dtype=dt)
out = np.arange(2)
else:
a = np.arange(0, 5, dtype=dt)
out = np.arange(5)
b = a.searchsorted(a, 'l')
assert_equal(b, out)
b = a.searchsorted(a, 'r')
assert_equal(b, out + 1)
def test_searchsorted_unicode(self):
# Test searchsorted on unicode strings.
# 1.6.1 contained a string length miscalculation in
# arraytypes.c.src:UNICODE_compare() which manifested as
# incorrect/inconsistent results from searchsorted.
a = np.array(['P:\\20x_dapi_cy3\\20x_dapi_cy3_20100185_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100186_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100187_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100189_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100190_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100191_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100192_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100193_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100194_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100195_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100196_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100197_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100198_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100199_1'],
dtype=np.unicode)
ind = np.arange(len(a))
assert_equal([a.searchsorted(v, 'left') for v in a], ind)
assert_equal([a.searchsorted(v, 'right') for v in a], ind + 1)
assert_equal([a.searchsorted(a[i], 'left') for i in ind], ind)
assert_equal([a.searchsorted(a[i], 'right') for i in ind], ind + 1)
def test_searchsorted_with_sorter(self):
a = np.array([5, 2, 1, 3, 4])
s = np.argsort(a)
assert_raises(TypeError, np.searchsorted, a, 0, sorter=(1, (2, 3)))
assert_raises(TypeError, np.searchsorted, a, 0, sorter=[1.1])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[1, 2, 3, 4])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[1, 2, 3, 4, 5, 6])
# bounds check
assert_raises(ValueError, np.searchsorted, a, 4, sorter=[0, 1, 2, 3, 5])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[-1, 0, 1, 2, 3])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[4, 0, -1, 2, 3])
a = np.random.rand(300)
s = a.argsort()
b = np.sort(a)
k = np.linspace(0, 1, 20)
assert_equal(b.searchsorted(k), a.searchsorted(k, sorter=s))
a = np.array([0, 1, 2, 3, 5]*20)
s = a.argsort()
k = [0, 1, 2, 3, 5]
expected = [0, 20, 40, 60, 80]
assert_equal(a.searchsorted(k, side='l', sorter=s), expected)
expected = [20, 40, 60, 80, 100]
assert_equal(a.searchsorted(k, side='r', sorter=s), expected)
# Test searching unaligned array
keys = np.arange(10)
a = keys.copy()
        np.random.shuffle(a)
s = a.argsort()
aligned = np.empty(a.itemsize * a.size + 1, 'uint8')
unaligned = aligned[1:].view(a.dtype)
# Test searching unaligned array
unaligned[:] = a
b = unaligned.searchsorted(keys, 'l', s)
assert_equal(b, keys)
b = unaligned.searchsorted(keys, 'r', s)
assert_equal(b, keys + 1)
# Test searching for unaligned keys
unaligned[:] = keys
b = a.searchsorted(unaligned, 'l', s)
assert_equal(b, keys)
b = a.searchsorted(unaligned, 'r', s)
assert_equal(b, keys + 1)
# Test all type specific indirect binary search functions
types = ''.join((np.typecodes['AllInteger'], np.typecodes['AllFloat'],
np.typecodes['Datetime'], '?O'))
for dt in types:
if dt == 'M':
dt = 'M8[D]'
if dt == '?':
a = np.array([1, 0], dtype=dt)
# We want the sorter array to be of a type that is different
# from np.intp in all platforms, to check for #4698
s = np.array([1, 0], dtype=np.int16)
out = np.array([1, 0])
else:
a = np.array([3, 4, 1, 2, 0], dtype=dt)
# We want the sorter array to be of a type that is different
# from np.intp in all platforms, to check for #4698
s = np.array([4, 2, 3, 0, 1], dtype=np.int16)
out = np.array([3, 4, 1, 2, 0], dtype=np.intp)
b = a.searchsorted(a, 'l', s)
assert_equal(b, out)
b = a.searchsorted(a, 'r', s)
assert_equal(b, out + 1)
# Test non-contiguous sorter array
a = np.array([3, 4, 1, 2, 0])
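        # interleave the sorter entries with -1 filler so the strided view
        # taken below is a valid but non-contiguous sorter array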
srt = np.empty((10,), dtype=np.intp)
srt[1::2] = -1
srt[::2] = [4, 2, 3, 0, 1]
s = srt[::2]
out = np.array([3, 4, 1, 2, 0], dtype=np.intp)
b = a.searchsorted(a, 'l', s)
assert_equal(b, out)
b = a.searchsorted(a, 'r', s)
assert_equal(b, out + 1)
def test_searchsorted_return_type(self):
# Functions returning indices should always return base ndarrays
class A(np.ndarray):
pass
a = np.arange(5).view(A)
b = np.arange(1, 3).view(A)
s = np.arange(5).view(A)
assert_(not isinstance(a.searchsorted(b, 'l'), A))
assert_(not isinstance(a.searchsorted(b, 'r'), A))
assert_(not isinstance(a.searchsorted(b, 'l', s), A))
assert_(not isinstance(a.searchsorted(b, 'r', s), A))
def test_argpartition_out_of_range(self):
# Test out of range values in kth raise an error, gh-5469
d = np.arange(10)
assert_raises(ValueError, d.argpartition, 10)
assert_raises(ValueError, d.argpartition, -11)
# Test also for generic type argpartition, which uses sorting
# and used to not bound check kth
d_obj = np.arange(10, dtype=object)
assert_raises(ValueError, d_obj.argpartition, 10)
assert_raises(ValueError, d_obj.argpartition, -11)
def test_partition_out_of_range(self):
# Test out of range values in kth raise an error, gh-5469
d = np.arange(10)
assert_raises(ValueError, d.partition, 10)
assert_raises(ValueError, d.partition, -11)
# Test also for generic type partition, which uses sorting
# and used to not bound check kth
d_obj = np.arange(10, dtype=object)
assert_raises(ValueError, d_obj.partition, 10)
assert_raises(ValueError, d_obj.partition, -11)
def test_argpartition_integer(self):
        # Test non-integer values in kth raise an error
d = np.arange(10)
assert_raises(TypeError, d.argpartition, 9.)
# Test also for generic type argpartition, which uses sorting
# and used to not bound check kth
d_obj = np.arange(10, dtype=object)
assert_raises(TypeError, d_obj.argpartition, 9.)
def test_partition_integer(self):
        # Test non-integer values in kth raise an error
d = np.arange(10)
assert_raises(TypeError, d.partition, 9.)
# Test also for generic type partition, which uses sorting
# and used to not bound check kth
d_obj = np.arange(10, dtype=object)
assert_raises(TypeError, d_obj.partition, 9.)
def test_partition_empty_array(self):
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array partition with axis={0}'.format(axis)
assert_equal(np.partition(a, 0, axis=axis), a, msg)
msg = 'test empty array partition with axis=None'
assert_equal(np.partition(a, 0, axis=None), a.ravel(), msg)
def test_argpartition_empty_array(self):
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array argpartition with axis={0}'.format(axis)
            assert_equal(np.argpartition(a, 0, axis=axis),
np.zeros_like(a, dtype=np.intp), msg)
msg = 'test empty array argpartition with axis=None'
        assert_equal(np.argpartition(a, 0, axis=None),
np.zeros_like(a.ravel(), dtype=np.intp), msg)
def test_partition(self):
d = np.arange(10)
assert_raises(TypeError, np.partition, d, 2, kind=1)
assert_raises(ValueError, np.partition, d, 2, kind="nonsense")
assert_raises(ValueError, np.argpartition, d, 2, kind="nonsense")
assert_raises(ValueError, d.partition, 2, axis=0, kind="nonsense")
assert_raises(ValueError, d.argpartition, 2, axis=0, kind="nonsense")
for k in ("introselect",):
d = np.array([])
assert_array_equal(np.partition(d, 0, kind=k), d)
assert_array_equal(np.argpartition(d, 0, kind=k), d)
d = np.ones(1)
assert_array_equal(np.partition(d, 0, kind=k)[0], d)
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
# kth not modified
kth = np.array([30, 15, 5])
okth = kth.copy()
np.partition(np.arange(40), kth)
assert_array_equal(kth, okth)
for r in ([2, 1], [1, 2], [1, 1]):
d = np.array(r)
tgt = np.sort(d)
assert_array_equal(np.partition(d, 0, kind=k)[0], tgt[0])
assert_array_equal(np.partition(d, 1, kind=k)[1], tgt[1])
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
assert_array_equal(d[np.argpartition(d, 1, kind=k)],
np.partition(d, 1, kind=k))
for i in range(d.size):
d[i:].partition(0, kind=k)
assert_array_equal(d, tgt)
for r in ([3, 2, 1], [1, 2, 3], [2, 1, 3], [2, 3, 1],
[1, 1, 1], [1, 2, 2], [2, 2, 1], [1, 2, 1]):
d = np.array(r)
tgt = np.sort(d)
assert_array_equal(np.partition(d, 0, kind=k)[0], tgt[0])
assert_array_equal(np.partition(d, 1, kind=k)[1], tgt[1])
assert_array_equal(np.partition(d, 2, kind=k)[2], tgt[2])
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
assert_array_equal(d[np.argpartition(d, 1, kind=k)],
np.partition(d, 1, kind=k))
assert_array_equal(d[np.argpartition(d, 2, kind=k)],
np.partition(d, 2, kind=k))
for i in range(d.size):
d[i:].partition(0, kind=k)
assert_array_equal(d, tgt)
d = np.ones(50)
assert_array_equal(np.partition(d, 0, kind=k), d)
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
# sorted
d = np.arange(49)
assert_equal(np.partition(d, 5, kind=k)[5], 5)
assert_equal(np.partition(d, 15, kind=k)[15], 15)
assert_array_equal(d[np.argpartition(d, 5, kind=k)],
np.partition(d, 5, kind=k))
assert_array_equal(d[np.argpartition(d, 15, kind=k)],
np.partition(d, 15, kind=k))
# rsorted
d = np.arange(47)[::-1]
assert_equal(np.partition(d, 6, kind=k)[6], 6)
assert_equal(np.partition(d, 16, kind=k)[16], 16)
assert_array_equal(d[np.argpartition(d, 6, kind=k)],
np.partition(d, 6, kind=k))
assert_array_equal(d[np.argpartition(d, 16, kind=k)],
np.partition(d, 16, kind=k))
assert_array_equal(np.partition(d, -6, kind=k),
np.partition(d, 41, kind=k))
assert_array_equal(np.partition(d, -16, kind=k),
np.partition(d, 31, kind=k))
assert_array_equal(d[np.argpartition(d, -6, kind=k)],
np.partition(d, 41, kind=k))
# median of 3 killer, O(n^2) on pure median 3 pivot quickselect
# exercises the median of median of 5 code used to keep O(n)
d = np.arange(1000000)
x = np.roll(d, d.size // 2)
mid = x.size // 2 + 1
assert_equal(np.partition(x, mid)[mid], mid)
d = np.arange(1000001)
x = np.roll(d, d.size // 2 + 1)
mid = x.size // 2 + 1
assert_equal(np.partition(x, mid)[mid], mid)
# max
d = np.ones(10)
d[1] = 4
assert_equal(np.partition(d, (2, -1))[-1], 4)
assert_equal(np.partition(d, (2, -1))[2], 1)
assert_equal(d[np.argpartition(d, (2, -1))][-1], 4)
assert_equal(d[np.argpartition(d, (2, -1))][2], 1)
d[1] = np.nan
assert_(np.isnan(d[np.argpartition(d, (2, -1))][-1]))
assert_(np.isnan(np.partition(d, (2, -1))[-1]))
# equal elements
d = np.arange(47) % 7
tgt = np.sort(np.arange(47) % 7)
np.random.shuffle(d)
for i in range(d.size):
assert_equal(np.partition(d, i, kind=k)[i], tgt[i])
assert_array_equal(d[np.argpartition(d, 6, kind=k)],
np.partition(d, 6, kind=k))
assert_array_equal(d[np.argpartition(d, 16, kind=k)],
np.partition(d, 16, kind=k))
for i in range(d.size):
d[i:].partition(0, kind=k)
assert_array_equal(d, tgt)
d = np.array([0, 1, 2, 3, 4, 5, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 9])
kth = [0, 3, 19, 20]
assert_equal(np.partition(d, kth, kind=k)[kth], (0, 3, 7, 7))
assert_equal(d[np.argpartition(d, kth, kind=k)][kth], (0, 3, 7, 7))
d = np.array([2, 1])
d.partition(0, kind=k)
assert_raises(ValueError, d.partition, 2)
assert_raises(np.AxisError, d.partition, 3, axis=1)
assert_raises(ValueError, np.partition, d, 2)
assert_raises(np.AxisError, np.partition, d, 2, axis=1)
assert_raises(ValueError, d.argpartition, 2)
assert_raises(np.AxisError, d.argpartition, 3, axis=1)
assert_raises(ValueError, np.argpartition, d, 2)
assert_raises(np.AxisError, np.argpartition, d, 2, axis=1)
d = np.arange(10).reshape((2, 5))
d.partition(1, axis=0, kind=k)
d.partition(4, axis=1, kind=k)
np.partition(d, 1, axis=0, kind=k)
np.partition(d, 4, axis=1, kind=k)
np.partition(d, 1, axis=None, kind=k)
np.partition(d, 9, axis=None, kind=k)
d.argpartition(1, axis=0, kind=k)
d.argpartition(4, axis=1, kind=k)
np.argpartition(d, 1, axis=0, kind=k)
np.argpartition(d, 4, axis=1, kind=k)
np.argpartition(d, 1, axis=None, kind=k)
np.argpartition(d, 9, axis=None, kind=k)
assert_raises(ValueError, d.partition, 2, axis=0)
assert_raises(ValueError, d.partition, 11, axis=1)
assert_raises(TypeError, d.partition, 2, axis=None)
assert_raises(ValueError, np.partition, d, 9, axis=1)
assert_raises(ValueError, np.partition, d, 11, axis=None)
assert_raises(ValueError, d.argpartition, 2, axis=0)
assert_raises(ValueError, d.argpartition, 11, axis=1)
assert_raises(ValueError, np.argpartition, d, 9, axis=1)
assert_raises(ValueError, np.argpartition, d, 11, axis=None)
td = [(dt, s) for dt in [np.int32, np.float32, np.complex64]
for s in (9, 16)]
for dt, s in td:
aae = assert_array_equal
at = assert_
d = np.arange(s, dtype=dt)
np.random.shuffle(d)
d1 = np.tile(np.arange(s, dtype=dt), (4, 1))
                # shuffle each row in place (a bare map() would be a no-op on Python 3)
                for row in d1:
                    np.random.shuffle(row)
d0 = np.transpose(d1)
for i in range(d.size):
p = np.partition(d, i, kind=k)
assert_equal(p[i], i)
# all before are smaller
assert_array_less(p[:i], p[i])
# all after are larger
assert_array_less(p[i], p[i + 1:])
aae(p, d[np.argpartition(d, i, kind=k)])
p = np.partition(d1, i, axis=1, kind=k)
aae(p[:, i], np.array([i] * d1.shape[0], dtype=dt))
# array_less does not seem to work right
at((p[:, :i].T <= p[:, i]).all(),
msg="%d: %r <= %r" % (i, p[:, i], p[:, :i].T))
at((p[:, i + 1:].T > p[:, i]).all(),
msg="%d: %r < %r" % (i, p[:, i], p[:, i + 1:].T))
aae(p, d1[np.arange(d1.shape[0])[:, None],
np.argpartition(d1, i, axis=1, kind=k)])
p = np.partition(d0, i, axis=0, kind=k)
aae(p[i, :], np.array([i] * d1.shape[0], dtype=dt))
# array_less does not seem to work right
at((p[:i, :] <= p[i, :]).all(),
msg="%d: %r <= %r" % (i, p[i, :], p[:i, :]))
at((p[i + 1:, :] > p[i, :]).all(),
                       msg="%d: %r < %r" % (i, p[i, :], p[i + 1:, :]))
aae(p, d0[np.argpartition(d0, i, axis=0, kind=k),
np.arange(d0.shape[1])[None, :]])
# check inplace
dc = d.copy()
dc.partition(i, kind=k)
assert_equal(dc, np.partition(d, i, kind=k))
dc = d0.copy()
dc.partition(i, axis=0, kind=k)
assert_equal(dc, np.partition(d0, i, axis=0, kind=k))
dc = d1.copy()
dc.partition(i, axis=1, kind=k)
assert_equal(dc, np.partition(d1, i, axis=1, kind=k))
def assert_partitioned(self, d, kth):
prev = 0
for k in np.sort(kth):
assert_array_less(d[prev:k], d[k], err_msg='kth %d' % k)
assert_((d[k:] >= d[k]).all(),
msg="kth %d, %r not greater equal %d" % (k, d[k:], d[k]))
prev = k + 1
def test_partition_iterative(self):
d = np.arange(17)
kth = (0, 1, 2, 429, 231)
assert_raises(ValueError, d.partition, kth)
assert_raises(ValueError, d.argpartition, kth)
d = np.arange(10).reshape((2, 5))
assert_raises(ValueError, d.partition, kth, axis=0)
assert_raises(ValueError, d.partition, kth, axis=1)
assert_raises(ValueError, np.partition, d, kth, axis=1)
assert_raises(ValueError, np.partition, d, kth, axis=None)
d = np.array([3, 4, 2, 1])
p = np.partition(d, (0, 3))
self.assert_partitioned(p, (0, 3))
self.assert_partitioned(d[np.argpartition(d, (0, 3))], (0, 3))
assert_array_equal(p, np.partition(d, (-3, -1)))
assert_array_equal(p, d[np.argpartition(d, (-3, -1))])
d = np.arange(17)
np.random.shuffle(d)
d.partition(range(d.size))
assert_array_equal(np.arange(17), d)
np.random.shuffle(d)
assert_array_equal(np.arange(17), d[d.argpartition(range(d.size))])
# test unsorted kth
d = np.arange(17)
np.random.shuffle(d)
keys = np.array([1, 3, 8, -2])
np.random.shuffle(d)
p = np.partition(d, keys)
self.assert_partitioned(p, keys)
p = d[np.argpartition(d, keys)]
self.assert_partitioned(p, keys)
np.random.shuffle(keys)
assert_array_equal(np.partition(d, keys), p)
assert_array_equal(d[np.argpartition(d, keys)], p)
# equal kth
d = np.arange(20)[::-1]
self.assert_partitioned(np.partition(d, [5]*4), [5])
self.assert_partitioned(np.partition(d, [5]*4 + [6, 13]),
[5]*4 + [6, 13])
self.assert_partitioned(d[np.argpartition(d, [5]*4)], [5])
self.assert_partitioned(d[np.argpartition(d, [5]*4 + [6, 13])],
[5]*4 + [6, 13])
d = np.arange(12)
np.random.shuffle(d)
d1 = np.tile(np.arange(12), (4, 1))
        # shuffle each row in place (a bare map() would be a no-op on Python 3)
        for row in d1:
            np.random.shuffle(row)
d0 = np.transpose(d1)
kth = (1, 6, 7, -1)
p = np.partition(d1, kth, axis=1)
pa = d1[np.arange(d1.shape[0])[:, None],
d1.argpartition(kth, axis=1)]
assert_array_equal(p, pa)
for i in range(d1.shape[0]):
self.assert_partitioned(p[i,:], kth)
p = np.partition(d0, kth, axis=0)
pa = d0[np.argpartition(d0, kth, axis=0),
np.arange(d0.shape[1])[None,:]]
assert_array_equal(p, pa)
for i in range(d0.shape[1]):
self.assert_partitioned(p[:, i], kth)
def test_partition_cdtype(self):
d = np.array([('Galahad', 1.7, 38), ('Arthur', 1.8, 41),
('Lancelot', 1.9, 38)],
dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')])
tgt = np.sort(d, order=['age', 'height'])
assert_array_equal(np.partition(d, range(d.size),
order=['age', 'height']),
tgt)
assert_array_equal(d[np.argpartition(d, range(d.size),
order=['age', 'height'])],
tgt)
for k in range(d.size):
assert_equal(np.partition(d, k, order=['age', 'height'])[k],
tgt[k])
assert_equal(d[np.argpartition(d, k, order=['age', 'height'])][k],
tgt[k])
d = np.array(['Galahad', 'Arthur', 'zebra', 'Lancelot'])
tgt = np.sort(d)
assert_array_equal(np.partition(d, range(d.size)), tgt)
for k in range(d.size):
assert_equal(np.partition(d, k)[k], tgt[k])
assert_equal(d[np.argpartition(d, k)][k], tgt[k])
def test_partition_unicode_kind(self):
d = np.arange(10)
k = b'\xc3\xa4'.decode("UTF8")
assert_raises(ValueError, d.partition, 2, kind=k)
assert_raises(ValueError, d.argpartition, 2, kind=k)
def test_partition_fuzz(self):
# a few rounds of random data testing
for j in range(10, 30):
for i in range(1, j - 2):
d = np.arange(j)
np.random.shuffle(d)
d = d % np.random.randint(2, 30)
idx = np.random.randint(d.size)
kth = [0, idx, i, i + 1]
tgt = np.sort(d)[kth]
assert_array_equal(np.partition(d, kth)[kth], tgt,
err_msg="data: %r\n kth: %r" % (d, kth))
def test_argpartition_gh5524(self):
# A test for functionality of argpartition on lists.
        d = [6, 7, 3, 2, 9, 0]
        p = np.argpartition(d, 1)
        self.assert_partitioned(np.array(d)[p], [1])
def test_flatten(self):
x0 = np.array([[1, 2, 3], [4, 5, 6]], np.int32)
x1 = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]], np.int32)
y0 = np.array([1, 2, 3, 4, 5, 6], np.int32)
y0f = np.array([1, 4, 2, 5, 3, 6], np.int32)
y1 = np.array([1, 2, 3, 4, 5, 6, 7, 8], np.int32)
y1f = np.array([1, 5, 3, 7, 2, 6, 4, 8], np.int32)
assert_equal(x0.flatten(), y0)
assert_equal(x0.flatten('F'), y0f)
assert_equal(x0.flatten('F'), x0.T.flatten())
assert_equal(x1.flatten(), y1)
assert_equal(x1.flatten('F'), y1f)
assert_equal(x1.flatten('F'), x1.T.flatten())
def test_dot(self):
a = np.array([[1, 0], [0, 1]])
b = np.array([[0, 1], [1, 0]])
c = np.array([[9, 1], [1, -9]])
d = np.arange(24).reshape(4, 6)
ddt = np.array(
[[ 55, 145, 235, 325],
[ 145, 451, 757, 1063],
[ 235, 757, 1279, 1801],
[ 325, 1063, 1801, 2539]]
)
dtd = np.array(
[[504, 540, 576, 612, 648, 684],
[540, 580, 620, 660, 700, 740],
[576, 620, 664, 708, 752, 796],
[612, 660, 708, 756, 804, 852],
[648, 700, 752, 804, 856, 908],
[684, 740, 796, 852, 908, 964]]
)
# gemm vs syrk optimizations
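        # a is the 2x2 identity, so every product below must reproduce a itself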
for et in [np.float32, np.float64, np.complex64, np.complex128]:
eaf = a.astype(et)
assert_equal(np.dot(eaf, eaf), eaf)
assert_equal(np.dot(eaf.T, eaf), eaf)
assert_equal(np.dot(eaf, eaf.T), eaf)
assert_equal(np.dot(eaf.T, eaf.T), eaf)
assert_equal(np.dot(eaf.T.copy(), eaf), eaf)
assert_equal(np.dot(eaf, eaf.T.copy()), eaf)
assert_equal(np.dot(eaf.T.copy(), eaf.T.copy()), eaf)
# syrk validations
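        # b is a symmetric permutation matrix that is its own inverse, so each
        # product of b with a transpose/copy of b gives the identity (eaf)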
for et in [np.float32, np.float64, np.complex64, np.complex128]:
eaf = a.astype(et)
ebf = b.astype(et)
assert_equal(np.dot(ebf, ebf), eaf)
assert_equal(np.dot(ebf.T, ebf), eaf)
assert_equal(np.dot(ebf, ebf.T), eaf)
assert_equal(np.dot(ebf.T, ebf.T), eaf)
# syrk - different shape, stride, and view validations
for et in [np.float32, np.float64, np.complex64, np.complex128]:
edf = d.astype(et)
assert_equal(
np.dot(edf[::-1, :], edf.T),
np.dot(edf[::-1, :].copy(), edf.T.copy())
)
assert_equal(
np.dot(edf[:, ::-1], edf.T),
np.dot(edf[:, ::-1].copy(), edf.T.copy())
)
assert_equal(
np.dot(edf, edf[::-1, :].T),
np.dot(edf, edf[::-1, :].T.copy())
)
assert_equal(
np.dot(edf, edf[:, ::-1].T),
np.dot(edf, edf[:, ::-1].T.copy())
)
assert_equal(
np.dot(edf[:edf.shape[0] // 2, :], edf[::2, :].T),
np.dot(edf[:edf.shape[0] // 2, :].copy(), edf[::2, :].T.copy())
)
assert_equal(
np.dot(edf[::2, :], edf[:edf.shape[0] // 2, :].T),
np.dot(edf[::2, :].copy(), edf[:edf.shape[0] // 2, :].T.copy())
)
# syrk - different shape
for et in [np.float32, np.float64, np.complex64, np.complex128]:
edf = d.astype(et)
eddtf = ddt.astype(et)
edtdf = dtd.astype(et)
assert_equal(np.dot(edf, edf.T), eddtf)
assert_equal(np.dot(edf.T, edf), edtdf)
# function versus methods
assert_equal(np.dot(a, b), a.dot(b))
assert_equal(np.dot(np.dot(a, b), c), a.dot(b).dot(c))
# test passing in an output array
c = np.zeros_like(a)
a.dot(b, c)
assert_equal(c, np.dot(a, b))
# test keyword args
c = np.zeros_like(a)
a.dot(b=b, out=c)
assert_equal(c, np.dot(a, b))
def test_dot_type_mismatch(self):
c = 1.
A = np.array((1,1), dtype='i,i')
assert_raises(TypeError, np.dot, c, A)
assert_raises(TypeError, np.dot, A, c)
def test_dot_out_mem_overlap(self):
np.random.seed(1)
# Test BLAS and non-BLAS code paths, including all dtypes
# that dot() supports
dtypes = [np.dtype(code) for code in np.typecodes['All']
if code not in 'USVM']
for dtype in dtypes:
a = np.random.rand(3, 3).astype(dtype)
# Valid dot() output arrays must be aligned
b = _aligned_zeros((3, 3), dtype=dtype)
b[...] = np.random.rand(3, 3)
y = np.dot(a, b)
x = np.dot(a, b, out=b)
assert_equal(x, y, err_msg=repr(dtype))
# Check invalid output array
assert_raises(ValueError, np.dot, a, b, out=b[::2])
assert_raises(ValueError, np.dot, a, b, out=b.T)
def test_diagonal(self):
a = np.arange(12).reshape((3, 4))
assert_equal(a.diagonal(), [0, 5, 10])
assert_equal(a.diagonal(0), [0, 5, 10])
assert_equal(a.diagonal(1), [1, 6, 11])
assert_equal(a.diagonal(-1), [4, 9])
b = np.arange(8).reshape((2, 2, 2))
assert_equal(b.diagonal(), [[0, 6], [1, 7]])
assert_equal(b.diagonal(0), [[0, 6], [1, 7]])
assert_equal(b.diagonal(1), [[2], [3]])
assert_equal(b.diagonal(-1), [[4], [5]])
assert_raises(ValueError, b.diagonal, axis1=0, axis2=0)
assert_equal(b.diagonal(0, 1, 2), [[0, 3], [4, 7]])
assert_equal(b.diagonal(0, 0, 1), [[0, 6], [1, 7]])
assert_equal(b.diagonal(offset=1, axis1=0, axis2=2), [[1], [3]])
# Order of axis argument doesn't matter:
assert_equal(b.diagonal(0, 2, 1), [[0, 3], [4, 7]])
def test_diagonal_view_notwriteable(self):
# this test is only for 1.9, the diagonal view will be
# writeable in 1.10.
a = np.eye(3).diagonal()
assert_(not a.flags.writeable)
assert_(not a.flags.owndata)
a = np.diagonal(np.eye(3))
assert_(not a.flags.writeable)
assert_(not a.flags.owndata)
a = np.diag(np.eye(3))
assert_(not a.flags.writeable)
assert_(not a.flags.owndata)
def test_diagonal_memleak(self):
# Regression test for a bug that crept in at one point
a = np.zeros((100, 100))
if HAS_REFCOUNT:
assert_(sys.getrefcount(a) < 50)
for i in range(100):
a.diagonal()
if HAS_REFCOUNT:
assert_(sys.getrefcount(a) < 50)
def test_trace(self):
a = np.arange(12).reshape((3, 4))
assert_equal(a.trace(), 15)
assert_equal(a.trace(0), 15)
assert_equal(a.trace(1), 18)
assert_equal(a.trace(-1), 13)
b = np.arange(8).reshape((2, 2, 2))
assert_equal(b.trace(), [6, 8])
assert_equal(b.trace(0), [6, 8])
assert_equal(b.trace(1), [2, 3])
assert_equal(b.trace(-1), [4, 5])
assert_equal(b.trace(0, 0, 1), [6, 8])
assert_equal(b.trace(0, 0, 2), [5, 9])
assert_equal(b.trace(0, 1, 2), [3, 11])
assert_equal(b.trace(offset=1, axis1=0, axis2=2), [1, 3])
def test_trace_subclass(self):
# The class would need to overwrite trace to ensure single-element
# output also has the right subclass.
class MyArray(np.ndarray):
pass
b = np.arange(8).reshape((2, 2, 2)).view(MyArray)
t = b.trace()
assert_(isinstance(t, MyArray))
def test_put(self):
icodes = np.typecodes['AllInteger']
fcodes = np.typecodes['AllFloat']
for dt in icodes + fcodes + 'O':
tgt = np.array([0, 1, 0, 3, 0, 5], dtype=dt)
# test 1-d
a = np.zeros(6, dtype=dt)
a.put([1, 3, 5], [1, 3, 5])
assert_equal(a, tgt)
# test 2-d
a = np.zeros((2, 3), dtype=dt)
a.put([1, 3, 5], [1, 3, 5])
assert_equal(a, tgt.reshape(2, 3))
for dt in '?':
tgt = np.array([False, True, False, True, False, True], dtype=dt)
# test 1-d
a = np.zeros(6, dtype=dt)
a.put([1, 3, 5], [True]*3)
assert_equal(a, tgt)
# test 2-d
a = np.zeros((2, 3), dtype=dt)
a.put([1, 3, 5], [True]*3)
assert_equal(a, tgt.reshape(2, 3))
# check must be writeable
a = np.zeros(6)
a.flags.writeable = False
assert_raises(ValueError, a.put, [1, 3, 5], [1, 3, 5])
# when calling np.put, make sure a
# TypeError is raised if the object
# isn't an ndarray
bad_array = [1, 2, 3]
assert_raises(TypeError, np.put, bad_array, [0, 2], 5)
def test_ravel(self):
a = np.array([[0, 1], [2, 3]])
assert_equal(a.ravel(), [0, 1, 2, 3])
assert_(not a.ravel().flags.owndata)
assert_equal(a.ravel('F'), [0, 2, 1, 3])
assert_equal(a.ravel(order='C'), [0, 1, 2, 3])
assert_equal(a.ravel(order='F'), [0, 2, 1, 3])
assert_equal(a.ravel(order='A'), [0, 1, 2, 3])
assert_(not a.ravel(order='A').flags.owndata)
assert_equal(a.ravel(order='K'), [0, 1, 2, 3])
assert_(not a.ravel(order='K').flags.owndata)
assert_equal(a.ravel(), a.reshape(-1))
a = np.array([[0, 1], [2, 3]], order='F')
assert_equal(a.ravel(), [0, 1, 2, 3])
assert_equal(a.ravel(order='A'), [0, 2, 1, 3])
assert_equal(a.ravel(order='K'), [0, 2, 1, 3])
assert_(not a.ravel(order='A').flags.owndata)
assert_(not a.ravel(order='K').flags.owndata)
assert_equal(a.ravel(), a.reshape(-1))
assert_equal(a.ravel(order='A'), a.reshape(-1, order='A'))
a = np.array([[0, 1], [2, 3]])[::-1, :]
assert_equal(a.ravel(), [2, 3, 0, 1])
assert_equal(a.ravel(order='C'), [2, 3, 0, 1])
assert_equal(a.ravel(order='F'), [2, 0, 3, 1])
assert_equal(a.ravel(order='A'), [2, 3, 0, 1])
# 'K' doesn't reverse the axes of negative strides
assert_equal(a.ravel(order='K'), [2, 3, 0, 1])
assert_(a.ravel(order='K').flags.owndata)
# Test simple 1-d copy behaviour:
a = np.arange(10)[::2]
assert_(a.ravel('K').flags.owndata)
assert_(a.ravel('C').flags.owndata)
assert_(a.ravel('F').flags.owndata)
# Not contiguous and 1-sized axis with non matching stride
a = np.arange(2**3 * 2)[::2]
a = a.reshape(2, 1, 2, 2).swapaxes(-1, -2)
strides = list(a.strides)
strides[1] = 123
a.strides = strides
assert_(a.ravel(order='K').flags.owndata)
assert_equal(a.ravel('K'), np.arange(0, 15, 2))
# contiguous and 1-sized axis with non matching stride works:
a = np.arange(2**3)
a = a.reshape(2, 1, 2, 2).swapaxes(-1, -2)
strides = list(a.strides)
strides[1] = 123
a.strides = strides
assert_(np.may_share_memory(a.ravel(order='K'), a))
assert_equal(a.ravel(order='K'), np.arange(2**3))
# Test negative strides (not very interesting since non-contiguous):
a = np.arange(4)[::-1].reshape(2, 2)
assert_(a.ravel(order='C').flags.owndata)
assert_(a.ravel(order='K').flags.owndata)
assert_equal(a.ravel('C'), [3, 2, 1, 0])
assert_equal(a.ravel('K'), [3, 2, 1, 0])
# 1-element tidy strides test (NPY_RELAXED_STRIDES_CHECKING):
a = np.array([[1]])
a.strides = (123, 432)
# If the stride is not 8, NPY_RELAXED_STRIDES_CHECKING is messing
# them up on purpose:
if np.ones(1).strides == (8,):
assert_(np.may_share_memory(a.ravel('K'), a))
assert_equal(a.ravel('K').strides, (a.dtype.itemsize,))
for order in ('C', 'F', 'A', 'K'):
# 0-d corner case:
a = np.array(0)
assert_equal(a.ravel(order), [0])
assert_(np.may_share_memory(a.ravel(order), a))
# Test that certain non-inplace ravels work right (mostly) for 'K':
b = np.arange(2**4 * 2)[::2].reshape(2, 2, 2, 2)
a = b[..., ::2]
assert_equal(a.ravel('K'), [0, 4, 8, 12, 16, 20, 24, 28])
assert_equal(a.ravel('C'), [0, 4, 8, 12, 16, 20, 24, 28])
assert_equal(a.ravel('A'), [0, 4, 8, 12, 16, 20, 24, 28])
assert_equal(a.ravel('F'), [0, 16, 8, 24, 4, 20, 12, 28])
a = b[::2, ...]
assert_equal(a.ravel('K'), [0, 2, 4, 6, 8, 10, 12, 14])
assert_equal(a.ravel('C'), [0, 2, 4, 6, 8, 10, 12, 14])
assert_equal(a.ravel('A'), [0, 2, 4, 6, 8, 10, 12, 14])
assert_equal(a.ravel('F'), [0, 8, 4, 12, 2, 10, 6, 14])
def test_ravel_subclass(self):
class ArraySubclass(np.ndarray):
pass
a = np.arange(10).view(ArraySubclass)
assert_(isinstance(a.ravel('C'), ArraySubclass))
assert_(isinstance(a.ravel('F'), ArraySubclass))
assert_(isinstance(a.ravel('A'), ArraySubclass))
assert_(isinstance(a.ravel('K'), ArraySubclass))
a = np.arange(10)[::2].view(ArraySubclass)
assert_(isinstance(a.ravel('C'), ArraySubclass))
assert_(isinstance(a.ravel('F'), ArraySubclass))
assert_(isinstance(a.ravel('A'), ArraySubclass))
assert_(isinstance(a.ravel('K'), ArraySubclass))
def test_swapaxes(self):
a = np.arange(1*2*3*4).reshape(1, 2, 3, 4).copy()
idx = np.indices(a.shape)
assert_(a.flags['OWNDATA'])
b = a.copy()
# check exceptions
assert_raises(ValueError, a.swapaxes, -5, 0)
assert_raises(ValueError, a.swapaxes, 4, 0)
assert_raises(ValueError, a.swapaxes, 0, -5)
assert_raises(ValueError, a.swapaxes, 0, 4)
for i in range(-4, 4):
for j in range(-4, 4):
for k, src in enumerate((a, b)):
c = src.swapaxes(i, j)
# check shape
shape = list(src.shape)
shape[i] = src.shape[j]
shape[j] = src.shape[i]
assert_equal(c.shape, shape, str((i, j, k)))
# check array contents
i0, i1, i2, i3 = [dim-1 for dim in c.shape]
j0, j1, j2, j3 = [dim-1 for dim in src.shape]
assert_equal(src[idx[j0], idx[j1], idx[j2], idx[j3]],
c[idx[i0], idx[i1], idx[i2], idx[i3]],
str((i, j, k)))
# check a view is always returned, gh-5260
assert_(not c.flags['OWNDATA'], str((i, j, k)))
# check on non-contiguous input array
if k == 1:
b = c
def test_conjugate(self):
a = np.array([1-1j, 1+1j, 23+23.0j])
ac = a.conj()
assert_equal(a.real, ac.real)
assert_equal(a.imag, -ac.imag)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1-1j, 1+1j, 23+23.0j], 'F')
ac = a.conj()
assert_equal(a.real, ac.real)
assert_equal(a.imag, -ac.imag)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1, 2, 3])
ac = a.conj()
assert_equal(a, ac)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1.0, 2.0, 3.0])
ac = a.conj()
assert_equal(a, ac)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1-1j, 1+1j, 1, 2.0], object)
ac = a.conj()
assert_equal(ac, [k.conjugate() for k in a])
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1-1j, 1, 2.0, 'f'], object)
assert_raises(AttributeError, lambda: a.conj())
assert_raises(AttributeError, lambda: a.conjugate())
def test__complex__(self):
dtypes = ['i1', 'i2', 'i4', 'i8',
'u1', 'u2', 'u4', 'u8',
'f', 'd', 'g', 'F', 'D', 'G',
'?', 'O']
for dt in dtypes:
a = np.array(7, dtype=dt)
b = np.array([7], dtype=dt)
c = np.array([[[[[7]]]]], dtype=dt)
msg = 'dtype: {0}'.format(dt)
ap = complex(a)
assert_equal(ap, a, msg)
bp = complex(b)
assert_equal(bp, b, msg)
cp = complex(c)
assert_equal(cp, c, msg)
def test__complex__should_not_work(self):
dtypes = ['i1', 'i2', 'i4', 'i8',
'u1', 'u2', 'u4', 'u8',
'f', 'd', 'g', 'F', 'D', 'G',
'?', 'O']
for dt in dtypes:
a = np.array([1, 2, 3], dtype=dt)
assert_raises(TypeError, complex, a)
dt = np.dtype([('a', 'f8'), ('b', 'i1')])
b = np.array((1.0, 3), dtype=dt)
assert_raises(TypeError, complex, b)
c = np.array([(1.0, 3), (2e-3, 7)], dtype=dt)
assert_raises(TypeError, complex, c)
d = np.array('1+1j')
assert_raises(TypeError, complex, d)
e = np.array(['1+1j'], 'U')
assert_raises(TypeError, complex, e)
class TestBinop(object):
def test_inplace(self):
# test refcount 1 inplace conversion
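        # (an intermediate array with a reference count of 1 may have its
        # buffer reused for in-place arithmetic; the results below must be
        # unaffected by that optimization)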
assert_array_almost_equal(np.array([0.5]) * np.array([1.0, 2.0]),
[0.5, 1.0])
d = np.array([0.5, 0.5])[::2]
assert_array_almost_equal(d * (d * np.array([1.0, 2.0])),
[0.25, 0.5])
a = np.array([0.5])
b = np.array([0.5])
c = a + b
c = a - b
c = a * b
c = a / b
assert_equal(a, b)
assert_almost_equal(c, 1.)
c = a + b * 2. / b * a - a / b
assert_equal(a, b)
assert_equal(c, 0.5)
# true divide
a = np.array([5])
b = np.array([3])
c = (a * a) / b
assert_almost_equal(c, 25 / 3)
assert_equal(a, 5)
assert_equal(b, 3)
# ndarray.__rop__ always calls ufunc
# ndarray.__iop__ always calls ufunc
# ndarray.__op__, __rop__:
# - defer if other has __array_ufunc__ and it is None
# or other is not a subclass and has higher array priority
# - else, call ufunc
def test_ufunc_binop_interaction(self):
# Python method name (without underscores)
# -> (numpy ufunc, has_in_place_version, preferred_dtype)
ops = {
'add': (np.add, True, float),
'sub': (np.subtract, True, float),
'mul': (np.multiply, True, float),
'truediv': (np.true_divide, True, float),
'floordiv': (np.floor_divide, True, float),
'mod': (np.remainder, True, float),
'divmod': (np.divmod, False, float),
'pow': (np.power, True, int),
'lshift': (np.left_shift, True, int),
'rshift': (np.right_shift, True, int),
'and': (np.bitwise_and, True, int),
'xor': (np.bitwise_xor, True, int),
'or': (np.bitwise_or, True, int),
# 'ge': (np.less_equal, False),
# 'gt': (np.less, False),
# 'le': (np.greater_equal, False),
# 'lt': (np.greater, False),
# 'eq': (np.equal, False),
# 'ne': (np.not_equal, False),
}
class Coerced(Exception):
pass
def array_impl(self):
raise Coerced
def op_impl(self, other):
return "forward"
def rop_impl(self, other):
return "reverse"
def iop_impl(self, other):
return "in-place"
def array_ufunc_impl(self, ufunc, method, *args, **kwargs):
return ("__array_ufunc__", ufunc, method, args, kwargs)
# Create an object with the given base, in the given module, with a
# bunch of placeholder __op__ methods, and optionally a
# __array_ufunc__ and __array_priority__.
def make_obj(base, array_priority=False, array_ufunc=False,
alleged_module="__main__"):
class_namespace = {"__array__": array_impl}
if array_priority is not False:
class_namespace["__array_priority__"] = array_priority
for op in ops:
class_namespace["__{0}__".format(op)] = op_impl
class_namespace["__r{0}__".format(op)] = rop_impl
class_namespace["__i{0}__".format(op)] = iop_impl
if array_ufunc is not False:
class_namespace["__array_ufunc__"] = array_ufunc
eval_namespace = {"base": base,
"class_namespace": class_namespace,
"__name__": alleged_module,
}
MyType = eval("type('MyType', (base,), class_namespace)",
eval_namespace)
if issubclass(MyType, np.ndarray):
# Use this range to avoid special case weirdnesses around
# divide-by-0, pow(x, 2), overflow due to pow(big, big), etc.
return np.arange(3, 5).view(MyType)
else:
return MyType()
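        # check() below exercises arr.__op__(obj), the reflected obj <op> arr
        # path and arr <op>= obj for every entry in ``ops`` and asserts which
        # dispatch outcome (binop override, __array_ufunc__, or coercion via
        # __array__) is taken.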
def check(obj, binop_override_expected, ufunc_override_expected,
inplace_override_expected, check_scalar=True):
for op, (ufunc, has_inplace, dtype) in ops.items():
err_msg = ('op: %s, ufunc: %s, has_inplace: %s, dtype: %s'
% (op, ufunc, has_inplace, dtype))
check_objs = [np.arange(3, 5, dtype=dtype)]
if check_scalar:
check_objs.append(check_objs[0][0])
for arr in check_objs:
arr_method = getattr(arr, "__{0}__".format(op))
def first_out_arg(result):
if op == "divmod":
assert_(isinstance(result, tuple))
return result[0]
else:
return result
# arr __op__ obj
if binop_override_expected:
assert_equal(arr_method(obj), NotImplemented, err_msg)
elif ufunc_override_expected:
assert_equal(arr_method(obj)[0], "__array_ufunc__",
err_msg)
else:
if (isinstance(obj, np.ndarray) and
(type(obj).__array_ufunc__ is
np.ndarray.__array_ufunc__)):
# __array__ gets ignored
res = first_out_arg(arr_method(obj))
assert_(res.__class__ is obj.__class__, err_msg)
else:
assert_raises((TypeError, Coerced),
arr_method, obj, err_msg=err_msg)
# obj __op__ arr
arr_rmethod = getattr(arr, "__r{0}__".format(op))
if ufunc_override_expected:
res = arr_rmethod(obj)
assert_equal(res[0], "__array_ufunc__",
err_msg=err_msg)
assert_equal(res[1], ufunc, err_msg=err_msg)
else:
if (isinstance(obj, np.ndarray) and
(type(obj).__array_ufunc__ is
np.ndarray.__array_ufunc__)):
# __array__ gets ignored
res = first_out_arg(arr_rmethod(obj))
assert_(res.__class__ is obj.__class__, err_msg)
else:
# __array_ufunc__ = "asdf" creates a TypeError
assert_raises((TypeError, Coerced),
arr_rmethod, obj, err_msg=err_msg)
# arr __iop__ obj
# array scalars don't have in-place operators
if has_inplace and isinstance(arr, np.ndarray):
arr_imethod = getattr(arr, "__i{0}__".format(op))
if inplace_override_expected:
assert_equal(arr_method(obj), NotImplemented,
err_msg=err_msg)
elif ufunc_override_expected:
res = arr_imethod(obj)
assert_equal(res[0], "__array_ufunc__", err_msg)
assert_equal(res[1], ufunc, err_msg)
assert_(type(res[-1]["out"]) is tuple, err_msg)
assert_(res[-1]["out"][0] is arr, err_msg)
else:
if (isinstance(obj, np.ndarray) and
(type(obj).__array_ufunc__ is
np.ndarray.__array_ufunc__)):
# __array__ gets ignored
assert_(arr_imethod(obj) is arr, err_msg)
else:
assert_raises((TypeError, Coerced),
arr_imethod, obj,
err_msg=err_msg)
op_fn = getattr(operator, op, None)
if op_fn is None:
op_fn = getattr(operator, op + "_", None)
if op_fn is None:
op_fn = getattr(builtins, op)
assert_equal(op_fn(obj, arr), "forward", err_msg)
if not isinstance(obj, np.ndarray):
if binop_override_expected:
assert_equal(op_fn(arr, obj), "reverse", err_msg)
elif ufunc_override_expected:
assert_equal(op_fn(arr, obj)[0], "__array_ufunc__",
err_msg)
if ufunc_override_expected:
assert_equal(ufunc(obj, arr)[0], "__array_ufunc__",
err_msg)
# No array priority, no array_ufunc -> nothing called
check(make_obj(object), False, False, False)
# Negative array priority, no array_ufunc -> nothing called
# (has to be very negative, because scalar priority is -1000000.0)
check(make_obj(object, array_priority=-2**30), False, False, False)
# Positive array priority, no array_ufunc -> binops and iops only
check(make_obj(object, array_priority=1), True, False, True)
# ndarray ignores array_priority for ndarray subclasses
check(make_obj(np.ndarray, array_priority=1), False, False, False,
check_scalar=False)
# Positive array_priority and array_ufunc -> array_ufunc only
check(make_obj(object, array_priority=1,
array_ufunc=array_ufunc_impl), False, True, False)
check(make_obj(np.ndarray, array_priority=1,
array_ufunc=array_ufunc_impl), False, True, False)
# array_ufunc set to None -> defer binops only
check(make_obj(object, array_ufunc=None), True, False, False)
check(make_obj(np.ndarray, array_ufunc=None), True, False, False,
check_scalar=False)
def test_ufunc_override_normalize_signature(self):
# gh-5674
class SomeClass(object):
def __array_ufunc__(self, ufunc, method, *inputs, **kw):
return kw
a = SomeClass()
kw = np.add(a, [1])
assert_('sig' not in kw and 'signature' not in kw)
kw = np.add(a, [1], sig='ii->i')
assert_('sig' not in kw and 'signature' in kw)
assert_equal(kw['signature'], 'ii->i')
kw = np.add(a, [1], signature='ii->i')
assert_('sig' not in kw and 'signature' in kw)
assert_equal(kw['signature'], 'ii->i')
def test_array_ufunc_index(self):
# Check that index is set appropriately, also if only an output
        # is passed on (the latter is another regression test for github bug 4753).
# This also checks implicitly that 'out' is always a tuple.
class CheckIndex(object):
def __array_ufunc__(self, ufunc, method, *inputs, **kw):
for i, a in enumerate(inputs):
if a is self:
return i
                # if we get here, self was not among the inputs, so it must be
                # one of the outputs.
for j, a in enumerate(kw['out']):
if a is self:
return (j,)
a = CheckIndex()
dummy = np.arange(2.)
# 1 input, 1 output
assert_equal(np.sin(a), 0)
assert_equal(np.sin(dummy, a), (0,))
assert_equal(np.sin(dummy, out=a), (0,))
assert_equal(np.sin(dummy, out=(a,)), (0,))
assert_equal(np.sin(a, a), 0)
assert_equal(np.sin(a, out=a), 0)
assert_equal(np.sin(a, out=(a,)), 0)
# 1 input, 2 outputs
assert_equal(np.modf(dummy, a), (0,))
assert_equal(np.modf(dummy, None, a), (1,))
assert_equal(np.modf(dummy, dummy, a), (1,))
assert_equal(np.modf(dummy, out=(a, None)), (0,))
assert_equal(np.modf(dummy, out=(a, dummy)), (0,))
assert_equal(np.modf(dummy, out=(None, a)), (1,))
assert_equal(np.modf(dummy, out=(dummy, a)), (1,))
assert_equal(np.modf(a, out=(dummy, a)), 0)
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', DeprecationWarning)
assert_equal(np.modf(dummy, out=a), (0,))
assert_(w[0].category is DeprecationWarning)
assert_raises(ValueError, np.modf, dummy, out=(a,))
# 2 inputs, 1 output
assert_equal(np.add(a, dummy), 0)
assert_equal(np.add(dummy, a), 1)
assert_equal(np.add(dummy, dummy, a), (0,))
assert_equal(np.add(dummy, a, a), 1)
assert_equal(np.add(dummy, dummy, out=a), (0,))
assert_equal(np.add(dummy, dummy, out=(a,)), (0,))
assert_equal(np.add(a, dummy, out=a), 0)
def test_out_override(self):
# regression test for github bug 4753
class OutClass(np.ndarray):
def __array_ufunc__(self, ufunc, method, *inputs, **kw):
if 'out' in kw:
tmp_kw = kw.copy()
tmp_kw.pop('out')
func = getattr(ufunc, method)
kw['out'][0][...] = func(*inputs, **tmp_kw)
A = np.array([0]).view(OutClass)
B = np.array([5])
C = np.array([6])
np.multiply(C, B, A)
assert_equal(A[0], 30)
assert_(isinstance(A, OutClass))
A[0] = 0
np.multiply(C, B, out=A)
assert_equal(A[0], 30)
assert_(isinstance(A, OutClass))
def test_pow_override_with_errors(self):
# regression test for gh-9112
class PowerOnly(np.ndarray):
def __array_ufunc__(self, ufunc, method, *inputs, **kw):
if ufunc is not np.power:
raise NotImplementedError
return "POWER!"
# explicit cast to float, to ensure the fast power path is taken.
a = np.array(5., dtype=np.float64).view(PowerOnly)
assert_equal(a ** 2.5, "POWER!")
with assert_raises(NotImplementedError):
a ** 0.5
with assert_raises(NotImplementedError):
a ** 0
with assert_raises(NotImplementedError):
a ** 1
with assert_raises(NotImplementedError):
a ** -1
with assert_raises(NotImplementedError):
a ** 2
class TestTemporaryElide(object):
# elision is only triggered on relatively large arrays
def test_extension_incref_elide(self):
# test extension (e.g. cython) calling PyNumber_* slots without
# increasing the reference counts
#
# def incref_elide(a):
# d = input.copy() # refcount 1
# return d, d + d # PyNumber_Add without increasing refcount
from numpy.core.multiarray_tests import incref_elide
d = np.ones(100000)
orig, res = incref_elide(d)
d + d
        # the returned original must not be modified by an in-place operation
assert_array_equal(orig, d)
assert_array_equal(res, d + d)
def test_extension_incref_elide_stack(self):
        # scanning whether the refcount == 1 object is on the python stack, in
        # order to check that we are called directly from python, is flawed:
        # the object may still be above the stack pointer and we have no
        # access to the top of it
#
# def incref_elide_l(d):
# return l[4] + l[4] # PyNumber_Add without increasing refcount
from numpy.core.multiarray_tests import incref_elide_l
        # padding with 1 makes sure the object on the stack is not overwritten
l = [1, 1, 1, 1, np.ones(100000)]
res = incref_elide_l(l)
        # the returned original must not be modified by an in-place operation
assert_array_equal(l[4], np.ones(100000))
assert_array_equal(res, l[4] + l[4])
def test_temporary_with_cast(self):
# check that we don't elide into a temporary which would need casting
d = np.ones(200000, dtype=np.int64)
assert_equal(((d + d) + 2**222).dtype, np.dtype('O'))
r = ((d + d) / 2)
assert_equal(r.dtype, np.dtype('f8'))
r = np.true_divide((d + d), 2)
assert_equal(r.dtype, np.dtype('f8'))
r = ((d + d) / 2.)
assert_equal(r.dtype, np.dtype('f8'))
r = ((d + d) // 2)
assert_equal(r.dtype, np.dtype(np.int64))
# commutative elision into the astype result
f = np.ones(100000, dtype=np.float32)
assert_equal(((f + f) + f.astype(np.float64)).dtype, np.dtype('f8'))
# no elision into lower type
d = f.astype(np.float64)
assert_equal(((f + f) + d).dtype, d.dtype)
l = np.ones(100000, dtype=np.longdouble)
assert_equal(((d + d) + l).dtype, l.dtype)
# test unary abs with different output dtype
for dt in (np.complex64, np.complex128, np.clongdouble):
c = np.ones(100000, dtype=dt)
r = abs(c * 2.0)
assert_equal(r.dtype, np.dtype('f%d' % (c.itemsize // 2)))
def test_elide_broadcast(self):
# test no elision on broadcast to higher dimension
        # this only triggers the elision code path in debug mode; triggering
        # it in normal mode needs a matching dimension of 256kB, i.e. a lot
        # of memory
d = np.ones((2000, 1), dtype=int)
b = np.ones((2000), dtype=bool)
r = (1 - d) + b
assert_equal(r, 1)
assert_equal(r.shape, (2000, 2000))
def test_elide_scalar(self):
# check inplace op does not create ndarray from scalars
a = np.bool_()
assert_(type(~(a & a)) is np.bool_)
def test_elide_readonly(self):
# don't try to elide readonly temporaries
r = np.asarray(np.broadcast_to(np.zeros(1), 100000).flat) * 0.0
assert_equal(r, 0)
def test_elide_updateifcopy(self):
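        # the UPDATEIFCOPY temporary returned by a.flat.__array__() must not
        # be reused for in-place elision; otherwise the ``+ 1`` would be
        # written back into ``a`` when the temporary is deleted.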
a = np.ones(2**20)[::2]
b = a.flat.__array__() + 1
del b
assert_equal(a, 1)
class TestCAPI(object):
def test_IsPythonScalar(self):
from numpy.core.multiarray_tests import IsPythonScalar
assert_(IsPythonScalar(b'foobar'))
assert_(IsPythonScalar(1))
assert_(IsPythonScalar(2**80))
assert_(IsPythonScalar(2.))
assert_(IsPythonScalar("a"))
class TestSubscripting(object):
def test_test_zero_rank(self):
x = np.array([1, 2, 3])
assert_(isinstance(x[0], np.int_))
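        # on Python 2 np.int_ is also a subclass of the builtin int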
if sys.version_info[0] < 3:
assert_(isinstance(x[0], int))
assert_(type(x[0, ...]) is np.ndarray)
class TestPickling(object):
def test_roundtrip(self):
import pickle
carray = np.array([[2, 9], [7, 0], [3, 8]])
DATA = [
carray,
np.transpose(carray),
np.array([('xxx', 1, 2.0)], dtype=[('a', (str, 3)), ('b', int),
('c', float)])
]
for a in DATA:
assert_equal(a, pickle.loads(a.dumps()), err_msg="%r" % a)
def _loads(self, obj):
if sys.version_info[0] >= 3:
return np.loads(obj, encoding='latin1')
else:
return np.loads(obj)
# version 0 pickles, using protocol=2 to pickle
# version 0 doesn't have a version field
def test_version0_int8(self):
s = b'\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb.'
a = np.array([1, 2, 3, 4], dtype=np.int8)
p = self._loads(s)
assert_equal(a, p)
def test_version0_float32(self):
s = b'\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02f4K\x00K\x01\x87Rq\x05(U\x01<NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x10\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@tb.'
a = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32)
p = self._loads(s)
assert_equal(a, p)
def test_version0_object(self):
s = b'\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x02\x85cnumpy\ndtype\nq\x04U\x02O8K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89]q\x06(}q\x07U\x01aK\x01s}q\x08U\x01bK\x02setb.'
a = np.array([{'a': 1}, {'b': 2}])
p = self._loads(s)
assert_equal(a, p)
# version 1 pickles, using protocol=2 to pickle
def test_version1_int8(self):
s = b'\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(K\x01U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb.'
a = np.array([1, 2, 3, 4], dtype=np.int8)
p = self._loads(s)
assert_equal(a, p)
def test_version1_float32(self):
s = b'\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x04\x85cnumpy\ndtype\nq\x04U\x02f4K\x00K\x01\x87Rq\x05(K\x01U\x01<NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x10\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@tb.'
a = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32)
p = self._loads(s)
assert_equal(a, p)
def test_version1_object(self):
s = b'\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x02\x85cnumpy\ndtype\nq\x04U\x02O8K\x00K\x01\x87Rq\x05(K\x01U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89]q\x06(}q\x07U\x01aK\x01s}q\x08U\x01bK\x02setb.'
a = np.array([{'a': 1}, {'b': 2}])
p = self._loads(s)
assert_equal(a, p)
def test_subarray_int_shape(self):
s = b"cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\np1\n(I0\ntp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\np7\n(S'V6'\np8\nI0\nI1\ntp9\nRp10\n(I3\nS'|'\np11\nN(S'a'\np12\ng3\ntp13\n(dp14\ng12\n(g7\n(S'V4'\np15\nI0\nI1\ntp16\nRp17\n(I3\nS'|'\np18\n(g7\n(S'i1'\np19\nI0\nI1\ntp20\nRp21\n(I3\nS'|'\np22\nNNNI-1\nI-1\nI0\ntp23\nb(I2\nI2\ntp24\ntp25\nNNI4\nI1\nI0\ntp26\nbI0\ntp27\nsg3\n(g7\n(S'V2'\np28\nI0\nI1\ntp29\nRp30\n(I3\nS'|'\np31\n(g21\nI2\ntp32\nNNI2\nI1\nI0\ntp33\nbI4\ntp34\nsI6\nI1\nI0\ntp35\nbI00\nS'\\x01\\x01\\x01\\x01\\x01\\x02'\np36\ntp37\nb."
a = np.array([(1, (1, 2))], dtype=[('a', 'i1', (2, 2)), ('b', 'i1', 2)])
p = self._loads(s)
assert_equal(a, p)
class TestFancyIndexing(object):
def test_list(self):
x = np.ones((1, 1))
x[:, [0]] = 2.0
assert_array_equal(x, np.array([[2.0]]))
x = np.ones((1, 1, 1))
x[:, :, [0]] = 2.0
assert_array_equal(x, np.array([[[2.0]]]))
def test_tuple(self):
x = np.ones((1, 1))
x[:, (0,)] = 2.0
assert_array_equal(x, np.array([[2.0]]))
x = np.ones((1, 1, 1))
x[:, :, (0,)] = 2.0
assert_array_equal(x, np.array([[[2.0]]]))
def test_mask(self):
x = np.array([1, 2, 3, 4])
m = np.array([0, 1, 0, 0], bool)
assert_array_equal(x[m], np.array([2]))
def test_mask2(self):
x = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
m = np.array([0, 1], bool)
m2 = np.array([[0, 1, 0, 0], [1, 0, 0, 0]], bool)
m3 = np.array([[0, 1, 0, 0], [0, 0, 0, 0]], bool)
assert_array_equal(x[m], np.array([[5, 6, 7, 8]]))
assert_array_equal(x[m2], np.array([2, 5]))
assert_array_equal(x[m3], np.array([2]))
def test_assign_mask(self):
x = np.array([1, 2, 3, 4])
m = np.array([0, 1, 0, 0], bool)
x[m] = 5
assert_array_equal(x, np.array([1, 5, 3, 4]))
def test_assign_mask2(self):
xorig = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
m = np.array([0, 1], bool)
m2 = np.array([[0, 1, 0, 0], [1, 0, 0, 0]], bool)
m3 = np.array([[0, 1, 0, 0], [0, 0, 0, 0]], bool)
x = xorig.copy()
x[m] = 10
assert_array_equal(x, np.array([[1, 2, 3, 4], [10, 10, 10, 10]]))
x = xorig.copy()
x[m2] = 10
assert_array_equal(x, np.array([[1, 10, 3, 4], [10, 6, 7, 8]]))
x = xorig.copy()
x[m3] = 10
assert_array_equal(x, np.array([[1, 10, 3, 4], [5, 6, 7, 8]]))
class TestStringCompare(object):
def test_string(self):
g1 = np.array(["This", "is", "example"])
g2 = np.array(["This", "was", "example"])
assert_array_equal(g1 == g2, [g1[i] == g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 != g2, [g1[i] != g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 <= g2, [g1[i] <= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 >= g2, [g1[i] >= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]])
def test_mixed(self):
g1 = np.array(["spam", "spa", "spammer", "and eggs"])
g2 = "spam"
assert_array_equal(g1 == g2, [x == g2 for x in g1])
assert_array_equal(g1 != g2, [x != g2 for x in g1])
assert_array_equal(g1 < g2, [x < g2 for x in g1])
assert_array_equal(g1 > g2, [x > g2 for x in g1])
assert_array_equal(g1 <= g2, [x <= g2 for x in g1])
assert_array_equal(g1 >= g2, [x >= g2 for x in g1])
def test_unicode(self):
g1 = np.array([u"This", u"is", u"example"])
g2 = np.array([u"This", u"was", u"example"])
assert_array_equal(g1 == g2, [g1[i] == g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 != g2, [g1[i] != g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 <= g2, [g1[i] <= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 >= g2, [g1[i] >= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]])
class TestArgmax(object):
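    # (array, expected argmax index) pairs covering nan, complex-nan, NaT,
    # timedelta and bool inputs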
nan_arr = [
([0, 1, 2, 3, np.nan], 4),
([0, 1, 2, np.nan, 3], 3),
([np.nan, 0, 1, 2, 3], 0),
([np.nan, 0, np.nan, 2, 3], 0),
([0, 1, 2, 3, complex(0, np.nan)], 4),
([0, 1, 2, 3, complex(np.nan, 0)], 4),
([0, 1, 2, complex(np.nan, 0), 3], 3),
([0, 1, 2, complex(0, np.nan), 3], 3),
([complex(0, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, np.nan), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, np.nan)], 0),
([complex(0, 0), complex(0, 2), complex(0, 1)], 1),
([complex(1, 0), complex(0, 2), complex(0, 1)], 0),
([complex(1, 0), complex(0, 2), complex(1, 1)], 2),
([np.datetime64('1923-04-14T12:43:12'),
np.datetime64('1994-06-21T14:43:15'),
np.datetime64('2001-10-15T04:10:32'),
np.datetime64('1995-11-25T16:02:16'),
np.datetime64('2005-01-04T03:14:12'),
np.datetime64('2041-12-03T14:05:03')], 5),
([np.datetime64('1935-09-14T04:40:11'),
np.datetime64('1949-10-12T12:32:11'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('2015-11-20T12:20:59'),
np.datetime64('1932-09-23T10:10:13'),
np.datetime64('2014-10-10T03:50:30')], 3),
# Assorted tests with NaTs
([np.datetime64('NaT'),
np.datetime64('NaT'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('NaT'),
np.datetime64('2015-09-23T10:10:13'),
np.datetime64('1932-10-10T03:50:30')], 4),
([np.datetime64('2059-03-14T12:43:12'),
np.datetime64('1996-09-21T14:43:15'),
np.datetime64('NaT'),
np.datetime64('2022-12-25T16:02:16'),
np.datetime64('1963-10-04T03:14:12'),
np.datetime64('2013-05-08T18:15:23')], 0),
([np.timedelta64(2, 's'),
np.timedelta64(1, 's'),
np.timedelta64('NaT', 's'),
np.timedelta64(3, 's')], 3),
([np.timedelta64('NaT', 's')] * 3, 0),
([timedelta(days=5, seconds=14), timedelta(days=2, seconds=35),
timedelta(days=-1, seconds=23)], 0),
([timedelta(days=1, seconds=43), timedelta(days=10, seconds=5),
timedelta(days=5, seconds=14)], 1),
([timedelta(days=10, seconds=24), timedelta(days=10, seconds=5),
timedelta(days=10, seconds=43)], 2),
([False, False, False, False, True], 4),
([False, False, False, True, False], 3),
([True, False, False, False, False], 0),
([True, False, True, False, False], 0),
]
def test_all(self):
a = np.random.normal(0, 1, (4, 5, 6, 7, 8))
for i in range(a.ndim):
amax = a.max(i)
aargmax = a.argmax(i)
axes = list(range(a.ndim))
axes.remove(i)
            assert_(np.all(amax == aargmax.choose(*a.transpose(i, *axes))))
def test_combinations(self):
for arr, pos in self.nan_arr:
with suppress_warnings() as sup:
sup.filter(RuntimeWarning,
"invalid value encountered in reduce")
max_val = np.max(arr)
assert_equal(np.argmax(arr), pos, err_msg="%r" % arr)
assert_equal(arr[np.argmax(arr)], max_val, err_msg="%r" % arr)
def test_output_shape(self):
# see also gh-616
a = np.ones((10, 5))
# Check some simple shape mismatches
out = np.ones(11, dtype=np.int_)
assert_raises(ValueError, a.argmax, -1, out)
out = np.ones((2, 5), dtype=np.int_)
assert_raises(ValueError, a.argmax, -1, out)
        # these could possibly be relaxed (cases like the previous ones used
        # to be allowed)
out = np.ones((1, 10), dtype=np.int_)
assert_raises(ValueError, a.argmax, -1, out)
out = np.ones(10, dtype=np.int_)
a.argmax(-1, out=out)
assert_equal(out, a.argmax(-1))
def test_argmax_unicode(self):
d = np.zeros(6031, dtype='<U9')
d[5942] = "as"
assert_equal(d.argmax(), 5942)
def test_np_vs_ndarray(self):
# make sure both ndarray.argmax and numpy.argmax support out/axis args
a = np.random.normal(size=(2,3))
# check positional args
out1 = np.zeros(2, dtype=int)
out2 = np.zeros(2, dtype=int)
assert_equal(a.argmax(1, out1), np.argmax(a, 1, out2))
assert_equal(out1, out2)
# check keyword args
out1 = np.zeros(3, dtype=int)
out2 = np.zeros(3, dtype=int)
assert_equal(a.argmax(out=out1, axis=0), np.argmax(a, out=out2, axis=0))
assert_equal(out1, out2)
def test_object_argmax_with_NULLs(self):
# See gh-6032
a = np.empty(4, dtype='O')
ctypes.memset(a.ctypes.data, 0, a.nbytes)
assert_equal(a.argmax(), 0)
a[3] = 10
assert_equal(a.argmax(), 3)
a[1] = 30
assert_equal(a.argmax(), 1)
class TestArgmin(object):
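    # (array, expected argmin index) pairs mirroring the TestArgmax cases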
nan_arr = [
([0, 1, 2, 3, np.nan], 4),
([0, 1, 2, np.nan, 3], 3),
([np.nan, 0, 1, 2, 3], 0),
([np.nan, 0, np.nan, 2, 3], 0),
([0, 1, 2, 3, complex(0, np.nan)], 4),
([0, 1, 2, 3, complex(np.nan, 0)], 4),
([0, 1, 2, complex(np.nan, 0), 3], 3),
([0, 1, 2, complex(0, np.nan), 3], 3),
([complex(0, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, np.nan), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, np.nan)], 0),
([complex(0, 0), complex(0, 2), complex(0, 1)], 0),
([complex(1, 0), complex(0, 2), complex(0, 1)], 2),
([complex(1, 0), complex(0, 2), complex(1, 1)], 1),
([np.datetime64('1923-04-14T12:43:12'),
np.datetime64('1994-06-21T14:43:15'),
np.datetime64('2001-10-15T04:10:32'),
np.datetime64('1995-11-25T16:02:16'),
np.datetime64('2005-01-04T03:14:12'),
np.datetime64('2041-12-03T14:05:03')], 0),
([np.datetime64('1935-09-14T04:40:11'),
np.datetime64('1949-10-12T12:32:11'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('2014-11-20T12:20:59'),
np.datetime64('2015-09-23T10:10:13'),
np.datetime64('1932-10-10T03:50:30')], 5),
# Assorted tests with NaTs
([np.datetime64('NaT'),
np.datetime64('NaT'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('NaT'),
np.datetime64('2015-09-23T10:10:13'),
np.datetime64('1932-10-10T03:50:30')], 5),
([np.datetime64('2059-03-14T12:43:12'),
np.datetime64('1996-09-21T14:43:15'),
np.datetime64('NaT'),
np.datetime64('2022-12-25T16:02:16'),
np.datetime64('1963-10-04T03:14:12'),
np.datetime64('2013-05-08T18:15:23')], 4),
([np.timedelta64(2, 's'),
np.timedelta64(1, 's'),
np.timedelta64('NaT', 's'),
np.timedelta64(3, 's')], 1),
([np.timedelta64('NaT', 's')] * 3, 0),
([timedelta(days=5, seconds=14), timedelta(days=2, seconds=35),
timedelta(days=-1, seconds=23)], 2),
([timedelta(days=1, seconds=43), timedelta(days=10, seconds=5),
timedelta(days=5, seconds=14)], 0),
([timedelta(days=10, seconds=24), timedelta(days=10, seconds=5),
timedelta(days=10, seconds=43)], 1),
([True, True, True, True, False], 4),
([True, True, True, False, True], 3),
([False, True, True, True, True], 0),
([False, True, False, True, True], 0),
]
def test_all(self):
a = np.random.normal(0, 1, (4, 5, 6, 7, 8))
for i in range(a.ndim):
amin = a.min(i)
aargmin = a.argmin(i)
axes = list(range(a.ndim))
axes.remove(i)
            assert_(np.all(amin == aargmin.choose(*a.transpose(i, *axes))))
def test_combinations(self):
for arr, pos in self.nan_arr:
with suppress_warnings() as sup:
sup.filter(RuntimeWarning,
"invalid value encountered in reduce")
min_val = np.min(arr)
assert_equal(np.argmin(arr), pos, err_msg="%r" % arr)
assert_equal(arr[np.argmin(arr)], min_val, err_msg="%r" % arr)
def test_minimum_signed_integers(self):
a = np.array([1, -2**7, -2**7 + 1], dtype=np.int8)
assert_equal(np.argmin(a), 1)
a = np.array([1, -2**15, -2**15 + 1], dtype=np.int16)
assert_equal(np.argmin(a), 1)
a = np.array([1, -2**31, -2**31 + 1], dtype=np.int32)
assert_equal(np.argmin(a), 1)
a = np.array([1, -2**63, -2**63 + 1], dtype=np.int64)
assert_equal(np.argmin(a), 1)
def test_output_shape(self):
# see also gh-616
a = np.ones((10, 5))
# Check some simple shape mismatches
out = np.ones(11, dtype=np.int_)
assert_raises(ValueError, a.argmin, -1, out)
out = np.ones((2, 5), dtype=np.int_)
assert_raises(ValueError, a.argmin, -1, out)
        # these could possibly be relaxed (cases like the previous ones used
        # to be allowed)
out = np.ones((1, 10), dtype=np.int_)
assert_raises(ValueError, a.argmin, -1, out)
out = np.ones(10, dtype=np.int_)
a.argmin(-1, out=out)
assert_equal(out, a.argmin(-1))
def test_argmin_unicode(self):
d = np.ones(6031, dtype='<U9')
d[6001] = "0"
assert_equal(d.argmin(), 6001)
def test_np_vs_ndarray(self):
# make sure both ndarray.argmin and numpy.argmin support out/axis args
a = np.random.normal(size=(2, 3))
# check positional args
out1 = np.zeros(2, dtype=int)
out2 = np.ones(2, dtype=int)
assert_equal(a.argmin(1, out1), np.argmin(a, 1, out2))
assert_equal(out1, out2)
# check keyword args
out1 = np.zeros(3, dtype=int)
out2 = np.ones(3, dtype=int)
assert_equal(a.argmin(out=out1, axis=0), np.argmin(a, out=out2, axis=0))
assert_equal(out1, out2)
def test_object_argmin_with_NULLs(self):
# See gh-6032
a = np.empty(4, dtype='O')
ctypes.memset(a.ctypes.data, 0, a.nbytes)
assert_equal(a.argmin(), 0)
a[3] = 30
assert_equal(a.argmin(), 3)
a[1] = 10
assert_equal(a.argmin(), 1)
class TestMinMax(object):
def test_scalar(self):
assert_raises(np.AxisError, np.amax, 1, 1)
assert_raises(np.AxisError, np.amin, 1, 1)
assert_equal(np.amax(1, axis=0), 1)
assert_equal(np.amin(1, axis=0), 1)
assert_equal(np.amax(1, axis=None), 1)
assert_equal(np.amin(1, axis=None), 1)
def test_axis(self):
assert_raises(np.AxisError, np.amax, [1, 2, 3], 1000)
assert_equal(np.amax([[1, 2, 3]], axis=1), 3)
def test_datetime(self):
# NaTs are ignored
for dtype in ('m8[s]', 'm8[Y]'):
a = np.arange(10).astype(dtype)
a[3] = 'NaT'
assert_equal(np.amin(a), a[0])
assert_equal(np.amax(a), a[9])
a[0] = 'NaT'
assert_equal(np.amin(a), a[1])
assert_equal(np.amax(a), a[9])
a.fill('NaT')
assert_equal(np.amin(a), a[0])
assert_equal(np.amax(a), a[0])
class TestNewaxis(object):
def test_basic(self):
sk = np.array([0, -0.1, 0.1])
res = 250*sk[:, np.newaxis]
assert_almost_equal(res.ravel(), 250*sk)
class TestClip(object):
def _check_range(self, x, cmin, cmax):
assert_(np.all(x >= cmin))
assert_(np.all(x <= cmax))
def _clip_type(self, type_group, array_max,
clip_min, clip_max, inplace=False,
expected_min=None, expected_max=None):
if expected_min is None:
expected_min = clip_min
if expected_max is None:
expected_max = clip_max
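        # clip each scalar type of the group, in native and byte-swapped
        # order, and check the result stays within [expected_min, expected_max]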
for T in np.sctypes[type_group]:
if sys.byteorder == 'little':
byte_orders = ['=', '>']
else:
byte_orders = ['<', '=']
for byteorder in byte_orders:
dtype = np.dtype(T).newbyteorder(byteorder)
x = (np.random.random(1000) * array_max).astype(dtype)
if inplace:
x.clip(clip_min, clip_max, x)
else:
x = x.clip(clip_min, clip_max)
byteorder = '='
if x.dtype.byteorder == '|':
byteorder = '|'
assert_equal(x.dtype.byteorder, byteorder)
self._check_range(x, expected_min, expected_max)
return x
def test_basic(self):
for inplace in [False, True]:
self._clip_type(
'float', 1024, -12.8, 100.2, inplace=inplace)
self._clip_type(
'float', 1024, 0, 0, inplace=inplace)
self._clip_type(
'int', 1024, -120, 100.5, inplace=inplace)
self._clip_type(
'int', 1024, 0, 0, inplace=inplace)
self._clip_type(
'uint', 1024, 0, 0, inplace=inplace)
self._clip_type(
'uint', 1024, -120, 100, inplace=inplace, expected_min=0)
def test_record_array(self):
rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
dtype=[('x', '<f8'), ('y', '<f8'), ('z', '<f8')])
y = rec['x'].clip(-0.3, 0.5)
self._check_range(y, -0.3, 0.5)
def test_max_or_min(self):
val = np.array([0, 1, 2, 3, 4, 5, 6, 7])
x = val.clip(3)
assert_(np.all(x >= 3))
x = val.clip(min=3)
assert_(np.all(x >= 3))
x = val.clip(max=4)
assert_(np.all(x <= 4))
def test_nan(self):
input_arr = np.array([-2., np.nan, 0.5, 3., 0.25, np.nan])
result = input_arr.clip(-1, 1)
expected = np.array([-1., np.nan, 0.5, 1., 0.25, np.nan])
assert_array_equal(result, expected)
class TestCompress(object):
def test_axis(self):
tgt = [[5, 6, 7, 8, 9]]
arr = np.arange(10).reshape(2, 5)
out = np.compress([0, 1], arr, axis=0)
assert_equal(out, tgt)
tgt = [[1, 3], [6, 8]]
out = np.compress([0, 1, 0, 1, 0], arr, axis=1)
assert_equal(out, tgt)
def test_truncate(self):
tgt = [[1], [6]]
arr = np.arange(10).reshape(2, 5)
out = np.compress([0, 1], arr, axis=1)
assert_equal(out, tgt)
def test_flatten(self):
arr = np.arange(10).reshape(2, 5)
out = np.compress([0, 1], arr)
assert_equal(out, 1)
class TestPutmask(object):
def tst_basic(self, x, T, mask, val):
np.putmask(x, mask, val)
assert_equal(x[mask], T(val))
assert_equal(x.dtype, T)
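    # the test_ip_* methods below are nose-style generators, yielding one
    # case per scalar type or byte order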
def test_ip_types(self):
unchecked_types = [bytes, unicode, np.void, object]
x = np.random.random(1000)*100
mask = x < 40
for val in [-100, 0, 15]:
for types in np.sctypes.values():
for T in types:
if T not in unchecked_types:
yield self.tst_basic, x.copy().astype(T), T, mask, val
def test_mask_size(self):
assert_raises(ValueError, np.putmask, np.array([1, 2, 3]), [True], 5)
def tst_byteorder(self, dtype):
x = np.array([1, 2, 3], dtype)
np.putmask(x, [True, False, True], -1)
assert_array_equal(x, [-1, 2, -1])
def test_ip_byteorder(self):
for dtype in ('>i4', '<i4'):
yield self.tst_byteorder, dtype
def test_record_array(self):
# Note mixed byteorder.
rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
dtype=[('x', '<f8'), ('y', '>f8'), ('z', '<f8')])
np.putmask(rec['x'], [True, False], 10)
assert_array_equal(rec['x'], [10, 5])
assert_array_equal(rec['y'], [2, 4])
assert_array_equal(rec['z'], [3, 3])
np.putmask(rec['y'], [True, False], 11)
assert_array_equal(rec['x'], [10, 5])
assert_array_equal(rec['y'], [11, 4])
assert_array_equal(rec['z'], [3, 3])
class TestTake(object):
def tst_basic(self, x):
ind = list(range(x.shape[0]))
assert_array_equal(x.take(ind, axis=0), x)
def test_ip_types(self):
unchecked_types = [bytes, unicode, np.void, object]
x = np.random.random(24)*100
x.shape = 2, 3, 4
for types in np.sctypes.values():
for T in types:
if T not in unchecked_types:
yield self.tst_basic, x.copy().astype(T)
def test_raise(self):
x = np.random.random(24)*100
x.shape = 2, 3, 4
assert_raises(IndexError, x.take, [0, 1, 2], axis=0)
assert_raises(IndexError, x.take, [-3], axis=0)
assert_array_equal(x.take([-1], axis=0)[0], x[1])
def test_clip(self):
x = np.random.random(24)*100
x.shape = 2, 3, 4
assert_array_equal(x.take([-1], axis=0, mode='clip')[0], x[0])
assert_array_equal(x.take([2], axis=0, mode='clip')[0], x[1])
def test_wrap(self):
x = np.random.random(24)*100
x.shape = 2, 3, 4
assert_array_equal(x.take([-1], axis=0, mode='wrap')[0], x[1])
assert_array_equal(x.take([2], axis=0, mode='wrap')[0], x[0])
assert_array_equal(x.take([3], axis=0, mode='wrap')[0], x[1])
def tst_byteorder(self, dtype):
x = np.array([1, 2, 3], dtype)
assert_array_equal(x.take([0, 2, 1]), [1, 3, 2])
def test_ip_byteorder(self):
for dtype in ('>i4', '<i4'):
yield self.tst_byteorder, dtype
def test_record_array(self):
# Note mixed byteorder.
rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
dtype=[('x', '<f8'), ('y', '>f8'), ('z', '<f8')])
rec1 = rec.take([1])
assert_(rec1['x'] == 5.0 and rec1['y'] == 4.0)
class TestLexsort(object):
def test_basic(self):
a = [1, 2, 1, 3, 1, 5]
b = [0, 4, 5, 6, 2, 3]
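        # the last key passed to np.lexsort is the primary sort key, so ``a``
        # sorts first and ``b`` breaks ties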
idx = np.lexsort((b, a))
expected_idx = np.array([0, 4, 2, 1, 3, 5])
assert_array_equal(idx, expected_idx)
x = np.vstack((b, a))
idx = np.lexsort(x)
assert_array_equal(idx, expected_idx)
assert_array_equal(x[1][idx], np.sort(x[1]))
def test_datetime(self):
a = np.array([0,0,0], dtype='datetime64[D]')
b = np.array([2,1,0], dtype='datetime64[D]')
idx = np.lexsort((b, a))
expected_idx = np.array([2, 1, 0])
assert_array_equal(idx, expected_idx)
a = np.array([0,0,0], dtype='timedelta64[D]')
b = np.array([2,1,0], dtype='timedelta64[D]')
idx = np.lexsort((b, a))
expected_idx = np.array([2, 1, 0])
assert_array_equal(idx, expected_idx)
def test_object(self): # gh-6312
a = np.random.choice(10, 1000)
b = np.random.choice(['abc', 'xy', 'wz', 'efghi', 'qwst', 'x'], 1000)
for u in a, b:
left = np.lexsort((u.astype('O'),))
right = np.argsort(u, kind='mergesort')
assert_array_equal(left, right)
for u, v in (a, b), (b, a):
idx = np.lexsort((u, v))
assert_array_equal(idx, np.lexsort((u.astype('O'), v)))
assert_array_equal(idx, np.lexsort((u, v.astype('O'))))
u, v = np.array(u, dtype='object'), np.array(v, dtype='object')
assert_array_equal(idx, np.lexsort((u, v)))
def test_invalid_axis(self): # gh-7528
x = np.linspace(0., 1., 42*3).reshape(42, 3)
assert_raises(np.AxisError, np.lexsort, x, axis=2)
class TestIO(object):
"""Test tofile, fromfile, tobytes, and fromstring"""
def setup(self):
shape = (2, 4, 3)
rand = np.random.random
self.x = rand(shape) + rand(shape).astype(complex)*1j
self.x[0,:, 1] = [np.nan, np.inf, -np.inf, np.nan]
self.dtype = self.x.dtype
self.tempdir = tempfile.mkdtemp()
self.filename = tempfile.mktemp(dir=self.tempdir)
def teardown(self):
shutil.rmtree(self.tempdir)
def test_nofile(self):
# this should probably be supported as a file
# but for now test for proper errors
b = io.BytesIO()
assert_raises(IOError, np.fromfile, b, np.uint8, 80)
d = np.ones(7)
assert_raises(IOError, lambda x: x.tofile(b), d)
def test_bool_fromstring(self):
v = np.array([True, False, True, False], dtype=np.bool_)
y = np.fromstring('1 0 -2.3 0.0', sep=' ', dtype=np.bool_)
assert_array_equal(v, y)
def test_uint64_fromstring(self):
d = np.fromstring("9923372036854775807 104783749223640",
dtype=np.uint64, sep=' ')
e = np.array([9923372036854775807, 104783749223640], dtype=np.uint64)
assert_array_equal(d, e)
def test_int64_fromstring(self):
d = np.fromstring("-25041670086757 104783749223640",
dtype=np.int64, sep=' ')
e = np.array([-25041670086757, 104783749223640], dtype=np.int64)
assert_array_equal(d, e)
def test_empty_files_binary(self):
f = open(self.filename, 'w')
f.close()
y = np.fromfile(self.filename)
assert_(y.size == 0, "Array not empty")
def test_empty_files_text(self):
f = open(self.filename, 'w')
f.close()
y = np.fromfile(self.filename, sep=" ")
assert_(y.size == 0, "Array not empty")
def test_roundtrip_file(self):
f = open(self.filename, 'wb')
self.x.tofile(f)
f.close()
# NB. doesn't work with flush+seek, due to use of C stdio
f = open(self.filename, 'rb')
y = np.fromfile(f, dtype=self.dtype)
f.close()
assert_array_equal(y, self.x.flat)
def test_roundtrip_filename(self):
self.x.tofile(self.filename)
y = np.fromfile(self.filename, dtype=self.dtype)
assert_array_equal(y, self.x.flat)
def test_roundtrip_binary_str(self):
s = self.x.tobytes()
y = np.fromstring(s, dtype=self.dtype)
assert_array_equal(y, self.x.flat)
s = self.x.tobytes('F')
y = np.fromstring(s, dtype=self.dtype)
assert_array_equal(y, self.x.flatten('F'))
def test_roundtrip_str(self):
x = self.x.real.ravel()
s = "@".join(map(str, x))
y = np.fromstring(s, sep="@")
        # NB. str() loses precision compared to repr()
nan_mask = ~np.isfinite(x)
assert_array_equal(x[nan_mask], y[nan_mask])
assert_array_almost_equal(x[~nan_mask], y[~nan_mask], decimal=5)
def test_roundtrip_repr(self):
x = self.x.real.ravel()
s = "@".join(map(repr, x))
y = np.fromstring(s, sep="@")
assert_array_equal(x, y)
def test_unseekable_fromfile(self):
# gh-6246
self.x.tofile(self.filename)
def fail(*args, **kwargs):
raise IOError('Can not tell or seek')
with io.open(self.filename, 'rb', buffering=0) as f:
f.seek = fail
f.tell = fail
assert_raises(IOError, np.fromfile, f, dtype=self.dtype)
def test_io_open_unbuffered_fromfile(self):
# gh-6632
self.x.tofile(self.filename)
with io.open(self.filename, 'rb', buffering=0) as f:
y = np.fromfile(f, dtype=self.dtype)
assert_array_equal(y, self.x.flat)
def test_largish_file(self):
# check the fallocate path on files > 16MB
d = np.zeros(4 * 1024 ** 2)
d.tofile(self.filename)
assert_equal(os.path.getsize(self.filename), d.nbytes)
assert_array_equal(d, np.fromfile(self.filename))
# check offset
with open(self.filename, "r+b") as f:
f.seek(d.nbytes)
d.tofile(f)
assert_equal(os.path.getsize(self.filename), d.nbytes * 2)
# check append mode (gh-8329)
open(self.filename, "w").close() # delete file contents
with open(self.filename, "ab") as f:
d.tofile(f)
assert_array_equal(d, np.fromfile(self.filename))
with open(self.filename, "ab") as f:
d.tofile(f)
assert_equal(os.path.getsize(self.filename), d.nbytes * 2)
def test_io_open_buffered_fromfile(self):
# gh-6632
self.x.tofile(self.filename)
with io.open(self.filename, 'rb', buffering=-1) as f:
y = np.fromfile(f, dtype=self.dtype)
assert_array_equal(y, self.x.flat)
def test_file_position_after_fromfile(self):
# gh-4118
sizes = [io.DEFAULT_BUFFER_SIZE//8,
io.DEFAULT_BUFFER_SIZE,
io.DEFAULT_BUFFER_SIZE*8]
for size in sizes:
f = open(self.filename, 'wb')
f.seek(size-1)
f.write(b'\0')
f.close()
for mode in ['rb', 'r+b']:
err_msg = "%d %s" % (size, mode)
f = open(self.filename, mode)
f.read(2)
np.fromfile(f, dtype=np.float64, count=1)
pos = f.tell()
f.close()
assert_equal(pos, 10, err_msg=err_msg)
def test_file_position_after_tofile(self):
# gh-4118
sizes = [io.DEFAULT_BUFFER_SIZE//8,
io.DEFAULT_BUFFER_SIZE,
io.DEFAULT_BUFFER_SIZE*8]
for size in sizes:
err_msg = "%d" % (size,)
f = open(self.filename, 'wb')
f.seek(size-1)
f.write(b'\0')
f.seek(10)
f.write(b'12')
np.array([0], dtype=np.float64).tofile(f)
pos = f.tell()
f.close()
assert_equal(pos, 10 + 2 + 8, err_msg=err_msg)
f = open(self.filename, 'r+b')
f.read(2)
            f.seek(0, 1)  # ANSI C requires a seek between a read and a write
np.array([0], dtype=np.float64).tofile(f)
pos = f.tell()
f.close()
assert_equal(pos, 10, err_msg=err_msg)
def _check_from(self, s, value, **kw):
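        # parse ``s`` both with np.fromstring and, via a temporary file, with
        # np.fromfile, and compare the results against ``value``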
y = np.fromstring(s, **kw)
assert_array_equal(y, value)
f = open(self.filename, 'wb')
f.write(s)
f.close()
y = np.fromfile(self.filename, **kw)
assert_array_equal(y, value)
def test_nan(self):
self._check_from(
b"nan +nan -nan NaN nan(foo) +NaN(BAR) -NAN(q_u_u_x_)",
[np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],
sep=' ')
def test_inf(self):
self._check_from(
b"inf +inf -inf infinity -Infinity iNfInItY -inF",
[np.inf, np.inf, -np.inf, np.inf, -np.inf, np.inf, -np.inf],
sep=' ')
def test_numbers(self):
self._check_from(b"1.234 -1.234 .3 .3e55 -123133.1231e+133",
[1.234, -1.234, .3, .3e55, -123133.1231e+133], sep=' ')
def test_binary(self):
self._check_from(b'\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@',
np.array([1, 2, 3, 4]),
dtype='<f4')
@dec.slow # takes > 1 minute on mechanical hard drive
def test_big_binary(self):
"""Test workarounds for 32-bit limited fwrite, fseek, and ftell
calls in windows. These normally would hang doing something like this.
See http://projects.scipy.org/numpy/ticket/1660"""
if sys.platform != 'win32':
return
try:
# before workarounds, only up to 2**32-1 worked
fourgbplus = 2**32 + 2**16
testbytes = np.arange(8, dtype=np.int8)
n = len(testbytes)
flike = tempfile.NamedTemporaryFile()
f = flike.file
np.tile(testbytes, fourgbplus // testbytes.nbytes).tofile(f)
flike.seek(0)
a = np.fromfile(f, dtype=np.int8)
flike.close()
assert_(len(a) == fourgbplus)
# check only start and end for speed:
assert_((a[:n] == testbytes).all())
assert_((a[-n:] == testbytes).all())
except (MemoryError, ValueError):
pass
def test_string(self):
self._check_from(b'1,2,3,4', [1., 2., 3., 4.], sep=',')
def test_counted_string(self):
self._check_from(b'1,2,3,4', [1., 2., 3., 4.], count=4, sep=',')
self._check_from(b'1,2,3,4', [1., 2., 3.], count=3, sep=',')
self._check_from(b'1,2,3,4', [1., 2., 3., 4.], count=-1, sep=',')
def test_string_with_ws(self):
self._check_from(b'1 2 3 4 ', [1, 2, 3, 4], dtype=int, sep=' ')
def test_counted_string_with_ws(self):
self._check_from(b'1 2 3 4 ', [1, 2, 3], count=3, dtype=int,
sep=' ')
def test_ascii(self):
self._check_from(b'1 , 2 , 3 , 4', [1., 2., 3., 4.], sep=',')
self._check_from(b'1,2,3,4', [1., 2., 3., 4.], dtype=float, sep=',')
def test_malformed(self):
self._check_from(b'1.234 1,234', [1.234, 1.], sep=' ')
def test_long_sep(self):
self._check_from(b'1_x_3_x_4_x_5', [1, 3, 4, 5], sep='_x_')
def test_dtype(self):
v = np.array([1, 2, 3, 4], dtype=np.int_)
self._check_from(b'1,2,3,4', v, sep=',', dtype=np.int_)
def test_dtype_bool(self):
# can't use _check_from because fromstring can't handle True/False
v = np.array([True, False, True, False], dtype=np.bool_)
s = b'1,0,-2.3,0'
f = open(self.filename, 'wb')
f.write(s)
f.close()
y = np.fromfile(self.filename, sep=',', dtype=np.bool_)
assert_(y.dtype == '?')
assert_array_equal(y, v)
def test_tofile_sep(self):
x = np.array([1.51, 2, 3.51, 4], dtype=float)
f = open(self.filename, 'w')
x.tofile(f, sep=',')
f.close()
f = open(self.filename, 'r')
s = f.read()
f.close()
#assert_equal(s, '1.51,2.0,3.51,4.0')
y = np.array([float(p) for p in s.split(',')])
assert_array_equal(x,y)
def test_tofile_format(self):
x = np.array([1.51, 2, 3.51, 4], dtype=float)
f = open(self.filename, 'w')
x.tofile(f, sep=',', format='%.2f')
f.close()
f = open(self.filename, 'r')
s = f.read()
f.close()
assert_equal(s, '1.51,2.00,3.51,4.00')
def test_locale(self):
in_foreign_locale(self.test_numbers)()
in_foreign_locale(self.test_nan)()
in_foreign_locale(self.test_inf)()
in_foreign_locale(self.test_counted_string)()
in_foreign_locale(self.test_ascii)()
in_foreign_locale(self.test_malformed)()
in_foreign_locale(self.test_tofile_sep)()
in_foreign_locale(self.test_tofile_format)()
class TestFromBuffer(object):
def tst_basic(self, buffer, expected, kwargs):
assert_array_equal(np.frombuffer(buffer,**kwargs), expected)
def test_ip_basic(self):
for byteorder in ['<', '>']:
for dtype in [float, int, complex]:
dt = np.dtype(dtype).newbyteorder(byteorder)
x = (np.random.random((4, 7))*5).astype(dt)
buf = x.tobytes()
yield self.tst_basic, buf, x.flat, {'dtype':dt}
def test_empty(self):
yield self.tst_basic, b'', np.array([]), {}
class TestFlat(object):
def setup(self):
a0 = np.arange(20.0)
a = a0.reshape(4, 5)
a0.shape = (4, 5)
a.flags.writeable = False
self.a = a
self.b = a[::2, ::2]
self.a0 = a0
self.b0 = a0[::2, ::2]
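        # self.a and self.b are read-only (b is also non-contiguous), while
        # self.a0 and self.b0 are their writeable counterparts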
def test_contiguous(self):
testpassed = False
try:
self.a.flat[12] = 100.0
except ValueError:
testpassed = True
assert_(testpassed)
assert_(self.a.flat[12] == 12.0)
def test_discontiguous(self):
testpassed = False
try:
self.b.flat[4] = 100.0
except ValueError:
testpassed = True
assert_(testpassed)
assert_(self.b.flat[4] == 12.0)
def test___array__(self):
c = self.a.flat.__array__()
d = self.b.flat.__array__()
e = self.a0.flat.__array__()
f = self.b0.flat.__array__()
assert_(c.flags.writeable is False)
assert_(d.flags.writeable is False)
assert_(e.flags.writeable is True)
assert_(f.flags.writeable is True)
assert_(c.flags.updateifcopy is False)
assert_(d.flags.updateifcopy is False)
assert_(e.flags.updateifcopy is False)
assert_(f.flags.updateifcopy is True)
assert_(f.base is self.b0)
class TestResize(object):
def test_basic(self):
x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
if IS_PYPY:
x.resize((5, 5), refcheck=False)
else:
x.resize((5, 5))
assert_array_equal(x.flat[:9],
np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]).flat)
assert_array_equal(x[9:].flat, 0)
def test_check_reference(self):
x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
y = x
assert_raises(ValueError, x.resize, (5, 1))
del y # avoid pyflakes unused variable warning.
def test_int_shape(self):
x = np.eye(3)
if IS_PYPY:
x.resize(3, refcheck=False)
else:
x.resize(3)
assert_array_equal(x, np.eye(3)[0,:])
def test_none_shape(self):
x = np.eye(3)
x.resize(None)
assert_array_equal(x, np.eye(3))
x.resize()
assert_array_equal(x, np.eye(3))
def test_0d_shape(self):
        # do it multiple times to check it does not break the allocation
        # cache, gh-9216
for i in range(10):
x = np.empty((1,))
x.resize(())
assert_equal(x.shape, ())
assert_equal(x.size, 1)
x = np.empty(())
x.resize((1,))
assert_equal(x.shape, (1,))
assert_equal(x.size, 1)
def test_invalid_arguments(self):
assert_raises(TypeError, np.eye(3).resize, 'hi')
assert_raises(ValueError, np.eye(3).resize, -1)
assert_raises(TypeError, np.eye(3).resize, order=1)
assert_raises(TypeError, np.eye(3).resize, refcheck='hi')
def test_freeform_shape(self):
x = np.eye(3)
if IS_PYPY:
x.resize(3, 2, 1, refcheck=False)
else:
x.resize(3, 2, 1)
assert_(x.shape == (3, 2, 1))
def test_zeros_appended(self):
x = np.eye(3)
if IS_PYPY:
x.resize(2, 3, 3, refcheck=False)
else:
x.resize(2, 3, 3)
assert_array_equal(x[0], np.eye(3))
assert_array_equal(x[1], np.zeros((3, 3)))
def test_obj_obj(self):
# check memory is initialized on resize, gh-4857
a = np.ones(10, dtype=[('k', object, 2)])
if IS_PYPY:
a.resize(15, refcheck=False)
else:
a.resize(15,)
assert_equal(a.shape, (15,))
assert_array_equal(a['k'][-5:], 0)
assert_array_equal(a['k'][:-5], 1)
class TestRecord(object):
def test_field_rename(self):
dt = np.dtype([('f', float), ('i', int)])
dt.names = ['p', 'q']
assert_equal(dt.names, ['p', 'q'])
def test_multiple_field_name_occurrence(self):
def test_assign():
dtype = np.dtype([("A", "f8"), ("B", "f8"), ("A", "f8")])
# Error raised when multiple fields have the same name
assert_raises(ValueError, test_assign)
if sys.version_info[0] >= 3:
def test_bytes_fields(self):
# Bytes are not allowed in field names and not recognized in titles
# on Py3
assert_raises(TypeError, np.dtype, [(b'a', int)])
assert_raises(TypeError, np.dtype, [(('b', b'a'), int)])
dt = np.dtype([((b'a', 'b'), int)])
assert_raises(ValueError, dt.__getitem__, b'a')
x = np.array([(1,), (2,), (3,)], dtype=dt)
assert_raises(IndexError, x.__getitem__, b'a')
y = x[0]
assert_raises(IndexError, y.__getitem__, b'a')
def test_multiple_field_name_unicode(self):
def test_assign_unicode():
dt = np.dtype([("\u20B9", "f8"),
("B", "f8"),
("\u20B9", "f8")])
            # Error raised when multiple fields have the same name
            # (unicode included)
assert_raises(ValueError, test_assign_unicode)
else:
def test_unicode_field_titles(self):
# Unicode field titles are added to field dict on Py2
title = u'b'
dt = np.dtype([((title, 'a'), int)])
dt[title]
dt['a']
x = np.array([(1,), (2,), (3,)], dtype=dt)
x[title]
x['a']
y = x[0]
y[title]
y['a']
def test_unicode_field_names(self):
# Unicode field names are not allowed on Py2
title = u'b'
assert_raises(TypeError, np.dtype, [(title, int)])
assert_raises(TypeError, np.dtype, [(('a', title), int)])
def test_field_names(self):
        # Test that both unicode and 8-bit / byte strings can be used
a = np.zeros((1,), dtype=[('f1', 'i4'),
('f2', 'i4'),
('f3', [('sf1', 'i4')])])
is_py3 = sys.version_info[0] >= 3
if is_py3:
funcs = (str,)
# byte string indexing fails gracefully
assert_raises(IndexError, a.__setitem__, b'f1', 1)
assert_raises(IndexError, a.__getitem__, b'f1')
assert_raises(IndexError, a['f1'].__setitem__, b'sf1', 1)
assert_raises(IndexError, a['f1'].__getitem__, b'sf1')
else:
funcs = (str, unicode)
for func in funcs:
b = a.copy()
fn1 = func('f1')
b[fn1] = 1
assert_equal(b[fn1], 1)
fnn = func('not at all')
assert_raises(ValueError, b.__setitem__, fnn, 1)
assert_raises(ValueError, b.__getitem__, fnn)
b[0][fn1] = 2
assert_equal(b[fn1], 2)
# Subfield
assert_raises(ValueError, b[0].__setitem__, fnn, 1)
assert_raises(ValueError, b[0].__getitem__, fnn)
# Subfield
fn3 = func('f3')
sfn1 = func('sf1')
b[fn3][sfn1] = 1
assert_equal(b[fn3][sfn1], 1)
assert_raises(ValueError, b[fn3].__setitem__, fnn, 1)
assert_raises(ValueError, b[fn3].__getitem__, fnn)
# multiple subfields
fn2 = func('f2')
b[fn2] = 3
with suppress_warnings() as sup:
sup.filter(FutureWarning,
"Assignment between structured arrays.*")
sup.filter(FutureWarning,
"Numpy has detected that you .*")
assert_equal(b[['f1', 'f2']][0].tolist(), (2, 3))
assert_equal(b[['f2', 'f1']][0].tolist(), (3, 2))
assert_equal(b[['f1', 'f3']][0].tolist(), (2, (1,)))
# view of subfield view/copy
assert_equal(b[['f1', 'f2']][0].view(('i4', 2)).tolist(),
(2, 3))
assert_equal(b[['f2', 'f1']][0].view(('i4', 2)).tolist(),
(3, 2))
view_dtype = [('f1', 'i4'), ('f3', [('', 'i4')])]
assert_equal(b[['f1', 'f3']][0].view(view_dtype).tolist(),
(2, (1,)))
# non-ascii unicode field indexing is well behaved
if not is_py3:
raise SkipTest('non ascii unicode field indexing skipped; '
'raises segfault on python 2.x')
else:
assert_raises(ValueError, a.__setitem__, u'\u03e0', 1)
assert_raises(ValueError, a.__getitem__, u'\u03e0')
def test_field_names_deprecation(self):
def collect_warnings(f, *args, **kwargs):
with warnings.catch_warnings(record=True) as log:
warnings.simplefilter("always")
f(*args, **kwargs)
return [w.category for w in log]
a = np.zeros((1,), dtype=[('f1', 'i4'),
('f2', 'i4'),
('f3', [('sf1', 'i4')])])
a['f1'][0] = 1
a['f2'][0] = 2
a['f3'][0] = (3,)
b = np.zeros((1,), dtype=[('f1', 'i4'),
('f2', 'i4'),
('f3', [('sf1', 'i4')])])
b['f1'][0] = 1
b['f2'][0] = 2
b['f3'][0] = (3,)
# All the different functions raise a warning, but not an error
assert_equal(collect_warnings(a[['f1', 'f2']].__setitem__, 0, (10, 20)),
[FutureWarning])
# For <=1.12 a is not modified, but it will be in 1.13
assert_equal(a, b)
# Views also warn
subset = a[['f1', 'f2']]
subset_view = subset.view()
assert_equal(collect_warnings(subset_view['f1'].__setitem__, 0, 10),
[FutureWarning])
# But the write goes through:
assert_equal(subset['f1'][0], 10)
# Only one warning per multiple field indexing, though (even if there
# are multiple views involved):
assert_equal(collect_warnings(subset['f1'].__setitem__, 0, 10), [])
# make sure views of a multi-field index warn too
c = np.zeros(3, dtype='i8,i8,i8')
assert_equal(collect_warnings(c[['f0', 'f2']].view, 'i8,i8'),
[FutureWarning])
# make sure assignment using a different dtype warns
a = np.zeros(2, dtype=[('a', 'i4'), ('b', 'i4')])
b = np.zeros(2, dtype=[('b', 'i4'), ('a', 'i4')])
assert_equal(collect_warnings(a.__setitem__, (), b), [FutureWarning])
def test_record_hash(self):
a = np.array([(1, 2), (1, 2)], dtype='i1,i2')
a.flags.writeable = False
b = np.array([(1, 2), (3, 4)], dtype=[('num1', 'i1'), ('num2', 'i2')])
b.flags.writeable = False
c = np.array([(1, 2), (3, 4)], dtype='i1,i2')
c.flags.writeable = False
assert_(hash(a[0]) == hash(a[1]))
assert_(hash(a[0]) == hash(b[0]))
assert_(hash(a[0]) != hash(b[1]))
assert_(hash(c[0]) == hash(a[0]) and c[0] == a[0])
def test_record_no_hash(self):
a = np.array([(1, 2), (1, 2)], dtype='i1,i2')
assert_raises(TypeError, hash, a[0])
def test_empty_structure_creation(self):
# make sure these do not raise errors (gh-5631)
np.array([()], dtype={'names': [], 'formats': [],
'offsets': [], 'itemsize': 12})
np.array([(), (), (), (), ()], dtype={'names': [], 'formats': [],
'offsets': [], 'itemsize': 12})
class TestView(object):
def test_basic(self):
x = np.array([(1, 2, 3, 4), (5, 6, 7, 8)],
dtype=[('r', np.int8), ('g', np.int8),
('b', np.int8), ('a', np.int8)])
# We must be specific about the endianness here:
y = x.view(dtype='<i4')
# ... and again without the keyword.
z = x.view('<i4')
assert_array_equal(y, z)
assert_array_equal(y, [67305985, 134678021])
def _mean(a, **args):
return a.mean(**args)
def _var(a, **args):
return a.var(**args)
def _std(a, **args):
return a.std(**args)
class TestStats(object):
funcs = [_mean, _var, _std]
def setup(self):
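        # rmat: real, cmat: complex (rmat + 1j*rmat), omat: the same values
        # as Decimal objects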
np.random.seed(range(3))
self.rmat = np.random.random((4, 5))
self.cmat = self.rmat + 1j * self.rmat
self.omat = np.array([Decimal(repr(r)) for r in self.rmat.flat])
self.omat = self.omat.reshape(4, 5)
def test_python_type(self):
for x in (np.float16(1.), 1, 1., 1+0j):
assert_equal(np.mean([x]), 1.)
assert_equal(np.std([x]), 0.)
assert_equal(np.var([x]), 0.)
def test_keepdims(self):
mat = np.eye(3)
for f in self.funcs:
for axis in [0, 1]:
res = f(mat, axis=axis, keepdims=True)
assert_(res.ndim == mat.ndim)
assert_(res.shape[axis] == 1)
for axis in [None]:
res = f(mat, axis=axis, keepdims=True)
assert_(res.shape == (1, 1))
def test_out(self):
mat = np.eye(3)
for f in self.funcs:
out = np.zeros(3)
tgt = f(mat, axis=1)
res = f(mat, axis=1, out=out)
assert_almost_equal(res, out)
assert_almost_equal(res, tgt)
out = np.empty(2)
assert_raises(ValueError, f, mat, axis=1, out=out)
out = np.empty((2, 2))
assert_raises(ValueError, f, mat, axis=1, out=out)
def test_dtype_from_input(self):
icodes = np.typecodes['AllInteger']
fcodes = np.typecodes['AllFloat']
# object type
for f in self.funcs:
mat = np.array([[Decimal(1)]*3]*3)
tgt = mat.dtype.type
res = f(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
res = type(f(mat, axis=None))
assert_(res is Decimal)
# integer types
for f in self.funcs:
for c in icodes:
mat = np.eye(3, dtype=c)
tgt = np.float64
res = f(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
res = f(mat, axis=None).dtype.type
assert_(res is tgt)
# mean for float types
for f in [_mean]:
for c in fcodes:
mat = np.eye(3, dtype=c)
tgt = mat.dtype.type
res = f(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
res = f(mat, axis=None).dtype.type
assert_(res is tgt)
# var, std for float types
for f in [_var, _std]:
for c in fcodes:
mat = np.eye(3, dtype=c)
# deal with complex types
tgt = mat.real.dtype.type
res = f(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
res = f(mat, axis=None).dtype.type
assert_(res is tgt)
def test_dtype_from_dtype(self):
mat = np.eye(3)
# stats for integer types
        # FIXME:
        # this needs definition, as there are lots of places along the line
        # where type casting may take place.
# for f in self.funcs:
# for c in np.typecodes['AllInteger']:
# tgt = np.dtype(c).type
# res = f(mat, axis=1, dtype=c).dtype.type
# assert_(res is tgt)
# # scalar case
# res = f(mat, axis=None, dtype=c).dtype.type
# assert_(res is tgt)
# stats for float types
for f in self.funcs:
for c in np.typecodes['AllFloat']:
tgt = np.dtype(c).type
res = f(mat, axis=1, dtype=c).dtype.type
assert_(res is tgt)
# scalar case
res = f(mat, axis=None, dtype=c).dtype.type
assert_(res is tgt)
def test_ddof(self):
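        # var/std divide by (n - ddof); undoing that scaling should give the
        # same quantity regardless of ddof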
for f in [_var]:
for ddof in range(3):
dim = self.rmat.shape[1]
tgt = f(self.rmat, axis=1) * dim
res = f(self.rmat, axis=1, ddof=ddof) * (dim - ddof)
for f in [_std]:
for ddof in range(3):
dim = self.rmat.shape[1]
tgt = f(self.rmat, axis=1) * np.sqrt(dim)
res = f(self.rmat, axis=1, ddof=ddof) * np.sqrt(dim - ddof)
assert_almost_equal(res, tgt)
assert_almost_equal(res, tgt)
def test_ddof_too_big(self):
dim = self.rmat.shape[1]
for f in [_var, _std]:
for ddof in range(dim, dim + 2):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
res = f(self.rmat, axis=1, ddof=ddof)
assert_(not (res < 0).any())
assert_(len(w) > 0)
assert_(issubclass(w[0].category, RuntimeWarning))
def test_empty(self):
A = np.zeros((0, 3))
for f in self.funcs:
for axis in [0, None]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_(np.isnan(f(A, axis=axis)).all())
assert_(len(w) > 0)
assert_(issubclass(w[0].category, RuntimeWarning))
for axis in [1]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_equal(f(A, axis=axis), np.zeros([]))
def test_mean_values(self):
for mat in [self.rmat, self.cmat, self.omat]:
for axis in [0, 1]:
tgt = mat.sum(axis=axis)
res = _mean(mat, axis=axis) * mat.shape[axis]
assert_almost_equal(res, tgt)
for axis in [None]:
tgt = mat.sum(axis=axis)
res = _mean(mat, axis=axis) * np.prod(mat.shape)
assert_almost_equal(res, tgt)
def test_mean_float16(self):
        # This fails if the sum inside mean is done in float16 instead
        # of float32.
assert_(_mean(np.ones(100000, dtype='float16')) == 1)
def test_var_values(self):
for mat in [self.rmat, self.cmat, self.omat]:
for axis in [0, 1, None]:
msqr = _mean(mat * mat.conj(), axis=axis)
mean = _mean(mat, axis=axis)
tgt = msqr - mean * mean.conjugate()
res = _var(mat, axis=axis)
assert_almost_equal(res, tgt)
def test_std_values(self):
for mat in [self.rmat, self.cmat, self.omat]:
for axis in [0, 1, None]:
tgt = np.sqrt(_var(mat, axis=axis))
res = _std(mat, axis=axis)
assert_almost_equal(res, tgt)
def test_subclass(self):
class TestArray(np.ndarray):
def __new__(cls, data, info):
result = np.array(data)
result = result.view(cls)
result.info = info
return result
def __array_finalize__(self, obj):
self.info = getattr(obj, "info", '')
dat = TestArray([[1, 2, 3, 4], [5, 6, 7, 8]], 'jubba')
res = dat.mean(1)
assert_(res.info == dat.info)
res = dat.std(1)
assert_(res.info == dat.info)
res = dat.var(1)
assert_(res.info == dat.info)
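# Illustrative sketch (not part of the original suite, helper name is made up):
# the identity exercised by test_ddof above. Both sides reduce to the same sum
# of squared deviations, so var(x, ddof=d) * (N - d) equals var(x) * N.
def _ddof_identity_sketch():
    x = np.arange(5.0)
    n = x.size
    return np.allclose(np.var(x, ddof=1) * (n - 1), np.var(x) * n)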
class TestVdot(object):
def test_basic(self):
dt_numeric = np.typecodes['AllFloat'] + np.typecodes['AllInteger']
dt_complex = np.typecodes['Complex']
# test real
a = np.eye(3)
for dt in dt_numeric + 'O':
b = a.astype(dt)
res = np.vdot(b, b)
assert_(np.isscalar(res))
assert_equal(np.vdot(b, b), 3)
# test complex
a = np.eye(3) * 1j
for dt in dt_complex + 'O':
b = a.astype(dt)
res = np.vdot(b, b)
assert_(np.isscalar(res))
assert_equal(np.vdot(b, b), 3)
# test boolean
b = np.eye(3, dtype=bool)
res = np.vdot(b, b)
assert_(np.isscalar(res))
assert_equal(np.vdot(b, b), True)
def test_vdot_array_order(self):
a = np.array([[1, 2], [3, 4]], order='C')
b = np.array([[1, 2], [3, 4]], order='F')
res = np.vdot(a, a)
# integer arrays are exact
assert_equal(np.vdot(a, b), res)
assert_equal(np.vdot(b, a), res)
assert_equal(np.vdot(b, b), res)
def test_vdot_uncontiguous(self):
for size in [2, 1000]:
# Different sizes match different branches in vdot.
a = np.zeros((size, 2, 2))
b = np.zeros((size, 2, 2))
a[:, 0, 0] = np.arange(size)
b[:, 0, 0] = np.arange(size) + 1
# Make a and b uncontiguous:
a = a[..., 0]
b = b[..., 0]
assert_equal(np.vdot(a, b),
np.vdot(a.flatten(), b.flatten()))
assert_equal(np.vdot(a, b.copy()),
np.vdot(a.flatten(), b.flatten()))
assert_equal(np.vdot(a.copy(), b),
np.vdot(a.flatten(), b.flatten()))
assert_equal(np.vdot(a.copy('F'), b),
np.vdot(a.flatten(), b.flatten()))
assert_equal(np.vdot(a, b.copy('F')),
np.vdot(a.flatten(), b.flatten()))
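# Illustrative sketch (not part of the original suite, helper name is made up):
# np.vdot conjugates its first argument and reduces over the flattened inputs,
# which is why the complex identity matrix in TestVdot still gives 3.
def _vdot_sketch():
    a = np.eye(3) * 1j
    return np.vdot(a, a)   # (3+0j), i.e. the sum of abs(a.ravel())**2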
class TestDot(object):
def setup(self):
np.random.seed(128)
self.A = np.random.rand(4, 2)
self.b1 = np.random.rand(2, 1)
self.b2 = np.random.rand(2)
self.b3 = np.random.rand(1, 2)
self.b4 = np.random.rand(4)
self.N = 7
def test_dotmatmat(self):
A = self.A
res = np.dot(A.transpose(), A)
tgt = np.array([[1.45046013, 0.86323640],
[0.86323640, 0.84934569]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotmatvec(self):
A, b1 = self.A, self.b1
res = np.dot(A, b1)
tgt = np.array([[0.32114320], [0.04889721],
[0.15696029], [0.33612621]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotmatvec2(self):
A, b2 = self.A, self.b2
res = np.dot(A, b2)
tgt = np.array([0.29677940, 0.04518649, 0.14468333, 0.31039293])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecmat(self):
A, b4 = self.A, self.b4
res = np.dot(b4, A)
tgt = np.array([1.23495091, 1.12222648])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecmat2(self):
b3, A = self.b3, self.A
res = np.dot(b3, A.transpose())
tgt = np.array([[0.58793804, 0.08957460, 0.30605758, 0.62716383]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecmat3(self):
A, b4 = self.A, self.b4
res = np.dot(A.transpose(), b4)
tgt = np.array([1.23495091, 1.12222648])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecvecouter(self):
b1, b3 = self.b1, self.b3
res = np.dot(b1, b3)
tgt = np.array([[0.20128610, 0.08400440], [0.07190947, 0.03001058]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecvecinner(self):
b1, b3 = self.b1, self.b3
res = np.dot(b3, b1)
tgt = np.array([[ 0.23129668]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotcolumnvect1(self):
b1 = np.ones((3, 1))
b2 = [5.3]
res = np.dot(b1, b2)
tgt = np.array([5.3, 5.3, 5.3])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotcolumnvect2(self):
b1 = np.ones((3, 1)).transpose()
b2 = [6.2]
res = np.dot(b2, b1)
tgt = np.array([6.2, 6.2, 6.2])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecscalar(self):
np.random.seed(100)
b1 = np.random.rand(1, 1)
b2 = np.random.rand(1, 4)
res = np.dot(b1, b2)
tgt = np.array([[0.15126730, 0.23068496, 0.45905553, 0.00256425]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecscalar2(self):
np.random.seed(100)
b1 = np.random.rand(4, 1)
b2 = np.random.rand(1, 1)
res = np.dot(b1, b2)
tgt = np.array([[0.00256425],[0.00131359],[0.00200324],[ 0.00398638]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_all(self):
dims = [(), (1,), (1, 1)]
dout = [(), (1,), (1, 1), (1,), (), (1,), (1, 1), (1,), (1, 1)]
for dim, (dim1, dim2) in zip(dout, itertools.product(dims, dims)):
b1 = np.zeros(dim1)
b2 = np.zeros(dim2)
res = np.dot(b1, b2)
tgt = np.zeros(dim)
assert_(res.shape == tgt.shape)
assert_almost_equal(res, tgt, decimal=self.N)
def test_vecobject(self):
class Vec(object):
def __init__(self, sequence=None):
if sequence is None:
sequence = []
self.array = np.array(sequence)
def __add__(self, other):
out = Vec()
out.array = self.array + other.array
return out
def __sub__(self, other):
out = Vec()
out.array = self.array - other.array
return out
def __mul__(self, other): # with scalar
out = Vec(self.array.copy())
out.array *= other
return out
def __rmul__(self, other):
return self*other
U_non_cont = np.transpose([[1., 1.], [1., 2.]])
U_cont = np.ascontiguousarray(U_non_cont)
x = np.array([Vec([1., 0.]), Vec([0., 1.])])
zeros = np.array([Vec([0., 0.]), Vec([0., 0.])])
zeros_test = np.dot(U_cont, x) - np.dot(U_non_cont, x)
assert_equal(zeros[0].array, zeros_test[0].array)
assert_equal(zeros[1].array, zeros_test[1].array)
def test_dot_2args(self):
from numpy.core.multiarray import dot
a = np.array([[1, 2], [3, 4]], dtype=float)
b = np.array([[1, 0], [1, 1]], dtype=float)
c = np.array([[3, 2], [7, 4]], dtype=float)
d = dot(a, b)
assert_allclose(c, d)
def test_dot_3args(self):
from numpy.core.multiarray import dot
np.random.seed(22)
f = np.random.random_sample((1024, 16))
v = np.random.random_sample((16, 32))
r = np.empty((1024, 32))
for i in range(12):
dot(f, v, r)
if HAS_REFCOUNT:
assert_equal(sys.getrefcount(r), 2)
r2 = dot(f, v, out=None)
assert_array_equal(r2, r)
assert_(r is dot(f, v, out=r))
v = v[:, 0].copy() # v.shape == (16,)
r = r[:, 0].copy() # r.shape == (1024,)
r2 = dot(f, v)
assert_(r is dot(f, v, r))
assert_array_equal(r2, r)
def test_dot_3args_errors(self):
from numpy.core.multiarray import dot
np.random.seed(22)
f = np.random.random_sample((1024, 16))
v = np.random.random_sample((16, 32))
r = np.empty((1024, 31))
assert_raises(ValueError, dot, f, v, r)
r = np.empty((1024,))
assert_raises(ValueError, dot, f, v, r)
r = np.empty((32,))
assert_raises(ValueError, dot, f, v, r)
r = np.empty((32, 1024))
assert_raises(ValueError, dot, f, v, r)
assert_raises(ValueError, dot, f, v, r.T)
r = np.empty((1024, 64))
assert_raises(ValueError, dot, f, v, r[:, ::2])
assert_raises(ValueError, dot, f, v, r[:, :32])
r = np.empty((1024, 32), dtype=np.float32)
assert_raises(ValueError, dot, f, v, r)
r = np.empty((1024, 32), dtype=int)
assert_raises(ValueError, dot, f, v, r)
def test_dot_array_order(self):
a = np.array([[1, 2], [3, 4]], order='C')
b = np.array([[1, 2], [3, 4]], order='F')
res = np.dot(a, a)
# integer arrays are exact
assert_equal(np.dot(a, b), res)
assert_equal(np.dot(b, a), res)
assert_equal(np.dot(b, b), res)
def test_dot_scalar_and_matrix_of_objects(self):
# Ticket #2469
arr = np.matrix([1, 2], dtype=object)
desired = np.matrix([[3, 6]], dtype=object)
assert_equal(np.dot(arr, 3), desired)
assert_equal(np.dot(3, arr), desired)
def test_accelerate_framework_sgemv_fix(self):
def aligned_array(shape, align, dtype, order='C'):
d = dtype(0)
N = np.prod(shape)
tmp = np.zeros(N * d.nbytes + align, dtype=np.uint8)
address = tmp.__array_interface__["data"][0]
for offset in range(align):
if (address + offset) % align == 0:
break
tmp = tmp[offset:offset+N*d.nbytes].view(dtype=dtype)
return tmp.reshape(shape, order=order)
def as_aligned(arr, align, dtype, order='C'):
aligned = aligned_array(arr.shape, align, dtype, order)
aligned[:] = arr[:]
return aligned
def assert_dot_close(A, X, desired):
assert_allclose(np.dot(A, X), desired, rtol=1e-5, atol=1e-7)
m = aligned_array(100, 15, np.float32)
s = aligned_array((100, 100), 15, np.float32)
np.dot(s, m) # this will always segfault if the bug is present
testdata = itertools.product((15,32), (10000,), (200,89), ('C','F'))
for align, m, n, a_order in testdata:
# Calculation in double precision
A_d = np.random.rand(m, n)
X_d = np.random.rand(n)
desired = np.dot(A_d, X_d)
# Calculation with aligned single precision
A_f = as_aligned(A_d, align, np.float32, order=a_order)
X_f = as_aligned(X_d, align, np.float32)
assert_dot_close(A_f, X_f, desired)
# Strided A rows
A_d_2 = A_d[::2]
desired = np.dot(A_d_2, X_d)
A_f_2 = A_f[::2]
assert_dot_close(A_f_2, X_f, desired)
# Strided A columns, strided X vector
A_d_22 = A_d_2[:, ::2]
X_d_2 = X_d[::2]
desired = np.dot(A_d_22, X_d_2)
A_f_22 = A_f_2[:, ::2]
X_f_2 = X_f[::2]
assert_dot_close(A_f_22, X_f_2, desired)
# Check the strides are as expected
if a_order == 'F':
assert_equal(A_f_22.strides, (8, 8 * m))
else:
assert_equal(A_f_22.strides, (8 * n, 8))
assert_equal(X_f_2.strides, (8,))
# Strides in A rows + cols only
X_f_2c = as_aligned(X_f_2, align, np.float32)
assert_dot_close(A_f_22, X_f_2c, desired)
# Strides just in A cols
A_d_12 = A_d[:, ::2]
desired = np.dot(A_d_12, X_d_2)
A_f_12 = A_f[:, ::2]
assert_dot_close(A_f_12, X_f_2c, desired)
# Strides in A cols and X
assert_dot_close(A_f_12, X_f_2, desired)
class MatmulCommon(object):
"""Common tests for '@' operator and numpy.matmul.
Do not derive from TestCase to avoid nose running it.
"""
# Should work with these types. Will want to add
# "O" at some point
types = "?bhilqBHILQefdgFDG"
def test_exceptions(self):
dims = [
((1,), (2,)), # mismatched vector vector
((2, 1,), (2,)), # mismatched matrix vector
((2,), (1, 2)), # mismatched vector matrix
((1, 2), (3, 1)), # mismatched matrix matrix
((1,), ()), # vector scalar
((), (1,)), # scalar vector
((1, 1), ()), # matrix scalar
((), (1, 1)), # scalar matrix
((2, 2, 1), (3, 1, 2)), # cannot broadcast
]
for dt, (dm1, dm2) in itertools.product(self.types, dims):
a = np.ones(dm1, dtype=dt)
b = np.ones(dm2, dtype=dt)
assert_raises(ValueError, self.matmul, a, b)
def test_shapes(self):
dims = [
((1, 1), (2, 1, 1)), # broadcast first argument
((2, 1, 1), (1, 1)), # broadcast second argument
((2, 1, 1), (2, 1, 1)), # matrix stack sizes match
]
for dt, (dm1, dm2) in itertools.product(self.types, dims):
a = np.ones(dm1, dtype=dt)
b = np.ones(dm2, dtype=dt)
res = self.matmul(a, b)
assert_(res.shape == (2, 1, 1))
# vector vector returns scalars.
for dt in self.types:
a = np.ones((2,), dtype=dt)
b = np.ones((2,), dtype=dt)
c = self.matmul(a, b)
assert_(np.array(c).shape == ())
def test_result_types(self):
mat = np.ones((1,1))
vec = np.ones((1,))
for dt in self.types:
m = mat.astype(dt)
v = vec.astype(dt)
for arg in [(m, v), (v, m), (m, m)]:
res = self.matmul(*arg)
assert_(res.dtype == dt)
# vector vector returns scalars
res = self.matmul(v, v)
assert_(type(res) is np.dtype(dt).type)
def test_vector_vector_values(self):
vec = np.array([1, 2])
tgt = 5
for dt in self.types[1:]:
v1 = vec.astype(dt)
res = self.matmul(v1, v1)
assert_equal(res, tgt)
# boolean type
vec = np.array([True, True], dtype='?')
res = self.matmul(vec, vec)
assert_equal(res, True)
def test_vector_matrix_values(self):
vec = np.array([1, 2])
mat1 = np.array([[1, 2], [3, 4]])
mat2 = np.stack([mat1]*2, axis=0)
tgt1 = np.array([7, 10])
tgt2 = np.stack([tgt1]*2, axis=0)
for dt in self.types[1:]:
v = vec.astype(dt)
m1 = mat1.astype(dt)
m2 = mat2.astype(dt)
res = self.matmul(v, m1)
assert_equal(res, tgt1)
res = self.matmul(v, m2)
assert_equal(res, tgt2)
# boolean type
vec = np.array([True, False])
mat1 = np.array([[True, False], [False, True]])
mat2 = np.stack([mat1]*2, axis=0)
tgt1 = np.array([True, False])
tgt2 = np.stack([tgt1]*2, axis=0)
res = self.matmul(vec, mat1)
assert_equal(res, tgt1)
res = self.matmul(vec, mat2)
assert_equal(res, tgt2)
def test_matrix_vector_values(self):
vec = np.array([1, 2])
mat1 = np.array([[1, 2], [3, 4]])
mat2 = np.stack([mat1]*2, axis=0)
tgt1 = np.array([5, 11])
tgt2 = np.stack([tgt1]*2, axis=0)
for dt in self.types[1:]:
v = vec.astype(dt)
m1 = mat1.astype(dt)
m2 = mat2.astype(dt)
res = self.matmul(m1, v)
assert_equal(res, tgt1)
res = self.matmul(m2, v)
assert_equal(res, tgt2)
# boolean type
vec = np.array([True, False])
mat1 = np.array([[True, False], [False, True]])
mat2 = np.stack([mat1]*2, axis=0)
tgt1 = np.array([True, False])
tgt2 = np.stack([tgt1]*2, axis=0)
res = self.matmul(mat1, vec)
assert_equal(res, tgt1)
res = self.matmul(mat2, vec)
assert_equal(res, tgt2)
def test_matrix_matrix_values(self):
mat1 = np.array([[1, 2], [3, 4]])
mat2 = np.array([[1, 0], [1, 1]])
mat12 = np.stack([mat1, mat2], axis=0)
mat21 = np.stack([mat2, mat1], axis=0)
tgt11 = np.array([[7, 10], [15, 22]])
tgt12 = np.array([[3, 2], [7, 4]])
tgt21 = np.array([[1, 2], [4, 6]])
tgt12_21 = np.stack([tgt12, tgt21], axis=0)
tgt11_12 = np.stack((tgt11, tgt12), axis=0)
tgt11_21 = np.stack((tgt11, tgt21), axis=0)
for dt in self.types[1:]:
m1 = mat1.astype(dt)
m2 = mat2.astype(dt)
m12 = mat12.astype(dt)
m21 = mat21.astype(dt)
# matrix @ matrix
res = self.matmul(m1, m2)
assert_equal(res, tgt12)
res = self.matmul(m2, m1)
assert_equal(res, tgt21)
# stacked @ matrix
res = self.matmul(m12, m1)
assert_equal(res, tgt11_21)
# matrix @ stacked
res = self.matmul(m1, m12)
assert_equal(res, tgt11_12)
# stacked @ stacked
res = self.matmul(m12, m21)
assert_equal(res, tgt12_21)
# boolean type
m1 = np.array([[1, 1], [0, 0]], dtype=np.bool_)
m2 = np.array([[1, 0], [1, 1]], dtype=np.bool_)
m12 = np.stack([m1, m2], axis=0)
m21 = np.stack([m2, m1], axis=0)
tgt11 = m1
tgt12 = m1
tgt21 = np.array([[1, 1], [1, 1]], dtype=np.bool_)
tgt12_21 = np.stack([tgt12, tgt21], axis=0)
tgt11_12 = np.stack((tgt11, tgt12), axis=0)
tgt11_21 = np.stack((tgt11, tgt21), axis=0)
# matrix @ matrix
res = self.matmul(m1, m2)
assert_equal(res, tgt12)
res = self.matmul(m2, m1)
assert_equal(res, tgt21)
# stacked @ matrix
res = self.matmul(m12, m1)
assert_equal(res, tgt11_21)
# matrix @ stacked
res = self.matmul(m1, m12)
assert_equal(res, tgt11_12)
# stacked @ stacked
res = self.matmul(m12, m21)
assert_equal(res, tgt12_21)
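# Small sketch (not part of the original suite, helper name is made up) of the
# broadcasting exercised by MatmulCommon.test_shapes: matmul treats the
# trailing two axes as matrices and broadcasts any leading axes.
def _matmul_broadcast_sketch():
    a = np.ones((2, 1, 1))   # a stack of two 1x1 matrices
    b = np.ones((1, 1))      # one 1x1 matrix, broadcast across the stack
    return np.matmul(a, b).shape   # (2, 1, 1)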
class TestMatmul(MatmulCommon):
matmul = np.matmul
def test_out_arg(self):
a = np.ones((2, 2), dtype=float)
b = np.ones((2, 2), dtype=float)
tgt = np.full((2,2), 2, dtype=float)
# test as positional argument
msg = "out positional argument"
out = np.zeros((2, 2), dtype=float)
self.matmul(a, b, out)
assert_array_equal(out, tgt, err_msg=msg)
# test as keyword argument
msg = "out keyword argument"
out = np.zeros((2, 2), dtype=float)
self.matmul(a, b, out=out)
assert_array_equal(out, tgt, err_msg=msg)
# test out with not allowed type cast (safe casting)
# einsum and cblas raise different error types, so
# use Exception.
msg = "out argument with illegal cast"
out = np.zeros((2, 2), dtype=np.int32)
assert_raises(Exception, self.matmul, a, b, out=out)
# skip the following tests for now: cblas does not allow non-contiguous
# outputs, and consistency with dot would require the same type,
# dimensions, subtype, and c_contiguous layout.
# test out with allowed type cast
# msg = "out argument with allowed cast"
# out = np.zeros((2, 2), dtype=np.complex128)
# self.matmul(a, b, out=out)
# assert_array_equal(out, tgt, err_msg=msg)
# test out non-contiguous
# msg = "out argument with non-contiguous layout"
# c = np.zeros((2, 2, 2), dtype=float)
# self.matmul(a, b, out=c[..., 0])
# assert_array_equal(c, tgt, err_msg=msg)
if sys.version_info[:2] >= (3, 5):
class TestMatmulOperator(MatmulCommon):
import operator
matmul = operator.matmul
def test_array_priority_override(self):
class A(object):
__array_priority__ = 1000
def __matmul__(self, other):
return "A"
def __rmatmul__(self, other):
return "A"
a = A()
b = np.ones(2)
assert_equal(self.matmul(a, b), "A")
assert_equal(self.matmul(b, a), "A")
def test_matmul_inplace():
# It would be nice to support in-place matmul eventually, but for now
# we don't have a working implementation, so better just to error out
# and nudge people to writing "a = a @ b".
a = np.eye(3)
b = np.eye(3)
assert_raises(TypeError, a.__imatmul__, b)
import operator
assert_raises(TypeError, operator.imatmul, a, b)
# we avoid writing the token `exec` so as not to crash python 2's
# parser
exec_ = getattr(builtins, "exec")
assert_raises(TypeError, exec_, "a @= b", globals(), locals())
class TestInner(object):
def test_inner_type_mismatch(self):
c = 1.
A = np.array((1,1), dtype='i,i')
assert_raises(TypeError, np.inner, c, A)
assert_raises(TypeError, np.inner, A, c)
def test_inner_scalar_and_vector(self):
for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?':
sca = np.array(3, dtype=dt)[()]
vec = np.array([1, 2], dtype=dt)
desired = np.array([3, 6], dtype=dt)
assert_equal(np.inner(vec, sca), desired)
assert_equal(np.inner(sca, vec), desired)
def test_inner_scalar_and_matrix(self):
for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?':
sca = np.array(3, dtype=dt)[()]
arr = np.matrix([[1, 2], [3, 4]], dtype=dt)
desired = np.matrix([[3, 6], [9, 12]], dtype=dt)
assert_equal(np.inner(arr, sca), desired)
assert_equal(np.inner(sca, arr), desired)
def test_inner_scalar_and_matrix_of_objects(self):
# Ticket #4482
arr = np.matrix([1, 2], dtype=object)
desired = np.matrix([[3, 6]], dtype=object)
assert_equal(np.inner(arr, 3), desired)
assert_equal(np.inner(3, arr), desired)
def test_vecself(self):
# Ticket 844.
# Inner product of a vector with itself segfaults or gives a
# meaningless result
a = np.zeros(shape=(1, 80), dtype=np.float64)
p = np.inner(a, a)
assert_almost_equal(p, 0, decimal=14)
def test_inner_product_with_various_contiguities(self):
# github issue 6532
for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?':
# check an inner product involving a matrix transpose
A = np.array([[1, 2], [3, 4]], dtype=dt)
B = np.array([[1, 3], [2, 4]], dtype=dt)
C = np.array([1, 1], dtype=dt)
desired = np.array([4, 6], dtype=dt)
assert_equal(np.inner(A.T, C), desired)
assert_equal(np.inner(C, A.T), desired)
assert_equal(np.inner(B, C), desired)
assert_equal(np.inner(C, B), desired)
# check a matrix product
desired = np.array([[7, 10], [15, 22]], dtype=dt)
assert_equal(np.inner(A, B), desired)
# check the syrk vs. gemm paths
desired = np.array([[5, 11], [11, 25]], dtype=dt)
assert_equal(np.inner(A, A), desired)
assert_equal(np.inner(A, A.copy()), desired)
# check an inner product involving an aliased and reversed view
a = np.arange(5).astype(dt)
b = a[::-1]
desired = np.array(10, dtype=dt).item()
assert_equal(np.inner(b, a), desired)
def test_3d_tensor(self):
for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?':
a = np.arange(24).reshape(2,3,4).astype(dt)
b = np.arange(24, 48).reshape(2,3,4).astype(dt)
desired = np.array(
[[[[ 158, 182, 206],
[ 230, 254, 278]],
[[ 566, 654, 742],
[ 830, 918, 1006]],
[[ 974, 1126, 1278],
[1430, 1582, 1734]]],
[[[1382, 1598, 1814],
[2030, 2246, 2462]],
[[1790, 2070, 2350],
[2630, 2910, 3190]],
[[2198, 2542, 2886],
[3230, 3574, 3918]]]],
dtype=dt
)
assert_equal(np.inner(a, b), desired)
assert_equal(np.inner(b, a).transpose(2,3,0,1), desired)
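# Illustrative sketch (not part of the original suite, helper name is made up):
# np.inner sums products over the last axis of each operand, so the 3-d case in
# test_3d_tensor has result shape a.shape[:-1] + b.shape[:-1].
def _inner_shape_sketch():
    a = np.arange(24).reshape(2, 3, 4)
    b = np.arange(24, 48).reshape(2, 3, 4)
    return np.inner(a, b).shape   # (2, 3, 2, 3)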
class TestSummarization(object):
def test_1d(self):
A = np.arange(1001)
strA = '[ 0 1 2 ..., 998 999 1000]'
assert_(str(A) == strA)
reprA = 'array([ 0, 1, 2, ..., 998, 999, 1000])'
assert_(repr(A) == reprA)
def test_2d(self):
A = np.arange(1002).reshape(2, 501)
strA = '[[ 0 1 2 ..., 498 499 500]\n' \
' [ 501 502 503 ..., 999 1000 1001]]'
assert_(str(A) == strA)
reprA = 'array([[ 0, 1, 2, ..., 498, 499, 500],\n' \
' [ 501, 502, 503, ..., 999, 1000, 1001]])'
assert_(repr(A) == reprA)
class TestAlen(object):
def test_basic(self):
m = np.array([1, 2, 3])
assert_equal(np.alen(m), 3)
m = np.array([[1, 2, 3], [4, 5, 7]])
assert_equal(np.alen(m), 2)
m = [1, 2, 3]
assert_equal(np.alen(m), 3)
m = [[1, 2, 3], [4, 5, 7]]
assert_equal(np.alen(m), 2)
def test_singleton(self):
assert_equal(np.alen(5), 1)
class TestChoose(object):
def setup(self):
self.x = 2*np.ones((3,), dtype=int)
self.y = 3*np.ones((3,), dtype=int)
self.x2 = 2*np.ones((2, 3), dtype=int)
self.y2 = 3*np.ones((2, 3), dtype=int)
self.ind = [0, 0, 1]
def test_basic(self):
A = np.choose(self.ind, (self.x, self.y))
assert_equal(A, [2, 2, 3])
def test_broadcast1(self):
A = np.choose(self.ind, (self.x2, self.y2))
assert_equal(A, [[2, 2, 3], [2, 2, 3]])
def test_broadcast2(self):
A = np.choose(self.ind, (self.x, self.y2))
assert_equal(A, [[2, 2, 3], [2, 2, 3]])
class TestRepeat(object):
def setup(self):
self.m = np.array([1, 2, 3, 4, 5, 6])
self.m_rect = self.m.reshape((2, 3))
def test_basic(self):
A = np.repeat(self.m, [1, 3, 2, 1, 1, 2])
assert_equal(A, [1, 2, 2, 2, 3,
3, 4, 5, 6, 6])
def test_broadcast1(self):
A = np.repeat(self.m, 2)
assert_equal(A, [1, 1, 2, 2, 3, 3,
4, 4, 5, 5, 6, 6])
def test_axis_spec(self):
A = np.repeat(self.m_rect, [2, 1], axis=0)
assert_equal(A, [[1, 2, 3],
[1, 2, 3],
[4, 5, 6]])
A = np.repeat(self.m_rect, [1, 3, 2], axis=1)
assert_equal(A, [[1, 2, 2, 2, 3, 3],
[4, 5, 5, 5, 6, 6]])
def test_broadcast2(self):
A = np.repeat(self.m_rect, 2, axis=0)
assert_equal(A, [[1, 2, 3],
[1, 2, 3],
[4, 5, 6],
[4, 5, 6]])
A = np.repeat(self.m_rect, 2, axis=1)
assert_equal(A, [[1, 1, 2, 2, 3, 3],
[4, 4, 5, 5, 6, 6]])
# TODO: test for multidimensional
NEIGH_MODE = {'zero': 0, 'one': 1, 'constant': 2, 'circular': 3, 'mirror': 4}
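# Rough sketch (not part of the original suite, helper name is made up) of what
# the padding modes above mean, expressed with np.pad for a one-element border:
# 'zero'/'one'/'constant' pad with a fixed value, 'mirror' repeats edge values,
# and 'circular' wraps around.
def _neigh_mode_padding_sketch():
    x = np.array([1., 2., 3.])
    return {
        'zero': np.pad(x, 1, mode='constant', constant_values=0),  # [0 1 2 3 0]
        'one': np.pad(x, 1, mode='constant', constant_values=1),   # [1 1 2 3 1]
        'mirror': np.pad(x, 1, mode='symmetric'),                  # [1 1 2 3 3]
        'circular': np.pad(x, 1, mode='wrap'),                     # [3 1 2 3 1]
    }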
class TestNeighborhoodIter(object):
# Simple, 2d tests
def _test_simple2d(self, dt):
# Test zero and one padding for simple data type
x = np.array([[0, 1], [2, 3]], dtype=dt)
r = [np.array([[0, 0, 0], [0, 0, 1]], dtype=dt),
np.array([[0, 0, 0], [0, 1, 0]], dtype=dt),
np.array([[0, 0, 1], [0, 2, 3]], dtype=dt),
np.array([[0, 1, 0], [2, 3, 0]], dtype=dt)]
l = test_neighborhood_iterator(x, [-1, 0, -1, 1], x[0],
NEIGH_MODE['zero'])
assert_array_equal(l, r)
r = [np.array([[1, 1, 1], [1, 0, 1]], dtype=dt),
np.array([[1, 1, 1], [0, 1, 1]], dtype=dt),
np.array([[1, 0, 1], [1, 2, 3]], dtype=dt),
np.array([[0, 1, 1], [2, 3, 1]], dtype=dt)]
l = test_neighborhood_iterator(x, [-1, 0, -1, 1], x[0],
NEIGH_MODE['one'])
assert_array_equal(l, r)
r = [np.array([[4, 4, 4], [4, 0, 1]], dtype=dt),
np.array([[4, 4, 4], [0, 1, 4]], dtype=dt),
np.array([[4, 0, 1], [4, 2, 3]], dtype=dt),
np.array([[0, 1, 4], [2, 3, 4]], dtype=dt)]
l = test_neighborhood_iterator(x, [-1, 0, -1, 1], 4,
NEIGH_MODE['constant'])
assert_array_equal(l, r)
def test_simple2d(self):
self._test_simple2d(float)
def test_simple2d_object(self):
self._test_simple2d(Decimal)
def _test_mirror2d(self, dt):
x = np.array([[0, 1], [2, 3]], dtype=dt)
r = [np.array([[0, 0, 1], [0, 0, 1]], dtype=dt),
np.array([[0, 1, 1], [0, 1, 1]], dtype=dt),
np.array([[0, 0, 1], [2, 2, 3]], dtype=dt),
np.array([[0, 1, 1], [2, 3, 3]], dtype=dt)]
l = test_neighborhood_iterator(x, [-1, 0, -1, 1], x[0],
NEIGH_MODE['mirror'])
assert_array_equal(l, r)
def test_mirror2d(self):
self._test_mirror2d(float)
def test_mirror2d_object(self):
self._test_mirror2d(Decimal)
# Simple, 1d tests
def _test_simple(self, dt):
# Test padding with constant values
x = np.linspace(1, 5, 5).astype(dt)
r = [[0, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 0]]
l = test_neighborhood_iterator(x, [-1, 1], x[0], NEIGH_MODE['zero'])
assert_array_equal(l, r)
r = [[1, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 1]]
l = test_neighborhood_iterator(x, [-1, 1], x[0], NEIGH_MODE['one'])
assert_array_equal(l, r)
r = [[x[4], 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, x[4]]]
l = test_neighborhood_iterator(x, [-1, 1], x[4], NEIGH_MODE['constant'])
assert_array_equal(l, r)
def test_simple_float(self):
self._test_simple(float)
def test_simple_object(self):
self._test_simple(Decimal)
# Test mirror modes
def _test_mirror(self, dt):
x = np.linspace(1, 5, 5).astype(dt)
r = np.array([[2, 1, 1, 2, 3], [1, 1, 2, 3, 4], [1, 2, 3, 4, 5],
[2, 3, 4, 5, 5], [3, 4, 5, 5, 4]], dtype=dt)
l = test_neighborhood_iterator(x, [-2, 2], x[1], NEIGH_MODE['mirror'])
assert_([i.dtype == dt for i in l])
assert_array_equal(l, r)
def test_mirror(self):
self._test_mirror(float)
def test_mirror_object(self):
self._test_mirror(Decimal)
# Circular mode
def _test_circular(self, dt):
x = np.linspace(1, 5, 5).astype(dt)
r = np.array([[4, 5, 1, 2, 3], [5, 1, 2, 3, 4], [1, 2, 3, 4, 5],
[2, 3, 4, 5, 1], [3, 4, 5, 1, 2]], dtype=dt)
l = test_neighborhood_iterator(x, [-2, 2], x[0], NEIGH_MODE['circular'])
assert_array_equal(l, r)
def test_circular(self):
self._test_circular(float)
def test_circular_object(self):
self._test_circular(Decimal)
# Test stacking neighborhood iterators
class TestStackedNeighborhoodIter(object):
# Simple, 1d test: stacking 2 constant-padded neigh iterators
def test_simple_const(self):
dt = np.float64
# Test zero and one padding for simple data type
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0], dtype=dt),
np.array([0], dtype=dt),
np.array([1], dtype=dt),
np.array([2], dtype=dt),
np.array([3], dtype=dt),
np.array([0], dtype=dt),
np.array([0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-2, 4], NEIGH_MODE['zero'],
[0, 0], NEIGH_MODE['zero'])
assert_array_equal(l, r)
r = [np.array([1, 0, 1], dtype=dt),
np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt),
np.array([3, 0, 1], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[-1, 1], NEIGH_MODE['one'])
assert_array_equal(l, r)
# 2nd simple, 1d test: stacking 2 neigh iterators, mixing const padding and
# mirror padding
def test_simple_mirror(self):
dt = np.float64
# Stacking zero on top of mirror
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0, 1, 1], dtype=dt),
np.array([1, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 3], dtype=dt),
np.array([3, 3, 0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['mirror'],
[-1, 1], NEIGH_MODE['zero'])
assert_array_equal(l, r)
# Stacking mirror on top of zero
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 0, 0], dtype=dt),
np.array([0, 0, 1], dtype=dt),
np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[-2, 0], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
# Stacking mirror on top of zero: 2nd
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt),
np.array([3, 0, 0], dtype=dt),
np.array([0, 0, 3], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[0, 2], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
# Stacking mirror on top of zero: 3rd
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 0, 0, 1, 2], dtype=dt),
np.array([0, 0, 1, 2, 3], dtype=dt),
np.array([0, 1, 2, 3, 0], dtype=dt),
np.array([1, 2, 3, 0, 0], dtype=dt),
np.array([2, 3, 0, 0, 3], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[-2, 2], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
# 3rd simple, 1d test: stacking 2 neigh iterators, mixing const padding and
# circular padding
def test_simple_circular(self):
dt = np.float64
# Stacking zero on top of mirror
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0, 3, 1], dtype=dt),
np.array([3, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 1], dtype=dt),
np.array([3, 1, 0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['circular'],
[-1, 1], NEIGH_MODE['zero'])
assert_array_equal(l, r)
# Stacking mirror on top of zero
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([3, 0, 0], dtype=dt),
np.array([0, 0, 1], dtype=dt),
np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[-2, 0], NEIGH_MODE['circular'])
assert_array_equal(l, r)
# Stacking mirror on top of zero: 2nd
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt),
np.array([3, 0, 0], dtype=dt),
np.array([0, 0, 1], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[0, 2], NEIGH_MODE['circular'])
assert_array_equal(l, r)
# Stacking mirror on top of zero: 3rd
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([3, 0, 0, 1, 2], dtype=dt),
np.array([0, 0, 1, 2, 3], dtype=dt),
np.array([0, 1, 2, 3, 0], dtype=dt),
np.array([1, 2, 3, 0, 0], dtype=dt),
np.array([2, 3, 0, 0, 1], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[-2, 2], NEIGH_MODE['circular'])
assert_array_equal(l, r)
# 4th simple, 1d test: stacking 2 neigh iterators, but with lower iterator
# being strictly within the array
def test_simple_strict_within(self):
dt = np.float64
# Stacking zero on top of zero, first neighborhood strictly inside the
# array
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 2, 3, 0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [1, 1], NEIGH_MODE['zero'],
[-1, 2], NEIGH_MODE['zero'])
assert_array_equal(l, r)
# Stacking mirror on top of zero, first neighborhood strictly inside the
# array
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 2, 3, 3], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [1, 1], NEIGH_MODE['zero'],
[-1, 2], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
# Stacking mirror on top of zero, first neighborhood strictly inside the
# array
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 2, 3, 1], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [1, 1], NEIGH_MODE['zero'],
[-1, 2], NEIGH_MODE['circular'])
assert_array_equal(l, r)
class TestWarnings(object):
def test_complex_warning(self):
x = np.array([1, 2])
y = np.array([1-2j, 1+2j])
with warnings.catch_warnings():
warnings.simplefilter("error", np.ComplexWarning)
assert_raises(np.ComplexWarning, x.__setitem__, slice(None), y)
assert_equal(x, [1, 2])
class TestMinScalarType(object):
def test_unsigned_shortshort(self):
dt = np.min_scalar_type(2**8-1)
wanted = np.dtype('uint8')
assert_equal(wanted, dt)
def test_unsigned_short(self):
dt = np.min_scalar_type(2**16-1)
wanted = np.dtype('uint16')
assert_equal(wanted, dt)
def test_unsigned_int(self):
dt = np.min_scalar_type(2**32-1)
wanted = np.dtype('uint32')
assert_equal(wanted, dt)
def test_unsigned_longlong(self):
dt = np.min_scalar_type(2**63-1)
wanted = np.dtype('uint64')
assert_equal(wanted, dt)
def test_object(self):
dt = np.min_scalar_type(2**64)
wanted = np.dtype('O')
assert_equal(wanted, dt)
from numpy.core._internal import _dtype_from_pep3118
class TestPEP3118Dtype(object):
def _check(self, spec, wanted):
dt = np.dtype(wanted)
actual = _dtype_from_pep3118(spec)
assert_equal(actual, dt,
err_msg="spec %r != dtype %r" % (spec, wanted))
def test_native_padding(self):
align = np.dtype('i').alignment
for j in range(8):
if j == 0:
s = 'bi'
else:
s = 'b%dxi' % j
self._check('@'+s, {'f0': ('i1', 0),
'f1': ('i', align*(1 + j//align))})
self._check('='+s, {'f0': ('i1', 0),
'f1': ('i', 1+j)})
def test_native_padding_2(self):
# Native padding should also work for structs and sub-arrays
self._check('x3T{xi}', {'f0': (({'f0': ('i', 4)}, (3,)), 4)})
self._check('^x3T{xi}', {'f0': (({'f0': ('i', 1)}, (3,)), 1)})
def test_trailing_padding(self):
# Trailing padding should be included, *and* the item size
# should match the alignment if in aligned mode
align = np.dtype('i').alignment
size = np.dtype('i').itemsize
def aligned(n):
return align*(1 + (n-1)//align)
base = dict(formats=['i'], names=['f0'])
self._check('ix', dict(itemsize=aligned(size + 1), **base))
self._check('ixx', dict(itemsize=aligned(size + 2), **base))
self._check('ixxx', dict(itemsize=aligned(size + 3), **base))
self._check('ixxxx', dict(itemsize=aligned(size + 4), **base))
self._check('i7x', dict(itemsize=aligned(size + 7), **base))
self._check('^ix', dict(itemsize=size + 1, **base))
self._check('^ixx', dict(itemsize=size + 2, **base))
self._check('^ixxx', dict(itemsize=size + 3, **base))
self._check('^ixxxx', dict(itemsize=size + 4, **base))
self._check('^i7x', dict(itemsize=size + 7, **base))
def test_native_padding_3(self):
dt = np.dtype(
[('a', 'b'), ('b', 'i'),
('sub', np.dtype('b,i')), ('c', 'i')],
align=True)
self._check("T{b:a:xxxi:b:T{b:f0:=i:f1:}:sub:xxxi:c:}", dt)
dt = np.dtype(
[('a', 'b'), ('b', 'i'), ('c', 'b'), ('d', 'b'),
('e', 'b'), ('sub', np.dtype('b,i', align=True))])
self._check("T{b:a:=i:b:b:c:b:d:b:e:T{b:f0:xxxi:f1:}:sub:}", dt)
def test_padding_with_array_inside_struct(self):
dt = np.dtype(
[('a', 'b'), ('b', 'i'), ('c', 'b', (3,)),
('d', 'i')],
align=True)
self._check("T{b:a:xxxi:b:3b:c:xi:d:}", dt)
def test_byteorder_inside_struct(self):
# The byte order after @T{=i} should be '=', not '@'.
# Check this by noting the absence of native alignment.
self._check('@T{^i}xi', {'f0': ({'f0': ('i', 0)}, 0),
'f1': ('i', 5)})
def test_intra_padding(self):
# Natively aligned sub-arrays may require some internal padding
align = np.dtype('i').alignment
size = np.dtype('i').itemsize
def aligned(n):
return (align*(1 + (n-1)//align))
self._check('(3)T{ix}', (dict(
names=['f0'],
formats=['i'],
offsets=[0],
itemsize=aligned(size + 1)
), (3,)))
def test_char_vs_string(self):
dt = np.dtype('c')
self._check('c', dt)
dt = np.dtype([('f0', 'S1', (4,)), ('f1', 'S4')])
self._check('4c4s', dt)
def test_field_order(self):
# gh-9053 - previously, we relied on dictionary key order
self._check("(0)I:a:f:b:", [('a', 'I', (0,)), ('b', 'f')])
self._check("(0)I:b:f:a:", [('b', 'I', (0,)), ('a', 'f')])
def test_unnamed_fields(self):
self._check('ii', [('f0', 'i'), ('f1', 'i')])
self._check('ii:f0:', [('f1', 'i'), ('f0', 'i')])
self._check('i', 'i')
self._check('i:f0:', [('f0', 'i')])
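# Quick sketch (not part of the original suite, helper name is made up): the
# PEP 3118 format string numpy itself produces for a structured dtype can be
# inspected by exporting a zero-length array through memoryview, the inverse
# direction of the _dtype_from_pep3118 checks above.
def _pep3118_format_sketch():
    dt = np.dtype([('a', 'i1'), ('b', 'i4')], align=True)
    return memoryview(np.zeros(0, dtype=dt)).format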
class TestNewBufferProtocol(object):
def _check_roundtrip(self, obj):
obj = np.asarray(obj)
x = memoryview(obj)
y = np.asarray(x)
y2 = np.array(x)
assert_(not y.flags.owndata)
assert_(y2.flags.owndata)
assert_equal(y.dtype, obj.dtype)
assert_equal(y.shape, obj.shape)
assert_array_equal(obj, y)
assert_equal(y2.dtype, obj.dtype)
assert_equal(y2.shape, obj.shape)
assert_array_equal(obj, y2)
def test_roundtrip(self):
x = np.array([1, 2, 3, 4, 5], dtype='i4')
self._check_roundtrip(x)
x = np.array([[1, 2], [3, 4]], dtype=np.float64)
self._check_roundtrip(x)
x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0,:]
self._check_roundtrip(x)
dt = [('a', 'b'),
('b', 'h'),
('c', 'i'),
('d', 'l'),
('dx', 'q'),
('e', 'B'),
('f', 'H'),
('g', 'I'),
('h', 'L'),
('hx', 'Q'),
('i', np.single),
('j', np.double),
('k', np.longdouble),
('ix', np.csingle),
('jx', np.cdouble),
('kx', np.clongdouble),
('l', 'S4'),
('m', 'U4'),
('n', 'V3'),
('o', '?'),
('p', np.half),
]
x = np.array(
[(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
b'aaaa', 'bbbb', b'xxx', True, 1.0)],
dtype=dt)
self._check_roundtrip(x)
x = np.array(([[1, 2], [3, 4]],), dtype=[('a', (int, (2, 2)))])
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='>i2')
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='<i2')
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='>i4')
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='<i4')
self._check_roundtrip(x)
# check long long can be represented as non-native
x = np.array([1, 2, 3], dtype='>q')
self._check_roundtrip(x)
# Native-only data types can be passed through the buffer interface
# only in native byte order
if sys.byteorder == 'little':
x = np.array([1, 2, 3], dtype='>g')
assert_raises(ValueError, self._check_roundtrip, x)
x = np.array([1, 2, 3], dtype='<g')
self._check_roundtrip(x)
else:
x = np.array([1, 2, 3], dtype='>g')
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='<g')
assert_raises(ValueError, self._check_roundtrip, x)
def test_roundtrip_half(self):
half_list = [
1.0,
-2.0,
6.5504 * 10**4, # (max half precision)
2**-14, # ~= 6.10352 * 10**-5 (minimum positive normal)
2**-24, # ~= 5.96046 * 10**-8 (minimum strictly positive subnormal)
0.0,
-0.0,
float('+inf'),
float('-inf'),
0.333251953125, # ~= 1/3
]
x = np.array(half_list, dtype='>e')
self._check_roundtrip(x)
x = np.array(half_list, dtype='<e')
self._check_roundtrip(x)
def test_roundtrip_single_types(self):
for typ in np.typeDict.values():
dtype = np.dtype(typ)
if dtype.char in 'Mm':
# datetimes cannot be used in buffers
continue
if dtype.char == 'V':
# skip void
continue
x = np.zeros(4, dtype=dtype)
self._check_roundtrip(x)
if dtype.char not in 'qQgG':
dt = dtype.newbyteorder('<')
x = np.zeros(4, dtype=dt)
self._check_roundtrip(x)
dt = dtype.newbyteorder('>')
x = np.zeros(4, dtype=dt)
self._check_roundtrip(x)
def test_roundtrip_scalar(self):
# Issue #4015.
self._check_roundtrip(0)
def test_export_simple_1d(self):
x = np.array([1, 2, 3, 4, 5], dtype='i')
y = memoryview(x)
assert_equal(y.format, 'i')
assert_equal(y.shape, (5,))
assert_equal(y.ndim, 1)
assert_equal(y.strides, (4,))
assert_equal(y.suboffsets, EMPTY)
assert_equal(y.itemsize, 4)
def test_export_simple_nd(self):
x = np.array([[1, 2], [3, 4]], dtype=np.float64)
y = memoryview(x)
assert_equal(y.format, 'd')
assert_equal(y.shape, (2, 2))
assert_equal(y.ndim, 2)
assert_equal(y.strides, (16, 8))
assert_equal(y.suboffsets, EMPTY)
assert_equal(y.itemsize, 8)
def test_export_discontiguous(self):
x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0,:]
y = memoryview(x)
assert_equal(y.format, 'f')
assert_equal(y.shape, (3, 3))
assert_equal(y.ndim, 2)
assert_equal(y.strides, (36, 4))
assert_equal(y.suboffsets, EMPTY)
assert_equal(y.itemsize, 4)
def test_export_record(self):
dt = [('a', 'b'),
('b', 'h'),
('c', 'i'),
('d', 'l'),
('dx', 'q'),
('e', 'B'),
('f', 'H'),
('g', 'I'),
('h', 'L'),
('hx', 'Q'),
('i', np.single),
('j', np.double),
('k', np.longdouble),
('ix', np.csingle),
('jx', np.cdouble),
('kx', np.clongdouble),
('l', 'S4'),
('m', 'U4'),
('n', 'V3'),
('o', '?'),
('p', np.half),
]
x = np.array(
[(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
b'aaaa', 'bbbb', b' ', True, 1.0)],
dtype=dt)
y = memoryview(x)
assert_equal(y.shape, (1,))
assert_equal(y.ndim, 1)
assert_equal(y.suboffsets, EMPTY)
sz = sum([np.dtype(b).itemsize for a, b in dt])
if np.dtype('l').itemsize == 4:
assert_equal(y.format, 'T{b:a:=h:b:i:c:l:d:q:dx:B:e:@H:f:=I:g:L:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}')
else:
assert_equal(y.format, 'T{b:a:=h:b:i:c:q:d:q:dx:B:e:@H:f:=I:g:Q:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}')
# Cannot test if NPY_RELAXED_STRIDES_CHECKING changes the strides
if not (np.ones(1).strides[0] == np.iinfo(np.intp).max):
assert_equal(y.strides, (sz,))
assert_equal(y.itemsize, sz)
def test_export_subarray(self):
x = np.array(([[1, 2], [3, 4]],), dtype=[('a', ('i', (2, 2)))])
y = memoryview(x)
assert_equal(y.format, 'T{(2,2)i:a:}')
assert_equal(y.shape, EMPTY)
assert_equal(y.ndim, 0)
assert_equal(y.strides, EMPTY)
assert_equal(y.suboffsets, EMPTY)
assert_equal(y.itemsize, 16)
def test_export_endian(self):
x = np.array([1, 2, 3], dtype='>i')
y = memoryview(x)
if sys.byteorder == 'little':
assert_equal(y.format, '>i')
else:
assert_equal(y.format, 'i')
x = np.array([1, 2, 3], dtype='<i')
y = memoryview(x)
if sys.byteorder == 'little':
assert_equal(y.format, 'i')
else:
assert_equal(y.format, '<i')
def test_export_flags(self):
# Check SIMPLE flag, see also gh-3613 (exception should be BufferError)
assert_raises(ValueError, get_buffer_info, np.arange(5)[::2], ('SIMPLE',))
def test_padding(self):
for j in range(8):
x = np.array([(1,), (2,)], dtype={'f0': (int, j)})
self._check_roundtrip(x)
def test_reference_leak(self):
if HAS_REFCOUNT:
count_1 = sys.getrefcount(np.core._internal)
a = np.zeros(4)
b = memoryview(a)
c = np.asarray(b)
if HAS_REFCOUNT:
count_2 = sys.getrefcount(np.core._internal)
assert_equal(count_1, count_2)
del c # avoid pyflakes unused variable warning.
def test_padded_struct_array(self):
dt1 = np.dtype(
[('a', 'b'), ('b', 'i'), ('sub', np.dtype('b,i')), ('c', 'i')],
align=True)
x1 = np.arange(dt1.itemsize, dtype=np.int8).view(dt1)
self._check_roundtrip(x1)
dt2 = np.dtype(
[('a', 'b'), ('b', 'i'), ('c', 'b', (3,)), ('d', 'i')],
align=True)
x2 = np.arange(dt2.itemsize, dtype=np.int8).view(dt2)
self._check_roundtrip(x2)
dt3 = np.dtype(
[('a', 'b'), ('b', 'i'), ('c', 'b'), ('d', 'b'),
('e', 'b'), ('sub', np.dtype('b,i', align=True))])
x3 = np.arange(dt3.itemsize, dtype=np.int8).view(dt3)
self._check_roundtrip(x3)
def test_relaxed_strides(self):
# Test that relaxed strides are converted to non-relaxed
c = np.ones((1, 10, 10), dtype='i8')
# Check for NPY_RELAXED_STRIDES_CHECKING:
if np.ones((10, 1), order="C").flags.f_contiguous:
c.strides = (-1, 80, 8)
assert_(memoryview(c).strides == (800, 80, 8))
# Writing C-contiguous data to a BytesIO buffer should work
fd = io.BytesIO()
fd.write(c.data)
fortran = c.T
assert_(memoryview(fortran).strides == (8, 80, 800))
arr = np.ones((1, 10))
if arr.flags.f_contiguous:
shape, strides = get_buffer_info(arr, ['F_CONTIGUOUS'])
assert_(strides[0] == 8)
arr = np.ones((10, 1), order='F')
shape, strides = get_buffer_info(arr, ['C_CONTIGUOUS'])
assert_(strides[-1] == 8)
class TestArrayAttributeDeletion(object):
def test_multiarray_writable_attributes_deletion(self):
# ticket #2046: should not segfault, but raise AttributeError
a = np.ones(2)
attr = ['shape', 'strides', 'data', 'dtype', 'real', 'imag', 'flat']
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, "Assigning the 'data' attribute")
for s in attr:
assert_raises(AttributeError, delattr, a, s)
def test_multiarray_not_writable_attributes_deletion(self):
a = np.ones(2)
attr = ["ndim", "flags", "itemsize", "size", "nbytes", "base",
"ctypes", "T", "__array_interface__", "__array_struct__",
"__array_priority__", "__array_finalize__"]
for s in attr:
assert_raises(AttributeError, delattr, a, s)
def test_multiarray_flags_writable_attribute_deletion(self):
a = np.ones(2).flags
attr = ['updateifcopy', 'aligned', 'writeable']
for s in attr:
assert_raises(AttributeError, delattr, a, s)
def test_multiarray_flags_not_writable_attribute_deletion(self):
a = np.ones(2).flags
attr = ["contiguous", "c_contiguous", "f_contiguous", "fortran",
"owndata", "fnc", "forc", "behaved", "carray", "farray",
"num"]
for s in attr:
assert_raises(AttributeError, delattr, a, s)
def test_array_interface():
# Test scalar coercion within the array interface
class Foo(object):
def __init__(self, value):
self.value = value
self.iface = {'typestr': '=f8'}
def __float__(self):
return float(self.value)
@property
def __array_interface__(self):
return self.iface
f = Foo(0.5)
assert_equal(np.array(f), 0.5)
assert_equal(np.array([f]), [0.5])
assert_equal(np.array([f, f]), [0.5, 0.5])
assert_equal(np.array(f).dtype, np.dtype('=f8'))
# Test various shape definitions
f.iface['shape'] = ()
assert_equal(np.array(f), 0.5)
f.iface['shape'] = None
assert_raises(TypeError, np.array, f)
f.iface['shape'] = (1, 1)
assert_equal(np.array(f), [[0.5]])
f.iface['shape'] = (2,)
assert_raises(ValueError, np.array, f)
# test scalar with no shape
class ArrayLike(object):
array = np.array(1)
__array_interface__ = array.__array_interface__
assert_equal(np.array(ArrayLike()), 1)
def test_array_interface_itemsize():
# See gh-6361
my_dtype = np.dtype({'names': ['A', 'B'], 'formats': ['f4', 'f4'],
'offsets': [0, 8], 'itemsize': 16})
a = np.ones(10, dtype=my_dtype)
descr_t = np.dtype(a.__array_interface__['descr'])
typestr_t = np.dtype(a.__array_interface__['typestr'])
assert_equal(descr_t.itemsize, typestr_t.itemsize)
def test_flat_element_deletion():
it = np.ones(3).flat
try:
del it[1]
del it[1:2]
except TypeError:
pass
except Exception:
raise AssertionError
def test_scalar_element_deletion():
a = np.zeros(2, dtype=[('x', 'int'), ('y', 'int')])
assert_raises(ValueError, a[0].__delitem__, 'x')
class TestMemEventHook(object):
def test_mem_seteventhook(self):
# The actual tests are within the C code in
# multiarray/multiarray_tests.c.src
test_pydatamem_seteventhook_start()
# force an allocation and free of a numpy array
# needs to be larger than the limit of the small memory cache in ctors.c
a = np.zeros(1000)
del a
gc.collect()
test_pydatamem_seteventhook_end()
class TestMapIter(object):
def test_mapiter(self):
# The actual tests are within the C code in
# multiarray/multiarray_tests.c.src
a = np.arange(12).reshape((3, 4)).astype(float)
index = ([1, 1, 2, 0],
[0, 0, 2, 3])
vals = [50, 50, 30, 16]
test_inplace_increment(a, index, vals)
assert_equal(a, [[0.00, 1., 2.0, 19.],
[104., 5., 6.0, 7.0],
[8.00, 9., 40., 11.]])
b = np.arange(6).astype(float)
index = (np.array([1, 2, 0]),)
vals = [50, 4, 100.1]
test_inplace_increment(b, index, vals)
assert_equal(b, [100.1, 51., 6., 3., 4., 5.])
class TestAsCArray(object):
def test_1darray(self):
array = np.arange(24, dtype=np.double)
from_c = test_as_c_array(array, 3)
assert_equal(array[3], from_c)
def test_2darray(self):
array = np.arange(24, dtype=np.double).reshape(3, 8)
from_c = test_as_c_array(array, 2, 4)
assert_equal(array[2, 4], from_c)
def test_3darray(self):
array = np.arange(24, dtype=np.double).reshape(2, 3, 4)
from_c = test_as_c_array(array, 1, 2, 3)
assert_equal(array[1, 2, 3], from_c)
class TestConversion(object):
def test_array_scalar_relational_operation(self):
# All integer
for dt1 in np.typecodes['AllInteger']:
assert_(1 > np.array(0, dtype=dt1), "type %s failed" % (dt1,))
assert_(not 1 < np.array(0, dtype=dt1), "type %s failed" % (dt1,))
for dt2 in np.typecodes['AllInteger']:
assert_(np.array(1, dtype=dt1) > np.array(0, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(not np.array(1, dtype=dt1) < np.array(0, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
# Unsigned integers
for dt1 in 'BHILQP':
assert_(-1 < np.array(1, dtype=dt1), "type %s failed" % (dt1,))
assert_(not -1 > np.array(1, dtype=dt1), "type %s failed" % (dt1,))
assert_(-1 != np.array(1, dtype=dt1), "type %s failed" % (dt1,))
# Unsigned vs signed
for dt2 in 'bhilqp':
assert_(np.array(1, dtype=dt1) > np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(not np.array(1, dtype=dt1) < np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(np.array(1, dtype=dt1) != np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
# Signed integers and floats
for dt1 in 'bhlqp' + np.typecodes['Float']:
assert_(1 > np.array(-1, dtype=dt1), "type %s failed" % (dt1,))
assert_(not 1 < np.array(-1, dtype=dt1), "type %s failed" % (dt1,))
assert_(-1 == np.array(-1, dtype=dt1), "type %s failed" % (dt1,))
for dt2 in 'bhlqp' + np.typecodes['Float']:
assert_(np.array(1, dtype=dt1) > np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(not np.array(1, dtype=dt1) < np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(np.array(-1, dtype=dt1) == np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
def test_to_bool_scalar(self):
assert_equal(bool(np.array([False])), False)
assert_equal(bool(np.array([True])), True)
assert_equal(bool(np.array([[42]])), True)
assert_raises(ValueError, bool, np.array([1, 2]))
class NotConvertible(object):
def __bool__(self):
raise NotImplementedError
__nonzero__ = __bool__ # python 2
assert_raises(NotImplementedError, bool, np.array(NotConvertible()))
assert_raises(NotImplementedError, bool, np.array([NotConvertible()]))
self_containing = np.array([None])
self_containing[0] = self_containing
try:
Error = RecursionError
except NameError:
Error = RuntimeError # python < 3.5
assert_raises(Error, bool, self_containing) # previously stack overflow
class TestWhere(object):
def test_basic(self):
dts = [bool, np.int16, np.int32, np.int64, np.double, np.complex128,
np.longdouble, np.clongdouble]
for dt in dts:
c = np.ones(53, dtype=bool)
assert_equal(np.where( c, dt(0), dt(1)), dt(0))
assert_equal(np.where(~c, dt(0), dt(1)), dt(1))
assert_equal(np.where(True, dt(0), dt(1)), dt(0))
assert_equal(np.where(False, dt(0), dt(1)), dt(1))
d = np.ones_like(c).astype(dt)
e = np.zeros_like(d)
r = d.astype(dt)
c[7] = False
r[7] = e[7]
assert_equal(np.where(c, e, e), e)
assert_equal(np.where(c, d, e), r)
assert_equal(np.where(c, d, e[0]), r)
assert_equal(np.where(c, d[0], e), r)
assert_equal(np.where(c[::2], d[::2], e[::2]), r[::2])
assert_equal(np.where(c[1::2], d[1::2], e[1::2]), r[1::2])
assert_equal(np.where(c[::3], d[::3], e[::3]), r[::3])
assert_equal(np.where(c[1::3], d[1::3], e[1::3]), r[1::3])
assert_equal(np.where(c[::-2], d[::-2], e[::-2]), r[::-2])
assert_equal(np.where(c[::-3], d[::-3], e[::-3]), r[::-3])
assert_equal(np.where(c[1::-3], d[1::-3], e[1::-3]), r[1::-3])
def test_exotic(self):
# object
assert_array_equal(np.where(True, None, None), np.array(None))
# zero sized
m = np.array([], dtype=bool).reshape(0, 3)
b = np.array([], dtype=np.float64).reshape(0, 3)
assert_array_equal(np.where(m, 0, b), np.array([]).reshape(0, 3))
# object cast
d = np.array([-1.34, -0.16, -0.54, -0.31, -0.08, -0.95, 0.000, 0.313,
0.547, -0.18, 0.876, 0.236, 1.969, 0.310, 0.699, 1.013,
1.267, 0.229, -1.39, 0.487])
nan = float('NaN')
e = np.array(['5z', '0l', nan, 'Wz', nan, nan, 'Xq', 'cs', nan, nan,
'QN', nan, nan, 'Fd', nan, nan, 'kp', nan, '36', 'i1'],
dtype=object)
m = np.array([0, 0, 1, 0, 1, 1, 0, 0, 1, 1,
0, 1, 1, 0, 1, 1, 0, 1, 0, 0], dtype=bool)
r = e[:]
r[np.where(m)] = d[np.where(m)]
assert_array_equal(np.where(m, d, e), r)
r = e[:]
r[np.where(~m)] = d[np.where(~m)]
assert_array_equal(np.where(m, e, d), r)
assert_array_equal(np.where(m, e, e), e)
# minimal dtype result with NaN scalar (e.g. required by pandas)
d = np.array([1., 2.], dtype=np.float32)
e = float('NaN')
assert_equal(np.where(True, d, e).dtype, np.float32)
e = float('Infinity')
assert_equal(np.where(True, d, e).dtype, np.float32)
e = float('-Infinity')
assert_equal(np.where(True, d, e).dtype, np.float32)
# also check upcast
e = float(1e150)
assert_equal(np.where(True, d, e).dtype, np.float64)
def test_ndim(self):
c = [True, False]
a = np.zeros((2, 25))
b = np.ones((2, 25))
r = np.where(np.array(c)[:,np.newaxis], a, b)
assert_array_equal(r[0], a[0])
assert_array_equal(r[1], b[0])
a = a.T
b = b.T
r = np.where(c, a, b)
assert_array_equal(r[:,0], a[:,0])
assert_array_equal(r[:,1], b[:,0])
def test_dtype_mix(self):
c = np.array([False, True, False, False, False, False, True, False,
False, False, True, False])
a = np.uint32(1)
b = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.],
dtype=np.float64)
r = np.array([5., 1., 3., 2., -1., -4., 1., -10., 10., 1., 1., 3.],
dtype=np.float64)
assert_equal(np.where(c, a, b), r)
a = a.astype(np.float32)
b = b.astype(np.int64)
assert_equal(np.where(c, a, b), r)
# non bool mask
c = c.astype(int)
c[c != 0] = 34242324
assert_equal(np.where(c, a, b), r)
# invert
tmpmask = c != 0
c[c == 0] = 41247212
c[tmpmask] = 0
assert_equal(np.where(c, b, a), r)
def test_foreign(self):
c = np.array([False, True, False, False, False, False, True, False,
False, False, True, False])
r = np.array([5., 1., 3., 2., -1., -4., 1., -10., 10., 1., 1., 3.],
dtype=np.float64)
a = np.ones(1, dtype='>i4')
b = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.],
dtype=np.float64)
assert_equal(np.where(c, a, b), r)
b = b.astype('>f8')
assert_equal(np.where(c, a, b), r)
a = a.astype('<i4')
assert_equal(np.where(c, a, b), r)
c = c.astype('>i4')
assert_equal(np.where(c, a, b), r)
def test_error(self):
c = [True, True]
a = np.ones((4, 5))
b = np.ones((5, 5))
assert_raises(ValueError, np.where, c, a, a)
assert_raises(ValueError, np.where, c[0], a, b)
def test_string(self):
# gh-4778 check strings are properly filled with nulls
a = np.array("abc")
b = np.array("x" * 753)
assert_equal(np.where(True, a, b), "abc")
assert_equal(np.where(False, b, a), "abc")
# check native datatype sized strings
a = np.array("abcd")
b = np.array("x" * 8)
assert_equal(np.where(True, a, b), "abcd")
assert_equal(np.where(False, b, a), "abcd")
def test_empty_result(self):
# Pass an empty where() result through an assignment which reads the data of
# empty arrays; the error is detectable with valgrind, see gh-8922
x = np.zeros((1, 1))
ibad = np.vstack(np.where(x == 99.))
assert_array_equal(ibad,
np.atleast_2d(np.array([[],[]], dtype=np.intp)))
def test_largedim(self):
# invalid read regression gh-9304
shape = [10, 2, 3, 4, 5, 6]
np.random.seed(2)
array = np.random.rand(*shape)
for i in range(10):
benchmark = array.nonzero()
result = array.nonzero()
assert_array_equal(benchmark, result)
if not IS_PYPY:
# sys.getsizeof() is not valid on PyPy
class TestSizeOf(object):
def test_empty_array(self):
x = np.array([])
assert_(sys.getsizeof(x) > 0)
def check_array(self, dtype):
elem_size = dtype(0).itemsize
for length in [10, 50, 100, 500]:
x = np.arange(length, dtype=dtype)
assert_(sys.getsizeof(x) > length * elem_size)
def test_array_int32(self):
self.check_array(np.int32)
def test_array_int64(self):
self.check_array(np.int64)
def test_array_float32(self):
self.check_array(np.float32)
def test_array_float64(self):
self.check_array(np.float64)
def test_view(self):
d = np.ones(100)
assert_(sys.getsizeof(d[...]) < sys.getsizeof(d))
def test_reshape(self):
d = np.ones(100)
assert_(sys.getsizeof(d) < sys.getsizeof(d.reshape(100, 1, 1).copy()))
def test_resize(self):
d = np.ones(100)
old = sys.getsizeof(d)
d.resize(50)
assert_(old > sys.getsizeof(d))
d.resize(150)
assert_(old < sys.getsizeof(d))
def test_error(self):
d = np.ones(100)
assert_raises(TypeError, d.__sizeof__, "a")
class TestHashing(object):
def test_arrays_not_hashable(self):
x = np.ones(3)
assert_raises(TypeError, hash, x)
def test_collections_hashable(self):
x = np.array([])
assert_(not isinstance(x, collections.Hashable))
class TestArrayPriority(object):
# This will go away when __array_priority__ is settled; meanwhile
# it serves to check unintended changes.
op = operator
binary_ops = [
op.pow, op.add, op.sub, op.mul, op.floordiv, op.truediv, op.mod,
op.and_, op.or_, op.xor, op.lshift, op.rshift, op.mod, op.gt,
op.ge, op.lt, op.le, op.ne, op.eq
]
# See #7949. Don't use the "/" operator with the -3 switch, since python reports it
# as a DeprecationWarning
if sys.version_info[0] < 3 and not sys.py3kwarning:
binary_ops.append(op.div)
class Foo(np.ndarray):
__array_priority__ = 100.
def __new__(cls, *args, **kwargs):
return np.array(*args, **kwargs).view(cls)
class Bar(np.ndarray):
__array_priority__ = 101.
def __new__(cls, *args, **kwargs):
return np.array(*args, **kwargs).view(cls)
class Other(object):
__array_priority__ = 1000.
def _all(self, other):
return self.__class__()
__add__ = __radd__ = _all
__sub__ = __rsub__ = _all
__mul__ = __rmul__ = _all
__pow__ = __rpow__ = _all
__div__ = __rdiv__ = _all
__mod__ = __rmod__ = _all
__truediv__ = __rtruediv__ = _all
__floordiv__ = __rfloordiv__ = _all
__and__ = __rand__ = _all
__xor__ = __rxor__ = _all
__or__ = __ror__ = _all
__lshift__ = __rlshift__ = _all
__rshift__ = __rrshift__ = _all
__eq__ = _all
__ne__ = _all
__gt__ = _all
__ge__ = _all
__lt__ = _all
__le__ = _all
def test_ndarray_subclass(self):
a = np.array([1, 2])
b = self.Bar([1, 2])
for f in self.binary_ops:
msg = repr(f)
assert_(isinstance(f(a, b), self.Bar), msg)
assert_(isinstance(f(b, a), self.Bar), msg)
def test_ndarray_other(self):
a = np.array([1, 2])
b = self.Other()
for f in self.binary_ops:
msg = repr(f)
assert_(isinstance(f(a, b), self.Other), msg)
assert_(isinstance(f(b, a), self.Other), msg)
def test_subclass_subclass(self):
a = self.Foo([1, 2])
b = self.Bar([1, 2])
for f in self.binary_ops:
msg = repr(f)
assert_(isinstance(f(a, b), self.Bar), msg)
assert_(isinstance(f(b, a), self.Bar), msg)
def test_subclass_other(self):
a = self.Foo([1, 2])
b = self.Other()
for f in self.binary_ops:
msg = repr(f)
assert_(isinstance(f(a, b), self.Other), msg)
assert_(isinstance(f(b, a), self.Other), msg)
class TestBytestringArrayNonzero(object):
def test_empty_bstring_array_is_falsey(self):
assert_(not np.array([''], dtype=str))
def test_whitespace_bstring_array_is_falsey(self):
a = np.array(['spam'], dtype=str)
a[0] = ' \0\0'
assert_(not a)
def test_all_null_bstring_array_is_falsey(self):
a = np.array(['spam'], dtype=str)
a[0] = '\0\0\0\0'
assert_(not a)
def test_null_inside_bstring_array_is_truthy(self):
a = np.array(['spam'], dtype=str)
a[0] = ' \0 \0'
assert_(a)
class TestUnicodeArrayNonzero(object):
def test_empty_ustring_array_is_falsey(self):
assert_(not np.array([''], dtype=np.unicode))
def test_whitespace_ustring_array_is_falsey(self):
a = np.array(['eggs'], dtype=np.unicode)
a[0] = ' \0\0'
assert_(not a)
def test_all_null_ustring_array_is_falsey(self):
a = np.array(['eggs'], dtype=np.unicode)
a[0] = '\0\0\0\0'
assert_(not a)
def test_null_inside_ustring_array_is_truthy(self):
a = np.array(['eggs'], dtype=np.unicode)
a[0] = ' \0 \0'
assert_(a)
class TestCTypes(object):
def test_ctypes_is_available(self):
test_arr = np.array([[1, 2, 3], [4, 5, 6]])
assert_equal(ctypes, test_arr.ctypes._ctypes)
assert_equal(tuple(test_arr.ctypes.shape), (2, 3))
def test_ctypes_is_not_available(self):
from numpy.core import _internal
_internal.ctypes = None
try:
test_arr = np.array([[1, 2, 3], [4, 5, 6]])
assert_(isinstance(test_arr.ctypes._ctypes,
_internal._missing_ctypes))
assert_equal(tuple(test_arr.ctypes.shape), (2, 3))
finally:
_internal.ctypes = ctypes
def test_orderconverter_with_nonASCII_unicode_ordering():
# gh-7475
a = np.arange(5)
assert_raises(ValueError, a.flatten, order=u'\xe2')
def test_equal_override():
# gh-9153: ndarray.__eq__ uses special logic for structured arrays, which
# did not respect overrides with __array_priority__ or __array_ufunc__.
# The PR fixed this for __array_priority__ and __array_ufunc__ = None.
class MyAlwaysEqual(object):
def __eq__(self, other):
return "eq"
def __ne__(self, other):
return "ne"
class MyAlwaysEqualOld(MyAlwaysEqual):
__array_priority__ = 10000
class MyAlwaysEqualNew(MyAlwaysEqual):
__array_ufunc__ = None
array = np.array([(0, 1), (2, 3)], dtype='i4,i4')
for my_always_equal_cls in MyAlwaysEqualOld, MyAlwaysEqualNew:
my_always_equal = my_always_equal_cls()
assert_equal(my_always_equal == array, 'eq')
assert_equal(array == my_always_equal, 'eq')
assert_equal(my_always_equal != array, 'ne')
assert_equal(array != my_always_equal, 'ne')
def test_npymath_complex():
# Smoketest npymath functions
from numpy.core.multiarray_tests import (
npy_cabs, npy_carg)
funcs = {npy_cabs: np.absolute,
npy_carg: np.angle}
vals = (1, np.inf, -np.inf, np.nan)
types = (np.complex64, np.complex128, np.clongdouble)
for fun, npfun in funcs.items():
for x, y in itertools.product(vals, vals):
for t in types:
z = t(complex(x, y))
got = fun(z)
expected = npfun(z)
assert_allclose(got, expected)
def test_npymath_real():
# Smoketest npymath functions
from numpy.core.multiarray_tests import (
npy_log10, npy_cosh, npy_sinh, npy_tan, npy_tanh)
funcs = {npy_log10: np.log10,
npy_cosh: np.cosh,
npy_sinh: np.sinh,
npy_tan: np.tan,
npy_tanh: np.tanh}
vals = (1, np.inf, -np.inf, np.nan)
types = (np.float32, np.float64, np.longdouble)
with np.errstate(all='ignore'):
for fun, npfun in funcs.items():
for x, t in itertools.product(vals, types):
z = t(x)
got = fun(z)
expected = npfun(z)
assert_allclose(got, expected)
if __name__ == "__main__":
run_module_suite()
| bsd-3-clause |
nwjs/chromium.src | tools/perf/cli_tools/soundwave/worker_pool.py | 10 | 2508 | # Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Use a pool of workers to concurrently process a sequence of items.
Example usage:
from soundwave import worker_pool
def MyWorker(args):
# This is called once for each worker to initialize it.
def Process(item):
# This will be called once for each item processed by this worker.
# Hook up the Process function so the worker_pool module can find it.
worker_pool.Process = Process
args.processes = 10 # Set number of processes to be used by the pool.
worker_pool.Run('This might take a while: ', MyWorker, args, items)
"""
import logging
import multiprocessing
import sys
from core.external_modules import pandas
# Worker implementations override this value
Process = NotImplemented # pylint: disable=invalid-name
def ProgressIndicator(label, iterable, stream=None):
if stream is None:
stream = sys.stdout
stream.write(label)
stream.flush()
for _ in iterable:
stream.write('.')
stream.flush()
stream.write('\n')
stream.flush()
def Run(label, worker, args, items, stream=None):
"""Use a pool of workers to concurrently process a sequence of items.
Args:
label: A string displayed by the progress indicator when the job starts.
worker: A function with the worker implementation. See example above.
args: An argparse.Namespace() object used to initialize the workers. The
value of args.processes is the number of processes used by the pool.
items: An iterable with items to process by the pool of workers.
stream: A file-like object for the progress indicator output, defaults to
sys.stdout.
Returns:
Total time in seconds spent by the pool to process all items.
"""
pool = multiprocessing.Pool(
processes=args.processes, initializer=worker, initargs=(args,))
time_started = pandas.Timestamp.utcnow()
try:
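    # imap_unordered yields results lazily as workers finish, so the progress
    # dots advance while items are still being processed.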
ProgressIndicator(label, pool.imap_unordered(_Worker, items), stream=stream)
time_finished = pandas.Timestamp.utcnow()
finally:
# Ensure resources (e.g. db connections from workers) are freed up.
pool.terminate()
pool.join()
return (time_finished - time_started).total_seconds()
def _Worker(item):
try:
Process(item) # pylint: disable=not-callable
except KeyboardInterrupt:
pass
except:
logging.exception('Worker failed with exception')
raise
| bsd-3-clause |
ndaniels/Ammolite | scripts/figure-generators/smsdIsoCompare.py | 1 | 1534 | import matplotlib.pyplot as plt
from pylab import polyfit, poly1d, show, savefig
import sys
def isNumber( s):
try:
float(s)
return True
except ValueError:
return False
def makeGraph(X,Y, xName, yName, name="NoName"):
fig = plt.figure()
ax = fig.add_subplot(111)
superName = "Comparison of {} and {}".format(xName,yName)
outname = "{} from {}.png".format(superName,name)
fig.suptitle(superName)
ax.scatter(X,Y)
fit = polyfit(X,Y,1)
fit_fn = poly1d(fit) # fit_fn is now a function which takes in x and returns an estimate for y
ax.plot(X,Y, 'yo', X, fit_fn(X), '--k')
ax.set_xlabel('Size of MCS found by {}'.format(xName))
ax.set_ylabel('Size of MCS found by {}'.format(yName))
ax.text(1, 1, "y = {}*x + {}".format(fit[0], fit[1]))
fig.savefig(outname)
def buildIsoSMSDComparison( filename, outname="SMSD-IsoRank-comparison"):
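	# The input file appears to consist of blocks separated by
	# COMPARISON_DELIMITER lines: the first line of each block names the two
	# methods being compared, and the remaining lines hold whitespace-separated
	# pairs of MCS sizes.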
X, Y, xName, yName = [], [], "", ""
with open( filename) as f:
inComparison = False
nameLine = False
for line in f:
if line.split()[0] == "COMPARISON_DELIMITER":
if inComparison:
makeGraph( X, Y, xName, yName, filename)
inComparison = True
nameLine = True
X, Y = [], []
elif inComparison:
l = line.split()
if nameLine:
xName, yName = l[0], l[1]
nameLine = False
else:
X.append( float( l[0]))
Y.append( float( l[1]))
makeGraph( X, Y, xName, yName, filename)
if __name__ == "__main__":
	args = sys.argv
	if len(args) == 2:
		buildIsoSMSDComparison(args[1])
	elif len(args) >= 3:
		buildIsoSMSDComparison(args[1], args[2])
	else:
		print("usage: smsdIsoCompare.py <comparison-file> [output-name]")
| gpl-2.0 |
waterponey/scikit-learn | sklearn/neighbors/tests/test_kd_tree.py | 159 | 7852 | import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.neighbors.kd_tree import (KDTree, NeighborsHeap,
simultaneous_sort, kernel_norm,
nodeheap_sort, DTYPE, ITYPE)
from sklearn.neighbors.dist_metrics import DistanceMetric
from sklearn.utils.testing import SkipTest, assert_allclose
V = np.random.random((3, 3))
V = np.dot(V, V.T)
DIMENSION = 3
METRICS = {'euclidean': {},
'manhattan': {},
'chebyshev': {},
'minkowski': dict(p=3)}
def brute_force_neighbors(X, Y, k, metric, **kwargs):
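    # Reference implementation: compute the full pairwise distance matrix and
    # take the k smallest distances for each query point in Y; used to
    # validate the KDTree queries below.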
D = DistanceMetric.get_metric(metric, **kwargs).pairwise(Y, X)
ind = np.argsort(D, axis=1)[:, :k]
dist = D[np.arange(Y.shape[0])[:, None], ind]
return dist, ind
def test_kd_tree_query():
np.random.seed(0)
X = np.random.random((40, DIMENSION))
Y = np.random.random((10, DIMENSION))
def check_neighbors(dualtree, breadth_first, k, metric, kwargs):
kdt = KDTree(X, leaf_size=1, metric=metric, **kwargs)
dist1, ind1 = kdt.query(Y, k, dualtree=dualtree,
breadth_first=breadth_first)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric, **kwargs)
# don't check indices here: if there are any duplicate distances,
# the indices may not match. Distances should not have this problem.
assert_array_almost_equal(dist1, dist2)
for (metric, kwargs) in METRICS.items():
for k in (1, 3, 5):
for dualtree in (True, False):
for breadth_first in (True, False):
yield (check_neighbors,
dualtree, breadth_first,
k, metric, kwargs)
def test_kd_tree_query_radius(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
kdt = KDTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind = kdt.query_radius([query_pt], r + eps)[0]
i = np.where(rad <= r + eps)[0]
ind.sort()
i.sort()
assert_array_almost_equal(i, ind)
def test_kd_tree_query_radius_distance(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
kdt = KDTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind, dist = kdt.query_radius([query_pt], r + eps, return_distance=True)
ind = ind[0]
dist = dist[0]
d = np.sqrt(((query_pt - X[ind]) ** 2).sum(1))
assert_array_almost_equal(d, dist)
def compute_kernel_slow(Y, X, kernel, h):
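    # Brute-force kernel density estimate over all pairs, used as a reference
    # to validate KDTree.kernel_density below.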
d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
norm = kernel_norm(h, X.shape[1], kernel)
if kernel == 'gaussian':
return norm * np.exp(-0.5 * (d * d) / (h * h)).sum(-1)
elif kernel == 'tophat':
return norm * (d < h).sum(-1)
elif kernel == 'epanechnikov':
return norm * ((1.0 - (d * d) / (h * h)) * (d < h)).sum(-1)
elif kernel == 'exponential':
return norm * (np.exp(-d / h)).sum(-1)
elif kernel == 'linear':
return norm * ((1 - d / h) * (d < h)).sum(-1)
elif kernel == 'cosine':
return norm * (np.cos(0.5 * np.pi * d / h) * (d < h)).sum(-1)
else:
raise ValueError('kernel not recognized')
def test_kd_tree_kde(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
kdt = KDTree(X, leaf_size=10)
for kernel in ['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']:
for h in [0.01, 0.1, 1]:
dens_true = compute_kernel_slow(Y, X, kernel, h)
def check_results(kernel, h, atol, rtol, breadth_first):
dens = kdt.kernel_density(Y, h, atol=atol, rtol=rtol,
kernel=kernel,
breadth_first=breadth_first)
assert_allclose(dens, dens_true, atol=atol,
rtol=max(rtol, 1e-7))
for rtol in [0, 1E-5]:
for atol in [1E-6, 1E-2]:
for breadth_first in (True, False):
yield (check_results, kernel, h, atol, rtol,
breadth_first)
def test_gaussian_kde(n_samples=1000):
# Compare gaussian KDE results to scipy.stats.gaussian_kde
from scipy.stats import gaussian_kde
np.random.seed(0)
x_in = np.random.normal(0, 1, n_samples)
x_out = np.linspace(-5, 5, 30)
for h in [0.01, 0.1, 1]:
kdt = KDTree(x_in[:, None])
try:
gkde = gaussian_kde(x_in, bw_method=h / np.std(x_in))
except TypeError:
raise SkipTest("Old scipy, does not accept explicit bandwidth.")
dens_kdt = kdt.kernel_density(x_out[:, None], h) / n_samples
dens_gkde = gkde.evaluate(x_out)
assert_array_almost_equal(dens_kdt, dens_gkde, decimal=3)
def test_kd_tree_two_point(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
r = np.linspace(0, 1, 10)
kdt = KDTree(X, leaf_size=10)
D = DistanceMetric.get_metric("euclidean").pairwise(Y, X)
counts_true = [(D <= ri).sum() for ri in r]
def check_two_point(r, dualtree):
counts = kdt.two_point_correlation(Y, r=r, dualtree=dualtree)
assert_array_almost_equal(counts, counts_true)
for dualtree in (True, False):
yield check_two_point, r, dualtree
def test_kd_tree_pickle():
import pickle
np.random.seed(0)
X = np.random.random((10, 3))
kdt1 = KDTree(X, leaf_size=1)
ind1, dist1 = kdt1.query(X)
def check_pickle_protocol(protocol):
s = pickle.dumps(kdt1, protocol=protocol)
kdt2 = pickle.loads(s)
ind2, dist2 = kdt2.query(X)
assert_array_almost_equal(ind1, ind2)
assert_array_almost_equal(dist1, dist2)
for protocol in (0, 1, 2):
yield check_pickle_protocol, protocol
def test_neighbors_heap(n_pts=5, n_nbrs=10):
heap = NeighborsHeap(n_pts, n_nbrs)
for row in range(n_pts):
d_in = np.random.random(2 * n_nbrs).astype(DTYPE)
i_in = np.arange(2 * n_nbrs, dtype=ITYPE)
for d, i in zip(d_in, i_in):
heap.push(row, d, i)
ind = np.argsort(d_in)
d_in = d_in[ind]
i_in = i_in[ind]
d_heap, i_heap = heap.get_arrays(sort=True)
assert_array_almost_equal(d_in[:n_nbrs], d_heap[row])
assert_array_almost_equal(i_in[:n_nbrs], i_heap[row])
def test_node_heap(n_nodes=50):
vals = np.random.random(n_nodes).astype(DTYPE)
i1 = np.argsort(vals)
vals2, i2 = nodeheap_sort(vals)
assert_array_almost_equal(i1, i2)
assert_array_almost_equal(vals[i1], vals2)
def test_simultaneous_sort(n_rows=10, n_pts=201):
dist = np.random.random((n_rows, n_pts)).astype(DTYPE)
ind = (np.arange(n_pts) + np.zeros((n_rows, 1))).astype(ITYPE)
dist2 = dist.copy()
ind2 = ind.copy()
# simultaneous sort rows using function
simultaneous_sort(dist, ind)
# simultaneous sort rows using numpy
i = np.argsort(dist2, axis=1)
row_ind = np.arange(n_rows)[:, None]
dist2 = dist2[row_ind, i]
ind2 = ind2[row_ind, i]
assert_array_almost_equal(dist, dist2)
assert_array_almost_equal(ind, ind2)
| bsd-3-clause |
RPGOne/Skynet | scikit-learn-c604ac39ad0e5b066d964df3e8f31ba7ebda1e0e/sklearn/neighbors/tests/test_dist_metrics.py | 48 | 4949 | import itertools
import numpy as np
from numpy.testing import assert_array_almost_equal
import scipy
from scipy.spatial.distance import cdist
from sklearn.neighbors.dist_metrics import DistanceMetric
from nose import SkipTest
def cmp_version(version1, version2):
version1 = tuple(map(int, version1.split('.')[:2]))
version2 = tuple(map(int, version2.split('.')[:2]))
if version1 < version2:
return -1
elif version1 > version2:
return 1
else:
return 0
class TestMetrics:
def __init__(self, n1=20, n2=25, d=4, zero_frac=0.5,
rseed=0, dtype=np.float64):
np.random.seed(rseed)
self.X1 = np.random.random((n1, d)).astype(dtype)
self.X2 = np.random.random((n2, d)).astype(dtype)
# make boolean arrays: ones and zeros
self.X1_bool = self.X1.round(0)
self.X2_bool = self.X2.round(0)
V = np.random.random((d, d))
VI = np.dot(V, V.T)
self.metrics = {'euclidean': {},
'cityblock': {},
'minkowski': dict(p=(1, 1.5, 2, 3)),
'chebyshev': {},
'seuclidean': dict(V=(np.random.random(d),)),
'wminkowski': dict(p=(1, 1.5, 3),
w=(np.random.random(d),)),
'mahalanobis': dict(VI=(VI,)),
'hamming': {},
'canberra': {},
'braycurtis': {}}
self.bool_metrics = ['matching', 'jaccard', 'dice',
'kulsinski', 'rogerstanimoto', 'russellrao',
'sokalmichener', 'sokalsneath']
def test_cdist(self):
for metric, argdict in self.metrics.items():
keys = argdict.keys()
for vals in itertools.product(*argdict.values()):
kwargs = dict(zip(keys, vals))
D_true = cdist(self.X1, self.X2, metric, **kwargs)
yield self.check_cdist, metric, kwargs, D_true
for metric in self.bool_metrics:
D_true = cdist(self.X1_bool, self.X2_bool, metric)
yield self.check_cdist_bool, metric, D_true
def check_cdist(self, metric, kwargs, D_true):
if metric == 'canberra' and cmp_version(scipy.__version__, '0.9') <= 0:
raise SkipTest("Canberra distance incorrect in scipy < 0.9")
dm = DistanceMetric.get_metric(metric, **kwargs)
D12 = dm.pairwise(self.X1, self.X2)
assert_array_almost_equal(D12, D_true)
def check_cdist_bool(self, metric, D_true):
dm = DistanceMetric.get_metric(metric)
D12 = dm.pairwise(self.X1_bool, self.X2_bool)
assert_array_almost_equal(D12, D_true)
def test_pdist(self):
for metric, argdict in self.metrics.items():
keys = argdict.keys()
for vals in itertools.product(*argdict.values()):
kwargs = dict(zip(keys, vals))
D_true = cdist(self.X1, self.X1, metric, **kwargs)
yield self.check_pdist, metric, kwargs, D_true
for metric in self.bool_metrics:
D_true = cdist(self.X1_bool, self.X1_bool, metric)
yield self.check_pdist_bool, metric, D_true
def check_pdist(self, metric, kwargs, D_true):
if metric == 'canberra' and cmp_version(scipy.__version__, '0.9') <= 0:
raise SkipTest("Canberra distance incorrect in scipy < 0.9")
dm = DistanceMetric.get_metric(metric, **kwargs)
D12 = dm.pairwise(self.X1)
assert_array_almost_equal(D12, D_true)
def check_pdist_bool(self, metric, D_true):
dm = DistanceMetric.get_metric(metric)
D12 = dm.pairwise(self.X1_bool)
assert_array_almost_equal(D12, D_true)
def test_haversine_metric():
def haversine_slow(x1, x2):
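        # Great-circle (haversine) distance on the unit sphere; each point is
        # assumed to be (latitude, longitude) in radians.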
return 2 * np.arcsin(np.sqrt(np.sin(0.5 * (x1[0] - x2[0])) ** 2
+ np.cos(x1[0]) * np.cos(x2[0]) *
np.sin(0.5 * (x1[1] - x2[1])) ** 2))
X = np.random.random((10, 2))
haversine = DistanceMetric.get_metric("haversine")
D1 = haversine.pairwise(X)
D2 = np.zeros_like(D1)
for i, x1 in enumerate(X):
for j, x2 in enumerate(X):
D2[i, j] = haversine_slow(x1, x2)
assert_array_almost_equal(D1, D2)
assert_array_almost_equal(haversine.dist_to_rdist(D1),
np.sin(0.5 * D2) ** 2)
def test_pyfunc_metric():
def dist_func(x1, x2, p):
return np.sum((x1 - x2) ** p) ** (1. / p)
X = np.random.random((10, 3))
euclidean = DistanceMetric.get_metric("euclidean")
pyfunc = DistanceMetric.get_metric("pyfunc", func=dist_func, p=2)
D1 = euclidean.pairwise(X)
D2 = pyfunc.pairwise(X)
assert_array_almost_equal(D1, D2)
if __name__ == '__main__':
import nose
nose.runmodule()
| bsd-3-clause |
boomsbloom/dtm-fmri | DTM/for_gensim/lib/python2.7/site-packages/pandas/computation/ops.py | 7 | 15881 | """Operator classes for eval.
"""
import operator as op
from functools import partial
from datetime import datetime
import numpy as np
from pandas.types.common import is_list_like, is_scalar
import pandas as pd
from pandas.compat import PY3, string_types, text_type
import pandas.core.common as com
from pandas.formats.printing import pprint_thing, pprint_thing_encoded
from pandas.core.base import StringMixin
from pandas.computation.common import _ensure_decoded, _result_type_many
from pandas.computation.scope import _DEFAULT_GLOBALS
_reductions = 'sum', 'prod'
_unary_math_ops = ('sin', 'cos', 'exp', 'log', 'expm1', 'log1p',
'sqrt', 'sinh', 'cosh', 'tanh', 'arcsin', 'arccos',
'arctan', 'arccosh', 'arcsinh', 'arctanh', 'abs')
_binary_math_ops = ('arctan2',)
_mathops = _unary_math_ops + _binary_math_ops
_LOCAL_TAG = '__pd_eval_local_'
class UndefinedVariableError(NameError):
"""NameError subclass for local variables."""
def __init__(self, name, is_local):
if is_local:
msg = 'local variable {0!r} is not defined'
else:
msg = 'name {0!r} is not defined'
super(UndefinedVariableError, self).__init__(msg.format(name))
class Term(StringMixin):
def __new__(cls, name, env, side=None, encoding=None):
klass = Constant if not isinstance(name, string_types) else cls
supr_new = super(Term, klass).__new__
return supr_new(klass)
def __init__(self, name, env, side=None, encoding=None):
self._name = name
self.env = env
self.side = side
tname = text_type(name)
self.is_local = (tname.startswith(_LOCAL_TAG) or
tname in _DEFAULT_GLOBALS)
self._value = self._resolve_name()
self.encoding = encoding
@property
def local_name(self):
return self.name.replace(_LOCAL_TAG, '')
def __unicode__(self):
return pprint_thing(self.name)
def __call__(self, *args, **kwargs):
return self.value
def evaluate(self, *args, **kwargs):
return self
def _resolve_name(self):
res = self.env.resolve(self.local_name, is_local=self.is_local)
self.update(res)
if hasattr(res, 'ndim') and res.ndim > 2:
raise NotImplementedError("N-dimensional objects, where N > 2,"
" are not supported with eval")
return res
def update(self, value):
"""
search order for local (i.e., @variable) variables:
scope, key_variable
[('locals', 'local_name'),
('globals', 'local_name'),
('locals', 'key'),
('globals', 'key')]
"""
key = self.name
# if it's a variable name (otherwise a constant)
if isinstance(key, string_types):
self.env.swapkey(self.local_name, key, new_value=value)
self.value = value
@property
def isscalar(self):
return is_scalar(self._value)
@property
def type(self):
try:
# potentially very slow for large, mixed dtype frames
return self._value.values.dtype
except AttributeError:
try:
# ndarray
return self._value.dtype
except AttributeError:
# scalar
return type(self._value)
return_type = type
@property
def raw(self):
return pprint_thing('{0}(name={1!r}, type={2})'
''.format(self.__class__.__name__, self.name,
self.type))
@property
def is_datetime(self):
try:
t = self.type.type
except AttributeError:
t = self.type
return issubclass(t, (datetime, np.datetime64))
@property
def value(self):
return self._value
@value.setter
def value(self, new_value):
self._value = new_value
@property
def name(self):
return self._name
@name.setter
def name(self, new_name):
self._name = new_name
@property
def ndim(self):
return self._value.ndim
class Constant(Term):
def __init__(self, value, env, side=None, encoding=None):
super(Constant, self).__init__(value, env, side=side,
encoding=encoding)
def _resolve_name(self):
return self._name
@property
def name(self):
return self.value
def __unicode__(self):
# in python 2 str() of float
# can truncate shorter than repr()
return repr(self.name)
_bool_op_map = {'not': '~', 'and': '&', 'or': '|'}
class Op(StringMixin):
"""Hold an operator of arbitrary arity
"""
def __init__(self, op, operands, *args, **kwargs):
self.op = _bool_op_map.get(op, op)
self.operands = operands
self.encoding = kwargs.get('encoding', None)
def __iter__(self):
return iter(self.operands)
def __unicode__(self):
"""Print a generic n-ary operator and its operands using infix
notation"""
# recurse over the operands
parened = ('({0})'.format(pprint_thing(opr))
for opr in self.operands)
return pprint_thing(' {0} '.format(self.op).join(parened))
@property
def return_type(self):
# clobber types to bool if the op is a boolean operator
if self.op in (_cmp_ops_syms + _bool_ops_syms):
return np.bool_
return _result_type_many(*(term.type for term in com.flatten(self)))
@property
def has_invalid_return_type(self):
types = self.operand_types
obj_dtype_set = frozenset([np.dtype('object')])
return self.return_type == object and types - obj_dtype_set
@property
def operand_types(self):
return frozenset(term.type for term in com.flatten(self))
@property
def isscalar(self):
return all(operand.isscalar for operand in self.operands)
@property
def is_datetime(self):
try:
t = self.return_type.type
except AttributeError:
t = self.return_type
return issubclass(t, (datetime, np.datetime64))
def _in(x, y):
"""Compute the vectorized membership of ``x in y`` if possible, otherwise
use Python.
"""
try:
return x.isin(y)
except AttributeError:
if is_list_like(x):
try:
return y.isin(x)
except AttributeError:
pass
return x in y
def _not_in(x, y):
"""Compute the vectorized membership of ``x not in y`` if possible,
otherwise use Python.
"""
try:
return ~x.isin(y)
except AttributeError:
if is_list_like(x):
try:
return ~y.isin(x)
except AttributeError:
pass
return x not in y
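# For example (index and dtype omitted for brevity):
#   _in(pd.Series([1, 2, 3]), [2, 3])   -> Series([False, True, True])
#   _in(2, [1, 2, 3])                   -> True  (plain Python membership)
#   _not_in(pd.Series([1, 2, 3]), [2])  -> Series([True, False, True])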
_cmp_ops_syms = '>', '<', '>=', '<=', '==', '!=', 'in', 'not in'
_cmp_ops_funcs = op.gt, op.lt, op.ge, op.le, op.eq, op.ne, _in, _not_in
_cmp_ops_dict = dict(zip(_cmp_ops_syms, _cmp_ops_funcs))
_bool_ops_syms = '&', '|', 'and', 'or'
_bool_ops_funcs = op.and_, op.or_, op.and_, op.or_
_bool_ops_dict = dict(zip(_bool_ops_syms, _bool_ops_funcs))
_arith_ops_syms = '+', '-', '*', '/', '**', '//', '%'
_arith_ops_funcs = (op.add, op.sub, op.mul, op.truediv if PY3 else op.div,
op.pow, op.floordiv, op.mod)
_arith_ops_dict = dict(zip(_arith_ops_syms, _arith_ops_funcs))
_special_case_arith_ops_syms = '**', '//', '%'
_special_case_arith_ops_funcs = op.pow, op.floordiv, op.mod
_special_case_arith_ops_dict = dict(zip(_special_case_arith_ops_syms,
_special_case_arith_ops_funcs))
_binary_ops_dict = {}
for d in (_cmp_ops_dict, _bool_ops_dict, _arith_ops_dict):
_binary_ops_dict.update(d)
def _cast_inplace(terms, acceptable_dtypes, dtype):
"""Cast an expression inplace.
Parameters
----------
terms : Op
The expression that should cast.
acceptable_dtypes : list of acceptable numpy.dtype
Will not cast if term's dtype in this list.
.. versionadded:: 0.19.0
dtype : str or numpy.dtype
The dtype to cast to.
"""
dt = np.dtype(dtype)
for term in terms:
if term.type in acceptable_dtypes:
continue
try:
new_value = term.value.astype(dt)
except AttributeError:
new_value = dt.type(term.value)
term.update(new_value)
def is_term(obj):
return isinstance(obj, Term)
class BinOp(Op):
"""Hold a binary operator and its operands
Parameters
----------
op : str
left : Term or Op
right : Term or Op
"""
def __init__(self, op, lhs, rhs, **kwargs):
super(BinOp, self).__init__(op, (lhs, rhs))
self.lhs = lhs
self.rhs = rhs
self._disallow_scalar_only_bool_ops()
self.convert_values()
try:
self.func = _binary_ops_dict[op]
except KeyError:
# has to be made a list for python3
keys = list(_binary_ops_dict.keys())
raise ValueError('Invalid binary operator {0!r}, valid'
' operators are {1}'.format(op, keys))
def __call__(self, env):
"""Recursively evaluate an expression in Python space.
Parameters
----------
env : Scope
Returns
-------
object
The result of an evaluated expression.
"""
# handle truediv
if self.op == '/' and env.scope['truediv']:
self.func = op.truediv
# recurse over the left/right nodes
left = self.lhs(env)
right = self.rhs(env)
return self.func(left, right)
def evaluate(self, env, engine, parser, term_type, eval_in_python):
"""Evaluate a binary operation *before* being passed to the engine.
Parameters
----------
env : Scope
engine : str
parser : str
term_type : type
eval_in_python : list
Returns
-------
term_type
The "pre-evaluated" expression as an instance of ``term_type``
"""
if engine == 'python':
res = self(env)
else:
# recurse over the left/right nodes
left = self.lhs.evaluate(env, engine=engine, parser=parser,
term_type=term_type,
eval_in_python=eval_in_python)
right = self.rhs.evaluate(env, engine=engine, parser=parser,
term_type=term_type,
eval_in_python=eval_in_python)
# base cases
if self.op in eval_in_python:
res = self.func(left.value, right.value)
else:
res = pd.eval(self, local_dict=env, engine=engine,
parser=parser)
name = env.add_tmp(res)
return term_type(name, env=env)
def convert_values(self):
"""Convert datetimes to a comparable value in an expression.
"""
def stringify(value):
if self.encoding is not None:
encoder = partial(pprint_thing_encoded,
encoding=self.encoding)
else:
encoder = pprint_thing
return encoder(value)
lhs, rhs = self.lhs, self.rhs
if is_term(lhs) and lhs.is_datetime and is_term(rhs) and rhs.isscalar:
v = rhs.value
if isinstance(v, (int, float)):
v = stringify(v)
v = pd.Timestamp(_ensure_decoded(v))
if v.tz is not None:
v = v.tz_convert('UTC')
self.rhs.update(v)
if is_term(rhs) and rhs.is_datetime and is_term(lhs) and lhs.isscalar:
v = lhs.value
if isinstance(v, (int, float)):
v = stringify(v)
v = pd.Timestamp(_ensure_decoded(v))
if v.tz is not None:
v = v.tz_convert('UTC')
self.lhs.update(v)
def _disallow_scalar_only_bool_ops(self):
if ((self.lhs.isscalar or self.rhs.isscalar) and
self.op in _bool_ops_dict and
(not (issubclass(self.rhs.return_type, (bool, np.bool_)) and
issubclass(self.lhs.return_type, (bool, np.bool_))))):
raise NotImplementedError("cannot evaluate scalar only bool ops")
def isnumeric(dtype):
return issubclass(np.dtype(dtype).type, np.number)
class Div(BinOp):
"""Div operator to special case casting.
Parameters
----------
lhs, rhs : Term or Op
The Terms or Ops in the ``/`` expression.
truediv : bool
Whether or not to use true division. With Python 3 this happens
regardless of the value of ``truediv``.
"""
def __init__(self, lhs, rhs, truediv, *args, **kwargs):
super(Div, self).__init__('/', lhs, rhs, *args, **kwargs)
if not isnumeric(lhs.return_type) or not isnumeric(rhs.return_type):
raise TypeError("unsupported operand type(s) for {0}:"
" '{1}' and '{2}'".format(self.op,
lhs.return_type,
rhs.return_type))
if truediv or PY3:
            # do not upcast float32s to float64 unnecessarily
acceptable_dtypes = [np.float32, np.float_]
_cast_inplace(com.flatten(self), acceptable_dtypes, np.float_)
_unary_ops_syms = '+', '-', '~', 'not'
_unary_ops_funcs = op.pos, op.neg, op.invert, op.invert
_unary_ops_dict = dict(zip(_unary_ops_syms, _unary_ops_funcs))
class UnaryOp(Op):
"""Hold a unary operator and its operands
Parameters
----------
op : str
The token used to represent the operator.
operand : Term or Op
The Term or Op operand to the operator.
Raises
------
ValueError
* If no function associated with the passed operator token is found.
"""
def __init__(self, op, operand):
super(UnaryOp, self).__init__(op, (operand,))
self.operand = operand
try:
self.func = _unary_ops_dict[op]
except KeyError:
raise ValueError('Invalid unary operator {0!r}, valid operators '
'are {1}'.format(op, _unary_ops_syms))
def __call__(self, env):
operand = self.operand(env)
return self.func(operand)
def __unicode__(self):
return pprint_thing('{0}({1})'.format(self.op, self.operand))
@property
def return_type(self):
operand = self.operand
if operand.return_type == np.dtype('bool'):
return np.dtype('bool')
if (isinstance(operand, Op) and
(operand.op in _cmp_ops_dict or operand.op in _bool_ops_dict)):
return np.dtype('bool')
return np.dtype('int')
class MathCall(Op):
def __init__(self, func, args):
super(MathCall, self).__init__(func.name, args)
self.func = func
def __call__(self, env):
operands = [op(env) for op in self.operands]
with np.errstate(all='ignore'):
return self.func.func(*operands)
def __unicode__(self):
operands = map(str, self.operands)
return pprint_thing('{0}({1})'.format(self.op, ','.join(operands)))
class FuncNode(object):
def __init__(self, name):
if name not in _mathops:
raise ValueError(
"\"{0}\" is not a supported function".format(name))
self.name = name
self.func = getattr(np, name)
def __call__(self, *args):
return MathCall(self, args)
| mit |
themrmax/scikit-learn | sklearn/manifold/tests/test_t_sne.py | 11 | 25443 | import sys
from sklearn.externals.six.moves import cStringIO as StringIO
import numpy as np
import scipy.sparse as sp
from sklearn.neighbors import BallTree
from sklearn.utils.testing import assert_less_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import skip_if_32bit
from sklearn.utils import check_random_state
from sklearn.manifold.t_sne import _joint_probabilities
from sklearn.manifold.t_sne import _joint_probabilities_nn
from sklearn.manifold.t_sne import _kl_divergence
from sklearn.manifold.t_sne import _kl_divergence_bh
from sklearn.manifold.t_sne import _gradient_descent
from sklearn.manifold.t_sne import trustworthiness
from sklearn.manifold.t_sne import TSNE
from sklearn.manifold import _barnes_hut_tsne
from sklearn.manifold._utils import _binary_search_perplexity
from sklearn.datasets import make_blobs
from scipy.optimize import check_grad
from scipy.spatial.distance import pdist
from scipy.spatial.distance import squareform
from sklearn.metrics.pairwise import pairwise_distances
def test_gradient_descent_stops():
# Test stopping conditions of gradient descent.
class ObjectiveSmallGradient:
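        # Fake objective whose error starts at 1.0 and drops by 0.1 per call
        # while its gradient stays tiny (1e-5), so each stopping condition can
        # be triggered deterministically.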
def __init__(self):
self.it = -1
def __call__(self, _):
self.it += 1
return (10 - self.it) / 10.0, np.array([1e-5])
def flat_function(_):
return 0.0, np.ones(1)
# Gradient norm
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=100,
n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=1e-5, min_error_diff=0.0, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 1.0)
assert_equal(it, 0)
assert("gradient norm" in out)
# Error difference
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=100,
n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=0.0, min_error_diff=0.2, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 0.9)
assert_equal(it, 1)
assert("error difference" in out)
# Maximum number of iterations without improvement
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
flat_function, np.zeros(1), 0, n_iter=100,
n_iter_without_progress=10, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=0.0, min_error_diff=-1.0, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 0.0)
assert_equal(it, 11)
assert("did not make any progress" in out)
# Maximum number of iterations
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=11,
n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=0.0, min_error_diff=0.0, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 0.0)
assert_equal(it, 10)
assert("Iteration 10" in out)
def test_binary_search():
# Test if the binary search finds Gaussians with desired perplexity.
random_state = check_random_state(0)
distances = random_state.randn(50, 2).astype(np.float32)
# Distances shouldn't be negative
distances = np.abs(distances.dot(distances.T))
np.fill_diagonal(distances, 0.0)
desired_perplexity = 25.0
P = _binary_search_perplexity(distances, None, desired_perplexity,
verbose=0)
P = np.maximum(P, np.finfo(np.double).eps)
mean_perplexity = np.mean([np.exp(-np.sum(P[i] * np.log(P[i])))
for i in range(P.shape[0])])
assert_almost_equal(mean_perplexity, desired_perplexity, decimal=3)
def test_binary_search_neighbors():
# Binary perplexity search approximation.
# Should be approximately equal to the slow method when we use
# all points as neighbors.
n_samples = 500
desired_perplexity = 25.0
random_state = check_random_state(0)
distances = random_state.randn(n_samples, 2).astype(np.float32)
# Distances shouldn't be negative
distances = np.abs(distances.dot(distances.T))
np.fill_diagonal(distances, 0.0)
P1 = _binary_search_perplexity(distances, None, desired_perplexity,
verbose=0)
# Test that when we use all the neighbors the results are identical
k = n_samples
neighbors_nn = np.argsort(distances, axis=1)[:, :k].astype(np.int64)
P2 = _binary_search_perplexity(distances, neighbors_nn,
desired_perplexity, verbose=0)
assert_array_almost_equal(P1, P2, decimal=4)
# Test that the highest P_ij are the same when few neighbors are used
for k in np.linspace(80, n_samples, 10):
k = int(k)
topn = k * 10 # check the top 10 *k entries out of k * k entries
neighbors_nn = np.argsort(distances, axis=1)[:, :k].astype(np.int64)
P2k = _binary_search_perplexity(distances, neighbors_nn,
desired_perplexity, verbose=0)
idx = np.argsort(P1.ravel())[::-1]
P1top = P1.ravel()[idx][:topn]
P2top = P2k.ravel()[idx][:topn]
assert_array_almost_equal(P1top, P2top, decimal=2)
def test_binary_perplexity_stability():
# Binary perplexity search should be stable.
# The binary_search_perplexity had a bug wherein the P array
# was uninitialized, leading to sporadically failing tests.
k = 10
n_samples = 100
random_state = check_random_state(0)
distances = random_state.randn(n_samples, 2).astype(np.float32)
# Distances shouldn't be negative
distances = np.abs(distances.dot(distances.T))
np.fill_diagonal(distances, 0.0)
last_P = None
neighbors_nn = np.argsort(distances, axis=1)[:, :k].astype(np.int64)
for _ in range(100):
P = _binary_search_perplexity(distances.copy(), neighbors_nn.copy(),
3, verbose=0)
P1 = _joint_probabilities_nn(distances, neighbors_nn, 3, verbose=0)
if last_P is None:
last_P = P
last_P1 = P1
else:
assert_array_almost_equal(P, last_P, decimal=4)
assert_array_almost_equal(P1, last_P1, decimal=4)
def test_gradient():
# Test gradient of Kullback-Leibler divergence.
random_state = check_random_state(0)
n_samples = 50
n_features = 2
n_components = 2
alpha = 1.0
distances = random_state.randn(n_samples, n_features).astype(np.float32)
distances = distances.dot(distances.T)
np.fill_diagonal(distances, 0.0)
X_embedded = random_state.randn(n_samples, n_components)
P = _joint_probabilities(distances, desired_perplexity=25.0,
verbose=0)
def fun(params):
return _kl_divergence(params, P, alpha, n_samples, n_components)[0]
def grad(params):
return _kl_divergence(params, P, alpha, n_samples, n_components)[1]
assert_almost_equal(check_grad(fun, grad, X_embedded.ravel()), 0.0,
decimal=5)
def test_trustworthiness():
# Test trustworthiness score.
random_state = check_random_state(0)
# Affine transformation
X = random_state.randn(100, 2)
assert_equal(trustworthiness(X, 5.0 + X / 10.0), 1.0)
# Randomly shuffled
X = np.arange(100).reshape(-1, 1)
X_embedded = X.copy()
random_state.shuffle(X_embedded)
assert_less(trustworthiness(X, X_embedded), 0.6)
# Completely different
X = np.arange(5).reshape(-1, 1)
X_embedded = np.array([[0], [2], [4], [1], [3]])
assert_almost_equal(trustworthiness(X, X_embedded, n_neighbors=1), 0.2)
def test_preserve_trustworthiness_approximately():
# Nearest neighbors should be preserved approximately.
random_state = check_random_state(0)
    # The Barnes-Hut approximation uses a different method to estimate
    # P_ij, using only a fixed number of nearest neighbors instead of all
    # points (k = 3 * perplexity). With perplexity=50 and only 100 samples,
    # essentially every other point is used as a neighbor, so both methods
    # should preserve the nearest neighbors equally well.
n_components = 2
methods = ['exact', 'barnes_hut']
X = random_state.randn(100, n_components).astype(np.float32)
for init in ('random', 'pca'):
for method in methods:
tsne = TSNE(n_components=n_components, perplexity=50,
learning_rate=100.0, init=init, random_state=0,
method=method)
X_embedded = tsne.fit_transform(X)
T = trustworthiness(X, X_embedded, n_neighbors=1)
assert_almost_equal(T, 1.0, decimal=1)
def test_optimization_minimizes_kl_divergence():
"""t-SNE should give a lower KL divergence with more iterations."""
random_state = check_random_state(0)
X, _ = make_blobs(n_features=3, random_state=random_state)
kl_divergences = []
for n_iter in [200, 250, 300]:
tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0,
n_iter=n_iter, random_state=0)
tsne.fit_transform(X)
kl_divergences.append(tsne.kl_divergence_)
assert_less_equal(kl_divergences[1], kl_divergences[0])
assert_less_equal(kl_divergences[2], kl_divergences[1])
def test_fit_csr_matrix():
# X can be a sparse matrix.
random_state = check_random_state(0)
X = random_state.randn(100, 2)
X[(np.random.randint(0, 100, 50), np.random.randint(0, 2, 50))] = 0.0
X_csr = sp.csr_matrix(X)
tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0,
random_state=0, method='exact')
X_embedded = tsne.fit_transform(X_csr)
assert_almost_equal(trustworthiness(X_csr, X_embedded, n_neighbors=1), 1.0,
decimal=1)
def test_preserve_trustworthiness_approximately_with_precomputed_distances():
# Nearest neighbors should be preserved approximately.
random_state = check_random_state(0)
X = random_state.randn(100, 2)
D = squareform(pdist(X), "sqeuclidean")
tsne = TSNE(n_components=2, perplexity=2, learning_rate=100.0,
metric="precomputed", random_state=0, verbose=0)
X_embedded = tsne.fit_transform(D)
assert_almost_equal(trustworthiness(D, X_embedded, n_neighbors=1,
precomputed=True), 1.0, decimal=1)
def test_early_exaggeration_too_small():
# Early exaggeration factor must be >= 1.
tsne = TSNE(early_exaggeration=0.99)
assert_raises_regexp(ValueError, "early_exaggeration .*",
tsne.fit_transform, np.array([[0.0]]))
def test_too_few_iterations():
# Number of gradient descent iterations must be at least 200.
tsne = TSNE(n_iter=199)
assert_raises_regexp(ValueError, "n_iter .*", tsne.fit_transform,
np.array([[0.0]]))
def test_non_square_precomputed_distances():
# Precomputed distance matrices must be square matrices.
tsne = TSNE(metric="precomputed")
assert_raises_regexp(ValueError, ".* square distance matrix",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_init_not_available():
# 'init' must be 'pca', 'random', or numpy array.
m = "'init' must be 'pca', 'random', or a numpy array"
assert_raises_regexp(ValueError, m, TSNE, init="not available")
def test_init_ndarray():
# Initialize TSNE with ndarray and test fit
tsne = TSNE(init=np.zeros((100, 2)))
X_embedded = tsne.fit_transform(np.ones((100, 5)))
assert_array_equal(np.zeros((100, 2)), X_embedded)
def test_init_ndarray_precomputed():
# Initialize TSNE with ndarray and metric 'precomputed'
# Make sure no FutureWarning is thrown from _fit
tsne = TSNE(init=np.zeros((100, 2)), metric="precomputed")
tsne.fit(np.zeros((100, 100)))
def test_distance_not_available():
# 'metric' must be valid.
tsne = TSNE(metric="not available")
assert_raises_regexp(ValueError, "Unknown metric not available.*",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_pca_initialization_not_compatible_with_precomputed_kernel():
# Precomputed distance matrices must be square matrices.
tsne = TSNE(metric="precomputed", init="pca")
assert_raises_regexp(ValueError, "The parameter init=\"pca\" cannot be "
"used with metric=\"precomputed\".",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_answer_gradient_two_points():
# Test the tree with only a single set of children.
#
# These tests & answers have been checked against the reference
# implementation by LvdM.
pos_input = np.array([[1.0, 0.0], [0.0, 1.0]])
pos_output = np.array([[-4.961291e-05, -1.072243e-04],
[9.259460e-05, 2.702024e-04]])
neighbors = np.array([[1],
[0]])
grad_output = np.array([[-2.37012478e-05, -6.29044398e-05],
[2.37012478e-05, 6.29044398e-05]])
_run_answer_test(pos_input, pos_output, neighbors, grad_output)
def test_answer_gradient_four_points():
# Four points tests the tree with multiple levels of children.
#
# These tests & answers have been checked against the reference
# implementation by LvdM.
pos_input = np.array([[1.0, 0.0], [0.0, 1.0],
[5.0, 2.0], [7.3, 2.2]])
pos_output = np.array([[6.080564e-05, -7.120823e-05],
[-1.718945e-04, -4.000536e-05],
[-2.271720e-04, 8.663310e-05],
[-1.032577e-04, -3.582033e-05]])
neighbors = np.array([[1, 2, 3],
[0, 2, 3],
[1, 0, 3],
[1, 2, 0]])
grad_output = np.array([[5.81128448e-05, -7.78033454e-06],
[-5.81526851e-05, 7.80976444e-06],
[4.24275173e-08, -3.69569698e-08],
[-2.58720939e-09, 7.52706374e-09]])
_run_answer_test(pos_input, pos_output, neighbors, grad_output)
def test_skip_num_points_gradient():
# Test the kwargs option skip_num_points.
#
    # skip_num_points should make it such that the Barnes-Hut gradient
    # is not calculated for indices below skip_num_points.
# Aside from skip_num_points=2 and the first two gradient rows
# being set to zero, these data points are the same as in
# test_answer_gradient_four_points()
pos_input = np.array([[1.0, 0.0], [0.0, 1.0],
[5.0, 2.0], [7.3, 2.2]])
pos_output = np.array([[6.080564e-05, -7.120823e-05],
[-1.718945e-04, -4.000536e-05],
[-2.271720e-04, 8.663310e-05],
[-1.032577e-04, -3.582033e-05]])
neighbors = np.array([[1, 2, 3],
[0, 2, 3],
[1, 0, 3],
[1, 2, 0]])
grad_output = np.array([[0.0, 0.0],
[0.0, 0.0],
[4.24275173e-08, -3.69569698e-08],
[-2.58720939e-09, 7.52706374e-09]])
_run_answer_test(pos_input, pos_output, neighbors, grad_output,
False, 0.1, 2)
def _run_answer_test(pos_input, pos_output, neighbors, grad_output,
verbose=False, perplexity=0.1, skip_num_points=0):
distances = pairwise_distances(pos_input).astype(np.float32)
args = distances, perplexity, verbose
pos_output = pos_output.astype(np.float32)
neighbors = neighbors.astype(np.int64)
pij_input = _joint_probabilities(*args)
pij_input = squareform(pij_input).astype(np.float32)
grad_bh = np.zeros(pos_output.shape, dtype=np.float32)
_barnes_hut_tsne.gradient(pij_input, pos_output, neighbors,
grad_bh, 0.5, 2, 1, skip_num_points=0)
assert_array_almost_equal(grad_bh, grad_output, decimal=4)
def test_verbose():
# Verbose options write to stdout.
random_state = check_random_state(0)
tsne = TSNE(verbose=2)
X = random_state.randn(5, 2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
tsne.fit_transform(X)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert("[t-SNE]" in out)
assert("Computing pairwise distances" in out)
assert("Computed conditional probabilities" in out)
assert("Mean sigma" in out)
assert("Finished" in out)
assert("early exaggeration" in out)
assert("Finished" in out)
def test_chebyshev_metric():
# t-SNE should allow metrics that cannot be squared (issue #3526).
random_state = check_random_state(0)
tsne = TSNE(metric="chebyshev")
X = random_state.randn(5, 2)
tsne.fit_transform(X)
def test_reduction_to_one_component():
# t-SNE should allow reduction to one component (issue #4154).
random_state = check_random_state(0)
tsne = TSNE(n_components=1)
X = random_state.randn(5, 2)
X_embedded = tsne.fit(X).embedding_
assert(np.all(np.isfinite(X_embedded)))
def test_no_sparse_on_barnes_hut():
# No sparse matrices allowed on Barnes-Hut.
random_state = check_random_state(0)
X = random_state.randn(100, 2)
X[(np.random.randint(0, 100, 50), np.random.randint(0, 2, 50))] = 0.0
X_csr = sp.csr_matrix(X)
tsne = TSNE(n_iter=199, method='barnes_hut')
assert_raises_regexp(TypeError, "A sparse matrix was.*",
tsne.fit_transform, X_csr)
def test_64bit():
# Ensure 64bit arrays are handled correctly.
random_state = check_random_state(0)
methods = ['barnes_hut', 'exact']
for method in methods:
for dt in [np.float32, np.float64]:
X = random_state.randn(100, 2).astype(dt)
tsne = TSNE(n_components=2, perplexity=2, learning_rate=100.0,
random_state=0, method=method)
tsne.fit_transform(X)
def test_barnes_hut_angle():
# When Barnes-Hut's angle=0 this corresponds to the exact method.
angle = 0.0
perplexity = 10
n_samples = 100
for n_components in [2, 3]:
n_features = 5
degrees_of_freedom = float(n_components - 1.0)
random_state = check_random_state(0)
distances = random_state.randn(n_samples, n_features)
distances = distances.astype(np.float32)
distances = distances.dot(distances.T)
np.fill_diagonal(distances, 0.0)
params = random_state.randn(n_samples, n_components)
P = _joint_probabilities(distances, perplexity, False)
kl, gradex = _kl_divergence(params, P, degrees_of_freedom, n_samples,
n_components)
k = n_samples - 1
bt = BallTree(distances)
distances_nn, neighbors_nn = bt.query(distances, k=k + 1)
neighbors_nn = neighbors_nn[:, 1:]
Pbh = _joint_probabilities_nn(distances, neighbors_nn,
perplexity, False)
kl, gradbh = _kl_divergence_bh(params, Pbh, neighbors_nn,
degrees_of_freedom, n_samples,
n_components, angle=angle,
skip_num_points=0, verbose=False)
assert_array_almost_equal(Pbh, P, decimal=5)
assert_array_almost_equal(gradex, gradbh, decimal=5)
def test_quadtree_similar_point():
# Introduce a point into a quad tree where a similar point already exists.
    # If the insertion does not complete, this test hangs instead of failing.
Xs = []
# check the case where points are actually different
Xs.append(np.array([[1, 2], [3, 4]], dtype=np.float32))
# check the case where points are the same on X axis
Xs.append(np.array([[1.0, 2.0], [1.0, 3.0]], dtype=np.float32))
# check the case where points are arbitrarily close on X axis
Xs.append(np.array([[1.00001, 2.0], [1.00002, 3.0]], dtype=np.float32))
# check the case where points are the same on Y axis
Xs.append(np.array([[1.0, 2.0], [3.0, 2.0]], dtype=np.float32))
# check the case where points are arbitrarily close on Y axis
Xs.append(np.array([[1.0, 2.00001], [3.0, 2.00002]], dtype=np.float32))
# check the case where points are arbitrarily close on both axes
Xs.append(np.array([[1.00001, 2.00001], [1.00002, 2.00002]],
dtype=np.float32))
# check the case where points are arbitrarily close on both axes
# close to machine epsilon - x axis
Xs.append(np.array([[1, 0.0003817754041], [2, 0.0003817753750]],
dtype=np.float32))
# check the case where points are arbitrarily close on both axes
# close to machine epsilon - y axis
Xs.append(np.array([[0.0003817754041, 1.0], [0.0003817753750, 2.0]],
dtype=np.float32))
for X in Xs:
counts = np.zeros(3, dtype='int64')
_barnes_hut_tsne.check_quadtree(X, counts)
m = "Tree consistency failed: unexpected number of points at root node"
assert_equal(counts[0], counts[1], m)
m = "Tree consistency failed: unexpected number of points on the tree"
assert_equal(counts[0], counts[2], m)
def test_index_offset():
# Make sure translating between 1D and N-D indices are preserved
assert_equal(_barnes_hut_tsne.test_index2offset(), 1)
assert_equal(_barnes_hut_tsne.test_index_offset(), 1)
@skip_if_32bit
def test_n_iter_without_progress():
# Use a dummy negative n_iter_without_progress and check output on stdout
random_state = check_random_state(0)
X = random_state.randn(100, 2)
tsne = TSNE(n_iter_without_progress=-1, verbose=2,
random_state=1, method='exact')
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
tsne.fit_transform(X)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
# The output needs to contain the value of n_iter_without_progress
assert_in("did not make any progress during the "
"last -1 episodes. Finished.", out)
def test_min_grad_norm():
# Make sure that the parameter min_grad_norm is used correctly
random_state = check_random_state(0)
X = random_state.randn(100, 2)
min_grad_norm = 0.002
tsne = TSNE(min_grad_norm=min_grad_norm, verbose=2,
random_state=0, method='exact')
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
tsne.fit_transform(X)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
lines_out = out.split('\n')
# extract the gradient norm from the verbose output
gradient_norm_values = []
for line in lines_out:
        # Once the computation is finished, the last gradient norm value is
        # simply repeated, so we do not need to store it
if 'Finished' in line:
break
start_grad_norm = line.find('gradient norm')
if start_grad_norm >= 0:
line = line[start_grad_norm:]
line = line.replace('gradient norm = ', '')
gradient_norm_values.append(float(line))
# Compute how often the gradient norm is smaller than min_grad_norm
gradient_norm_values = np.array(gradient_norm_values)
n_smaller_gradient_norms = \
len(gradient_norm_values[gradient_norm_values <= min_grad_norm])
    # The gradient norm can be smaller than min_grad_norm at most once,
    # because the optimization stops the moment it becomes smaller
assert_less_equal(n_smaller_gradient_norms, 1)
def test_accessible_kl_divergence():
# Ensures that the accessible kl_divergence matches the computed value
random_state = check_random_state(0)
X = random_state.randn(100, 2)
tsne = TSNE(n_iter_without_progress=2, verbose=2,
random_state=0, method='exact')
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
tsne.fit_transform(X)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
# The output needs to contain the accessible kl_divergence as the error at
# the last iteration
for line in out.split('\n')[::-1]:
if 'Iteration' in line:
_, _, error = line.partition('error = ')
if error:
error, _, _ = error.partition(',')
break
assert_almost_equal(tsne.kl_divergence_, float(error), decimal=5)
| bsd-3-clause |
hirofumi0810/asr_preprocessing | swbd/main.py | 1 | 15071 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
"""Make dataset for the End-to-End model (Switchboard corpus).
Note that feature extraction depends on transcripts.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from os.path import join, isfile
import sys
import argparse
from tqdm import tqdm
import numpy as np
import pandas as pd
from collections import Counter
import pickle
sys.path.append('../')
from swbd.path import Path
from swbd.input_data import read_audio
from swbd.labels.ldc97s62.character import read_trans
from swbd.labels.fisher.character import read_trans as read_trans_fisher
from swbd.labels.eval2000.stm import read_stm
from utils.util import mkdir_join
from utils.inputs.wav_split import split_wav
from utils.dataset import add_element
parser = argparse.ArgumentParser()
parser.add_argument('--swbd_audio_path', type=str,
help='path to LDC97S62 audio files')
parser.add_argument('--swbd_trans_path', type=str,
                    help='path to LDC97S62 transcription files')
parser.add_argument('--fisher_path', type=str, help='path to Fisher dataset')
parser.add_argument('--eval2000_audio_path', type=str,
help='path to audio files of eval2000 dataset')
parser.add_argument('--eval2000_trans_path', type=str,
help='path to transcript files of eval2000 dataset')
parser.add_argument('--dataset_save_path', type=str,
help='path to save dataset')
parser.add_argument('--feature_save_path', type=str,
help='path to save input features')
parser.add_argument('--run_root_path', type=str,
help='path to run this script')
parser.add_argument('--tool', type=str,
choices=['htk', 'python_speech_features', 'librosa'])
parser.add_argument('--wav_save_path', type=str, help='path to wav files.')
parser.add_argument('--htk_save_path', type=str, help='path to htk files.')
parser.add_argument('--normalize', type=str,
choices=['global', 'speaker', 'utterance', 'no'])
parser.add_argument('--save_format', type=str, choices=['numpy', 'htk', 'wav'])
parser.add_argument('--feature_type', type=str, choices=['fbank', 'mfcc'])
parser.add_argument('--channels', type=int,
help='the number of frequency channels')
parser.add_argument('--window', type=float,
help='window width to extract features')
parser.add_argument('--slide', type=float, help='extract features per slide')
parser.add_argument('--energy', type=int, help='if 1, add the energy feature')
parser.add_argument('--delta', type=int, help='if 1, add the energy feature')
parser.add_argument('--deltadelta', type=int,
help='if 1, double delta features are also extracted')
parser.add_argument('--fisher', type=int,
help='If True, create large-size dataset (2000h).')
args = parser.parse_args()
path = Path(swbd_audio_path=args.swbd_audio_path,
swbd_trans_path=args.swbd_trans_path,
fisher_path=args.fisher_path,
eval2000_audio_path=args.eval2000_audio_path,
eval2000_trans_path=args.eval2000_trans_path,
wav_save_path=args.wav_save_path,
htk_save_path=args.htk_save_path,
run_root_path='./')
CONFIG = {
'feature_type': args.feature_type,
'channels': args.channels,
'sampling_rate': 8000,
'window': args.window,
'slide': args.slide,
'energy': bool(args.energy),
'delta': bool(args.delta),
'deltadelta': bool(args.deltadelta)
}
if args.save_format == 'htk':
assert args.tool == 'htk'
def main(data_size):
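    # Rough pipeline for each data_size: (1) read transcripts into per-speaker
    # label dicts, (2) extract and normalize acoustic features (or split WAV
    # files per utterance), then (3) dump per-utterance dataset CSV files for
    # each label unit.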
print('=' * 50)
print(' data_size: %s' % data_size)
print('=' * 50)
########################################
# labels
########################################
print('=> Processing transcripts...')
speaker_dict_dict = {} # dict of speaker_dict
print('---------- train ----------')
if data_size == '300h':
speaker_dict_dict['train'] = read_trans(
label_paths=path.trans(corpus='swbd'),
word_boundary_paths=path.word(corpus='swbd'),
run_root_path='./',
vocab_file_save_path=mkdir_join('./config/vocab_files'),
save_vocab_file=True)
elif data_size == '2000h':
speaker_dict_a, char_set_a, char_capital_set_a, word_count_dict_a = read_trans_fisher(
label_paths=path.trans(corpus='fisher'),
target_speaker='A')
speaker_dict_b, char_set_b, char_capital_set_b, word_count_dict_b = read_trans_fisher(
label_paths=path.trans(corpus='fisher'),
target_speaker='B')
        # Merge the two speaker dictionaries (channels A and B)
speaker_dict = merge_dicts([speaker_dict_a, speaker_dict_b])
char_set = char_set_a | char_set_b
char_capital_set = char_capital_set_a | char_capital_set_b
word_count_dict_fisher = dict(
Counter(word_count_dict_a) + Counter(word_count_dict_b))
speaker_dict_dict['train'] = read_trans(
label_paths=path.trans(corpus='swbd'),
word_boundary_paths=path.word(corpus='swbd'),
run_root_path='./',
vocab_file_save_path=mkdir_join('./config/vocab_files'),
save_vocab_file=True,
speaker_dict_fisher=speaker_dict,
char_set=char_set,
char_capital_set=char_capital_set,
word_count_dict=word_count_dict_fisher)
del speaker_dict
print('---------- eval2000 (swbd + ch) ----------')
speaker_dict_dict['eval2000_swbd'], speaker_dict_dict['eval2000_ch'] = read_stm(
stm_path=path.stm_path,
pem_path=path.pem_path,
glm_path=path.glm_path,
run_root_path='./')
########################################
# inputs
########################################
print('\n=> Processing input data...')
input_save_path = mkdir_join(
args.feature_save_path, args.save_format, data_size)
for data_type in ['train', 'eval2000_swbd', 'eval2000_ch']:
print('---------- %s ----------' % data_type)
if isfile(join(input_save_path, data_type, 'complete.txt')):
print('Already exists.')
else:
if args.save_format == 'wav':
########################################
# Split WAV files per utterance
########################################
if data_type == 'train':
wav_paths = path.wav(corpus='swbd')
if data_size == '2000h':
wav_paths += path.wav(corpus='fisher')
else:
wav_paths = path.wav(corpus=data_type)
split_wav(wav_paths=wav_paths,
speaker_dict=speaker_dict_dict[data_type],
save_path=mkdir_join(input_save_path, data_type))
# NOTE: ex.) save_path:
# swbd/feature/save_format/data_size/data_type/speaker/utt_name.npy
elif args.save_format in ['numpy', 'htk']:
if data_type == 'train':
if args.tool == 'htk':
audio_paths = path.htk(corpus='swbd')
if data_size == '2000h':
audio_paths += path.htk(corpus='fisher')
else:
audio_paths = path.wav(corpus='swbd')
if data_size == '2000h':
audio_paths += path.wav(corpus='fisher')
is_training = True
global_mean, global_std = None, None
else:
if args.tool == 'htk':
audio_paths = path.htk(corpus=data_type)
else:
audio_paths = path.wav(corpus=data_type)
is_training = False
# Load statistics over train dataset
global_mean = np.load(
join(input_save_path, 'train/global_mean.npy'))
global_std = np.load(
join(input_save_path, 'train/global_std.npy'))
read_audio(audio_paths=audio_paths,
tool=args.tool,
config=CONFIG,
normalize=args.normalize,
speaker_dict=speaker_dict_dict[data_type],
is_training=is_training,
save_path=mkdir_join(input_save_path, data_type),
save_format=args.save_format,
global_mean=global_mean,
global_std=global_std)
# NOTE: ex.) save_path:
# swbd/feature/save_format/data_size/data_type/speaker/*.npy
# Make a confirmation file to prove that dataset was saved
# correctly
with open(join(input_save_path, data_type, 'complete.txt'), 'w') as f:
f.write('')
########################################
# dataset (csv)
########################################
print('\n=> Saving dataset files...')
dataset_save_path = mkdir_join(
args.dataset_save_path, args.save_format, data_size, data_type)
print('---------- %s ----------' % data_type)
df_columns = ['frame_num', 'input_path', 'transcript']
df_char = pd.DataFrame([], columns=df_columns)
df_char_capital = pd.DataFrame([], columns=df_columns)
df_word_freq1 = pd.DataFrame([], columns=df_columns)
df_word_freq5 = pd.DataFrame([], columns=df_columns)
df_word_freq10 = pd.DataFrame([], columns=df_columns)
df_word_freq15 = pd.DataFrame([], columns=df_columns)
with open(join(input_save_path, data_type, 'frame_num.pickle'), 'rb') as f:
frame_num_dict = pickle.load(f)
utt_count = 0
df_char_list, df_char_capital_list = [], []
df_word_freq1_list, df_word_freq5_list = [], []
df_word_freq10_list, df_word_freq15_list = [], []
speaker_dict = speaker_dict_dict[data_type]
for speaker, utt_dict in tqdm(speaker_dict.items()):
for utt_index, utt_info in utt_dict.items():
if args.save_format == 'numpy':
input_utt_save_path = join(
input_save_path, data_type, speaker, speaker + '_' + utt_index + '.npy')
elif args.save_format == 'htk':
input_utt_save_path = join(
input_save_path, data_type, speaker, speaker + '_' + utt_index + '.htk')
elif args.save_format == 'wav':
input_utt_save_path = path.utt2wav(utt_index)
else:
raise ValueError('save_format must be "numpy", "htk" or "wav".')
assert isfile(input_utt_save_path)
frame_num = frame_num_dict[speaker + '_' + utt_index]
char_indices, char_indices_capital, word_freq1_indices = utt_info[2:5]
word_freq5_indices, word_freq10_indices, word_freq15_indices = utt_info[5:8]
df_char = add_element(
df_char, [frame_num, input_utt_save_path, char_indices])
df_char_capital = add_element(
df_char_capital, [frame_num, input_utt_save_path, char_indices_capital])
df_word_freq1 = add_element(
df_word_freq1, [frame_num, input_utt_save_path, word_freq1_indices])
df_word_freq5 = add_element(
df_word_freq5, [frame_num, input_utt_save_path, word_freq5_indices])
df_word_freq10 = add_element(
df_word_freq10, [frame_num, input_utt_save_path, word_freq10_indices])
df_word_freq15 = add_element(
df_word_freq15, [frame_num, input_utt_save_path, word_freq15_indices])
utt_count += 1
# Reset
if utt_count == 10000:
df_char_list.append(df_char)
df_char_capital_list.append(df_char_capital)
df_word_freq1_list.append(df_word_freq1)
df_word_freq5_list.append(df_word_freq5)
df_word_freq10_list.append(df_word_freq10)
df_word_freq15_list.append(df_word_freq15)
df_char = pd.DataFrame([], columns=df_columns)
df_char_capital = pd.DataFrame([], columns=df_columns)
df_word_freq1 = pd.DataFrame([], columns=df_columns)
df_word_freq5 = pd.DataFrame([], columns=df_columns)
df_word_freq10 = pd.DataFrame([], columns=df_columns)
df_word_freq15 = pd.DataFrame([], columns=df_columns)
utt_count = 0
# Last dataframe
df_char_list.append(df_char)
df_char_capital_list.append(df_char_capital)
df_word_freq1_list.append(df_word_freq1)
df_word_freq5_list.append(df_word_freq5)
df_word_freq10_list.append(df_word_freq10)
df_word_freq15_list.append(df_word_freq15)
# Concatenate all dataframes
df_char = df_char_list[0]
df_char_capital = df_char_capital_list[0]
df_word_freq1 = df_word_freq1_list[0]
df_word_freq5 = df_word_freq5_list[0]
df_word_freq10 = df_word_freq10_list[0]
df_word_freq15 = df_word_freq15_list[0]
for df_i in df_char_list[1:]:
df_char = pd.concat([df_char, df_i], axis=0)
for df_i in df_char_capital_list[1:]:
df_char_capital = pd.concat([df_char_capital, df_i], axis=0)
for df_i in df_word_freq1_list[1:]:
df_word_freq1 = pd.concat([df_word_freq1, df_i], axis=0)
for df_i in df_word_freq5_list[1:]:
df_word_freq5 = pd.concat([df_word_freq5, df_i], axis=0)
for df_i in df_word_freq10_list[1:]:
df_word_freq10 = pd.concat([df_word_freq10, df_i], axis=0)
for df_i in df_word_freq15_list[1:]:
df_word_freq15 = pd.concat([df_word_freq15, df_i], axis=0)
df_char.to_csv(join(dataset_save_path, 'character.csv'))
df_char_capital.to_csv(
join(dataset_save_path, 'character_capital_divide.csv'))
df_word_freq1.to_csv(join(dataset_save_path, 'word_freq1.csv'))
df_word_freq5.to_csv(join(dataset_save_path, 'word_freq5.csv'))
df_word_freq10.to_csv(join(dataset_save_path, 'word_freq10.csv'))
df_word_freq15.to_csv(join(dataset_save_path, 'word_freq15.csv'))
def merge_dicts(dicts):
return {k: v for dic in dicts for k, v in dic.items()}
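# Illustrative note (added comment, not part of the original script): merge_dicts
# builds one dict from many, with later dicts winning on key collisions, e.g.
#   merge_dicts([{'sp1': 1, 'x': 0}, {'sp2': 2, 'x': 9}]) -> {'sp1': 1, 'x': 9, 'sp2': 2}
# In the 2000h branch above, the Fisher A/B speaker dicts are presumably disjoint,
# so nothing should actually be overwritten in practice.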
if __name__ == '__main__':
data_sizes = ['2000h']
# data_sizes = ['300h']
# if bool(args.fisher):
# data_sizes += ['2000h']
for data_size in data_sizes:
main(data_size)
| mit |
zrhans/pythonanywhere | pyscripts/ply_wrose.py | 1 | 1678 | """
DATA,Chuva,Chuva_min,Chuva_max,VVE,VVE_min,VVE_max,DVE,DVE_min,DVE_max,Temp.,Temp._min,Temp._max,Umidade,Umidade_min,Umidade_max,Rad.,Rad._min,Rad._max,Pres.Atm.,Pres.Atm._min,Pres.Atm._max,Temp.Int.,Temp.Int._min,Temp.Int._max,CH4,CH4_min,CH4_max,HCnM,HCnM_min,HCnM_max,HCT,HCT_min,HCT_max,SO2,SO2_min,SO2_max,O3,O3_min,O3_max,NO,NO_min,NO_max,NO2,NO2_min,NO2_max,NOx,NOx_min,NOx_max,CO,CO_min,CO_max,MP10,MP10_min,MP10_max,MPT,MPT_min,MPT_max,Fin,Fin_min,Fin_max,Vin,Vin_min,Vin_max,Vout,Vout_min,Vout_max
"""
import plotly.plotly as py # Every function in this module will communicate with an external plotly server
import plotly.graph_objs as go
import pandas as pd
DATAFILE = r'/home/zrhans/w3/bns/bns_2016-1.csv'
df = pd.read_csv(DATAFILE, parse_dates=True, sep=',', header=0, index_col='DATA')
x = df.DVE
y = df.VVE
#print(y)
# Defining the data series
trace1 = go.Area(
r = y,#["2015-12-01","2015-12-01 01:00:00","2015-12-01 02:00:00","2015-12-01 03:00:00","2015-12-01 04:00:00","2015-12-01 05:00:00"],
t = x,#[74.73,76.59,76.5,79.03,77.89,81.9,],
name='Vento m/s',
marker=dict(
color='rgb(158,154,200)'
)
)
# Edit the layout
layout = go.Layout(
title='Distribuição da Velocidade do Vento no diagrama Laurel',
font = dict(size=16),
radialaxis=dict(
ticksuffix='m/s'
),
orientation=270
)
data = [trace1]
fig = go.Figure(data=data, layout=layout)
# Plotting the object
py.plot(
fig,
filename='hans/oi_wrose', # name of the file as saved in your plotly account
sharing='public'
) # 'public' | 'private' | 'secret': Learn more: https://plot.ly/python/privacy
| apache-2.0 |
gdementen/PyTables | c-blosc/bench/plot-speeds.py | 11 | 6852 | """Script for plotting the results of the 'suite' benchmark.
Invoke without parameters for usage hints.
:Author: Francesc Alted
:Date: 2010-06-01
"""
import matplotlib as mpl
from pylab import *
KB_ = 1024
MB_ = 1024*KB_
GB_ = 1024*MB_
NCHUNKS = 128 # keep in sync with bench.c
linewidth=2
#markers= ['+', ',', 'o', '.', 's', 'v', 'x', '>', '<', '^']
#markers= [ 'x', '+', 'o', 's', 'v', '^', '>', '<', ]
markers= [ 's', 'o', 'v', '^', '+', 'x', '>', '<', '.', ',' ]
markersize = 8
def get_values(filename):
f = open(filename)
values = {"memcpyw": [], "memcpyr": []}
for line in f:
if line.startswith('-->'):
tmp = line.split('-->')[1]
nthreads, size, elsize, sbits, codec = [i for i in tmp.split(', ')]
nthreads, size, elsize, sbits = map(int, (nthreads, size, elsize, sbits))
values["size"] = size * NCHUNKS / MB_;
values["elsize"] = elsize;
values["sbits"] = sbits;
values["codec"] = codec
# New run for nthreads
(ratios, speedsw, speedsr) = ([], [], [])
# Add a new entry for (ratios, speedw, speedr)
values[nthreads] = (ratios, speedsw, speedsr)
#print "-->", nthreads, size, elsize, sbits
elif line.startswith('memcpy(write):'):
tmp = line.split(',')[1]
memcpyw = float(tmp.split(' ')[1])
values["memcpyw"].append(memcpyw)
elif line.startswith('memcpy(read):'):
tmp = line.split(',')[1]
memcpyr = float(tmp.split(' ')[1])
values["memcpyr"].append(memcpyr)
elif line.startswith('comp(write):'):
tmp = line.split(',')[1]
speedw = float(tmp.split(' ')[1])
ratio = float(line.split(':')[-1])
speedsw.append(speedw)
ratios.append(ratio)
elif line.startswith('decomp(read):'):
tmp = line.split(',')[1]
speedr = float(tmp.split(' ')[1])
speedsr.append(speedr)
if "OK" not in line:
print "WARNING! OK not found in decomp line!"
f.close()
return nthreads, values
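# Illustrative sketch (not part of the original script) of the bench output lines
# that get_values() expects, inferred from the string matching above; the exact
# formatting comes from the C benchmark and may differ slightly:
#
#   --> 4, 2097152, 8, 19, blosclz
#   memcpy(write): 1024.0 us, 2048.0 MB/s
#   memcpy(read): 512.0 us, 4096.0 MB/s
#   comp(write): 700.0 us, 2995.9 MB/s Final bytes: 660000 Ratio: 3.14
#   decomp(read): 500.0 us, 4194.3 MB/s OK
#
# i.e. one '-->' header per thread count, the memcpy baselines, then one
# comp/decomp pair per compression level.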
def show_plot(plots, yaxis, legends, gtitle, xmax=None):
xlabel('Compression ratio')
ylabel('Speed (MB/s)')
title(gtitle)
xlim(0, xmax)
#ylim(0, 10000)
ylim(0, None)
grid(True)
# legends = [f[f.find('-'):f.index('.out')] for f in filenames]
# legends = [l.replace('-', ' ') for l in legends]
#legend([p[0] for p in plots], legends, loc = "upper left")
legend([p[0] for p in plots
if not isinstance(p, mpl.lines.Line2D)],
legends, loc = "best")
#subplots_adjust(bottom=0.2, top=None, wspace=0.2, hspace=0.2)
if outfile:
print "Saving plot to:", outfile
savefig(outfile, dpi=64)
else:
show()
if __name__ == '__main__':
from optparse import OptionParser
usage = "usage: %prog [-r] [-o outfile] [-t title ] [-d|-c] filename"
compress_title = 'Compression speed'
decompress_title = 'Decompression speed'
yaxis = 'No axis name'
parser = OptionParser(usage=usage)
parser.add_option('-o',
'--outfile',
dest='outfile',
help=('filename for output (many extensions '
'supported, e.g. .png, .jpg, .pdf)'))
parser.add_option('-t',
'--title',
dest='title',
help='title of the plot',)
parser.add_option('-l',
'--limit',
dest='limit',
help='expression to limit number of threads shown',)
parser.add_option('-x',
'--xmax',
dest='xmax',
help='limit the x-axis',
default=None)
parser.add_option('-r', '--report', action='store_true',
dest='report',
help='generate file for reporting ',
default=False)
parser.add_option('-d', '--decompress', action='store_true',
dest='dspeed',
help='plot decompression data',
default=False)
parser.add_option('-c', '--compress', action='store_true',
dest='cspeed',
help='plot compression data',
default=False)
(options, args) = parser.parse_args()
if len(args) == 0:
parser.error("No input arguments")
elif len(args) > 1:
parser.error("Too many input arguments")
else:
pass
if options.report and options.outfile:
parser.error("Can only select one of [-r, -o]")
if options.dspeed and options.cspeed:
parser.error("Can only select one of [-d, -c]")
elif options.cspeed:
options.dspeed = False
plot_title = compress_title
else: # either neither or dspeed
options.dspeed = True
plot_title = decompress_title
filename = args[0]
cspeed = options.cspeed
dspeed = options.dspeed
if options.outfile:
outfile = options.outfile
elif options.report:
if cspeed:
outfile = filename[:filename.rindex('.')] + '-compr.png'
else:
outfile = filename[:filename.rindex('.')] + '-decompr.png'
else:
outfile = None
plots = []
legends = []
nthreads, values = get_values(filename)
#print "Values:", values
if options.limit:
thread_range = eval(options.limit)
else:
thread_range = range(1, nthreads+1)
if options.title:
plot_title = options.title
else:
plot_title += " (%(size).1f MB, %(elsize)d bytes, %(sbits)d bits), %(codec)s" % values
gtitle = plot_title
for nt in thread_range:
#print "Values for %s threads --> %s" % (nt, values[nt])
(ratios, speedw, speedr) = values[nt]
if cspeed:
speed = speedw
else:
speed = speedr
#plot_ = semilogx(ratios, speed, linewidth=2)
plot_ = plot(ratios, speed, linewidth=2)
plots.append(plot_)
nmarker = nt
if nt >= len(markers):
nmarker = nt%len(markers)
setp(plot_, marker=markers[nmarker], markersize=markersize,
linewidth=linewidth)
legends.append("%d threads" % nt)
# Add memcpy lines
if cspeed:
mean = np.mean(values["memcpyw"])
message = "memcpy (write to memory)"
else:
mean = np.mean(values["memcpyr"])
message = "memcpy (read from memory)"
plot_ = axhline(mean, linewidth=3, linestyle='-.', color='black')
text(1.0, mean+50, message)
plots.append(plot_)
show_plot(plots, yaxis, legends, gtitle, xmax=int(options.xmax) if
options.xmax else None)
| bsd-3-clause |
andres-liiver/IAPB13_suvendatud | Kodutoo_16/Kodutoo_16_Andres.py | 1 | 2985 | '''
Homework 16 (Kodutoo 16)
14.11.2014
Andres Liiver
'''
import time
from matplotlib import pyplot as plt
from Tund16gen import *
def timeFunc(func, *args):
start = time.clock()
func(*args)
return time.clock() - start
def linear_search(lst, num):
for item in lst:
if item == num:
return True
return False
def binary_search(lst, num, sort=False):
if sort:
lst = sorted(lst)
imin = 0
imax = len(lst)-1
while imax >= imin:
imid = (imin+imax) // 2
if lst[imid] == num:
return True
elif lst[imid] < num:
imin = imid + 1
else:
imax = imid - 1
return False
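# Illustrative usage of the two search helpers above (added comment, not part of
# the original homework code). binary_search assumes a sorted list unless
# sort=True is passed, in which case it sorts a copy on every call -- exactly the
# overhead that main() benchmarks separately:
#   data = [7, 3, 9, 1]
#   linear_search(data, 9)             # True
#   binary_search(sorted(data), 9)     # True, presort once then search
#   binary_search(data, 9, sort=True)  # True, sorts internally on each call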
def main():
linearTimes = []
binary1Times = []
binary2Times = []
ns = [2**i for i in range(1, 13)]
for n in ns:
lst, gen = gimme_my_input(n, "blah")
times = []
# linear search test
for i in range(len(lst)):
times.append(timeFunc(linear_search, lst, next(gen)))
avg_time = sum(times) / len(times)
linearTimes.append(avg_time)
# binary search test 1
times = []
sortedList = sorted(lst)
for i in range(len(lst)):
times.append(timeFunc(binary_search, sortedList, next(gen)))
avg_time = sum(times) / len(times)
binary1Times.append(avg_time)
# binary search test 2
times = []
for i in range(len(lst)):
times.append(timeFunc(binary_search, lst, next(gen), True))
avg_time = sum(times) / len(times)
binary2Times.append(avg_time)
# print table of results
print("| algorithm \t| n \t\t| time (s)")
print()
# print Linear Search
for i, n in enumerate(ns):
if n < 10000:
print("| {0} \t| {1} \t\t| {2:.8f}".format("Linear", n, linearTimes[i]))
else:
print("| {0} \t| {1} \t| {2:.8f}".format("Linear", n, linearTimes[i]))
print()
# print Binary Search (presorted)
for i, n in enumerate(ns):
if n < 10000:
print("| {0} | {1} \t\t| {2:.8f}".format("Bin (presort)", n, binary1Times[i]))
else:
print("| {0} | {1} \t| {2:.8f}".format("Bin (presort)", n, binary1Times[i]))
print()
# print Binary Search (sort)
for i, n in enumerate(ns):
if n < 10000:
print("| {0} \t| {1} \t\t| {2:.8f}".format("Bin (sort)", n, binary2Times[i]))
else:
print("| {0} \t| {1} \t| {2:.8f}".format("Bin (sort)", n, binary2Times[i]))
# plot the times
ax = plt.subplot()
ax.set_xlabel("n")
ax.set_xscale("log")
ax.set_ylabel("Time (s)")
ax.set_yscale("log")
ax.plot(ns, linearTimes, "r", label="Linear Search")
ax.plot(ns, binary1Times, "g", label="Binary Search (presorted)")
ax.plot(ns, binary2Times, "b", label="Binary Search (sort)")
ax.legend(loc="upper left", shadow=True);
plt.show()
if __name__ == "__main__":
main() | mit |
MartinDelzant/scikit-learn | sklearn/linear_model/tests/test_coordinate_descent.py | 114 | 25281 | # Authors: Olivier Grisel <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
from sys import version_info
import numpy as np
from scipy import interpolate, sparse
from copy import deepcopy
from sklearn.datasets import load_boston
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import TempMemmap
from sklearn.linear_model.coordinate_descent import Lasso, \
LassoCV, ElasticNet, ElasticNetCV, MultiTaskLasso, MultiTaskElasticNet, \
MultiTaskElasticNetCV, MultiTaskLassoCV, lasso_path, enet_path
from sklearn.linear_model import LassoLarsCV, lars_path
from sklearn.utils import check_array
def check_warnings():
if version_info < (2, 6):
raise SkipTest("Testing for warnings is not supported in versions \
older than Python 2.6")
def test_lasso_zero():
# Check that the lasso can handle zero data without crashing
X = [[0], [0], [0]]
y = [0, 0, 0]
clf = Lasso(alpha=0.1).fit(X, y)
pred = clf.predict([[1], [2], [3]])
assert_array_almost_equal(clf.coef_, [0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_lasso_toy():
# Test Lasso on a toy example for various values of alpha.
# When validating this against glmnet notice that glmnet divides it
# against nobs.
X = [[-1], [0], [1]]
Y = [-1, 0, 1] # just a straight line
T = [[2], [3], [4]] # test sample
clf = Lasso(alpha=1e-8)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=0.1)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.85])
assert_array_almost_equal(pred, [1.7, 2.55, 3.4])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.25])
assert_array_almost_equal(pred, [0.5, 0.75, 1.])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=1)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy():
# Test ElasticNet for various parameters of alpha and l1_ratio.
# Actually, the parameters alpha = 0 should not be allowed. However,
# we test it as a border case.
# ElasticNet is tested with and without precomputed Gram matrix
X = np.array([[-1.], [0.], [1.]])
Y = [-1, 0, 1] # just a straight line
T = [[2.], [3.], [4.]] # test sample
# this should be the same as lasso
clf = ElasticNet(alpha=1e-8, l1_ratio=1.0)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=100,
precompute=False)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf.set_params(max_iter=100, precompute=True)
clf.fit(X, Y) # with Gram
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf.set_params(max_iter=100, precompute=np.dot(X.T, X))
clf.fit(X, Y) # with Gram
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def build_dataset(n_samples=50, n_features=200, n_informative_features=10,
n_targets=1):
"""
build an ill-posed linear regression problem with many noisy features and
comparatively few samples
"""
random_state = np.random.RandomState(0)
if n_targets > 1:
w = random_state.randn(n_features, n_targets)
else:
w = random_state.randn(n_features)
w[n_informative_features:] = 0.0
X = random_state.randn(n_samples, n_features)
y = np.dot(X, w)
X_test = random_state.randn(n_samples, n_features)
y_test = np.dot(X_test, w)
return X, y, X_test, y_test
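# Note (added for clarity, not in the original test module): with the defaults
# above, build_dataset() returns X of shape (50, 200), y of shape (50,), and a
# held-out X_test/y_test pair generated from the same weight vector w, in which
# only the first 10 coefficients are non-zero.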
def test_lasso_cv():
X, y, X_test, y_test = build_dataset()
max_iter = 150
clf = LassoCV(n_alphas=10, eps=1e-3, max_iter=max_iter).fit(X, y)
assert_almost_equal(clf.alpha_, 0.056, 2)
clf = LassoCV(n_alphas=10, eps=1e-3, max_iter=max_iter, precompute=True)
clf.fit(X, y)
assert_almost_equal(clf.alpha_, 0.056, 2)
# Check that the lars and the coordinate descent implementation
# select a similar alpha
lars = LassoLarsCV(normalize=False, max_iter=30).fit(X, y)
# for this we check that they don't fall in the grid of
# clf.alphas further than 1
assert_true(np.abs(
np.searchsorted(clf.alphas_[::-1], lars.alpha_)
- np.searchsorted(clf.alphas_[::-1], clf.alpha_)) <= 1)
# check that they also give a similar MSE
mse_lars = interpolate.interp1d(lars.cv_alphas_, lars.cv_mse_path_.T)
np.testing.assert_approx_equal(mse_lars(clf.alphas_[5]).mean(),
clf.mse_path_[5].mean(), significant=2)
# test set
assert_greater(clf.score(X_test, y_test), 0.99)
def test_lasso_cv_positive_constraint():
X, y, X_test, y_test = build_dataset()
max_iter = 500
# Ensure the unconstrained fit has a negative coefficient
clf_unconstrained = LassoCV(n_alphas=3, eps=1e-1, max_iter=max_iter, cv=2,
n_jobs=1)
clf_unconstrained.fit(X, y)
assert_true(min(clf_unconstrained.coef_) < 0)
# On same data, constrained fit has non-negative coefficients
clf_constrained = LassoCV(n_alphas=3, eps=1e-1, max_iter=max_iter,
positive=True, cv=2, n_jobs=1)
clf_constrained.fit(X, y)
assert_true(min(clf_constrained.coef_) >= 0)
def test_lasso_path_return_models_vs_new_return_gives_same_coefficients():
# Test that lasso_path with lars_path style output gives the
# same result
# Some toy data
X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
y = np.array([1, 2, 3.1])
alphas = [5., 1., .5]
# Use lars_path and lasso_path(new output) with 1D linear interpolation
# to compute the same path
alphas_lars, _, coef_path_lars = lars_path(X, y, method='lasso')
coef_path_cont_lars = interpolate.interp1d(alphas_lars[::-1],
coef_path_lars[:, ::-1])
alphas_lasso2, coef_path_lasso2, _ = lasso_path(X, y, alphas=alphas,
return_models=False)
coef_path_cont_lasso = interpolate.interp1d(alphas_lasso2[::-1],
coef_path_lasso2[:, ::-1])
assert_array_almost_equal(
coef_path_cont_lasso(alphas), coef_path_cont_lars(alphas),
decimal=1)
def test_enet_path():
# We use a large number of samples and of informative features so that
# the l1_ratio selected is more toward ridge than lasso
X, y, X_test, y_test = build_dataset(n_samples=200, n_features=100,
n_informative_features=100)
max_iter = 150
# Here we have a small number of iterations, and thus the
# ElasticNet might not converge. This is to speed up tests
clf = ElasticNetCV(alphas=[0.01, 0.05, 0.1], eps=2e-3,
l1_ratio=[0.5, 0.7], cv=3,
max_iter=max_iter)
ignore_warnings(clf.fit)(X, y)
# Well-conditioned settings, we should have selected our
# smallest penalty
assert_almost_equal(clf.alpha_, min(clf.alphas_))
# Non-sparse ground truth: we should have selected an elastic-net
# that is closer to ridge than to lasso
assert_equal(clf.l1_ratio_, min(clf.l1_ratio))
clf = ElasticNetCV(alphas=[0.01, 0.05, 0.1], eps=2e-3,
l1_ratio=[0.5, 0.7], cv=3,
max_iter=max_iter, precompute=True)
ignore_warnings(clf.fit)(X, y)
# Well-conditioned settings, we should have selected our
# smallest penalty
assert_almost_equal(clf.alpha_, min(clf.alphas_))
# Non-sparse ground truth: we should have selected an elastic-net
# that is closer to ridge than to lasso
assert_equal(clf.l1_ratio_, min(clf.l1_ratio))
# We are in well-conditioned settings with low noise: we should
# have a good test-set performance
assert_greater(clf.score(X_test, y_test), 0.99)
# Multi-output/target case
X, y, X_test, y_test = build_dataset(n_features=10, n_targets=3)
clf = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7],
cv=3, max_iter=max_iter)
ignore_warnings(clf.fit)(X, y)
# We are in well-conditioned settings with low noise: we should
# have a good test-set performance
assert_greater(clf.score(X_test, y_test), 0.99)
assert_equal(clf.coef_.shape, (3, 10))
# Mono-output should have same cross-validated alpha_ and l1_ratio_
# in both cases.
X, y, _, _ = build_dataset(n_features=10)
clf1 = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf1.fit(X, y)
clf2 = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf2.fit(X, y[:, np.newaxis])
assert_almost_equal(clf1.l1_ratio_, clf2.l1_ratio_)
assert_almost_equal(clf1.alpha_, clf2.alpha_)
def test_path_parameters():
X, y, _, _ = build_dataset()
max_iter = 100
clf = ElasticNetCV(n_alphas=50, eps=1e-3, max_iter=max_iter,
l1_ratio=0.5, tol=1e-3)
clf.fit(X, y) # new params
assert_almost_equal(0.5, clf.l1_ratio)
assert_equal(50, clf.n_alphas)
assert_equal(50, len(clf.alphas_))
def test_warm_start():
X, y, _, _ = build_dataset()
clf = ElasticNet(alpha=0.1, max_iter=5, warm_start=True)
ignore_warnings(clf.fit)(X, y)
ignore_warnings(clf.fit)(X, y) # do a second round with 5 iterations
clf2 = ElasticNet(alpha=0.1, max_iter=10)
ignore_warnings(clf2.fit)(X, y)
assert_array_almost_equal(clf2.coef_, clf.coef_)
def test_lasso_alpha_warning():
X = [[-1], [0], [1]]
Y = [-1, 0, 1] # just a straight line
clf = Lasso(alpha=0)
assert_warns(UserWarning, clf.fit, X, Y)
def test_lasso_positive_constraint():
X = [[-1], [0], [1]]
y = [1, 0, -1] # just a straight line with negative slope
lasso = Lasso(alpha=0.1, max_iter=1000, positive=True)
lasso.fit(X, y)
assert_true(min(lasso.coef_) >= 0)
lasso = Lasso(alpha=0.1, max_iter=1000, precompute=True, positive=True)
lasso.fit(X, y)
assert_true(min(lasso.coef_) >= 0)
def test_enet_positive_constraint():
X = [[-1], [0], [1]]
y = [1, 0, -1] # just a straight line with negative slope
enet = ElasticNet(alpha=0.1, max_iter=1000, positive=True)
enet.fit(X, y)
assert_true(min(enet.coef_) >= 0)
def test_enet_cv_positive_constraint():
X, y, X_test, y_test = build_dataset()
max_iter = 500
# Ensure the unconstrained fit has a negative coefficient
enetcv_unconstrained = ElasticNetCV(n_alphas=3, eps=1e-1,
max_iter=max_iter,
cv=2, n_jobs=1)
enetcv_unconstrained.fit(X, y)
assert_true(min(enetcv_unconstrained.coef_) < 0)
# On same data, constrained fit has non-negative coefficients
enetcv_constrained = ElasticNetCV(n_alphas=3, eps=1e-1, max_iter=max_iter,
cv=2, positive=True, n_jobs=1)
enetcv_constrained.fit(X, y)
assert_true(min(enetcv_constrained.coef_) >= 0)
def test_uniform_targets():
enet = ElasticNetCV(fit_intercept=True, n_alphas=3)
m_enet = MultiTaskElasticNetCV(fit_intercept=True, n_alphas=3)
lasso = LassoCV(fit_intercept=True, n_alphas=3)
m_lasso = MultiTaskLassoCV(fit_intercept=True, n_alphas=3)
models_single_task = (enet, lasso)
models_multi_task = (m_enet, m_lasso)
rng = np.random.RandomState(0)
X_train = rng.random_sample(size=(10, 3))
X_test = rng.random_sample(size=(10, 3))
y1 = np.empty(10)
y2 = np.empty((10, 2))
for model in models_single_task:
for y_values in (0, 5):
y1.fill(y_values)
assert_array_equal(model.fit(X_train, y1).predict(X_test), y1)
assert_array_equal(model.alphas_, [np.finfo(float).resolution]*3)
for model in models_multi_task:
for y_values in (0, 5):
y2[:, 0].fill(y_values)
y2[:, 1].fill(2 * y_values)
assert_array_equal(model.fit(X_train, y2).predict(X_test), y2)
assert_array_equal(model.alphas_, [np.finfo(float).resolution]*3)
def test_multi_task_lasso_and_enet():
X, y, X_test, y_test = build_dataset()
Y = np.c_[y, y]
# Y_test = np.c_[y_test, y_test]
clf = MultiTaskLasso(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
clf = MultiTaskElasticNet(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
def test_lasso_readonly_data():
X = np.array([[-1], [0], [1]])
Y = np.array([-1, 0, 1]) # just a straight line
T = np.array([[2], [3], [4]]) # test sample
with TempMemmap((X, Y)) as (X, Y):
clf = Lasso(alpha=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.25])
assert_array_almost_equal(pred, [0.5, 0.75, 1.])
assert_almost_equal(clf.dual_gap_, 0)
def test_multi_task_lasso_readonly_data():
X, y, X_test, y_test = build_dataset()
Y = np.c_[y, y]
with TempMemmap((X, Y)) as (X, Y):
Y = np.c_[y, y]
clf = MultiTaskLasso(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
def test_enet_multitarget():
n_targets = 3
X, y, _, _ = build_dataset(n_samples=10, n_features=8,
n_informative_features=10, n_targets=n_targets)
estimator = ElasticNet(alpha=0.01, fit_intercept=True)
estimator.fit(X, y)
coef, intercept, dual_gap = (estimator.coef_, estimator.intercept_,
estimator.dual_gap_)
for k in range(n_targets):
estimator.fit(X, y[:, k])
assert_array_almost_equal(coef[k, :], estimator.coef_)
assert_array_almost_equal(intercept[k], estimator.intercept_)
assert_array_almost_equal(dual_gap[k], estimator.dual_gap_)
def test_multioutput_enetcv_error():
X = np.random.randn(10, 2)
y = np.random.randn(10, 2)
clf = ElasticNetCV()
assert_raises(ValueError, clf.fit, X, y)
def test_multitask_enet_and_lasso_cv():
X, y, _, _ = build_dataset(n_features=100, n_targets=3)
clf = MultiTaskElasticNetCV().fit(X, y)
assert_almost_equal(clf.alpha_, 0.00556, 3)
clf = MultiTaskLassoCV().fit(X, y)
assert_almost_equal(clf.alpha_, 0.00278, 3)
X, y, _, _ = build_dataset(n_targets=3)
clf = MultiTaskElasticNetCV(n_alphas=50, eps=1e-3, max_iter=100,
l1_ratio=[0.3, 0.5], tol=1e-3)
clf.fit(X, y)
assert_equal(0.5, clf.l1_ratio_)
assert_equal((3, X.shape[1]), clf.coef_.shape)
assert_equal((3, ), clf.intercept_.shape)
assert_equal((2, 50, 3), clf.mse_path_.shape)
assert_equal((2, 50), clf.alphas_.shape)
X, y, _, _ = build_dataset(n_targets=3)
clf = MultiTaskLassoCV(n_alphas=50, eps=1e-3, max_iter=100, tol=1e-3)
clf.fit(X, y)
assert_equal((3, X.shape[1]), clf.coef_.shape)
assert_equal((3, ), clf.intercept_.shape)
assert_equal((50, 3), clf.mse_path_.shape)
assert_equal(50, len(clf.alphas_))
def test_1d_multioutput_enet_and_multitask_enet_cv():
X, y, _, _ = build_dataset(n_features=10)
y = y[:, np.newaxis]
clf = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf.fit(X, y[:, 0])
clf1 = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf1.fit(X, y)
assert_almost_equal(clf.l1_ratio_, clf1.l1_ratio_)
assert_almost_equal(clf.alpha_, clf1.alpha_)
assert_almost_equal(clf.coef_, clf1.coef_[0])
assert_almost_equal(clf.intercept_, clf1.intercept_[0])
def test_1d_multioutput_lasso_and_multitask_lasso_cv():
X, y, _, _ = build_dataset(n_features=10)
y = y[:, np.newaxis]
clf = LassoCV(n_alphas=5, eps=2e-3)
clf.fit(X, y[:, 0])
clf1 = MultiTaskLassoCV(n_alphas=5, eps=2e-3)
clf1.fit(X, y)
assert_almost_equal(clf.alpha_, clf1.alpha_)
assert_almost_equal(clf.coef_, clf1.coef_[0])
assert_almost_equal(clf.intercept_, clf1.intercept_[0])
def test_sparse_input_dtype_enet_and_lassocv():
X, y, _, _ = build_dataset(n_features=10)
clf = ElasticNetCV(n_alphas=5)
clf.fit(sparse.csr_matrix(X), y)
clf1 = ElasticNetCV(n_alphas=5)
clf1.fit(sparse.csr_matrix(X, dtype=np.float32), y)
assert_almost_equal(clf.alpha_, clf1.alpha_, decimal=6)
assert_almost_equal(clf.coef_, clf1.coef_, decimal=6)
clf = LassoCV(n_alphas=5)
clf.fit(sparse.csr_matrix(X), y)
clf1 = LassoCV(n_alphas=5)
clf1.fit(sparse.csr_matrix(X, dtype=np.float32), y)
assert_almost_equal(clf.alpha_, clf1.alpha_, decimal=6)
assert_almost_equal(clf.coef_, clf1.coef_, decimal=6)
def test_precompute_invalid_argument():
X, y, _, _ = build_dataset()
for clf in [ElasticNetCV(precompute="invalid"),
LassoCV(precompute="invalid")]:
assert_raises(ValueError, clf.fit, X, y)
def test_warm_start_convergence():
X, y, _, _ = build_dataset()
model = ElasticNet(alpha=1e-3, tol=1e-3).fit(X, y)
n_iter_reference = model.n_iter_
# This dataset is not trivial enough for the model to converge in one pass.
assert_greater(n_iter_reference, 2)
# Check that n_iter_ is invariant to multiple calls to fit
# when warm_start=False, all else being equal.
model.fit(X, y)
n_iter_cold_start = model.n_iter_
assert_equal(n_iter_cold_start, n_iter_reference)
# Fit the same model again, using a warm start: the optimizer just performs
# a single pass before checking that it has already converged
model.set_params(warm_start=True)
model.fit(X, y)
n_iter_warm_start = model.n_iter_
assert_equal(n_iter_warm_start, 1)
def test_warm_start_convergence_with_regularizer_decrement():
boston = load_boston()
X, y = boston.data, boston.target
# Train a model to converge on a lightly regularized problem
final_alpha = 1e-5
low_reg_model = ElasticNet(alpha=final_alpha).fit(X, y)
# Fitting a new model on a more regularized version of the same problem.
# Fitting with high regularization is easier, so it should converge faster
# in general.
high_reg_model = ElasticNet(alpha=final_alpha * 10).fit(X, y)
assert_greater(low_reg_model.n_iter_, high_reg_model.n_iter_)
# Fit the solution to the original, less regularized version of the
# problem but from the solution of the highly regularized variant of
# the problem as a better starting point. This should also converge
# faster than the original model that starts from zero.
warm_low_reg_model = deepcopy(high_reg_model)
warm_low_reg_model.set_params(warm_start=True, alpha=final_alpha)
warm_low_reg_model.fit(X, y)
assert_greater(low_reg_model.n_iter_, warm_low_reg_model.n_iter_)
def test_random_descent():
# Test that both random and cyclic selection give the same results.
# Ensure that the test models fully converge and check a wide
# range of conditions.
# This uses the coordinate descent algo using the gram trick.
X, y, _, _ = build_dataset(n_samples=50, n_features=20)
clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(X, y)
clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
clf_random.fit(X, y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# This uses the descent algo without the gram trick
clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(X.T, y[:20])
clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
clf_random.fit(X.T, y[:20])
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Sparse Case
clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(sparse.csr_matrix(X), y)
clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
clf_random.fit(sparse.csr_matrix(X), y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Multioutput case.
new_y = np.hstack((y[:, np.newaxis], y[:, np.newaxis]))
clf_cyclic = MultiTaskElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(X, new_y)
clf_random = MultiTaskElasticNet(selection='random', tol=1e-8,
random_state=42)
clf_random.fit(X, new_y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Raise error when selection is not in cyclic or random.
clf_random = ElasticNet(selection='invalid')
assert_raises(ValueError, clf_random.fit, X, y)
def test_deprecation_precompute_enet():
# Test that setting precompute="auto" gives a Deprecation Warning.
X, y, _, _ = build_dataset(n_samples=20, n_features=10)
clf = ElasticNet(precompute="auto")
assert_warns(DeprecationWarning, clf.fit, X, y)
clf = Lasso(precompute="auto")
assert_warns(DeprecationWarning, clf.fit, X, y)
def test_enet_path_positive():
# Test that the coefs returned by positive=True in enet_path are positive
X, y, _, _ = build_dataset(n_samples=50, n_features=50)
for path in [enet_path, lasso_path]:
pos_path_coef = path(X, y, positive=True)[1]
assert_true(np.all(pos_path_coef >= 0))
def test_sparse_dense_descent_paths():
# Test that dense and sparse input give the same results for descent paths.
X, y, _, _ = build_dataset(n_samples=50, n_features=20)
csr = sparse.csr_matrix(X)
for path in [enet_path, lasso_path]:
_, coefs, _ = path(X, y, fit_intercept=False)
_, sparse_coefs, _ = path(csr, y, fit_intercept=False)
assert_array_almost_equal(coefs, sparse_coefs)
def test_check_input_false():
X, y, _, _ = build_dataset(n_samples=20, n_features=10)
X = check_array(X, order='F', dtype='float64')
y = check_array(X, order='F', dtype='float64')
clf = ElasticNet(selection='cyclic', tol=1e-8)
# Check that no error is raised if data is provided in the right format
clf.fit(X, y, check_input=False)
X = check_array(X, order='F', dtype='float32')
clf.fit(X, y, check_input=True)
# Check that an error is raised if data is provided in the wrong format,
# because of check bypassing
assert_raises(ValueError, clf.fit, X, y, check_input=False)
# With no input checking, providing X in C order should result in
# incorrect computation
X = check_array(X, order='C', dtype='float64')
clf.fit(X, y, check_input=False)
coef_false = clf.coef_
clf.fit(X, y, check_input=True)
coef_true = clf.coef_
assert_raises(AssertionError, assert_array_almost_equal,
coef_true, coef_false)
def test_overrided_gram_matrix():
X, y, _, _ = build_dataset(n_samples=20, n_features=10)
Gram = X.T.dot(X)
clf = ElasticNet(selection='cyclic', tol=1e-8, precompute=Gram,
fit_intercept=True)
assert_warns_message(UserWarning,
"Gram matrix was provided but X was centered"
" to fit intercept, "
"or X was normalized : recomputing Gram matrix.",
clf.fit, X, y)
| bsd-3-clause |
datapythonista/pandas | pandas/core/arrays/sparse/accessor.py | 2 | 11479 | """Sparse accessor"""
import numpy as np
from pandas.compat._optional import import_optional_dependency
from pandas.core.dtypes.cast import find_common_type
from pandas.core.accessor import (
PandasDelegate,
delegate_names,
)
from pandas.core.arrays.sparse.array import SparseArray
from pandas.core.arrays.sparse.dtype import SparseDtype
class BaseAccessor:
_validation_msg = "Can only use the '.sparse' accessor with Sparse data."
def __init__(self, data=None):
self._parent = data
self._validate(data)
def _validate(self, data):
raise NotImplementedError
@delegate_names(
SparseArray, ["npoints", "density", "fill_value", "sp_values"], typ="property"
)
class SparseAccessor(BaseAccessor, PandasDelegate):
"""
Accessor for Series with sparse data.
"""
def _validate(self, data):
if not isinstance(data.dtype, SparseDtype):
raise AttributeError(self._validation_msg)
def _delegate_property_get(self, name, *args, **kwargs):
return getattr(self._parent.array, name)
def _delegate_method(self, name, *args, **kwargs):
if name == "from_coo":
return self.from_coo(*args, **kwargs)
elif name == "to_coo":
return self.to_coo(*args, **kwargs)
else:
raise ValueError
@classmethod
def from_coo(cls, A, dense_index=False):
"""
Create a Series with sparse values from a scipy.sparse.coo_matrix.
Parameters
----------
A : scipy.sparse.coo_matrix
dense_index : bool, default False
If False (default), the SparseSeries index consists of only the
coords of the non-null entries of the original coo_matrix.
If True, the SparseSeries index consists of the full sorted
(row, col) coordinates of the coo_matrix.
Returns
-------
s : Series
A Series with sparse values.
Examples
--------
>>> from scipy import sparse
>>> A = sparse.coo_matrix(
... ([3.0, 1.0, 2.0], ([1, 0, 0], [0, 2, 3])), shape=(3, 4)
... )
>>> A
<3x4 sparse matrix of type '<class 'numpy.float64'>'
with 3 stored elements in COOrdinate format>
>>> A.todense()
matrix([[0., 0., 1., 2.],
[3., 0., 0., 0.],
[0., 0., 0., 0.]])
>>> ss = pd.Series.sparse.from_coo(A)
>>> ss
0 2 1.0
3 2.0
1 0 3.0
dtype: Sparse[float64, nan]
"""
from pandas import Series
from pandas.core.arrays.sparse.scipy_sparse import coo_to_sparse_series
result = coo_to_sparse_series(A, dense_index=dense_index)
result = Series(result.array, index=result.index, copy=False)
return result
def to_coo(self, row_levels=(0,), column_levels=(1,), sort_labels=False):
"""
Create a scipy.sparse.coo_matrix from a Series with MultiIndex.
Use row_levels and column_levels to determine the row and column
coordinates respectively. row_levels and column_levels are the names
(labels) or numbers of the levels. {row_levels, column_levels} must be
a partition of the MultiIndex level names (or numbers).
Parameters
----------
row_levels : tuple/list
column_levels : tuple/list
sort_labels : bool, default False
Sort the row and column labels before forming the sparse matrix.
Returns
-------
y : scipy.sparse.coo_matrix
rows : list (row labels)
columns : list (column labels)
Examples
--------
>>> s = pd.Series([3.0, np.nan, 1.0, 3.0, np.nan, np.nan])
>>> s.index = pd.MultiIndex.from_tuples(
... [
... (1, 2, "a", 0),
... (1, 2, "a", 1),
... (1, 1, "b", 0),
... (1, 1, "b", 1),
... (2, 1, "b", 0),
... (2, 1, "b", 1)
... ],
... names=["A", "B", "C", "D"],
... )
>>> s
A B C D
1 2 a 0 3.0
1 NaN
1 b 0 1.0
1 3.0
2 1 b 0 NaN
1 NaN
dtype: float64
>>> ss = s.astype("Sparse")
>>> ss
A B C D
1 2 a 0 3.0
1 NaN
1 b 0 1.0
1 3.0
2 1 b 0 NaN
1 NaN
dtype: Sparse[float64, nan]
>>> A, rows, columns = ss.sparse.to_coo(
... row_levels=["A", "B"], column_levels=["C", "D"], sort_labels=True
... )
>>> A
<3x4 sparse matrix of type '<class 'numpy.float64'>'
with 3 stored elements in COOrdinate format>
>>> A.todense()
matrix([[0., 0., 1., 3.],
[3., 0., 0., 0.],
[0., 0., 0., 0.]])
>>> rows
[(1, 1), (1, 2), (2, 1)]
>>> columns
[('a', 0), ('a', 1), ('b', 0), ('b', 1)]
"""
from pandas.core.arrays.sparse.scipy_sparse import sparse_series_to_coo
A, rows, columns = sparse_series_to_coo(
self._parent, row_levels, column_levels, sort_labels=sort_labels
)
return A, rows, columns
def to_dense(self):
"""
Convert a Series from sparse values to dense.
.. versionadded:: 0.25.0
Returns
-------
Series:
A Series with the same values, stored as a dense array.
Examples
--------
>>> series = pd.Series(pd.arrays.SparseArray([0, 1, 0]))
>>> series
0 0
1 1
2 0
dtype: Sparse[int64, 0]
>>> series.sparse.to_dense()
0 0
1 1
2 0
dtype: int64
"""
from pandas import Series
return Series(
self._parent.array.to_dense(),
index=self._parent.index,
name=self._parent.name,
)
class SparseFrameAccessor(BaseAccessor, PandasDelegate):
"""
DataFrame accessor for sparse data.
.. versionadded:: 0.25.0
"""
def _validate(self, data):
dtypes = data.dtypes
if not all(isinstance(t, SparseDtype) for t in dtypes):
raise AttributeError(self._validation_msg)
@classmethod
def from_spmatrix(cls, data, index=None, columns=None):
"""
Create a new DataFrame from a scipy sparse matrix.
.. versionadded:: 0.25.0
Parameters
----------
data : scipy.sparse.spmatrix
Must be convertible to csc format.
index, columns : Index, optional
Row and column labels to use for the resulting DataFrame.
Defaults to a RangeIndex.
Returns
-------
DataFrame
Each column of the DataFrame is stored as a
:class:`arrays.SparseArray`.
Examples
--------
>>> import scipy.sparse
>>> mat = scipy.sparse.eye(3)
>>> pd.DataFrame.sparse.from_spmatrix(mat)
0 1 2
0 1.0 0.0 0.0
1 0.0 1.0 0.0
2 0.0 0.0 1.0
"""
from pandas._libs.sparse import IntIndex
from pandas import DataFrame
data = data.tocsc()
index, columns = cls._prep_index(data, index, columns)
n_rows, n_columns = data.shape
# We need to make sure indices are sorted, as we create
# IntIndex with no input validation (i.e. check_integrity=False).
# Indices may already be sorted in scipy in which case this adds
# a small overhead.
data.sort_indices()
indices = data.indices
indptr = data.indptr
array_data = data.data
dtype = SparseDtype(array_data.dtype, 0)
arrays = []
for i in range(n_columns):
sl = slice(indptr[i], indptr[i + 1])
idx = IntIndex(n_rows, indices[sl], check_integrity=False)
arr = SparseArray._simple_new(array_data[sl], idx, dtype)
arrays.append(arr)
return DataFrame._from_arrays(
arrays, columns=columns, index=index, verify_integrity=False
)
def to_dense(self):
"""
Convert a DataFrame with sparse values to dense.
.. versionadded:: 0.25.0
Returns
-------
DataFrame
A DataFrame with the same values stored as dense arrays.
Examples
--------
>>> df = pd.DataFrame({"A": pd.arrays.SparseArray([0, 1, 0])})
>>> df.sparse.to_dense()
A
0 0
1 1
2 0
"""
from pandas import DataFrame
data = {k: v.array.to_dense() for k, v in self._parent.items()}
return DataFrame(data, index=self._parent.index, columns=self._parent.columns)
def to_coo(self):
"""
Return the contents of the frame as a sparse SciPy COO matrix.
.. versionadded:: 0.25.0
Returns
-------
coo_matrix : scipy.sparse.spmatrix
If the caller is heterogeneous and contains booleans or objects,
the result will be of dtype=object. See Notes.
Notes
-----
The dtype will be the lowest-common-denominator type (implicit
upcasting); that is to say if the dtypes (even of numeric types)
are mixed, the one that accommodates all will be chosen.
e.g. If the dtypes are float16 and float32, dtype will be upcast to
float32. By numpy.find_common_type convention, mixing int64 and
uint64 will result in a float64 dtype.
"""
import_optional_dependency("scipy")
from scipy.sparse import coo_matrix
dtype = find_common_type(self._parent.dtypes.to_list())
if isinstance(dtype, SparseDtype):
dtype = dtype.subtype
cols, rows, data = [], [], []
for col, name in enumerate(self._parent):
s = self._parent[name]
row = s.array.sp_index.to_int_index().indices
cols.append(np.repeat(col, len(row)))
rows.append(row)
data.append(s.array.sp_values.astype(dtype, copy=False))
cols = np.concatenate(cols)
rows = np.concatenate(rows)
data = np.concatenate(data)
return coo_matrix((data, (rows, cols)), shape=self._parent.shape)
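# Illustrative usage (a sketch, not part of the pandas source): for a DataFrame
# whose columns are all SparseArray-backed,
#   df = pd.DataFrame({"A": pd.arrays.SparseArray([0, 1, 0, 1])})
#   df.sparse.to_coo()  # 4x1 scipy COO matrix holding only the 2 non-fill values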
@property
def density(self) -> float:
"""
Ratio of non-sparse points to total (dense) data points.
"""
tmp = np.mean([column.array.density for _, column in self._parent.items()])
return tmp
@staticmethod
def _prep_index(data, index, columns):
from pandas.core.indexes.api import ensure_index
import pandas.core.indexes.base as ibase
N, K = data.shape
if index is None:
index = ibase.default_index(N)
else:
index = ensure_index(index)
if columns is None:
columns = ibase.default_index(K)
else:
columns = ensure_index(columns)
if len(columns) != K:
raise ValueError(f"Column length mismatch: {len(columns)} vs. {K}")
if len(index) != N:
raise ValueError(f"Index length mismatch: {len(index)} vs. {N}")
return index, columns
| bsd-3-clause |
alanmcruickshank/superset-dev | superset/connectors/druid/models.py | 1 | 45334 | # pylint: disable=invalid-unary-operand-type
from collections import OrderedDict
from copy import deepcopy
from datetime import datetime, timedelta
import json
import logging
from multiprocessing import Pool
from dateutil.parser import parse as dparse
from flask import escape, Markup
from flask_appbuilder import Model
from flask_appbuilder.models.decorators import renders
from flask_babel import lazy_gettext as _
from pydruid.client import PyDruid
from pydruid.utils.aggregators import count
from pydruid.utils.filters import Bound, Dimension, Filter
from pydruid.utils.having import Aggregation
from pydruid.utils.postaggregator import (
Const, Field, HyperUniqueCardinality, Postaggregator, Quantile, Quantiles,
)
import requests
from six import string_types
import sqlalchemy as sa
from sqlalchemy import (
Boolean, Column, DateTime, ForeignKey, Integer, or_, String, Text,
)
from sqlalchemy.orm import backref, relationship
from superset import conf, db, import_util, sm, utils
from superset.connectors.base.models import BaseColumn, BaseDatasource, BaseMetric
from superset.models.helpers import AuditMixinNullable, QueryResult, set_perm
from superset.utils import (
DimSelector, DTTM_ALIAS, flasher, MetricPermException,
)
DRUID_TZ = conf.get('DRUID_TZ')
# Function wrapper because bound methods cannot
# be passed to processes
def _fetch_metadata_for(datasource):
return datasource.latest_metadata()
class JavascriptPostAggregator(Postaggregator):
def __init__(self, name, field_names, function):
self.post_aggregator = {
'type': 'javascript',
'fieldNames': field_names,
'name': name,
'function': function,
}
self.name = name
class CustomPostAggregator(Postaggregator):
"""A way to allow users to specify completely custom PostAggregators"""
def __init__(self, name, post_aggregator):
self.name = name
self.post_aggregator = post_aggregator
class DruidCluster(Model, AuditMixinNullable):
"""ORM object referencing the Druid clusters"""
__tablename__ = 'clusters'
type = 'druid'
id = Column(Integer, primary_key=True)
verbose_name = Column(String(250), unique=True)
# short unique name, used in permissions
cluster_name = Column(String(250), unique=True)
coordinator_host = Column(String(255))
coordinator_port = Column(Integer, default=8081)
coordinator_endpoint = Column(
String(255), default='druid/coordinator/v1/metadata')
broker_host = Column(String(255))
broker_port = Column(Integer, default=8082)
broker_endpoint = Column(String(255), default='druid/v2')
metadata_last_refreshed = Column(DateTime)
cache_timeout = Column(Integer)
def __repr__(self):
return self.verbose_name if self.verbose_name else self.cluster_name
def get_pydruid_client(self):
cli = PyDruid(
'http://{0}:{1}/'.format(self.broker_host, self.broker_port),
self.broker_endpoint)
return cli
def get_datasources(self):
endpoint = (
'http://{obj.coordinator_host}:{obj.coordinator_port}/'
'{obj.coordinator_endpoint}/datasources'
).format(obj=self)
return json.loads(requests.get(endpoint).text)
def get_druid_version(self):
endpoint = (
'http://{obj.coordinator_host}:{obj.coordinator_port}/status'
).format(obj=self)
return json.loads(requests.get(endpoint).text)['version']
def refresh_datasources(
self,
datasource_name=None,
merge_flag=True,
refreshAll=True):
"""Refresh metadata of all datasources in the cluster
If ``datasource_name`` is specified, only that datasource is updated
"""
self.druid_version = self.get_druid_version()
ds_list = self.get_datasources()
blacklist = conf.get('DRUID_DATA_SOURCE_BLACKLIST', [])
ds_refresh = []
if not datasource_name:
ds_refresh = list(filter(lambda ds: ds not in blacklist, ds_list))
elif datasource_name not in blacklist and datasource_name in ds_list:
ds_refresh.append(datasource_name)
else:
return
self.refresh_async(ds_refresh, merge_flag, refreshAll)
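# Illustrative usage (a hedged sketch, not part of the upstream source;
# 'wikiticker' is just a hypothetical datasource name):
#   cluster = db.session.query(DruidCluster).first()
#   cluster.refresh_datasources()  # refresh every non-blacklisted datasource
#   cluster.refresh_datasources(datasource_name='wikiticker', refreshAll=False)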
def refresh_async(self, datasource_names, merge_flag, refreshAll):
"""
Fetches metadata for the specified datasources and
merges it into the Superset database
"""
session = db.session
ds_list = (
session.query(DruidDatasource)
.filter(or_(DruidDatasource.datasource_name == name
for name in datasource_names))
)
ds_map = {ds.name: ds for ds in ds_list}
for ds_name in datasource_names:
datasource = ds_map.get(ds_name, None)
if not datasource:
datasource = DruidDatasource(datasource_name=ds_name)
with session.no_autoflush:
session.add(datasource)
flasher(
'Adding new datasource [{}]'.format(ds_name), 'success')
ds_map[ds_name] = datasource
elif refreshAll:
flasher(
'Refreshing datasource [{}]'.format(ds_name), 'info')
else:
del ds_map[ds_name]
continue
datasource.cluster = self
datasource.merge_flag = merge_flag
session.flush()
# Prepare multithreaded execution
pool = Pool()
ds_refresh = list(ds_map.values())
metadata = pool.map(_fetch_metadata_for, ds_refresh)
pool.close()
pool.join()
for i in range(0, len(ds_refresh)):
datasource = ds_refresh[i]
cols = metadata[i]
col_objs_list = (
session.query(DruidColumn)
.filter(DruidColumn.datasource_name == datasource.datasource_name)
.filter(or_(DruidColumn.column_name == col for col in cols))
)
col_objs = {col.column_name: col for col in col_objs_list}
for col in cols:
if col == '__time': # skip the time column
continue
col_obj = col_objs.get(col, None)
if not col_obj:
col_obj = DruidColumn(
datasource_name=datasource.datasource_name,
column_name=col)
with session.no_autoflush:
session.add(col_obj)
datatype = cols[col]['type']
if datatype == 'STRING':
col_obj.groupby = True
col_obj.filterable = True
if datatype == 'hyperUnique' or datatype == 'thetaSketch':
col_obj.count_distinct = True
# Allow sum/min/max for long or double
if datatype == 'LONG' or datatype == 'DOUBLE':
col_obj.sum = True
col_obj.min = True
col_obj.max = True
col_obj.type = datatype
col_obj.datasource = datasource
datasource.generate_metrics_for(col_objs_list)
session.commit()
@property
def perm(self):
return '[{obj.cluster_name}].(id:{obj.id})'.format(obj=self)
def get_perm(self):
return self.perm
@property
def name(self):
return self.verbose_name if self.verbose_name else self.cluster_name
@property
def unique_name(self):
return self.verbose_name if self.verbose_name else self.cluster_name
class DruidColumn(Model, BaseColumn):
"""ORM model for storing Druid datasource column metadata"""
__tablename__ = 'columns'
datasource_name = Column(
String(255),
ForeignKey('datasources.datasource_name'))
# Setting enable_typechecks=False disables polymorphic inheritance.
datasource = relationship(
'DruidDatasource',
backref=backref('columns', cascade='all, delete-orphan'),
enable_typechecks=False)
dimension_spec_json = Column(Text)
export_fields = (
'datasource_name', 'column_name', 'is_active', 'type', 'groupby',
'count_distinct', 'sum', 'avg', 'max', 'min', 'filterable',
'description', 'dimension_spec_json',
)
def __repr__(self):
return self.column_name
@property
def expression(self):
return self.dimension_spec_json
@property
def dimension_spec(self):
if self.dimension_spec_json:
return json.loads(self.dimension_spec_json)
def get_metrics(self):
metrics = {}
metrics['count'] = DruidMetric(
metric_name='count',
verbose_name='COUNT(*)',
metric_type='count',
json=json.dumps({'type': 'count', 'name': 'count'}),
)
# Somehow we need to reassign this for UDAFs
if self.type in ('DOUBLE', 'FLOAT'):
corrected_type = 'DOUBLE'
else:
corrected_type = self.type
if self.sum and self.is_num:
mt = corrected_type.lower() + 'Sum'
name = 'sum__' + self.column_name
metrics[name] = DruidMetric(
metric_name=name,
metric_type='sum',
verbose_name='SUM({})'.format(self.column_name),
json=json.dumps({
'type': mt, 'name': name, 'fieldName': self.column_name}),
)
if self.avg and self.is_num:
mt = corrected_type.lower() + 'Avg'
name = 'avg__' + self.column_name
metrics[name] = DruidMetric(
metric_name=name,
metric_type='avg',
verbose_name='AVG({})'.format(self.column_name),
json=json.dumps({
'type': mt, 'name': name, 'fieldName': self.column_name}),
)
if self.min and self.is_num:
mt = corrected_type.lower() + 'Min'
name = 'min__' + self.column_name
metrics[name] = DruidMetric(
metric_name=name,
metric_type='min',
verbose_name='MIN({})'.format(self.column_name),
json=json.dumps({
'type': mt, 'name': name, 'fieldName': self.column_name}),
)
if self.max and self.is_num:
mt = corrected_type.lower() + 'Max'
name = 'max__' + self.column_name
metrics[name] = DruidMetric(
metric_name=name,
metric_type='max',
verbose_name='MAX({})'.format(self.column_name),
json=json.dumps({
'type': mt, 'name': name, 'fieldName': self.column_name}),
)
if self.count_distinct:
name = 'count_distinct__' + self.column_name
if self.type == 'hyperUnique' or self.type == 'thetaSketch':
metrics[name] = DruidMetric(
metric_name=name,
verbose_name='COUNT(DISTINCT {})'.format(self.column_name),
metric_type=self.type,
json=json.dumps({
'type': self.type,
'name': name,
'fieldName': self.column_name,
}),
)
else:
metrics[name] = DruidMetric(
metric_name=name,
verbose_name='COUNT(DISTINCT {})'.format(self.column_name),
metric_type='count_distinct',
json=json.dumps({
'type': 'cardinality',
'name': name,
'fieldNames': [self.column_name]}),
)
return metrics
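# Illustrative note (not part of the upstream source): for a hypothetical LONG
# column named "duration" with sum enabled, the loop above emits a DruidMetric
# whose json field is the Druid aggregation spec, roughly:
#   {"type": "longSum", "name": "sum__duration", "fieldName": "duration"}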
def generate_metrics(self):
"""Generate metrics based on the column metadata"""
metrics = self.get_metrics()
dbmetrics = (
db.session.query(DruidMetric)
.filter(DruidCluster.cluster_name == self.datasource.cluster_name)
.filter(DruidMetric.datasource_name == self.datasource_name)
.filter(or_(
DruidMetric.metric_name == m for m in metrics
))
)
dbmetrics = {metric.metric_name: metric for metric in dbmetrics}
for metric in metrics.values():
metric.datasource_name = self.datasource_name
if not dbmetrics.get(metric.metric_name, None):
db.session.add(metric)
@classmethod
def import_obj(cls, i_column):
def lookup_obj(lookup_column):
return db.session.query(DruidColumn).filter(
DruidColumn.datasource_name == lookup_column.datasource_name,
DruidColumn.column_name == lookup_column.column_name).first()
return import_util.import_simple_obj(db.session, i_column, lookup_obj)
class DruidMetric(Model, BaseMetric):
"""ORM object referencing Druid metrics for a datasource"""
__tablename__ = 'metrics'
datasource_name = Column(
String(255),
ForeignKey('datasources.datasource_name'))
# Setting enable_typechecks=False disables polymorphic inheritance.
datasource = relationship(
'DruidDatasource',
backref=backref('metrics', cascade='all, delete-orphan'),
enable_typechecks=False)
json = Column(Text)
export_fields = (
'metric_name', 'verbose_name', 'metric_type', 'datasource_name',
'json', 'description', 'is_restricted', 'd3format',
)
@property
def expression(self):
return self.json
@property
def json_obj(self):
try:
obj = json.loads(self.json)
except Exception:
obj = {}
return obj
@property
def perm(self):
return (
'{parent_name}.[{obj.metric_name}](id:{obj.id})'
).format(obj=self,
parent_name=self.datasource.full_name,
) if self.datasource else None
@classmethod
def import_obj(cls, i_metric):
def lookup_obj(lookup_metric):
return db.session.query(DruidMetric).filter(
DruidMetric.datasource_name == lookup_metric.datasource_name,
DruidMetric.metric_name == lookup_metric.metric_name).first()
return import_util.import_simple_obj(db.session, i_metric, lookup_obj)
class DruidDatasource(Model, BaseDatasource):
"""ORM object referencing Druid datasources (tables)"""
__tablename__ = 'datasources'
type = 'druid'
query_langtage = 'json'
cluster_class = DruidCluster
metric_class = DruidMetric
column_class = DruidColumn
baselink = 'druiddatasourcemodelview'
# Columns
datasource_name = Column(String(255), unique=True)
is_hidden = Column(Boolean, default=False)
fetch_values_from = Column(String(100))
cluster_name = Column(
String(250), ForeignKey('clusters.cluster_name'))
cluster = relationship(
'DruidCluster', backref='datasources', foreign_keys=[cluster_name])
user_id = Column(Integer, ForeignKey('ab_user.id'))
owner = relationship(
sm.user_model,
backref=backref('datasources', cascade='all, delete-orphan'),
foreign_keys=[user_id])
export_fields = (
'datasource_name', 'is_hidden', 'description', 'default_endpoint',
'cluster_name', 'offset', 'cache_timeout', 'params',
)
@property
def database(self):
return self.cluster
@property
def connection(self):
return str(self.database)
@property
def num_cols(self):
return [c.column_name for c in self.columns if c.is_num]
@property
def name(self):
return self.datasource_name
@property
def schema(self):
ds_name = self.datasource_name or ''
name_pieces = ds_name.split('.')
if len(name_pieces) > 1:
return name_pieces[0]
else:
return None
@property
def schema_perm(self):
"""Returns schema permission if present, cluster one otherwise."""
return utils.get_schema_perm(self.cluster, self.schema)
def get_perm(self):
return (
'[{obj.cluster_name}].[{obj.datasource_name}]'
'(id:{obj.id})').format(obj=self)
@property
def link(self):
name = escape(self.datasource_name)
return Markup('<a href="{self.url}">{name}</a>').format(**locals())
@property
def full_name(self):
return utils.get_datasource_full_name(
self.cluster_name, self.datasource_name)
@property
def time_column_grains(self):
return {
'time_columns': [
'all', '5 seconds', '30 seconds', '1 minute',
'5 minutes', '1 hour', '6 hour', '1 day', '7 days',
'week', 'week_starting_sunday', 'week_ending_saturday',
'month',
],
'time_grains': ['now'],
}
def __repr__(self):
return self.datasource_name
@renders('datasource_name')
def datasource_link(self):
url = '/superset/explore/{obj.type}/{obj.id}/'.format(obj=self)
name = escape(self.datasource_name)
return Markup('<a href="{url}">{name}</a>'.format(**locals()))
def get_metric_obj(self, metric_name):
return [
m.json_obj for m in self.metrics
if m.metric_name == metric_name
][0]
@classmethod
def import_obj(cls, i_datasource, import_time=None):
"""Imports the datasource from the object to the database.
        Metrics, columns and the datasource itself will be overridden if they
        already exist. This function can be used to import/export dashboards
        between multiple superset instances. Audit metadata isn't copied over.
"""
def lookup_datasource(d):
return db.session.query(DruidDatasource).join(DruidCluster).filter(
DruidDatasource.datasource_name == d.datasource_name,
DruidCluster.cluster_name == d.cluster_name,
).first()
def lookup_cluster(d):
return db.session.query(DruidCluster).filter_by(
cluster_name=d.cluster_name).one()
return import_util.import_datasource(
db.session, i_datasource, lookup_cluster, lookup_datasource,
import_time)
@staticmethod
def version_higher(v1, v2):
"""is v1 higher than v2
>>> DruidDatasource.version_higher('0.8.2', '0.9.1')
False
>>> DruidDatasource.version_higher('0.8.2', '0.6.1')
True
>>> DruidDatasource.version_higher('0.8.2', '0.8.2')
False
>>> DruidDatasource.version_higher('0.8.2', '0.9.BETA')
False
>>> DruidDatasource.version_higher('0.8.2', '0.9')
False
"""
def int_or_0(v):
try:
v = int(v)
except (TypeError, ValueError):
v = 0
return v
v1nums = [int_or_0(n) for n in v1.split('.')]
v2nums = [int_or_0(n) for n in v2.split('.')]
v1nums = (v1nums + [0, 0, 0])[:3]
v2nums = (v2nums + [0, 0, 0])[:3]
return v1nums[0] > v2nums[0] or \
(v1nums[0] == v2nums[0] and v1nums[1] > v2nums[1]) or \
(v1nums[0] == v2nums[0] and v1nums[1] == v2nums[1] and v1nums[2] > v2nums[2])
def latest_metadata(self):
"""Returns segment metadata from the latest segment"""
logging.info('Syncing datasource [{}]'.format(self.datasource_name))
client = self.cluster.get_pydruid_client()
results = client.time_boundary(datasource=self.datasource_name)
if not results:
return
max_time = results[0]['result']['maxTime']
max_time = dparse(max_time)
        # Query segmentMetadata for the past 7 days. On Druid versions prior
        # to 0.8.2 the right bound has to be set to more than 1 day ago in
        # order to exclude realtime segments, which triggered a bug
        # (fixed in druid 0.8.2).
        # https://groups.google.com/forum/#!topic/druid-user/gVCqqspHqOQ
# https://groups.google.com/forum/#!topic/druid-user/gVCqqspHqOQ
lbound = (max_time - timedelta(days=7)).isoformat()
if not self.version_higher(self.cluster.druid_version, '0.8.2'):
rbound = (max_time - timedelta(1)).isoformat()
else:
rbound = max_time.isoformat()
segment_metadata = None
try:
segment_metadata = client.segment_metadata(
datasource=self.datasource_name,
intervals=lbound + '/' + rbound,
merge=self.merge_flag,
analysisTypes=[])
except Exception as e:
logging.warning('Failed first attempt to get latest segment')
logging.exception(e)
if not segment_metadata:
# if no segments in the past 7 days, look at all segments
lbound = datetime(1901, 1, 1).isoformat()[:10]
if not self.version_higher(self.cluster.druid_version, '0.8.2'):
rbound = datetime.now().isoformat()
else:
rbound = datetime(2050, 1, 1).isoformat()[:10]
try:
segment_metadata = client.segment_metadata(
datasource=self.datasource_name,
intervals=lbound + '/' + rbound,
merge=self.merge_flag,
analysisTypes=[])
except Exception as e:
logging.warning('Failed 2nd attempt to get latest segment')
logging.exception(e)
if segment_metadata:
return segment_metadata[-1]['columns']
def generate_metrics(self):
self.generate_metrics_for(self.columns)
def generate_metrics_for(self, columns):
metrics = {}
for col in columns:
metrics.update(col.get_metrics())
dbmetrics = (
db.session.query(DruidMetric)
.filter(DruidCluster.cluster_name == self.cluster_name)
.filter(DruidMetric.datasource_name == self.datasource_name)
.filter(or_(DruidMetric.metric_name == m for m in metrics))
)
dbmetrics = {metric.metric_name: metric for metric in dbmetrics}
for metric in metrics.values():
metric.datasource_name = self.datasource_name
if not dbmetrics.get(metric.metric_name, None):
with db.session.no_autoflush:
db.session.add(metric)
@classmethod
def sync_to_db_from_config(
cls,
druid_config,
user,
cluster,
refresh=True):
"""Merges the ds config from druid_config into one stored in the db."""
session = db.session
datasource = (
session.query(cls)
.filter_by(datasource_name=druid_config['name'])
.first()
)
# Create a new datasource.
if not datasource:
datasource = cls(
datasource_name=druid_config['name'],
cluster=cluster,
owner=user,
changed_by_fk=user.id,
created_by_fk=user.id,
)
session.add(datasource)
elif not refresh:
return
dimensions = druid_config['dimensions']
col_objs = (
session.query(DruidColumn)
.filter(DruidColumn.datasource_name == druid_config['name'])
.filter(or_(DruidColumn.column_name == dim for dim in dimensions))
)
col_objs = {col.column_name: col for col in col_objs}
for dim in dimensions:
col_obj = col_objs.get(dim, None)
if not col_obj:
col_obj = DruidColumn(
datasource_name=druid_config['name'],
column_name=dim,
groupby=True,
filterable=True,
# TODO: fetch type from Hive.
type='STRING',
datasource=datasource,
)
session.add(col_obj)
# Import Druid metrics
metric_objs = (
session.query(DruidMetric)
.filter(DruidMetric.datasource_name == druid_config['name'])
.filter(or_(DruidMetric.metric_name == spec['name']
for spec in druid_config['metrics_spec']))
)
metric_objs = {metric.metric_name: metric for metric in metric_objs}
for metric_spec in druid_config['metrics_spec']:
metric_name = metric_spec['name']
metric_type = metric_spec['type']
metric_json = json.dumps(metric_spec)
if metric_type == 'count':
metric_type = 'longSum'
metric_json = json.dumps({
'type': 'longSum',
'name': metric_name,
'fieldName': metric_name,
})
metric_obj = metric_objs.get(metric_name, None)
if not metric_obj:
metric_obj = DruidMetric(
metric_name=metric_name,
metric_type=metric_type,
verbose_name='%s(%s)' % (metric_type, metric_name),
datasource=datasource,
json=metric_json,
description=(
'Imported from the airolap config dir for %s' %
druid_config['name']),
)
session.add(metric_obj)
session.commit()
@staticmethod
def time_offset(granularity):
if granularity == 'week_ending_saturday':
return 6 * 24 * 3600 * 1000 # 6 days
return 0
# uses https://en.wikipedia.org/wiki/ISO_8601
# http://druid.io/docs/0.8.0/querying/granularities.html
# TODO: pass origin from the UI
@staticmethod
def granularity(period_name, timezone=None, origin=None):
if not period_name or period_name == 'all':
return 'all'
iso_8601_dict = {
'5 seconds': 'PT5S',
'30 seconds': 'PT30S',
'1 minute': 'PT1M',
'5 minutes': 'PT5M',
'1 hour': 'PT1H',
'6 hour': 'PT6H',
'one day': 'P1D',
'1 day': 'P1D',
'7 days': 'P7D',
'week': 'P1W',
'week_starting_sunday': 'P1W',
'week_ending_saturday': 'P1W',
'month': 'P1M',
}
granularity = {'type': 'period'}
if timezone:
granularity['timeZone'] = timezone
if origin:
dttm = utils.parse_human_datetime(origin)
granularity['origin'] = dttm.isoformat()
if period_name in iso_8601_dict:
granularity['period'] = iso_8601_dict[period_name]
if period_name in ('week_ending_saturday', 'week_starting_sunday'):
# use Sunday as start of the week
granularity['origin'] = '2016-01-03T00:00:00'
elif not isinstance(period_name, string_types):
granularity['type'] = 'duration'
granularity['duration'] = period_name
elif period_name.startswith('P'):
# identify if the string is the iso_8601 period
granularity['period'] = period_name
else:
granularity['type'] = 'duration'
granularity['duration'] = utils.parse_human_timedelta(
period_name).total_seconds() * 1000
return granularity
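    # Illustrative use of granularity() (hypothetical values, not part of the
    # original module): a named period maps to its ISO 8601 equivalent, e.g.
    #   DruidDatasource.granularity('5 minutes')
    #       -> {'type': 'period', 'period': 'PT5M'}
    # while a non-string value is passed through as a Druid 'duration'
    # granularity (milliseconds), e.g.
    #   DruidDatasource.granularity(30000)
    #       -> {'type': 'duration', 'duration': 30000}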
@staticmethod
def _metrics_and_post_aggs(metrics, metrics_dict):
all_metrics = []
post_aggs = {}
def recursive_get_fields(_conf):
_type = _conf.get('type')
_field = _conf.get('field')
_fields = _conf.get('fields')
field_names = []
if _type in ['fieldAccess', 'hyperUniqueCardinality',
'quantile', 'quantiles']:
field_names.append(_conf.get('fieldName', ''))
if _field:
field_names += recursive_get_fields(_field)
if _fields:
for _f in _fields:
field_names += recursive_get_fields(_f)
return list(set(field_names))
for metric_name in metrics:
metric = metrics_dict[metric_name]
if metric.metric_type != 'postagg':
all_metrics.append(metric_name)
else:
mconf = metric.json_obj
all_metrics += recursive_get_fields(mconf)
all_metrics += mconf.get('fieldNames', [])
if mconf.get('type') == 'javascript':
post_aggs[metric_name] = JavascriptPostAggregator(
name=mconf.get('name', ''),
field_names=mconf.get('fieldNames', []),
function=mconf.get('function', ''))
elif mconf.get('type') == 'quantile':
post_aggs[metric_name] = Quantile(
mconf.get('name', ''),
mconf.get('probability', ''),
)
elif mconf.get('type') == 'quantiles':
post_aggs[metric_name] = Quantiles(
mconf.get('name', ''),
mconf.get('probabilities', ''),
)
elif mconf.get('type') == 'fieldAccess':
post_aggs[metric_name] = Field(mconf.get('name'))
elif mconf.get('type') == 'constant':
post_aggs[metric_name] = Const(
mconf.get('value'),
output_name=mconf.get('name', ''),
)
elif mconf.get('type') == 'hyperUniqueCardinality':
post_aggs[metric_name] = HyperUniqueCardinality(
mconf.get('name'),
)
elif mconf.get('type') == 'arithmetic':
post_aggs[metric_name] = Postaggregator(
mconf.get('fn', '/'),
mconf.get('fields', []),
mconf.get('name', ''))
else:
post_aggs[metric_name] = CustomPostAggregator(
mconf.get('name', ''),
mconf)
return all_metrics, post_aggs
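    # Illustrative behaviour of _metrics_and_post_aggs() (hypothetical metric
    # names, not part of the original module): plain aggregation metrics are
    # returned by name in all_metrics, while metrics whose metric_type is
    # 'postagg' are compiled into pydruid post-aggregator objects, and the
    # field names they reference are added to all_metrics as well, e.g.
    #   all_metrics, post_aggs = DruidDatasource._metrics_and_post_aggs(
    #       ['count', 'avg_ratio'],
    #       {m.metric_name: m for m in datasource.metrics})
    #   # 'count' -> all_metrics; 'avg_ratio' (an 'arithmetic' postagg)
    #   # -> post_aggs['avg_ratio'] as a Postaggregator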
def values_for_column(self,
column_name,
limit=10000):
"""Retrieve some values for the given column"""
# TODO: Use Lexicographic TopNMetricSpec once supported by PyDruid
if self.fetch_values_from:
from_dttm = utils.parse_human_datetime(self.fetch_values_from)
else:
from_dttm = datetime(1970, 1, 1)
qry = dict(
datasource=self.datasource_name,
granularity='all',
intervals=from_dttm.isoformat() + '/' + datetime.now().isoformat(),
aggregations=dict(count=count('count')),
dimension=column_name,
metric='count',
threshold=limit,
)
client = self.cluster.get_pydruid_client()
client.topn(**qry)
df = client.export_pandas()
return [row[column_name] for row in df.to_records(index=False)]
def get_query_str(self, query_obj, phase=1, client=None):
return self.run_query(client=client, phase=phase, **query_obj)
def _add_filter_from_pre_query_data(self, df, dimensions, dim_filter):
ret = dim_filter
if df is not None and not df.empty:
new_filters = []
for unused, row in df.iterrows():
fields = []
for dim in dimensions:
f = Dimension(dim) == row[dim]
fields.append(f)
if len(fields) > 1:
term = Filter(type='and', fields=fields)
new_filters.append(term)
elif fields:
new_filters.append(fields[0])
if new_filters:
ff = Filter(type='or', fields=new_filters)
if not dim_filter:
ret = ff
else:
ret = Filter(type='and', fields=[ff, dim_filter])
return ret
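    # Illustrative behaviour (hypothetical values, not part of the original
    # module): given a phase-one dataframe with rows {'country': 'US'} and
    # {'country': 'FR'} and dimensions=['country'], the helper returns
    #   Filter(type='or', fields=[Dimension('country') == 'US',
    #                             Dimension('country') == 'FR'])
    # AND-ed with the original dim_filter when one was passed in.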
def run_query( # noqa / druid
self,
groupby, metrics,
granularity,
from_dttm, to_dttm,
filter=None, # noqa
is_timeseries=True,
timeseries_limit=None,
timeseries_limit_metric=None,
row_limit=None,
inner_from_dttm=None, inner_to_dttm=None,
orderby=None,
extras=None, # noqa
select=None, # noqa
columns=None, phase=2, client=None, form_data=None,
order_desc=True):
"""Runs a query against Druid and returns a dataframe.
"""
# TODO refactor into using a TBD Query object
client = client or self.cluster.get_pydruid_client()
if not is_timeseries:
granularity = 'all'
inner_from_dttm = inner_from_dttm or from_dttm
inner_to_dttm = inner_to_dttm or to_dttm
# add tzinfo to native datetime with config
from_dttm = from_dttm.replace(tzinfo=DRUID_TZ)
to_dttm = to_dttm.replace(tzinfo=DRUID_TZ)
timezone = from_dttm.tzname()
query_str = ''
metrics_dict = {m.metric_name: m for m in self.metrics}
columns_dict = {c.column_name: c for c in self.columns}
all_metrics, post_aggs = self._metrics_and_post_aggs(
metrics,
metrics_dict)
aggregations = OrderedDict()
for m in self.metrics:
if m.metric_name in all_metrics:
aggregations[m.metric_name] = m.json_obj
rejected_metrics = [
m.metric_name for m in self.metrics
if m.is_restricted and
m.metric_name in aggregations.keys() and
not sm.has_access('metric_access', m.perm)
]
if rejected_metrics:
raise MetricPermException(
'Access to the metrics denied: ' + ', '.join(rejected_metrics),
)
# the dimensions list with dimensionSpecs expanded
dimensions = []
groupby = [gb for gb in groupby if gb in columns_dict]
for column_name in groupby:
col = columns_dict.get(column_name)
dim_spec = col.dimension_spec
if dim_spec:
dimensions.append(dim_spec)
else:
dimensions.append(column_name)
qry = dict(
datasource=self.datasource_name,
dimensions=dimensions,
aggregations=aggregations,
granularity=DruidDatasource.granularity(
granularity,
timezone=timezone,
origin=extras.get('druid_time_origin'),
),
post_aggregations=post_aggs,
intervals=from_dttm.isoformat() + '/' + to_dttm.isoformat(),
)
filters = DruidDatasource.get_filters(filter, self.num_cols)
if filters:
qry['filter'] = filters
having_filters = self.get_having_filters(extras.get('having_druid'))
if having_filters:
qry['having'] = having_filters
order_direction = 'descending' if order_desc else 'ascending'
if len(groupby) == 0 and not having_filters:
del qry['dimensions']
client.timeseries(**qry)
if (
not having_filters and
len(groupby) == 1 and
order_desc and
not isinstance(list(qry.get('dimensions'))[0], dict)
):
dim = list(qry.get('dimensions'))[0]
if timeseries_limit_metric:
order_by = timeseries_limit_metric
else:
order_by = list(qry['aggregations'].keys())[0]
            # Limit on the number of timeseries, doing a two-phase query
pre_qry = deepcopy(qry)
pre_qry['granularity'] = 'all'
pre_qry['threshold'] = min(row_limit,
timeseries_limit or row_limit)
pre_qry['metric'] = order_by
pre_qry['dimension'] = dim
del pre_qry['dimensions']
client.topn(**pre_qry)
query_str += '// Two phase query\n// Phase 1\n'
query_str += json.dumps(
client.query_builder.last_query.query_dict, indent=2)
query_str += '\n'
if phase == 1:
return query_str
query_str += (
"// Phase 2 (built based on phase one's results)\n")
df = client.export_pandas()
qry['filter'] = self._add_filter_from_pre_query_data(
df,
qry['dimensions'], filters)
qry['threshold'] = timeseries_limit or 1000
if row_limit and granularity == 'all':
qry['threshold'] = row_limit
qry['dimension'] = list(qry.get('dimensions'))[0]
qry['dimension'] = dim
del qry['dimensions']
qry['metric'] = list(qry['aggregations'].keys())[0]
client.topn(**qry)
elif len(groupby) > 1 or having_filters or not order_desc:
# If grouping on multiple fields or using a having filter
# we have to force a groupby query
if timeseries_limit and is_timeseries:
order_by = metrics[0] if metrics else self.metrics[0]
if timeseries_limit_metric:
order_by = timeseries_limit_metric
                # Limit on the number of timeseries, doing a two-phase query
pre_qry = deepcopy(qry)
pre_qry['granularity'] = 'all'
pre_qry['limit_spec'] = {
'type': 'default',
'limit': min(timeseries_limit, row_limit),
'intervals': (
inner_from_dttm.isoformat() + '/' +
inner_to_dttm.isoformat()),
'columns': [{
'dimension': order_by,
'direction': order_direction,
}],
}
client.groupby(**pre_qry)
query_str += '// Two phase query\n// Phase 1\n'
query_str += json.dumps(
client.query_builder.last_query.query_dict, indent=2)
query_str += '\n'
if phase == 1:
return query_str
query_str += (
"// Phase 2 (built based on phase one's results)\n")
df = client.export_pandas()
qry['filter'] = self._add_filter_from_pre_query_data(
df,
qry['dimensions'],
filters,
)
qry['limit_spec'] = None
if row_limit:
qry['limit_spec'] = {
'type': 'default',
'limit': row_limit,
'columns': [{
'dimension': (
metrics[0] if metrics else self.metrics[0]),
'direction': order_direction,
}],
}
client.groupby(**qry)
query_str += json.dumps(
client.query_builder.last_query.query_dict, indent=2)
return query_str
def query(self, query_obj):
qry_start_dttm = datetime.now()
client = self.cluster.get_pydruid_client()
query_str = self.get_query_str(
client=client, query_obj=query_obj, phase=2)
df = client.export_pandas()
if df is None or df.size == 0:
raise Exception(_('No data was returned.'))
df.columns = [
DTTM_ALIAS if c == 'timestamp' else c for c in df.columns]
is_timeseries = query_obj['is_timeseries'] \
if 'is_timeseries' in query_obj else True
if (
not is_timeseries and
DTTM_ALIAS in df.columns):
del df[DTTM_ALIAS]
# Reordering columns
cols = []
if DTTM_ALIAS in df.columns:
cols += [DTTM_ALIAS]
cols += [col for col in query_obj['groupby'] if col in df.columns]
cols += [col for col in query_obj['metrics'] if col in df.columns]
df = df[cols]
time_offset = DruidDatasource.time_offset(query_obj['granularity'])
def increment_timestamp(ts):
dt = utils.parse_human_datetime(ts).replace(
tzinfo=DRUID_TZ)
return dt + timedelta(milliseconds=time_offset)
if DTTM_ALIAS in df.columns and time_offset:
df[DTTM_ALIAS] = df[DTTM_ALIAS].apply(increment_timestamp)
return QueryResult(
df=df,
query=query_str,
duration=datetime.now() - qry_start_dttm)
@staticmethod
def get_filters(raw_filters, num_cols): # noqa
filters = None
for flt in raw_filters:
if not all(f in flt for f in ['col', 'op', 'val']):
continue
col = flt['col']
op = flt['op']
eq = flt['val']
cond = None
if op in ('in', 'not in'):
eq = [
types.replace('"', '').strip()
if isinstance(types, string_types)
else types
for types in eq]
elif not isinstance(flt['val'], string_types):
eq = eq[0] if eq and len(eq) > 0 else ''
is_numeric_col = col in num_cols
if is_numeric_col:
if op in ('in', 'not in'):
eq = [utils.string_to_num(v) for v in eq]
else:
eq = utils.string_to_num(eq)
if op == '==':
cond = Dimension(col) == eq
elif op == '!=':
cond = Dimension(col) != eq
elif op in ('in', 'not in'):
fields = []
# ignore the filter if it has no value
if not len(eq):
continue
elif len(eq) == 1:
cond = Dimension(col) == eq[0]
else:
for s in eq:
fields.append(Dimension(col) == s)
cond = Filter(type='or', fields=fields)
if op == 'not in':
cond = ~cond
elif op == 'regex':
cond = Filter(type='regex', pattern=eq, dimension=col)
elif op == '>=':
cond = Bound(col, eq, None, alphaNumeric=is_numeric_col)
elif op == '<=':
cond = Bound(col, None, eq, alphaNumeric=is_numeric_col)
elif op == '>':
cond = Bound(
col, eq, None,
lowerStrict=True, alphaNumeric=is_numeric_col,
)
elif op == '<':
cond = Bound(
col, None, eq,
upperStrict=True, alphaNumeric=is_numeric_col,
)
if filters:
filters = Filter(type='and', fields=[
cond,
filters,
])
else:
filters = cond
return filters
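    # Illustrative use of get_filters() (hypothetical column names, not part
    # of the original module): each raw filter dict becomes a pydruid
    # condition and successive conditions are AND-ed together, e.g.
    #   DruidDatasource.get_filters(
    #       [{'col': 'country', 'op': 'in', 'val': ['US', 'FR']}], [])
    # yields Filter(type='or', fields=[Dimension('country') == 'US',
    #                                  Dimension('country') == 'FR'])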
def _get_having_obj(self, col, op, eq):
cond = None
if op == '==':
if col in self.column_names:
cond = DimSelector(dimension=col, value=eq)
else:
cond = Aggregation(col) == eq
elif op == '>':
cond = Aggregation(col) > eq
elif op == '<':
cond = Aggregation(col) < eq
return cond
def get_having_filters(self, raw_filters):
filters = None
reversed_op_map = {
'!=': '==',
'>=': '<',
'<=': '>',
}
for flt in raw_filters:
if not all(f in flt for f in ['col', 'op', 'val']):
continue
col = flt['col']
op = flt['op']
eq = flt['val']
cond = None
if op in ['==', '>', '<']:
cond = self._get_having_obj(col, op, eq)
elif op in reversed_op_map:
cond = ~self._get_having_obj(col, reversed_op_map[op], eq)
if filters:
filters = filters & cond
else:
filters = cond
return filters
@classmethod
def query_datasources_by_name(
cls, session, database, datasource_name, schema=None):
return (
session.query(cls)
.filter_by(cluster_name=database.id)
.filter_by(datasource_name=datasource_name)
.all()
)
sa.event.listen(DruidDatasource, 'after_insert', set_perm)
sa.event.listen(DruidDatasource, 'after_update', set_perm)
| apache-2.0 |
hunse/vrep-python | dvs-play.py | 1 | 1515 | """
Play DVS events in real time
TODO: deal with looping event times for recordings > 65 s
"""
import numpy as np
import matplotlib.pyplot as plt
import dvs
def close(a, b, atol=1e-8, rtol=1e-5):
return np.abs(a - b) < atol + rtol * b
def imshow(image, ax=None):
ax = plt.gca() if ax is None else ax
ax.imshow(image, vmin=-1, vmax=1, cmap='gray', interpolation=None)
def add_to_image(image, events):
for x, y, s, _ in events:
image[y, x] += 1 if s else -1
def as_image(events):
image = np.zeros((128, 128), dtype=float)
add_to_image(image, events)
return image
# filename = 'dvs.npz'
filename = 'dvs-ball-10ms.npz'
events = dvs.load(filename, dt_round=True)
udiffs = np.unique(np.diff(np.unique(events['t'])))
assert np.allclose(udiffs, 0.01)
plt.figure(1)
plt.clf()
times = [0.2, 0.3, 0.4, 0.5, 0.6, 0.7]
for i in range(6):
plt.subplot(2, 3, i+1)
imshow(as_image(events[close(events['t'], times[i])]))
plt.title("t = %0.3f" % times[i])
# plt.figure(1)
# plt.clf()
# image = np.zeros((128, 128), dtype=float)
# plt_image = plt.imshow(image, vmin=-1, vmax=1, cmap='gray', interpolation=None)
# plt.gca().invert_yaxis()
# while t0 < t_max:
# time.sleep(0.001)
# t1 = time.time() - t_world
# new_events = events[(ts > t0) & (ts < t1)]
# dt = t1 - t0
# image *= np.exp(-dt / 0.01)
# for x, y, s, _ in new_events:
# image[y, x] += 1 if s else -1
# plt_image.set_data(image)
# plt.draw()
# t0 = t1
plt.show()
| gpl-2.0 |
Barmaley-exe/scikit-learn | sklearn/metrics/cluster/tests/test_bicluster.py | 394 | 1770 | """Testing for bicluster metrics module"""
import numpy as np
from sklearn.utils.testing import assert_equal, assert_almost_equal
from sklearn.metrics.cluster.bicluster import _jaccard
from sklearn.metrics import consensus_score
def test_jaccard():
a1 = np.array([True, True, False, False])
a2 = np.array([True, True, True, True])
a3 = np.array([False, True, True, False])
a4 = np.array([False, False, True, True])
assert_equal(_jaccard(a1, a1, a1, a1), 1)
assert_equal(_jaccard(a1, a1, a2, a2), 0.25)
assert_equal(_jaccard(a1, a1, a3, a3), 1.0 / 7)
assert_equal(_jaccard(a1, a1, a4, a4), 0)
def test_consensus_score():
a = [[True, True, False, False],
[False, False, True, True]]
b = a[::-1]
assert_equal(consensus_score((a, a), (a, a)), 1)
assert_equal(consensus_score((a, a), (b, b)), 1)
assert_equal(consensus_score((a, b), (a, b)), 1)
assert_equal(consensus_score((a, b), (b, a)), 1)
assert_equal(consensus_score((a, a), (b, a)), 0)
assert_equal(consensus_score((a, a), (a, b)), 0)
assert_equal(consensus_score((b, b), (a, b)), 0)
assert_equal(consensus_score((b, b), (b, a)), 0)
def test_consensus_score_issue2445():
''' Different number of biclusters in A and B'''
a_rows = np.array([[True, True, False, False],
[False, False, True, True],
[False, False, False, True]])
a_cols = np.array([[True, True, False, False],
[False, False, True, True],
[False, False, False, True]])
idx = [0, 2]
s = consensus_score((a_rows, a_cols), (a_rows[idx], a_cols[idx]))
# B contains 2 of the 3 biclusters in A, so score should be 2/3
assert_almost_equal(s, 2.0/3.0)
| bsd-3-clause |
Srisai85/scikit-learn | examples/covariance/plot_sparse_cov.py | 300 | 5078 | """
======================================
Sparse inverse covariance estimation
======================================
Using the GraphLasso estimator to learn a covariance and sparse precision
from a small number of samples.
To estimate a probabilistic model (e.g. a Gaussian model), estimating the
precision matrix, that is the inverse covariance matrix, is as important
as estimating the covariance matrix. Indeed a Gaussian model is
parametrized by the precision matrix.
To be in favorable recovery conditions, we sample the data from a model
with a sparse inverse covariance matrix. In addition, we ensure that the
data is not too correlated (limiting the largest coefficient of the
precision matrix) and that there are no small coefficients in the
precision matrix that cannot be recovered. In addition, with a small
number of observations, it is easier to recover a correlation matrix
rather than a covariance, thus we scale the time series.
Here, the number of samples is slightly larger than the number of
dimensions, thus the empirical covariance is still invertible. However,
as the observations are strongly correlated, the empirical covariance
matrix is ill-conditioned and as a result its inverse --the empirical
precision matrix-- is very far from the ground truth.
If we use l2 shrinkage, as with the Ledoit-Wolf estimator, the small number
of samples forces us to shrink a lot. As a result, the Ledoit-Wolf precision
is fairly close to the ground truth precision, which is not far from being
diagonal, but the off-diagonal structure is lost.
The l1-penalized estimator can recover part of this off-diagonal
structure. It learns a sparse precision. It is not able to
recover the exact sparsity pattern: it detects too many non-zero
coefficients. However, the highest non-zero coefficients of the l1
estimate correspond to the non-zero coefficients in the ground truth.
Finally, the coefficients of the l1 precision estimate are biased toward
zero: because of the penalty, they are all smaller than the corresponding
ground truth value, as can be seen on the figure.
Note that the color range of the precision matrices is tweaked to
improve readability of the figure. The full range of values of the
empirical precision is not displayed.
The alpha parameter of the GraphLasso setting the sparsity of the model is
set by internal cross-validation in the GraphLassoCV. As can be
seen on figure 2, the grid to compute the cross-validation score is
iteratively refined in the neighborhood of the maximum.
"""
print(__doc__)
# author: Gael Varoquaux <[email protected]>
# License: BSD 3 clause
# Copyright: INRIA
import numpy as np
from scipy import linalg
from sklearn.datasets import make_sparse_spd_matrix
from sklearn.covariance import GraphLassoCV, ledoit_wolf
import matplotlib.pyplot as plt
##############################################################################
# Generate the data
n_samples = 60
n_features = 20
prng = np.random.RandomState(1)
prec = make_sparse_spd_matrix(n_features, alpha=.98,
smallest_coef=.4,
largest_coef=.7,
random_state=prng)
cov = linalg.inv(prec)
d = np.sqrt(np.diag(cov))
cov /= d
cov /= d[:, np.newaxis]
prec *= d
prec *= d[:, np.newaxis]
X = prng.multivariate_normal(np.zeros(n_features), cov, size=n_samples)
X -= X.mean(axis=0)
X /= X.std(axis=0)
##############################################################################
# Estimate the covariance
emp_cov = np.dot(X.T, X) / n_samples
model = GraphLassoCV()
model.fit(X)
cov_ = model.covariance_
prec_ = model.precision_
lw_cov_, _ = ledoit_wolf(X)
lw_prec_ = linalg.inv(lw_cov_)
##############################################################################
# Plot the results
plt.figure(figsize=(10, 6))
plt.subplots_adjust(left=0.02, right=0.98)
# plot the covariances
covs = [('Empirical', emp_cov), ('Ledoit-Wolf', lw_cov_),
('GraphLasso', cov_), ('True', cov)]
vmax = cov_.max()
for i, (name, this_cov) in enumerate(covs):
plt.subplot(2, 4, i + 1)
plt.imshow(this_cov, interpolation='nearest', vmin=-vmax, vmax=vmax,
cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.title('%s covariance' % name)
# plot the precisions
precs = [('Empirical', linalg.inv(emp_cov)), ('Ledoit-Wolf', lw_prec_),
('GraphLasso', prec_), ('True', prec)]
vmax = .9 * prec_.max()
for i, (name, this_prec) in enumerate(precs):
ax = plt.subplot(2, 4, i + 5)
plt.imshow(np.ma.masked_equal(this_prec, 0),
interpolation='nearest', vmin=-vmax, vmax=vmax,
cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.title('%s precision' % name)
ax.set_axis_bgcolor('.7')
# plot the model selection metric
plt.figure(figsize=(4, 3))
plt.axes([.2, .15, .75, .7])
plt.plot(model.cv_alphas_, np.mean(model.grid_scores, axis=1), 'o-')
plt.axvline(model.alpha_, color='.5')
plt.title('Model selection')
plt.ylabel('Cross-validation score')
plt.xlabel('alpha')
plt.show()
| bsd-3-clause |
joshbohde/scikit-learn | examples/plot_roc_crossval.py | 2 | 2019 | """
=============================================================
Receiver operating characteristic (ROC) with cross validation
=============================================================
Example of Receiver operating characteristic (ROC) metric to
evaluate the quality of the output of a classifier using
cross-validation.
"""
print __doc__
import numpy as np
from scipy import interp
import pylab as pl
from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc
from sklearn.cross_validation import StratifiedKFold
################################################################################
# Data IO and generation
# import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
X, y = X[y!=2], y[y!=2]
n_samples, n_features = X.shape
# Add noisy features
X = np.c_[X,np.random.randn(n_samples, 200*n_features)]
################################################################################
# Classification and ROC analysis
# Run classifier with crossvalidation and plot ROC curves
cv = StratifiedKFold(y, k=6)
classifier = svm.SVC(kernel='linear', probability=True)
mean_tpr = 0.0
mean_fpr = np.linspace(0, 1, 100)
all_tpr = []
for i, (train, test) in enumerate(cv):
probas_ = classifier.fit(X[train], y[train]).predict_proba(X[test])
# Compute ROC curve and area the curve
fpr, tpr, thresholds = roc_curve(y[test], probas_[:,1])
mean_tpr += interp(mean_fpr, fpr, tpr)
mean_tpr[0] = 0.0
roc_auc = auc(fpr, tpr)
pl.plot(fpr, tpr, lw=1, label='ROC fold %d (area = %0.2f)' % (i, roc_auc))
pl.plot([0, 1], [0, 1], '--', color=(0.6,0.6,0.6), label='Luck')
mean_tpr /= len(cv)
mean_tpr[-1] = 1.0
mean_auc = auc(mean_fpr, mean_tpr)
pl.plot(mean_fpr, mean_tpr, 'k--',
label='Mean ROC (area = %0.2f)' % mean_auc, lw=2)
pl.xlim([-0.05,1.05])
pl.ylim([-0.05,1.05])
pl.xlabel('False Positive Rate')
pl.ylabel('True Positive Rate')
pl.title('Receiver operating characteristic example')
pl.legend(loc="lower right")
pl.show()
| bsd-3-clause |
Griger/Intel-CervicalCancer-KaggleCompetition | featureHOG.py | 1 | 1456 | from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
import numpy as np
from math import pi
from keras.preprocessing.image import ImageDataGenerator
import cv2
from sklearn.cluster import KMeans
import sklearn.preprocessing as prepro
# Generate new augmented examples
'''
datagen = ImageDataGenerator(
rotation_range=180,
shear_range=pi,
fill_mode='nearest')
train_data = np.load('Datos/train244all.npy')
train_labels = np.load('Datos/train_target244all.npy')
datagen.fit(train_data,rounds=2)
i = 0
nuevas_imagenes = []
tam = 1
for batch in datagen.flow(train_data,train_labels,batch_size = (len(train_data))):
i += 1
if i > tam:
break
nuevas_imagenes.append(batch[0])
nuevas_imagenes = np.array(nuevas_imagenes)
nuevas_imagenes = np.reshape(nuevas_imagenes, (len(train_data)*tam,244,244,3))
np.save('Datos/extraRotations.npy', nuevas_imagenes, allow_pickle=True, fix_imports=True)
'''
train_data = np.load('Datos/train244all.npy')
test_data = np.load('Datos/test244.npy')
hog = cv2.HOGDescriptor()
def getHist(image):
image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
image = image * 255
image = image.astype('uint8')
return hog.compute(image)
histograms = [getHist(img) for img in train_data]
if __name__ == '__main__':
    # Save the computed histograms
    pass
| gpl-3.0 |
WladimirSidorenko/SentiLex | scripts/vo.py | 1 | 10974 | #!/usr/bin/env python2.7
# -*- mode: python; coding: utf-8; -*-
"""Module for generating lexicon using Velikovich's method (2010).
"""
##################################################################
# Imports
from __future__ import unicode_literals, print_function
from collections import Counter
from copy import deepcopy
from datetime import datetime
from itertools import chain
from theano import tensor as TT
from sklearn.model_selection import train_test_split
import codecs
import numpy as np
import sys
import theano
from common import BTCH_SIZE, ENCODING, EPSILON, ESC_CHAR, FMAX, FMIN, \
INFORMATIVE_TAGS, MIN_TOK_CNT, \
NEGATIVE_IDX, NEUTRAL_IDX, POSITIVE_IDX, NONMATCH_RE, SENT_END_RE, \
TAB_RE, check_word, floatX, sgd_updates_adadelta
from common import POSITIVE as POSITIVE_LBL
from common import NEGATIVE as NEGATIVE_LBL
from germanet import normalize
##################################################################
# Constants
DFLT_T = 20
FASTMODE = False
MAX_NGHBRS = 25
TOK_WINDOW = 4 # it actually corresponds to a window of six
MAX_POS_IDS = 10000
MAX_EPOCHS = 5
MIN_EPOCHS = 3
UNK = "%unk%"
UNK_I = 0
##################################################################
# Methods
def _read_files_helper(a_crp_files, a_encoding=ENCODING):
"""Read corpus files and execute specified function.
@param a_crp_files - files of the original corpus
@param a_encoding - encoding of the vector file
@return (Iterator over file lines)
"""
i = 0
tokens_seen = False
for ifname in a_crp_files:
with codecs.open(ifname, 'r', a_encoding) as ifile:
for iline in ifile:
iline = iline.strip().lower()
if not iline or SENT_END_RE.match(iline):
continue
elif iline[0] == ESC_CHAR:
if FASTMODE:
i += 1
if i > 300:
break
if tokens_seen:
tokens_seen = False
yield None, None, None
continue
try:
iform, itag, ilemma = TAB_RE.split(iline)
except:
print("Invalid line format at line: {:s}".format(
repr(iline)), file=sys.stderr
)
continue
tokens_seen = True
yield iform, itag, normalize(ilemma)
yield None, None, None
def _read_files(a_crp_files, a_pos, a_neg, a_neut,
a_pos_re=NONMATCH_RE, a_neg_re=NONMATCH_RE,
a_encoding=ENCODING):
"""Read corpus files and populate one-directional co-occurrences.
@param a_crp_files - files of the original corpus
@param a_pos - initial set of positive terms
@param a_neg - initial set of negative terms
@param a_neut - initial set of neutral terms
@param a_pos_re - regular expression for matching positive terms
@param a_neg_re - regular expression for matching negative terms
@param a_encoding - encoding of the vector file
@return (word2vecid, x, y)
@note constructs statistics in place
"""
print("Populating corpus statistics...",
end="", file=sys.stderr)
word2cnt = Counter(ilemma
for _, itag, ilemma in _read_files_helper(a_crp_files,
a_encoding)
if ilemma is not None and itag[:2] in INFORMATIVE_TAGS
and check_word(ilemma))
print(" done", file=sys.stderr)
word2vecid = {UNK: UNK_I}
for w in chain(a_pos, a_neg, a_neut):
word2vecid[w] = len(word2vecid)
# convert words to vector ids if their counters are big enough
for w, cnt in word2cnt.iteritems():
if cnt >= MIN_TOK_CNT or a_pos_re.search(w) or a_neg_re.search(w):
word2vecid[w] = len(word2vecid)
word2cnt.clear()
# generate the training set
def check_in_seeds(a_form, a_lemma, a_seeds, a_seed_re):
if a_seed_re.search(a_form) or a_seed_re.search(a_lemma) \
or a_form in a_seeds or normalize(a_form) in a_seeds \
or a_lemma in a_seeds:
return True
return False
max_sent_len = 0
X = []
Y = []
toks = []
label = None
for iform, itag, ilemma in _read_files_helper(a_crp_files):
if ilemma is None:
if toks:
if label is not None:
max_sent_len = max(max_sent_len, len(toks))
X.append(deepcopy(toks))
Y.append(label)
del toks[:]
label = None
continue
if ilemma in word2vecid:
toks.append(word2vecid[ilemma])
if check_in_seeds(iform, ilemma, a_pos, a_pos_re):
label = POSITIVE_IDX
elif check_in_seeds(iform, ilemma, a_neg, a_neg_re):
label = NEGATIVE_IDX
elif label is None and check_in_seeds(iform, ilemma,
a_neut, NONMATCH_RE):
label = NEUTRAL_IDX
X = np.array(
[x + [UNK_I] * (max_sent_len - len(x))
for x in X], dtype="int32")
Y = np.array(Y, dtype="int32")
return (word2vecid, max_sent_len, X, Y)
def init_embeddings(vocab_size, k=3):
"""Uniformly initialze lexicon scores for each vocabulary word.
Args:
vocab_size (int): vocabulary size
k (int): dimensionality of embeddings
Returns:
2-tuple(theano.shared, int): embedding matrix, vector dimmensionality
"""
rand_vec = np.random.uniform(-0.25, 0.25, k)
W = floatX(np.broadcast_to(rand_vec,
(vocab_size, k)))
# zero-out the vector of unknown terms
W[UNK_I] *= 0.
return theano.shared(value=W, name='W'), k
def init_nnet(W, k):
"""Initialize neural network.
Args:
W (theano.shared): embedding matrix
k: dimensionality of the vector
"""
# `x' will be a matrix of size `m x n', where `m' is the mini-batch size
# and `n' is the maximum observed sentence length times the dimensionality
# of embeddings (`k')
x = TT.imatrix(name='x')
# `y' will be a vectors of size `m', where `m' is the mini-batch size
y = TT.ivector(name='y')
# `emb_sum' will be a matrix of size `m x k', where `m' is the mini-batch
# size and `k' is dimensionality of embeddings
emb_sum = W[x].sum(axis=1)
# it actually does not make sense to have an identity matrix in the
    # network, but that's what the original Vo implementation actually does
# W2S = theano.shared(value=floatX(np.eye(3)), name="W2S")
# y_prob = TT.nnet.softmax(TT.dot(W2S, emb_sum.T))
y_prob = TT.nnet.softmax(emb_sum)
y_pred = TT.argmax(y_prob, axis=1)
params = [W]
cost = -TT.mean(TT.log(y_prob)[TT.arange(y.shape[0]), y])
updates = sgd_updates_adadelta(params, cost)
train = theano.function([x, y], cost, updates=updates)
acc = TT.sum(TT.eq(y, y_pred))
validate = theano.function([x, y], acc)
zero_vec = TT.basic.zeros(k)
zero_out = theano.function([],
updates=[(W,
TT.set_subtensor(W[UNK_I, :],
zero_vec))])
return (train, validate, zero_out, params)
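# A minimal sketch of how the pieces above fit together (this mirrors what
# vo() does below; vocabulary size and batch contents are hypothetical):
#   W, k = init_embeddings(vocab_size=1000)
#   train, validate, zero_out, params = init_nnet(W, k)
#   cost = train(batch_x, batch_y)   # batch_x: int32 matrix of word ids
#   zero_out()                       # keep the %unk% embedding at zero
#   n_correct = validate(devtest_x, devtest_y)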
def vo(a_N, a_crp_files, a_pos, a_neg, a_neut,
a_pos_re=NONMATCH_RE, a_neg_re=NONMATCH_RE, a_encoding=ENCODING):
"""Method for generating sentiment lexicons using Velikovich's approach.
@param a_N - number of terms to extract
@param a_crp_files - files of the original corpus
@param a_pos - initial set of positive terms to be expanded
@param a_neg - initial set of negative terms to be expanded
@param a_neut - initial set of neutral terms to be expanded
@param a_pos_re - regular expression for matching positive terms
@param a_neg_re - regular expression for matching negative terms
@param a_encoding - encoding of the vector file
@return list of terms sorted according to their polarities
"""
# digitize training set
word2vecid, max_sent_len, X, Y = _read_files(
a_crp_files, a_pos, a_neg, a_neut, a_pos_re, a_neg_re,
a_encoding
)
    # initialize neural net and embedding matrix
W, k = init_embeddings(len(word2vecid))
train, validate, zero_out, params = init_nnet(W, k)
# organize minibatches and run the training
N = len(Y)
assert N, "Training set is empty."
train_idcs, devtest_idcs = train_test_split(
np.arange(N), test_size=0.1)
train_N = len(train_idcs)
devtest_N = float(len(devtest_idcs))
devtest_x = X[devtest_idcs[:]]
devtest_y = Y[devtest_idcs[:]]
btch_size = min(N, BTCH_SIZE)
epoch_i = 0
acc = 0
best_acc = -1
prev_acc = FMIN
best_params = []
while epoch_i < MAX_EPOCHS:
np.random.shuffle(train_idcs)
cost = acc = 0.
start_time = datetime.utcnow()
for start in np.arange(0, train_N, btch_size):
end = min(train_N, start + btch_size)
btch_x = X[train_idcs[start:end]]
btch_y = Y[train_idcs[start:end]]
cost += train(btch_x, btch_y)
zero_out()
acc = validate(devtest_x, devtest_y) / devtest_N
if acc >= best_acc:
best_params = [p.get_value() for p in params]
best_acc = acc
sfx = " *"
else:
sfx = ''
end_time = datetime.utcnow()
tdelta = (end_time - start_time).total_seconds()
print("Iteration #{:d} ({:.2f} sec): cost = {:.2f}, "
"accuracy = {:.2%};{:s}".format(epoch_i, tdelta, cost,
acc, sfx),
file=sys.stderr)
if abs(prev_acc - acc) < EPSILON and epoch_i > MIN_EPOCHS:
break
else:
prev_acc = acc
epoch_i += 1
if best_params:
for p, val in zip(params, best_params):
p.set_value(val)
W = W.get_value()
ret = []
for w, w_id in word2vecid.iteritems():
if w_id == UNK_I:
continue
elif w in a_pos or a_pos_re.search(w):
w_score = FMAX
elif w in a_neg or a_neg_re.search(w):
w_score = FMIN
else:
w_pol = np.argmax(W[w_id])
if w_pol == NEUTRAL_IDX:
continue
w_score = np.max(W[w_id])
if (w_pol == POSITIVE_IDX and w_score < 0.) \
or (w_pol == NEGATIVE_IDX and w_score > 0.):
w_score *= -1
ret.append((w,
POSITIVE_LBL if w_score > 0. else NEGATIVE_LBL,
w_score))
ret.sort(key=lambda el: abs(el[-1]), reverse=True)
if a_N >= 0:
del ret[a_N:]
return ret
| mit |
belkinsky/SFXbot | src/pyAudioAnalysis/audioTrainTest.py | 1 | 46228 | import sys
import numpy
import time
import os
import glob
import pickle
import shutil
import audioop
import signal
import csv
import ntpath
from . import audioFeatureExtraction as aF
from . import audioBasicIO
from matplotlib.mlab import find
import matplotlib.pyplot as plt
import scipy.io as sIO
from scipy import linalg as la
from scipy.spatial import distance
import sklearn.svm
import sklearn.decomposition
import sklearn.ensemble
def signal_handler(signal, frame):
print('You pressed Ctrl+C! - EXIT')
os.system("stty -cbreak echo")
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
shortTermWindow = 0.050
shortTermStep = 0.050
eps = 0.00000001
class kNN:
def __init__(self, X, Y, k):
self.X = X
self.Y = Y
self.k = k
def classify(self, testSample):
nClasses = numpy.unique(self.Y).shape[0]
YDist = (distance.cdist(self.X, testSample.reshape(1, testSample.shape[0]), 'euclidean')).T
iSort = numpy.argsort(YDist)
P = numpy.zeros((nClasses,))
for i in range(nClasses):
P[i] = numpy.nonzero(self.Y[iSort[0][0:self.k]] == i)[0].shape[0] / float(self.k)
return (numpy.argmax(P), P)
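# Illustrative use of the kNN class (hypothetical data, not part of the
# original module):
#   X = numpy.random.rand(100, 20)           # 100 samples, 20 features
#   Y = numpy.random.randint(0, 3, 100)      # labels of 3 classes
#   knn = kNN(X, Y, 5)
#   winner_class, class_probabilities = knn.classify(numpy.random.rand(20))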
def classifierWrapper(classifier, classifierType, testSample):
'''
This function is used as a wrapper to pattern classification.
ARGUMENTS:
- classifier: a classifier object of type sklearn.svm.SVC or kNN (defined in this library) or sklearn.ensemble.RandomForestClassifier or sklearn.ensemble.GradientBoostingClassifier or sklearn.ensemble.ExtraTreesClassifier
        - classifierType:    "svm" or "knn" or "randomforest" or "gradientboosting" or "extratrees"
- testSample: a feature vector (numpy array)
RETURNS:
- R: class ID
- P: probability estimate
EXAMPLE (for some audio signal stored in array x):
import audioFeatureExtraction as aF
import audioTrainTest as aT
# load the classifier (here SVM, for kNN use loadKNNModel instead):
[Classifier, MEAN, STD, classNames, mtWin, mtStep, stWin, stStep] = aT.loadSVModel(modelName)
# mid-term feature extraction:
[MidTermFeatures, _] = aF.mtFeatureExtraction(x, Fs, mtWin * Fs, mtStep * Fs, round(Fs*stWin), round(Fs*stStep));
# feature normalization:
curFV = (MidTermFeatures[:, i] - MEAN) / STD;
# classification
[Result, P] = classifierWrapper(Classifier, modelType, curFV)
'''
R = -1
P = -1
if classifierType == "knn":
[R, P] = classifier.classify(testSample)
elif classifierType == "svm" or classifierType == "randomforest" or classifierType == "gradientboosting" or "extratrees":
R = classifier.predict(testSample.reshape(1,-1))[0]
P = classifier.predict_proba(testSample.reshape(1,-1))[0]
return [R, P]
def regressionWrapper(model, modelType, testSample):
'''
This function is used as a wrapper to pattern classification.
ARGUMENTS:
- model: regression model
- modelType: "svm" or "knn" (TODO)
- testSample: a feature vector (numpy array)
RETURNS:
- R: regression result (estimated value)
EXAMPLE (for some audio signal stored in array x):
TODO
'''
if modelType == "svm" or modelType == "randomforest":
return (model.predict(testSample.reshape(1,-1))[0])
# elif classifierType == "knn":
# TODO
return None
def randSplitFeatures(features, partTrain):
'''
    def randSplitFeatures(features, partTrain):
This function splits a feature set for training and testing.
ARGUMENTS:
        - features:         a list ([numOfClasses x 1]) whose elements contain numpy matrices of features.
each matrix features[i] of class i is [numOfSamples x numOfDimensions]
- partTrain: percentage
RETURNS:
- featuresTrains: a list of training data for each class
- featuresTest: a list of testing data for each class
'''
featuresTrain = []
featuresTest = []
for i, f in enumerate(features):
[numOfSamples, numOfDims] = f.shape
randperm = numpy.random.permutation(list(range(numOfSamples)))
nTrainSamples = int(round(partTrain * numOfSamples))
featuresTrain.append(f[randperm[0:nTrainSamples]])
featuresTest.append(f[randperm[nTrainSamples::]])
return (featuresTrain, featuresTest)
def trainKNN(features, K):
'''
Train a kNN classifier.
ARGUMENTS:
        - features:         a list ([numOfClasses x 1]) whose elements contain numpy matrices of features.
each matrix features[i] of class i is [numOfSamples x numOfDimensions]
- K: parameter K
RETURNS:
- kNN: the trained kNN variable
'''
[Xt, Yt] = listOfFeatures2Matrix(features)
knn = kNN(Xt, Yt, K)
return knn
def trainSVM(features, Cparam):
'''
    Train a multi-class probabilistic SVM classifier.
Note: This function is simply a wrapper to the sklearn functionality for SVM training
See function trainSVM_feature() to use a wrapper on both the feature extraction and the SVM training (and parameter tuning) processes.
ARGUMENTS:
        - features:         a list ([numOfClasses x 1]) whose elements contain numpy matrices of features
each matrix features[i] of class i is [numOfSamples x numOfDimensions]
- Cparam: SVM parameter C (cost of constraints violation)
RETURNS:
- svm: the trained SVM variable
NOTE:
This function trains a linear-kernel SVM for a given C value. For a different kernel, other types of parameters should be provided.
'''
[X, Y] = listOfFeatures2Matrix(features)
svm = sklearn.svm.SVC(C = Cparam, kernel = 'linear', probability = True)
svm.fit(X,Y)
return svm
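# Illustrative use of trainSVM() (hypothetical features, not part of the
# original module):
#   class0 = numpy.random.rand(50, 10)        # 50 samples of class 0
#   class1 = numpy.random.rand(50, 10) + 1.0  # 50 samples of class 1
#   svm = trainSVM([class0, class1], Cparam=1.0)
#   svm.predict(numpy.random.rand(1, 10))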
def trainRandomForest(features, n_estimators):
'''
    Train a multi-class random forest classifier.
    Note: This function is simply a wrapper to the sklearn functionality for random forest training
    See function trainSVM_feature() to use a wrapper on both the feature extraction and the classifier training (and parameter tuning) processes.
    ARGUMENTS:
        - features:         a list ([numOfClasses x 1]) whose elements contain numpy matrices of features
                            each matrix features[i] of class i is [numOfSamples x numOfDimensions]
        - n_estimators:     number of trees in the forest
    RETURNS:
        - rf:               the trained random forest classifier
    NOTE:
        This function trains a random forest for the given number of trees (n_estimators).
'''
[X, Y] = listOfFeatures2Matrix(features)
rf = sklearn.ensemble.RandomForestClassifier(n_estimators = n_estimators)
rf.fit(X,Y)
return rf
def trainGradientBoosting(features, n_estimators):
'''
    Train a gradient boosting classifier.
    Note: This function is simply a wrapper to the sklearn functionality for gradient boosting training
    See function trainSVM_feature() to use a wrapper on both the feature extraction and the classifier training (and parameter tuning) processes.
    ARGUMENTS:
        - features:         a list ([numOfClasses x 1]) whose elements contain numpy matrices of features
                            each matrix features[i] of class i is [numOfSamples x numOfDimensions]
        - n_estimators:     number of boosting stages (trees)
    RETURNS:
        - gb:               the trained gradient boosting classifier
    NOTE:
        This function trains a gradient boosting classifier for the given number of boosting stages (n_estimators).
'''
[X, Y] = listOfFeatures2Matrix(features)
rf = sklearn.ensemble.GradientBoostingClassifier(n_estimators = n_estimators)
rf.fit(X,Y)
return rf
def trainExtraTrees(features, n_estimators):
'''
    Train an extra trees classifier.
    Note: This function is simply a wrapper to the sklearn functionality for extra tree classifiers
    See function trainSVM_feature() to use a wrapper on both the feature extraction and the classifier training (and parameter tuning) processes.
    ARGUMENTS:
        - features:         a list ([numOfClasses x 1]) whose elements contain numpy matrices of features
                            each matrix features[i] of class i is [numOfSamples x numOfDimensions]
        - n_estimators:     number of trees in the forest
    RETURNS:
        - et:               the trained extra trees classifier
    NOTE:
        This function trains an extra trees classifier for the given number of trees (n_estimators).
'''
[X, Y] = listOfFeatures2Matrix(features)
et = sklearn.ensemble.ExtraTreesClassifier(n_estimators = n_estimators)
et.fit(X,Y)
return et
def trainSVMregression(Features, Y, Cparam):
svm = sklearn.svm.SVR(C = Cparam, kernel = 'linear')
print(Features.shape, Y)
svm.fit(Features,Y)
trainError = numpy.mean(numpy.abs(svm.predict(Features) - Y))
return svm, trainError
# TODO (not avaiable for regression?)
#def trainRandomForestRegression(Features, Y, n_estimators):
# rf = sklearn.ensemble.RandomForestClassifier(n_estimators = n_estimators)
# print Features.shape, Y
# rf.fit(Features,Y)
# trainError = numpy.mean(numpy.abs(rf.predict(Features) - Y))
# return rf, trainError
def featureAndTrain(listOfDirs, mtWin, mtStep, stWin, stStep, classifierType, modelName, computeBEAT=False, perTrain=0.90):
'''
This function is used as a wrapper to segment-based audio feature extraction and classifier training.
ARGUMENTS:
listOfDirs: list of paths of directories. Each directory contains a signle audio class whose samples are stored in seperate WAV files.
mtWin, mtStep: mid-term window length and step
stWin, stStep: short-term window and step
classifierType: "svm" or "knn" or "randomforest" or "gradientboosting" or "extratrees"
modelName: name of the model to be saved
RETURNS:
None. Resulting classifier along with the respective model parameters are saved on files.
'''
# STEP A: Feature Extraction:
[features, classNames, _] = aF.dirsWavFeatureExtraction(listOfDirs, mtWin, mtStep, stWin, stStep, computeBEAT=computeBEAT)
if len(features) == 0:
print("trainSVM_feature ERROR: No data found in any input folder!")
return
numOfFeatures = features[0].shape[1]
featureNames = ["features" + str(d + 1) for d in range(numOfFeatures)]
writeTrainDataToARFF(modelName, features, classNames, featureNames)
for i, f in enumerate(features):
if len(f) == 0:
print("trainSVM_feature ERROR: " + listOfDirs[i] + " folder is empty or non-existing!")
return
# STEP B: Classifier Evaluation and Parameter Selection:
if classifierType == "svm":
classifierParams = numpy.array([0.001, 0.01, 0.5, 1.0, 5.0, 10.0])
elif classifierType == "randomforest":
classifierParams = numpy.array([10, 25, 50, 100,200,500])
elif classifierType == "knn":
classifierParams = numpy.array([1, 3, 5, 7, 9, 11, 13, 15])
elif classifierType == "gradientboosting":
classifierParams = numpy.array([10, 25, 50, 100,200,500])
elif classifierType == "extratrees":
classifierParams = numpy.array([10, 25, 50, 100,200,500])
    # get optimal classifier parameter:
bestParam = evaluateClassifier(features, classNames, 100, classifierType, classifierParams, 0, perTrain)
print("Selected params: {0:.5f}".format(bestParam))
C = len(classNames)
[featuresNorm, MEAN, STD] = normalizeFeatures(features) # normalize features
MEAN = MEAN.tolist()
STD = STD.tolist()
featuresNew = featuresNorm
# STEP C: Save the classifier to file
if classifierType == "svm":
Classifier = trainSVM(featuresNew, bestParam)
with open(modelName, 'wb') as fid: # save to file
pickle.dump(Classifier, fid)
fo = open(modelName + "MEANS", "wb")
pickle.dump(MEAN, fo, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(STD, fo, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(classNames, fo, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(mtWin, fo, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(mtStep, fo, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(stWin, fo, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(stStep, fo, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(computeBEAT, fo, protocol=pickle.HIGHEST_PROTOCOL)
fo.close()
elif classifierType == "randomforest":
Classifier = trainRandomForest(featuresNew, bestParam)
with open(modelName, 'wb') as fid: # save to file
pickle.dump(Classifier, fid)
fo = open(modelName + "MEANS", "wb")
pickle.dump(MEAN, fo, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(STD, fo, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(classNames, fo, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(mtWin, fo, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(mtStep, fo, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(stWin, fo, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(stStep, fo, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(computeBEAT, fo, protocol=pickle.HIGHEST_PROTOCOL)
fo.close()
elif classifierType == "gradientboosting":
Classifier = trainGradientBoosting(featuresNew, bestParam)
with open(modelName, 'wb') as fid: # save to file
pickle.dump(Classifier, fid)
fo = open(modelName + "MEANS", "wb")
pickle.dump(MEAN, fo, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(STD, fo, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(classNames, fo, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(mtWin, fo, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(mtStep, fo, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(stWin, fo, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(stStep, fo, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(computeBEAT, fo, protocol=pickle.HIGHEST_PROTOCOL)
fo.close()
elif classifierType == "extratrees":
Classifier = trainExtraTrees(featuresNew, bestParam)
with open(modelName, 'wb') as fid: # save to file
pickle.dump(Classifier, fid)
fo = open(modelName + "MEANS", "wb")
pickle.dump(MEAN, fo, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(STD, fo, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(classNames, fo, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(mtWin, fo, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(mtStep, fo, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(stWin, fo, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(stStep, fo, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(computeBEAT, fo, protocol=pickle.HIGHEST_PROTOCOL)
fo.close()
elif classifierType == "knn":
[X, Y] = listOfFeatures2Matrix(featuresNew)
X = X.tolist()
Y = Y.tolist()
fo = open(modelName, "wb")
pickle.dump(X, fo, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(Y, fo, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(MEAN, fo, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(STD, fo, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(classNames, fo, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(bestParam, fo, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(mtWin, fo, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(mtStep, fo, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(stWin, fo, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(stStep, fo, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(computeBEAT, fo, protocol=pickle.HIGHEST_PROTOCOL)
fo.close()
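# Illustrative call of featureAndTrain() (hypothetical directory names, not
# part of the original module): each directory holds the WAV files of one
# class, and the resulting model is stored in the given file.
#   featureAndTrain(["wavs/speech", "wavs/music"], 1.0, 1.0,
#                   shortTermWindow, shortTermStep,
#                   "svm", "svmSpeechMusicModel", computeBEAT=False)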
def featureAndTrainRegression(dirName, mtWin, mtStep, stWin, stStep, modelType, modelName, computeBEAT=False):
'''
This function is used as a wrapper to segment-based audio feature extraction and regression model training.
ARGUMENTS:
dirName: path of directory containing the WAV files and Regression CSVs
mtWin, mtStep: mid-term window length and step
stWin, stStep: short-term window and step
modelType: "svm" (currently the only supported regression model; "randomforest" is a TODO)
modelName: name of the model to be saved
RETURNS:
None. Resulting regression model along with the respective model parameters are saved on files.
'''
# STEP A: Feature Extraction:
[features, _, fileNames] = aF.dirsWavFeatureExtraction([dirName], mtWin, mtStep, stWin, stStep, computeBEAT=computeBEAT)
features = features[0]
fileNames = [ntpath.basename(f) for f in fileNames[0]]
# Read CSVs:
CSVs = glob.glob(dirName + os.sep + "*.csv")
regressionLabels = []
regressionNames = []
for c in CSVs: # for each CSV
curRegressionLabels = numpy.zeros((len(fileNames, ))) # read filenames, map to "fileNames" and append respective values in the regressionLabels
with open(c, 'rb') as csvfile:
CSVreader = csv.reader(csvfile, delimiter=',', quotechar='|')
for row in CSVreader:
if len(row) == 2:
if row[0]+".wav" in fileNames:
index = fileNames.index(row[0]+".wav")
curRegressionLabels[index] = float(row[1])
regressionLabels.append(curRegressionLabels) # curRegressionLabels is the list of values for the current regression problem
regressionNames.append(ntpath.basename(c).replace(".csv", "")) # regression task name
if len(features) == 0:
print("ERROR: No data found in any input folder!")
return
numOfFeatures = features.shape[1]
# TODO: ARFF WRITE????
# STEP B: Classifier Evaluation and Parameter Selection:
if modelType == "svm":
modelParams = numpy.array([0.001, 0.005, 0.01, 0.05, 0.1, 0.25, 0.5, 1.0, 5.0, 10.0])
elif modelType == "randomforest":
modelParams = numpy.array([5, 10, 25, 50, 100])
# elif modelType == "knn":
# modelParams = numpy.array([1, 3, 5, 7, 9, 11, 13, 15]);
for iRegression, r in enumerate(regressionNames):
# get optimal classifier parameter:
print("Regression task " + r)
bestParam = evaluateRegression(features, regressionLabels[iRegression], 100, modelType, modelParams)
print("Selected params: {0:.5f}".format(bestParam))
[featuresNorm, MEAN, STD] = normalizeFeatures([features]) # normalize features
# STEP C: Save the model to file
if modelType == "svm":
Classifier, _ = trainSVMregression(featuresNorm[0], regressionLabels[iRegression], bestParam)
with open(modelName + "_" + r, 'wb') as fid: # save to file
pickle.dump(Classifier, fid)
fo = open(modelName + "_" + r + "MEANS", "wb")
pickle.dump(MEAN, fo, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(STD, fo, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(mtWin, fo, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(mtStep, fo, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(stWin, fo, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(stStep, fo, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(computeBEAT, fo, protocol=pickle.HIGHEST_PROTOCOL)
fo.close()
''' TODO
elif modelType == "randomforest":
Classifier, _ = trainRandomForestRegression(featuresNorm[0], regressionLabels[iRegression], bestParam)
with open(modelName + "_" + r, 'wb') as fid: # save to file
cPickle.dump(Classifier, fid)
fo = open(modelName + "_" + r + "MEANS", "wb")
cPickle.dump(MEAN, fo, protocol=cPickle.HIGHEST_PROTOCOL)
cPickle.dump(STD, fo, protocol=cPickle.HIGHEST_PROTOCOL)
cPickle.dump(mtWin, fo, protocol=cPickle.HIGHEST_PROTOCOL)
cPickle.dump(mtStep, fo, protocol=cPickle.HIGHEST_PROTOCOL)
cPickle.dump(stWin, fo, protocol=cPickle.HIGHEST_PROTOCOL)
cPickle.dump(stStep, fo, protocol=cPickle.HIGHEST_PROTOCOL)
cPickle.dump(computeBEAT, fo, protocol=cPickle.HIGHEST_PROTOCOL)
fo.close()
'''
# elif classifierType == "knn":
def loadKNNModel(kNNModelName, isRegression=False):
try:
fo = open(kNNModelName, "rb")
except IOError:
print("didn't find file")
return
try:
X = pickle.load(fo)
Y = pickle.load(fo)
MEAN = pickle.load(fo)
STD = pickle.load(fo)
if not isRegression:
classNames = pickle.load(fo)
K = pickle.load(fo)
mtWin = pickle.load(fo)
mtStep = pickle.load(fo)
stWin = pickle.load(fo)
stStep = pickle.load(fo)
computeBEAT = pickle.load(fo)
except:
fo.close()
fo.close()
X = numpy.array(X)
Y = numpy.array(Y)
MEAN = numpy.array(MEAN)
STD = numpy.array(STD)
Classifier = kNN(X, Y, K) # Note: a direct call to the kNN constructor is used here
if isRegression:
return(Classifier, MEAN, STD, mtWin, mtStep, stWin, stStep, computeBEAT)
else:
return(Classifier, MEAN, STD, classNames, mtWin, mtStep, stWin, stStep, computeBEAT)
def loadSVModel(SVMmodelName, isRegression=False):
'''
This function loads an SVM model either for classification or regression.
ARGUMENTS:
- SVMmodelName: the path of the model to be loaded
- isRegression: a flag indicating whether the model is a regression model
'''
try:
fo = open(SVMmodelName+"MEANS", "rb")
except IOError:
print("Load SVM Model: Didn't find file")
return
try:
MEAN = pickle.load(fo)
STD = pickle.load(fo)
if not isRegression:
classNames = pickle.load(fo)
mtWin = pickle.load(fo)
mtStep = pickle.load(fo)
stWin = pickle.load(fo)
stStep = pickle.load(fo)
computeBEAT = pickle.load(fo)
except:
fo.close()
fo.close()
MEAN = numpy.array(MEAN)
STD = numpy.array(STD)
COEFF = []
with open(SVMmodelName, 'rb') as fid:
SVM = pickle.load(fid)
if isRegression:
return(SVM, MEAN, STD, mtWin, mtStep, stWin, stStep, computeBEAT)
else:
return(SVM, MEAN, STD, classNames, mtWin, mtStep, stWin, stStep, computeBEAT)
def loadRandomForestModel(RFmodelName, isRegression=False):
'''
This function loads a Random Forest model either for classification or regression.
ARGUMENTS:
- RFmodelName: the path of the model to be loaded
- isRegression: a flag indicating whether the model is a regression model
'''
try:
fo = open(RFmodelName+"MEANS", "rb")
except IOError:
print("Load Random Forest Model: Didn't find file")
return
try:
MEAN = pickle.load(fo)
STD = pickle.load(fo)
if not isRegression:
classNames = pickle.load(fo)
mtWin = pickle.load(fo)
mtStep = pickle.load(fo)
stWin = pickle.load(fo)
stStep = pickle.load(fo)
computeBEAT = pickle.load(fo)
except:
fo.close()
fo.close()
MEAN = numpy.array(MEAN)
STD = numpy.array(STD)
COEFF = []
with open(RFmodelName, 'rb') as fid:
RF = pickle.load(fid)
if isRegression:
return(RF, MEAN, STD, mtWin, mtStep, stWin, stStep, computeBEAT)
else:
return(RF, MEAN, STD, classNames, mtWin, mtStep, stWin, stStep, computeBEAT)
def loadGradientBoostingModel(GBModelName, isRegression=False):
'''
This function loads a gradient boosting model either for classification or regression.
ARGUMENTS:
- GBModelName: the path of the model to be loaded
- isRegression: a flag indicating whether the model is a regression model
'''
try:
fo = open(GBModelName+"MEANS", "rb")
except IOError:
print("Load Random Forest Model: Didn't find file")
return
try:
MEAN = pickle.load(fo)
STD = pickle.load(fo)
if not isRegression:
classNames = pickle.load(fo)
mtWin = pickle.load(fo)
mtStep = pickle.load(fo)
stWin = pickle.load(fo)
stStep = pickle.load(fo)
computeBEAT = pickle.load(fo)
except:
fo.close()
fo.close()
MEAN = numpy.array(MEAN)
STD = numpy.array(STD)
COEFF = []
with open(GBModelName, 'rb') as fid:
GB = pickle.load(fid)
if isRegression:
return(GB, MEAN, STD, mtWin, mtStep, stWin, stStep, computeBEAT)
else:
return(GB, MEAN, STD, classNames, mtWin, mtStep, stWin, stStep, computeBEAT)
def loadExtraTreesModel(ETmodelName, isRegression=False):
'''
This function loads an extra trees model either for classification or regression.
ARGUMENTS:
- ETmodelName: the path of the model to be loaded
- isRegression: a flag indicating whether the model is a regression model
'''
try:
fo = open(ETmodelName+"MEANS", "rb")
except IOError:
print("Load Random Forest Model: Didn't find file")
return
try:
MEAN = pickle.load(fo)
STD = pickle.load(fo)
if not isRegression:
classNames = pickle.load(fo)
mtWin = pickle.load(fo)
mtStep = pickle.load(fo)
stWin = pickle.load(fo)
stStep = pickle.load(fo)
computeBEAT = pickle.load(fo)
except:
fo.close()
fo.close()
MEAN = numpy.array(MEAN)
STD = numpy.array(STD)
COEFF = []
with open(ETmodelName, 'rb') as fid:
GB = pickle.load(fid)
if isRegression:
return(GB, MEAN, STD, mtWin, mtStep, stWin, stStep, computeBEAT)
else:
return(GB, MEAN, STD, classNames, mtWin, mtStep, stWin, stStep, computeBEAT)
def evaluateClassifier(features, ClassNames, nExp, ClassifierName, Params, parameterMode, perTrain=0.90):
'''
ARGUMENTS:
features: a list ([numOfClasses x 1]) whose elements contain numpy matrices of features.
each matrix features[i] of class i is [numOfSamples x numOfDimensions]
ClassNames: list of class names (strings)
nExp: number of cross-validation experiments
ClassifierName: "svm", "knn", "randomforest", "gradientboosting" or "extratrees"
Params: list of classifier parameters (for parameter tuning during cross-validation)
parameterMode: 0: choose parameters that lead to maximum overall classification ACCURACY
1: choose parameters that lead to maximum overall F1 MEASURE
RETURNS:
bestParam: the value of the input parameter that optimizes the selected performance measure
'''
# feature normalization:
(featuresNorm, MEAN, STD) = normalizeFeatures(features)
#featuresNorm = features;
nClasses = len(features)
CAll = []
acAll = []
F1All = []
PrecisionClassesAll = []
RecallClassesAll = []
ClassesAll = []
F1ClassesAll = []
CMsAll = []
# compute total number of samples:
nSamplesTotal = 0
for f in features:
nSamplesTotal += f.shape[0]
if nSamplesTotal > 1000 and nExp > 50:
nExp = 50
print("Number of training experiments changed to 50 due to high number of samples")
if nSamplesTotal > 2000 and nExp > 10:
nExp = 10
print("Number of training experiments changed to 10 due to high number of samples")
for Ci, C in enumerate(Params): # for each param value
CM = numpy.zeros((nClasses, nClasses))
for e in range(nExp): # for each cross-validation iteration:
print("Param = {0:.5f} - Classifier Evaluation Experiment {1:d} of {2:d}".format(C, e+1, nExp))
# split features:
featuresTrain, featuresTest = randSplitFeatures(featuresNorm, perTrain)
# train multi-class svms:
if ClassifierName == "svm":
Classifier = trainSVM(featuresTrain, C)
elif ClassifierName == "knn":
Classifier = trainKNN(featuresTrain, C)
elif ClassifierName == "randomforest":
Classifier = trainRandomForest(featuresTrain, C)
elif ClassifierName == "gradientboosting":
Classifier = trainGradientBoosting(featuresTrain, C)
elif ClassifierName == "extratrees":
Classifier = trainExtraTrees(featuresTrain, C)
CMt = numpy.zeros((nClasses, nClasses))
for c1 in range(nClasses):
#Results = Classifier.pred(featuresTest[c1])
nTestSamples = len(featuresTest[c1])
Results = numpy.zeros((nTestSamples, 1))
for ss in range(nTestSamples):
[Results[ss], _] = classifierWrapper(Classifier, ClassifierName, featuresTest[c1][ss])
for c2 in range(nClasses):
CMt[c1][c2] = float(len(numpy.nonzero(Results == c2)[0]))
CM = CM + CMt
CM = CM + 0.0000000010
Rec = numpy.zeros((CM.shape[0], ))
Pre = numpy.zeros((CM.shape[0], ))
for ci in range(CM.shape[0]):
Rec[ci] = CM[ci, ci] / numpy.sum(CM[ci, :])
Pre[ci] = CM[ci, ci] / numpy.sum(CM[:, ci])
PrecisionClassesAll.append(Pre)
RecallClassesAll.append(Rec)
F1 = 2 * Rec * Pre / (Rec + Pre)
F1ClassesAll.append(F1)
acAll.append(numpy.sum(numpy.diagonal(CM)) / numpy.sum(CM))
CMsAll.append(CM)
F1All.append(numpy.mean(F1))
# print "{0:6.4f}{1:6.4f}{2:6.1f}{3:6.1f}".format(nu, g, 100.0*acAll[-1], 100.0*F1All[-1])
print(("\t\t"), end=' ')
for i, c in enumerate(ClassNames):
if i == len(ClassNames)-1:
print("{0:s}\t\t".format(c), end=' ')
else:
print("{0:s}\t\t\t".format(c), end=' ')
print ("OVERALL")
print(("\tC"), end=' ')
for c in ClassNames:
print("\tPRE\tREC\tF1", end=' ')
print("\t{0:s}\t{1:s}".format("ACC", "F1"))
bestAcInd = numpy.argmax(acAll)
bestF1Ind = numpy.argmax(F1All)
for i in range(len(PrecisionClassesAll)):
print("\t{0:.3f}".format(Params[i]), end=' ')
for c in range(len(PrecisionClassesAll[i])):
print("\t{0:.1f}\t{1:.1f}\t{2:.1f}".format(100.0 * PrecisionClassesAll[i][c], 100.0 * RecallClassesAll[i][c], 100.0 * F1ClassesAll[i][c]), end=' ')
print("\t{0:.1f}\t{1:.1f}".format(100.0 * acAll[i], 100.0 * F1All[i]), end=' ')
if i == bestF1Ind:
print("\t best F1", end=' ')
if i == bestAcInd:
print("\t best Acc", end=' ')
print()
if parameterMode == 0: # keep parameters that maximize overall classification accuracy:
print("Confusion Matrix:")
printConfusionMatrix(CMsAll[bestAcInd], ClassNames)
return Params[bestAcInd]
elif parameterMode == 1: # keep parameters that maximize overall F1 measure:
print("Confusion Matrix:")
printConfusionMatrix(CMsAll[bestF1Ind], ClassNames)
return Params[bestF1Ind]
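# Hedged illustration (added for clarity, not part of the original library):
# a self-contained call to evaluateClassifier() on random features. It selects
# the SVM C value that maximizes overall accuracy (parameterMode=0) over 10
# cross-validation experiments. The demo function is never called automatically.
def _demo_evaluateClassifier():
    numpy.random.seed(0)
    feats = [numpy.random.rand(40, 6), numpy.random.rand(40, 6) + 0.5]
    bestC = evaluateClassifier(feats, ["classA", "classB"], 10, "svm",
                               [0.01, 0.1, 1.0], 0, perTrain=0.80)
    print("selected C = %s" % str(bestC))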
def evaluateRegression(features, labels, nExp, MethodName, Params):
'''
ARGUMENTS:
features: numpy matrices of features [numOfSamples x numOfDimensions]
labels: list of sample labels
nExp: number of cross-validation experiments
MethodName: "svm" or "randomforest"
Params: list of classifier params to be evaluated
RETURNS:
bestParam: the value of the input parameter that optimizes the selected performance measure
'''
# feature normalization:
(featuresNorm, MEAN, STD) = normalizeFeatures([features])
featuresNorm = featuresNorm[0]
nSamples = labels.shape[0]
partTrain = 0.9
ErrorsAll = []
ErrorsTrainAll = []
ErrorsBaselineAll = []
for Ci, C in enumerate(Params): # for each param value
Errors = []
ErrorsTrain = []
ErrorsBaseline = []
for e in range(nExp): # for each cross-validation iteration:
# split features:
randperm = numpy.random.permutation(list(range(nSamples)))
nTrain = int(round(partTrain * nSamples))
featuresTrain = [featuresNorm[randperm[i]] for i in range(nTrain)]
featuresTest = [featuresNorm[randperm[i+nTrain]] for i in range(nSamples - nTrain)]
labelsTrain = [labels[randperm[i]] for i in range(nTrain)]
labelsTest = [labels[randperm[i + nTrain]] for i in range(nSamples - nTrain)]
# train multi-class svms:
featuresTrain = numpy.matrix(featuresTrain)
if MethodName == "svm":
[Classifier, trainError] = trainSVMregression(featuresTrain, labelsTrain, C)
# TODO
#elif MethodName == "randomforest":
# [Classifier, trainError] = trainRandomForestRegression(featuresTrain, labelsTrain, C)
# TODO KNN
# elif ClassifierName=="knn":
# Classifier = trainKNN(featuresTrain, C)
ErrorTest = []
ErrorTestBaseline = []
for itest, fTest in enumerate(featuresTest):
R = regressionWrapper(Classifier, MethodName, fTest)
Rbaseline = numpy.mean(labelsTrain)
ErrorTest.append((R - labelsTest[itest]) * (R - labelsTest[itest]))
ErrorTestBaseline.append((Rbaseline - labelsTest[itest]) * (Rbaseline - labelsTest[itest]))
Error = numpy.array(ErrorTest).mean()
ErrorBaseline = numpy.array(ErrorTestBaseline).mean()
Errors.append(Error)
ErrorsTrain.append(trainError)
ErrorsBaseline.append(ErrorBaseline)
ErrorsAll.append(numpy.array(Errors).mean())
ErrorsTrainAll.append(numpy.array(ErrorsTrain).mean())
ErrorsBaselineAll.append(numpy.array(ErrorsBaseline).mean())
bestInd = numpy.argmin(ErrorsAll)
print("{0:s}\t\t{1:s}\t\t{2:s}\t\t{3:s}".format("Param", "MSE", "T-MSE", "R-MSE"))
for i in range(len(ErrorsAll)):
print("{0:.4f}\t\t{1:.2f}\t\t{2:.2f}\t\t{3:.2f}".format(Params[i], ErrorsAll[i], ErrorsTrainAll[i], ErrorsBaselineAll[i]), end=' ')
if i == bestInd:
print("\t\t best", end=' ')
print()
return Params[bestInd]
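# Hedged illustration (added for clarity, not part of the original library):
# evaluateRegression() on synthetic data. It prints test MSE, train MSE (T-MSE)
# and the mean-predictor baseline (R-MSE) per parameter value and returns the
# best-performing parameter. The demo function is never called automatically.
def _demo_evaluateRegression():
    numpy.random.seed(0)
    feats = numpy.random.rand(50, 4)
    labels = 2.0 * feats[:, 0] + 0.1 * numpy.random.rand(50)
    bestC = evaluateRegression(feats, labels, 20, "svm", [0.05, 0.1, 0.5])
    print("selected SVR C = %s" % str(bestC))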
def printConfusionMatrix(CM, ClassNames):
'''
This function prints a confusion matrix for a particular classification task.
ARGUMENTS:
CM: a 2-D numpy array of the confusion matrix
(CM[i,j] is the number of times a sample from class i was classified in class j)
ClassNames: a list that contains the names of the classes
'''
if CM.shape[0] != len(ClassNames):
print("printConfusionMatrix: Wrong argument sizes\n")
return
for c in ClassNames:
if len(c) > 4:
c = c[0:3]
print("\t{0:s}".format(c), end=' ')
print()
for i, c in enumerate(ClassNames):
if len(c) > 4:
c = c[0:3]
print("{0:s}".format(c), end=' ')
for j in range(len(ClassNames)):
print("\t{0:.1f}".format(100.0 * CM[i][j] / numpy.sum(CM)), end=' ')
print()
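# Hedged illustration (added for clarity, not part of the original library):
# printConfusionMatrix() prints each cell as a percentage of the total sample
# count, so the values printed for the matrix below sum to 100.
def _demo_printConfusionMatrix():
    CM = numpy.array([[45.0, 5.0],
                      [10.0, 40.0]])
    printConfusionMatrix(CM, ["speech", "music"])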
def normalizeFeatures(features):
'''
This function normalizes a feature set to 0-mean and 1-std.
Used in most classifier training cases.
ARGUMENTS:
- features: list of feature matrices (each one of them is a numpy matrix)
RETURNS:
- featuresNorm: list of NORMALIZED feature matrices
- MEAN: mean vector
- STD: std vector
'''
X = numpy.array([])
for count, f in enumerate(features):
if f.shape[0] > 0:
if count == 0:
X = f
else:
X = numpy.vstack((X, f))
count += 1
MEAN = numpy.mean(X, axis=0)
STD = numpy.std(X, axis=0)
featuresNorm = []
for f in features:
ft = f.copy()
for nSamples in range(f.shape[0]):
ft[nSamples, :] = (ft[nSamples, :] - MEAN) / STD
featuresNorm.append(ft)
return (featuresNorm, MEAN, STD)
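# Hedged illustration (added for clarity, not part of the original library):
# a tiny, self-contained check of the z-score normalization performed by
# normalizeFeatures(). The demo function is never called automatically.
def _demo_normalizeFeatures():
    f1 = numpy.array([[0.0, 10.0], [2.0, 20.0]])
    f2 = numpy.array([[4.0, 30.0]])
    featuresNorm, MEAN, STD = normalizeFeatures([f1, f2])
    # MEAN is computed over all rows of all matrices: [2.0, 20.0]
    # STD (numpy default, population std) is approximately [1.633, 8.165]
    print(MEAN, STD)
    print(featuresNorm[0])  # first matrix, normalized to zero mean / unit std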
def listOfFeatures2Matrix(features):
'''
listOfFeatures2Matrix(features)
This function takes a list of feature matrices as argument and returns a single concatenated feature matrix and the respective class labels.
ARGUMENTS:
- features: a list of feature matrices
RETURNS:
- X: a concatenated matrix of features
- Y: a vector of class indices
'''
X = numpy.array([])
Y = numpy.array([])
for i, f in enumerate(features):
if i == 0:
X = f
Y = i * numpy.ones((len(f), 1))
else:
X = numpy.vstack((X, f))
Y = numpy.append(Y, i * numpy.ones((len(f), 1)))
return (X, Y)
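# Hedged illustration (added for clarity, not part of the original library):
# what listOfFeatures2Matrix() returns for two small classes.
def _demo_listOfFeatures2Matrix():
    f0 = numpy.ones((2, 3))    # class 0: two samples
    f1 = numpy.zeros((1, 3))   # class 1: one sample
    X, Y = listOfFeatures2Matrix([f0, f1])
    # X has shape (3, 3); Y is array([0., 0., 1.])
    print(X.shape, Y)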
def pcaDimRed(features, nDims):
[X, Y] = listOfFeatures2Matrix(features)
pca = sklearn.decomposition.PCA(n_components = nDims)
pca.fit(X)
coeff = pca.components_.T  # transpose to (numOfFeatures x nDims) so that f.dot(coeff) projects onto the components
coeff = coeff[:, 0:nDims]
featuresNew = []
for f in features:
ft = f.copy()
# ft = pca.transform(ft, k=nDims)
ft = numpy.dot(f, coeff)
featuresNew.append(ft)
return (featuresNew, coeff)
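# Hedged illustration (added for clarity, not part of the original library):
# reduce two feature matrices to their first two principal components with
# pcaDimRed(). The demo function is never called automatically.
def _demo_pcaDimRed():
    numpy.random.seed(0)
    feats = [numpy.random.rand(20, 5), numpy.random.rand(15, 5)]
    featsReduced, coeff = pcaDimRed(feats, 2)
    print(featsReduced[0].shape)  # (20, 2)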
def fileClassification(inputFile, modelName, modelType):
# Load classifier:
if not os.path.isfile(inputFile):
print("fileClassification: wav file not found!")
return (-1, -1, -1)
[Fs, x] = audioBasicIO.readAudioFile(inputFile) # read audio file and convert to mono
x = audioBasicIO.stereo2mono(x)
return fragmentClassification(Fs, x, modelName, modelType)
def fragmentClassification(Fs, x, modelName, modelType):
if not os.path.isfile(modelName):
print("fileClassification: input modelName not found!")
return (-1, -1, -1)
if modelType == 'svm':
[Classifier, MEAN, STD, classNames, mtWin, mtStep, stWin, stStep, computeBEAT] = loadSVModel(modelName)
elif modelType == 'knn':
[Classifier, MEAN, STD, classNames, mtWin, mtStep, stWin, stStep, computeBEAT] = loadKNNModel(modelName)
elif modelType == 'randomforest':
[Classifier, MEAN, STD, classNames, mtWin, mtStep, stWin, stStep, computeBEAT] = loadRandomForestModel(modelName)
elif modelType == 'gradientboosting':
[Classifier, MEAN, STD, classNames, mtWin, mtStep, stWin, stStep, computeBEAT] = loadGradientBoostingModel(modelName)
elif modelType == 'extratrees':
[Classifier, MEAN, STD, classNames, mtWin, mtStep, stWin, stStep, computeBEAT] = loadExtraTreesModel(modelName)
# feature extraction:
[MidTermFeatures, s] = aF.mtFeatureExtraction(x, Fs, mtWin * Fs, mtStep * Fs, round(Fs * stWin), round(Fs * stStep))
MidTermFeatures = MidTermFeatures.mean(axis=1) # long term averaging of mid-term statistics
if computeBEAT:
[beat, beatConf] = aF.beatExtraction(s, stStep)
MidTermFeatures = numpy.append(MidTermFeatures, beat)
MidTermFeatures = numpy.append(MidTermFeatures, beatConf)
curFV = (MidTermFeatures - MEAN) / STD # normalization
[Result, P] = classifierWrapper(Classifier, modelType, curFV) # classification
return Result, P, classNames
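# Hedged usage sketch (added for clarity, not part of the original library):
# classifying a single WAV file with a previously trained model. The file path
# and model name below are illustrative only.
#
# result, probabilities, classNames = fileClassification("sample.wav",
#                                                        "svmSpeechMusic", "svm")
# print(classNames[int(result)], probabilities)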
def fileRegression(inputFile, modelName, modelType):
# Load classifier:
if not os.path.isfile(inputFile):
print("fileClassification: wav file not found!")
return (-1, -1, -1)
regressionModels = glob.glob(modelName + "_*")
regressionModels2 = []
for r in regressionModels:
if r[-5::] != "MEANS":
regressionModels2.append(r)
regressionModels = regressionModels2
regressionNames = []
for r in regressionModels:
regressionNames.append(r[r.rfind("_")+1::])
# FEATURE EXTRACTION
# LOAD ONLY THE FIRST MODEL (for mtWin, etc)
if modelType == 'svm':
[_, _, _, mtWin, mtStep, stWin, stStep, computeBEAT] = loadSVModel(regressionModels[0], True)
elif modelType == 'knn':
[_, _, _, mtWin, mtStep, stWin, stStep, computeBEAT] = loadKNNModel(regressionModels[0], True)
[Fs, x] = audioBasicIO.readAudioFile(inputFile) # read audio file and convert to mono
x = audioBasicIO.stereo2mono(x)
# feature extraction:
[MidTermFeatures, s] = aF.mtFeatureExtraction(x, Fs, mtWin * Fs, mtStep * Fs, round(Fs * stWin), round(Fs * stStep))
MidTermFeatures = MidTermFeatures.mean(axis=1) # long term averaging of mid-term statistics
if computeBEAT:
[beat, beatConf] = aF.beatExtraction(s, stStep)
MidTermFeatures = numpy.append(MidTermFeatures, beat)
MidTermFeatures = numpy.append(MidTermFeatures, beatConf)
# REGRESSION
R = []
for ir, r in enumerate(regressionModels):
if not os.path.isfile(r):
print("fileClassification: input modelName not found!")
return (-1, -1, -1)
if modelType == 'svm':
[Model, MEAN, STD, mtWin, mtStep, stWin, stStep, computeBEAT] = loadSVModel(r, True)
elif modelType == 'knn':
[Model, MEAN, STD, mtWin, mtStep, stWin, stStep, computeBEAT] = loadKNNModel(r, True)
curFV = (MidTermFeatures - MEAN) / STD # normalization
R.append(regressionWrapper(Model, modelType, curFV)) # classification
return R, regressionNames
def lda(data, labels, redDim):
# Centre data
data -= data.mean(axis=0)
nData = numpy.shape(data)[0]
nDim = numpy.shape(data)[1]
print(nData, nDim)
Sw = numpy.zeros((nDim, nDim))
Sb = numpy.zeros((nDim, nDim))
C = numpy.cov((data.T))
# Loop over classes
classes = numpy.unique(labels)
for i in range(len(classes)):
# Find relevant datapoints
indices = (numpy.where(labels == classes[i]))
d = numpy.squeeze(data[indices, :])
classcov = numpy.cov((d.T))
Sw += float(numpy.shape(indices)[0])/nData * classcov
Sb = C - Sw
# Now solve for W
# Compute eigenvalues, eigenvectors and sort into order
#evals,evecs = linalg.eig(dot(linalg.pinv(Sw),sqrt(Sb)))
evals, evecs = la.eig(Sw, Sb)
indices = numpy.argsort(evals)
indices = indices[::-1]
evecs = evecs[:, indices]
evals = evals[indices]
w = evecs[:, :redDim]
#print evals, w
newData = numpy.dot(data, w)
#for i in range(newData.shape[0]):
# plt.text(newData[i,0],newData[i,1],str(labels[i]))
#plt.xlim([newData[:,0].min(), newData[:,0].max()])
#plt.ylim([newData[:,1].min(), newData[:,1].max()])
#plt.show()
return newData, w
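# Hedged usage sketch (added for clarity, not part of the original library):
# lda() expects a [nSamples x nDims] data array and a label vector and returns
# the data projected onto redDim discriminant directions (it relies on the
# module-level scipy.linalg import aliased as "la").
#
# data = numpy.random.randn(60, 4); data[30:] += 2.0
# labels = numpy.array([0] * 30 + [1] * 30)
# newData, w = lda(data, labels, 1)   # newData has shape (60, 1)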
def writeTrainDataToARFF(modelName, features, classNames, featureNames):
f = open(modelName + ".arff", 'w')
f.write('@RELATION ' + modelName + '\n')
for fn in featureNames:
f.write('@ATTRIBUTE ' + fn + ' NUMERIC\n')
f.write('@ATTRIBUTE class {')
for c in range(len(classNames)-1):
f.write(classNames[c] + ',')
f.write(classNames[-1] + '}\n\n')
f.write('@DATA\n')
for c, fe in enumerate(features):
for i in range(fe.shape[0]):
for j in range(fe.shape[1]):
f.write("{0:f},".format(fe[i, j]))
f.write(classNames[c]+"\n")
f.close()
def trainSpeakerModelsScript():
'''
This script is used to train the speaker-related models (NOTE: the data paths are hard-coded and NOT included in the library; the resulting models, however, are included)
import audioTrainTest as aT
aT.trainSpeakerModelsScript()
'''
mtWin = 2.0
mtStep = 2.0
stWin = 0.020
stStep = 0.020
dirName = "DIARIZATION_ALL/all"
listOfDirs = [os.path.join(dirName, name) for name in os.listdir(dirName) if os.path.isdir(os.path.join(dirName, name))]
featureAndTrain(listOfDirs, mtWin, mtStep, stWin, stStep, "knn", "data/knnSpeakerAll", computeBEAT=False, perTrain=0.50)
dirName = "DIARIZATION_ALL/female_male"
listOfDirs = [os.path.join(dirName, name) for name in os.listdir(dirName) if os.path.isdir(os.path.join(dirName, name))]
featureAndTrain(listOfDirs, mtWin, mtStep, stWin, stStep, "knn", "data/knnSpeakerFemaleMale", computeBEAT=False, perTrain=0.50)
def main(argv):
return 0
if __name__ == '__main__':
main(sys.argv)
| mit |
cmbclh/vnpy1.7 | vnpy/trader/app/login/uiLoginWidget.py | 1 | 7055 | # encoding: UTF-8
'''
GUI control components for the login module
'''
import sys
sys.path.append('../')
#sys.path.append('D:\\tr\\vnpy-master\\vn.trader\\DAO')
sys.path.append('D:\\tr\\vnpy-1.7\\vnpy\\DAO')
sys.path.append('D:\\tr\\vnpy-1.7\\vnpy\\common')
import vnpy.DAO
import vnpy.common
from vnpy.DAO import *
import pandas as pd
import Tkinter
#from Tkinter import messagebox
from vnpy.trader.app.login.language import text
from vnpy.trader.uiBasicWidget import QtWidgets
TBUSER_COLUMNS = ['user_id','user_name','status','password','branch_no','open_date','cancel_date','passwd_date','op_group','op_rights','reserve1','dep_id','last_logon_date','last_logon_time','last_ip_address','fail_times','fail_date','reserve2','last_fail_ip']
########################################################################
class LoginSpinBox(QtWidgets.QLineEdit):#.QSpinBox):
"""调整参数用的数值框"""
#----------------------------------------------------------------------
def __init__(self, value):
"""Constructor"""
super(LoginSpinBox, self).__init__()
#self.setMinimum(0)
#self.setMaximum(1000000)
self.setText(value)
########################################################################
class LoginLine(QtWidgets.QFrame):
"""水平分割线"""
#----------------------------------------------------------------------
def __init__(self):
"""Constructor"""
super(LoginLine, self).__init__()
self.setFrameShape(self.HLine)
self.setFrameShadow(self.Sunken)
########################################################################
class LoginEngineManager(QtWidgets.QWidget):
"""风控引擎的管理组件"""
#----------------------------------------------------------------------
def __init__(self, loginEngine, eventEngine, parent=None):
"""Constructor"""
super(LoginEngineManager, self).__init__(parent)
self.loginEngine = loginEngine
self.eventEngine = eventEngine
self.initUi()
#----------------------------------------------------------------------
def initUi(self):
"""初始化界面"""
print self
self.setWindowTitle(text.LOGIN_MANAGER)
# Set up the UI
self.userId = LoginSpinBox(self.loginEngine.userId)
self.password = LoginSpinBox(self.loginEngine.password)
buttonLogin = QtWidgets.QPushButton(text.LOGIN)
buttonLogout = QtWidgets.QPushButton(text.LOGOUT)
buttonSubmit = QtWidgets.QPushButton(text.SUBMIT)
Label = QtWidgets.QLabel
grid = QtWidgets.QGridLayout()
grid.addWidget(Label(text.USERID), 2, 0)
grid.addWidget(self.userId, 2, 1)
grid.addWidget(Label(text.PASSWORD), 3, 0)
grid.addWidget(self.password, 3, 1)
grid.addWidget(LoginLine(), 4, 0, 1, 2)
hbox = QtWidgets.QHBoxLayout()
hbox.addStretch()
hbox.addWidget(buttonSubmit)
hbox.addWidget(buttonLogin)
vbox = QtWidgets.QVBoxLayout()
vbox.addLayout(grid)
vbox.addLayout(hbox)
self.setLayout(vbox)
# Connect widget signals
buttonSubmit.clicked.connect(self.submit)
buttonLogin.clicked.connect(self.login)
# Set to a fixed size
self.setFixedSize(self.sizeHint())
# ----------------------------------------------------------------------
def login(self):
print (u'Login verification started: self.userId=%s, self.password=%s' % (self.userId, self.password))
userId = str(self.userId.text())
password = str(self.password.text())
print (u'Login verification started: userId=%s, password=%s' % (userId, password))
# The query below returns at most one active user record
sql = ' SELECT *' \
' from tbuser where user_id = \'%s\' and password = \'%s\' and status = 0 ' % (userId, password)
try:
ret = vnpy.DAO.getDataBySQL('vnpy', sql)
if ret.empty :
print (u'Login verification failed: user does not exist or password is incorrect')
#QtWidgets.QMessageBox.information(self, "Login failed", "User does not exist or password is incorrect, please retry!", QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No)
QtWidgets.QMessageBox.information(self, text.LOGINERROR,text.LOGINERRORINFO,
QtWidgets.QMessageBox.Retry)
#Tkinter.messagebox.showinfo('Login verification failed: user does not exist or password is incorrect')
else:
print (u'Login verification succeeded')
QtWidgets.QMessageBox.information(self, text.LOGINSUSS, text.LOGINSUSSINFO, QtWidgets.QMessageBox.Ok)
self.close()
#Tkinter.messagebox.showinfo('Welcome')
except Exception as e:
print e
# ----------------------------------------------------------------------
def logout(self):
pass
# ----------------------------------------------------------------------
def submit(self):
userId = str(self.userId.text())
password = str(self.password.text())
print (u'Registration check started: userId=%s, password=%s' % (userId, password))
# The query below returns at most one active user record
sql = ' SELECT user_id,status' \
' from tbuser where user_id = \'%s\' ' % (userId)
try:
ret = vnpy.DAO.getDataBySQL('vnpy', sql)
# If the user does not exist in the system, insert the registration record directly
if ret.empty:
print (u'No such user record; registration can proceed')
userData = [userId, userId, 0, password, '', 0, 0, 0, '', ' ', ' ', '', 0, 0, '', 0, 0, ' ', '']
d = pd.DataFrame([userData], columns=TBUSER_COLUMNS)
try:
print("开始写入TBUSER中")
vnpy.DAO.writeData('vnpy', 'tbuser', d)
print (u'Registration succeeded')
QtWidgets.QMessageBox.information(self, text.SUBMIT, text.SUBMITSUSS, QtWidgets.QMessageBox.Ok)
self.close()
except Exception as e1:
print (u'Registration failed')
QtWidgets.QMessageBox.information(self, text.SUBMIT, text.SUBMITFAIL, QtWidgets.QMessageBox.Retry)
print e1
# If the user already exists in the system, update the status and password to reactivate the user
else:
# Not implemented yet
QtWidgets.QMessageBox.information(self, text.SUBMIT, text.SUBMITFAIL, QtWidgets.QMessageBox.Ok)
self.close()
except Exception as e:
print e
#QtWidgets.QMessageBox.information(self, text.SUBMIT, text.SUBMITSUSS, QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No)
# ----------------------------------------------------------------------
def closeLoginEngineManager(self):
self.close()
pass | mit |
Texju/DIT-Machine_Learning | Codes/validation.py | 1 | 1983 | # -*- coding: utf-8 -*-
"""
Created on Wed May 24 15:00:15 2017
@author: Julien Couillard & Jean Thevenet
"""
from sklearn import cross_validation
from sklearn import metrics
import numpy
class MLValidation:
"""This class calculates the preciseness of our tree against a set of data"""
def __init__(self, tree):
self.__tree = tree
self.__targets = []
self.__predictions = []
def test(self, data):
""" Testing the model """
# Train our model
instances_train, target_train = self.__tree.prepareData(data.Training)
self.__tree.Tree.fit(instances_train, target_train)
# Test the model
instances_test, target_test = self.__tree.prepareData(data.Testing)
self.__targets = target_test
#Use the model to make predictions for the test set queries
self.__predictions = self.__tree.Tree.predict(instances_test)
def test_KFoldCrossValidation(self, data, k):
instances_train, target_train = self.__tree.prepareData(data.Raw)
scores=cross_validation.cross_val_score(self.__tree.Tree, instances_train, target_train, cv=k)
return scores
def testNaiveAlwaysYes(self, data):
""" Test our targets against a matrix that always return - 50000"""
self.test(data)
self.__predictions[:] = " - 50000."
def confusionMatrix(self):
if len(self.__predictions) != 0:
return metrics.confusion_matrix(self.__targets, self.__predictions)
def accuracy(self):
return metrics.accuracy_score(self.__targets, self.__predictions, normalize=True)
def accuracy_harmonic(self):
t = self.__targets.replace(" - 50000.","yes")
t = t.replace(" 50000+.","no")
p = numpy.copy(self.__predictions)
p[p == " - 50000."] = "yes"
p[p == " 50000+."] = "no"
return metrics.f1_score(t, p, pos_label="yes") | gpl-3.0 |
SU-ECE-17-7/hotspotter | _graveyard/voting_rules1.py | 2 | 20685 |
#_________________
# OLD
def build_voters_profile(hs, qcx, K):
'''This is too similar to assign_matches_vsmany right now'''
cx2_nx = hs.tables.cx2_nx
hs.ensure_matcher(match_type='vsmany', K=K)
K += 1
cx2_desc = hs.feats.cx2_desc
cx2_kpts = hs.feats.cx2_kpts
cx2_rchip_size = hs.get_cx2_rchip_size()
desc1 = cx2_desc[qcx]
args = hs.matcher.vsmany_args
vsmany_flann = args.vsmany_flann
ax2_cx = args.ax2_cx
ax2_fx = args.ax2_fx
print('[invest] Building voter preferences over %s indexed descriptors. K=%r' %
(helpers.commas(len(ax2_cx)), K))
nn_args = (args, qcx, cx2_kpts, cx2_desc, cx2_rchip_size, K+1)
nn_result = mc2.vsmany_nearest_neighbors(*nn_args)
(qfx2_ax, qfx2_dists, qfx2_valid) = nn_result
vote_dists = qfx2_dists[:, 0:K]
norm_dists = qfx2_dists[:, K] # k+1th descriptor for normalization
# Score the feature matches
qfx2_score = np.array([mc2.LNBNN_fn(_vdist.T, norm_dists)
for _vdist in vote_dists.T]).T
# Vote using the inverted file
qfx2_cx = ax2_cx[qfx2_ax[:, 0:K]]
qfx2_fx = ax2_fx[qfx2_ax[:, 0:K]]
qfx2_valid = qfx2_valid[:, 0:K]
qfx2_nx = temporary_names(qfx2_cx, cx2_nx[qfx2_cx], zeroed_cx_list=[qcx])
voters_profile = (qfx2_nx, qfx2_cx, qfx2_fx, qfx2_score, qfx2_valid)
return voters_profile
#def filter_alternative_frequencies2(alternative_ids1, qfx2_altx1, correct_altx, max_cands=32):
def filter_alternative_frequencies(alternative_ids1, qfx2_altx1, correct_altx, max_cands=32):
'determines the alternatives that appear the most and filters out the least occurring'
alternative_ids = alternative_ids1.copy()
qfx2_altx = qfx2_altx1.copy()
altx2_freq = np.bincount(qfx2_altx.flatten()+1)[1:]
smallest_altx = altx2_freq.argsort()
smallest_cfreq = altx2_freq[smallest_altx]
smallest_thresh = len(smallest_cfreq) - max_cands
print('Current num alternatives = %r. Truncating to %r' % (len(altx2_freq), max_cands))
print('Frequency stats: '+str(helpers.mystats(altx2_freq[altx2_freq != 0])))
print('Correct alternative frequency = %r' % altx2_freq[correct_altx])
print('Correct alternative frequency rank = %r' % (np.where(smallest_altx == correct_altx)[0],))
if smallest_thresh > -1:
freq_thresh = smallest_cfreq[smallest_thresh]
print('Truncating at rank = %r' % smallest_thresh)
print('Truncating at frequency = %r' % freq_thresh)
to_remove_altx, = np.where(altx2_freq <= freq_thresh)
qfx2_remove = np.in1d(qfx2_altx.flatten(), to_remove_altx)
qfx2_remove.shape = qfx2_altx.shape
qfx2_altx[qfx2_remove] = -1
keep_ids = True - np.in1d(alternative_ids, alternative_ids[to_remove_altx])
alternative_ids = alternative_ids[keep_ids]
return alternative_ids, qfx2_altx
def temporary_names(cx_list, nx_list, zeroed_cx_list=[], zeroed_nx_list=[]):
'''Test Input:
nx_list = np.array([(1, 5, 6), (2, 4, 0), (1, 1, 1), (5, 5, 5)])
cx_list = np.array([(2, 3, 4), (5, 6, 7), (8, 9, 10), (4, 5, 5)])
zeroed_nx_list = []
zeroed_cx_list = [3]
'''
zeroed_cx_list = set(zeroed_cx_list)
tmp_nx_list = []
for ix, (cx, nx) in enumerate(zip(cx_list.flat, nx_list.flat)):
if cx in zeroed_cx_list:
tmp_nx_list.append(0)
elif nx in zeroed_nx_list:
tmp_nx_list.append(0)
elif nx >= 2:
tmp_nx_list.append(nx)
else:
tmp_nx_list.append(-cx)
tmp_nx_list = np.array(tmp_nx_list)
tmp_nx_list = tmp_nx_list.reshape(cx_list.shape)
return tmp_nx_list
def build_pairwise_votes(alternative_ids, qfx2_altx):
'''
Divides full rankings over alternatives into pairwise rankings.
Assumes that the breaking has already been applied.
e.g.
alternative_ids = [0,1,2]
qfx2_altx = np.array([(0, 1, 2), (1, 2, 0)])
'''
nAlts = len(alternative_ids)
def generate_pairwise_votes(partial_order, compliment_order):
pairwise_winners = [partial_order[rank:rank+1]
for rank in xrange(0, len(partial_order))]
pairwise_losers = [np.hstack((compliment_order, partial_order[rank+1:]))
for rank in xrange(0, len(partial_order))]
pairwise_vote_list = [helpers.cartesian((pwinners, plosers)) for pwinners, plosers
in zip(pairwise_winners, pairwise_losers)]
pairwise_votes = np.vstack(pairwise_vote_list)
return pairwise_votes
pairwise_mat = np.zeros((nAlts, nAlts))
nVoters = len(qfx2_altx)
progstr = helpers.make_progress_fmt_str(nVoters, lbl='[voting] building P(d)')
for ix, qfx in enumerate(xrange(nVoters)):
helpers.print_(progstr % (ix+1))
partial_order = qfx2_altx[qfx]
partial_order = partial_order[partial_order != -1]
if len(partial_order) == 0: continue
compliment_order = np.setdiff1d(alternative_ids, partial_order)
pairwise_votes = generate_pairwise_votes(partial_order, compliment_order)
def sum_win(ij): pairwise_mat[ij[0], ij[1]] += 1 # pairwise wins accumulate on the off-diagonal
def sum_loss(ij): pairwise_mat[ij[1], ij[1]] -= 1 # pairwise losses accumulate on the diagonal
map(sum_win, iter(pairwise_votes))
map(sum_loss, iter(pairwise_votes))
# Divide num voters
PLmatrix = pairwise_mat / nVoters # = P(D) = Plackett-Luce GMoM function
return PLmatrix
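# Hedged worked example (added for clarity, not part of the original module):
# for the two partial rankings quoted in the optimize() docstring below,
# build_pairwise_votes() produces exactly the matrix shown there.
def _demo_build_pairwise_votes():
    alternative_ids = np.array([0, 1, 2])
    qfx2_altx = np.array([(0, 1, 2), (1, 0, 2)])
    PLmatrix = build_pairwise_votes(alternative_ids, qfx2_altx)
    # PLmatrix == [[-0.5,  0.5,  1.0],
    #              [ 0.5, -0.5,  1.0],
    #              [ 0.0,  0.0, -2.0]]
    print(PLmatrix)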
def optimize(M):
'''
alternative_ids = [0,1,2]
qfx2_altx = np.array([(0,1,2), (1,0,2)])
M = PLmatrix
M = pairwise_voting(alternative_ids, qfx2_altx)
M = array([[-0.5, 0.5, 1. ],
[ 0.5, -0.5, 1. ],
[ 0. , 0. , -2. ]])
'''
print(r'[vote] x = argmin_x ||Mx||_2, s.t. ||x||_2 = 1')
m = M.shape[0]
x0 = np.ones(m)/np.sqrt(m)
f = lambda x, M: linalg.norm(M.dot(x))
con = lambda x: linalg.norm(x) - 1
cons = {'type':'eq', 'fun': con}
print('[vote] running optimization')
with helpers.Timer() as t:
res = scipy.optimize.minimize(f, x0, args=(M,), constraints=cons)
x = res['x']
xnorm = linalg.norm(x)
gamma = np.abs(x / xnorm)
print('[voting_rules] x = %r' % (x,))
print('[voting_rules] xnorm = %r' % (xnorm,))
print('[voting_rules] gamma = %r' % (gamma,))
return gamma
def optimize2(M):
x = linalg.solve(M, np.zeros(M.shape[0]))
x /= linalg.norm(x)
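# Hedged alternative sketch (added for clarity, not part of the original
# module): the constrained minimizer that optimize() searches for can also be
# read directly off a singular value decomposition, as worked through in the
# test() docstring at the bottom of this file -- the right singular vector for
# the smallest singular value of M solves min ||Mx|| subject to ||x|| = 1.
def optimize_svd(M):
    u, s, v = linalg.svd(M)
    x = v[-1]  # right singular vector of the smallest singular value
    return np.abs(x / linalg.norm(x))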
def PlacketLuce(vote, gamma):
''' e.g.
gamma = optimize()
vote = np.arange(len(gamma))
np.random.shuffle(vote)
pr = PlacketLuce(vote, gamma)
print(vote)
print(pr)
print('----')
'''
m = len(vote)-1
pl_term = lambda x: gamma[vote[x]] / gamma[vote[x:]].sum()
prob = np.prod([pl_term(x) for x in xrange(m)])
return prob
#----
def viz_votingrule_table(ranked_candiates, ranked_scores, correct_altx, title, fnum):
num_top = 5
correct_rank = np.where(ranked_candiates == correct_altx)[0]
if len(correct_rank) > 0:
correct_rank = correct_rank[0]
correct_score = ranked_scores[correct_rank]
np.set_printoptions(precision=1)
top_cands = ranked_candiates[0:num_top]
top_scores = ranked_scores[0:num_top]
print('[vote] top%r ranked cands = %r' % (num_top, top_scores))
print('[vote] top%r ranked scores = %r' % (num_top, top_cands))
print('[vote] correct candid = %r ' % correct_altx)
print('[vote] correct ranking / score = %r / %r ' % (correct_rank, correct_score))
print('----')
np.set_printoptions(precision=8)
plt = df2.plt
df2.figure(fignum=fnum, doclf=True, subplot=(1,1,1))
ax=plt.gca()
#plt.plot([10,10,14,14,10],[2,4,4,2,2],'r')
col_labels=map(lambda x: '%8d' % x, np.arange(num_top)+1)
row_labels=['cand ids ',
'cand scores ',
'correct ranking ',
'correct score ']
table_vals=[map(lambda x: '%8d' % x, top_cands),
map(lambda x: '%8.2f' % x, top_scores),
['%8d' % (correct_rank)] + [' '] * (num_top-1),
['%8.2f' % correct_score] + [' '] * (num_top-1)]
#matplotlib.table.Table
# the rectangle is where I want to place the table
#the_table = plt.table(cellText=table_vals,
#rowLabels=row_labels,
#colLabels=col_labels,
#colWidths = [0.1]*num_top,
#loc='center')
def latex_table(row_labels, col_labels, table_vals):
#matplotlib.rc('text', usetex=True)
#print('col_labels=%r' % col_labels)
#print('row_labels=%r' % row_labels)
#print('table_vals=%r' % table_vals)
nRows = len(row_labels)
nCols = len(col_labels)
def tableline(list_, rowlbl):
return rowlbl + ' & '+(' & '.join(list_))+'\\\\'
collbl = tableline(col_labels, ' '*16)
col_strs = [collbl, '\hline'] + [tableline(rowvals, rowlbl) for rowlbl, rowvals in zip(row_labels, table_vals)]
col_split = '\n'
body = col_split.join(col_strs)
col_placement = ' c || '+(' | '.join((['c']*nCols)))
latex_str = textwrap.dedent(r'''
\begin{tabular}{%s}
%s
\end{tabular}
''') % (col_placement, helpers.indent(body))
print(latex_str)
plt.text(0, 0, latex_str, fontsize=14,
horizontalalignment='left',
verticalalignment='bottom',
fontname='Courier New')
#family='monospaced')
#print(matplotlib.font_manager.findSystemFonts(fontpaths=None, fontext='ttf'))
#print(matplotlib.font_manager.findfont('Courier'))
#fontname
r'''
\begin{tabular}{ c || c | c | c | c | c}
& 1 & 2 & 3 & 4 & 5\\
\hline
cand ids & 3 & 38 & 32 & 40 & 5\\
cand scores & 4512.0 & 4279.0 & 4219.0 & 4100.0 & 3960.0\\
correct ranking & 25 & & & & \\
correct score & 1042.0 & & & & \\
\end{tabular}
'''
latex_table(row_labels, col_labels, table_vals)
df2.set_figtitle(title)
def voting_rule(alternative_ids, qfx2_altx, qfx2_weight=None, rule='borda',
correct_altx=None, fnum=1):
K = qfx2_altx.shape[1]
if rule == 'borda':
score_vec = np.arange(0,K)[::-1]
if rule == 'plurality':
score_vec = np.zeros(K); score_vec[0] = 1
if rule == 'topk':
score_vec = np.ones(K)
score_vec = np.array(score_vec, dtype=np.int)
print('----')
title = 'Rule=%s Weighted=%r ' % (rule, not qfx2_weight is None)
print('[vote] ' + title)
print('[vote] score_vec = %r' % (score_vec,))
alt_score = weighted_positional_scoring_rule(alternative_ids, qfx2_altx, score_vec, qfx2_weight)
ranked_candiates = alt_score.argsort()[::-1]
ranked_scores = alt_score[ranked_candiates]
viz_votingrule_table(ranked_candiates, ranked_scores, correct_altx, title, fnum)
return ranked_candiates, ranked_scores
def weighted_positional_scoring_rule(alternative_ids,
qfx2_altx, score_vec,
qfx2_weight=None):
nAlts = len(alternative_ids)
alt_score = np.zeros(nAlts)
if qfx2_weight is None:
qfx2_weight = np.ones(qfx2_altx.shape)
for qfx in xrange(len(qfx2_altx)):
partial_order = qfx2_altx[qfx]
weights = qfx2_weight[qfx]
# Remove impossible votes
weights = weights[partial_order != -1]
partial_order = partial_order[partial_order != -1]
for ix, altx in enumerate(partial_order):
alt_score[altx] += weights[ix] * score_vec[ix]
return alt_score
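# Hedged worked example (added for clarity, not part of the original module):
# an unweighted Borda count computed with weighted_positional_scoring_rule().
# With score_vec = [2, 1, 0] and the three rankings below, alternative 0
# collects 5 points, alternative 1 collects 3 and alternative 2 collects 1.
def _demo_positional_scoring():
    alternative_ids = np.arange(3)
    qfx2_altx = np.array([[0, 1, 2], [1, 0, 2], [0, 2, 1]])
    score_vec = np.array([2, 1, 0])
    print(weighted_positional_scoring_rule(alternative_ids, qfx2_altx, score_vec))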
def _normalize_voters_profile(hs, qcx, voters_profile):
'''Applies a temporary labeling scheme'''
cx2_nx = hs.tables.cx2_nx
(qfx2_nx, qfx2_cx, qfx2_fx, qfx2_score, qfx2_valid) = voters_profile
# Apply temporary alternative labels
alts_cxs = np.unique(qfx2_cx[qfx2_valid].flatten())
alts_nxs = np.setdiff1d(np.unique(qfx2_nx[qfx2_valid].flatten()), [0])
nx2_altx = {nx:altx for altx, nx in enumerate(alts_nxs)}
nx2_altx[0] = -1
qfx2_altx = np.copy(qfx2_nx)
old_shape = qfx2_altx.shape
qfx2_altx.shape = (qfx2_altx.size,)
for i in xrange(len(qfx2_altx)):
qfx2_altx[i] = nx2_altx[qfx2_altx[i]]
qfx2_altx.shape = old_shape
alternative_ids = np.arange(0, len(alts_nxs))
correct_altx = nx2_altx[cx2_nx[qcx]] # Ground truth labels
qfx2_weight = qfx2_score
return alternative_ids, qfx2_altx, qfx2_weight, correct_altx
def viz_PLmatrix(PLmatrix, qfx2_altx=None, correct_altx=None, alternative_ids=None, fnum=1):
if alternative_ids is None:
alternative_ids = []
if correct_altx is None:
correct_altx = -1
if qfx2_altx is None:
nVoters = -1
else:
nVoters = len(qfx2_altx)
# Separate diagonal and off diagonal
PLdiagonal = np.diagonal(PLmatrix)
PLdiagonal.shape = (len(PLdiagonal), 1)
PLoffdiag = PLmatrix.copy(); np.fill_diagonal(PLoffdiag, 0)
# Build a figure
fig = df2.plt.gcf()
fig.clf()
# Show the off diagonal
colormap = 'hot'
ax = fig.add_subplot(121)
cax = ax.imshow(PLoffdiag, interpolation='nearest', cmap=colormap)
stride = int(np.ceil(np.log10(len(alternative_ids)))+1)*10
correct_id = alternative_ids[correct_altx]
alternative_ticks = sorted(alternative_ids[::stride].tolist() + [correct_id])
ax.set_xticks(alternative_ticks)
ax.set_xticklabels(alternative_ticks)
ax.set_yticks(alternative_ticks)
ax.set_yticklabels(alternative_ticks)
ax.set_xlabel('candiate ids')
ax.set_ylabel('candiate ids.')
ax.set_title('Off-Diagonal')
fig.colorbar(cax, orientation='horizontal')
# Show the diagonal
ax = fig.add_subplot(122)
def duplicate_cols(M, nCols):
return np.tile(M, (1, nCols))
nCols = len(PLdiagonal) / 2
cax2 = ax.imshow(duplicate_cols(PLdiagonal, nCols), interpolation='nearest', cmap=colormap)
ax.set_title('diagonal')
ax.set_xticks([])
ax.set_yticks(alternative_ticks)
ax.set_yticklabels(alternative_ticks)
df2.set_figtitle('Correct ID=%r' % (correct_id))
fig.colorbar(cax2, orientation='horizontal')
fig.subplots_adjust(left=0.05, right=.99,
bottom=0.01, top=0.88,
wspace=0.01, hspace=0.01)
#plt.set_cmap('jet', plt.cm.jet,norm = LogNorm())
def test_voting_rules(hs, qcx, K, fnum=1):
voters_profile = build_voters_profile(hs, qcx, K)
normal_profile = _normalize_voters_profile(hs, qcx, voters_profile)
alternative_ids, qfx2_altx, qfx2_weight, correct_altx = normal_profile
#alternative_ids, qfx2_altx = filter_alternative_frequencies(alternative_ids, qfx2_altx, correct_altx)
m = len(alternative_ids)
n = len(qfx2_altx)
k = len(qfx2_altx.T)
bigo_breaking = helpers.int_comma_str((m+k)*k*n)
bigo_gmm = helpers.int_comma_str(int(m**2.376))
bigo_gmm3 = helpers.int_comma_str(int(m**3))
print('[voting] m = num_alternatives = %r ' % len(alternative_ids))
print('[voting] n = nVoters = %r ' % len(qfx2_altx))
print('[voting] k = top_k_breaking = %r ' % len(qfx2_altx.T))
print('[voting] Computing breaking O((m+k)*k*n) = %s' % bigo_breaking)
print('[voting] Computing GMoM breaking O(m^{2.376}) < O(m^3) = %s < %s' % (bigo_gmm, bigo_gmm3))
#---
def voting_rule_(weighting, rule_name, fnum):
ranking = voting_rule(alternative_ids, qfx2_altx, weighting, rule_name, correct_altx, fnum)
return ranking, fnum + 1
#weighted_topk_ranking, fnum = voting_rule_(qfx2_weight, 'topk', fnum)
#weighted_borda_ranking, fnum = voting_rule_(qfx2_weight, 'borda', fnum)
#weighted_plurality_ranking, fnum = voting_rule_(qfx2_weight, 'plurality', fnum)
#topk_ranking, fnum = voting_rule_(None, 'topk', fnum)
#borda_ranking, fnum = voting_rule_(None, 'borda', fnum)
#plurality_ranking, fnum = voting_rule_(None, 'plurality', fnum)
#---
PLmatrix = build_pairwise_votes(alternative_ids, qfx2_altx)
viz_PLmatrix(PLmatrix, qfx2_altx, correct_altx, alternative_ids, fnum)
# Took 52 seconds on bakerstreet with (41x41) matrix
gamma = optimize(PLmatrix) # (41x41) -> 52 seconds
gamma = optimize(PLmatrix[:-1,:-1]) # (40x40) -> 83 seconds
gamma = optimize(PLmatrix[:-11,:-11]) # (30x30) -> 45 seconds)
gamma = optimize(PLmatrix[:-21,:-21]) # (20x20) -> 21 seconds)
gamma = optimize(PLmatrix[:-31,:-31]) # (10x10) -> 4 seconds)
gamma = optimize(PLmatrix[:-36,:-36]) # ( 5x 5) -> 2 seconds)
def PlacketLuceWinnerProb(gamma):
nAlts = len(gamma)
mask = np.ones(nAlts, dtype=np.bool)
ax2_prob = np.zeros(nAlts)
for ax in xrange(nAlts):
mask[ax] = False
ax2_prob[ax] = gamma[ax] / np.sum(gamma[mask])
mask[ax] = True
ax2_prob = ax2_prob / ax2_prob.sum()
return ax2_prob
ax2_prob = PlacketLuceWinnerProb(gamma)
pl_ranking = ax2_prob.argsort()[::-1]
pl_confidence = ax2_prob[pl_ranking]
correct_rank = np.where(pl_ranking == correct_altx)[0][0]
ranked_altxconf = zip(pl_ranking, pl_confidence)
print('Top 5 Ranked altx/confidence = %r' % (ranked_altxconf[0:5],))
print('Correct Rank=%r altx/confidence = %r' % (correct_rank, ranked_altxconf[correct_rank],))
df2.update()
#b = np.zeros(4)
#b[-1] = 1
#[- + +]
#[+ - +] x = b
#[+ + -] 1
#[1 0 0]
#X = np.vstack([M,[1,0,0]])
#print(X)
#print(b)
#x = linalg.solve(X, b)
def test():
from numpy import linalg
linalg.lstsq
'''
Test Data:
K = 5
votes = [(3,2,1,4), (4,1,2,3), (4, 2, 3, 1), (1, 2, 3, 4)]
qfx2_utilities = [[(nx, nx, nx**3, k) for k, nx in enumerate(vote)] for vote in votes]
M, altx2_nx= _utilities2_pairwise_breaking(qfx2_utilities)
from numpy.linalg import svd, inv
from numpy import eye, diag, zeros
#Because s is sorted, and M is rank deficient, the value s[-1] should be 0
np.set_printoptions(precision=2, suppress=True, linewidth=80)
#The svd is:
#u * s * v = M
u.dot(diag(s)).dot(v) = M
#u is unitary:
inv(u).dot(u) == eye(len(s))
diag(s).dot(v) == inv(u).dot(M)
u.dot(diag(s)) == M.dot(inv(v))
And because s[-1] is 0
u.dot(diag(s))[:,-1:] == zeros((len(s),1))
Because we want to find Mx = 0
So flip the left and right sides
M.dot(inv(v)[:,-1:]) == u.dot(diag(s))[:,-1:]
And you find
M = M
x = inv(v)[:,-1:]
0 = u.dot(diag(s))[:,-1:]
So we have the solution to our problem as x = inv(v)[:,-1:]
Furthermore it is true that
inv(v)[:,-1:].T == v[-1:,:]
because v is unitary and the last vector in v corresponds to a singular
vector because M is rank m-1
ALSO: v.dot(inv(v)) = eye(len(s)) so
v[-1].dot(inv(v)[:,-1:]) == 1
this means that v[-1] is non-zero, and v[-1].T == inv(v[:,-1:])
So all of this can be done as...
'''
# We could also say
def eq(M1, M2):
print(str(M1)+'\n = \n'+str(M2))
# Compute SVD
(u, s_, v) = linalg.svd(M)
s = diag(s_)
#---
print('-------')
print('M =\n%s' % (M,))
print('-------')
print('u =\n%s' % (u,))
print('-------')
print('s =\n%s' % (s,))
print('-------')
print('v =\n%s' % (v,))
print('-------')
print('u s v = M')
eq(u.dot(s).dot(v), M)
# We want to find Mx = 0
print('-------')
print('The last value of s is zero because M is rank m-1 and s is sorted')
print('s =\n%s' % (s,))
print('-------')
print('Therefore the last column of u.dot(s) is zeros')
print('v is unitary so v.T = inv(v)')
print('u s = M v.T')
eq(u.dot(s), M.dot(v.T))
print('-------')
print('We want to find Mx = 0, and the last column of LHS corresponds to this')
print('u s = M v.T')
eq(u.dot(s), M.dot(v.T))
# The right column u.dot(s) is
#Ok, so v[-1] can be negative, but that's ok
# its unitary, we can just negate it.
# or we can take the absolute value or l2 normalize it
# x = v[-1] = inv(v)[:,-1]
# so
# x.dot(x) == 1
# hmmmm
# I need to find a way to prove
# components of x are all negative or all
# positive
# Verify s is 0
x = v[-1]
| apache-2.0 |
wxiang7/airflow | airflow/hooks/dbapi_hook.py | 3 | 7095 |
from builtins import str
from past.builtins import basestring
from datetime import datetime
import numpy
import logging
from airflow.hooks.base_hook import BaseHook
from airflow.exceptions import AirflowException
class DbApiHook(BaseHook):
"""
Abstract base class for sql hooks.
"""
# Override to provide the connection name.
conn_name_attr = None
# Override to have a default connection id for a particular dbHook
default_conn_name = 'default_conn_id'
# Override if this db supports autocommit.
supports_autocommit = False
# Override with the object that exposes the connect method
connector = None
def __init__(self, *args, **kwargs):
if not self.conn_name_attr:
raise AirflowException("conn_name_attr is not defined")
elif len(args) == 1:
setattr(self, self.conn_name_attr, args[0])
elif self.conn_name_attr not in kwargs:
setattr(self, self.conn_name_attr, self.default_conn_name)
else:
setattr(self, self.conn_name_attr, kwargs[self.conn_name_attr])
def get_conn(self):
"""Returns a connection object
"""
db = self.get_connection(getattr(self, self.conn_name_attr))
return self.connector.connect(
host=db.host,
port=db.port,
username=db.login,
schema=db.schema)
def get_pandas_df(self, sql, parameters=None):
'''
Executes the sql and returns a pandas dataframe
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:type sql: str or list
:param parameters: The parameters to render the SQL query with.
:type parameters: mapping or iterable
'''
import pandas.io.sql as psql
conn = self.get_conn()
df = psql.read_sql(sql, con=conn, params=parameters)
conn.close()
return df
def get_records(self, sql, parameters=None):
'''
Executes the sql and returns a set of records.
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:type sql: str or list
:param parameters: The parameters to render the SQL query with.
:type parameters: mapping or iterable
'''
conn = self.get_conn()
cur = self.get_cursor()
if parameters is not None:
cur.execute(sql, parameters)
else:
cur.execute(sql)
rows = cur.fetchall()
cur.close()
conn.close()
return rows
def get_first(self, sql, parameters=None):
'''
Executes the sql and returns the first resulting row.
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:type sql: str or list
:param parameters: The parameters to render the SQL query with.
:type parameters: mapping or iterable
'''
conn = self.get_conn()
cur = conn.cursor()
if parameters is not None:
cur.execute(sql, parameters)
else:
cur.execute(sql)
rows = cur.fetchone()
cur.close()
conn.close()
return rows
def run(self, sql, autocommit=False, parameters=None):
"""
Runs a command or a list of commands. Pass a list of sql
statements to the sql parameter to get them to execute
sequentially
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:type sql: str or list
:param autocommit: What to set the connection's autocommit setting to
before executing the query.
:type autocommit: bool
:param parameters: The parameters to render the SQL query with.
:type parameters: mapping or iterable
"""
conn = self.get_conn()
if isinstance(sql, basestring):
sql = [sql]
if self.supports_autocommit:
self.set_autocommit(conn, autocommit)
cur = conn.cursor()
for s in sql:
logging.info(s)
if parameters is not None:
cur.execute(s, parameters)
else:
cur.execute(s)
cur.close()
conn.commit()
conn.close()
def set_autocommit(self, conn, autocommit):
conn.autocommit = autocommit
def get_cursor(self):
"""
Returns a cursor
"""
return self.get_conn().cursor()
def insert_rows(self, table, rows, target_fields=None, commit_every=1000):
"""
A generic way to insert a set of tuples into a table,
the whole set of inserts is treated as one transaction
:param table: Name of the target table
:type table: str
:param rows: The rows to insert into the table
:type rows: iterable of tuples
:param target_fields: The names of the columns to fill in the table
:type target_fields: iterable of strings
:param commit_every: The maximum number of rows to insert in one
transaction. Set to 0 to insert all rows in one transaction.
:type commit_every: int
"""
if target_fields:
target_fields = ", ".join(target_fields)
target_fields = "({})".format(target_fields)
else:
target_fields = ''
conn = self.get_conn()
cur = conn.cursor()
if self.supports_autocommit:
cur.execute('SET autocommit = 0')
conn.commit()
i = 0
for row in rows:
i += 1
l = []
for cell in row:
l.append(self._serialize_cell(cell))
values = tuple(l)
sql = "INSERT INTO {0} {1} VALUES ({2});".format(
table,
target_fields,
",".join(values))
cur.execute(sql)
if commit_every and i % commit_every == 0:
conn.commit()
logging.info(
"Loaded {i} into {table} rows so far".format(**locals()))
conn.commit()
cur.close()
conn.close()
logging.info(
"Done loading. Loaded a total of {i} rows".format(**locals()))
@staticmethod
def _serialize_cell(cell):
if isinstance(cell, basestring):
return "'" + str(cell).replace("'", "''") + "'"
elif cell is None:
return 'NULL'
elif isinstance(cell, numpy.datetime64):
return "'" + str(cell) + "'"
elif isinstance(cell, datetime):
return "'" + cell.isoformat() + "'"
else:
return str(cell)
def bulk_load(self, table, tmp_file):
"""
Loads a tab-delimited file into a database table
:param table: The name of the target table
:type table: str
:param tmp_file: The path of the file to load into the table
:type tmp_file: str
"""
raise NotImplementedError()
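# Hedged illustration (added for clarity, not part of Airflow): a minimal
# concrete hook built on DbApiHook using the standard-library sqlite3 module.
# get_conn() is overridden because sqlite3.connect() takes a file path rather
# than host/port credentials; the example assumes the Airflow connection's
# "host" field stores that path.
import sqlite3


class _ExampleSqliteHook(DbApiHook):
    conn_name_attr = 'sqlite_conn_id'
    default_conn_name = 'sqlite_default'
    supports_autocommit = False

    def get_conn(self):
        db = self.get_connection(getattr(self, self.conn_name_attr))
        return sqlite3.connect(db.host)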
| apache-2.0 |
lenovor/scikit-learn | sklearn/tests/test_qda.py | 155 | 3481 | import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn import qda
# Data is just 6 separable points in the plane
X = np.array([[0, 0], [-2, -2], [-2, -1], [-1, -1], [-1, -2],
[1, 3], [1, 2], [2, 1], [2, 2]])
y = np.array([1, 1, 1, 1, 1, 2, 2, 2, 2])
y3 = np.array([1, 2, 3, 2, 3, 1, 2, 3, 1])
# Degenerate data with 1 feature (still should be separable)
X1 = np.array([[-3, ], [-2, ], [-1, ], [-1, ], [0, ], [1, ], [1, ],
[2, ], [3, ]])
# Data that has zero variance in one dimension and needs regularization
X2 = np.array([[-3, 0], [-2, 0], [-1, 0], [-1, 0], [0, 0], [1, 0], [1, 0],
[2, 0], [3, 0]])
# One element class
y4 = np.array([1, 1, 1, 1, 1, 1, 1, 1, 2])
# Data with less samples in a class than n_features
X5 = np.c_[np.arange(8), np.zeros((8,3))]
y5 = np.array([0, 0, 0, 0, 0, 1, 1, 1])
def test_qda():
# QDA classification.
# This checks that QDA implements fit and predict and returns
# correct values for a simple toy dataset.
clf = qda.QDA()
y_pred = clf.fit(X, y).predict(X)
assert_array_equal(y_pred, y)
# Assure that it works with 1D data
y_pred1 = clf.fit(X1, y).predict(X1)
assert_array_equal(y_pred1, y)
# Test probas estimates
y_proba_pred1 = clf.predict_proba(X1)
assert_array_equal((y_proba_pred1[:, 1] > 0.5) + 1, y)
y_log_proba_pred1 = clf.predict_log_proba(X1)
assert_array_almost_equal(np.exp(y_log_proba_pred1), y_proba_pred1, 8)
y_pred3 = clf.fit(X, y3).predict(X)
# QDA shouldn't be able to separate those
assert_true(np.any(y_pred3 != y3))
# Classes should have at least 2 elements
assert_raises(ValueError, clf.fit, X, y4)
def test_qda_priors():
clf = qda.QDA()
y_pred = clf.fit(X, y).predict(X)
n_pos = np.sum(y_pred == 2)
neg = 1e-10
clf = qda.QDA(priors=np.array([neg, 1 - neg]))
y_pred = clf.fit(X, y).predict(X)
n_pos2 = np.sum(y_pred == 2)
assert_greater(n_pos2, n_pos)
def test_qda_store_covariances():
# The default is to not set the covariances_ attribute
clf = qda.QDA().fit(X, y)
assert_true(not hasattr(clf, 'covariances_'))
# Test the actual attribute:
clf = qda.QDA().fit(X, y, store_covariances=True)
assert_true(hasattr(clf, 'covariances_'))
assert_array_almost_equal(
clf.covariances_[0],
np.array([[0.7, 0.45], [0.45, 0.7]])
)
assert_array_almost_equal(
clf.covariances_[1],
np.array([[0.33333333, -0.33333333], [-0.33333333, 0.66666667]])
)
def test_qda_regularization():
# the default is reg_param=0. and will cause issues
# when there is a constant variable
clf = qda.QDA()
with ignore_warnings():
y_pred = clf.fit(X2, y).predict(X2)
assert_true(np.any(y_pred != y))
# adding a little regularization fixes the problem
clf = qda.QDA(reg_param=0.01)
with ignore_warnings():
clf.fit(X2, y)
y_pred = clf.predict(X2)
assert_array_equal(y_pred, y)
# Case n_samples_in_a_class < n_features
clf = qda.QDA(reg_param=0.1)
with ignore_warnings():
clf.fit(X5, y5)
y_pred5 = clf.predict(X5)
assert_array_equal(y_pred5, y5)
| bsd-3-clause |
beepee14/scikit-learn | sklearn/linear_model/__init__.py | 270 | 3096 | """
The :mod:`sklearn.linear_model` module implements generalized linear models. It
includes Ridge regression, Bayesian Regression, Lasso and Elastic Net
estimators computed with Least Angle Regression and coordinate descent. It also
implements Stochastic Gradient Descent related algorithms.
"""
# See http://scikit-learn.sourceforge.net/modules/sgd.html and
# http://scikit-learn.sourceforge.net/modules/linear_model.html for
# complete documentation.
from .base import LinearRegression
from .bayes import BayesianRidge, ARDRegression
from .least_angle import (Lars, LassoLars, lars_path, LarsCV, LassoLarsCV,
LassoLarsIC)
from .coordinate_descent import (Lasso, ElasticNet, LassoCV, ElasticNetCV,
lasso_path, enet_path, MultiTaskLasso,
MultiTaskElasticNet, MultiTaskElasticNetCV,
MultiTaskLassoCV)
from .sgd_fast import Hinge, Log, ModifiedHuber, SquaredLoss, Huber
from .stochastic_gradient import SGDClassifier, SGDRegressor
from .ridge import (Ridge, RidgeCV, RidgeClassifier, RidgeClassifierCV,
ridge_regression)
from .logistic import (LogisticRegression, LogisticRegressionCV,
logistic_regression_path)
from .omp import (orthogonal_mp, orthogonal_mp_gram, OrthogonalMatchingPursuit,
OrthogonalMatchingPursuitCV)
from .passive_aggressive import PassiveAggressiveClassifier
from .passive_aggressive import PassiveAggressiveRegressor
from .perceptron import Perceptron
from .randomized_l1 import (RandomizedLasso, RandomizedLogisticRegression,
lasso_stability_path)
from .ransac import RANSACRegressor
from .theil_sen import TheilSenRegressor
__all__ = ['ARDRegression',
'BayesianRidge',
'ElasticNet',
'ElasticNetCV',
'Hinge',
'Huber',
'Lars',
'LarsCV',
'Lasso',
'LassoCV',
'LassoLars',
'LassoLarsCV',
'LassoLarsIC',
'LinearRegression',
'Log',
'LogisticRegression',
'LogisticRegressionCV',
'ModifiedHuber',
'MultiTaskElasticNet',
'MultiTaskElasticNetCV',
'MultiTaskLasso',
'MultiTaskLassoCV',
'OrthogonalMatchingPursuit',
'OrthogonalMatchingPursuitCV',
'PassiveAggressiveClassifier',
'PassiveAggressiveRegressor',
'Perceptron',
'RandomizedLasso',
'RandomizedLogisticRegression',
'Ridge',
'RidgeCV',
'RidgeClassifier',
'RidgeClassifierCV',
'SGDClassifier',
'SGDRegressor',
'SquaredLoss',
'TheilSenRegressor',
'enet_path',
'lars_path',
'lasso_path',
'lasso_stability_path',
'logistic_regression_path',
'orthogonal_mp',
'orthogonal_mp_gram',
'ridge_regression',
'RANSACRegressor']
| bsd-3-clause |
arcyfelix/ML-DL-AI | Supervised Learning/GANs/GAN.py | 1 | 3364 | # -*- coding: utf-8 -*-
""" GAN Example
Use a generative adversarial network (GAN) to generate digit images from a
noise distribution.
References:
- Generative adversarial nets. I Goodfellow, J Pouget-Abadie, M Mirza,
B Xu, D Warde-Farley, S Ozair, Y. Bengio. Advances in neural information
processing systems, 2672-2680.
Links:
- [GAN Paper](https://arxiv.org/pdf/1406.2661.pdf).
"""
from __future__ import division, print_function, absolute_import
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import tflearn
# Data loading and preprocessing
import tflearn.datasets.mnist as mnist
X, Y, testX, testY = mnist.load_data()
image_dim = 784 # 28*28 pixels
z_dim = 200 # Noise data points
total_samples = len(X)
# Generator
def generator(x, reuse=False):
with tf.variable_scope('Generator', reuse=reuse):
x = tflearn.fully_connected(x, 256, activation='relu')
x = tflearn.fully_connected(x, image_dim, activation='sigmoid')
return x
# Discriminator
def discriminator(x, reuse=False):
with tf.variable_scope('Discriminator', reuse=reuse):
x = tflearn.fully_connected(x, 256, activation='relu')
x = tflearn.fully_connected(x, 1, activation='sigmoid')
return x
# Build Networks
gen_input = tflearn.input_data(shape=[None, z_dim], name='input_noise')
disc_input = tflearn.input_data(shape=[None, 784], name='disc_input')
gen_sample = generator(gen_input)
disc_real = discriminator(disc_input)
disc_fake = discriminator(gen_sample, reuse=True)
# Define Loss
disc_loss = -tf.reduce_mean(tf.log(disc_real) + tf.log(1. - disc_fake))
gen_loss = -tf.reduce_mean(tf.log(disc_fake))
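# Added note (not in the original script): these are the standard losses from
# the GAN paper referenced above -- the discriminator maximizes
# log D(x) + log(1 - D(G(z))), while the generator uses the non-saturating
# heuristic of maximizing log D(G(z)) instead of minimizing log(1 - D(G(z))),
# which gives stronger gradients early in training.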
# Build Training Ops for both Generator and Discriminator.
# Each network optimization should only update its own variables, thus we need
# to retrieve each network's variables (with get_layer_variables_by_scope) and set
# 'placeholder=None' because we do not need to feed any target.
gen_vars = tflearn.get_layer_variables_by_scope('Generator')
gen_model = tflearn.regression(gen_sample, placeholder=None, optimizer='adam',
loss=gen_loss, trainable_vars=gen_vars,
batch_size=64, name='target_gen', op_name='GEN')
disc_vars = tflearn.get_layer_variables_by_scope('Discriminator')
disc_model = tflearn.regression(disc_real, placeholder=None, optimizer='adam',
loss=disc_loss, trainable_vars=disc_vars,
batch_size=64, name='target_disc', op_name='DISC')
# Define GAN model, that output the generated images.
gan = tflearn.DNN(gen_model)
# Training
# Generate noise to feed to the generator
z = np.random.uniform(-1., 1., size=[total_samples, z_dim])
# Start training, feed both noise and real images.
gan.fit(X_inputs={gen_input: z, disc_input: X},
Y_targets=None,
n_epoch=100)
# Generate images from noise, using the generator network.
f, a = plt.subplots(2, 10, figsize=(10, 4))
for i in range(10):
for j in range(2):
# Noise input.
z = np.random.uniform(-1., 1., size=[1, z_dim])
# Generate image from noise. Extend to 3 channels for matplot figure.
temp = [[ii, ii, ii] for ii in list(gan.predict([z])[0])]
a[j][i].imshow(np.reshape(temp, (28, 28, 3)))
f.show()
plt.draw()
plt.waitforbuttonpress() | apache-2.0 |
cauchycui/scikit-learn | sklearn/decomposition/tests/test_truncated_svd.py | 240 | 6055 | """Test truncated SVD transformer."""
import numpy as np
import scipy.sparse as sp
from sklearn.decomposition import TruncatedSVD
from sklearn.utils import check_random_state
from sklearn.utils.testing import (assert_array_almost_equal, assert_equal,
assert_raises, assert_greater,
assert_array_less)
# Make an X that looks somewhat like a small tf-idf matrix.
# XXX newer versions of SciPy have scipy.sparse.rand for this.
shape = 60, 55
n_samples, n_features = shape
rng = check_random_state(42)
X = rng.randint(-100, 20, np.product(shape)).reshape(shape)
X = sp.csr_matrix(np.maximum(X, 0), dtype=np.float64)
X.data[:] = 1 + np.log(X.data)
Xdense = X.A
def test_algorithms():
svd_a = TruncatedSVD(30, algorithm="arpack")
svd_r = TruncatedSVD(30, algorithm="randomized", random_state=42)
Xa = svd_a.fit_transform(X)[:, :6]
Xr = svd_r.fit_transform(X)[:, :6]
assert_array_almost_equal(Xa, Xr)
comp_a = np.abs(svd_a.components_)
comp_r = np.abs(svd_r.components_)
# All elements are equal, but some elements are more equal than others.
assert_array_almost_equal(comp_a[:9], comp_r[:9])
assert_array_almost_equal(comp_a[9:], comp_r[9:], decimal=3)
def test_attributes():
for n_components in (10, 25, 41):
tsvd = TruncatedSVD(n_components).fit(X)
assert_equal(tsvd.n_components, n_components)
assert_equal(tsvd.components_.shape, (n_components, n_features))
def test_too_many_components():
for algorithm in ["arpack", "randomized"]:
for n_components in (n_features, n_features+1):
tsvd = TruncatedSVD(n_components=n_components, algorithm=algorithm)
assert_raises(ValueError, tsvd.fit, X)
def test_sparse_formats():
for fmt in ("array", "csr", "csc", "coo", "lil"):
Xfmt = Xdense if fmt == "dense" else getattr(X, "to" + fmt)()
tsvd = TruncatedSVD(n_components=11)
Xtrans = tsvd.fit_transform(Xfmt)
assert_equal(Xtrans.shape, (n_samples, 11))
Xtrans = tsvd.transform(Xfmt)
assert_equal(Xtrans.shape, (n_samples, 11))
def test_inverse_transform():
for algo in ("arpack", "randomized"):
# We need a lot of components for the reconstruction to be "almost
# equal" in all positions. XXX Test means or sums instead?
tsvd = TruncatedSVD(n_components=52, random_state=42)
Xt = tsvd.fit_transform(X)
Xinv = tsvd.inverse_transform(Xt)
assert_array_almost_equal(Xinv, Xdense, decimal=1)
def test_integers():
Xint = X.astype(np.int64)
tsvd = TruncatedSVD(n_components=6)
Xtrans = tsvd.fit_transform(Xint)
assert_equal(Xtrans.shape, (n_samples, tsvd.n_components))
def test_explained_variance():
# Test sparse data
svd_a_10_sp = TruncatedSVD(10, algorithm="arpack")
svd_r_10_sp = TruncatedSVD(10, algorithm="randomized", random_state=42)
svd_a_20_sp = TruncatedSVD(20, algorithm="arpack")
svd_r_20_sp = TruncatedSVD(20, algorithm="randomized", random_state=42)
X_trans_a_10_sp = svd_a_10_sp.fit_transform(X)
X_trans_r_10_sp = svd_r_10_sp.fit_transform(X)
X_trans_a_20_sp = svd_a_20_sp.fit_transform(X)
X_trans_r_20_sp = svd_r_20_sp.fit_transform(X)
# Test dense data
svd_a_10_de = TruncatedSVD(10, algorithm="arpack")
svd_r_10_de = TruncatedSVD(10, algorithm="randomized", random_state=42)
svd_a_20_de = TruncatedSVD(20, algorithm="arpack")
svd_r_20_de = TruncatedSVD(20, algorithm="randomized", random_state=42)
X_trans_a_10_de = svd_a_10_de.fit_transform(X.toarray())
X_trans_r_10_de = svd_r_10_de.fit_transform(X.toarray())
X_trans_a_20_de = svd_a_20_de.fit_transform(X.toarray())
X_trans_r_20_de = svd_r_20_de.fit_transform(X.toarray())
# helper arrays for tests below
svds = (svd_a_10_sp, svd_r_10_sp, svd_a_20_sp, svd_r_20_sp, svd_a_10_de,
svd_r_10_de, svd_a_20_de, svd_r_20_de)
svds_trans = (
(svd_a_10_sp, X_trans_a_10_sp),
(svd_r_10_sp, X_trans_r_10_sp),
(svd_a_20_sp, X_trans_a_20_sp),
(svd_r_20_sp, X_trans_r_20_sp),
(svd_a_10_de, X_trans_a_10_de),
(svd_r_10_de, X_trans_r_10_de),
(svd_a_20_de, X_trans_a_20_de),
(svd_r_20_de, X_trans_r_20_de),
)
svds_10_v_20 = (
(svd_a_10_sp, svd_a_20_sp),
(svd_r_10_sp, svd_r_20_sp),
(svd_a_10_de, svd_a_20_de),
(svd_r_10_de, svd_r_20_de),
)
svds_sparse_v_dense = (
(svd_a_10_sp, svd_a_10_de),
(svd_a_20_sp, svd_a_20_de),
(svd_r_10_sp, svd_r_10_de),
(svd_r_20_sp, svd_r_20_de),
)
# Assert the 1st component is equal
for svd_10, svd_20 in svds_10_v_20:
assert_array_almost_equal(
svd_10.explained_variance_ratio_,
svd_20.explained_variance_ratio_[:10],
decimal=5,
)
# Assert that 20 components has higher explained variance than 10
for svd_10, svd_20 in svds_10_v_20:
assert_greater(
svd_20.explained_variance_ratio_.sum(),
svd_10.explained_variance_ratio_.sum(),
)
# Assert that all the values are greater than 0
for svd in svds:
assert_array_less(0.0, svd.explained_variance_ratio_)
# Assert that total explained variance is less than 1
for svd in svds:
assert_array_less(svd.explained_variance_ratio_.sum(), 1.0)
# Compare sparse vs. dense
for svd_sparse, svd_dense in svds_sparse_v_dense:
assert_array_almost_equal(svd_sparse.explained_variance_ratio_,
svd_dense.explained_variance_ratio_)
# Test that explained_variance is correct
for svd, transformed in svds_trans:
total_variance = np.var(X.toarray(), axis=0).sum()
variances = np.var(transformed, axis=0)
true_explained_variance_ratio = variances / total_variance
assert_array_almost_equal(
svd.explained_variance_ratio_,
true_explained_variance_ratio,
)
| bsd-3-clause |
ssaeger/scikit-learn | sklearn/feature_selection/tests/test_base.py | 143 | 3670 | import numpy as np
from scipy import sparse as sp
from nose.tools import assert_raises, assert_equal
from numpy.testing import assert_array_equal
from sklearn.base import BaseEstimator
from sklearn.feature_selection.base import SelectorMixin
from sklearn.utils import check_array
class StepSelector(SelectorMixin, BaseEstimator):
"""Retain every `step` features (beginning with 0)"""
def __init__(self, step=2):
self.step = step
def fit(self, X, y=None):
X = check_array(X, 'csc')
self.n_input_feats = X.shape[1]
return self
def _get_support_mask(self):
mask = np.zeros(self.n_input_feats, dtype=bool)
mask[::self.step] = True
return mask
support = [True, False] * 5
support_inds = [0, 2, 4, 6, 8]
X = np.arange(20).reshape(2, 10)
Xt = np.arange(0, 20, 2).reshape(2, 5)
Xinv = X.copy()
Xinv[:, 1::2] = 0
y = [0, 1]
feature_names = list('ABCDEFGHIJ')
feature_names_t = feature_names[::2]
feature_names_inv = np.array(feature_names)
feature_names_inv[1::2] = ''
def test_transform_dense():
sel = StepSelector()
Xt_actual = sel.fit(X, y).transform(X)
Xt_actual2 = StepSelector().fit_transform(X, y)
assert_array_equal(Xt, Xt_actual)
assert_array_equal(Xt, Xt_actual2)
# Check dtype matches
assert_equal(np.int32, sel.transform(X.astype(np.int32)).dtype)
assert_equal(np.float32, sel.transform(X.astype(np.float32)).dtype)
# Check 1d list and other dtype:
names_t_actual = sel.transform([feature_names])
assert_array_equal(feature_names_t, names_t_actual.ravel())
# Check wrong shape raises error
assert_raises(ValueError, sel.transform, np.array([[1], [2]]))
def test_transform_sparse():
sparse = sp.csc_matrix
sel = StepSelector()
Xt_actual = sel.fit(sparse(X)).transform(sparse(X))
Xt_actual2 = sel.fit_transform(sparse(X))
assert_array_equal(Xt, Xt_actual.toarray())
assert_array_equal(Xt, Xt_actual2.toarray())
# Check dtype matches
assert_equal(np.int32, sel.transform(sparse(X).astype(np.int32)).dtype)
assert_equal(np.float32, sel.transform(sparse(X).astype(np.float32)).dtype)
# Check wrong shape raises error
assert_raises(ValueError, sel.transform, np.array([[1], [2]]))
def test_inverse_transform_dense():
sel = StepSelector()
Xinv_actual = sel.fit(X, y).inverse_transform(Xt)
assert_array_equal(Xinv, Xinv_actual)
# Check dtype matches
assert_equal(np.int32,
sel.inverse_transform(Xt.astype(np.int32)).dtype)
assert_equal(np.float32,
sel.inverse_transform(Xt.astype(np.float32)).dtype)
# Check 1d list and other dtype:
names_inv_actual = sel.inverse_transform([feature_names_t])
assert_array_equal(feature_names_inv, names_inv_actual.ravel())
# Check wrong shape raises error
assert_raises(ValueError, sel.inverse_transform, np.array([[1], [2]]))
def test_inverse_transform_sparse():
sparse = sp.csc_matrix
sel = StepSelector()
Xinv_actual = sel.fit(sparse(X)).inverse_transform(sparse(Xt))
assert_array_equal(Xinv, Xinv_actual.toarray())
# Check dtype matches
assert_equal(np.int32,
sel.inverse_transform(sparse(Xt).astype(np.int32)).dtype)
assert_equal(np.float32,
sel.inverse_transform(sparse(Xt).astype(np.float32)).dtype)
# Check wrong shape raises error
assert_raises(ValueError, sel.inverse_transform, np.array([[1], [2]]))
def test_get_support():
sel = StepSelector()
sel.fit(X, y)
assert_array_equal(support, sel.get_support())
assert_array_equal(support_inds, sel.get_support(indices=True))
| bsd-3-clause |
Daniel-Brosnan-Blazquez/DIT-100 | debugging/trajectory_planning_profiles/trapezoidal-profile.py | 1 | 7690 | import numpy
import time
from matplotlib import pyplot
def main (params):
angle = params['p0']
vel = params['v0']
sign = params['sign']
# Plan the trajectory if it is not planned
T = 0
Ta = 0
Td = 0
dt = params['dt']
if not params['trajectory']:
# Maximum acceleration and velocity values in degrees/s^2 and
# degrees/s respectively
amax = params['acc_limit_d']*sign*(-1)
vmax = params['vel_limit']*sign*(-1)
v0 = vel
h = angle
vlim = vmax
# Check if the trajectory is feasible
print "abs (amax*h) >= v0**2/2.0 = %s" % (abs (amax*h) >= v0**2/2.0)
if abs (amax*h) >= v0**2/2.0:
# The trajectory is feasible
# Check if the maximum value of velocity can be reached
if abs (h*amax) > vmax**2 - v0**2/2.0:
# The maximum value of velocity can be reached
Ta = (vmax - v0)/amax
Td = vmax/amax
term1 = abs (h/vmax)
term2 = (vmax/(2*amax)) * (1 - (v0/vmax))**2
term3 = (vmax/(2*amax))
T = term1 + term2 + term3
else:
# The maximum value of velocity can't be reached
vlim = ((abs (h * amax) + v0**2/2.0)**(1/2.0))*sign*(-1)
Ta = abs ((vlim - v0)/amax)
Td = abs (vlim/amax)
T = Ta + Td
# end if
# The time has to be positive
Ta = abs (Ta)
Td = abs (Td)
T = abs (T)
print "Ta = %s, Td = %s" % (Ta, Td)
params['trajectory'] = True
params['T'] = T
params['Ta'] = Ta
params['Td'] = Td
params['T_sign'] = sign*(-1)
params['vv'] = vlim
# if Ta > dt and Td > dt:
# params['trajectory'] = True
# params['T'] = T
# params['Ta'] = Ta
# params['Td'] = Td
# params['T_sign'] = sign*(-1)
# params['vv'] = vlim
# else:
# Ta = 0
# Td = 0
# T = 0
# end if
# end if
return
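# Worked example (added note): with the defaults set in __main__ below
# (p0 = 80 deg, v0 = 0, sign = 1, vel_limit = 150 deg/s and
# acc_limit_d ~= 1432.4 deg/s^2) the velocity limit is reached, so the planner
# yields approximately Ta = Td ~= 150/1432.4 ~= 0.105 s and
# T ~= 80/150 + 2*0.052 ~= 0.638 s.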
def plot (params):
t = 0
interval = params['dt']
# Sign
sign = params['T_sign']
# Maximum values
amax = params['acc_limit_d']*sign
vmax = params['vel_limit']*sign
# Buffers to store the motion
positions = []
vels = []
accs = []
# Initial values of the motion
v0 = params['v0']
p0 = params['p0']
vv = params['vv']
T = params['T']
Ta = params['Ta']
Td = params['Td']
# Acceleration phase
while t < Ta:
# Position
pos = p0 + v0*t + ((vv - v0)/(2*Ta))*t**2
positions.append (pos)
# Velocity
vel = v0 + ((vv - v0)/(Ta))*t
vels.append (vel)
# Acceleration
acc = (vv - v0)/Ta
accs.append (acc)
t += interval
# end while
# Constant velocity phase
while t < (T - Td):
# Position
pos = p0 + v0*(Ta/2.0) + vv*(t-(Ta/2.0))
positions.append (pos)
# Velocity
vel = vv
vels.append (vel)
# Acceleration
acc = 0
accs.append (acc)
t += interval
# end while
# Deceleration phase
while t < T:
# Position
pos = 0 - (vv/(2*Td))*(T-t)**2
positions.append (pos)
# Velocity
vel = (vv/Td)*(T-t)
vels.append (vel)
# Acceleration
acc = -(vv/Td)
accs.append (acc)
t += interval
# end while
fig = pyplot.figure (1, figsize = (20,10))
s = fig.add_subplot (311)
p, = s.plot(positions)
s.grid (True)
s.set_title ("position")
s = fig.add_subplot (312)
p, = s.plot(vels)
s.grid (True)
s.set_title ("velocity")
s = fig.add_subplot (313)
p, = s.plot(accs)
s.grid (True)
s.set_title ("acceleration")
pyplot.show ()
pyplot.close (1)
return
if __name__ == "__main__":
params = {}
# Period
params['dt'] = 0.015
# Flag to indicate if it is necessary to compute the trajectory
# (not needed here)
params['trajectory'] = False
# Velocity, acceleration and jerk limits in degrees/s^2
params['vel_limit'] = 150.0
rad_to_degrees = 180.0/numpy.pi
radius = 0.3
# m/s^2
params['acc_limit'] = 7.5
# degrees/s^2
params['acc_limit_d'] = (params['acc_limit']*rad_to_degrees)/radius
# # p0 = 0. Checked, trajectory unfeasible
# # p0
# params['p0'] = 0.0
# # v0
# params['v0'] = 100.0
# p0 > 50 v0 = 0. Checked, trajectory feasible
# p0
params['p0'] = 80.0
# v0
params['v0'] = 0.0
# # p0 > 50 v0 < limit. Checked, trajectory feasible
# # p0
# params['p0'] = 80.0
# # v0
# params['v0'] = 50.0
# # p0 > 50 v0 = limit. Checked, trajectory feasible
# # p0
# params['p0'] = 80.0
# # v0
# params['v0'] = 100.0
# # p0 > 50 v0 > limit. Checked, trajectory feasible
# # p0
# params['p0'] = 80.0
# # v0
# params['v0'] = -150.0
# # p0 < 50 p0 > 0 v0 = 0. Checked, trajectory feasible
# # p0
# params['p0'] = 20.0
# # v0
# params['v0'] = 0.0
# # p0 < 50 p0 > 0 v0 < limit. REVIEW IT!!!!!!!!!
# # p0
# params['p0'] = 20.0
# # v0
# params['v0'] = 50.0
# # p0 < 50 p0 > 0 v0 = limit. Checked, trajectory feasible
# # p0
# params['p0'] = 20.0
# # v0
# params['v0'] = 100.0
# # p0 < 50 p0 > 0 v0 > limit. Checked, trajectory feasible
# # p0
# params['p0'] = 20.0
# # v0
# params['v0'] = 150.0
# # p0 < -50 v0 = 0. Checked, trajectory feasible
# # p0
# params['p0'] = -80.0
# # v0
# params['v0'] = 0.0
# # p0 < -50 v0 < limit. Checked, trajectory feasible
# # p0
# params['p0'] = -80.0
# # v0
# params['v0'] = 50.0
# # p0 < -50 v0 = limit. Checked, trajectory feasible
# # p0
# params['p0'] = -80.0
# # v0
# params['v0'] = 100.0
# # p0 < -50 v0 > limit. Checked, trajectory feasible
# # p0
# params['p0'] = -80.0
# # v0
# params['v0'] = 150.0
# # p0 > -50 p0 < 0 v0 = 0. Checked, trajectory feasible
# # p0
# params['p0'] = -20.0
# # v0
# params['v0'] = 0.0
# # p0 > -50 p0 < 0 v0 < limit. Checked, trajectory feasible
# # p0
# params['p0'] = -20.0
# # v0
# params['v0'] = -50.0
# # p0 > -50 p0 < 0 v0 = limit. Checked, trajectory feasible
# # p0
# params['p0'] = -20.0
# # v0
# params['v0'] = 100.0
# # p0 > -50 p0 < 0 v0 > limit. Checked, trajectory feasible
# # p0
# params['p0'] = -20.0
# # v0
# params['v0'] = 150.0
# # p0 > -50 p0 < 0 v0 > limit. Checked, trajectory feasible
# # p0
# params['p0'] = -20.0
# # v0
# params['v0'] = 200.0
# sign
params['sign'] = 1
# params['sign'] = -1
# # p0
# params['p0'] = 11.0962258945
# # params['p0'] = 22.0
# # v0
# params['v0'] = 71.19
# # params['v0'] = 0.0
main(params)
print "Trajectory performed: %s" % params['trajectory']
if params['trajectory']:
T = params['T']
Ta = params['Ta']
Td = params['Td']
print "T = %s, Ta = %s, Td = %s" %(T, Ta, Td)
plot (params)
| gpl-3.0 |
samuel1208/scikit-learn | sklearn/tests/test_pipeline.py | 162 | 14875 | """
Test the pipeline module.
"""
import numpy as np
from scipy import sparse
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_raises, assert_raises_regex, assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.base import clone
from sklearn.pipeline import Pipeline, FeatureUnion, make_pipeline, make_union
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LinearRegression
from sklearn.cluster import KMeans
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.decomposition import PCA, RandomizedPCA, TruncatedSVD
from sklearn.datasets import load_iris
from sklearn.preprocessing import StandardScaler
from sklearn.feature_extraction.text import CountVectorizer
JUNK_FOOD_DOCS = (
"the pizza pizza beer copyright",
"the pizza burger beer copyright",
"the the pizza beer beer copyright",
"the burger beer beer copyright",
"the coke burger coke copyright",
"the coke burger burger",
)
class IncorrectT(object):
"""Small class to test parameter dispatching.
"""
def __init__(self, a=None, b=None):
self.a = a
self.b = b
class T(IncorrectT):
def fit(self, X, y):
return self
def get_params(self, deep=False):
return {'a': self.a, 'b': self.b}
def set_params(self, **params):
self.a = params['a']
return self
class TransfT(T):
def transform(self, X, y=None):
return X
class FitParamT(object):
"""Mock classifier
"""
def __init__(self):
self.successful = False
pass
def fit(self, X, y, should_succeed=False):
self.successful = should_succeed
def predict(self, X):
return self.successful
def test_pipeline_init():
# Test the various init parameters of the pipeline.
assert_raises(TypeError, Pipeline)
# Check that we can't instantiate pipelines with objects without fit
# method
pipe = assert_raises(TypeError, Pipeline, [('svc', IncorrectT)])
# Smoke test with only an estimator
clf = T()
pipe = Pipeline([('svc', clf)])
assert_equal(pipe.get_params(deep=True),
dict(svc__a=None, svc__b=None, svc=clf,
**pipe.get_params(deep=False)
))
# Check that params are set
pipe.set_params(svc__a=0.1)
assert_equal(clf.a, 0.1)
assert_equal(clf.b, None)
# Smoke test the repr:
repr(pipe)
# Test with two objects
clf = SVC()
filter1 = SelectKBest(f_classif)
pipe = Pipeline([('anova', filter1), ('svc', clf)])
# Check that we can't use the same stage name twice
assert_raises(ValueError, Pipeline, [('svc', SVC()), ('svc', SVC())])
# Check that params are set
pipe.set_params(svc__C=0.1)
assert_equal(clf.C, 0.1)
# Smoke test the repr:
repr(pipe)
# Check that params are not set when naming them wrong
assert_raises(ValueError, pipe.set_params, anova__C=0.1)
# Test clone
pipe2 = clone(pipe)
assert_false(pipe.named_steps['svc'] is pipe2.named_steps['svc'])
# Check that apart from estimators, the parameters are the same
params = pipe.get_params(deep=True)
params2 = pipe2.get_params(deep=True)
for x in pipe.get_params(deep=False):
params.pop(x)
for x in pipe2.get_params(deep=False):
params2.pop(x)
    # Remove estimators that were copied
params.pop('svc')
params.pop('anova')
params2.pop('svc')
params2.pop('anova')
assert_equal(params, params2)
def test_pipeline_methods_anova():
# Test the various methods of the pipeline (anova).
iris = load_iris()
X = iris.data
y = iris.target
# Test with Anova + LogisticRegression
clf = LogisticRegression()
filter1 = SelectKBest(f_classif, k=2)
pipe = Pipeline([('anova', filter1), ('logistic', clf)])
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.predict_log_proba(X)
pipe.score(X, y)
def test_pipeline_fit_params():
# Test that the pipeline can take fit parameters
pipe = Pipeline([('transf', TransfT()), ('clf', FitParamT())])
pipe.fit(X=None, y=None, clf__should_succeed=True)
# classifier should return True
assert_true(pipe.predict(None))
# and transformer params should not be changed
assert_true(pipe.named_steps['transf'].a is None)
assert_true(pipe.named_steps['transf'].b is None)
def test_pipeline_raise_set_params_error():
# Test pipeline raises set params error message for nested models.
pipe = Pipeline([('cls', LinearRegression())])
# expected error message
error_msg = ('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.')
assert_raise_message(ValueError,
error_msg % ('fake', 'Pipeline'),
pipe.set_params,
fake='nope')
# nested model check
assert_raise_message(ValueError,
error_msg % ("fake", pipe),
pipe.set_params,
fake__estimator='nope')
def test_pipeline_methods_pca_svm():
# Test the various methods of the pipeline (pca + svm).
iris = load_iris()
X = iris.data
y = iris.target
# Test with PCA + SVC
clf = SVC(probability=True, random_state=0)
pca = PCA(n_components='mle', whiten=True)
pipe = Pipeline([('pca', pca), ('svc', clf)])
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.predict_log_proba(X)
pipe.score(X, y)
def test_pipeline_methods_preprocessing_svm():
# Test the various methods of the pipeline (preprocessing + svm).
iris = load_iris()
X = iris.data
y = iris.target
n_samples = X.shape[0]
n_classes = len(np.unique(y))
scaler = StandardScaler()
pca = RandomizedPCA(n_components=2, whiten=True)
clf = SVC(probability=True, random_state=0)
for preprocessing in [scaler, pca]:
pipe = Pipeline([('preprocess', preprocessing), ('svc', clf)])
pipe.fit(X, y)
# check shapes of various prediction functions
predict = pipe.predict(X)
assert_equal(predict.shape, (n_samples,))
proba = pipe.predict_proba(X)
assert_equal(proba.shape, (n_samples, n_classes))
log_proba = pipe.predict_log_proba(X)
assert_equal(log_proba.shape, (n_samples, n_classes))
decision_function = pipe.decision_function(X)
assert_equal(decision_function.shape, (n_samples, n_classes))
pipe.score(X, y)
def test_fit_predict_on_pipeline():
# test that the fit_predict method is implemented on a pipeline
# test that the fit_predict on pipeline yields same results as applying
# transform and clustering steps separately
iris = load_iris()
scaler = StandardScaler()
km = KMeans(random_state=0)
# first compute the transform and clustering step separately
scaled = scaler.fit_transform(iris.data)
separate_pred = km.fit_predict(scaled)
# use a pipeline to do the transform and clustering in one step
pipe = Pipeline([('scaler', scaler), ('Kmeans', km)])
pipeline_pred = pipe.fit_predict(iris.data)
assert_array_almost_equal(pipeline_pred, separate_pred)
def test_fit_predict_on_pipeline_without_fit_predict():
# tests that a pipeline does not have fit_predict method when final
# step of pipeline does not have fit_predict defined
scaler = StandardScaler()
pca = PCA()
pipe = Pipeline([('scaler', scaler), ('pca', pca)])
assert_raises_regex(AttributeError,
"'PCA' object has no attribute 'fit_predict'",
getattr, pipe, 'fit_predict')
def test_feature_union():
# basic sanity check for feature union
iris = load_iris()
X = iris.data
X -= X.mean(axis=0)
y = iris.target
svd = TruncatedSVD(n_components=2, random_state=0)
select = SelectKBest(k=1)
fs = FeatureUnion([("svd", svd), ("select", select)])
fs.fit(X, y)
X_transformed = fs.transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 3))
# check if it does the expected thing
assert_array_almost_equal(X_transformed[:, :-1], svd.fit_transform(X))
assert_array_equal(X_transformed[:, -1],
select.fit_transform(X, y).ravel())
# test if it also works for sparse input
# We use a different svd object to control the random_state stream
fs = FeatureUnion([("svd", svd), ("select", select)])
X_sp = sparse.csr_matrix(X)
X_sp_transformed = fs.fit_transform(X_sp, y)
assert_array_almost_equal(X_transformed, X_sp_transformed.toarray())
# test setting parameters
fs.set_params(select__k=2)
assert_equal(fs.fit_transform(X, y).shape, (X.shape[0], 4))
# test it works with transformers missing fit_transform
fs = FeatureUnion([("mock", TransfT()), ("svd", svd), ("select", select)])
X_transformed = fs.fit_transform(X, y)
assert_equal(X_transformed.shape, (X.shape[0], 8))
def test_make_union():
pca = PCA()
mock = TransfT()
fu = make_union(pca, mock)
names, transformers = zip(*fu.transformer_list)
assert_equal(names, ("pca", "transft"))
assert_equal(transformers, (pca, mock))
def test_pipeline_transform():
# Test whether pipeline works with a transformer at the end.
# Also test pipeline.transform and pipeline.inverse_transform
iris = load_iris()
X = iris.data
pca = PCA(n_components=2)
pipeline = Pipeline([('pca', pca)])
# test transform and fit_transform:
X_trans = pipeline.fit(X).transform(X)
X_trans2 = pipeline.fit_transform(X)
X_trans3 = pca.fit_transform(X)
assert_array_almost_equal(X_trans, X_trans2)
assert_array_almost_equal(X_trans, X_trans3)
X_back = pipeline.inverse_transform(X_trans)
X_back2 = pca.inverse_transform(X_trans)
assert_array_almost_equal(X_back, X_back2)
def test_pipeline_fit_transform():
# Test whether pipeline works with a transformer missing fit_transform
iris = load_iris()
X = iris.data
y = iris.target
transft = TransfT()
pipeline = Pipeline([('mock', transft)])
# test fit_transform:
X_trans = pipeline.fit_transform(X, y)
X_trans2 = transft.fit(X, y).transform(X)
assert_array_almost_equal(X_trans, X_trans2)
def test_make_pipeline():
t1 = TransfT()
t2 = TransfT()
pipe = make_pipeline(t1, t2)
assert_true(isinstance(pipe, Pipeline))
assert_equal(pipe.steps[0][0], "transft-1")
assert_equal(pipe.steps[1][0], "transft-2")
pipe = make_pipeline(t1, t2, FitParamT())
assert_true(isinstance(pipe, Pipeline))
assert_equal(pipe.steps[0][0], "transft-1")
assert_equal(pipe.steps[1][0], "transft-2")
assert_equal(pipe.steps[2][0], "fitparamt")
def test_feature_union_weights():
# test feature union with transformer weights
iris = load_iris()
X = iris.data
y = iris.target
pca = RandomizedPCA(n_components=2, random_state=0)
select = SelectKBest(k=1)
# test using fit followed by transform
fs = FeatureUnion([("pca", pca), ("select", select)],
transformer_weights={"pca": 10})
fs.fit(X, y)
X_transformed = fs.transform(X)
# test using fit_transform
fs = FeatureUnion([("pca", pca), ("select", select)],
transformer_weights={"pca": 10})
X_fit_transformed = fs.fit_transform(X, y)
# test it works with transformers missing fit_transform
fs = FeatureUnion([("mock", TransfT()), ("pca", pca), ("select", select)],
transformer_weights={"mock": 10})
X_fit_transformed_wo_method = fs.fit_transform(X, y)
# check against expected result
# We use a different pca object to control the random_state stream
assert_array_almost_equal(X_transformed[:, :-1], 10 * pca.fit_transform(X))
assert_array_equal(X_transformed[:, -1],
select.fit_transform(X, y).ravel())
assert_array_almost_equal(X_fit_transformed[:, :-1],
10 * pca.fit_transform(X))
assert_array_equal(X_fit_transformed[:, -1],
select.fit_transform(X, y).ravel())
assert_equal(X_fit_transformed_wo_method.shape, (X.shape[0], 7))
def test_feature_union_parallel():
# test that n_jobs work for FeatureUnion
X = JUNK_FOOD_DOCS
fs = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
])
fs_parallel = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
], n_jobs=2)
fs_parallel2 = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
], n_jobs=2)
fs.fit(X)
X_transformed = fs.transform(X)
assert_equal(X_transformed.shape[0], len(X))
fs_parallel.fit(X)
X_transformed_parallel = fs_parallel.transform(X)
assert_equal(X_transformed.shape, X_transformed_parallel.shape)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel.toarray()
)
# fit_transform should behave the same
X_transformed_parallel2 = fs_parallel2.fit_transform(X)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel2.toarray()
)
# transformers should stay fit after fit_transform
X_transformed_parallel2 = fs_parallel2.transform(X)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel2.toarray()
)
def test_feature_union_feature_names():
word_vect = CountVectorizer(analyzer="word")
char_vect = CountVectorizer(analyzer="char_wb", ngram_range=(3, 3))
ft = FeatureUnion([("chars", char_vect), ("words", word_vect)])
ft.fit(JUNK_FOOD_DOCS)
feature_names = ft.get_feature_names()
for feat in feature_names:
assert_true("chars__" in feat or "words__" in feat)
assert_equal(len(feature_names), 35)
def test_classes_property():
iris = load_iris()
X = iris.data
y = iris.target
reg = make_pipeline(SelectKBest(k=1), LinearRegression())
reg.fit(X, y)
assert_raises(AttributeError, getattr, reg, "classes_")
clf = make_pipeline(SelectKBest(k=1), LogisticRegression(random_state=0))
assert_raises(AttributeError, getattr, clf, "classes_")
clf.fit(X, y)
assert_array_equal(clf.classes_, np.unique(y))
| bsd-3-clause |
rigetticomputing/grove | grove/tomography/state_tomography.py | 1 | 11664 | ##############################################################################
# Copyright 2017-2018 Rigetti Computing
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
import logging
import numpy as np
import matplotlib.pyplot as plt
from pyquil.quilbase import Pragma
from scipy.sparse import csr_matrix, coo_matrix
from pyquil.quil import Program
import grove.tomography.operator_utils
from grove.tomography.tomography import TomographyBase, TomographySettings, DEFAULT_SOLVER_KWARGS
from grove.tomography import tomography
import grove.tomography.utils as ut
import grove.tomography.operator_utils as o_ut
_log = logging.getLogger(__name__)
qt = ut.import_qutip()
cvxpy = ut.import_cvxpy()
UNIT_TRACE = 'unit_trace'
POSITIVE = 'positive'
DEFAULT_STATE_TOMO_SETTINGS = TomographySettings(
constraints={UNIT_TRACE},
solver_kwargs=DEFAULT_SOLVER_KWARGS
)
def _prepare_c_jk_m(readout_povm, pauli_basis, channel_ops):
"""
Prepare the coefficient matrix for state tomography. This function uses sparse matrices
for much greater efficiency.
The coefficient matrix is defined as:
.. math::
C_{(jk)m} = \tr{\Pi_{s_j} \Lambda_k(P_m)} = \sum_{r}\pi_{jr}(\mathcal{R}_{k})_{rm}
where :math:`\Lambda_k(\cdot)` is the quantum map corresponding to the k-th pre-measurement
channel, i.e., :math:`\Lambda_k(\rho) = E_k \rho E_k^\dagger` where :math:`E_k` is the k-th
channel operator. This map can also be represented via its transfer matrix
:math:`\mathcal{R}_{k}`. In that case one also requires the overlap between the (generalized)
Pauli basis ops and the projection operators
:math:`\pi_{jl}:=\sbraket{\Pi_j}{P_l} = \tr{\Pi_j P_l}`.
See the grove documentation on tomography for detailed information.
:param DiagonalPOVM readout_povm: The POVM corresponding to the readout plus classifier.
:param OperatorBasis pauli_basis: The (generalized) Pauli basis employed in the estimation.
:param list channel_ops: The pre-measurement channel operators as `qutip.Qobj`
:return: The coefficient matrix necessary to set up the binomial state tomography problem.
:rtype: scipy.sparse.csr_matrix
"""
channel_transfer_matrices = [pauli_basis.transfer_matrix(qt.to_super(ek)) for ek in channel_ops]
# This bit could be more efficient but does not run super long and is thus preserved for
# readability.
pi_jr = csr_matrix(
[pauli_basis.project_op(n_j).toarray().ravel()
for n_j in readout_povm.ops])
# Dict used for constructing our sparse matrix, keys are tuples (row_index, col_index), values
# are the non-zero elements of the final matrix.
c_jk_m_elms = {}
# This explicitly exploits the sparsity of all operators involved
for k in range(len(channel_ops)):
pi_jr__rk_rm = (pi_jr * channel_transfer_matrices[k]).tocoo()
for (j, m, val) in ut.izip(pi_jr__rk_rm.row, pi_jr__rk_rm.col, pi_jr__rk_rm.data):
# The multi-index (j,k) is enumerated in column-major ordering (like Fortran arrays)
c_jk_m_elms[(j + k * readout_povm.pi_basis.dim, m)] = val.real
# create sparse matrix from COO-format (see scipy.sparse docs)
_keys, _values = ut.izip(*c_jk_m_elms.items())
_rows, _cols = ut.izip(*_keys)
c_jk_m = coo_matrix((list(_values), (list(_rows), list(_cols))),
shape=(readout_povm.pi_basis.dim * len(channel_ops),
pauli_basis.dim)).tocsr()
return c_jk_m
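# --- Added illustrative sketch (not part of the original grove module) ---
# A dense reference for the contraction computed in _prepare_c_jk_m: stacking
# the per-channel blocks pi @ R_k row-wise reproduces the column-major (j, k)
# multi-index used above.  It assumes a dense `pi_jr` of shape
# (n_projectors, dim) and transfer matrices of shape (dim, dim).
def _c_jk_m_dense_reference(pi_jr, channel_transfer_matrices):
    blocks = [np.asarray(pi_jr.dot(r_k)).real for r_k in channel_transfer_matrices]
    return np.vstack(blocks)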
class StateTomography(TomographyBase):
"""
A StateTomography object encapsulates the result of quantum state estimation from tomographic
data. It provides convenience functions for visualization and computing state fidelities.
"""
__tomography_type__ = "STATE"
@staticmethod
def estimate_from_ssr(histograms, readout_povm, channel_ops, settings):
"""
Estimate a density matrix from single shot histograms obtained by measuring bitstrings in
the Z-eigenbasis after application of given channel operators.
:param numpy.ndarray histograms: The single shot histograms, `shape=(n_channels, dim)`.
        :param DiagonalPOVM readout_povm: The POVM corresponding to the readout plus classifier.
:param list channel_ops: The tomography measurement channels as `qutip.Qobj`'s.
:param TomographySettings settings: The solver and estimation settings.
:return: The generated StateTomography object.
:rtype: StateTomography
"""
nqc = len(channel_ops[0].dims[0])
pauli_basis = grove.tomography.operator_utils.PAULI_BASIS ** nqc
pi_basis = readout_povm.pi_basis
if not histograms.shape[1] == pi_basis.dim: # pragma no coverage
raise ValueError("Currently tomography is only implemented for two-level systems.")
# prepare the log-likelihood function parameters, see documentation
n_kj = np.asarray(histograms)
c_jk_m = _prepare_c_jk_m(readout_povm, pauli_basis, channel_ops)
rho_m = cvxpy.Variable(pauli_basis.dim)
p_jk = c_jk_m * rho_m
obj = -n_kj.ravel() * cvxpy.log(p_jk)
p_jk_mat = cvxpy.reshape(p_jk, pi_basis.dim, len(channel_ops)) # cvxpy has col-major order
# Default constraints:
# MLE must describe valid probability distribution
# i.e., for each k, p_jk must sum to one and be element-wise non-negative:
# 1. \sum_j p_jk == 1 for all k
# 2. p_jk >= 0 for all j, k
# where p_jk = \sum_m c_jk_m rho_m
constraints = [
p_jk >= 0,
np.matrix(np.ones((1, pi_basis.dim))) * p_jk_mat == 1,
]
rho_m_real_imag = sum((rm * o_ut.to_realimag(Pm)
for (rm, Pm) in ut.izip(rho_m, pauli_basis.ops)), 0)
if POSITIVE in settings.constraints:
if tomography._SDP_SOLVER.is_functional():
constraints.append(rho_m_real_imag >> 0)
else: # pragma no coverage
_log.warning("No convex solver capable of semi-definite problems installed.\n"
"Dropping the positivity constraint on the density matrix.")
if UNIT_TRACE in settings.constraints:
# this assumes that the first element of the Pauli basis is always proportional to
# the identity
constraints.append(rho_m[0, 0] == 1. / pauli_basis.ops[0].tr().real)
prob = cvxpy.Problem(cvxpy.Minimize(obj), constraints)
_log.info("Starting convex solver")
prob.solve(solver=tomography.SOLVER, **settings.solver_kwargs)
if prob.status != cvxpy.OPTIMAL: # pragma no coverage
_log.warning("Problem did not converge to optimal solution. "
"Solver settings: {}".format(settings.solver_kwargs))
return StateTomography(np.array(rho_m.value).ravel(), pauli_basis, settings)
def __init__(self, rho_coeffs, pauli_basis, settings):
"""
Construct a StateTomography to encapsulate the result of estimating the quantum state from
a quantum tomography measurement.
        :param numpy.ndarray rho_coeffs: The estimated quantum state represented in a given (generalized)
Pauli basis.
:param OperatorBasis pauli_basis: The employed (generalized) Pauli basis.
:param TomographySettings settings: The settings used to estimate the state.
"""
self.rho_coeffs = rho_coeffs
self.pauli_basis = pauli_basis
self.rho_est = sum((r_m * p_m for r_m, p_m in ut.izip(rho_coeffs, pauli_basis.ops)))
self.settings = settings
def fidelity(self, other):
"""
Compute the quantum state fidelity of the estimated state with another state.
:param qutip.Qobj other: The other quantum state.
:return: The fidelity, a real number between 0 and 1.
:rtype: float
"""
return qt.fidelity(self.rho_est, other)
def plot_state_histogram(self, ax):
"""
Visualize the complex matrix elements of the estimated state.
:param matplotlib.Axes ax: A matplotlib Axes object to plot into.
"""
title = "Estimated state"
nqc = int(round(np.log2(self.rho_est.data.shape[0])))
labels = ut.basis_labels(nqc)
return ut.state_histogram(self.rho_est, ax, title)
def plot(self):
"""
Visualize the state.
:return: The generated figure.
:rtype: matplotlib.Figure
"""
width = 10
# The pleasing golden ratio.
height = width / 1.618
f = plt.figure(figsize=(width, height))
ax = f.add_subplot(111, projection="3d")
self.plot_state_histogram(ax)
return f
def state_tomography_programs(state_prep, qubits=None,
rotation_generator=tomography.default_rotations):
"""
Yield tomographic sequences that prepare a state with Quil program `state_prep` and then append
tomographic rotations on the specified `qubits`. If `qubits is None`, it assumes all qubits in
the program should be tomographically rotated.
:param Program state_prep: The program to prepare the state to be tomographed.
:param list|NoneType qubits: A list of Qubits or Numbers, to perform the tomography on. If
`None`, performs it on all in state_prep.
:param generator rotation_generator: A generator that yields tomography rotations to perform.
:return: Program for state tomography.
:rtype: Program
"""
if qubits is None:
qubits = state_prep.get_qubits()
for tomography_program in rotation_generator(*qubits):
state_tomography_program = Program(Pragma("PRESERVE_BLOCK"))
state_tomography_program.inst(state_prep)
state_tomography_program.inst(tomography_program)
state_tomography_program.inst(Pragma("END_PRESERVE_BLOCK"))
yield state_tomography_program
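# Example usage of the generator above (added sketch; `H` comes from
# pyquil.gates and is an assumption here, it is not imported by this module):
#
#   >>> from pyquil.gates import H
#   >>> prep = Program(H(0))
#   >>> tomo_programs = list(state_tomography_programs(prep, qubits=[0]))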
def do_state_tomography(preparation_program, nsamples, cxn, qubits=None, use_run=False):
"""
Method to perform both a QPU and QVM state tomography, and use the latter as
as reference to calculate the fidelity of the former.
:param Program preparation_program: Program to execute.
:param int nsamples: Number of samples to take for the program.
:param QVMConnection|QPUConnection cxn: Connection on which to run the program.
    :param list qubits: List of qubits for the program to use in the tomography analysis.
    :param bool use_run: If ``True``, append measurements on all qubits and use ``cxn.run``
        instead of ``cxn.run_and_measure``.
:return: The state tomogram.
:rtype: StateTomography
"""
return tomography._do_tomography(preparation_program, nsamples, cxn, qubits,
tomography.MAX_QUBITS_STATE_TOMO,
StateTomography, state_tomography_programs,
DEFAULT_STATE_TOMO_SETTINGS, use_run=use_run)
| apache-2.0 |
kevin-intel/scikit-learn | sklearn/datasets/_openml.py | 2 | 34451 | import gzip
import json
import os
import shutil
import hashlib
from os.path import join
from warnings import warn
from contextlib import closing
from functools import wraps
from typing import Callable, Optional, Dict, Tuple, List, Any, Union
import itertools
from collections.abc import Generator
from collections import OrderedDict
from functools import partial
from urllib.request import urlopen, Request
import numpy as np
import scipy.sparse
from ..externals import _arff
from ..externals._arff import ArffSparseDataType, ArffContainerType
from . import get_data_home
from urllib.error import HTTPError
from ..utils import Bunch
from ..utils import is_scalar_nan
from ..utils import get_chunk_n_rows
from ..utils import _chunk_generator
from ..utils import check_pandas_support # noqa
__all__ = ['fetch_openml']
_OPENML_PREFIX = "https://openml.org/"
_SEARCH_NAME = "api/v1/json/data/list/data_name/{}/limit/2"
_DATA_INFO = "api/v1/json/data/{}"
_DATA_FEATURES = "api/v1/json/data/features/{}"
_DATA_QUALITIES = "api/v1/json/data/qualities/{}"
_DATA_FILE = "data/v1/download/{}"
OpenmlQualitiesType = List[Dict[str, str]]
OpenmlFeaturesType = List[Dict[str, str]]
def _get_local_path(openml_path: str, data_home: str) -> str:
return os.path.join(data_home, 'openml.org', openml_path + ".gz")
def _retry_with_clean_cache(
openml_path: str, data_home: Optional[str]
) -> Callable:
"""If the first call to the decorated function fails, the local cached
file is removed, and the function is called again. If ``data_home`` is
``None``, then the function is called once.
"""
def decorator(f):
@wraps(f)
def wrapper(*args, **kw):
if data_home is None:
return f(*args, **kw)
try:
return f(*args, **kw)
except HTTPError:
raise
except Exception:
warn("Invalid cache, redownloading file", RuntimeWarning)
local_path = _get_local_path(openml_path, data_home)
if os.path.exists(local_path):
os.unlink(local_path)
return f(*args, **kw)
return wrapper
return decorator
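# Usage sketch (added for illustration; it mirrors how the decorator is
# applied further below in this module):
#
#   @_retry_with_clean_cache(openml_path, data_home)
#   def _load_json():
#       ...  # a first failure wipes the cached file, then the call is retried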
def _open_openml_url(openml_path: str, data_home: Optional[str]):
"""
Returns a resource from OpenML.org. Caches it to data_home if required.
Parameters
----------
openml_path : str
        OpenML URL that will be accessed. This will be prefixed with
_OPENML_PREFIX
data_home : str
Directory to which the files will be cached. If None, no caching will
be applied.
Returns
-------
result : stream
A stream to the OpenML resource
"""
def is_gzip_encoded(_fsrc):
return _fsrc.info().get('Content-Encoding', '') == 'gzip'
req = Request(_OPENML_PREFIX + openml_path)
req.add_header('Accept-encoding', 'gzip')
if data_home is None:
fsrc = urlopen(req)
if is_gzip_encoded(fsrc):
return gzip.GzipFile(fileobj=fsrc, mode='rb')
return fsrc
local_path = _get_local_path(openml_path, data_home)
if not os.path.exists(local_path):
try:
os.makedirs(os.path.dirname(local_path))
except OSError:
# potentially, the directory has been created already
pass
try:
with closing(urlopen(req)) as fsrc:
opener: Callable
if is_gzip_encoded(fsrc):
opener = open
else:
opener = gzip.GzipFile
with opener(local_path, 'wb') as fdst:
shutil.copyfileobj(fsrc, fdst)
except Exception:
if os.path.exists(local_path):
os.unlink(local_path)
raise
# XXX: First time, decompression will not be necessary (by using fsrc), but
# it will happen nonetheless
return gzip.GzipFile(local_path, 'rb')
class OpenMLError(ValueError):
"""HTTP 412 is a specific OpenML error code, indicating a generic error"""
pass
def _get_json_content_from_openml_api(
url: str,
error_message: Optional[str],
data_home: Optional[str]
) -> Dict:
"""
Loads json data from the openml api
Parameters
----------
url : str
The URL to load from. Should be an official OpenML endpoint
error_message : str or None
The error message to raise if an acceptable OpenML error is thrown
(acceptable error is, e.g., data id not found. Other errors, like 404's
will throw the native error message)
data_home : str or None
Location to cache the response. None if no cache is required.
Returns
-------
json_data : json
the json result from the OpenML server if the call was successful.
An exception otherwise.
"""
@_retry_with_clean_cache(url, data_home)
def _load_json():
with closing(_open_openml_url(url, data_home)) as response:
return json.loads(response.read().decode("utf-8"))
try:
return _load_json()
except HTTPError as error:
# 412 is an OpenML specific error code, indicating a generic error
# (e.g., data not found)
if error.code != 412:
raise error
# 412 error, not in except for nicer traceback
raise OpenMLError(error_message)
def _split_sparse_columns(
arff_data: ArffSparseDataType, include_columns: List
) -> ArffSparseDataType:
"""
obtains several columns from sparse arff representation. Additionally, the
column indices are re-labelled, given the columns that are not included.
(e.g., when including [1, 2, 3], the columns will be relabelled to
[0, 1, 2])
Parameters
----------
arff_data : tuple
A tuple of three lists of equal size; first list indicating the value,
second the x coordinate and the third the y coordinate.
include_columns : list
A list of columns to include.
Returns
-------
arff_data_new : tuple
Subset of arff data with only the include columns indicated by the
include_columns argument.
"""
arff_data_new: ArffSparseDataType = (list(), list(), list())
reindexed_columns = {column_idx: array_idx for array_idx, column_idx
in enumerate(include_columns)}
for val, row_idx, col_idx in zip(arff_data[0], arff_data[1], arff_data[2]):
if col_idx in include_columns:
arff_data_new[0].append(val)
arff_data_new[1].append(row_idx)
arff_data_new[2].append(reindexed_columns[col_idx])
return arff_data_new
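# Worked example (added for illustration; not part of the original module):
#   >>> arff_data = ([10.0, 20.0, 30.0], [0, 0, 1], [1, 3, 2])
#   >>> _split_sparse_columns(arff_data, include_columns=[2, 3])
#   ([20.0, 30.0], [0, 1], [1, 0])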
def _sparse_data_to_array(
arff_data: ArffSparseDataType, include_columns: List
) -> np.ndarray:
# turns the sparse data back into an array (can't use toarray() function,
# as this does only work on numeric data)
num_obs = max(arff_data[1]) + 1
y_shape = (num_obs, len(include_columns))
reindexed_columns = {column_idx: array_idx for array_idx, column_idx
in enumerate(include_columns)}
# TODO: improve for efficiency
y = np.empty(y_shape, dtype=np.float64)
for val, row_idx, col_idx in zip(arff_data[0], arff_data[1], arff_data[2]):
if col_idx in include_columns:
y[row_idx, reindexed_columns[col_idx]] = val
return y
def _convert_arff_data(
arff: ArffContainerType,
col_slice_x: List[int],
col_slice_y: List[int],
shape: Optional[Tuple] = None
) -> Tuple:
"""
converts the arff object into the appropriate matrix type (np.array or
scipy.sparse.csr_matrix) based on the 'data part' (i.e., in the
liac-arff dict, the object from the 'data' key)
Parameters
----------
arff : dict
As obtained from liac-arff object.
col_slice_x : list
The column indices that are sliced from the original array to return
as X data
col_slice_y : list
The column indices that are sliced from the original array to return
as y data
Returns
-------
X : np.array or scipy.sparse.csr_matrix
y : np.array
"""
arff_data = arff['data']
if isinstance(arff_data, Generator):
if shape is None:
raise ValueError(
"shape must be provided when arr['data'] is a Generator"
)
if shape[0] == -1:
count = -1
else:
count = shape[0] * shape[1]
data = np.fromiter(itertools.chain.from_iterable(arff_data),
dtype='float64', count=count)
data = data.reshape(*shape)
X = data[:, col_slice_x]
y = data[:, col_slice_y]
return X, y
elif isinstance(arff_data, tuple):
arff_data_X = _split_sparse_columns(arff_data, col_slice_x)
num_obs = max(arff_data[1]) + 1
X_shape = (num_obs, len(col_slice_x))
X = scipy.sparse.coo_matrix(
(arff_data_X[0], (arff_data_X[1], arff_data_X[2])),
shape=X_shape, dtype=np.float64)
X = X.tocsr()
y = _sparse_data_to_array(arff_data, col_slice_y)
return X, y
else:
# This should never happen
raise ValueError('Unexpected Data Type obtained from arff.')
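# Worked example for the dense-generator branch (added for illustration):
#   >>> rows = (r for r in [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
#   >>> X, y = _convert_arff_data({'data': rows}, [0, 1], [2], shape=(2, 3))
#   >>> X.tolist(), y.ravel().tolist()
#   ([[1.0, 2.0], [4.0, 5.0]], [3.0, 6.0])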
def _feature_to_dtype(feature: Dict[str, str]):
"""Map feature to dtype for pandas DataFrame
"""
if feature['data_type'] == 'string':
return object
elif feature['data_type'] == 'nominal':
return 'category'
# only numeric, integer, real are left
elif (feature['number_of_missing_values'] != '0' or
feature['data_type'] in ['numeric', 'real']):
# cast to floats when there are any missing values
return np.float64
elif feature['data_type'] == 'integer':
return np.int64
raise ValueError('Unsupported feature: {}'.format(feature))
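# Illustrative examples (added; not part of the original module):
#   >>> _feature_to_dtype({'data_type': 'nominal', 'number_of_missing_values': '0'})
#   'category'
#   >>> _feature_to_dtype({'data_type': 'integer', 'number_of_missing_values': '2'})
#   <class 'numpy.float64'>   # any missing values force a float dtype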
def _convert_arff_data_dataframe(
arff: ArffContainerType, columns: List, features_dict: Dict[str, Any]
) -> Tuple:
"""Convert the ARFF object into a pandas DataFrame.
Parameters
----------
arff : dict
As obtained from liac-arff object.
columns : list
Columns from dataframe to return.
features_dict : dict
Maps feature name to feature info from openml.
Returns
-------
result : tuple
tuple with the resulting dataframe
"""
pd = check_pandas_support('fetch_openml with as_frame=True')
attributes = OrderedDict(arff['attributes'])
arff_columns = list(attributes)
if not isinstance(arff['data'], Generator):
raise ValueError(
"arff['data'] must be a generator when converting to pd.DataFrame."
)
# calculate chunksize
first_row = next(arff['data'])
first_df = pd.DataFrame([first_row], columns=arff_columns)
row_bytes = first_df.memory_usage(deep=True).sum()
chunksize = get_chunk_n_rows(row_bytes)
# read arff data with chunks
columns_to_keep = [col for col in arff_columns if col in columns]
dfs = []
dfs.append(first_df[columns_to_keep])
for data in _chunk_generator(arff['data'], chunksize):
dfs.append(pd.DataFrame(data, columns=arff_columns)[columns_to_keep])
df = pd.concat(dfs, ignore_index=True)
for column in columns_to_keep:
dtype = _feature_to_dtype(features_dict[column])
if dtype == 'category':
cats_without_missing = [cat for cat in attributes[column]
if cat is not None and
not is_scalar_nan(cat)]
dtype = pd.api.types.CategoricalDtype(cats_without_missing)
df[column] = df[column].astype(dtype, copy=False)
return (df, )
def _get_data_info_by_name(
name: str, version: Union[int, str], data_home: Optional[str]
):
"""
Utilizes the openml dataset listing api to find a dataset by
name/version
OpenML api function:
https://www.openml.org/api_docs#!/data/get_data_list_data_name_data_name
Parameters
----------
name : str
name of the dataset
version : int or str
If version is an integer, the exact name/version will be obtained from
OpenML. If version is a string (value: "active") it will take the first
version from OpenML that is annotated as active. Any other string
values except "active" are treated as integer.
data_home : str or None
Location to cache the response. None if no cache is required.
Returns
-------
first_dataset : json
        json representation of the first dataset object that adhered to the
search criteria
"""
if version == "active":
# situation in which we return the oldest active version
url = _SEARCH_NAME.format(name) + "/status/active/"
error_msg = "No active dataset {} found.".format(name)
json_data = _get_json_content_from_openml_api(
url, error_msg, data_home=data_home
)
res = json_data['data']['dataset']
if len(res) > 1:
warn("Multiple active versions of the dataset matching the name"
" {name} exist. Versions may be fundamentally different, "
"returning version"
" {version}.".format(name=name, version=res[0]['version']))
return res[0]
# an integer version has been provided
url = (_SEARCH_NAME + "/data_version/{}").format(name, version)
try:
json_data = _get_json_content_from_openml_api(
url, error_message=None, data_home=data_home
)
except OpenMLError:
# we can do this in 1 function call if OpenML does not require the
# specification of the dataset status (i.e., return datasets with a
# given name / version regardless of active, deactivated, etc. )
# TODO: feature request OpenML.
url += "/status/deactivated"
error_msg = "Dataset {} with version {} not found.".format(name,
version)
json_data = _get_json_content_from_openml_api(
url, error_msg, data_home=data_home
)
return json_data['data']['dataset'][0]
def _get_data_description_by_id(
data_id: int, data_home: Optional[str]
) -> Dict[str, Any]:
# OpenML API function: https://www.openml.org/api_docs#!/data/get_data_id
url = _DATA_INFO.format(data_id)
error_message = "Dataset with data_id {} not found.".format(data_id)
json_data = _get_json_content_from_openml_api(
url, error_message, data_home=data_home
)
return json_data['data_set_description']
def _get_data_features(
data_id: int, data_home: Optional[str]
) -> OpenmlFeaturesType:
# OpenML function:
# https://www.openml.org/api_docs#!/data/get_data_features_id
url = _DATA_FEATURES.format(data_id)
error_message = "Dataset with data_id {} not found.".format(data_id)
json_data = _get_json_content_from_openml_api(
url, error_message, data_home=data_home
)
return json_data['data_features']['feature']
def _get_data_qualities(
data_id: int, data_home: Optional[str]
) -> OpenmlQualitiesType:
# OpenML API function:
# https://www.openml.org/api_docs#!/data/get_data_qualities_id
url = _DATA_QUALITIES.format(data_id)
error_message = "Dataset with data_id {} not found.".format(data_id)
json_data = _get_json_content_from_openml_api(
url, error_message, data_home=data_home
)
# the qualities might not be available, but we still try to process
# the data
return json_data.get('data_qualities', {}).get('quality', [])
def _get_num_samples(data_qualities: OpenmlQualitiesType) -> int:
"""Get the number of samples from data qualities.
Parameters
----------
data_qualities : list of dict
Used to retrieve the number of instances (samples) in the dataset.
Returns
-------
n_samples : int
The number of samples in the dataset or -1 if data qualities are
unavailable.
"""
# If the data qualities are unavailable, we return -1
default_n_samples = -1
qualities = {d['name']: d['value'] for d in data_qualities}
return int(float(qualities.get('NumberOfInstances', default_n_samples)))
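# Illustrative sketch (not part of the original module): with
#     data_qualities = [{'name': 'NumberOfInstances', 'value': '150.0'}]
# _get_num_samples(data_qualities) returns 150, while an empty list yields the
# default of -1.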
def _load_arff_response(
url: str,
data_home: Optional[str],
return_type, encode_nominal: bool,
parse_arff: Callable[[ArffContainerType], Tuple],
md5_checksum: str
) -> Tuple:
"""Load arff data with url and parses arff response with parse_arff"""
response = _open_openml_url(url, data_home)
with closing(response):
# Note that if the data is dense, no reading is done until the data
# generator is iterated.
actual_md5_checksum = hashlib.md5()
def _stream_checksum_generator(response):
for line in response:
actual_md5_checksum.update(line)
yield line.decode('utf-8')
stream = _stream_checksum_generator(response)
arff = _arff.load(stream,
return_type=return_type,
encode_nominal=encode_nominal)
parsed_arff = parse_arff(arff)
# consume remaining stream, if early exited
for _ in stream:
pass
if actual_md5_checksum.hexdigest() != md5_checksum:
raise ValueError("md5 checksum of local file for " + url +
" does not match description. "
"Downloaded file could have been modified / "
"corrupted, clean cache and retry...")
return parsed_arff
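# The md5 digest above is accumulated while the response is streamed, so the
# ARFF payload never has to be buffered a second time; a mismatch with the
# checksum reported in the dataset description is treated as a corrupted
# download or cache entry.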
def _download_data_to_bunch(
url: str,
sparse: bool,
data_home: Optional[str],
*,
as_frame: bool,
features_list: List,
data_columns: List[int],
target_columns: List,
shape: Optional[Tuple[int, int]],
md5_checksum: str
):
"""Download OpenML ARFF and convert to Bunch of data
"""
# NB: this function is long in order to handle retry for any failure
# during the streaming parse of the ARFF.
# Prepare which columns and data types should be returned for the X and y
features_dict = {feature['name']: feature for feature in features_list}
# XXX: col_slice_y should be all nominal or all numeric
_verify_target_data_type(features_dict, target_columns)
col_slice_y = [int(features_dict[col_name]['index'])
for col_name in target_columns]
col_slice_x = [int(features_dict[col_name]['index'])
for col_name in data_columns]
for col_idx in col_slice_y:
feat = features_list[col_idx]
nr_missing = int(feat['number_of_missing_values'])
if nr_missing > 0:
raise ValueError('Target column {} has {} missing values. '
'Missing values are not supported for target '
'columns. '.format(feat['name'], nr_missing))
# Access an ARFF file on the OpenML server. Documentation:
# https://www.openml.org/api_data_docs#!/data/get_download_id
if sparse is True:
return_type = _arff.COO
else:
return_type = _arff.DENSE_GEN
frame = nominal_attributes = None
parse_arff: Callable
postprocess: Callable
if as_frame:
columns = data_columns + target_columns
parse_arff = partial(_convert_arff_data_dataframe, columns=columns,
features_dict=features_dict)
def postprocess(frame):
X = frame[data_columns]
if len(target_columns) >= 2:
y = frame[target_columns]
elif len(target_columns) == 1:
y = frame[target_columns[0]]
else:
y = None
return X, y, frame, nominal_attributes
else:
def parse_arff(arff):
X, y = _convert_arff_data(arff, col_slice_x, col_slice_y, shape)
            # nominal_attributes is a dict mapping each attribute name to its
            # possible values. It also includes the target columns (which are
            # popped off below, before the data is packed into the Bunch
            # object).
nominal_attributes = {k: v for k, v in arff['attributes']
if isinstance(v, list) and
k in data_columns + target_columns}
return X, y, nominal_attributes
def postprocess(X, y, nominal_attributes):
is_classification = {col_name in nominal_attributes
for col_name in target_columns}
if not is_classification:
# No target
pass
elif all(is_classification):
y = np.hstack([
np.take(
np.asarray(nominal_attributes.pop(col_name),
dtype='O'),
y[:, i:i + 1].astype(int, copy=False))
for i, col_name in enumerate(target_columns)
])
elif any(is_classification):
raise ValueError('Mix of nominal and non-nominal targets is '
'not currently supported')
            # reshape y back to a 1-D array if there is only 1 target column;
            # set it back to None if there are no target columns
if y.shape[1] == 1:
y = y.reshape((-1,))
elif y.shape[1] == 0:
y = None
return X, y, frame, nominal_attributes
out = _retry_with_clean_cache(url, data_home)(
_load_arff_response)(url, data_home,
return_type=return_type,
encode_nominal=not as_frame,
parse_arff=parse_arff,
md5_checksum=md5_checksum)
X, y, frame, nominal_attributes = postprocess(*out)
return Bunch(data=X, target=y, frame=frame,
categories=nominal_attributes,
feature_names=data_columns,
target_names=target_columns)
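# Summary of the two parsing paths above: with as_frame=True the ARFF rows are
# converted into a pandas DataFrame and X/y are sliced from it in postprocess;
# otherwise the rows are decoded into (sparse or dense) arrays and nominal
# target codes are mapped back to their category labels before packing the
# Bunch.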
def _verify_target_data_type(features_dict, target_columns):
# verifies the data type of the y array in case there are multiple targets
# (throws an error if these targets do not comply with sklearn support)
if not isinstance(target_columns, list):
raise ValueError('target_column should be list, '
'got: %s' % type(target_columns))
found_types = set()
for target_column in target_columns:
if target_column not in features_dict:
            raise KeyError('Could not find target_column={}'.format(
                target_column))
if features_dict[target_column]['data_type'] == "numeric":
found_types.add(np.float64)
else:
found_types.add(object)
# note: we compare to a string, not boolean
if features_dict[target_column]['is_ignore'] == 'true':
warn('target_column={} has flag is_ignore.'.format(
target_column))
if features_dict[target_column]['is_row_identifier'] == 'true':
warn('target_column={} has flag is_row_identifier.'.format(
target_column))
if len(found_types) > 1:
raise ValueError('Can only handle homogeneous multi-target datasets, '
'i.e., all targets are either numeric or '
'categorical.')
def _valid_data_column_names(features_list, target_columns):
    # Logic for determining which columns can be learned from. Per the OpenML
    # guide, columns that carry the `is_row_identifier` or `is_ignore` flag
    # cannot be learned from. Target columns are excluded as well.
valid_data_column_names = []
for feature in features_list:
if (feature['name'] not in target_columns
and feature['is_ignore'] != 'true'
and feature['is_row_identifier'] != 'true'):
valid_data_column_names.append(feature['name'])
return valid_data_column_names
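# Illustrative sketch (feature names are made up): given a features_list with
#     {'name': 'id', 'is_ignore': 'false', 'is_row_identifier': 'true', ...}
#     {'name': 'sepal_length', 'is_ignore': 'false', 'is_row_identifier': 'false', ...}
# and target_columns=['class'], only 'sepal_length' is kept as a data column.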
def fetch_openml(
name: Optional[str] = None,
*,
version: Union[str, int] = 'active',
data_id: Optional[int] = None,
data_home: Optional[str] = None,
target_column: Optional[Union[str, List]] = 'default-target',
cache: bool = True,
return_X_y: bool = False,
as_frame: Union[str, bool] = 'auto'
):
"""Fetch dataset from openml by name or dataset id.
Datasets are uniquely identified by either an integer ID or by a
combination of name and version (i.e. there might be multiple
versions of the 'iris' dataset). Please give either name or data_id
(not both). In case a name is given, a version can also be
provided.
Read more in the :ref:`User Guide <openml>`.
.. versionadded:: 0.20
.. note:: EXPERIMENTAL
The API is experimental (particularly the return value structure),
and might have small backward-incompatible changes without notice
or warning in future releases.
Parameters
----------
name : str, default=None
String identifier of the dataset. Note that OpenML can have multiple
datasets with the same name.
version : int or 'active', default='active'
Version of the dataset. Can only be provided if also ``name`` is given.
If 'active' the oldest version that's still active is used. Since
there may be more than one active version of a dataset, and those
versions may fundamentally be different from one another, setting an
exact version is highly recommended.
data_id : int, default=None
OpenML ID of the dataset. The most specific way of retrieving a
dataset. If data_id is not given, name (and potential version) are
used to obtain a dataset.
data_home : str, default=None
Specify another download and cache folder for the data sets. By default
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
target_column : str, list or None, default='default-target'
Specify the column name in the data to use as target. If
        'default-target', the standard target column as stored on the server
is used. If ``None``, all columns are returned as data and the
target is ``None``. If list (of strings), all columns with these names
are returned as multi-target (Note: not all scikit-learn classifiers
can handle all types of multi-output combinations)
cache : bool, default=True
Whether to cache downloaded datasets using joblib.
return_X_y : bool, default=False
If True, returns ``(data, target)`` instead of a Bunch object. See
below for more information about the `data` and `target` objects.
as_frame : bool or 'auto', default='auto'
If True, the data is a pandas DataFrame including columns with
appropriate dtypes (numeric, string or categorical). The target is
a pandas DataFrame or Series depending on the number of target_columns.
The Bunch will contain a ``frame`` attribute with the target and the
data. If ``return_X_y`` is True, then ``(data, target)`` will be pandas
        DataFrames or Series as described above.
If as_frame is 'auto', the data and target will be converted to
DataFrame or Series as if as_frame is set to True, unless the dataset
is stored in sparse format.
.. versionchanged:: 0.24
The default value of `as_frame` changed from `False` to `'auto'`
in 0.24.
Returns
-------
data : :class:`~sklearn.utils.Bunch`
Dictionary-like object, with the following attributes.
data : np.array, scipy.sparse.csr_matrix of floats, or pandas DataFrame
The feature matrix. Categorical features are encoded as ordinals.
target : np.array, pandas Series or DataFrame
The regression target or classification labels, if applicable.
Dtype is float if numeric, and object if categorical. If
``as_frame`` is True, ``target`` is a pandas object.
DESCR : str
The full description of the dataset
feature_names : list
The names of the dataset columns
target_names: list
The names of the target columns
.. versionadded:: 0.22
categories : dict or None
Maps each categorical feature name to a list of values, such
that the value encoded as i is ith in the list. If ``as_frame``
is True, this is None.
details : dict
More metadata from OpenML
frame : pandas DataFrame
Only present when `as_frame=True`. DataFrame with ``data`` and
``target``.
(data, target) : tuple if ``return_X_y`` is True
.. note:: EXPERIMENTAL
This interface is **experimental** and subsequent releases may
change attributes without notice (although there should only be
minor changes to ``data`` and ``target``).
Missing values in the 'data' are represented as NaN's. Missing values
in 'target' are represented as NaN's (numerical target) or None
(categorical target)
"""
if cache is False:
# no caching will be applied
data_home = None
else:
data_home = get_data_home(data_home=data_home)
data_home = join(data_home, 'openml')
# check valid function arguments. data_id XOR (name, version) should be
# provided
if name is not None:
# OpenML is case-insensitive, but the caching mechanism is not
# convert all data names (str) to lower case
name = name.lower()
if data_id is not None:
raise ValueError(
"Dataset data_id={} and name={} passed, but you can only "
"specify a numeric data_id or a name, not "
"both.".format(data_id, name))
data_info = _get_data_info_by_name(name, version, data_home)
data_id = data_info['did']
elif data_id is not None:
# from the previous if statement, it is given that name is None
if version != "active":
raise ValueError(
"Dataset data_id={} and version={} passed, but you can only "
"specify a numeric data_id or a version, not "
"both.".format(data_id, version))
else:
raise ValueError(
"Neither name nor data_id are provided. Please provide name or "
"data_id.")
data_description = _get_data_description_by_id(data_id, data_home)
if data_description['status'] != "active":
warn("Version {} of dataset {} is inactive, meaning that issues have "
"been found in the dataset. Try using a newer version from "
"this URL: {}".format(
data_description['version'],
data_description['name'],
data_description['url']))
if 'error' in data_description:
warn("OpenML registered a problem with the dataset. It might be "
"unusable. Error: {}".format(data_description['error']))
if 'warning' in data_description:
warn("OpenML raised a warning on the dataset. It might be "
"unusable. Warning: {}".format(data_description['warning']))
return_sparse = False
if data_description['format'].lower() == 'sparse_arff':
return_sparse = True
if as_frame == 'auto':
as_frame = not return_sparse
if as_frame and return_sparse:
raise ValueError('Cannot return dataframe with sparse data')
# download data features, meta-info about column types
features_list = _get_data_features(data_id, data_home)
if not as_frame:
for feature in features_list:
if 'true' in (feature['is_ignore'], feature['is_row_identifier']):
continue
if feature['data_type'] == 'string':
raise ValueError('STRING attributes are not supported for '
'array representation. Try as_frame=True')
if target_column == "default-target":
# determines the default target based on the data feature results
# (which is currently more reliable than the data description;
# see issue: https://github.com/openml/OpenML/issues/768)
target_columns = [feature['name'] for feature in features_list
if feature['is_target'] == 'true']
elif isinstance(target_column, str):
# for code-simplicity, make target_column by default a list
target_columns = [target_column]
elif target_column is None:
target_columns = []
elif isinstance(target_column, list):
target_columns = target_column
else:
raise TypeError("Did not recognize type of target_column"
"Should be str, list or None. Got: "
"{}".format(type(target_column)))
data_columns = _valid_data_column_names(features_list,
target_columns)
shape: Optional[Tuple[int, int]]
# determine arff encoding to return
if not return_sparse:
# The shape must include the ignored features to keep the right indexes
# during the arff data conversion.
data_qualities = _get_data_qualities(data_id, data_home)
shape = _get_num_samples(data_qualities), len(features_list)
else:
shape = None
# obtain the data
url = _DATA_FILE.format(data_description['file_id'])
bunch = _download_data_to_bunch(url, return_sparse, data_home,
as_frame=bool(as_frame),
features_list=features_list, shape=shape,
target_columns=target_columns,
data_columns=data_columns,
md5_checksum=data_description[
"md5_checksum"])
if return_X_y:
return bunch.data, bunch.target
description = "{}\n\nDownloaded from openml.org.".format(
data_description.pop('description'))
bunch.update(
DESCR=description, details=data_description,
url="https://www.openml.org/d/{}".format(data_id))
return bunch
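# Hypothetical usage sketch (the dataset name, version and data_id below are
# illustrative only):
#     from sklearn.datasets import fetch_openml
#     bunch = fetch_openml(name='iris', version=1, as_frame=True)
#     X, y = bunch.data, bunch.target
#     X_arr, y_arr = fetch_openml(data_id=61, return_X_y=True, as_frame=False)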
| bsd-3-clause |
RachitKansal/scikit-learn | sklearn/neighbors/tests/test_approximate.py | 71 | 18815 | """
Testing for the approximate neighbor search using
Locality Sensitive Hashing Forest module
(sklearn.neighbors.LSHForest).
"""
# Author: Maheshakya Wijewardena, Joel Nothman
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_array_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.neighbors import LSHForest
from sklearn.neighbors import NearestNeighbors
def test_neighbors_accuracy_with_n_candidates():
# Checks whether accuracy increases as `n_candidates` increases.
n_candidates_values = np.array([.1, 50, 500])
n_samples = 100
n_features = 10
n_iter = 10
n_points = 5
rng = np.random.RandomState(42)
accuracies = np.zeros(n_candidates_values.shape[0], dtype=float)
X = rng.rand(n_samples, n_features)
for i, n_candidates in enumerate(n_candidates_values):
lshf = LSHForest(n_candidates=n_candidates)
lshf.fit(X)
for j in range(n_iter):
query = X[rng.randint(0, n_samples)].reshape(1, -1)
neighbors = lshf.kneighbors(query, n_neighbors=n_points,
return_distance=False)
distances = pairwise_distances(query, X, metric='cosine')
ranks = np.argsort(distances)[0, :n_points]
intersection = np.intersect1d(ranks, neighbors).shape[0]
ratio = intersection / float(n_points)
accuracies[i] = accuracies[i] + ratio
accuracies[i] = accuracies[i] / float(n_iter)
# Sorted accuracies should be equal to original accuracies
assert_true(np.all(np.diff(accuracies) >= 0),
msg="Accuracies are not non-decreasing.")
# Highest accuracy should be strictly greater than the lowest
assert_true(np.ptp(accuracies) > 0,
msg="Highest accuracy is not strictly greater than lowest.")
def test_neighbors_accuracy_with_n_estimators():
# Checks whether accuracy increases as `n_estimators` increases.
n_estimators = np.array([1, 10, 100])
n_samples = 100
n_features = 10
n_iter = 10
n_points = 5
rng = np.random.RandomState(42)
accuracies = np.zeros(n_estimators.shape[0], dtype=float)
X = rng.rand(n_samples, n_features)
for i, t in enumerate(n_estimators):
lshf = LSHForest(n_candidates=500, n_estimators=t)
lshf.fit(X)
for j in range(n_iter):
query = X[rng.randint(0, n_samples)].reshape(1, -1)
neighbors = lshf.kneighbors(query, n_neighbors=n_points,
return_distance=False)
distances = pairwise_distances(query, X, metric='cosine')
ranks = np.argsort(distances)[0, :n_points]
intersection = np.intersect1d(ranks, neighbors).shape[0]
ratio = intersection / float(n_points)
accuracies[i] = accuracies[i] + ratio
accuracies[i] = accuracies[i] / float(n_iter)
# Sorted accuracies should be equal to original accuracies
assert_true(np.all(np.diff(accuracies) >= 0),
msg="Accuracies are not non-decreasing.")
# Highest accuracy should be strictly greater than the lowest
assert_true(np.ptp(accuracies) > 0,
msg="Highest accuracy is not strictly greater than lowest.")
@ignore_warnings
def test_kneighbors():
# Checks whether desired number of neighbors are returned.
# It is guaranteed to return the requested number of neighbors
# if `min_hash_match` is set to 0. Returned distances should be
# in ascending order.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(min_hash_match=0)
# Test unfitted estimator
assert_raises(ValueError, lshf.kneighbors, X[0])
lshf.fit(X)
for i in range(n_iter):
n_neighbors = rng.randint(0, n_samples)
query = X[rng.randint(0, n_samples)].reshape(1, -1)
neighbors = lshf.kneighbors(query, n_neighbors=n_neighbors,
return_distance=False)
# Desired number of neighbors should be returned.
assert_equal(neighbors.shape[1], n_neighbors)
# Multiple points
n_queries = 5
queries = X[rng.randint(0, n_samples, n_queries)]
distances, neighbors = lshf.kneighbors(queries,
n_neighbors=1,
return_distance=True)
assert_equal(neighbors.shape[0], n_queries)
assert_equal(distances.shape[0], n_queries)
# Test only neighbors
neighbors = lshf.kneighbors(queries, n_neighbors=1,
return_distance=False)
assert_equal(neighbors.shape[0], n_queries)
# Test random point(not in the data set)
query = rng.randn(n_features).reshape(1, -1)
lshf.kneighbors(query, n_neighbors=1,
return_distance=False)
# Test n_neighbors at initialization
neighbors = lshf.kneighbors(query, return_distance=False)
assert_equal(neighbors.shape[1], 5)
# Test `neighbors` has an integer dtype
assert_true(neighbors.dtype.kind == 'i',
msg="neighbors are not in integer dtype.")
def test_radius_neighbors():
    # Checks whether returned distances are less than `radius`.
    # At least one point should be returned when the `radius` is set
    # to the mean distance from the query point to the other points in
    # the database.
# Moreover, this test compares the radius neighbors of LSHForest
# with the `sklearn.neighbors.NearestNeighbors`.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest()
# Test unfitted estimator
assert_raises(ValueError, lshf.radius_neighbors, X[0])
lshf.fit(X)
for i in range(n_iter):
# Select a random point in the dataset as the query
query = X[rng.randint(0, n_samples)].reshape(1, -1)
# At least one neighbor should be returned when the radius is the
# mean distance from the query to the points of the dataset.
mean_dist = np.mean(pairwise_distances(query, X, metric='cosine'))
neighbors = lshf.radius_neighbors(query, radius=mean_dist,
return_distance=False)
assert_equal(neighbors.shape, (1,))
assert_equal(neighbors.dtype, object)
assert_greater(neighbors[0].shape[0], 0)
# All distances to points in the results of the radius query should
# be less than mean_dist
distances, neighbors = lshf.radius_neighbors(query,
radius=mean_dist,
return_distance=True)
assert_array_less(distances[0], mean_dist)
# Multiple points
n_queries = 5
queries = X[rng.randint(0, n_samples, n_queries)]
distances, neighbors = lshf.radius_neighbors(queries,
return_distance=True)
    # Each query may return a different number of neighbors, so dists and inds
    # are 1D object arrays holding one variable-length array per query rather
    # than rectangular 2D arrays.
assert_equal(distances.shape, (n_queries,))
assert_equal(distances.dtype, object)
assert_equal(neighbors.shape, (n_queries,))
assert_equal(neighbors.dtype, object)
# Compare with exact neighbor search
query = X[rng.randint(0, n_samples)].reshape(1, -1)
mean_dist = np.mean(pairwise_distances(query, X, metric='cosine'))
nbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
distances_exact, _ = nbrs.radius_neighbors(query, radius=mean_dist)
distances_approx, _ = lshf.radius_neighbors(query, radius=mean_dist)
# Radius-based queries do not sort the result points and the order
# depends on the method, the random_state and the dataset order. Therefore
# we need to sort the results ourselves before performing any comparison.
sorted_dists_exact = np.sort(distances_exact[0])
sorted_dists_approx = np.sort(distances_approx[0])
# Distances to exact neighbors are less than or equal to approximate
# counterparts as the approximate radius query might have missed some
# closer neighbors.
assert_true(np.all(np.less_equal(sorted_dists_exact,
sorted_dists_approx)))
def test_radius_neighbors_boundary_handling():
X = [[0.999, 0.001], [0.5, 0.5], [0, 1.], [-1., 0.001]]
n_points = len(X)
# Build an exact nearest neighbors model as reference model to ensure
# consistency between exact and approximate methods
nnbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
# Build a LSHForest model with hyperparameter values that always guarantee
# exact results on this toy dataset.
lsfh = LSHForest(min_hash_match=0, n_candidates=n_points).fit(X)
# define a query aligned with the first axis
query = [[1., 0.]]
# Compute the exact cosine distances of the query to the four points of
# the dataset
dists = pairwise_distances(query, X, metric='cosine').ravel()
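    # (The cosine distance used throughout is d(u, v) = 1 - u.v / (||u|| ||v||),
    # so it ranges from 0 for aligned vectors up to 2 for opposite vectors.)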
# The first point is almost aligned with the query (very small angle),
# the cosine distance should therefore be almost null:
assert_almost_equal(dists[0], 0, decimal=5)
    # The second point forms an angle of 45 degrees with the query vector
assert_almost_equal(dists[1], 1 - np.cos(np.pi / 4))
    # The third point is orthogonal to the query vector, hence at a distance
    # of exactly one:
assert_almost_equal(dists[2], 1)
    # The last point is almost collinear with the query but with opposite
    # sign, therefore its cosine 'distance' is very close to the maximum
    # possible value of 2.
assert_almost_equal(dists[3], 2, decimal=5)
# If we query with a radius of one, all the samples except the last sample
# should be included in the results. This means that the third sample
# is lying on the boundary of the radius query:
exact_dists, exact_idx = nnbrs.radius_neighbors(query, radius=1)
approx_dists, approx_idx = lsfh.radius_neighbors(query, radius=1)
assert_array_equal(np.sort(exact_idx[0]), [0, 1, 2])
assert_array_equal(np.sort(approx_idx[0]), [0, 1, 2])
assert_array_almost_equal(np.sort(exact_dists[0]), dists[:-1])
assert_array_almost_equal(np.sort(approx_dists[0]), dists[:-1])
    # If we perform the same query with a slightly lower radius, the third
# point of the dataset that lay on the boundary of the previous query
# is now rejected:
eps = np.finfo(np.float64).eps
exact_dists, exact_idx = nnbrs.radius_neighbors(query, radius=1 - eps)
approx_dists, approx_idx = lsfh.radius_neighbors(query, radius=1 - eps)
assert_array_equal(np.sort(exact_idx[0]), [0, 1])
assert_array_equal(np.sort(approx_idx[0]), [0, 1])
assert_array_almost_equal(np.sort(exact_dists[0]), dists[:-2])
assert_array_almost_equal(np.sort(approx_dists[0]), dists[:-2])
def test_distances():
# Checks whether returned neighbors are from closest to farthest.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest()
lshf.fit(X)
for i in range(n_iter):
n_neighbors = rng.randint(0, n_samples)
query = X[rng.randint(0, n_samples)].reshape(1, -1)
distances, neighbors = lshf.kneighbors(query,
n_neighbors=n_neighbors,
return_distance=True)
# Returned neighbors should be from closest to farthest, that is
# increasing distance values.
assert_true(np.all(np.diff(distances[0]) >= 0))
# Note: the radius_neighbors method does not guarantee the order of
# the results.
def test_fit():
# Checks whether `fit` method sets all attribute values correctly.
n_samples = 12
n_features = 2
n_estimators = 5
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(n_estimators=n_estimators)
lshf.fit(X)
# _input_array = X
assert_array_equal(X, lshf._fit_X)
# A hash function g(p) for each tree
assert_equal(n_estimators, len(lshf.hash_functions_))
# Hash length = 32
assert_equal(32, lshf.hash_functions_[0].components_.shape[0])
# Number of trees_ in the forest
assert_equal(n_estimators, len(lshf.trees_))
# Each tree has entries for every data point
assert_equal(n_samples, len(lshf.trees_[0]))
# Original indices after sorting the hashes
assert_equal(n_estimators, len(lshf.original_indices_))
# Each set of original indices in a tree has entries for every data point
assert_equal(n_samples, len(lshf.original_indices_[0]))
def test_partial_fit():
    # Checks whether the inserted array is consistent with the fitted data.
# `partial_fit` method should set all attribute values correctly.
n_samples = 12
n_samples_partial_fit = 3
n_features = 2
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
X_partial_fit = rng.rand(n_samples_partial_fit, n_features)
lshf = LSHForest()
# Test unfitted estimator
lshf.partial_fit(X)
assert_array_equal(X, lshf._fit_X)
lshf.fit(X)
# Insert wrong dimension
assert_raises(ValueError, lshf.partial_fit,
np.random.randn(n_samples_partial_fit, n_features - 1))
lshf.partial_fit(X_partial_fit)
    # size of _input_array = n_samples + n_samples_partial_fit after insertion
assert_equal(lshf._fit_X.shape[0],
n_samples + n_samples_partial_fit)
    # size of original_indices_[0] = n_samples + n_samples_partial_fit
assert_equal(len(lshf.original_indices_[0]),
n_samples + n_samples_partial_fit)
    # size of trees_[1] = n_samples + n_samples_partial_fit
assert_equal(len(lshf.trees_[1]),
n_samples + n_samples_partial_fit)
def test_hash_functions():
# Checks randomness of hash functions.
# Variance and mean of each hash function (projection vector)
    # should differ from those of the flattened array of all hash functions.
    # If the hash functions were not randomly built (i.e. all seeded with the
    # same value), the variances and means of all functions would be equal.
n_samples = 12
n_features = 2
n_estimators = 5
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(n_estimators=n_estimators,
random_state=rng.randint(0, np.iinfo(np.int32).max))
lshf.fit(X)
hash_functions = []
for i in range(n_estimators):
hash_functions.append(lshf.hash_functions_[i].components_)
for i in range(n_estimators):
assert_not_equal(np.var(hash_functions),
np.var(lshf.hash_functions_[i].components_))
for i in range(n_estimators):
assert_not_equal(np.mean(hash_functions),
np.mean(lshf.hash_functions_[i].components_))
def test_candidates():
# Checks whether candidates are sufficient.
# This should handle the cases when number of candidates is 0.
# User should be warned when number of candidates is less than
# requested number of neighbors.
X_train = np.array([[5, 5, 2], [21, 5, 5], [1, 1, 1], [8, 9, 1],
[6, 10, 2]], dtype=np.float32)
X_test = np.array([7, 10, 3], dtype=np.float32).reshape(1, -1)
# For zero candidates
lshf = LSHForest(min_hash_match=32)
lshf.fit(X_train)
message = ("Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (3, 32))
assert_warns_message(UserWarning, message, lshf.kneighbors,
X_test, n_neighbors=3)
distances, neighbors = lshf.kneighbors(X_test, n_neighbors=3)
assert_equal(distances.shape[1], 3)
# For candidates less than n_neighbors
lshf = LSHForest(min_hash_match=31)
lshf.fit(X_train)
message = ("Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (5, 31))
assert_warns_message(UserWarning, message, lshf.kneighbors,
X_test, n_neighbors=5)
distances, neighbors = lshf.kneighbors(X_test, n_neighbors=5)
assert_equal(distances.shape[1], 5)
def test_graphs():
# Smoke tests for graph methods.
n_samples_sizes = [5, 10, 20]
n_features = 3
rng = np.random.RandomState(42)
for n_samples in n_samples_sizes:
X = rng.rand(n_samples, n_features)
lshf = LSHForest(min_hash_match=0)
lshf.fit(X)
kneighbors_graph = lshf.kneighbors_graph(X)
radius_neighbors_graph = lshf.radius_neighbors_graph(X)
assert_equal(kneighbors_graph.shape[0], n_samples)
assert_equal(kneighbors_graph.shape[1], n_samples)
assert_equal(radius_neighbors_graph.shape[0], n_samples)
assert_equal(radius_neighbors_graph.shape[1], n_samples)
def test_sparse_input():
# note: Fixed random state in sp.rand is not supported in older scipy.
# The test should succeed regardless.
X1 = sp.rand(50, 100)
X2 = sp.rand(10, 100)
forest_sparse = LSHForest(radius=1, random_state=0).fit(X1)
forest_dense = LSHForest(radius=1, random_state=0).fit(X1.A)
d_sparse, i_sparse = forest_sparse.kneighbors(X2, return_distance=True)
d_dense, i_dense = forest_dense.kneighbors(X2.A, return_distance=True)
assert_almost_equal(d_sparse, d_dense)
assert_almost_equal(i_sparse, i_dense)
d_sparse, i_sparse = forest_sparse.radius_neighbors(X2,
return_distance=True)
d_dense, i_dense = forest_dense.radius_neighbors(X2.A,
return_distance=True)
assert_equal(d_sparse.shape, d_dense.shape)
for a, b in zip(d_sparse, d_dense):
assert_almost_equal(a, b)
for a, b in zip(i_sparse, i_dense):
assert_almost_equal(a, b)
| bsd-3-clause |
kaiserroll14/301finalproject | main/pandas/sparse/panel.py | 9 | 18717 | """
Data structures for sparse float data. Life is made simpler by dealing only
with float64 data
"""
# pylint: disable=E1101,E1103,W0231
import warnings
from pandas.compat import range, lrange, zip
from pandas import compat
import numpy as np
from pandas.core.index import Index, MultiIndex, _ensure_index
from pandas.core.frame import DataFrame
from pandas.core.panel import Panel
from pandas.sparse.frame import SparseDataFrame
from pandas.util.decorators import deprecate
import pandas.core.common as com
import pandas.core.ops as ops
class SparsePanelAxis(object):
def __init__(self, cache_field, frame_attr):
self.cache_field = cache_field
self.frame_attr = frame_attr
def __get__(self, obj, type=None):
return getattr(obj, self.cache_field, None)
def __set__(self, obj, value):
value = _ensure_index(value)
if isinstance(value, MultiIndex):
raise NotImplementedError("value cannot be a MultiIndex")
for v in compat.itervalues(obj._frames):
setattr(v, self.frame_attr, value)
setattr(obj, self.cache_field, value)
class SparsePanel(Panel):
"""
Sparse version of Panel
Parameters
----------
frames : dict of DataFrame objects
items : array-like
major_axis : array-like
minor_axis : array-like
default_kind : {'block', 'integer'}, default 'block'
Default sparse kind for converting Series to SparseSeries. Will not
override SparseSeries passed into constructor
default_fill_value : float
Default fill_value for converting Series to SparseSeries. Will not
override SparseSeries passed in
Notes
-----
"""
ndim = 3
_typ = 'panel'
_subtyp = 'sparse_panel'
def __init__(self, frames=None, items=None, major_axis=None, minor_axis=None,
default_fill_value=np.nan, default_kind='block',
copy=False):
# deprecation #11157
warnings.warn("SparsePanel is deprecated and will be removed in a future version",
FutureWarning, stacklevel=2)
if frames is None:
frames = {}
if isinstance(frames, np.ndarray):
new_frames = {}
for item, vals in zip(items, frames):
new_frames[item] = \
SparseDataFrame(vals, index=major_axis,
columns=minor_axis,
default_fill_value=default_fill_value,
default_kind=default_kind)
frames = new_frames
if not isinstance(frames, dict):
raise TypeError('input must be a dict, a %r was passed' %
type(frames).__name__)
self.default_fill_value = fill_value = default_fill_value
self.default_kind = kind = default_kind
# pre-filter, if necessary
if items is None:
items = Index(sorted(frames.keys()))
items = _ensure_index(items)
(clean_frames,
major_axis,
minor_axis) = _convert_frames(frames, major_axis,
minor_axis, kind=kind,
fill_value=fill_value)
self._frames = clean_frames
# do we want to fill missing ones?
for item in items:
if item not in clean_frames:
raise ValueError('column %r not found in data' % item)
self._items = items
self.major_axis = major_axis
self.minor_axis = minor_axis
def _consolidate_inplace(self): # pragma: no cover
# do nothing when DataFrame calls this method
pass
def __array_wrap__(self, result):
return SparsePanel(result, items=self.items,
major_axis=self.major_axis,
minor_axis=self.minor_axis,
default_kind=self.default_kind,
default_fill_value=self.default_fill_value)
@classmethod
def from_dict(cls, data):
"""
Analogous to Panel.from_dict
"""
return SparsePanel(data)
def to_dense(self):
"""
Convert SparsePanel to (dense) Panel
Returns
-------
dense : Panel
"""
return Panel(self.values, self.items, self.major_axis,
self.minor_axis)
def as_matrix(self):
return self.values
@property
def values(self):
# return dense values
return np.array([self._frames[item].values
for item in self.items])
# need a special property for items to make the field assignable
_items = None
def _get_items(self):
return self._items
def _set_items(self, new_items):
new_items = _ensure_index(new_items)
if isinstance(new_items, MultiIndex):
raise NotImplementedError("itemps cannot be a MultiIndex")
# need to create new frames dict
old_frame_dict = self._frames
old_items = self._items
self._frames = dict((new_k, old_frame_dict[old_k])
for new_k, old_k in zip(new_items, old_items))
self._items = new_items
items = property(fget=_get_items, fset=_set_items)
# DataFrame's index
major_axis = SparsePanelAxis('_major_axis', 'index')
# DataFrame's columns / "items"
minor_axis = SparsePanelAxis('_minor_axis', 'columns')
def _ixs(self, i, axis=0):
"""
for compat as we don't support Block Manager here
i : int, slice, or sequence of integers
axis : int
"""
key = self._get_axis(axis)[i]
# xs cannot handle a non-scalar key, so just reindex here
if com.is_list_like(key):
return self.reindex(**{self._get_axis_name(axis): key})
return self.xs(key, axis=axis)
def _slice(self, slobj, axis=0, kind=None):
"""
for compat as we don't support Block Manager here
"""
axis = self._get_axis_name(axis)
index = self._get_axis(axis)
return self.reindex(**{axis: index[slobj]})
def _get_item_cache(self, key):
return self._frames[key]
def __setitem__(self, key, value):
if isinstance(value, DataFrame):
value = value.reindex(index=self.major_axis,
columns=self.minor_axis)
if not isinstance(value, SparseDataFrame):
value = value.to_sparse(fill_value=self.default_fill_value,
kind=self.default_kind)
else:
raise ValueError('only DataFrame objects can be set currently')
self._frames[key] = value
if key not in self.items:
self._items = Index(list(self.items) + [key])
def set_value(self, item, major, minor, value):
"""
Quickly set single value at (item, major, minor) location
Parameters
----------
item : item label (panel item)
major : major axis label (panel item row)
minor : minor axis label (panel item column)
value : scalar
Notes
-----
This method *always* returns a new object. It is not particularly
efficient but is provided for API compatibility with Panel
Returns
-------
panel : SparsePanel
"""
dense = self.to_dense().set_value(item, major, minor, value)
return dense.to_sparse(kind=self.default_kind,
fill_value=self.default_fill_value)
def __delitem__(self, key):
loc = self.items.get_loc(key)
indices = lrange(loc) + lrange(loc + 1, len(self.items))
del self._frames[key]
self._items = self._items.take(indices)
def __getstate__(self):
# pickling
return (self._frames, com._pickle_array(self.items),
com._pickle_array(self.major_axis),
com._pickle_array(self.minor_axis),
self.default_fill_value, self.default_kind)
def __setstate__(self, state):
frames, items, major, minor, fv, kind = state
self.default_fill_value = fv
self.default_kind = kind
self._items = _ensure_index(com._unpickle_array(items))
self._major_axis = _ensure_index(com._unpickle_array(major))
self._minor_axis = _ensure_index(com._unpickle_array(minor))
self._frames = frames
def copy(self, deep=True):
"""
Make a copy of the sparse panel
Returns
-------
copy : SparsePanel
"""
d = self._construct_axes_dict()
if deep:
new_data = dict((k, v.copy(deep=True)) for k, v in compat.iteritems(self._frames))
d = dict((k, v.copy(deep=True)) for k, v in compat.iteritems(d))
else:
new_data = self._frames.copy()
d['default_fill_value']=self.default_fill_value
d['default_kind']=self.default_kind
return SparsePanel(new_data, **d)
def to_frame(self, filter_observations=True):
"""
Convert SparsePanel to (dense) DataFrame
Returns
-------
frame : DataFrame
"""
if not filter_observations:
raise TypeError('filter_observations=False not supported for '
'SparsePanel.to_long')
I, N, K = self.shape
counts = np.zeros(N * K, dtype=int)
d_values = {}
d_indexer = {}
for item in self.items:
frame = self[item]
values, major, minor = _stack_sparse_info(frame)
# values are stacked column-major
indexer = minor * N + major
counts.put(indexer, counts.take(indexer) + 1) # cuteness
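            # i.e. a scatter-add: for every (major, minor) cell touched by this
            # item, increment the count of items that observed that cell.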
d_values[item] = values
d_indexer[item] = indexer
# have full set of observations for each item
mask = counts == I
# for each item, take mask values at index locations for those sparse
# values, and use that to select values
values = np.column_stack([d_values[item][mask.take(d_indexer[item])]
for item in self.items])
inds, = mask.nonzero()
# still column major
major_labels = inds % N
minor_labels = inds // N
index = MultiIndex(levels=[self.major_axis, self.minor_axis],
labels=[major_labels, minor_labels],
verify_integrity=False)
df = DataFrame(values, index=index, columns=self.items)
return df.sortlevel(level=0)
to_long = deprecate('to_long', to_frame)
toLong = deprecate('toLong', to_frame)
def reindex(self, major=None, items=None, minor=None, major_axis=None,
minor_axis=None, copy=False):
"""
Conform / reshape panel axis labels to new input labels
Parameters
----------
major : array-like, default None
items : array-like, default None
minor : array-like, default None
copy : boolean, default False
Copy underlying SparseDataFrame objects
Returns
-------
reindexed : SparsePanel
"""
major = com._mut_exclusive(major=major, major_axis=major_axis)
minor = com._mut_exclusive(minor=minor, minor_axis=minor_axis)
if com._all_none(items, major, minor):
raise ValueError('Must specify at least one axis')
major = self.major_axis if major is None else major
minor = self.minor_axis if minor is None else minor
if items is not None:
new_frames = {}
for item in items:
if item in self._frames:
new_frames[item] = self._frames[item]
else:
raise NotImplementedError('Reindexing with new items not yet '
'supported')
else:
new_frames = self._frames
if copy:
new_frames = dict((k, v.copy()) for k, v in compat.iteritems(new_frames))
return SparsePanel(new_frames, items=items,
major_axis=major,
minor_axis=minor,
default_fill_value=self.default_fill_value,
default_kind=self.default_kind)
def _combine(self, other, func, axis=0):
if isinstance(other, DataFrame):
return self._combineFrame(other, func, axis=axis)
elif isinstance(other, Panel):
return self._combinePanel(other, func)
elif np.isscalar(other):
new_frames = dict((k, func(v, other))
for k, v in compat.iteritems(self))
return self._new_like(new_frames)
def _combineFrame(self, other, func, axis=0):
index, columns = self._get_plane_axes(axis)
axis = self._get_axis_number(axis)
other = other.reindex(index=index, columns=columns)
if axis == 0:
new_values = func(self.values, other.values)
elif axis == 1:
new_values = func(self.values.swapaxes(0, 1), other.values.T)
new_values = new_values.swapaxes(0, 1)
elif axis == 2:
new_values = func(self.values.swapaxes(0, 2), other.values)
new_values = new_values.swapaxes(0, 2)
# TODO: make faster!
new_frames = {}
for item, item_slice in zip(self.items, new_values):
old_frame = self[item]
ofv = old_frame.default_fill_value
ok = old_frame.default_kind
new_frames[item] = SparseDataFrame(item_slice,
index=self.major_axis,
columns=self.minor_axis,
default_fill_value=ofv,
default_kind=ok)
return self._new_like(new_frames)
def _new_like(self, new_frames):
return SparsePanel(new_frames, self.items, self.major_axis,
self.minor_axis,
default_fill_value=self.default_fill_value,
default_kind=self.default_kind)
def _combinePanel(self, other, func):
items = self.items.union(other.items)
major = self.major_axis.union(other.major_axis)
minor = self.minor_axis.union(other.minor_axis)
# could check that everything's the same size, but forget it
this = self.reindex(items=items, major=major, minor=minor)
other = other.reindex(items=items, major=major, minor=minor)
new_frames = {}
for item in items:
new_frames[item] = func(this[item], other[item])
if not isinstance(other, SparsePanel):
new_default_fill = self.default_fill_value
else:
# maybe unnecessary
new_default_fill = func(self.default_fill_value,
other.default_fill_value)
return SparsePanel(new_frames, items, major, minor,
default_fill_value=new_default_fill,
default_kind=self.default_kind)
def major_xs(self, key):
"""
Return slice of panel along major axis
Parameters
----------
key : object
Major axis label
Returns
-------
y : DataFrame
index -> minor axis, columns -> items
"""
slices = dict((k, v.xs(key)) for k, v in compat.iteritems(self))
return DataFrame(slices, index=self.minor_axis, columns=self.items)
def minor_xs(self, key):
"""
Return slice of panel along minor axis
Parameters
----------
key : object
Minor axis label
Returns
-------
y : SparseDataFrame
index -> major axis, columns -> items
"""
slices = dict((k, v[key]) for k, v in compat.iteritems(self))
return SparseDataFrame(slices, index=self.major_axis,
columns=self.items,
default_fill_value=self.default_fill_value,
default_kind=self.default_kind)
# TODO: allow SparsePanel to work with flex arithmetic.
# pow and mod only work for scalars for now
def pow(self, val, *args, **kwargs):
"""wrapper around `__pow__` (only works for scalar values)"""
return self.__pow__(val)
def mod(self, val, *args, **kwargs):
"""wrapper around `__mod__` (only works for scalar values"""
return self.__mod__(val)
# Sparse objects opt out of numexpr
SparsePanel._add_aggregate_operations(use_numexpr=False)
ops.add_special_arithmetic_methods(SparsePanel, use_numexpr=False, **ops.panel_special_funcs)
SparseWidePanel = SparsePanel
def _convert_frames(frames, index, columns, fill_value=np.nan, kind='block'):
from pandas.core.panel import _get_combined_index
output = {}
for item, df in compat.iteritems(frames):
if not isinstance(df, SparseDataFrame):
df = SparseDataFrame(df, default_kind=kind,
default_fill_value=fill_value)
output[item] = df
if index is None:
all_indexes = [df.index for df in output.values()]
index = _get_combined_index(all_indexes)
if columns is None:
all_columns = [df.columns for df in output.values()]
columns = _get_combined_index(all_columns)
index = _ensure_index(index)
columns = _ensure_index(columns)
for item, df in compat.iteritems(output):
if not (df.index.equals(index) and df.columns.equals(columns)):
output[item] = df.reindex(index=index, columns=columns)
return output, index, columns
def _stack_sparse_info(frame):
lengths = [s.sp_index.npoints for _, s in compat.iteritems(frame)]
# this is pretty fast
minor_labels = np.repeat(np.arange(len(frame.columns)), lengths)
inds_to_concat = []
vals_to_concat = []
for col in frame.columns:
series = frame[col]
if not np.isnan(series.fill_value):
raise TypeError('This routine assumes NaN fill value')
int_index = series.sp_index.to_int_index()
inds_to_concat.append(int_index.indices)
vals_to_concat.append(series.sp_values)
major_labels = np.concatenate(inds_to_concat)
sparse_values = np.concatenate(vals_to_concat)
return sparse_values, major_labels, minor_labels
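# _stack_sparse_info thus returns the concatenated stored values of the frame
# together with parallel row (major) and column (minor) label arrays; this is
# a COO-style flattening that to_frame() reassembles into a MultiIndexed
# DataFrame.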
| gpl-3.0 |
duonys/deep-learning-chainer | dlchainer/SdA.py | 1 | 5225 | #-*- coding: utf-8 -*-
from abc import ABCMeta, abstractmethod
import copy
import numpy as np
from sklearn.base import BaseEstimator, ClassifierMixin, RegressorMixin
from sklearn.externals.six import with_metaclass
from chainer import Variable, FunctionSet, optimizers, cuda
import chainer.functions as F
from .dA import dA
from . import utils
class SdAMixin(with_metaclass(ABCMeta, BaseEstimator)):
"""
Stacked Denoising Autoencoder
References:
http://deeplearning.net/tutorial/SdA.html
https://github.com/pfnet/chainer/blob/master/examples/mnist/train_mnist.py
"""
def __init__(self, n_input, n_hiddens, n_output, noise_levels=None, dropout_ratios=None, do_pretrain=True,
batch_size=100, n_epoch_pretrain=20, n_epoch_finetune=20, optimizer=optimizers.Adam(),
activation_func=F.relu, verbose=False, gpu=-1):
self.n_input = n_input
self.n_hiddens = n_hiddens
self.n_output = n_output
self.do_pretrain = do_pretrain
self.batch_size = batch_size
self.n_epoch_pretrain = n_epoch_pretrain
self.n_epoch_finetune = n_epoch_finetune
self.optimizer = optimizer
self.dAs = \
[dA(self.n_input, self.n_hiddens[0],
self._check_var(noise_levels, 0), self._check_var(dropout_ratios, 0), self.batch_size,
self.n_epoch_pretrain, copy.deepcopy(optimizer),
activation_func, verbose, gpu)] + \
[dA(self.n_hiddens[i], self.n_hiddens[i + 1],
self._check_var(noise_levels, i + 1), self._check_var(dropout_ratios, i + 1), self.batch_size,
self.n_epoch_pretrain, copy.deepcopy(optimizer),
activation_func, verbose, gpu) for i in range(len(n_hiddens) - 1)]
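        # The list above builds one denoising autoencoder per pair of
        # consecutive layer sizes (n_input -> n_hiddens[0] -> ... ->
        # n_hiddens[-1]); the final Linear output layer is only created later,
        # during fine-tuning in _finetune.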
self.verbose = verbose
self.gpu = gpu
def _check_var(self, var, index, default_val=0.0):
return var[index] if var is not None else default_val
def fit(self, X, y):
if self.do_pretrain:
self._pretrain(X)
self._finetune(X, y)
def _pretrain(self, X):
for layer, dA in enumerate(self.dAs):
utils.disp('*** pretrain layer: {} ***'.format(layer + 1), self.verbose)
if layer == 0:
layer_input = X
else:
layer_input = self.dAs[layer - 1].encode(Variable(layer_input), train=False).data
dA.fit(layer_input)
def _finetune(self, X, y):
utils.disp('*** finetune ***', self.verbose)
# construct model and setup optimizer
params = {'l{}'.format(layer + 1): dA.encoder for layer, dA in enumerate(self.dAs)}
params.update({'l{}'.format(len(self.dAs) + 1): F.Linear(self.dAs[-1].n_hidden, self.n_output)})
self.model = FunctionSet(**params)
self.optimizer.setup(self.model)
if self.gpu >= 0:
cuda.get_device(self.gpu).use()
self.model.to_gpu()
xp = cuda.cupy if self.gpu >= 0 else np
n = len(X)
for epoch in range(self.n_epoch_finetune):
utils.disp('epoch: {}'.format(epoch + 1), self.verbose)
perm = np.random.permutation(n)
sum_loss = 0
for i in range(0, n, self.batch_size):
X_batch = xp.asarray(X[perm[i: i + self.batch_size]])
y_batch = xp.asarray(y[perm[i: i + self.batch_size]])
self.optimizer.zero_grads()
y_var = self._forward(X_batch)
loss = self._loss_func(y_var, Variable(y_batch))
loss.backward()
self.optimizer.update()
sum_loss += float(loss.data) * len(X_batch)
utils.disp('fine tune mean loss={}'.format(sum_loss / n), self.verbose)
def _forward(self, X, train=True):
X_var = Variable(X)
output = X_var
for dA in self.dAs:
output = dA.encode(output, train)
y_var = self.model['l{}'.format(len(self.dAs) + 1)](output)
return y_var
@abstractmethod
def _loss_func(self, y_var, t_var):
pass
class SdAClassifier(SdAMixin, ClassifierMixin):
"""
References:
http://scikit-learn.org/stable/developers/#rolling-your-own-estimator
"""
def _loss_func(self, y_var, t_var):
return F.softmax_cross_entropy(y_var, t_var)
def fit(self, X, y):
assert X.dtype == np.float32 and y.dtype == np.int32
super().fit(X, y)
def transform(self, X):
return self._forward(X, train=False).data
def predict(self, X):
return np.apply_along_axis(lambda x: np.argmax(x), arr=self.transform(X), axis=1)
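# Hypothetical usage sketch (layer sizes and data shapes are illustrative
# only):
#     clf = SdAClassifier(n_input=784, n_hiddens=[500, 250], n_output=10)
#     clf.fit(X_train.astype(np.float32), y_train.astype(np.int32))
#     y_pred = clf.predict(X_test.astype(np.float32))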
class SdARegressor(SdAMixin, RegressorMixin):
"""
References:
http://scikit-learn.org/stable/developers/#rolling-your-own-estimator
"""
def _loss_func(self, y_var, t_var):
y_var = F.reshape(y_var, [len(y_var)])
return F.mean_squared_error(y_var, t_var)
def fit(self, X, y):
assert X.dtype == np.float32 and y.dtype == np.float32
super().fit(X, y)
def transform(self, X):
return self._forward(X, train=False).data
def predict(self, X):
return self.transform(X)
| mit |
robbymeals/scikit-learn | sklearn/cluster/tests/test_dbscan.py | 114 | 11393 | """
Tests for DBSCAN clustering algorithm
"""
import pickle
import numpy as np
from scipy.spatial import distance
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_not_in
from sklearn.cluster.dbscan_ import DBSCAN
from sklearn.cluster.dbscan_ import dbscan
from sklearn.cluster.tests.common import generate_clustered_data
from sklearn.metrics.pairwise import pairwise_distances
n_clusters = 3
X = generate_clustered_data(n_clusters=n_clusters)
def test_dbscan_similarity():
# Tests the DBSCAN algorithm with a similarity array.
# Parameters chosen specifically for this task.
eps = 0.15
min_samples = 10
# Compute similarities
D = distance.squareform(distance.pdist(X))
D /= np.max(D)
# Compute DBSCAN
core_samples, labels = dbscan(D, metric="precomputed", eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - (1 if -1 in labels else 0)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric="precomputed", eps=eps, min_samples=min_samples)
labels = db.fit(D).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_feature():
# Tests the DBSCAN algorithm with a feature vector array.
# Parameters chosen specifically for this task.
# Different eps to other test, because distance is not normalised.
eps = 0.8
min_samples = 10
metric = 'euclidean'
# Compute DBSCAN
# parameters chosen for task
core_samples, labels = dbscan(X, metric=metric, eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples)
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_sparse():
core_sparse, labels_sparse = dbscan(sparse.lil_matrix(X), eps=.8,
min_samples=10)
core_dense, labels_dense = dbscan(X, eps=.8, min_samples=10)
assert_array_equal(core_dense, core_sparse)
assert_array_equal(labels_dense, labels_sparse)
def test_dbscan_no_core_samples():
rng = np.random.RandomState(0)
X = rng.rand(40, 10)
X[X < .8] = 0
for X_ in [X, sparse.csr_matrix(X)]:
db = DBSCAN(min_samples=6).fit(X_)
assert_array_equal(db.components_, np.empty((0, X_.shape[1])))
assert_array_equal(db.labels_, -1)
assert_equal(db.core_sample_indices_.shape, (0,))
def test_dbscan_callable():
# Tests the DBSCAN algorithm with a callable metric.
# Parameters chosen specifically for this task.
# Different eps to other test, because distance is not normalised.
eps = 0.8
min_samples = 10
# metric is the function reference, not the string key.
metric = distance.euclidean
# Compute DBSCAN
# parameters chosen for task
core_samples, labels = dbscan(X, metric=metric, eps=eps,
min_samples=min_samples,
algorithm='ball_tree')
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples,
algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_balltree():
# Tests the DBSCAN algorithm with balltree for neighbor calculation.
eps = 0.8
min_samples = 10
D = pairwise_distances(X)
core_samples, labels = dbscan(D, metric="precomputed", eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm='kd_tree')
labels = db.fit(X).labels_
n_clusters_3 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_3, n_clusters)
db = DBSCAN(p=1.0, eps=eps, min_samples=min_samples, algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_4 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_4, n_clusters)
db = DBSCAN(leaf_size=20, eps=eps, min_samples=min_samples,
algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_5 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_5, n_clusters)
def test_input_validation():
# DBSCAN.fit should accept a list of lists.
X = [[1., 2.], [3., 4.]]
DBSCAN().fit(X) # must not raise exception
def test_dbscan_badargs():
# Test bad argument values: these should all raise ValueErrors
assert_raises(ValueError,
dbscan,
X, eps=-1.0)
assert_raises(ValueError,
dbscan,
X, algorithm='blah')
assert_raises(ValueError,
dbscan,
X, metric='blah')
assert_raises(ValueError,
dbscan,
X, leaf_size=-1)
assert_raises(ValueError,
dbscan,
X, p=-1)
def test_pickle():
obj = DBSCAN()
s = pickle.dumps(obj)
assert_equal(type(pickle.loads(s)), obj.__class__)
def test_boundaries():
# ensure min_samples is inclusive of core point
core, _ = dbscan([[0], [1]], eps=2, min_samples=2)
assert_in(0, core)
# ensure eps is inclusive of circumference
core, _ = dbscan([[0], [1], [1]], eps=1, min_samples=2)
assert_in(0, core)
core, _ = dbscan([[0], [1], [1]], eps=.99, min_samples=2)
assert_not_in(0, core)
def test_weighted_dbscan():
# ensure sample_weight is validated
assert_raises(ValueError, dbscan, [[0], [1]], sample_weight=[2])
assert_raises(ValueError, dbscan, [[0], [1]], sample_weight=[2, 3, 4])
# ensure sample_weight has an effect
assert_array_equal([], dbscan([[0], [1]], sample_weight=None,
min_samples=6)[0])
assert_array_equal([], dbscan([[0], [1]], sample_weight=[5, 5],
min_samples=6)[0])
assert_array_equal([0], dbscan([[0], [1]], sample_weight=[6, 5],
min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[6, 6],
min_samples=6)[0])
# points within eps of each other:
assert_array_equal([0, 1], dbscan([[0], [1]], eps=1.5,
sample_weight=[5, 1], min_samples=6)[0])
# and effect of non-positive and non-integer sample_weight:
assert_array_equal([], dbscan([[0], [1]], sample_weight=[5, 0],
eps=1.5, min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[5.9, 0.1],
eps=1.5, min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[6, 0],
eps=1.5, min_samples=6)[0])
assert_array_equal([], dbscan([[0], [1]], sample_weight=[6, -1],
eps=1.5, min_samples=6)[0])
# for non-negative sample_weight, cores should be identical to repetition
rng = np.random.RandomState(42)
sample_weight = rng.randint(0, 5, X.shape[0])
core1, label1 = dbscan(X, sample_weight=sample_weight)
assert_equal(len(label1), len(X))
X_repeated = np.repeat(X, sample_weight, axis=0)
core_repeated, label_repeated = dbscan(X_repeated)
core_repeated_mask = np.zeros(X_repeated.shape[0], dtype=bool)
core_repeated_mask[core_repeated] = True
core_mask = np.zeros(X.shape[0], dtype=bool)
core_mask[core1] = True
assert_array_equal(np.repeat(core_mask, sample_weight), core_repeated_mask)
# sample_weight should work with precomputed distance matrix
D = pairwise_distances(X)
core3, label3 = dbscan(D, sample_weight=sample_weight,
metric='precomputed')
assert_array_equal(core1, core3)
assert_array_equal(label1, label3)
# sample_weight should work with estimator
est = DBSCAN().fit(X, sample_weight=sample_weight)
core4 = est.core_sample_indices_
label4 = est.labels_
assert_array_equal(core1, core4)
assert_array_equal(label1, label4)
est = DBSCAN()
label5 = est.fit_predict(X, sample_weight=sample_weight)
core5 = est.core_sample_indices_
assert_array_equal(core1, core5)
assert_array_equal(label1, label5)
assert_array_equal(label1, est.labels_)
def test_dbscan_core_samples_toy():
X = [[0], [2], [3], [4], [6], [8], [10]]
n_samples = len(X)
for algorithm in ['brute', 'kd_tree', 'ball_tree']:
# Degenerate case: every sample is a core sample, either with its own
# cluster or including other close core samples.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=1)
assert_array_equal(core_samples, np.arange(n_samples))
assert_array_equal(labels, [0, 1, 1, 1, 2, 3, 4])
# With eps=1 and min_samples=2 only the 3 samples from the denser area
# are core samples. All other points are isolated and considered noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=2)
assert_array_equal(core_samples, [1, 2, 3])
assert_array_equal(labels, [-1, 0, 0, 0, -1, -1, -1])
# Only the sample in the middle of the dense area is core. Its two
# neighbors are edge samples. Remaining samples are noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=3)
assert_array_equal(core_samples, [2])
assert_array_equal(labels, [-1, 0, 0, 0, -1, -1, -1])
# It's no longer possible to extract core samples with eps=1:
# everything is noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=4)
assert_array_equal(core_samples, [])
assert_array_equal(labels, -np.ones(n_samples))
def test_dbscan_precomputed_metric_with_degenerate_input_arrays():
# see https://github.com/scikit-learn/scikit-learn/issues/4641 for
# more details
X = np.ones((10, 2))
labels = DBSCAN(eps=0.5, metric='precomputed').fit(X).labels_
assert_equal(len(set(labels)), 1)
X = np.zeros((10, 2))
labels = DBSCAN(eps=0.5, metric='precomputed').fit(X).labels_
assert_equal(len(set(labels)), 1)
| bsd-3-clause |
EclipseXuLu/DataHouse | DataHouse/crawler/university_spider.py | 1 | 3941 | import requests
from bs4 import BeautifulSoup
from lxml import etree
import pandas as pd
from io import StringIO, BytesIO
university_list = []
class University():
def __init__(self, name='', is_985=False, is_211=False, has_institute=False, location='', orgnization='',
education_level='', education_type='', university_type=''):
self.name = name
self.is_985 = is_985
self.is_211 = is_211
self.has_institute = has_institute
self.location = location
self.orgnization = orgnization
self.education_level = education_level
self.education_type = education_type
self.university_type = university_type
def __str__(self):
return "{ " + str(self.name) + " ;" + str(self.is_985) + " ;" + str(self.is_211) + " ;" + str(
self.has_institute) + " ;" + self.location + " ;" + self.orgnization + " ;" + self.education_level + " ;" \
+ self.education_type + " ;" + self.university_type + " }"
def crawl(page_url):
headers = {
'Host': 'gaokao.chsi.com.cn',
'Upgrade-Insecure-Requests': '1',
'Referer': 'http://gaokao.chsi.com.cn/sch/search--ss-on,searchType-1,option-qg,start-0.dhtml',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/59.0.3071.115 Safari/537.36'
}
response = requests.get(page_url, timeout=20, headers=headers)
if response.status_code == 200:
html_raw = response.text
soup = BeautifulSoup(html_raw, 'html5lib')
parser = etree.HTMLParser()
tree = etree.parse(StringIO(html_raw), parser)
for tr in soup.find_all(bgcolor="#E1E1E1")[0].find_all('tr', attrs={'bgcolor': '#FFFFFF'}):
try:
name = tr.td.a.text.strip() # university name
detail_url = 'http://gaokao.chsi.com.cn' + tr.td.a['href'] # detail information page
is_985 = True if tr.td.find(class_='a211985 span985') is not None else False # 985
is_211 = True if tr.td.find(class_='a211985 span211') is not None else False # 211
has_institute = True if tr.td.find(class_='a211985 spanyan') is not None else False # graduate school
location = tr.find_all('td')[1].get_text().strip() # school address
orgnization = tr.find_all('td')[2].get_text().strip() # affiliated organization
education_level = tr.find_all('td')[3].get_text().strip() # education level
education_type = tr.find_all('td')[4].get_text().strip() # type of schooling
university_type = tr.find_all('td')[5].get_text().strip() # institution category
university = University(name, is_985, is_211, has_institute, location, orgnization, education_level,
education_type, university_type)
print(university)
university_list.append([name, is_985, is_211, has_institute, location, orgnization, education_level,
education_type, university_type])
except:
pass
else:
print('Error!!')
def output(some_list, filepath):
col = [
u'院校名称',
u'985',
u'211',
u'研究生院',
u'所在地',
u'院校隶属',
u'学历层次',
u'办学类型',
u'院校类型']
df = pd.DataFrame(some_list, columns=col)
df.to_excel(filepath, '大学', index=False)
if __name__ == '__main__':
page_urllist = ['http://gaokao.chsi.com.cn/sch/search--ss-on,searchType-1,option-qg,start-%d.dhtml'
% _ for _ in range(0, 2660, 20)]
# crawl('http://gaokao.chsi.com.cn/sch/search--ss-on,searchType-1,option-qg,start-0.dhtml')
for page_url in page_urllist:
crawl(page_url)
output(university_list, './大学.xlsx')
| mit |
reinaldomaslim/Singaboat_RobotX2016 | robotx_nav/nodes/task2_toplevel_try.py | 3 | 4644 | #!/usr/bin/env python
import multiprocessing as mp
import rospy
from visualization_msgs.msg import MarkerArray, Marker
from geometry_msgs.msg import Point, Quaternion
import numpy as np
from sklearn.cluster import KMeans, DBSCAN
from sklearn import svm
from move_base_loiter import Loiter
from move_base_waypoint import MoveTo
from color_totem_planner import ColorTotemPlanner
# import tf
# from math import pi, cos, sin
# from move_base_util import MoveBaseUtil
import time
def loiter_worker(v_dict_q, q):
""" go to gps point """
p = mp.current_process()
print p.name, p.pid, 'Starting'
loiter_obj = Loiter("loiter", is_newnode=True, target=None,
radius=2.5, polygon=4, is_ccw=True, is_relative=False)
visited_dict = {"red": False, "green": False, "blue": False, "yellow": False}
# spawn the gps coordinate, one time only
while True:
cid, target, radius, polygon, is_ccw = q.get()
print "from planner", target
if target[2] < -1e6: # unless send a -inf z by waypoint pub: terminating
break
else:
loiter_obj.respawn(target, polygon, radius, is_ccw)
visited_dict[cid] = True
v_dict_q.put(visited_dict) # dont hold moveto
print p.name, p.pid, 'Exiting'
def moveto_worker(q, hold_move_q):
""" constant heading to pass the gate,
need roi_target_identifier to give/update waypoint """
p = mp.current_process()
print p.name, p.pid, 'Starting'
# get the waypoints, loop wait for updates
moveto_obj = MoveTo("moveto", is_newnode=True, target=None, is_relative=False)
while True:
target = q.get()
print target
if target[2] < -1e6: # unless send a -inf z by waypoint pub: terminating
break
else:
moveto_obj.respawn(target)
hold_move_q.put(False)
print p.name, p.pid, 'Exiting'
# not required
# def cancel_goal_worker(conn, repetition):
# """ asynchronously cancel goals"""
# p = mp.current_process()
# print p.name, p.pid, 'Starting'
# while True:
# command = conn.recv()
# print 'child: ', command
# if command == 'cancel': # cancel goal
# print 'doing cancelling'
# force_cancel = ForceCancel(nodename="forcecancel", repetition=repetition)
# conn.send('cancelled')
# elif command == 'exit': # complete
# print "cancel goal complete, exit"
# break
# else: # conn.recv() == 0, idle, wait for command
# pass
# time.sleep()
#
# print p.name, p.pid, 'Exiting'
def planner_worker(v_dict_q, loiter_q, moveto_q, hold_move_q, cancel_conn):
""" plan for totems """
p = mp.current_process()
print p.name, p.pid, 'Starting'
planner_obj = ColorTotemPlanner("color_planner")
while True:
if not v_dict_q.empty(): # get update from loiter on visited
visited_dict = v_dict_q.get()
planner_obj.update_visit(visited_dict) # update visited
if not hold_move_q.empty(): # get update from moveto on success
hol = hold_move_q.get() # free moveto to be on hold after moveto finished
planner_obj.update_hold_moveto(hol)
isready, loiter_target, moveto_target, allvisited, hold_moveto = planner_obj.planner() # try to find onhold loiter target
# print isready
if allvisited: # all visited, kill all worker and exit
poison_pill = [0, 0, -float("inf")]
loiter_q.put([None, poison_pill, None, None, None])
# need an exit target
if moveto_target != []:
moveto_q.put(moveto_target)
# finally kill moveto
time.sleep(1)
moveto_q.put(poison_pill)
break
elif isready and not allvisited and loiter_target != []: # still have pending loiter points
print "loiter called"
loiter_q.put(loiter_target)
elif not isready and not hold_moveto and not allvisited and moveto_target != []: # need to explore for valid loiter points
print "moveto called"
moveto_q.put(moveto_target)
print p.name, p.pid, 'Exiting'
if __name__ == "__main__":
moveto_q = mp.Queue()
hold_moveto_q = mp.Queue()
cancel_p_conn, cancel_c_conn = mp.Pipe()
# loiter_p_conn, loiter_c_conn = mp.Pipe()
loiter_q = mp.Queue(1)
v_dict_q = mp.Queue(1)
# manager = mp.Manager()
# visited_dict = manager.dict()
# visited_dict = {"red": False, "green": False, "blue": False, "yellow": False}
loiter_mp = mp.Process(name="ltr", target=loiter_worker, args=(v_dict_q, loiter_q,))
moveto_mp = mp.Process(name="mvt", target=moveto_worker, args=(moveto_q, hold_moveto_q,))
# cancel_goal_mp = mp.Process(name="ccg", target=cancel_goal_worker, args=(cancel_p_conn, 5,))
planner_mp = mp.Process(name="pln", target=planner_worker, args=(v_dict_q, loiter_q, moveto_q, hold_moveto_q, cancel_c_conn,))
loiter_mp.start()
moveto_mp.start()
# cancel_goal_mp.start()
planner_mp.start()
# close
loiter_mp.join()
moveto_mp.join()
planner_mp.join()
| gpl-3.0 |
natsheh/semantic_query | api.py | 1 | 4747 | # -*- coding: utf-8 -*-
#
# This file is part of semantic_query.
# Copyright (C) 2016 CIAPPLE.
#
# This is a free software; you can redistribute it and/or modify it
# under the terms of the Revised BSD License; see LICENSE file for
# more details.
# Semantic Query API
# Author: Hussein AL-NATSHEH <[email protected]>
# Affiliation: CIAPPLE, Jordan
import os, argparse, pickle, json
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import TruncatedSVD
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.pipeline import Pipeline
from collections import OrderedDict
from itertools import islice
from bs4 import BeautifulSoup
from flask import Flask, request, make_response
from flask_httpauth import HTTPBasicAuth
from flask_restful import Resource, Api, reqparse
def top(n, sorted_results):
return list(islice(sorted_results.iteritems(), n))
def query_by_text(transformer, transformed, documents, index, query_text, url, n_results=10):
query = transformer.transform(query_text)
sims = cosine_similarity(query.reshape(1,-1), transformed)
scores = sims[0][:].reshape(-1,1)
results= dict()
for i in range(len(transformed)):
results[i] = scores[i]
sorted_results = OrderedDict(sorted(results.items(), key=lambda k: k[1], reverse=True))
topn = top(n_results, sorted_results)
results = np.array(range(n_results), dtype=np.object)
for rank, (answer, score) in enumerate(topn):
title = documents[answer].split('\n__')[0]
title_t = title.replace (" ", "_")
doc_id = str(index[answer])
reference = url + title_t
results[rank] = {'reference': reference, 'score': str(score), 'doc_id': doc_id, 'title': title, 'answer': documents[answer]}
return results.tolist()
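# A hedged usage sketch of the REST endpoint defined below (added for
# illustration, not part of the original service). The host, port and example
# question are assumptions; the app is started in __main__ on Flask's default
# port with host 0.0.0.0.
#
# import requests
# resp = requests.get('http://localhost:5000/Query/',
#                     params={'question': 'example question', 'limit': 5})
# print(resp.json()['results'])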
class Query(Resource):
def post(self):
try:
parser = reqparse.RequestParser()
parser.add_argument('question', type=str, required=True, help='Query text')
parser.add_argument('userId', type=str, required=False, help='User ID')
parser.add_argument('questionId', type=str, required=False, help='Question ID')
parser.add_argument('limit', type=int, required=False, help='Size of the returned results')
args = parser.parse_args()
q = request.args.get('question')
question = BeautifulSoup(q, "lxml").p.contents
try:
size = request.args.get('limit')
n_results = int(size)
if n_results > 100:
n_results = 100
except:
n_results = 3
user_id = request.args.get('userId')
question_id = request.args.get('questionId')
response = {}
response['userId'] = user_id
response['questionId'] = question_id
response['limit'] = n_results
response['interesteId'] = 'future_feature'
response['results'] = query_by_text(transformer, transformed, documents, index, question, url, n_results=n_results)
if str(type(question)) == "<type 'list'>":
question = question[0]
response['question'] = question
resp = make_response()
resp.headers['Access-Control-Allow-Origin'] = '*'
resp.headers['content-type'] = 'application/json'
resp.data = response
return response
except Exception as e:
return {'error': str(e)}
def get(self):
try:
q = request.args.get('question')
question = BeautifulSoup(q, "lxml").p.contents
try:
user_id = request.args.get('userId')
except:
user_id = 'uid1'
try:
question_id = request.args.get('questionId')
except:
question_id = 'qid1'
try:
size = request.args.get('limit')
n_results = int(size)
if n_results > 100:
n_results = 100
except:
n_results = 3
response = dict()
response['userId'] = user_id
response['questionId'] = question_id
response['limit'] = n_results
response['interesteId'] = 'future_feature'
results = query_by_text(transformer, transformed, documents, index, question, url, n_results=n_results)
response['results'] = results
if str(type(question)) == "<type 'list'>":
question = question[0]
response['question'] = question
return response
except Exception as e:
return {'error': str(e)}
app = Flask(__name__, static_url_path="")
auth = HTTPBasicAuth()
api = Api(app)
api.add_resource(Query, '/Query/')
if __name__ == '__main__':
transformed_file = 'transformed.pickle'
docs_file = 'documents.pickle'
index_file = 'index.pickle'
transformer_file = 'transformer.pickle'
transformed = np.load(transformed_file)
index = pickle.load(open(index_file,'rb'))
documents = pickle.load(open(docs_file,'rb'))
print 'number of documents :', len(index)
transformer = pickle.load(open(transformer_file,'rb'))
url_config = json.load(open('url_config.json', 'r'))
url = url_config['url']
print 'Ready to call!!'
app.run(host='0.0.0.0', threaded=True)
| bsd-3-clause |
giorgiop/scikit-learn | examples/linear_model/plot_sgd_loss_functions.py | 73 | 1232 | """
==========================
SGD: convex loss functions
==========================
A plot that compares the various convex loss functions supported by
:class:`sklearn.linear_model.SGDClassifier` .
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
def modified_huber_loss(y_true, y_pred):
z = y_pred * y_true
loss = -4 * z
loss[z >= -1] = (1 - z[z >= -1]) ** 2
loss[z >= 1.] = 0
return loss
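# Added illustrative check (a sketch, not part of the original example): for
# margins z = y_true * y_pred of 2, 0 and -2, the helper above returns
# 0 (confident and correct), (1 - 0) ** 2 = 1, and -4 * (-2) = 8 respectively:
#
# modified_huber_loss(np.array([1., 1., 1.]), np.array([2., 0., -2.]))
# # -> array([0., 1., 8.])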
xmin, xmax = -4, 4
xx = np.linspace(xmin, xmax, 100)
lw = 2
plt.plot([xmin, 0, 0, xmax], [1, 1, 0, 0], color='gold', lw=lw,
label="Zero-one loss")
plt.plot(xx, np.where(xx < 1, 1 - xx, 0), color='teal', lw=lw,
label="Hinge loss")
plt.plot(xx, -np.minimum(xx, 0), color='yellowgreen', lw=lw,
label="Perceptron loss")
plt.plot(xx, np.log2(1 + np.exp(-xx)), color='cornflowerblue', lw=lw,
label="Log loss")
plt.plot(xx, np.where(xx < 1, 1 - xx, 0) ** 2, color='orange', lw=lw,
label="Squared hinge loss")
plt.plot(xx, modified_huber_loss(xx, 1), color='darkorchid', lw=lw,
linestyle='--', label="Modified Huber loss")
plt.ylim((0, 8))
plt.legend(loc="upper right")
plt.xlabel(r"Decision function $f(x)$")
plt.ylabel("$L(y, f(x))$")
plt.show()
| bsd-3-clause |
trungnt13/scikit-learn | sklearn/datasets/lfw.py | 38 | 19042 | """Loader for the Labeled Faces in the Wild (LFW) dataset
This dataset is a collection of JPEG pictures of famous people collected
over the internet, all details are available on the official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. The typical task is called
Face Verification: given a pair of two pictures, a binary classifier
must predict whether the two images are from the same person.
An alternative task, Face Recognition or Face Identification is:
given the picture of the face of an unknown person, identify the name
of the person by referring to a gallery of previously seen pictures of
identified persons.
Both Face Verification and Face Recognition are tasks that are typically
performed on the output of a model trained to perform Face Detection. The
most popular model for Face Detection is called Viola-Jones and is
implemented in the OpenCV library. The LFW faces were extracted by this face
detector from various online websites.
"""
# Copyright (c) 2011 Olivier Grisel <[email protected]>
# License: BSD 3 clause
from os import listdir, makedirs, remove
from os.path import join, exists, isdir
from sklearn.utils import deprecated
import logging
import numpy as np
try:
import urllib.request as urllib # for backwards compatibility
except ImportError:
import urllib
from .base import get_data_home, Bunch
from ..externals.joblib import Memory
from ..externals.six import b
logger = logging.getLogger(__name__)
BASE_URL = "http://vis-www.cs.umass.edu/lfw/"
ARCHIVE_NAME = "lfw.tgz"
FUNNELED_ARCHIVE_NAME = "lfw-funneled.tgz"
TARGET_FILENAMES = [
'pairsDevTrain.txt',
'pairsDevTest.txt',
'pairs.txt',
]
def scale_face(face):
"""Scale back to 0-1 range in case of normalization for plotting"""
scaled = face - face.min()
scaled /= scaled.max()
return scaled
#
# Common private utilities for data fetching from the original LFW website
# local disk caching, and image decoding.
#
def check_fetch_lfw(data_home=None, funneled=True, download_if_missing=True):
"""Helper function to download any missing LFW data"""
data_home = get_data_home(data_home=data_home)
lfw_home = join(data_home, "lfw_home")
if funneled:
archive_path = join(lfw_home, FUNNELED_ARCHIVE_NAME)
data_folder_path = join(lfw_home, "lfw_funneled")
archive_url = BASE_URL + FUNNELED_ARCHIVE_NAME
else:
archive_path = join(lfw_home, ARCHIVE_NAME)
data_folder_path = join(lfw_home, "lfw")
archive_url = BASE_URL + ARCHIVE_NAME
if not exists(lfw_home):
makedirs(lfw_home)
for target_filename in TARGET_FILENAMES:
target_filepath = join(lfw_home, target_filename)
if not exists(target_filepath):
if download_if_missing:
url = BASE_URL + target_filename
logger.warn("Downloading LFW metadata: %s", url)
urllib.urlretrieve(url, target_filepath)
else:
raise IOError("%s is missing" % target_filepath)
if not exists(data_folder_path):
if not exists(archive_path):
if download_if_missing:
logger.warn("Downloading LFW data (~200MB): %s", archive_url)
urllib.urlretrieve(archive_url, archive_path)
else:
raise IOError("%s is missing" % target_filepath)
import tarfile
logger.info("Decompressing the data archive to %s", data_folder_path)
tarfile.open(archive_path, "r:gz").extractall(path=lfw_home)
remove(archive_path)
return lfw_home, data_folder_path
def _load_imgs(file_paths, slice_, color, resize):
"""Internally used to load images"""
# Try to import imread and imresize from PIL. We do this here to prevent
# the whole sklearn.datasets module from depending on PIL.
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
from scipy.misc import imresize
except ImportError:
raise ImportError("The Python Imaging Library (PIL)"
" is required to load data from jpeg files")
# compute the portion of the images to load to respect the slice_ parameter
# given by the caller
default_slice = (slice(0, 250), slice(0, 250))
if slice_ is None:
slice_ = default_slice
else:
slice_ = tuple(s or ds for s, ds in zip(slice_, default_slice))
h_slice, w_slice = slice_
h = (h_slice.stop - h_slice.start) // (h_slice.step or 1)
w = (w_slice.stop - w_slice.start) // (w_slice.step or 1)
if resize is not None:
resize = float(resize)
h = int(resize * h)
w = int(resize * w)
# allocate some contiguous memory to host the decoded image slices
n_faces = len(file_paths)
if not color:
faces = np.zeros((n_faces, h, w), dtype=np.float32)
else:
faces = np.zeros((n_faces, h, w, 3), dtype=np.float32)
# iterate over the collected file path to load the jpeg files as numpy
# arrays
for i, file_path in enumerate(file_paths):
if i % 1000 == 0:
logger.info("Loading face #%05d / %05d", i + 1, n_faces)
face = np.asarray(imread(file_path)[slice_], dtype=np.float32)
face /= 255.0 # scale uint8 coded colors to the [0.0, 1.0] floats
if resize is not None:
face = imresize(face, resize)
if not color:
# average the color channels to compute a gray levels
# representation
face = face.mean(axis=2)
faces[i, ...] = face
return faces
#
# Task #1: Face Identification on picture with names
#
def _fetch_lfw_people(data_folder_path, slice_=None, color=False, resize=None,
min_faces_per_person=0):
"""Perform the actual data loading for the lfw people dataset
This operation is meant to be cached by a joblib wrapper.
"""
# scan the data folder content to retain people with more than
# `min_faces_per_person` face pictures
person_names, file_paths = [], []
for person_name in sorted(listdir(data_folder_path)):
folder_path = join(data_folder_path, person_name)
if not isdir(folder_path):
continue
paths = [join(folder_path, f) for f in listdir(folder_path)]
n_pictures = len(paths)
if n_pictures >= min_faces_per_person:
person_name = person_name.replace('_', ' ')
person_names.extend([person_name] * n_pictures)
file_paths.extend(paths)
n_faces = len(file_paths)
if n_faces == 0:
raise ValueError("min_faces_per_person=%d is too restrictive" %
min_faces_per_person)
target_names = np.unique(person_names)
target = np.searchsorted(target_names, person_names)
faces = _load_imgs(file_paths, slice_, color, resize)
# shuffle the faces with a deterministic RNG scheme to avoid having
# all faces of the same person in a row, as it would break some
# cross validation and learning algorithms such as SGD and online
# k-means that make an IID assumption
indices = np.arange(n_faces)
np.random.RandomState(42).shuffle(indices)
faces, target = faces[indices], target[indices]
return faces, target, target_names
def fetch_lfw_people(data_home=None, funneled=True, resize=0.5,
min_faces_per_person=0, color=False,
slice_=(slice(70, 195), slice(78, 172)),
download_if_missing=True):
"""Loader for the Labeled Faces in the Wild (LFW) people dataset
This dataset is a collection of JPEG pictures of famous people
collected on the internet, all details are available on the
official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. Each pixel of each channel
(color in RGB) is encoded by a float in range 0.0 - 1.0.
The task is called Face Recognition (or Identification): given the
picture of a face, find the name of the person given a training set
(gallery).
The original images are 250 x 250 pixels, but the default slice and resize
arguments reduce them to 62 x 47 pixels.
Parameters
----------
data_home : optional, default: None
Specify another download and cache folder for the datasets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
funneled : boolean, optional, default: True
Download and use the funneled variant of the dataset.
resize : float, optional, default 0.5
Ratio used to resize the each face picture.
min_faces_per_person : int, optional, default None
The extracted dataset will only retain pictures of people that have at
least `min_faces_per_person` different pictures.
color : boolean, optional, default False
Keep the 3 RGB channels instead of averaging them to a single
gray level channel. If color is True the shape of the data has
one more dimension than the shape with color = False.
slice_ : optional
Provide a custom 2D slice (height, width) to extract the
'interesting' part of the jpeg files and avoid using statistical
correlation from the background
download_if_missing : optional, True by default
If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
dataset : dict-like object with the following attributes:
dataset.data : numpy array of shape (13233, 2914)
Each row corresponds to a ravelled face image of original size 62 x 47
pixels. Changing the ``slice_`` or resize parameters will change the shape
of the output.
dataset.images : numpy array of shape (13233, 62, 47)
Each row is a face image corresponding to one of the 5749 people in
the dataset. Changing the ``slice_`` or resize parameters will change the shape
of the output.
dataset.target : numpy array of shape (13233,)
Labels associated to each face image. Those labels range from 0-5748
and correspond to the person IDs.
dataset.DESCR : string
Description of the Labeled Faces in the Wild (LFW) dataset.
"""
lfw_home, data_folder_path = check_fetch_lfw(
data_home=data_home, funneled=funneled,
download_if_missing=download_if_missing)
logger.info('Loading LFW people faces from %s', lfw_home)
# wrap the loader in a memoizing function that will return memmaped data
# arrays for optimal memory usage
m = Memory(cachedir=lfw_home, compress=6, verbose=0)
load_func = m.cache(_fetch_lfw_people)
# load and memoize the pairs as np arrays
faces, target, target_names = load_func(
data_folder_path, resize=resize,
min_faces_per_person=min_faces_per_person, color=color, slice_=slice_)
# pack the results as a Bunch instance
return Bunch(data=faces.reshape(len(faces), -1), images=faces,
target=target, target_names=target_names,
DESCR="LFW faces dataset")
#
# Task #2: Face Verification on pairs of face pictures
#
def _fetch_lfw_pairs(index_file_path, data_folder_path, slice_=None,
color=False, resize=None):
"""Perform the actual data loading for the LFW pairs dataset
This operation is meant to be cached by a joblib wrapper.
"""
# parse the index file to find the number of pairs to be able to allocate
# the right amount of memory before starting to decode the jpeg files
with open(index_file_path, 'rb') as index_file:
split_lines = [ln.strip().split(b('\t')) for ln in index_file]
pair_specs = [sl for sl in split_lines if len(sl) > 2]
n_pairs = len(pair_specs)
# iterating over the metadata lines for each pair to find the filename to
# decode and load in memory
target = np.zeros(n_pairs, dtype=np.int)
file_paths = list()
for i, components in enumerate(pair_specs):
if len(components) == 3:
target[i] = 1
pair = (
(components[0], int(components[1]) - 1),
(components[0], int(components[2]) - 1),
)
elif len(components) == 4:
target[i] = 0
pair = (
(components[0], int(components[1]) - 1),
(components[2], int(components[3]) - 1),
)
else:
raise ValueError("invalid line %d: %r" % (i + 1, components))
for j, (name, idx) in enumerate(pair):
try:
person_folder = join(data_folder_path, name)
except TypeError:
person_folder = join(data_folder_path, str(name, 'UTF-8'))
filenames = list(sorted(listdir(person_folder)))
file_path = join(person_folder, filenames[idx])
file_paths.append(file_path)
pairs = _load_imgs(file_paths, slice_, color, resize)
shape = list(pairs.shape)
n_faces = shape.pop(0)
shape.insert(0, 2)
shape.insert(0, n_faces // 2)
pairs.shape = shape
return pairs, target, np.array(['Different persons', 'Same person'])
@deprecated("Function 'load_lfw_people' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_people(download_if_missing=False) instead.")
def load_lfw_people(download_if_missing=False, **kwargs):
"""Alias for fetch_lfw_people(download_if_missing=False)
Check fetch_lfw_people.__doc__ for the documentation and parameter list.
"""
return fetch_lfw_people(download_if_missing=download_if_missing, **kwargs)
def fetch_lfw_pairs(subset='train', data_home=None, funneled=True, resize=0.5,
color=False, slice_=(slice(70, 195), slice(78, 172)),
download_if_missing=True):
"""Loader for the Labeled Faces in the Wild (LFW) pairs dataset
This dataset is a collection of JPEG pictures of famous people
collected on the internet, all details are available on the
official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. Each pixel of each channel
(color in RGB) is encoded by a float in range 0.0 - 1.0.
The task is called Face Verification: given a pair of two pictures,
a binary classifier must predict whether the two images are from
the same person.
In the official `README.txt`_ this task is described as the
"Restricted" task. As I am not sure as to implement the
"Unrestricted" variant correctly, I left it as unsupported for now.
.. _`README.txt`: http://vis-www.cs.umass.edu/lfw/README.txt
The original images are 250 x 250 pixels, but the default slice and resize
arguments reduce them to 62 x 47 pixels.
Read more in the :ref:`User Guide <labeled_faces_in_the_wild>`.
Parameters
----------
subset : optional, default: 'train'
Select the dataset to load: 'train' for the development training
set, 'test' for the development test set, and '10_folds' for the
official evaluation set that is meant to be used with a 10-folds
cross validation.
data_home : optional, default: None
Specify another download and cache folder for the datasets. By
default all scikit learn data is stored in '~/scikit_learn_data'
subfolders.
funneled : boolean, optional, default: True
Download and use the funneled variant of the dataset.
resize : float, optional, default 0.5
Ratio used to resize the each face picture.
color : boolean, optional, default False
Keep the 3 RGB channels instead of averaging them to a single
gray level channel. If color is True the shape of the data has
one more dimension than the shape with color = False.
slice_ : optional
Provide a custom 2D slice (height, width) to extract the
'interesting' part of the jpeg files and avoid using statistical
correlation from the background
download_if_missing : optional, True by default
If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
The data is returned as a Bunch object with the following attributes:
data : numpy array of shape (2200, 5828)
Each row corresponds to 2 ravel'd face images of original size 62 x 47
pixels. Changing the ``slice_`` or resize parameters will change the shape
of the output.
pairs : numpy array of shape (2200, 2, 62, 47)
Each row has 2 face images corresponding to same or different person
from the dataset containing 5749 people. Changing the ``slice_`` or resize
parameters will change the shape of the output.
target : numpy array of shape (13233,)
Labels associated to each pair of images. The two label values being
different persons or the same person.
DESCR : string
Description of the Labeled Faces in the Wild (LFW) dataset.
"""
lfw_home, data_folder_path = check_fetch_lfw(
data_home=data_home, funneled=funneled,
download_if_missing=download_if_missing)
logger.info('Loading %s LFW pairs from %s', subset, lfw_home)
# wrap the loader in a memoizing function that will return memmaped data
# arrays for optimal memory usage
m = Memory(cachedir=lfw_home, compress=6, verbose=0)
load_func = m.cache(_fetch_lfw_pairs)
# select the right metadata file according to the requested subset
label_filenames = {
'train': 'pairsDevTrain.txt',
'test': 'pairsDevTest.txt',
'10_folds': 'pairs.txt',
}
if subset not in label_filenames:
raise ValueError("subset='%s' is invalid: should be one of %r" % (
subset, list(sorted(label_filenames.keys()))))
index_file_path = join(lfw_home, label_filenames[subset])
# load and memoize the pairs as np arrays
pairs, target, target_names = load_func(
index_file_path, data_folder_path, resize=resize, color=color,
slice_=slice_)
# pack the results as a Bunch instance
return Bunch(data=pairs.reshape(len(pairs), -1), pairs=pairs,
target=target, target_names=target_names,
DESCR="'%s' segment of the LFW pairs dataset" % subset)
@deprecated("Function 'load_lfw_pairs' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_pairs(download_if_missing=False) instead.")
def load_lfw_pairs(download_if_missing=False, **kwargs):
"""Alias for fetch_lfw_pairs(download_if_missing=False)
Check fetch_lfw_pairs.__doc__ for the documentation and parameter list.
"""
return fetch_lfw_pairs(download_if_missing=download_if_missing, **kwargs)
| bsd-3-clause |
fmfn/UnbalancedDataset | examples/applications/plot_outlier_rejections.py | 2 | 4354 | """
===============================================================
Customized sampler to implement an outlier rejections estimator
===============================================================
This example illustrates the use of a custom sampler to implement an outlier
rejections estimator. It can be used easily within a pipeline in which the
number of samples can vary during training, which usually is a limitation of
the current scikit-learn pipeline.
"""
# Authors: Guillaume Lemaitre <[email protected]>
# License: MIT
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_moons, make_blobs
from sklearn.ensemble import IsolationForest
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report
from imblearn import FunctionSampler
from imblearn.pipeline import make_pipeline
print(__doc__)
rng = np.random.RandomState(42)
def plot_scatter(X, y, title):
"""Function to plot some data as a scatter plot."""
plt.figure()
plt.scatter(X[y == 1, 0], X[y == 1, 1], label="Class #1")
plt.scatter(X[y == 0, 0], X[y == 0, 1], label="Class #0")
plt.legend()
plt.title(title)
##############################################################################
# Toy data generation
##############################################################################
##############################################################################
# We are generating a non-Gaussian data set contaminated with some uniform
# noise.
moons, _ = make_moons(n_samples=500, noise=0.05)
blobs, _ = make_blobs(
n_samples=500, centers=[(-0.75, 2.25), (1.0, 2.0)], cluster_std=0.25
)
outliers = rng.uniform(low=-3, high=3, size=(500, 2))
X_train = np.vstack([moons, blobs, outliers])
y_train = np.hstack(
[
np.ones(moons.shape[0], dtype=np.int8),
np.zeros(blobs.shape[0], dtype=np.int8),
rng.randint(0, 2, size=outliers.shape[0], dtype=np.int8),
]
)
plot_scatter(X_train, y_train, "Training dataset")
##############################################################################
# We will generate some cleaned test data without outliers.
moons, _ = make_moons(n_samples=50, noise=0.05)
blobs, _ = make_blobs(
n_samples=50, centers=[(-0.75, 2.25), (1.0, 2.0)], cluster_std=0.25
)
X_test = np.vstack([moons, blobs])
y_test = np.hstack(
[np.ones(moons.shape[0], dtype=np.int8), np.zeros(blobs.shape[0], dtype=np.int8)]
)
plot_scatter(X_test, y_test, "Testing dataset")
##############################################################################
# How to use the :class:`~imblearn.FunctionSampler`
##############################################################################
##############################################################################
# We first define a function which will use
# :class:`~sklearn.ensemble.IsolationForest` to eliminate some outliers from
# our dataset during training. The function passed to the
# :class:`~imblearn.FunctionSampler` will be called when using the method
# ``fit_resample``.
def outlier_rejection(X, y):
"""This will be our function used to resample our dataset."""
model = IsolationForest(max_samples=100, contamination=0.4, random_state=rng)
model.fit(X)
y_pred = model.predict(X)
return X[y_pred == 1], y[y_pred == 1]
reject_sampler = FunctionSampler(func=outlier_rejection)
X_inliers, y_inliers = reject_sampler.fit_resample(X_train, y_train)
plot_scatter(X_inliers, y_inliers, "Training data without outliers")
##############################################################################
# Integrate it within a pipeline
##############################################################################
##############################################################################
# By eliminating outliers before the training, the classifier will be less
# affected during the prediction.
pipe = make_pipeline(
FunctionSampler(func=outlier_rejection),
LogisticRegression(solver="lbfgs", multi_class="auto", random_state=rng),
)
y_pred = pipe.fit(X_train, y_train).predict(X_test)
print(classification_report(y_test, y_pred))
clf = LogisticRegression(solver="lbfgs", multi_class="auto", random_state=rng)
y_pred = clf.fit(X_train, y_train).predict(X_test)
print(classification_report(y_test, y_pred))
plt.show()
| mit |
Nyker510/scikit-learn | sklearn/decomposition/tests/test_incremental_pca.py | 297 | 8265 | """Tests for Incremental PCA."""
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn import datasets
from sklearn.decomposition import PCA, IncrementalPCA
iris = datasets.load_iris()
def test_incremental_pca():
# Incremental PCA on dense arrays.
X = iris.data
batch_size = X.shape[0] // 3
ipca = IncrementalPCA(n_components=2, batch_size=batch_size)
pca = PCA(n_components=2)
pca.fit_transform(X)
X_transformed = ipca.fit_transform(X)
np.testing.assert_equal(X_transformed.shape, (X.shape[0], 2))
assert_almost_equal(ipca.explained_variance_ratio_.sum(),
pca.explained_variance_ratio_.sum(), 1)
for n_components in [1, 2, X.shape[1]]:
ipca = IncrementalPCA(n_components, batch_size=batch_size)
ipca.fit(X)
cov = ipca.get_covariance()
precision = ipca.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(X.shape[1]))
def test_incremental_pca_check_projection():
# Test that the projection of data is correct.
rng = np.random.RandomState(1999)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
# Get the reconstruction of the generated data X
# Note that Xt has the same "components" as X, just separated
# This is what we want to ensure is recreated correctly
Yt = IncrementalPCA(n_components=2).fit(X).transform(Xt)
# Normalize
Yt /= np.sqrt((Yt ** 2).sum())
# Make sure that the first element of Yt is ~1, this means
# the reconstruction worked as expected
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_incremental_pca_inverse():
# Test that the projection of data can be inverted.
rng = np.random.RandomState(1999)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
ipca = IncrementalPCA(n_components=2, batch_size=10).fit(X)
Y = ipca.transform(X)
Y_inverse = ipca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
def test_incremental_pca_validation():
# Test that n_components is >=1 and <= n_features.
X = [[0, 1], [1, 0]]
for n_components in [-1, 0, .99, 3]:
assert_raises(ValueError, IncrementalPCA(n_components,
batch_size=10).fit, X)
def test_incremental_pca_set_params():
# Test that components_ sign is stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 20
X = rng.randn(n_samples, n_features)
X2 = rng.randn(n_samples, n_features)
X3 = rng.randn(n_samples, n_features)
ipca = IncrementalPCA(n_components=20)
ipca.fit(X)
# Decreasing number of components
ipca.set_params(n_components=10)
assert_raises(ValueError, ipca.partial_fit, X2)
# Increasing number of components
ipca.set_params(n_components=15)
assert_raises(ValueError, ipca.partial_fit, X3)
# Returning to original setting
ipca.set_params(n_components=20)
ipca.partial_fit(X)
def test_incremental_pca_num_features_change():
# Test that changing n_components will raise an error.
rng = np.random.RandomState(1999)
n_samples = 100
X = rng.randn(n_samples, 20)
X2 = rng.randn(n_samples, 50)
ipca = IncrementalPCA(n_components=None)
ipca.fit(X)
assert_raises(ValueError, ipca.partial_fit, X2)
def test_incremental_pca_batch_signs():
# Test that components_ sign is stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features)
all_components = []
batch_sizes = np.arange(10, 20)
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
all_components.append(ipca.components_)
for i, j in zip(all_components[:-1], all_components[1:]):
assert_almost_equal(np.sign(i), np.sign(j), decimal=6)
def test_incremental_pca_batch_values():
# Test that components_ values are stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features)
all_components = []
batch_sizes = np.arange(20, 40, 3)
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
all_components.append(ipca.components_)
for i, j in zip(all_components[:-1], all_components[1:]):
assert_almost_equal(i, j, decimal=1)
def test_incremental_pca_partial_fit():
# Test that fit and partial_fit get equivalent results.
rng = np.random.RandomState(1999)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
batch_size = 10
ipca = IncrementalPCA(n_components=2, batch_size=batch_size).fit(X)
pipca = IncrementalPCA(n_components=2, batch_size=batch_size)
# Add one to make sure endpoint is included
batch_itr = np.arange(0, n + 1, batch_size)
for i, j in zip(batch_itr[:-1], batch_itr[1:]):
pipca.partial_fit(X[i:j, :])
assert_almost_equal(ipca.components_, pipca.components_, decimal=3)
def test_incremental_pca_against_pca_iris():
# Test that IncrementalPCA and PCA are approximate (to a sign flip).
X = iris.data
Y_pca = PCA(n_components=2).fit_transform(X)
Y_ipca = IncrementalPCA(n_components=2, batch_size=25).fit_transform(X)
assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_incremental_pca_against_pca_random_data():
# Test that IncrementalPCA and PCA are approximate (to a sign flip).
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features) + 5 * rng.rand(1, n_features)
Y_pca = PCA(n_components=3).fit_transform(X)
Y_ipca = IncrementalPCA(n_components=3, batch_size=25).fit_transform(X)
assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_explained_variances():
# Test that PCA and IncrementalPCA calculations match
X = datasets.make_low_rank_matrix(1000, 100, tail_strength=0.,
effective_rank=10, random_state=1999)
prec = 3
n_samples, n_features = X.shape
for nc in [None, 99]:
pca = PCA(n_components=nc).fit(X)
ipca = IncrementalPCA(n_components=nc, batch_size=100).fit(X)
assert_almost_equal(pca.explained_variance_, ipca.explained_variance_,
decimal=prec)
assert_almost_equal(pca.explained_variance_ratio_,
ipca.explained_variance_ratio_, decimal=prec)
assert_almost_equal(pca.noise_variance_, ipca.noise_variance_,
decimal=prec)
def test_whitening():
# Test that PCA and IncrementalPCA transforms match to sign flip.
X = datasets.make_low_rank_matrix(1000, 10, tail_strength=0.,
effective_rank=2, random_state=1999)
prec = 3
n_samples, n_features = X.shape
for nc in [None, 9]:
pca = PCA(whiten=True, n_components=nc).fit(X)
ipca = IncrementalPCA(whiten=True, n_components=nc,
batch_size=250).fit(X)
Xt_pca = pca.transform(X)
Xt_ipca = ipca.transform(X)
assert_almost_equal(np.abs(Xt_pca), np.abs(Xt_ipca), decimal=prec)
Xinv_ipca = ipca.inverse_transform(Xt_ipca)
Xinv_pca = pca.inverse_transform(Xt_pca)
assert_almost_equal(X, Xinv_ipca, decimal=prec)
assert_almost_equal(X, Xinv_pca, decimal=prec)
assert_almost_equal(Xinv_pca, Xinv_ipca, decimal=prec)
| bsd-3-clause |
nmih/ssbio | ssbio/databases/pdb.py | 1 | 34959 | """
PDBProp
=======
"""
import gzip
import json
import logging
import os.path as op
import mmtf
import os
from cobra.core import DictList
import pandas as pd
import requests
import deprecation
from Bio.PDB import PDBList
from lxml import etree
from six.moves.urllib_error import URLError
from six.moves.urllib.request import urlopen, urlretrieve
import ssbio.databases.pisa as pisa
import ssbio.utils
from ssbio.protein.structure.structprop import StructProp
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
log = logging.getLogger(__name__)
class PDBProp(StructProp):
"""Store information about a protein structure from the Protein Data Bank.
Extends the :class:`~ssbio.protein.structure.structprop.StructProp` class to allow initialization of the structure
by its PDB ID, and then enabling downloads of the structure file as well as parsing its metadata.
Args:
ident (str):
description (str):
chains (str):
mapped_chains (str):
structure_path (str):
file_type (str): ``pdb``, ``mmCif``, ``xml``, ``mmtf`` - file type for files downloaded from the PDB
"""
def __init__(self, ident, description=None, chains=None, mapped_chains=None, structure_path=None, file_type=None):
StructProp.__init__(self, ident, description=description, chains=chains, mapped_chains=mapped_chains,
is_experimental=True, structure_path=structure_path, file_type=file_type)
self.experimental_method = None
self.resolution = None
self.date = None
self.taxonomy_name = None
self.biological_assemblies = DictList()
"""DictList: A list for storing Bioassembly objects related to this PDB ID"""
def download_structure_file(self, outdir, file_type=None, load_header_metadata=True, force_rerun=False):
"""Download a structure file from the PDB, specifying an output directory and a file type. Optionally download
the mmCIF header file and parse data from it to store within this object.
Args:
outdir (str): Path to output directory
file_type (str): ``pdb``, ``mmCif``, ``xml``, ``mmtf`` - file type for files downloaded from the PDB
load_header_metadata (bool): If header metadata should be loaded into this object, fastest with mmtf files
force_rerun (bool): If structure file should be downloaded even if it already exists
"""
ssbio.utils.double_check_attribute(object=self, setter=file_type, backup_attribute='file_type',
custom_error_text='Please set file type to be downloaded from the PDB: '
'pdb, mmCif, xml, or mmtf')
# XTODO: check if outfile exists using ssbio.utils.force_rerun, pdblist seems to take long if it exists
# I know why - it's because we're renaming the ent to pdb. need to have mapping from file type to final extension
# Then check if file exists, if not then download again
p = PDBList()
with ssbio.utils.suppress_stdout():
structure_file = p.retrieve_pdb_file(pdb_code=self.id, pdir=outdir, file_format=file_type, overwrite=force_rerun)
if not op.exists(structure_file):
log.debug('{}: {} file not available'.format(self.id, file_type))
raise URLError('{}.{}: file not available to download'.format(self.id, file_type))
else:
log.debug('{}: {} file saved'.format(self.id, file_type))
# Rename .ent files to .pdb
if file_type == 'pdb':
new_name = structure_file.replace('pdb', '').replace('ent', 'pdb')
os.rename(structure_file, new_name)
structure_file = new_name
self.load_structure_path(structure_file, file_type)
if load_header_metadata and file_type == 'mmtf':
self.update(parse_mmtf_header(structure_file))
if load_header_metadata and file_type != 'mmtf':
self.update(parse_mmcif_header(download_mmcif_header(pdb_id=self.id, outdir=outdir, force_rerun=force_rerun)))
def get_pisa_complex_predictions(self, outdir, existing_pisa_multimer_xml=None):
if not existing_pisa_multimer_xml:
pisa_xmls = pisa.download_pisa_multimers_xml(pdb_ids=self.id, outdir=outdir,
save_single_xml_files=True)
else:
pisa_xmls = {}
pisa_xmls[self.id] = existing_pisa_multimer_xml
pisa_dict = pisa.parse_pisa_multimers_xml(pisa_xmls[self.id], download_structures=True,
outdir=outdir)
def __json_encode__(self):
# TODO: investigate why saving with # does not work!
to_return = {}
for x in self.__dict__.keys():
if x == 'pdb_title' or x == 'description':
sanitized = ssbio.utils.force_string(getattr(self, x)).replace('#', '-')
to_return.update({x: sanitized})
else:
to_return.update({x: getattr(self, x)})
return to_return
def parse_mmtf_header(infile):
"""Parse an MMTF file and return basic header-like information.
Args:
infile (str): Path to MMTF file
Returns:
dict: Dictionary of parsed header
Todo:
- Can this be sped up by not parsing the 3D coordinate info somehow?
- OR just store the sequences when this happens since it is already being parsed.
"""
infodict = {}
mmtf_decoder = mmtf.parse(infile)
infodict['date'] = mmtf_decoder.deposition_date
infodict['release_date'] = mmtf_decoder.release_date
try:
infodict['experimental_method'] = [x.decode() for x in mmtf_decoder.experimental_methods]
except AttributeError:
infodict['experimental_method'] = [x for x in mmtf_decoder.experimental_methods]
infodict['resolution'] = mmtf_decoder.resolution
infodict['description'] = mmtf_decoder.title
group_name_exclude = ['HOH']
chem_comp_type_exclude = ['l-peptide linking', 'peptide linking']
chemicals = list(set([mmtf_decoder.group_list[idx]['groupName'] for idx in mmtf_decoder.group_type_list if mmtf_decoder.group_list[idx]['chemCompType'].lower() not in chem_comp_type_exclude and mmtf_decoder.group_list[idx]['groupName'] not in group_name_exclude]))
infodict['chemicals'] = chemicals
return infodict
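# Added below: a hedged usage sketch of parse_mmtf_header (illustration only,
# not part of the original module).
def _example_parse_mmtf_header_usage():
    """Hedged usage sketch. The file path and PDB ID are assumptions; any
    local MMTF file, e.g. one saved by PDBProp.download_structure_file,
    would work."""
    header = parse_mmtf_header('/tmp/1kf6.mmtf')
    print(header['experimental_method'])
    print(header['resolution'])
    print(header['chemicals'])
    return header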
def download_mmcif_header(pdb_id, outdir='', force_rerun=False):
"""Download a mmCIF header file from the RCSB PDB by ID.
Args:
pdb_id: PDB ID
outdir: Optional output directory, default is current working directory
force_rerun: If the file should be downloaded again even if it exists
Returns:
str: Path to outfile
"""
# TODO: keep an eye on https://github.com/biopython/biopython/pull/943 Biopython PR#493 for functionality of this
# method in biopython. extra file types have not been added to biopython download yet
pdb_id = pdb_id.lower()
file_type = 'cif'
folder = 'header'
outfile = op.join(outdir, '{}.header.{}'.format(pdb_id, file_type))
if ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile):
download_link = 'http://files.rcsb.org/{}/{}.{}'.format(folder, pdb_id, file_type)
urlretrieve(download_link, outfile)
log.debug('{}: saved header file'.format(outfile))
else:
log.debug('{}: header file already saved'.format(outfile))
return outfile
def parse_mmcif_header(infile):
"""Parse a couple important fields from the mmCIF file format with some manual curation of ligands.
If you want full access to the mmCIF file just use the MMCIF2Dict class in Biopython.
Args:
infile: Path to mmCIF file
Returns:
dict: Dictionary of parsed header
"""
from Bio.PDB.MMCIF2Dict import MMCIF2Dict
newdict = {}
try:
mmdict = MMCIF2Dict(infile)
except ValueError as e:
log.exception(e)
return newdict
chemical_ids_exclude = ['HOH']
chemical_types_exclude = ['l-peptide linking','peptide linking']
if '_struct.title' in mmdict:
newdict['pdb_title'] = mmdict['_struct.title']
else:
log.debug('{}: No title field'.format(infile))
if '_struct.pdbx_descriptor' in mmdict:
newdict['description'] = mmdict['_struct.pdbx_descriptor']
else:
log.debug('{}: no description field'.format(infile))
if '_pdbx_database_status.recvd_initial_deposition_date' in mmdict:
newdict['date'] = mmdict['_pdbx_database_status.recvd_initial_deposition_date']
elif '_database_PDB_rev.date' in mmdict:
newdict['date'] = mmdict['_database_PDB_rev.date']
else:
log.debug('{}: no date field'.format(infile))
if '_exptl.method' in mmdict:
newdict['experimental_method'] = mmdict['_exptl.method']
else:
log.debug('{}: no experimental method field'.format(infile))
# TODO: refactor how to get resolutions based on experimental method
if '_refine.ls_d_res_high' in mmdict:
try:
if isinstance(mmdict['_refine.ls_d_res_high'], list):
newdict['resolution'] = [float(x) for x in mmdict['_refine.ls_d_res_high']]
else:
newdict['resolution'] = float(mmdict['_refine.ls_d_res_high'])
except:
try:
newdict['resolution'] = float(mmdict['_em_3d_reconstruction.resolution'])
except:
log.debug('{}: no resolution field'.format(infile))
else:
log.debug('{}: no resolution field'.format(infile))
if '_chem_comp.id' in mmdict:
chemicals_filtered = ssbio.utils.filter_list_by_indices(mmdict['_chem_comp.id'],
ssbio.utils.not_find(mmdict['_chem_comp.type'],
chemical_types_exclude,
case_sensitive=False))
chemicals_filtered = ssbio.utils.filter_list(chemicals_filtered, chemical_ids_exclude, case_sensitive=True)
newdict['chemicals'] = chemicals_filtered
else:
log.debug('{}: no chemical composition field'.format(infile))
if '_entity_src_gen.pdbx_gene_src_scientific_name' in mmdict:
newdict['taxonomy_name'] = mmdict['_entity_src_gen.pdbx_gene_src_scientific_name']
else:
log.debug('{}: no organism field'.format(infile))
return newdict
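# Added below: a hedged sketch combining download_mmcif_header and
# parse_mmcif_header (illustration only, not part of the original module).
def _example_mmcif_header_roundtrip():
    """Hedged usage sketch. The PDB ID and output directory are assumptions;
    the download requires network access to files.rcsb.org."""
    header_file = download_mmcif_header('1kf6', outdir='/tmp')
    metadata = parse_mmcif_header(header_file)
    print(metadata.get('experimental_method'))
    print(metadata.get('resolution'))
    return metadata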
def download_sifts_xml(pdb_id, outdir='', force_rerun=False):
"""Download the SIFTS file for a PDB ID.
Args:
pdb_id (str): PDB ID
outdir (str): Output directory, current working directory if not specified.
force_rerun (bool): If the file should be downloaded again even if it exists
Returns:
str: Path to downloaded file
"""
baseURL = 'ftp://ftp.ebi.ac.uk/pub/databases/msd/sifts/xml/'
filename = '{}.xml.gz'.format(pdb_id.lower())
outfile = op.join(outdir, filename.split('.')[0] + '.sifts.xml')
if ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile):
response = urlopen(baseURL + filename)
with open(outfile, 'wb') as f:
f.write(gzip.decompress(response.read()))
return outfile
def map_uniprot_resnum_to_pdb(uniprot_resnum, chain_id, sifts_file):
"""Map a UniProt residue number to its corresponding PDB residue number.
This function requires that the SIFTS file be downloaded,
and also a chain ID (as different chains may have different mappings).
Args:
uniprot_resnum (int): integer of the residue number you'd like to map
chain_id (str): string of the PDB chain to map to
sifts_file (str): Path to the SIFTS XML file
Returns:
(tuple): tuple containing:
mapped_resnum (int): Mapped residue number
is_observed (bool): Indicates if the 3D structure actually shows the residue
"""
# Load the xml with lxml
parser = etree.XMLParser(ns_clean=True)
tree = etree.parse(sifts_file, parser)
root = tree.getroot()
my_pdb_resnum = None
# TODO: "Engineered_Mutation is also a possible annotation, need to figure out what to do with that
my_pdb_annotation = False
# Find the right chain (entities in the xml doc)
ent = './/{http://www.ebi.ac.uk/pdbe/docs/sifts/eFamily.xsd}entity'
for chain in root.findall(ent):
# TODO: IMPORTANT - entityId is not the chain ID!!! it is just in alphabetical order!
if chain.attrib['entityId'] == chain_id:
# Find the "crossRefDb" tag that has the attributes dbSource="UniProt" and dbResNum="your_resnum_here"
# Then match it to the crossRefDb dbResNum that has the attribute dbSource="PDBresnum"
# Check if uniprot + resnum even exists in the sifts file (it won't if the pdb doesn't contain the residue)
ures = './/{http://www.ebi.ac.uk/pdbe/docs/sifts/eFamily.xsd}crossRefDb[@dbSource="UniProt"][@dbResNum="%s"]' % uniprot_resnum
my_uniprot_residue = chain.findall(ures)
if len(my_uniprot_residue) == 1:
# Get crossRefDb dbSource="PDB"
parent = my_uniprot_residue[0].getparent()
pres = './/{http://www.ebi.ac.uk/pdbe/docs/sifts/eFamily.xsd}crossRefDb[@dbSource="PDB"]'
my_pdb_residue = parent.findall(pres)
my_pdb_resnum = int(my_pdb_residue[0].attrib['dbResNum'])
# Get <residueDetail dbSource="PDBe" property="Annotation">
# Will be Not_Observed if it is not seen in the PDB
anno = './/{http://www.ebi.ac.uk/pdbe/docs/sifts/eFamily.xsd}residueDetail[@dbSource="PDBe"][@property="Annotation"]'
my_pdb_annotation = parent.findall(anno)
if len(my_pdb_annotation) == 1:
my_pdb_annotation = my_pdb_annotation[0].text
if my_pdb_annotation == 'Not_Observed':
my_pdb_annotation = False
else:
my_pdb_annotation = True
else:
return None, False
return my_pdb_resnum, my_pdb_annotation
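# Added below: a hedged sketch combining download_sifts_xml and
# map_uniprot_resnum_to_pdb (illustration only, not part of the original module).
def _example_sifts_residue_mapping():
    """Hedged usage sketch. The PDB ID, chain ID and residue number are
    assumptions; the SIFTS download requires network access to the EBI FTP
    server."""
    sifts_file = download_sifts_xml('1kf6', outdir='/tmp')
    pdb_resnum, is_observed = map_uniprot_resnum_to_pdb(100, 'A', sifts_file)
    print(pdb_resnum)
    print(is_observed)
    return pdb_resnum, is_observed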
def best_structures(uniprot_id, outname=None, outdir=None, seq_ident_cutoff=0.0, force_rerun=False):
"""Use the PDBe REST service to query for the best PDB structures for a UniProt ID.
More information found here: https://www.ebi.ac.uk/pdbe/api/doc/sifts.html
Link used to retrieve results: https://www.ebi.ac.uk/pdbe/api/mappings/best_structures/:accession
The list of PDB structures mapping to a UniProt accession sorted by coverage of the protein and, if the same, resolution.
Here is the ranking algorithm described by the PDB paper:
https://nar.oxfordjournals.org/content/44/D1/D385.full
"Finally, a single quality indicator is also calculated for each entry by taking the harmonic average
of all the percentile scores representing model and model-data-fit quality measures and then subtracting
10 times the numerical value of the resolution (in Angstrom) of the entry to ensure that resolution plays
a role in characterising the quality of a structure. This single empirical 'quality measure' value is used
by the PDBe query system to sort results and identify the 'best' structure in a given context. At present,
entries determined by methods other than X-ray crystallography do not have similar data quality information
available and are not considered as 'best structures'."
Args:
uniprot_id (str): UniProt Accession ID
outname (str): Basename of the output file of JSON results
outdir (str): Path to output directory of JSON results
seq_ident_cutoff (float): Cutoff results based on percent coverage (in decimal form)
force_rerun (bool): Obtain best structures mapping ignoring previously downloaded results
Returns:
list: Rank-ordered list of dictionaries representing chain-specific PDB entries. Keys are:
* pdb_id: the PDB ID which maps to the UniProt ID
* chain_id: the specific chain of the PDB which maps to the UniProt ID
* coverage: the percent coverage of the entire UniProt sequence
* resolution: the resolution of the structure
* start: the structure residue number which maps to the start of the mapped sequence
* end: the structure residue number which maps to the end of the mapped sequence
* unp_start: the sequence residue number which maps to the structure start
* unp_end: the sequence residue number which maps to the structure end
* experimental_method: type of experiment used to determine structure
* tax_id: taxonomic ID of the protein's original organism
"""
outfile = ''
if not outdir:
outdir = ''
    # If an output dir is specified but no outname, use the UniProt ID as the file basename
if not outname and outdir:
outname = uniprot_id
if outname:
outname = op.join(outdir, outname)
outfile = '{}.json'.format(outname)
# Load a possibly existing json file
if not ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile):
with open(outfile, 'r') as f:
raw_data = json.load(f)
log.debug('{}: loaded existing json file'.format(uniprot_id))
# Otherwise run the web request
else:
# TODO: add a checker for a cached file of uniprot -> PDBs - can be generated within gempro pipeline and stored
response = requests.get('https://www.ebi.ac.uk/pdbe/api/mappings/best_structures/{}'.format(uniprot_id),
data={'key': 'value'})
if response.status_code == 404:
log.debug('{}: 404 returned, probably no structures available.'.format(uniprot_id))
raw_data = {uniprot_id: {}}
else:
log.debug('{}: Obtained best structures'.format(uniprot_id))
raw_data = response.json()
# Write the json file if specified
if outfile:
with open(outfile, 'w') as f:
json.dump(raw_data, f)
log.debug('{}: Saved json file of best structures'.format(uniprot_id))
data = dict(raw_data)[uniprot_id]
    # Filter for sequence identity percentage (build a new list rather than removing
    # items from the list while iterating over it, which skips elements)
    if seq_ident_cutoff != 0:
        data = [result for result in data if result['coverage'] >= seq_ident_cutoff]
return data
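# A minimal, hedged usage sketch of best_structures (assumes network access; the UniProt
# accession below is only an example). Results come back rank-ordered, so the first entry
# is the highest-ranked chain. Illustrative only; never called in this module.
def _example_best_structures():
    ranked = best_structures('P0ABP8', outdir='.', seq_ident_cutoff=0.5)
    if ranked:
        top = ranked[0]
        log.info('Top-ranked structure: {} chain {} at {} A resolution'.format(
            top['pdb_id'], top['chain_id'], top['resolution']))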
def blast_pdb(seq, outfile='', outdir='', evalue=0.0001, seq_ident_cutoff=0.0, link=False, force_rerun=False):
"""Returns a list of BLAST hits of a sequence to available structures in the PDB.
Args:
seq (str): Your sequence, in string format
outfile (str): Name of output file
outdir (str, optional): Path to output directory. Default is the current directory.
evalue (float, optional): Cutoff for the E-value - filters for significant hits. 0.001 is liberal, 0.0001 is stringent (default).
seq_ident_cutoff (float, optional): Cutoff results based on percent coverage (in decimal form)
link (bool, optional): Set to True if a link to the HTML results should be displayed
force_rerun (bool, optional): If existing BLAST results should not be used, set to True. Default is False
Returns:
list: Rank ordered list of BLAST hits in dictionaries.
"""
if len(seq) < 12:
raise ValueError('Sequence must be at least 12 residues long.')
if link:
page = 'PDB results page: http://www.rcsb.org/pdb/rest/getBlastPDB1?sequence={}&eCutOff={}&maskLowComplexity=yes&matrix=BLOSUM62&outputFormat=HTML'.format(seq, evalue)
print(page)
parser = etree.XMLParser(ns_clean=True)
outfile = op.join(outdir, outfile)
if ssbio.utils.force_rerun(force_rerun, outfile):
        # Run the BLAST query against the REST server if there are no existing results or force_rerun=True
page = 'http://www.rcsb.org/pdb/rest/getBlastPDB1?sequence={}&eCutOff={}&maskLowComplexity=yes&matrix=BLOSUM62&outputFormat=XML'.format(
seq, evalue)
req = requests.get(page)
if req.status_code == 200:
response = req.text
# Save the XML file
if outfile:
with open(outfile, 'w') as f:
f.write(response)
# Parse the XML string
tree = etree.ElementTree(etree.fromstring(response, parser))
log.debug('Loaded BLAST results from REST server')
else:
            log.error('BLAST request failed with status code {}'.format(req.status_code))
return []
else:
tree = etree.parse(outfile, parser)
log.debug('{}: Loaded existing BLAST XML results'.format(outfile))
# Get length of original sequence to calculate percentages
len_orig = float(len(seq))
root = tree.getroot()
hit_list = []
for hit in root.findall('BlastOutput_iterations/Iteration/Iteration_hits/Hit'):
info = {}
hitdef = hit.find('Hit_def')
if hitdef is not None:
info['hit_pdb'] = hitdef.text.split('|')[0].split(':')[0].lower()
info['hit_pdb_chains'] = hitdef.text.split('|')[0].split(':')[2].split(',')
# One PDB can align to different parts of the sequence
# Will just choose the top hit for this single PDB
hsp = hit.findall('Hit_hsps/Hsp')[0]
# Number of identical residues
hspi = hsp.find('Hsp_identity')
if hspi is not None:
info['hit_num_ident'] = int(hspi.text)
info['hit_percent_ident'] = int(hspi.text)/len_orig
if int(hspi.text)/len_orig < seq_ident_cutoff:
log.debug('{}: does not meet sequence identity cutoff'.format(hitdef.text.split('|')[0].split(':')[0]))
continue
# Number of similar residues (positive hits)
hspp = hsp.find('Hsp_positive')
if hspp is not None:
info['hit_num_similar'] = int(hspp.text)
info['hit_percent_similar'] = int(hspp.text) / len_orig
# Total number of gaps (unable to align in either query or subject)
hspg = hsp.find('Hsp_gaps')
if hspg is not None:
info['hit_num_gaps'] = int(hspg.text)
info['hit_percent_gaps'] = int(hspg.text) / len_orig
# E-value of BLAST
hspe = hsp.find('Hsp_evalue')
if hspe is not None:
info['hit_evalue'] = float(hspe.text)
# Score of BLAST
hsps = hsp.find('Hsp_score')
if hsps is not None:
info['hit_score'] = float(hsps.text)
hit_list.append(info)
log.debug("{}: Number of BLAST hits".format(len(hit_list)))
return hit_list
def blast_pdb_df(blast_results):
"""Make a dataframe of BLAST results"""
cols = ['hit_pdb', 'hit_pdb_chains', 'hit_evalue', 'hit_score', 'hit_num_ident', 'hit_percent_ident',
'hit_num_similar', 'hit_percent_similar', 'hit_num_gaps', 'hit_percent_gaps']
return pd.DataFrame.from_records(blast_results, columns=cols)
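# A minimal, hedged usage sketch chaining blast_pdb and blast_pdb_df (assumes network access
# to the RCSB BLAST endpoint; the sequence below is an arbitrary placeholder, not data from
# this module). Illustrative only; never called here.
def _example_blast_pdb():
    seq = ('MSKGEELFTGVVPILVELDGDVNGHKFSVSGEGEGDATYGKLTLKFICTTGKLPVPWPTL'
           'VTTFSYGVQCFSRYPDHMKQHDFFKSAMPEGYVQERTIFFKDDGNYKTRAEVKFEGDTLV')
    hits = blast_pdb(seq, evalue=0.001, seq_ident_cutoff=0.3)
    df = blast_pdb_df(hits)
    log.info('BLAST returned {} hits'.format(len(df)))
    return df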
def _property_table():
"""Download the PDB -> resolution table directly from the RCSB PDB REST service.
See the other fields that you can get here: http://www.rcsb.org/pdb/results/reportField.do
Returns:
Pandas DataFrame: table of structureId as the index, resolution and experimentalTechnique as the columns
"""
url = 'http://www.rcsb.org/pdb/rest/customReport.csv?pdbids=*&customReportColumns=structureId,resolution,experimentalTechnique,releaseDate&service=wsfile&format=csv'
r = requests.get(url)
p = pd.read_csv(StringIO(r.text)).set_index('structureId')
return p
def get_resolution(pdb_id):
"""Quick way to get the resolution of a PDB ID using the table of results from the REST service
Returns infinity if the resolution is not available.
Returns:
float: resolution of a PDB ID in Angstroms
TODO:
- Unit test
"""
pdb_id = pdb_id.upper()
if pdb_id not in _property_table().index:
raise ValueError('PDB ID not in property table')
else:
        resolution = _property_table().loc[pdb_id, 'resolution']
        if pd.isnull(resolution):
            log.debug('{}: no resolution available, probably not an X-ray crystal structure'.format(pdb_id))
resolution = float('inf')
return resolution
def get_release_date(pdb_id):
"""Quick way to get the release date of a PDB ID using the table of results from the REST service
Returns None if the release date is not available.
Returns:
        str: Release date of a PDB ID
"""
pdb_id = pdb_id.upper()
if pdb_id not in _property_table().index:
raise ValueError('PDB ID not in property table')
else:
        release_date = _property_table().loc[pdb_id, 'releaseDate']
        if pd.isnull(release_date):
            log.debug('{}: no release date available'.format(pdb_id))
release_date = None
return release_date
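# A minimal, hedged usage sketch of the property-table helpers above (assumes network access;
# '1hv4' is only an example ID). Note that get_resolution and get_release_date each re-download
# the full property table, so cache _property_table() yourself for bulk lookups.
def _example_property_lookups():
    log.info('1hv4 resolution: {} Angstroms'.format(get_resolution('1hv4')))
    log.info('1hv4 release date: {}'.format(get_release_date('1hv4')))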
def get_num_bioassemblies(pdb_id, cache=False, outdir=None, force_rerun=False):
"""Check if there are bioassemblies using the PDB REST API, and if there are, get the number of bioassemblies
available.
See: https://www.rcsb.org/pages/webservices/rest, section 'List biological assemblies'
Not all PDB entries have biological assemblies available and some have multiple. Details that are necessary to
recreate a biological assembly from the asymmetric unit can be accessed from the following requests.
- Number of biological assemblies associated with a PDB entry
- Access the transformation information needed to generate a biological assembly (nr=0 will return information
for the asymmetric unit, nr=1 will return information for the first assembly, etc.)
A query of https://www.rcsb.org/pdb/rest/bioassembly/nrbioassemblies?structureId=1hv4 returns this::
<nrBioAssemblies structureId="1HV4" hasAssemblies="true" count="2"/>
Args:
pdb_id (str): PDB ID
cache (bool): If the XML file should be downloaded
outdir (str): If cache, then specify the output directory
force_rerun (bool): If cache, and if file exists, specify if API should be queried again
"""
parser = etree.XMLParser(ns_clean=True)
if not outdir:
outdir = os.getcwd()
outfile = op.join(outdir, '{}_nrbiomols.xml'.format(pdb_id))
if ssbio.utils.force_rerun(force_rerun, outfile):
page = 'https://www.rcsb.org/pdb/rest/bioassembly/nrbioassemblies?structureId={}'.format(pdb_id)
req = requests.get(page)
if req.status_code == 200:
response = req.text
# Save the XML file
if cache:
with open(outfile, 'w') as f:
f.write(response)
# Parse the XML string
tree = etree.ElementTree(etree.fromstring(response, parser))
log.debug('Loaded bioassembly information from REST server')
else:
            log.error('Request failed with status code {}'.format(req.status_code))
req.raise_for_status()
else:
tree = etree.parse(outfile, parser)
log.debug('{}: Loaded existing XML results'.format(outfile))
r = tree.getroot()
has_biomols = r.get('hasAssemblies')
if has_biomols == 'true':
has_biomols = True
else:
has_biomols = False
if has_biomols:
num_biomols = r.get('count')
else:
num_biomols = 0
num_biomols = int(num_biomols)
return num_biomols
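# A minimal, hedged usage sketch of get_num_bioassemblies (assumes network access; reuses the
# '1hv4' example ID from the docstring above). Illustrative only; never called in this module.
def _example_bioassemblies():
    n = get_num_bioassemblies('1hv4', cache=True, outdir='.')
    log.info('1hv4 has {} biological assemblies'.format(n))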
def get_bioassembly_info(pdb_id, biomol_num, cache=False, outdir=None, force_rerun=False):
"""Get metadata about a bioassembly from the RCSB PDB's REST API.
See: https://www.rcsb.org/pdb/rest/bioassembly/bioassembly?structureId=1hv4&nr=1
The API returns an XML file containing the information on a biological assembly that looks like this::
<bioassembly structureId="1HV4" assemblyNr="1" method="PISA" desc="author_and_software_defined_assembly">
<transformations operator="1" chainIds="A,B,C,D">
<transformation index="1">
<matrix m11="1.00000000" m12="0.00000000" m13="0.00000000" m21="0.00000000" m22="1.00000000" m23="0.00000000" m31="0.00000000" m32="0.00000000" m33="1.00000000"/>
<shift v1="0.00000000" v2="0.00000000" v3="0.00000000"/>
</transformation>
</transformations>
</bioassembly>
Args:
pdb_id (str): PDB ID
biomol_num (int): Biological assembly number you are interested in
cache (bool): If the XML file should be downloaded
outdir (str): If cache, then specify the output directory
force_rerun (bool): If cache, and if file exists, specify if API should be queried again
"""
parser = etree.XMLParser(ns_clean=True)
#
# if not outdir:
# outdir = os.getcwd()
# outfile = op.join(outdir, '{}.xml'.format(self.id))
#
# if ssbio.utils.force_rerun(force_rerun, outfile):
# page = 'https://www.rcsb.org/pdb/rest/bioassembly/bioassembly?structureId={}&nr={}'.format(
# self.original_pdb_id, biomol_num)
# req = requests.get(page)
#
# if req.status_code == 200:
# response = req.text
#
# # Save the XML file
# if cache:
# with open(outfile, 'w') as f:
# f.write(response)
#
# # Parse the XML string
# r = xmltodict.parse(response)
# log.debug('Loaded bioassembly information from REST server')
# else:
# log.error('Request timed out')
# req.raise_for_status()
# else:
# with open(outfile, 'r') as f:
# r = xmltodict.parse(f.read())
# log.debug('{}: Loaded existing XML results'.format(outfile))
#
# self.biomol_to_chain_dict[biomol_num] = {'chains': r['bioassembly']['transformations']['@chainIds'],
# 'multiplier': len(r['bioassembly']['transformations']['transformation'])}
# # TODO: figure out how to store matrices etc.
#
# log.info('{}_{}: ')
def download_biomol(pdb_id, biomol_num, outdir, file_type='pdb', force_rerun=False):
import zlib
from six.moves.urllib_error import URLError
from six.moves.urllib.request import urlopen, urlretrieve
import contextlib
ssbio.utils.make_dir(outdir)
server_folder = pdb_id[1:3]
if file_type == 'pdb':
# server = 'ftp://ftp.wwpdb.org/pub/pdb/data/biounit/coordinates/divided/{}/'.format(server_folder)
server = 'https://files.rcsb.org/download/'
server_filename = pdb_id + '.pdb%i.gz' % biomol_num
local_filename = pdb_id + '_bio%i.pdb' % biomol_num
outfile = op.join(outdir, local_filename)
elif file_type.lower() == 'mmcif' or file_type.lower() == 'cif':
server = 'ftp://ftp.wwpdb.org/pub/pdb/data/biounit/mmCIF/divided/{}/'.format(server_folder)
server_filename = pdb_id + '-assembly%i.cif.gz' % biomol_num
local_filename = pdb_id + '_bio%i.cif' % biomol_num
outfile = op.join(outdir, local_filename)
else:
raise ValueError('Biological assembly only available in PDB or mmCIF file types.')
if ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile):
download_link = op.join(server, server_filename)
try:
with contextlib.closing(urlopen(download_link)) as f:
decompressed_data = zlib.decompress(f.read(), 16 + zlib.MAX_WBITS)
with open(op.join(outdir, local_filename), 'wb') as f:
f.write(decompressed_data)
except URLError as e:
print(e)
return None
return outfile
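# A minimal, hedged usage sketch of download_biomol (assumes network access and write
# permission in the output directory; the PDB ID is an example). Illustrative only.
def _example_download_biomol():
    outfile = download_biomol('1hv4', biomol_num=1, outdir='.', file_type='pdb')
    if outfile:
        log.info('Saved first biological assembly to {}'.format(outfile))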
########################################################################################################################
########################################################################################################################
# DEPRECATED FUNCTIONS
########################################################################################################################
########################################################################################################################
@deprecation.deprecated(deprecated_in="1.0", removed_in="2.0",
details="Use Biopython's PDBList.retrieve_pdb_file function instead")
def download_structure(pdb_id, file_type, outdir='', only_header=False, force_rerun=False):
"""Download a structure from the RCSB PDB by ID. Specify the file type desired.
Args:
pdb_id: PDB ID
file_type: pdb, pdb.gz, mmcif, cif, cif.gz, xml.gz, mmtf, mmtf.gz
outdir: Optional output directory
only_header: If only the header file should be downloaded
force_rerun: If the file should be downloaded again even if it exists
Returns:
str: Path to outfile
"""
# method in biopython. extra file types have not been added to biopython download yet
pdb_id = pdb_id.lower()
file_type = file_type.lower()
file_types = ['pdb', 'pdb.gz', 'mmcif', 'cif', 'cif.gz', 'xml.gz', 'mmtf', 'mmtf.gz']
if file_type not in file_types:
        raise ValueError('Invalid file type, must be one of: pdb, pdb.gz, mmcif, cif, cif.gz, xml.gz, mmtf, mmtf.gz')
if file_type == 'mmtf':
file_type = 'mmtf.gz'
if file_type.endswith('.gz'):
gzipped = True
else:
gzipped = False
if file_type == 'mmcif':
file_type = 'cif'
if only_header:
folder = 'header'
outfile = op.join(outdir, '{}.header.{}'.format(pdb_id, file_type))
else:
folder = 'download'
outfile = op.join(outdir, '{}.{}'.format(pdb_id, file_type))
if ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile):
if file_type == 'mmtf.gz' or file_type == 'mmtf':
mmtf_api = '1.0'
download_link = 'http://mmtf.rcsb.org/v{}/full/{}.mmtf.gz'.format(mmtf_api, pdb_id)
else:
download_link = 'http://files.rcsb.org/{}/{}.{}'.format(folder, pdb_id, file_type)
urlretrieve(download_link, outfile)
if gzipped:
outfile = ssbio.utils.gunzip_file(infile=outfile,
outfile=outfile.strip('.gz'),
outdir=outdir,
delete_original=False,
force_rerun_flag=force_rerun)
log.debug('{}: saved structure file'.format(outfile))
else:
if file_type == 'mmtf.gz':
outfile = op.join(outdir, '{}.{}'.format(pdb_id, 'mmtf'))
log.debug('{}: structure file already saved'.format(outfile))
return outfile
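# A hedged sketch of the replacement recommended in the deprecation notice above, using
# Biopython's PDBList (assumes Biopython is installed; the `pdir` and `file_format` keyword
# arguments reflect recent Biopython releases and may differ in older ones).
def _example_retrieve_pdb_file():
    from Bio.PDB import PDBList
    pdbl = PDBList()
    return pdbl.retrieve_pdb_file('1hv4', pdir='.', file_format='mmCif')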
| mit |
weidel-p/nest-simulator | pynest/examples/pulsepacket.py | 12 | 11358 | # -*- coding: utf-8 -*-
#
# pulsepacket.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Pulse packet example
--------------------
This script compares the average and individual membrane potential excursions
in response to a single pulse packet with an analytically acquired voltage
trace (see: Diesmann [1]_)
A pulse packet is a transient spike volley with a Gaussian rate profile.
The user can specify the neural parameters, the parameters of the
pulse-packet and the number of trials.
References
~~~~~~~~~~~~
.. [1] Diesmann M. 2002. Dissertation. Conditions for stable propagation of
synchronous spiking in cortical neural networks: Single neuron dynamics
and network properties.
http://d-nb.info/968772781/34.
"""
###############################################################################
# First, we import all necessary modules for simulation, analysis and
# plotting.
import scipy.special as sp
import nest
import numpy
import matplotlib.pyplot as plt
# Properties of pulse packet:
a = 100 # number of spikes in one pulse packet
sdev = 10. # width of pulse packet (ms)
weight = 0.1 # PSP amplitude (mV)
pulsetime = 500. # occurrence time (center) of pulse-packet (ms)
# Network and neuron characteristics:
n_neurons = 100 # number of neurons
cm = 200. # membrane capacitance (pF)
tau_s = 0.5 # synaptic time constant (ms)
tau_m = 20. # membrane time constant (ms)
V0 = 0.0 # resting potential (mV)
Vth = numpy.inf # firing threshold, high value to avoid spiking
# Simulation and analysis parameters:
simtime = 1000. # how long we simulate (ms)
simulation_resolution = 0.1 # (ms)
sampling_resolution = 1. # for voltmeter (ms)
convolution_resolution = 1. # for the analytics (ms)
# Some parameters in base units.
Cm = cm * 1e-12 # convert to Farad
Weight = weight * 1e-12 # convert to Ampere
Tau_s = tau_s * 1e-3 # convert to sec
Tau_m = tau_m * 1e-3 # convert to sec
Sdev = sdev * 1e-3 # convert to sec
Convolution_resolution = convolution_resolution * 1e-3 # convert to sec
###############################################################################
# This function calculates the membrane potential excursion in response
# to a single input spike (the equation is given for example in Diesmann [1]_,
# eq.2.3).
# It expects:
#
# * ``Time``: a time array or a single time point (in sec)
# * ``Tau_s`` and ``Tau_m``: the synaptic and the membrane time constant (in sec)
# * ``Cm``: the membrane capacity (in Farad)
# * ``Weight``: the synaptic weight (in Ampere)
#
# It returns the provoked membrane potential (in mV)
def make_psp(Time, Tau_s, Tau_m, Cm, Weight):
term1 = (1 / Tau_s - 1 / Tau_m)
term2 = numpy.exp(-Time / Tau_s)
term3 = numpy.exp(-Time / Tau_m)
PSP = (Weight / Cm * numpy.exp(1) / Tau_s *
(((-Time * term2) / term1) + (term3 - term2) / term1 ** 2))
return PSP * 1e3
###############################################################################
# This function finds the exact location of the maximum of the PSP caused by a
# single input spike. The location is obtained by setting the first derivative
# of the equation for the PSP (see ``make_psp()``) to zero. The resulting
# equation can be expressed in terms of a `LambertW function`.
# This function expects:
#
# * ``tau_s`` and ``tau_m``: the synaptic and the membrane time constant (in ms;
#   the factor of 1e-3 inside the function converts the returned location to seconds)
#
# It returns the location of the maximum (in sec)
def LambertWm1(x):
# Using scipy to mimic the gsl_sf_lambert_Wm1 function.
return sp.lambertw(x, k=-1 if x < 0 else 0).real
def find_loc_pspmax(tau_s, tau_m):
var = tau_m / tau_s
lam = LambertWm1(-numpy.exp(-1 / var) / var)
t_maxpsp = (-var * lam - 1) / var / (1 / tau_s - 1 / tau_m) * 1e-3
return t_maxpsp
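###############################################################################
# Optional sanity check (not part of the original example; the 10-microsecond sampling
# grid below is an arbitrary choice): the analytically located PSP maximum should agree
# with the maximum of a finely sampled PSP curve to within one grid step.
_t_check = numpy.arange(1e-6, 10 * (Tau_m + Tau_s), 1e-5)
_psp_check = make_psp(_t_check, Tau_s, Tau_m, Cm, Weight)
assert abs(_t_check[numpy.argmax(_psp_check)] - find_loc_pspmax(tau_s, tau_m)) < 2e-5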
###############################################################################
# First, we construct a Gaussian kernel for a given standard deviation
# (``sig``) and mean value (``mu``). In this case the standard deviation is
# the width of the pulse packet (see [1]_).
sig = Sdev
mu = 0.0
x = numpy.arange(-4 * sig, 4 * sig, Convolution_resolution)
term1 = 1 / (sig * numpy.sqrt(2 * numpy.pi))
term2 = numpy.exp(-(x - mu) ** 2 / (sig ** 2 * 2))
gauss = term1 * term2 * Convolution_resolution
###############################################################################
# Second, we calculate the PSP of a neuron due to a single spiking input.
# (see Diesmann 2002, eq. 2.3).
# Since we do that in discrete time steps, we first construct an array
# (``t_psp``) that contains the time points we want to consider. Then, the
# function ``make_psp()`` (that creates the PSP) takes the time array as its
# first argument.
t_psp = numpy.arange(0, 10 * (Tau_m + Tau_s), Convolution_resolution)
psp = make_psp(t_psp, Tau_s, Tau_m, Cm, Weight)
###############################################################################
# Now, we want to normalize the PSP amplitude to one. We therefore have to
# divide the PSP by its maximum ([1]_ sec 6.1). The function
# ``find_loc_pspmax()`` returns the exact time point (``t_pspmax``) when we
# expect the maximum to occur. The function ``make_psp()`` calculates the
# corresponding PSP value, which is our PSP amplitude (``psp_amp``).
t_pspmax = find_loc_pspmax(tau_s, tau_m)
psp_amp = make_psp(t_pspmax, Tau_s, Tau_m, Cm, Weight)
psp_norm = psp / psp_amp
###############################################################################
# Now we have all ingredients to compute the membrane potential excursion
# (`U`). This calculation implies a convolution of the Gaussian with the
# normalized PSP (see [1]_, eq. 6.9). In order to avoid an offset in the
# convolution, we need to add a pad of zeros on the left side of the
# normalized PSP. Later on we want to compare our analytical results with the
# simulation outcome. Therefore we need a time vector (`t_U`) with the correct
# temporal resolution, which places the excursion of the potential at the
# correct time.
psp_norm = numpy.pad(psp_norm, [len(psp_norm) - 1, 1])
U = a * psp_amp * numpy.convolve(gauss, psp_norm)
ulen = len(U)
t_U = (convolution_resolution * numpy.linspace(-ulen / 2., ulen / 2., ulen) +
pulsetime + 1.)
###############################################################################
# In this section we simulate a network of multiple neurons.
# All these neurons receive an individual pulse packet that is drawn from a
# Gaussian distribution.
#
# We reset the Kernel, define the simulation resolution and set the
# verbosity using ``set_verbosity`` to suppress info messages.
nest.ResetKernel()
nest.SetKernelStatus({'resolution': simulation_resolution})
nest.set_verbosity("M_WARNING")
###############################################################################
# Afterwards we create several neurons, the same amount of
# pulse-packet-generators and a voltmeter. All these nodes/devices
# have specific properties that are specified in device specific
# dictionaries (here: `neuron_pars` for the neurons, `ppg_pars`
# for the pulse-packet generators and `vm_pars` for the voltmeter).
neuron_pars = {
'V_th': Vth,
'tau_m': tau_m,
'tau_syn_ex': tau_s,
'C_m': cm,
'E_L': V0,
'V_reset': V0,
'V_m': V0
}
neurons = nest.Create('iaf_psc_alpha', n_neurons, neuron_pars)
ppg_pars = {
'pulse_times': [pulsetime],
'activity': a,
'sdev': sdev
}
ppgs = nest.Create('pulsepacket_generator', n_neurons, ppg_pars)
vm_pars = {'interval': sampling_resolution}
vm = nest.Create('voltmeter', 1, vm_pars)
###############################################################################
# Now, we connect each pulse generator to one neuron via static synapses.
# We want to keep all properties of the static synapse constant except the
# synaptic weight. Therefore we change the weight with the help of the command
# ``SetDefaults``.
# The command ``Connect`` connects all kinds of nodes/devices. Since multiple
# nodes/devices can be connected in different ways e.g., each source connects
# to all targets, each source connects to a subset of targets or each source
# connects to exactly one target, we have to specify the connection. In our
# case we use the ``one_to_one`` connection routine since we connect one pulse
# generator (source) to one neuron (target).
# In addition we also connect the `voltmeter` to the `neurons`.
nest.SetDefaults('static_synapse', {'weight': weight})
nest.Connect(ppgs, neurons, 'one_to_one')
nest.Connect(vm, neurons)
###############################################################################
# In the next step we run the simulation for a given duration in ms.
nest.Simulate(simtime)
###############################################################################
# Finally, we record the membrane potential, when it occurred and to which
# neuron it belongs. The sender and the time point of a voltage
# data point at position x in the voltage array (``V_m``), can be found at the
# same position x in the sender (`senders`) and the time array (`times`).
Vm = vm.get('events', 'V_m')
times = vm.get('events', 'times')
senders = vm.get('events', 'senders')
###############################################################################
# Here we plot the membrane potential derived from the theory and from the
# simulation. Since we simulate multiple neurons that received slightly
# different pulse packets, we plot the individual and the averaged membrane
# potentials.
#
# We plot the analytical solution U (the resting potential V0 shifts the
# membrane potential up or downwards).
plt.plot(t_U, U + V0, 'r', lw=2, zorder=3, label='analytical solution')
###############################################################################
# Then we plot all individual membrane potentials.
# The time axis spans the simulation time in steps of ms.
Vm_single = [Vm[senders == n.global_id] for n in neurons]
simtimes = numpy.arange(1, simtime)
for idn in range(n_neurons):
if idn == 0:
plt.plot(simtimes, Vm_single[idn], 'gray',
zorder=1, label='single potentials')
else:
plt.plot(simtimes, Vm_single[idn], 'gray', zorder=1)
###############################################################################
# Finally, we plot the averaged membrane potential.
Vm_average = numpy.mean(Vm_single, axis=0)
plt.plot(simtimes, Vm_average, 'b', lw=4,
zorder=2, label='averaged potential')
plt.legend()
plt.xlabel('time (ms)')
plt.ylabel('membrane potential (mV)')
plt.xlim((-5 * (tau_m + tau_s) + pulsetime,
10 * (tau_m + tau_s) + pulsetime))
plt.show()
| gpl-2.0 |
Lyleo/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_fltkagg.py | 69 | 20839 | """
A backend for FLTK
Copyright: Gregory Lielens, Free Field Technologies SA and
John D. Hunter 2004
This code is released under the matplotlib license
"""
from __future__ import division
import os, sys, math
import fltk as Fltk
from backend_agg import FigureCanvasAgg
import os.path
import matplotlib
from matplotlib import rcParams, verbose
from matplotlib.cbook import is_string_like
from matplotlib.backend_bases import \
RendererBase, GraphicsContextBase, FigureManagerBase, FigureCanvasBase,\
NavigationToolbar2, cursors
from matplotlib.figure import Figure
from matplotlib._pylab_helpers import Gcf
import matplotlib.windowing as windowing
from matplotlib.widgets import SubplotTool
import thread,time
Fl_running=thread.allocate_lock()
def Fltk_run_interactive():
global Fl_running
if Fl_running.acquire(0):
while True:
Fltk.Fl.check()
time.sleep(0.005)
else:
print "fl loop already running"
# the true dots per inch on the screen; should be display dependent
# see http://groups.google.com/groups?q=screen+dpi+x11&hl=en&lr=&ie=UTF-8&oe=UTF-8&safe=off&selm=7077.26e81ad5%40swift.cs.tcd.ie&rnum=5 for some info about screen dpi
PIXELS_PER_INCH = 75
cursord= {
cursors.HAND: Fltk.FL_CURSOR_HAND,
cursors.POINTER: Fltk.FL_CURSOR_ARROW,
cursors.SELECT_REGION: Fltk.FL_CURSOR_CROSS,
cursors.MOVE: Fltk.FL_CURSOR_MOVE
}
special_key={
Fltk.FL_Shift_R:'shift',
Fltk.FL_Shift_L:'shift',
Fltk.FL_Control_R:'control',
Fltk.FL_Control_L:'control',
65515:'win',
65516:'win',
}
def error_msg_fltk(msg, parent=None):
Fltk.fl_message(msg)
def draw_if_interactive():
if matplotlib.is_interactive():
figManager = Gcf.get_active()
if figManager is not None:
figManager.canvas.draw()
def ishow():
"""
Show all the figures and enter the fltk mainloop in another thread
    This allows you to keep control of an interactive Python session
Warning: does not work under windows
This should be the last line of your script
"""
for manager in Gcf.get_all_fig_managers():
manager.show()
if show._needmain:
thread.start_new_thread(Fltk_run_interactive,())
show._needmain = False
def show():
"""
Show all the figures and enter the fltk mainloop
This should be the last line of your script
"""
for manager in Gcf.get_all_fig_managers():
manager.show()
    # mainloop; if an fltk program already exists there is no need to call this
    # threaded (and interactive) version
if show._needmain:
Fltk.Fl.run()
show._needmain = False
show._needmain = True
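# A minimal, hedged usage sketch (assumes pyFLTK and this backend are installed; 'FltkAgg'
# is the backend name registered by the matplotlib versions this file targets). Selecting
# the backend must happen before pyplot/pylab is imported. Nothing in this module calls
# this helper; it is illustrative only.
def _fltkagg_usage_demo():
    import matplotlib
    matplotlib.use('FltkAgg')
    import matplotlib.pyplot as plt
    plt.plot([1, 2, 3], [1, 4, 9])
    plt.show()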
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
FigureClass = kwargs.pop('FigureClass', Figure)
figure = FigureClass(*args, **kwargs)
window = Fltk.Fl_Double_Window(10,10,30,30)
canvas = FigureCanvasFltkAgg(figure)
window.end()
window.show()
window.make_current()
figManager = FigureManagerFltkAgg(canvas, num, window)
if matplotlib.is_interactive():
figManager.show()
return figManager
class FltkCanvas(Fltk.Fl_Widget):
def __init__(self,x,y,w,h,l,source):
Fltk.Fl_Widget.__init__(self, 0, 0, w, h, "canvas")
self._source=source
self._oldsize=(None,None)
self._draw_overlay = False
self._button = None
self._key = None
def draw(self):
newsize=(self.w(),self.h())
if(self._oldsize !=newsize):
self._oldsize =newsize
self._source.resize(newsize)
self._source.draw()
t1,t2,w,h = self._source.figure.bbox.bounds
Fltk.fl_draw_image(self._source.buffer_rgba(0,0),0,0,int(w),int(h),4,0)
self.redraw()
def blit(self,bbox=None):
if bbox is None:
t1,t2,w,h = self._source.figure.bbox.bounds
else:
t1o,t2o,wo,ho = self._source.figure.bbox.bounds
t1,t2,w,h = bbox.bounds
x,y=int(t1),int(t2)
Fltk.fl_draw_image(self._source.buffer_rgba(x,y),x,y,int(w),int(h),4,int(wo)*4)
#self.redraw()
def handle(self, event):
x=Fltk.Fl.event_x()
y=Fltk.Fl.event_y()
yf=self._source.figure.bbox.height() - y
if event == Fltk.FL_FOCUS or event == Fltk.FL_UNFOCUS:
return 1
elif event == Fltk.FL_KEYDOWN:
ikey= Fltk.Fl.event_key()
if(ikey<=255):
self._key=chr(ikey)
else:
try:
self._key=special_key[ikey]
except:
self._key=None
FigureCanvasBase.key_press_event(self._source, self._key)
return 1
elif event == Fltk.FL_KEYUP:
FigureCanvasBase.key_release_event(self._source, self._key)
self._key=None
elif event == Fltk.FL_PUSH:
self.window().make_current()
if Fltk.Fl.event_button1():
self._button = 1
elif Fltk.Fl.event_button2():
self._button = 2
elif Fltk.Fl.event_button3():
self._button = 3
else:
self._button = None
if self._draw_overlay:
self._oldx=x
self._oldy=y
if Fltk.Fl.event_clicks():
FigureCanvasBase.button_press_event(self._source, x, yf, self._button)
return 1
else:
FigureCanvasBase.button_press_event(self._source, x, yf, self._button)
return 1
elif event == Fltk.FL_ENTER:
self.take_focus()
return 1
elif event == Fltk.FL_LEAVE:
return 1
elif event == Fltk.FL_MOVE:
FigureCanvasBase.motion_notify_event(self._source, x, yf)
return 1
elif event == Fltk.FL_DRAG:
self.window().make_current()
if self._draw_overlay:
self._dx=Fltk.Fl.event_x()-self._oldx
self._dy=Fltk.Fl.event_y()-self._oldy
Fltk.fl_overlay_rect(self._oldx,self._oldy,self._dx,self._dy)
FigureCanvasBase.motion_notify_event(self._source, x, yf)
return 1
elif event == Fltk.FL_RELEASE:
self.window().make_current()
if self._draw_overlay:
Fltk.fl_overlay_clear()
FigureCanvasBase.button_release_event(self._source, x, yf, self._button)
self._button = None
return 1
return 0
class FigureCanvasFltkAgg(FigureCanvasAgg):
def __init__(self, figure):
FigureCanvasAgg.__init__(self,figure)
t1,t2,w,h = self.figure.bbox.bounds
w, h = int(w), int(h)
self.canvas=FltkCanvas(0, 0, w, h, "canvas",self)
#self.draw()
def resize(self,size):
w, h = size
# compute desired figure size in inches
dpival = self.figure.dpi.get()
winch = w/dpival
hinch = h/dpival
self.figure.set_size_inches(winch,hinch)
def draw(self):
FigureCanvasAgg.draw(self)
self.canvas.redraw()
def blit(self,bbox):
self.canvas.blit(bbox)
show = draw
def widget(self):
return self.canvas
def start_event_loop(self,timeout):
FigureCanvasBase.start_event_loop_default(self,timeout)
start_event_loop.__doc__=FigureCanvasBase.start_event_loop_default.__doc__
def stop_event_loop(self):
FigureCanvasBase.stop_event_loop_default(self)
stop_event_loop.__doc__=FigureCanvasBase.stop_event_loop_default.__doc__
def destroy_figure(ptr,figman):
figman.window.hide()
Gcf.destroy(figman._num)
class FigureManagerFltkAgg(FigureManagerBase):
"""
Public attributes
canvas : The FigureCanvas instance
num : The Figure number
toolbar : The fltk.Toolbar
window : The fltk.Window
"""
def __init__(self, canvas, num, window):
FigureManagerBase.__init__(self, canvas, num)
#Fltk container window
t1,t2,w,h = canvas.figure.bbox.bounds
w, h = int(w), int(h)
self.window = window
self.window.size(w,h+30)
self.window_title="Figure %d" % num
self.window.label(self.window_title)
self.window.size_range(350,200)
self.window.callback(destroy_figure,self)
self.canvas = canvas
self._num = num
if matplotlib.rcParams['toolbar']=='classic':
self.toolbar = NavigationToolbar( canvas, self )
elif matplotlib.rcParams['toolbar']=='toolbar2':
self.toolbar = NavigationToolbar2FltkAgg( canvas, self )
else:
self.toolbar = None
self.window.add_resizable(canvas.widget())
if self.toolbar:
self.window.add(self.toolbar.widget())
self.toolbar.update()
self.window.show()
def notify_axes_change(fig):
'this will be called whenever the current axes is changed'
if self.toolbar != None: self.toolbar.update()
self.canvas.figure.add_axobserver(notify_axes_change)
def resize(self, event):
width, height = event.width, event.height
self.toolbar.configure(width=width) # , height=height)
def show(self):
_focus = windowing.FocusManager()
self.canvas.draw()
self.window.redraw()
def set_window_title(self, title):
self.window_title=title
self.window.label(title)
class AxisMenu:
def __init__(self, toolbar):
self.toolbar=toolbar
self._naxes = toolbar.naxes
self._mbutton = Fltk.Fl_Menu_Button(0,0,50,10,"Axes")
self._mbutton.add("Select All",0,select_all,self,0)
self._mbutton.add("Invert All",0,invert_all,self,Fltk.FL_MENU_DIVIDER)
self._axis_txt=[]
self._axis_var=[]
for i in range(self._naxes):
self._axis_txt.append("Axis %d" % (i+1))
self._mbutton.add(self._axis_txt[i],0,set_active,self,Fltk.FL_MENU_TOGGLE)
for i in range(self._naxes):
self._axis_var.append(self._mbutton.find_item(self._axis_txt[i]))
self._axis_var[i].set()
def adjust(self, naxes):
if self._naxes < naxes:
for i in range(self._naxes, naxes):
self._axis_txt.append("Axis %d" % (i+1))
self._mbutton.add(self._axis_txt[i],0,set_active,self,Fltk.FL_MENU_TOGGLE)
for i in range(self._naxes, naxes):
self._axis_var.append(self._mbutton.find_item(self._axis_txt[i]))
self._axis_var[i].set()
elif self._naxes > naxes:
for i in range(self._naxes-1, naxes-1, -1):
self._mbutton.remove(i+2)
if(naxes):
self._axis_var=self._axis_var[:naxes-1]
self._axis_txt=self._axis_txt[:naxes-1]
else:
self._axis_var=[]
self._axis_txt=[]
self._naxes = naxes
set_active(0,self)
def widget(self):
return self._mbutton
def get_indices(self):
a = [i for i in range(len(self._axis_var)) if self._axis_var[i].value()]
return a
def set_active(ptr,amenu):
amenu.toolbar.set_active(amenu.get_indices())
def invert_all(ptr,amenu):
for a in amenu._axis_var:
if not a.value(): a.set()
set_active(ptr,amenu)
def select_all(ptr,amenu):
for a in amenu._axis_var:
a.set()
set_active(ptr,amenu)
class FLTKButton:
def __init__(self, text, file, command,argument,type="classic"):
file = os.path.join(rcParams['datapath'], 'images', file)
self.im = Fltk.Fl_PNM_Image(file)
size=26
if type=="repeat":
self.b = Fltk.Fl_Repeat_Button(0,0,size,10)
self.b.box(Fltk.FL_THIN_UP_BOX)
elif type=="classic":
self.b = Fltk.Fl_Button(0,0,size,10)
self.b.box(Fltk.FL_THIN_UP_BOX)
elif type=="light":
self.b = Fltk.Fl_Light_Button(0,0,size+20,10)
self.b.box(Fltk.FL_THIN_UP_BOX)
elif type=="pushed":
self.b = Fltk.Fl_Button(0,0,size,10)
self.b.box(Fltk.FL_UP_BOX)
self.b.down_box(Fltk.FL_DOWN_BOX)
self.b.type(Fltk.FL_TOGGLE_BUTTON)
self.tooltiptext=text+" "
self.b.tooltip(self.tooltiptext)
self.b.callback(command,argument)
self.b.image(self.im)
self.b.deimage(self.im)
self.type=type
def widget(self):
return self.b
class NavigationToolbar:
"""
    Public attributes
canvas - the FigureCanvas (FigureCanvasFltkAgg = customised fltk.Widget)
"""
def __init__(self, canvas, figman):
#xmin, xmax = canvas.figure.bbox.intervalx().get_bounds()
#height, width = 50, xmax-xmin
self.canvas = canvas
self.figman = figman
Fltk.Fl_File_Icon.load_system_icons()
self._fc = Fltk.Fl_File_Chooser( ".", "*", Fltk.Fl_File_Chooser.CREATE, "Save Figure" )
self._fc.hide()
t1,t2,w,h = canvas.figure.bbox.bounds
w, h = int(w), int(h)
self._group = Fltk.Fl_Pack(0,h+2,1000,26)
self._group.type(Fltk.FL_HORIZONTAL)
self._axes=self.canvas.figure.axes
self.naxes = len(self._axes)
self.omenu = AxisMenu( toolbar=self)
self.bLeft = FLTKButton(
text="Left", file="stock_left.ppm",
command=pan,argument=(self,1,'x'),type="repeat")
self.bRight = FLTKButton(
text="Right", file="stock_right.ppm",
command=pan,argument=(self,-1,'x'),type="repeat")
self.bZoomInX = FLTKButton(
text="ZoomInX",file="stock_zoom-in.ppm",
command=zoom,argument=(self,1,'x'),type="repeat")
self.bZoomOutX = FLTKButton(
text="ZoomOutX", file="stock_zoom-out.ppm",
command=zoom, argument=(self,-1,'x'),type="repeat")
self.bUp = FLTKButton(
text="Up", file="stock_up.ppm",
command=pan,argument=(self,1,'y'),type="repeat")
self.bDown = FLTKButton(
text="Down", file="stock_down.ppm",
command=pan,argument=(self,-1,'y'),type="repeat")
self.bZoomInY = FLTKButton(
text="ZoomInY", file="stock_zoom-in.ppm",
command=zoom,argument=(self,1,'y'),type="repeat")
self.bZoomOutY = FLTKButton(
text="ZoomOutY",file="stock_zoom-out.ppm",
command=zoom, argument=(self,-1,'y'),type="repeat")
self.bSave = FLTKButton(
text="Save", file="stock_save_as.ppm",
command=save_figure, argument=self)
self._group.end()
def widget(self):
return self._group
def close(self):
Gcf.destroy(self.figman._num)
def set_active(self, ind):
self._ind = ind
self._active = [ self._axes[i] for i in self._ind ]
def update(self):
self._axes = self.canvas.figure.axes
naxes = len(self._axes)
self.omenu.adjust(naxes)
def pan(ptr, arg):
base,direction,axe=arg
for a in base._active:
if(axe=='x'):
a.panx(direction)
else:
a.pany(direction)
base.figman.show()
def zoom(ptr, arg):
base,direction,axe=arg
for a in base._active:
if(axe=='x'):
a.zoomx(direction)
else:
a.zoomy(direction)
base.figman.show()
def save_figure(ptr,base):
filetypes = base.canvas.get_supported_filetypes()
default_filetype = base.canvas.get_default_filetype()
sorted_filetypes = filetypes.items()
sorted_filetypes.sort()
selected_filter = 0
filters = []
for i, (ext, name) in enumerate(sorted_filetypes):
filter = '%s (*.%s)' % (name, ext)
filters.append(filter)
if ext == default_filetype:
selected_filter = i
filters = '\t'.join(filters)
file_chooser=base._fc
file_chooser.filter(filters)
file_chooser.filter_value(selected_filter)
file_chooser.show()
while file_chooser.visible() :
Fltk.Fl.wait()
fname=None
if(file_chooser.count() and file_chooser.value(0) != None):
fname=""
(status,fname)=Fltk.fl_filename_absolute(fname, 1024, file_chooser.value(0))
if fname is None: # Cancel
return
#start from last directory
lastDir = os.path.dirname(fname)
file_chooser.directory(lastDir)
format = sorted_filetypes[file_chooser.filter_value()][0]
try:
base.canvas.print_figure(fname, format=format)
except IOError, msg:
err = '\n'.join(map(str, msg))
msg = 'Failed to save %s: Error msg was\n\n%s' % (
fname, err)
error_msg_fltk(msg)
class NavigationToolbar2FltkAgg(NavigationToolbar2):
"""
    Public attributes
canvas - the FigureCanvas
figman - the Figure manager
"""
def __init__(self, canvas, figman):
self.canvas = canvas
self.figman = figman
NavigationToolbar2.__init__(self, canvas)
self.pan_selected=False
self.zoom_selected=False
def set_cursor(self, cursor):
Fltk.fl_cursor(cursord[cursor],Fltk.FL_BLACK,Fltk.FL_WHITE)
def dynamic_update(self):
self.canvas.draw()
def pan(self,*args):
self.pan_selected=not self.pan_selected
self.zoom_selected = False
self.canvas.canvas._draw_overlay= False
if self.pan_selected:
self.bPan.widget().value(1)
else:
self.bPan.widget().value(0)
if self.zoom_selected:
self.bZoom.widget().value(1)
else:
self.bZoom.widget().value(0)
NavigationToolbar2.pan(self,args)
def zoom(self,*args):
self.zoom_selected=not self.zoom_selected
self.canvas.canvas._draw_overlay=self.zoom_selected
self.pan_selected = False
if self.pan_selected:
self.bPan.widget().value(1)
else:
self.bPan.widget().value(0)
if self.zoom_selected:
self.bZoom.widget().value(1)
else:
self.bZoom.widget().value(0)
NavigationToolbar2.zoom(self,args)
def configure_subplots(self,*args):
window = Fltk.Fl_Double_Window(100,100,480,240)
toolfig = Figure(figsize=(6,3))
canvas = FigureCanvasFltkAgg(toolfig)
window.end()
toolfig.subplots_adjust(top=0.9)
tool = SubplotTool(self.canvas.figure, toolfig)
window.show()
canvas.show()
def _init_toolbar(self):
Fltk.Fl_File_Icon.load_system_icons()
self._fc = Fltk.Fl_File_Chooser( ".", "*", Fltk.Fl_File_Chooser.CREATE, "Save Figure" )
self._fc.hide()
t1,t2,w,h = self.canvas.figure.bbox.bounds
w, h = int(w), int(h)
self._group = Fltk.Fl_Pack(0,h+2,1000,26)
self._group.type(Fltk.FL_HORIZONTAL)
self._axes=self.canvas.figure.axes
self.naxes = len(self._axes)
self.omenu = AxisMenu( toolbar=self)
self.bHome = FLTKButton(
text="Home", file="home.ppm",
command=self.home,argument=self)
self.bBack = FLTKButton(
text="Back", file="back.ppm",
command=self.back,argument=self)
self.bForward = FLTKButton(
text="Forward", file="forward.ppm",
command=self.forward,argument=self)
self.bPan = FLTKButton(
text="Pan/Zoom",file="move.ppm",
command=self.pan,argument=self,type="pushed")
self.bZoom = FLTKButton(
text="Zoom to rectangle",file="zoom_to_rect.ppm",
command=self.zoom,argument=self,type="pushed")
self.bsubplot = FLTKButton( text="Configure Subplots", file="subplots.ppm",
command = self.configure_subplots,argument=self,type="pushed")
self.bSave = FLTKButton(
text="Save", file="filesave.ppm",
command=save_figure, argument=self)
self._group.end()
self.message = Fltk.Fl_Output(0,0,w,8)
self._group.add_resizable(self.message)
self.update()
def widget(self):
return self._group
def close(self):
Gcf.destroy(self.figman._num)
def set_active(self, ind):
self._ind = ind
self._active = [ self._axes[i] for i in self._ind ]
def update(self):
self._axes = self.canvas.figure.axes
naxes = len(self._axes)
self.omenu.adjust(naxes)
NavigationToolbar2.update(self)
def set_message(self, s):
self.message.value(s)
FigureManager = FigureManagerFltkAgg
| gpl-3.0 |
BhallaLab/moose-full | moose-examples/snippets/switchKineticSolvers.py | 2 | 5089 | #########################################################################
## This program is part of 'MOOSE', the
## Messaging Object Oriented Simulation Environment.
## Copyright (C) 2014 Upinder S. Bhalla. and NCBS
## It is made available under the terms of the
## GNU Lesser General Public License version 2.1
## See the file COPYING.LIB for the full notice.
#########################################################################
import moose
import pylab
import numpy
import matplotlib.pyplot as plt
import sys
def runAndSavePlots( name ):
runtime = 20.0
moose.reinit()
moose.start( runtime )
pa = moose.Neutral( '/model/graphs/' + name )
for x in moose.wildcardFind( '/model/#graphs/conc#/#' ):
if ( x.tick != -1 ):
tabname = '/model/graphs/' + name + '/' + x.name + '.' + name
y = moose.Table( tabname )
y.vector = x.vector
y.tick = -1
# Takes args ee, gsl, or gssa
def switchSolvers( solver ):
if ( moose.exists( 'model/kinetics/stoich' ) ):
moose.delete( '/model/kinetics/stoich' )
moose.delete( '/model/kinetics/ksolve' )
compt = moose.element( '/model/kinetics' )
if ( solver == 'gsl' ):
ksolve = moose.Ksolve( '/model/kinetics/ksolve' )
if ( solver == 'gssa' ):
ksolve = moose.Gsolve( '/model/kinetics/ksolve' )
if ( solver != 'ee' ):
stoich = moose.Stoich( '/model/kinetics/stoich' )
stoich.compartment = compt
stoich.ksolve = ksolve
stoich.path = "/model/kinetics/##"
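# A hedged sketch of how a diffusion solver would be set up alongside the kinetic solver,
# as mentioned in the docstring of main() below. The moose.Dsolve class and the
# stoich.dsolve field follow the usual MOOSE reaction-diffusion examples; treat the exact
# calls as an assumption, since this script itself never exercises diffusion.
def switchToReacDiffSolver():
    if moose.exists('/model/kinetics/stoich'):
        moose.delete('/model/kinetics/stoich')
        moose.delete('/model/kinetics/ksolve')
    compt = moose.element('/model/kinetics')
    ksolve = moose.Ksolve('/model/kinetics/ksolve')
    dsolve = moose.Dsolve('/model/kinetics/dsolve')
    stoich = moose.Stoich('/model/kinetics/stoich')
    stoich.compartment = compt
    stoich.ksolve = ksolve
    stoich.dsolve = dsolve  # assign both solvers before setting the path
    stoich.path = "/model/kinetics/##"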
def main():
"""
At zero order, you can select the solver you want to use within the
function moose.loadModel( filename, modelpath, solver ).
Having loaded in the model, you can change the solver to use on it.
This example illustrates how to assign and change solvers for a
kinetic model. This process is necessary in two situations:
* If we want to change the numerical method employed, for example,
from deterministic to stochastic.
* If we are already using a solver, and we have changed the reaction
network by adding or removing molecules or reactions.
Note that we do not have to change the solvers if the volume or
reaction rates change.
In this example the model is loaded in with a gsl solver. The
sequence of solver calculations is:
#. gsl
#. ee
#. gsl
#. gssa
#. gsl
If you're removing the solvers, you just delete the stoichiometry
object and the associated ksolve/gsolve. Should there be diffusion
(a dsolve)then you should delete that too. If you're
building the solvers up again, then you must do the following
steps in order:
#. build up the ksolve/gsolve and stoich (any order)
#. Assign stoich.ksolve
#. Assign stoich.path.
See the Reaction-diffusion section should you want to do diffusion
as well.
"""
solver = "gsl" # Pick any of gsl, gssa, ee..
mfile = '../genesis/kkit_objects_example.g'
modelId = moose.loadModel( mfile, 'model', solver )
# Increase volume so that the stochastic solver gssa
# gives an interesting output
compt = moose.element( '/model/kinetics' )
compt.volume = 1e-19
runAndSavePlots( 'gsl' )
#########################################################
switchSolvers( 'ee' )
runAndSavePlots( 'ee' )
#########################################################
switchSolvers( 'gsl' )
runAndSavePlots( 'gsl2' )
#########################################################
switchSolvers( 'gssa' )
runAndSavePlots( 'gssa' )
#########################################################
switchSolvers( 'gsl' )
runAndSavePlots( 'gsl3' )
#########################################################
# Display all plots.
fig = plt.figure( figsize = (12, 10) )
orig = fig.add_subplot( 511 )
gsl = fig.add_subplot( 512 )
ee = fig.add_subplot( 513 )
gsl2 = fig.add_subplot( 514 )
gssa = fig.add_subplot( 515 )
plotdt = moose.element( '/clock' ).tickDt[18]
for x in moose.wildcardFind( '/model/#graphs/conc#/#' ):
t = numpy.arange( 0, x.vector.size, 1 ) * plotdt
orig.plot( t, x.vector, label=x.name )
for x in moose.wildcardFind( '/model/graphs/gsl/#' ):
t = numpy.arange( 0, x.vector.size, 1 ) * plotdt
gsl.plot( t, x.vector, label=x.name )
for x in moose.wildcardFind( '/model/graphs/ee/#' ):
t = numpy.arange( 0, x.vector.size, 1 ) * plotdt
ee.plot( t, x.vector, label=x.name )
for x in moose.wildcardFind( '/model/graphs/gsl2/#' ):
t = numpy.arange( 0, x.vector.size, 1 ) * plotdt
gsl2.plot( t, x.vector, label=x.name )
for x in moose.wildcardFind( '/model/graphs/gssa/#' ):
t = numpy.arange( 0, x.vector.size, 1 ) * plotdt
gssa.plot( t, x.vector, label=x.name )
plt.legend()
pylab.show()
quit()
# Run the 'main' if this script is executed standalone.
if __name__ == '__main__':
main()
| gpl-2.0 |
zhenv5/scikit-learn | sklearn/neighbors/approximate.py | 71 | 22357 | """Approximate nearest neighbor search"""
# Author: Maheshakya Wijewardena <[email protected]>
# Joel Nothman <[email protected]>
import numpy as np
import warnings
from scipy import sparse
from .base import KNeighborsMixin, RadiusNeighborsMixin
from ..base import BaseEstimator
from ..utils.validation import check_array
from ..utils import check_random_state
from ..metrics.pairwise import pairwise_distances
from ..random_projection import GaussianRandomProjection
__all__ = ["LSHForest"]
HASH_DTYPE = '>u4'
MAX_HASH_SIZE = np.dtype(HASH_DTYPE).itemsize * 8
def _find_matching_indices(tree, bin_X, left_mask, right_mask):
"""Finds indices in sorted array of integers.
Most significant h bits in the binary representations of the
integers are matched with the items' most significant h bits.
"""
left_index = np.searchsorted(tree, bin_X & left_mask)
right_index = np.searchsorted(tree, bin_X | right_mask,
side='right')
return left_index, right_index
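# A small, hedged illustration of the prefix search above (the values are made up): with a
# 4-bit prefix on 32-bit hashes, every stored hash sharing the query's top 4 bits falls in
# the half-open slice [left, right) of the sorted array. Not called anywhere in this module.
def _prefix_match_demo():
    tree = np.sort(np.array([0x10000000, 0x1FFFFFFF, 0x20000000, 0x2ABCDEF0,
                             0x2FFFFFFF, 0x90000000], dtype=HASH_DTYPE))
    query = np.array([0x2AAAAAAA], dtype=HASH_DTYPE)
    left_mask = np.array(0xF0000000, dtype=HASH_DTYPE)   # keep the top 4 bits
    right_mask = np.array(0x0FFFFFFF, dtype=HASH_DTYPE)  # fill the lower 28 bits
    left, right = _find_matching_indices(tree, query, left_mask, right_mask)
    return left, right  # expected: (array([2]), array([5])) for the data above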
def _find_longest_prefix_match(tree, bin_X, hash_size,
left_masks, right_masks):
"""Find the longest prefix match in tree for each query in bin_X
Most significant bits are considered as the prefix.
"""
hi = np.empty_like(bin_X, dtype=np.intp)
hi.fill(hash_size)
lo = np.zeros_like(bin_X, dtype=np.intp)
res = np.empty_like(bin_X, dtype=np.intp)
left_idx, right_idx = _find_matching_indices(tree, bin_X,
left_masks[hi],
right_masks[hi])
found = right_idx > left_idx
res[found] = lo[found] = hash_size
r = np.arange(bin_X.shape[0])
kept = r[lo < hi] # indices remaining in bin_X mask
while kept.shape[0]:
mid = (lo.take(kept) + hi.take(kept)) // 2
left_idx, right_idx = _find_matching_indices(tree,
bin_X.take(kept),
left_masks[mid],
right_masks[mid])
found = right_idx > left_idx
mid_found = mid[found]
lo[kept[found]] = mid_found + 1
res[kept[found]] = mid_found
hi[kept[~found]] = mid[~found]
kept = r[lo < hi]
return res
class ProjectionToHashMixin(object):
"""Turn a transformed real-valued array into a hash"""
@staticmethod
def _to_hash(projected):
if projected.shape[1] % 8 != 0:
raise ValueError('Require reduced dimensionality to be a multiple '
'of 8 for hashing')
# XXX: perhaps non-copying operation better
out = np.packbits((projected > 0).astype(int)).view(dtype=HASH_DTYPE)
return out.reshape(projected.shape[0], -1)
def fit_transform(self, X, y=None):
self.fit(X)
return self.transform(X)
def transform(self, X, y=None):
return self._to_hash(super(ProjectionToHashMixin, self).transform(X))
class GaussianRandomProjectionHash(ProjectionToHashMixin,
GaussianRandomProjection):
"""Use GaussianRandomProjection to produce a cosine LSH fingerprint"""
def __init__(self,
n_components=8,
random_state=None):
super(GaussianRandomProjectionHash, self).__init__(
n_components=n_components,
random_state=random_state)
def _array_of_arrays(list_of_arrays):
"""Creates an array of array from list of arrays."""
out = np.empty(len(list_of_arrays), dtype=object)
out[:] = list_of_arrays
return out
class LSHForest(BaseEstimator, KNeighborsMixin, RadiusNeighborsMixin):
"""Performs approximate nearest neighbor search using LSH forest.
LSH Forest: Locality Sensitive Hashing forest [1] is an alternative
method for vanilla approximate nearest neighbor search methods.
LSH forest data structure has been implemented using sorted
arrays and binary search and 32 bit fixed-length hashes.
Random projection is used as the hash family which approximates
cosine distance.
The cosine distance is defined as ``1 - cosine_similarity``: the lowest
value is 0 (identical point) but it is bounded above by 2 for the farthest
points. Its value does not depend on the norm of the vector points but
only on their relative angles.
Read more in the :ref:`User Guide <approximate_nearest_neighbors>`.
Parameters
----------
n_estimators : int (default = 10)
Number of trees in the LSH Forest.
min_hash_match : int (default = 4)
lowest hash length to be searched when candidate selection is
performed for nearest neighbors.
n_candidates : int (default = 10)
Minimum number of candidates evaluated per estimator, assuming enough
items meet the `min_hash_match` constraint.
n_neighbors : int (default = 5)
Number of neighbors to be returned from query function when
it is not provided to the :meth:`kneighbors` method.
    radius : float, optional (default = 1.0)
Radius from the data point to its neighbors. This is the parameter
        space to use by default for the :meth:`radius_neighbors` queries.
radius_cutoff_ratio : float, optional (default = 0.9)
A value ranges from 0 to 1. Radius neighbors will be searched until
the ratio between total neighbors within the radius and the total
candidates becomes less than this value unless it is terminated by
hash length reaching `min_hash_match`.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
hash_functions_ : list of GaussianRandomProjectionHash objects
Hash function g(p,x) for a tree is an array of 32 randomly generated
        float arrays with the same dimension as the data set. This array is
stored in GaussianRandomProjectionHash object and can be obtained
from ``components_`` attribute.
trees_ : array, shape (n_estimators, n_samples)
Each tree (corresponding to a hash function) contains an array of
sorted hashed values. The array representation may change in future
versions.
original_indices_ : array, shape (n_estimators, n_samples)
Original indices of sorted hashed values in the fitted index.
References
----------
.. [1] M. Bawa, T. Condie and P. Ganesan, "LSH Forest: Self-Tuning
Indexes for Similarity Search", WWW '05 Proceedings of the
14th international conference on World Wide Web, 651-660,
2005.
Examples
--------
>>> from sklearn.neighbors import LSHForest
>>> X_train = [[5, 5, 2], [21, 5, 5], [1, 1, 1], [8, 9, 1], [6, 10, 2]]
>>> X_test = [[9, 1, 6], [3, 1, 10], [7, 10, 3]]
>>> lshf = LSHForest()
>>> lshf.fit(X_train) # doctest: +NORMALIZE_WHITESPACE
LSHForest(min_hash_match=4, n_candidates=50, n_estimators=10,
n_neighbors=5, radius=1.0, radius_cutoff_ratio=0.9,
random_state=None)
>>> distances, indices = lshf.kneighbors(X_test, n_neighbors=2)
>>> distances # doctest: +ELLIPSIS
array([[ 0.069..., 0.149...],
[ 0.229..., 0.481...],
[ 0.004..., 0.014...]])
>>> indices
array([[1, 2],
[2, 0],
[4, 0]])
"""
def __init__(self, n_estimators=10, radius=1.0, n_candidates=50,
n_neighbors=5, min_hash_match=4, radius_cutoff_ratio=.9,
random_state=None):
self.n_estimators = n_estimators
self.radius = radius
self.random_state = random_state
self.n_candidates = n_candidates
self.n_neighbors = n_neighbors
self.min_hash_match = min_hash_match
self.radius_cutoff_ratio = radius_cutoff_ratio
def _compute_distances(self, query, candidates):
"""Computes the cosine distance.
Distance is from the query to points in the candidates array.
Returns argsort of distances in the candidates
array and sorted distances.
"""
if candidates.shape == (0,):
# needed since _fit_X[np.array([])] doesn't work if _fit_X sparse
return np.empty(0, dtype=np.int), np.empty(0, dtype=float)
if sparse.issparse(self._fit_X):
candidate_X = self._fit_X[candidates]
else:
candidate_X = self._fit_X.take(candidates, axis=0, mode='clip')
distances = pairwise_distances(query, candidate_X,
metric='cosine')[0]
distance_positions = np.argsort(distances)
distances = distances.take(distance_positions, mode='clip', axis=0)
return distance_positions, distances
def _generate_masks(self):
"""Creates left and right masks for all hash lengths."""
tri_size = MAX_HASH_SIZE + 1
# Called once on fitting, output is independent of hashes
left_mask = np.tril(np.ones((tri_size, tri_size), dtype=int))[:, 1:]
right_mask = left_mask[::-1, ::-1]
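        # Row k of left_mask has ones in its first k bit positions, so after
        # packbits the k-th entry masks the k leading bits of a packed hash,
        # while right_mask[k] covers the remaining trailing bits. Together
        # they let _find_matching_indices compare hash prefixes with
        # integer masking.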
self._left_mask = np.packbits(left_mask).view(dtype=HASH_DTYPE)
self._right_mask = np.packbits(right_mask).view(dtype=HASH_DTYPE)
def _get_candidates(self, query, max_depth, bin_queries, n_neighbors):
"""Performs the Synchronous ascending phase.
Returns an array of candidates, their distance ranks and
distances.
"""
index_size = self._fit_X.shape[0]
# Number of candidates considered including duplicates
# XXX: not sure whether this is being calculated correctly wrt
# duplicates from different iterations through a single tree
n_candidates = 0
candidate_set = set()
min_candidates = self.n_candidates * self.n_estimators
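        # Ascending phase: repeatedly shorten the required hash prefix
        # (decrement max_depth) and collect matches from every tree until
        # enough candidates and at least n_neighbors distinct ones have been
        # found, or the prefix length falls to min_hash_match.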
while (max_depth > self.min_hash_match and
(n_candidates < min_candidates or
len(candidate_set) < n_neighbors)):
left_mask = self._left_mask[max_depth]
right_mask = self._right_mask[max_depth]
for i in range(self.n_estimators):
start, stop = _find_matching_indices(self.trees_[i],
bin_queries[i],
left_mask, right_mask)
n_candidates += stop - start
candidate_set.update(
self.original_indices_[i][start:stop].tolist())
max_depth -= 1
candidates = np.fromiter(candidate_set, count=len(candidate_set),
dtype=np.intp)
        # If there are not enough candidates, fill up the remainder
        # from the indices that were not selected.
if candidates.shape[0] < n_neighbors:
warnings.warn(
"Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (n_neighbors, self.min_hash_match))
remaining = np.setdiff1d(np.arange(0, index_size), candidates)
to_fill = n_neighbors - candidates.shape[0]
candidates = np.concatenate((candidates, remaining[:to_fill]))
ranks, distances = self._compute_distances(query,
candidates.astype(int))
return (candidates[ranks[:n_neighbors]],
distances[:n_neighbors])
def _get_radius_neighbors(self, query, max_depth, bin_queries, radius):
"""Finds radius neighbors from the candidates obtained.
        Their distances from the query are no greater than ``radius``.
Returns radius neighbors and distances.
"""
ratio_within_radius = 1
threshold = 1 - self.radius_cutoff_ratio
total_candidates = np.array([], dtype=int)
total_neighbors = np.array([], dtype=int)
total_distances = np.array([], dtype=float)
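        # Keep shortening the matched hash prefix while the fraction of
        # collected candidates that lie within `radius` stays above
        # 1 - radius_cutoff_ratio and the prefix is longer than
        # min_hash_match.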
while (max_depth > self.min_hash_match and
ratio_within_radius > threshold):
left_mask = self._left_mask[max_depth]
right_mask = self._right_mask[max_depth]
candidates = []
for i in range(self.n_estimators):
start, stop = _find_matching_indices(self.trees_[i],
bin_queries[i],
left_mask, right_mask)
candidates.extend(
self.original_indices_[i][start:stop].tolist())
candidates = np.setdiff1d(candidates, total_candidates)
total_candidates = np.append(total_candidates, candidates)
ranks, distances = self._compute_distances(query, candidates)
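            # `distances` is sorted ascending, so searchsorted gives the
            # number of new candidates within `radius`; inserting at the
            # positions found in total_distances keeps the accumulated
            # arrays sorted by distance.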
m = np.searchsorted(distances, radius, side='right')
positions = np.searchsorted(total_distances, distances[:m])
total_neighbors = np.insert(total_neighbors, positions,
candidates[ranks[:m]])
total_distances = np.insert(total_distances, positions,
distances[:m])
ratio_within_radius = (total_neighbors.shape[0] /
float(total_candidates.shape[0]))
max_depth = max_depth - 1
return total_neighbors, total_distances
def fit(self, X, y=None):
"""Fit the LSH forest on the data.
        This creates binary hashes of the input data points by taking the
        dot product of the input points with each hash function and
        converting the projections into binary strings based on the sign
        (positive/negative) of each projection. A sorted array of binary
        hashes is created for every tree.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
self : object
Returns self.
"""
self._fit_X = check_array(X, accept_sparse='csr')
# Creates a g(p,x) for each tree
self.hash_functions_ = []
self.trees_ = []
self.original_indices_ = []
rng = check_random_state(self.random_state)
int_max = np.iinfo(np.int32).max
for i in range(self.n_estimators):
# This is g(p,x) for a particular tree.
# Builds a single tree. Hashing is done on an array of data points.
# `GaussianRandomProjection` is used for hashing.
# `n_components=hash size and n_features=n_dim.
hasher = GaussianRandomProjectionHash(MAX_HASH_SIZE,
rng.randint(0, int_max))
hashes = hasher.fit_transform(self._fit_X)[:, 0]
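            # Sorting the packed hash values gives the tree; original_index
            # maps each sorted position back to the corresponding row of X.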
original_index = np.argsort(hashes)
bin_hashes = hashes[original_index]
self.original_indices_.append(original_index)
self.trees_.append(bin_hashes)
self.hash_functions_.append(hasher)
self._generate_masks()
return self
def _query(self, X):
"""Performs descending phase to find maximum depth."""
# Calculate hashes of shape (n_samples, n_estimators, [hash_size])
bin_queries = np.asarray([hasher.transform(X)[:, 0]
for hasher in self.hash_functions_])
bin_queries = np.rollaxis(bin_queries, 1)
# descend phase
depths = [_find_longest_prefix_match(tree, tree_queries, MAX_HASH_SIZE,
self._left_mask, self._right_mask)
for tree, tree_queries in zip(self.trees_,
np.rollaxis(bin_queries, 1))]
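        # depths has one entry per tree holding the longest matched prefix
        # for each query; the ascending phase starts from the deepest match
        # across all trees.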
return bin_queries, np.max(depths, axis=0)
def kneighbors(self, X, n_neighbors=None, return_distance=True):
"""Returns n_neighbors of approximate nearest neighbors.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single query.
        n_neighbors : int, optional (default = None)
Number of neighbors required. If not provided, this will
return the number specified at the initialization.
        return_distance : boolean, optional (default = True)
Returns the distances of neighbors if set to True.
Returns
-------
dist : array, shape (n_samples, n_neighbors)
Array representing the cosine distances to each point,
only present if return_distance=True.
ind : array, shape (n_samples, n_neighbors)
Indices of the approximate nearest points in the population
matrix.
"""
if not hasattr(self, 'hash_functions_'):
raise ValueError("estimator should be fitted.")
if n_neighbors is None:
n_neighbors = self.n_neighbors
X = check_array(X, accept_sparse='csr')
neighbors, distances = [], []
bin_queries, max_depth = self._query(X)
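        # Each query is answered independently: candidates gathered from all
        # trees are ranked by exact cosine distance and the closest
        # n_neighbors are kept.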
for i in range(X.shape[0]):
neighs, dists = self._get_candidates(X[[i]], max_depth[i],
bin_queries[i],
n_neighbors)
neighbors.append(neighs)
distances.append(dists)
if return_distance:
return np.array(distances), np.array(neighbors)
else:
return np.array(neighbors)
def radius_neighbors(self, X, radius=None, return_distance=True):
"""Finds the neighbors within a given radius of a point or points.
Return the indices and distances of some points from the dataset
lying in a ball with size ``radius`` around the points of the query
array. Points lying on the boundary are included in the results.
The result points are *not* necessarily sorted by distance to their
query point.
LSH Forest being an approximate method, some true neighbors from the
indexed dataset might be missing from the results.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single query.
radius : float
Limiting distance of neighbors to return.
(default is the value passed to the constructor).
        return_distance : boolean, optional (default = True)
Returns the distances of neighbors if set to True.
Returns
-------
dist : array, shape (n_samples,) of arrays
Each element is an array representing the cosine distances
to some points found within ``radius`` of the respective query.
Only present if ``return_distance=True``.
ind : array, shape (n_samples,) of arrays
Each element is an array of indices for neighbors within ``radius``
of the respective query.
"""
if not hasattr(self, 'hash_functions_'):
raise ValueError("estimator should be fitted.")
if radius is None:
radius = self.radius
X = check_array(X, accept_sparse='csr')
neighbors, distances = [], []
bin_queries, max_depth = self._query(X)
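        # For each query, candidates are collected tree by tree and only
        # those within `radius` are kept; the per-query results therefore
        # have variable length and are returned as arrays of arrays.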
for i in range(X.shape[0]):
neighs, dists = self._get_radius_neighbors(X[[i]], max_depth[i],
bin_queries[i], radius)
neighbors.append(neighs)
distances.append(dists)
if return_distance:
return _array_of_arrays(distances), _array_of_arrays(neighbors)
else:
return _array_of_arrays(neighbors)
def partial_fit(self, X, y=None):
"""
Inserts new data into the already fitted LSH Forest.
Cost is proportional to new total size, so additions
should be batched.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
New data point to be inserted into the LSH Forest.
"""
X = check_array(X, accept_sparse='csr')
if not hasattr(self, 'hash_functions_'):
return self.fit(X)
if X.shape[1] != self._fit_X.shape[1]:
raise ValueError("Number of features in X and"
" fitted array does not match.")
n_samples = X.shape[0]
n_indexed = self._fit_X.shape[0]
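        # Insert the new hashes into each sorted tree and record the new row
        # indices (n_indexed .. n_indexed + n_samples - 1) in
        # original_indices_ so queries can map back to the appended rows.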
for i in range(self.n_estimators):
bin_X = self.hash_functions_[i].transform(X)[:, 0]
# gets the position to be added in the tree.
positions = self.trees_[i].searchsorted(bin_X)
# adds the hashed value into the tree.
self.trees_[i] = np.insert(self.trees_[i],
positions, bin_X)
# add the entry into the original_indices_.
self.original_indices_[i] = np.insert(self.original_indices_[i],
positions,
np.arange(n_indexed,
n_indexed +
n_samples))
# adds the entry into the input_array.
if sparse.issparse(X) or sparse.issparse(self._fit_X):
self._fit_X = sparse.vstack((self._fit_X, X))
else:
self._fit_X = np.row_stack((self._fit_X, X))
return self
| bsd-3-clause |