repo_name (stringlengths 6-112) | path (stringlengths 4-204) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 714-810k) | license (stringclasses, 15 values)
---|---|---|---|---|---
caganze/wisps
|
wisps/data_analysis/selection_criteria.py
|
1
|
22360
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
This contains the main script I used to define selection criteria
"""
import matplotlib.patches as patches
import random as rd
from itertools import combinations
import pickle
from functools import reduce
from .spectrum_tools import *
from shapey import Box
from .indices import *
from .spex_indices import *
from .initialize import *
from tqdm import tqdm
from ..data_sets import datasets
from ..utils.tools import get_distance, make_spt_number
import pandas as pd
import statsmodels.nonparametric.kernel_density as kde
from matplotlib.path import Path
import matplotlib
#load a previous sample of potential brown dwarfs
mjdf=datasets['manjavacas']
scndf=datasets['schneider']
############################################
mjdf['spt']= np.vstack(mjdf.spt.apply(make_spt_number).values)[:,0]
scndf['spt']= np.vstack(scndf.spt.apply(make_spt_number).values)[:,0]
class IndexSpace(object):
"""
index-index space object
Attributes:
name (str): (for example: 'CH_4/J-Cont H-cont/J-Cont')
shapes (list): rectangular selection boxes (Box objects)
completeness (list): % of spex templates selected for each box
contamination (list): % of contaminants for each box
templates (pandas dataframe): a 3-column table (name, x_index, y_index) for all templates
subdwarfs (pandas dataframe): a 3-column table (name, x_index, y_index) for all subdwarfs
contaminants (pandas dataframe): a 3-column table (name, x_index, y_index) for all contaminants
"""
def __init__(self, **kwargs):
self._shapes=[] #all the shapes
self._contaminants=None
self._spex_templates=None
self._contamination={} #contamination
self._completeness={}
self.isBest= False #flag to specify if this criterion will be used in the selection process
self.xkey=kwargs.get('xkey', ' ')
self.ykey=kwargs.get('ykey', ' ')
self._spex_sample=None
self._subdwarfs=kwargs.get('subdwarfs', None)
self._false_negative=None
if self._subdwarfs is not None: self.subdwarfs=self._subdwarfs
def __repr__(self):
return 'index-index space of '+ self.name
@property
def name(self):
return self.xkey+' '+ self.ykey
@property
def shapes(self):
return self._shapes
@property
def contamination(self):
return self._contamination
@property
def completeness(self):
return self._completeness
@property
def contaminants(self):
"""
A pandas dataframe of contaminants
"""
return self._contaminants
@contaminants.setter
def contaminants(self, new_conts):
"""
Must be a pandas dataframe with the xkey, ykey and 'Names' columns
"""
df=Annotator.reformat_table(new_conts[[self.xkey, self.ykey]])
df=df.dropna(how='any')
df.columns=['x', 'y']
#calculate the contamination based on the previous sample of potential brown dwarfs
self._contaminants=new_conts[[self.xkey, self.ykey, 'Names']]
#
#true_bds=Annotator.reformat_table(df200[[self.xkey, self.ykey]].applymap(eval)).values.T
#print (true_bds)
cont= {}
fn={}
new_shapes=[]
for s in self.shapes:
s.datatype='contam'
s.color=None
s.data=np.array([df.x, df.y])
slctd=s.select(s.data)
cont[s.shape_name]=len(slctd.T)/len(s.data.T)
new_shapes.append(s)
self._contamination=cont
self._shapes=new_shapes
@property
def subdwarfs(self):
return self._subdwarfs[[self.xkey, self.ykey, 'Names']]
@subdwarfs.setter
def subdwarfs(self, sds):
sds=sds.dropna(how='any')
#print (sds)
sds=sds[[self.xkey, self.ykey, 'Names', 'Spts']]
sds['data_type']='subdwarf'
self._subdwarfs=sds
@property
def templates(self):
"""
a pandas dataframe
"""
return self._spex_templates
@templates.setter
def templates(self, new_data):
"""
Must pass a pandas dataframe with at least the xkey and ykey columns
of this index-space, plus 'Spts' and 'Names'.
Creates the selection shapes for each spectral-type range.
"""
new_data=new_data.dropna()
#only keep columns that we need
df= new_data[[self.xkey, self.ykey, 'Spts', 'Names']]
#df.columns=['x', 'y', 'Spts', 'Names']
# print (self.xkey, self.ykey, new_data.columns)
df['data_type']='templates'
self._spex_sample= new_data[[self.xkey, self.ykey, 'Spts', 'Names']]
#print (self._spex_sample.shape)
annotated_df=None
if self._subdwarfs is not None:
annotated_df=Annotator.group_by_spt(df, add_subdwarfs=True, subdwarfs=self._subdwarfs)
else:
annotated_df=Annotator.group_by_spt(df)
self._calc_completeness(annotated_df)
#@classmethod
def add_box(self, df, name, color, coeff, xshift=0.15):
"""
Adds a box to the selection criteria
"""
#print (df)
#reformat the data
x=np.array([*list(df.x.values)])
y=np.array([*list(df.y.values)])
ddf=pd.DataFrame([x[:,0], y[:,0]]).transpose().dropna()
#if name =='Y dwarfs':
# print ()
#create a box
box=Box()
#if name.lower().startswith('l') or name.lower().startswith('y') or name.lower().startswith('m'):
# box=Box(shapetype='rectangle')
box.scatter_coeff=coeff
box.alpha=.1
box.color=color
box.shape_name=name
box.edgecolor='#2ECC40'
box.xshift=xshift
#print (ddf.values.T, name)
box.data=np.array(ddf.values.T)
#add this to the existing
self._shapes.append(box)
self._completeness[name]=box.efficiency
def _calc_completeness(self, annotated_df):
"""
This is how each box is defined after the user passes the templates property
Args:
annotated_df (pandas dataframe): must have a column 'spt_range' to differentiate between M5-L0, etc.
Returns:
None
"""
grouped=annotated_df.groupby('spt_range')
cpls={}
new_shapes=[]
for name, group in grouped:
df=group.dropna()
#print (df.shape, len(df))
if len(df) > 0:
#print('name of the group ...{} length ... {}'.format(name, len(group)))
to_use=df[[self.xkey, self.ykey]]
to_use.columns=['x', 'y']
self.add_box(to_use, name, '#0074D9', 5., xshift=0.3)
#add an extra box of late ts from manjavacas et al
#print (self.completeness)
mdf=mjdf[[self.xkey, self.ykey, 'spt']]
mdf.columns=['x', 'y', 'spt']
#add schneider objects
sdf= scndf[[self.xkey, self.ykey, 'spt']]
sdf.columns=['x', 'y', 'spt']
#ydwarfs=(mdf[mdf['spt'].apply(make_spt_number)>38].append(sdf)).reset_index(drop=True)
#print (ydwarfs)
self.add_box(sdf, 'Y dwarfs', '#0074D9', 3.0)
#print (self.completeness)
return
def select(self, df, **kwargs):
"""
Method to select a set of objects using this specific index.
Args:
df (pandas dataframe): a table that contains at least two columns of the index name for example: \
if this index is named "one two", \
then the table must have "one" and "two" as columns
**kwargs: you can specify which boxes to use in the selection e.g use the ["M5-L0", "T5-Y0"] boxes only
Returns:
a list of the objects that fit in the boxes
"""
#if not
df=df[[self.xkey, self.ykey, 'Names']]
df.columns=['x', 'y', 'names']
selected=[]
#new_shapes=[]
#can specify shapes to use (need to be passed as classes)
input_boxes= kwargs.get('shapes_to_use', self.shapes)
#if the user passed a string, retrieve the name of the box
use=input_boxes
if all(isinstance(bx, str) for bx in use):
use=[x for x in self.shapes if x.shape_name in input_boxes]
if kwargs.get('table', False):
return
if not kwargs.get('table', False):
for s in use:
rows=s.select(df[['x', 'y']]).index
sels=list(np.unique(df['names'].loc[rows].tolist()))
selected.append(sels)
return list(selected)
def new_plot_best(self, box_label, **kwargs):
#new plotting function that only looks at one box
bs=self.shapes
bx=[x for x in bs if x.shape_name==box_label][0]
spex_df=Annotator.reformat_table(datasets['spex']).reset_index(drop=True)
manj=Annotator.reformat_table(datasets['manjavacas']).reset_index(drop=True)
schn=Annotator.reformat_table(datasets['schneider']).reset_index(drop=True)
subdwarfs=Annotator.reformat_table(self._subdwarfs).reset_index(drop=True)
cands=Annotator.reformat_table(datasets['candidates']).reset_index(drop=True)
#rfcands=Annotator.reformat_table(datasets['rf_classified_not_indices']).reset_index(drop=True)
manj['Spts']=manj.spt.apply(make_spt_number)
schn['Spts']=schn.spt.apply(make_spt_number)
cands['Spts']=cands.spt.apply(make_spt_number)
#rfcands['Spts']=rfcands.spt.apply(make_spt_number)
spex_df=Annotator.group_by_spt(spex_df, spt_label='Spts')
schn=Annotator.group_by_spt(schn, spt_label='Spts')
manj=Annotator.group_by_spt(manj, spt_label='Spts')
cands=Annotator.group_by_spt(cands, spt_label='Spts')
#rfcands=Annotator.group_by_spt(rfcands)
ydwarfs=manj[manj['Spts'].apply(lambda x: x>37)]
#do everything in log-space
spex_df[INDEX_NAMES]=(spex_df[INDEX_NAMES].applymap(float))#.applymap(np.log10)
schn[INDEX_NAMES]=(schn[INDEX_NAMES].applymap(float))#.applymap(np.log10)
manj[INDEX_NAMES]=(manj[INDEX_NAMES].applymap(float))#.applymap(np.log10)
cands[INDEX_NAMES]=(cands[INDEX_NAMES].applymap(float))#.applymap(np.log10)
ydwarfs[INDEX_NAMES]=(ydwarfs[INDEX_NAMES].applymap(float))#.applymap(np.log10)
####################################
if 'ax' in kwargs:
ax= kwargs.get('ax', None)
else:
fig=plt.figure(figsize=kwargs.get('figsize', (8,8)))
ax=fig.add_subplot(111)
conts=self.contaminants
xkey, ykey=self.xkey,self.ykey
ax.scatter((self.contaminants[xkey]).apply(float), (self.contaminants[ykey]).apply(float), marker='o', facecolors='none', edgecolors='#AAAAAA', label='Contaminants')
if box_label.lower()=='y dwarfs':
ax.scatter(ydwarfs[xkey], ydwarfs[ykey], label='Y dwarfs')
if box_label.lower() =='subdwarfs':
ax.scatter(subdwarfs[xkey].apply(float), subdwarfs[ykey].apply(float), label='subdwarfs')
if (box_label.lower() != 'y dwarfs') and (box_label.lower() != 'subdwarfs'):
s_spex=spex_df[spex_df.spt_range==box_label]
#s_manj=manj[manj.spt_range==box_label]
#s_schn=schn[schn.spt_range==box_label]
s_cand=cands[cands.spt_range==box_label]
#s_rf=rfcands[rfcands.spt_range==box_label]
#print (s_cand)
ax.scatter(s_spex[xkey], s_spex[ykey], s=5, label='SpeX')
#ax.scatter(s_manj[xkey], s_manj[ykey], marker='P', facecolors='none', edgecolors='#FF851B', label='Manjavacas')
#ax.scatter(s_schn[xkey], s_schn[ykey], marker='^', facecolors='none', edgecolors='#B10DC9', label='Schneider')
ax.scatter((s_cand[xkey]).apply(float).round(3), (s_cand[ykey]).apply(float).round(3), marker='x', facecolors='#111111', edgecolors='#2ECC40', label='candidates')
#ax.scatter((s_rf[xkey]).apply(float).round(3), (s_rf[ykey]).apply(float).round(3), marker='x', facecolors='#2ECC40', edgecolors='#2ECC40', label='rf candidates')
bx.plot( ax=ax, only_shape=True, highlight=False)
filename=kwargs.get('filename', 'none')
#set limits of the plots from the templates
ax.set_xlim(kwargs.get('xlim', [0., 10.]))
ax.set_ylim(kwargs.get('ylim', [0., 10.]))
#indices that use the continuum have ranges that are too high, logscale this?
ax.set_xlabel(r'$Log '+str(self.name.split(' ')[0])+'$', fontsize=18)
ax.set_ylabel(r'$Log '+str(self.name.split(' ')[1])+'$', fontsize=18)
#ax.legend(prop={'size': 16})
if kwargs.get('save', False):
filenm=kwargs.get('filename', OUTPUT_FIGURES+'/indices/index_plot_'+self.name.replace('/','_').replace('-', '_').replace(' ', '_')+'.pdf')
plt.savefig(filenm, dpi=100, bbox_inches='tight')
def plot(self, **kwargs):
"""
Plotting function for an index-index space
"""
dict1={'data':[self.templates[self.xkey].tolist(), self.templates[self.ykey].tolist()],
'name':'Templates', \
'color':'#0074D9',
'marker':'D', 'ms':25, 'cmap':'YlOrBr', \
'alpha':1, 'edgecolor':'none', 'linewidths':3}
dict3={'data':[self.contaminants[self.xkey].tolist(),self.contaminants[self.ykey].tolist()],
'name':'Contaminants', \
'color':'#FFDC00', 'marker':'+', 'ms':5, 'cmap':'YlOrBr', \
'alpha':0.3, 'edgecolor':'#111111', 'linewidths':3}
ddicts=[ dict1]
if kwargs.get('show_subdwarfs', True):
dict2={'data':[self.subdwarfs[self.xkey].tolist(),self.subdwarfs[self.ykey].tolist()],
'name':'Subdwarfs', \
'color':'#FFDC00', 'marker':'D', 'ms':5, 'cmap':'YlOrBr', \
'alpha':1, 'edgecolor':'k', 'linewidths':3}
ddicts.append(dict2)
datadicts=kwargs.get('data_dicts', ddicts)
if 'ax' in kwargs:
ax1= kwargs.get('ax', None)
else:
fig=plt.figure(figsize=kwargs.get('figsize', (8,8)))
ax1=fig.add_subplot(111)
#plot contaminants
x=np.array(dict3['data'][0])
y=np.array(dict3['data'][1])
small_df=pd.DataFrame([x, y]).applymap(float).transpose()
small_df.columns=['x', 'y']
small_df=small_df.replace([np.inf, -np.inf], np.nan)
small_df=small_df.dropna(how='any')
#
#print (small_df)
#only show things in range of the plot
#small_df=small_df[(small_df.x.between(xlims[0],xlims[1]))& (small_df.y.between(ylims[0], ylims[1] ))]
xd=small_df.x.values
yd=small_df.y.values
#hist2d=ax1.hist2d(x=xd, y=yd, cmap=kwargs.get('cmap', MYCOLORMAP), bins=10, alpha=1.0)
#cbar = fig.colorbar(hist2d[3], orientation='horizontal')
ax1.scatter(xd, yd, marker='o', facecolors='none', edgecolors='#AAAAAA', label='Contaminants')
#plot templates
templates_data =[d['data'] for d in datadicts if d['name']=='Templates']
templates_data=(np.array([*templates_data]))[0]
#print (templates_data.shape)
if len(templates_data) != 0:
xmean=np.mean(templates_data[0][:,0])
xstd=np.std(templates_data[0][:,0])
ymean=np.mean(templates_data[1][:,0])
ystd=np.std(templates_data[1][:,0])
xmin=xmean-3.0*xstd
ymin=ymean-3.0*ystd
if xmin<0.0:
xmin=0.0
if ymin<0.0:
ymin=0.0
xlims=[xmin, xmean+3.0*xstd]
ylims=[ymin, ymean+3.0*ystd]
else:
xlims=self.shapes[-1].xrange
ylims= self.shapes[-1].yrange
for d in datadicts:
ax1.scatter(d['data'][0],d['data'][1], facecolors=d['color'], marker='.',
s=d['ms'], cmap=d['cmap'], label=d['name'], alpha=d['alpha'], edgecolors=d['edgecolor'],
linewidths=d['linewidths'])
#ax1.hist2d(x=xd, y=yd, cmap='Reds')
df_=pd.DataFrame()
df_['x']=d['data'][0]
df_['y']=d['data'][1]
#print (rmv)
for s in self.shapes:
if s.shape_name in kwargs.get('highlight', [None]):
s.alpha=.5
s.plot( ax=ax1, only_shape=True, highlight=False, alpha=1.)
else: pass
#s.color='none'
#print (""" itsssssssss """, kwargs.get('highlight', None))
#print ('name', s.shape_name, 'color', s.color, 'linewidth', s.linewidth)
ax1.set_xlabel('$'+str(self.name.split(' ')[0])+'$', fontsize=18)
ax1.set_ylabel('$'+str(self.name.split(' ')[1])+'$', fontsize=18)
#plot manjavacas data
#FF851
mdf=Annotator.reformat_table(mjdf)
sdf=Annotator.reformat_table(scndf)
ax1.scatter(mdf[self.xkey], mdf[self.ykey], marker='P', facecolors='none', edgecolors='#FF851B', label='Manjavacas')
ax1.scatter(sdf[self.xkey], sdf[self.ykey], marker='^', facecolors='none', edgecolors='#B10DC9', label='Schneider')
#if kwargs.get('log_scale', False):
# plt.xscale('log')
# plt.yscale('log')
#if np.std([s.xrange for s in self.shapes])>20.0*np.nanmedian([s.xrange for s in self.shapes]):
# plt.xscale('log')
#if np.std([s.yrange for s in self.shapes])>20.0*np.nanmedian([s.yrange for s in self.shapes]):
# plt.yscale('log')
filename=kwargs.get('filename', 'none')
#set limits of the plots from the templates
ax1.set_xlim(kwargs.get('xlim', xlims))
ax1.set_ylim(kwargs.get('ylim', ylims))
#indices that use the continuum have ranges that are too high, logscale this?
ax1.legend(prop={'size': 16})
filenm=kwargs.get('filename',
OUTPUT_FIGURES+'/indices/index_plot_'+self.name.replace('/','_').replace('-', '_').replace(' ', '_')+'.jpeg')
if kwargs.get('save', True):
plt.savefig(filenm, dpi=100, bbox_inches='tight')
return
def crts_from_file(**kwargs):
"""
loads saved selection criteria
"""
filename=kwargs.get('filename',OUTPUT_FILES+'/id_id_spaces_cpl_all_shapes.pkl')
return pd.read_pickle(filename)
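#Illustrative end-to-end sketch (hedged): save_criteria(conts=...) below builds the 45
#index-index spaces and pickles them; crts_from_file() reloads that pickle, e.g.
#   crts = crts_from_file()
#   space = crts['CH_4/J-Cont H-cont/J-Cont']     #hypothetical key
#   picked = space.select(candidate_table, shapes_to_use=['T5-Y0'])  #candidate_table is hypothetical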
def save_criteria(**kwargs):
"""
creates selection criteria
table of selection criteria: ranges, completeness
"""
#load templates (will return spectral type and 10 indices for each object)
#completeness =kwargs.get('completeness', 0.9)
all_spex=datasets['spex']
all_spex=all_spex[all_spex.snr1>10]
all_spex['Spts']=np.vstack(all_spex.spt.apply(make_spt_number).values)[:,0]
all_spex['Names']=all_spex.data_file
sd_bools=(all_spex['metallicity_class']=='sd') | (all_spex['metallicity_class']=='d/sd')
tpl_ids=all_spex[~sd_bools]
#templates['data_type']= 'templates'
sd_ids=all_spex[sd_bools]
#subdwarfs['data_type']= 'subdwarf'
print (tpl_ids.shape, sd_ids.shape)
#work in log space
#tpl_ids[INDEX_NAMES]=(Annotator.reformat_table(tpl_ids[INDEX_NAMES]).applymap(float)+.1).applymap(np.log10)
#sd_ids[INDEX_NAMES]=(Annotator.reformat_table(sd_ids[INDEX_NAMES]).applymap(float)+1.).applymap(np.log10)
conts=kwargs.get('conts', None)
#print(conts)
conts=Annotator.reformat_table(conts.rename(columns={'grism_id': 'Names'}))
keys=INDEX_NAMES
index_spaces=[]
for x_key, y_key in tqdm(list(combinations(keys,2))):
idspace=IndexSpace(xkey=x_key, ykey=y_key)
#print (idspace.name)
#print (idspace.xkey, idspace.ykey)
#pass subdwarfs first
idspace.subdwarfs=sd_ids[[x_key, y_key, 'Names', 'Spts']]
idspace.templates=tpl_ids[[x_key, y_key, 'Names', 'Spts']]
#idspace._spex_sample=tpl_ids
#annotated_df=Annotator.group_by_spt(tpl_ids)
#idspace._calc_completeness(annotated_df)
#print (len(idspace.templates[idspace.templates['spt_range']=='L0-L5']))
idspace.contaminants=conts
index_spaces.append(idspace)
#create extra indices for y dwarfs
#save all 45 id-id spaces in a file
names=[x.name for x in index_spaces]
idx_space_dict=dict(zip(*[names, index_spaces]))
output = open( OUTPUT_FILES+'/id_id_spaces_cpl_all_shapes.pkl', 'wb')
pickle.dump(idx_space_dict, output)
output.close()
return index_spaces
def plot_cont_compl(**kwargs):
"""
plotting the contamination and completeness heatmaps
"""
cmap=kwargs.get('cmap', MYCOLORMAP)
crts=crts_from_file().values()
conts=pd.DataFrame([ x.contamination for x in crts]).drop(columns=['trash'])
compls=pd.DataFrame([ x.completeness for x in crts]).drop(columns=['trash'])
conts['index-space']=[x.name for x in crts]
compls['index-space']=[x.name for x in crts]
compls.sort_index()
conts.sort_index()
conts.index=['Idx'+str(i) for i in range(0, len(conts['index-space']))]
new_conts=conts.sort_values(by=list(conts.columns), ascending=False).drop(labels='index-space', axis=1)
compls.index=['Idx'+str(i) for i in range(0, len(compls['index-space']))]
new_compls=compls.sort_values(by=list(compls.columns), ascending=False).drop(labels='index-space', axis=1)
fig, (ax1, ax2)=plt.subplots(1, 2, figsize=kwargs.get('figsize',(10, 6)), sharex=True, sharey=True)
seaborn.heatmap(new_conts, cmap=cmap, ax=ax1)
seaborn.heatmap(new_compls, ax=ax2, cmap=cmap)
ax2.set_title('Completeness', fontsize=16)
ax1.set_title('Contamination', fontsize=16)
fig.savefig( OUTPUT_FIGURES+'/completeness_contamination.pdf', bbox_inches='tight')
return
if __name__ =="__main__":
crts_from_file()
|
mit
|
xavierwu/scikit-learn
|
examples/manifold/plot_mds.py
|
261
|
2616
|
"""
=========================
Multi-dimensional scaling
=========================
An illustration of the metric and non-metric MDS on generated noisy data.
The reconstructed points using the metric MDS and non-metric MDS are slightly
shifted to avoid overlapping.
"""
# Author: Nelle Varoquaux <[email protected]>
# Licence: BSD
print(__doc__)
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.collections import LineCollection
from sklearn import manifold
from sklearn.metrics import euclidean_distances
from sklearn.decomposition import PCA
n_samples = 20
seed = np.random.RandomState(seed=3)
X_true = seed.randint(0, 20, 2 * n_samples).astype(np.float)
X_true = X_true.reshape((n_samples, 2))
# Center the data
X_true -= X_true.mean()
similarities = euclidean_distances(X_true)
# Add noise to the similarities
noise = np.random.rand(n_samples, n_samples)
noise = noise + noise.T
noise[np.arange(noise.shape[0]), np.arange(noise.shape[0])] = 0
similarities += noise
mds = manifold.MDS(n_components=2, max_iter=3000, eps=1e-9, random_state=seed,
dissimilarity="precomputed", n_jobs=1)
pos = mds.fit(similarities).embedding_
nmds = manifold.MDS(n_components=2, metric=False, max_iter=3000, eps=1e-12,
dissimilarity="precomputed", random_state=seed, n_jobs=1,
n_init=1)
npos = nmds.fit_transform(similarities, init=pos)
# Rescale the data
pos *= np.sqrt((X_true ** 2).sum()) / np.sqrt((pos ** 2).sum())
npos *= np.sqrt((X_true ** 2).sum()) / np.sqrt((npos ** 2).sum())
# Rotate the data
clf = PCA(n_components=2)
X_true = clf.fit_transform(X_true)
pos = clf.fit_transform(pos)
npos = clf.fit_transform(npos)
fig = plt.figure(1)
ax = plt.axes([0., 0., 1., 1.])
plt.scatter(X_true[:, 0], X_true[:, 1], c='r', s=20)
plt.scatter(pos[:, 0], pos[:, 1], s=20, c='g')
plt.scatter(npos[:, 0], npos[:, 1], s=20, c='b')
plt.legend(('True position', 'MDS', 'NMDS'), loc='best')
similarities = similarities.max() / similarities * 100
similarities[np.isinf(similarities)] = 0
# Plot the edges
start_idx, end_idx = np.where(pos)
#a sequence of (*line0*, *line1*, *line2*), where::
# linen = (x0, y0), (x1, y1), ... (xm, ym)
segments = [[X_true[i, :], X_true[j, :]]
for i in range(len(pos)) for j in range(len(pos))]
values = np.abs(similarities)
lc = LineCollection(segments,
zorder=0, cmap=plt.cm.hot_r,
norm=plt.Normalize(0, values.max()))
lc.set_array(similarities.flatten())
lc.set_linewidths(0.5 * np.ones(len(segments)))
ax.add_collection(lc)
plt.show()
|
bsd-3-clause
|
TomTranter/OpenPNM
|
tests/unit/algorithms/MixedPercolationTest.py
|
1
|
18990
|
import openpnm as op
import numpy as np
from openpnm.algorithms import MixedInvasionPercolation as mp
import matplotlib.pyplot as plt
import openpnm.models.geometry as gm
plt.close('all')
wrk = op.Workspace()
wrk.loglevel = 50
class MixedPercolationTest:
def setup_class(self, Np=5):
wrk.clear()
# Create Topological Network object
self.net = op.network.Cubic([Np, Np, 1], spacing=1)
self.geo = op.geometry.GenericGeometry(network=self.net,
pores=self.net.pores(),
throats=self.net.throats())
self.geo['pore.diameter'] = 0.5
self.geo['throat.diameter'] = 0.25
self.geo.add_model(propname='throat.endpoints',
model=gm.throat_endpoints.spherical_pores)
self.geo.add_model(propname='throat.length',
model=gm.throat_length.piecewise)
self.geo.add_model(propname='throat.volume',
model=gm.throat_volume.cylinder,
throat_diameter='throat.diameter',
throat_length='throat.length')
self.geo.add_model(propname='pore.volume',
model=gm.pore_volume.sphere,
pore_diameter='pore.diameter')
self.phase = op.phases.Air(network=self.net)
self.phys = op.physics.GenericPhysics(network=self.net,
phase=self.phase,
geometry=self.geo)
self.inlets = [0]
self.outlets = [Np*Np - 1]
def run_mp(self, trapping=False, residual=False, snap=False,
plot=False, flowrate=None):
IP_1 = mp(network=self.net)
if snap:
IP_1.settings['snap_off'] = 'throat.snap_off'
IP_1.setup(phase=self.phase)
IP_1.set_inlets(pores=self.inlets)
if residual:
IP_1.set_residual(pores=self.phase['pore.occupancy'])
IP_1.run()
if trapping:
IP_1.set_outlets(self.outlets)
IP_1.apply_trapping()
inv_points = np.arange(0, 100, 1)
# returns data as well as plotting
alg_data = IP_1.get_intrusion_data(inv_points=inv_points)
self.phase.update(IP_1.results(Pc=inv_points.max()))
if plot:
plt.figure()
L = np.sqrt(self.net.Np).astype(int)
plt.imshow(IP_1['pore.invasion_sequence'].reshape([L, L]),
cmap=plt.get_cmap('Blues'))
if flowrate is not None:
IP_1.apply_flow(flowrate=flowrate)
self.alg = IP_1
return alg_data
def test_case_throats_sequential(self):
# Throats only
# Sequential
net = self.net
phys = self.phys
phys['throat.entry_pressure']=np.arange(0, net.Nt, dtype=float)
phys['pore.entry_pressure']=0.0
dat_a = self.run_mp(False, False, False)
# Sequential w. trapping
dat_b = self.run_mp(True, False, False)
assert np.all(dat_a.S_tot==dat_b.S_tot)
def test_case_throats_random(self):
# Throats only
# Random
net = self.net
phys = self.phys
np.random.seed(2)
phys['throat.entry_pressure']=np.random.random(net.Nt)*net.Nt
phys['pore.entry_pressure']=0.0
dat_c = self.run_mp(False, False, False)
# Random w. trapping
np.random.seed(2)
dat_d = self.run_mp(True, False, False)
assert np.all(dat_d.S_tot<=dat_c.S_tot)
def test_case_pores_sequential(self):
# Pores only
# Sequential
net = self.net
phys = self.phys
phys['throat.entry_pressure']=0.0
phys['pore.entry_pressure']=np.arange(0, net.Np, dtype=float)
dat_e = self.run_mp(False, False, False)
# Sequential w. trapping
dat_f = self.run_mp(True, False, False)
assert np.all(dat_e.S_tot==dat_f.S_tot)
def test_case_pores_random(self):
# Random
net = self.net
phys = self.phys
np.random.seed(2)
phys['throat.entry_pressure']=0.0
phys['pore.entry_pressure']=np.random.random(net.Np)*net.Np
dat_g = self.run_mp(False, False, False)
# Random w. trapping
np.random.seed(2)
dat_h = self.run_mp(True, False, False)
assert np.all(dat_h.S_tot<=dat_g.S_tot)
def test_case_mixed_sequential(self):
# Pores and Throats
# Sequential
net = self.net
phys = self.phys
phys['throat.entry_pressure']=np.arange(0, net.Nt, dtype=float)
phys['pore.entry_pressure']=np.arange(0, net.Np, dtype=float)
dat_i = self.run_mp(False, False, False)
# Sequential w. trapping
dat_j = self.run_mp(True, False, False)
assert np.all(dat_i.S_tot==dat_j.S_tot)
def test_case_mixed_random(self):
# Random
net = self.net
phys = self.phys
np.random.seed(2)
phys['throat.entry_pressure'] = np.random.random(net.Nt)*net.Nt
phys['pore.entry_pressure'] = np.random.random(net.Np)*net.Np
dat_k = self.run_mp(False, False, False)
# Random w. trapping
np.random.seed(2)
dat_l = self.run_mp(True, False, False)
assert np.all(dat_l.S_tot<=dat_k.S_tot)
def test_snap_off(self):
# Throats only
# Sequential
net = self.net
phys = self.phys
phys['throat.entry_pressure'] = np.arange(0, net.Nt, dtype=float)
phys['pore.entry_pressure'] = 0.0
dat_m = self.run_mp(False, False, False)
# Sequential w. snap-off
phys['throat.snap_off'] = 100.0 # This pressure is higher than burst
T = 10
[P1, P2] = self.net['throat.conns'][T]
phys['throat.snap_off'][T] = 0.5 # This pressure is lower than burst
dat_n = self.run_mp(False, False, True)
assert self.alg['pore.invasion_pressure'][P1] == 0.5
assert self.alg['pore.invasion_pressure'][P2] == 0.5
assert self.alg['throat.invasion_pressure'][T] == 0.5
assert ~np.all(dat_m.S_tot-dat_n.S_tot==0)
def test_residual(self):
# Throats only
# Sequential
net = self.net
phys = self.phys
phys['throat.entry_pressure'] = np.arange(0, net.Nt, dtype=float)
phys['pore.entry_pressure'] = 0.0
dat_o = self.run_mp(False, False, False)
# Sequential w. partial
T = 10
[P1, P2] = self.net['throat.conns'][T]
self.phase['pore.occupancy'] = False
self.phase['throat.occupancy'] = False
self.phase['pore.occupancy'][P1] = True
self.phase['pore.occupancy'][P2] = True
dat_p = self.run_mp(False, True, False, False)
assert self.alg['pore.invasion_pressure'][P1] == -np.inf
assert self.alg['pore.invasion_pressure'][P2] == -np.inf
assert self.alg['throat.invasion_pressure'][T] == -np.inf
assert ~np.all(dat_o.S_tot-dat_p.S_tot==0)
def test_apply_flow_rate(self):
t = self
pvol = np.sum(t.net['pore.volume'])
tvol = np.sum(t.net['throat.volume'])
tot = pvol+tvol
net = self.net
phys = self.phys
phys['throat.entry_pressure'] = np.arange(0, net.Nt, dtype=float)
phys['pore.entry_pressure'] = 0.0
self.run_mp(False, False, False, flowrate=tot)
assert 'throat.invasion_time' in self.phase.props()
def test_max_pressure(self):
net = self.net
phys = self.phys
phys['throat.entry_pressure'] = np.arange(0, net.Nt, dtype=float)
phys['pore.entry_pressure'] = 0.0
IP_1 = mp(network=self.net)
IP_1.settings['partial_saturation'] = False
IP_1.settings['snap_off'] = False
IP_1.setup(phase=self.phase)
IP_1.set_inlets(pores=self.inlets)
IP_1.run(max_pressure=20)
IP_1.results(Pc=20)
inv_Pc = IP_1['pore.invasion_pressure']
inv_Pc = inv_Pc[~np.isinf(inv_Pc)]
assert inv_Pc.max() <= 20
def test_drainage_curve(self):
net = self.net
phys = self.phys
phys['throat.entry_pressure'] = np.arange(0, net.Nt, dtype=float)
phys['pore.entry_pressure'] = 0.0
IP_1 = mp(network=self.net)
self.phase['pore.occupancy'] = False
self.phase['throat.occupancy'] = False
IP_1.settings['snap_off'] = False
IP_1.setup(phase=self.phase)
inv_points = np.arange(0, 100, 1, dtype=float)
sat = np.zeros_like(inv_points)
tot_vol = self.net['pore.volume'].sum() + self.net['throat.volume'].sum()
for i, Pc in enumerate(inv_points):
IP_1.reset()
IP_1.set_inlets(pores=self.inlets)
IP_1.set_residual(pores=self.phase['pore.occupancy'])
IP_1.run(max_pressure=Pc)
IP_1.results(Pc)
Pinv_Pc = IP_1['pore.invasion_pressure']
Tinv_Pc = IP_1['throat.invasion_pressure']
sat[i] += np.sum(self.net['pore.volume'][Pinv_Pc<np.inf])
sat[i] += np.sum(self.net['throat.volume'][Tinv_Pc<np.inf])
assert sat.max()/tot_vol == 1.0
def test_plot_intrusion_curve(self):
net = self.net
phys = self.phys
phys['throat.entry_pressure']=np.arange(0, net.Nt, dtype=float)
phys['pore.entry_pressure']=0.0
self.run_mp(False, False, False)
fig = plt.figure()
self.alg.plot_intrusion_curve(fig)
plt.close()
fig = self.alg.plot_intrusion_curve()
plt.close()
def test_cluster_merging(self):
phys = self.phys
phys['throat.entry_pressure'] = 0.0
Pc = np.array([[0.0, 1.0, 2.0, 1.0, 0.0],
[3.0, 4.0, 5.0, 4.0, 3.0],
[6.0, 7.0, 8.0, 7.0, 6.0],
[9.0, 10.0, 11.0, 10.0, 9.0],
[12.0, 13.0, 14.0, 13.0, 12.0]])
phys['pore.entry_pressure'] = Pc.flatten()
IP_1 = mp(network=self.net)
IP_1.settings['partial_saturation'] = False
IP_1.settings['snap_off'] = False
IP_1.setup(phase=self.phase)
# Set the inlets as the pores with zero entry Pc
IP_1.set_inlets(clusters=[[0], [4]])
IP_1.run()
# Clusters should merge on first row and all pores after the first row
# should be part of the same cluster
assert len(np.unique(IP_1['pore.cluster'][5:])) == 1
def test_connected_residual_clusters(self):
net = self.net
phys = self.phys
phys['throat.entry_pressure'] = np.arange(0, net.Nt, dtype=float)
phys['pore.entry_pressure'] = np.arange(0, net.Np, dtype=float)
IP_1 = mp(network=self.net)
IP_1.settings['residual_saturation'] = True
IP_1.settings['snap_off'] = False
IP_1.setup(phase=self.phase)
T = 20
[P1, P2] = self.net['throat.conns'][T]
self.phase['pore.occupancy'] = False
self.phase['throat.occupancy'] = False
self.phase['pore.occupancy'][P1] = True
self.phase['pore.occupancy'][P2] = True
IP_1.set_inlets(pores=self.inlets)
assert len(IP_1.queue) == 1
def test_disconnected_residual_clusters(self):
net = self.net
phys = self.phys
phys['throat.entry_pressure'] = np.arange(0, net.Nt, dtype=float)
phys['pore.entry_pressure'] = np.arange(0, net.Np, dtype=float)
IP_1 = mp(network=self.net)
IP_1.settings['snap_off'] = False
IP_1.setup(phase=self.phase)
T = 20
[P1, P2] = self.net['throat.conns'][T]
self.phase['pore.occupancy'] = False
self.phase['throat.occupancy'] = False
self.phase['pore.occupancy'][P1] = False
self.phase['pore.occupancy'][P2] = True
IP_1.set_inlets(pores=self.inlets)
IP_1.set_residual(pores=self.phase['pore.occupancy'])
assert len(IP_1.queue) == 2
def test_big_clusters(self):
self.setup_class(Np=10)
net = self.net
phys = self.phys
np.random.seed(1)
phys['throat.entry_pressure']=0.0
phys['pore.entry_pressure']=np.random.random(net.Np)*net.Np
self.inlets = net.pores('left')
self.outlets = None
np.random.seed(1)
self.phase['pore.occupancy'] = False
self.phase['throat.occupancy'] = False
self.phase['pore.occupancy'] = np.random.random(net.Np) < 0.25
IP_1 = mp(network=self.net)
IP_1.setup(phase=self.phase)
IP_1.set_inlets(pores=self.inlets)
IP_1.set_residual(pores=self.phase['pore.occupancy'])
IP_1.run()
assert np.all(IP_1['pore.invasion_sequence'] > -1)
assert len(np.unique(IP_1['pore.cluster'])) > 1
def test_big_clusters_trapping(self):
self.setup_class(Np=10)
net = self.net
phys = self.phys
np.random.seed(1)
phys['throat.entry_pressure']=0.0
phys['pore.entry_pressure']=np.random.random(net.Np)*net.Np
self.inlets = net.pores('left')
self.outlets = net.pores('right')
np.random.seed(1)
self.phase['pore.occupancy'] = False
self.phase['throat.occupancy'] = False
self.phase['pore.occupancy'] = np.random.random(net.Np) < 0.25
IP_1 = mp(network=self.net)
IP_1.setup(phase=self.phase)
IP_1.set_inlets(pores=self.inlets)
IP_1.set_residual(pores=self.phase['pore.occupancy'])
IP_1.run()
IP_1.set_outlets(self.outlets)
IP_1.apply_trapping()
assert np.sum(IP_1['pore.trapped'])==35
def test_invade_isolated_Ts(self):
self.setup_class(Np=10)
net = self.net
phys = self.phys
np.random.seed(1)
phys['throat.entry_pressure']=0.0
phys['pore.entry_pressure']=np.random.random(net.Np)*net.Np
self.inlets = net.pores('left')
self.outlets = None
IP_1 = mp(network=self.net)
IP_1.settings['invade_isolated_Ts']=False
IP_1.setup(phase=self.phase)
IP_1.set_inlets(pores=self.inlets)
IP_1.run()
save_seq = IP_1['throat.invasion_sequence'].copy()
IP_1.settings['invade_isolated_Ts']=True
IP_1.reset()
IP_1.set_inlets(pores=self.inlets)
IP_1.run()
assert np.any(IP_1['throat.invasion_sequence']-save_seq != 0)
def test_terminate_clusters(self):
self.setup_class(Np=10)
net = self.net
phys = self.phys
np.random.seed(1)
phys['throat.entry_pressure']=0.0
phys['pore.entry_pressure']=np.random.random(net.Np)*net.Np
inlets = net.pores('left')
outlets = net.pores('right')
IP_1 = mp(network=self.net)
IP_1.setup(phase=self.phase)
IP_1.set_inlets(pores=inlets)
IP_1.set_outlets(pores=outlets)
IP_1.run()
assert np.any(IP_1['throat.invasion_sequence'][outlets]>-1)
assert np.any(IP_1['throat.invasion_sequence']==-1)
def test_late_filling(self):
self.setup_class(Np=100)
net = self.net
phys = self.phys
np.random.seed(1)
phys['throat.entry_pressure'] = np.random.random(net.Nt)*10000 + 5000
phys['pore.entry_pressure'] = 0.0
phys.add_model(propname='pore.pc_star',
model=op.models.misc.from_neighbor_throats,
throat_prop='throat.entry_pressure',
mode='min')
phys.add_model(propname='pore.late_filling',
model=op.models.physics.multiphase.late_filling,
pressure='pore.pressure',
Pc_star='pore.pc_star',
eta=1, Swp_star=0.4)
phys['throat.pc_star'] = phys['throat.entry_pressure']
phys.add_model(propname='throat.late_filling',
model=op.models.physics.multiphase.late_filling,
pressure='throat.pressure',
Pc_star='throat.pc_star',
eta=1, Swp_star=0.2)
inlets = net.pores('left')
outlets = net.pores('right')
IP_1 = mp(network=self.net)
IP_1.setup(phase=self.phase)
IP_1.set_inlets(pores=inlets)
IP_1.set_outlets(pores=outlets)
IP_1.run()
inv_points = np.arange(phys['throat.entry_pressure'].min(),
phys['throat.entry_pressure'].max(), 100)
alg_data = IP_1.get_intrusion_data(inv_points=inv_points)
IP_1.settings['late_pore_filling'] = 'pore.late_filling'
IP_1.settings['late_throat_filling'] = 'throat.late_filling'
alg_data_lpf = IP_1.get_intrusion_data(inv_points=inv_points)
assert np.any(alg_data_lpf.S_tot - alg_data.S_tot < 0.0)
assert ~np.any(alg_data_lpf.S_tot - alg_data.S_tot > 0.0)
def test_bidirectional_entry_pressure(self):
pn = op.network.Cubic(shape=[3, 3, 3], spacing=2.5e-5)
geo = op.geometry.GenericGeometry(network=pn,
pores=pn.pores(),
throats=pn.throats())
geo['throat.diameter'] = 2.0e-5
geo['pore.diameter'] = (np.random.random(geo.Np)+0.5)*1e-5
geo['pore.volume'] = (4/3)*np.pi*(geo['pore.diameter']/2)**3
geo['throat.volume'] = 0.0
geo.add_model(propname='throat.centroid',
model=op.models.geometry.throat_centroid.pore_coords)
geo.add_model(propname='throat.normal',
model=op.models.geometry.throat_vector.pore_to_pore)
water = op.phases.Water(network=pn)
water['pore.contact_angle'] = 100
phys = op.physics.GenericPhysics(network=pn, phase=water, geometry=geo)
r_tor = 5e-6
pmod = op.models.physics.capillary_pressure.purcell_bidirectional
phys.add_model(propname='throat.entry_pressure',
model=pmod,
r_toroid=r_tor)
phys.add_model(propname='throat.max_pressure',
model=op.models.physics.meniscus.purcell,
r_toroid=r_tor,
mode='max')
phys['pore.entry_pressure'] = 0.0
ip = op.algorithms.MixedInvasionPercolation(network=pn)
ip.setup(phase=water)
ip.set_inlets(pores=pn.pores('bottom'))
ip.run()
alg_data = ip.get_intrusion_data()
# Max pressure is all the same but the bi-directional touch pressure isn't,
# so there will be different invasion points. Using max results in a
# single invasion point
assert np.any(alg_data.S_pore < 1.0)
if __name__ == '__main__':
t = MixedPercolationTest()
t.setup_class()
for item in t.__dir__():
if item.startswith('test'):
print('running test: '+item)
t.__getattribute__(item)()
|
mit
|
karstenw/nodebox-pyobjc
|
examples/Extended Application/matplotlib/examples/pyplots/auto_subplots_adjust.py
|
1
|
1760
|
"""
====================
Auto Subplots Adjust
====================
"""
import matplotlib.pyplot as plt
import matplotlib.transforms as mtransforms
# nodebox section
if __name__ == '__builtin__':
# we're in nodebox
import os
import tempfile
W = 800
inset = 20
size(W, 600)
plt.cla()
plt.clf()
plt.close('all')
def tempimage():
fob = tempfile.NamedTemporaryFile(mode='w+b', suffix='.png', delete=False)
fname = fob.name
fob.close()
return fname
imgx = 20
imgy = 0
def pltshow(plt, dpi=150):
global imgx, imgy
temppath = tempimage()
plt.savefig(temppath, dpi=dpi)
dx,dy = imagesize(temppath)
w = min(W,dx)
image(temppath,imgx,imgy,width=w)
imgy = imgy + dy + 20
os.remove(temppath)
size(W, HEIGHT+dy+40)
else:
def pltshow(mplpyplot):
mplpyplot.show()
# nodebox section end
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(range(10))
ax.set_yticks((2,5,7))
labels = ax.set_yticklabels(('really, really, really', 'long', 'labels'))
def on_draw(event):
bboxes = []
for label in labels:
bbox = label.get_window_extent()
# the figure transform goes from relative coords->pixels and we
# want the inverse of that
bboxi = bbox.inverse_transformed(fig.transFigure)
bboxes.append(bboxi)
# this is the bbox that bounds all the bboxes, again in relative
# figure coords
bbox = mtransforms.Bbox.union(bboxes)
if fig.subplotpars.left < bbox.width:
# we need to move it over
fig.subplots_adjust(left=1.1*bbox.width) # pad a little
fig.canvas.draw()
return False
fig.canvas.mpl_connect('draw_event', on_draw)
pltshow(plt)
|
mit
|
themrmax/scikit-learn
|
examples/ensemble/plot_gradient_boosting_regularization.py
|
355
|
2843
|
"""
================================
Gradient Boosting regularization
================================
Illustration of the effect of different regularization strategies
for Gradient Boosting. The example is taken from Hastie et al 2009.
The loss function used is binomial deviance. Regularization via
shrinkage (``learning_rate < 1.0``) improves performance considerably.
In combination with shrinkage, stochastic gradient boosting
(``subsample < 1.0``) can produce more accurate models by reducing the
variance via bagging.
Subsampling without shrinkage usually does poorly.
Another strategy to reduce the variance is by subsampling the features
analogous to the random splits in Random Forests
(via the ``max_features`` parameter).
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn import datasets
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X = X.astype(np.float32)
# map labels from {-1, 1} to {0, 1}
labels, y = np.unique(y, return_inverse=True)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
original_params = {'n_estimators': 1000, 'max_leaf_nodes': 4, 'max_depth': None, 'random_state': 2,
'min_samples_split': 5}
plt.figure()
for label, color, setting in [('No shrinkage', 'orange',
{'learning_rate': 1.0, 'subsample': 1.0}),
('learning_rate=0.1', 'turquoise',
{'learning_rate': 0.1, 'subsample': 1.0}),
('subsample=0.5', 'blue',
{'learning_rate': 1.0, 'subsample': 0.5}),
('learning_rate=0.1, subsample=0.5', 'gray',
{'learning_rate': 0.1, 'subsample': 0.5}),
('learning_rate=0.1, max_features=2', 'magenta',
{'learning_rate': 0.1, 'max_features': 2})]:
params = dict(original_params)
params.update(setting)
clf = ensemble.GradientBoostingClassifier(**params)
clf.fit(X_train, y_train)
# compute test set deviance
test_deviance = np.zeros((params['n_estimators'],), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
# clf.loss_ assumes that y_test[i] in {0, 1}
test_deviance[i] = clf.loss_(y_test, y_pred)
plt.plot((np.arange(test_deviance.shape[0]) + 1)[::5], test_deviance[::5],
'-', color=color, label=label)
plt.legend(loc='upper left')
plt.xlabel('Boosting Iterations')
plt.ylabel('Test Set Deviance')
plt.show()
|
bsd-3-clause
|
chris1610/pbpython
|
code/stacked_bar_app.py
|
1
|
1494
|
""" Example Dash application accompanying the post
at http://pbpython.com/plotly-dash-intro.html
"""
import dash
import dash_core_components as dcc
import dash_html_components as html
import plotly.graph_objs as go
import pandas as pd
# Read in the Excel file
df = pd.read_excel(
"https://github.com/chris1610/pbpython/blob/master/data/salesfunnel.xlsx?raw=True"
)
# Pivot the data to get it into a summary format
pv = pd.pivot_table(
df,
index=['Name'],
columns=["Status"],
values=['Quantity'],
aggfunc=sum,
fill_value=0)
# Build a trace for each status that will eventually make up the stacked bar
trace1 = go.Bar(x=pv.index, y=pv[('Quantity', 'declined')], name='Declined')
trace2 = go.Bar(x=pv.index, y=pv[('Quantity', 'pending')], name='Pending')
trace3 = go.Bar(x=pv.index, y=pv[('Quantity', 'presented')], name='Presented')
trace4 = go.Bar(x=pv.index, y=pv[('Quantity', 'won')], name='Won')
# Create the basic app
app = dash.Dash()
# Populate the HTML structure of the app with the graph element
app.layout = html.Div(children=[
html.H1(children='Sales Funnel Report'),
html.Div(children='''National Sales Funnel Report.'''),
dcc.Graph(
id='example-graph',
figure={
'data': [trace1, trace2, trace3, trace4],
'layout':
go.Layout(title='Order Status by Customer', barmode='stack')
})
])
# Allow the app to serve from the command line
if __name__ == '__main__':
app.run_server(debug=True)
|
bsd-3-clause
|
johnowhitaker/bobibabber
|
sklearn/preprocessing/tests/test_imputation.py
|
1
|
11072
|
import numpy as np
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.preprocessing.imputation import Imputer
from sklearn.pipeline import Pipeline
from sklearn import grid_search
from sklearn import tree
from sklearn.random_projection import sparse_random_matrix
def _check_statistics(X, X_true,
strategy, statistics, missing_values):
"""Utility function for testing imputation for a given strategy.
Test:
- along the two axes
- with dense and sparse arrays
Check that:
- the statistics (mean, median, mode) are correct
- the missing values are imputed correctly"""
err_msg = "Parameters: strategy = %s, missing_values = %s, " \
"axis = {0}, sparse = {1}" % (strategy, missing_values)
# Normal matrix, axis = 0
imputer = Imputer(missing_values, strategy=strategy, axis=0)
X_trans = imputer.fit(X).transform(X.copy())
assert_array_equal(imputer.statistics_, statistics,
err_msg.format(0, False))
assert_array_equal(X_trans, X_true, err_msg.format(0, False))
# Normal matrix, axis = 1
imputer = Imputer(missing_values, strategy=strategy, axis=1)
imputer.fit(X.transpose())
if np.isnan(statistics).any():
assert_raises(ValueError, imputer.transform, X.copy().transpose())
else:
X_trans = imputer.transform(X.copy().transpose())
assert_array_equal(imputer.statistics_, statistics,
err_msg.format(1, False))
assert_array_equal(X_trans, X_true.transpose(),
err_msg.format(1, False))
# Sparse matrix, axis = 0
imputer = Imputer(missing_values, strategy=strategy, axis=0)
imputer.fit(sparse.csc_matrix(X))
X_trans = imputer.transform(sparse.csc_matrix(X.copy()))
if sparse.issparse(X_trans):
X_trans = X_trans.toarray()
assert_array_equal(imputer.statistics_, statistics,
err_msg.format(0, True))
assert_array_equal(X_trans, X_true, err_msg.format(0, True))
# Sparse matrix, axis = 1
imputer = Imputer(missing_values, strategy=strategy, axis=1)
imputer.fit(sparse.csc_matrix(X.transpose()))
if np.isnan(statistics).any():
assert_raises(ValueError, imputer.transform,
sparse.csc_matrix(X.copy().transpose()))
else:
X_trans = imputer.transform(sparse.csc_matrix(X.copy().transpose()))
if sparse.issparse(X_trans):
X_trans = X_trans.toarray()
assert_array_equal(imputer.statistics_, statistics,
err_msg.format(1, True))
assert_array_equal(X_trans, X_true.transpose(),
err_msg.format(1, True))
def test_imputation_shape():
"""Verify the shapes of the imputed matrix for different strategies."""
X = np.random.randn(10, 2)
X[::2] = np.nan
for strategy in ['mean', 'median', 'most_frequent']:
imputer = Imputer(strategy=strategy)
X_imputed = imputer.fit_transform(X)
assert_equal(X_imputed.shape, (10, 2))
X_imputed = imputer.fit_transform(sparse.csr_matrix(X))
assert_equal(X_imputed.shape, (10, 2))
def test_imputation_mean_median_only_zero():
"""Test imputation using the mean and median strategies, when
missing_values == 0."""
X = np.array([
[np.nan, 0, 0, 0, 5],
[np.nan, 1, 0, np.nan, 3],
[np.nan, 2, 0, 0, 0],
[np.nan, 6, 0, 5, 13],
])
X_imputed_mean = np.array([
[3, 5],
[1, 3],
[2, 7],
[6, 13],
])
statistics_mean = [np.nan, 3, np.nan, np.nan, 7]
X_imputed_median = np.array([
[2, 5, 5],
[1, np.nan, 3],
[2, 5, 5],
[6, 5, 13],
])
statistics_median = [np.nan, 2, np.nan, 5, 5]
_check_statistics(X, X_imputed_mean, "mean", statistics_mean, 0)
_check_statistics(X, X_imputed_median, "median", statistics_median, 0)
def test_imputation_mean_median():
"""Test imputation using the mean and median strategies, when
missing_values != 0."""
rng = np.random.RandomState(0)
dim = 10
dec = 10
shape = (dim * dim, dim + dec)
zeros = np.zeros(shape[0])
values = np.arange(1, shape[0]+1)
values[4::2] = - values[4::2]
tests = [("mean", "NaN", lambda z, v, p: np.mean(np.hstack((z, v)))),
("mean", 0, lambda z, v, p: np.mean(v)),
("median", "NaN", lambda z, v, p: np.median(np.hstack((z, v)))),
("median", 0, lambda z, v, p: np.median(v))]
for strategy, test_missing_values, true_value_fun in tests:
X = np.empty(shape)
X_true = np.empty(shape)
true_statistics = np.empty(shape[1])
# Create a matrix X with columns
# - with only zeros,
# - with only missing values
# - with zeros, missing values and values
# And a matrix X_true containing all true values
for j in range(shape[1]):
nb_zeros = (j - dec + 1 > 0) * (j - dec + 1) * (j - dec + 1)
nb_missing_values = max(shape[0] + dec * dec
- (j + dec) * (j + dec), 0)
nb_values = shape[0] - nb_zeros - nb_missing_values
z = zeros[:nb_zeros]
p = np.repeat(test_missing_values, nb_missing_values)
v = values[rng.permutation(len(values))[:nb_values]]
true_statistics[j] = true_value_fun(z, v, p)
# Create the columns
X[:, j] = np.hstack((v, z, p))
if 0 == test_missing_values:
X_true[:, j] = np.hstack((v,
np.repeat(
true_statistics[j],
nb_missing_values + nb_zeros)))
else:
X_true[:, j] = np.hstack((v,
z,
np.repeat(true_statistics[j],
nb_missing_values)))
# Shuffle them the same way
np.random.RandomState(j).shuffle(X[:, j])
np.random.RandomState(j).shuffle(X_true[:, j])
# Mean doesn't support columns containing NaNs, median does
if strategy == "median":
cols_to_keep = ~np.isnan(X_true).any(axis=0)
else:
cols_to_keep = ~np.isnan(X_true).all(axis=0)
X_true = X_true[:, cols_to_keep]
_check_statistics(X, X_true, strategy,
true_statistics, test_missing_values)
def test_imputation_most_frequent():
"""Test imputation using the most-frequent strategy."""
X = np.array([
[-1, -1, 0, 5],
[-1, 2, -1, 3],
[-1, 1, 3, -1],
[-1, 2, 3, 7],
])
X_true = np.array([
[2, 0, 5],
[2, 3, 3],
[1, 3, 3],
[2, 3, 7],
])
# scipy.stats.mode, used in Imputer, doesn't return the first most
# frequent value as promised in the doc but the lowest most frequent one. When this
# test fails after an update of scipy, Imputer will need to be updated
# to be consistent with the new (correct) behaviour
_check_statistics(X, X_true, "most_frequent", [np.nan, 2, 3, 3], -1)
def test_imputation_pipeline_grid_search():
"""Test imputation within a pipeline + gridsearch."""
pipeline = Pipeline([('imputer', Imputer(missing_values=0)),
('tree', tree.DecisionTreeRegressor(random_state=0))])
parameters = {
'imputer__strategy': ["mean", "median", "most_frequent"],
'imputer__axis': [0, 1]
}
l = 100
X = sparse_random_matrix(l, l, density=0.10)
Y = sparse_random_matrix(l, 1, density=0.10).todense()
gs = grid_search.GridSearchCV(pipeline, parameters)
gs.fit(X, Y)
def test_imputation_pickle():
"""Test for pickling imputers."""
import pickle
l = 100
X = sparse_random_matrix(l, l, density=0.10)
for strategy in ["mean", "median", "most_frequent"]:
imputer = Imputer(missing_values=0, strategy=strategy)
imputer.fit(X)
imputer_pickled = pickle.loads(pickle.dumps(imputer))
assert_array_equal(imputer.transform(X.copy()),
imputer_pickled.transform(X.copy()),
"Fail to transform the data after pickling "
"(strategy = %s)" % (strategy))
def test_imputation_copy():
"""Test imputation with copy"""
X_orig = sparse_random_matrix(5, 5, density=0.75, random_state=0)
# copy=True, dense => copy
X = X_orig.copy().todense()
imputer = Imputer(missing_values=0, strategy="mean", copy=True)
Xt = imputer.fit(X).transform(X)
Xt[0, 0] = -1
assert_false(np.all(X == Xt))
# copy=True, sparse csr => copy
X = X_orig.copy()
imputer = Imputer(missing_values=X.data[0], strategy="mean", copy=True)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_false(np.all(X.data == Xt.data))
# copy=False, dense => no copy
X = X_orig.copy().todense()
imputer = Imputer(missing_values=0, strategy="mean", copy=False)
Xt = imputer.fit(X).transform(X)
Xt[0, 0] = -1
assert_true(np.all(X == Xt))
# copy=False, sparse csr, axis=1 => no copy
X = X_orig.copy()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=1)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_true(np.all(X.data == Xt.data))
# copy=False, sparse csc, axis=0 => no copy
X = X_orig.copy().tocsc()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=0)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_true(np.all(X.data == Xt.data))
# copy=False, sparse csr, axis=0 => copy
X = X_orig.copy()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=0)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_false(np.all(X.data == Xt.data))
# copy=False, sparse csc, axis=1 => copy
X = X_orig.copy().tocsc()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=1)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_false(np.all(X.data == Xt.data))
# copy=False, sparse csr, axis=1, missing_values=0 => copy
X = X_orig.copy()
imputer = Imputer(missing_values=0, strategy="mean",
copy=False, axis=1)
Xt = imputer.fit(X).transform(X)
assert_false(sparse.issparse(Xt))
# Note: If X is sparse and if missing_values=0, then a (dense) copy of X is
# made, even if copy=False.
|
mit
|
bwgref/nustar_moving_target
|
nustar_planning/io.py
|
1
|
3817
|
def download_tle(outdir='./'):
"""Download the NuSTAR TLE archive.
Parameters
----------
outdir: Optional desired output location. Defaults to the working directory.
Returns
----------
Returns the filename that you've downloaded.
Notes
---------
"""
import os
import wget
# Make sure you've got a trailing slash...
if not(outdir.endswith('/')):
outdir += '/'
# Make sure the directory exists and create one if not.
directory = os.path.dirname(outdir)
if not os.path.exists(directory):
os.makedirs(directory)
myname='nustar_pysolar.io.download_tle'
url='http://www.srl.caltech.edu/NuSTAR_Public/NuSTAROperationSite/NuSTAR.tle'
# Check to see if the file exists:
fname = 'NuSTAR.tle'
outfile = outdir+'/'+fname
if (os.path.isfile(outfile)):
os.remove(outfile)
wget.download(url, out=outfile)
return outfile
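# Minimal usage sketch (hedged; assumes network access and the wget package; './tle/' is
# an arbitrary example path, and read_tle_file is defined below):
#   tle_path = download_tle(outdir='./tle/')
#   times, line1, line2 = read_tle_file(tle_path)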
def read_tle_file(tlefile, **kwargs):
"""Read in the TLE file.
Returns the times for each line element and the TLE time
"""
times = []
line1 = []
line2 = []
from datetime import datetime
# Catch if the file can't be opened:
try:
f = open(tlefile, 'r')
except FileNotFoundError:
print("Unable to open: "+tlefile)
return times, line1, line2  # nothing to read; return the empty lists
ln=0
for line in f:
# print(line)
if (ln == 0):
year= int(line[18:20])
day = int(line[20:23])
times.extend([datetime.strptime("{}:{}".format(year, day), "%y:%j")])
line1.extend([line.strip()])
ln=1
else:
ln=0
line2.extend([line.strip()])
f.close()
return times, line1, line2
def get_epoch_tle(epoch, tlefile):
"""Find the TLE that is closest to the epoch you want to search.
epoch is a datetime object, tlefile is the file you want to search through.
"""
times, line1, line2 = read_tle_file(tlefile)
from astropy.time import Time
# Allow astropy Time objects
if type(epoch) is Time:
epoch = epoch.datetime
mindt = 100.
min_ind = 0
for ind, t in enumerate(times):
dt = abs((epoch -t).days)
if dt < mindt:
min_ind = ind
mindt = dt
good_line1 = line1[min_ind]
good_line2 = line2[min_ind]
return mindt, good_line1, good_line2
def parse_occ(file):
'''Parse the occultation file that you generated using the orbit_model/occ script'''
import pandas as pd
from datetime import datetime
df = pd.read_csv(file, delim_whitespace=True, header=None, skiprows=6,
names = ['ingress', 'ingress_ang', 'midpoint_eng', 'midpoint_ang',
'egress', 'egress_ang'])
df['visible'] = df['egress']
df['occulted'] = df['egress']
for ind in range(len(df)):
if ind == len(df) -1:
break
df.loc[ind,('visible')] = datetime.strptime(
df.loc[ind, ('egress')],
'%Y:%j:%H:%M:%S')
df.loc[ind,('occulted')] = datetime.strptime(
df.loc[ind+1, ('ingress')],
'%Y:%j:%H:%M:%S')
orbits = df.loc[0:len(df)-2, ('visible', 'occulted')]
return orbits
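# Illustrative usage (hedged; 'occ_file.txt' is a hypothetical path):
#   orbits = parse_occ('occ_file.txt')   # DataFrame of 'visible'/'occulted' datetimes per orbit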
def parse_pa(file):
'''Parse the output PA string that you generated using the orbit_model/occ script
This should have exactly one line of the format:
Position angle: 43.608806 [deg]
'''
f = open(file)
for line in f:
fields = line.split()
mps_pa = float(fields[2])
# Remember that the mission planning PA is 180 degrees off from the SKY PA:
sky_pa = 180 - mps_pa
return sky_pa
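# Worked example of the convention above (illustrative): for the line
# "Position angle: 43.608806 [deg]", parse_pa returns 180 - 43.608806 = 136.391194,
# i.e. the SKY PA corresponding to the mission-planning PA.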
|
mit
|
sumspr/scikit-learn
|
examples/cluster/plot_adjusted_for_chance_measures.py
|
286
|
4353
|
"""
==========================================================
Adjustment for chance in clustering performance evaluation
==========================================================
The following plots demonstrate the impact of the number of clusters and
number of samples on various clustering performance evaluation metrics.
Non-adjusted measures such as the V-Measure show a dependency between
the number of clusters and the number of samples: the mean V-Measure
of random labeling increases significantly as the number of clusters is
closer to the total number of samples used to compute the measure.
Adjusted-for-chance measures such as ARI display some random variations
centered around a mean score of 0.0 for any number of samples and
clusters.
Only adjusted measures can hence safely be used as a consensus index
to evaluate the average stability of clustering algorithms for a given
value of k on various overlapping sub-samples of the dataset.
"""
print(__doc__)
# Author: Olivier Grisel <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from time import time
from sklearn import metrics
def uniform_labelings_scores(score_func, n_samples, n_clusters_range,
fixed_n_classes=None, n_runs=5, seed=42):
"""Compute score for 2 random uniform cluster labelings.
Both random labelings have the same number of clusters for each
possible value in ``n_clusters_range``.
When fixed_n_classes is not None the first labeling is considered a ground
truth class assignment with fixed number of classes.
"""
random_labels = np.random.RandomState(seed).random_integers
scores = np.zeros((len(n_clusters_range), n_runs))
if fixed_n_classes is not None:
labels_a = random_labels(low=0, high=fixed_n_classes - 1,
size=n_samples)
for i, k in enumerate(n_clusters_range):
for j in range(n_runs):
if fixed_n_classes is None:
labels_a = random_labels(low=0, high=k - 1, size=n_samples)
labels_b = random_labels(low=0, high=k - 1, size=n_samples)
scores[i, j] = score_func(labels_a, labels_b)
return scores
score_funcs = [
metrics.adjusted_rand_score,
metrics.v_measure_score,
metrics.adjusted_mutual_info_score,
metrics.mutual_info_score,
]
# 2 independent random clusterings with equal cluster number
n_samples = 100
n_clusters_range = np.linspace(2, n_samples, 10).astype(np.int)
plt.figure(1)
plots = []
names = []
for score_func in score_funcs:
print("Computing %s for %d values of n_clusters and n_samples=%d"
% (score_func.__name__, len(n_clusters_range), n_samples))
t0 = time()
scores = uniform_labelings_scores(score_func, n_samples, n_clusters_range)
print("done in %0.3fs" % (time() - t0))
plots.append(plt.errorbar(
n_clusters_range, np.median(scores, axis=1), scores.std(axis=1))[0])
names.append(score_func.__name__)
plt.title("Clustering measures for 2 random uniform labelings\n"
"with equal number of clusters")
plt.xlabel('Number of clusters (Number of samples is fixed to %d)' % n_samples)
plt.ylabel('Score value')
plt.legend(plots, names)
plt.ylim(ymin=-0.05, ymax=1.05)
# Random labeling with varying n_clusters against ground class labels
# with fixed number of clusters
n_samples = 1000
n_clusters_range = np.linspace(2, 100, 10).astype(np.int)
n_classes = 10
plt.figure(2)
plots = []
names = []
for score_func in score_funcs:
print("Computing %s for %d values of n_clusters and n_samples=%d"
% (score_func.__name__, len(n_clusters_range), n_samples))
t0 = time()
scores = uniform_labelings_scores(score_func, n_samples, n_clusters_range,
fixed_n_classes=n_classes)
print("done in %0.3fs" % (time() - t0))
plots.append(plt.errorbar(
n_clusters_range, scores.mean(axis=1), scores.std(axis=1))[0])
names.append(score_func.__name__)
plt.title("Clustering measures for random uniform labeling\n"
"against reference assignment with %d classes" % n_classes)
plt.xlabel('Number of clusters (Number of samples is fixed to %d)' % n_samples)
plt.ylabel('Score value')
plt.ylim(ymin=-0.05, ymax=1.05)
plt.legend(plots, names)
plt.show()
|
bsd-3-clause
|
arjoly/scikit-learn
|
examples/decomposition/plot_pca_iris.py
|
253
|
1801
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
PCA example with Iris Data-set
=========================================================
Principal Component Analysis applied to the Iris dataset.
See `here <http://en.wikipedia.org/wiki/Iris_flower_data_set>`_ for more
information on this dataset.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn import decomposition
from sklearn import datasets
np.random.seed(5)
centers = [[1, 1], [-1, -1], [1, -1]]
iris = datasets.load_iris()
X = iris.data
y = iris.target
fig = plt.figure(1, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
plt.cla()
pca = decomposition.PCA(n_components=3)
pca.fit(X)
X = pca.transform(X)
for name, label in [('Setosa', 0), ('Versicolour', 1), ('Virginica', 2)]:
ax.text3D(X[y == label, 0].mean(),
X[y == label, 1].mean() + 1.5,
X[y == label, 2].mean(), name,
horizontalalignment='center',
bbox=dict(alpha=.5, edgecolor='w', facecolor='w'))
# Reorder the labels to have colors matching the cluster results
y = np.choose(y, [1, 2, 0]).astype(np.float)
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=y, cmap=plt.cm.spectral)
x_surf = [X[:, 0].min(), X[:, 0].max(),
X[:, 0].min(), X[:, 0].max()]
y_surf = [X[:, 0].max(), X[:, 0].max(),
X[:, 0].min(), X[:, 0].min()]
x_surf = np.array(x_surf)
y_surf = np.array(y_surf)
v0 = pca.transform(pca.components_[0])
v0 /= v0[-1]
v1 = pca.transform(pca.components_[1])
v1 /= v1[-1]
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
plt.show()
|
bsd-3-clause
|
murrayrm/python-control
|
examples/robust_siso.py
|
2
|
2748
|
"""robust_siso.py
Demonstrate mixed-sensitivity H-infinity design for a SISO plant.
Based on Example 2.11 from Multivariable Feedback Control, Skogestad
and Postlethwaite, 1st Edition.
"""
import os
import numpy as np
import matplotlib.pyplot as plt
from control import tf, mixsyn, feedback, step_response
s = tf([1, 0], 1)
# the plant
g = 200/(10*s + 1) / (0.05*s + 1)**2
# disturbance plant
gd = 100/(10*s + 1)
# first design
# sensitivity weighting
M = 1.5
wb = 10
A = 1e-4
ws1 = (s/M + wb) / (s + wb*A)
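# A small sketch of what this weight demands: evaluated at s = j*omega it pushes
# |S| below roughly A at low frequency (tight tracking) and below roughly M at
# high frequency (bounded sensitivity peak).
for _w in (1e-4, 1e4):
    _wp = (1j*_w/M + wb) / (1j*_w + wb*A)
    print("omega = %g rad/s: 1/|w_P1| = %.4g" % (_w, 1/abs(_wp)))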
# KS weighting
wu = tf(1, 1)
k1, cl1, info1 = mixsyn(g, ws1, wu)
# sensitivity (S) and complementary sensitivity (T) functions for
# design 1
s1 = feedback(1, g*k1)
t1 = feedback(g*k1, 1)
# second design
# this weighting differs from the text, where A**0.5 is used; if you use that,
# the frequency response doesn't match the figure. The time responses
# are similar, though.
ws2 = (s/M ** 0.5 + wb)**2 / (s + wb*A)**2
# the KS weighting is the same as for the first design
k2, cl2, info2 = mixsyn(g, ws2, wu)
# S and T for design 2
s2 = feedback(1, g*k2)
t2 = feedback(g*k2, 1)
# frequency response
omega = np.logspace(-2, 2, 101)
ws1mag, _, _ = ws1.frequency_response(omega)
s1mag, _, _ = s1.frequency_response(omega)
ws2mag, _, _ = ws2.frequency_response(omega)
s2mag, _, _ = s2.frequency_response(omega)
plt.figure(1)
# the text uses log-scaled absolute magnitude, but dB is probably more familiar to most control engineers
plt.semilogx(omega, 20*np.log10(s1mag.flat), label='$S_1$')
plt.semilogx(omega, 20*np.log10(s2mag.flat), label='$S_2$')
# -1 in logspace is inverse
plt.semilogx(omega, -20*np.log10(ws1mag.flat), label='$1/w_{P1}$')
plt.semilogx(omega, -20*np.log10(ws2mag.flat), label='$1/w_{P2}$')
plt.ylim([-80, 10])
plt.xlim([1e-2, 1e2])
plt.xlabel('freq [rad/s]')
plt.ylabel('mag [dB]')
plt.legend()
plt.title('Sensitivity and sensitivity weighting frequency responses')
# time response
time = np.linspace(0, 3, 201)
_, y1 = step_response(t1, time)
_, y2 = step_response(t2, time)
# gd injects into the output (that is, g and gd are summed), and the
# closed loop mapping from output disturbance->output is S.
_, y1d = step_response(s1*gd, time)
_, y2d = step_response(s2*gd, time)
plt.figure(2)
plt.subplot(1, 2, 1)
plt.plot(time, y1, label='$y_1(t)$')
plt.plot(time, y2, label='$y_2(t)$')
plt.ylim([-0.1, 1.5])
plt.xlim([0, 3])
plt.xlabel('time [s]')
plt.ylabel('signal [1]')
plt.legend()
plt.title('Tracking response')
plt.subplot(1, 2, 2)
plt.plot(time, y1d, label='$y_1(t)$')
plt.plot(time, y2d, label='$y_2(t)$')
plt.ylim([-0.1, 1.5])
plt.xlim([0, 3])
plt.xlabel('time [s]')
plt.ylabel('signal [1]')
plt.legend()
plt.title('Disturbance response')
if 'PYCONTROL_TEST_EXAMPLES' not in os.environ:
plt.show()
|
bsd-3-clause
|
webmasterraj/FogOrNot
|
flask/lib/python2.7/site-packages/pandas/core/index.py
|
2
|
173979
|
# pylint: disable=E1101,E1103,W0232
import datetime
import warnings
import operator
from functools import partial
from pandas.compat import range, zip, lrange, lzip, u, reduce, filter, map
from pandas import compat
import numpy as np
from sys import getsizeof
import pandas.tslib as tslib
import pandas.lib as lib
import pandas.algos as _algos
import pandas.index as _index
from pandas.lib import Timestamp, Timedelta, is_datetime_array
from pandas.core.base import PandasObject, FrozenList, FrozenNDArray, IndexOpsMixin, _shared_docs
from pandas.util.decorators import (Appender, Substitution, cache_readonly,
deprecate)
from pandas.core.common import isnull, array_equivalent
import pandas.core.common as com
from pandas.core.common import (_values_from_object, is_float, is_integer,
ABCSeries, _ensure_object, _ensure_int64, is_bool_indexer,
is_list_like, is_bool_dtype, is_null_slice, is_integer_dtype)
from pandas.core.config import get_option
from pandas.io.common import PerformanceWarning
# simplify
default_pprint = lambda x: com.pprint_thing(x, escape_chars=('\t', '\r', '\n'),
quote_strings=True)
__all__ = ['Index']
_unsortable_types = frozenset(('mixed', 'mixed-integer'))
_index_doc_kwargs = dict(klass='Index', inplace='',
duplicated='np.array')
def _try_get_item(x):
try:
return x.item()
except AttributeError:
return x
def _indexOp(opname):
"""
Wrapper function for index comparison operations, to avoid
code duplication.
"""
def wrapper(self, other):
func = getattr(self._data.view(np.ndarray), opname)
result = func(np.asarray(other))
# technically we could support bool dtyped Index
# for now just return the indexing array directly
if is_bool_dtype(result):
return result
try:
return Index(result)
except: # pragma: no cover
return result
return wrapper
class InvalidIndexError(Exception):
pass
_o_dtype = np.dtype(object)
_Identity = object
def _new_Index(cls, d):
""" This is called upon unpickling, rather than the default which doesn't have arguments
and breaks __new__ """
return cls.__new__(cls, **d)
class Index(IndexOpsMixin, PandasObject):
"""
Immutable ndarray implementing an ordered, sliceable set. The basic object
storing axis labels for all pandas objects
Parameters
----------
data : array-like (1-dimensional)
dtype : NumPy dtype (default: object)
copy : bool
Make a copy of input ndarray
name : object
Name to be stored in the index
tupleize_cols : bool (default: True)
When True, attempt to create a MultiIndex if possible
Notes
-----
An Index instance can **only** contain hashable objects
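    Examples
    --------
    A minimal sketch (illustrative only):
    >>> from pandas import Index
    >>> idx = Index(['a', 'b', 'c'])
    >>> 'b' in idx
    True
    >>> idx.tolist()
    ['a', 'b', 'c']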
"""
# To hand over control to subclasses
_join_precedence = 1
# Cython methods
_groupby = _algos.groupby_object
_arrmap = _algos.arrmap_object
_left_indexer_unique = _algos.left_join_indexer_unique_object
_left_indexer = _algos.left_join_indexer_object
_inner_indexer = _algos.inner_join_indexer_object
_outer_indexer = _algos.outer_join_indexer_object
_box_scalars = False
_typ = 'index'
_data = None
_id = None
name = None
asi8 = None
_comparables = ['name']
_attributes = ['name']
_allow_index_ops = True
_allow_datetime_index_ops = False
_allow_period_index_ops = False
_is_numeric_dtype = False
_engine_type = _index.ObjectEngine
def __new__(cls, data=None, dtype=None, copy=False, name=None, fastpath=False,
tupleize_cols=True, **kwargs):
# no class inference!
if fastpath:
return cls._simple_new(data, name)
from pandas.tseries.period import PeriodIndex
if isinstance(data, (np.ndarray, Index, ABCSeries)):
if issubclass(data.dtype.type, np.datetime64):
from pandas.tseries.index import DatetimeIndex
result = DatetimeIndex(data, copy=copy, name=name, **kwargs)
if dtype is not None and _o_dtype == dtype:
return Index(result.to_pydatetime(), dtype=_o_dtype)
else:
return result
elif issubclass(data.dtype.type, np.timedelta64):
from pandas.tseries.tdi import TimedeltaIndex
result = TimedeltaIndex(data, copy=copy, name=name, **kwargs)
if dtype is not None and _o_dtype == dtype:
return Index(result.to_pytimedelta(), dtype=_o_dtype)
else:
return result
if dtype is not None:
try:
data = np.array(data, dtype=dtype, copy=copy)
except TypeError:
pass
# maybe coerce to a sub-class
if isinstance(data, PeriodIndex):
return PeriodIndex(data, copy=copy, name=name, **kwargs)
if issubclass(data.dtype.type, np.integer):
return Int64Index(data, copy=copy, dtype=dtype, name=name)
elif issubclass(data.dtype.type, np.floating):
return Float64Index(data, copy=copy, dtype=dtype, name=name)
elif issubclass(data.dtype.type, np.bool) or is_bool_dtype(data):
subarr = data.astype('object')
else:
subarr = com._asarray_tuplesafe(data, dtype=object)
# _asarray_tuplesafe does not always copy underlying data,
# so need to make sure that this happens
if copy:
subarr = subarr.copy()
elif hasattr(data, '__array__'):
return Index(np.asarray(data), dtype=dtype, copy=copy, name=name,
**kwargs)
elif data is None or np.isscalar(data):
cls._scalar_data_error(data)
else:
if tupleize_cols and isinstance(data, list) and data:
try:
sorted(data)
has_mixed_types = False
except (TypeError, UnicodeDecodeError):
has_mixed_types = True # python3 only
if isinstance(data[0], tuple) and not has_mixed_types:
try:
return MultiIndex.from_tuples(
data, names=name or kwargs.get('names'))
except (TypeError, KeyError):
pass # python2 - MultiIndex fails on mixed types
# other iterable of some kind
subarr = com._asarray_tuplesafe(data, dtype=object)
if dtype is None:
inferred = lib.infer_dtype(subarr)
if inferred == 'integer':
return Int64Index(subarr.astype('i8'), copy=copy, name=name)
elif inferred in ['floating', 'mixed-integer-float']:
return Float64Index(subarr, copy=copy, name=name)
elif inferred == 'boolean':
                # don't support boolean explicitly ATM
pass
elif inferred != 'string':
if (inferred.startswith('datetime') or
tslib.is_timestamp_array(subarr)):
from pandas.tseries.index import DatetimeIndex
return DatetimeIndex(subarr, copy=copy, name=name, **kwargs)
elif (inferred.startswith('timedelta') or
lib.is_timedelta_array(subarr)):
from pandas.tseries.tdi import TimedeltaIndex
return TimedeltaIndex(subarr, copy=copy, name=name, **kwargs)
elif inferred == 'period':
return PeriodIndex(subarr, name=name, **kwargs)
return cls._simple_new(subarr, name)
@classmethod
def _simple_new(cls, values, name=None, **kwargs):
result = object.__new__(cls)
result._data = values
result.name = name
for k, v in compat.iteritems(kwargs):
setattr(result,k,v)
result._reset_identity()
return result
def _update_inplace(self, result, **kwargs):
# guard when called from IndexOpsMixin
raise TypeError("Index can't be updated inplace")
def is_(self, other):
"""
        More flexible, faster check like ``is``, but one that works through views
Note: this is *not* the same as ``Index.identical()``, which checks
that metadata is also the same.
Parameters
----------
other : object
other object to compare against.
Returns
-------
True if both have same underlying data, False otherwise : bool
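        Examples
        --------
        A minimal sketch (illustrative only); a view shares the underlying
        data, a copy does not:
        >>> from pandas import Index
        >>> idx = Index([1, 2, 3])
        >>> idx.is_(idx.view())
        True
        >>> idx.is_(idx.copy())
        False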
"""
# use something other than None to be clearer
return self._id is getattr(other, '_id', Ellipsis)
def _reset_identity(self):
"""Initializes or resets ``_id`` attribute with new object"""
self._id = _Identity()
# ndarray compat
def __len__(self):
"""
return the length of the Index
"""
return len(self._data)
def __array__(self, result=None):
""" the array interface, return my values """
return self._data.view(np.ndarray)
def __array_wrap__(self, result, context=None):
"""
Gets called after a ufunc
"""
return self._shallow_copy(result)
@cache_readonly
def dtype(self):
""" return the dtype object of the underlying data """
return self._data.dtype
@property
def values(self):
""" return the underlying data as an ndarray """
return self._data.view(np.ndarray)
def get_values(self):
""" return the underlying data as an ndarray """
return self.values
def _array_values(self):
return self._data
# ops compat
def tolist(self):
"""
return a list of the Index values
"""
return list(self.values)
def repeat(self, n):
"""
return a new Index of the values repeated n times
See also
--------
numpy.ndarray.repeat
"""
return self._shallow_copy(self.values.repeat(n))
def ravel(self, order='C'):
"""
return an ndarray of the flattened values of the underlying data
See also
--------
numpy.ndarray.ravel
"""
return self.values.ravel(order=order)
# construction helpers
@classmethod
def _scalar_data_error(cls, data):
raise TypeError(
'{0}(...) must be called with a collection of some kind, {1} was '
'passed'.format(cls.__name__, repr(data))
)
@classmethod
def _string_data_error(cls, data):
raise TypeError('String dtype not supported, you may need '
'to explicitly cast to a numeric type')
@classmethod
def _coerce_to_ndarray(cls, data):
"""coerces data to ndarray, raises on scalar data. Converts other
iterables to list first and then to array. Does not touch ndarrays."""
if not isinstance(data, (np.ndarray, Index)):
if data is None or np.isscalar(data):
cls._scalar_data_error(data)
# other iterable of some kind
if not isinstance(data, (ABCSeries, list, tuple)):
data = list(data)
data = np.asarray(data)
return data
def _get_attributes_dict(self):
""" return an attributes dict for my class """
return dict([ (k,getattr(self,k,None)) for k in self._attributes])
def view(self, cls=None):
# we need to see if we are subclassing an
# index type here
if cls is not None and not hasattr(cls,'_typ'):
result = self._data.view(cls)
else:
result = self._shallow_copy()
if isinstance(result, Index):
result._id = self._id
return result
def _shallow_copy(self, values=None, **kwargs):
""" create a new Index, don't copy the data, use the same object attributes
with passed in attributes taking precedence """
if values is None:
values = self.values
attributes = self._get_attributes_dict()
attributes.update(kwargs)
return self.__class__._simple_new(values,**attributes)
def copy(self, names=None, name=None, dtype=None, deep=False):
"""
        Make a copy of this object. Name and dtype set those attributes on
the new object.
Parameters
----------
name : string, optional
dtype : numpy dtype or pandas type
Returns
-------
copy : Index
Notes
-----
In most cases, there should be no functional difference from using
``deep``, but if ``deep`` is passed it will attempt to deepcopy.
"""
if names is not None and name is not None:
raise TypeError("Can only provide one of `names` and `name`")
if deep:
from copy import deepcopy
new_index = self._shallow_copy(self._data.copy())
name = name or deepcopy(self.name)
else:
new_index = self._shallow_copy()
name = self.name
if name is not None:
names = [name]
if names:
new_index = new_index.set_names(names)
if dtype:
new_index = new_index.astype(dtype)
return new_index
__copy__ = copy
def __unicode__(self):
"""
Return a string representation for this object.
Invoked by unicode(df) in py2 only. Yields a Unicode String in both
py2/py3.
"""
prepr = com.pprint_thing(self, escape_chars=('\t', '\r', '\n'),
quote_strings=True)
return "%s(%s, dtype='%s')" % (type(self).__name__, prepr, self.dtype)
def to_series(self, **kwargs):
"""
        Create a Series with both index and values equal to the index keys,
        useful with map for returning an indexer based on an index
Returns
-------
Series : dtype will be based on the type of the Index values.
"""
from pandas import Series
return Series(self._to_embed(), index=self, name=self.name)
def _to_embed(self, keep_tz=False):
"""
return an array repr of this object, potentially casting to object
This is for internal compat
"""
return self.values
def astype(self, dtype):
return Index(self.values.astype(dtype), name=self.name,
dtype=dtype)
def to_datetime(self, dayfirst=False):
"""
For an Index containing strings or datetime.datetime objects, attempt
conversion to DatetimeIndex
"""
from pandas.tseries.index import DatetimeIndex
if self.inferred_type == 'string':
from dateutil.parser import parse
parser = lambda x: parse(x, dayfirst=dayfirst)
parsed = lib.try_parse_dates(self.values, parser=parser)
return DatetimeIndex(parsed)
else:
return DatetimeIndex(self.values)
def _assert_can_do_setop(self, other):
return True
@property
def nlevels(self):
return 1
def _get_names(self):
return FrozenList((self.name,))
def _set_names(self, values, level=None):
if len(values) != 1:
raise ValueError('Length of new names must be 1, got %d'
% len(values))
self.name = values[0]
names = property(fset=_set_names, fget=_get_names)
def set_names(self, names, level=None, inplace=False):
"""
Set new names on index. Defaults to returning new index.
Parameters
----------
names : str or sequence
name(s) to set
level : int or level name, or sequence of int / level names (default None)
If the index is a MultiIndex (hierarchical), level(s) to set (None for all levels)
Otherwise level must be None
inplace : bool
if True, mutates in place
Returns
-------
new index (of same type and class...etc) [if inplace, returns None]
Examples
--------
>>> Index([1, 2, 3, 4]).set_names('foo')
Int64Index([1, 2, 3, 4], dtype='int64')
>>> Index([1, 2, 3, 4]).set_names(['foo'])
Int64Index([1, 2, 3, 4], dtype='int64')
>>> idx = MultiIndex.from_tuples([(1, u'one'), (1, u'two'),
(2, u'one'), (2, u'two')],
names=['foo', 'bar'])
>>> idx.set_names(['baz', 'quz'])
MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=[u'baz', u'quz'])
>>> idx.set_names('baz', level=0)
MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=[u'baz', u'bar'])
"""
if level is not None and self.nlevels == 1:
raise ValueError('Level must be None for non-MultiIndex')
if level is not None and not is_list_like(level) and is_list_like(names):
raise TypeError("Names must be a string")
if not is_list_like(names) and level is None and self.nlevels > 1:
raise TypeError("Must pass list-like as `names`.")
if not is_list_like(names):
names = [names]
if level is not None and not is_list_like(level):
level = [level]
if inplace:
idx = self
else:
idx = self._shallow_copy()
idx._set_names(names, level=level)
if not inplace:
return idx
def rename(self, name, inplace=False):
"""
Set new names on index. Defaults to returning new index.
Parameters
----------
name : str or list
name to set
inplace : bool
if True, mutates in place
Returns
-------
new index (of same type and class...etc) [if inplace, returns None]
"""
return self.set_names([name], inplace=inplace)
@property
def _has_complex_internals(self):
# to disable groupby tricks in MultiIndex
return False
def summary(self, name=None):
if len(self) > 0:
head = self[0]
if hasattr(head, 'format') and\
not isinstance(head, compat.string_types):
head = head.format()
tail = self[-1]
if hasattr(tail, 'format') and\
not isinstance(tail, compat.string_types):
tail = tail.format()
index_summary = ', %s to %s' % (com.pprint_thing(head),
com.pprint_thing(tail))
else:
index_summary = ''
if name is None:
name = type(self).__name__
return '%s: %s entries%s' % (name, len(self), index_summary)
def _mpl_repr(self):
# how to represent ourselves to matplotlib
return self.values
_na_value = np.nan
"""The expected NA value to use with this index."""
@property
def is_monotonic(self):
""" alias for is_monotonic_increasing (deprecated) """
return self._engine.is_monotonic_increasing
@property
def is_monotonic_increasing(self):
"""
return if the index is monotonic increasing (only equal or
increasing) values.
"""
return self._engine.is_monotonic_increasing
@property
def is_monotonic_decreasing(self):
"""
return if the index is monotonic decreasing (only equal or
decreasing) values.
"""
return self._engine.is_monotonic_decreasing
def is_lexsorted_for_tuple(self, tup):
return True
@cache_readonly(allow_setting=True)
def is_unique(self):
""" return if the index has unique values """
return self._engine.is_unique
@property
def has_duplicates(self):
return not self.is_unique
def is_boolean(self):
return self.inferred_type in ['boolean']
def is_integer(self):
return self.inferred_type in ['integer']
def is_floating(self):
return self.inferred_type in ['floating', 'mixed-integer-float']
def is_numeric(self):
return self.inferred_type in ['integer', 'floating']
def is_object(self):
return self.dtype == np.object_
def is_mixed(self):
return 'mixed' in self.inferred_type
def holds_integer(self):
return self.inferred_type in ['integer', 'mixed-integer']
def _convert_scalar_indexer(self, key, kind=None):
"""
convert a scalar indexer
Parameters
----------
key : label of the slice bound
kind : optional, type of the indexing operation (loc/ix/iloc/None)
right now we are converting
floats -> ints if the index supports it
"""
def to_int():
ikey = int(key)
if ikey != key:
return self._invalid_indexer('label', key)
return ikey
if kind == 'iloc':
if is_integer(key):
return key
elif is_float(key):
key = to_int()
warnings.warn("scalar indexers for index type {0} should be integers and not floating point".format(
type(self).__name__),FutureWarning)
return key
return self._invalid_indexer('label', key)
if is_float(key):
if not self.is_floating():
warnings.warn("scalar indexers for index type {0} should be integers and not floating point".format(
type(self).__name__),FutureWarning)
return to_int()
return key
def _convert_slice_indexer_getitem(self, key, is_index_slice=False):
""" called from the getitem slicers, determine how to treat the key
whether positional or not """
if self.is_integer() or is_index_slice:
return key
return self._convert_slice_indexer(key)
def _convert_slice_indexer(self, key, kind=None):
"""
convert a slice indexer. disallow floats in the start/stop/step
Parameters
----------
key : label of the slice bound
kind : optional, type of the indexing operation (loc/ix/iloc/None)
"""
# if we are not a slice, then we are done
if not isinstance(key, slice):
return key
# validate iloc
if kind == 'iloc':
# need to coerce to_int if needed
def f(c):
v = getattr(key,c)
if v is None or is_integer(v):
return v
# warn if it's a convertible float
if v == int(v):
warnings.warn("slice indexers when using iloc should be integers "
"and not floating point",FutureWarning)
return int(v)
self._invalid_indexer('slice {0} value'.format(c), v)
return slice(*[ f(c) for c in ['start','stop','step']])
# validate slicers
def validate(v):
if v is None or is_integer(v):
return True
            # disallow floats (except for .ix)
elif is_float(v):
if kind == 'ix':
return True
return False
return True
for c in ['start','stop','step']:
v = getattr(key,c)
if not validate(v):
self._invalid_indexer('slice {0} value'.format(c), v)
# figure out if this is a positional indexer
start, stop, step = key.start, key.stop, key.step
def is_int(v):
return v is None or is_integer(v)
is_null_slicer = start is None and stop is None
is_index_slice = is_int(start) and is_int(stop)
is_positional = is_index_slice and not self.is_integer()
if kind == 'getitem':
return self._convert_slice_indexer_getitem(
key, is_index_slice=is_index_slice)
# convert the slice to an indexer here
# if we are mixed and have integers
try:
if is_positional and self.is_mixed():
if start is not None:
i = self.get_loc(start)
if stop is not None:
j = self.get_loc(stop)
is_positional = False
except KeyError:
if self.inferred_type == 'mixed-integer-float':
raise
if is_null_slicer:
indexer = key
elif is_positional:
indexer = key
else:
try:
indexer = self.slice_indexer(start, stop, step)
except Exception:
if is_index_slice:
if self.is_integer():
raise
else:
indexer = key
else:
raise
return indexer
def _convert_list_indexer(self, key, kind=None):
""" convert a list indexer. these should be locations """
return key
def _convert_list_indexer_for_mixed(self, keyarr, kind=None):
""" passed a key that is tuplesafe that is integer based
and we have a mixed index (e.g. number/labels). figure out
the indexer. return None if we can't help
"""
if (kind is None or kind in ['iloc','ix']) and (is_integer_dtype(keyarr) and not self.is_floating()):
if self.inferred_type != 'integer':
keyarr = np.where(keyarr < 0,
len(self) + keyarr, keyarr)
if self.inferred_type == 'mixed-integer':
indexer = self.get_indexer(keyarr)
if (indexer >= 0).all():
return indexer
from pandas.core.indexing import _maybe_convert_indices
return _maybe_convert_indices(indexer, len(self))
elif not self.inferred_type == 'integer':
return keyarr
return None
def _invalid_indexer(self, form, key):
""" consistent invalid indexer message """
raise TypeError("cannot do {form} indexing on {klass} with these "
"indexers [{key}] of {kind}".format(form=form,
klass=type(self),
key=key,
kind=type(key)))
def get_duplicates(self):
from collections import defaultdict
counter = defaultdict(lambda: 0)
for k in self.values:
counter[k] += 1
return sorted(k for k, v in compat.iteritems(counter) if v > 1)
_get_duplicates = get_duplicates
def _cleanup(self):
self._engine.clear_mapping()
@cache_readonly
def _engine(self):
# property, for now, slow to look up
return self._engine_type(lambda: self.values, len(self))
def _validate_index_level(self, level):
"""
Validate index level.
For single-level Index getting level number is a no-op, but some
verification must be done like in MultiIndex.
"""
if isinstance(level, int):
if level < 0 and level != -1:
raise IndexError("Too many levels: Index has only 1 level,"
" %d is not a valid level number" % (level,))
elif level > 0:
raise IndexError("Too many levels:"
" Index has only 1 level, not %d" %
(level + 1))
elif level != self.name:
raise KeyError('Level %s must be same as name (%s)'
% (level, self.name))
def _get_level_number(self, level):
self._validate_index_level(level)
return 0
@cache_readonly
def inferred_type(self):
""" return a string of the type inferred from the values """
return lib.infer_dtype(self)
def is_type_compatible(self, kind):
return kind == self.inferred_type
@cache_readonly
def is_all_dates(self):
if self._data is None:
return False
return is_datetime_array(_ensure_object(self.values))
def __iter__(self):
return iter(self.values)
def __reduce__(self):
d = dict(data=self._data)
d.update(self._get_attributes_dict())
return _new_Index, (self.__class__, d), None
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if isinstance(state, dict):
self._data = state.pop('data')
for k, v in compat.iteritems(state):
setattr(self, k, v)
elif isinstance(state, tuple):
if len(state) == 2:
nd_state, own_state = state
data = np.empty(nd_state[1], dtype=nd_state[2])
np.ndarray.__setstate__(data, nd_state)
self.name = own_state[0]
else: # pragma: no cover
data = np.empty(state)
np.ndarray.__setstate__(data, state)
self._data = data
self._reset_identity()
else:
raise Exception("invalid pickle state")
_unpickle_compat = __setstate__
def __deepcopy__(self, memo={}):
return self.copy(deep=True)
def __nonzero__(self):
raise ValueError("The truth value of a {0} is ambiguous. "
"Use a.empty, a.bool(), a.item(), a.any() or a.all()."
.format(self.__class__.__name__))
__bool__ = __nonzero__
def __contains__(self, key):
hash(key)
# work around some kind of odd cython bug
try:
return key in self._engine
except TypeError:
return False
def __hash__(self):
raise TypeError("unhashable type: %r" % type(self).__name__)
def __setitem__(self, key, value):
raise TypeError("Indexes does not support mutable operations")
def __getitem__(self, key):
"""
Override numpy.ndarray's __getitem__ method to work as desired.
This function adds lists and Series as valid boolean indexers
(ndarrays only supports ndarray with dtype=bool).
If resulting ndim != 1, plain ndarray is returned instead of
corresponding `Index` subclass.
"""
# There's no custom logic to be implemented in __getslice__, so it's
# not overloaded intentionally.
getitem = self._data.__getitem__
promote = self._shallow_copy
if np.isscalar(key):
return getitem(key)
if isinstance(key, slice):
# This case is separated from the conditional above to avoid
# pessimization of basic indexing.
return promote(getitem(key))
if is_bool_indexer(key):
key = np.asarray(key)
key = _values_from_object(key)
result = getitem(key)
if not np.isscalar(result):
return promote(result)
else:
return result
def append(self, other):
"""
        Append a collection of Index objects together
Parameters
----------
other : Index or list/tuple of indices
Returns
-------
appended : Index
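        Examples
        --------
        A minimal illustrative sketch:
        >>> from pandas import Index
        >>> Index([1, 2]).append(Index([3, 4])).tolist()
        [1, 2, 3, 4]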
"""
name = self.name
to_concat = [self]
if isinstance(other, (list, tuple)):
to_concat = to_concat + list(other)
else:
to_concat.append(other)
for obj in to_concat:
if isinstance(obj, Index) and obj.name != name:
name = None
break
to_concat = self._ensure_compat_concat(to_concat)
to_concat = [x.values if isinstance(x, Index) else x
for x in to_concat]
return Index(np.concatenate(to_concat), name=name)
@staticmethod
def _ensure_compat_concat(indexes):
from pandas.tseries.api import DatetimeIndex, PeriodIndex, TimedeltaIndex
klasses = DatetimeIndex, PeriodIndex, TimedeltaIndex
is_ts = [isinstance(idx, klasses) for idx in indexes]
if any(is_ts) and not all(is_ts):
return [_maybe_box(idx) for idx in indexes]
return indexes
def take(self, indexer, axis=0):
"""
return a new Index of the values selected by the indexer
See also
--------
numpy.ndarray.take
"""
indexer = com._ensure_platform_int(indexer)
taken = np.array(self).take(indexer)
        # by definition cannot propagate freq
return self._shallow_copy(taken, freq=None)
def putmask(self, mask, value):
"""
return a new Index of the values set with the mask
See also
--------
numpy.ndarray.putmask
"""
values = self.values.copy()
np.putmask(values, mask, value)
return self._shallow_copy(values)
def format(self, name=False, formatter=None, **kwargs):
"""
Render a string representation of the Index
"""
header = []
if name:
header.append(com.pprint_thing(self.name,
escape_chars=('\t', '\r', '\n'))
if self.name is not None else '')
if formatter is not None:
return header + list(self.map(formatter))
return self._format_with_header(header, **kwargs)
def _format_with_header(self, header, na_rep='NaN', **kwargs):
values = self.values
from pandas.core.format import format_array
if values.dtype == np.object_:
values = lib.maybe_convert_objects(values, safe=1)
if values.dtype == np.object_:
result = [com.pprint_thing(x, escape_chars=('\t', '\r', '\n'))
for x in values]
# could have nans
mask = isnull(values)
if mask.any():
result = np.array(result)
result[mask] = na_rep
result = result.tolist()
else:
result = _trim_front(format_array(values, None, justify='left'))
return header + result
def to_native_types(self, slicer=None, **kwargs):
""" slice and dice then format """
values = self
if slicer is not None:
values = values[slicer]
return values._format_native_types(**kwargs)
def _format_native_types(self, na_rep='', **kwargs):
""" actually format my specific types """
mask = isnull(self)
values = np.array(self, dtype=object, copy=True)
values[mask] = na_rep
return values.tolist()
def equals(self, other):
"""
Determines if two Index objects contain the same elements.
"""
if self.is_(other):
return True
if not isinstance(other, Index):
return False
if type(other) != Index:
return other.equals(self)
return array_equivalent(_values_from_object(self), _values_from_object(other))
def identical(self, other):
"""Similar to equals, but check that other comparable attributes are
also equal
"""
return (self.equals(other) and
all((getattr(self, c, None) == getattr(other, c, None)
for c in self._comparables)) and
type(self) == type(other))
def asof(self, label):
"""
For a sorted index, return the most recent label up to and including
the passed label. Return NaN if not found.
See also
--------
get_loc : asof is a thin wrapper around get_loc with method='pad'
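        Examples
        --------
        A minimal sketch on a sorted integer index (illustrative only):
        >>> from pandas import Index
        >>> Index([1, 3, 5]).asof(4)
        3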
"""
try:
loc = self.get_loc(label, method='pad')
except KeyError:
return _get_na_value(self.dtype)
else:
if isinstance(loc, slice):
loc = loc.indices(len(self))[-1]
return self[loc]
def asof_locs(self, where, mask):
"""
where : array of timestamps
mask : array of booleans where data is not NA
"""
locs = self.values[mask].searchsorted(where.values, side='right')
locs = np.where(locs > 0, locs - 1, 0)
result = np.arange(len(self))[mask].take(locs)
first = mask.argmax()
result[(locs == 0) & (where < self.values[first])] = -1
return result
def order(self, return_indexer=False, ascending=True):
"""
Return sorted copy of Index
"""
_as = self.argsort()
if not ascending:
_as = _as[::-1]
sorted_index = self.take(_as)
if return_indexer:
return sorted_index, _as
else:
return sorted_index
def sort(self, *args, **kwargs):
raise TypeError('Cannot sort an %r object' % self.__class__.__name__)
def shift(self, periods=1, freq=None):
"""
Shift Index containing datetime objects by input number of periods and
DateOffset
Returns
-------
shifted : Index
"""
if periods == 0:
# OK because immutable
return self
offset = periods * freq
return Index([idx + offset for idx in self], name=self.name)
def argsort(self, *args, **kwargs):
"""
return an ndarray indexer of the underlying data
See also
--------
numpy.ndarray.argsort
"""
result = self.asi8
if result is None:
result = np.array(self)
return result.argsort(*args, **kwargs)
def __add__(self, other):
if isinstance(other, Index):
warnings.warn("using '+' to provide set union with Indexes is deprecated, "
"use '|' or .union()",FutureWarning)
return self.union(other)
return Index(np.array(self) + other)
__iadd__ = __add__
def __sub__(self, other):
if isinstance(other, Index):
warnings.warn("using '-' to provide set differences with Indexes is deprecated, "
"use .difference()",FutureWarning)
return self.difference(other)
__eq__ = _indexOp('__eq__')
__ne__ = _indexOp('__ne__')
__lt__ = _indexOp('__lt__')
__gt__ = _indexOp('__gt__')
__le__ = _indexOp('__le__')
__ge__ = _indexOp('__ge__')
def __and__(self, other):
return self.intersection(other)
def __or__(self, other):
return self.union(other)
def __xor__(self, other):
return self.sym_diff(other)
def union(self, other):
"""
Form the union of two Index objects and sorts if possible
Parameters
----------
other : Index or array-like
Returns
-------
union : Index
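        Examples
        --------
        A minimal illustrative sketch with two small integer indexes:
        >>> from pandas import Index
        >>> Index([1, 2, 3]).union(Index([2, 3, 4])).tolist()
        [1, 2, 3, 4]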
"""
if not hasattr(other, '__iter__'):
raise TypeError('Input must be iterable.')
if len(other) == 0 or self.equals(other):
return self
if len(self) == 0:
return _ensure_index(other)
self._assert_can_do_setop(other)
if self.dtype != other.dtype:
this = self.astype('O')
other = other.astype('O')
return this.union(other)
if self.is_monotonic and other.is_monotonic:
try:
result = self._outer_indexer(self.values, other.values)[0]
except TypeError:
# incomparable objects
result = list(self.values)
# worth making this faster? a very unusual case
value_set = set(self.values)
result.extend([x for x in other.values if x not in value_set])
else:
indexer = self.get_indexer(other)
indexer, = (indexer == -1).nonzero()
if len(indexer) > 0:
other_diff = com.take_nd(other.values, indexer,
allow_fill=False)
result = com._concat_compat((self.values, other_diff))
try:
self.values[0] < other_diff[0]
except TypeError as e:
warnings.warn("%s, sort order is undefined for "
"incomparable objects" % e, RuntimeWarning)
else:
types = frozenset((self.inferred_type,
other.inferred_type))
if not types & _unsortable_types:
result.sort()
else:
result = self.values
try:
result = np.sort(result)
except TypeError as e:
warnings.warn("%s, sort order is undefined for "
"incomparable objects" % e, RuntimeWarning)
# for subclasses
return self._wrap_union_result(other, result)
def _wrap_union_result(self, other, result):
name = self.name if self.name == other.name else None
return self.__class__(data=result, name=name)
def intersection(self, other):
"""
Form the intersection of two Index objects. Sortedness of the result is
not guaranteed
Parameters
----------
other : Index or array-like
Returns
-------
intersection : Index
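        Examples
        --------
        A minimal illustrative sketch:
        >>> from pandas import Index
        >>> Index([1, 2, 3]).intersection(Index([2, 3, 4])).tolist()
        [2, 3]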
"""
if not hasattr(other, '__iter__'):
raise TypeError('Input must be iterable!')
self._assert_can_do_setop(other)
other = _ensure_index(other)
if self.equals(other):
return self
if self.dtype != other.dtype:
this = self.astype('O')
other = other.astype('O')
return this.intersection(other)
if self.is_monotonic and other.is_monotonic:
try:
result = self._inner_indexer(self.values, other.values)[0]
return self._wrap_union_result(other, result)
except TypeError:
pass
try:
indexer = self.get_indexer(other.values)
indexer = indexer.take((indexer != -1).nonzero()[0])
except:
# duplicates
indexer = self.get_indexer_non_unique(other.values)[0].unique()
indexer = indexer[indexer != -1]
taken = self.take(indexer)
if self.name != other.name:
taken.name = None
return taken
def difference(self, other):
"""
Compute sorted set difference of two Index objects
Parameters
----------
other : Index or array-like
Returns
-------
diff : Index
Notes
-----
        For example:
>>> index.difference(index2)
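        A concrete sketch (illustrative only):
        >>> from pandas import Index
        >>> Index([1, 2, 3, 4]).difference(Index([3, 4, 5])).tolist()
        [1, 2]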
"""
if not hasattr(other, '__iter__'):
raise TypeError('Input must be iterable!')
if self.equals(other):
return Index([], name=self.name)
if not isinstance(other, Index):
other = np.asarray(other)
result_name = self.name
else:
result_name = self.name if self.name == other.name else None
theDiff = sorted(set(self) - set(other))
return Index(theDiff, name=result_name)
diff = deprecate('diff',difference)
def sym_diff(self, other, result_name=None):
"""
Compute the sorted symmetric difference of two Index objects.
Parameters
----------
other : array-like
result_name : str
Returns
-------
sym_diff : Index
Notes
-----
``sym_diff`` contains elements that appear in either ``idx1`` or
``idx2`` but not both. Equivalent to the Index created by
``(idx1 - idx2) + (idx2 - idx1)`` with duplicates dropped.
The sorting of a result containing ``NaN`` values is not guaranteed
across Python versions. See GitHub issue #6444.
Examples
--------
>>> idx1 = Index([1, 2, 3, 4])
>>> idx2 = Index([2, 3, 4, 5])
>>> idx1.sym_diff(idx2)
Int64Index([1, 5], dtype='int64')
You can also use the ``^`` operator:
>>> idx1 ^ idx2
Int64Index([1, 5], dtype='int64')
"""
if not hasattr(other, '__iter__'):
raise TypeError('Input must be iterable!')
if not isinstance(other, Index):
other = Index(other)
result_name = result_name or self.name
the_diff = sorted(set((self.difference(other)).union(other.difference(self))))
return Index(the_diff, name=result_name)
def get_loc(self, key, method=None):
"""
Get integer location for requested label
Parameters
----------
key : label
method : {None, 'pad'/'ffill', 'backfill'/'bfill', 'nearest'}
* default: exact matches only.
* pad / ffill: find the PREVIOUS index value if no exact match.
* backfill / bfill: use NEXT index value if no exact match
* nearest: use the NEAREST index value if no exact match. Tied
distances are broken by preferring the larger index value.
Returns
-------
loc : int if unique index, possibly slice or mask if not
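        Examples
        --------
        A minimal sketch on a unique index (illustrative only):
        >>> from pandas import Index
        >>> Index(list('abc')).get_loc('b')
        1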
"""
if method is None:
return self._engine.get_loc(_values_from_object(key))
indexer = self.get_indexer([key], method=method)
if indexer.ndim > 1 or indexer.size > 1:
raise TypeError('get_loc requires scalar valued input')
loc = indexer.item()
if loc == -1:
raise KeyError(key)
return loc
def get_value(self, series, key):
"""
Fast lookup of value from 1-dimensional ndarray. Only use this if you
know what you're doing
"""
s = _values_from_object(series)
k = _values_from_object(key)
# prevent integer truncation bug in indexing
if is_float(k) and not self.is_floating():
raise KeyError
try:
return self._engine.get_value(s, k)
except KeyError as e1:
if len(self) > 0 and self.inferred_type in ['integer','boolean']:
raise
try:
return tslib.get_value_box(s, key)
except IndexError:
raise
except TypeError:
# generator/iterator-like
if com.is_iterator(key):
raise InvalidIndexError(key)
else:
raise e1
except Exception: # pragma: no cover
raise e1
except TypeError:
# python 3
if np.isscalar(key): # pragma: no cover
raise IndexError(key)
raise InvalidIndexError(key)
def set_value(self, arr, key, value):
"""
Fast lookup of value from 1-dimensional ndarray. Only use this if you
know what you're doing
"""
self._engine.set_value(
_values_from_object(arr), _values_from_object(key), value)
def get_level_values(self, level):
"""
Return vector of label values for requested level, equal to the length
of the index
Parameters
----------
level : int
Returns
-------
values : ndarray
"""
# checks that level number is actually just 1
self._validate_index_level(level)
return self
def get_indexer(self, target, method=None, limit=None):
"""
Compute indexer and mask for new index given the current index. The
        indexer should then be used as an input to ndarray.take to align the
current data to the new index.
Parameters
----------
target : Index
method : {None, 'pad'/'ffill', 'backfill'/'bfill', 'nearest'}
* default: exact matches only.
* pad / ffill: find the PREVIOUS index value if no exact match.
* backfill / bfill: use NEXT index value if no exact match
* nearest: use the NEAREST index value if no exact match. Tied
distances are broken by preferring the larger index value.
limit : int
            Maximum number of consecutive labels in ``target`` to match for
inexact matches.
Examples
--------
>>> indexer = index.get_indexer(new_index)
>>> new_values = cur_values.take(indexer)
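        A concrete sketch (illustrative only); missing labels map to -1:
        >>> from pandas import Index
        >>> Index(['a', 'b', 'c']).get_indexer(['b', 'x']).tolist()
        [1, -1]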
Returns
-------
indexer : ndarray of int
Integers from 0 to n - 1 indicating that the index at these
positions matches the corresponding target values. Missing values
in the target are marked by -1.
"""
method = com._clean_reindex_fill_method(method)
target = _ensure_index(target)
pself, ptarget = self._possibly_promote(target)
if pself is not self or ptarget is not target:
return pself.get_indexer(ptarget, method=method, limit=limit)
if self.dtype != target.dtype:
this = self.astype(object)
target = target.astype(object)
return this.get_indexer(target, method=method, limit=limit)
if not self.is_unique:
raise InvalidIndexError('Reindexing only valid with uniquely'
' valued Index objects')
if method == 'pad' or method == 'backfill':
indexer = self._get_fill_indexer(target, method, limit)
elif method == 'nearest':
indexer = self._get_nearest_indexer(target, limit)
else:
indexer = self._engine.get_indexer(target.values)
return com._ensure_platform_int(indexer)
def _get_fill_indexer(self, target, method, limit=None):
if self.is_monotonic_increasing and target.is_monotonic_increasing:
method = (self._engine.get_pad_indexer if method == 'pad'
else self._engine.get_backfill_indexer)
indexer = method(target.values, limit)
else:
indexer = self._get_fill_indexer_searchsorted(target, method, limit)
return indexer
def _get_fill_indexer_searchsorted(self, target, method, limit=None):
"""
Fallback pad/backfill get_indexer that works for monotonic decreasing
indexes and non-monotonic targets
"""
if limit is not None:
raise ValueError('limit argument for %r method only well-defined '
'if index and target are monotonic' % method)
side = 'left' if method == 'pad' else 'right'
target = np.asarray(target)
# find exact matches first (this simplifies the algorithm)
indexer = self.get_indexer(target)
nonexact = (indexer == -1)
indexer[nonexact] = self._searchsorted_monotonic(target[nonexact], side)
if side == 'left':
# searchsorted returns "indices into a sorted array such that,
# if the corresponding elements in v were inserted before the
# indices, the order of a would be preserved".
# Thus, we need to subtract 1 to find values to the left.
indexer[nonexact] -= 1
# This also mapped not found values (values of 0 from
# np.searchsorted) to -1, which conveniently is also our
# sentinel for missing values
else:
# Mark indices to the right of the largest value as not found
indexer[indexer == len(self)] = -1
return indexer
def _get_nearest_indexer(self, target, limit):
"""
Get the indexer for the nearest index labels; requires an index with
values that can be subtracted from each other (e.g., not strings or
tuples).
"""
left_indexer = self.get_indexer(target, 'pad', limit=limit)
right_indexer = self.get_indexer(target, 'backfill', limit=limit)
target = np.asarray(target)
left_distances = abs(self.values[left_indexer] - target)
right_distances = abs(self.values[right_indexer] - target)
op = operator.lt if self.is_monotonic_increasing else operator.le
indexer = np.where(op(left_distances, right_distances)
| (right_indexer == -1),
left_indexer, right_indexer)
return indexer
def get_indexer_non_unique(self, target):
""" return an indexer suitable for taking from a non unique index
return the labels in the same order as the target, and
return a missing indexer into the target (missing are marked as -1
in the indexer); target must be an iterable """
target = _ensure_index(target)
pself, ptarget = self._possibly_promote(target)
if pself is not self or ptarget is not target:
return pself.get_indexer_non_unique(ptarget)
if self.is_all_dates:
self = Index(self.asi8)
tgt_values = target.asi8
else:
tgt_values = target.values
indexer, missing = self._engine.get_indexer_non_unique(tgt_values)
return Index(indexer), missing
def get_indexer_for(self, target, **kwargs):
""" guaranteed return of an indexer even when non-unique """
if self.is_unique:
return self.get_indexer(target, **kwargs)
return self.get_indexer_non_unique(target, **kwargs)[0]
def _possibly_promote(self, other):
# A hack, but it works
from pandas.tseries.index import DatetimeIndex
if self.inferred_type == 'date' and isinstance(other, DatetimeIndex):
return DatetimeIndex(self), other
elif self.inferred_type == 'boolean':
if self.dtype != 'object':
return self.astype('object'), other.astype('object')
return self, other
def groupby(self, to_groupby):
"""
Group the index labels by a given array of values.
Parameters
----------
to_groupby : array
Values used to determine the groups.
Returns
-------
groups : dict
{group name -> group labels}
"""
return self._groupby(self.values, _values_from_object(to_groupby))
def map(self, mapper):
return self._arrmap(self.values, mapper)
def isin(self, values, level=None):
"""
Compute boolean array of whether each index value is found in the
passed set of values.
Parameters
----------
values : set or sequence of values
Sought values.
level : str or int, optional
Name or position of the index level to use (if the index is a
MultiIndex).
Notes
-----
If `level` is specified:
- if it is the name of one *and only one* index level, use that level;
- otherwise it should be a number indicating level position.
Returns
-------
is_contained : ndarray (boolean dtype)
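        Examples
        --------
        A minimal illustrative sketch:
        >>> from pandas import Index
        >>> Index([1, 2, 3]).isin([1, 4]).tolist()
        [True, False, False]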
"""
value_set = set(values)
if level is not None:
self._validate_index_level(level)
return lib.ismember(self._array_values(), value_set)
def reindex(self, target, method=None, level=None, limit=None):
"""
Create index with target's values (move/add/delete values as necessary)
Returns
-------
new_index : pd.Index
Resulting index
indexer : np.ndarray or None
Indices of output values in original index
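        Examples
        --------
        A minimal sketch (illustrative only); labels missing from the original
        index get indexer value -1:
        >>> from pandas import Index
        >>> new_index, indexer = Index(['a', 'b', 'c']).reindex(['b', 'd'])
        >>> indexer.tolist()
        [1, -1]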
"""
# GH6552: preserve names when reindexing to non-named target
# (i.e. neither Index nor Series).
preserve_names = not hasattr(target, 'name')
# GH7774: preserve dtype/tz if target is empty and not an Index.
target = _ensure_has_len(target) # target may be an iterator
if not isinstance(target, Index) and len(target) == 0:
attrs = self._get_attributes_dict()
attrs.pop('freq', None) # don't preserve freq
target = self._simple_new(np.empty(0, dtype=self.dtype), **attrs)
else:
target = _ensure_index(target)
if level is not None:
if method is not None:
raise TypeError('Fill method not supported if level passed')
_, indexer, _ = self._join_level(target, level, how='right',
return_indexers=True)
else:
if self.equals(target):
indexer = None
else:
if self.is_unique:
indexer = self.get_indexer(target, method=method,
limit=limit)
else:
if method is not None or limit is not None:
raise ValueError("cannot reindex a non-unique index "
"with a method or limit")
indexer, missing = self.get_indexer_non_unique(target)
if preserve_names and target.nlevels == 1 and target.name != self.name:
target = target.copy()
target.name = self.name
return target, indexer
def join(self, other, how='left', level=None, return_indexers=False):
"""
Internal API method. Compute join_index and indexers to conform data
structures to the new index.
Parameters
----------
other : Index
how : {'left', 'right', 'inner', 'outer'}
level : int or level name, default None
return_indexers : boolean, default False
Returns
-------
join_index, (left_indexer, right_indexer)
"""
self_is_mi = isinstance(self, MultiIndex)
other_is_mi = isinstance(other, MultiIndex)
# try to figure out the join level
# GH3662
if (level is None and (self_is_mi or other_is_mi)):
# have the same levels/names so a simple join
if self.names == other.names:
pass
else:
return self._join_multi(other, how=how, return_indexers=return_indexers)
# join on the level
if (level is not None and (self_is_mi or other_is_mi)):
return self._join_level(other, level, how=how,
return_indexers=return_indexers)
other = _ensure_index(other)
if len(other) == 0 and how in ('left', 'outer'):
join_index = self._shallow_copy()
if return_indexers:
rindexer = np.repeat(-1, len(join_index))
return join_index, None, rindexer
else:
return join_index
if len(self) == 0 and how in ('right', 'outer'):
join_index = other._shallow_copy()
if return_indexers:
lindexer = np.repeat(-1, len(join_index))
return join_index, lindexer, None
else:
return join_index
if self._join_precedence < other._join_precedence:
how = {'right': 'left', 'left': 'right'}.get(how, how)
result = other.join(self, how=how, level=level,
return_indexers=return_indexers)
if return_indexers:
x, y, z = result
result = x, z, y
return result
if self.dtype != other.dtype:
this = self.astype('O')
other = other.astype('O')
return this.join(other, how=how,
return_indexers=return_indexers)
_validate_join_method(how)
if not self.is_unique and not other.is_unique:
return self._join_non_unique(other, how=how,
return_indexers=return_indexers)
elif not self.is_unique or not other.is_unique:
if self.is_monotonic and other.is_monotonic:
return self._join_monotonic(other, how=how,
return_indexers=return_indexers)
else:
return self._join_non_unique(other, how=how,
return_indexers=return_indexers)
elif self.is_monotonic and other.is_monotonic:
try:
return self._join_monotonic(other, how=how,
return_indexers=return_indexers)
except TypeError:
pass
if how == 'left':
join_index = self
elif how == 'right':
join_index = other
elif how == 'inner':
join_index = self.intersection(other)
elif how == 'outer':
join_index = self.union(other)
if return_indexers:
if join_index is self:
lindexer = None
else:
lindexer = self.get_indexer(join_index)
if join_index is other:
rindexer = None
else:
rindexer = other.get_indexer(join_index)
return join_index, lindexer, rindexer
else:
return join_index
def _join_multi(self, other, how, return_indexers=True):
self_is_mi = isinstance(self, MultiIndex)
other_is_mi = isinstance(other, MultiIndex)
# figure out join names
self_names = [ n for n in self.names if n is not None ]
other_names = [ n for n in other.names if n is not None ]
overlap = list(set(self_names) & set(other_names))
# need at least 1 in common, but not more than 1
if not len(overlap):
raise ValueError("cannot join with no level specified and no overlapping names")
if len(overlap) > 1:
raise NotImplementedError("merging with more than one level overlap on a multi-index is not implemented")
jl = overlap[0]
# make the indices into mi's that match
if not (self_is_mi and other_is_mi):
flip_order = False
if self_is_mi:
self, other = other, self
flip_order = True
level = other.names.index(jl)
result = self._join_level(other, level, how=how,
return_indexers=return_indexers)
if flip_order:
if isinstance(result, tuple):
return result[0], result[2], result[1]
return result
# 2 multi-indexes
raise NotImplementedError("merging with both multi-indexes is not implemented")
def _join_non_unique(self, other, how='left', return_indexers=False):
from pandas.tools.merge import _get_join_indexers
left_idx, right_idx = _get_join_indexers([self.values], [other.values],
how=how, sort=True)
left_idx = com._ensure_platform_int(left_idx)
right_idx = com._ensure_platform_int(right_idx)
join_index = self.values.take(left_idx)
mask = left_idx == -1
np.putmask(join_index, mask, other.values.take(right_idx))
join_index = self._wrap_joined_index(join_index, other)
if return_indexers:
return join_index, left_idx, right_idx
else:
return join_index
def _join_level(self, other, level, how='left',
return_indexers=False,
keep_order=True):
"""
The join method *only* affects the level of the resulting
MultiIndex. Otherwise it just exactly aligns the Index data to the
labels of the level in the MultiIndex. If `keep_order` == True, the
order of the data indexed by the MultiIndex will not be changed;
otherwise, it will tie out with `other`.
"""
from pandas.algos import groupsort_indexer
def _get_leaf_sorter(labels):
'''
            returns sorter for the innermost level while preserving the
order of higher levels
'''
if labels[0].size == 0:
return np.empty(0, dtype='int64')
if len(labels) == 1:
lab = com._ensure_int64(labels[0])
sorter, _ = groupsort_indexer(lab, 1 + lab.max())
return sorter
            # find indexers of the beginning of each set of
# same-key labels w.r.t all but last level
tic = labels[0][:-1] != labels[0][1:]
for lab in labels[1:-1]:
tic |= lab[:-1] != lab[1:]
starts = np.hstack(([True], tic, [True])).nonzero()[0]
lab = com._ensure_int64(labels[-1])
return lib.get_level_sorter(lab, com._ensure_int64(starts))
if isinstance(self, MultiIndex) and isinstance(other, MultiIndex):
raise TypeError('Join on level between two MultiIndex objects '
'is ambiguous')
left, right = self, other
flip_order = not isinstance(self, MultiIndex)
if flip_order:
left, right = right, left
how = {'right': 'left', 'left': 'right'}.get(how, how)
level = left._get_level_number(level)
old_level = left.levels[level]
if not right.is_unique:
raise NotImplementedError('Index._join_level on non-unique index '
'is not implemented')
new_level, left_lev_indexer, right_lev_indexer = \
old_level.join(right, how=how, return_indexers=True)
if left_lev_indexer is None:
if keep_order or len(left) == 0:
left_indexer = None
join_index = left
else: # sort the leaves
left_indexer = _get_leaf_sorter(left.labels[:level + 1])
join_index = left[left_indexer]
else:
left_lev_indexer = com._ensure_int64(left_lev_indexer)
rev_indexer = lib.get_reverse_indexer(left_lev_indexer,
len(old_level))
new_lev_labels = com.take_nd(rev_indexer, left.labels[level],
allow_fill=False)
new_labels = list(left.labels)
new_labels[level] = new_lev_labels
new_levels = list(left.levels)
new_levels[level] = new_level
if keep_order: # just drop missing values. o.w. keep order
left_indexer = np.arange(len(left))
mask = new_lev_labels != -1
if not mask.all():
new_labels = [lab[mask] for lab in new_labels]
left_indexer = left_indexer[mask]
else: # tie out the order with other
if level == 0: # outer most level, take the fast route
ngroups = 1 + new_lev_labels.max()
left_indexer, counts = groupsort_indexer(new_lev_labels,
ngroups)
# missing values are placed first; drop them!
left_indexer = left_indexer[counts[0]:]
new_labels = [lab[left_indexer] for lab in new_labels]
else: # sort the leaves
mask = new_lev_labels != -1
mask_all = mask.all()
if not mask_all:
new_labels = [lab[mask] for lab in new_labels]
left_indexer = _get_leaf_sorter(new_labels[:level + 1])
new_labels = [lab[left_indexer] for lab in new_labels]
# left_indexers are w.r.t masked frame.
# reverse to original frame!
if not mask_all:
left_indexer = mask.nonzero()[0][left_indexer]
join_index = MultiIndex(levels=new_levels,
labels=new_labels,
names=left.names,
verify_integrity=False)
if right_lev_indexer is not None:
right_indexer = com.take_nd(right_lev_indexer,
join_index.labels[level],
allow_fill=False)
else:
right_indexer = join_index.labels[level]
if flip_order:
left_indexer, right_indexer = right_indexer, left_indexer
if return_indexers:
return join_index, left_indexer, right_indexer
else:
return join_index
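# Illustrative level-based join (a minimal sketch; assumes
# ``from pandas import Index, MultiIndex``). ``Index.join`` dispatches to
# _join_level when ``level`` is passed and exactly one side is a MultiIndex:
#
#     >>> mi = MultiIndex.from_product([['a', 'b'], [1, 2]], names=['k', 'n'])
#     >>> other = Index(['b', 'a'], name='k')
#     >>> joined, lidx, ridx = mi.join(other, level='k', return_indexers=True)
#
# With the default how='left' and keep_order=True, ``joined`` keeps the
# MultiIndex row order and ``ridx`` maps each row to the matching position
# in ``other`` via its 'k' level.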
def _join_monotonic(self, other, how='left', return_indexers=False):
if self.equals(other):
ret_index = other if how == 'right' else self
if return_indexers:
return ret_index, None, None
else:
return ret_index
sv = self.values
ov = other.values
if self.is_unique and other.is_unique:
# We can perform much better than the general case
if how == 'left':
join_index = self
lidx = None
ridx = self._left_indexer_unique(sv, ov)
elif how == 'right':
join_index = other
lidx = self._left_indexer_unique(ov, sv)
ridx = None
elif how == 'inner':
join_index, lidx, ridx = self._inner_indexer(sv, ov)
join_index = self._wrap_joined_index(join_index, other)
elif how == 'outer':
join_index, lidx, ridx = self._outer_indexer(sv, ov)
join_index = self._wrap_joined_index(join_index, other)
else:
if how == 'left':
join_index, lidx, ridx = self._left_indexer(sv, ov)
elif how == 'right':
join_index, ridx, lidx = self._left_indexer(ov, sv)
elif how == 'inner':
join_index, lidx, ridx = self._inner_indexer(sv, ov)
elif how == 'outer':
join_index, lidx, ridx = self._outer_indexer(sv, ov)
join_index = self._wrap_joined_index(join_index, other)
if return_indexers:
return join_index, lidx, ridx
else:
return join_index
def _wrap_joined_index(self, joined, other):
name = self.name if self.name == other.name else None
return Index(joined, name=name)
def slice_indexer(self, start=None, end=None, step=None, kind=None):
"""
For an ordered Index, compute the slice indexer for input labels and
step
Parameters
----------
start : label, default None
If None, defaults to the beginning
end : label, default None
If None, defaults to the end
step : int, default None
kind : string, default None
Returns
-------
indexer : ndarray or slice
Notes
-----
This function assumes that the data is sorted, so use at your own peril
"""
start_slice, end_slice = self.slice_locs(start, end, step=step, kind=kind)
# return a slice
if not lib.isscalar(start_slice):
raise AssertionError("Start slice bound is non-scalar")
if not lib.isscalar(end_slice):
raise AssertionError("End slice bound is non-scalar")
return slice(start_slice, end_slice, step)
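# Illustrative usage (a minimal sketch; assumes ``from pandas import Index``):
#
#     >>> Index(list('abcd')).slice_indexer(start='b', end='c')
#     slice(1, 3, None)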
def _maybe_cast_slice_bound(self, label, side, kind):
"""
This function should be overloaded in subclasses that allow non-trivial
casting on label-slice bounds, e.g. datetime-like indices allowing
strings containing formatted datetimes.
Parameters
----------
label : object
side : {'left', 'right'}
kind : string / None
Returns
-------
label : object
Notes
-----
Value of `side` parameter should be validated in caller.
"""
# We are a plain index here (sub-class override this method if they
# wish to have special treatment for floats/ints, e.g. Float64Index and
# datetimelike Indexes
# reject them
if is_float(label):
self._invalid_indexer('slice',label)
# we are trying to find integer bounds on a non-integer based index
# this is rejected (generally .loc gets you here)
elif is_integer(label):
self._invalid_indexer('slice',label)
return label
def _searchsorted_monotonic(self, label, side='left'):
if self.is_monotonic_increasing:
return self.searchsorted(label, side=side)
elif self.is_monotonic_decreasing:
# np.searchsorted expects ascending sort order, have to reverse
# everything for it to work (element ordering, search side and
# resulting value).
pos = self[::-1].searchsorted(
label, side='right' if side == 'left' else 'left')
return len(self) - pos
raise ValueError('index must be monotonic increasing or decreasing')
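# Worked example of the reversal above (a sketch): for the decreasing
# Index([3, 2, 1]) and label 2 with side='left', the reversed view [1, 2, 3]
# is searched with side='right', giving pos == 2, and len(self) - pos == 1,
# which is the position of 2 in the original decreasing order.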
def get_slice_bound(self, label, side, kind):
"""
Calculate slice bound that corresponds to given label.
Returns leftmost (one-past-the-rightmost if ``side=='right'``) position
of given label.
Parameters
----------
label : object
side : {'left', 'right'}
kind : string / None, the type of indexer
"""
if side not in ('left', 'right'):
raise ValueError(
"Invalid value for side kwarg,"
" must be either 'left' or 'right': %s" % (side,))
original_label = label
# For datetime indices label may be a string that has to be converted
# to datetime boundary according to its resolution.
label = self._maybe_cast_slice_bound(label, side, kind)
# we need to look up the label
try:
slc = self.get_loc(label)
except KeyError as err:
try:
return self._searchsorted_monotonic(label, side)
except ValueError:
# raise the original KeyError
raise err
if isinstance(slc, np.ndarray):
# get_loc may return a boolean array or an array of indices, which
# is OK as long as they are representable by a slice.
if is_bool_dtype(slc):
slc = lib.maybe_booleans_to_slice(slc.view('u1'))
else:
slc = lib.maybe_indices_to_slice(slc.astype('i8'))
if isinstance(slc, np.ndarray):
raise KeyError(
"Cannot get %s slice bound for non-unique label:"
" %r" % (side, original_label))
if isinstance(slc, slice):
if side == 'left':
return slc.start
else:
return slc.stop
else:
if side == 'right':
return slc + 1
else:
return slc
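# Illustrative usage (a minimal sketch; assumes ``from pandas import Index``):
#
#     >>> idx = Index(list('abc'))
#     >>> idx.get_slice_bound('b', side='left', kind=None)
#     1
#     >>> idx.get_slice_bound('b', side='right', kind=None)
#     2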
def slice_locs(self, start=None, end=None, step=None, kind=None):
"""
Compute slice locations for input labels.
Parameters
----------
start : label, default None
If None, defaults to the beginning
end : label, default None
If None, defaults to the end
step : int, defaults None
If None, defaults to 1
kind : string, defaults None
Returns
-------
start, end : int
"""
inc = (step is None or step >= 0)
if not inc:
# If it's a reverse slice, temporarily swap bounds.
start, end = end, start
start_slice = None
if start is not None:
start_slice = self.get_slice_bound(start, 'left', kind)
if start_slice is None:
start_slice = 0
end_slice = None
if end is not None:
end_slice = self.get_slice_bound(end, 'right', kind)
if end_slice is None:
end_slice = len(self)
if not inc:
# Bounds at this moment are swapped, swap them back and shift by 1.
#
# slice_locs('B', 'A', step=-1): s='B', e='A'
#
# s='A' e='B'
# AFTER SWAP: | |
# v ------------------> V
# -----------------------------------
# | | |A|A|A|A| | | | | |B|B| | | | |
# -----------------------------------
# ^ <------------------ ^
# SHOULD BE: | |
# end=s-1 start=e-1
#
end_slice, start_slice = start_slice - 1, end_slice - 1
# i == -1 triggers ``len(self) + i`` selection that points to the
# last element, not before-the-first one, subtracting len(self)
# compensates for that.
if end_slice == -1:
end_slice -= len(self)
if start_slice == -1:
start_slice -= len(self)
return start_slice, end_slice
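# Illustrative usage (a minimal sketch; assumes ``from pandas import Index``):
#
#     >>> Index(list('abcd')).slice_locs(start='b', end='c')
#     (1, 3)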
def delete(self, loc):
"""
Make new Index with passed location(-s) deleted
Returns
-------
new_index : Index
"""
return Index(np.delete(self._data, loc), name=self.name)
def insert(self, loc, item):
"""
Make new Index inserting new item at location. Follows
Python list.append semantics for negative values
Parameters
----------
loc : int
item : object
Returns
-------
new_index : Index
"""
_self = np.asarray(self)
item_idx = Index([item], dtype=self.dtype).values
idx = np.concatenate(
(_self[:loc], item_idx, _self[loc:]))
return Index(idx, name=self.name)
def drop(self, labels):
"""
Make new Index with passed list of labels deleted
Parameters
----------
labels : array-like
Returns
-------
dropped : Index
"""
labels = com._index_labels_to_array(labels)
indexer = self.get_indexer(labels)
mask = indexer == -1
if mask.any():
raise ValueError('labels %s not contained in axis' % labels[mask])
return self.delete(indexer)
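# Illustrative usage of delete/insert/drop (a minimal sketch; assumes
# ``from pandas import Index``); results are described rather than shown as
# exact reprs, which vary slightly between versions:
#
#     >>> idx = Index(['a', 'b', 'c'])
#     >>> idx.delete(1)        # object Index of ['a', 'c']
#     >>> idx.insert(1, 'x')   # object Index of ['a', 'x', 'b', 'c']
#     >>> idx.drop(['b', 'c']) # object Index of ['a']
#     >>> idx.drop(['d'])      # raises ValueError: labels not contained in axis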
@Appender(_shared_docs['drop_duplicates'] % _index_doc_kwargs)
def drop_duplicates(self, take_last=False):
result = super(Index, self).drop_duplicates(take_last=take_last)
return self._constructor(result)
@Appender(_shared_docs['duplicated'] % _index_doc_kwargs)
def duplicated(self, take_last=False):
return super(Index, self).duplicated(take_last=take_last)
def _evaluate_with_timedelta_like(self, other, op, opstr):
raise TypeError("can only perform ops with timedelta like values")
def _evaluate_with_datetime_like(self, other, op, opstr):
raise TypeError("can only perform ops with datetime like values")
@classmethod
def _add_numeric_methods_disabled(cls):
""" add in numeric methods to disable """
def _make_invalid_op(name):
def invalid_op(self, other=None):
raise TypeError("cannot perform {name} with this index type: {typ}".format(name=name,
typ=type(self)))
invalid_op.__name__ = name
return invalid_op
cls.__mul__ = cls.__rmul__ = _make_invalid_op('__mul__')
cls.__floordiv__ = cls.__rfloordiv__ = _make_invalid_op('__floordiv__')
cls.__truediv__ = cls.__rtruediv__ = _make_invalid_op('__truediv__')
if not compat.PY3:
cls.__div__ = cls.__rdiv__ = _make_invalid_op('__div__')
cls.__neg__ = _make_invalid_op('__neg__')
cls.__pos__ = _make_invalid_op('__pos__')
cls.__abs__ = _make_invalid_op('__abs__')
cls.__inv__ = _make_invalid_op('__inv__')
@classmethod
def _add_numeric_methods(cls):
""" add in numeric methods """
def _make_evaluate_binop(op, opstr, reversed=False):
def _evaluate_numeric_binop(self, other):
import pandas.tseries.offsets as offsets
# if we are an inheritor of numeric, but not actually numeric (e.g. DatetimeIndex/PeriodIndex)
if not self._is_numeric_dtype:
raise TypeError("cannot evaluate a numeric op {opstr} for type: {typ}".format(opstr=opstr,
typ=type(self)))
if isinstance(other, Index):
if not other._is_numeric_dtype:
raise TypeError("cannot evaluate a numeric op {opstr} with type: {typ}".format(opstr=opstr,
typ=type(other)))
elif isinstance(other, np.ndarray) and not other.ndim:
other = other.item()
if isinstance(other, (Index, ABCSeries, np.ndarray)):
if len(self) != len(other):
raise ValueError("cannot evaluate a numeric op with unequal lengths")
other = _values_from_object(other)
if other.dtype.kind not in ['f','i']:
raise TypeError("cannot evaluate a numeric op with a non-numeric dtype")
elif isinstance(other, (offsets.DateOffset, np.timedelta64, Timedelta, datetime.timedelta)):
return self._evaluate_with_timedelta_like(other, op, opstr)
elif isinstance(other, (Timestamp, np.datetime64)):
return self._evaluate_with_datetime_like(other, op, opstr)
else:
if not (com.is_float(other) or com.is_integer(other)):
raise TypeError("can only perform ops with scalar values")
# if we are a reversed non-commutative op
values = self.values
if reversed:
values, other = other, values
return self._shallow_copy(op(values, other))
return _evaluate_numeric_binop
def _make_evaluate_unary(op, opstr):
def _evaluate_numeric_unary(self):
# if we are an inheritor of numeric, but not actually numeric (e.g. DatetimeIndex/PeriodIndex)
if not self._is_numeric_dtype:
raise TypeError("cannot evaluate a numeric op {opstr} for type: {typ}".format(opstr=opstr,
typ=type(self)))
return self._shallow_copy(op(self.values))
return _evaluate_numeric_unary
cls.__add__ = cls.__radd__ = _make_evaluate_binop(operator.add,'__add__')
cls.__sub__ = _make_evaluate_binop(operator.sub,'__sub__')
cls.__rsub__ = _make_evaluate_binop(operator.sub,'__sub__',reversed=True)
cls.__mul__ = cls.__rmul__ = _make_evaluate_binop(operator.mul,'__mul__')
cls.__floordiv__ = _make_evaluate_binop(operator.floordiv,'__floordiv__')
cls.__rfloordiv__ = _make_evaluate_binop(operator.floordiv,'__floordiv__',reversed=True)
cls.__truediv__ = _make_evaluate_binop(operator.truediv,'__truediv__')
cls.__rtruediv__ = _make_evaluate_binop(operator.truediv,'__truediv__',reversed=True)
if not compat.PY3:
cls.__div__ = _make_evaluate_binop(operator.div,'__div__')
cls.__rdiv__ = _make_evaluate_binop(operator.div,'__div__',reversed=True)
cls.__neg__ = _make_evaluate_unary(lambda x: -x,'__neg__')
cls.__pos__ = _make_evaluate_unary(lambda x: x,'__pos__')
cls.__abs__ = _make_evaluate_unary(lambda x: np.abs(x),'__abs__')
cls.__inv__ = _make_evaluate_unary(lambda x: -x,'__inv__')
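# Illustrative effect of the generated methods (a minimal sketch; assumes
# ``from pandas import Int64Index``, which calls _add_numeric_methods below):
#
#     >>> Int64Index([1, 2, 3]) + 1   # element-wise, gives Int64Index([2, 3, 4])
#     >>> 10 - Int64Index([1, 2, 3])  # reversed op, gives Int64Index([9, 8, 7])
#     >>> -Int64Index([1, 2, 3])      # unary op, gives Int64Index([-1, -2, -3])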
@classmethod
def _add_logical_methods(cls):
""" add in logical methods """
_doc = """
%(desc)s
Parameters
----------
All arguments to numpy.%(outname)s are accepted.
Returns
-------
%(outname)s : bool or array_like (if axis is specified)
A single element array_like may be converted to bool."""
def _make_logical_function(name, desc, f):
@Substitution(outname=name, desc=desc)
@Appender(_doc)
def logical_func(self, *args, **kwargs):
result = f(self.values)
if isinstance(result, (np.ndarray, com.ABCSeries, Index)) \
and result.ndim == 0:
# return NumPy type
return result.dtype.type(result.item())
else: # pragma: no cover
return result
logical_func.__name__ = name
return logical_func
cls.all = _make_logical_function(
'all', 'Return whether all elements are True', np.all)
cls.any = _make_logical_function(
'any', 'Return whether any element is True', np.any)
@classmethod
def _add_logical_methods_disabled(cls):
""" add in logical methods to disable """
def _make_invalid_op(name):
def invalid_op(self, other=None):
raise TypeError("cannot perform {name} with this index type: {typ}".format(name=name,
typ=type(self)))
invalid_op.__name__ = name
return invalid_op
cls.all = _make_invalid_op('all')
cls.any = _make_invalid_op('any')
Index._add_numeric_methods_disabled()
Index._add_logical_methods()
class NumericIndex(Index):
"""
Provide numeric type operations
This is an abstract class
"""
_is_numeric_dtype = True
def _maybe_cast_slice_bound(self, label, side, kind):
"""
This function should be overloaded in subclasses that allow non-trivial
casting on label-slice bounds, e.g. datetime-like indices allowing
strings containing formatted datetimes.
Parameters
----------
label : object
side : {'left', 'right'}
kind : string / None
Returns
-------
label : object
Notes
-----
Value of `side` parameter should be validated in caller.
"""
# we are a numeric index, so we accept
# integer/floats directly
if not (is_integer(label) or is_float(label)):
self._invalid_indexer('slice',label)
return label
class Int64Index(NumericIndex):
"""
Immutable ndarray implementing an ordered, sliceable set. The basic object
storing axis labels for all pandas objects. Int64Index is a special case
of `Index` with purely integer labels. This is the default index type used
by the DataFrame and Series ctors when no explicit index is provided by the
user.
Parameters
----------
data : array-like (1-dimensional)
dtype : NumPy dtype (default: int64)
copy : bool
Make a copy of input ndarray
name : object
Name to be stored in the index
Notes
-----
An Index instance can **only** contain hashable objects
"""
_typ = 'int64index'
_groupby = _algos.groupby_int64
_arrmap = _algos.arrmap_int64
_left_indexer_unique = _algos.left_join_indexer_unique_int64
_left_indexer = _algos.left_join_indexer_int64
_inner_indexer = _algos.inner_join_indexer_int64
_outer_indexer = _algos.outer_join_indexer_int64
_engine_type = _index.Int64Engine
def __new__(cls, data=None, dtype=None, copy=False, name=None, fastpath=False, **kwargs):
if fastpath:
return cls._simple_new(data, name=name)
# isscalar, generators handled in coerce_to_ndarray
data = cls._coerce_to_ndarray(data)
if issubclass(data.dtype.type, compat.string_types):
cls._string_data_error(data)
elif issubclass(data.dtype.type, np.integer):
# don't force the upcast as we may be dealing
# with a platform int
if dtype is None or not issubclass(np.dtype(dtype).type,
np.integer):
dtype = np.int64
subarr = np.array(data, dtype=dtype, copy=copy)
else:
subarr = np.array(data, dtype=np.int64, copy=copy)
if len(data) > 0:
if (subarr != data).any():
raise TypeError('Unsafe NumPy casting to integer, you must'
' explicitly cast')
return cls._simple_new(subarr, name=name)
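# Illustrative constructor behaviour (a minimal sketch; assumes
# ``from pandas import Int64Index``):
#
#     >>> Int64Index([1, 2, 3])    # integer data is taken as-is
#     >>> Int64Index([1.0, 2.0])   # safe: survives the int64 round-trip, gives Int64Index([1, 2])
#     >>> Int64Index([1.5, 2.5])   # raises TypeError: unsafe NumPy casting to integer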
@property
def inferred_type(self):
return 'integer'
@cache_readonly
def hasnans(self):
# by definition
return False
@property
def asi8(self):
# do not cache or you'll create a memory leak
return self.values.view('i8')
@property
def is_all_dates(self):
"""
Checks that all the labels are datetime objects
"""
return False
def equals(self, other):
"""
Determines if two Index objects contain the same elements.
"""
if self.is_(other):
return True
# if not isinstance(other, Int64Index):
# return False
try:
return array_equivalent(_values_from_object(self), _values_from_object(other))
except TypeError:
# e.g. fails in numpy 1.6 with DatetimeIndex #1681
return False
def _wrap_joined_index(self, joined, other):
name = self.name if self.name == other.name else None
return Int64Index(joined, name=name)
Int64Index._add_numeric_methods()
Int64Index._add_logical_methods()
class Float64Index(NumericIndex):
"""
Immutable ndarray implementing an ordered, sliceable set. The basic object
storing axis labels for all pandas objects. Float64Index is a special case
of `Index` with purely floating point labels.
Parameters
----------
data : array-like (1-dimensional)
dtype : NumPy dtype (default: object)
copy : bool
Make a copy of input ndarray
name : object
Name to be stored in the index
Notes
-----
A Float64Index instance can **only** contain hashable objects
"""
_typ = 'float64index'
_engine_type = _index.Float64Engine
_groupby = _algos.groupby_float64
_arrmap = _algos.arrmap_float64
_left_indexer_unique = _algos.left_join_indexer_unique_float64
_left_indexer = _algos.left_join_indexer_float64
_inner_indexer = _algos.inner_join_indexer_float64
_outer_indexer = _algos.outer_join_indexer_float64
def __new__(cls, data=None, dtype=None, copy=False, name=None, fastpath=False, **kwargs):
if fastpath:
return cls._simple_new(data, name)
data = cls._coerce_to_ndarray(data)
if issubclass(data.dtype.type, compat.string_types):
cls._string_data_error(data)
if dtype is None:
dtype = np.float64
try:
subarr = np.array(data, dtype=dtype, copy=copy)
except:
raise TypeError('Unsafe NumPy casting, you must '
'explicitly cast')
# coerce to float64 for storage
if subarr.dtype != np.float64:
subarr = subarr.astype(np.float64)
return cls._simple_new(subarr, name)
@property
def inferred_type(self):
return 'floating'
def astype(self, dtype):
if np.dtype(dtype) not in (np.object, np.float64):
raise TypeError('Setting %s dtype to anything other than '
'float64 or object is not supported' %
self.__class__)
return Index(self.values, name=self.name, dtype=dtype)
def _convert_scalar_indexer(self, key, kind=None):
if kind == 'iloc':
return super(Float64Index, self)._convert_scalar_indexer(key,
kind=kind)
return key
def _convert_slice_indexer(self, key, kind=None):
"""
convert a slice indexer, by definition these are labels
unless we are iloc
Parameters
----------
key : label of the slice bound
kind : optional, type of the indexing operation (loc/ix/iloc/None)
"""
# if we are not a slice, then we are done
if not isinstance(key, slice):
return key
if kind == 'iloc':
return super(Float64Index, self)._convert_slice_indexer(key,
kind=kind)
# translate to locations
return self.slice_indexer(key.start, key.stop, key.step)
def get_value(self, series, key):
""" we always want to get an index value, never a value """
if not np.isscalar(key):
raise InvalidIndexError
from pandas.core.indexing import maybe_droplevels
from pandas.core.series import Series
k = _values_from_object(key)
loc = self.get_loc(k)
new_values = _values_from_object(series)[loc]
if np.isscalar(new_values) or new_values is None:
return new_values
new_index = self[loc]
new_index = maybe_droplevels(new_index, k)
return Series(new_values, index=new_index, name=series.name)
def equals(self, other):
"""
Determines if two Index objects contain the same elements.
"""
if self is other:
return True
# need to compare nans locations and make sure that they are the same
# since nans don't compare equal this is a bit tricky
try:
if not isinstance(other, Float64Index):
other = self._constructor(other)
if self.dtype != other.dtype or self.shape != other.shape:
return False
left, right = self.values, other.values
return ((left == right) | (self._isnan & other._isnan)).all()
except TypeError:
# e.g. fails in numpy 1.6 with DatetimeIndex #1681
return False
def __contains__(self, other):
if super(Float64Index, self).__contains__(other):
return True
try:
# if other is a sequence this throws a ValueError
return np.isnan(other) and self.hasnans
except ValueError:
try:
return len(other) <= 1 and _try_get_item(other) in self
except TypeError:
return False
except:
return False
def get_loc(self, key, method=None):
try:
if np.all(np.isnan(key)):
nan_idxs = self._nan_idxs
try:
return nan_idxs.item()
except (ValueError, IndexError):
# should only need to catch ValueError here but on numpy
# 1.7 .item() can raise IndexError when NaNs are present
return nan_idxs
except (TypeError, NotImplementedError):
pass
return super(Float64Index, self).get_loc(key, method=method)
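# Illustrative NaN handling (a minimal sketch; assumes ``import numpy as np``
# and ``from pandas import Float64Index``):
#
#     >>> idx = Float64Index([1.0, np.nan, 3.0])
#     >>> idx.hasnans
#     True
#     >>> np.nan in idx
#     True
#     >>> idx.get_loc(np.nan)
#     1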
@property
def is_all_dates(self):
"""
Checks that all the labels are datetime objects
"""
return False
@cache_readonly
def _nan_idxs(self):
w, = self._isnan.nonzero()
return w
@cache_readonly
def _isnan(self):
return np.isnan(self.values)
@cache_readonly
def hasnans(self):
return self._isnan.any()
@cache_readonly
def is_unique(self):
return super(Float64Index, self).is_unique and self._nan_idxs.size < 2
@Appender(Index.isin.__doc__)
def isin(self, values, level=None):
value_set = set(values)
if level is not None:
self._validate_index_level(level)
return lib.ismember_nans(self._array_values(), value_set,
isnull(list(value_set)).any())
Float64Index._add_numeric_methods()
Float64Index._add_logical_methods_disabled()
class MultiIndex(Index):
"""
Implements multi-level, a.k.a. hierarchical, index object for pandas
objects
Parameters
----------
levels : sequence of arrays
The unique labels for each level
labels : sequence of arrays
Integers for each level designating which label at each location
sortorder : optional int
Level of sortedness (must be lexicographically sorted by that
level)
names : optional sequence of objects
Names for each of the index levels.
copy : boolean, default False
Copy the meta-data
verify_integrity : boolean, default True
Check that the levels/labels are consistent and valid
"""
# initialize to zero-length tuples to make everything work
_typ = 'multiindex'
_names = FrozenList()
_levels = FrozenList()
_labels = FrozenList()
_comparables = ['names']
rename = Index.set_names
def __new__(cls, levels=None, labels=None, sortorder=None, names=None,
copy=False, verify_integrity=True, _set_identity=True, **kwargs):
if levels is None or labels is None:
raise TypeError("Must pass both levels and labels")
if len(levels) != len(labels):
raise ValueError('Length of levels and labels must be the same.')
if len(levels) == 0:
raise ValueError('Must pass non-zero number of levels/labels')
if len(levels) == 1:
if names:
name = names[0]
else:
name = None
return Index(levels[0], name=name, copy=True).take(labels[0])
result = object.__new__(MultiIndex)
# we've already validated levels and labels, so shortcut here
result._set_levels(levels, copy=copy, validate=False)
result._set_labels(labels, copy=copy, validate=False)
if names is not None:
# handles name validation
result._set_names(names)
if sortorder is not None:
result.sortorder = int(sortorder)
else:
result.sortorder = sortorder
if verify_integrity:
result._verify_integrity()
if _set_identity:
result._reset_identity()
return result
def _verify_integrity(self):
"""Raises ValueError if length of levels and labels don't match or any
label would exceed level bounds"""
# NOTE: Currently does not check, among other things, that cached
# nlevels matches nor that sortorder matches actually sortorder.
labels, levels = self.labels, self.levels
if len(levels) != len(labels):
raise ValueError("Length of levels and labels must match. NOTE:"
" this index is in an inconsistent state.")
label_length = len(self.labels[0])
for i, (level, label) in enumerate(zip(levels, labels)):
if len(label) != label_length:
raise ValueError("Unequal label lengths: %s" % (
[len(lab) for lab in labels]))
if len(label) and label.max() >= len(level):
raise ValueError("On level %d, label max (%d) >= length of"
" level (%d). NOTE: this index is in an"
" inconsistent state" % (i, label.max(),
len(level)))
def _get_levels(self):
return self._levels
def _set_levels(self, levels, level=None, copy=False, validate=True,
verify_integrity=False):
# This is NOT part of the levels property because it should be
# externally not allowed to set levels. User beware if you change
# _levels directly
if validate and len(levels) == 0:
raise ValueError('Must set non-zero number of levels.')
if validate and level is None and len(levels) != self.nlevels:
raise ValueError('Length of levels must match number of levels.')
if validate and level is not None and len(levels) != len(level):
raise ValueError('Length of levels must match length of level.')
if level is None:
new_levels = FrozenList(_ensure_index(lev, copy=copy)._shallow_copy()
for lev in levels)
else:
level = [self._get_level_number(l) for l in level]
new_levels = list(self._levels)
for l, v in zip(level, levels):
new_levels[l] = _ensure_index(v, copy=copy)._shallow_copy()
new_levels = FrozenList(new_levels)
names = self.names
self._levels = new_levels
if any(names):
self._set_names(names)
self._tuples = None
self._reset_cache()
if verify_integrity:
self._verify_integrity()
def set_levels(self, levels, level=None, inplace=False, verify_integrity=True):
"""
Set new levels on MultiIndex. Defaults to returning
new index.
Parameters
----------
levels : sequence or list of sequence
new level(s) to apply
level : int or level name, or sequence of int / level names (default None)
level(s) to set (None for all levels)
inplace : bool
if True, mutates in place
verify_integrity : bool (default True)
if True, checks that levels and labels are compatible
Returns
-------
new index (of same type and class...etc)
Examples
--------
>>> idx = MultiIndex.from_tuples([(1, u'one'), (1, u'two'),
(2, u'one'), (2, u'two')],
names=['foo', 'bar'])
>>> idx.set_levels([['a','b'], [1,2]])
MultiIndex(levels=[[u'a', u'b'], [1, 2]],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=[u'foo', u'bar'])
>>> idx.set_levels(['a','b'], level=0)
MultiIndex(levels=[[u'a', u'b'], [u'one', u'two']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=[u'foo', u'bar'])
>>> idx.set_levels(['a','b'], level='bar')
MultiIndex(levels=[[1, 2], [u'a', u'b']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=[u'foo', u'bar'])
>>> idx.set_levels([['a','b'], [1,2]], level=[0,1])
MultiIndex(levels=[[u'a', u'b'], [1, 2]],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=[u'foo', u'bar'])
"""
if level is not None and not is_list_like(level):
if not is_list_like(levels):
raise TypeError("Levels must be list-like")
if is_list_like(levels[0]):
raise TypeError("Levels must be list-like")
level = [level]
levels = [levels]
elif level is None or is_list_like(level):
if not is_list_like(levels) or not is_list_like(levels[0]):
raise TypeError("Levels must be list of lists-like")
if inplace:
idx = self
else:
idx = self._shallow_copy()
idx._reset_identity()
idx._set_levels(levels, level=level, validate=True,
verify_integrity=verify_integrity)
if not inplace:
return idx
# remove me in 0.14 and change to read only property
__set_levels = deprecate("setting `levels` directly",
partial(set_levels, inplace=True,
verify_integrity=True),
alt_name="set_levels")
levels = property(fget=_get_levels, fset=__set_levels)
def _get_labels(self):
return self._labels
def _set_labels(self, labels, level=None, copy=False, validate=True,
verify_integrity=False):
if validate and level is None and len(labels) != self.nlevels:
raise ValueError("Length of labels must match number of levels")
if validate and level is not None and len(labels) != len(level):
raise ValueError('Length of labels must match length of levels.')
if level is None:
new_labels = FrozenList(_ensure_frozen(lab, lev, copy=copy)._shallow_copy()
for lev, lab in zip(self.levels, labels))
else:
level = [self._get_level_number(l) for l in level]
new_labels = list(self._labels)
for l, lev, lab in zip(level, self.levels, labels):
new_labels[l] = _ensure_frozen(lab, lev, copy=copy)._shallow_copy()
new_labels = FrozenList(new_labels)
self._labels = new_labels
self._tuples = None
self._reset_cache()
if verify_integrity:
self._verify_integrity()
def set_labels(self, labels, level=None, inplace=False, verify_integrity=True):
"""
Set new labels on MultiIndex. Defaults to returning
new index.
Parameters
----------
labels : sequence or list of sequence
new labels to apply
level : int or level name, or sequence of int / level names (default None)
level(s) to set (None for all levels)
inplace : bool
if True, mutates in place
verify_integrity : bool (default True)
if True, checks that levels and labels are compatible
Returns
-------
new index (of same type and class...etc)
Examples
--------
>>> idx = MultiIndex.from_tuples([(1, u'one'), (1, u'two'),
(2, u'one'), (2, u'two')],
names=['foo', 'bar'])
>>> idx.set_labels([[1,0,1,0], [0,0,1,1]])
MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[1, 0, 1, 0], [0, 0, 1, 1]],
names=[u'foo', u'bar'])
>>> idx.set_labels([1,0,1,0], level=0)
MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[1, 0, 1, 0], [0, 1, 0, 1]],
names=[u'foo', u'bar'])
>>> idx.set_labels([0,0,1,1], level='bar')
MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[0, 0, 1, 1], [0, 0, 1, 1]],
names=[u'foo', u'bar'])
>>> idx.set_labels([[1,0,1,0], [0,0,1,1]], level=[0,1])
MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[1, 0, 1, 0], [0, 0, 1, 1]],
names=[u'foo', u'bar'])
"""
if level is not None and not is_list_like(level):
if not is_list_like(labels):
raise TypeError("Labels must be list-like")
if is_list_like(labels[0]):
raise TypeError("Labels must be list-like")
level = [level]
labels = [labels]
elif level is None or is_list_like(level):
if not is_list_like(labels) or not is_list_like(labels[0]):
raise TypeError("Labels must be list of lists-like")
if inplace:
idx = self
else:
idx = self._shallow_copy()
idx._reset_identity()
idx._set_labels(labels, level=level, verify_integrity=verify_integrity)
if not inplace:
return idx
# remove me in 0.14 and change to readonly property
__set_labels = deprecate("setting labels directly",
partial(set_labels, inplace=True,
verify_integrity=True),
alt_name="set_labels")
labels = property(fget=_get_labels, fset=__set_labels)
def copy(self, names=None, dtype=None, levels=None, labels=None,
deep=False, _set_identity=False):
"""
Make a copy of this object. Names, dtype, levels and labels can be
passed and will be set on new copy.
Parameters
----------
names : sequence, optional
dtype : numpy dtype or pandas type, optional
levels : sequence, optional
labels : sequence, optional
Returns
-------
copy : MultiIndex
Notes
-----
In most cases, there should be no functional difference from using
``deep``, but if ``deep`` is passed it will attempt to deepcopy.
This could be potentially expensive on large MultiIndex objects.
"""
if deep:
from copy import deepcopy
levels = levels if levels is not None else deepcopy(self.levels)
labels = labels if labels is not None else deepcopy(self.labels)
names = names if names is not None else deepcopy(self.names)
else:
levels = self.levels
labels = self.labels
names = self.names
return MultiIndex(levels=levels,
labels=labels,
names=names,
sortorder=self.sortorder,
verify_integrity=False,
_set_identity=_set_identity)
def __array__(self, result=None):
""" the array interface, return my values """
return self.values
def view(self, cls=None):
""" this is defined as a copy with the same identity """
result = self.copy()
result._id = self._id
return result
_shallow_copy = view
def _array_values(self):
# hack for various methods
return self.values
@cache_readonly
def dtype(self):
return np.dtype('O')
@cache_readonly
def nbytes(self):
""" return the number of bytes in the underlying data """
level_nbytes = sum(( i.nbytes for i in self.levels ))
label_nbytes = sum(( i.nbytes for i in self.labels ))
names_nbytes = sum(( getsizeof(i) for i in self.names ))
return level_nbytes + label_nbytes + names_nbytes
def __repr__(self):
encoding = get_option('display.encoding')
attrs = [('levels', default_pprint(self.levels)),
('labels', default_pprint(self.labels))]
if not all(name is None for name in self.names):
attrs.append(('names', default_pprint(self.names)))
if self.sortorder is not None:
attrs.append(('sortorder', default_pprint(self.sortorder)))
space = ' ' * (len(self.__class__.__name__) + 1)
prepr = (u(",\n%s") % space).join([u("%s=%s") % (k, v)
for k, v in attrs])
res = u("%s(%s)") % (self.__class__.__name__, prepr)
if not compat.PY3:
# needs to be str in Python 2
res = res.encode(encoding)
return res
def __unicode__(self):
"""
Return a string representation for a particular Index
Invoked by unicode(df) in py2 only. Yields a Unicode String in both
py2/py3.
"""
rows = self.format(names=True)
max_rows = get_option('display.max_rows')
if len(rows) > max_rows:
spaces = (len(rows[0]) - 3) // 2
centered = ' ' * spaces
half = max_rows // 2
rows = rows[:half] + [centered + '...' + centered] + rows[-half:]
return "\n".join(rows)
def __len__(self):
return len(self.labels[0])
def _get_names(self):
return FrozenList(level.name for level in self.levels)
def _set_names(self, names, level=None, validate=True):
"""
sets names on levels. WARNING: mutates!
Note that you generally want to set this *after* changing levels, so
that it only acts on copies
"""
names = list(names)
if validate and level is not None and len(names) != len(level):
raise ValueError('Length of names must match length of level.')
if validate and level is None and len(names) != self.nlevels:
raise ValueError(
'Length of names must match number of levels in MultiIndex.')
if level is None:
level = range(self.nlevels)
else:
level = [self._get_level_number(l) for l in level]
# set the name
for l, name in zip(level, names):
self.levels[l].rename(name, inplace=True)
names = property(
fset=_set_names, fget=_get_names, doc="Names of levels in MultiIndex")
def _reference_duplicate_name(self, name):
"""
Returns True if the name referred to in self.names is duplicated.
"""
# count the times name equals an element in self.names.
return np.sum(name == np.asarray(self.names)) > 1
def _format_native_types(self, **kwargs):
return self.tolist()
@property
def _constructor(self):
return MultiIndex.from_tuples
@cache_readonly
def inferred_type(self):
return 'mixed'
@staticmethod
def _from_elements(values, labels=None, levels=None, names=None,
sortorder=None):
return MultiIndex(levels, labels, names, sortorder=sortorder)
def _get_level_number(self, level):
try:
count = self.names.count(level)
if count > 1:
raise ValueError('The name %s occurs multiple times, use a '
'level number' % level)
level = self.names.index(level)
except ValueError:
if not isinstance(level, int):
raise KeyError('Level %s not found' % str(level))
elif level < 0:
level += self.nlevels
if level < 0:
orig_level = level - self.nlevels
raise IndexError(
'Too many levels: Index has only %d levels, '
'%d is not a valid level number' % (self.nlevels, orig_level)
)
# Note: levels are zero-based
elif level >= self.nlevels:
raise IndexError('Too many levels: Index has only %d levels, '
'not %d' % (self.nlevels, level + 1))
return level
_tuples = None
@property
def values(self):
if self._tuples is not None:
return self._tuples
values = []
for lev, lab in zip(self.levels, self.labels):
# Need to box timestamps, etc.
box = hasattr(lev, '_box_values')
# Try to minimize boxing.
if box and len(lev) > len(lab):
taken = lev._box_values(com.take_1d(lev.values, lab))
elif box:
taken = com.take_1d(lev._box_values(lev.values), lab,
fill_value=_get_na_value(lev.dtype.type))
else:
taken = com.take_1d(lev.values, lab)
values.append(taken)
self._tuples = lib.fast_zip(values)
return self._tuples
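# Illustrative output (a minimal sketch; assumes ``from pandas import MultiIndex``):
#
#     >>> MultiIndex.from_tuples([(1, 'a'), (2, 'b')]).values
#     # -> an object ndarray of tuples: (1, 'a'), (2, 'b')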
# fml
@property
def _is_v1(self):
return False
@property
def _is_v2(self):
return False
@property
def _has_complex_internals(self):
# to disable groupby tricks
return True
@cache_readonly
def is_unique(self):
return not self.duplicated().any()
@Appender(_shared_docs['duplicated'] % _index_doc_kwargs)
def duplicated(self, take_last=False):
from pandas.core.groupby import get_group_index
from pandas.hashtable import duplicated_int64
shape = map(len, self.levels)
ids = get_group_index(self.labels, shape, sort=False, xnull=False)
return duplicated_int64(ids, take_last)
def get_value(self, series, key):
# somewhat broken encapsulation
from pandas.core.indexing import maybe_droplevels
from pandas.core.series import Series
# Label-based
s = _values_from_object(series)
k = _values_from_object(key)
def _try_mi(k):
# TODO: what if a level contains tuples??
loc = self.get_loc(k)
new_values = series.values[loc]
new_index = self[loc]
new_index = maybe_droplevels(new_index, k)
return Series(new_values, index=new_index, name=series.name)
try:
return self._engine.get_value(s, k)
except KeyError as e1:
try:
return _try_mi(key)
except KeyError:
pass
try:
return _index.get_value_at(s, k)
except IndexError:
raise
except TypeError:
# generator/iterator-like
if com.is_iterator(key):
raise InvalidIndexError(key)
else:
raise e1
except Exception: # pragma: no cover
raise e1
except TypeError:
# a Timestamp will raise a TypeError in a multi-index
# rather than a KeyError, try it here
# note that a string that 'looks' like a Timestamp will raise
# a KeyError! (GH5725)
if isinstance(key, (datetime.datetime, np.datetime64)) or (
compat.PY3 and isinstance(key, compat.string_types)):
try:
return _try_mi(key)
except (KeyError):
raise
except:
pass
try:
return _try_mi(Timestamp(key))
except:
pass
raise InvalidIndexError(key)
def get_level_values(self, level):
"""
Return vector of label values for requested level, equal to the length
of the index
Parameters
----------
level : int or level name
Returns
-------
values : ndarray
"""
num = self._get_level_number(level)
unique = self.levels[num] # .values
labels = self.labels[num]
filled = com.take_1d(unique.values, labels, fill_value=unique._na_value)
values = unique._simple_new(filled, self.names[num],
freq=getattr(unique, 'freq', None),
tz=getattr(unique, 'tz', None))
return values
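# Illustrative usage (a minimal sketch; assumes ``from pandas import MultiIndex``):
#
#     >>> mi = MultiIndex.from_arrays([[1, 1, 2], ['a', 'b', 'a']], names=['x', 'y'])
#     >>> mi.get_level_values('y')   # full-length Index ['a', 'b', 'a'] named 'y'
#     >>> mi.get_level_values(0)     # Int64Index([1, 1, 2]) named 'x'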
def format(self, space=2, sparsify=None, adjoin=True, names=False,
na_rep=None, formatter=None):
if len(self) == 0:
return []
stringified_levels = []
for lev, lab in zip(self.levels, self.labels):
na = na_rep if na_rep is not None else _get_na_rep(lev.dtype.type)
if len(lev) > 0:
formatted = lev.take(lab).format(formatter=formatter)
# we have some NA
mask = lab == -1
if mask.any():
formatted = np.array(formatted, dtype=object)
formatted[mask] = na
formatted = formatted.tolist()
else:
# weird all NA case
formatted = [com.pprint_thing(na if isnull(x) else x,
escape_chars=('\t', '\r', '\n'))
for x in com.take_1d(lev.values, lab)]
stringified_levels.append(formatted)
result_levels = []
for lev, name in zip(stringified_levels, self.names):
level = []
if names:
level.append(com.pprint_thing(name,
escape_chars=('\t', '\r', '\n'))
if name is not None else '')
level.extend(np.array(lev, dtype=object))
result_levels.append(level)
if sparsify is None:
sparsify = get_option("display.multi_sparse")
if sparsify:
sentinel = ''
# GH3547
# use value of sparsify as sentinel, unless it's an obvious
# "Truthy" value
if sparsify not in [True, 1]:
sentinel = sparsify
# little bit of a kludge job for #1217
result_levels = _sparsify(result_levels,
start=int(names),
sentinel=sentinel)
if adjoin:
return com.adjoin(space, *result_levels).split('\n')
else:
return result_levels
def to_hierarchical(self, n_repeat, n_shuffle=1):
"""
Return a MultiIndex reshaped to conform to the
shapes given by n_repeat and n_shuffle.
Useful to replicate and rearrange a MultiIndex for combination
with another Index with n_repeat items.
Parameters
----------
n_repeat : int
Number of times to repeat the labels on self
n_shuffle : int
Controls the reordering of the labels. If the result is going
to be an inner level in a MultiIndex, n_shuffle will need to be
greater than one. The size of each label must be divisible by
n_shuffle.
Returns
-------
MultiIndex
Examples
--------
>>> idx = MultiIndex.from_tuples([(1, u'one'), (1, u'two'),
(2, u'one'), (2, u'two')])
>>> idx.to_hierarchical(3)
MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1]])
"""
levels = self.levels
labels = [np.repeat(x, n_repeat) for x in self.labels]
# Assumes that each label is divisible by n_shuffle
labels = [x.reshape(n_shuffle, -1).ravel(1) for x in labels]
names = self.names
return MultiIndex(levels=levels, labels=labels, names=names)
@property
def is_all_dates(self):
return False
def is_lexsorted(self):
"""
Return True if the labels are lexicographically sorted
"""
return self.lexsort_depth == self.nlevels
def is_lexsorted_for_tuple(self, tup):
"""
Return True if we are correctly lexsorted given the passed tuple
"""
return len(tup) <= self.lexsort_depth
@cache_readonly
def lexsort_depth(self):
if self.sortorder is not None:
if self.sortorder == 0:
return self.nlevels
else:
return 0
int64_labels = [com._ensure_int64(lab) for lab in self.labels]
for k in range(self.nlevels, 0, -1):
if lib.is_lexsorted(int64_labels[:k]):
return k
return 0
@classmethod
def from_arrays(cls, arrays, sortorder=None, names=None):
"""
Convert arrays to MultiIndex
Parameters
----------
arrays : list / sequence of array-likes
Each array-like gives one level's value for each data point.
len(arrays) is the number of levels.
sortorder : int or None
Level of sortedness (must be lexicographically sorted by that
level)
Returns
-------
index : MultiIndex
Examples
--------
>>> arrays = [[1, 1, 2, 2], ['red', 'blue', 'red', 'blue']]
>>> MultiIndex.from_arrays(arrays, names=('number', 'color'))
See Also
--------
MultiIndex.from_tuples : Convert list of tuples to MultiIndex
MultiIndex.from_product : Make a MultiIndex from cartesian product
of iterables
"""
from pandas.core.categorical import Categorical
if len(arrays) == 1:
name = None if names is None else names[0]
return Index(arrays[0], name=name)
cats = [Categorical.from_array(arr, ordered=True) for arr in arrays]
levels = [c.categories for c in cats]
labels = [c.codes for c in cats]
if names is None:
names = [c.name for c in cats]
return MultiIndex(levels=levels, labels=labels,
sortorder=sortorder, names=names,
verify_integrity=False)
@classmethod
def from_tuples(cls, tuples, sortorder=None, names=None):
"""
Convert list of tuples to MultiIndex
Parameters
----------
tuples : list / sequence of tuple-likes
Each tuple is the index of one row/column.
sortorder : int or None
Level of sortedness (must be lexicographically sorted by that
level)
Returns
-------
index : MultiIndex
Examples
--------
>>> tuples = [(1, u'red'), (1, u'blue'),
(2, u'red'), (2, u'blue')]
>>> MultiIndex.from_tuples(tuples, names=('number', 'color'))
See Also
--------
MultiIndex.from_arrays : Convert list of arrays to MultiIndex
MultiIndex.from_product : Make a MultiIndex from cartesian product
of iterables
"""
if len(tuples) == 0:
# I think this is right? Not quite sure...
raise TypeError('Cannot infer number of levels from empty list')
if isinstance(tuples, (np.ndarray, Index)):
if isinstance(tuples, Index):
tuples = tuples.values
arrays = list(lib.tuples_to_object_array(tuples).T)
elif isinstance(tuples, list):
arrays = list(lib.to_object_array_tuples(tuples).T)
else:
arrays = lzip(*tuples)
return MultiIndex.from_arrays(arrays, sortorder=sortorder,
names=names)
@classmethod
def from_product(cls, iterables, sortorder=None, names=None):
"""
Make a MultiIndex from the cartesian product of multiple iterables
Parameters
----------
iterables : list / sequence of iterables
Each iterable has unique labels for each level of the index.
sortorder : int or None
Level of sortedness (must be lexicographically sorted by that
level).
names : list / sequence of strings or None
Names for the levels in the index.
Returns
-------
index : MultiIndex
Examples
--------
>>> numbers = [0, 1, 2]
>>> colors = [u'green', u'purple']
>>> MultiIndex.from_product([numbers, colors],
names=['number', 'color'])
MultiIndex(levels=[[0, 1, 2], [u'green', u'purple']],
labels=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]],
names=[u'number', u'color'])
See Also
--------
MultiIndex.from_arrays : Convert list of arrays to MultiIndex
MultiIndex.from_tuples : Convert list of tuples to MultiIndex
"""
from pandas.core.categorical import Categorical
from pandas.tools.util import cartesian_product
categoricals = [Categorical.from_array(it, ordered=True) for it in iterables]
labels = cartesian_product([c.codes for c in categoricals])
return MultiIndex(levels=[c.categories for c in categoricals],
labels=labels, sortorder=sortorder, names=names)
@property
def nlevels(self):
return len(self.levels)
@property
def levshape(self):
return tuple(len(x) for x in self.levels)
def __contains__(self, key):
hash(key)
# work around some kind of odd cython bug
try:
self.get_loc(key)
return True
except KeyError:
return False
def __reduce__(self):
"""Necessary for making this object picklable"""
d = dict(levels = [lev for lev in self.levels],
labels = [label for label in self.labels],
sortorder = self.sortorder,
names = list(self.names))
return _new_Index, (self.__class__, d), None
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if isinstance(state, dict):
levels = state.get('levels')
labels = state.get('labels')
sortorder = state.get('sortorder')
names = state.get('names')
elif isinstance(state, tuple):
nd_state, own_state = state
levels, labels, sortorder, names = own_state
self._set_levels([Index(x) for x in levels], validate=False)
self._set_labels(labels)
self._set_names(names)
self.sortorder = sortorder
self._verify_integrity()
self._reset_identity()
def __getitem__(self, key):
if np.isscalar(key):
retval = []
for lev, lab in zip(self.levels, self.labels):
if lab[key] == -1:
retval.append(np.nan)
else:
retval.append(lev[lab[key]])
return tuple(retval)
else:
if is_bool_indexer(key):
key = np.asarray(key)
sortorder = self.sortorder
else:
# cannot be sure whether the result will be sorted
sortorder = None
new_labels = [lab[key] for lab in self.labels]
return MultiIndex(levels=self.levels,
labels=new_labels,
names=self.names,
sortorder=sortorder,
verify_integrity=False)
def take(self, indexer, axis=None):
indexer = com._ensure_platform_int(indexer)
new_labels = [lab.take(indexer) for lab in self.labels]
return MultiIndex(levels=self.levels, labels=new_labels,
names=self.names, verify_integrity=False)
def append(self, other):
"""
Append a collection of Index options together
Parameters
----------
other : Index or list/tuple of indices
Returns
-------
appended : Index
"""
if not isinstance(other, (list, tuple)):
other = [other]
if all((isinstance(o, MultiIndex) and o.nlevels >= self.nlevels) for o in other):
arrays = []
for i in range(self.nlevels):
label = self.get_level_values(i)
appended = [o.get_level_values(i) for o in other]
arrays.append(label.append(appended))
return MultiIndex.from_arrays(arrays, names=self.names)
to_concat = (self.values,) + tuple(k.values for k in other)
new_tuples = np.concatenate(to_concat)
# if all(isinstance(x, MultiIndex) for x in other):
try:
return MultiIndex.from_tuples(new_tuples, names=self.names)
except:
return Index(new_tuples)
def argsort(self, *args, **kwargs):
return self.values.argsort(*args, **kwargs)
def repeat(self, n):
return MultiIndex(levels=self.levels,
labels=[label.view(np.ndarray).repeat(n) for label in self.labels],
names=self.names,
sortorder=self.sortorder,
verify_integrity=False)
def drop(self, labels, level=None):
"""
Make new MultiIndex with passed list of labels deleted
Parameters
----------
labels : array-like
Must be a list of tuples
level : int or level name, default None
Returns
-------
dropped : MultiIndex
"""
if level is not None:
return self._drop_from_level(labels, level)
try:
if not isinstance(labels, (np.ndarray, Index)):
labels = com._index_labels_to_array(labels)
indexer = self.get_indexer(labels)
mask = indexer == -1
if mask.any():
raise ValueError('labels %s not contained in axis'
% labels[mask])
return self.delete(indexer)
except Exception:
pass
inds = []
for label in labels:
loc = self.get_loc(label)
if isinstance(loc, int):
inds.append(loc)
else:
inds.extend(lrange(loc.start, loc.stop))
return self.delete(inds)
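# Illustrative usage (a minimal sketch; assumes ``from pandas import MultiIndex``):
#
#     >>> mi = MultiIndex.from_tuples([('a', 1), ('b', 2), ('b', 3)])
#     >>> mi.drop([('b', 2)])        # keeps ('a', 1) and ('b', 3)
#     >>> mi.drop(['b'], level=0)    # keeps only ('a', 1)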
def _drop_from_level(self, labels, level):
labels = com._index_labels_to_array(labels)
i = self._get_level_number(level)
index = self.levels[i]
values = index.get_indexer(labels)
mask = ~lib.ismember(self.labels[i], set(values))
return self[mask]
def droplevel(self, level=0):
"""
Return Index with requested level removed. If MultiIndex has only 2
levels, the result will be of Index type not MultiIndex.
Parameters
----------
level : int/level name or list thereof
Notes
-----
Does not check if result index is unique or not
Returns
-------
index : Index or MultiIndex
"""
levels = level
if not isinstance(levels, (tuple, list)):
levels = [level]
new_levels = list(self.levels)
new_labels = list(self.labels)
new_names = list(self.names)
levnums = sorted(self._get_level_number(lev) for lev in levels)[::-1]
for i in levnums:
new_levels.pop(i)
new_labels.pop(i)
new_names.pop(i)
if len(new_levels) == 1:
# set nan if needed
mask = new_labels[0] == -1
result = new_levels[0].take(new_labels[0])
if mask.any():
result = result.putmask(mask, np.nan)
result.name = new_names[0]
return result
else:
return MultiIndex(levels=new_levels, labels=new_labels,
names=new_names, verify_integrity=False)
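# Illustrative usage (a minimal sketch; assumes ``from pandas import MultiIndex``):
#
#     >>> mi = MultiIndex.from_tuples([('a', 1), ('b', 2)], names=['x', 'y'])
#     >>> mi.droplevel('x')          # Int64Index([1, 2]) named 'y'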
def swaplevel(self, i, j):
"""
Swap level i with level j. Do not change the ordering of anything
Parameters
----------
i, j : int, string (can be mixed)
Level of index to be swapped. Can pass level name as string.
Returns
-------
swapped : MultiIndex
"""
new_levels = list(self.levels)
new_labels = list(self.labels)
new_names = list(self.names)
i = self._get_level_number(i)
j = self._get_level_number(j)
new_levels[i], new_levels[j] = new_levels[j], new_levels[i]
new_labels[i], new_labels[j] = new_labels[j], new_labels[i]
new_names[i], new_names[j] = new_names[j], new_names[i]
return MultiIndex(levels=new_levels, labels=new_labels,
names=new_names, verify_integrity=False)
def reorder_levels(self, order):
"""
Rearrange levels using input order. May not drop or duplicate levels.
Parameters
----------
order : list of int or list of str
Reference the levels by number (position) or by key (level name).
"""
order = [self._get_level_number(i) for i in order]
if len(order) != self.nlevels:
raise AssertionError(('Length of order must be same as '
'number of levels (%d), got %d')
% (self.nlevels, len(order)))
new_levels = [self.levels[i] for i in order]
new_labels = [self.labels[i] for i in order]
new_names = [self.names[i] for i in order]
return MultiIndex(levels=new_levels, labels=new_labels,
names=new_names, verify_integrity=False)
def __getslice__(self, i, j):
return self.__getitem__(slice(i, j))
def sortlevel(self, level=0, ascending=True, sort_remaining=True):
"""
Sort MultiIndex at the requested level. The result will respect the
original ordering of the associated factor at that level.
Parameters
----------
level : list-like, int or str, default 0
If a string is given, must be a name of the level
If list-like must be names or ints of levels.
ascending : boolean, default True
False to sort in descending order
sort_remaining : boolean, default True
Sort by the remaining levels after `level`.
Returns
-------
sorted_index : MultiIndex
"""
from pandas.core.groupby import _indexer_from_factorized
labels = list(self.labels)
shape = list(self.levshape)
if isinstance(level, (str, int)):
level = [level]
level = [self._get_level_number(lev) for lev in level]
# partition labels and shape
primary = tuple(labels.pop(lev - i) for i, lev in enumerate(level))
primshp = tuple(shape.pop(lev - i) for i, lev in enumerate(level))
if sort_remaining:
primary += primary + tuple(labels)
primshp += primshp + tuple(shape)
sortorder = None
else:
sortorder = level[0]
indexer = _indexer_from_factorized(primary,
primshp,
compress=False)
if not ascending:
indexer = indexer[::-1]
indexer = com._ensure_platform_int(indexer)
new_labels = [lab.take(indexer) for lab in self.labels]
new_index = MultiIndex(labels=new_labels, levels=self.levels,
names=self.names, sortorder=sortorder,
verify_integrity=False)
return new_index, indexer
def get_indexer(self, target, method=None, limit=None):
"""
Compute indexer and mask for new index given the current index. The
indexer should be then used as an input to ndarray.take to align the
current data to the new index. The mask determines whether labels are
found or not in the current index
Parameters
----------
target : MultiIndex or Index (of tuples)
method : {'pad', 'ffill', 'backfill', 'bfill'}
pad / ffill: propagate LAST valid observation forward to next valid
backfill / bfill: use NEXT valid observation to fill gap
Notes
-----
This is a low-level method; use it at your own risk
Examples
--------
>>> indexer, mask = index.get_indexer(new_index)
>>> new_values = cur_values.take(indexer)
>>> new_values[-mask] = np.nan
Returns
-------
(indexer, mask) : (ndarray, ndarray)
"""
method = com._clean_reindex_fill_method(method)
target = _ensure_index(target)
target_index = target
if isinstance(target, MultiIndex):
target_index = target._tuple_index
if target_index.dtype != object:
return np.ones(len(target_index)) * -1
if not self.is_unique:
raise Exception('Reindexing only valid with uniquely valued Index '
'objects')
self_index = self._tuple_index
if method == 'pad' or method == 'backfill':
indexer = self_index._get_fill_indexer(target, method, limit)
elif method == 'nearest':
raise NotImplementedError("method='nearest' not implemented yet "
'for MultiIndex; see GitHub issue 9365')
else:
indexer = self_index._engine.get_indexer(target.values)
return com._ensure_platform_int(indexer)
def reindex(self, target, method=None, level=None, limit=None):
"""
Create index with target's values (move/add/delete values as necessary)
Returns
-------
new_index : pd.MultiIndex
Resulting index
indexer : np.ndarray or None
Indices of output values in original index
"""
# GH6552: preserve names when reindexing to non-named target
# (i.e. neither Index nor Series).
preserve_names = not hasattr(target, 'names')
if level is not None:
if method is not None:
raise TypeError('Fill method not supported if level passed')
# GH7774: preserve dtype/tz if target is empty and not an Index.
target = _ensure_has_len(target) # target may be an iterator
if len(target) == 0 and not isinstance(target, Index):
idx = self.levels[level]
attrs = idx._get_attributes_dict()
attrs.pop('freq', None) # don't preserve freq
target = type(idx)._simple_new(np.empty(0, dtype=idx.dtype),
**attrs)
else:
target = _ensure_index(target)
target, indexer, _ = self._join_level(target, level, how='right',
return_indexers=True,
keep_order=False)
else:
if self.equals(target):
indexer = None
else:
if self.is_unique:
indexer = self.get_indexer(target, method=method,
limit=limit)
else:
raise Exception(
"cannot handle a non-unique multi-index!")
if not isinstance(target, MultiIndex):
if indexer is None:
target = self
elif (indexer >= 0).all():
target = self.take(indexer)
else:
# hopefully?
target = MultiIndex.from_tuples(target)
if (preserve_names and target.nlevels == self.nlevels and
target.names != self.names):
target = target.copy(deep=False)
target.names = self.names
return target, indexer
@cache_readonly
def _tuple_index(self):
"""
Convert MultiIndex to an Index of tuples
Returns
-------
index : Index
"""
return Index(self.values)
def get_slice_bound(self, label, side, kind):
if not isinstance(label, tuple):
label = label,
return self._partial_tup_index(label, side=side)
def slice_locs(self, start=None, end=None, step=None, kind=None):
"""
For an ordered MultiIndex, compute the slice locations for input
labels. They can be tuples representing partial levels, e.g. for a
MultiIndex with 3 levels, you can pass a single value (corresponding to
the first level), or a 1-, 2-, or 3-tuple.
Parameters
----------
start : label or tuple, default None
If None, defaults to the beginning
end : label or tuple
If None, defaults to the end
step : int or None
Slice step
kind : string, optional, defaults None
Returns
-------
(start, end) : (int, int)
Notes
-----
This function assumes that the data is sorted by the first level
"""
# This function adds nothing to its parent implementation (the magic
# happens in get_slice_bound method), but it adds meaningful doc.
return super(MultiIndex, self).slice_locs(start, end, step, kind=kind)
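# Illustrative usage with partial keys (a minimal sketch; assumes
# ``from pandas import MultiIndex``):
#
#     >>> mi = MultiIndex.from_arrays([list('abbd'), list('deff')], names=['A', 'B'])
#     >>> mi.slice_locs(start='b')
#     (1, 4)
#     >>> mi.slice_locs(start='b', end=('b', 'f'))
#     (1, 3)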
def _partial_tup_index(self, tup, side='left'):
if len(tup) > self.lexsort_depth:
raise KeyError('Key length (%d) was greater than MultiIndex'
' lexsort depth (%d)' %
(len(tup), self.lexsort_depth))
n = len(tup)
start, end = 0, len(self)
zipped = zip(tup, self.levels, self.labels)
for k, (lab, lev, labs) in enumerate(zipped):
section = labs[start:end]
if lab not in lev:
if not lev.is_type_compatible(lib.infer_dtype([lab])):
raise TypeError('Level type mismatch: %s' % lab)
# short circuit
loc = lev.searchsorted(lab, side=side)
if side == 'right' and loc >= 0:
loc -= 1
return start + section.searchsorted(loc, side=side)
idx = lev.get_loc(lab)
if k < n - 1:
end = start + section.searchsorted(idx, side='right')
start = start + section.searchsorted(idx, side='left')
else:
return start + section.searchsorted(idx, side=side)
def get_loc(self, key, method=None):
"""
Get integer location, slice or boolean mask for requested label or tuple
If the key is past the lexsort depth, the return may be a boolean mask
array, otherwise it is always a slice or int.
Parameters
----------
key : label or tuple
method : None
Returns
-------
loc : int, slice object or boolean mask
"""
if method is not None:
raise NotImplementedError('only the default get_loc method is '
'currently supported for MultiIndex')
def _maybe_to_slice(loc):
'''convert integer indexer to boolean mask or slice if possible'''
if not isinstance(loc, np.ndarray) or loc.dtype != 'int64':
return loc
loc = lib.maybe_indices_to_slice(loc)
if isinstance(loc, slice):
return loc
mask = np.empty(len(self), dtype='bool')
mask.fill(False)
mask[loc] = True
return mask
if not isinstance(key, tuple):
loc = self._get_level_indexer(key, level=0)
return _maybe_to_slice(loc)
keylen = len(key)
if self.nlevels < keylen:
raise KeyError('Key length ({0}) exceeds index depth ({1})'
''.format(keylen, self.nlevels))
if keylen == self.nlevels and self.is_unique:
def _maybe_str_to_time_stamp(key, lev):
if lev.is_all_dates and not isinstance(key, Timestamp):
try:
return Timestamp(key, tz=getattr(lev, 'tz', None))
except Exception:
pass
return key
key = _values_from_object(key)
key = tuple(map(_maybe_str_to_time_stamp, key, self.levels))
return self._engine.get_loc(key)
# -- partial selection or non-unique index
# break the key into 2 parts based on the lexsort_depth of the index;
# the first part returns a continuous slice of the index; the 2nd part
# needs linear search within the slice
i = self.lexsort_depth
lead_key, follow_key = key[:i], key[i:]
start, stop = self.slice_locs(lead_key, lead_key) \
if lead_key else (0, len(self))
if start == stop:
raise KeyError(key)
if not follow_key:
return slice(start, stop)
warnings.warn('indexing past lexsort depth may impact performance.',
PerformanceWarning)
loc = np.arange(start, stop, dtype='int64')
for i, k in enumerate(follow_key, len(lead_key)):
mask = self.labels[i][loc] == self.levels[i].get_loc(k)
if not mask.all():
loc = loc[mask]
if not len(loc):
raise KeyError(key)
return _maybe_to_slice(loc) \
if len(loc) != stop - start \
else slice(start, stop)
def get_loc_level(self, key, level=0, drop_level=True):
"""
Get integer location slice for requested label or tuple
Parameters
----------
key : label or tuple
level : int/level name or list thereof
Returns
-------
loc : int or slice object
"""
def maybe_droplevels(indexer, levels, drop_level):
if not drop_level:
return self[indexer]
# kludge around
orig_index = new_index = self[indexer]
levels = [self._get_level_number(i) for i in levels]
for i in sorted(levels, reverse=True):
try:
new_index = new_index.droplevel(i)
except:
# no dropping here
return orig_index
return new_index
if isinstance(level, (tuple, list)):
if len(key) != len(level):
raise AssertionError('Key for location must have same '
'length as number of levels')
result = None
for lev, k in zip(level, key):
loc, new_index = self.get_loc_level(k, level=lev)
if isinstance(loc, slice):
mask = np.zeros(len(self), dtype=bool)
mask[loc] = True
loc = mask
result = loc if result is None else result & loc
return result, maybe_droplevels(result, level, drop_level)
level = self._get_level_number(level)
# kludge for #1796
if isinstance(key, list):
key = tuple(key)
if isinstance(key, tuple) and level == 0:
try:
if key in self.levels[0]:
indexer = self._get_level_indexer(key, level=level)
new_index = maybe_droplevels(indexer, [0], drop_level)
return indexer, new_index
except TypeError:
pass
if not any(isinstance(k, slice) for k in key):
# partial selection
# optionally get indexer to avoid re-calculation
def partial_selection(key, indexer=None):
if indexer is None:
indexer = self.get_loc(key)
ilevels = [i for i in range(len(key))
if key[i] != slice(None, None)]
return indexer, maybe_droplevels(indexer, ilevels,
drop_level)
if len(key) == self.nlevels:
if self.is_unique:
# here we have a completely specified key, but are
# using some partial string matching here
# GH4758
can_index_exactly = any([
(l.is_all_dates and
not isinstance(k, compat.string_types))
for k, l in zip(key, self.levels)
])
if any([
l.is_all_dates for k, l in zip(key, self.levels)
]) and not can_index_exactly:
indexer = self.get_loc(key)
# we have a multiple selection here
if not isinstance(indexer, slice) \
or indexer.stop - indexer.start != 1:
return partial_selection(key, indexer)
key = tuple(self[indexer].tolist()[0])
return (self._engine.get_loc(_values_from_object(key)),
None)
else:
return partial_selection(key)
else:
return partial_selection(key)
else:
indexer = None
for i, k in enumerate(key):
if not isinstance(k, slice):
k = self._get_level_indexer(k, level=i)
if isinstance(k, slice):
# everything
if k.start == 0 and k.stop == len(self):
k = slice(None, None)
else:
k_index = k
if isinstance(k, slice):
if k == slice(None, None):
continue
else:
raise TypeError(key)
if indexer is None:
indexer = k_index
else: # pragma: no cover
indexer &= k_index
if indexer is None:
indexer = slice(None, None)
ilevels = [i for i in range(len(key))
if key[i] != slice(None, None)]
return indexer, maybe_droplevels(indexer, ilevels,
drop_level)
else:
indexer = self._get_level_indexer(key, level=level)
return indexer, maybe_droplevels(indexer, [level], drop_level)
def _get_level_indexer(self, key, level=0):
# return a boolean indexer or a slice showing where the key is
# in the totality of values
level_index = self.levels[level]
labels = self.labels[level]
if isinstance(key, slice):
# handle a slice, returning a slice if we can
# otherwise a boolean indexer
try:
if key.start is not None:
start = level_index.get_loc(key.start)
else:
start = 0
if key.stop is not None:
stop = level_index.get_loc(key.stop)
else:
stop = len(level_index)-1
step = key.step
except (KeyError):
# we have a partial slice (like looking up a partial date string)
start = stop = level_index.slice_indexer(key.start, key.stop, key.step)
step = start.step
if isinstance(start,slice) or isinstance(stop,slice):
# we have a slice for start and/or stop
# a partial date slicer on a DatetimeIndex generates a slice
# note that the stop ALREADY includes the stopped point (if
# it was a string sliced)
m = np.zeros(len(labels),dtype=bool)
m[np.in1d(labels,np.arange(start.start,stop.stop,step))] = True
return m
elif level > 0 or self.lexsort_depth == 0 or step is not None:
# need right-side search semantics here, like when we are
# using a slice, so include stop+1 (so that stop is included)
m = np.zeros(len(labels),dtype=bool)
m[np.in1d(labels,np.arange(start,stop+1,step))] = True
return m
else:
# sorted, so can return slice object -> view
i = labels.searchsorted(start, side='left')
j = labels.searchsorted(stop, side='right')
return slice(i, j, step)
else:
loc = level_index.get_loc(key)
if level > 0 or self.lexsort_depth == 0:
return np.array(labels == loc,dtype=bool)
else:
# sorted, so can return slice object -> view
i = labels.searchsorted(loc, side='left')
j = labels.searchsorted(loc, side='right')
return slice(i, j)
def get_locs(self, tup):
"""
Given a tuple of slices/lists/labels/boolean indexer to a level-wise spec
produce an indexer to extract those locations
Parameters
----------
key : tuple of (slices/list/labels)
Returns
-------
locs : integer list of locations or boolean indexer suitable
for passing to iloc
"""
# must be lexsorted to at least as many levels
if not self.is_lexsorted_for_tuple(tup):
raise KeyError('MultiIndex Slicing requires the index to be fully lexsorted'
' tuple len ({0}), lexsort depth ({1})'.format(len(tup), self.lexsort_depth))
def _convert_indexer(r):
if isinstance(r, slice):
m = np.zeros(len(self),dtype=bool)
m[r] = True
return m
return r
ranges = []
for i,k in enumerate(tup):
if is_bool_indexer(k):
# a boolean indexer, must be the same length!
k = np.asarray(k)
if len(k) != len(self):
raise ValueError("cannot index with a boolean indexer that is"
" not the same length as the index")
ranges.append(k)
elif is_list_like(k):
# a collection of labels to include from this level (these are or'd)
indexers = []
for x in k:
try:
indexers.append(_convert_indexer(self._get_level_indexer(x, level=i)))
except (KeyError):
# ignore not founds
continue
if len(k):
ranges.append(reduce(np.logical_or, indexers))
else:
ranges.append(np.zeros(self.labels[i].shape, dtype=bool))
elif is_null_slice(k):
# empty slice
pass
elif isinstance(k,slice):
# a slice, include BOTH of the labels
ranges.append(self._get_level_indexer(k,level=i))
else:
# a single label
ranges.append(self.get_loc_level(k,level=i,drop_level=False)[0])
# identity
if len(ranges) == 0:
return slice(0,len(self))
elif len(ranges) == 1:
return ranges[0]
# construct a boolean indexer if we have a slice or boolean indexer
return reduce(np.logical_and,[ _convert_indexer(r) for r in ranges ])
def truncate(self, before=None, after=None):
"""
Slice index between two labels / tuples, return new MultiIndex
Parameters
----------
before : label or tuple, can be partial. Default None
None defaults to start
after : label or tuple, can be partial. Default None
None defaults to end
Returns
-------
truncated : MultiIndex
"""
if after and before and after < before:
raise ValueError('after < before')
i, j = self.levels[0].slice_locs(before, after)
left, right = self.slice_locs(before, after)
new_levels = list(self.levels)
new_levels[0] = new_levels[0][i:j]
new_labels = [lab[left:right] for lab in self.labels]
new_labels[0] = new_labels[0] - i
return MultiIndex(levels=new_levels, labels=new_labels,
verify_integrity=False)
def equals(self, other):
"""
Determines if two MultiIndex objects have the same labeling information
(the levels themselves do not necessarily have to be the same)
See also
--------
equal_levels
"""
if self.is_(other):
return True
if not isinstance(other, MultiIndex):
return array_equivalent(self.values,
_values_from_object(_ensure_index(other)))
if self.nlevels != other.nlevels:
return False
if len(self) != len(other):
return False
for i in range(self.nlevels):
svalues = com.take_nd(self.levels[i].values, self.labels[i],
allow_fill=False)
ovalues = com.take_nd(other.levels[i].values, other.labels[i],
allow_fill=False)
if not array_equivalent(svalues, ovalues):
return False
return True
def equal_levels(self, other):
"""
Return True if the levels of both MultiIndex objects are the same
"""
if self.nlevels != other.nlevels:
return False
for i in range(self.nlevels):
if not self.levels[i].equals(other.levels[i]):
return False
return True
def union(self, other):
"""
Form the union of two MultiIndex objects, sorting if possible
Parameters
----------
other : MultiIndex or array / Index of tuples
Returns
-------
Index
>>> index.union(index2)
"""
self._assert_can_do_setop(other)
if len(other) == 0 or self.equals(other):
return self
result_names = self.names if self.names == other.names else None
uniq_tuples = lib.fast_unique_multiple([self.values, other.values])
return MultiIndex.from_arrays(lzip(*uniq_tuples), sortorder=0,
names=result_names)
def intersection(self, other):
"""
Form the intersection of two MultiIndex objects, sorting if possible
Parameters
----------
other : MultiIndex or array / Index of tuples
Returns
-------
Index
"""
self._assert_can_do_setop(other)
if self.equals(other):
return self
result_names = self.names if self.names == other.names else None
self_tuples = self.values
other_tuples = other.values
uniq_tuples = sorted(set(self_tuples) & set(other_tuples))
if len(uniq_tuples) == 0:
return MultiIndex(levels=[[]] * self.nlevels,
labels=[[]] * self.nlevels,
names=result_names, verify_integrity=False)
else:
return MultiIndex.from_arrays(lzip(*uniq_tuples), sortorder=0,
names=result_names)
def difference(self, other):
"""
Compute sorted set difference of two MultiIndex objects
Returns
-------
diff : MultiIndex
"""
self._assert_can_do_setop(other)
if not isinstance(other, MultiIndex):
if len(other) == 0:
return self
try:
other = MultiIndex.from_tuples(other)
except:
raise TypeError('other must be a MultiIndex or a list of'
' tuples')
result_names = self.names
else:
result_names = self.names if self.names == other.names else None
if self.equals(other):
return MultiIndex(levels=[[]] * self.nlevels,
labels=[[]] * self.nlevels,
names=result_names, verify_integrity=False)
difference = sorted(set(self.values) - set(other.values))
if len(difference) == 0:
return MultiIndex(levels=[[]] * self.nlevels,
labels=[[]] * self.nlevels,
names=result_names, verify_integrity=False)
else:
return MultiIndex.from_tuples(difference, sortorder=0,
names=result_names)
def _assert_can_do_setop(self, other):
pass
def astype(self, dtype):
if np.dtype(dtype) != np.object_:
raise TypeError('Setting %s dtype to anything other than object '
'is not supported' % self.__class__)
return self._shallow_copy()
def insert(self, loc, item):
"""
Make new MultiIndex inserting new item at location
Parameters
----------
loc : int
item : tuple
Must be same length as number of levels in the MultiIndex
Returns
-------
new_index : Index
"""
# Pad the key with empty strings if lower levels of the key
# aren't specified:
if not isinstance(item, tuple):
item = (item,) + ('',) * (self.nlevels - 1)
elif len(item) != self.nlevels:
raise ValueError(
'Item must have length equal to number of levels.')
new_levels = []
new_labels = []
for k, level, labels in zip(item, self.levels, self.labels):
if k not in level:
# have to insert into level
# must insert at end otherwise you have to recompute all the
# other labels
lev_loc = len(level)
level = level.insert(lev_loc, k)
else:
lev_loc = level.get_loc(k)
new_levels.append(level)
new_labels.append(np.insert(_ensure_int64(labels), loc, lev_loc))
return MultiIndex(levels=new_levels, labels=new_labels,
names=self.names, verify_integrity=False)
def delete(self, loc):
"""
Make new index with passed location deleted
Returns
-------
new_index : MultiIndex
"""
new_labels = [np.delete(lab, loc) for lab in self.labels]
return MultiIndex(levels=self.levels, labels=new_labels,
names=self.names, verify_integrity=False)
get_major_bounds = slice_locs
__bounds = None
@property
def _bounds(self):
"""
Return or compute and return slice points for level 0, assuming
sortedness
"""
if self.__bounds is None:
inds = np.arange(len(self.levels[0]))
self.__bounds = self.labels[0].searchsorted(inds)
return self.__bounds
def _wrap_joined_index(self, joined, other):
names = self.names if self.names == other.names else None
return MultiIndex.from_tuples(joined, names=names)
@Appender(Index.isin.__doc__)
def isin(self, values, level=None):
if level is None:
return lib.ismember(self._array_values(), set(values))
else:
num = self._get_level_number(level)
levs = self.levels[num]
labs = self.labels[num]
sought_labels = levs.isin(values).nonzero()[0]
if levs.size == 0:
return np.zeros(len(labs), dtype=np.bool_)
else:
return np.lib.arraysetops.in1d(labs, sought_labels)
MultiIndex._add_numeric_methods_disabled()
MultiIndex._add_logical_methods_disabled()
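# Illustrative sketch (added for exposition, not part of pandas): a tiny
# demonstration of the MultiIndex indexing machinery defined above. The data
# and the helper name are hypothetical; wrapping it in a function avoids any
# import-time side effects.
def _example_multiindex_indexing():
    import pandas as pd
    mi = pd.MultiIndex.from_product([['a', 'b'], [1, 2, 3]],
                                    names=['letter', 'num'])
    # A single label addresses level 0; on a lexsorted, unique index
    # get_loc can come back as a contiguous slice.
    loc_a = mi.get_loc('a')
    # A complete tuple is resolved through the hash engine to a scalar position.
    loc_a2 = mi.get_loc(('a', 2))
    # slice_locs accepts partial tuples and assumes sortedness on level 0.
    start, stop = mi.slice_locs(('a', 2), 'b')
    return loc_a, loc_a2, (start, stop)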
# For utility purposes
def _sparsify(label_list, start=0, sentinel=''):
pivoted = lzip(*label_list)
k = len(label_list)
result = pivoted[:start + 1]
prev = pivoted[start]
for cur in pivoted[start + 1:]:
sparse_cur = []
for i, (p, t) in enumerate(zip(prev, cur)):
if i == k - 1:
sparse_cur.append(t)
result.append(sparse_cur)
break
if p == t:
sparse_cur.append(sentinel)
else:
sparse_cur.extend(cur[i:])
result.append(sparse_cur)
break
prev = cur
return lzip(*result)
def _ensure_index(index_like, copy=False):
if isinstance(index_like, Index):
if copy:
index_like = index_like.copy()
return index_like
if hasattr(index_like, 'name'):
return Index(index_like, name=index_like.name, copy=copy)
# must check for exactly list here because of strict type
# check in clean_index_list
if isinstance(index_like, list):
if type(index_like) != list:
index_like = list(index_like)
# 2200 ?
converted, all_arrays = lib.clean_index_list(index_like)
if len(converted) > 0 and all_arrays:
return MultiIndex.from_arrays(converted)
else:
index_like = converted
else:
# clean_index_list does the equivalent of copying
# so only need to do this if not list instance
if copy:
from copy import copy
index_like = copy(index_like)
return Index(index_like)
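# Illustrative sketch (added for exposition, not part of pandas): the main
# input shapes _ensure_index is written to handle, with hypothetical data and
# a hypothetical helper name.
def _example_ensure_index():
    # An existing Index passes through unchanged (optionally copied).
    existing = _ensure_index(Index([1, 2, 3]))
    # A flat list becomes a plain Index.
    flat = _ensure_index(['x', 'y', 'z'])
    # A list of equal-length lists goes through clean_index_list and comes
    # back as a MultiIndex built via MultiIndex.from_arrays.
    nested = _ensure_index([['a', 'a', 'b'], [1, 2, 3]])
    return existing, flat, nested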
def _ensure_frozen(array_like, categories, copy=False):
array_like = com._coerce_indexer_dtype(array_like, categories)
array_like = array_like.view(FrozenNDArray)
if copy:
array_like = array_like.copy()
return array_like
def _validate_join_method(method):
if method not in ['left', 'right', 'inner', 'outer']:
raise ValueError('do not recognize join method %s' % method)
# TODO: handle index names!
def _get_combined_index(indexes, intersect=False):
indexes = _get_distinct_indexes(indexes)
if len(indexes) == 0:
return Index([])
if len(indexes) == 1:
return indexes[0]
if intersect:
index = indexes[0]
for other in indexes[1:]:
index = index.intersection(other)
return index
union = _union_indexes(indexes)
return _ensure_index(union)
def _get_distinct_indexes(indexes):
return list(dict((id(x), x) for x in indexes).values())
def _union_indexes(indexes):
if len(indexes) == 0:
raise AssertionError('Must have at least 1 Index to union')
if len(indexes) == 1:
result = indexes[0]
if isinstance(result, list):
result = Index(sorted(result))
return result
indexes, kind = _sanitize_and_check(indexes)
def _unique_indices(inds):
def conv(i):
if isinstance(i, Index):
i = i.tolist()
return i
return Index(lib.fast_unique_multiple_list([ conv(i) for i in inds ]))
if kind == 'special':
result = indexes[0]
if hasattr(result, 'union_many'):
return result.union_many(indexes[1:])
else:
for other in indexes[1:]:
result = result.union(other)
return result
elif kind == 'array':
index = indexes[0]
for other in indexes[1:]:
if not index.equals(other):
return _unique_indices(indexes)
return index
else:
return _unique_indices(indexes)
def _trim_front(strings):
"""
Trims leading spaces from a list of strings
"""
trimmed = strings
while len(strings) > 0 and all([x[0] == ' ' for x in trimmed]):
trimmed = [x[1:] for x in trimmed]
return trimmed
def _sanitize_and_check(indexes):
kinds = list(set([type(index) for index in indexes]))
if list in kinds:
if len(kinds) > 1:
indexes = [Index(com._try_sort(x))
if not isinstance(x, Index) else x
for x in indexes]
kinds.remove(list)
else:
return indexes, 'list'
if len(kinds) > 1 or Index not in kinds:
return indexes, 'special'
else:
return indexes, 'array'
def _get_consensus_names(indexes):
# find the non-none names, need to tupleify to make
# the set hashable, then reverse on return
consensus_names = set([
tuple(i.names) for i in indexes if all(n is not None for n in i.names)
])
if len(consensus_names) == 1:
return list(list(consensus_names)[0])
return [None] * indexes[0].nlevels
def _maybe_box(idx):
from pandas.tseries.api import DatetimeIndex, PeriodIndex, TimedeltaIndex
klasses = DatetimeIndex, PeriodIndex, TimedeltaIndex
if isinstance(idx, klasses):
return idx.asobject
return idx
def _all_indexes_same(indexes):
first = indexes[0]
for index in indexes[1:]:
if not first.equals(index):
return False
return True
def _get_na_rep(dtype):
return {np.datetime64: 'NaT', np.timedelta64: 'NaT'}.get(dtype, 'NaN')
def _get_na_value(dtype):
return {np.datetime64: tslib.NaT, np.timedelta64: tslib.NaT}.get(dtype,
np.nan)
def _ensure_has_len(seq):
"""If seq is an iterator, put its values into a list."""
try:
len(seq)
except TypeError:
return list(seq)
else:
return seq
|
gpl-2.0
|
zhangxiaoli73/BigDL
|
pyspark/bigdl/optim/optimizer.py
|
3
|
42493
|
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import multiprocessing
import os
import sys
from distutils.dir_util import mkpath
from py4j.java_gateway import JavaObject
from pyspark.rdd import RDD
from bigdl.util.common import DOUBLEMAX
from bigdl.util.common import JTensor
from bigdl.util.common import JavaValue
from bigdl.util.common import callBigDlFunc
from bigdl.util.common import callJavaFunc
from bigdl.util.common import get_node_and_core_number
from bigdl.util.common import init_engine
from bigdl.util.common import to_list
from bigdl.dataset.dataset import *
if sys.version >= '3':
long = int
unicode = str
class Top1Accuracy(JavaValue):
"""
Calculate the percentage that the output's max probability index equals the target.
>>> top1 = Top1Accuracy()
creating: createTop1Accuracy
"""
def __init__(self, bigdl_type="float"):
JavaValue.__init__(self, None, bigdl_type)
class TreeNNAccuracy(JavaValue):
"""
Calculate the percentage that the output's max probability index equals the target.
>>> top1 = TreeNNAccuracy()
creating: createTreeNNAccuracy
"""
def __init__(self, bigdl_type="float"):
JavaValue.__init__(self, None, bigdl_type)
class Top5Accuracy(JavaValue):
"""
Calculate the percentage that the output's max probability index equals the target.
>>> top5 = Top5Accuracy()
creating: createTop5Accuracy
"""
def __init__(self, bigdl_type="float"):
JavaValue.__init__(self, None, bigdl_type)
class MeanAveragePrecision(JavaValue):
"""
Calculate the Mean Average Precision for top-k confident predictions.
The algorithm follows VOC Challenge after 2007
>>> MAP = MeanAveragePrecision(10, 20)
creating: createMeanAveragePrecision
"""
def __init__(self, k, classes, bigdl_type="float"):
JavaValue.__init__(self, None, bigdl_type, k, classes)
class MeanAveragePrecisionObjectDetection(JavaValue):
"""
Calculate the Mean Average Precision for Object Detection.
>>> MAPObj = MeanAveragePrecisionObjectDetection(20)
creating: createMeanAveragePrecisionObjectDetection
"""
def __init__(self, classes, iou=0.5, use_voc2007=False, skip_class=-1, bigdl_type="float"):
"""
:param classes: the number of classes
:param iou: the IOU threshold
:param use_voc2007: use validation method before voc2010 (i.e. voc2007)
:param skip_class: skip calculation on a specific class (e.g. background)
"""
JavaValue.__init__(self, None, bigdl_type, classes, iou, use_voc2007, skip_class)
class Loss(JavaValue):
"""
This evaluation method calculates the loss of the output with respect to the target.
>>> from bigdl.nn.criterion import ClassNLLCriterion
>>> loss = Loss()
creating: createClassNLLCriterion
creating: createLoss
>>> loss = Loss(ClassNLLCriterion())
creating: createClassNLLCriterion
creating: createLoss
"""
def __init__(self, cri=None, bigdl_type="float"):
from bigdl.nn.criterion import ClassNLLCriterion
if cri is None:
cri = ClassNLLCriterion()
JavaValue.__init__(self, None, bigdl_type, cri)
class HitRatio(JavaValue):
"""
Hit Ratio (HR), used in recommendation applications.
HR intuitively measures whether the test item is present in the top-k list.
>>> hr10 = HitRatio(k = 10)
creating: createHitRatio
"""
def __init__(self, k = 10, neg_num = 100, bigdl_type="float"):
"""
Create hit ratio validation method.
:param k: top k
:param neg_num: number of negative items.
"""
JavaValue.__init__(self, None, bigdl_type, k, neg_num)
class NDCG(JavaValue):
"""
Normalized Discounted Cumulative Gain(NDCG).
NDCG accounts for the position of the hit by assigning higher scores to hits at top ranks.
>>> ndcg = NDCG(k = 10)
creating: createNDCG
"""
def __init__(self, k = 10, neg_num = 100, bigdl_type="float"):
"""
Create NDCG validation method.
:param k: top k
:param neg_num: number of negative items.
"""
JavaValue.__init__(self, None, bigdl_type, k, neg_num)
class MAE(JavaValue):
"""
This evaluation method calculates the mean absolute error of output with respect to target.
>>> mae = MAE()
creating: createMAE
"""
def __init__(self, bigdl_type="float"):
JavaValue.__init__(self, None, bigdl_type)
class MaxIteration(JavaValue):
"""
A trigger specifies a timespot or several timespots during training,
and a corresponding action will be taken when the timespot(s) is reached.
MaxIteration is a trigger that triggers an action when training reaches
the number of iterations specified by "max".
Usually used as end_trigger when creating an Optimizer.
>>> maxIteration = MaxIteration(20)
creating: createMaxIteration
"""
def __init__(self, max, bigdl_type="float"):
"""
Create a MaxIteration trigger.
:param max: max
"""
JavaValue.__init__(self, None, bigdl_type, max)
class MaxEpoch(JavaValue):
"""
A trigger specifies a timespot or several timespots during training,
and a corresponding action will be taken when the timespot(s) is reached.
MaxEpoch is a trigger that triggers an action when training reaches
the number of epochs specified by "max_epoch".
Usually used as end_trigger when creating an Optimizer.
>>> maxEpoch = MaxEpoch(2)
creating: createMaxEpoch
"""
def __init__(self, max_epoch, bigdl_type="float"):
"""
Create a MaxEpoch trigger.
:param max_epoch: max_epoch
"""
JavaValue.__init__(self, None, bigdl_type, max_epoch)
class EveryEpoch(JavaValue):
"""
A trigger specifies a timespot or several timespots during training,
and a corresponding action will be taken when the timespot(s) is reached.
EveryEpoch is a trigger that triggers an action when each epoch finishes.
Could be used as a trigger in set_validation and set_checkpoint in Optimizer,
and also in TrainSummary.set_summary_trigger.
>>> everyEpoch = EveryEpoch()
creating: createEveryEpoch
"""
def __init__(self, bigdl_type="float"):
"""
Create an EveryEpoch trigger.
"""
JavaValue.__init__(self, None, bigdl_type)
class SeveralIteration(JavaValue):
"""
A trigger specifies a timespot or several timespots during training,
and a corresponding action will be taken when the timespot(s) is reached.
SeveralIteration is a trigger that triggers an action every "n"
iterations.
Could be used as a trigger in set_validation and set_checkpoint in Optimizer,
and also in TrainSummary.set_summary_trigger.
>>> serveralIteration = SeveralIteration(2)
creating: createSeveralIteration
"""
def __init__(self, interval, bigdl_type="float"):
"""
Create a SeveralIteration trigger.
:param interval: interval is the "n" such that an action is triggered every "n" iterations
"""
JavaValue.__init__(self, None, bigdl_type, interval)
class MaxScore(JavaValue):
"""
A trigger that triggers an action when the validation score is larger than the "max" score
>>> maxScore = MaxScore(0.4)
creating: createMaxScore
"""
def __init__(self, max, bigdl_type="float"):
"""
Create a MaxScore trigger.
:param max: max score
"""
JavaValue.__init__(self, None, bigdl_type, max)
class MinLoss(JavaValue):
"""
A trigger that triggers an action when the training loss is less than the "min" loss
>>> minLoss = MinLoss(0.1)
creating: createMinLoss
"""
def __init__(self, min, bigdl_type="float"):
"""
Create a MinLoss trigger.
:param min: min loss
"""
JavaValue.__init__(self, None, bigdl_type, min)
class TriggerAnd(JavaValue):
"""
A trigger that contains other triggers and fires only when all of them fire (logical AND)
>>> a = TriggerAnd(MinLoss(0.1), MaxEpoch(2))
creating: createMinLoss
creating: createMaxEpoch
creating: createTriggerAnd
"""
def __init__(self, first, *other):
"""
Create an And trigger.
:param first: first Trigger
:param other: other Trigger
"""
JavaValue.__init__(self, None, "float", first, list(other))
class TriggerOr(JavaValue):
"""
A trigger that contains other triggers and fires when any of them fires (logical OR)
>>> o = TriggerOr(MinLoss(0.1), MaxEpoch(2))
creating: createMinLoss
creating: createMaxEpoch
creating: createTriggerOr
"""
def __init__(self, first, *other):
"""
Create an Or trigger.
:param first: first Trigger
:param other: other Trigger
"""
JavaValue.__init__(self, None, "float", first, list(other))
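# Illustrative sketch (not part of BigDL): triggers compose, so a stopping
# condition such as "stop after 10 epochs OR once training loss drops below
# 0.05" can be expressed directly. The numbers are hypothetical, `optimizer`
# is assumed to be an already configured optimizer, and an initialized BigDL
# engine is assumed.
def _example_combined_end_trigger(optimizer):
    end_condition = TriggerOr(MaxEpoch(10), MinLoss(0.05))
    return optimizer.set_end_when(end_condition)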
class Poly(JavaValue):
"""
A learning rate decay policy, where the effective learning rate
follows a polynomial decay, to be zero by the max_iteration.
Calculation: base_lr * (1 - iter/max_iteration) ^ power
:param power: coefficient of decay, refer to the calculation formula
:param max_iteration: max iteration when lr becomes zero
>>> poly = Poly(0.5, 2)
creating: createPoly
"""
def __init__(self, power, max_iteration, bigdl_type="float"):
JavaValue.__init__(self, None, bigdl_type, power, max_iteration)
class Exponential(JavaValue):
"""
[[Exponential]] is a learning rate schedule, which rescales the learning rate by
lr_{n + 1} = lr * decayRate `^` (iter / decayStep)
:param decay_step the interval for lr decay
:param decay_rate decay rate
:param stair_case if true, iter / decayStep is an integer division
and the decayed learning rate follows a staircase function.
>>> exponential = Exponential(100, 0.1)
creating: createExponential
"""
def __init__(self, decay_step, decay_rate, stair_case=False, bigdl_type="float"):
JavaValue.__init__(self, None, bigdl_type, decay_step, decay_rate, stair_case)
class Step(JavaValue):
"""
A learning rate decay policy, where the effective learning rate is
calculated as base_lr * gamma ^ (floor(iter / step_size))
:param step_size:
:param gamma:
>>> step = Step(2, 0.3)
creating: createStep
"""
def __init__(self, step_size, gamma, bigdl_type="float"):
JavaValue.__init__(self, None, bigdl_type, step_size, gamma)
class Default(JavaValue):
"""
The default learning rate schedule, which simply applies the optimizer's
learningrate_decay as iterations proceed (no extra parameters).
>>> step = Default()
creating: createDefault
"""
def __init__(self, bigdl_type="float"):
JavaValue.__init__(self, None, bigdl_type)
class Plateau(JavaValue):
"""
Plateau is a learning rate schedule that reduces the learning rate when a metric has stopped improving.
Models often benefit from reducing the learning rate by a factor of 2-10
once learning stagnates. It monitors a quantity and if no improvement
is seen for a 'patience' number of epochs, the learning rate is reduced.
:param monitor quantity to be monitored, can be Loss or score
:param factor factor by which the learning rate will be reduced. new_lr = lr * factor
:param patience number of epochs with no improvement after which learning rate will be reduced.
:param mode one of {min, max}.
In min mode, lr will be reduced when the quantity monitored has stopped decreasing;
in max mode it will be reduced when the quantity monitored has stopped increasing
:param epsilon threshold for measuring the new optimum, to only focus on significant changes.
:param cooldown number of epochs to wait before resuming normal operation
after lr has been reduced.
:param min_lr lower bound on the learning rate.
>>> plateau = Plateau("score")
creating: createPlateau
"""
def __init__(self,
monitor,
factor=0.1,
patience=10,
mode="min",
epsilon=1e-4,
cooldown=0,
min_lr=0.0,
bigdl_type="float"):
JavaValue.__init__(self, None, bigdl_type, monitor, factor, patience, mode, epsilon,
cooldown, min_lr)
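# Illustrative sketch (not part of BigDL): using Plateau as the learning rate
# schedule of an SGD optim method, cutting the rate by 10x when the monitored
# validation score stalls for 5 epochs. The monitor name and numbers are
# hypothetical; note that `leaningrate_schedule` (sic) is the parameter's
# actual spelling in the SGD constructor defined below in this file.
def _example_plateau_sgd():
    plateau = Plateau("score", factor=0.1, patience=5, mode="max", min_lr=1e-5)
    return SGD(learningrate=1e-2, leaningrate_schedule=plateau)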
class Warmup(JavaValue):
"""
A gradual learning rate increase policy, where the effective learning rate
increases by delta after each iteration.
Calculation: base_lr + delta * iteration
:param delta: increase amount after each iteration
>>> warmup = Warmup(0.05)
creating: createWarmup
"""
def __init__(self, delta, bigdl_type="float"):
JavaValue.__init__(self, None, bigdl_type, delta)
class SequentialSchedule(JavaValue):
"""
Stack several learning rate schedulers.
:param iterationPerEpoch: iteration numbers per epoch
>>> sequentialSchedule = SequentialSchedule(5)
creating: createSequentialSchedule
>>> poly = Poly(0.5, 2)
creating: createPoly
>>> test = sequentialSchedule.add(poly, 5)
"""
def __init__(self, iteration_per_epoch, bigdl_type="float"):
JavaValue.__init__(self, None, bigdl_type, iteration_per_epoch)
def add(self, scheduler, max_iteration, bigdl_type="float"):
"""
Add a learning rate scheduler to the contained `schedules`
:param scheduler: learning rate scheduler to be added
:param max_iteration: iteration numbers this scheduler will run
"""
return callBigDlFunc(bigdl_type, "addScheduler", self.value, scheduler, max_iteration)
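# Illustrative sketch (not part of BigDL): stacking a warm-up phase followed
# by polynomial decay with SequentialSchedule, assuming 100 iterations per
# epoch and an initialized BigDL engine. All numbers are hypothetical.
def _example_sequential_schedule():
    schedule = SequentialSchedule(iteration_per_epoch=100)
    # Warm up for 3 epochs (300 iterations), then decay polynomially.
    schedule.add(Warmup(delta=1e-4), max_iteration=300)
    schedule.add(Poly(power=0.5, max_iteration=1700), max_iteration=1700)
    return SGD(learningrate=1e-2, leaningrate_schedule=schedule)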
class OptimMethod(JavaValue):
def __init__(self, jvalue, bigdl_type, *args):
if (jvalue):
assert(type(jvalue) == JavaObject)
self.value = jvalue
else:
self.value = callBigDlFunc(
bigdl_type, JavaValue.jvm_class_constructor(self), *args)
self.bigdl_type = bigdl_type
@staticmethod
def load(path, bigdl_type="float"):
"""
load optim method
:param path: file path
"""
return callBigDlFunc(bigdl_type, "loadOptimMethod", path)
def save(self, path, overWrite):
"""
save OptimMethod
:param path path
:param overWrite whether to overwrite
"""
method=self.value
return callBigDlFunc(self.bigdl_type, "saveOptimMethod", method, path, overWrite)
class SGD(OptimMethod):
"""
A plain implementation of SGD
:param learningrate learning rate
:param learningrate_decay learning rate decay
:param weightdecay weight decay
:param momentum momentum
:param dampening dampening for momentum
:param nesterov enables Nesterov momentum
:param learningrates 1D tensor of individual learning rates
:param weightdecays 1D tensor of individual weight decays
>>> sgd = SGD()
creating: createDefault
creating: createSGD
"""
def __init__(self,
learningrate=1e-3,
learningrate_decay=0.0,
weightdecay=0.0,
momentum=0.0,
dampening=DOUBLEMAX,
nesterov=False,
leaningrate_schedule=None,
learningrates=None,
weightdecays=None,
bigdl_type="float"):
super(SGD, self).__init__(None, bigdl_type, learningrate, learningrate_decay, weightdecay,
momentum, dampening, nesterov,
leaningrate_schedule if (leaningrate_schedule) else Default(),
JTensor.from_ndarray(learningrates), JTensor.from_ndarray(weightdecays))
class Adagrad(OptimMethod):
"""
An implementation of Adagrad. See the original paper:
http://jmlr.org/papers/volume12/duchi11a/duchi11a.pdf
:param learningrate learning rate
:param learningrate_decay learning rate decay
:param weightdecay weight decay
>>> adagrad = Adagrad()
creating: createAdagrad
"""
def __init__(self,
learningrate=1e-3,
learningrate_decay=0.0,
weightdecay=0.0,
bigdl_type="float"):
super(Adagrad, self).__init__(None, bigdl_type, learningrate, learningrate_decay, weightdecay)
class LBFGS(OptimMethod):
"""
This implementation of L-BFGS relies on a user-provided line
search function (state.lineSearch). If this function is not
provided, then a simple learningRate is used to produce fixed
size steps. Fixed size steps are much less costly than line
searches, and can be useful for stochastic problems.
The learning rate is used even when a line search is provided.
This is also useful for large-scale stochastic problems, where
opfunc is a noisy approximation of f(x). In that case, the learning
rate allows a reduction of confidence in the step size.
:param max_iter Maximum number of iterations allowed
:param max_eval Maximum number of function evaluations
:param tolfun Termination tolerance on the first-order optimality
:param tolx Termination tol on progress in terms of func/param changes
:param ncorrection
:param learningrate
:param verbose
:param linesearch A line search function
:param linesearch_options If no line search provided, then a fixed step size is used
>>> lbfgs = LBFGS()
creating: createLBFGS
"""
def __init__(self,
max_iter=20,
max_eval=DOUBLEMAX,
tolfun=1e-5,
tolx=1e-9,
ncorrection=100,
learningrate=1.0,
verbose=False,
linesearch=None,
linesearch_options=None,
bigdl_type="float"):
if linesearch or linesearch_options:
raise ValueError('linesearch and linesearch_options must be None in LBFGS')
super(LBFGS, self).__init__(None, bigdl_type, max_iter, max_eval, tolfun, tolx,
ncorrection, learningrate, verbose, linesearch, linesearch_options)
class Adadelta(OptimMethod):
"""
Adadelta implementation for SGD: http://arxiv.org/abs/1212.5701
:param decayrate interpolation parameter rho
:param epsilon for numerical stability
>>> adagrad = Adadelta()
creating: createAdadelta
"""
def __init__(self,
decayrate = 0.9,
epsilon = 1e-10,
bigdl_type="float"):
super(Adadelta, self).__init__(None, bigdl_type, decayrate, epsilon)
class Adam(OptimMethod):
"""
An implementation of Adam http://arxiv.org/pdf/1412.6980.pdf
:param learningrate learning rate
:param learningrate_decay learning rate decay
:param beta1 first moment coefficient
:param beta2 second moment coefficient
:param epsilon for numerical stability
>>> adam = Adam()
creating: createAdam
"""
def __init__(self,
learningrate = 1e-3,
learningrate_decay = 0.0,
beta1 = 0.9,
beta2 = 0.999,
epsilon = 1e-8,
bigdl_type="float"):
super(Adam, self).__init__(None, bigdl_type, learningrate, learningrate_decay,
beta1, beta2, epsilon)
class ParallelAdam(OptimMethod):
"""
An implementation of Adam http://arxiv.org/pdf/1412.6980.pdf
:param learningrate learning rate
:param learningrate_decay learning rate decay
:param beta1 first moment coefficient
:param beta2 second moment coefficient
:param epsilon for numerical stability
>>> init_engine()
>>> pAdam = ParallelAdam()
creating: createParallelAdam
"""
def __init__(self,
learningrate = 1e-3,
learningrate_decay = 0.0,
beta1 = 0.9,
beta2 = 0.999,
epsilon = 1e-8,
parallel_num = -1,
bigdl_type="float"):
if parallel_num == -1:
parallel_num = get_node_and_core_number()[1]
super(ParallelAdam, self).__init__(None, bigdl_type, learningrate, learningrate_decay,
beta1, beta2, epsilon, parallel_num)
class Ftrl(OptimMethod):
"""
An implementation of Ftrl https://www.eecs.tufts.edu/~dsculley/papers/ad-click-prediction.pdf.
Supports the L1 penalty, L2 penalty and shrinkage-type L2 penalty.
:param learningrate learning rate
:param learningrate_power double, must be less or equal to zero. Default is -0.5.
:param initial_accumulator_value double, the starting value for accumulators,
require zero or positive values.
:param l1_regularization_strength double, must be greater or equal to zero. Default is zero.
:param l2_regularization_strength double, must be greater or equal to zero. Default is zero.
:param l2_shrinkage_regularization_strength double, must be greater or equal to zero.
Default is zero. This differs from l2RegularizationStrength above. L2 above is a
stabilization penalty, whereas this one is a magnitude penalty.
>>> ftrl = Ftrl()
creating: createFtrl
>>> ftrl2 = Ftrl(1e-2, -0.1, 0.2, 0.3, 0.4, 0.5)
creating: createFtrl
"""
def __init__(self,
learningrate = 1e-3,
learningrate_power = -0.5,
initial_accumulator_value = 0.1,
l1_regularization_strength = 0.0,
l2_regularization_strength = 0.0,
l2_shrinkage_regularization_strength = 0.0,
bigdl_type="float"):
super(Ftrl, self).__init__(None, bigdl_type, learningrate, learningrate_power,
initial_accumulator_value,
l1_regularization_strength,
l2_regularization_strength,
l2_shrinkage_regularization_strength)
class Adamax(OptimMethod):
"""
An implementation of Adamax http://arxiv.org/pdf/1412.6980.pdf
:param learningrate learning rate
:param beta1 first moment coefficient
:param beta2 second moment coefficient
:param epsilon for numerical stability
>>> adagrad = Adamax()
creating: createAdamax
"""
def __init__(self,
learningrate = 0.002,
beta1 = 0.9,
beta2 = 0.999,
epsilon = 1e-38,
bigdl_type="float"):
super(Adamax, self).__init__(None, bigdl_type, learningrate, beta1, beta2, epsilon)
class RMSprop(OptimMethod):
"""
An implementation of RMSprop
:param learningrate learning rate
:param learningrate_decay learning rate decay
:param decayrate decay rate, also called rho
:param epsilon for numerical stability
>>> adagrad = RMSprop()
creating: createRMSprop
"""
def __init__(self,
learningrate = 1e-2,
learningrate_decay = 0.0,
decayrate = 0.99,
epsilon = 1e-8,
bigdl_type="float"):
super(RMSprop, self).__init__(None, bigdl_type, learningrate, learningrate_decay, decayrate, epsilon)
class MultiStep(JavaValue):
"""
Similar to Step, but it allows non-uniform steps defined by step_sizes
:param step_sizes: the series of step sizes used for lr decay
:param gamma: coefficient of decay
>>> step = MultiStep([2, 5], 0.3)
creating: createMultiStep
"""
def __init__(self, step_sizes, gamma, bigdl_type="float"):
JavaValue.__init__(self, None, bigdl_type, step_sizes, gamma)
class BaseOptimizer(JavaValue):
def set_model(self, model):
"""
Set model.
:param model: new model
"""
self.value.setModel(model.value)
def set_criterion(self, criterion):
"""
set new criterion, for optimizer reuse
:param criterion: new criterion
:return:
"""
callBigDlFunc(self.bigdl_type, "setCriterion", self.value,
criterion)
def set_checkpoint(self, checkpoint_trigger,
checkpoint_path, isOverWrite=True):
"""
Configure checkpoint settings.
:param checkpoint_trigger: the interval to write snapshots
:param checkpoint_path: the path to write snapshots into
:param isOverWrite: whether to overwrite existing snapshots in the path. Default is True
"""
if not os.path.exists(checkpoint_path):
mkpath(checkpoint_path)
callBigDlFunc(self.bigdl_type, "setCheckPoint", self.value,
checkpoint_trigger, checkpoint_path, isOverWrite)
def set_gradclip_const(self, min_value, max_value):
"""
Configure constant clipping settings.
:param min_value: the minimum value to clip by
:param max_value: the maximum value to clip by
"""
callBigDlFunc(self.bigdl_type, "setConstantClip", self.value, min_value, max_value)
def set_gradclip_l2norm(self, clip_norm):
"""
Configure L2 norm clipping settings.
:param clip_norm: gradient L2-Norm threshold
"""
callBigDlFunc(self.bigdl_type, "setL2NormClip", self.value, clip_norm)
def disable_gradclip(self):
"""
disable clipping.
"""
callBigDlFunc(self.bigdl_type, "disableClip", self.value)
# return a module
def optimize(self):
"""
Do an optimization.
"""
jmodel = callJavaFunc(self.value.optimize)
from bigdl.nn.layer import Layer
return Layer.of(jmodel)
def set_train_summary(self, summary):
"""
Set train summary. A TrainSummary object contains information
necessary for the optimizer to know how often the logs are recorded,
where to store the logs and how to retrieve them, etc. For details,
refer to the docs of TrainSummary.
:param summary: a TrainSummary object
"""
callBigDlFunc(self.bigdl_type, "setTrainSummary", self.value,
summary)
return self
def set_val_summary(self, summary):
"""
Set validation summary. A ValidationSummary object contains information
necessary for the optimizer to know how often the logs are recorded,
where to store the logs and how to retrieve them, etc. For details,
refer to the docs of ValidationSummary.
:param summary: a ValidationSummary object
"""
callBigDlFunc(self.bigdl_type, "setValSummary", self.value,
summary)
return self
def prepare_input(self):
"""
Load input. Notebook users can call this method to separate data loading
from optimizer creation time.
"""
print("Loading input ...")
self.value.prepareInput()
def set_end_when(self, end_when):
"""
When to stop, passed in a [[Trigger]]
"""
self.value.setEndWhen(end_when.value)
return self
class Optimizer(BaseOptimizer):
# NOTE: This is a deprecated constructor; use the `create` method instead.
def __init__(self,
model,
training_rdd,
criterion,
end_trigger,
batch_size,
optim_method=None,
bigdl_type="float"):
"""
Create a distributed optimizer.
:param model: the neural net model
:param training_rdd: the training dataset
:param criterion: the loss function
:param optim_method: the algorithm to use for optimization,
e.g. SGD, Adagrad, etc. If optim_method is None, the default algorithm is SGD.
:param end_trigger: when to end the optimization
:param batch_size: training batch size
"""
self.pvalue = DistriOptimizer(model,
training_rdd,
criterion,
end_trigger,
batch_size,
optim_method,
bigdl_type)
self.value = self.pvalue.value
self.bigdl_type = self.pvalue.bigdl_type
@staticmethod
def create(model,
training_set,
criterion,
end_trigger=None,
batch_size=32,
optim_method=None,
cores=None,
bigdl_type="float"):
"""
Create an optimizer.
Depending on the input type, the returned optimizer can be a local optimizer
or a distributed optimizer.
:param model: the neural net model
:param training_set: (features, label) for local mode. RDD[Sample] for distributed mode.
:param criterion: the loss function
:param optim_method: the algorithm to use for optimization,
e.g. SGD, Adagrad, etc. If optim_method is None, the default algorithm is SGD.
:param end_trigger: when to end the optimization. Default value is MaxEpoch(1)
:param batch_size: training batch size
:param cores: for the local optimizer only; the default value is the total number of physical cores
"""
if not end_trigger:
end_trigger = MaxEpoch(1)
if not optim_method:
optim_method = SGD()
if isinstance(training_set, RDD) or isinstance(training_set, DataSet):
return DistriOptimizer(model=model,
training_rdd=training_set,
criterion=criterion,
end_trigger=end_trigger,
batch_size=batch_size,
optim_method=optim_method,
bigdl_type=bigdl_type)
elif isinstance(training_set, tuple) and len(training_set) == 2:
x, y = training_set
return LocalOptimizer(X=x,
Y=y,
model=model,
criterion=criterion,
end_trigger=end_trigger,
batch_size=batch_size,
optim_method=optim_method,
cores=cores,
bigdl_type="float")
else:
raise Exception("Not supported training set: %s" % type(training_set))
def set_validation(self, batch_size, val_rdd, trigger, val_method=None):
"""
Configure validation settings.
:param batch_size: validation batch size
:param val_rdd: validation dataset
:param trigger: validation interval
:param val_method: the ValidationMethod to use, e.g. "Top1Accuracy", "Top5Accuracy", "Loss"
"""
if val_method is None:
val_method = [Top1Accuracy()]
func_name = "setValidation"
if isinstance(val_rdd, DataSet):
func_name = "setValidationFromDataSet"
callBigDlFunc(self.bigdl_type, func_name, self.value, batch_size,
trigger, val_rdd, to_list(val_method))
def set_traindata(self, training_rdd, batch_size):
"""
Set new training dataset, for optimizer reuse
:param training_rdd: the training dataset
:param batch_size: training batch size
:return:
"""
callBigDlFunc(self.bigdl_type, "setTrainData", self.value,
training_rdd, batch_size)
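# Illustrative sketch (not part of BigDL): wiring the pieces above together
# for a distributed run. `model`, `train_rdd` and `val_rdd` are assumed to
# already exist (a bigdl.nn model and RDD[Sample] data on an initialized
# Spark/BigDL engine); the batch size, epoch count and checkpoint path are
# hypothetical.
def _example_distributed_training(model, train_rdd, val_rdd):
    from bigdl.nn.criterion import ClassNLLCriterion
    optimizer = Optimizer.create(model=model,
                                 training_set=train_rdd,
                                 criterion=ClassNLLCriterion(),
                                 end_trigger=MaxEpoch(5),
                                 batch_size=128,
                                 optim_method=Adam(learningrate=1e-3))
    optimizer.set_validation(batch_size=128,
                             val_rdd=val_rdd,
                             trigger=EveryEpoch(),
                             val_method=[Top1Accuracy(), Loss()])
    optimizer.set_checkpoint(EveryEpoch(), "/tmp/bigdl_checkpoints")
    return optimizer.optimize()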
class DistriOptimizer(Optimizer):
def __init__(self,
model,
training_rdd,
criterion,
end_trigger,
batch_size,
optim_method=None,
bigdl_type="float"):
"""
Create an optimizer.
:param model: the neural net model
:param training_rdd: the training dataset
:param criterion: the loss function
:param optim_method: the algorithm to use for optimization,
e.g. SGD, Adagrad, etc. If optim_method is None, the default algorithm is SGD.
:param end_trigger: when to end the optimization
:param batch_size: training batch size
"""
if not optim_method:
optim_methods = {model.name(): SGD()}
elif isinstance(optim_method, OptimMethod):
optim_methods = {model.name(): optim_method}
elif isinstance(optim_method, JavaObject):
optim_methods = {model.name(): OptimMethod(optim_method, bigdl_type)}
else:
optim_methods = optim_method
if isinstance(training_rdd, RDD):
JavaValue.__init__(self, None, bigdl_type, model.value,
training_rdd, criterion,
optim_methods, end_trigger, batch_size)
elif isinstance(training_rdd, DataSet):
self.bigdl_type = bigdl_type
self.value = callBigDlFunc(self.bigdl_type, "createDistriOptimizerFromDataSet",
model.value, training_rdd, criterion,
optim_methods, end_trigger, batch_size)
class LocalOptimizer(BaseOptimizer):
"""
Create an optimizer.
:param model: the neural net model
:param X: the training features which is an ndarray or list of ndarray
:param Y: the training label which is an ndarray
:param criterion: the loss function
:param optim_method: the algorithm to use for optimization,
e.g. SGD, Adagrad, etc. If optim_method is None, the default algorithm is SGD.
:param end_trigger: when to end the optimization
:param batch_size: training batch size
:param cores: by default is the total physical cores.
"""
def __init__(self,
X,
Y,
model,
criterion,
end_trigger,
batch_size,
optim_method=None,
cores=None,
bigdl_type="float"):
if not optim_method:
optim_methods = {model.name(): SGD()}
elif isinstance(optim_method, OptimMethod):
optim_methods = {model.name(): optim_method}
elif isinstance(optim_method, JavaObject):
optim_methods = {model.name(): OptimMethod(optim_method, bigdl_type)}
else:
optim_methods = optim_method
if cores is None:
cores = multiprocessing.cpu_count()
JavaValue.__init__(self, None, bigdl_type,
[JTensor.from_ndarray(X) for X in to_list(X)],
JTensor.from_ndarray(Y),
model.value,
criterion,
optim_methods, end_trigger, batch_size, cores)
def set_validation(self, batch_size, X_val, Y_val, trigger, val_method=None):
"""
Configure validation settings.
:param batch_size: validation batch size
:param X_val: features of validation dataset
:param Y_val: label of validation dataset
:param trigger: validation interval
:param val_method: the ValidationMethod to use, e.g. "Top1Accuracy", "Top5Accuracy", "Loss"
"""
if val_method is None:
val_method = [Top1Accuracy()]
callBigDlFunc(self.bigdl_type, "setValidation", self.value, batch_size,
trigger, [JTensor.from_ndarray(X) for X in to_list(X_val)],
JTensor.from_ndarray(Y_val), to_list(val_method))
class TrainSummary(JavaValue):
"""
A logging facility which allows user to trace how indicators (e.g.
learning rate, training loss, throughput, etc.) change with iterations/time
in an optimization process. TrainSummary is for training indicators only
(check ValidationSummary for validation indicators). It contains necessary
information for the optimizer to know where to store the logs, how to
retrieve the logs, and so on. The logs are written in tensorflow-compatible
format so that they can be visualized directly using tensorboard. Also the
logs can be retrieved as ndarrays and visualized using python libraries
such as matplotlib (in notebook, etc.).
Use optimizer.setTrainSummary to enable train logger.
"""
def __init__(self, log_dir, app_name, bigdl_type="float"):
"""
Create a TrainSummary. Logs will be saved to log_dir/app_name/train.
:param log_dir: the root dir to store the logs
:param app_name: the application name
"""
JavaValue.__init__(self, None, bigdl_type, log_dir, app_name)
def read_scalar(self, tag):
"""
Retrieve train logs by type. Return an array of records in the format
(step, value, wallClockTime). "Step" is the iteration count by default.
:param tag: the type of the logs. Supported tags are: "LearningRate", "Loss", "Throughput"
"""
return callBigDlFunc(self.bigdl_type, "summaryReadScalar", self.value,
tag)
def set_summary_trigger(self, name, trigger):
"""
Set the interval of recording for each indicator.
:param name: tag name. Supported tag names are "LearningRate", "Loss", "Throughput", "Parameters". "Parameters" is an umbrella tag that includes weight, bias, gradWeight, gradBias, and some running status (e.g. runningMean and runningVar in BatchNormalization). If you didn't set any triggers, we will by default record Loss and Throughput in each iteration, while *NOT* recording LearningRate and Parameters, as recording parameters may introduce substantial overhead when the model is very big, and LearningRate is not a public attribute for all OptimMethod.
:param trigger: trigger
"""
return callBigDlFunc(self.bigdl_type, "summarySetTrigger", self.value,
name, trigger)
class ValidationSummary(JavaValue):
"""
A logging facility which allows user to trace how indicators (e.g.
validation loss, top1 accuracy, top5 accuracy, etc.) change with
iterations/time in an optimization process. ValidationSummary is for
validation indicators only (check TrainSummary for train indicators).
It contains necessary information for the optimizer to know where to
store the logs, how to retrieve the logs, and so on. The logs are
written in tensorflow-compatible format so that they can be visualized
directly using tensorboard. Also the logs can be retrieved as ndarrays
and visualized using python libraries such as matplotlib
(in notebook, etc.).
Use optimizer.setValidationSummary to enable validation logger.
"""
def __init__(self, log_dir, app_name, bigdl_type="float"):
"""
Create a ValidationSummary. Logs will be saved to
log_dir/app_name/train. By default, all ValidationMethod set into
optimizer will be recorded and the recording interval is the same
as trigger of ValidationMethod in the optimizer.
:param log_dir: the root dir to store the logs
:param app_name: the application name
"""
JavaValue.__init__(self, None, bigdl_type, log_dir, app_name)
def read_scalar(self, tag):
"""
Retrieve validation logs by type. Return an array of records in the
format (step, value, wallClockTime). "Step" is the iteration count
by default.
:param tag: the type of the logs. The tag should match the name of the ValidationMethod set into the optimizer, e.g. "Top1AccuracyLoss", "Top1Accuracy" or "Top5Accuracy".
"""
return callBigDlFunc(self.bigdl_type, "summaryReadScalar", self.value,
tag)
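# Illustrative sketch (not part of BigDL): attaching both summaries to an
# optimizer and reading back the recorded curves afterwards. The log
# directory and app name are hypothetical; `optimizer` is assumed to be
# already configured with validation enabled.
def _example_summaries(optimizer, log_dir="/tmp/bigdl_logs", app_name="demo"):
    train_summary = TrainSummary(log_dir=log_dir, app_name=app_name)
    # Record parameters only every 20 iterations to limit overhead.
    train_summary.set_summary_trigger("Parameters", SeveralIteration(20))
    val_summary = ValidationSummary(log_dir=log_dir, app_name=app_name)
    optimizer.set_train_summary(train_summary)
    optimizer.set_val_summary(val_summary)
    optimizer.optimize()
    # Each record is (step, value, wallClockTime).
    return (train_summary.read_scalar("Loss"),
            val_summary.read_scalar("Top1Accuracy"))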
class L1L2Regularizer(JavaValue):
"""
Apply both L1 and L2 regularization
:param l1 l1 regularization rate
:param l2 l2 regularization rate
"""
def __init__(self, l1, l2, bigdl_type="float"):
JavaValue.__init__(self, None, bigdl_type, l1, l2)
class ActivityRegularization(JavaValue):
"""
Apply both L1 and L2 regularization to the layer activity (output)
:param l1 l1 regularization rate
:param l2 l2 regularization rate
"""
def __init__(self, l1, l2, bigdl_type="float"):
JavaValue.__init__(self, None, bigdl_type, l1, l2)
class L1Regularizer(JavaValue):
"""
Apply L1 regularization
:param l1 l1 regularization rate
"""
def __init__(self, l1, bigdl_type="float"):
JavaValue.__init__(self, None, bigdl_type, l1)
class L2Regularizer(JavaValue):
"""
Apply L2 regularization
:param l2 l2 regularization rate
"""
def __init__(self, l2, bigdl_type="float"):
JavaValue.__init__(self, None, bigdl_type, l2)
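# Illustrative sketch (not part of this file): regularizers are consumed by
# bigdl.nn layers; many of them, such as Linear, accept wRegularizer and
# bRegularizer arguments (assumed here). Layer sizes and rates are
# hypothetical.
def _example_regularized_layer():
    from bigdl.nn.layer import Linear
    return Linear(784, 10,
                  wRegularizer=L2Regularizer(1e-4),
                  bRegularizer=L1Regularizer(1e-5))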
def _test():
import doctest
from pyspark import SparkContext
from bigdl.optim import optimizer
from bigdl.util.common import init_engine
from bigdl.util.common import create_spark_conf
globs = optimizer.__dict__.copy()
sc = SparkContext(master="local[4]", appName="test optimizer",
conf=create_spark_conf())
init_engine()
globs['sc'] = sc
(failure_count, test_count) = doctest.testmod(globs=globs,
optionflags=doctest.ELLIPSIS)
if failure_count:
exit(-1)
if __name__ == "__main__":
_test()
|
apache-2.0
|
sinhrks/scikit-learn
|
examples/linear_model/plot_omp.py
|
385
|
2263
|
"""
===========================
Orthogonal Matching Pursuit
===========================
Using orthogonal matching pursuit for recovering a sparse signal from a noisy
measurement encoded with a dictionary
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import OrthogonalMatchingPursuit
from sklearn.linear_model import OrthogonalMatchingPursuitCV
from sklearn.datasets import make_sparse_coded_signal
n_components, n_features = 512, 100
n_nonzero_coefs = 17
# generate the data
###################
# y = Xw
# |x|_0 = n_nonzero_coefs
y, X, w = make_sparse_coded_signal(n_samples=1,
n_components=n_components,
n_features=n_features,
n_nonzero_coefs=n_nonzero_coefs,
random_state=0)
idx, = w.nonzero()
# distort the clean signal
##########################
y_noisy = y + 0.05 * np.random.randn(len(y))
# plot the sparse signal
########################
plt.figure(figsize=(7, 7))
plt.subplot(4, 1, 1)
plt.xlim(0, 512)
plt.title("Sparse signal")
plt.stem(idx, w[idx])
# plot the noise-free reconstruction
####################################
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs)
omp.fit(X, y)
coef = omp.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 2)
plt.xlim(0, 512)
plt.title("Recovered signal from noise-free measurements")
plt.stem(idx_r, coef[idx_r])
# plot the noisy reconstruction
###############################
omp.fit(X, y_noisy)
coef = omp.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 3)
plt.xlim(0, 512)
plt.title("Recovered signal from noisy measurements")
plt.stem(idx_r, coef[idx_r])
# plot the noisy reconstruction with number of non-zeros set by CV
##################################################################
omp_cv = OrthogonalMatchingPursuitCV()
omp_cv.fit(X, y_noisy)
coef = omp_cv.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 4)
plt.xlim(0, 512)
plt.title("Recovered signal from noisy measurements with CV")
plt.stem(idx_r, coef[idx_r])
plt.subplots_adjust(0.06, 0.04, 0.94, 0.90, 0.20, 0.38)
plt.suptitle('Sparse signal recovery with Orthogonal Matching Pursuit',
fontsize=16)
plt.show()
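# Optional extension (illustrative, not part of the original example): compare
# the recovered support with the true support numerically instead of only
# plotting it. The helper name is made up for this sketch.
def support_recovery_rate(true_coef, estimated_coef):
    true_support = set(np.flatnonzero(true_coef))
    estimated_support = set(np.flatnonzero(estimated_coef))
    # Fraction of true nonzero positions that the estimator recovered.
    return len(true_support & estimated_support) / float(max(len(true_support), 1))

# e.g. support_recovery_rate(w, omp_cv.coef_)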
|
bsd-3-clause
|
stylianos-kampakis/scikit-learn
|
examples/svm/plot_oneclass.py
|
249
|
2302
|
"""
==========================================
One-class SVM with non-linear kernel (RBF)
==========================================
An example using a one-class SVM for novelty detection.
:ref:`One-class SVM <svm_outlier_detection>` is an unsupervised
algorithm that learns a decision function for novelty detection:
classifying new data as similar or different to the training set.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn import svm
xx, yy = np.meshgrid(np.linspace(-5, 5, 500), np.linspace(-5, 5, 500))
# Generate train data
X = 0.3 * np.random.randn(100, 2)
X_train = np.r_[X + 2, X - 2]
# Generate some regular novel observations
X = 0.3 * np.random.randn(20, 2)
X_test = np.r_[X + 2, X - 2]
# Generate some abnormal novel observations
X_outliers = np.random.uniform(low=-4, high=4, size=(20, 2))
# fit the model
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1)
clf.fit(X_train)
y_pred_train = clf.predict(X_train)
y_pred_test = clf.predict(X_test)
y_pred_outliers = clf.predict(X_outliers)
n_error_train = y_pred_train[y_pred_train == -1].size
n_error_test = y_pred_test[y_pred_test == -1].size
n_error_outliers = y_pred_outliers[y_pred_outliers == 1].size
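# For reference (a small sketch, not part of the original example): predict()
# returns +1 for points inside the learned frontier and -1 outside, so the
# counts above convert directly into error fractions; with nu=0.1 the training
# error fraction is expected to stay roughly around or below 0.1.
print("training error fraction: %.3f" % (n_error_train / float(len(X_train))))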
# plot the line, the points, and the nearest vectors to the plane
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.title("Novelty Detection")
plt.contourf(xx, yy, Z, levels=np.linspace(Z.min(), 0, 7), cmap=plt.cm.Blues_r)
a = plt.contour(xx, yy, Z, levels=[0], linewidths=2, colors='red')
plt.contourf(xx, yy, Z, levels=[0, Z.max()], colors='orange')
b1 = plt.scatter(X_train[:, 0], X_train[:, 1], c='white')
b2 = plt.scatter(X_test[:, 0], X_test[:, 1], c='green')
c = plt.scatter(X_outliers[:, 0], X_outliers[:, 1], c='red')
plt.axis('tight')
plt.xlim((-5, 5))
plt.ylim((-5, 5))
plt.legend([a.collections[0], b1, b2, c],
["learned frontier", "training observations",
"new regular observations", "new abnormal observations"],
loc="upper left",
prop=matplotlib.font_manager.FontProperties(size=11))
plt.xlabel(
"error train: %d/200 ; errors novel regular: %d/40 ; "
"errors novel abnormal: %d/40"
% (n_error_train, n_error_test, n_error_outliers))
plt.show()
|
bsd-3-clause
|
kjung/scikit-learn
|
examples/plot_multioutput_face_completion.py
|
330
|
3019
|
"""
==============================================
Face completion with multi-output estimators
==============================================
This example shows the use of multi-output estimators to complete images.
The goal is to predict the lower half of a face given its upper half.
The first column of images shows true faces. The next columns illustrate
how extremely randomized trees, k nearest neighbors, linear
regression and ridge regression complete the lower half of those faces.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.utils.validation import check_random_state
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import RidgeCV
# Load the faces datasets
data = fetch_olivetti_faces()
targets = data.target
data = data.images.reshape((len(data.images), -1))
train = data[targets < 30]
test = data[targets >= 30] # Test on independent people
# Test on a subset of people
n_faces = 5
rng = check_random_state(4)
face_ids = rng.randint(test.shape[0], size=(n_faces, ))
test = test[face_ids, :]
n_pixels = data.shape[1]
X_train = train[:, :int(np.ceil(0.5 * n_pixels))]  # Upper half of the faces
y_train = train[:, int(np.floor(0.5 * n_pixels)):]  # Lower half of the faces
X_test = test[:, :int(np.ceil(0.5 * n_pixels))]
y_test = test[:, int(np.floor(0.5 * n_pixels)):]
# Fit estimators
ESTIMATORS = {
"Extra trees": ExtraTreesRegressor(n_estimators=10, max_features=32,
random_state=0),
"K-nn": KNeighborsRegressor(),
"Linear regression": LinearRegression(),
"Ridge": RidgeCV(),
}
y_test_predict = dict()
for name, estimator in ESTIMATORS.items():
estimator.fit(X_train, y_train)
y_test_predict[name] = estimator.predict(X_test)
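# Optional quantitative comparison (a sketch added for illustration, not in the
# original example): mean squared error of each estimator's predicted lower
# halves on the selected test faces.
for est_name in sorted(ESTIMATORS):
    mse = np.mean((y_test_predict[est_name] - y_test) ** 2)
    print("%-18s MSE on lower halves: %.5f" % (est_name, mse))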
# Plot the completed faces
image_shape = (64, 64)
n_cols = 1 + len(ESTIMATORS)
plt.figure(figsize=(2. * n_cols, 2.26 * n_faces))
plt.suptitle("Face completion with multi-output estimators", size=16)
for i in range(n_faces):
true_face = np.hstack((X_test[i], y_test[i]))
if i:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 1)
else:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 1,
title="true faces")
sub.axis("off")
sub.imshow(true_face.reshape(image_shape),
cmap=plt.cm.gray,
interpolation="nearest")
for j, est in enumerate(sorted(ESTIMATORS)):
completed_face = np.hstack((X_test[i], y_test_predict[est][i]))
if i:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 2 + j)
else:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 2 + j,
title=est)
sub.axis("off")
sub.imshow(completed_face.reshape(image_shape),
cmap=plt.cm.gray,
interpolation="nearest")
plt.show()
|
bsd-3-clause
|
AlexandreAbraham/my_nilearn_sandbox
|
nilearn_sandbox/fsl_tools.py
|
1
|
1705
|
import os
import glob
from xml.etree import ElementTree
from sklearn.datasets.base import Bunch
import pandas
def list_fsl_atlases(path=None):
paths = []
if path is not None:
paths.append(path)
paths.extend(os.getenv('FSLDIR', '').split(':'))
paths.extend(os.getenv('FSL_DIR', '').split(':'))
paths.append('/usr/share/fsl')
paths = [os.path.join(p, 'data', 'atlases') for p in paths]
for path in paths:
if os.path.exists(path):
break
else:
raise ValueError('Could not locate FSL data path')
return sorted(glob.glob(os.path.join(path, '*.xml')))
def list_fsl_templates(path=None):
paths = []
if path is not None:
paths.append(path)
paths.extend(os.getenv('FSLDIR', '').split(':'))
paths.extend(os.getenv('FSL_DIR', '').split(':'))
paths.append('/usr/share/fsl')
paths = [os.path.join(p, 'data', 'standard') for p in paths]
for path in paths:
if os.path.exists(path):
break
else:
raise ValueError('Could not locate FSL data path')
return sorted(glob.glob(os.path.join(path, '*.nii.gz')))
def load_fsl_atlas(xml_path):
dirname = os.path.dirname(xml_path)
tree = ElementTree.parse(xml_path)
# Store path to images
images = [os.path.join(dirname, '.' + x.text)
for x in tree.findall('.//imagefile')]
# Labels
labels = [(x.get('index'), x.get('x'), x.get('y'), x.get('z'), x.text)
for x in tree.findall('.//label')]
labels = pandas.DataFrame(labels,
columns=['index', 'x', 'y', 'z', 'label'])
return Bunch(name=tree.find('.//name').text,
images=images, labels=labels)
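# Example usage (a sketch; it assumes a local FSL installation and simply
# reports the failure raised above otherwise):
if __name__ == '__main__':
    try:
        for xml_path in list_fsl_atlases():
            atlas = load_fsl_atlas(xml_path)
            print('%s: %d images, %d labels'
                  % (atlas.name, len(atlas.images), len(atlas.labels)))
    except ValueError as exc:
        print('Could not list FSL atlases: %s' % exc)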
|
bsd-3-clause
|
koldunovn/geopandas
|
tests/test_sjoin.py
|
6
|
3347
|
from __future__ import absolute_import
import tempfile
import shutil
import numpy as np
from shapely.geometry import Point
from geopandas import GeoDataFrame, read_file
from geopandas.tools import sjoin
from .util import unittest, download_nybb
class TestSpatialJoin(unittest.TestCase):
def setUp(self):
nybb_filename = download_nybb()
self.polydf = read_file('/nybb_14a_av/nybb.shp', vfs='zip://' + nybb_filename)
self.tempdir = tempfile.mkdtemp()
self.crs = {'init': 'epsg:4326'}
N = 20
b = [int(x) for x in self.polydf.total_bounds]
self.pointdf = GeoDataFrame([
{'geometry' : Point(x, y), 'pointattr1': x + y, 'pointattr2': x - y}
for x, y in zip(range(b[0], b[2], int((b[2]-b[0])/N)),
range(b[1], b[3], int((b[3]-b[1])/N)))], crs=self.crs)
def tearDown(self):
shutil.rmtree(self.tempdir)
def test_sjoin_left(self):
df = sjoin(self.pointdf, self.polydf, how='left')
self.assertEquals(df.shape, (21,8))
for i, row in df.iterrows():
self.assertEquals(row.geometry.type, 'Point')
self.assertTrue('pointattr1' in df.columns)
self.assertTrue('BoroCode' in df.columns)
def test_sjoin_right(self):
# the inverse of left
df = sjoin(self.pointdf, self.polydf, how="right")
df2 = sjoin(self.polydf, self.pointdf, how="left")
self.assertEquals(df.shape, (12, 8))
self.assertEquals(df.shape, df2.shape)
for i, row in df.iterrows():
self.assertEquals(row.geometry.type, 'MultiPolygon')
for i, row in df2.iterrows():
self.assertEquals(row.geometry.type, 'MultiPolygon')
def test_sjoin_inner(self):
df = sjoin(self.pointdf, self.polydf, how="inner")
self.assertEquals(df.shape, (11, 8))
def test_sjoin_op(self):
# points within polygons
df = sjoin(self.pointdf, self.polydf, how="left", op="within")
self.assertEquals(df.shape, (21,8))
self.assertAlmostEquals(df.ix[1]['Shape_Leng'], 330454.175933)
# points contain polygons? never happens so we should have nulls
df = sjoin(self.pointdf, self.polydf, how="left", op="contains")
self.assertEquals(df.shape, (21, 8))
self.assertTrue(np.isnan(df.ix[1]['Shape_Area']))
def test_sjoin_bad_op(self):
# AttributeError: 'Point' object has no attribute 'spandex'
self.assertRaises(ValueError, sjoin,
self.pointdf, self.polydf, how="left", op="spandex")
def test_sjoin_duplicate_column_name(self):
pointdf2 = self.pointdf.rename(columns={'pointattr1': 'Shape_Area'})
df = sjoin(pointdf2, self.polydf, how="left")
self.assertTrue('Shape_Area_left' in df.columns)
self.assertTrue('Shape_Area_right' in df.columns)
def test_sjoin_values(self):
# GH190
self.polydf.index = [1, 3, 4, 5, 6]
df = sjoin(self.pointdf, self.polydf, how='left')
self.assertEquals(df.shape, (21,8))
df = sjoin(self.polydf, self.pointdf, how='left')
self.assertEquals(df.shape, (12,8))
@unittest.skip("Not implemented")
def test_sjoin_outer(self):
df = sjoin(self.pointdf, self.polydf, how="outer")
self.assertEquals(df.shape, (21,8))
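# For reference (an illustrative sketch, not part of the test suite): outside a
# test, a typical spatial join call looks like
#     joined = sjoin(points_gdf, polygons_gdf, how="inner", op="within")
# which keeps only the points falling inside some polygon and appends that
# polygon's attributes to each matching point row, as the assertions above check.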
|
bsd-3-clause
|
Robbie1977/NRRDtools
|
PreProcess.py
|
1
|
1725
|
from numpy import unique, bincount, shape, min, max, sum, array, uint32, where, uint8, round
import nrrd, sys, os
from matplotlib.pyplot import imshow, show, figure
def AutoBalance(data,threshold=0.00035,background=0):
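    # Added for clarity: clip intensities to a robust [m, M] range found from
    # the cumulative histogram (using the per-slice `threshold`), then rescale
    # to 0-255. Returns (rescaled data, [m, M], [bins, histogram]).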
bins=unique(data)
binc=bincount(data.flat)
histogram=binc[binc>0]
del binc
if background in bins:
i = where(bins==background)
v = bins[i][0]
c = histogram[i][0]
th=int(((sum(histogram)-histogram[i][0])/shape(data)[2])*threshold)
else:
th=int((sum(histogram)/shape(data)[2])*threshold)
m=min(bins)
M=max(bins)
for x in range(1,shape(bins)[0]-1):
if sum(histogram[:x]) > th:
m = x-1
break
for x in range(shape(bins)[0]-1,0,-1):
if sum(histogram[x:]) > th:
M = x
break
data[data>M]=M
data[data<m]=m
dataA=round((data-m)*(255.0/(M-m)))
return (dataA, array([m, M], dtype=uint32), array([bins,histogram],dtype=uint32))
if (len(sys.argv) < 2):
print 'e.g. python PreProcess.py image.nrrd output.nrrd'
else:
fileName, fileExtension = os.path.splitext(sys.argv[1])
if fileExtension=='.nrrd':
data1, header1 = nrrd.read(sys.argv[1])
elif fileExtension=='.lsm':
        # TBA using pylsm
        pass  # placeholder until LSM reading is implemented
#data1, header1 = nrrd.read('/Volumes/Macintosh HD/Users/robertcourt/BTSync/usedtemplate.nrrd')
#data1r, values, hist = AutoBalance(data1)
#print values
#
#data1, header1 = nrrd.read('/Volumes/Macintosh HD/Users/robertcourt/BTSync/engrailed_MARCM1_Fz-PP_BG-aligned.nrrd')
#data1r, values, hist = AutoBalance(data1)
#
#print values
#
#im1=imshow(max(data1,axis=2))
#figure()
#im2=imshow(max(data1r,axis=2))
#show()
|
mit
|
shenzebang/scikit-learn
|
sklearn/tree/tree.py
|
113
|
34767
|
"""
This module gathers tree-based methods, including decision, regression and
randomized trees. Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <[email protected]>
# Peter Prettenhofer <[email protected]>
# Brian Holt <[email protected]>
# Noel Dawe <[email protected]>
# Satrajit Gosh <[email protected]>
# Joly Arnaud <[email protected]>
# Fares Hedayati <[email protected]>
#
# Licence: BSD 3 clause
from __future__ import division
import numbers
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from ..base import BaseEstimator, ClassifierMixin, RegressorMixin
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import check_array, check_random_state, compute_sample_weight
from ..utils.validation import NotFittedError
from ._tree import Criterion
from ._tree import Splitter
from ._tree import DepthFirstTreeBuilder, BestFirstTreeBuilder
from ._tree import Tree
from . import _tree
__all__ = ["DecisionTreeClassifier",
"DecisionTreeRegressor",
"ExtraTreeClassifier",
"ExtraTreeRegressor"]
# =============================================================================
# Types and constants
# =============================================================================
DTYPE = _tree.DTYPE
DOUBLE = _tree.DOUBLE
CRITERIA_CLF = {"gini": _tree.Gini, "entropy": _tree.Entropy}
CRITERIA_REG = {"mse": _tree.MSE, "friedman_mse": _tree.FriedmanMSE}
DENSE_SPLITTERS = {"best": _tree.BestSplitter,
"presort-best": _tree.PresortBestSplitter,
"random": _tree.RandomSplitter}
SPARSE_SPLITTERS = {"best": _tree.BestSparseSplitter,
"random": _tree.RandomSparseSplitter}
# =============================================================================
# Base decision tree
# =============================================================================
class BaseDecisionTree(six.with_metaclass(ABCMeta, BaseEstimator,
_LearntSelectorMixin)):
"""Base class for decision trees.
Warning: This class should not be used directly.
Use derived classes instead.
"""
@abstractmethod
def __init__(self,
criterion,
splitter,
max_depth,
min_samples_split,
min_samples_leaf,
min_weight_fraction_leaf,
max_features,
max_leaf_nodes,
random_state,
class_weight=None):
self.criterion = criterion
self.splitter = splitter
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.random_state = random_state
self.max_leaf_nodes = max_leaf_nodes
self.class_weight = class_weight
self.n_features_ = None
self.n_outputs_ = None
self.classes_ = None
self.n_classes_ = None
self.tree_ = None
self.max_features_ = None
def fit(self, X, y, sample_weight=None, check_input=True):
"""Build a decision tree from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (class labels in classification, real numbers in
regression). In the regression case, use ``dtype=np.float64`` and
``order='C'`` for maximum efficiency.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
check_input : boolean, (default=True)
            Allows bypassing several input checks.
            Don't use this parameter unless you know what you are doing.
Returns
-------
self : object
Returns self.
"""
random_state = check_random_state(self.random_state)
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csc")
if issparse(X):
X.sort_indices()
if X.indices.dtype != np.intc or X.indptr.dtype != np.intc:
raise ValueError("No support for np.int64 index based "
"sparse matrices")
# Determine output settings
n_samples, self.n_features_ = X.shape
is_classification = isinstance(self, ClassifierMixin)
y = np.atleast_1d(y)
expanded_class_weight = None
if y.ndim == 1:
            # reshape is necessary to preserve the data contiguity,
            # unlike [:, np.newaxis] which does not.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
if is_classification:
y = np.copy(y)
self.classes_ = []
self.n_classes_ = []
if self.class_weight is not None:
y_original = np.copy(y)
y_store_unique_indices = np.zeros(y.shape, dtype=np.int)
for k in range(self.n_outputs_):
classes_k, y_store_unique_indices[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
y = y_store_unique_indices
if self.class_weight is not None:
expanded_class_weight = compute_sample_weight(
self.class_weight, y_original)
else:
self.classes_ = [None] * self.n_outputs_
self.n_classes_ = [1] * self.n_outputs_
self.n_classes_ = np.array(self.n_classes_, dtype=np.intp)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
# Check parameters
max_depth = ((2 ** 31) - 1 if self.max_depth is None
else self.max_depth)
max_leaf_nodes = (-1 if self.max_leaf_nodes is None
else self.max_leaf_nodes)
if isinstance(self.max_features, six.string_types):
if self.max_features == "auto":
if is_classification:
max_features = max(1, int(np.sqrt(self.n_features_)))
else:
max_features = self.n_features_
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features_)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features_)))
else:
raise ValueError(
'Invalid value for max_features. Allowed string '
'values are "auto", "sqrt" or "log2".')
elif self.max_features is None:
max_features = self.n_features_
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
if self.max_features > 0.0:
max_features = max(1, int(self.max_features * self.n_features_))
else:
max_features = 0
self.max_features_ = max_features
if len(y) != n_samples:
raise ValueError("Number of labels=%d does not match "
"number of samples=%d" % (len(y), n_samples))
if self.min_samples_split <= 0:
raise ValueError("min_samples_split must be greater than zero.")
if self.min_samples_leaf <= 0:
raise ValueError("min_samples_leaf must be greater than zero.")
if not 0 <= self.min_weight_fraction_leaf <= 0.5:
raise ValueError("min_weight_fraction_leaf must in [0, 0.5]")
if max_depth <= 0:
raise ValueError("max_depth must be greater than zero. ")
if not (0 < max_features <= self.n_features_):
raise ValueError("max_features must be in (0, n_features]")
if not isinstance(max_leaf_nodes, (numbers.Integral, np.integer)):
raise ValueError("max_leaf_nodes must be integral number but was "
"%r" % max_leaf_nodes)
if -1 < max_leaf_nodes < 2:
raise ValueError(("max_leaf_nodes {0} must be either smaller than "
"0 or larger than 1").format(max_leaf_nodes))
if sample_weight is not None:
if (getattr(sample_weight, "dtype", None) != DOUBLE or
not sample_weight.flags.contiguous):
sample_weight = np.ascontiguousarray(
sample_weight, dtype=DOUBLE)
if len(sample_weight.shape) > 1:
raise ValueError("Sample weights array has more "
"than one dimension: %d" %
len(sample_weight.shape))
if len(sample_weight) != n_samples:
raise ValueError("Number of weights=%d does not match "
"number of samples=%d" %
(len(sample_weight), n_samples))
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
# Set min_weight_leaf from min_weight_fraction_leaf
if self.min_weight_fraction_leaf != 0. and sample_weight is not None:
min_weight_leaf = (self.min_weight_fraction_leaf *
np.sum(sample_weight))
else:
min_weight_leaf = 0.
# Set min_samples_split sensibly
min_samples_split = max(self.min_samples_split,
2 * self.min_samples_leaf)
# Build tree
criterion = self.criterion
if not isinstance(criterion, Criterion):
if is_classification:
criterion = CRITERIA_CLF[self.criterion](self.n_outputs_,
self.n_classes_)
else:
criterion = CRITERIA_REG[self.criterion](self.n_outputs_)
SPLITTERS = SPARSE_SPLITTERS if issparse(X) else DENSE_SPLITTERS
splitter = self.splitter
if not isinstance(self.splitter, Splitter):
splitter = SPLITTERS[self.splitter](criterion,
self.max_features_,
self.min_samples_leaf,
min_weight_leaf,
random_state)
self.tree_ = Tree(self.n_features_, self.n_classes_, self.n_outputs_)
# Use BestFirst if max_leaf_nodes given; use DepthFirst otherwise
if max_leaf_nodes < 0:
builder = DepthFirstTreeBuilder(splitter, min_samples_split,
self.min_samples_leaf,
min_weight_leaf,
max_depth)
else:
builder = BestFirstTreeBuilder(splitter, min_samples_split,
self.min_samples_leaf,
min_weight_leaf,
max_depth,
max_leaf_nodes)
builder.build(self.tree_, X, y, sample_weight)
if self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
def _validate_X_predict(self, X, check_input):
"""Validate X whenever one tries to predict, apply, predict_proba"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, "
"call `fit` before exploiting the model.")
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csr")
if issparse(X) and (X.indices.dtype != np.intc or
X.indptr.dtype != np.intc):
raise ValueError("No support for np.int64 index based "
"sparse matrices")
n_features = X.shape[1]
if self.n_features_ != n_features:
raise ValueError("Number of features of the model must "
" match the input. Model n_features is %s and "
" input n_features is %s "
% (self.n_features_, n_features))
return X
def predict(self, X, check_input=True):
"""Predict class or regression value for X.
For a classification model, the predicted class for each sample in X is
returned. For a regression model, the predicted value based on X is
returned.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
            Allows bypassing several input checks.
            Don't use this parameter unless you know what you are doing.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes, or the predict values.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
n_samples = X.shape[0]
# Classification
if isinstance(self, ClassifierMixin):
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
predictions = np.zeros((n_samples, self.n_outputs_))
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(
np.argmax(proba[:, k], axis=1),
axis=0)
return predictions
# Regression
else:
if self.n_outputs_ == 1:
return proba[:, 0]
else:
return proba[:, :, 0]
def apply(self, X, check_input=True):
"""
Returns the index of the leaf that each sample is predicted as.
Parameters
----------
X : array_like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
            Allows bypassing several input checks.
            Don't use this parameter unless you know what you are doing.
Returns
-------
X_leaves : array_like, shape = [n_samples,]
For each datapoint x in X, return the index of the leaf x
ends up in. Leaves are numbered within
``[0; self.tree_.node_count)``, possibly with gaps in the
numbering.
"""
X = self._validate_X_predict(X, check_input)
return self.tree_.apply(X)
@property
def feature_importances_(self):
"""Return the feature importances.
The importance of a feature is computed as the (normalized) total
reduction of the criterion brought by that feature.
It is also known as the Gini importance.
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, call `fit` before"
" `feature_importances_`.")
return self.tree_.compute_feature_importances()
# =============================================================================
# Public estimators
# =============================================================================
class DecisionTreeClassifier(BaseDecisionTree, ClassifierMixin):
"""A decision tree classifier.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : int, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
class_weight : dict, list of dicts, "balanced" or None, optional
(default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem),
or a list of arrays of class labels (multi-output problem).
feature_importances_ : array of shape = [n_features]
The feature importances. The higher, the more important the
feature. The importance of a feature is computed as the (normalized)
total reduction of the criterion brought by that feature. It is also
known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_classes_ : int or list
The number of classes (for single output problems),
or a list containing the number of classes for each
output (for multi-output problems).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
DecisionTreeRegressor
References
----------
.. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.cross_validation import cross_val_score
>>> from sklearn.tree import DecisionTreeClassifier
>>> clf = DecisionTreeClassifier(random_state=0)
>>> iris = load_iris()
>>> cross_val_score(clf, iris.data, iris.target, cv=10)
... # doctest: +SKIP
...
array([ 1. , 0.93..., 0.86..., 0.93..., 0.93...,
0.93..., 0.93..., 1. , 0.93..., 1. ])
"""
def __init__(self,
criterion="gini",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None,
class_weight=None):
super(DecisionTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
random_state=random_state)
def predict_proba(self, X, check_input=True):
"""Predict class probabilities of the input samples X.
The predicted class probability is the fraction of samples of the same
class in a leaf.
        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.
        check_input : boolean, (default=True)
            Allows bypassing several input checks.
            Don't use this parameter unless you know what you are doing.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
if self.n_outputs_ == 1:
proba = proba[:, :self.n_classes_]
normalizer = proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba /= normalizer
return proba
else:
all_proba = []
for k in range(self.n_outputs_):
proba_k = proba[:, k, :self.n_classes_[k]]
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
all_proba.append(proba_k)
return all_proba
def predict_log_proba(self, X):
"""Predict class log-probabilities of the input samples X.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
class DecisionTreeRegressor(BaseDecisionTree, RegressorMixin):
"""A decision tree regressor.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="mse")
The function to measure the quality of a split. The only supported
criterion is "mse" for the mean squared error, which is equal to
variance reduction as feature selection criterion.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : int, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
feature_importances_ : array of shape = [n_features]
The feature importances.
The higher, the more important the feature.
The importance of a feature is computed as the
(normalized) total reduction of the criterion brought
by that feature. It is also known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
DecisionTreeClassifier
References
----------
.. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_boston
>>> from sklearn.cross_validation import cross_val_score
>>> from sklearn.tree import DecisionTreeRegressor
>>> boston = load_boston()
>>> regressor = DecisionTreeRegressor(random_state=0)
>>> cross_val_score(regressor, boston.data, boston.target, cv=10)
... # doctest: +SKIP
...
array([ 0.61..., 0.57..., -0.34..., 0.41..., 0.75...,
0.07..., 0.29..., 0.33..., -1.42..., -1.77...])
"""
def __init__(self,
criterion="mse",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None):
super(DecisionTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state)
class ExtraTreeClassifier(DecisionTreeClassifier):
"""An extremely randomized tree classifier.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
    `max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
See also
--------
ExtraTreeRegressor, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="gini",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
max_leaf_nodes=None,
class_weight=None):
super(ExtraTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
random_state=random_state)
class ExtraTreeRegressor(DecisionTreeRegressor):
"""An extremely randomized tree regressor.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
    `max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
See also
--------
ExtraTreeClassifier, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="mse",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
max_leaf_nodes=None):
super(ExtraTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state)
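# Illustrative usage (a sketch added for this document, not part of the module;
# runnable via ``python -m sklearn.tree.tree``): the classifier API documented
# above in action.
if __name__ == "__main__":
    from sklearn.datasets import load_iris
    iris = load_iris()
    clf = DecisionTreeClassifier(max_depth=3, random_state=0)
    clf.fit(iris.data, iris.target)
    print("per-class probabilities: %s" % clf.predict_proba(iris.data[:1]))
    print("feature importances:     %s" % clf.feature_importances_)
    print("leaf index of sample 0:  %s" % clf.apply(iris.data[:1]))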
|
bsd-3-clause
|
Winand/pandas
|
pandas/core/accessor.py
|
5
|
4242
|
# -*- coding: utf-8 -*-
"""
accessor.py contains base classes for implementing accessor properties
that can be mixed into or pinned onto other pandas classes.
"""
from pandas.core.common import AbstractMethodError
class DirNamesMixin(object):
_accessors = frozenset([])
_deprecations = frozenset([])
def _dir_deletions(self):
""" delete unwanted __dir__ for this object """
return self._accessors | self._deprecations
def _dir_additions(self):
""" add addtional __dir__ for this object """
rv = set()
for accessor in self._accessors:
try:
getattr(self, accessor)
rv.add(accessor)
except AttributeError:
pass
return rv
def __dir__(self):
"""
        Provide method name lookup and completion.
        Only provide 'public' methods.
"""
rv = set(dir(type(self)))
rv = (rv - self._dir_deletions()) | self._dir_additions()
return sorted(rv)
class AccessorProperty(object):
"""Descriptor for implementing accessor properties like Series.str
"""
def __init__(self, accessor_cls, construct_accessor=None):
self.accessor_cls = accessor_cls
self.construct_accessor = (construct_accessor or
accessor_cls._make_accessor)
self.__doc__ = accessor_cls.__doc__
def __get__(self, instance, owner=None):
if instance is None:
# this ensures that Series.str.<method> is well defined
return self.accessor_cls
return self.construct_accessor(instance)
def __set__(self, instance, value):
raise AttributeError("can't set attribute")
def __delete__(self, instance):
raise AttributeError("can't delete attribute")
class PandasDelegate(object):
""" an abstract base class for delegating methods/properties """
@classmethod
def _make_accessor(cls, data):
raise AbstractMethodError("_make_accessor should be implemented"
"by subclass and return an instance"
"of `cls`.")
def _delegate_property_get(self, name, *args, **kwargs):
raise TypeError("You cannot access the "
"property {name}".format(name=name))
def _delegate_property_set(self, name, value, *args, **kwargs):
raise TypeError("The property {name} cannot be set".format(name=name))
def _delegate_method(self, name, *args, **kwargs):
raise TypeError("You cannot call method {name}".format(name=name))
@classmethod
def _add_delegate_accessors(cls, delegate, accessors, typ,
overwrite=False):
"""
add accessors to cls from the delegate class
Parameters
----------
cls : the class to add the methods/properties to
delegate : the class to get methods/properties & doc-strings
        accessors : string list of accessors to add
typ : 'property' or 'method'
overwrite : boolean, default False
overwrite the method/property in the target class if it exists
"""
def _create_delegator_property(name):
def _getter(self):
return self._delegate_property_get(name)
def _setter(self, new_values):
return self._delegate_property_set(name, new_values)
_getter.__name__ = name
_setter.__name__ = name
return property(fget=_getter, fset=_setter,
doc=getattr(delegate, name).__doc__)
def _create_delegator_method(name):
def f(self, *args, **kwargs):
return self._delegate_method(name, *args, **kwargs)
f.__name__ = name
f.__doc__ = getattr(delegate, name).__doc__
return f
for name in accessors:
if typ == 'property':
f = _create_delegator_property(name)
else:
f = _create_delegator_method(name)
# don't overwrite existing methods/properties
if overwrite or not hasattr(cls, name):
setattr(cls, name, f)
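# Illustrative sketch (not part of pandas; the names below are invented): a
# toy accessor wired up through AccessorProperty and DirNamesMixin, runnable
# when this file is executed directly.
if __name__ == "__main__":
    class ExampleAccessor(object):
        """Toy accessor exposing one helper method."""
        def __init__(self, obj):
            self._obj = obj
        @classmethod
        def _make_accessor(cls, data):
            return cls(data)
        def describe(self):
            return "accessor attached to %r" % type(self._obj).__name__
    class ExampleHolder(DirNamesMixin):
        """Toy host class; ``.example`` resolves through AccessorProperty."""
        _accessors = frozenset(['example'])
        example = AccessorProperty(ExampleAccessor)
    holder = ExampleHolder()
    print(holder.example.describe())  # accessor attached to 'ExampleHolder'
    print('example' in dir(holder))   # True: the accessor survives the mixin's filtering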
|
bsd-3-clause
|
untom/keras
|
examples/kaggle_otto_nn.py
|
70
|
3775
|
from __future__ import absolute_import
from __future__ import print_function
import numpy as np
import pandas as pd
np.random.seed(1337) # for reproducibility
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.layers.normalization import BatchNormalization
from keras.layers.advanced_activations import PReLU
from keras.utils import np_utils, generic_utils
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
'''
This demonstrates how to reach a score of 0.4890 (local validation)
on the Kaggle Otto challenge, with a deep net using Keras.
Compatible Python 2.7-3.4. Requires Scikit-Learn and Pandas.
Recommended to run on GPU:
Command: THEANO_FLAGS=mode=FAST_RUN,device=gpu,floatX=float32 python kaggle_otto_nn.py
On EC2 g2.2xlarge instance: 19s/epoch. 6-7 minutes total training time.
Best validation score at epoch 21: 0.4881
Try it at home:
- with/without BatchNormalization (BatchNormalization helps!)
- with ReLU or with PReLU (PReLU helps!)
- with smaller layers, larger layers
- with more layers, fewer layers
- with different optimizers (SGD+momentum+decay is probably better than Adam!)
Get the data from Kaggle: https://www.kaggle.com/c/otto-group-product-classification-challenge/data
'''
def load_data(path, train=True):
df = pd.read_csv(path)
X = df.values.copy()
if train:
np.random.shuffle(X) # https://youtu.be/uyUXoap67N8
X, labels = X[:, 1:-1].astype(np.float32), X[:, -1]
return X, labels
else:
X, ids = X[:, 1:].astype(np.float32), X[:, 0].astype(str)
return X, ids
def preprocess_data(X, scaler=None):
if not scaler:
scaler = StandardScaler()
scaler.fit(X)
X = scaler.transform(X)
return X, scaler
def preprocess_labels(labels, encoder=None, categorical=True):
if not encoder:
encoder = LabelEncoder()
encoder.fit(labels)
y = encoder.transform(labels).astype(np.int32)
if categorical:
y = np_utils.to_categorical(y)
return y, encoder
def make_submission(y_prob, ids, encoder, fname):
with open(fname, 'w') as f:
f.write('id,')
f.write(','.join([str(i) for i in encoder.classes_]))
f.write('\n')
for i, probs in zip(ids, y_prob):
probas = ','.join([i] + [str(p) for p in probs.tolist()])
f.write(probas)
f.write('\n')
print("Wrote submission to file {}.".format(fname))
print("Loading data...")
X, labels = load_data('train.csv', train=True)
X, scaler = preprocess_data(X)
y, encoder = preprocess_labels(labels)
X_test, ids = load_data('test.csv', train=False)
X_test, _ = preprocess_data(X_test, scaler)
nb_classes = y.shape[1]
print(nb_classes, 'classes')
dims = X.shape[1]
print(dims, 'dims')
print("Building model...")
model = Sequential()
model.add(Dense(dims, 512, init='glorot_uniform'))
model.add(PReLU((512,)))
model.add(BatchNormalization((512,)))
model.add(Dropout(0.5))
model.add(Dense(512, 512, init='glorot_uniform'))
model.add(PReLU((512,)))
model.add(BatchNormalization((512,)))
model.add(Dropout(0.5))
model.add(Dense(512, 512, init='glorot_uniform'))
model.add(PReLU((512,)))
model.add(BatchNormalization((512,)))
model.add(Dropout(0.5))
model.add(Dense(512, nb_classes, init='glorot_uniform'))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer="adam")
print("Training model...")
model.fit(X, y, nb_epoch=20, batch_size=128, validation_split=0.15)
print("Generating submission...")
proba = model.predict_proba(X_test)
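# Sanity check (added for illustration, not in the original script): the softmax
# output should give per-row probabilities that sum to ~1.
assert np.allclose(proba.sum(axis=1), 1.0, atol=1e-3)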
make_submission(proba, ids, encoder, fname='keras-otto.csv')
|
mit
|
youssef-emad/shogun
|
examples/undocumented/python_modular/graphical/so_multiclass_BMRM.py
|
10
|
2835
|
#!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
from modshogun import RealFeatures
from modshogun import MulticlassModel, MulticlassSOLabels, RealNumber, DualLibQPBMSOSVM
from modshogun import BMRM, PPBMRM, P3BMRM
from modshogun import StructuredAccuracy
def fill_data(cnt, minv, maxv):
x1 = np.linspace(minv, maxv, cnt)
a, b = np.meshgrid(x1, x1)
X = np.array((np.ravel(a), np.ravel(b)))
y = np.zeros((1, cnt*cnt))
tmp = cnt*cnt;
y[0, tmp/3:(tmp/3)*2]=1
y[0, tmp/3*2:(tmp/3)*3]=2
return X, y.flatten()
def gen_data():
covs = np.array([[[0., -1. ], [2.5, .7]],
[[3., -1.5], [1.2, .3]],
[[ 2, 0 ], [ .0, 1.5 ]]])
X = np.r_[np.dot(np.random.randn(N, dim), covs[0]) + np.array([0, 10]),
np.dot(np.random.randn(N, dim), covs[1]) + np.array([-10, -10]),
np.dot(np.random.randn(N, dim), covs[2]) + np.array([10, -10])];
Y = np.hstack((np.zeros(N), np.ones(N), 2*np.ones(N)))
return X, Y
def get_so_labels(out):
N = out.get_num_labels()
l = np.zeros(N)
for i in xrange(N):
l[i] = RealNumber.obtain_from_generic(out.get_label(i)).value
return l
# Number of classes
M = 3
# Number of samples of each class
N = 1000
# Dimension of the data
dim = 2
X, y = gen_data()
cnt = 250
X2, y2 = fill_data(cnt, np.min(X), np.max(X))
labels = MulticlassSOLabels(y)
features = RealFeatures(X.T)
model = MulticlassModel(features, labels)
lambda_ = 1e1
sosvm = DualLibQPBMSOSVM(model, labels, lambda_)
sosvm.set_cleanAfter(10) # number of iterations that cutting plane has to be inactive for to be removed
sosvm.set_cleanICP(True) # enables inactive cutting plane removal feature
sosvm.set_TolRel(0.001) # set relative tolerance
sosvm.set_verbose(True) # enables verbosity of the solver
sosvm.set_cp_models(16) # set number of cutting plane models
sosvm.set_solver(BMRM) # select training algorithm
#sosvm.set_solver(PPBMRM)
#sosvm.set_solver(P3BMRM)
sosvm.train()
res = sosvm.get_result()
Fps = np.array(res.get_hist_Fp_vector())
Fds = np.array(res.get_hist_Fd_vector())
wdists = np.array(res.get_hist_wdist_vector())
plt.figure()
plt.subplot(221)
plt.title('Fp and Fd history')
plt.plot(xrange(res.get_n_iters()), Fps, hold=True)
plt.plot(xrange(res.get_n_iters()), Fds, hold=True)
plt.subplot(222)
plt.title('w dist history')
plt.plot(xrange(res.get_n_iters()), wdists)
# Evaluation
out = sosvm.apply()
Evaluation = StructuredAccuracy()
acc = Evaluation.evaluate(out, labels)
print "Correct classification rate: %0.4f%%" % ( 100.0*acc )
# show figure
Z = get_so_labels(sosvm.apply(RealFeatures(X2)))
x = (X2[0,:]).reshape(cnt, cnt)
y = (X2[1,:]).reshape(cnt, cnt)
z = Z.reshape(cnt, cnt)
plt.subplot(223)
plt.pcolor(x, y, z)
plt.contour(x, y, z, linewidths=1, colors='black', hold=True)
plt.plot(X[:,0], X[:,1], 'yo')
plt.axis('tight')
plt.title('Classification')
plt.show()
|
gpl-3.0
|
sanketloke/scikit-learn
|
sklearn/utils/extmath.py
|
16
|
26642
|
"""
Extended math utilities.
"""
# Authors: Gael Varoquaux
# Alexandre Gramfort
# Alexandre T. Passos
# Olivier Grisel
# Lars Buitinck
# Stefan van der Walt
# Kyle Kastner
# Giorgio Patrini
# License: BSD 3 clause
from __future__ import division
from functools import partial
import warnings
import numpy as np
from scipy import linalg
from scipy.sparse import issparse, csr_matrix
from . import check_random_state
from .fixes import np_version
from ._logistic_sigmoid import _log_logistic_sigmoid
from ..externals.six.moves import xrange
from .sparsefuncs_fast import csr_row_norms
from .validation import check_array
from ..exceptions import NonBLASDotWarning
def norm(x):
"""Compute the Euclidean or Frobenius norm of x.
Returns the Euclidean norm when x is a vector, the Frobenius norm when x
is a matrix (2-d array). More precise than sqrt(squared_norm(x)).
"""
x = np.asarray(x)
nrm2, = linalg.get_blas_funcs(['nrm2'], [x])
return nrm2(x)
# Newer NumPy has a ravel that needs less copying.
if np_version < (1, 7, 1):
_ravel = np.ravel
else:
_ravel = partial(np.ravel, order='K')
def squared_norm(x):
"""Squared Euclidean or Frobenius norm of x.
Returns the Euclidean norm when x is a vector, the Frobenius norm when x
is a matrix (2-d array). Faster than norm(x) ** 2.
"""
x = _ravel(x)
return np.dot(x, x)
def row_norms(X, squared=False):
"""Row-wise (squared) Euclidean norm of X.
Equivalent to np.sqrt((X * X).sum(axis=1)), but also supports sparse
matrices and does not create an X.shape-sized temporary.
Performs no input validation.
"""
if issparse(X):
if not isinstance(X, csr_matrix):
X = csr_matrix(X)
norms = csr_row_norms(X)
else:
norms = np.einsum('ij,ij->i', X, X)
if not squared:
np.sqrt(norms, norms)
return norms
def fast_logdet(A):
"""Compute log(det(A)) for A symmetric
    Equivalent to np.log(np.linalg.det(A)) but more robust.
    It returns -Inf if det(A) is non-positive or is not defined.
"""
sign, ld = np.linalg.slogdet(A)
if not sign > 0:
return -np.inf
return ld
def _impose_f_order(X):
"""Helper Function"""
# important to access flags instead of calling np.isfortran,
# this catches corner cases.
if X.flags.c_contiguous:
return check_array(X.T, copy=False, order='F'), True
else:
return check_array(X, copy=False, order='F'), False
def _fast_dot(A, B):
if B.shape[0] != A.shape[A.ndim - 1]: # check adopted from '_dotblas.c'
raise ValueError
if A.dtype != B.dtype or any(x.dtype not in (np.float32, np.float64)
for x in [A, B]):
warnings.warn('Falling back to np.dot. '
'Data must be of same type of either '
'32 or 64 bit float for the BLAS function, gemm, to be '
'used for an efficient dot operation. ',
NonBLASDotWarning)
raise ValueError
if min(A.shape) == 1 or min(B.shape) == 1 or A.ndim != 2 or B.ndim != 2:
raise ValueError
# scipy 0.9 compliant API
dot = linalg.get_blas_funcs(['gemm'], (A, B))[0]
A, trans_a = _impose_f_order(A)
B, trans_b = _impose_f_order(B)
return dot(alpha=1.0, a=A, b=B, trans_a=trans_a, trans_b=trans_b)
def _have_blas_gemm():
try:
linalg.get_blas_funcs(['gemm'])
return True
except (AttributeError, ValueError):
warnings.warn('Could not import BLAS, falling back to np.dot')
return False
# Only use fast_dot for older NumPy; newer ones have tackled the speed issue.
if np_version < (1, 7, 2) and _have_blas_gemm():
def fast_dot(A, B):
"""Compute fast dot products directly calling BLAS.
        This function calls BLAS directly while ensuring Fortran contiguity.
        This helps avoid extra copies that `np.dot` would have created.
For details see section `Linear Algebra on large Arrays`:
http://wiki.scipy.org/PerformanceTips
Parameters
----------
A, B: instance of np.ndarray
Input arrays. Arrays are supposed to be of the same dtype and to
have exactly 2 dimensions. Currently only floats are supported.
In case these requirements aren't met np.dot(A, B) is returned
instead. To activate the related warning issued in this case
execute the following lines of code:
>> import warnings
>> from sklearn.exceptions import NonBLASDotWarning
>> warnings.simplefilter('always', NonBLASDotWarning)
"""
try:
return _fast_dot(A, B)
except ValueError:
# Maltyped or malformed data.
return np.dot(A, B)
else:
fast_dot = np.dot
def density(w, **kwargs):
"""Compute density of a sparse vector
Return a value between 0 and 1
"""
if hasattr(w, "toarray"):
d = float(w.nnz) / (w.shape[0] * w.shape[1])
else:
d = 0 if w is None else float((w != 0).sum()) / w.size
return d
def safe_sparse_dot(a, b, dense_output=False):
"""Dot product that handle the sparse matrix case correctly
Uses BLAS GEMM as replacement for numpy.dot where possible
to avoid unnecessary copies.
"""
if issparse(a) or issparse(b):
ret = a * b
if dense_output and hasattr(ret, "toarray"):
ret = ret.toarray()
return ret
else:
return fast_dot(a, b)
def randomized_range_finder(A, size, n_iter,
power_iteration_normalizer='auto',
random_state=None):
"""Computes an orthonormal matrix whose range approximates the range of A.
Parameters
----------
A: 2D array
The input data matrix
size: integer
Size of the return array
n_iter: integer
Number of power iterations used to stabilize the result
power_iteration_normalizer: 'auto' (default), 'QR', 'LU', 'none'
Whether the power iterations are normalized with step-by-step
QR factorization (the slowest but most accurate), 'none'
(the fastest but numerically unstable when `n_iter` is large, e.g.
typically 5 or larger), or 'LU' factorization (numerically stable
but can lose slightly in accuracy). The 'auto' mode applies no
normalization if `n_iter`<=2 and switches to LU otherwise.
.. versionadded:: 0.18
random_state: RandomState or an int seed (0 by default)
A random number generator instance
Returns
-------
    Q: 2D array
        An (A.shape[0] x size) matrix with orthonormal columns, the range of
        which approximates well the range of the input matrix A.
Notes
-----
Follows Algorithm 4.3 of
Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
    Halko, et al., 2009 (arXiv:0909.4061) http://arxiv.org/pdf/0909.4061
An implementation of a randomized algorithm for principal component
analysis
A. Szlam et al. 2014
"""
random_state = check_random_state(random_state)
# Generating normal random vectors with shape: (A.shape[1], size)
Q = random_state.normal(size=(A.shape[1], size))
# Deal with "auto" mode
if power_iteration_normalizer == 'auto':
if n_iter <= 2:
power_iteration_normalizer = 'none'
else:
power_iteration_normalizer = 'LU'
# Perform power iterations with Q to further 'imprint' the top
# singular vectors of A in Q
for i in range(n_iter):
if power_iteration_normalizer == 'none':
Q = safe_sparse_dot(A, Q)
Q = safe_sparse_dot(A.T, Q)
elif power_iteration_normalizer == 'LU':
Q, _ = linalg.lu(safe_sparse_dot(A, Q), permute_l=True)
Q, _ = linalg.lu(safe_sparse_dot(A.T, Q), permute_l=True)
elif power_iteration_normalizer == 'QR':
Q, _ = linalg.qr(safe_sparse_dot(A, Q), mode='economic')
Q, _ = linalg.qr(safe_sparse_dot(A.T, Q), mode='economic')
    # Sample the range of A by linear projection of Q
# Extract an orthonormal basis
Q, _ = linalg.qr(safe_sparse_dot(A, Q), mode='economic')
return Q
def randomized_svd(M, n_components, n_oversamples=10, n_iter=None,
power_iteration_normalizer='auto', transpose='auto',
flip_sign=True, random_state=0):
"""Computes a truncated randomized SVD
Parameters
----------
M: ndarray or sparse matrix
Matrix to decompose
n_components: int
Number of singular values and vectors to extract.
n_oversamples: int (default is 10)
Additional number of random vectors to sample the range of M so as
to ensure proper conditioning. The total number of random vectors
used to find the range of M is n_components + n_oversamples. Smaller
number can improve speed but can negatively impact the quality of
approximation of singular vectors and singular values.
n_iter: int (default is 4)
Number of power iterations. It can be used to deal with very noisy
problems. When `n_components` is small (< .1 * min(X.shape)) `n_iter`
is set to 7, unless the user specifies a higher number. This improves
precision with few components.
.. versionchanged:: 0.18
power_iteration_normalizer: 'auto' (default), 'QR', 'LU', 'none'
Whether the power iterations are normalized with step-by-step
QR factorization (the slowest but most accurate), 'none'
(the fastest but numerically unstable when `n_iter` is large, e.g.
typically 5 or larger), or 'LU' factorization (numerically stable
but can lose slightly in accuracy). The 'auto' mode applies no
normalization if `n_iter`<=2 and switches to LU otherwise.
.. versionadded:: 0.18
transpose: True, False or 'auto' (default)
Whether the algorithm should be applied to M.T instead of M. The
result should approximately be the same. The 'auto' mode will
trigger the transposition if M.shape[1] > M.shape[0] since this
        implementation of randomized SVD tends to be a little faster in that
case.
.. versionchanged:: 0.18
flip_sign: boolean, (True by default)
The output of a singular value decomposition is only unique up to a
permutation of the signs of the singular vectors. If `flip_sign` is
set to `True`, the sign ambiguity is resolved by making the largest
loadings for each component in the left singular vectors positive.
random_state: RandomState or an int seed (0 by default)
        A random number generator instance to make behavior deterministic.
Notes
-----
This algorithm finds a (usually very good) approximate truncated
singular value decomposition using randomization to speed up the
computations. It is particularly fast on large matrices on which
you wish to extract only a small number of components. In order to
obtain further speed up, `n_iter` can be set <=2 (at the cost of
loss of precision).
References
----------
* Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
Halko, et al., 2009 http://arxiv.org/abs/arXiv:0909.4061
* A randomized algorithm for the decomposition of matrices
Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert
* An implementation of a randomized algorithm for principal component
analysis
A. Szlam et al. 2014
"""
random_state = check_random_state(random_state)
n_random = n_components + n_oversamples
n_samples, n_features = M.shape
if n_iter is None:
        # Checks if the number of iterations is explicitly specified
n_iter = 4
n_iter_specified = False
else:
n_iter_specified = True
if transpose == 'auto':
transpose = n_samples < n_features
if transpose:
# this implementation is a bit faster with smaller shape[1]
M = M.T
    # Adjust n_iter. 7 was found to be a good compromise for PCA. See #5299
if n_components < .1 * min(M.shape) and n_iter < 7:
if n_iter_specified:
warnings.warn("The number of power iterations is increased to "
"7 to achieve higher precision.")
n_iter = 7
Q = randomized_range_finder(M, n_random, n_iter,
power_iteration_normalizer, random_state)
# project M to the (k + p) dimensional space using the basis vectors
B = safe_sparse_dot(Q.T, M)
# compute the SVD on the thin matrix: (k + p) wide
Uhat, s, V = linalg.svd(B, full_matrices=False)
del B
U = np.dot(Q, Uhat)
if flip_sign:
if not transpose:
U, V = svd_flip(U, V)
else:
            # In the transposed case, use u_based_decision=False
            # to actually flip based on u and not v.
U, V = svd_flip(U, V, u_based_decision=False)
if transpose:
# transpose back the results according to the input convention
return V[:n_components, :].T, s[:n_components], U[:, :n_components].T
else:
return U[:, :n_components], s[:n_components], V[:n_components, :]
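# Minimal usage sketch for randomized_svd (the helper name and the toy
# low-rank matrix are illustrative assumptions): on a rank-10 matrix the
# leading singular values returned by the randomized solver should closely
# match those of an exact SVD.
def _randomized_svd_sketch():
    import numpy as np
    rng = np.random.RandomState(0)
    M = np.dot(rng.randn(200, 10), rng.randn(10, 150))  # rank-10 matrix
    U, s, V = randomized_svd(M, n_components=10, n_iter=7, random_state=0)
    s_exact = np.linalg.svd(M, compute_uv=False)[:10]
    # Largest deviation between approximate and exact singular values
    return np.max(np.abs(s - s_exact))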
def logsumexp(arr, axis=0):
"""Computes the sum of arr assuming arr is in the log domain.
Returns log(sum(exp(arr))) while minimizing the possibility of
over/underflow.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.extmath import logsumexp
>>> a = np.arange(10)
>>> np.log(np.sum(np.exp(a)))
9.4586297444267107
>>> logsumexp(a)
9.4586297444267107
"""
arr = np.rollaxis(arr, axis)
# Use the max to normalize, as with the log this is what accumulates
    # the least error
vmax = arr.max(axis=0)
out = np.log(np.sum(np.exp(arr - vmax), axis=0))
out += vmax
return out
def weighted_mode(a, w, axis=0):
"""Returns an array of the weighted modal (most common) value in a
If there is more than one such value, only the first is returned.
The bin-count for the modal bins is also returned.
This is an extension of the algorithm in scipy.stats.mode.
Parameters
----------
a : array_like
n-dimensional array of which to find mode(s).
w : array_like
n-dimensional array of weights for each value
axis : int, optional
Axis along which to operate. Default is 0, i.e. the first axis.
Returns
-------
vals : ndarray
Array of modal values.
score : ndarray
Array of weighted counts for each mode.
Examples
--------
>>> from sklearn.utils.extmath import weighted_mode
>>> x = [4, 1, 4, 2, 4, 2]
>>> weights = [1, 1, 1, 1, 1, 1]
>>> weighted_mode(x, weights)
(array([ 4.]), array([ 3.]))
The value 4 appears three times: with uniform weights, the result is
simply the mode of the distribution.
>>> weights = [1, 3, 0.5, 1.5, 1, 2] # deweight the 4's
>>> weighted_mode(x, weights)
(array([ 2.]), array([ 3.5]))
The value 2 has the highest score: it appears twice with weights of
    1.5 and 2: the sum of these is 3.5.
See Also
--------
scipy.stats.mode
"""
if axis is None:
a = np.ravel(a)
w = np.ravel(w)
axis = 0
else:
a = np.asarray(a)
w = np.asarray(w)
axis = axis
if a.shape != w.shape:
w = np.zeros(a.shape, dtype=w.dtype) + w
scores = np.unique(np.ravel(a)) # get ALL unique values
testshape = list(a.shape)
testshape[axis] = 1
oldmostfreq = np.zeros(testshape)
oldcounts = np.zeros(testshape)
for score in scores:
template = np.zeros(a.shape)
ind = (a == score)
template[ind] = w[ind]
counts = np.expand_dims(np.sum(template, axis), axis)
mostfrequent = np.where(counts > oldcounts, score, oldmostfreq)
oldcounts = np.maximum(counts, oldcounts)
oldmostfreq = mostfrequent
return mostfrequent, oldcounts
def pinvh(a, cond=None, rcond=None, lower=True):
"""Compute the (Moore-Penrose) pseudo-inverse of a hermetian matrix.
Calculate a generalized inverse of a symmetric matrix using its
eigenvalue decomposition and including all 'large' eigenvalues.
Parameters
----------
a : array, shape (N, N)
        Real symmetric or complex Hermitian matrix to be pseudo-inverted
cond : float or None, default None
Cutoff for 'small' eigenvalues.
        Singular values smaller than cond * largest_eigenvalue are considered
zero.
If None or -1, suitable machine precision is used.
rcond : float or None, default None (deprecated)
Cutoff for 'small' eigenvalues.
Singular values smaller than rcond * largest_eigenvalue are considered
zero.
If None or -1, suitable machine precision is used.
lower : boolean
Whether the pertinent array data is taken from the lower or upper
triangle of a. (Default: lower)
Returns
-------
    B : array, shape (N, N)
        The pseudo-inverse of the matrix `a`.
Raises
------
LinAlgError
If eigenvalue does not converge
Examples
--------
>>> import numpy as np
>>> a = np.random.randn(9, 6)
>>> a = np.dot(a, a.T)
>>> B = pinvh(a)
>>> np.allclose(a, np.dot(a, np.dot(B, a)))
True
>>> np.allclose(B, np.dot(B, np.dot(a, B)))
True
"""
a = np.asarray_chkfinite(a)
s, u = linalg.eigh(a, lower=lower)
if rcond is not None:
cond = rcond
if cond in [None, -1]:
t = u.dtype.char.lower()
factor = {'f': 1E3, 'd': 1E6}
cond = factor[t] * np.finfo(t).eps
# unlike svd case, eigh can lead to negative eigenvalues
above_cutoff = (abs(s) > cond * np.max(abs(s)))
psigma_diag = np.zeros_like(s)
psigma_diag[above_cutoff] = 1.0 / s[above_cutoff]
return np.dot(u * psigma_diag, np.conjugate(u).T)
def cartesian(arrays, out=None):
"""Generate a cartesian product of input arrays.
Parameters
----------
arrays : list of array-like
1-D arrays to form the cartesian product of.
out : ndarray
Array to place the cartesian product in.
Returns
-------
out : ndarray
2-D array of shape (M, len(arrays)) containing cartesian products
formed of input arrays.
Examples
--------
>>> cartesian(([1, 2, 3], [4, 5], [6, 7]))
array([[1, 4, 6],
[1, 4, 7],
[1, 5, 6],
[1, 5, 7],
[2, 4, 6],
[2, 4, 7],
[2, 5, 6],
[2, 5, 7],
[3, 4, 6],
[3, 4, 7],
[3, 5, 6],
[3, 5, 7]])
"""
arrays = [np.asarray(x) for x in arrays]
shape = (len(x) for x in arrays)
dtype = arrays[0].dtype
ix = np.indices(shape)
ix = ix.reshape(len(arrays), -1).T
if out is None:
out = np.empty_like(ix, dtype=dtype)
for n, arr in enumerate(arrays):
out[:, n] = arrays[n][ix[:, n]]
return out
def svd_flip(u, v, u_based_decision=True):
"""Sign correction to ensure deterministic output from SVD.
Adjusts the columns of u and the rows of v such that the loadings in the
columns in u that are largest in absolute value are always positive.
Parameters
----------
u, v : ndarray
u and v are the output of `linalg.svd` or
`sklearn.utils.extmath.randomized_svd`, with matching inner dimensions
so one can compute `np.dot(u * s, v)`.
u_based_decision : boolean, (default=True)
If True, use the columns of u as the basis for sign flipping.
Otherwise, use the rows of v. The choice of which variable to base the
decision on is generally algorithm dependent.
Returns
-------
u_adjusted, v_adjusted : arrays with the same dimensions as the input.
"""
if u_based_decision:
# columns of u, rows of v
max_abs_cols = np.argmax(np.abs(u), axis=0)
signs = np.sign(u[max_abs_cols, xrange(u.shape[1])])
u *= signs
v *= signs[:, np.newaxis]
else:
# rows of v, columns of u
max_abs_rows = np.argmax(np.abs(v), axis=1)
signs = np.sign(v[xrange(v.shape[0]), max_abs_rows])
u *= signs
v *= signs[:, np.newaxis]
return u, v
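# Small sketch of the sign convention enforced by svd_flip (the helper name is
# an illustrative assumption): flipping the signs of matched columns of u and
# rows of v leaves the reconstruction np.dot(u * s, v) unchanged.
def _svd_flip_sketch():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.randn(6, 4)
    U, s, V = np.linalg.svd(X, full_matrices=False)
    U_f, V_f = svd_flip(U, V)
    return np.allclose(np.dot(U_f * s, V_f), X)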
def log_logistic(X, out=None):
"""Compute the log of the logistic function, ``log(1 / (1 + e ** -x))``.
This implementation is numerically stable because it splits positive and
negative values::
-log(1 + exp(-x_i)) if x_i > 0
x_i - log(1 + exp(x_i)) if x_i <= 0
For the ordinary logistic function, use ``sklearn.utils.fixes.expit``.
Parameters
----------
X: array-like, shape (M, N) or (M, )
Argument to the logistic function
out: array-like, shape: (M, N) or (M, ), optional:
Preallocated output array.
Returns
-------
out: array, shape (M, N) or (M, )
Log of the logistic function evaluated at every point in x
Notes
-----
See the blog post describing this implementation:
http://fa.bianp.net/blog/2013/numerical-optimizers-for-logistic-regression/
"""
is_1d = X.ndim == 1
X = np.atleast_2d(X)
X = check_array(X, dtype=np.float64)
n_samples, n_features = X.shape
if out is None:
out = np.empty_like(X)
_log_logistic_sigmoid(n_samples, n_features, X, out)
if is_1d:
return np.squeeze(out)
return out
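# A pure-numpy sketch of the stable split described in the docstring above
# (illustrative only; it mirrors the two branch formulas instead of calling
# the compiled `_log_logistic_sigmoid` helper):
def _log_logistic_reference(x):
    import numpy as np
    x = np.asarray(x, dtype=np.float64)
    out = np.empty_like(x)
    pos = x > 0
    out[pos] = -np.log1p(np.exp(-x[pos]))            # -log(1 + exp(-x)), x > 0
    out[~pos] = x[~pos] - np.log1p(np.exp(x[~pos]))  # x - log(1 + exp(x)), x <= 0
    return out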
def softmax(X, copy=True):
"""
Calculate the softmax function.
The softmax function is calculated by
np.exp(X) / np.sum(np.exp(X), axis=1)
This will cause overflow when large values are exponentiated.
Hence the largest value in each row is subtracted from each data
point to prevent this.
Parameters
----------
X: array-like, shape (M, N)
        Argument to the softmax function
copy: bool, optional
Copy X or not.
Returns
-------
out: array, shape (M, N)
Softmax function evaluated at every point in x
"""
if copy:
X = np.copy(X)
max_prob = np.max(X, axis=1).reshape((-1, 1))
X -= max_prob
np.exp(X, X)
sum_prob = np.sum(X, axis=1).reshape((-1, 1))
X /= sum_prob
return X
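# Quick sketch of the max-shift trick used above (the helper name is an
# illustrative assumption): each row of the result sums to one even when the
# raw inputs would overflow a direct call to np.exp.
def _softmax_sketch():
    import numpy as np
    X = np.array([[1000.0, 1001.0, 1002.0],
                  [-5.0, 0.0, 5.0]])
    P = softmax(X)
    return np.allclose(P.sum(axis=1), 1.0)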
def safe_min(X):
"""Returns the minimum value of a dense or a CSR/CSC matrix.
    Adapted from http://stackoverflow.com/q/13426580
"""
if issparse(X):
if len(X.data) == 0:
return 0
m = X.data.min()
return m if X.getnnz() == X.size else min(m, 0)
else:
return X.min()
def make_nonnegative(X, min_value=0):
"""Ensure `X.min()` >= `min_value`."""
min_ = safe_min(X)
if min_ < min_value:
if issparse(X):
raise ValueError("Cannot make the data matrix"
" nonnegative because it is sparse."
" Adding a value to every entry would"
" make it no longer sparse.")
X = X + (min_value - min_)
return X
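# Minimal sketch (the helper name is an illustrative assumption): a dense
# matrix whose minimum is -2.0 is shifted so its smallest entry equals 0.
def _make_nonnegative_sketch():
    import numpy as np
    X = np.array([[-2.0, 0.0], [1.0, 3.0]])
    return make_nonnegative(X, min_value=0).min()  # expected 0.0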
def _incremental_mean_and_var(X, last_mean=.0, last_variance=None,
last_sample_count=0):
"""Calculate mean update and a Youngs and Cramer variance update.
last_mean and last_variance are statistics computed at the last step by the
function. Both must be initialized to 0.0. In case no scaling is required
last_variance can be None. The mean is always required and returned because
    it is necessary for the calculation of the variance. last_sample_count is the
number of samples encountered until now.
From the paper "Algorithms for computing the sample variance: analysis and
recommendations", by Chan, Golub, and LeVeque.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data to use for variance update
last_mean : array-like, shape: (n_features,)
last_variance : array-like, shape: (n_features,)
last_sample_count : int
Returns
-------
updated_mean : array, shape (n_features,)
updated_variance : array, shape (n_features,)
If None, only mean is computed
updated_sample_count : int
References
----------
T. Chan, G. Golub, R. LeVeque. Algorithms for computing the sample
    variance: analysis and recommendations, The American Statistician,
    Vol. 37, No. 3, pp. 242-247
Also, see the sparse implementation of this in
`utils.sparsefuncs.incr_mean_variance_axis` and
`utils.sparsefuncs_fast.incr_mean_variance_axis0`
"""
# old = stats until now
# new = the current increment
# updated = the aggregated stats
last_sum = last_mean * last_sample_count
new_sum = X.sum(axis=0)
new_sample_count = X.shape[0]
updated_sample_count = last_sample_count + new_sample_count
updated_mean = (last_sum + new_sum) / updated_sample_count
if last_variance is None:
updated_variance = None
else:
new_unnormalized_variance = X.var(axis=0) * new_sample_count
if last_sample_count == 0: # Avoid division by 0
updated_unnormalized_variance = new_unnormalized_variance
else:
last_over_new_count = last_sample_count / new_sample_count
last_unnormalized_variance = last_variance * last_sample_count
updated_unnormalized_variance = (
last_unnormalized_variance +
new_unnormalized_variance +
last_over_new_count / updated_sample_count *
(last_sum / last_over_new_count - new_sum) ** 2)
updated_variance = updated_unnormalized_variance / updated_sample_count
return updated_mean, updated_variance, updated_sample_count
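# Sketch showing the incremental update agreeing with a single two-pass
# computation (the helper name and the split at 60 samples are illustrative
# assumptions):
def _incremental_mean_and_var_sketch():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.randn(100, 3)
    mean, var, count = _incremental_mean_and_var(X[:60], last_mean=.0,
                                                 last_variance=.0,
                                                 last_sample_count=0)
    mean, var, count = _incremental_mean_and_var(X[60:], mean, var, count)
    return (np.allclose(mean, X.mean(axis=0))
            and np.allclose(var, X.var(axis=0)))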
def _deterministic_vector_sign_flip(u):
"""Modify the sign of vectors for reproducibility
Flips the sign of elements of all the vectors (rows of u) such that
the absolute maximum element of each vector is positive.
Parameters
----------
u : ndarray
Array with vectors as its rows.
Returns
-------
u_flipped : ndarray with same shape as u
Array with the sign flipped vectors as its rows.
"""
max_abs_rows = np.argmax(np.abs(u), axis=1)
signs = np.sign(u[range(u.shape[0]), max_abs_rows])
u *= signs[:, np.newaxis]
return u
|
bsd-3-clause
|
WarrenWeckesser/scikits-image
|
doc/examples/plot_swirl.py
|
6
|
2647
|
"""
=====
Swirl
=====
Image swirling is a non-linear image deformation that creates a whirlpool
effect. This example describes the implementation of this transform in
``skimage``, as well as the underlying warp mechanism.
Image warping
-------------
When applying a geometric transformation on an image, we typically make use of
a reverse mapping, i.e., for each pixel in the output image, we compute its
corresponding position in the input. The reason is that, if we were to do it
the other way around (map each input pixel to its new output position), some
pixels in the output may be left empty. On the other hand, each output
coordinate has exactly one corresponding location in (or outside) the input
image, and even if that position is non-integer, we may use interpolation to
compute the corresponding image value.
Performing a reverse mapping
----------------------------
To perform a geometric warp in ``skimage``, you simply need to provide the
reverse mapping to the ``skimage.transform.warp`` function. E.g., consider the
case where we would like to shift an image 50 pixels to the left. The reverse
mapping for such a shift would be::
def shift_left(xy):
xy[:, 0] += 50
return xy
The corresponding call to warp is::
from skimage.transform import warp
warp(image, shift_left)
The swirl transformation
------------------------
Consider the coordinate :math:`(x, y)` in the output image. The reverse
mapping for the swirl transformation first computes, relative to a center
:math:`(x_0, y_0)`, its polar coordinates,
.. math::
\\theta = \\arctan(y/x)
\\rho = \sqrt{(x - x_0)^2 + (y - y_0)^2},
and then transforms them according to
.. math::
r = \ln(2) \, \mathtt{radius} / 5
\phi = \mathtt{rotation}
s = \mathtt{strength}
    \\theta' = \phi + s \, e^{-\\rho / r} + \\theta
where ``strength`` is a parameter for the amount of swirl, ``radius`` indicates
the swirl extent in pixels, and ``rotation`` adds a rotation angle. The
transformation of ``radius`` into :math:`r` is to ensure that the
transformation decays to :math:`\\approx 1/1000^{\mathsf{th}}` within the
specified radius.
"""
import matplotlib.pyplot as plt
from skimage import data
from skimage.transform import swirl
image = data.checkerboard()
swirled = swirl(image, rotation=0, strength=10, radius=120, order=2)
fig, (ax0, ax1) = plt.subplots(1, 2, figsize=(8, 3), sharex=True, sharey=True,
                               subplot_kw={'adjustable': 'box-forced'})
ax0.imshow(image, cmap=plt.cm.gray, interpolation='none')
ax0.axis('off')
ax1.imshow(swirled, cmap=plt.cm.gray, interpolation='none')
ax1.axis('off')
plt.show()
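# As a concrete illustration of the reverse-mapping idea described above, the
# same ``warp`` machinery can be driven by the hand-written inverse map from
# the text (a small sketch: it simply shifts the checkerboard 50 pixels to the
# left and is not needed for the swirl example itself).
from skimage.transform import warp
def shift_left(xy):
    xy[:, 0] += 50
    return xy
shifted = warp(image, shift_left)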
|
bsd-3-clause
|
zuotingbing/spark
|
python/pyspark/sql/tests/test_pandas_udf_scalar.py
|
3
|
52109
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
import os
import random
import shutil
import sys
import tempfile
import time
import unittest
if sys.version >= '3':
unicode = str
from datetime import date, datetime
from decimal import Decimal
from pyspark import TaskContext
from pyspark.rdd import PythonEvalType
from pyspark.sql import Column
from pyspark.sql.functions import array, col, expr, lit, sum, struct, udf, pandas_udf, \
PandasUDFType
from pyspark.sql.types import Row
from pyspark.sql.types import *
from pyspark.sql.utils import AnalysisException
from pyspark.testing.sqlutils import ReusedSQLTestCase, test_compiled,\
test_not_compiled_message, have_pandas, have_pyarrow, pandas_requirement_message, \
pyarrow_requirement_message
from pyspark.testing.utils import QuietTest
if have_pandas:
import pandas as pd
if have_pyarrow:
import pyarrow as pa
@unittest.skipIf(
not have_pandas or not have_pyarrow,
pandas_requirement_message or pyarrow_requirement_message)
class ScalarPandasUDFTests(ReusedSQLTestCase):
@classmethod
def setUpClass(cls):
ReusedSQLTestCase.setUpClass()
# Synchronize default timezone between Python and Java
cls.tz_prev = os.environ.get("TZ", None) # save current tz if set
tz = "America/Los_Angeles"
os.environ["TZ"] = tz
time.tzset()
cls.sc.environment["TZ"] = tz
cls.spark.conf.set("spark.sql.session.timeZone", tz)
@classmethod
def tearDownClass(cls):
del os.environ["TZ"]
if cls.tz_prev is not None:
os.environ["TZ"] = cls.tz_prev
time.tzset()
ReusedSQLTestCase.tearDownClass()
@property
def nondeterministic_vectorized_udf(self):
import numpy as np
@pandas_udf('double')
def random_udf(v):
return pd.Series(np.random.random(len(v)))
random_udf = random_udf.asNondeterministic()
return random_udf
@property
def nondeterministic_vectorized_iter_udf(self):
import numpy as np
@pandas_udf('double', PandasUDFType.SCALAR_ITER)
def random_udf(it):
for v in it:
yield pd.Series(np.random.random(len(v)))
random_udf = random_udf.asNondeterministic()
return random_udf
def test_pandas_udf_tokenize(self):
tokenize = pandas_udf(lambda s: s.apply(lambda str: str.split(' ')),
ArrayType(StringType()))
self.assertEqual(tokenize.returnType, ArrayType(StringType()))
df = self.spark.createDataFrame([("hi boo",), ("bye boo",)], ["vals"])
result = df.select(tokenize("vals").alias("hi"))
self.assertEqual([Row(hi=[u'hi', u'boo']), Row(hi=[u'bye', u'boo'])], result.collect())
def test_pandas_udf_nested_arrays(self):
tokenize = pandas_udf(lambda s: s.apply(lambda str: [str.split(' ')]),
ArrayType(ArrayType(StringType())))
self.assertEqual(tokenize.returnType, ArrayType(ArrayType(StringType())))
df = self.spark.createDataFrame([("hi boo",), ("bye boo",)], ["vals"])
result = df.select(tokenize("vals").alias("hi"))
self.assertEqual([Row(hi=[[u'hi', u'boo']]), Row(hi=[[u'bye', u'boo']])], result.collect())
def test_vectorized_udf_basic(self):
df = self.spark.range(10).select(
col('id').cast('string').alias('str'),
col('id').cast('int').alias('int'),
col('id').alias('long'),
col('id').cast('float').alias('float'),
col('id').cast('double').alias('double'),
col('id').cast('decimal').alias('decimal'),
col('id').cast('boolean').alias('bool'),
array(col('id')).alias('array_long'))
f = lambda x: x
for udf_type in [PandasUDFType.SCALAR, PandasUDFType.SCALAR_ITER]:
str_f = pandas_udf(f, StringType(), udf_type)
int_f = pandas_udf(f, IntegerType(), udf_type)
long_f = pandas_udf(f, LongType(), udf_type)
float_f = pandas_udf(f, FloatType(), udf_type)
double_f = pandas_udf(f, DoubleType(), udf_type)
decimal_f = pandas_udf(f, DecimalType(), udf_type)
bool_f = pandas_udf(f, BooleanType(), udf_type)
array_long_f = pandas_udf(f, ArrayType(LongType()), udf_type)
res = df.select(str_f(col('str')), int_f(col('int')),
long_f(col('long')), float_f(col('float')),
double_f(col('double')), decimal_f('decimal'),
bool_f(col('bool')), array_long_f('array_long'))
self.assertEquals(df.collect(), res.collect())
def test_register_nondeterministic_vectorized_udf_basic(self):
random_pandas_udf = pandas_udf(
lambda x: random.randint(6, 6) + x, IntegerType()).asNondeterministic()
self.assertEqual(random_pandas_udf.deterministic, False)
self.assertEqual(random_pandas_udf.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF)
nondeterministic_pandas_udf = self.spark.catalog.registerFunction(
"randomPandasUDF", random_pandas_udf)
self.assertEqual(nondeterministic_pandas_udf.deterministic, False)
self.assertEqual(nondeterministic_pandas_udf.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF)
[row] = self.spark.sql("SELECT randomPandasUDF(1)").collect()
self.assertEqual(row[0], 7)
def random_iter_udf(it):
for i in it:
yield random.randint(6, 6) + i
random_pandas_iter_udf = pandas_udf(
random_iter_udf, IntegerType(), PandasUDFType.SCALAR_ITER).asNondeterministic()
self.assertEqual(random_pandas_iter_udf.deterministic, False)
self.assertEqual(random_pandas_iter_udf.evalType, PythonEvalType.SQL_SCALAR_PANDAS_ITER_UDF)
nondeterministic_pandas_iter_udf = self.spark.catalog.registerFunction(
"randomPandasIterUDF", random_pandas_iter_udf)
self.assertEqual(nondeterministic_pandas_iter_udf.deterministic, False)
self.assertEqual(nondeterministic_pandas_iter_udf.evalType,
PythonEvalType.SQL_SCALAR_PANDAS_ITER_UDF)
[row] = self.spark.sql("SELECT randomPandasIterUDF(1)").collect()
self.assertEqual(row[0], 7)
def test_vectorized_udf_null_boolean(self):
data = [(True,), (True,), (None,), (False,)]
schema = StructType().add("bool", BooleanType())
df = self.spark.createDataFrame(data, schema)
for udf_type in [PandasUDFType.SCALAR, PandasUDFType.SCALAR_ITER]:
bool_f = pandas_udf(lambda x: x, BooleanType(), udf_type)
res = df.select(bool_f(col('bool')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_byte(self):
data = [(None,), (2,), (3,), (4,)]
schema = StructType().add("byte", ByteType())
df = self.spark.createDataFrame(data, schema)
for udf_type in [PandasUDFType.SCALAR, PandasUDFType.SCALAR_ITER]:
byte_f = pandas_udf(lambda x: x, ByteType(), udf_type)
res = df.select(byte_f(col('byte')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_short(self):
data = [(None,), (2,), (3,), (4,)]
schema = StructType().add("short", ShortType())
df = self.spark.createDataFrame(data, schema)
for udf_type in [PandasUDFType.SCALAR, PandasUDFType.SCALAR_ITER]:
short_f = pandas_udf(lambda x: x, ShortType(), udf_type)
res = df.select(short_f(col('short')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_int(self):
data = [(None,), (2,), (3,), (4,)]
schema = StructType().add("int", IntegerType())
df = self.spark.createDataFrame(data, schema)
for udf_type in [PandasUDFType.SCALAR, PandasUDFType.SCALAR_ITER]:
int_f = pandas_udf(lambda x: x, IntegerType(), udf_type)
res = df.select(int_f(col('int')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_long(self):
data = [(None,), (2,), (3,), (4,)]
schema = StructType().add("long", LongType())
df = self.spark.createDataFrame(data, schema)
for udf_type in [PandasUDFType.SCALAR, PandasUDFType.SCALAR_ITER]:
long_f = pandas_udf(lambda x: x, LongType(), udf_type)
res = df.select(long_f(col('long')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_float(self):
data = [(3.0,), (5.0,), (-1.0,), (None,)]
schema = StructType().add("float", FloatType())
df = self.spark.createDataFrame(data, schema)
for udf_type in [PandasUDFType.SCALAR, PandasUDFType.SCALAR_ITER]:
float_f = pandas_udf(lambda x: x, FloatType(), udf_type)
res = df.select(float_f(col('float')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_double(self):
data = [(3.0,), (5.0,), (-1.0,), (None,)]
schema = StructType().add("double", DoubleType())
df = self.spark.createDataFrame(data, schema)
for udf_type in [PandasUDFType.SCALAR, PandasUDFType.SCALAR_ITER]:
double_f = pandas_udf(lambda x: x, DoubleType(), udf_type)
res = df.select(double_f(col('double')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_decimal(self):
data = [(Decimal(3.0),), (Decimal(5.0),), (Decimal(-1.0),), (None,)]
schema = StructType().add("decimal", DecimalType(38, 18))
df = self.spark.createDataFrame(data, schema)
for udf_type in [PandasUDFType.SCALAR, PandasUDFType.SCALAR_ITER]:
decimal_f = pandas_udf(lambda x: x, DecimalType(38, 18), udf_type)
res = df.select(decimal_f(col('decimal')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_string(self):
data = [("foo",), (None,), ("bar",), ("bar",)]
schema = StructType().add("str", StringType())
df = self.spark.createDataFrame(data, schema)
for udf_type in [PandasUDFType.SCALAR, PandasUDFType.SCALAR_ITER]:
str_f = pandas_udf(lambda x: x, StringType(), udf_type)
res = df.select(str_f(col('str')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_string_in_udf(self):
df = self.spark.range(10)
scalar_f = lambda x: pd.Series(map(str, x))
def iter_f(it):
for i in it:
yield scalar_f(i)
for f, udf_type in [(scalar_f, PandasUDFType.SCALAR), (iter_f, PandasUDFType.SCALAR_ITER)]:
str_f = pandas_udf(f, StringType(), udf_type)
actual = df.select(str_f(col('id')))
expected = df.select(col('id').cast('string'))
self.assertEquals(expected.collect(), actual.collect())
def test_vectorized_udf_datatype_string(self):
df = self.spark.range(10).select(
col('id').cast('string').alias('str'),
col('id').cast('int').alias('int'),
col('id').alias('long'),
col('id').cast('float').alias('float'),
col('id').cast('double').alias('double'),
col('id').cast('decimal').alias('decimal'),
col('id').cast('boolean').alias('bool'))
f = lambda x: x
for udf_type in [PandasUDFType.SCALAR, PandasUDFType.SCALAR_ITER]:
str_f = pandas_udf(f, 'string', udf_type)
int_f = pandas_udf(f, 'integer', udf_type)
long_f = pandas_udf(f, 'long', udf_type)
float_f = pandas_udf(f, 'float', udf_type)
double_f = pandas_udf(f, 'double', udf_type)
decimal_f = pandas_udf(f, 'decimal(38, 18)', udf_type)
bool_f = pandas_udf(f, 'boolean', udf_type)
res = df.select(str_f(col('str')), int_f(col('int')),
long_f(col('long')), float_f(col('float')),
double_f(col('double')), decimal_f('decimal'),
bool_f(col('bool')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_null_binary(self):
data = [(bytearray(b"a"),), (None,), (bytearray(b"bb"),), (bytearray(b"ccc"),)]
schema = StructType().add("binary", BinaryType())
df = self.spark.createDataFrame(data, schema)
for udf_type in [PandasUDFType.SCALAR, PandasUDFType.SCALAR_ITER]:
str_f = pandas_udf(lambda x: x, BinaryType(), udf_type)
res = df.select(str_f(col('binary')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_array_type(self):
data = [([1, 2],), ([3, 4],)]
array_schema = StructType([StructField("array", ArrayType(IntegerType()))])
df = self.spark.createDataFrame(data, schema=array_schema)
for udf_type in [PandasUDFType.SCALAR, PandasUDFType.SCALAR_ITER]:
array_f = pandas_udf(lambda x: x, ArrayType(IntegerType()), udf_type)
result = df.select(array_f(col('array')))
self.assertEquals(df.collect(), result.collect())
def test_vectorized_udf_null_array(self):
data = [([1, 2],), (None,), (None,), ([3, 4],), (None,)]
array_schema = StructType([StructField("array", ArrayType(IntegerType()))])
df = self.spark.createDataFrame(data, schema=array_schema)
for udf_type in [PandasUDFType.SCALAR, PandasUDFType.SCALAR_ITER]:
array_f = pandas_udf(lambda x: x, ArrayType(IntegerType()), udf_type)
result = df.select(array_f(col('array')))
self.assertEquals(df.collect(), result.collect())
def test_vectorized_udf_struct_type(self):
df = self.spark.range(10)
return_type = StructType([
StructField('id', LongType()),
StructField('str', StringType())])
def scalar_func(id):
return pd.DataFrame({'id': id, 'str': id.apply(unicode)})
def iter_func(it):
for id in it:
yield scalar_func(id)
for func, udf_type in [(scalar_func, PandasUDFType.SCALAR),
(iter_func, PandasUDFType.SCALAR_ITER)]:
f = pandas_udf(func, returnType=return_type, functionType=udf_type)
expected = df.select(struct(col('id'), col('id').cast('string').alias('str'))
.alias('struct')).collect()
actual = df.select(f(col('id')).alias('struct')).collect()
self.assertEqual(expected, actual)
g = pandas_udf(func, 'id: long, str: string', functionType=udf_type)
actual = df.select(g(col('id')).alias('struct')).collect()
self.assertEqual(expected, actual)
struct_f = pandas_udf(lambda x: x, return_type, functionType=udf_type)
actual = df.select(struct_f(struct(col('id'), col('id').cast('string').alias('str'))))
self.assertEqual(expected, actual.collect())
def test_vectorized_udf_struct_complex(self):
df = self.spark.range(10)
return_type = StructType([
StructField('ts', TimestampType()),
StructField('arr', ArrayType(LongType()))])
def _scalar_f(id):
return pd.DataFrame({'ts': id.apply(lambda i: pd.Timestamp(i)),
'arr': id.apply(lambda i: [i, i + 1])})
scalar_f = pandas_udf(_scalar_f, returnType=return_type)
@pandas_udf(returnType=return_type, functionType=PandasUDFType.SCALAR_ITER)
def iter_f(it):
for id in it:
yield _scalar_f(id)
for f, udf_type in [(scalar_f, PandasUDFType.SCALAR), (iter_f, PandasUDFType.SCALAR_ITER)]:
actual = df.withColumn('f', f(col('id'))).collect()
for i, row in enumerate(actual):
id, f = row
self.assertEqual(i, id)
self.assertEqual(pd.Timestamp(i).to_pydatetime(), f[0])
self.assertListEqual([i, i + 1], f[1])
def test_vectorized_udf_nested_struct(self):
nested_type = StructType([
StructField('id', IntegerType()),
StructField('nested', StructType([
StructField('foo', StringType()),
StructField('bar', FloatType())
]))
])
for udf_type in [PandasUDFType.SCALAR, PandasUDFType.SCALAR_ITER]:
with QuietTest(self.sc):
with self.assertRaisesRegexp(
Exception,
'Invalid return type with scalar Pandas UDFs'):
pandas_udf(lambda x: x, returnType=nested_type, functionType=udf_type)
def test_vectorized_udf_complex(self):
df = self.spark.range(10).select(
col('id').cast('int').alias('a'),
col('id').cast('int').alias('b'),
col('id').cast('double').alias('c'))
scalar_add = pandas_udf(lambda x, y: x + y, IntegerType())
scalar_power2 = pandas_udf(lambda x: 2 ** x, IntegerType())
scalar_mul = pandas_udf(lambda x, y: x * y, DoubleType())
@pandas_udf(IntegerType(), PandasUDFType.SCALAR_ITER)
def iter_add(it):
for x, y in it:
yield x + y
@pandas_udf(IntegerType(), PandasUDFType.SCALAR_ITER)
def iter_power2(it):
for x in it:
yield 2 ** x
@pandas_udf(DoubleType(), PandasUDFType.SCALAR_ITER)
def iter_mul(it):
for x, y in it:
yield x * y
for add, power2, mul in [(scalar_add, scalar_power2, scalar_mul),
(iter_add, iter_power2, iter_mul)]:
res = df.select(add(col('a'), col('b')), power2(col('a')), mul(col('b'), col('c')))
expected = df.select(expr('a + b'), expr('power(2, a)'), expr('b * c'))
self.assertEquals(expected.collect(), res.collect())
def test_vectorized_udf_exception(self):
df = self.spark.range(10)
scalar_raise_exception = pandas_udf(lambda x: x * (1 / 0), LongType())
@pandas_udf(LongType(), PandasUDFType.SCALAR_ITER)
def iter_raise_exception(it):
for x in it:
yield x * (1 / 0)
for raise_exception in [scalar_raise_exception, iter_raise_exception]:
with QuietTest(self.sc):
with self.assertRaisesRegexp(Exception, 'division( or modulo)? by zero'):
df.select(raise_exception(col('id'))).collect()
def test_vectorized_udf_invalid_length(self):
df = self.spark.range(10)
raise_exception = pandas_udf(lambda _: pd.Series(1), LongType())
with QuietTest(self.sc):
with self.assertRaisesRegexp(
Exception,
'Result vector from pandas_udf was not the required length'):
df.select(raise_exception(col('id'))).collect()
@pandas_udf(LongType(), PandasUDFType.SCALAR_ITER)
def iter_udf_wong_output_size(it):
for _ in it:
yield pd.Series(1)
with QuietTest(self.sc):
with self.assertRaisesRegexp(
Exception,
"The length of output in Scalar iterator.*"
"the length of output was 1"):
df.select(iter_udf_wong_output_size(col('id'))).collect()
@pandas_udf(LongType(), PandasUDFType.SCALAR_ITER)
def iter_udf_not_reading_all_input(it):
for batch in it:
batch_len = len(batch)
yield pd.Series([1] * batch_len)
break
with self.sql_conf({"spark.sql.execution.arrow.maxRecordsPerBatch": 3}):
df1 = self.spark.range(10).repartition(1)
with QuietTest(self.sc):
with self.assertRaisesRegexp(
Exception,
"pandas iterator UDF should exhaust"):
df1.select(iter_udf_not_reading_all_input(col('id'))).collect()
def test_vectorized_udf_chained(self):
df = self.spark.range(10)
scalar_f = pandas_udf(lambda x: x + 1, LongType())
scalar_g = pandas_udf(lambda x: x - 1, LongType())
iter_f = pandas_udf(lambda it: map(lambda x: x + 1, it), LongType(),
PandasUDFType.SCALAR_ITER)
iter_g = pandas_udf(lambda it: map(lambda x: x - 1, it), LongType(),
PandasUDFType.SCALAR_ITER)
for f, g in [(scalar_f, scalar_g), (iter_f, iter_g)]:
res = df.select(g(f(col('id'))))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_chained_struct_type(self):
df = self.spark.range(10)
return_type = StructType([
StructField('id', LongType()),
StructField('str', StringType())])
@pandas_udf(return_type)
def scalar_f(id):
return pd.DataFrame({'id': id, 'str': id.apply(unicode)})
scalar_g = pandas_udf(lambda x: x, return_type)
@pandas_udf(return_type, PandasUDFType.SCALAR_ITER)
def iter_f(it):
for id in it:
yield pd.DataFrame({'id': id, 'str': id.apply(unicode)})
iter_g = pandas_udf(lambda x: x, return_type, PandasUDFType.SCALAR_ITER)
expected = df.select(struct(col('id'), col('id').cast('string').alias('str'))
.alias('struct')).collect()
for f, g in [(scalar_f, scalar_g), (iter_f, iter_g)]:
actual = df.select(g(f(col('id'))).alias('struct')).collect()
self.assertEqual(expected, actual)
def test_vectorized_udf_wrong_return_type(self):
with QuietTest(self.sc):
for udf_type in [PandasUDFType.SCALAR, PandasUDFType.SCALAR_ITER]:
with self.assertRaisesRegexp(
NotImplementedError,
'Invalid return type.*scalar Pandas UDF.*MapType'):
pandas_udf(lambda x: x, MapType(LongType(), LongType()), udf_type)
def test_vectorized_udf_return_scalar(self):
df = self.spark.range(10)
scalar_f = pandas_udf(lambda x: 1.0, DoubleType())
iter_f = pandas_udf(lambda it: map(lambda x: 1.0, it), DoubleType(),
PandasUDFType.SCALAR_ITER)
for f in [scalar_f, iter_f]:
with QuietTest(self.sc):
with self.assertRaisesRegexp(Exception, 'Return.*type.*Series'):
df.select(f(col('id'))).collect()
def test_vectorized_udf_decorator(self):
df = self.spark.range(10)
@pandas_udf(returnType=LongType())
def scalar_identity(x):
return x
@pandas_udf(returnType=LongType(), functionType=PandasUDFType.SCALAR_ITER)
def iter_identity(x):
return x
for identity in [scalar_identity, iter_identity]:
res = df.select(identity(col('id')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_empty_partition(self):
df = self.spark.createDataFrame(self.sc.parallelize([Row(id=1)], 2))
for udf_type in [PandasUDFType.SCALAR, PandasUDFType.SCALAR_ITER]:
f = pandas_udf(lambda x: x, LongType(), udf_type)
res = df.select(f(col('id')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_struct_with_empty_partition(self):
df = self.spark.createDataFrame(self.sc.parallelize([Row(id=1)], 2))\
.withColumn('name', lit('John Doe'))
@pandas_udf("first string, last string")
def scalar_split_expand(n):
return n.str.split(expand=True)
@pandas_udf("first string, last string", PandasUDFType.SCALAR_ITER)
def iter_split_expand(it):
for n in it:
yield n.str.split(expand=True)
for split_expand in [scalar_split_expand, iter_split_expand]:
result = df.select(split_expand('name')).collect()
self.assertEqual(1, len(result))
row = result[0]
self.assertEqual('John', row[0]['first'])
self.assertEqual('Doe', row[0]['last'])
def test_vectorized_udf_varargs(self):
df = self.spark.createDataFrame(self.sc.parallelize([Row(id=1)], 2))
scalar_f = pandas_udf(lambda *v: v[0], LongType())
@pandas_udf(LongType(), PandasUDFType.SCALAR_ITER)
def iter_f(it):
for v in it:
yield v[0]
for f in [scalar_f, iter_f]:
res = df.select(f(col('id'), col('id')))
self.assertEquals(df.collect(), res.collect())
def test_vectorized_udf_unsupported_types(self):
with QuietTest(self.sc):
for udf_type in [PandasUDFType.SCALAR, PandasUDFType.SCALAR_ITER]:
with self.assertRaisesRegexp(
NotImplementedError,
'Invalid return type.*scalar Pandas UDF.*MapType'):
pandas_udf(lambda x: x, MapType(StringType(), IntegerType()), udf_type)
with self.assertRaisesRegexp(
NotImplementedError,
'Invalid return type.*scalar Pandas UDF.*ArrayType.StructType'):
pandas_udf(lambda x: x,
ArrayType(StructType([StructField('a', IntegerType())])), udf_type)
def test_vectorized_udf_dates(self):
schema = StructType().add("idx", LongType()).add("date", DateType())
data = [(0, date(1969, 1, 1),),
(1, date(2012, 2, 2),),
(2, None,),
(3, date(2100, 4, 4),),
(4, date(2262, 4, 12),)]
df = self.spark.createDataFrame(data, schema=schema)
def scalar_check_data(idx, date, date_copy):
msgs = []
is_equal = date.isnull()
for i in range(len(idx)):
if (is_equal[i] and data[idx[i]][1] is None) or \
date[i] == data[idx[i]][1]:
msgs.append(None)
else:
msgs.append(
"date values are not equal (date='%s': data[%d][1]='%s')"
% (date[i], idx[i], data[idx[i]][1]))
return pd.Series(msgs)
def iter_check_data(it):
for idx, date, date_copy in it:
yield scalar_check_data(idx, date, date_copy)
pandas_scalar_check_data = pandas_udf(scalar_check_data, StringType())
pandas_iter_check_data = pandas_udf(iter_check_data, StringType(),
PandasUDFType.SCALAR_ITER)
for check_data, udf_type in [(pandas_scalar_check_data, PandasUDFType.SCALAR),
(pandas_iter_check_data, PandasUDFType.SCALAR_ITER)]:
date_copy = pandas_udf(lambda t: t, returnType=DateType(), functionType=udf_type)
df = df.withColumn("date_copy", date_copy(col("date")))
result = df.withColumn("check_data",
check_data(col("idx"), col("date"), col("date_copy"))).collect()
self.assertEquals(len(data), len(result))
for i in range(len(result)):
self.assertEquals(data[i][1], result[i][1]) # "date" col
self.assertEquals(data[i][1], result[i][2]) # "date_copy" col
self.assertIsNone(result[i][3]) # "check_data" col
def test_vectorized_udf_timestamps(self):
schema = StructType([
StructField("idx", LongType(), True),
StructField("timestamp", TimestampType(), True)])
data = [(0, datetime(1969, 1, 1, 1, 1, 1)),
(1, datetime(2012, 2, 2, 2, 2, 2)),
(2, None),
(3, datetime(2100, 3, 3, 3, 3, 3))]
df = self.spark.createDataFrame(data, schema=schema)
def scalar_check_data(idx, timestamp, timestamp_copy):
msgs = []
is_equal = timestamp.isnull() # use this array to check values are equal
for i in range(len(idx)):
# Check that timestamps are as expected in the UDF
if (is_equal[i] and data[idx[i]][1] is None) or \
timestamp[i].to_pydatetime() == data[idx[i]][1]:
msgs.append(None)
else:
msgs.append(
"timestamp values are not equal (timestamp='%s': data[%d][1]='%s')"
% (timestamp[i], idx[i], data[idx[i]][1]))
return pd.Series(msgs)
def iter_check_data(it):
for idx, timestamp, timestamp_copy in it:
yield scalar_check_data(idx, timestamp, timestamp_copy)
pandas_scalar_check_data = pandas_udf(scalar_check_data, StringType())
pandas_iter_check_data = pandas_udf(iter_check_data, StringType(),
PandasUDFType.SCALAR_ITER)
for check_data, udf_type in [(pandas_scalar_check_data, PandasUDFType.SCALAR),
(pandas_iter_check_data, PandasUDFType.SCALAR_ITER)]:
# Check that a timestamp passed through a pandas_udf will not be altered by timezone
# calc
f_timestamp_copy = pandas_udf(lambda t: t,
returnType=TimestampType(), functionType=udf_type)
df = df.withColumn("timestamp_copy", f_timestamp_copy(col("timestamp")))
result = df.withColumn("check_data", check_data(col("idx"), col("timestamp"),
col("timestamp_copy"))).collect()
# Check that collection values are correct
self.assertEquals(len(data), len(result))
for i in range(len(result)):
self.assertEquals(data[i][1], result[i][1]) # "timestamp" col
self.assertEquals(data[i][1], result[i][2]) # "timestamp_copy" col
self.assertIsNone(result[i][3]) # "check_data" col
def test_vectorized_udf_return_timestamp_tz(self):
df = self.spark.range(10)
@pandas_udf(returnType=TimestampType())
def scalar_gen_timestamps(id):
ts = [pd.Timestamp(i, unit='D', tz='America/Los_Angeles') for i in id]
return pd.Series(ts)
@pandas_udf(returnType=TimestampType(), functionType=PandasUDFType.SCALAR_ITER)
def iter_gen_timestamps(it):
for id in it:
ts = [pd.Timestamp(i, unit='D', tz='America/Los_Angeles') for i in id]
yield pd.Series(ts)
for gen_timestamps in [scalar_gen_timestamps, iter_gen_timestamps]:
result = df.withColumn("ts", gen_timestamps(col("id"))).collect()
spark_ts_t = TimestampType()
for r in result:
i, ts = r
ts_tz = pd.Timestamp(i, unit='D', tz='America/Los_Angeles').to_pydatetime()
expected = spark_ts_t.fromInternal(spark_ts_t.toInternal(ts_tz))
self.assertEquals(expected, ts)
def test_vectorized_udf_check_config(self):
with self.sql_conf({"spark.sql.execution.arrow.maxRecordsPerBatch": 3}):
df = self.spark.range(10, numPartitions=1)
@pandas_udf(returnType=LongType())
def scalar_check_records_per_batch(x):
return pd.Series(x.size).repeat(x.size)
@pandas_udf(returnType=LongType(), functionType=PandasUDFType.SCALAR_ITER)
def iter_check_records_per_batch(it):
for x in it:
yield pd.Series(x.size).repeat(x.size)
for check_records_per_batch in [scalar_check_records_per_batch,
iter_check_records_per_batch]:
result = df.select(check_records_per_batch(col("id"))).collect()
for (r,) in result:
self.assertTrue(r <= 3)
def test_vectorized_udf_timestamps_respect_session_timezone(self):
schema = StructType([
StructField("idx", LongType(), True),
StructField("timestamp", TimestampType(), True)])
data = [(1, datetime(1969, 1, 1, 1, 1, 1)),
(2, datetime(2012, 2, 2, 2, 2, 2)),
(3, None),
(4, datetime(2100, 3, 3, 3, 3, 3))]
df = self.spark.createDataFrame(data, schema=schema)
scalar_internal_value = pandas_udf(
lambda ts: ts.apply(lambda ts: ts.value if ts is not pd.NaT else None), LongType())
@pandas_udf(LongType(), PandasUDFType.SCALAR_ITER)
def iter_internal_value(it):
for ts in it:
yield ts.apply(lambda ts: ts.value if ts is not pd.NaT else None)
for internal_value, udf_type in [(scalar_internal_value, PandasUDFType.SCALAR),
(iter_internal_value, PandasUDFType.SCALAR_ITER)]:
f_timestamp_copy = pandas_udf(lambda ts: ts, TimestampType(), udf_type)
timezone = "America/Los_Angeles"
with self.sql_conf({"spark.sql.session.timeZone": timezone}):
df_la = df.withColumn("tscopy", f_timestamp_copy(col("timestamp"))) \
.withColumn("internal_value", internal_value(col("timestamp")))
result_la = df_la.select(col("idx"), col("internal_value")).collect()
# Correct result_la by adjusting 3 hours difference between Los Angeles and New York
diff = 3 * 60 * 60 * 1000 * 1000 * 1000
result_la_corrected = \
df_la.select(col("idx"), col("tscopy"), col("internal_value") + diff).collect()
timezone = "America/New_York"
with self.sql_conf({"spark.sql.session.timeZone": timezone}):
df_ny = df.withColumn("tscopy", f_timestamp_copy(col("timestamp"))) \
.withColumn("internal_value", internal_value(col("timestamp")))
result_ny = df_ny.select(col("idx"), col("tscopy"), col("internal_value")).collect()
self.assertNotEqual(result_ny, result_la)
self.assertEqual(result_ny, result_la_corrected)
def test_nondeterministic_vectorized_udf(self):
# Test that nondeterministic UDFs are evaluated only once in chained UDF evaluations
@pandas_udf('double')
def scalar_plus_ten(v):
return v + 10
@pandas_udf('double', PandasUDFType.SCALAR_ITER)
def iter_plus_ten(it):
for v in it:
yield v + 10
for plus_ten in [scalar_plus_ten, iter_plus_ten]:
random_udf = self.nondeterministic_vectorized_udf
df = self.spark.range(10).withColumn('rand', random_udf(col('id')))
result1 = df.withColumn('plus_ten(rand)', plus_ten(df['rand'])).toPandas()
self.assertEqual(random_udf.deterministic, False)
self.assertTrue(result1['plus_ten(rand)'].equals(result1['rand'] + 10))
def test_nondeterministic_vectorized_udf_in_aggregate(self):
df = self.spark.range(10)
for random_udf in [self.nondeterministic_vectorized_udf,
self.nondeterministic_vectorized_iter_udf]:
with QuietTest(self.sc):
with self.assertRaisesRegexp(AnalysisException, 'nondeterministic'):
df.groupby(df.id).agg(sum(random_udf(df.id))).collect()
with self.assertRaisesRegexp(AnalysisException, 'nondeterministic'):
df.agg(sum(random_udf(df.id))).collect()
def test_register_vectorized_udf_basic(self):
df = self.spark.range(10).select(
col('id').cast('int').alias('a'),
col('id').cast('int').alias('b'))
scalar_original_add = pandas_udf(lambda x, y: x + y, IntegerType())
self.assertEqual(scalar_original_add.evalType, PythonEvalType.SQL_SCALAR_PANDAS_UDF)
@pandas_udf(IntegerType(), PandasUDFType.SCALAR_ITER)
def iter_original_add(it):
for x, y in it:
yield x + y
self.assertEqual(iter_original_add.evalType, PythonEvalType.SQL_SCALAR_PANDAS_ITER_UDF)
for original_add in [scalar_original_add, iter_original_add]:
self.assertEqual(original_add.deterministic, True)
new_add = self.spark.catalog.registerFunction("add1", original_add)
res1 = df.select(new_add(col('a'), col('b')))
res2 = self.spark.sql(
"SELECT add1(t.a, t.b) FROM (SELECT id as a, id as b FROM range(10)) t")
expected = df.select(expr('a + b'))
self.assertEquals(expected.collect(), res1.collect())
self.assertEquals(expected.collect(), res2.collect())
def test_scalar_iter_udf_init(self):
import numpy as np
@pandas_udf('int', PandasUDFType.SCALAR_ITER)
def rng(batch_iter):
context = TaskContext.get()
part = context.partitionId()
np.random.seed(part)
for batch in batch_iter:
yield pd.Series(np.random.randint(100, size=len(batch)))
with self.sql_conf({"spark.sql.execution.arrow.maxRecordsPerBatch": 2}):
df = self.spark.range(10, numPartitions=2).select(rng(col("id").alias("v")))
result1 = df.collect()
result2 = df.collect()
self.assertEqual(result1, result2,
"SCALAR ITER UDF can initialize state and produce deterministic RNG")
def test_scalar_iter_udf_close(self):
@pandas_udf('int', PandasUDFType.SCALAR_ITER)
def test_close(batch_iter):
try:
for batch in batch_iter:
yield batch
finally:
raise RuntimeError("reached finally block")
with QuietTest(self.sc):
with self.assertRaisesRegexp(Exception, "reached finally block"):
self.spark.range(1).select(test_close(col("id"))).collect()
def test_scalar_iter_udf_close_early(self):
tmp_dir = tempfile.mkdtemp()
try:
tmp_file = tmp_dir + '/reach_finally_block'
@pandas_udf('int', PandasUDFType.SCALAR_ITER)
def test_close(batch_iter):
generator_exit_caught = False
try:
for batch in batch_iter:
yield batch
                        time.sleep(1.0)  # avoid the function finishing too fast.
except GeneratorExit as ge:
generator_exit_caught = True
raise ge
finally:
assert generator_exit_caught, "Generator exit exception was not caught."
open(tmp_file, 'a').close()
with QuietTest(self.sc):
with self.sql_conf({"spark.sql.execution.arrow.maxRecordsPerBatch": 1,
"spark.sql.execution.pandas.udf.buffer.size": 4}):
self.spark.range(10).repartition(1) \
.select(test_close(col("id"))).limit(2).collect()
                    # Wait here because the Python UDF worker takes some time to detect
                    # that the JVM-side socket has closed, which then triggers
                    # `GeneratorExit`. The wait timeout is 10s.
for i in range(100):
time.sleep(0.1)
if os.path.exists(tmp_file):
break
assert os.path.exists(tmp_file), "finally block not reached."
finally:
shutil.rmtree(tmp_dir)
# Regression test for SPARK-23314
def test_timestamp_dst(self):
# Daylight saving time for Los Angeles for 2015 is Sun, Nov 1 at 2:00 am
dt = [datetime(2015, 11, 1, 0, 30),
datetime(2015, 11, 1, 1, 30),
datetime(2015, 11, 1, 2, 30)]
df = self.spark.createDataFrame(dt, 'timestamp').toDF('time')
for udf_type in [PandasUDFType.SCALAR, PandasUDFType.SCALAR_ITER]:
foo_udf = pandas_udf(lambda x: x, 'timestamp', udf_type)
result = df.withColumn('time', foo_udf(df.time))
self.assertEquals(df.collect(), result.collect())
@unittest.skipIf(sys.version_info[:2] < (3, 5), "Type hints are supported from Python 3.5.")
def test_type_annotation(self):
# Regression test to check if type hints can be used. See SPARK-23569.
# Note that it throws an error during compilation in lower Python versions if 'exec'
# is not used. Also, note that we explicitly use another dictionary to avoid modifications
# in the current 'locals()'.
#
        # Hyukjin: I think it's an ugly way to test issues about syntax specific to
        # higher versions of Python, which we shouldn't encourage. This was the last resort
# I could come up with at that time.
_locals = {}
exec(
"import pandas as pd\ndef noop(col: pd.Series) -> pd.Series: return col",
_locals)
df = self.spark.range(1).select(pandas_udf(f=_locals['noop'], returnType='bigint')('id'))
self.assertEqual(df.first()[0], 0)
def test_mixed_udf(self):
df = self.spark.range(0, 1).toDF('v')
# Test mixture of multiple UDFs and Pandas UDFs.
@udf('int')
def f1(x):
assert type(x) == int
return x + 1
@pandas_udf('int')
def f2_scalar(x):
assert type(x) == pd.Series
return x + 10
@pandas_udf('int', PandasUDFType.SCALAR_ITER)
def f2_iter(it):
for x in it:
assert type(x) == pd.Series
yield x + 10
@udf('int')
def f3(x):
assert type(x) == int
return x + 100
@pandas_udf('int')
def f4_scalar(x):
assert type(x) == pd.Series
return x + 1000
@pandas_udf('int', PandasUDFType.SCALAR_ITER)
def f4_iter(it):
for x in it:
assert type(x) == pd.Series
yield x + 1000
expected_chained_1 = df.withColumn('f2_f1', df['v'] + 11).collect()
expected_chained_2 = df.withColumn('f3_f2_f1', df['v'] + 111).collect()
expected_chained_3 = df.withColumn('f4_f3_f2_f1', df['v'] + 1111).collect()
expected_chained_4 = df.withColumn('f4_f2_f1', df['v'] + 1011).collect()
expected_chained_5 = df.withColumn('f4_f3_f1', df['v'] + 1101).collect()
expected_multi = df \
.withColumn('f1', df['v'] + 1) \
.withColumn('f2', df['v'] + 10) \
.withColumn('f3', df['v'] + 100) \
.withColumn('f4', df['v'] + 1000) \
.withColumn('f2_f1', df['v'] + 11) \
.withColumn('f3_f1', df['v'] + 101) \
.withColumn('f4_f1', df['v'] + 1001) \
.withColumn('f3_f2', df['v'] + 110) \
.withColumn('f4_f2', df['v'] + 1010) \
.withColumn('f4_f3', df['v'] + 1100) \
.withColumn('f3_f2_f1', df['v'] + 111) \
.withColumn('f4_f2_f1', df['v'] + 1011) \
.withColumn('f4_f3_f1', df['v'] + 1101) \
.withColumn('f4_f3_f2', df['v'] + 1110) \
.withColumn('f4_f3_f2_f1', df['v'] + 1111) \
.collect()
for f2, f4 in [(f2_scalar, f4_scalar), (f2_scalar, f4_iter),
(f2_iter, f4_scalar), (f2_iter, f4_iter)]:
# Test single expression with chained UDFs
df_chained_1 = df.withColumn('f2_f1', f2(f1(df['v'])))
df_chained_2 = df.withColumn('f3_f2_f1', f3(f2(f1(df['v']))))
df_chained_3 = df.withColumn('f4_f3_f2_f1', f4(f3(f2(f1(df['v'])))))
df_chained_4 = df.withColumn('f4_f2_f1', f4(f2(f1(df['v']))))
df_chained_5 = df.withColumn('f4_f3_f1', f4(f3(f1(df['v']))))
self.assertEquals(expected_chained_1, df_chained_1.collect())
self.assertEquals(expected_chained_2, df_chained_2.collect())
self.assertEquals(expected_chained_3, df_chained_3.collect())
self.assertEquals(expected_chained_4, df_chained_4.collect())
self.assertEquals(expected_chained_5, df_chained_5.collect())
# Test multiple mixed UDF expressions in a single projection
df_multi_1 = df \
.withColumn('f1', f1(col('v'))) \
.withColumn('f2', f2(col('v'))) \
.withColumn('f3', f3(col('v'))) \
.withColumn('f4', f4(col('v'))) \
.withColumn('f2_f1', f2(col('f1'))) \
.withColumn('f3_f1', f3(col('f1'))) \
.withColumn('f4_f1', f4(col('f1'))) \
.withColumn('f3_f2', f3(col('f2'))) \
.withColumn('f4_f2', f4(col('f2'))) \
.withColumn('f4_f3', f4(col('f3'))) \
.withColumn('f3_f2_f1', f3(col('f2_f1'))) \
.withColumn('f4_f2_f1', f4(col('f2_f1'))) \
.withColumn('f4_f3_f1', f4(col('f3_f1'))) \
.withColumn('f4_f3_f2', f4(col('f3_f2'))) \
.withColumn('f4_f3_f2_f1', f4(col('f3_f2_f1')))
# Test mixed udfs in a single expression
df_multi_2 = df \
.withColumn('f1', f1(col('v'))) \
.withColumn('f2', f2(col('v'))) \
.withColumn('f3', f3(col('v'))) \
.withColumn('f4', f4(col('v'))) \
.withColumn('f2_f1', f2(f1(col('v')))) \
.withColumn('f3_f1', f3(f1(col('v')))) \
.withColumn('f4_f1', f4(f1(col('v')))) \
.withColumn('f3_f2', f3(f2(col('v')))) \
.withColumn('f4_f2', f4(f2(col('v')))) \
.withColumn('f4_f3', f4(f3(col('v')))) \
.withColumn('f3_f2_f1', f3(f2(f1(col('v'))))) \
.withColumn('f4_f2_f1', f4(f2(f1(col('v'))))) \
.withColumn('f4_f3_f1', f4(f3(f1(col('v'))))) \
.withColumn('f4_f3_f2', f4(f3(f2(col('v'))))) \
.withColumn('f4_f3_f2_f1', f4(f3(f2(f1(col('v'))))))
self.assertEquals(expected_multi, df_multi_1.collect())
self.assertEquals(expected_multi, df_multi_2.collect())
def test_mixed_udf_and_sql(self):
df = self.spark.range(0, 1).toDF('v')
# Test mixture of UDFs, Pandas UDFs and SQL expression.
@udf('int')
def f1(x):
assert type(x) == int
return x + 1
def f2(x):
assert type(x) == Column
return x + 10
@pandas_udf('int')
def f3s(x):
assert type(x) == pd.Series
return x + 100
@pandas_udf('int', PandasUDFType.SCALAR_ITER)
def f3i(it):
for x in it:
assert type(x) == pd.Series
yield x + 100
expected = df.withColumn('f1', df['v'] + 1) \
.withColumn('f2', df['v'] + 10) \
.withColumn('f3', df['v'] + 100) \
.withColumn('f1_f2', df['v'] + 11) \
.withColumn('f1_f3', df['v'] + 101) \
.withColumn('f2_f1', df['v'] + 11) \
.withColumn('f2_f3', df['v'] + 110) \
.withColumn('f3_f1', df['v'] + 101) \
.withColumn('f3_f2', df['v'] + 110) \
.withColumn('f1_f2_f3', df['v'] + 111) \
.withColumn('f1_f3_f2', df['v'] + 111) \
.withColumn('f2_f1_f3', df['v'] + 111) \
.withColumn('f2_f3_f1', df['v'] + 111) \
.withColumn('f3_f1_f2', df['v'] + 111) \
.withColumn('f3_f2_f1', df['v'] + 111) \
.collect()
for f3 in [f3s, f3i]:
df1 = df.withColumn('f1', f1(df['v'])) \
.withColumn('f2', f2(df['v'])) \
.withColumn('f3', f3(df['v'])) \
.withColumn('f1_f2', f1(f2(df['v']))) \
.withColumn('f1_f3', f1(f3(df['v']))) \
.withColumn('f2_f1', f2(f1(df['v']))) \
.withColumn('f2_f3', f2(f3(df['v']))) \
.withColumn('f3_f1', f3(f1(df['v']))) \
.withColumn('f3_f2', f3(f2(df['v']))) \
.withColumn('f1_f2_f3', f1(f2(f3(df['v'])))) \
.withColumn('f1_f3_f2', f1(f3(f2(df['v'])))) \
.withColumn('f2_f1_f3', f2(f1(f3(df['v'])))) \
.withColumn('f2_f3_f1', f2(f3(f1(df['v'])))) \
.withColumn('f3_f1_f2', f3(f1(f2(df['v'])))) \
.withColumn('f3_f2_f1', f3(f2(f1(df['v']))))
self.assertEquals(expected, df1.collect())
# SPARK-24721
@unittest.skipIf(not test_compiled, test_not_compiled_message)
def test_datasource_with_udf(self):
# Same as SQLTests.test_datasource_with_udf, but with Pandas UDF
        # This needs to be a separate test because the Arrow dependency is optional
import numpy as np
path = tempfile.mkdtemp()
shutil.rmtree(path)
try:
self.spark.range(1).write.mode("overwrite").format('csv').save(path)
filesource_df = self.spark.read.option('inferSchema', True).csv(path).toDF('i')
datasource_df = self.spark.read \
.format("org.apache.spark.sql.sources.SimpleScanSource") \
.option('from', 0).option('to', 1).load().toDF('i')
datasource_v2_df = self.spark.read \
.format("org.apache.spark.sql.connector.SimpleDataSourceV2") \
.load().toDF('i', 'j')
c1 = pandas_udf(lambda x: x + 1, 'int')(lit(1))
c2 = pandas_udf(lambda x: x + 1, 'int')(col('i'))
f1 = pandas_udf(lambda x: pd.Series(np.repeat(False, len(x))), 'boolean')(lit(1))
f2 = pandas_udf(lambda x: pd.Series(np.repeat(False, len(x))), 'boolean')(col('i'))
for df in [filesource_df, datasource_df, datasource_v2_df]:
result = df.withColumn('c', c1)
expected = df.withColumn('c', lit(2))
self.assertEquals(expected.collect(), result.collect())
for df in [filesource_df, datasource_df, datasource_v2_df]:
result = df.withColumn('c', c2)
expected = df.withColumn('c', col('i') + 1)
self.assertEquals(expected.collect(), result.collect())
for df in [filesource_df, datasource_df, datasource_v2_df]:
for f in [f1, f2]:
result = df.filter(f)
self.assertEquals(0, result.count())
finally:
shutil.rmtree(path)
if __name__ == "__main__":
from pyspark.sql.tests.test_pandas_udf_scalar import *
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
|
apache-2.0
|
ajaybhat/scikit-image
|
skimage/viewer/canvastools/linetool.py
|
43
|
6911
|
import numpy as np
from matplotlib import lines
from ...viewer.canvastools.base import CanvasToolBase, ToolHandles
__all__ = ['LineTool', 'ThickLineTool']
class LineTool(CanvasToolBase):
"""Widget for line selection in a plot.
Parameters
----------
manager : Viewer or PlotPlugin.
Skimage viewer or plot plugin object.
on_move : function
Function called whenever a control handle is moved.
        This function must accept the end points of the line as the only argument.
on_release : function
Function called whenever the control handle is released.
on_enter : function
Function called whenever the "enter" key is pressed.
maxdist : float
Maximum pixel distance allowed when selecting control handle.
line_props : dict
Properties for :class:`matplotlib.lines.Line2D`.
handle_props : dict
Marker properties for the handles (also see
:class:`matplotlib.lines.Line2D`).
Attributes
----------
end_points : 2D array
End points of line ((x1, y1), (x2, y2)).
"""
def __init__(self, manager, on_move=None, on_release=None, on_enter=None,
maxdist=10, line_props=None, handle_props=None,
**kwargs):
super(LineTool, self).__init__(manager, on_move=on_move,
on_enter=on_enter,
on_release=on_release, **kwargs)
props = dict(color='r', linewidth=1, alpha=0.4, solid_capstyle='butt')
props.update(line_props if line_props is not None else {})
self.linewidth = props['linewidth']
self.maxdist = maxdist
self._active_pt = None
x = (0, 0)
y = (0, 0)
self._end_pts = np.transpose([x, y])
self._line = lines.Line2D(x, y, visible=False, animated=True, **props)
self.ax.add_line(self._line)
self._handles = ToolHandles(self.ax, x, y,
marker_props=handle_props)
self._handles.set_visible(False)
self.artists = [self._line, self._handles.artist]
if on_enter is None:
def on_enter(pts):
x, y = np.transpose(pts)
print("length = %0.2f" %
np.sqrt(np.diff(x)**2 + np.diff(y)**2))
self.callback_on_enter = on_enter
self.manager.add_tool(self)
@property
def end_points(self):
return self._end_pts.astype(int)
@end_points.setter
def end_points(self, pts):
self._end_pts = np.asarray(pts)
self._line.set_data(np.transpose(pts))
self._handles.set_data(np.transpose(pts))
self._line.set_linewidth(self.linewidth)
self.set_visible(True)
self.redraw()
def hit_test(self, event):
if event.button != 1 or not self.ax.in_axes(event):
return False
idx, px_dist = self._handles.closest(event.x, event.y)
if px_dist < self.maxdist:
self._active_pt = idx
return True
else:
self._active_pt = None
return False
def on_mouse_press(self, event):
self.set_visible(True)
if self._active_pt is None:
self._active_pt = 0
x, y = event.xdata, event.ydata
self._end_pts = np.array([[x, y], [x, y]])
def on_mouse_release(self, event):
if event.button != 1:
return
self._active_pt = None
self.callback_on_release(self.geometry)
self.redraw()
def on_move(self, event):
if event.button != 1 or self._active_pt is None:
return
if not self.ax.in_axes(event):
return
self.update(event.xdata, event.ydata)
self.callback_on_move(self.geometry)
def update(self, x=None, y=None):
if x is not None:
self._end_pts[self._active_pt, :] = x, y
self.end_points = self._end_pts
@property
def geometry(self):
return self.end_points
class ThickLineTool(LineTool):
"""Widget for line selection in a plot.
The thickness of the line can be varied using the mouse scroll wheel, or
with the '+' and '-' keys.
Parameters
----------
manager : Viewer or PlotPlugin.
Skimage viewer or plot plugin object.
on_move : function
Function called whenever a control handle is moved.
        This function must accept the end points of the line as the only argument.
on_release : function
Function called whenever the control handle is released.
on_enter : function
Function called whenever the "enter" key is pressed.
on_change : function
Function called whenever the line thickness is changed.
maxdist : float
Maximum pixel distance allowed when selecting control handle.
line_props : dict
Properties for :class:`matplotlib.lines.Line2D`.
handle_props : dict
Marker properties for the handles (also see
:class:`matplotlib.lines.Line2D`).
Attributes
----------
end_points : 2D array
End points of line ((x1, y1), (x2, y2)).
"""
def __init__(self, manager, on_move=None, on_enter=None, on_release=None,
on_change=None, maxdist=10, line_props=None, handle_props=None):
super(ThickLineTool, self).__init__(manager,
on_move=on_move,
on_enter=on_enter,
on_release=on_release,
maxdist=maxdist,
line_props=line_props,
handle_props=handle_props)
if on_change is None:
def on_change(*args):
pass
self.callback_on_change = on_change
def on_scroll(self, event):
if not event.inaxes:
return
if event.button == 'up':
self._thicken_scan_line()
elif event.button == 'down':
self._shrink_scan_line()
def on_key_press(self, event):
if event.key == '+':
self._thicken_scan_line()
elif event.key == '-':
self._shrink_scan_line()
def _thicken_scan_line(self):
self.linewidth += 1
self.update()
self.callback_on_change(self.geometry)
def _shrink_scan_line(self):
if self.linewidth > 1:
self.linewidth -= 1
self.update()
self.callback_on_change(self.geometry)
if __name__ == '__main__': # pragma: no cover
from ... import data
from ...viewer import ImageViewer
image = data.camera()
viewer = ImageViewer(image)
h, w = image.shape
line_tool = ThickLineTool(viewer)
line_tool.end_points = ([w/3, h/2], [2*w/3, h/2])
viewer.show()
|
bsd-3-clause
|
vybstat/scikit-learn
|
examples/svm/plot_svm_nonlinear.py
|
268
|
1091
|
"""
==============
Non-linear SVM
==============
Perform binary classification using non-linear SVC
with RBF kernel. The target to predict is an XOR of the
inputs.
The color map illustrates the decision function learned by the SVC.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
xx, yy = np.meshgrid(np.linspace(-3, 3, 500),
np.linspace(-3, 3, 500))
np.random.seed(0)
X = np.random.randn(300, 2)
Y = np.logical_xor(X[:, 0] > 0, X[:, 1] > 0)
# fit the model
clf = svm.NuSVC()
clf.fit(X, Y)
# plot the decision function for each datapoint on the grid
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.imshow(Z, interpolation='nearest',
extent=(xx.min(), xx.max(), yy.min(), yy.max()), aspect='auto',
origin='lower', cmap=plt.cm.PuOr_r)
contours = plt.contour(xx, yy, Z, levels=[0], linewidths=2,
                       linestyles='--')
plt.scatter(X[:, 0], X[:, 1], s=30, c=Y, cmap=plt.cm.Paired)
plt.xticks(())
plt.yticks(())
plt.axis([-3, 3, -3, 3])
plt.show()
|
bsd-3-clause
|
ryfeus/lambda-packs
|
Pandas_numpy/source/pandas/io/formats/common.py
|
16
|
1094
|
# -*- coding: utf-8 -*-
"""
Common helper methods used in different submodules of pandas.io.formats
"""
def get_level_lengths(levels, sentinel=''):
"""For each index in each level the function returns lengths of indexes.
Parameters
----------
levels : list of lists
List of values on for level.
sentinel : string, optional
Value which states that no new index starts on there.
Returns
----------
Returns list of maps. For each level returns map of indexes (key is index
in row and value is length of index).
"""
if len(levels) == 0:
return []
control = [True for x in levels[0]]
result = []
for level in levels:
last_index = 0
lengths = {}
for i, key in enumerate(level):
if control[i] and key == sentinel:
pass
else:
control[i] = False
lengths[last_index] = i - last_index
last_index = i
lengths[last_index] = len(level) - last_index
result.append(lengths)
return result
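# --- Illustrative usage (not part of the original module): a minimal, hedged
# sketch showing the run-length structure returned for a small sparsified
# MultiIndex-style input, where '' (the default sentinel) marks a repeated
# label that continues the run started above it.
if __name__ == '__main__':
    demo_levels = [['a', '', 'b', ''],     # level 0: two runs of length 2
                   ['x', 'y', 'x', 'y']]   # level 1: four runs of length 1
    print(get_level_lengths(demo_levels))
    # expected: [{0: 2, 2: 2}, {0: 1, 1: 1, 2: 1, 3: 1}]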
|
mit
|
joernhees/scikit-learn
|
sklearn/manifold/locally_linear.py
|
6
|
26492
|
"""Locally Linear Embedding"""
# Author: Fabian Pedregosa -- <[email protected]>
# Jake Vanderplas -- <[email protected]>
# License: BSD 3 clause (C) INRIA 2011
import numpy as np
from scipy.linalg import eigh, svd, qr, solve
from scipy.sparse import eye, csr_matrix
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, check_array
from ..utils.arpack import eigsh
from ..utils.extmath import stable_cumsum
from ..utils.validation import check_is_fitted
from ..utils.validation import FLOAT_DTYPES
from ..neighbors import NearestNeighbors
def barycenter_weights(X, Z, reg=1e-3):
"""Compute barycenter weights of X from Y along the first axis
We estimate the weights to assign to each point in Y[i] to recover
the point X[i]. The barycenter weights sum to 1.
Parameters
----------
X : array-like, shape (n_samples, n_dim)
Z : array-like, shape (n_samples, n_neighbors, n_dim)
reg : float, optional
amount of regularization to add for the problem to be
well-posed in the case of n_neighbors > n_dim
Returns
-------
B : array-like, shape (n_samples, n_neighbors)
Notes
-----
See developers note for more information.
"""
X = check_array(X, dtype=FLOAT_DTYPES)
Z = check_array(Z, dtype=FLOAT_DTYPES, allow_nd=True)
n_samples, n_neighbors = X.shape[0], Z.shape[1]
B = np.empty((n_samples, n_neighbors), dtype=X.dtype)
v = np.ones(n_neighbors, dtype=X.dtype)
# this might raise a LinalgError if G is singular and has trace
# zero
for i, A in enumerate(Z.transpose(0, 2, 1)):
C = A.T - X[i] # broadcasting
G = np.dot(C, C.T)
trace = np.trace(G)
if trace > 0:
R = reg * trace
else:
R = reg
G.flat[::Z.shape[1] + 1] += R
w = solve(G, v, sym_pos=True)
B[i, :] = w / np.sum(w)
return B
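# --- Illustrative example (an assumption, not part of the original module):
# for a single point at the origin with two symmetric neighbours, the
# regularized solve above yields equal weights that sum to one, e.g.
#     barycenter_weights(np.array([[0., 0.]]),
#                        np.array([[[1., 0.], [-1., 0.]]]))
# returns approximately array([[0.5, 0.5]]).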
def barycenter_kneighbors_graph(X, n_neighbors, reg=1e-3, n_jobs=1):
"""Computes the barycenter weighted graph of k-Neighbors for points in X
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, sparse array, precomputed tree, or NearestNeighbors
object.
n_neighbors : int
Number of neighbors for each sample.
reg : float, optional
Amount of regularization when solving the least-squares
problem. Only relevant if mode='barycenter'. If None, use the
default.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
See also
--------
sklearn.neighbors.kneighbors_graph
sklearn.neighbors.radius_neighbors_graph
"""
knn = NearestNeighbors(n_neighbors + 1, n_jobs=n_jobs).fit(X)
X = knn._fit_X
n_samples = X.shape[0]
ind = knn.kneighbors(X, return_distance=False)[:, 1:]
data = barycenter_weights(X, X[ind], reg=reg)
indptr = np.arange(0, n_samples * n_neighbors + 1, n_neighbors)
return csr_matrix((data.ravel(), ind.ravel(), indptr),
shape=(n_samples, n_samples))
def null_space(M, k, k_skip=1, eigen_solver='arpack', tol=1E-6, max_iter=100,
random_state=None):
"""
Find the null space of a matrix M.
Parameters
----------
M : {array, matrix, sparse matrix, LinearOperator}
Input covariance matrix: should be symmetric positive semi-definite
k : integer
Number of eigenvalues/vectors to return
k_skip : integer, optional
Number of low eigenvalues to skip.
eigen_solver : string, {'auto', 'arpack', 'dense'}
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, optional
Tolerance for 'arpack' method.
Not used if eigen_solver=='dense'.
max_iter : maximum number of iterations for 'arpack' method
not used if eigen_solver=='dense'
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`. Used when ``solver`` == 'arpack'.
"""
if eigen_solver == 'auto':
if M.shape[0] > 200 and k + k_skip < 10:
eigen_solver = 'arpack'
else:
eigen_solver = 'dense'
if eigen_solver == 'arpack':
random_state = check_random_state(random_state)
# initialize with [-1,1] as in ARPACK
v0 = random_state.uniform(-1, 1, M.shape[0])
try:
eigen_values, eigen_vectors = eigsh(M, k + k_skip, sigma=0.0,
tol=tol, maxiter=max_iter,
v0=v0)
except RuntimeError as msg:
raise ValueError("Error in determining null-space with ARPACK. "
"Error message: '%s'. "
"Note that method='arpack' can fail when the "
"weight matrix is singular or otherwise "
"ill-behaved. method='dense' is recommended. "
"See online documentation for more information."
% msg)
return eigen_vectors[:, k_skip:], np.sum(eigen_values[k_skip:])
elif eigen_solver == 'dense':
if hasattr(M, 'toarray'):
M = M.toarray()
eigen_values, eigen_vectors = eigh(
M, eigvals=(k_skip, k + k_skip - 1), overwrite_a=True)
index = np.argsort(np.abs(eigen_values))
return eigen_vectors[:, index], np.sum(eigen_values)
else:
raise ValueError("Unrecognized eigen_solver '%s'" % eigen_solver)
def locally_linear_embedding(
X, n_neighbors, n_components, reg=1e-3, eigen_solver='auto', tol=1e-6,
max_iter=100, method='standard', hessian_tol=1E-4, modified_tol=1E-12,
random_state=None, n_jobs=1):
"""Perform a Locally Linear Embedding analysis on the data.
Read more in the :ref:`User Guide <locally_linear_embedding>`.
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, sparse array, precomputed tree, or NearestNeighbors
object.
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold.
reg : float
regularization constant, multiplies the trace of the local covariance
matrix of the distances.
eigen_solver : string, {'auto', 'arpack', 'dense'}
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, optional
Tolerance for 'arpack' method
Not used if eigen_solver=='dense'.
max_iter : integer
maximum number of iterations for the arpack solver.
method : {'standard', 'hessian', 'modified', 'ltsa'}
standard : use the standard locally linear embedding algorithm.
see reference [1]_
hessian : use the Hessian eigenmap method. This method requires
                   n_neighbors > n_components * (1 + (n_components + 1) / 2).
see reference [2]_
modified : use the modified locally linear embedding algorithm.
see reference [3]_
ltsa : use local tangent space alignment algorithm
see reference [4]_
hessian_tol : float, optional
Tolerance for Hessian eigenmapping method.
Only used if method == 'hessian'
modified_tol : float, optional
Tolerance for modified LLE method.
Only used if method == 'modified'
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`. Used when ``solver`` == 'arpack'.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Returns
-------
Y : array-like, shape [n_samples, n_components]
Embedding vectors.
squared_error : float
Reconstruction error for the embedding vectors. Equivalent to
``norm(Y - W Y, 'fro')**2``, where W are the reconstruction weights.
References
----------
.. [1] `Roweis, S. & Saul, L. Nonlinear dimensionality reduction
by locally linear embedding. Science 290:2323 (2000).`
.. [2] `Donoho, D. & Grimes, C. Hessian eigenmaps: Locally
linear embedding techniques for high-dimensional data.
Proc Natl Acad Sci U S A. 100:5591 (2003).`
.. [3] `Zhang, Z. & Wang, J. MLLE: Modified Locally Linear
Embedding Using Multiple Weights.`
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.70.382
.. [4] `Zhang, Z. & Zha, H. Principal manifolds and nonlinear
dimensionality reduction via tangent space alignment.
Journal of Shanghai Univ. 8:406 (2004)`
"""
if eigen_solver not in ('auto', 'arpack', 'dense'):
raise ValueError("unrecognized eigen_solver '%s'" % eigen_solver)
if method not in ('standard', 'hessian', 'modified', 'ltsa'):
raise ValueError("unrecognized method '%s'" % method)
nbrs = NearestNeighbors(n_neighbors=n_neighbors + 1, n_jobs=n_jobs)
nbrs.fit(X)
X = nbrs._fit_X
N, d_in = X.shape
if n_components > d_in:
raise ValueError("output dimension must be less than or equal "
"to input dimension")
if n_neighbors >= N:
raise ValueError("n_neighbors must be less than number of points")
if n_neighbors <= 0:
raise ValueError("n_neighbors must be positive")
M_sparse = (eigen_solver != 'dense')
if method == 'standard':
W = barycenter_kneighbors_graph(
nbrs, n_neighbors=n_neighbors, reg=reg, n_jobs=n_jobs)
# we'll compute M = (I-W)'(I-W)
# depending on the solver, we'll do this differently
if M_sparse:
M = eye(*W.shape, format=W.format) - W
M = (M.T * M).tocsr()
else:
M = (W.T * W - W.T - W).toarray()
            M.flat[::M.shape[0] + 1] += 1  # add I so that M = (I - W)' (I - W)
elif method == 'hessian':
dp = n_components * (n_components + 1) // 2
if n_neighbors <= n_components + dp:
raise ValueError("for method='hessian', n_neighbors must be "
"greater than "
"[n_components * (n_components + 3) / 2]")
neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
return_distance=False)
neighbors = neighbors[:, 1:]
Yi = np.empty((n_neighbors, 1 + n_components + dp), dtype=np.float64)
Yi[:, 0] = 1
M = np.zeros((N, N), dtype=np.float64)
use_svd = (n_neighbors > d_in)
for i in range(N):
Gi = X[neighbors[i]]
Gi -= Gi.mean(0)
# build Hessian estimator
if use_svd:
U = svd(Gi, full_matrices=0)[0]
else:
Ci = np.dot(Gi, Gi.T)
U = eigh(Ci)[1][:, ::-1]
Yi[:, 1:1 + n_components] = U[:, :n_components]
j = 1 + n_components
for k in range(n_components):
Yi[:, j:j + n_components - k] = (U[:, k:k + 1] *
U[:, k:n_components])
j += n_components - k
Q, R = qr(Yi)
w = Q[:, n_components + 1:]
S = w.sum(0)
S[np.where(abs(S) < hessian_tol)] = 1
w /= S
nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
M[nbrs_x, nbrs_y] += np.dot(w, w.T)
if M_sparse:
M = csr_matrix(M)
elif method == 'modified':
if n_neighbors < n_components:
raise ValueError("modified LLE requires "
"n_neighbors >= n_components")
neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
return_distance=False)
neighbors = neighbors[:, 1:]
# find the eigenvectors and eigenvalues of each local covariance
# matrix. We want V[i] to be a [n_neighbors x n_neighbors] matrix,
# where the columns are eigenvectors
V = np.zeros((N, n_neighbors, n_neighbors))
nev = min(d_in, n_neighbors)
evals = np.zeros([N, nev])
# choose the most efficient way to find the eigenvectors
use_svd = (n_neighbors > d_in)
if use_svd:
for i in range(N):
X_nbrs = X[neighbors[i]] - X[i]
V[i], evals[i], _ = svd(X_nbrs,
full_matrices=True)
evals **= 2
else:
for i in range(N):
X_nbrs = X[neighbors[i]] - X[i]
C_nbrs = np.dot(X_nbrs, X_nbrs.T)
evi, vi = eigh(C_nbrs)
evals[i] = evi[::-1]
V[i] = vi[:, ::-1]
# find regularized weights: this is like normal LLE.
# because we've already computed the SVD of each covariance matrix,
# it's faster to use this rather than np.linalg.solve
reg = 1E-3 * evals.sum(1)
tmp = np.dot(V.transpose(0, 2, 1), np.ones(n_neighbors))
tmp[:, :nev] /= evals + reg[:, None]
tmp[:, nev:] /= reg[:, None]
w_reg = np.zeros((N, n_neighbors))
for i in range(N):
w_reg[i] = np.dot(V[i], tmp[i])
w_reg /= w_reg.sum(1)[:, None]
# calculate eta: the median of the ratio of small to large eigenvalues
# across the points. This is used to determine s_i, below
rho = evals[:, n_components:].sum(1) / evals[:, :n_components].sum(1)
eta = np.median(rho)
# find s_i, the size of the "almost null space" for each point:
# this is the size of the largest set of eigenvalues
# such that Sum[v; v in set]/Sum[v; v not in set] < eta
s_range = np.zeros(N, dtype=int)
evals_cumsum = stable_cumsum(evals, 1)
eta_range = evals_cumsum[:, -1:] / evals_cumsum[:, :-1] - 1
for i in range(N):
s_range[i] = np.searchsorted(eta_range[i, ::-1], eta)
s_range += n_neighbors - nev # number of zero eigenvalues
# Now calculate M.
# This is the [N x N] matrix whose null space is the desired embedding
M = np.zeros((N, N), dtype=np.float64)
for i in range(N):
s_i = s_range[i]
# select bottom s_i eigenvectors and calculate alpha
Vi = V[i, :, n_neighbors - s_i:]
alpha_i = np.linalg.norm(Vi.sum(0)) / np.sqrt(s_i)
# compute Householder matrix which satisfies
# Hi*Vi.T*ones(n_neighbors) = alpha_i*ones(s)
# using prescription from paper
h = alpha_i * np.ones(s_i) - np.dot(Vi.T, np.ones(n_neighbors))
norm_h = np.linalg.norm(h)
if norm_h < modified_tol:
h *= 0
else:
h /= norm_h
# Householder matrix is
# >> Hi = np.identity(s_i) - 2*np.outer(h,h)
# Then the weight matrix is
# >> Wi = np.dot(Vi,Hi) + (1-alpha_i) * w_reg[i,:,None]
# We do this much more efficiently:
Wi = (Vi - 2 * np.outer(np.dot(Vi, h), h) +
(1 - alpha_i) * w_reg[i, :, None])
# Update M as follows:
# >> W_hat = np.zeros( (N,s_i) )
# >> W_hat[neighbors[i],:] = Wi
# >> W_hat[i] -= 1
# >> M += np.dot(W_hat,W_hat.T)
# We can do this much more efficiently:
nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
M[nbrs_x, nbrs_y] += np.dot(Wi, Wi.T)
Wi_sum1 = Wi.sum(1)
M[i, neighbors[i]] -= Wi_sum1
M[neighbors[i], i] -= Wi_sum1
M[i, i] += s_i
if M_sparse:
M = csr_matrix(M)
elif method == 'ltsa':
neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
return_distance=False)
neighbors = neighbors[:, 1:]
M = np.zeros((N, N))
use_svd = (n_neighbors > d_in)
for i in range(N):
Xi = X[neighbors[i]]
Xi -= Xi.mean(0)
# compute n_components largest eigenvalues of Xi * Xi^T
if use_svd:
v = svd(Xi, full_matrices=True)[0]
else:
Ci = np.dot(Xi, Xi.T)
v = eigh(Ci)[1][:, ::-1]
Gi = np.zeros((n_neighbors, n_components + 1))
Gi[:, 1:] = v[:, :n_components]
Gi[:, 0] = 1. / np.sqrt(n_neighbors)
GiGiT = np.dot(Gi, Gi.T)
nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
M[nbrs_x, nbrs_y] -= GiGiT
M[neighbors[i], neighbors[i]] += 1
return null_space(M, n_components, k_skip=1, eigen_solver=eigen_solver,
tol=tol, max_iter=max_iter, random_state=random_state)
class LocallyLinearEmbedding(BaseEstimator, TransformerMixin):
"""Locally Linear Embedding
Read more in the :ref:`User Guide <locally_linear_embedding>`.
Parameters
----------
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold
reg : float
regularization constant, multiplies the trace of the local covariance
matrix of the distances.
eigen_solver : string, {'auto', 'arpack', 'dense'}
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, optional
Tolerance for 'arpack' method
Not used if eigen_solver=='dense'.
max_iter : integer
maximum number of iterations for the arpack solver.
Not used if eigen_solver=='dense'.
method : string ('standard', 'hessian', 'modified' or 'ltsa')
standard : use the standard locally linear embedding algorithm. see
reference [1]
hessian : use the Hessian eigenmap method. This method requires
                   ``n_neighbors > n_components * (1 + (n_components + 1) / 2)``
see reference [2]
modified : use the modified locally linear embedding algorithm.
see reference [3]
ltsa : use local tangent space alignment algorithm
see reference [4]
hessian_tol : float, optional
Tolerance for Hessian eigenmapping method.
Only used if ``method == 'hessian'``
modified_tol : float, optional
Tolerance for modified LLE method.
Only used if ``method == 'modified'``
neighbors_algorithm : string ['auto'|'brute'|'kd_tree'|'ball_tree']
algorithm to use for nearest neighbors search,
passed to neighbors.NearestNeighbors instance
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`. Used when ``eigen_solver`` == 'arpack'.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Attributes
----------
    embedding_ : array-like, shape [n_samples, n_components]
        Stores the embedding vectors
    reconstruction_error_ : float
        Reconstruction error associated with `embedding_`
nbrs_ : NearestNeighbors object
Stores nearest neighbors instance, including BallTree or KDtree
if applicable.
References
----------
.. [1] `Roweis, S. & Saul, L. Nonlinear dimensionality reduction
by locally linear embedding. Science 290:2323 (2000).`
.. [2] `Donoho, D. & Grimes, C. Hessian eigenmaps: Locally
linear embedding techniques for high-dimensional data.
Proc Natl Acad Sci U S A. 100:5591 (2003).`
.. [3] `Zhang, Z. & Wang, J. MLLE: Modified Locally Linear
Embedding Using Multiple Weights.`
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.70.382
.. [4] `Zhang, Z. & Zha, H. Principal manifolds and nonlinear
dimensionality reduction via tangent space alignment.
Journal of Shanghai Univ. 8:406 (2004)`
"""
def __init__(self, n_neighbors=5, n_components=2, reg=1E-3,
eigen_solver='auto', tol=1E-6, max_iter=100,
method='standard', hessian_tol=1E-4, modified_tol=1E-12,
neighbors_algorithm='auto', random_state=None, n_jobs=1):
self.n_neighbors = n_neighbors
self.n_components = n_components
self.reg = reg
self.eigen_solver = eigen_solver
self.tol = tol
self.max_iter = max_iter
self.method = method
self.hessian_tol = hessian_tol
self.modified_tol = modified_tol
self.random_state = random_state
self.neighbors_algorithm = neighbors_algorithm
self.n_jobs = n_jobs
def _fit_transform(self, X):
self.nbrs_ = NearestNeighbors(self.n_neighbors,
algorithm=self.neighbors_algorithm,
n_jobs=self.n_jobs)
random_state = check_random_state(self.random_state)
X = check_array(X, dtype=float)
self.nbrs_.fit(X)
self.embedding_, self.reconstruction_error_ = \
locally_linear_embedding(
self.nbrs_, self.n_neighbors, self.n_components,
eigen_solver=self.eigen_solver, tol=self.tol,
max_iter=self.max_iter, method=self.method,
hessian_tol=self.hessian_tol, modified_tol=self.modified_tol,
random_state=random_state, reg=self.reg, n_jobs=self.n_jobs)
def fit(self, X, y=None):
"""Compute the embedding vectors for data X
Parameters
----------
X : array-like of shape [n_samples, n_features]
training set.
Returns
-------
self : returns an instance of self.
"""
self._fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Compute the embedding vectors for data X and transform X.
Parameters
----------
X : array-like of shape [n_samples, n_features]
training set.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
self._fit_transform(X)
return self.embedding_
def transform(self, X):
"""
Transform new points into embedding space.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
X_new : array, shape = [n_samples, n_components]
Notes
-----
Because of scaling performed by this method, it is discouraged to use
it together with methods that are not scale-invariant (like SVMs)
"""
check_is_fitted(self, "nbrs_")
X = check_array(X)
ind = self.nbrs_.kneighbors(X, n_neighbors=self.n_neighbors,
return_distance=False)
weights = barycenter_weights(X, self.nbrs_._fit_X[ind],
reg=self.reg)
X_new = np.empty((X.shape[0], self.n_components))
for i in range(X.shape[0]):
X_new[i] = np.dot(self.embedding_[ind[i]].T, weights[i])
return X_new
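if __name__ == '__main__':  # pragma: no cover
    # Illustrative usage only (not part of the original module); the dataset
    # and parameter values below are arbitrary choices for a quick smoke test.
    rng = check_random_state(0)
    X_demo = rng.rand(100, 5)
    lle = LocallyLinearEmbedding(n_neighbors=10, n_components=2,
                                 random_state=0)
    X_embedded = lle.fit_transform(X_demo)
    print(X_embedded.shape)             # expected: (100, 2)
    print(lle.reconstruction_error_)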
|
bsd-3-clause
|
jimsrc/seatos
|
etc/n_CR/individual/check_pos.py
|
1
|
2610
|
#!/usr/bin/env ipython
from pylab import *
#from load_data import sh, mc, cr
import func_data as fd
import share.funcs as ff
#import CythonSrc.funcs as ff
import matplotlib.patches as patches
import matplotlib.transforms as transforms
from os import environ as env
from os.path import isfile, isdir
from h5py import File as h5
#++++++++++++++++++++++++++++++++++++++++++++++++++++
class Lim:
def __init__(self, min_, max_, n):
self.min = min_
self.max = max_
self.n = n
def delta(self):
return (self.max-self.min) / (1.0*self.n)
"""
dir_inp_sh = '{dir}/sheaths.icmes/ascii/MCflag0.1.2.2H/woShiftCorr/_auger_' .format(dir=env['MEAN_PROFILES_ACE'])
dir_inp_mc = '{dir}/icmes/ascii/MCflag0.1.2.2H/woShiftCorr/_auger_' .format(dir=env['MEAN_PROFILES_ACE'])
#dir_inp_sh = '{dir}/sheaths/ascii/MCflag2/wShiftCorr/_test_Vmc_' .format(dir=env['MEAN_PROFILES_ACE'])
#dir_inp_mc = '{dir}/mcs/ascii/MCflag2/wShiftCorr/_test_Vmc_' .format(dir=env['MEAN_PROFILES_ACE'])
fname_inp_part = 'MCflag0.1.2.2H_2before.4after_fgap0.2_WangNaN' # '_vlo.100.0.vhi.375.0_CRs.Auger_BandScals.txt'
#fname_inp_part = 'MCflag2_2before.4after_fgap0.2_Wang90.0'
"""
#CRstr = 'CRs.Auger_BandScals'
#CRstr = 'CRs.Auger_BandMuons'
CRstr = 'CRs.Auger_scals'
vlo, vhi = 100., 375.
#vlo, vhi = 375., 450.
#vlo, vhi = 450., 3000.
dir_inp = '../out/individual'
fname_inp = '{dir}/_nCR_vlo.{lo:5.1f}.vhi.{hi:4.1f}_{name}.h5' .format(dir=dir_inp, lo=vlo, hi=vhi, name=CRstr)
#++++++++++++++++++++++++++++++++++++++++++++++++ ajuste
#--- parameter boundaries && number of evaluations
fi = h5(fname_inp, 'r') # input
fpar = {} # fit parameters
for pname in fi.keys():
if pname=='grids':
#fpar[pname] = {}
for pname_ in fi['grids'].keys():
# grilla de exploracion con
# formato: [min, max, delta, nbin]
fpar['grids/'+pname_] = fi['grids/'+pname_][...]
continue
fpar[pname] = fi[pname].value
#fi[pname] = fit.par[pname]
#print fpar
print " ---> vlo, vhi: ", vlo, vhi
for nm in fpar.keys():
if nm.startswith('grids'):
continue
min_, max_ = fpar['grids/'+nm][0], fpar['grids/'+nm][1]
delta = fpar['grids/'+nm][2]
v = fpar[nm]
pos = (v - min_)/(max_-min_)
d = delta/(max_-min_)
print nm+': ', pos, d, '; \t', v
"""
#--- slice object
rranges = (
slice(tau.min, tau.max, tau.delta()),
slice(q.min, q.max, q.delta()),
slice(off.min, off.max, off.delta()),
slice(bp.min, bp.max, bp.delta()),
slice(bo.min, bo.max, bo.delta()),
)
"""
#EOF
|
mit
|
vamsirajendra/nupic
|
nupic/research/monitor_mixin/monitor_mixin_base.py
|
27
|
5512
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
MonitorMixinBase class used in monitor mixin framework.
"""
import abc
import numpy
from prettytable import PrettyTable
from nupic.research.monitor_mixin.plot import Plot
class MonitorMixinBase(object):
"""
Base class for MonitorMixin. Each subclass will be a mixin for a particular
algorithm.
All arguments, variables, and methods in monitor mixin classes should be
prefixed with "mm" (to avoid collision with the classes they mix in to).
"""
__metaclass__ = abc.ABCMeta
def __init__(self, *args, **kwargs):
"""
Note: If you set the kwarg "mmName", then pretty-printing of traces and
metrics will include the name you specify as a tag before every title.
"""
self.mmName = kwargs.get("mmName")
if "mmName" in kwargs:
del kwargs["mmName"]
super(MonitorMixinBase, self).__init__(*args, **kwargs)
# Mapping from key (string) => trace (Trace)
self._mmTraces = None
self._mmData = None
self.mmClearHistory()
def mmClearHistory(self):
"""
Clears the stored history.
"""
self._mmTraces = {}
self._mmData = {}
@staticmethod
def mmPrettyPrintTraces(traces, breakOnResets=None):
"""
Returns pretty-printed table of traces.
@param traces (list) Traces to print in table
@param breakOnResets (BoolsTrace) Trace of resets to break table on
@return (string) Pretty-printed table of traces.
"""
assert len(traces) > 0, "No traces found"
table = PrettyTable(["#"] + [trace.prettyPrintTitle() for trace in traces])
for i in xrange(len(traces[0].data)):
if breakOnResets and breakOnResets.data[i]:
table.add_row(["<reset>"] * (len(traces) + 1))
table.add_row([i] +
[trace.prettyPrintDatum(trace.data[i]) for trace in traces])
return table.get_string().encode("utf-8")
@staticmethod
def mmPrettyPrintMetrics(metrics, sigFigs=5):
"""
Returns pretty-printed table of metrics.
@param metrics (list) Traces to print in table
@param sigFigs (int) Number of significant figures to print
@return (string) Pretty-printed table of metrics.
"""
assert len(metrics) > 0, "No metrics found"
table = PrettyTable(["Metric", "mean", "standard deviation",
"min", "max", "sum", ])
for metric in metrics:
table.add_row([metric.prettyPrintTitle()] + metric.getStats())
return table.get_string().encode("utf-8")
def mmGetDefaultTraces(self, verbosity=1):
"""
Returns list of default traces. (To be overridden.)
@param verbosity (int) Verbosity level
@return (list) Default traces
"""
return []
def mmGetDefaultMetrics(self, verbosity=1):
"""
Returns list of default metrics. (To be overridden.)
@param verbosity (int) Verbosity level
@return (list) Default metrics
"""
return []
def mmGetCellTracePlot(self, cellTrace, cellCount, activityType, title="",
showReset=False, resetShading=0.25):
"""
Returns plot of the cell activity. Note that if many timesteps of
activities are input, matplotlib's image interpolation may omit activities
(columns in the image).
@param cellTrace (list) a temporally ordered list of sets of cell
activities
@param cellCount (int) number of cells in the space being rendered
@param activityType (string) type of cell activity being displayed
@param title (string) an optional title for the figure
@param showReset (bool) if true, the first set of cell activities
after a reset will have a grayscale background
@param resetShading (float) applicable if showReset is true, specifies the
intensity of the reset background with 0.0
being white and 1.0 being black
@return (Plot) plot
"""
plot = Plot(self, title)
resetTrace = self.mmGetTraceResets().data
data = numpy.zeros((cellCount, 1))
for i in xrange(len(cellTrace)):
# Set up a "background" vector that is shaded or blank
if showReset and resetTrace[i]:
activity = numpy.ones((cellCount, 1)) * resetShading
else:
activity = numpy.zeros((cellCount, 1))
activeIndices = cellTrace[i]
activity[list(activeIndices)] = 1
data = numpy.concatenate((data, activity), 1)
plot.add2DArray(data, xlabel="Time", ylabel=activityType, name=title)
return plot
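# --- Illustrative sketch (an assumption, not part of the original module): a
# subclass mixes this base into an algorithm class and keeps the "mm" prefix,
# overriding the default-trace/metric hooks defined above, e.g.
#
#   class MyAlgorithmMonitorMixin(MonitorMixinBase):
#       def mmGetDefaultTraces(self, verbosity=1):
#           return []   # would return Trace objects recorded by the algorithm
#       def mmGetDefaultMetrics(self, verbosity=1):
#           return []   # would return Metric objects derived from the traces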
|
agpl-3.0
|
ammarkhann/FinalSeniorCode
|
lib/python2.7/site-packages/pandas/tests/io/msgpack/test_read_size.py
|
22
|
1870
|
"""Test Unpacker's read_array_header and read_map_header methods"""
from pandas.io.msgpack import packb, Unpacker, OutOfData
UnexpectedTypeException = ValueError
def test_read_array_header():
unpacker = Unpacker()
unpacker.feed(packb(['a', 'b', 'c']))
assert unpacker.read_array_header() == 3
assert unpacker.unpack() == b'a'
assert unpacker.unpack() == b'b'
assert unpacker.unpack() == b'c'
try:
unpacker.unpack()
assert 0, 'should raise exception'
except OutOfData:
assert 1, 'okay'
def test_read_map_header():
unpacker = Unpacker()
unpacker.feed(packb({'a': 'A'}))
assert unpacker.read_map_header() == 1
assert unpacker.unpack() == B'a'
assert unpacker.unpack() == B'A'
try:
unpacker.unpack()
assert 0, 'should raise exception'
except OutOfData:
assert 1, 'okay'
def test_incorrect_type_array():
unpacker = Unpacker()
unpacker.feed(packb(1))
try:
unpacker.read_array_header()
assert 0, 'should raise exception'
except UnexpectedTypeException:
assert 1, 'okay'
def test_incorrect_type_map():
unpacker = Unpacker()
unpacker.feed(packb(1))
try:
unpacker.read_map_header()
assert 0, 'should raise exception'
except UnexpectedTypeException:
assert 1, 'okay'
def test_correct_type_nested_array():
unpacker = Unpacker()
unpacker.feed(packb({'a': ['b', 'c', 'd']}))
try:
unpacker.read_array_header()
assert 0, 'should raise exception'
except UnexpectedTypeException:
assert 1, 'okay'
def test_incorrect_type_nested_map():
unpacker = Unpacker()
unpacker.feed(packb([{'a': 'b'}]))
try:
unpacker.read_map_header()
assert 0, 'should raise exception'
except UnexpectedTypeException:
assert 1, 'okay'
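# --- Illustrative note (an assumption, not part of the original tests):
# read_array_header/read_map_header let a caller stream container elements
# one unpack() at a time instead of materializing the whole container, e.g.
#     unpacker = Unpacker()
#     unpacker.feed(packb([1, 2, 3]))
#     n = unpacker.read_array_header()                 # n == 3
#     items = [unpacker.unpack() for _ in range(n)]    # [1, 2, 3]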
|
mit
|
pv/scikit-learn
|
sklearn/ensemble/tests/test_weight_boosting.py
|
40
|
16837
|
"""Testing for the boost module (sklearn.ensemble.boost)."""
import numpy as np
from sklearn.utils.testing import assert_array_equal, assert_array_less
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal, assert_true
from sklearn.utils.testing import assert_raises, assert_raises_regexp
from sklearn.cross_validation import train_test_split
from sklearn.grid_search import GridSearchCV
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import AdaBoostRegressor
from sklearn.ensemble import weight_boosting
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.svm import SVC, SVR
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.utils import shuffle
from sklearn import datasets
# Common random state
rng = np.random.RandomState(0)
# Toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y_class = ["foo", "foo", "foo", 1, 1, 1] # test string class labels
y_regr = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
y_t_class = ["foo", 1, 1]
y_t_regr = [-1, 1, 1]
# Load the iris dataset and randomly permute it
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data, iris.target = shuffle(iris.data, iris.target, random_state=rng)
# Load the boston dataset and randomly permute it
boston = datasets.load_boston()
boston.data, boston.target = shuffle(boston.data, boston.target,
random_state=rng)
def test_samme_proba():
# Test the `_samme_proba` helper function.
# Define some example (bad) `predict_proba` output.
probs = np.array([[1, 1e-6, 0],
[0.19, 0.6, 0.2],
[-999, 0.51, 0.5],
[1e-6, 1, 1e-9]])
probs /= np.abs(probs.sum(axis=1))[:, np.newaxis]
# _samme_proba calls estimator.predict_proba.
# Make a mock object so I can control what gets returned.
class MockEstimator(object):
def predict_proba(self, X):
assert_array_equal(X.shape, probs.shape)
return probs
mock = MockEstimator()
samme_proba = weight_boosting._samme_proba(mock, 3, np.ones_like(probs))
assert_array_equal(samme_proba.shape, probs.shape)
assert_true(np.isfinite(samme_proba).all())
# Make sure that the correct elements come out as smallest --
# `_samme_proba` should preserve the ordering in each example.
assert_array_equal(np.argmin(samme_proba, axis=1), [2, 0, 0, 2])
assert_array_equal(np.argmax(samme_proba, axis=1), [0, 1, 1, 1])
def test_classification_toy():
# Check classification on a toy dataset.
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg, random_state=0)
clf.fit(X, y_class)
assert_array_equal(clf.predict(T), y_t_class)
assert_array_equal(np.unique(np.asarray(y_t_class)), clf.classes_)
assert_equal(clf.predict_proba(T).shape, (len(T), 2))
assert_equal(clf.decision_function(T).shape, (len(T),))
def test_regression_toy():
# Check classification on a toy dataset.
clf = AdaBoostRegressor(random_state=0)
clf.fit(X, y_regr)
assert_array_equal(clf.predict(T), y_t_regr)
def test_iris():
# Check consistency on dataset iris.
classes = np.unique(iris.target)
clf_samme = prob_samme = None
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg)
clf.fit(iris.data, iris.target)
assert_array_equal(classes, clf.classes_)
proba = clf.predict_proba(iris.data)
if alg == "SAMME":
clf_samme = clf
prob_samme = proba
assert_equal(proba.shape[1], len(classes))
assert_equal(clf.decision_function(iris.data).shape[1], len(classes))
score = clf.score(iris.data, iris.target)
assert score > 0.9, "Failed with algorithm %s and score = %f" % \
(alg, score)
# Somewhat hacky regression test: prior to
# ae7adc880d624615a34bafdb1d75ef67051b8200,
# predict_proba returned SAMME.R values for SAMME.
clf_samme.algorithm = "SAMME.R"
assert_array_less(0,
np.abs(clf_samme.predict_proba(iris.data) - prob_samme))
def test_boston():
# Check consistency on dataset boston house prices.
clf = AdaBoostRegressor(random_state=0)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert score > 0.85
def test_staged_predict():
# Check staged predictions.
rng = np.random.RandomState(0)
iris_weights = rng.randint(10, size=iris.target.shape)
boston_weights = rng.randint(10, size=boston.target.shape)
# AdaBoost classification
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg, n_estimators=10)
clf.fit(iris.data, iris.target, sample_weight=iris_weights)
predictions = clf.predict(iris.data)
staged_predictions = [p for p in clf.staged_predict(iris.data)]
proba = clf.predict_proba(iris.data)
staged_probas = [p for p in clf.staged_predict_proba(iris.data)]
score = clf.score(iris.data, iris.target, sample_weight=iris_weights)
staged_scores = [
s for s in clf.staged_score(
iris.data, iris.target, sample_weight=iris_weights)]
assert_equal(len(staged_predictions), 10)
assert_array_almost_equal(predictions, staged_predictions[-1])
assert_equal(len(staged_probas), 10)
assert_array_almost_equal(proba, staged_probas[-1])
assert_equal(len(staged_scores), 10)
assert_array_almost_equal(score, staged_scores[-1])
# AdaBoost regression
clf = AdaBoostRegressor(n_estimators=10, random_state=0)
clf.fit(boston.data, boston.target, sample_weight=boston_weights)
predictions = clf.predict(boston.data)
staged_predictions = [p for p in clf.staged_predict(boston.data)]
score = clf.score(boston.data, boston.target, sample_weight=boston_weights)
staged_scores = [
s for s in clf.staged_score(
boston.data, boston.target, sample_weight=boston_weights)]
assert_equal(len(staged_predictions), 10)
assert_array_almost_equal(predictions, staged_predictions[-1])
assert_equal(len(staged_scores), 10)
assert_array_almost_equal(score, staged_scores[-1])
def test_gridsearch():
# Check that base trees can be grid-searched.
# AdaBoost classification
boost = AdaBoostClassifier(base_estimator=DecisionTreeClassifier())
parameters = {'n_estimators': (1, 2),
'base_estimator__max_depth': (1, 2),
'algorithm': ('SAMME', 'SAMME.R')}
clf = GridSearchCV(boost, parameters)
clf.fit(iris.data, iris.target)
# AdaBoost regression
boost = AdaBoostRegressor(base_estimator=DecisionTreeRegressor(),
random_state=0)
parameters = {'n_estimators': (1, 2),
'base_estimator__max_depth': (1, 2)}
clf = GridSearchCV(boost, parameters)
clf.fit(boston.data, boston.target)
def test_pickle():
# Check pickability.
import pickle
# Adaboost classifier
for alg in ['SAMME', 'SAMME.R']:
obj = AdaBoostClassifier(algorithm=alg)
obj.fit(iris.data, iris.target)
score = obj.score(iris.data, iris.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(iris.data, iris.target)
assert_equal(score, score2)
# Adaboost regressor
obj = AdaBoostRegressor(random_state=0)
obj.fit(boston.data, boston.target)
score = obj.score(boston.data, boston.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(boston.data, boston.target)
assert_equal(score, score2)
def test_importances():
# Check variable importances.
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=1)
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg)
clf.fit(X, y)
importances = clf.feature_importances_
assert_equal(importances.shape[0], 10)
assert_equal((importances[:3, np.newaxis] >= importances[3:]).all(),
True)
def test_error():
# Test that it gives proper exception on deficient input.
assert_raises(ValueError,
AdaBoostClassifier(learning_rate=-1).fit,
X, y_class)
assert_raises(ValueError,
AdaBoostClassifier(algorithm="foo").fit,
X, y_class)
assert_raises(ValueError,
AdaBoostClassifier().fit,
X, y_class, sample_weight=np.asarray([-1]))
def test_base_estimator():
# Test different base estimators.
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
# XXX doesn't work with y_class because RF doesn't support classes_
# Shouldn't AdaBoost run a LabelBinarizer?
clf = AdaBoostClassifier(RandomForestClassifier())
clf.fit(X, y_regr)
clf = AdaBoostClassifier(SVC(), algorithm="SAMME")
clf.fit(X, y_class)
from sklearn.ensemble import RandomForestRegressor
from sklearn.svm import SVR
clf = AdaBoostRegressor(RandomForestRegressor(), random_state=0)
clf.fit(X, y_regr)
clf = AdaBoostRegressor(SVR(), random_state=0)
clf.fit(X, y_regr)
# Check that an empty discrete ensemble fails in fit, not predict.
X_fail = [[1, 1], [1, 1], [1, 1], [1, 1]]
y_fail = ["foo", "bar", 1, 2]
clf = AdaBoostClassifier(SVC(), algorithm="SAMME")
assert_raises_regexp(ValueError, "worse than random",
clf.fit, X_fail, y_fail)
def test_sample_weight_missing():
from sklearn.linear_model import LinearRegression
from sklearn.cluster import KMeans
clf = AdaBoostClassifier(LinearRegression(), algorithm="SAMME")
assert_raises(ValueError, clf.fit, X, y_regr)
clf = AdaBoostRegressor(LinearRegression())
assert_raises(ValueError, clf.fit, X, y_regr)
clf = AdaBoostClassifier(KMeans(), algorithm="SAMME")
assert_raises(ValueError, clf.fit, X, y_regr)
clf = AdaBoostRegressor(KMeans())
assert_raises(ValueError, clf.fit, X, y_regr)
def test_sparse_classification():
# Check classification with sparse input.
class CustomSVC(SVC):
"""SVC variant that records the nature of the training set."""
def fit(self, X, y, sample_weight=None):
"""Modification on fit caries data type for later verification."""
super(CustomSVC, self).fit(X, y, sample_weight=sample_weight)
self.data_type_ = type(X)
return self
X, y = datasets.make_multilabel_classification(n_classes=1, n_samples=15,
n_features=5,
return_indicator=True,
random_state=42)
# Flatten y to a 1d array
y = np.ravel(y)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for sparse_format in [csc_matrix, csr_matrix, lil_matrix, coo_matrix,
dok_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
# Trained on sparse format
sparse_classifier = AdaBoostClassifier(
base_estimator=CustomSVC(probability=True),
random_state=1,
algorithm="SAMME"
).fit(X_train_sparse, y_train)
# Trained on dense format
dense_classifier = AdaBoostClassifier(
base_estimator=CustomSVC(probability=True),
random_state=1,
algorithm="SAMME"
).fit(X_train, y_train)
# predict
sparse_results = sparse_classifier.predict(X_test_sparse)
dense_results = dense_classifier.predict(X_test)
assert_array_equal(sparse_results, dense_results)
# decision_function
sparse_results = sparse_classifier.decision_function(X_test_sparse)
dense_results = dense_classifier.decision_function(X_test)
assert_array_equal(sparse_results, dense_results)
# predict_log_proba
sparse_results = sparse_classifier.predict_log_proba(X_test_sparse)
dense_results = dense_classifier.predict_log_proba(X_test)
assert_array_equal(sparse_results, dense_results)
# predict_proba
sparse_results = sparse_classifier.predict_proba(X_test_sparse)
dense_results = dense_classifier.predict_proba(X_test)
assert_array_equal(sparse_results, dense_results)
# score
sparse_results = sparse_classifier.score(X_test_sparse, y_test)
dense_results = dense_classifier.score(X_test, y_test)
assert_array_equal(sparse_results, dense_results)
# staged_decision_function
sparse_results = sparse_classifier.staged_decision_function(
X_test_sparse)
dense_results = dense_classifier.staged_decision_function(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
# staged_predict
sparse_results = sparse_classifier.staged_predict(X_test_sparse)
dense_results = dense_classifier.staged_predict(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
# staged_predict_proba
sparse_results = sparse_classifier.staged_predict_proba(X_test_sparse)
dense_results = dense_classifier.staged_predict_proba(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
# staged_score
sparse_results = sparse_classifier.staged_score(X_test_sparse,
y_test)
dense_results = dense_classifier.staged_score(X_test, y_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
# Verify sparsity of data is maintained during training
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert all([(t == csc_matrix or t == csr_matrix)
for t in types])
def test_sparse_regression():
# Check regression with sparse input.
class CustomSVR(SVR):
"""SVR variant that records the nature of the training set."""
def fit(self, X, y, sample_weight=None):
"""Modification on fit caries data type for later verification."""
super(CustomSVR, self).fit(X, y, sample_weight=sample_weight)
self.data_type_ = type(X)
return self
X, y = datasets.make_regression(n_samples=15, n_features=50, n_targets=1,
random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for sparse_format in [csc_matrix, csr_matrix, lil_matrix, coo_matrix,
dok_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
# Trained on sparse format
sparse_classifier = AdaBoostRegressor(
base_estimator=CustomSVR(),
random_state=1
).fit(X_train_sparse, y_train)
# Trained on dense format
dense_classifier = dense_results = AdaBoostRegressor(
base_estimator=CustomSVR(),
random_state=1
).fit(X_train, y_train)
# predict
sparse_results = sparse_classifier.predict(X_test_sparse)
dense_results = dense_classifier.predict(X_test)
assert_array_equal(sparse_results, dense_results)
# staged_predict
sparse_results = sparse_classifier.staged_predict(X_test_sparse)
dense_results = dense_classifier.staged_predict(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert all([(t == csc_matrix or t == csr_matrix)
for t in types])
|
bsd-3-clause
|
Winand/pandas
|
pandas/tests/series/test_internals.py
|
17
|
12814
|
# coding=utf-8
# pylint: disable-msg=E1101,W0612
import pytest
from datetime import datetime
from numpy import nan
import numpy as np
from pandas import Series
from pandas.core.indexes.datetimes import Timestamp
import pandas._libs.lib as lib
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
class TestSeriesInternals(object):
def test_convert_objects(self):
s = Series([1., 2, 3], index=['a', 'b', 'c'])
with tm.assert_produces_warning(FutureWarning):
result = s.convert_objects(convert_dates=False,
convert_numeric=True)
assert_series_equal(result, s)
# force numeric conversion
r = s.copy().astype('O')
r['a'] = '1'
with tm.assert_produces_warning(FutureWarning):
result = r.convert_objects(convert_dates=False,
convert_numeric=True)
assert_series_equal(result, s)
r = s.copy().astype('O')
r['a'] = '1.'
with tm.assert_produces_warning(FutureWarning):
result = r.convert_objects(convert_dates=False,
convert_numeric=True)
assert_series_equal(result, s)
r = s.copy().astype('O')
r['a'] = 'garbled'
expected = s.copy()
expected['a'] = np.nan
with tm.assert_produces_warning(FutureWarning):
result = r.convert_objects(convert_dates=False,
convert_numeric=True)
assert_series_equal(result, expected)
        # GH 4119, not converting a mixed type (e.g. floats and object)
s = Series([1, 'na', 3, 4])
with tm.assert_produces_warning(FutureWarning):
result = s.convert_objects(convert_numeric=True)
expected = Series([1, np.nan, 3, 4])
assert_series_equal(result, expected)
s = Series([1, '', 3, 4])
with tm.assert_produces_warning(FutureWarning):
result = s.convert_objects(convert_numeric=True)
expected = Series([1, np.nan, 3, 4])
assert_series_equal(result, expected)
# dates
s = Series([datetime(2001, 1, 1, 0, 0), datetime(2001, 1, 2, 0, 0),
datetime(2001, 1, 3, 0, 0)])
s2 = Series([datetime(2001, 1, 1, 0, 0), datetime(2001, 1, 2, 0, 0),
datetime(2001, 1, 3, 0, 0), 'foo', 1.0, 1,
Timestamp('20010104'), '20010105'],
dtype='O')
with tm.assert_produces_warning(FutureWarning):
result = s.convert_objects(convert_dates=True,
convert_numeric=False)
expected = Series([Timestamp('20010101'), Timestamp('20010102'),
Timestamp('20010103')], dtype='M8[ns]')
assert_series_equal(result, expected)
with tm.assert_produces_warning(FutureWarning):
result = s.convert_objects(convert_dates='coerce',
convert_numeric=False)
with tm.assert_produces_warning(FutureWarning):
result = s.convert_objects(convert_dates='coerce',
convert_numeric=True)
assert_series_equal(result, expected)
expected = Series([Timestamp('20010101'), Timestamp('20010102'),
Timestamp('20010103'),
lib.NaT, lib.NaT, lib.NaT, Timestamp('20010104'),
Timestamp('20010105')], dtype='M8[ns]')
with tm.assert_produces_warning(FutureWarning):
result = s2.convert_objects(convert_dates='coerce',
convert_numeric=False)
assert_series_equal(result, expected)
with tm.assert_produces_warning(FutureWarning):
result = s2.convert_objects(convert_dates='coerce',
convert_numeric=True)
assert_series_equal(result, expected)
        # preserve all-nans (if convert_dates='coerce')
s = Series(['foo', 'bar', 1, 1.0], dtype='O')
with tm.assert_produces_warning(FutureWarning):
result = s.convert_objects(convert_dates='coerce',
convert_numeric=False)
expected = Series([lib.NaT] * 2 + [Timestamp(1)] * 2)
assert_series_equal(result, expected)
        # preserve if non-object
s = Series([1], dtype='float32')
with tm.assert_produces_warning(FutureWarning):
result = s.convert_objects(convert_dates='coerce',
convert_numeric=False)
assert_series_equal(result, s)
# r = s.copy()
# r[0] = np.nan
# result = r.convert_objects(convert_dates=True,convert_numeric=False)
# assert result.dtype == 'M8[ns]'
# dateutil parses some single letters into today's value as a date
for x in 'abcdefghijklmnopqrstuvwxyz':
s = Series([x])
with tm.assert_produces_warning(FutureWarning):
result = s.convert_objects(convert_dates='coerce')
assert_series_equal(result, s)
s = Series([x.upper()])
with tm.assert_produces_warning(FutureWarning):
result = s.convert_objects(convert_dates='coerce')
assert_series_equal(result, s)
def test_convert_objects_preserve_bool(self):
s = Series([1, True, 3, 5], dtype=object)
with tm.assert_produces_warning(FutureWarning):
r = s.convert_objects(convert_numeric=True)
e = Series([1, 1, 3, 5], dtype='i8')
tm.assert_series_equal(r, e)
def test_convert_objects_preserve_all_bool(self):
s = Series([False, True, False, False], dtype=object)
with tm.assert_produces_warning(FutureWarning):
r = s.convert_objects(convert_numeric=True)
e = Series([False, True, False, False], dtype=bool)
tm.assert_series_equal(r, e)
# GH 10265
def test_convert(self):
# Tests: All to nans, coerce, true
# Test coercion returns correct type
s = Series(['a', 'b', 'c'])
results = s._convert(datetime=True, coerce=True)
expected = Series([lib.NaT] * 3)
assert_series_equal(results, expected)
results = s._convert(numeric=True, coerce=True)
expected = Series([np.nan] * 3)
assert_series_equal(results, expected)
expected = Series([lib.NaT] * 3, dtype=np.dtype('m8[ns]'))
results = s._convert(timedelta=True, coerce=True)
assert_series_equal(results, expected)
dt = datetime(2001, 1, 1, 0, 0)
td = dt - datetime(2000, 1, 1, 0, 0)
# Test coercion with mixed types
s = Series(['a', '3.1415', dt, td])
results = s._convert(datetime=True, coerce=True)
expected = Series([lib.NaT, lib.NaT, dt, lib.NaT])
assert_series_equal(results, expected)
results = s._convert(numeric=True, coerce=True)
expected = Series([nan, 3.1415, nan, nan])
assert_series_equal(results, expected)
results = s._convert(timedelta=True, coerce=True)
expected = Series([lib.NaT, lib.NaT, lib.NaT, td],
dtype=np.dtype('m8[ns]'))
assert_series_equal(results, expected)
# Test standard conversion returns original
results = s._convert(datetime=True)
assert_series_equal(results, s)
results = s._convert(numeric=True)
expected = Series([nan, 3.1415, nan, nan])
assert_series_equal(results, expected)
results = s._convert(timedelta=True)
assert_series_equal(results, s)
# test pass-through and non-conversion when other types selected
s = Series(['1.0', '2.0', '3.0'])
results = s._convert(datetime=True, numeric=True, timedelta=True)
expected = Series([1.0, 2.0, 3.0])
assert_series_equal(results, expected)
results = s._convert(True, False, True)
assert_series_equal(results, s)
s = Series([datetime(2001, 1, 1, 0, 0), datetime(2001, 1, 1, 0, 0)],
dtype='O')
results = s._convert(datetime=True, numeric=True, timedelta=True)
expected = Series([datetime(2001, 1, 1, 0, 0), datetime(2001, 1, 1, 0,
0)])
assert_series_equal(results, expected)
results = s._convert(datetime=False, numeric=True, timedelta=True)
assert_series_equal(results, s)
td = datetime(2001, 1, 1, 0, 0) - datetime(2000, 1, 1, 0, 0)
s = Series([td, td], dtype='O')
results = s._convert(datetime=True, numeric=True, timedelta=True)
expected = Series([td, td])
assert_series_equal(results, expected)
results = s._convert(True, True, False)
assert_series_equal(results, s)
s = Series([1., 2, 3], index=['a', 'b', 'c'])
result = s._convert(numeric=True)
assert_series_equal(result, s)
# force numeric conversion
r = s.copy().astype('O')
r['a'] = '1'
result = r._convert(numeric=True)
assert_series_equal(result, s)
r = s.copy().astype('O')
r['a'] = '1.'
result = r._convert(numeric=True)
assert_series_equal(result, s)
r = s.copy().astype('O')
r['a'] = 'garbled'
result = r._convert(numeric=True)
expected = s.copy()
expected['a'] = nan
assert_series_equal(result, expected)
        # GH 4119, not converting a mixed type (e.g. floats and object)
s = Series([1, 'na', 3, 4])
result = s._convert(datetime=True, numeric=True)
expected = Series([1, nan, 3, 4])
assert_series_equal(result, expected)
s = Series([1, '', 3, 4])
result = s._convert(datetime=True, numeric=True)
assert_series_equal(result, expected)
# dates
s = Series([datetime(2001, 1, 1, 0, 0), datetime(2001, 1, 2, 0, 0),
datetime(2001, 1, 3, 0, 0)])
s2 = Series([datetime(2001, 1, 1, 0, 0), datetime(2001, 1, 2, 0, 0),
datetime(2001, 1, 3, 0, 0), 'foo', 1.0, 1,
Timestamp('20010104'), '20010105'], dtype='O')
result = s._convert(datetime=True)
expected = Series([Timestamp('20010101'), Timestamp('20010102'),
Timestamp('20010103')], dtype='M8[ns]')
assert_series_equal(result, expected)
result = s._convert(datetime=True, coerce=True)
assert_series_equal(result, expected)
expected = Series([Timestamp('20010101'), Timestamp('20010102'),
Timestamp('20010103'), lib.NaT, lib.NaT, lib.NaT,
Timestamp('20010104'), Timestamp('20010105')],
dtype='M8[ns]')
result = s2._convert(datetime=True, numeric=False, timedelta=False,
coerce=True)
assert_series_equal(result, expected)
result = s2._convert(datetime=True, coerce=True)
assert_series_equal(result, expected)
s = Series(['foo', 'bar', 1, 1.0], dtype='O')
result = s._convert(datetime=True, coerce=True)
expected = Series([lib.NaT] * 2 + [Timestamp(1)] * 2)
assert_series_equal(result, expected)
        # preserve if non-object
s = Series([1], dtype='float32')
result = s._convert(datetime=True, coerce=True)
assert_series_equal(result, s)
# r = s.copy()
# r[0] = np.nan
# result = r._convert(convert_dates=True,convert_numeric=False)
# assert result.dtype == 'M8[ns]'
# dateutil parses some single letters into today's value as a date
expected = Series([lib.NaT])
for x in 'abcdefghijklmnopqrstuvwxyz':
s = Series([x])
result = s._convert(datetime=True, coerce=True)
assert_series_equal(result, expected)
s = Series([x.upper()])
result = s._convert(datetime=True, coerce=True)
assert_series_equal(result, expected)
def test_convert_no_arg_error(self):
s = Series(['1.0', '2'])
pytest.raises(ValueError, s._convert)
def test_convert_preserve_bool(self):
s = Series([1, True, 3, 5], dtype=object)
r = s._convert(datetime=True, numeric=True)
e = Series([1, 1, 3, 5], dtype='i8')
tm.assert_series_equal(r, e)
def test_convert_preserve_all_bool(self):
s = Series([False, True, False, False], dtype=object)
r = s._convert(datetime=True, numeric=True)
e = Series([False, True, False, False], dtype=bool)
tm.assert_series_equal(r, e)
|
bsd-3-clause
|
dpaiton/OpenPV
|
pv-core/analysis/python/plot_strong_neurons.py
|
1
|
1748
|
import sys
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import matplotlib.cm as cm
import PVReadSparse as rs
import PVReadWeights as rw
import PVConversions as conv
import scipy.cluster.vq as sp
import math
extended = False
a1 = rs.PVReadSparse(sys.argv[1], extended)
end = int(sys.argv[2])
step = int(sys.argv[3])
begin = int(sys.argv[4])
endtest = int(sys.argv[2])
steptest = int(sys.argv[3])
begintest = int(sys.argv[4])
atest = rs.PVReadSparse(sys.argv[5], extended)
zerange = end
where = []
count = 0
for endtest in range(begintest+steptest, steptest+1, steptest):
Atest = atest.avg_activity(begintest, endtest)
nmax = np.max(Atest)
lenofo = len(Atest)
for i in range(lenofo):
for j in range(lenofo):
if Atest[i,j] > (0.7 * nmax):
if count == 0:
where = [i,j]
else:
where = np.vstack((where, [i, j]))
count+=1
print "shape of where = ", np.shape(where)
print np.shape(where)[0]
a1.rewind()
A1t = np.zeros((1, np.shape(where)[0]))
for k in range(zerange):
A1 = a1.next_record()
A1t = np.zeros((1, np.shape(where)[0]))
for g in range(np.shape(where)[0]):
w = where[g]
i = w[0]
j = w[1]
for h in range(len(A1)):
if A1[h] == ((lenofo * i) + j):
A1t[0,g] = 1
if k == 0:
A1p = np.average(A1t)
else:
A1p = np.append(A1p, np.average(A1t))
fig = plt.figure()
ax = fig.add_subplot(111, axisbg='darkslategray')
ax.plot(np.arange(len(A1p)), A1p, '-o', color='y')
ax.set_xlabel('time (0.5 ms) num of neurons = %d' %(np.shape(where)[0]))
ax.set_ylabel('Avg Firing Rate')
ax.set_title('Average High Activity')
ax.grid(True)
plt.show()
|
epl-1.0
|
devs1991/test_edx_docmode
|
venv/lib/python2.7/site-packages/sklearn/metrics/pairwise.py
|
3
|
23281
|
"""
The :mod:`sklearn.metrics.pairwise` submodule implements utilities to evaluate
pairwise distances or affinity of sets of samples.
This module contains both distance metrics and kernels. A brief summary is
given on the two here.
Distance metrics are functions d(a, b) such that d(a, b) < d(a, c) if objects
a and b are considered "more similar" than objects a and c. Two objects exactly
alike would have a distance of zero.
One of the most popular examples is Euclidean distance.
To be a 'true' metric, it must obey the following four conditions::
1. d(a, b) >= 0, for all a and b
2. d(a, b) == 0, if and only if a = b, positive definiteness
3. d(a, b) == d(b, a), symmetry
4. d(a, c) <= d(a, b) + d(b, c), the triangle inequality
Kernels are measures of similarity, i.e. ``s(a, b) > s(a, c)``
if objects ``a`` and ``b`` are considered "more similar" than objects
``a`` and ``c``. A kernel must also be positive semi-definite.
There are a number of ways to convert between a distance metric and a
similarity measure, such as a kernel. Let D be the distance, and S be the
kernel::
1. ``S = np.exp(-D * gamma)``, where one heuristic for choosing
``gamma`` is ``1 / num_features``
2. ``S = 1. / (D / np.max(D))``
"""
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Robert Layton <[email protected]>
# License: BSD Style.
import numpy as np
from scipy.spatial import distance
from scipy.sparse import csr_matrix
from scipy.sparse import issparse
from ..utils import safe_asarray
from ..utils import atleast2d_or_csr
from ..utils import gen_even_slices
from ..utils.extmath import safe_sparse_dot
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
from ..externals.joblib.parallel import cpu_count
# Utility Functions
def check_pairwise_arrays(X, Y):
""" Set X and Y appropriately and checks inputs
If Y is None, it is set as a pointer to X (i.e. not a copy).
If Y is given, this does not happen.
All distance metrics should use this function first to assert that the
given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats. Finally, the function checks that the size
of the second dimension of the two arrays is equal.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples_a, n_features]
Y : {array-like, sparse matrix}, shape = [n_samples_b, n_features]
Returns
-------
safe_X : {array-like, sparse matrix}, shape = [n_samples_a, n_features]
        An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape = [n_samples_b, n_features]
        An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
if Y is X or Y is None:
X = safe_asarray(X)
X = Y = atleast2d_or_csr(X, dtype=np.float)
else:
X = safe_asarray(X)
Y = safe_asarray(Y)
X = atleast2d_or_csr(X, dtype=np.float)
Y = atleast2d_or_csr(Y, dtype=np.float)
if len(X.shape) < 2:
raise ValueError("X is required to be at least two dimensional.")
if len(Y.shape) < 2:
raise ValueError("Y is required to be at least two dimensional.")
if X.shape[1] != Y.shape[1]:
raise ValueError("Incompatible dimension for X and Y matrices: "
"X.shape[1] == %d while Y.shape[1] == %d" % (
X.shape[1], Y.shape[1]))
return X, Y
# Distances
def euclidean_distances(X, Y=None, Y_norm_squared=None, squared=False):
"""
Considering the rows of X (and Y=X) as vectors, compute the
distance matrix between each pair of vectors.
For efficiency reasons, the euclidean distance between a pair of row
vector x and y is computed as::
dist(x, y) = sqrt(dot(x, x) - 2 * dot(x, y) + dot(y, y))
This formulation has two main advantages. First, it is computationally
efficient when dealing with sparse data. Second, if x varies but y
remains unchanged, then the right-most dot-product `dot(y, y)` can be
pre-computed.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples_1, n_features]
Y : {array-like, sparse matrix}, shape = [n_samples_2, n_features]
Y_norm_squared : array-like, shape = [n_samples_2], optional
Pre-computed dot-products of vectors in Y (e.g.,
``(Y**2).sum(axis=1)``)
squared : boolean, optional
Return squared Euclidean distances.
Returns
-------
distances : {array, sparse matrix}, shape = [n_samples_1, n_samples_2]
Examples
--------
>>> from sklearn.metrics.pairwise import euclidean_distances
>>> X = [[0, 1], [1, 1]]
>>> # distance between rows of X
>>> euclidean_distances(X, X)
array([[ 0., 1.],
[ 1., 0.]])
>>> # get distance to origin
>>> euclidean_distances(X, [[0, 0]])
array([[ 1. ],
[ 1.41421356]])
"""
# should not need X_norm_squared because if you could precompute that as
# well as Y, then you should just pre-compute the output and not even
# call this function.
X, Y = check_pairwise_arrays(X, Y)
if issparse(X):
XX = X.multiply(X).sum(axis=1)
else:
XX = np.sum(X * X, axis=1)[:, np.newaxis]
if X is Y: # shortcut in the common case euclidean_distances(X, X)
YY = XX.T
elif Y_norm_squared is None:
if issparse(Y):
# scipy.sparse matrices don't have element-wise scalar
# exponentiation, and tocsr has a copy kwarg only on CSR matrices.
YY = Y.copy() if isinstance(Y, csr_matrix) else Y.tocsr()
YY.data **= 2
YY = np.asarray(YY.sum(axis=1)).T
else:
YY = np.sum(Y ** 2, axis=1)[np.newaxis, :]
else:
YY = atleast2d_or_csr(Y_norm_squared)
if YY.shape != (1, Y.shape[0]):
raise ValueError(
"Incompatible dimensions for Y and Y_norm_squared")
# TODO: a faster Cython implementation would do the clipping of negative
# values in a single pass over the output matrix.
distances = safe_sparse_dot(X, Y.T, dense_output=True)
distances *= -2
distances += XX
distances += YY
np.maximum(distances, 0, distances)
if X is Y:
# Ensure that distances between vectors and themselves are set to 0.0.
# This may not be the case due to floating point rounding errors.
distances.flat[::distances.shape[0] + 1] = 0.0
return distances if squared else np.sqrt(distances)
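# Illustrative sketch (for orientation only, not part of the public API): the
# expanded dot-product formulation used above,
# sqrt(dot(x, x) - 2 * dot(x, y) + dot(y, y)), should agree with a naive pairwise
# norm computation. The helper name `_check_euclidean_expansion` is made up here.
def _check_euclidean_expansion():
    rng = np.random.RandomState(0)
    X = rng.rand(5, 3)
    Y = rng.rand(4, 3)
    expanded = euclidean_distances(X, Y)
    # direct computation of ||x - y|| for every pair of rows
    naive = np.sqrt(((X[:, np.newaxis, :] - Y[np.newaxis, :, :]) ** 2).sum(axis=-1))
    assert np.allclose(expanded, naive)
    return expanded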
def manhattan_distances(X, Y=None, sum_over_features=True):
""" Compute the L1 distances between the vectors in X and Y.
With sum_over_features equal to False it returns the componentwise
distances.
Parameters
----------
X : array_like
An array with shape (n_samples_X, n_features).
Y : array_like, optional
An array with shape (n_samples_Y, n_features).
sum_over_features : bool, default=True
If True the function returns the pairwise distance matrix
else it returns the componentwise L1 pairwise-distances.
Returns
-------
D : array
If sum_over_features is False shape is
(n_samples_X * n_samples_Y, n_features) and D contains the
componentwise L1 pairwise-distances (ie. absolute difference),
else shape is (n_samples_X, n_samples_Y) and D contains
the pairwise l1 distances.
Examples
--------
>>> from sklearn.metrics.pairwise import manhattan_distances
>>> manhattan_distances(3, 3)#doctest:+ELLIPSIS
array([[ 0.]])
>>> manhattan_distances(3, 2)#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances(2, 3)#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances([[1, 2], [3, 4]],\
[[1, 2], [0, 3]])#doctest:+ELLIPSIS
array([[ 0., 2.],
[ 4., 4.]])
>>> import numpy as np
>>> X = np.ones((1, 2))
>>> y = 2 * np.ones((2, 2))
>>> manhattan_distances(X, y, sum_over_features=False)#doctest:+ELLIPSIS
array([[ 1., 1.],
[ 1., 1.]]...)
"""
X, Y = check_pairwise_arrays(X, Y)
n_samples_X, n_features_X = X.shape
n_samples_Y, n_features_Y = Y.shape
if n_features_X != n_features_Y:
raise Exception("X and Y should have the same number of features!")
D = np.abs(X[:, np.newaxis, :] - Y[np.newaxis, :, :])
if sum_over_features:
D = np.sum(D, axis=2)
else:
D = D.reshape((n_samples_X * n_samples_Y, n_features_X))
return D
# Kernels
def linear_kernel(X, Y=None):
"""
Compute the linear kernel between X and Y.
Parameters
----------
X : array of shape (n_samples_1, n_features)
Y : array of shape (n_samples_2, n_features)
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
return safe_sparse_dot(X, Y.T, dense_output=True)
def polynomial_kernel(X, Y=None, degree=3, gamma=0, coef0=1):
"""
Compute the polynomial kernel between X and Y::
K(X, Y) = (gamma <X, Y> + coef0)^degree
Parameters
----------
X : array of shape (n_samples_1, n_features)
Y : array of shape (n_samples_2, n_features)
degree : int
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma == 0:
gamma = 1.0 / X.shape[1]
K = linear_kernel(X, Y)
K *= gamma
K += coef0
K **= degree
return K
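# Illustrative sketch (for orientation only, not part of the public API) checking
# the polynomial kernel formula K(X, Y) = (gamma <X, Y> + coef0)^degree against a
# direct numpy computation. `_check_polynomial_kernel` is a name made up here.
def _check_polynomial_kernel():
    rng = np.random.RandomState(0)
    X = rng.rand(3, 4)
    Y = rng.rand(2, 4)
    gamma, coef0, degree = 0.5, 1.0, 3
    K = polynomial_kernel(X, Y, degree=degree, gamma=gamma, coef0=coef0)
    manual = (gamma * np.dot(X, Y.T) + coef0) ** degree  # formula applied directly
    assert np.allclose(K, manual)
    return K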
def sigmoid_kernel(X, Y=None, gamma=0, coef0=1):
"""
Compute the sigmoid kernel between X and Y::
K(X, Y) = tanh(gamma <X, Y> + coef0)
Parameters
----------
X : array of shape (n_samples_1, n_features)
Y : array of shape (n_samples_2, n_features)
    gamma : float
    coef0 : float
Returns
-------
Gram matrix: array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma == 0:
gamma = 1.0 / X.shape[1]
K = linear_kernel(X, Y)
K *= gamma
K += coef0
np.tanh(K, K) # compute tanh in-place
return K
def rbf_kernel(X, Y=None, gamma=0):
"""
Compute the rbf (gaussian) kernel between X and Y::
K(X, Y) = exp(-gamma ||X-Y||^2)
Parameters
----------
X : array of shape (n_samples_1, n_features)
Y : array of shape (n_samples_2, n_features)
gamma : float
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma == 0:
gamma = 1.0 / X.shape[1]
K = euclidean_distances(X, Y, squared=True)
K *= -gamma
np.exp(K, K) # exponentiate K in-place
return K
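# Illustrative sketch (for orientation only, not part of the public API) of the
# distance-to-similarity conversion mentioned in the module docstring,
# S = np.exp(-D * gamma): applied to *squared* Euclidean distances it reproduces
# rbf_kernel. `_check_rbf_from_distances` is a name made up for this sketch.
def _check_rbf_from_distances():
    rng = np.random.RandomState(0)
    X = rng.rand(4, 3)
    gamma = 1.0 / X.shape[1]
    D_squared = euclidean_distances(X, squared=True)
    S = np.exp(-gamma * D_squared)
    assert np.allclose(S, rbf_kernel(X, gamma=gamma))
    return S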
# Helper functions - distance
pairwise_distance_functions = {
# If updating this dictionary, update the doc in both distance_metrics()
# and also in pairwise_distances()!
'euclidean': euclidean_distances,
'l2': euclidean_distances,
'l1': manhattan_distances,
'manhattan': manhattan_distances,
'cityblock': manhattan_distances,
}
def distance_metrics():
""" Valid metrics for pairwise_distances
This function simply returns the valid pairwise distance metrics.
It exists, however, to allow for a verbose description of the mapping for
each of the valid strings.
The valid distance metrics, and the function they map to, are:
=========== ====================================
metric Function
=========== ====================================
'cityblock' sklearn.pairwise.manhattan_distances
'euclidean' sklearn.pairwise.euclidean_distances
'l1' sklearn.pairwise.manhattan_distances
'l2' sklearn.pairwise.euclidean_distances
'manhattan' sklearn.pairwise.manhattan_distances
=========== ====================================
"""
return pairwise_distance_functions
def _parallel_pairwise(X, Y, func, n_jobs, **kwds):
"""Break the pairwise matrix in n_jobs even slices
and compute them in parallel"""
if n_jobs < 0:
n_jobs = max(cpu_count() + 1 + n_jobs, 1)
if Y is None:
Y = X
ret = Parallel(n_jobs=n_jobs, verbose=0)(
delayed(func)(X, Y[s], **kwds)
for s in gen_even_slices(Y.shape[0], n_jobs))
return np.hstack(ret)
def pairwise_distances(X, Y=None, metric="euclidean", n_jobs=1, **kwds):
""" Compute the distance matrix from a vector array X and optional Y.
This method takes either a vector array or a distance matrix, and returns
a distance matrix. If the input is a vector array, the distances are
computed. If the input is a distances matrix, it is returned instead.
This method provides a safe way to take a distance matrix as input, while
    preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
distance between the arrays from both X and Y.
Please note that support for sparse matrices is currently limited to those
metrics listed in pairwise.pairwise_distance_functions.
Valid values for metric are:
- from scikit-learn: ['euclidean', 'l2', 'l1', 'manhattan', 'cityblock']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'cosine', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto', 'russellrao',
'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
Note in the case of 'euclidean' and 'cityblock' (which are valid
scipy.spatial.distance metrics), the values will use the scikit-learn
implementation, which is faster and has support for sparse matrices.
For a verbose description of the metrics from scikit-learn, see the
__doc__ of the sklearn.pairwise.distance_metrics function.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
Y : array [n_samples_b, n_features]
A second feature array only if X has shape [n_samples_a, n_features].
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.pairwise_distance_functions.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
        used at all, which is useful for debugging. For n_jobs below -1,
        (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
D : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A distance matrix D such that D_{i, j} is the distance between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then D_{i, j} is the distance between the ith array
from X and the jth array from Y.
"""
if metric == "precomputed":
return X
elif metric in pairwise_distance_functions:
func = pairwise_distance_functions[metric]
if n_jobs == 1:
return func(X, Y, **kwds)
else:
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
elif callable(metric):
# Check matrices first (this is usually done by the metric).
X, Y = check_pairwise_arrays(X, Y)
n_x, n_y = X.shape[0], Y.shape[0]
# Calculate distance for each element in X and Y.
# FIXME: can use n_jobs here too
D = np.zeros((n_x, n_y), dtype='float')
for i in range(n_x):
start = 0
if X is Y:
start = i
for j in range(start, n_y):
# distance assumed to be symmetric.
D[i][j] = metric(X[i], Y[j], **kwds)
if X is Y:
D[j][i] = D[i][j]
return D
else:
# Note: the distance module doesn't support sparse matrices!
if type(X) is csr_matrix:
raise TypeError("scipy distance metrics do not"
" support sparse matrices.")
if Y is None:
return distance.squareform(distance.pdist(X, metric=metric,
**kwds))
else:
if type(Y) is csr_matrix:
raise TypeError("scipy distance metrics do not"
" support sparse matrices.")
return distance.cdist(X, Y, metric=metric, **kwds)
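# Illustrative sketch (for orientation only, not part of the public API) of the
# callable-metric branch above: the lambda recomputes the Euclidean distance row by
# row, so the result should match the built-in 'euclidean' metric up to floating
# point error. `_check_callable_metric` is a name made up for this sketch.
def _check_callable_metric():
    rng = np.random.RandomState(0)
    X = rng.rand(5, 3)
    D_builtin = pairwise_distances(X, metric="euclidean")
    D_callable = pairwise_distances(
        X, metric=lambda a, b: np.sqrt(((a - b) ** 2).sum()))
    assert np.allclose(D_builtin, D_callable)
    return D_callable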
# Helper functions - kernels
pairwise_kernel_functions = {
    # If updating this dictionary, update the doc in both kernel_metrics()
    # and also in pairwise_kernels()!
'rbf': rbf_kernel,
'sigmoid': sigmoid_kernel,
'polynomial': polynomial_kernel,
'poly': polynomial_kernel,
'linear': linear_kernel
}
def kernel_metrics():
""" Valid metrics for pairwise_kernels
    This function simply returns the valid pairwise kernel functions.
It exists, however, to allow for a verbose description of the mapping for
each of the valid strings.
    The valid kernel functions, and the function they map to, are:
============ ==================================
metric Function
============ ==================================
'linear' sklearn.pairwise.linear_kernel
'poly' sklearn.pairwise.polynomial_kernel
'polynomial' sklearn.pairwise.polynomial_kernel
'rbf' sklearn.pairwise.rbf_kernel
'sigmoid' sklearn.pairwise.sigmoid_kernel
============ ==================================
"""
return pairwise_kernel_functions
kernel_params = {
"rbf": set(("gamma",)),
"sigmoid": set(("gamma", "coef0")),
"polynomial": set(("gamma", "degree", "coef0")),
"poly": set(("gamma", "degree", "coef0")),
"linear": ()
}
def pairwise_kernels(X, Y=None, metric="linear", filter_params=False,
n_jobs=1, **kwds):
""" Compute the kernel between arrays X and optional array Y.
This method takes either a vector array or a kernel matrix, and returns
a kernel matrix. If the input is a vector array, the kernels are
computed. If the input is a kernel matrix, it is returned instead.
This method provides a safe way to take a kernel matrix as input, while
    preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
kernel between the arrays from both X and Y.
Valid values for metric are::
['rbf', 'sigmoid', 'polynomial', 'poly', 'linear']
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise kernels between samples, or a feature array.
Y : array [n_samples_b, n_features]
A second feature array only if X has shape [n_samples_a, n_features].
metric : string, or callable
The metric to use when calculating kernel between instances in a
feature array. If metric is a string, it must be one of the metrics
in pairwise.pairwise_kernel_functions.
If metric is "precomputed", X is assumed to be a kernel matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
        the kernel between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
        used at all, which is useful for debugging. For n_jobs below -1,
        (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
filter_params: boolean
Whether to filter invalid parameters or not.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the kernel function.
Returns
-------
K : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A kernel matrix K such that K_{i, j} is the kernel between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then K_{i, j} is the kernel between the ith array
from X and the jth array from Y.
Notes
-----
If metric is 'precomputed', Y is ignored and X is returned.
"""
if metric == "precomputed":
return X
elif metric in pairwise_kernel_functions:
if filter_params:
kwds = dict((k, kwds[k]) for k in kwds \
if k in kernel_params[metric])
func = pairwise_kernel_functions[metric]
if n_jobs == 1:
return func(X, Y, **kwds)
else:
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
elif callable(metric):
# Check matrices first (this is usually done by the metric).
X, Y = check_pairwise_arrays(X, Y)
n_x, n_y = X.shape[0], Y.shape[0]
# Calculate kernel for each element in X and Y.
K = np.zeros((n_x, n_y), dtype='float')
for i in range(n_x):
start = 0
if X is Y:
start = i
for j in range(start, n_y):
# Kernel assumed to be symmetric.
K[i][j] = metric(X[i], Y[j], **kwds)
if X is Y:
K[j][i] = K[i][j]
return K
else:
raise AttributeError("Unknown metric %s" % metric)
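# Illustrative sketch (for orientation only, not part of the public API) of
# pairwise_kernels with filter_params=True, which drops keyword arguments the
# chosen kernel does not accept: here 'degree' is filtered out for the rbf kernel,
# so the call is equivalent to rbf_kernel(X, gamma=0.5). `_check_filter_params` is
# a name made up for this sketch.
def _check_filter_params():
    rng = np.random.RandomState(0)
    X = rng.rand(4, 3)
    K = pairwise_kernels(X, metric="rbf", filter_params=True, gamma=0.5, degree=2)
    assert np.allclose(K, rbf_kernel(X, gamma=0.5))
    return K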
|
agpl-3.0
|
cainiaocome/scikit-learn
|
sklearn/datasets/tests/test_base.py
|
205
|
5878
|
import os
import shutil
import tempfile
import warnings
import nose
import numpy
from pickle import loads
from pickle import dumps
from sklearn.datasets import get_data_home
from sklearn.datasets import clear_data_home
from sklearn.datasets import load_files
from sklearn.datasets import load_sample_images
from sklearn.datasets import load_sample_image
from sklearn.datasets import load_digits
from sklearn.datasets import load_diabetes
from sklearn.datasets import load_linnerud
from sklearn.datasets import load_iris
from sklearn.datasets import load_boston
from sklearn.datasets.base import Bunch
from sklearn.externals.six import b, u
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
DATA_HOME = tempfile.mkdtemp(prefix="scikit_learn_data_home_test_")
LOAD_FILES_ROOT = tempfile.mkdtemp(prefix="scikit_learn_load_files_test_")
TEST_CATEGORY_DIR1 = ""
TEST_CATEGORY_DIR2 = ""
def _remove_dir(path):
if os.path.isdir(path):
shutil.rmtree(path)
def teardown_module():
"""Test fixture (clean up) run once after all tests of this module"""
for path in [DATA_HOME, LOAD_FILES_ROOT]:
_remove_dir(path)
def setup_load_files():
global TEST_CATEGORY_DIR1
global TEST_CATEGORY_DIR2
TEST_CATEGORY_DIR1 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
TEST_CATEGORY_DIR2 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
sample_file = tempfile.NamedTemporaryFile(dir=TEST_CATEGORY_DIR1,
delete=False)
sample_file.write(b("Hello World!\n"))
sample_file.close()
def teardown_load_files():
_remove_dir(TEST_CATEGORY_DIR1)
_remove_dir(TEST_CATEGORY_DIR2)
def test_data_home():
# get_data_home will point to a pre-existing folder
data_home = get_data_home(data_home=DATA_HOME)
assert_equal(data_home, DATA_HOME)
assert_true(os.path.exists(data_home))
    # clear_data_home will delete both the content and the folder itself
clear_data_home(data_home=data_home)
assert_false(os.path.exists(data_home))
# if the folder is missing it will be created again
data_home = get_data_home(data_home=DATA_HOME)
assert_true(os.path.exists(data_home))
def test_default_empty_load_files():
res = load_files(LOAD_FILES_ROOT)
assert_equal(len(res.filenames), 0)
assert_equal(len(res.target_names), 0)
assert_equal(res.DESCR, None)
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_default_load_files():
res = load_files(LOAD_FILES_ROOT)
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 2)
assert_equal(res.DESCR, None)
assert_equal(res.data, [b("Hello World!\n")])
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_load_files_w_categories_desc_and_encoding():
category = os.path.abspath(TEST_CATEGORY_DIR1).split('/').pop()
res = load_files(LOAD_FILES_ROOT, description="test",
categories=category, encoding="utf-8")
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 1)
assert_equal(res.DESCR, "test")
assert_equal(res.data, [u("Hello World!\n")])
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_load_files_wo_load_content():
res = load_files(LOAD_FILES_ROOT, load_content=False)
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 2)
assert_equal(res.DESCR, None)
assert_equal(res.get('data'), None)
def test_load_sample_images():
try:
res = load_sample_images()
assert_equal(len(res.images), 2)
assert_equal(len(res.filenames), 2)
assert_true(res.DESCR)
except ImportError:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_digits():
digits = load_digits()
assert_equal(digits.data.shape, (1797, 64))
assert_equal(numpy.unique(digits.target).size, 10)
def test_load_digits_n_class_lt_10():
digits = load_digits(9)
assert_equal(digits.data.shape, (1617, 64))
assert_equal(numpy.unique(digits.target).size, 9)
def test_load_sample_image():
try:
china = load_sample_image('china.jpg')
assert_equal(china.dtype, 'uint8')
assert_equal(china.shape, (427, 640, 3))
except ImportError:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_missing_sample_image_error():
have_PIL = True
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
except ImportError:
have_PIL = False
if have_PIL:
assert_raises(AttributeError, load_sample_image,
'blop.jpg')
else:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_diabetes():
res = load_diabetes()
assert_equal(res.data.shape, (442, 10))
    assert_equal(res.target.size, 442)
def test_load_linnerud():
res = load_linnerud()
assert_equal(res.data.shape, (20, 3))
assert_equal(res.target.shape, (20, 3))
assert_equal(len(res.target_names), 3)
assert_true(res.DESCR)
def test_load_iris():
res = load_iris()
assert_equal(res.data.shape, (150, 4))
assert_equal(res.target.size, 150)
assert_equal(res.target_names.size, 3)
assert_true(res.DESCR)
def test_load_boston():
res = load_boston()
assert_equal(res.data.shape, (506, 13))
assert_equal(res.target.size, 506)
assert_equal(res.feature_names.size, 13)
assert_true(res.DESCR)
def test_loads_dumps_bunch():
bunch = Bunch(x="x")
bunch_from_pkl = loads(dumps(bunch))
bunch_from_pkl.x = "y"
assert_equal(bunch_from_pkl['x'], bunch_from_pkl.x)
|
bsd-3-clause
|
sharifulgeo/networkx
|
examples/drawing/sampson.py
|
40
|
1379
|
#!/usr/bin/env python
"""
Sampson's monastery data.
Shows how to read data from a zip file and plot multiple frames.
"""
__author__ = """Aric Hagberg ([email protected])"""
# Copyright (C) 2010 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
import zipfile, cStringIO
import networkx as nx
import matplotlib.pyplot as plt
zf = zipfile.ZipFile('sampson_data.zip') # zipfile object
e1=cStringIO.StringIO(zf.read('samplike1.txt')) # read info file
e2=cStringIO.StringIO(zf.read('samplike2.txt')) # read info file
e3=cStringIO.StringIO(zf.read('samplike3.txt')) # read info file
G1=nx.read_edgelist(e1,delimiter='\t')
G2=nx.read_edgelist(e2,delimiter='\t')
G3=nx.read_edgelist(e3,delimiter='\t')
pos=nx.spring_layout(G3,iterations=100)
plt.clf()
plt.subplot(221)
plt.title('samplike1')
nx.draw(G1,pos,node_size=50,with_labels=False)
plt.subplot(222)
plt.title('samplike2')
nx.draw(G2,pos,node_size=50,with_labels=False)
plt.subplot(223)
plt.title('samplike3')
nx.draw(G3,pos,node_size=50,with_labels=False)
plt.subplot(224)
plt.title('samplike1,2,3')
nx.draw(G3,pos,edgelist=G3.edges(),node_size=50,with_labels=False)
nx.draw_networkx_edges(G1,pos,alpha=0.25)
nx.draw_networkx_edges(G2,pos,alpha=0.25)
plt.savefig("sampson.png") # save as png
plt.show() # display
|
bsd-3-clause
|
ch3ll0v3k/scikit-learn
|
examples/plot_kernel_approximation.py
|
262
|
8004
|
"""
==================================================
Explicit feature map approximation for RBF kernels
==================================================
An example illustrating the approximation of the feature map
of an RBF kernel.
.. currentmodule:: sklearn.kernel_approximation
It shows how to use :class:`RBFSampler` and :class:`Nystroem` to
approximate the feature map of an RBF kernel for classification with an SVM on
the digits dataset. Results using a linear SVM in the original space, a linear
SVM using the approximate mappings and using a kernelized SVM are compared.
Timings and accuracy for varying amounts of Monte Carlo samplings (in the case
of :class:`RBFSampler`, which uses random Fourier features) and different sized
subsets of the training set (for :class:`Nystroem`) for the approximate mapping
are shown.
Please note that the dataset here is not large enough to show the benefits
of kernel approximation, as the exact SVM is still reasonably fast.
Sampling more dimensions clearly leads to better classification results, but
comes at a greater cost. This means there is a tradeoff between runtime and
accuracy, given by the parameter n_components. Note that solving the Linear
SVM and also the approximate kernel SVM could be greatly accelerated by using
stochastic gradient descent via :class:`sklearn.linear_model.SGDClassifier`.
This is not easily possible for the case of the kernelized SVM.
The second plot visualizes the decision surfaces of the RBF kernel SVM and
the linear SVM with approximate kernel maps.
The plot shows decision surfaces of the classifiers projected onto
the first two principal components of the data. This visualization should
be taken with a grain of salt since it is just an interesting slice through
the decision surface in 64 dimensions. In particular note that
a datapoint (represented as a dot) will not necessarily be classified
into the region it is lying in, since it does not lie on the plane
that the first two principal components span.
The usage of :class:`RBFSampler` and :class:`Nystroem` is described in detail
in :ref:`kernel_approximation`.
"""
print(__doc__)
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# Andreas Mueller <[email protected]>
# License: BSD 3 clause
# Standard scientific Python imports
import matplotlib.pyplot as plt
import numpy as np
from time import time
# Import datasets, classifiers and performance metrics
from sklearn import datasets, svm, pipeline
from sklearn.kernel_approximation import (RBFSampler,
Nystroem)
from sklearn.decomposition import PCA
# The digits dataset
digits = datasets.load_digits(n_class=9)
# To apply a classifier on this data, we need to flatten the image, to
# turn the data in a (samples, feature) matrix:
n_samples = len(digits.data)
data = digits.data / 16.
data -= data.mean(axis=0)
# We learn the digits on the first half of the digits
data_train, targets_train = data[:n_samples // 2], digits.target[:n_samples // 2]
# Now predict the value of the digit on the second half:
data_test, targets_test = data[n_samples // 2:], digits.target[n_samples // 2:]
#data_test = scaler.transform(data_test)
# Create a classifier: a support vector classifier
kernel_svm = svm.SVC(gamma=.2)
linear_svm = svm.LinearSVC()
# create pipeline from kernel approximation
# and linear svm
feature_map_fourier = RBFSampler(gamma=.2, random_state=1)
feature_map_nystroem = Nystroem(gamma=.2, random_state=1)
fourier_approx_svm = pipeline.Pipeline([("feature_map", feature_map_fourier),
("svm", svm.LinearSVC())])
nystroem_approx_svm = pipeline.Pipeline([("feature_map", feature_map_nystroem),
("svm", svm.LinearSVC())])
# fit and predict using linear and kernel svm:
kernel_svm_time = time()
kernel_svm.fit(data_train, targets_train)
kernel_svm_score = kernel_svm.score(data_test, targets_test)
kernel_svm_time = time() - kernel_svm_time
linear_svm_time = time()
linear_svm.fit(data_train, targets_train)
linear_svm_score = linear_svm.score(data_test, targets_test)
linear_svm_time = time() - linear_svm_time
sample_sizes = 30 * np.arange(1, 10)
fourier_scores = []
nystroem_scores = []
fourier_times = []
nystroem_times = []
for D in sample_sizes:
fourier_approx_svm.set_params(feature_map__n_components=D)
nystroem_approx_svm.set_params(feature_map__n_components=D)
start = time()
nystroem_approx_svm.fit(data_train, targets_train)
nystroem_times.append(time() - start)
start = time()
fourier_approx_svm.fit(data_train, targets_train)
fourier_times.append(time() - start)
fourier_score = fourier_approx_svm.score(data_test, targets_test)
nystroem_score = nystroem_approx_svm.score(data_test, targets_test)
nystroem_scores.append(nystroem_score)
fourier_scores.append(fourier_score)
# plot the results:
plt.figure(figsize=(8, 8))
accuracy = plt.subplot(211)
# second subplot for timings
timescale = plt.subplot(212)
accuracy.plot(sample_sizes, nystroem_scores, label="Nystroem approx. kernel")
timescale.plot(sample_sizes, nystroem_times, '--',
label='Nystroem approx. kernel')
accuracy.plot(sample_sizes, fourier_scores, label="Fourier approx. kernel")
timescale.plot(sample_sizes, fourier_times, '--',
label='Fourier approx. kernel')
# horizontal lines for exact rbf and linear kernels:
accuracy.plot([sample_sizes[0], sample_sizes[-1]],
[linear_svm_score, linear_svm_score], label="linear svm")
timescale.plot([sample_sizes[0], sample_sizes[-1]],
[linear_svm_time, linear_svm_time], '--', label='linear svm')
accuracy.plot([sample_sizes[0], sample_sizes[-1]],
[kernel_svm_score, kernel_svm_score], label="rbf svm")
timescale.plot([sample_sizes[0], sample_sizes[-1]],
[kernel_svm_time, kernel_svm_time], '--', label='rbf svm')
# vertical line for dataset dimensionality = 64
accuracy.plot([64, 64], [0.7, 1], label="n_features")
# legends and labels
accuracy.set_title("Classification accuracy")
timescale.set_title("Training times")
accuracy.set_xlim(sample_sizes[0], sample_sizes[-1])
accuracy.set_xticks(())
accuracy.set_ylim(np.min(fourier_scores), 1)
timescale.set_xlabel("Sampling steps = transformed feature dimension")
accuracy.set_ylabel("Classification accuracy")
timescale.set_ylabel("Training time in seconds")
accuracy.legend(loc='best')
timescale.legend(loc='best')
# visualize the decision surface, projected down to the first
# two principal components of the dataset
pca = PCA(n_components=8).fit(data_train)
X = pca.transform(data_train)
# Generate grid along first two principal components
multiples = np.arange(-2, 2, 0.1)
# steps along first component
first = multiples[:, np.newaxis] * pca.components_[0, :]
# steps along second component
second = multiples[:, np.newaxis] * pca.components_[1, :]
# combine
grid = first[np.newaxis, :, :] + second[:, np.newaxis, :]
flat_grid = grid.reshape(-1, data.shape[1])
# title for the plots
titles = ['SVC with rbf kernel',
'SVC (linear kernel)\n with Fourier rbf feature map\n'
'n_components=100',
'SVC (linear kernel)\n with Nystroem rbf feature map\n'
'n_components=100']
plt.tight_layout()
plt.figure(figsize=(12, 5))
# predict and plot
for i, clf in enumerate((kernel_svm, nystroem_approx_svm,
fourier_approx_svm)):
# Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max] x [y_min, y_max].
plt.subplot(1, 3, i + 1)
Z = clf.predict(flat_grid)
# Put the result into a color plot
Z = Z.reshape(grid.shape[:-1])
plt.contourf(multiples, multiples, Z, cmap=plt.cm.Paired)
plt.axis('off')
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=targets_train, cmap=plt.cm.Paired)
plt.title(titles[i])
plt.tight_layout()
plt.show()
|
bsd-3-clause
|
Hejtman/ultraGarden
|
server/test.py
|
1
|
1338
|
import matplotlib.pyplot as plt
import numpy as np
from gardener import Gardener
def plot_fogging_function(begin, end, step):
x = list(np.arange(begin, end, step))
y = list(Gardener.compute_period('FOGGING', xx) for xx in x)
table = list((str(xx), str(Gardener.compute_period('FOGGING', xx))) for xx in list(range(0, 35, 5)))
plt.plot(x, y)
plt.title('Fogging interval depends on current temperature')
plt.xlabel('temperature (°C)')
plt.ylabel('fogging interval (minutes)')
plt.table(cellText=table, cellLoc='center', loc='bottom', bbox=[0.45, 0.45, 0.3, 0.4])
plt.ylim(ymax=60)
plt.ylim(ymin=0)
plt.show()
def plot_watering_function(begin, end, step):
x = list(np.arange(begin, end, step))
y = list(Gardener.compute_period('WATERING', xx) for xx in x)
table = list((str(xx), str(Gardener.compute_period('WATERING',xx))) for xx in list(range(0, 35, 5)))
plt.plot(x, y)
plt.title('Watering interval depends on current temperature')
plt.xlabel('temperature (°C)')
plt.ylabel('watering interval (minutes)')
plt.table(cellText=table, cellLoc='center', loc='bottom', bbox=[0.45, 0.45, 0.3, 0.4])
plt.ylim(ymax=60)
plt.ylim(ymin=0)
plt.show()
plot_fogging_function(begin=0, end=30, step=0.01)
plot_watering_function(begin=0, end=30, step=0.01)
|
gpl-2.0
|
z01nl1o02/tests
|
imbalance-dataset-op/ops/clusterOP.py
|
1
|
2165
|
import os,shutil
import os.path as osp
import numpy as np
from sklearn.cluster import MiniBatchKMeans
from PIL import Image
from collections import defaultdict
from randomOP import _with_replace
def _run_cluster(origin_list, cluster_num = 8, batch_size=100,resize=(64,64)):
clf = MiniBatchKMeans(n_clusters=cluster_num,batch_size=batch_size)
def next_batch(allfiles,batch_size):
imgs = []
inds = []
for ind,(path,label) in enumerate(allfiles):
img = Image.open(path).convert("L")
            img = img.resize(resize, Image.ANTIALIAS)
img = np.reshape(np.array(img),(1,-1)).astype(np.float32) / 255.0
imgs.append(img)
inds.append(ind)
            if len(imgs) >= batch_size:
                # yield the indices first and then the stacked image batch,
                # matching how the two loops below unpack the tuples
                yield inds, np.vstack(imgs)
                imgs = []
                inds = []
        if len(inds) > 0:
            # a generator cannot return a value here; yield the final partial batch
            yield inds, np.vstack(imgs)
for _,batch in next_batch(origin_list,batch_size):
clf.partial_fit(batch)
cluster_dict = defaultdict(list)
for inds, batch in next_batch(origin_list, batch_size):
Ys = clf.predict(batch)
for y, ind in zip(Ys, inds):
path,label = origin_list[ind]
cluster_dict.setdefault(y,[]).append((path,label))
return cluster_dict
def sampling(input_file, output_dir, req_num, resize = (96,96)):
samples = []
labels = set([])
with open(input_file, 'r') as f:
for line in f:
path, label = line.strip().split(',')
samples.append((path,label))
labels.add(label)
for label in labels:
samples_one_label = list( filter(lambda x: x[1] == label, samples) )
cluster_num = 8
if len(samples_one_label) < 100:
_with_replace(samples_one_label,output_dir,len(samples_one_label) * req_num // len(samples))
else:
cluster_info = _run_cluster(samples_one_label,cluster_num=cluster_num,batch_size=100,resize=resize)
for cluster in cluster_info.keys():
_with_replace(cluster_info[cluster],output_dir,len(cluster_info[cluster]) * req_num // len(samples))
return
|
gpl-2.0
|
JsNoNo/scikit-learn
|
examples/cluster/plot_kmeans_silhouette_analysis.py
|
242
|
5885
|
"""
===============================================================================
Selecting the number of clusters with silhouette analysis on KMeans clustering
===============================================================================
Silhouette analysis can be used to study the separation distance between the
resulting clusters. The silhouette plot displays a measure of how close each
point in one cluster is to points in the neighboring clusters and thus provides
a way to assess parameters like number of clusters visually. This measure has a
range of [-1, 1].
Silhouette coefficients (as these values are referred to) near +1 indicate
that the sample is far away from the neighboring clusters. A value of 0
indicates that the sample is on or very close to the decision boundary between
two neighboring clusters and negative values indicate that those samples might
have been assigned to the wrong cluster.
In this example the silhouette analysis is used to choose an optimal value for
``n_clusters``. The silhouette plot shows that the ``n_clusters`` value of 3, 5
and 6 are a bad pick for the given data due to the presence of clusters with
below average silhouette scores and also due to wide fluctuations in the size
of the silhouette plots. Silhouette analysis is more ambivalent in deciding
between 2 and 4.
Also from the thickness of the silhouette plot the cluster size can be
visualized. The silhouette plot for cluster 0 when ``n_clusters`` is equal to
2, is bigger in size owing to the grouping of the 3 sub clusters into one big
cluster. However when the ``n_clusters`` is equal to 4, all the plots are more
or less of similar thickness and hence are of similar sizes as can be also
verified from the labelled scatter plot on the right.
"""
from __future__ import print_function
from sklearn.datasets import make_blobs
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_samples, silhouette_score
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import numpy as np
print(__doc__)
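# A tiny illustrative sketch (for orientation only; not used by the example below)
# of the quantity discussed above: the mean silhouette coefficient lies in [-1, 1],
# with values near +1 indicating well-separated clusters.
# `_mean_silhouette_sketch` is a name made up here.
def _mean_silhouette_sketch(n_clusters=4):
    X_demo, _ = make_blobs(n_samples=200, centers=4, random_state=0)
    labels = KMeans(n_clusters=n_clusters, random_state=0).fit_predict(X_demo)
    return silhouette_score(X_demo, labels)  # a single number in [-1, 1]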
# Generating the sample data from make_blobs
# This particular setting has one distinct cluster and 3 clusters placed close
# together.
X, y = make_blobs(n_samples=500,
n_features=2,
centers=4,
cluster_std=1,
center_box=(-10.0, 10.0),
shuffle=True,
random_state=1) # For reproducibility
range_n_clusters = [2, 3, 4, 5, 6]
for n_clusters in range_n_clusters:
# Create a subplot with 1 row and 2 columns
fig, (ax1, ax2) = plt.subplots(1, 2)
fig.set_size_inches(18, 7)
# The 1st subplot is the silhouette plot
    # The silhouette coefficient can range from -1 to 1, but in this example all
# lie within [-0.1, 1]
ax1.set_xlim([-0.1, 1])
# The (n_clusters+1)*10 is for inserting blank space between silhouette
# plots of individual clusters, to demarcate them clearly.
ax1.set_ylim([0, len(X) + (n_clusters + 1) * 10])
# Initialize the clusterer with n_clusters value and a random generator
# seed of 10 for reproducibility.
clusterer = KMeans(n_clusters=n_clusters, random_state=10)
cluster_labels = clusterer.fit_predict(X)
# The silhouette_score gives the average value for all the samples.
# This gives a perspective into the density and separation of the formed
# clusters
silhouette_avg = silhouette_score(X, cluster_labels)
print("For n_clusters =", n_clusters,
"The average silhouette_score is :", silhouette_avg)
# Compute the silhouette scores for each sample
sample_silhouette_values = silhouette_samples(X, cluster_labels)
y_lower = 10
for i in range(n_clusters):
# Aggregate the silhouette scores for samples belonging to
# cluster i, and sort them
ith_cluster_silhouette_values = \
sample_silhouette_values[cluster_labels == i]
ith_cluster_silhouette_values.sort()
size_cluster_i = ith_cluster_silhouette_values.shape[0]
y_upper = y_lower + size_cluster_i
color = cm.spectral(float(i) / n_clusters)
ax1.fill_betweenx(np.arange(y_lower, y_upper),
0, ith_cluster_silhouette_values,
facecolor=color, edgecolor=color, alpha=0.7)
# Label the silhouette plots with their cluster numbers at the middle
ax1.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i))
# Compute the new y_lower for next plot
y_lower = y_upper + 10 # 10 for the 0 samples
ax1.set_title("The silhouette plot for the various clusters.")
ax1.set_xlabel("The silhouette coefficient values")
ax1.set_ylabel("Cluster label")
    # The vertical line for average silhouette score of all the values
ax1.axvline(x=silhouette_avg, color="red", linestyle="--")
ax1.set_yticks([]) # Clear the yaxis labels / ticks
ax1.set_xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1])
# 2nd Plot showing the actual clusters formed
colors = cm.spectral(cluster_labels.astype(float) / n_clusters)
ax2.scatter(X[:, 0], X[:, 1], marker='.', s=30, lw=0, alpha=0.7,
c=colors)
# Labeling the clusters
centers = clusterer.cluster_centers_
# Draw white circles at cluster centers
ax2.scatter(centers[:, 0], centers[:, 1],
marker='o', c="white", alpha=1, s=200)
for i, c in enumerate(centers):
ax2.scatter(c[0], c[1], marker='$%d$' % i, alpha=1, s=50)
ax2.set_title("The visualization of the clustered data.")
ax2.set_xlabel("Feature space for the 1st feature")
ax2.set_ylabel("Feature space for the 2nd feature")
plt.suptitle(("Silhouette analysis for KMeans clustering on sample data "
"with n_clusters = %d" % n_clusters),
fontsize=14, fontweight='bold')
plt.show()
|
bsd-3-clause
|
oemof/examples
|
oemof_examples/oemof.solph/v0.3.x/storage_investment/v4_invest_optimize_all_technologies_with_fossil_share.py
|
2
|
7106
|
# -*- coding: utf-8 -*-
"""
General description
-------------------
This example shows how to perform a capacity optimization for
an energy system with storage. The following energy system is modeled:
input/output bgas bel
| | | |
| | | |
wind(FixedSource) |------------------>| |
| | | |
pv(FixedSource) |------------------>| |
| | | |
gas_resource |--------->| | |
(Commodity) | | | |
| | | |
demand(Sink) |<------------------| |
| | | |
| | | |
pp_gas(Transformer) |<---------| | |
|------------------>| |
| | | |
storage(Storage) |<------------------| |
|------------------>| |
The example exists in four variations. The following parameters describe
the main setting for the optimization variation 4:
- optimize wind, pv, and storage
- set investment cost for wind, pv and storage
- set a fossil share
Results now show that the 80% renewable energy share is achieved solely by
installing a little more wind and pv (compared to variation 2). Storage is not
installed.
Have a look at different parameter settings. There are four variations
of this example in the same folder.
Installation requirements
-------------------------
This example requires the version v0.3.x of oemof. Install by:
pip install 'oemof>=0.3,<0.4'
"""
__copyright__ = "oemof developer group"
__license__ = "GPLv3"
###############################################################################
# Imports
###############################################################################
# Default logger of oemof
from oemof.tools import logger
from oemof.tools import economics
import oemof.solph as solph
from oemof.outputlib import processing, views
import logging
import os
import pandas as pd
import pprint as pp
number_timesteps = 8760
##########################################################################
# Initialize the energy system and read/calculate necessary parameters
##########################################################################
logger.define_logging()
logging.info('Initialize the energy system')
date_time_index = pd.date_range('1/1/2012', periods=number_timesteps,
freq='H')
energysystem = solph.EnergySystem(timeindex=date_time_index)
# Read data file
full_filename = os.path.join(os.path.dirname(__file__),
'storage_investment.csv')
data = pd.read_csv(full_filename, sep=",")
fossil_share = 0.2
consumption_total = data['demand_el'].sum()
# If the period is one year the equivalent periodical costs (epc) of an
# investment are equal to the annuity. Use oemof's economic tools.
epc_wind = economics.annuity(capex=1000, n=20, wacc=0.05)
epc_pv = economics.annuity(capex=1000, n=20, wacc=0.05)
epc_storage = economics.annuity(capex=1000, n=20, wacc=0.05)
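# For orientation: assuming the standard annuity formula
#   epc = capex * (wacc * (1 + wacc)**n) / ((1 + wacc)**n - 1),
# capex=1000, n=20 and wacc=0.05 give an epc of roughly 80.2 per unit of
# installed capacity and year.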
##########################################################################
# Create oemof objects
##########################################################################
logging.info('Create oemof objects')
# create natural gas bus
bgas = solph.Bus(label="natural_gas")
# create electricity bus
bel = solph.Bus(label="electricity")
energysystem.add(bgas, bel)
# create excess component for the electricity bus to allow overproduction
excess = solph.Sink(label='excess_bel', inputs={bel: solph.Flow()})
# create source object representing the natural gas commodity (annual limit)
gas_resource = solph.Source(label='rgas', outputs={bgas: solph.Flow(
nominal_value=fossil_share * consumption_total / 0.58
* number_timesteps / 8760, summed_max=1)})
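# summed_max=1 caps the total gas taken from this source at nominal_value; after
# the 0.58 conversion factor of pp_gas this corresponds to at most fossil_share
# of the total electricity demand being supplied from natural gas.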
# create fixed source object representing wind power plants
wind = solph.Source(label='wind', outputs={bel: solph.Flow(
actual_value=data['wind'], fixed=True,
investment=solph.Investment(ep_costs=epc_wind))})
# create fixed source object representing pv power plants
pv = solph.Source(label='pv', outputs={bel: solph.Flow(
actual_value=data['pv'], fixed=True,
investment=solph.Investment(ep_costs=epc_pv))})
# create simple sink object representing the electrical demand
demand = solph.Sink(label='demand', inputs={bel: solph.Flow(
actual_value=data['demand_el'], fixed=True, nominal_value=1)})
# create simple transformer object representing a gas power plant
pp_gas = solph.Transformer(
label="pp_gas",
inputs={bgas: solph.Flow()},
outputs={bel: solph.Flow(nominal_value=10e10, variable_costs=0)},
conversion_factors={bel: 0.58})
# create storage object representing a battery
storage = solph.components.GenericStorage(
label='storage',
inputs={bel: solph.Flow(variable_costs=0.0001)},
outputs={bel: solph.Flow()},
loss_rate=0.00, initial_storage_level=0,
invest_relation_input_capacity=1/6,
invest_relation_output_capacity=1/6,
inflow_conversion_factor=1, outflow_conversion_factor=0.8,
investment=solph.Investment(ep_costs=epc_storage),
)
energysystem.add(excess, gas_resource, wind, pv, demand, pp_gas, storage)
##########################################################################
# Optimise the energy system
##########################################################################
logging.info('Optimise the energy system')
# initialise the operational model
om = solph.Model(energysystem)
# if tee_switch is true solver messages will be displayed
logging.info('Solve the optimization problem')
om.solve(solver='cbc', solve_kwargs={'tee': True})
##########################################################################
# Check and plot the results
##########################################################################
# check if the new result object is working for custom components
results = processing.results(om)
custom_storage = views.node(results, 'storage')
electricity_bus = views.node(results, 'electricity')
meta_results = processing.meta_results(om)
pp.pprint(meta_results)
my_results = electricity_bus['scalars']
# installed capacity of storage in GWh
my_results['storage_invest_GWh'] = (results[(storage, None)]
['scalars']['invest']/1e6)
# installed capacity of wind power plant in MW
my_results['wind_invest_MW'] = (results[(wind, bel)]
['scalars']['invest']/1e3)
# installed capacity of pv power plant in MW
my_results['pv_invest_MW'] = (results[(pv, bel)]
['scalars']['invest']/1e3)
# resulting renewable energy share
my_results['res_share'] = (1 - results[(pp_gas, bel)]
['sequences'].sum()/results[(bel, demand)]
['sequences'].sum())
pp.pprint(my_results)
|
gpl-3.0
|
boada/ICD
|
sandbox/plot_icd_mass_box_width.py
|
1
|
6289
|
#!/usr/bin/env python
import pylab as pyl
import cPickle as pickle
from astLib import astCalc
def plot_icd_vs_mass():
galaxies = pickle.load(open('galaxies.pickle','rb'))
galaxies = filter(lambda galaxy: 0.06 * galaxy.halflight *\
astCalc.da(galaxy.z)*1000/206265. > 2, galaxies)
# Make figure
f1 = pyl.figure(1, figsize=(6,4))
f1s1 = f1.add_subplot(121)
f1s2 = f1.add_subplot(122)
# f1s3 = f1.add_subplot(223)
# f1s4 = f1.add_subplot(224)
#Upper and Lower limit arrow verts
arrowup_verts = [[0.,0.], [-1., -1], [0.,0.],
[0.,-2.], [0.,0.], [1,-1], [0,0]]
#arrowdown_verts = [[0.,0.], [-1., 1], [0.,0.],
# [0.,2.], [0.,0.], [1, 1]]
for galaxy in galaxies:
if galaxy.ston_I > 30. and galaxy.ICD_IH != None:
# Add arrows first
if galaxy.ICD_IH > 0.5:
f1s1.scatter(galaxy.Mass, 0.5*100, s=100, marker=None,
verts=arrowup_verts)
else:
f1s1.scatter(galaxy.Mass, galaxy.ICD_IH * 100, c='0.8',
marker='o', s=25, edgecolor='0.8')
f1s2.scatter(galaxy.Mass, galaxy.ICD_IH * 100, c='0.8',
marker='o', s=25, edgecolor='0.8')
'''
if galaxy.ston_J > 30. and galaxy.ICD_JH != None:
# Add arrows first
if galaxy.ICD_JH > 0.12:
f1s3.scatter(galaxy.Mass, 12, s=100, marker=None,
verts=arrowup_verts)
else:
f1s3.scatter(galaxy.Mass, galaxy.ICD_JH * 100, c='0.8',
marker='o', s=25, edgecolor='0.8')
f1s4.scatter(galaxy.Mass, galaxy.ICD_JH * 100, c='0.8',
marker='o', s=25, edgecolor='0.8')
'''
# Add the box and whiskers
galaxies2 = filter(lambda galaxy: galaxy.ston_I > 30., galaxies)
galaxies2 = pyl.asarray(galaxies2)
x = [galaxy.Mass for galaxy in galaxies2]
ll = 8.5
ul= 12
#bins_x =pyl.arange(8.5, 12.5, 0.5)
bins_x =pyl.array([8.5, 9., 9.5, 10., 10.5, 11., 12.])
grid = []
for i in range(bins_x.size-1):
xmin = bins_x[i]
xmax = bins_x[i+1]
cond=[cond1 and cond2 for cond1, cond2 in zip(x>=xmin, x<xmax)]
grid.append(galaxies2.compress(cond))
icd = []
for i in range(len(grid)):
icd.append([galaxy.ICD_IH*100 for galaxy in grid[i]])
from boxplot_percentile_width import percentile_box_plot as pbp
#bp1 = f1s1.boxplot(icd, positions=pyl.delete(bins_x,-1)+0.25, sym='')
width = pyl.diff(bins_x)
index = pyl.delete(bins_x,-1) + 0.25
index[-1] = index[-1] + 0.25
pbp(f1s1, icd, indexer=list(index), width=width)
pbp(f1s2, icd, indexer=list(index), width=width)
'''
# Add the box and whiskers
galaxies2 = filter(lambda galaxy: galaxy.ston_J > 30., galaxies)
galaxies2 = pyl.asarray(galaxies2)
x = [galaxy.Mass for galaxy in galaxies2]
ll = 8.5
ul= 12
#bins_x =pyl.linspace(ll, ul, 7)
#bins_x =pyl.arange(8.5, 12.5, 0.5)
bins_x =pyl.array([8.5, 9., 9.5, 10., 10.5, 11., 12.])
grid = []
for i in range(bins_x.size-1):
xmin = bins_x[i]
xmax = bins_x[i+1]
cond=[cond1 and cond2 for cond1, cond2 in zip(x>=xmin, x<xmax)]
grid.append(galaxies2.compress(cond))
icd = []
for i in range(len(grid)):
icd.append([galaxy.ICD_JH*100 for galaxy in grid[i]])
#bp2 = f1s2.boxplot(icd, positions=pyl.delete(bins_x,-1)+0.25, sym='')
width = pyl.diff(bins_x)
index = pyl.delete(bins_x,-1) + 0.25
index[-1] = index[-1] + 0.25
pbp(f1s3, icd, indexer=list(index), width=width)
pbp(f1s4, icd, indexer=list(index), width=width)
'''
# Finish Plot
# Tweak colors on the boxplot
#pyl.setp(bp1['boxes'], lw=2)
#pyl.setp(bp1['whiskers'], lw=2)
#pyl.setp(bp1['medians'], lw=2)
#pyl.setp(bp2['boxes'], lw=2)
#pyl.setp(bp2['whiskers'], lw=2)
#pyl.setp(bp2['medians'], lw=2)
#pyl.setp(bp['fliers'], color='#8CFF6F', marker='+')
#f1s1.axvspan(7.477, 9, facecolor='#FFFDD0', ec='None', zorder=0)
#f1s1.axvspan(11, 12, facecolor='#FFFDD0', ec='None', zorder=0)
#f1s2.axvspan(7.477, 9, facecolor='#FFFDD0', ec='None', zorder=0)
#f1s2.axvspan(11, 12, facecolor='#FFFDD0', ec='None', zorder=0)
f1s1.set_xlim(8,12)
f1s2.set_xlim(8,12)
# f1s3.set_xlim(8,12)
# f1s4.set_xlim(8,12)
f1s1.set_ylim(-10,50)
f1s2.set_ylim(0,15)
# f1s3.set_ylim(-5,12)
# f1s4.set_ylim(-1,3)
f1s1.set_xticks([8,9,10,11,12])
# f1s1.set_xticklabels([])
f1s2.set_xticks([8,9,10,11,12])
# f1s2.set_xticklabels([])
# f1s3.set_xticks([8,9,10,11,12])
# f1s4.set_xticks([8,9,10,11,12])
# f1s4.set_yticks([-1, 0, 1, 2, 3])
f1s1.set_ylabel(r"$\xi[i_{775},H_{160}]$ (%)")
f1s1.set_xlabel(r"Log Mass ($M_{\odot})$")
f1s2.set_xlabel(r"Log Mass ($M_{\odot})$")
# f1s3.set_ylabel(r"$\xi[J_{125},H_{160}]$ (%)")
import matplotlib.font_manager
line1 = pyl.Line2D([], [], marker='o', mfc='0.8', mec='0.8', markersize=8,
linewidth=0)
line2 = pyl.Line2D([], [], marker='s', mec='#348ABD', mfc='None',
markersize=10, linewidth=0, markeredgewidth=2)
line3 = pyl.Line2D([], [], color='#A60628', linewidth=2)
prop = matplotlib.font_manager.FontProperties(size='small')
pyl.figlegend((line1, line2, line3), ('Data', 'Quartiles',
'Medians'), 'lower center', prop=prop, ncol=3)
from matplotlib.patches import ConnectionPatch
xy = (12, 15)
xy2 = (8, 15)
con = ConnectionPatch(xyA=xy, xyB=xy2, coordsA='data', coordsB='data',
axesA=f1s1, axesB=f1s2)
xy = (12, 0)
xy2 = (8, 0)
con2 = ConnectionPatch(xyA=xy, xyB=xy2, coordsA='data', coordsB='data',
axesA=f1s1, axesB=f1s2)
f1s1.add_artist(con)
f1s1.add_artist(con2)
xy = (12, 3)
xy2 = (8, 3)
# con = ConnectionPatch(xyA=xy, xyB=xy2, coordsA='data', coordsB='data',
# axesA=f1s3, axesB=f1s4)
xy = (12, -1)
xy2 = (8, -1)
# con2 = ConnectionPatch(xyA=xy, xyB=xy2, coordsA='data', coordsB='data',
# axesA=f1s3, axesB=f1s4)
# f1s3.add_artist(con)
# f1s3.add_artist(con2)
pyl.draw()
pyl.show()
if __name__=='__main__':
plot_icd_vs_mass()
|
mit
|
coupdair/pyoptools
|
pyoptools/misc/pmisc/misc.py
|
9
|
18011
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import numpy as N
from numpy import array, sin, cos, float64, dot, float_, sqrt, ceil, floor, \
     meshgrid, zeros, zeros_like, where, nan, pi, isnan, nonzero, rint, \
     linspace, arange, argwhere
from numpy.ma import is_masked, MaskedArray
from numpy.ma import array as ma_array
#from enthought.traits.api import Trait, TraitHandler
from scipy import interpolate
from pylab import griddata, meshgrid
'''Auxiliary functions and classes
'''
#~ class TraitUnitVector(TraitHandler):
#~ ''' Class to define unit vector trait
#~
#~ Description:
#~
#~ This class defines a unit vector. If the value assigned is not a unit
#~ vector, it gets automatically normalized
#~ '''
#~
#~ def validate(self, object, name, value):
#~ try:
#~ avalue=array(value)
#~ except:
#~ self.error(object, name, value)
#~
#~ if len(avalue.shape)!=1 or avalue.shape[0]!=3:
#~ return self.error(object, name, avalue)
#~
#~ avalue=array(avalue/sqrt(dot(avalue,avalue)))
#~ return avalue
#~
#~ # Trait to define a unit vector based on the unit vector trait
#~ UnitVector = Trait(array([0,0,1], float_),TraitUnitVector())
#~ print "Nota: Hay que revisar las convenciones de las rotaciones para que queden\n\r "\
#~ "consistentes en x,y,z. Me parece que hay un error en el signo de la \n\r rotacion"\
#~ "al rededor de alguno de los ejes. Modulo misc \n\r"\
#~ "si no estoy mal el error esta en la rotacion respecto a los ejez Y y Z"
def rot_x(tx):
'''Returns the transformation matrix for a rotation around the X axis
'''
return array([[1.,0. ,0. ],
[0.,cos(tx),-sin(tx)],
[0.,sin(tx), cos(tx)]]).astype(float64)
def rot_y(ty):
'''Returns the transformation matrix for a rotation around the Y axis
'''
return array([[ cos(ty),0. ,sin(ty) ],
[ 0. ,1 ,0. ],
[-sin(ty),0. ,cos(ty) ]]).astype(float64)
def rot_z(tz):
'''Returns the transformation matrix for a rotation around the Z axis
'''
return array([[ cos(tz),-sin(tz),0. ],
[ sin(tz), cos(tz),0. ],
[ 0. ,0. ,1. ]]).astype(float64)
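# Illustrative check (not part of the original module): a 90 degree rotation about
# the Z axis maps the x unit vector onto the y unit vector, so
#     dot(rot_z(pi/2), array([1., 0., 0.]))
# is approximately array([0., 1., 0.]) (up to floating point round-off); rot_x and
# rot_y behave analogously about their respective axes.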
#~ def rot_mat(r):
#~ '''Returns the transformation matrix for a rotation around the Z,Y,X axes
#~
#~ The rotation is made first around the Z axis, then around the Y axis, and
#~ finally around the X axis.
#~
#~ Parameters
#~
#~ r= (rx,ry,rz)
#~ '''
#~
#~ c=cos(r)
#~ s=sin(r)
#~
#~ rx=array([[1. , 0., 0.],
#~ [0. , c[0],-s[0]],
#~ [0. , s[0], c[0]]])
#~
#~ ry=array([[ c[1], 0., s[1]],
#~ [ 0., 1., 0.],
#~ [-s[1], 0., c[1]]])
#~
#~
#~ rz=array([[ c[2],-s[2], 0.],
#~ [ s[2], c[2], 0.],
#~ [ 0., 0., 1.]])
#~
#~
#~ tm=dot(rz,dot(ry,rx))
#~
#~ return tm
# To improve speed, this routine was moved to cmisc.pyx
#~ def rot_mat_i(r):
#~ '''Returns the inverse transformation matrix for a rotation around the Z,Y,X axes
#~
#~ Parameters
#~
#~ r= (rx,ry,rz)
#~ '''
#~
#~ c=cos(r)
#~ s=sin(r)
#~
#~ rx=array([[ 1., 0., 0.],
#~ [ 0., c[0], s[0]],
#~ [ 0.,-s[0], c[0]]])
#~
#~ ry=array([[ c[1], 0.,-s[1]],
#~ [ 0., 1., 0.],
#~ [ s[1], 0., c[1]]])
#~
#~
#~ rz=array([[ c[2], s[2], 0.],
#~ [-s[2], c[2], 0.],
#~ [ 0., 0., 1.]])
#~
#~ # Note: a test was made to optimize this by writing out the dot-product
#~ # expression explicitly, and the result was considerably slower; needs review
#~
#~
#~ return dot(rx,dot(ry,rz))
def cross(a,b):
    '''3D vector (cross) product.'''
x1,y1,z1=a
x2,y2,z2=b
return array((y1*z2-y2*z1,x2*z1-x1*z2,x1*y2-x2*y1))
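# Illustrative check (not part of the original module): the right-handed basis
# vectors satisfy x ^ y = z, i.e.
#     cross((1., 0., 0.), (0., 1., 0.))
# returns array([0., 0., 1.]).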
def wavelength2RGB(wl):
    '''Function to approximate an RGB tuple from a wavelength value
    Parameter:
    wavelength wavelength in um
    If the wavelength is outside the visible spectrum, (0, 0, 0) is returned.
    Original code found at:
    http://www.physics.sfasu.edu/astro/color/spectra.html
    '''
R,G,B=0.,0.,0.
if (wl>=.380) & (wl<.440):
R = -1.*(wl-.440)/(.440-.380)
G = 0.
B = 1.
if (wl>=.440) & (wl<.490):
R = 0.
G = (wl-.440)/(.490-.440)
B = 1.
if (wl>=.490) & (wl<.510):
R = 0.
G = 1.
B = -1.*(wl-.510)/(.510-.490)
if (wl>=.510) & (wl<.580):
R = (wl-.510)/(.580-.510)
G = 1.
B = 0.
if (wl>=.580) & (wl<.645):
R = 1.
G = -1.*(wl-.645)/(.645-.580)
B = 0.
if (wl>=.645) & (wl < .780):
R = 1.
G = 0.
B = 0.
# LET THE INTENSITY FALL OFF NEAR THE VISION LIMITS
if (wl>=.700):
sss =.3+.7* (.780-wl)/(.780-.700)
elif (wl < .420) :
sss=.3+.7*(wl-.380)/(.420-.380)
else :
sss=1
R=R*sss
G=G*sss
B=B*sss
return (R,G,B)
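# Illustrative values (not part of the original module): green light at 0.55 um
# falls in the 0.510-0.580 um branch, so
#     wavelength2RGB(0.55)
# returns roughly (0.57, 1.0, 0.0); wavelengths outside 0.380-0.780 um give
# (0.0, 0.0, 0.0), as stated in the docstring.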
def matrix_interpolation(M, i, j, type="bilinear"):
"""Returns the interpolated value of a matrix, when the indices i,j are floating
point numbers.
M
Matrix to interpolate
i,j
Indices to interpolate
type
Interpolation type. supported types: nearest,bilinear
"""
mi, mj=M.shape
if i<0 or i>mi-2 or j<0 or j>mj-2:
raise IndexError("matrix Indexes out of range")
# Allowed interpolation types
inter_types=["nearest","bilinear", ]
if not type in inter_types:
raise ValueError("Interpolation type not allowed. The allowed types"\
" are: {0}".format(inter_types))
if type=="nearest":
iri=int(round(i))
irj=int(round(j))
return M[iri, irj]
elif type=="bilinear":
i_s, j_s=floor((i, j))
#calc 1
m=M[i_s:i_s+2, j_s:j_s+2]
iv=array([1-(i-i_s), i-i_s])
jv=array([[1-(j-j_s),], [j-j_s, ]])
return dot(iv, dot(m, jv))[0]
#dx=i-i_s
#dy=j-j_s
##print i, j, i_s, j_s, dx, dy
#p1=dx*dy*M[i_s, j_s]
#p2=(1.-dx)*dy*M[i_s+1, j_s]
#p3=dx*(1.-dy)*M[i_s, j_s+1]
#p4=(1.-dx)*(1.-dy)*M[i_s+1, j_s+1]
#return p1+ p2+ p3+ p4
print "error"
return 1.
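# Worked example (not part of the original module): for
#     M = array([[0., 1., 2.], [3., 4., 5.], [6., 7., 8.]])
# the bilinear value at the centre of the first cell averages its four corners, so
#     matrix_interpolation(M, 0.5, 0.5, type="bilinear")
# gives 2.0 ( = (0 + 1 + 3 + 4) / 4 ).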
def hitlist2int(x, y, z, xi, yi):
"""Function that estimates an intensity distribution on a plane from a
ray hitlist
"""
import matplotlib.delaunay as delaunay
from pylab import griddata, meshgrid
from scipy import interpolate
#if xi.ndim != yi.ndim:
# raise TypeError("inputs xi and yi must have same number of dimensions (1 or 2)")
#if xi.ndim != 1 and xi.ndim != 2:
# raise TypeError("inputs xi and yi must be 1D or 2D.")
#if not len(x)==len(y)==len(z):
# raise TypeError("inputs x,y,z must all be 1D arrays of the same length")
# remove masked points.
#if hasattr(z,'mask'):
# x = x.compress(z.mask == False)
# y = y.compress(z.mask == False)
# z = z.compressed()
#if xi.ndim == 1:
# xi,yi = meshgrid(xi,yi)
#triangulate data
tri=delaunay.Triangulation(x, y)
#calculate triangles area
ntriangles=tri.circumcenters.shape[0]
coord=array(zip(tri.x, tri.y))
#I=zeros((ntriangles, ))
#xc=zeros((ntriangles, ))
#yc=zeros((ntriangles, ))
# for i in range(ntriangles):
# i1, i2, i3=tri.triangle_nodes[i]
# p1=coord[i1]
# p2=coord[i2]
# p3=coord[i3]
# v1=p1-p2
# v2=p3-p2
# I[i]=1./(abs(v1[0]*v2[1]-v1[1]*v2[0]))
# # the circumcenter data from the triangulation, has some problems so we
# # recalculate it
# xc[i], yc[i]=(p1+p2+p3)/3.
# The previous code was replaced by the following code
###
i1=tri.triangle_nodes[:, 0]
i2=tri.triangle_nodes[:, 1]
i3=tri.triangle_nodes[:, 2]
p1=coord[i1]
p2=coord[i2]
p3=coord[i3]
v1=p1-p2
v2=p3-p2
I=abs(1./(v1[:, 0]*v2[:, 1]-v1[:, 1]*v2[:, 0]))
c=(p1+p2+p3)/3.
xc=c[:, 0]
yc=c[:, 1]
###
    # Because of the triangulation algorithm, there are some really high values
    # in the intensity data. To filter these values, remove the 10% of points
    # with the highest intensity.
ni=int(0.1*len(I))
j=I.argsort()[:-ni]
xc=xc[j]
yc=yc[j]
I=I[j]
I=I/I.max()
# #print tri.circumcenters[:, 0]
# #print tri.circumcenters.shape
# print ntriangles, tri.circumcenters[:, 0].shape, tri.circumcenters[:, 0].flatten().shape
#itri=delaunay.Triangulation(xc,yc)
#inti=itri.linear_interpolator(I)
#xi,yi = meshgrid(xi,yi)
#d1=itri(xi, yi)
    #Spline interpolation
    #di=interpolate.SmoothBivariateSpline(xc, yc, I)
    #d1=di(xi,yi)
    #NN interpolation and pupil generation
xi,yi = meshgrid(xi,yi)
d1=griddata(xc, yc, I,xi, yi )
return d1
def hitlist2int_list(x, y):
"""Function that estimates an intensity distribution on a plane from a
ray hitlist. Returns the intensity samples as an x,y,I list
"""
import matplotlib.delaunay as delaunay
from pylab import griddata, meshgrid
from scipy import interpolate
#if xi.ndim != yi.ndim:
# raise TypeError("inputs xi and yi must have same number of dimensions (1 or 2)")
#if xi.ndim != 1 and xi.ndim != 2:
# raise TypeError("inputs xi and yi must be 1D or 2D.")
#if not len(x)==len(y)==len(z):
# raise TypeError("inputs x,y,z must all be 1D arrays of the same length")
# remove masked points.
#if hasattr(z,'mask'):
# x = x.compress(z.mask == False)
# y = y.compress(z.mask == False)
# z = z.compressed()
#if xi.ndim == 1:
# xi,yi = meshgrid(xi,yi)
#triangulate data
tri=delaunay.Triangulation(x, y)
#calculate triangles area
ntriangles=tri.circumcenters.shape[0]
coord=array(zip(tri.x, tri.y))
#I=zeros((ntriangles, ))
#xc=zeros((ntriangles, ))
#yc=zeros((ntriangles, ))
# for i in range(ntriangles):
# i1, i2, i3=tri.triangle_nodes[i]
# p1=coord[i1]
# p2=coord[i2]
# p3=coord[i3]
# v1=p1-p2
# v2=p3-p2
# I[i]=1./(abs(v1[0]*v2[1]-v1[1]*v2[0]))
# # the circumcenter data from the triangulation, has some problems so we
# # recalculate it
# xc[i], yc[i]=(p1+p2+p3)/3.
# The previous code was replaced by the following code
###
i1=tri.triangle_nodes[:, 0]
i2=tri.triangle_nodes[:, 1]
i3=tri.triangle_nodes[:, 2]
p1=coord[i1]
p2=coord[i2]
p3=coord[i3]
v1=p1-p2
v2=p3-p2
I=abs(1./(v1[:, 0]*v2[:, 1]-v1[:, 1]*v2[:, 0]))
c=(p1+p2+p3)/3.
xc=c[:, 0]
yc=c[:, 1]
###
    # Because of the triangulation algorithm, there are some really high values
    # in the intensity data. To filter these values, remove the 10% of points
    # with the highest intensity.
ni=int(0.1*len(I))
j=I.argsort()[:-ni]
xc=xc[j]
yc=yc[j]
I=I[j]
I=I/I.max()
# #print tri.circumcenters[:, 0]
# #print tri.circumcenters.shape
# print ntriangles, tri.circumcenters[:, 0].shape, tri.circumcenters[:, 0].flatten().shape
#itri=delaunay.Triangulation(xc,yc)
#inti=itri.linear_interpolator(I)
#xi,yi = meshgrid(xi,yi)
#d1=itri(xi, yi)
    #Spline interpolation
#di=interpolate.SmoothBivariateSpline(xc, yc, I)
#d1=di(xi,yi)
return xc,yc,I
def unwrapv(inph,in_p=(), uv=2*pi):
"""Return the input matrix unwraped the value given in uv
This is a vectorized routine, but is not as fast as it should
"""
if not is_masked(inph):
fasei=MaskedArray(inph, isnan(inph))
else:
fasei=inph.copy()
size=fasei.shape
nx, ny=size
# If the initial unwraping point is not given, take the center of the image
# as initial coordinate
if in_p==():
in_p=(int(size[0]/2),int(size[1]/2))
# Create a temporal space to mark if the points are already unwrapped
# 0 the point has not been unwrapped
# 1 the point has not been unwrapped, but it is in the unwrapping list
# 2 the point was already unwrapped
fl=N.zeros(size)
# List containing the points to unwrap
l_un=[in_p]
fl[in_p]=1
# unwrapped values
faseo=fasei.copy()
XI_, YI_= meshgrid(range(-1, 2), range(-1, 2))
XI_=XI_.flatten()
YI_=YI_.flatten()
while len(l_un)>0:
# remove the first value from the list
unp=l_un.pop(0)
#l_un[0:1]=[]
XI=XI_+unp[0]
YI=YI_+unp[1]
#Remove from the list the values where XI is negative
nxi=XI>-1
nyi=YI>-1
nxf=XI<nx
nyf=YI<ny
n=nonzero(nxi& nyi & nxf & nyf)
lco=zip(XI[n], YI[n])
        # Put the coordinates of the unwrapped neighbors in the list
# And check for wrapping
nv=0
wv=0
for co in lco:
if (fl[co]==0) & (faseo.mask[co]==False):
fl[co]=1
l_un.append(co)
elif fl[co]==2:
wv=wv+rint((faseo[co]-faseo[unp])/uv)
nv=nv+1
if nv!=0:
wv=wv/nv
#if wv>=0: wv=int(wv+0.5)
#else: wv=int(wv-0.5)
fl[unp]=2
faseo[unp]=faseo[unp]+wv*uv
return faseo
def unwrap_py(inph,in_p=(), uv=2*pi):
"""Return the input matrix unwraped the valu given in uv
The same as unwrapv, but using for-s, written in python
"""
if not is_masked(inph):
fasei=MaskedArray(inph, isnan(inph))
else:
fasei=inph
nx, ny=(fasei.shape[0],fasei.shape[1])
# If the initial unwraping point is not given, take the center of the image
# as initial coordinate
if in_p==():
in_p=(int(nx/2),int(ny/2))
# Create a temporal space to mark if the points are already unwrapped
# 0 the point has not been unwrapped
# 1 the point has not been unwrapped, but it is in the unwrapping list
# 2 the point was already unwrapped
fl=zeros((nx, ny))
# List containing the points to unwrap
l_un=[in_p]
fl[in_p]=1
# unwrapped values
faseo=fasei.copy()
while len(l_un)>0:
# remove the first value from the list
cx, cy=l_un.pop(0)
        # Put the coordinates of the unwrapped neighbors in the list
# And check for wrapping
nv=0
wv=0
for i in range(cx-1, cx+2):
for j in range(cy-1, cy+2):
if (i>-1) and (i<nx) and (j>-1) and (j<ny):
if (fl[i, j]==0)&(faseo.mask[i, j]==False):
fl[i, j]=1
l_un.append((i, j))
elif fl[i, j]==2:
wv=wv+rint((faseo[i, j]-faseo[cx, cy])/uv)
nv=nv+1
if nv!=0:
wv=wv/nv
fl[cx, cy]=2
faseo[cx, cy]=faseo[cx, cy]+wv*uv
return faseo
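# Usage note (not from the original module): both unwrapv and unwrap_py expect a 2D
# phase map wrapped modulo uv (2*pi by default). For a smooth phase phi wrapped as,
# e.g., arctan2(sin(phi), cos(phi)), the returned map equals phi up to an additive
# multiple of uv fixed by the value at the seed point in_p (the centre of the image
# by default); NaN entries are treated as masked and are skipped.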
def interpolate_g(xi,yi,zi,xx,yy,knots=10, error=False,mask=None):
"""Create a grid of zi values interpolating the values from xi,yi,zi
xi,yi,zi 1D Lists or arrays containing the values to use as base for the interpolation
xx,yy 1D vectors or lists containing the output coordinates
    mask     optional mask array applied to the output grid (default None)
knots number of knots to be used in each direction
error if set to true, half of the points (x, y, z) are used to create
the interpolation, and half are used to evaluate the interpolation error
"""
xi=array(xi)
yi=array(yi)
zi=array(zi)
#print xi
#print yi
#print zi
    assert xi.ndim==1 ,"xi must be a 1D array or list"
    assert yi.ndim==1 ,"yi must be a 1D array or list"
    assert zi.ndim==1 ,"zi must be a 1D array or list"
    assert xx.ndim==1 ,"xx must be a 1D array or list"
    assert yy.ndim==1 ,"yy must be a 1D array or list"
assert len(xi)==len(yi) and len(xi)==len(zi), "xi, yi, zi must have the same number of items"
    if error==True:
        # Create a list of indexes to be able to select the points that are going
        # to be used as spline generators, and as control points
        idx=where(arange(len(xi)) %2 ==0, False, True)
        # Use only half of the samples to create the Spline,
        isp=argwhere(idx==True)
        ich=argwhere(idx==False)
xsp=xi[isp]
ysp=yi[isp]
zsp=zi[isp]
xch=xi[ich]
ych=yi[ich]
zch=zi[ich]
else:
xsp=xi
ysp=yi
zsp=zi
#Distribute homogeneously the knots
xk=linspace(xsp.min(), xsp.max(),knots)
yk=linspace(ysp.min(), ysp.max(),knots)
# LSQBivariateSpline using some knots gives smaller error than
# SmoothBivariateSpline
di=interpolate.LSQBivariateSpline(xsp, ysp, zsp, xk[1:-1], yk[1:-1])
#print xsp,ysp,zsp
#di=interpolate.SmoothBivariateSpline(xsp, ysp, zsp)
# Evaluate error
if error==True:
zch1=di.ev(xch, ych)
er=(zch.flatten()-zch1).std()
if mask==None:
#d=griddata(xi, yi, zi, xx, yy) #
d=di(xx,yy).transpose()
else:
d=ma_array(di(xx,yy).transpose(), mask=mask)
if error==True: return d, er
else: return d
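# Usage sketch (assumption, not from the original module): given scattered samples
# (xi, yi, zi) of a smooth surface and 1D output coordinates xx, yy, a regular grid
# can be obtained with
#     zz = interpolate_g(xi, yi, zi, xx, yy, knots=10)
# Passing error=True additionally returns an RMS error estimate computed from the
# half of the samples that were held out of the spline fit.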
####### End of auxiliary functions
|
bsd-3-clause
|
peterwilletts24/Python-Scripts
|
plot_scripts/EMBRACE/heat_flux/plot_from_pp_3217_diff_8km.py
|
2
|
5618
|
"""
Load pp, plot and save
"""
import os, sys
import matplotlib
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from matplotlib import rc
from matplotlib.font_manager import FontProperties
from matplotlib import rcParams
from mpl_toolkits.basemap import Basemap
rc('font', family = 'serif', serif = 'cmr10')
rc('text', usetex=True)
rcParams['text.usetex']=True
rcParams['text.latex.unicode']=True
rcParams['font.family']='serif'
rcParams['font.serif']='cmr10'
import matplotlib.pyplot as plt
import matplotlib as mpl
import matplotlib.cm as mpl_cm
import numpy as np
import iris
import iris.coords as coords
import iris.quickplot as qplt
import iris.plot as iplt
import iris.coord_categorisation
import iris.analysis.cartography
import cartopy.crs as ccrs
import cartopy.io.img_tiles as cimgt
import matplotlib.ticker as mticker
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
import scipy.interpolate
import datetime
from mpl_toolkits.basemap import cm
import imp
from textwrap import wrap
import re
import iris.analysis.cartography
import math
experiment_ids = ['dklyu']
save_path='/nfs/a90/eepdw/Figures/EMBRACE/'
model_name_convert_title = imp.load_source('util', '/nfs/see-fs-01_users/eepdw/python_scripts/model_name_convert_title.py')
unrotate = imp.load_source('util', '/nfs/see-fs-01_users/eepdw/python_scripts/unrotate_pole.py')
pp_file = '3217_mean'
degs_crop_top = 3.7
degs_crop_bottom = 3.5
degs_crop_left = 2
degs_crop_right = 3
min_contour = -50
max_contour = 50
tick_interval=20
#
# cmap= cm.s3pcpn_l
divisor=10 # for lat/lon rounding
def main():
# Load diff cube
gl = '/nfs/a90/eepdw/Data/EMBRACE/Mean_State/pp_files/dkmb/dkmbq/%s.pp' % pp_file
glob = iris.load_cube(gl)
for experiment_id in experiment_ids:
expmin1 = experiment_id[:-1]
pfile = '/nfs/a90/eepdw/Data/EMBRACE/Mean_State/pp_files/%s/%s/%s.pp' % (expmin1, experiment_id, pp_file)
pcube = iris.load_cube(pfile)
lat = pcube.coord('grid_latitude').points
lon = pcube.coord('grid_longitude').points
cs = pcube.coord_system('CoordSystem')
if isinstance(cs, iris.coord_systems.RotatedGeogCS):
print ' %s - Unrotate pole %s' % (experiment_id,cs)
lons, lats = np.meshgrid(lon, lat)
lons,lats = iris.analysis.cartography.unrotate_pole(lons,lats, cs.grid_north_pole_longitude, cs.grid_north_pole_latitude)
lon=lons[0]
lat=lats[:,0]
#pcube.remove_coord('grid_latitude')
#pcube.remove_coord('grid_longitude')
#pcube.add_dim_coord(iris.coords.DimCoord(points=lat, standard_name='grid_latitude', units='degrees', coord_system=csur), lat_dim_coord)
#pcube.add_dim_coord(iris.coords.DimCoord(points=lon, standard_name='grid_longitude', units='degrees', coord_system=csur), lon_dim_coord)
lon_min=np.min(lon)
lon_max=np.max(lon)
lon_low_tick=lon_min -(lon_min%divisor)
lon_high_tick=math.ceil(lon_max/divisor)*divisor
lat_min=np.min(lat)
lat_max=np.max(lat)
lat_low_tick=lat_min - (lat_min%divisor)
lat_high_tick=math.ceil(lat_max/divisor)*divisor
pcubediff=pcube-glob
plt.figure(figsize=(8,8))
        cmap = plt.cm.RdBu_r
ax = plt.axes(projection=ccrs.PlateCarree(), extent=(lon_min+degs_crop_left,lon_max-degs_crop_right,lat_min+degs_crop_bottom,lat_max-degs_crop_top))
clevs = np.linspace(min_contour, max_contour,9)
cont = iplt.contourf(pcubediff, clevs, cmap=cmap, extend='both')
#plt.clabel(cont, fmt='%d')
#ax.stock_img()
ax.coastlines(resolution='110m', color='#262626')
gl = ax.gridlines(draw_labels=True,linewidth=0.5, color='#262626', alpha=0.5, linestyle='--')
gl.xlabels_top = False
gl.ylabels_right = False
#gl.xlines = False
dx, dy = 10, 10
gl.xlocator = mticker.FixedLocator(range(int(lon_low_tick),int(lon_high_tick)+divisor,divisor))
gl.ylocator = mticker.FixedLocator(range(int(lat_low_tick),int(lat_high_tick)+divisor,divisor))
gl.xformatter = LONGITUDE_FORMATTER
gl.yformatter = LATITUDE_FORMATTER
gl.xlabel_style = {'size': 12, 'color':'#262626'}
#gl.xlabel_style = {'color': '#262626', 'weight': 'bold'}
gl.ylabel_style = {'size': 12, 'color':'#262626'}
cbar = plt.colorbar(cont, orientation='horizontal', pad=0.05, extend='both', format = '%d')
#cbar.set_label('')
cbar.set_label(pcube.units, fontsize=10, color='#262626')
cbar.set_ticks(np.arange(min_contour, max_contour+tick_interval,tick_interval))
ticks = (np.arange(min_contour, max_contour+tick_interval,tick_interval))
cbar.set_ticklabels(['%d' % i for i in ticks])
main_title='%s - Difference' % pcube.standard_name.title().replace('_',' ')
model_info=re.sub('(.{68} )', '\\1\n', str(model_name_convert_title.main(experiment_id)), 0, re.DOTALL)
model_info = re.sub(r'[(\']', ' ', model_info)
model_info = re.sub(r'[\',)]', ' ', model_info)
print model_info
if not os.path.exists('%s%s/%s' % (save_path, experiment_id, pp_file)): os.makedirs('%s%s/%s' % (save_path, experiment_id, pp_file))
plt.savefig('%s%s/%s/%s_%s_notitle_diff_8km.png' % (save_path, experiment_id, pp_file, experiment_id, pp_file), format='png', bbox_inches='tight')
plt.title('\n'.join(wrap('%s\n%s' % (main_title, model_info), 1000,replace_whitespace=False)), fontsize=16)
#plt.show()
plt.savefig('%s%s/%s/%s_%s_diff_8km.png' % (save_path, experiment_id, pp_file, experiment_id, pp_file), format='png', bbox_inches='tight')
plt.close()
if __name__ == '__main__':
main()
|
mit
|
pianomania/scikit-learn
|
examples/gaussian_process/plot_gpc_iris.py
|
81
|
2231
|
"""
=====================================================
Gaussian process classification (GPC) on iris dataset
=====================================================
This example illustrates the predicted probability of GPC for an isotropic
and anisotropic RBF kernel on a two-dimensional version for the iris-dataset.
The anisotropic RBF kernel obtains slightly higher log-marginal-likelihood by
assigning different length-scales to the two feature dimensions.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
y = np.array(iris.target, dtype=int)
h = .02 # step size in the mesh
kernel = 1.0 * RBF([1.0])
gpc_rbf_isotropic = GaussianProcessClassifier(kernel=kernel).fit(X, y)
kernel = 1.0 * RBF([1.0, 1.0])
gpc_rbf_anisotropic = GaussianProcessClassifier(kernel=kernel).fit(X, y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
titles = ["Isotropic RBF", "Anisotropic RBF"]
plt.figure(figsize=(10, 5))
for i, clf in enumerate((gpc_rbf_isotropic, gpc_rbf_anisotropic)):
# Plot the predicted probabilities. For that, we will assign a color to
    # each point in the mesh [x_min, x_max] x [y_min, y_max].
plt.subplot(1, 2, i + 1)
Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape((xx.shape[0], xx.shape[1], 3))
plt.imshow(Z, extent=(x_min, x_max, y_min, y_max), origin="lower")
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=np.array(["r", "g", "b"])[y])
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.title("%s, LML: %.3f" %
(titles[i], clf.log_marginal_likelihood(clf.kernel_.theta)))
plt.tight_layout()
plt.show()
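# Extra (not part of the original example, just a numerical companion to the plot):
# print the fitted kernels and their log-marginal likelihoods so the isotropic and
# anisotropic models can be compared directly.
for title, gpc in zip(titles, (gpc_rbf_isotropic, gpc_rbf_anisotropic)):
    print("%s kernel: %s, LML: %.3f"
          % (title, gpc.kernel_, gpc.log_marginal_likelihood(gpc.kernel_.theta)))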
|
bsd-3-clause
|
idlead/scikit-learn
|
examples/model_selection/plot_validation_curve.py
|
141
|
1931
|
"""
==========================
Plotting Validation Curves
==========================
In this plot you can see the training scores and validation scores of an SVM
for different values of the kernel parameter gamma. For very low values of
gamma, you can see that both the training score and the validation score are
low. This is called underfitting. Medium values of gamma will result in high
values for both scores, i.e. the classifier is performing fairly well. If gamma
is too high, the classifier will overfit, which means that the training score
is good but the validation score is poor.
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import load_digits
from sklearn.svm import SVC
from sklearn.model_selection import validation_curve
digits = load_digits()
X, y = digits.data, digits.target
param_range = np.logspace(-6, -1, 5)
train_scores, test_scores = validation_curve(
SVC(), X, y, param_name="gamma", param_range=param_range,
cv=10, scoring="accuracy", n_jobs=1)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.title("Validation Curve with SVM")
plt.xlabel("$\gamma$")
plt.ylabel("Score")
plt.ylim(0.0, 1.1)
lw = 2
plt.semilogx(param_range, train_scores_mean, label="Training score",
color="darkorange", lw=lw)
plt.fill_between(param_range, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.2,
color="darkorange", lw=lw)
plt.semilogx(param_range, test_scores_mean, label="Cross-validation score",
color="navy", lw=lw)
plt.fill_between(param_range, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.2,
color="navy", lw=lw)
plt.legend(loc="best")
plt.show()
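# Extra (not part of the original example): report the gamma value with the best
# mean cross-validation score as a quick numerical summary of the curve.
best = np.argmax(test_scores_mean)
print("Best gamma: %.1e (mean CV accuracy %.3f)"
      % (param_range[best], test_scores_mean[best]))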
|
bsd-3-clause
|
kcavagnolo/astroML
|
book_figures/chapter9/fig_rrlyrae_lda.py
|
3
|
4178
|
"""
LDA Classification of photometry
--------------------------------
Figure 9.4
The linear discriminant boundary for RR Lyrae stars (see caption of figure 9.3
for details). With all four colors, LDA achieves a completeness of 0.672 and a
contamination of 0.806.
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
from __future__ import print_function
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import colors
from sklearn.lda import LDA
from astroML.datasets import fetch_rrlyrae_combined
from astroML.utils import split_samples
from astroML.utils import completeness_contamination
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
#----------------------------------------------------------------------
# get data and split into training & testing sets
X, y = fetch_rrlyrae_combined()
X = X[:, [1, 0, 2, 3]] # rearrange columns for better 1-color results
(X_train, X_test), (y_train, y_test) = split_samples(X, y, [0.75, 0.25],
random_state=0)
N_tot = len(y)
N_st = np.sum(y == 0)
N_rr = N_tot - N_st
N_train = len(y_train)
N_test = len(y_test)
N_plot = 5000 + N_rr
#----------------------------------------------------------------------
# perform LDA
classifiers = []
predictions = []
Ncolors = np.arange(1, X.shape[1] + 1)
for nc in Ncolors:
clf = LDA()
clf.fit(X_train[:, :nc], y_train)
y_pred = clf.predict(X_test[:, :nc])
classifiers.append(clf)
predictions.append(y_pred)
completeness, contamination = completeness_contamination(predictions, y_test)
print("completeness", completeness)
print("contamination", contamination)
#------------------------------------------------------------
# Compute the decision boundary
clf = classifiers[1]
xlim = (0.7, 1.35)
ylim = (-0.15, 0.4)
xx, yy = np.meshgrid(np.linspace(xlim[0], xlim[1], 71),
np.linspace(ylim[0], ylim[1], 81))
Z = clf.predict_proba(np.c_[yy.ravel(), xx.ravel()])
Z = Z[:, 1].reshape(xx.shape)
#----------------------------------------------------------------------
# plot the results
fig = plt.figure(figsize=(5, 2.5))
fig.subplots_adjust(bottom=0.15, top=0.95, hspace=0.0,
left=0.1, right=0.95, wspace=0.2)
# left plot: data and decision boundary
ax = fig.add_subplot(121)
im = ax.scatter(X[-N_plot:, 1], X[-N_plot:, 0], c=y[-N_plot:],
s=4, lw=0, cmap=plt.cm.binary, zorder=2)
im.set_clim(-0.5, 1)
im = ax.imshow(Z, origin='lower', aspect='auto',
cmap=plt.cm.binary, zorder=1,
extent=xlim + ylim)
im.set_clim(0, 1.5)
ax.contour(xx, yy, Z, [0.5], colors='k')
ax.set_xlim(xlim)
ax.set_ylim(ylim)
ax.set_xlabel('$u-g$')
ax.set_ylabel('$g-r$')
# plot completeness vs Ncolors
ax = fig.add_subplot(222)
ax.plot(Ncolors, completeness, 'o-k', ms=6, label='unweighted')
ax.xaxis.set_major_locator(plt.MultipleLocator(1))
ax.yaxis.set_major_locator(plt.MultipleLocator(0.2))
ax.xaxis.set_major_formatter(plt.NullFormatter())
ax.set_ylabel('completeness')
ax.set_xlim(0.5, 4.5)
ax.set_ylim(-0.1, 1.1)
ax.grid(True)
# plot contamination vs Ncolors
ax = fig.add_subplot(224)
ax.plot(Ncolors, contamination, 'o-k', ms=6, label='unweighted')
ax.xaxis.set_major_locator(plt.MultipleLocator(1))
ax.yaxis.set_major_locator(plt.MultipleLocator(0.2))
ax.xaxis.set_major_formatter(plt.FormatStrFormatter('%i'))
ax.set_xlabel('N colors')
ax.set_ylabel('contamination')
ax.set_xlim(0.5, 4.5)
ax.set_ylim(-0.1, 1.1)
ax.grid(True)
plt.show()
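# Extra (not part of the original figure script): tabulate the completeness and
# contamination obtained for each number of colors used by the classifier.
for nc, comp, cont in zip(Ncolors, completeness, contamination):
    print("N colors = %i: completeness = %.3f, contamination = %.3f"
          % (nc, comp, cont))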
|
bsd-2-clause
|
Obus/scikit-learn
|
sklearn/metrics/cluster/__init__.py
|
312
|
1322
|
"""
The :mod:`sklearn.metrics.cluster` submodule contains evaluation metrics for
cluster analysis results. There are two forms of evaluation:
- supervised, which uses ground truth class values for each sample.
- unsupervised, which does not and measures the 'quality' of the model itself.
"""
from .supervised import adjusted_mutual_info_score
from .supervised import normalized_mutual_info_score
from .supervised import adjusted_rand_score
from .supervised import completeness_score
from .supervised import contingency_matrix
from .supervised import expected_mutual_information
from .supervised import homogeneity_completeness_v_measure
from .supervised import homogeneity_score
from .supervised import mutual_info_score
from .supervised import v_measure_score
from .supervised import entropy
from .unsupervised import silhouette_samples
from .unsupervised import silhouette_score
from .bicluster import consensus_score
__all__ = ["adjusted_mutual_info_score", "normalized_mutual_info_score",
"adjusted_rand_score", "completeness_score", "contingency_matrix",
"expected_mutual_information", "homogeneity_completeness_v_measure",
"homogeneity_score", "mutual_info_score", "v_measure_score",
"entropy", "silhouette_samples", "silhouette_score",
"consensus_score"]
|
bsd-3-clause
|
great-expectations/great_expectations
|
great_expectations/render/renderer/v3/suite_profile_notebook_renderer.py
|
1
|
6852
|
from typing import Any, Dict, List, Union
import nbformat
from great_expectations import DataContext
from great_expectations.core.batch import BatchRequest
from great_expectations.render.renderer.suite_edit_notebook_renderer import (
SuiteEditNotebookRenderer,
)
class SuiteProfileNotebookRenderer(SuiteEditNotebookRenderer):
def __init__(
self,
context: DataContext,
expectation_suite_name: str,
batch_request: Union[str, Dict[str, Union[str, int, Dict[str, Any]]]],
):
super().__init__(context=context)
if batch_request is None:
batch_request = {}
self.batch_request = batch_request
self.validator = context.get_validator(
batch_request=BatchRequest(**batch_request),
expectation_suite_name=expectation_suite_name,
)
self.expectation_suite_name = self.validator.expectation_suite_name
# noinspection PyMethodOverriding
def add_header(self):
self.add_markdown_cell(
markdown=f"""# Initialize a new Expectation Suite by profiling a batch of your data.
This process helps you avoid writing lots of boilerplate when authoring suites by allowing you to select columns and other factors that you care about and letting a profiler write some candidate expectations for you to adjust.
**Expectation Suite Name**: `{self.expectation_suite_name}`
"""
)
self.add_code_cell(
code=f"""\
import datetime
import pandas as pd
import great_expectations as ge
import great_expectations.jupyter_ux
from great_expectations.core.batch import BatchRequest
from great_expectations.profile.user_configurable_profiler import UserConfigurableProfiler
from great_expectations.checkpoint import SimpleCheckpoint
from great_expectations.exceptions import DataContextError
context = ge.data_context.DataContext()
batch_request = {self.batch_request}
expectation_suite_name = "{self.expectation_suite_name}"
validator = context.get_validator(
batch_request=BatchRequest(**batch_request),
expectation_suite_name=expectation_suite_name
)
column_names = [f'"{{column_name}}"' for column_name in validator.columns()]
print(f"Columns: {{', '.join(column_names)}}.")
validator.head(n_rows=5, fetch_all=False)
""",
lint=True,
)
def _add_available_columns_list(self):
column_names: List[str]
column_name: str
column_names = [
f' "{column_name}"\n,' for column_name in self.validator.columns()
]
code: str = f'ignored_columns = [\n{"".join(column_names)}]'
self.add_code_cell(code=code, lint=True)
def add_footer(self):
self.add_markdown_cell(
markdown="""# Save & review your new Expectation Suite
Let's save the draft expectation suite as a JSON file in the
`great_expectations/expectations` directory of your project and rebuild the Data
Docs site to make it easy to review your new suite."""
)
code_cell: str = """\
print(validator.get_expectation_suite(discard_failed_expectations=False))
validator.save_expectation_suite(discard_failed_expectations=False)
checkpoint_config = {
"class_name": "SimpleCheckpoint",
"validations": [
{
"batch_request": batch_request,
"expectation_suite_name": expectation_suite_name
}
]
}
checkpoint = SimpleCheckpoint(
f"_tmp_checkpoint_{expectation_suite_name}",
context,
**checkpoint_config
)
checkpoint_result = checkpoint.run()
context.build_data_docs()
validation_result_identifier = checkpoint_result.list_validation_result_identifiers()[0]
context.open_data_docs(resource_identifier=validation_result_identifier)
"""
self.add_code_cell(code=code_cell, lint=True)
self.add_markdown_cell(
markdown=f"""## Next steps
After you review this initial Expectation Suite in Data Docs you
should edit this suite to make finer grained adjustments to the expectations.
This can be done by running `great_expectations suite edit {self.expectation_suite_name}`."""
)
# noinspection PyMethodOverriding
def render(self) -> nbformat.NotebookNode:
self._notebook = nbformat.v4.new_notebook()
self.add_header()
self.add_markdown_cell(
markdown="""# Select columns
Select the columns on which you would like to set expectations and those which you would like to ignore.
Great Expectations will choose which expectations might make sense for a column based on the **data type** and **cardinality** of the data in each selected column.
Simply comment out columns that are important and should be included. You can select multiple lines and
use a jupyter keyboard shortcut to toggle each line: **Linux/Windows**:
`Ctrl-/`, **macOS**: `Cmd-/`"""
)
self._add_available_columns_list()
self.add_markdown_cell(
markdown="""# Run the data profiler
The suites generated here are **not meant to be production suites** -- they are **a starting point to build upon**.
**To get to a production-grade suite, you will definitely want to [edit this
suite](https://docs.greatexpectations.io/en/latest/guides/how_to_guides/creating_and_editing_expectations/how_to_edit_an_expectation_suite_using_a_disposable_notebook.html?utm_source=notebook&utm_medium=profile_based_expectations)
after this initial step gets you started on the path towards what you want.**
This is highly configurable depending on your goals.
You can ignore columns or exclude certain expectations, specify a threshold for creating value set expectations, or even specify semantic types for a given column.
You can find more information about [how to configure this profiler, including a list of the expectations that it uses, here.](https://docs.greatexpectations.io/en/latest/guides/how_to_guides/creating_and_editing_expectations/how_to_create_an_expectation_suite_with_the_user_configurable_profiler.html)
"""
)
self._add_profiler_cell()
self.add_footer()
return self._notebook
# noinspection PyMethodOverriding
def render_to_disk(self, notebook_file_path: str):
"""
Render a notebook to disk from an expectation suite.
"""
self.render()
self.write_notebook_to_disk(
notebook=self._notebook, notebook_file_path=notebook_file_path
)
def _add_profiler_cell(self):
self.add_code_cell(
code=f"""\
profiler = UserConfigurableProfiler(
profile_dataset=validator,
excluded_expectations=None,
ignored_columns=ignored_columns,
not_null_only=False,
primary_or_compound_key=False,
semantic_types_dict=None,
table_expectations_only=False,
value_set_threshold="MANY",
)
suite = profiler.build_suite()""",
lint=True,
)
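# Illustrative usage (assumption, not part of the original module): render the
# profiling notebook for an existing suite and write it to disk. The datasource,
# data connector and data asset names below are placeholders.
#
#     context = DataContext()
#     renderer = SuiteProfileNotebookRenderer(
#         context=context,
#         expectation_suite_name="my_suite",
#         batch_request={
#             "datasource_name": "my_datasource",
#             "data_connector_name": "default_inferred_data_connector_name",
#             "data_asset_name": "my_table",
#         },
#     )
#     renderer.render_to_disk(notebook_file_path="profile_my_suite.ipynb")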
|
apache-2.0
|
appapantula/scikit-learn
|
examples/classification/plot_classification_probability.py
|
242
|
2624
|
"""
===============================
Plot classification probability
===============================
Plot the classification probability for different classifiers. We use a 3
class dataset, and we classify it with a Support Vector classifier, L1
and L2 penalized logistic regression with either a One-Vs-Rest or multinomial
setting.
The logistic regression is not a multiclass classifier out of the box. As
a result it can identify only the first class.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn import datasets
iris = datasets.load_iris()
X = iris.data[:, 0:2] # we only take the first two features for visualization
y = iris.target
n_features = X.shape[1]
C = 1.0
# Create different classifiers. The logistic regression cannot do
# multiclass out of the box.
classifiers = {'L1 logistic': LogisticRegression(C=C, penalty='l1'),
'L2 logistic (OvR)': LogisticRegression(C=C, penalty='l2'),
'Linear SVC': SVC(kernel='linear', C=C, probability=True,
random_state=0),
'L2 logistic (Multinomial)': LogisticRegression(
C=C, solver='lbfgs', multi_class='multinomial'
)}
n_classifiers = len(classifiers)
plt.figure(figsize=(3 * 2, n_classifiers * 2))
plt.subplots_adjust(bottom=.2, top=.95)
xx = np.linspace(3, 9, 100)
yy = np.linspace(1, 5, 100).T
xx, yy = np.meshgrid(xx, yy)
Xfull = np.c_[xx.ravel(), yy.ravel()]
for index, (name, classifier) in enumerate(classifiers.items()):
classifier.fit(X, y)
y_pred = classifier.predict(X)
classif_rate = np.mean(y_pred.ravel() == y.ravel()) * 100
print("classif_rate for %s : %f " % (name, classif_rate))
    # View probabilities
probas = classifier.predict_proba(Xfull)
n_classes = np.unique(y_pred).size
for k in range(n_classes):
plt.subplot(n_classifiers, n_classes, index * n_classes + k + 1)
plt.title("Class %d" % k)
if k == 0:
plt.ylabel(name)
imshow_handle = plt.imshow(probas[:, k].reshape((100, 100)),
extent=(3, 9, 1, 5), origin='lower')
plt.xticks(())
plt.yticks(())
idx = (y_pred == k)
if idx.any():
plt.scatter(X[idx, 0], X[idx, 1], marker='o', c='k')
ax = plt.axes([0.15, 0.04, 0.7, 0.05])
plt.title("Probability")
plt.colorbar(imshow_handle, cax=ax, orientation='horizontal')
plt.show()
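# Extra (not part of the original example): the predicted probabilities can also be
# compared quantitatively with the log-loss on the training data.
from sklearn.metrics import log_loss
for name, classifier in classifiers.items():
    print("log-loss for %s : %f" % (name, log_loss(y, classifier.predict_proba(X))))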
|
bsd-3-clause
|
kkuunnddaannkk/vispy
|
vispy/color/colormap.py
|
4
|
38235
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
from __future__ import division # just to be safe...
import inspect
import numpy as np
from .color_array import ColorArray
from ..ext.six import string_types
from ..ext.cubehelix import cubehelix
from ..ext.husl import husl_to_rgb
###############################################################################
# Color maps
# Utility functions for interpolation in NumPy.
def _vector_or_scalar(x, type='row'):
"""Convert an object to either a scalar or a row or column vector."""
if isinstance(x, (list, tuple)):
x = np.array(x)
if isinstance(x, np.ndarray):
assert x.ndim == 1
if type == 'column':
x = x[:, None]
return x
def _vector(x, type='row'):
"""Convert an object to a row or column vector."""
if isinstance(x, (list, tuple)):
x = np.array(x, dtype=np.float32)
elif not isinstance(x, np.ndarray):
x = np.array([x], dtype=np.float32)
assert x.ndim == 1
if type == 'column':
x = x[:, None]
return x
def _find_controls(x, controls=None, clip=None):
x_controls = np.clip(np.searchsorted(controls, x) - 1, 0, clip)
return x_controls.astype(np.int32)
# Normalization
def _normalize(x, cmin=None, cmax=None, clip=True):
"""Normalize an array from the range [cmin, cmax] to [0,1],
with optional clipping."""
if not isinstance(x, np.ndarray):
x = np.array(x)
if cmin is None:
cmin = x.min()
if cmax is None:
cmax = x.max()
if cmin == cmax:
return .5 * np.ones(x.shape)
else:
cmin, cmax = float(cmin), float(cmax)
y = (x - cmin) * 1. / (cmax - cmin)
if clip:
y = np.clip(y, 0., 1.)
return y
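# Illustrative values (not part of the original module): _normalize([0., 5., 10.])
# maps the data range onto [0, 1] and returns array([0., 0.5, 1.]); when
# cmin == cmax a constant 0.5 array is returned instead.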
# Interpolation functions in NumPy.
def _mix_simple(a, b, x):
"""Mix b (with proportion x) with a."""
x = np.clip(x, 0.0, 1.0)
return (1.0 - x)*a + x*b
def _interpolate_multi(colors, x, controls):
x = x.ravel()
n = len(colors)
# For each element in x, the control index of its bin's left boundary.
x_step = _find_controls(x, controls, n-2)
# The length of each bin.
controls_length = np.diff(controls).astype(np.float32)
# Prevent division by zero error.
controls_length[controls_length == 0.] = 1.
# Like x, but relative to each bin.
_to_clip = x - controls[x_step]
_to_clip /= controls_length[x_step]
x_rel = np.clip(_to_clip, 0., 1.)
return (colors[x_step],
colors[x_step + 1],
x_rel[:, None])
def mix(colors, x, controls=None):
a, b, x_rel = _interpolate_multi(colors, x, controls)
return _mix_simple(a, b, x_rel)
def smoothstep(edge0, edge1, x):
""" performs smooth Hermite interpolation
between 0 and 1 when edge0 < x < edge1. """
# Scale, bias and saturate x to 0..1 range
x = np.clip((x - edge0)/(edge1 - edge0), 0.0, 1.0)
# Evaluate polynomial
return x*x*(3 - 2*x)
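# Illustrative value (not part of the original module): smoothstep(0., 1., 0.25)
# gives 3*0.25**2 - 2*0.25**3 = 0.15625, and inputs outside [edge0, edge1] are
# clamped to 0 or 1 before the Hermite polynomial is evaluated.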
def step(colors, x, controls=None):
    """Step interpolation from a set of colors. x belongs in [0, 1]."""
    x = x.ravel()
assert (controls[0], controls[-1]) == (0., 1.)
ncolors = len(colors)
assert ncolors == len(controls) - 1
assert ncolors >= 2
x_step = _find_controls(x, controls, ncolors-1)
return colors[x_step, ...]
# GLSL interpolation functions.
def _glsl_mix(controls=None):
"""Generate a GLSL template function from a given interpolation patterns
and control points."""
assert (controls[0], controls[-1]) == (0., 1.)
ncolors = len(controls)
assert ncolors >= 2
if ncolors == 2:
s = " return mix($color_0, $color_1, t);\n"
else:
s = ""
for i in range(ncolors-1):
if i == 0:
ifs = 'if (t < %.6f)' % (controls[i+1])
elif i == (ncolors-2):
ifs = 'else'
else:
ifs = 'else if (t < %.6f)' % (controls[i+1])
adj_t = '(t - %s) / %s' % (controls[i],
controls[i+1] - controls[i])
s += ("%s {\n return mix($color_%d, $color_%d, %s);\n} " %
(ifs, i, i+1, adj_t))
return "vec4 colormap(float t) {\n%s\n}" % s
def _glsl_step(controls=None):
assert (controls[0], controls[-1]) == (0., 1.)
ncolors = len(controls) - 1
assert ncolors >= 2
s = ""
for i in range(ncolors-1):
if i == 0:
ifs = 'if (t < %.6f)' % (controls[i+1])
elif i == (ncolors-2):
ifs = 'else'
else:
ifs = 'else if (t < %.6f)' % (controls[i+1])
s += """%s {\n return $color_%d;\n} """ % (ifs, i)
return """vec4 colormap(float t) {\n%s\n}""" % s
# Mini GLSL template system for colors.
def _process_glsl_template(template, colors):
"""Replace $color_i by color #i in the GLSL template."""
for i in range(len(colors) - 1, -1, -1):
color = colors[i]
assert len(color) == 4
vec4_color = 'vec4(%.3f, %.3f, %.3f, %.3f)' % tuple(color)
template = template.replace('$color_%d' % i, vec4_color)
return template
class BaseColormap(object):
"""Class representing a colormap:
t \in [0, 1] --> rgba_color
Parameters
----------
colors : list of lists, tuples, or ndarrays
The control colors used by the colormap (shape = (ncolors, 4)).
Notes
-----
    Must be overridden. Child classes need to implement:
glsl_map : string
The GLSL function for the colormap. Use $color_0 to refer
to the first color in `colors`, and so on. These are vec4 vectors.
map(item) : function
Takes a (N, 1) vector of values in [0, 1], and returns a rgba array
of size (N, 4).
"""
# Control colors used by the colormap.
colors = None
# GLSL string with a function implementing the color map.
glsl_map = None
def __init__(self, colors=None):
# Ensure the colors are arrays.
if colors is not None:
self.colors = colors
if not isinstance(self.colors, ColorArray):
self.colors = ColorArray(self.colors)
# Process the GLSL map function by replacing $color_i by the
if len(self.colors) > 0:
self.glsl_map = _process_glsl_template(self.glsl_map,
self.colors.rgba)
def map(self, item):
"""Return a rgba array for the requested items.
        This function must be overridden by child classes.
This function doesn't need to implement argument checking on `item`.
It can always assume that `item` is a (N, 1) array of values between
0 and 1.
Parameters
----------
item : ndarray
An array of values in [0,1].
Returns
-------
rgba : ndarray
An array with rgba values, with one color per item. The shape
should be ``item.shape + (4,)``.
Notes
-----
Users are expected to use a colormap with ``__getitem__()`` rather
than ``map()`` (which implements a lower-level API).
"""
raise NotImplementedError()
def __getitem__(self, item):
if isinstance(item, tuple):
raise ValueError('ColorArray indexing is only allowed along '
'the first dimension.')
# Ensure item is either a scalar or a column vector.
item = _vector(item, type='column')
# Clip the values in [0, 1].
item = np.clip(item, 0., 1.)
colors = self.map(item)
return ColorArray(colors)
def __setitem__(self, item, value):
raise RuntimeError("It is not possible to set items to "
"BaseColormap instances.")
def _repr_html_(self):
n = 100
html = ("""
<style>
table.vispy_colormap {
height: 30px;
border: 0;
margin: 0;
padding: 0;
}
table.vispy_colormap td {
width: 3px;
border: 0;
margin: 0;
padding: 0;
}
</style>
<table class="vispy_colormap">
""" +
'\n'.join([(("""<td style="background-color: %s;"
title="%s"></td>""") % (color, color))
for color in self[np.linspace(0., 1., n)].hex]) +
"""
</table>
""")
return html
def _default_controls(ncolors):
"""Generate linearly spaced control points from a set of colors."""
return np.linspace(0., 1., ncolors)
# List the parameters of every supported interpolation mode.
_interpolation_info = {
'linear': {
'ncontrols': lambda ncolors: ncolors, # take ncolors as argument
'glsl_map': _glsl_mix, # take 'controls' as argument
'map': mix,
},
'zero': {
'ncontrols': lambda ncolors: (ncolors+1),
'glsl_map': _glsl_step,
'map': step,
}
}
class Colormap(BaseColormap):
"""A colormap defining several control colors and an interpolation scheme.
Parameters
----------
colors : list of colors | ColorArray
The list of control colors. If not a ``ColorArray``, a new
``ColorArray`` instance is created from this list. See the
documentation of ``ColorArray``.
controls : array-like
The list of control points for the given colors. It should be
an increasing list of floating-point number between 0.0 and 1.0.
The first control point must be 0.0. The last control point must be
1.0. The number of control points depends on the interpolation scheme.
interpolation : str
The interpolation mode of the colormap. Default: 'linear'. Can also
be 'zero'.
If 'linear', ncontrols = ncolors (one color per control point).
If 'zero', ncontrols = ncolors+1 (one color per bin).
Examples
--------
Here is a basic example:
>>> from vispy.color import Colormap
>>> cm = Colormap(['r', 'g', 'b'])
>>> cm[0.], cm[0.5], cm[np.linspace(0., 1., 100)]
"""
def __init__(self, colors, controls=None, interpolation='linear'):
self.interpolation = interpolation
ncontrols = self._ncontrols(len(colors))
# Default controls.
if controls is None:
controls = _default_controls(ncontrols)
assert len(controls) == ncontrols
self._controls = np.array(controls, dtype=np.float32)
self.glsl_map = self._glsl_map_generator(self._controls)
super(Colormap, self).__init__(colors)
@property
def interpolation(self):
"""The interpolation mode of the colormap"""
return self._interpolation
@interpolation.setter
def interpolation(self, val):
if val not in _interpolation_info:
raise ValueError('The interpolation mode can only be one of: ' +
', '.join(sorted(_interpolation_info.keys())))
# Get the information of the interpolation mode.
info = _interpolation_info[val]
# Get the function that generates the GLSL map, as a function of the
# controls array.
self._glsl_map_generator = info['glsl_map']
# Number of controls as a function of the number of colors.
self._ncontrols = info['ncontrols']
# Python map function.
self._map_function = info['map']
self._interpolation = val
def map(self, x):
"""The Python mapping function from the [0,1] interval to a
list of rgba colors
Parameters
----------
x : array-like
The values to map.
Returns
-------
colors : list
List of rgba colors.
"""
return self._map_function(self.colors.rgba, x, self._controls)
class CubeHelixColormap(Colormap):
def __init__(self, start=0.5, rot=1, gamma=1.0, reverse=True, nlev=32,
minSat=1.2, maxSat=1.2, minLight=0., maxLight=1., **kwargs):
"""Cube helix colormap
A full implementation of Dave Green's "cubehelix" for Matplotlib.
Based on the FORTRAN 77 code provided in
D.A. Green, 2011, BASI, 39, 289.
http://adsabs.harvard.edu/abs/2011arXiv1108.5083G
User can adjust all parameters of the cubehelix algorithm.
This enables much greater flexibility in choosing color maps, while
always ensuring the color map scales in intensity from black
to white. A few simple examples:
Default color map settings produce the standard "cubehelix".
Create color map in only blues by setting rot=0 and start=0.
Create reverse (white to black) backwards through the rainbow once
by setting rot=1 and reverse=True.
Parameters
----------
start : scalar, optional
Sets the starting position in the color space. 0=blue, 1=red,
2=green. Defaults to 0.5.
rot : scalar, optional
The number of rotations through the rainbow. Can be positive
or negative, indicating direction of rainbow. Negative values
            correspond to Blue->Red direction. Defaults to 1
gamma : scalar, optional
The gamma correction for intensity. Defaults to 1.0
reverse : boolean, optional
Set to True to reverse the color map. Will go from black to
            white. Good for density plots where shade~density. Defaults to
            True
nlev : scalar, optional
Defines the number of discrete levels to render colors at.
Defaults to 32.
sat : scalar, optional
The saturation intensity factor. Defaults to 1.2
NOTE: this was formerly known as "hue" parameter
minSat : scalar, optional
Sets the minimum-level saturation. Defaults to 1.2
maxSat : scalar, optional
Sets the maximum-level saturation. Defaults to 1.2
startHue : scalar, optional
Sets the starting color, ranging from [0, 360], as in
D3 version by @mbostock
NOTE: overrides values in start parameter
endHue : scalar, optional
Sets the ending color, ranging from [0, 360], as in
D3 version by @mbostock
NOTE: overrides values in rot parameter
minLight : scalar, optional
Sets the minimum lightness value. Defaults to 0.
maxLight : scalar, optional
Sets the maximum lightness value. Defaults to 1.
"""
super(CubeHelixColormap, self).__init__(
cubehelix(start=start, rot=rot, gamma=gamma, reverse=reverse,
nlev=nlev, minSat=minSat, maxSat=maxSat,
minLight=minLight, maxLight=maxLight, **kwargs))
class _Fire(BaseColormap):
colors = [(1.0, 1.0, 1.0, 1.0),
(1.0, 1.0, 0.0, 1.0),
(1.0, 0.0, 0.0, 1.0)]
glsl_map = """
vec4 fire(float t) {
return mix(mix($color_0, $color_1, t),
mix($color_1, $color_2, t*t), t);
}
"""
def map(self, t):
a, b, d = self.colors.rgba
c = _mix_simple(a, b, t)
e = _mix_simple(b, d, t**2)
return _mix_simple(c, e, t)
class _Grays(BaseColormap):
glsl_map = """
vec4 grays(float t) {
return vec4(t, t, t, 1.0);
}
"""
def map(self, t):
if isinstance(t, np.ndarray):
return np.hstack([t, t, t, np.ones(t.shape)]).astype(np.float32)
else:
return np.array([t, t, t, 1.0], dtype=np.float32)
class _Ice(BaseColormap):
glsl_map = """
vec4 ice(float t) {
return vec4(t, t, 1.0, 1.0);
}
"""
def map(self, t):
if isinstance(t, np.ndarray):
return np.hstack([t, t, np.ones(t.shape),
np.ones(t.shape)]).astype(np.float32)
else:
return np.array([t, t, 1.0, 1.0], dtype=np.float32)
class _Hot(BaseColormap):
colors = [(0., .33, .66, 1.0),
(.33, .66, 1., 1.0)]
glsl_map = """
vec4 hot(float t) {
return vec4(smoothstep($color_0.rgb, $color_1.rgb, vec3(t, t, t)),
1.0);
}
"""
def map(self, t):
rgba = self.colors.rgba
smoothed = smoothstep(rgba[0, :3], rgba[1, :3], t)
return np.hstack((smoothed, np.ones((len(t), 1))))
class _Winter(BaseColormap):
colors = [(0.0, 0.0, 1.0, 1.0),
(0.0, 1.0, 0.5, 1.0)]
glsl_map = """
vec4 winter(float t) {
return mix($color_0, $color_1, sqrt(t));
}
"""
def map(self, t):
return _mix_simple(self.colors.rgba[0],
self.colors.rgba[1],
np.sqrt(t))
class _SingleHue(Colormap):
"""A colormap which is solely defined by the given hue and value.
Given the color hue and value, this color map increases the saturation
of a color. The start color is almost white but still contains a hint of
the given color, and at the end the color is fully saturated.
Parameters
----------
hue : scalar, optional
The hue refers to a "true" color, without any shading or tinting.
Must be in the range [0, 360]. Defaults to 200 (blue).
saturation_range : array-like, optional
The saturation represents how "pure" a color is. Less saturation means
more white light mixed in the color. A fully saturated color means
the pure color defined by the hue. No saturation means completely
white. This colormap changes the saturation, and with this parameter
        you can specify the lower and upper bound. Default is [0.1, 0.8].
value : scalar, optional
The value defines the "brightness" of a color: a value of 0.0 means
completely black while a value of 1.0 means the color defined by the
hue without shading. Must be in the range [0, 1.0]. The default value
is 1.0.
Notes
-----
For more information about the hue values see the `wikipedia page`_.
.. _wikipedia page: https://en.wikipedia.org/wiki/Hue
"""
def __init__(self, hue=200, saturation_range=[0.1, 0.8], value=1.0):
colors = ColorArray([
(hue, saturation_range[0], value),
(hue, saturation_range[1], value)
], color_space='hsv')
super(_SingleHue, self).__init__(colors)
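# --- Illustrative usage sketch (not part of the original module) ------------
# The single-hue map above ramps from near-white to a fully saturated color
# of one hue.  A hedged example for an orange-ish hue; the same (N, 1) input
# convention for ``map`` is assumed as elsewhere in this module.
def _example_single_hue():
    import numpy as np
    cmap = _SingleHue(hue=35, saturation_range=[0.1, 0.8], value=1.0)
    t = np.linspace(0.0, 1.0, 5).reshape(-1, 1)
    return cmap.map(t)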
class _HSL(Colormap):
"""A colormap which is defined by n evenly spaced points in
a circular color space.
This means that we change the hue value while keeping the
saturation and value constant.
Parameters
    ----------
    ncolors : int, optional
The number of colors to generate.
hue_start : int, optional
The hue start value. Must be in the range [0, 360], the default is 0.
saturation : float, optional
The saturation component of the colors to generate. The default is
fully saturated (1.0). Must be in the range [0, 1.0].
value : float, optional
The value (brightness) component of the colors to generate. Must
be in the range [0, 1.0], and the default is 1.0
controls : array-like, optional
The list of control points for the colors to generate. It should be
an increasing list of floating-point number between 0.0 and 1.0.
The first control point must be 0.0. The last control point must be
1.0. The number of control points depends on the interpolation scheme.
interpolation : str, optional
The interpolation mode of the colormap. Default: 'linear'. Can also
be 'zero'.
If 'linear', ncontrols = ncolors (one color per control point).
If 'zero', ncontrols = ncolors+1 (one color per bin).
"""
def __init__(self, ncolors=6, hue_start=0, saturation=1.0, value=1.0,
controls=None, interpolation='linear'):
hues = np.linspace(0, 360, ncolors + 1)[:-1]
hues += hue_start
hues %= 360
colors = ColorArray([(hue, saturation, value) for hue in hues],
color_space='hsv')
super(_HSL, self).__init__(colors, controls=controls,
interpolation=interpolation)
class _HUSL(Colormap):
"""A colormap which is defined by n evenly spaced points in
the HUSL hue space.
Parameters
    ----------
    ncolors : int, optional
The number of colors to generate.
hue_start : int, optional
The hue start value. Must be in the range [0, 360], the default is 0.
saturation : float, optional
The saturation component of the colors to generate. The default is
fully saturated (1.0). Must be in the range [0, 1.0].
value : float, optional
The value component of the colors to generate or "brightness". Must
be in the range [0, 1.0], and the default is 0.7.
controls : array-like, optional
The list of control points for the colors to generate. It should be
an increasing list of floating-point number between 0.0 and 1.0.
The first control point must be 0.0. The last control point must be
1.0. The number of control points depends on the interpolation scheme.
interpolation : str, optional
The interpolation mode of the colormap. Default: 'linear'. Can also
be 'zero'.
If 'linear', ncontrols = ncolors (one color per control point).
If 'zero', ncontrols = ncolors+1 (one color per bin).
Notes
-----
For more information about HUSL colors see http://husl-colors.org
"""
def __init__(self, ncolors=6, hue_start=0, saturation=1.0, value=0.7,
controls=None, interpolation='linear'):
hues = np.linspace(0, 360, ncolors + 1)[:-1]
hues += hue_start
hues %= 360
saturation *= 99
value *= 99
colors = ColorArray(
[husl_to_rgb(hue, saturation, value) for hue in hues],
)
super(_HUSL, self).__init__(colors, controls=controls,
interpolation=interpolation)
class _Diverging(Colormap):
def __init__(self, h_pos=20, h_neg=250, saturation=1.0, value=0.7,
center="light"):
saturation *= 99
value *= 99
start = husl_to_rgb(h_neg, saturation, value)
mid = ((0.133, 0.133, 0.133) if center == "dark" else
(0.92, 0.92, 0.92))
end = husl_to_rgb(h_pos, saturation, value)
colors = ColorArray([start, mid, end])
super(_Diverging, self).__init__(colors)
# https://github.com/matplotlib/matplotlib/pull/4707/files#diff-893cf0348279e9f4570488a7a297ab1eR774
# Taken from original Viridis colormap data in matplotlib implementation
# Sampled 128 points from the raw data-set of 256 samples.
# Subsampled to 128 points because 256 points cause VisPy to freeze.
# HACK: Ideally, all 256 points should be included, with VisPy generating
# a 1D texture lookup for ColorMap, rather than branching code.
_viridis_data = [[0.267004, 0.004874, 0.329415],
[0.268510, 0.009605, 0.335427],
[0.269944, 0.014625, 0.341379],
[0.271305, 0.019942, 0.347269],
[0.272594, 0.025563, 0.353093],
[0.273809, 0.031497, 0.358853],
[0.274952, 0.037752, 0.364543],
[0.276022, 0.044167, 0.370164],
[0.277018, 0.050344, 0.375715],
[0.277941, 0.056324, 0.381191],
[0.278791, 0.062145, 0.386592],
[0.279566, 0.067836, 0.391917],
[0.280267, 0.073417, 0.397163],
[0.280894, 0.078907, 0.402329],
[0.281446, 0.084320, 0.407414],
[0.281924, 0.089666, 0.412415],
[0.282327, 0.094955, 0.417331],
[0.282656, 0.100196, 0.422160],
[0.282910, 0.105393, 0.426902],
[0.283091, 0.110553, 0.431554],
[0.283197, 0.115680, 0.436115],
[0.283229, 0.120777, 0.440584],
[0.283187, 0.125848, 0.444960],
[0.283072, 0.130895, 0.449241],
[0.282884, 0.135920, 0.453427],
[0.282623, 0.140926, 0.457517],
[0.282290, 0.145912, 0.461510],
[0.281887, 0.150881, 0.465405],
[0.281412, 0.155834, 0.469201],
[0.280868, 0.160771, 0.472899],
[0.280255, 0.165693, 0.476498],
[0.279574, 0.170599, 0.479997],
[0.278826, 0.175490, 0.483397],
[0.278012, 0.180367, 0.486697],
[0.277134, 0.185228, 0.489898],
[0.276194, 0.190074, 0.493001],
[0.275191, 0.194905, 0.496005],
[0.274128, 0.199721, 0.498911],
[0.273006, 0.204520, 0.501721],
[0.271828, 0.209303, 0.504434],
[0.270595, 0.214069, 0.507052],
[0.269308, 0.218818, 0.509577],
[0.267968, 0.223549, 0.512008],
[0.266580, 0.228262, 0.514349],
[0.265145, 0.232956, 0.516599],
[0.263663, 0.237631, 0.518762],
[0.262138, 0.242286, 0.520837],
[0.260571, 0.246922, 0.522828],
[0.258965, 0.251537, 0.524736],
[0.257322, 0.256130, 0.526563],
[0.255645, 0.260703, 0.528312],
[0.253935, 0.265254, 0.529983],
[0.252194, 0.269783, 0.531579],
[0.250425, 0.274290, 0.533103],
[0.248629, 0.278775, 0.534556],
[0.246811, 0.283237, 0.535941],
[0.244972, 0.287675, 0.537260],
[0.243113, 0.292092, 0.538516],
[0.241237, 0.296485, 0.539709],
[0.239346, 0.300855, 0.540844],
[0.237441, 0.305202, 0.541921],
[0.235526, 0.309527, 0.542944],
[0.233603, 0.313828, 0.543914],
[0.231674, 0.318106, 0.544834],
[0.229739, 0.322361, 0.545706],
[0.227802, 0.326594, 0.546532],
[0.225863, 0.330805, 0.547314],
[0.223925, 0.334994, 0.548053],
[0.221989, 0.339161, 0.548752],
[0.220057, 0.343307, 0.549413],
[0.218130, 0.347432, 0.550038],
[0.216210, 0.351535, 0.550627],
[0.214298, 0.355619, 0.551184],
[0.212395, 0.359683, 0.551710],
[0.210503, 0.363727, 0.552206],
[0.208623, 0.367752, 0.552675],
[0.206756, 0.371758, 0.553117],
[0.204903, 0.375746, 0.553533],
[0.203063, 0.379716, 0.553925],
[0.201239, 0.383670, 0.554294],
[0.199430, 0.387607, 0.554642],
[0.197636, 0.391528, 0.554969],
[0.195860, 0.395433, 0.555276],
[0.194100, 0.399323, 0.555565],
[0.192357, 0.403199, 0.555836],
[0.190631, 0.407061, 0.556089],
[0.188923, 0.410910, 0.556326],
[0.187231, 0.414746, 0.556547],
[0.185556, 0.418570, 0.556753],
[0.183898, 0.422383, 0.556944],
[0.182256, 0.426184, 0.557120],
[0.180629, 0.429975, 0.557282],
[0.179019, 0.433756, 0.557430],
[0.177423, 0.437527, 0.557565],
[0.175841, 0.441290, 0.557685],
[0.174274, 0.445044, 0.557792],
[0.172719, 0.448791, 0.557885],
[0.171176, 0.452530, 0.557965],
[0.169646, 0.456262, 0.558030],
[0.168126, 0.459988, 0.558082],
[0.166617, 0.463708, 0.558119],
[0.165117, 0.467423, 0.558141],
[0.163625, 0.471133, 0.558148],
[0.162142, 0.474838, 0.558140],
[0.160665, 0.478540, 0.558115],
[0.159194, 0.482237, 0.558073],
[0.157729, 0.485932, 0.558013],
[0.156270, 0.489624, 0.557936],
[0.154815, 0.493313, 0.557840],
[0.153364, 0.497000, 0.557724],
[0.151918, 0.500685, 0.557587],
[0.150476, 0.504369, 0.557430],
[0.149039, 0.508051, 0.557250],
[0.147607, 0.511733, 0.557049],
[0.146180, 0.515413, 0.556823],
[0.144759, 0.519093, 0.556572],
[0.143343, 0.522773, 0.556295],
[0.141935, 0.526453, 0.555991],
[0.140536, 0.530132, 0.555659],
[0.139147, 0.533812, 0.555298],
[0.137770, 0.537492, 0.554906],
[0.136408, 0.541173, 0.554483],
[0.135066, 0.544853, 0.554029],
[0.133743, 0.548535, 0.553541],
[0.132444, 0.552216, 0.553018],
[0.131172, 0.555899, 0.552459],
[0.129933, 0.559582, 0.551864],
[0.128729, 0.563265, 0.551229],
[0.127568, 0.566949, 0.550556],
[0.126453, 0.570633, 0.549841],
[0.125394, 0.574318, 0.549086],
[0.124395, 0.578002, 0.548287],
[0.123463, 0.581687, 0.547445],
[0.122606, 0.585371, 0.546557],
[0.121831, 0.589055, 0.545623],
[0.121148, 0.592739, 0.544641],
[0.120565, 0.596422, 0.543611],
[0.120092, 0.600104, 0.542530],
[0.119738, 0.603785, 0.541400],
[0.119512, 0.607464, 0.540218],
[0.119423, 0.611141, 0.538982],
[0.119483, 0.614817, 0.537692],
[0.119699, 0.618490, 0.536347],
[0.120081, 0.622161, 0.534946],
[0.120638, 0.625828, 0.533488],
[0.121380, 0.629492, 0.531973],
[0.122312, 0.633153, 0.530398],
[0.123444, 0.636809, 0.528763],
[0.124780, 0.640461, 0.527068],
[0.126326, 0.644107, 0.525311],
[0.128087, 0.647749, 0.523491],
[0.130067, 0.651384, 0.521608],
[0.132268, 0.655014, 0.519661],
[0.134692, 0.658636, 0.517649],
[0.137339, 0.662252, 0.515571],
[0.140210, 0.665859, 0.513427],
[0.143303, 0.669459, 0.511215],
[0.146616, 0.673050, 0.508936],
[0.150148, 0.676631, 0.506589],
[0.153894, 0.680203, 0.504172],
[0.157851, 0.683765, 0.501686],
[0.162016, 0.687316, 0.499129],
[0.166383, 0.690856, 0.496502],
[0.170948, 0.694384, 0.493803],
[0.175707, 0.697900, 0.491033],
[0.180653, 0.701402, 0.488189],
[0.185783, 0.704891, 0.485273],
[0.191090, 0.708366, 0.482284],
[0.196571, 0.711827, 0.479221],
[0.202219, 0.715272, 0.476084],
[0.208030, 0.718701, 0.472873],
[0.214000, 0.722114, 0.469588],
[0.220124, 0.725509, 0.466226],
[0.226397, 0.728888, 0.462789],
[0.232815, 0.732247, 0.459277],
[0.239374, 0.735588, 0.455688],
[0.246070, 0.738910, 0.452024],
[0.252899, 0.742211, 0.448284],
[0.259857, 0.745492, 0.444467],
[0.266941, 0.748751, 0.440573],
[0.274149, 0.751988, 0.436601],
[0.281477, 0.755203, 0.432552],
[0.288921, 0.758394, 0.428426],
[0.296479, 0.761561, 0.424223],
[0.304148, 0.764704, 0.419943],
[0.311925, 0.767822, 0.415586],
[0.319809, 0.770914, 0.411152],
[0.327796, 0.773980, 0.406640],
[0.335885, 0.777018, 0.402049],
[0.344074, 0.780029, 0.397381],
[0.352360, 0.783011, 0.392636],
[0.360741, 0.785964, 0.387814],
[0.369214, 0.788888, 0.382914],
[0.377779, 0.791781, 0.377939],
[0.386433, 0.794644, 0.372886],
[0.395174, 0.797475, 0.367757],
[0.404001, 0.800275, 0.362552],
[0.412913, 0.803041, 0.357269],
[0.421908, 0.805774, 0.351910],
[0.430983, 0.808473, 0.346476],
[0.440137, 0.811138, 0.340967],
[0.449368, 0.813768, 0.335384],
[0.458674, 0.816363, 0.329727],
[0.468053, 0.818921, 0.323998],
[0.477504, 0.821444, 0.318195],
[0.487026, 0.823929, 0.312321],
[0.496615, 0.826376, 0.306377],
[0.506271, 0.828786, 0.300362],
[0.515992, 0.831158, 0.294279],
[0.525776, 0.833491, 0.288127],
[0.535621, 0.835785, 0.281908],
[0.545524, 0.838039, 0.275626],
[0.555484, 0.840254, 0.269281],
[0.565498, 0.842430, 0.262877],
[0.575563, 0.844566, 0.256415],
[0.585678, 0.846661, 0.249897],
[0.595839, 0.848717, 0.243329],
[0.606045, 0.850733, 0.236712],
[0.616293, 0.852709, 0.230052],
[0.626579, 0.854645, 0.223353],
[0.636902, 0.856542, 0.216620],
[0.647257, 0.858400, 0.209861],
[0.657642, 0.860219, 0.203082],
[0.668054, 0.861999, 0.196293],
[0.678489, 0.863742, 0.189503],
[0.688944, 0.865448, 0.182725],
[0.699415, 0.867117, 0.175971],
[0.709898, 0.868751, 0.169257],
[0.720391, 0.870350, 0.162603],
[0.730889, 0.871916, 0.156029],
[0.741388, 0.873449, 0.149561],
[0.751884, 0.874951, 0.143228],
[0.762373, 0.876424, 0.137064],
[0.772852, 0.877868, 0.131109],
[0.783315, 0.879285, 0.125405],
[0.793760, 0.880678, 0.120005],
[0.804182, 0.882046, 0.114965],
[0.814576, 0.883393, 0.110347],
[0.824940, 0.884720, 0.106217],
[0.835270, 0.886029, 0.102646],
[0.845561, 0.887322, 0.099702],
[0.855810, 0.888601, 0.097452],
[0.866013, 0.889868, 0.095953],
[0.876168, 0.891125, 0.095250],
[0.886271, 0.892374, 0.095374],
[0.896320, 0.893616, 0.096335],
[0.906311, 0.894855, 0.098125],
[0.916242, 0.896091, 0.100717],
[0.926106, 0.897330, 0.104071],
[0.935904, 0.898570, 0.108131],
[0.945636, 0.899815, 0.112838],
[0.955300, 0.901065, 0.118128],
[0.964894, 0.902323, 0.123941],
[0.974417, 0.903590, 0.130215],
[0.983868, 0.904867, 0.136897],
[0.993248, 0.906157, 0.143936]]
_colormaps = dict(
# Some colormap presets
autumn=Colormap([(1., 0., 0., 1.), (1., 1., 0., 1.)]),
blues=Colormap([(1., 1., 1., 1.), (0., 0., 1., 1.)]),
cool=Colormap([(0., 1., 1., 1.), (1., 0., 1., 1.)]),
greens=Colormap([(1., 1., 1., 1.), (0., 1., 0., 1.)]),
reds=Colormap([(1., 1., 1., 1.), (1., 0., 0., 1.)]),
spring=Colormap([(1., 0., 1., 1.), (1., 1., 0., 1.)]),
summer=Colormap([(0., .5, .4, 1.), (1., 1., .4, 1.)]),
fire=_Fire(),
grays=_Grays(),
hot=_Hot(),
ice=_Ice(),
winter=_Winter(),
light_blues=_SingleHue(),
orange=_SingleHue(hue=35),
viridis=Colormap(ColorArray(_viridis_data[::2])),
# Diverging presets
coolwarm=Colormap(ColorArray(
[
(226, 0.59, 0.92), (222, 0.44, 0.99), (218, 0.26, 0.97),
(30, 0.01, 0.87),
(20, 0.3, 0.96), (15, 0.5, 0.95), (8, 0.66, 0.86)
],
color_space="hsv"
)),
PuGr=_Diverging(145, 280, 0.85, 0.30),
GrBu=_Diverging(255, 133, 0.75, 0.6),
GrBu_d=_Diverging(255, 133, 0.75, 0.6, "dark"),
RdBu=_Diverging(220, 20, 0.75, 0.5),
# Configurable colormaps
cubehelix=CubeHelixColormap(),
single_hue=_SingleHue,
hsl=_HSL,
husl=_HUSL,
diverging=_Diverging
)
def get_colormap(name, *args, **kwargs):
"""Obtain a colormap
Some colormaps can have additional configuration parameters. Refer to
their corresponding documentation for more information.
Parameters
----------
name : str | Colormap
Colormap name. Can also be a Colormap for pass-through.
Examples
--------
>>> get_colormap('autumn')
>>> get_colormap('single_hue', hue=10)
"""
if isinstance(name, BaseColormap):
cmap = name
else:
if not isinstance(name, string_types):
raise TypeError('colormap must be a Colormap or string name')
if name not in _colormaps:
raise KeyError('colormap name %s not found' % name)
cmap = _colormaps[name]
if inspect.isclass(cmap):
cmap = cmap(*args, **kwargs)
return cmap
def get_colormaps():
"""Return the list of colormap names."""
return _colormaps.copy()
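# --- Illustrative usage sketch (not part of the original module) ------------
# How the two helpers above are typically used together: list the registered
# names, fetch a preset, and instantiate a configurable colormap.  The
# 'single_hue' keyword argument comes from ``_SingleHue`` defined earlier.
def _example_get_colormap():
    names = sorted(get_colormaps())              # registered colormap names
    autumn = get_colormap('autumn')              # preset: returned as-is
    orange = get_colormap('single_hue', hue=10)  # configurable: instantiated
    return names, autumn, orange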
|
bsd-3-clause
|
kiyoto/statsmodels
|
statsmodels/sandbox/distributions/mv_measures.py
|
33
|
6257
|
'''using multivariate dependence and divergence measures
The standard correlation coefficient measures only linear dependence between
random variables.
kendall's tau measures any monotonic relationship also non-linear.
mutual information measures any kind of dependence, but does not distinguish
between positive and negative relationship
mutualinfo_kde and mutualinfo_binned follow Khan et al. 2007
Shiraj Khan, Sharba Bandyopadhyay, Auroop R. Ganguly, Sunil Saigal,
David J. Erickson, III, Vladimir Protopopescu, and George Ostrouchov,
Relative performance of mutual information estimation methods for
quantifying the dependence among short and noisy data,
Phys. Rev. E 76, 026209 (2007)
http://pre.aps.org/abstract/PRE/v76/i2/e026209
'''
import numpy as np
from scipy import stats
from scipy.stats import gaussian_kde
import statsmodels.sandbox.infotheo as infotheo
def mutualinfo_kde(y, x, normed=True):
'''mutual information of two random variables estimated with kde
'''
nobs = len(x)
if not len(y) == nobs:
raise ValueError('both data arrays need to have the same size')
x = np.asarray(x, float)
y = np.asarray(y, float)
yx = np.vstack((y,x))
kde_x = gaussian_kde(x)(x)
kde_y = gaussian_kde(y)(y)
kde_yx = gaussian_kde(yx)(yx)
mi_obs = np.log(kde_yx) - np.log(kde_x) - np.log(kde_y)
mi = mi_obs.sum() / nobs
if normed:
mi_normed = np.sqrt(1. - np.exp(-2 * mi))
return mi_normed
else:
return mi
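# Illustrative usage sketch (not part of the original module): mutual
# information between a variable and a noisy nonlinear transform of it,
# estimated with the KDE-based estimator above.  The sample size and noise
# level are arbitrary choices for the example.
def _example_mutualinfo_kde():
    nobs = 500
    x = np.random.randn(nobs)
    y = x**2 + 0.5 * np.random.randn(nobs)
    # normed=True maps the estimate into [0, 1] via sqrt(1 - exp(-2 * mi))
    return mutualinfo_kde(y, x, normed=True)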
def mutualinfo_kde_2sample(y, x, normed=True):
'''mutual information of two random variables estimated with kde
'''
nobs = len(x)
x = np.asarray(x, float)
y = np.asarray(y, float)
#yx = np.vstack((y,x))
kde_x = gaussian_kde(x.T)(x.T)
kde_y = gaussian_kde(y.T)(x.T)
#kde_yx = gaussian_kde(yx)(yx)
mi_obs = np.log(kde_x) - np.log(kde_y)
if len(mi_obs) != nobs:
raise ValueError("Wrong number of observations")
mi = mi_obs.mean()
if normed:
mi_normed = np.sqrt(1. - np.exp(-2 * mi))
return mi_normed
else:
return mi
def mutualinfo_binned(y, x, bins, normed=True):
    '''mutual information of two random variables estimated from binned data (histograms)
Notes
-----
bins='auto' selects the number of bins so that approximately 5 observations
are expected to be in each bin under the assumption of independence. This
    follows roughly the description in Khan et al. 2007.
'''
nobs = len(x)
if not len(y) == nobs:
raise ValueError('both data arrays need to have the same size')
x = np.asarray(x, float)
y = np.asarray(y, float)
#yx = np.vstack((y,x))
## fyx, binsy, binsx = np.histogram2d(y, x, bins=bins)
## fx, binsx_ = np.histogram(x, bins=binsx)
## fy, binsy_ = np.histogram(y, bins=binsy)
if bins == 'auto':
ys = np.sort(y)
xs = np.sort(x)
#quantiles = np.array([0,0.25, 0.4, 0.6, 0.75, 1])
qbin_sqr = np.sqrt(5./nobs)
        quantiles = np.linspace(0, 1, int(1. / qbin_sqr))  # np.linspace needs an integer count
quantile_index = ((nobs-1)*quantiles).astype(int)
#move edges so that they don't coincide with an observation
shift = 1e-6 + np.ones(quantiles.shape)
shift[0] -= 2*1e-6
binsy = ys[quantile_index] + shift
binsx = xs[quantile_index] + shift
elif np.size(bins) == 1:
binsy = bins
binsx = bins
elif (len(bins) == 2):
binsy, binsx = bins
## if np.size(bins[0]) == 1:
## binsx = bins[0]
## if np.size(bins[1]) == 1:
## binsx = bins[1]
fx, binsx = np.histogram(x, bins=binsx)
fy, binsy = np.histogram(y, bins=binsy)
fyx, binsy, binsx = np.histogram2d(y, x, bins=(binsy, binsx))
pyx = fyx * 1. / nobs
px = fx * 1. / nobs
py = fy * 1. / nobs
mi_obs = pyx * (np.log(pyx+1e-10) - np.log(py)[:,None] - np.log(px))
mi = mi_obs.sum()
if normed:
mi_normed = np.sqrt(1. - np.exp(-2 * mi))
return mi_normed, (pyx, py, px, binsy, binsx), mi_obs
else:
return mi
if __name__ == '__main__':
import statsmodels.api as sm
funtype = ['linear', 'quadratic'][1]
nobs = 200
sig = 2#5.
#x = np.linspace(-3, 3, nobs) + np.random.randn(nobs)
x = np.sort(3*np.random.randn(nobs))
exog = sm.add_constant(x, prepend=True)
#y = 0 + np.log(1+x**2) + sig * np.random.randn(nobs)
if funtype == 'quadratic':
y = 0 + x**2 + sig * np.random.randn(nobs)
if funtype == 'linear':
y = 0 + x + sig * np.random.randn(nobs)
print('correlation')
print(np.corrcoef(y,x)[0, 1])
print('pearsonr', stats.pearsonr(y,x))
print('spearmanr', stats.spearmanr(y,x))
print('kendalltau', stats.kendalltau(y,x))
pxy, binsx, binsy = np.histogram2d(x,y, bins=5)
px, binsx_ = np.histogram(x, bins=binsx)
py, binsy_ = np.histogram(y, bins=binsy)
print('mutualinfo', infotheo.mutualinfo(px*1./nobs, py*1./nobs,
1e-15+pxy*1./nobs, logbase=np.e))
print('mutualinfo_kde normed', mutualinfo_kde(y,x))
print('mutualinfo_kde ', mutualinfo_kde(y,x, normed=False))
mi_normed, (pyx2, py2, px2, binsy2, binsx2), mi_obs = \
mutualinfo_binned(y, x, 5, normed=True)
print('mutualinfo_binned normed', mi_normed)
print('mutualinfo_binned ', mi_obs.sum())
mi_normed, (pyx2, py2, px2, binsy2, binsx2), mi_obs = \
mutualinfo_binned(y, x, 'auto', normed=True)
print('auto')
print('mutualinfo_binned normed', mi_normed)
print('mutualinfo_binned ', mi_obs.sum())
ys = np.sort(y)
xs = np.sort(x)
by = ys[((nobs-1)*np.array([0, 0.25, 0.4, 0.6, 0.75, 1])).astype(int)]
bx = xs[((nobs-1)*np.array([0, 0.25, 0.4, 0.6, 0.75, 1])).astype(int)]
mi_normed, (pyx2, py2, px2, binsy2, binsx2), mi_obs = \
mutualinfo_binned(y, x, (by,bx), normed=True)
print('quantiles')
print('mutualinfo_binned normed', mi_normed)
print('mutualinfo_binned ', mi_obs.sum())
doplot = 1#False
if doplot:
import matplotlib.pyplot as plt
plt.plot(x, y, 'o')
olsres = sm.OLS(y, exog).fit()
plt.plot(x, olsres.fittedvalues)
|
bsd-3-clause
|
ARudiuk/mne-python
|
mne/viz/tests/test_misc.py
|
2
|
4930
|
# Authors: Alexandre Gramfort <[email protected]>
# Denis Engemann <[email protected]>
# Martin Luessi <[email protected]>
# Eric Larson <[email protected]>
# Cathy Nangini <[email protected]>
# Mainak Jas <[email protected]>
#
# License: Simplified BSD
import os.path as op
import warnings
import numpy as np
from numpy.testing import assert_raises
from mne import (io, read_events, read_cov, read_source_spaces, read_evokeds,
read_dipole, SourceEstimate)
from mne.datasets import testing
from mne.minimum_norm import read_inverse_operator
from mne.viz import (plot_bem, plot_events, plot_source_spectrogram,
plot_snr_estimate)
from mne.utils import requires_nibabel, run_tests_if_main, slow_test
# Set our plotters to test mode
import matplotlib
matplotlib.use('Agg') # for testing don't use X server
warnings.simplefilter('always') # enable b/c these tests throw warnings
data_path = testing.data_path(download=False)
subjects_dir = op.join(data_path, 'subjects')
inv_fname = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-meg-eeg-oct-4-meg-inv.fif')
evoked_fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis-ave.fif')
dip_fname = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc_set1.dip')
base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
raw_fname = op.join(base_dir, 'test_raw.fif')
cov_fname = op.join(base_dir, 'test-cov.fif')
event_fname = op.join(base_dir, 'test-eve.fif')
def _get_raw():
return io.read_raw_fif(raw_fname, preload=True)
def _get_events():
return read_events(event_fname)
def test_plot_cov():
"""Test plotting of covariances
"""
raw = _get_raw()
cov = read_cov(cov_fname)
with warnings.catch_warnings(record=True): # bad proj
fig1, fig2 = cov.plot(raw.info, proj=True, exclude=raw.ch_names[6:])
@testing.requires_testing_data
@requires_nibabel()
def test_plot_bem():
"""Test plotting of BEM contours
"""
assert_raises(IOError, plot_bem, subject='bad-subject',
subjects_dir=subjects_dir)
assert_raises(ValueError, plot_bem, subject='sample',
subjects_dir=subjects_dir, orientation='bad-ori')
plot_bem(subject='sample', subjects_dir=subjects_dir,
orientation='sagittal', slices=[25, 50])
def test_plot_events():
"""Test plotting events
"""
event_labels = {'aud_l': 1, 'aud_r': 2, 'vis_l': 3, 'vis_r': 4}
color = {1: 'green', 2: 'yellow', 3: 'red', 4: 'c'}
raw = _get_raw()
events = _get_events()
plot_events(events, raw.info['sfreq'], raw.first_samp)
plot_events(events, raw.info['sfreq'], raw.first_samp, equal_spacing=False)
# Test plotting events without sfreq
plot_events(events, first_samp=raw.first_samp)
warnings.simplefilter('always', UserWarning)
with warnings.catch_warnings(record=True):
plot_events(events, raw.info['sfreq'], raw.first_samp,
event_id=event_labels)
plot_events(events, raw.info['sfreq'], raw.first_samp,
color=color)
plot_events(events, raw.info['sfreq'], raw.first_samp,
event_id=event_labels, color=color)
assert_raises(ValueError, plot_events, events, raw.info['sfreq'],
raw.first_samp, event_id={'aud_l': 1}, color=color)
assert_raises(ValueError, plot_events, events, raw.info['sfreq'],
raw.first_samp, event_id={'aud_l': 111}, color=color)
@testing.requires_testing_data
def test_plot_source_spectrogram():
"""Test plotting of source spectrogram
"""
sample_src = read_source_spaces(op.join(subjects_dir, 'sample',
'bem', 'sample-oct-6-src.fif'))
# dense version
vertices = [s['vertno'] for s in sample_src]
n_times = 5
n_verts = sum(len(v) for v in vertices)
stc_data = np.ones((n_verts, n_times))
stc = SourceEstimate(stc_data, vertices, 1, 1)
plot_source_spectrogram([stc, stc], [[1, 2], [3, 4]])
assert_raises(ValueError, plot_source_spectrogram, [], [])
assert_raises(ValueError, plot_source_spectrogram, [stc, stc],
[[1, 2], [3, 4]], tmin=0)
assert_raises(ValueError, plot_source_spectrogram, [stc, stc],
[[1, 2], [3, 4]], tmax=7)
@slow_test
@testing.requires_testing_data
def test_plot_snr():
"""Test plotting SNR estimate
"""
inv = read_inverse_operator(inv_fname)
evoked = read_evokeds(evoked_fname, baseline=(None, 0))[0]
plot_snr_estimate(evoked, inv)
@testing.requires_testing_data
def test_plot_dipole_amplitudes():
"""Test plotting dipole amplitudes
"""
dipoles = read_dipole(dip_fname)
dipoles.plot_amplitudes(show=False)
run_tests_if_main()
|
bsd-3-clause
|
rayNymous/nupic
|
external/linux32/lib/python2.6/site-packages/matplotlib/fontconfig_pattern.py
|
72
|
6429
|
"""
A module for parsing and generating fontconfig patterns.
See the `fontconfig pattern specification
<http://www.fontconfig.org/fontconfig-user.html>`_ for more
information.
"""
# Author : Michael Droettboom <[email protected]>
# License : matplotlib license (PSF compatible)
# This class is defined here because it must be available in:
# - The old-style config framework (:file:`rcsetup.py`)
# - The traits-based config framework (:file:`mpltraits.py`)
# - The font manager (:file:`font_manager.py`)
# It probably logically belongs in :file:`font_manager.py`, but
# placing it in any of these places would have created cyclical
# dependency problems, or an undesired dependency on traits even
# when the traits-based config framework is not used.
import re
from matplotlib.pyparsing import Literal, ZeroOrMore, \
Optional, Regex, StringEnd, ParseException, Suppress
family_punc = r'\\\-:,'
family_unescape = re.compile(r'\\([%s])' % family_punc).sub
family_escape = re.compile(r'([%s])' % family_punc).sub
value_punc = r'\\=_:,'
value_unescape = re.compile(r'\\([%s])' % value_punc).sub
value_escape = re.compile(r'([%s])' % value_punc).sub
class FontconfigPatternParser:
"""A simple pyparsing-based parser for fontconfig-style patterns.
See the `fontconfig pattern specification
<http://www.fontconfig.org/fontconfig-user.html>`_ for more
information.
"""
_constants = {
'thin' : ('weight', 'light'),
'extralight' : ('weight', 'light'),
'ultralight' : ('weight', 'light'),
'light' : ('weight', 'light'),
'book' : ('weight', 'book'),
'regular' : ('weight', 'regular'),
'normal' : ('weight', 'normal'),
'medium' : ('weight', 'medium'),
'demibold' : ('weight', 'demibold'),
'semibold' : ('weight', 'semibold'),
'bold' : ('weight', 'bold'),
'extrabold' : ('weight', 'extra bold'),
'black' : ('weight', 'black'),
'heavy' : ('weight', 'heavy'),
'roman' : ('slant', 'normal'),
'italic' : ('slant', 'italic'),
'oblique' : ('slant', 'oblique'),
'ultracondensed' : ('width', 'ultra-condensed'),
'extracondensed' : ('width', 'extra-condensed'),
'condensed' : ('width', 'condensed'),
'semicondensed' : ('width', 'semi-condensed'),
'expanded' : ('width', 'expanded'),
'extraexpanded' : ('width', 'extra-expanded'),
'ultraexpanded' : ('width', 'ultra-expanded')
}
def __init__(self):
family = Regex(r'([^%s]|(\\[%s]))*' %
(family_punc, family_punc)) \
.setParseAction(self._family)
size = Regex(r"([0-9]+\.?[0-9]*|\.[0-9]+)") \
.setParseAction(self._size)
name = Regex(r'[a-z]+') \
.setParseAction(self._name)
value = Regex(r'([^%s]|(\\[%s]))*' %
(value_punc, value_punc)) \
.setParseAction(self._value)
families =(family
+ ZeroOrMore(
Literal(',')
+ family)
).setParseAction(self._families)
point_sizes =(size
+ ZeroOrMore(
Literal(',')
+ size)
).setParseAction(self._point_sizes)
property =( (name
+ Suppress(Literal('='))
+ value
+ ZeroOrMore(
Suppress(Literal(','))
+ value)
)
| name
).setParseAction(self._property)
pattern =(Optional(
families)
+ Optional(
Literal('-')
+ point_sizes)
+ ZeroOrMore(
Literal(':')
+ property)
+ StringEnd()
)
self._parser = pattern
self.ParseException = ParseException
def parse(self, pattern):
"""
Parse the given fontconfig *pattern* and return a dictionary
of key/value pairs useful for initializing a
:class:`font_manager.FontProperties` object.
"""
props = self._properties = {}
try:
self._parser.parseString(pattern)
except self.ParseException, e:
raise ValueError("Could not parse font string: '%s'\n%s" % (pattern, e))
self._properties = None
return props
def _family(self, s, loc, tokens):
return [family_unescape(r'\1', str(tokens[0]))]
def _size(self, s, loc, tokens):
return [float(tokens[0])]
def _name(self, s, loc, tokens):
return [str(tokens[0])]
def _value(self, s, loc, tokens):
return [value_unescape(r'\1', str(tokens[0]))]
def _families(self, s, loc, tokens):
self._properties['family'] = [str(x) for x in tokens]
return []
def _point_sizes(self, s, loc, tokens):
self._properties['size'] = [str(x) for x in tokens]
return []
def _property(self, s, loc, tokens):
if len(tokens) == 1:
if tokens[0] in self._constants:
key, val = self._constants[tokens[0]]
self._properties.setdefault(key, []).append(val)
else:
key = tokens[0]
val = tokens[1:]
self._properties.setdefault(key, []).extend(val)
return []
parse_fontconfig_pattern = FontconfigPatternParser().parse
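# Illustrative usage sketch (not part of the original module): parsing a
# simple fontconfig-style pattern with the parser defined above.  The keys of
# the returned dictionary depend on the pattern; family, point size and
# constant names such as 'bold' are handled by the parse actions above.
def _example_parse_pattern():
    props = parse_fontconfig_pattern('DejaVu Sans-12:bold:italic')
    # Roughly expected result (all values are lists of strings), e.g.
    #   {'family': ['DejaVu Sans'], 'size': ['12.0'],
    #    'weight': ['bold'], 'slant': ['italic']}
    return props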
def generate_fontconfig_pattern(d):
"""
Given a dictionary of key/value pairs, generates a fontconfig
pattern string.
"""
props = []
families = ''
size = ''
for key in 'family style variant weight stretch file size'.split():
val = getattr(d, 'get_' + key)()
if val is not None and val != []:
if type(val) == list:
val = [value_escape(r'\\\1', str(x)) for x in val if x is not None]
if val != []:
val = ','.join(val)
props.append(":%s=%s" % (key, val))
return ''.join(props)
|
agpl-3.0
|
ryfeus/lambda-packs
|
Keras_tensorflow_nightly/source2.7/numpy/lib/twodim_base.py
|
10
|
25817
|
""" Basic functions for manipulating 2d arrays
"""
from __future__ import division, absolute_import, print_function
from numpy.core.numeric import (
absolute, asanyarray, arange, zeros, greater_equal, multiply, ones,
asarray, where, int8, int16, int32, int64, empty, promote_types, diagonal,
nonzero
)
from numpy.core import iinfo, transpose
__all__ = [
'diag', 'diagflat', 'eye', 'fliplr', 'flipud', 'tri', 'triu',
'tril', 'vander', 'histogram2d', 'mask_indices', 'tril_indices',
'tril_indices_from', 'triu_indices', 'triu_indices_from', ]
i1 = iinfo(int8)
i2 = iinfo(int16)
i4 = iinfo(int32)
def _min_int(low, high):
""" get small int that fits the range """
if high <= i1.max and low >= i1.min:
return int8
if high <= i2.max and low >= i2.min:
return int16
if high <= i4.max and low >= i4.min:
return int32
return int64
def fliplr(m):
"""
Flip array in the left/right direction.
Flip the entries in each row in the left/right direction.
Columns are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array, must be at least 2-D.
Returns
-------
f : ndarray
A view of `m` with the columns reversed. Since a view
is returned, this operation is :math:`\\mathcal O(1)`.
See Also
--------
flipud : Flip array in the up/down direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to m[:,::-1]. Requires the array to be at least 2-D.
Examples
--------
>>> A = np.diag([1.,2.,3.])
>>> A
array([[ 1., 0., 0.],
[ 0., 2., 0.],
[ 0., 0., 3.]])
>>> np.fliplr(A)
array([[ 0., 0., 1.],
[ 0., 2., 0.],
[ 3., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.fliplr(A) == A[:,::-1,...])
True
"""
m = asanyarray(m)
if m.ndim < 2:
raise ValueError("Input must be >= 2-d.")
return m[:, ::-1]
def flipud(m):
"""
Flip array in the up/down direction.
Flip the entries in each column in the up/down direction.
Rows are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array.
Returns
-------
out : array_like
A view of `m` with the rows reversed. Since a view is
returned, this operation is :math:`\\mathcal O(1)`.
See Also
--------
fliplr : Flip array in the left/right direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to ``m[::-1,...]``.
Does not require the array to be two-dimensional.
Examples
--------
>>> A = np.diag([1.0, 2, 3])
>>> A
array([[ 1., 0., 0.],
[ 0., 2., 0.],
[ 0., 0., 3.]])
>>> np.flipud(A)
array([[ 0., 0., 3.],
[ 0., 2., 0.],
[ 1., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.flipud(A) == A[::-1,...])
True
>>> np.flipud([1,2])
array([2, 1])
"""
m = asanyarray(m)
if m.ndim < 1:
raise ValueError("Input must be >= 1-d.")
return m[::-1, ...]
def eye(N, M=None, k=0, dtype=float, order='C'):
"""
Return a 2-D array with ones on the diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the output.
M : int, optional
Number of columns in the output. If None, defaults to `N`.
k : int, optional
Index of the diagonal: 0 (the default) refers to the main diagonal,
a positive value refers to an upper diagonal, and a negative value
to a lower diagonal.
dtype : data-type, optional
Data-type of the returned array.
order : {'C', 'F'}, optional
Whether the output should be stored in row-major (C-style) or
column-major (Fortran-style) order in memory.
.. versionadded:: 1.14.0
Returns
-------
I : ndarray of shape (N,M)
An array where all elements are equal to zero, except for the `k`-th
diagonal, whose values are equal to one.
See Also
--------
identity : (almost) equivalent function
diag : diagonal 2-D array from a 1-D array specified by the user.
Examples
--------
>>> np.eye(2, dtype=int)
array([[1, 0],
[0, 1]])
>>> np.eye(3, k=1)
array([[ 0., 1., 0.],
[ 0., 0., 1.],
[ 0., 0., 0.]])
"""
if M is None:
M = N
m = zeros((N, M), dtype=dtype, order=order)
if k >= M:
return m
if k >= 0:
i = k
else:
i = (-k) * M
m[:M-k].flat[i::M+1] = 1
return m
def diag(v, k=0):
"""
Extract a diagonal or construct a diagonal array.
See the more detailed documentation for ``numpy.diagonal`` if you use this
function to extract a diagonal and wish to write to the resulting array;
whether it returns a copy or a view depends on what version of numpy you
are using.
Parameters
----------
v : array_like
If `v` is a 2-D array, return a copy of its `k`-th diagonal.
If `v` is a 1-D array, return a 2-D array with `v` on the `k`-th
diagonal.
k : int, optional
Diagonal in question. The default is 0. Use `k>0` for diagonals
above the main diagonal, and `k<0` for diagonals below the main
diagonal.
Returns
-------
out : ndarray
The extracted diagonal or constructed diagonal array.
See Also
--------
diagonal : Return specified diagonals.
diagflat : Create a 2-D array with the flattened input as a diagonal.
trace : Sum along diagonals.
triu : Upper triangle of an array.
tril : Lower triangle of an array.
Examples
--------
>>> x = np.arange(9).reshape((3,3))
>>> x
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> np.diag(x)
array([0, 4, 8])
>>> np.diag(x, k=1)
array([1, 5])
>>> np.diag(x, k=-1)
array([3, 7])
>>> np.diag(np.diag(x))
array([[0, 0, 0],
[0, 4, 0],
[0, 0, 8]])
"""
v = asanyarray(v)
s = v.shape
if len(s) == 1:
n = s[0]+abs(k)
res = zeros((n, n), v.dtype)
if k >= 0:
i = k
else:
i = (-k) * n
res[:n-k].flat[i::n+1] = v
return res
elif len(s) == 2:
return diagonal(v, k)
else:
raise ValueError("Input must be 1- or 2-d.")
def diagflat(v, k=0):
"""
Create a two-dimensional array with the flattened input as a diagonal.
Parameters
----------
v : array_like
Input data, which is flattened and set as the `k`-th
diagonal of the output.
k : int, optional
Diagonal to set; 0, the default, corresponds to the "main" diagonal,
a positive (negative) `k` giving the number of the diagonal above
(below) the main.
Returns
-------
out : ndarray
The 2-D output array.
See Also
--------
diag : MATLAB work-alike for 1-D and 2-D arrays.
diagonal : Return specified diagonals.
trace : Sum along diagonals.
Examples
--------
>>> np.diagflat([[1,2], [3,4]])
array([[1, 0, 0, 0],
[0, 2, 0, 0],
[0, 0, 3, 0],
[0, 0, 0, 4]])
>>> np.diagflat([1,2], 1)
array([[0, 1, 0],
[0, 0, 2],
[0, 0, 0]])
"""
try:
wrap = v.__array_wrap__
except AttributeError:
wrap = None
v = asarray(v).ravel()
s = len(v)
n = s + abs(k)
res = zeros((n, n), v.dtype)
if (k >= 0):
i = arange(0, n-k)
fi = i+k+i*n
else:
i = arange(0, n+k)
fi = i+(i-k)*n
res.flat[fi] = v
if not wrap:
return res
return wrap(res)
def tri(N, M=None, k=0, dtype=float):
"""
An array with ones at and below the given diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the array.
M : int, optional
Number of columns in the array.
By default, `M` is taken equal to `N`.
k : int, optional
The sub-diagonal at and below which the array is filled.
`k` = 0 is the main diagonal, while `k` < 0 is below it,
and `k` > 0 is above. The default is 0.
dtype : dtype, optional
Data type of the returned array. The default is float.
Returns
-------
tri : ndarray of shape (N, M)
Array with its lower triangle filled with ones and zero elsewhere;
        in other words ``T[i, j] == 1`` for ``j <= i + k``, 0 otherwise.
Examples
--------
>>> np.tri(3, 5, 2, dtype=int)
array([[1, 1, 1, 0, 0],
[1, 1, 1, 1, 0],
[1, 1, 1, 1, 1]])
>>> np.tri(3, 5, -1)
array([[ 0., 0., 0., 0., 0.],
[ 1., 0., 0., 0., 0.],
[ 1., 1., 0., 0., 0.]])
"""
if M is None:
M = N
m = greater_equal.outer(arange(N, dtype=_min_int(0, N)),
arange(-k, M-k, dtype=_min_int(-k, M - k)))
# Avoid making a copy if the requested type is already bool
m = m.astype(dtype, copy=False)
return m
def tril(m, k=0):
"""
Lower triangle of an array.
Return a copy of an array with elements above the `k`-th diagonal zeroed.
Parameters
----------
m : array_like, shape (M, N)
Input array.
k : int, optional
Diagonal above which to zero elements. `k = 0` (the default) is the
main diagonal, `k < 0` is below it and `k > 0` is above.
Returns
-------
tril : ndarray, shape (M, N)
Lower triangle of `m`, of same shape and data-type as `m`.
See Also
--------
triu : same thing, only for the upper triangle
Examples
--------
>>> np.tril([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 0, 0, 0],
[ 4, 0, 0],
[ 7, 8, 0],
[10, 11, 12]])
"""
m = asanyarray(m)
mask = tri(*m.shape[-2:], k=k, dtype=bool)
return where(mask, m, zeros(1, m.dtype))
def triu(m, k=0):
"""
Upper triangle of an array.
Return a copy of a matrix with the elements below the `k`-th diagonal
zeroed.
Please refer to the documentation for `tril` for further details.
See Also
--------
tril : lower triangle of an array
Examples
--------
>>> np.triu([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 1, 2, 3],
[ 4, 5, 6],
[ 0, 8, 9],
[ 0, 0, 12]])
"""
m = asanyarray(m)
mask = tri(*m.shape[-2:], k=k-1, dtype=bool)
return where(mask, zeros(1, m.dtype), m)
# Originally borrowed from John Hunter and matplotlib
def vander(x, N=None, increasing=False):
"""
Generate a Vandermonde matrix.
The columns of the output matrix are powers of the input vector. The
order of the powers is determined by the `increasing` boolean argument.
Specifically, when `increasing` is False, the `i`-th output column is
the input vector raised element-wise to the power of ``N - i - 1``. Such
a matrix with a geometric progression in each row is named for Alexandre-
Theophile Vandermonde.
Parameters
----------
x : array_like
1-D input array.
N : int, optional
Number of columns in the output. If `N` is not specified, a square
array is returned (``N = len(x)``).
increasing : bool, optional
Order of the powers of the columns. If True, the powers increase
from left to right, if False (the default) they are reversed.
.. versionadded:: 1.9.0
Returns
-------
out : ndarray
Vandermonde matrix. If `increasing` is False, the first column is
``x^(N-1)``, the second ``x^(N-2)`` and so forth. If `increasing` is
True, the columns are ``x^0, x^1, ..., x^(N-1)``.
See Also
--------
polynomial.polynomial.polyvander
Examples
--------
>>> x = np.array([1, 2, 3, 5])
>>> N = 3
>>> np.vander(x, N)
array([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
>>> np.column_stack([x**(N-1-i) for i in range(N)])
array([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
>>> x = np.array([1, 2, 3, 5])
>>> np.vander(x)
array([[ 1, 1, 1, 1],
[ 8, 4, 2, 1],
[ 27, 9, 3, 1],
[125, 25, 5, 1]])
>>> np.vander(x, increasing=True)
array([[ 1, 1, 1, 1],
[ 1, 2, 4, 8],
[ 1, 3, 9, 27],
[ 1, 5, 25, 125]])
The determinant of a square Vandermonde matrix is the product
of the differences between the values of the input vector:
>>> np.linalg.det(np.vander(x))
48.000000000000043
>>> (5-3)*(5-2)*(5-1)*(3-2)*(3-1)*(2-1)
48
"""
x = asarray(x)
if x.ndim != 1:
raise ValueError("x must be a one-dimensional array or sequence.")
if N is None:
N = len(x)
v = empty((len(x), N), dtype=promote_types(x.dtype, int))
tmp = v[:, ::-1] if not increasing else v
if N > 0:
tmp[:, 0] = 1
if N > 1:
tmp[:, 1:] = x[:, None]
multiply.accumulate(tmp[:, 1:], out=tmp[:, 1:], axis=1)
return v
def histogram2d(x, y, bins=10, range=None, normed=False, weights=None):
"""
Compute the bi-dimensional histogram of two data samples.
Parameters
----------
x : array_like, shape (N,)
An array containing the x coordinates of the points to be
histogrammed.
y : array_like, shape (N,)
An array containing the y coordinates of the points to be
histogrammed.
bins : int or array_like or [int, int] or [array, array], optional
The bin specification:
* If int, the number of bins for the two dimensions (nx=ny=bins).
* If array_like, the bin edges for the two dimensions
(x_edges=y_edges=bins).
* If [int, int], the number of bins in each dimension
(nx, ny = bins).
* If [array, array], the bin edges in each dimension
(x_edges, y_edges = bins).
* A combination [int, array] or [array, int], where int
is the number of bins and array is the bin edges.
range : array_like, shape(2,2), optional
The leftmost and rightmost edges of the bins along each dimension
(if not specified explicitly in the `bins` parameters):
``[[xmin, xmax], [ymin, ymax]]``. All values outside of this range
will be considered outliers and not tallied in the histogram.
normed : bool, optional
If False, returns the number of samples in each bin. If True,
returns the bin density ``bin_count / sample_count / bin_area``.
weights : array_like, shape(N,), optional
An array of values ``w_i`` weighing each sample ``(x_i, y_i)``.
Weights are normalized to 1 if `normed` is True. If `normed` is
False, the values of the returned histogram are equal to the sum of
the weights belonging to the samples falling into each bin.
Returns
-------
H : ndarray, shape(nx, ny)
The bi-dimensional histogram of samples `x` and `y`. Values in `x`
are histogrammed along the first dimension and values in `y` are
histogrammed along the second dimension.
xedges : ndarray, shape(nx+1,)
The bin edges along the first dimension.
yedges : ndarray, shape(ny+1,)
The bin edges along the second dimension.
See Also
--------
histogram : 1D histogram
histogramdd : Multidimensional histogram
Notes
-----
When `normed` is True, then the returned histogram is the sample
density, defined such that the sum over bins of the product
``bin_value * bin_area`` is 1.
Please note that the histogram does not follow the Cartesian convention
where `x` values are on the abscissa and `y` values on the ordinate
axis. Rather, `x` is histogrammed along the first dimension of the
array (vertical), and `y` along the second dimension of the array
(horizontal). This ensures compatibility with `histogramdd`.
Examples
--------
>>> import matplotlib as mpl
>>> import matplotlib.pyplot as plt
Construct a 2-D histogram with variable bin width. First define the bin
edges:
>>> xedges = [0, 1, 3, 5]
>>> yedges = [0, 2, 3, 4, 6]
Next we create a histogram H with random bin content:
>>> x = np.random.normal(2, 1, 100)
>>> y = np.random.normal(1, 1, 100)
>>> H, xedges, yedges = np.histogram2d(x, y, bins=(xedges, yedges))
>>> H = H.T # Let each row list bins with common y range.
:func:`imshow <matplotlib.pyplot.imshow>` can only display square bins:
>>> fig = plt.figure(figsize=(7, 3))
>>> ax = fig.add_subplot(131, title='imshow: square bins')
>>> plt.imshow(H, interpolation='nearest', origin='low',
... extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]])
:func:`pcolormesh <matplotlib.pyplot.pcolormesh>` can display actual edges:
>>> ax = fig.add_subplot(132, title='pcolormesh: actual edges',
... aspect='equal')
>>> X, Y = np.meshgrid(xedges, yedges)
>>> ax.pcolormesh(X, Y, H)
:class:`NonUniformImage <matplotlib.image.NonUniformImage>` can be used to
display actual bin edges with interpolation:
>>> ax = fig.add_subplot(133, title='NonUniformImage: interpolated',
... aspect='equal', xlim=xedges[[0, -1]], ylim=yedges[[0, -1]])
>>> im = mpl.image.NonUniformImage(ax, interpolation='bilinear')
>>> xcenters = (xedges[:-1] + xedges[1:]) / 2
>>> ycenters = (yedges[:-1] + yedges[1:]) / 2
>>> im.set_data(xcenters, ycenters, H)
>>> ax.images.append(im)
>>> plt.show()
"""
from numpy import histogramdd
try:
N = len(bins)
except TypeError:
N = 1
if N != 1 and N != 2:
xedges = yedges = asarray(bins, float)
bins = [xedges, yedges]
hist, edges = histogramdd([x, y], bins, range, normed, weights)
return hist, edges[0], edges[1]
def mask_indices(n, mask_func, k=0):
"""
Return the indices to access (n, n) arrays, given a masking function.
Assume `mask_func` is a function that, for a square array a of size
``(n, n)`` with a possible offset argument `k`, when called as
``mask_func(a, k)`` returns a new array with zeros in certain locations
(functions like `triu` or `tril` do precisely this). Then this function
returns the indices where the non-zero values would be located.
Parameters
----------
n : int
The returned indices will be valid to access arrays of shape (n, n).
mask_func : callable
A function whose call signature is similar to that of `triu`, `tril`.
That is, ``mask_func(x, k)`` returns a boolean array, shaped like `x`.
`k` is an optional argument to the function.
k : scalar
An optional argument which is passed through to `mask_func`. Functions
like `triu`, `tril` take a second argument that is interpreted as an
offset.
Returns
-------
indices : tuple of arrays.
The `n` arrays of indices corresponding to the locations where
``mask_func(np.ones((n, n)), k)`` is True.
See Also
--------
triu, tril, triu_indices, tril_indices
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
These are the indices that would allow you to access the upper triangular
part of any 3x3 array:
>>> iu = np.mask_indices(3, np.triu)
For example, if `a` is a 3x3 array:
>>> a = np.arange(9).reshape(3, 3)
>>> a
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> a[iu]
array([0, 1, 2, 4, 5, 8])
An offset can be passed also to the masking function. This gets us the
indices starting on the first diagonal right of the main one:
>>> iu1 = np.mask_indices(3, np.triu, 1)
with which we now extract only three elements:
>>> a[iu1]
array([1, 2, 5])
"""
m = ones((n, n), int)
a = mask_func(m, k)
return nonzero(a != 0)
def tril_indices(n, k=0, m=None):
"""
Return the indices for the lower-triangle of an (n, m) array.
Parameters
----------
n : int
The row dimension of the arrays for which the returned
indices will be valid.
k : int, optional
Diagonal offset (see `tril` for details).
m : int, optional
.. versionadded:: 1.9.0
The column dimension of the arrays for which the returned
arrays will be valid.
By default `m` is taken equal to `n`.
Returns
-------
inds : tuple of arrays
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array.
See also
--------
triu_indices : similar function, for upper-triangular.
mask_indices : generic function accepting an arbitrary mask function.
tril, triu
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
lower triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> il1 = np.tril_indices(4)
>>> il2 = np.tril_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[il1]
array([ 0, 4, 5, 8, 9, 10, 12, 13, 14, 15])
And for assigning values:
>>> a[il1] = -1
>>> a
array([[-1, 1, 2, 3],
[-1, -1, 6, 7],
[-1, -1, -1, 11],
[-1, -1, -1, -1]])
These cover almost the whole array (two diagonals right of the main one):
>>> a[il2] = -10
>>> a
array([[-10, -10, -10, 3],
[-10, -10, -10, -10],
[-10, -10, -10, -10],
[-10, -10, -10, -10]])
"""
return nonzero(tri(n, m, k=k, dtype=bool))
def tril_indices_from(arr, k=0):
"""
Return the indices for the lower-triangle of arr.
See `tril_indices` for full details.
Parameters
----------
arr : array_like
The indices will be valid for square arrays whose dimensions are
the same as arr.
k : int, optional
Diagonal offset (see `tril` for details).
See Also
--------
tril_indices, tril
Notes
-----
.. versionadded:: 1.4.0
"""
if arr.ndim != 2:
raise ValueError("input array must be 2-d")
return tril_indices(arr.shape[-2], k=k, m=arr.shape[-1])
def triu_indices(n, k=0, m=None):
"""
Return the indices for the upper-triangle of an (n, m) array.
Parameters
----------
n : int
The size of the arrays for which the returned indices will
be valid.
k : int, optional
Diagonal offset (see `triu` for details).
m : int, optional
.. versionadded:: 1.9.0
The column dimension of the arrays for which the returned
arrays will be valid.
By default `m` is taken equal to `n`.
Returns
-------
inds : tuple, shape(2) of ndarrays, shape(`n`)
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array. Can be used
to slice a ndarray of shape(`n`, `n`).
See also
--------
tril_indices : similar function, for lower-triangular.
mask_indices : generic function accepting an arbitrary mask function.
triu, tril
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
upper triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> iu1 = np.triu_indices(4)
>>> iu2 = np.triu_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[iu1]
array([ 0, 1, 2, 3, 5, 6, 7, 10, 11, 15])
And for assigning values:
>>> a[iu1] = -1
>>> a
array([[-1, -1, -1, -1],
[ 4, -1, -1, -1],
[ 8, 9, -1, -1],
[12, 13, 14, -1]])
These cover only a small part of the whole array (two diagonals right
of the main one):
>>> a[iu2] = -10
>>> a
array([[ -1, -1, -10, -10],
[ 4, -1, -1, -10],
[ 8, 9, -1, -1],
[ 12, 13, 14, -1]])
"""
return nonzero(~tri(n, m, k=k-1, dtype=bool))
def triu_indices_from(arr, k=0):
"""
Return the indices for the upper-triangle of arr.
See `triu_indices` for full details.
Parameters
----------
arr : ndarray, shape(N, N)
The indices will be valid for square arrays.
k : int, optional
Diagonal offset (see `triu` for details).
Returns
-------
triu_indices_from : tuple, shape(2) of ndarray, shape(N)
Indices for the upper-triangle of `arr`.
See Also
--------
triu_indices, triu
Notes
-----
.. versionadded:: 1.4.0
"""
if arr.ndim != 2:
raise ValueError("input array must be 2-d")
return triu_indices(arr.shape[-2], k=k, m=arr.shape[-1])
|
mit
|
Nandini-K/Artificial_Intelligence_and_Machine_Learning
|
K-Means Clustering.py
|
1
|
5296
|
##########################################################################
# Copyright (c) 2017 Nandini Khanwalkar
# [email protected]
##########################################################################
import os
import random
import math
import numpy as np
import sklearn
from sklearn.metrics import *
train_data = np.loadtxt('optdigits.train', delimiter=',')
X_train, y_train = train_data[:, np.arange(64)], train_data[:, 64]
test_data = np.loadtxt('optdigits.test', delimiter=',')
X_test, y_test = test_data[:, np.arange(64)], test_data[:, 64]
def find_nearest_centre(centers, dataset, k):
idx_cluster = [[] for i in range(k)] # Make k lists corresponding to each cluster-center
for i in range(0, dataset.shape[0]): # For each vector in dataset
C_i = np.argmin(np.sqrt(np.sum(np.power(centers - dataset[i, :], 2), axis=1))) # Compute closest cluster center
idx_cluster[C_i].append(i) # Add vector index to that cluster-center's list
return idx_cluster # Return cluster indices
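# Illustrative usage sketch (not part of the original script): assigning a few
# toy 2-D points to the nearest of two fixed centers with the helper above.
# Shapes follow the convention used below: centers is (k, d), data is (n, d).
def _example_find_nearest_centre():
    centers = np.array([[0.0, 0.0], [10.0, 10.0]])
    points = np.array([[1.0, 0.5], [9.0, 11.0], [0.2, -0.3]])
    # Expected grouping: points 0 and 2 fall to center 0, point 1 to center 1.
    return find_nearest_centre(centers, points, 2)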
def find_K_means(k):
centroids = np.random.choice(np.arange(17), k*64, replace=1).reshape(k, 64) # Randomly initialize k cluster centers
while True:
        idx_cluster = find_nearest_centre(centroids, X_train, k)    # Separate data into clusters by finding the nearest center
k_means = []
for i in range(k): # Do for all clusters:
if (len(idx_cluster[i]) != 0): # If the cluster is not empty
k_means.append(np.mean(X_train[idx_cluster[i], :], axis=0)) # Move the center to the mean of all vectors in the cluster
else: # Otherwise
k_means.append(centroids[i, :]) # Keep center at its place
if (np.sum(abs(centroids - k_means)) == 0): # If there is no change in any of the centers
break # Stop moving the centers and exit loop
centroids = np.asarray(k_means) # Otherwise repeat with new set of centers
# Compute Errors :
MSE = []
for i in range(k):
MSE.append(np.nan_to_num(np.divide(np.sum(np.power(X_train[idx_cluster[i], :] - k_means[i], 2)), len(idx_cluster[i]))))
Avg_MSE = np.divide(np.sum(MSE), np.count_nonzero(idx_cluster))
Sq_Sep = 0
for i in range(k):
for j in range(i+1, k):
Sq_Sep += np.sum(np.power(k_means[i] - k_means[j], 2))
MSS = (2*Sq_Sep)/(k*(k-1))
return np.asarray(k_means)[np.nonzero(idx_cluster)[0], :], MSE, Avg_MSE, MSS, np.asarray(idx_cluster)[np.nonzero(idx_cluster)[0]]
def get_best_clustering(k):
Avg_MSE = float('Infinity')
for i in range(5): # Cluster the entire data 5 times
model = find_K_means(k)
if model[2]<Avg_MSE: # If the current Avg_MSE is less than previous then swap the clustering results
best_model = model # Replace best_model with new best_model
            Avg_MSE = model[2]                          # Replace the previous least Avg_MSE with the new least Avg_MSE
return best_model # Return the clustering results which had least Avg_MSE
def assign_cluster_class(idx_cluster):
cluster_class = []
for i in range(len(idx_cluster)): # Do for each cluster:
count = np.zeros(10)
for j in range(len(y_train[idx_cluster[i]])): # Among all the vectors in that cluster
            count[int(y_train[idx_cluster[i]][j])] += 1     # Count the occurrence of each class
cluster_class.append(np.argmax(count)) # Find the most frequent class and assign it to that cluster
return cluster_class
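# Illustrative usage sketch (not part of the original script): majority-vote
# class assignment for two small hand-built "clusters".  It reuses the
# module-level y_train loaded above, so it assumes 'optdigits.train' is
# available exactly as the rest of this script already does.
def _example_assign_cluster_class():
    # First cluster holds training samples 0-4, second holds samples 5-9.
    toy_clusters = [list(range(5)), list(range(5, 10))]
    # Each cluster is labeled with the most frequent digit among its members.
    return assign_cluster_class(toy_clusters)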
def visualize_cluster_centers(Clustering, cluster_class, directory):
rootdir = os.getcwd() # Get current working directory
if not os.path.exists(directory):
os.makedirs(directory) # Create a directory corresponding to value of k in current directory
outdir = os.path.join(rootdir, directory)
for i in range(len(Clustering)): # Create a .pgm file for each cluster
fout = open(os.path.join(outdir, '_'.join(['Cluster', str(i), 'Class', str(cluster_class[i]), '.pgm'])), 'w+')
fout.write('P2\n8 8\n16\n') # Write header in .pgm file
for j in range(64):
fout.write(str(math.floor(Clustering[i,j])) + ' ') # Write image data in .pgm file
def K_means_Clustering(k):
Clustering, MSE, Avg_MSE, MSS, idx_cluster = get_best_clustering(k) # Get best clustering for given k
    print('\n Results for (K =', k,') :\n\n\tNo. of clusters = ', len(Clustering), '\n\tAverage Mean Square Error = ', Avg_MSE, '\n\tMean Square Separation = ', MSS, '\n')
cluster_class = assign_cluster_class(idx_cluster) # Assign classes to each cluster center
print('\tAssigned classes to clusters : ', cluster_class)
test_set_clusters = find_nearest_centre(Clustering, X_test, len(cluster_class)) # Cluster test data by finding nearest center for each vector
pred = np.zeros(y_test.shape[0])
for i in range(len(test_set_clusters)): # For each cluster
pred[test_set_clusters[i]] = cluster_class[i] # For vectors in a cluster, predicted class is the class assigned to the cluster
print('\tClustering Accuracy = ', accuracy_score(y_test, pred), '\n\n Confusion Matrix :\n')
print(confusion_matrix(y_test, pred), '\n')
visualize_cluster_centers(Clustering, cluster_class, '-'.join(['K',str(k)]))
K_means_Clustering(k=10) # Perform K-means clustering for k = 10
k = 30
while(k):
print('\n==================================================')
    K_means_Clustering(k) # Perform K-means clustering for k = 30 and then for any input k value
k = int(input("\tEnter number of initial random seeds\n\tOR\n\tEnter 0 to exit...\n"))
|
mit
|
kcavagnolo/astroML
|
book_figures/chapter6/fig_GMM_clone.py
|
3
|
2971
|
"""
Cloning a Distribution with Gaussian Mixtures
---------------------------------------------
Figure 6.10
Cloning a two-dimensional distribution. The left panel shows 1000 observed
points. The center panel shows a ten-component Gaussian mixture model fit to
the data (two components dominate over the other eight). The third panel shows
5000 points drawn from the model in the second panel.
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
from matplotlib import pyplot as plt
from sklearn.mixture import GMM
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
#------------------------------------------------------------
# Create our data: three overlapping gaussian clumps
# in a uniform background
np.random.seed(1)
X = np.concatenate([np.random.normal(0, 1, (200, 2)),
np.random.normal(1, 1, (200, 2)),
np.random.normal(4, 1.5, (400, 2)),
9 - 12 * np.random.random((200, 2))])
#------------------------------------------------------------
# Use a GMM to model the density and clone the points
gmm = GMM(5, 'full').fit(X)
X_new = gmm.sample(5000)
xmin = -3
xmax = 9
Xgrid = np.meshgrid(np.linspace(xmin, xmax, 50),
np.linspace(xmin, xmax, 50))
Xgrid = np.array(Xgrid).reshape(2, -1).T
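# With this older GMM API, score(Xgrid) returns the per-sample log-likelihood,
# so exponentiating it gives the fitted mixture density on the grid.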
dens = np.exp(gmm.score(Xgrid)).reshape((50, 50))
#------------------------------------------------------------
# Plot the results
fig = plt.figure(figsize=(5, 2))
fig.subplots_adjust(left=0.1, right=0.95, wspace=0.05,
bottom=0.12, top=0.9)
# first plot the input
ax = fig.add_subplot(131, aspect='equal')
ax.plot(X[:, 0], X[:, 1], '.k', ms=2)
ax.set_title("Input Distribution")
ax.set_ylabel('$y$')
# next plot the gmm fit
ax = fig.add_subplot(132, aspect='equal')
ax.imshow(dens.T, origin='lower', extent=[xmin, xmax, xmin, xmax],
cmap=plt.cm.binary)
ax.set_title("Density Model")
ax.yaxis.set_major_formatter(plt.NullFormatter())
# next plot the cloned distribution
ax = fig.add_subplot(133, aspect='equal')
ax.plot(X_new[:, 0], X_new[:, 1], '.k', alpha=0.3, ms=2)
ax.set_title("Cloned Distribution")
ax.yaxis.set_major_formatter(plt.NullFormatter())
for ax in fig.axes:
ax.set_xlim(xmin, xmax)
ax.set_ylim(xmin, xmax)
ax.set_xlabel('$x$')
plt.show()
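# In later scikit-learn releases the GMM class used above was replaced by
# sklearn.mixture.GaussianMixture; a rough, untested sketch of the equivalent
# fit/sample/density steps (assuming such a release is installed) would be:
#
#     from sklearn.mixture import GaussianMixture
#     gmm = GaussianMixture(n_components=5, covariance_type='full').fit(X)
#     X_new, _ = gmm.sample(5000)           # sample() returns (samples, labels)
#     dens = np.exp(gmm.score_samples(Xgrid)).reshape((50, 50))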
|
bsd-2-clause
|
nomadcube/scikit-learn
|
examples/decomposition/plot_pca_vs_fa_model_selection.py
|
78
|
4510
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
===============================================================
Model selection with Probabilistic PCA and Factor Analysis (FA)
===============================================================
Probabilistic PCA and Factor Analysis are probabilistic models.
The consequence is that the likelihood of new data can be used
for model selection and covariance estimation.
Here we compare PCA and FA with cross-validation on low rank data corrupted
with homoscedastic noise (noise variance
is the same for each feature) or heteroscedastic noise (noise variance
is different for each feature). In a second step we compare the model
likelihood to the likelihoods obtained from shrinkage covariance estimators.
One can observe that with homoscedastic noise both FA and PCA succeed
in recovering the size of the low rank subspace. The likelihood with PCA
is higher than FA in this case. However PCA fails and overestimates
the rank when heteroscedastic noise is present. Under appropriate
circumstances the low rank models are more likely than shrinkage models.
The automatic estimation from
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604
by Thomas P. Minka is also compared.
"""
print(__doc__)
# Authors: Alexandre Gramfort
# Denis A. Engemann
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg
from sklearn.decomposition import PCA, FactorAnalysis
from sklearn.covariance import ShrunkCovariance, LedoitWolf
from sklearn.cross_validation import cross_val_score
from sklearn.grid_search import GridSearchCV
###############################################################################
# Create the data
n_samples, n_features, rank = 1000, 50, 10
sigma = 1.
rng = np.random.RandomState(42)
U, _, _ = linalg.svd(rng.randn(n_features, n_features))
X = np.dot(rng.randn(n_samples, rank), U[:, :rank].T)
# Adding homoscedastic noise
X_homo = X + sigma * rng.randn(n_samples, n_features)
# Adding heteroscedastic noise
sigmas = sigma * rng.rand(n_features) + sigma / 2.
X_hetero = X + rng.randn(n_samples, n_features) * sigmas
###############################################################################
# Fit the models
n_components = np.arange(0, n_features, 5) # options for n_components
def compute_scores(X):
pca = PCA()
fa = FactorAnalysis()
pca_scores, fa_scores = [], []
for n in n_components:
pca.n_components = n
fa.n_components = n
pca_scores.append(np.mean(cross_val_score(pca, X)))
fa_scores.append(np.mean(cross_val_score(fa, X)))
return pca_scores, fa_scores
def shrunk_cov_score(X):
shrinkages = np.logspace(-2, 0, 30)
cv = GridSearchCV(ShrunkCovariance(), {'shrinkage': shrinkages})
return np.mean(cross_val_score(cv.fit(X).best_estimator_, X))
def lw_score(X):
return np.mean(cross_val_score(LedoitWolf(), X))
for X, title in [(X_homo, 'Homoscedastic Noise'),
(X_hetero, 'Heteroscedastic Noise')]:
pca_scores, fa_scores = compute_scores(X)
n_components_pca = n_components[np.argmax(pca_scores)]
n_components_fa = n_components[np.argmax(fa_scores)]
pca = PCA(n_components='mle')
pca.fit(X)
n_components_pca_mle = pca.n_components_
print("best n_components by PCA CV = %d" % n_components_pca)
print("best n_components by FactorAnalysis CV = %d" % n_components_fa)
print("best n_components by PCA MLE = %d" % n_components_pca_mle)
plt.figure()
plt.plot(n_components, pca_scores, 'b', label='PCA scores')
plt.plot(n_components, fa_scores, 'r', label='FA scores')
plt.axvline(rank, color='g', label='TRUTH: %d' % rank, linestyle='-')
plt.axvline(n_components_pca, color='b',
label='PCA CV: %d' % n_components_pca, linestyle='--')
plt.axvline(n_components_fa, color='r',
label='FactorAnalysis CV: %d' % n_components_fa, linestyle='--')
plt.axvline(n_components_pca_mle, color='k',
label='PCA MLE: %d' % n_components_pca_mle, linestyle='--')
# compare with other covariance estimators
plt.axhline(shrunk_cov_score(X), color='violet',
label='Shrunk Covariance MLE', linestyle='-.')
plt.axhline(lw_score(X), color='orange',
                label='LedoitWolf MLE', linestyle='-.')
plt.xlabel('nb of components')
plt.ylabel('CV scores')
plt.legend(loc='lower right')
plt.title(title)
plt.show()
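# Note: in later scikit-learn releases cross_val_score and GridSearchCV moved
# to sklearn.model_selection, so on such versions the two imports above would
# be replaced by (sketch):
#     from sklearn.model_selection import cross_val_score, GridSearchCV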
|
bsd-3-clause
|
mbayon/TFG-MachineLearning
|
venv/lib/python3.6/site-packages/scipy/linalg/basic.py
|
6
|
56557
|
#
# Author: Pearu Peterson, March 2002
#
# w/ additions by Travis Oliphant, March 2002
# and Jake Vanderplas, August 2012
from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
from numpy import atleast_1d, atleast_2d
from .flinalg import get_flinalg_funcs
from .lapack import get_lapack_funcs, _compute_lwork
from .misc import LinAlgError, _datacopied
from .decomp import _asarray_validated
from . import decomp, decomp_svd
from ._solve_toeplitz import levinson
__all__ = ['solve', 'solve_triangular', 'solveh_banded', 'solve_banded',
'solve_toeplitz', 'solve_circulant', 'inv', 'det', 'lstsq',
'pinv', 'pinv2', 'pinvh', 'matrix_balance']
# Linear equations
def _solve_check(n, info, lamch=None, rcond=None):
""" Check arguments during the different steps of the solution phase """
if info < 0:
raise ValueError('LAPACK reported an illegal value in {}-th argument'
'.'.format(-info))
elif 0 < info:
raise LinAlgError('Matrix is singular.')
if lamch is None:
return
E = lamch('E')
if rcond < E:
warnings.warn('scipy.linalg.solve\nIll-conditioned matrix detected.'
' Result is not guaranteed to be accurate.\nReciprocal '
'condition number/precision: {} / {}'.format(rcond, E),
RuntimeWarning)
def solve(a, b, sym_pos=False, lower=False, overwrite_a=False,
overwrite_b=False, debug=None, check_finite=True, assume_a='gen',
transposed=False):
"""
Solves the linear equation set ``a * x = b`` for the unknown ``x``
for square ``a`` matrix.
If the data matrix is known to be a particular type then supplying the
corresponding string to ``assume_a`` key chooses the dedicated solver.
The available options are
=================== ========
generic matrix 'gen'
symmetric 'sym'
hermitian 'her'
positive definite 'pos'
=================== ========
If omitted, ``'gen'`` is the default structure.
The datatype of the arrays define which solver is called regardless
of the values. In other words, even when the complex array entries have
precisely zero imaginary parts, the complex solver will be called based
on the data type of the array.
Parameters
----------
a : (N, N) array_like
Square input data
b : (N, NRHS) array_like
Input data for the right hand side.
sym_pos : bool, optional
Assume `a` is symmetric and positive definite. This key is deprecated
and assume_a = 'pos' keyword is recommended instead. The functionality
is the same. It will be removed in the future.
lower : bool, optional
        If True, only the data contained in the lower triangle of `a` is used. Default
is to use upper triangle. (ignored for ``'gen'``)
overwrite_a : bool, optional
Allow overwriting data in `a` (may enhance performance).
Default is False.
overwrite_b : bool, optional
Allow overwriting data in `b` (may enhance performance).
Default is False.
check_finite : bool, optional
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
assume_a : str, optional
Valid entries are explained above.
transposed: bool, optional
If True, ``a^T x = b`` for real matrices, raises `NotImplementedError`
for complex matrices (only for True).
Returns
-------
x : (N, NRHS) ndarray
The solution array.
Raises
------
ValueError
If size mismatches detected or input a is not square.
LinAlgError
If the matrix is singular.
RuntimeWarning
If an ill-conditioned input a is detected.
NotImplementedError
If transposed is True and input a is a complex matrix.
Examples
--------
Given `a` and `b`, solve for `x`:
>>> a = np.array([[3, 2, 0], [1, -1, 0], [0, 5, 1]])
>>> b = np.array([2, 4, -1])
>>> from scipy import linalg
>>> x = linalg.solve(a, b)
>>> x
array([ 2., -2., 9.])
>>> np.dot(a, x) == b
array([ True, True, True], dtype=bool)
Notes
-----
If the input b matrix is a 1D array with N elements, when supplied
together with an NxN input a, it is assumed as a valid column vector
despite the apparent size mismatch. This is compatible with the
numpy.dot() behavior and the returned result is still 1D array.
The generic, symmetric, hermitian and positive definite solutions are
obtained via calling ?GESV, ?SYSV, ?HESV, and ?POSV routines of
LAPACK respectively.
"""
# Flags for 1D or nD right hand side
b_is_1D = False
a1 = atleast_2d(_asarray_validated(a, check_finite=check_finite))
b1 = atleast_1d(_asarray_validated(b, check_finite=check_finite))
n = a1.shape[0]
overwrite_a = overwrite_a or _datacopied(a1, a)
overwrite_b = overwrite_b or _datacopied(b1, b)
if a1.shape[0] != a1.shape[1]:
raise ValueError('Input a needs to be a square matrix.')
if n != b1.shape[0]:
# Last chance to catch 1x1 scalar a and 1D b arrays
if not (n == 1 and b1.size != 0):
raise ValueError('Input b has to have same number of rows as '
'input a')
    # accommodate empty arrays
if b1.size == 0:
return np.asfortranarray(b1.copy())
# regularize 1D b arrays to 2D
if b1.ndim == 1:
if n == 1:
b1 = b1[None, :]
else:
b1 = b1[:, None]
b_is_1D = True
# Backwards compatibility - old keyword.
if sym_pos:
assume_a = 'pos'
if assume_a not in ('gen', 'sym', 'her', 'pos'):
raise ValueError('{} is not a recognized matrix structure'
''.format(assume_a))
# Deprecate keyword "debug"
if debug is not None:
warnings.warn('Use of the "debug" keyword is deprecated '
'and this keyword will be removed in future '
'versions of SciPy.', DeprecationWarning)
# Get the correct lamch function.
# The LAMCH functions only exists for S and D
# So for complex values we have to convert to real/double.
if a1.dtype.char in 'fF': # single precision
lamch = get_lapack_funcs('lamch', dtype='f')
else:
lamch = get_lapack_funcs('lamch', dtype='d')
# Currently we do not have the other forms of the norm calculators
# lansy, lanpo, lanhe.
# However, in any case they only reduce computations slightly...
lange = get_lapack_funcs('lange', (a1,))
# Since the I-norm and 1-norm are the same for symmetric matrices
# we can collect them all in this one call
# Note however, that when issuing 'gen' and form!='none', then
# the I-norm should be used
if transposed:
trans = 1
norm = 'I'
if np.iscomplexobj(a1):
raise NotImplementedError('scipy.linalg.solve can currently '
'not solve a^T x = b or a^H x = b '
'for complex matrices.')
else:
trans = 0
norm = '1'
anorm = lange(norm, a1)
# Generalized case 'gesv'
if assume_a == 'gen':
gecon, getrf, getrs = get_lapack_funcs(('gecon', 'getrf', 'getrs'),
(a1, b1))
lu, ipvt, info = getrf(a1, overwrite_a=overwrite_a)
_solve_check(n, info)
x, info = getrs(lu, ipvt, b1,
trans=trans, overwrite_b=overwrite_b)
_solve_check(n, info)
rcond, info = gecon(lu, anorm, norm=norm)
# Hermitian case 'hesv'
elif assume_a == 'her':
hecon, hesv, hesv_lw = get_lapack_funcs(('hecon', 'hesv', 'hesv_lwork'),
(a1, b1))
lwork = _compute_lwork(hesv_lw, n, lower)
lu, ipvt, x, info = hesv(a1, b1, lwork=lwork,
lower=lower,
overwrite_a=overwrite_a,
overwrite_b=overwrite_b)
_solve_check(n, info)
rcond, info = hecon(lu, ipvt, anorm)
# Symmetric case 'sysv'
elif assume_a == 'sym':
sycon, sysv, sysv_lw = get_lapack_funcs(('sycon', 'sysv', 'sysv_lwork'),
(a1, b1))
lwork = _compute_lwork(sysv_lw, n, lower)
lu, ipvt, x, info = sysv(a1, b1, lwork=lwork,
lower=lower,
overwrite_a=overwrite_a,
overwrite_b=overwrite_b)
_solve_check(n, info)
rcond, info = sycon(lu, ipvt, anorm)
# Positive definite case 'posv'
else:
pocon, posv = get_lapack_funcs(('pocon', 'posv'),
(a1, b1))
lu, x, info = posv(a1, b1, lower=lower,
overwrite_a=overwrite_a,
overwrite_b=overwrite_b)
_solve_check(n, info)
rcond, info = pocon(lu, anorm)
_solve_check(n, info, lamch, rcond)
if b_is_1D:
x = x.ravel()
return x
def solve_triangular(a, b, trans=0, lower=False, unit_diagonal=False,
overwrite_b=False, debug=None, check_finite=True):
"""
Solve the equation `a x = b` for `x`, assuming a is a triangular matrix.
Parameters
----------
a : (M, M) array_like
A triangular matrix
b : (M,) or (M, N) array_like
Right-hand side matrix in `a x = b`
lower : bool, optional
Use only data contained in the lower triangle of `a`.
Default is to use upper triangle.
trans : {0, 1, 2, 'N', 'T', 'C'}, optional
Type of system to solve:
======== =========
trans system
======== =========
0 or 'N' a x = b
1 or 'T' a^T x = b
2 or 'C' a^H x = b
======== =========
unit_diagonal : bool, optional
If True, diagonal elements of `a` are assumed to be 1 and
will not be referenced.
overwrite_b : bool, optional
Allow overwriting data in `b` (may enhance performance)
check_finite : bool, optional
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Returns
-------
x : (M,) or (M, N) ndarray
Solution to the system `a x = b`. Shape of return matches `b`.
Raises
------
LinAlgError
If `a` is singular
Notes
-----
.. versionadded:: 0.9.0
Examples
--------
Solve the lower triangular system a x = b, where::
[3 0 0 0] [4]
a = [2 1 0 0] b = [2]
[1 0 1 0] [4]
[1 1 1 1] [2]
>>> from scipy.linalg import solve_triangular
>>> a = np.array([[3, 0, 0, 0], [2, 1, 0, 0], [1, 0, 1, 0], [1, 1, 1, 1]])
>>> b = np.array([4, 2, 4, 2])
>>> x = solve_triangular(a, b, lower=True)
>>> x
array([ 1.33333333, -0.66666667, 2.66666667, -1.33333333])
>>> a.dot(x) # Check the result
array([ 4., 2., 4., 2.])
"""
# Deprecate keyword "debug"
if debug is not None:
warnings.warn('Use of the "debug" keyword is deprecated '
'and this keyword will be removed in the future '
'versions of SciPy.', DeprecationWarning)
a1 = _asarray_validated(a, check_finite=check_finite)
b1 = _asarray_validated(b, check_finite=check_finite)
if len(a1.shape) != 2 or a1.shape[0] != a1.shape[1]:
raise ValueError('expected square matrix')
if a1.shape[0] != b1.shape[0]:
raise ValueError('incompatible dimensions')
overwrite_b = overwrite_b or _datacopied(b1, b)
if debug:
print('solve:overwrite_b=', overwrite_b)
trans = {'N': 0, 'T': 1, 'C': 2}.get(trans, trans)
trtrs, = get_lapack_funcs(('trtrs',), (a1, b1))
x, info = trtrs(a1, b1, overwrite_b=overwrite_b, lower=lower,
trans=trans, unitdiag=unit_diagonal)
if info == 0:
return x
if info > 0:
raise LinAlgError("singular matrix: resolution failed at diagonal %d" %
(info-1))
raise ValueError('illegal value in %d-th argument of internal trtrs' %
(-info))
def solve_banded(l_and_u, ab, b, overwrite_ab=False, overwrite_b=False,
debug=None, check_finite=True):
"""
Solve the equation a x = b for x, assuming a is banded matrix.
The matrix a is stored in `ab` using the matrix diagonal ordered form::
ab[u + i - j, j] == a[i,j]
Example of `ab` (shape of a is (6,6), `u` =1, `l` =2)::
* a01 a12 a23 a34 a45
a00 a11 a22 a33 a44 a55
a10 a21 a32 a43 a54 *
a20 a31 a42 a53 * *
Parameters
----------
(l, u) : (integer, integer)
Number of non-zero lower and upper diagonals
ab : (`l` + `u` + 1, M) array_like
Banded matrix
b : (M,) or (M, K) array_like
Right-hand side
overwrite_ab : bool, optional
Discard data in `ab` (may enhance performance)
overwrite_b : bool, optional
Discard data in `b` (may enhance performance)
check_finite : bool, optional
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Returns
-------
x : (M,) or (M, K) ndarray
The solution to the system a x = b. Returned shape depends on the
shape of `b`.
Examples
--------
Solve the banded system a x = b, where::
[5 2 -1 0 0] [0]
[1 4 2 -1 0] [1]
a = [0 1 3 2 -1] b = [2]
[0 0 1 2 2] [2]
[0 0 0 1 1] [3]
There is one nonzero diagonal below the main diagonal (l = 1), and
two above (u = 2). The diagonal banded form of the matrix is::
[* * -1 -1 -1]
ab = [* 2 2 2 2]
[5 4 3 2 1]
[1 1 1 1 *]
>>> from scipy.linalg import solve_banded
>>> ab = np.array([[0, 0, -1, -1, -1],
... [0, 2, 2, 2, 2],
... [5, 4, 3, 2, 1],
... [1, 1, 1, 1, 0]])
>>> b = np.array([0, 1, 2, 2, 3])
>>> x = solve_banded((1, 2), ab, b)
>>> x
array([-2.37288136, 3.93220339, -4. , 4.3559322 , -1.3559322 ])
"""
# Deprecate keyword "debug"
if debug is not None:
warnings.warn('Use of the "debug" keyword is deprecated '
'and this keyword will be removed in the future '
'versions of SciPy.', DeprecationWarning)
a1 = _asarray_validated(ab, check_finite=check_finite, as_inexact=True)
b1 = _asarray_validated(b, check_finite=check_finite, as_inexact=True)
# Validate shapes.
if a1.shape[-1] != b1.shape[0]:
raise ValueError("shapes of ab and b are not compatible.")
(l, u) = l_and_u
if l + u + 1 != a1.shape[0]:
raise ValueError("invalid values for the number of lower and upper "
"diagonals: l+u+1 (%d) does not equal ab.shape[0] "
"(%d)" % (l+u+1, ab.shape[0]))
overwrite_b = overwrite_b or _datacopied(b1, b)
if a1.shape[-1] == 1:
b2 = np.array(b1, copy=(not overwrite_b))
b2 /= a1[1, 0]
return b2
if l == u == 1:
overwrite_ab = overwrite_ab or _datacopied(a1, ab)
gtsv, = get_lapack_funcs(('gtsv',), (a1, b1))
du = a1[0, 1:]
d = a1[1, :]
dl = a1[2, :-1]
du2, d, du, x, info = gtsv(dl, d, du, b1, overwrite_ab, overwrite_ab,
overwrite_ab, overwrite_b)
else:
gbsv, = get_lapack_funcs(('gbsv',), (a1, b1))
a2 = np.zeros((2*l+u+1, a1.shape[1]), dtype=gbsv.dtype)
a2[l:, :] = a1
lu, piv, x, info = gbsv(l, u, a2, b1, overwrite_ab=True,
overwrite_b=overwrite_b)
if info == 0:
return x
if info > 0:
raise LinAlgError("singular matrix")
raise ValueError('illegal value in %d-th argument of internal '
'gbsv/gtsv' % -info)
def solveh_banded(ab, b, overwrite_ab=False, overwrite_b=False, lower=False,
check_finite=True):
"""
Solve equation a x = b. a is Hermitian positive-definite banded matrix.
The matrix a is stored in `ab` either in lower diagonal or upper
diagonal ordered form:
ab[u + i - j, j] == a[i,j] (if upper form; i <= j)
ab[ i - j, j] == a[i,j] (if lower form; i >= j)
Example of `ab` (shape of a is (6, 6), `u` =2)::
upper form:
* * a02 a13 a24 a35
* a01 a12 a23 a34 a45
a00 a11 a22 a33 a44 a55
lower form:
a00 a11 a22 a33 a44 a55
a10 a21 a32 a43 a54 *
a20 a31 a42 a53 * *
Cells marked with * are not used.
Parameters
----------
ab : (`u` + 1, M) array_like
Banded matrix
b : (M,) or (M, K) array_like
Right-hand side
overwrite_ab : bool, optional
Discard data in `ab` (may enhance performance)
overwrite_b : bool, optional
Discard data in `b` (may enhance performance)
lower : bool, optional
Is the matrix in the lower form. (Default is upper form)
check_finite : bool, optional
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Returns
-------
x : (M,) or (M, K) ndarray
The solution to the system a x = b. Shape of return matches shape
of `b`.
Examples
--------
Solve the banded system A x = b, where::
[ 4 2 -1 0 0 0] [1]
[ 2 5 2 -1 0 0] [2]
A = [-1 2 6 2 -1 0] b = [2]
[ 0 -1 2 7 2 -1] [3]
[ 0 0 -1 2 8 2] [3]
[ 0 0 0 -1 2 9] [3]
>>> from scipy.linalg import solveh_banded
`ab` contains the main diagonal and the nonzero diagonals below the
main diagonal. That is, we use the lower form:
>>> ab = np.array([[ 4, 5, 6, 7, 8, 9],
... [ 2, 2, 2, 2, 2, 0],
... [-1, -1, -1, -1, 0, 0]])
>>> b = np.array([1, 2, 2, 3, 3, 3])
>>> x = solveh_banded(ab, b, lower=True)
>>> x
array([ 0.03431373, 0.45938375, 0.05602241, 0.47759104, 0.17577031,
0.34733894])
Solve the Hermitian banded system H x = b, where::
[ 8 2-1j 0 0 ] [ 1 ]
H = [2+1j 5 1j 0 ] b = [1+1j]
[ 0 -1j 9 -2-1j] [1-2j]
[ 0 0 -2+1j 6 ] [ 0 ]
In this example, we put the upper diagonals in the array `hb`:
>>> hb = np.array([[0, 2-1j, 1j, -2-1j],
... [8, 5, 9, 6 ]])
>>> b = np.array([1, 1+1j, 1-2j, 0])
>>> x = solveh_banded(hb, b)
>>> x
array([ 0.07318536-0.02939412j, 0.11877624+0.17696461j,
0.10077984-0.23035393j, -0.00479904-0.09358128j])
"""
a1 = _asarray_validated(ab, check_finite=check_finite)
b1 = _asarray_validated(b, check_finite=check_finite)
# Validate shapes.
if a1.shape[-1] != b1.shape[0]:
raise ValueError("shapes of ab and b are not compatible.")
overwrite_b = overwrite_b or _datacopied(b1, b)
overwrite_ab = overwrite_ab or _datacopied(a1, ab)
if a1.shape[0] == 2:
ptsv, = get_lapack_funcs(('ptsv',), (a1, b1))
if lower:
d = a1[0, :].real
e = a1[1, :-1]
else:
d = a1[1, :].real
e = a1[0, 1:].conj()
d, du, x, info = ptsv(d, e, b1, overwrite_ab, overwrite_ab,
overwrite_b)
else:
pbsv, = get_lapack_funcs(('pbsv',), (a1, b1))
c, x, info = pbsv(a1, b1, lower=lower, overwrite_ab=overwrite_ab,
overwrite_b=overwrite_b)
if info > 0:
raise LinAlgError("%d-th leading minor not positive definite" % info)
if info < 0:
raise ValueError('illegal value in %d-th argument of internal '
'pbsv' % -info)
return x
def solve_toeplitz(c_or_cr, b, check_finite=True):
"""Solve a Toeplitz system using Levinson Recursion
The Toeplitz matrix has constant diagonals, with c as its first column
and r as its first row. If r is not given, ``r == conjugate(c)`` is
assumed.
Parameters
----------
c_or_cr : array_like or tuple of (array_like, array_like)
The vector ``c``, or a tuple of arrays (``c``, ``r``). Whatever the
actual shape of ``c``, it will be converted to a 1-D array. If not
supplied, ``r = conjugate(c)`` is assumed; in this case, if c[0] is
real, the Toeplitz matrix is Hermitian. r[0] is ignored; the first row
of the Toeplitz matrix is ``[c[0], r[1:]]``. Whatever the actual shape
of ``r``, it will be converted to a 1-D array.
b : (M,) or (M, K) array_like
Right-hand side in ``T x = b``.
check_finite : bool, optional
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(result entirely NaNs) if the inputs do contain infinities or NaNs.
Returns
-------
x : (M,) or (M, K) ndarray
The solution to the system ``T x = b``. Shape of return matches shape
of `b`.
See Also
--------
toeplitz : Toeplitz matrix
Notes
-----
The solution is computed using Levinson-Durbin recursion, which is faster
than generic least-squares methods, but can be less numerically stable.
Examples
--------
Solve the Toeplitz system T x = b, where::
[ 1 -1 -2 -3] [1]
T = [ 3 1 -1 -2] b = [2]
[ 6 3 1 -1] [2]
[10 6 3 1] [5]
To specify the Toeplitz matrix, only the first column and the first
row are needed.
>>> c = np.array([1, 3, 6, 10]) # First column of T
>>> r = np.array([1, -1, -2, -3]) # First row of T
>>> b = np.array([1, 2, 2, 5])
>>> from scipy.linalg import solve_toeplitz, toeplitz
>>> x = solve_toeplitz((c, r), b)
>>> x
array([ 1.66666667, -1. , -2.66666667, 2.33333333])
Check the result by creating the full Toeplitz matrix and
multiplying it by `x`. We should get `b`.
>>> T = toeplitz(c, r)
>>> T.dot(x)
array([ 1., 2., 2., 5.])
"""
# If numerical stability of this algorithm is a problem, a future
# developer might consider implementing other O(N^2) Toeplitz solvers,
# such as GKO (http://www.jstor.org/stable/2153371) or Bareiss.
if isinstance(c_or_cr, tuple):
c, r = c_or_cr
c = _asarray_validated(c, check_finite=check_finite).ravel()
r = _asarray_validated(r, check_finite=check_finite).ravel()
else:
c = _asarray_validated(c_or_cr, check_finite=check_finite).ravel()
r = c.conjugate()
# Form a 1D array of values to be used in the matrix, containing a reversed
# copy of r[1:], followed by c.
vals = np.concatenate((r[-1:0:-1], c))
if b is None:
raise ValueError('illegal value, `b` is a required argument')
b = _asarray_validated(b)
if vals.shape[0] != (2*b.shape[0] - 1):
raise ValueError('incompatible dimensions')
if np.iscomplexobj(vals) or np.iscomplexobj(b):
vals = np.asarray(vals, dtype=np.complex128, order='c')
b = np.asarray(b, dtype=np.complex128)
else:
vals = np.asarray(vals, dtype=np.double, order='c')
b = np.asarray(b, dtype=np.double)
if b.ndim == 1:
x, _ = levinson(vals, np.ascontiguousarray(b))
else:
b_shape = b.shape
b = b.reshape(b.shape[0], -1)
x = np.column_stack(
(levinson(vals, np.ascontiguousarray(b[:, i]))[0])
for i in range(b.shape[1]))
x = x.reshape(*b_shape)
return x
def _get_axis_len(aname, a, axis):
ax = axis
if ax < 0:
ax += a.ndim
if 0 <= ax < a.ndim:
return a.shape[ax]
raise ValueError("'%saxis' entry is out of bounds" % (aname,))
def solve_circulant(c, b, singular='raise', tol=None,
caxis=-1, baxis=0, outaxis=0):
"""Solve C x = b for x, where C is a circulant matrix.
`C` is the circulant matrix associated with the vector `c`.
The system is solved by doing division in Fourier space. The
calculation is::
x = ifft(fft(b) / fft(c))
where `fft` and `ifft` are the fast Fourier transform and its inverse,
respectively. For a large vector `c`, this is *much* faster than
solving the system with the full circulant matrix.
Parameters
----------
c : array_like
The coefficients of the circulant matrix.
b : array_like
Right-hand side matrix in ``a x = b``.
singular : str, optional
This argument controls how a near singular circulant matrix is
handled. If `singular` is "raise" and the circulant matrix is
near singular, a `LinAlgError` is raised. If `singular` is
"lstsq", the least squares solution is returned. Default is "raise".
tol : float, optional
If any eigenvalue of the circulant matrix has an absolute value
that is less than or equal to `tol`, the matrix is considered to be
near singular. If not given, `tol` is set to::
tol = abs_eigs.max() * abs_eigs.size * np.finfo(np.float64).eps
where `abs_eigs` is the array of absolute values of the eigenvalues
of the circulant matrix.
caxis : int
When `c` has dimension greater than 1, it is viewed as a collection
of circulant vectors. In this case, `caxis` is the axis of `c` that
holds the vectors of circulant coefficients.
baxis : int
When `b` has dimension greater than 1, it is viewed as a collection
of vectors. In this case, `baxis` is the axis of `b` that holds the
right-hand side vectors.
outaxis : int
When `c` or `b` are multidimensional, the value returned by
`solve_circulant` is multidimensional. In this case, `outaxis` is
the axis of the result that holds the solution vectors.
Returns
-------
x : ndarray
Solution to the system ``C x = b``.
Raises
------
LinAlgError
If the circulant matrix associated with `c` is near singular.
See Also
--------
circulant : circulant matrix
Notes
-----
For a one-dimensional vector `c` with length `m`, and an array `b`
with shape ``(m, ...)``,
solve_circulant(c, b)
returns the same result as
solve(circulant(c), b)
where `solve` and `circulant` are from `scipy.linalg`.
.. versionadded:: 0.16.0
Examples
--------
>>> from scipy.linalg import solve_circulant, solve, circulant, lstsq
>>> c = np.array([2, 2, 4])
>>> b = np.array([1, 2, 3])
>>> solve_circulant(c, b)
array([ 0.75, -0.25, 0.25])
Compare that result to solving the system with `scipy.linalg.solve`:
>>> solve(circulant(c), b)
array([ 0.75, -0.25, 0.25])
A singular example:
>>> c = np.array([1, 1, 0, 0])
>>> b = np.array([1, 2, 3, 4])
Calling ``solve_circulant(c, b)`` will raise a `LinAlgError`. For the
least square solution, use the option ``singular='lstsq'``:
>>> solve_circulant(c, b, singular='lstsq')
array([ 0.25, 1.25, 2.25, 1.25])
Compare to `scipy.linalg.lstsq`:
>>> x, resid, rnk, s = lstsq(circulant(c), b)
>>> x
array([ 0.25, 1.25, 2.25, 1.25])
A broadcasting example:
Suppose we have the vectors of two circulant matrices stored in an array
with shape (2, 5), and three `b` vectors stored in an array with shape
(3, 5). For example,
>>> c = np.array([[1.5, 2, 3, 0, 0], [1, 1, 4, 3, 2]])
>>> b = np.arange(15).reshape(-1, 5)
We want to solve all combinations of circulant matrices and `b` vectors,
with the result stored in an array with shape (2, 3, 5). When we
disregard the axes of `c` and `b` that hold the vectors of coefficients,
the shapes of the collections are (2,) and (3,), respectively, which are
not compatible for broadcasting. To have a broadcast result with shape
(2, 3), we add a trivial dimension to `c`: ``c[:, np.newaxis, :]`` has
shape (2, 1, 5). The last dimension holds the coefficients of the
circulant matrices, so when we call `solve_circulant`, we can use the
default ``caxis=-1``. The coefficients of the `b` vectors are in the last
dimension of the array `b`, so we use ``baxis=-1``. If we use the
default `outaxis`, the result will have shape (5, 2, 3), so we'll use
``outaxis=-1`` to put the solution vectors in the last dimension.
>>> x = solve_circulant(c[:, np.newaxis, :], b, baxis=-1, outaxis=-1)
>>> x.shape
(2, 3, 5)
>>> np.set_printoptions(precision=3) # For compact output of numbers.
>>> x
array([[[-0.118, 0.22 , 1.277, -0.142, 0.302],
[ 0.651, 0.989, 2.046, 0.627, 1.072],
[ 1.42 , 1.758, 2.816, 1.396, 1.841]],
[[ 0.401, 0.304, 0.694, -0.867, 0.377],
[ 0.856, 0.758, 1.149, -0.412, 0.831],
[ 1.31 , 1.213, 1.603, 0.042, 1.286]]])
Check by solving one pair of `c` and `b` vectors (cf. ``x[1, 1, :]``):
>>> solve_circulant(c[1], b[1, :])
array([ 0.856, 0.758, 1.149, -0.412, 0.831])
"""
c = np.atleast_1d(c)
nc = _get_axis_len("c", c, caxis)
b = np.atleast_1d(b)
nb = _get_axis_len("b", b, baxis)
if nc != nb:
raise ValueError('Incompatible c and b axis lengths')
fc = np.fft.fft(np.rollaxis(c, caxis, c.ndim), axis=-1)
abs_fc = np.abs(fc)
if tol is None:
# This is the same tolerance as used in np.linalg.matrix_rank.
tol = abs_fc.max(axis=-1) * nc * np.finfo(np.float64).eps
if tol.shape != ():
tol.shape = tol.shape + (1,)
else:
tol = np.atleast_1d(tol)
near_zeros = abs_fc <= tol
is_near_singular = np.any(near_zeros)
if is_near_singular:
if singular == 'raise':
raise LinAlgError("near singular circulant matrix.")
else:
# Replace the small values with 1 to avoid errors in the
# division fb/fc below.
fc[near_zeros] = 1
fb = np.fft.fft(np.rollaxis(b, baxis, b.ndim), axis=-1)
q = fb / fc
if is_near_singular:
# `near_zeros` is a boolean array, same shape as `c`, that is
# True where `fc` is (near) zero. `q` is the broadcasted result
# of fb / fc, so to set the values of `q` to 0 where `fc` is near
# zero, we use a mask that is the broadcast result of an array
# of True values shaped like `b` with `near_zeros`.
mask = np.ones_like(b, dtype=bool) & near_zeros
q[mask] = 0
x = np.fft.ifft(q, axis=-1)
if not (np.iscomplexobj(c) or np.iscomplexobj(b)):
x = x.real
if outaxis != -1:
x = np.rollaxis(x, -1, outaxis)
return x
# matrix inversion
def inv(a, overwrite_a=False, check_finite=True):
"""
Compute the inverse of a matrix.
Parameters
----------
a : array_like
Square matrix to be inverted.
overwrite_a : bool, optional
Discard data in `a` (may improve performance). Default is False.
check_finite : bool, optional
Whether to check that the input matrix contains only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Returns
-------
ainv : ndarray
Inverse of the matrix `a`.
Raises
------
LinAlgError
If `a` is singular.
ValueError
If `a` is not square, or not 2-dimensional.
Examples
--------
>>> from scipy import linalg
>>> a = np.array([[1., 2.], [3., 4.]])
>>> linalg.inv(a)
array([[-2. , 1. ],
[ 1.5, -0.5]])
>>> np.dot(a, linalg.inv(a))
array([[ 1., 0.],
[ 0., 1.]])
"""
a1 = _asarray_validated(a, check_finite=check_finite)
if len(a1.shape) != 2 or a1.shape[0] != a1.shape[1]:
raise ValueError('expected square matrix')
overwrite_a = overwrite_a or _datacopied(a1, a)
#XXX: I found no advantage or disadvantage of using finv.
# finv, = get_flinalg_funcs(('inv',),(a1,))
# if finv is not None:
# a_inv,info = finv(a1,overwrite_a=overwrite_a)
# if info==0:
# return a_inv
# if info>0: raise LinAlgError, "singular matrix"
# if info<0: raise ValueError('illegal value in %d-th argument of '
# 'internal inv.getrf|getri'%(-info))
getrf, getri, getri_lwork = get_lapack_funcs(('getrf', 'getri',
'getri_lwork'),
(a1,))
lu, piv, info = getrf(a1, overwrite_a=overwrite_a)
if info == 0:
lwork = _compute_lwork(getri_lwork, a1.shape[0])
# XXX: the following line fixes curious SEGFAULT when
# benchmarking 500x500 matrix inverse. This seems to
# be a bug in LAPACK ?getri routine because if lwork is
# minimal (when using lwork[0] instead of lwork[1]) then
# all tests pass. Further investigation is required if
# more such SEGFAULTs occur.
lwork = int(1.01 * lwork)
inv_a, info = getri(lu, piv, lwork=lwork, overwrite_lu=1)
if info > 0:
raise LinAlgError("singular matrix")
if info < 0:
raise ValueError('illegal value in %d-th argument of internal '
'getrf|getri' % -info)
return inv_a
# Determinant
def det(a, overwrite_a=False, check_finite=True):
"""
Compute the determinant of a matrix
The determinant of a square matrix is a value derived arithmetically
from the coefficients of the matrix.
The determinant for a 3x3 matrix, for example, is computed as follows::
a b c
d e f = A
g h i
det(A) = a*e*i + b*f*g + c*d*h - c*e*g - b*d*i - a*f*h
Parameters
----------
a : (M, M) array_like
A square matrix.
overwrite_a : bool, optional
Allow overwriting data in a (may enhance performance).
check_finite : bool, optional
Whether to check that the input matrix contains only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Returns
-------
det : float or complex
Determinant of `a`.
Notes
-----
The determinant is computed via LU factorization, LAPACK routine z/dgetrf.
Examples
--------
>>> from scipy import linalg
>>> a = np.array([[1,2,3], [4,5,6], [7,8,9]])
>>> linalg.det(a)
0.0
>>> a = np.array([[0,2,3], [4,5,6], [7,8,9]])
>>> linalg.det(a)
3.0
"""
a1 = _asarray_validated(a, check_finite=check_finite)
if len(a1.shape) != 2 or a1.shape[0] != a1.shape[1]:
raise ValueError('expected square matrix')
overwrite_a = overwrite_a or _datacopied(a1, a)
fdet, = get_flinalg_funcs(('det',), (a1,))
a_det, info = fdet(a1, overwrite_a=overwrite_a)
if info < 0:
raise ValueError('illegal value in %d-th argument of internal '
'det.getrf' % -info)
return a_det
# Linear Least Squares
class LstsqLapackError(LinAlgError):
pass
def lstsq(a, b, cond=None, overwrite_a=False, overwrite_b=False,
check_finite=True, lapack_driver=None):
"""
Compute least-squares solution to equation Ax = b.
Compute a vector x such that the 2-norm ``|b - A x|`` is minimized.
Parameters
----------
a : (M, N) array_like
Left hand side matrix (2-D array).
b : (M,) or (M, K) array_like
Right hand side matrix or vector (1-D or 2-D array).
cond : float, optional
Cutoff for 'small' singular values; used to determine effective
rank of a. Singular values smaller than
``rcond * largest_singular_value`` are considered zero.
overwrite_a : bool, optional
Discard data in `a` (may enhance performance). Default is False.
overwrite_b : bool, optional
Discard data in `b` (may enhance performance). Default is False.
check_finite : bool, optional
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
lapack_driver : str, optional
Which LAPACK driver is used to solve the least-squares problem.
Options are ``'gelsd'``, ``'gelsy'``, ``'gelss'``. Default
(``'gelsd'``) is a good choice. However, ``'gelsy'`` can be slightly
faster on many problems. ``'gelss'`` was used historically. It is
generally slow but uses less memory.
.. versionadded:: 0.17.0
Returns
-------
x : (N,) or (N, K) ndarray
Least-squares solution. Return shape matches shape of `b`.
residues : (0,) or () or (K,) ndarray
Sums of residues, squared 2-norm for each column in ``b - a x``.
If rank of matrix a is ``< N`` or ``N > M``, or ``'gelsy'`` is used,
        this is a length zero array. If b was 1-D, this is a () shape array
(numpy scalar), otherwise the shape is (K,).
rank : int
Effective rank of matrix `a`.
s : (min(M,N),) ndarray or None
Singular values of `a`. The condition number of a is
``abs(s[0] / s[-1])``. None is returned when ``'gelsy'`` is used.
Raises
------
LinAlgError
If computation does not converge.
ValueError
When parameters are wrong.
See Also
--------
optimize.nnls : linear least squares with non-negativity constraint
Examples
--------
>>> from scipy.linalg import lstsq
>>> import matplotlib.pyplot as plt
Suppose we have the following data:
>>> x = np.array([1, 2.5, 3.5, 4, 5, 7, 8.5])
>>> y = np.array([0.3, 1.1, 1.5, 2.0, 3.2, 6.6, 8.6])
We want to fit a quadratic polynomial of the form ``y = a + b*x**2``
to this data. We first form the "design matrix" M, with a constant
column of 1s and a column containing ``x**2``:
>>> M = x[:, np.newaxis]**[0, 2]
>>> M
array([[ 1. , 1. ],
[ 1. , 6.25],
[ 1. , 12.25],
[ 1. , 16. ],
[ 1. , 25. ],
[ 1. , 49. ],
[ 1. , 72.25]])
We want to find the least-squares solution to ``M.dot(p) = y``,
where ``p`` is a vector with length 2 that holds the parameters
``a`` and ``b``.
>>> p, res, rnk, s = lstsq(M, y)
>>> p
array([ 0.20925829, 0.12013861])
Plot the data and the fitted curve.
>>> plt.plot(x, y, 'o', label='data')
>>> xx = np.linspace(0, 9, 101)
>>> yy = p[0] + p[1]*xx**2
>>> plt.plot(xx, yy, label='least squares fit, $y = a + bx^2$')
>>> plt.xlabel('x')
>>> plt.ylabel('y')
>>> plt.legend(framealpha=1, shadow=True)
>>> plt.grid(alpha=0.25)
>>> plt.show()
"""
a1 = _asarray_validated(a, check_finite=check_finite)
b1 = _asarray_validated(b, check_finite=check_finite)
if len(a1.shape) != 2:
raise ValueError('expected matrix')
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
if m != b1.shape[0]:
raise ValueError('incompatible dimensions')
if m == 0 or n == 0: # Zero-sized problem, confuses LAPACK
x = np.zeros((n,) + b1.shape[1:], dtype=np.common_type(a1, b1))
if n == 0:
residues = np.linalg.norm(b1, axis=0)**2
else:
residues = np.empty((0,))
return x, residues, 0, np.empty((0,))
driver = lapack_driver
if driver is None:
driver = lstsq.default_lapack_driver
if driver not in ('gelsd', 'gelsy', 'gelss'):
raise ValueError('LAPACK driver "%s" is not found' % driver)
lapack_func, lapack_lwork = get_lapack_funcs((driver,
'%s_lwork' % driver),
(a1, b1))
real_data = True if (lapack_func.dtype.kind == 'f') else False
if m < n:
# need to extend b matrix as it will be filled with
# a larger solution matrix
if len(b1.shape) == 2:
b2 = np.zeros((n, nrhs), dtype=lapack_func.dtype)
b2[:m, :] = b1
else:
b2 = np.zeros(n, dtype=lapack_func.dtype)
b2[:m] = b1
b1 = b2
overwrite_a = overwrite_a or _datacopied(a1, a)
overwrite_b = overwrite_b or _datacopied(b1, b)
if cond is None:
cond = np.finfo(lapack_func.dtype).eps
if driver in ('gelss', 'gelsd'):
if driver == 'gelss':
lwork = _compute_lwork(lapack_lwork, m, n, nrhs, cond)
v, x, s, rank, work, info = lapack_func(a1, b1, cond, lwork,
overwrite_a=overwrite_a,
overwrite_b=overwrite_b)
elif driver == 'gelsd':
if real_data:
lwork, iwork = _compute_lwork(lapack_lwork, m, n, nrhs, cond)
if iwork == 0:
# this is LAPACK bug 0038: dgelsd does not provide the
# size of the iwork array in query mode. This bug was
# fixed in LAPACK 3.2.2, released July 21, 2010.
mesg = ("internal gelsd driver lwork query error, "
"required iwork dimension not returned. "
"This is likely the result of LAPACK bug "
"0038, fixed in LAPACK 3.2.2 (released "
"July 21, 2010). ")
if lapack_driver is None:
# restart with gelss
lstsq.default_lapack_driver = 'gelss'
mesg += "Falling back to 'gelss' driver."
warnings.warn(mesg, RuntimeWarning)
return lstsq(a, b, cond, overwrite_a, overwrite_b,
check_finite, lapack_driver='gelss')
# can't proceed, bail out
mesg += ("Use a different lapack_driver when calling lstsq"
" or upgrade LAPACK.")
raise LstsqLapackError(mesg)
x, s, rank, info = lapack_func(a1, b1, lwork,
iwork, cond, False, False)
else: # complex data
lwork, rwork, iwork = _compute_lwork(lapack_lwork, m, n,
nrhs, cond)
x, s, rank, info = lapack_func(a1, b1, lwork, rwork, iwork,
cond, False, False)
if info > 0:
raise LinAlgError("SVD did not converge in Linear Least Squares")
if info < 0:
raise ValueError('illegal value in %d-th argument of internal %s'
% (-info, lapack_driver))
resids = np.asarray([], dtype=x.dtype)
if m > n:
x1 = x[:n]
if rank == n:
resids = np.sum(np.abs(x[n:])**2, axis=0)
x = x1
return x, resids, rank, s
elif driver == 'gelsy':
lwork = _compute_lwork(lapack_lwork, m, n, nrhs, cond)
jptv = np.zeros((a1.shape[1], 1), dtype=np.int32)
v, x, j, rank, info = lapack_func(a1, b1, jptv, cond,
lwork, False, False)
if info < 0:
raise ValueError("illegal value in %d-th argument of internal "
"gelsy" % -info)
if m > n:
x1 = x[:n]
x = x1
return x, np.array([], x.dtype), rank, None
lstsq.default_lapack_driver = 'gelsd'
def pinv(a, cond=None, rcond=None, return_rank=False, check_finite=True):
"""
Compute the (Moore-Penrose) pseudo-inverse of a matrix.
Calculate a generalized inverse of a matrix using a least-squares
solver.
Parameters
----------
a : (M, N) array_like
Matrix to be pseudo-inverted.
cond, rcond : float, optional
Cutoff for 'small' singular values in the least-squares solver.
Singular values smaller than ``rcond * largest_singular_value``
are considered zero.
return_rank : bool, optional
if True, return the effective rank of the matrix
check_finite : bool, optional
Whether to check that the input matrix contains only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Returns
-------
B : (N, M) ndarray
The pseudo-inverse of matrix `a`.
rank : int
The effective rank of the matrix. Returned if return_rank == True
Raises
------
LinAlgError
If computation does not converge.
Examples
--------
>>> from scipy import linalg
>>> a = np.random.randn(9, 6)
>>> B = linalg.pinv(a)
>>> np.allclose(a, np.dot(a, np.dot(B, a)))
True
>>> np.allclose(B, np.dot(B, np.dot(a, B)))
True
"""
a = _asarray_validated(a, check_finite=check_finite)
b = np.identity(a.shape[0], dtype=a.dtype)
if rcond is not None:
cond = rcond
x, resids, rank, s = lstsq(a, b, cond=cond, check_finite=False)
if return_rank:
return x, rank
else:
return x
def pinv2(a, cond=None, rcond=None, return_rank=False, check_finite=True):
"""
Compute the (Moore-Penrose) pseudo-inverse of a matrix.
Calculate a generalized inverse of a matrix using its
singular-value decomposition and including all 'large' singular
values.
Parameters
----------
a : (M, N) array_like
Matrix to be pseudo-inverted.
cond, rcond : float or None
Cutoff for 'small' singular values.
Singular values smaller than ``rcond*largest_singular_value``
are considered zero.
If None or -1, suitable machine precision is used.
return_rank : bool, optional
if True, return the effective rank of the matrix
check_finite : bool, optional
Whether to check that the input matrix contains only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Returns
-------
B : (N, M) ndarray
The pseudo-inverse of matrix `a`.
rank : int
The effective rank of the matrix. Returned if return_rank == True
Raises
------
LinAlgError
If SVD computation does not converge.
Examples
--------
>>> from scipy import linalg
>>> a = np.random.randn(9, 6)
>>> B = linalg.pinv2(a)
>>> np.allclose(a, np.dot(a, np.dot(B, a)))
True
>>> np.allclose(B, np.dot(B, np.dot(a, B)))
True
"""
a = _asarray_validated(a, check_finite=check_finite)
u, s, vh = decomp_svd.svd(a, full_matrices=False, check_finite=False)
if rcond is not None:
cond = rcond
if cond in [None, -1]:
t = u.dtype.char.lower()
factor = {'f': 1E3, 'd': 1E6}
cond = factor[t] * np.finfo(t).eps
rank = np.sum(s > cond * np.max(s))
u = u[:, :rank]
u /= s[:rank]
B = np.transpose(np.conjugate(np.dot(u, vh[:rank])))
if return_rank:
return B, rank
else:
return B
def pinvh(a, cond=None, rcond=None, lower=True, return_rank=False,
check_finite=True):
"""
Compute the (Moore-Penrose) pseudo-inverse of a Hermitian matrix.
Calculate a generalized inverse of a Hermitian or real symmetric matrix
using its eigenvalue decomposition and including all eigenvalues with
'large' absolute value.
Parameters
----------
a : (N, N) array_like
        Real symmetric or complex Hermitian matrix to be pseudo-inverted
cond, rcond : float or None
Cutoff for 'small' eigenvalues.
Singular values smaller than rcond * largest_eigenvalue are considered
zero.
If None or -1, suitable machine precision is used.
lower : bool, optional
Whether the pertinent array data is taken from the lower or upper
triangle of a. (Default: lower)
return_rank : bool, optional
if True, return the effective rank of the matrix
check_finite : bool, optional
Whether to check that the input matrix contains only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Returns
-------
B : (N, N) ndarray
The pseudo-inverse of matrix `a`.
rank : int
The effective rank of the matrix. Returned if return_rank == True
Raises
------
LinAlgError
If eigenvalue does not converge
Examples
--------
>>> from scipy.linalg import pinvh
>>> a = np.random.randn(9, 6)
>>> a = np.dot(a, a.T)
>>> B = pinvh(a)
>>> np.allclose(a, np.dot(a, np.dot(B, a)))
True
>>> np.allclose(B, np.dot(B, np.dot(a, B)))
True
"""
a = _asarray_validated(a, check_finite=check_finite)
s, u = decomp.eigh(a, lower=lower, check_finite=False)
if rcond is not None:
cond = rcond
if cond in [None, -1]:
t = u.dtype.char.lower()
factor = {'f': 1E3, 'd': 1E6}
cond = factor[t] * np.finfo(t).eps
# For Hermitian matrices, singular values equal abs(eigenvalues)
above_cutoff = (abs(s) > cond * np.max(abs(s)))
psigma_diag = 1.0 / s[above_cutoff]
u = u[:, above_cutoff]
B = np.dot(u * psigma_diag, np.conjugate(u).T)
if return_rank:
return B, len(psigma_diag)
else:
return B
def matrix_balance(A, permute=True, scale=True, separate=False,
overwrite_a=False):
"""
Compute a diagonal similarity transformation for row/column balancing.
The balancing tries to equalize the row and column 1-norms by applying
a similarity transformation such that the magnitude variation of the
matrix entries is reflected to the scaling matrices.
Moreover, if enabled, the matrix is first permuted to isolate the upper
triangular parts of the matrix and, again if scaling is also enabled,
only the remaining subblocks are subjected to scaling.
The balanced matrix satisfies the following equality
.. math::
B = T^{-1} A T
The scaling coefficients are approximated to the nearest power of 2
to avoid round-off errors.
Parameters
----------
A : (n, n) array_like
Square data matrix for the balancing.
permute : bool, optional
The selector to define whether permutation of A is also performed
prior to scaling.
scale : bool, optional
The selector to turn on and off the scaling. If False, the matrix
will not be scaled.
separate : bool, optional
This switches from returning a full matrix of the transformation
to a tuple of two separate 1D permutation and scaling arrays.
overwrite_a : bool, optional
This is passed to xGEBAL directly. Essentially, overwrites the result
to the data. It might increase the space efficiency. See LAPACK manual
for details. This is False by default.
Returns
-------
B : (n, n) ndarray
Balanced matrix
T : (n, n) ndarray
A possibly permuted diagonal matrix whose nonzero entries are
integer powers of 2 to avoid numerical truncation errors.
scale, perm : (n,) ndarray
If ``separate`` keyword is set to True then instead of the array
``T`` above, the scaling and the permutation vectors are given
separately as a tuple without allocating the full array ``T``.
.. versionadded:: 0.19.0
Notes
-----
This algorithm is particularly useful for eigenvalue and matrix
decompositions and in many cases it is already called by various
LAPACK routines.
The algorithm is based on the well-known technique of [1]_ and has
been modified to account for special cases. See [2]_ for details
which have been implemented since LAPACK v3.5.0. Before this version
there are corner cases where balancing can actually worsen the
conditioning. See [3]_ for such examples.
The code is a wrapper around LAPACK's xGEBAL routine family for matrix
balancing.
Examples
--------
>>> from scipy import linalg
>>> x = np.array([[1,2,0], [9,1,0.01], [1,2,10*np.pi]])
>>> y, permscale = linalg.matrix_balance(x)
>>> np.abs(x).sum(axis=0) / np.abs(x).sum(axis=1)
array([ 3.66666667, 0.4995005 , 0.91312162])
>>> np.abs(y).sum(axis=0) / np.abs(y).sum(axis=1)
array([ 1.2 , 1.27041742, 0.92658316]) # may vary
>>> permscale # only powers of 2 (0.5 == 2^(-1))
array([[ 0.5, 0. , 0. ], # may vary
[ 0. , 1. , 0. ],
[ 0. , 0. , 1. ]])
References
----------
.. [1] : B.N. Parlett and C. Reinsch, "Balancing a Matrix for
Calculation of Eigenvalues and Eigenvectors", Numerische Mathematik,
Vol.13(4), 1969, DOI:10.1007/BF02165404
.. [2] : R. James, J. Langou, B.R. Lowery, "On matrix balancing and
eigenvector computation", 2014, Available online:
http://arxiv.org/abs/1401.5766
.. [3] : D.S. Watkins. A case where balancing is harmful.
Electron. Trans. Numer. Anal, Vol.23, 2006.
"""
A = np.atleast_2d(_asarray_validated(A, check_finite=True))
if not np.equal(*A.shape):
raise ValueError('The data matrix for balancing should be square.')
gebal = get_lapack_funcs(('gebal'), (A,))
B, lo, hi, ps, info = gebal(A, scale=scale, permute=permute,
overwrite_a=overwrite_a)
if info < 0:
raise ValueError('xGEBAL exited with the internal error '
'"illegal value in argument number {}.". See '
'LAPACK documentation for the xGEBAL error codes.'
''.format(-info))
# Separate the permutations from the scalings and then convert to int
scaling = np.ones_like(ps, dtype=float)
scaling[lo:hi+1] = ps[lo:hi+1]
# gebal uses 1-indexing
ps = ps.astype(int, copy=False) - 1
n = A.shape[0]
perm = np.arange(n)
# LAPACK permutes with the ordering n --> hi, then 0--> lo
if hi < n:
for ind, x in enumerate(ps[hi+1:][::-1], 1):
if n-ind == x:
continue
perm[[x, n-ind]] = perm[[n-ind, x]]
if lo > 0:
for ind, x in enumerate(ps[:lo]):
if ind == x:
continue
perm[[x, ind]] = perm[[ind, x]]
if separate:
return B, (scaling, perm)
# get the inverse permutation
iperm = np.empty_like(perm)
iperm[perm] = np.arange(n)
return B, np.diag(scaling)[iperm, :]
|
mit
|
cojacoo/testcases_echoRD
|
gen_test_coR1.py
|
1
|
4226
|
import numpy as np
import pandas as pd
import scipy as sp
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import os, sys
try:
import cPickle as pickle
except:
import pickle
#connect echoRD Tools
pathdir='../echoRD' #path to echoRD
lib_path = os.path.abspath(pathdir)
#sys.path.append(lib_path)
sys.path.append('/home/ka/ka_iwg/ka_oj4748/echoRD/echoRD')
import vG_conv as vG
from hydro_tools import plotparticles_t,hydroprofile,plotparticles_column
# Prepare echoRD
#connect to echoRD
import run_echoRD as rE
#connect and load project
[dr,mc,mcp,pdyn,cinf,vG]=rE.loadconnect(pathdir='../',mcinif='mcini_g63',experimental=True)
mc = mcp.mcpick_out(mc,'g63.pickle')
runname='gen_test_coR1'
mc.advectref='Shipitalo'
mc.soilmatrix=pd.read_csv(mc.matrixbf, sep=' ')
mc.soilmatrix['m'] = np.fmax(1-1/mc.soilmatrix.n,0.1)
precTS=pd.read_csv(mc.precf, sep=',',skiprows=3)
precTS.tstart-=340
precTS.tend-=340
precTS.intense=2.*0.063*60./1000.# intensity in m3/s
#use modified routines for binned retention definitions
#mc.part_sizefac=500
mc.gridcellA=abs(mc.mgrid.vertfac*mc.mgrid.latfac)
mc.particleA=abs(mc.gridcellA.values)/(2*mc.part_sizefac) #assume average ks at about 0.5 as reference of particle size
mc.particleD=2.*np.sqrt(mc.particleA/np.pi)
mc.particleV=3./4.*np.pi*(mc.particleD/2.)**3.
mc.particleV/=np.sqrt(abs(mc.gridcellA.values)) #assume grid size as 3rd dimension
mc.particleD/=np.sqrt(abs(mc.gridcellA.values))
#for column:
total_volume=np.pi*0.5**3
mc.particleV=total_volume/(mc.mgrid.vertgrid[0]*mc.mgrid.latgrid[0]*(2*mc.part_sizefac))
mc.particlemass=dr.waterdensity(np.array(20),np.array(-9999))*mc.particleV #assume 20C as reference for particle mass
mc=dr.ini_bins(mc)
mc=dr.mc_diffs(mc,np.max(np.max(mc.mxbin)))
[mc,particles,npart]=dr.particle_setup(mc)
#define bin assignment mode for infiltration particles
mc.LTEdef='instant'#'ks' #'instant' #'random'
mc.LTEmemory=mc.soilgrid.ravel()*0.
#new reference
mc.maccon=np.where(mc.macconnect.ravel()>0)[0] #index of all connected cells
mc.md_macdepth=np.abs(mc.md_macdepth)
mc.prects='column2'
mc.colref=True
#theta=mc.zgrid[:,1]*0.+0.273
#[mc,particles,npart]=rE.particle_setup_obs(theta,mc,vG,dr,pdyn)
[thS,npart]=pdyn.gridupdate_thS(particles.lat,particles.z,mc)
#[A,B]=plotparticles_t(particles,thS/100.,mc,vG,store=True)
# Run Model
mc.LTEpercentile=70 #new parameter
t_end=24.*3600.
saveDT=True
#1: MDA
#2: MED
#3: rand
infiltmeth='MDA'
#3: RWdiff
#4: Ediss
#exfiltmeth='RWdiff'
exfiltmeth='Ediss'
#5: film_uconst
#6: dynamic u
film=True
#7: maccoat1
#8: maccoat10
#9: maccoat100
macscale=1. #scale the macropore coating
clogswitch=False
infiltscale=False
#mc.dt=0.11
#mc.splitfac=5
#pdyn.part_diffusion_binned_pd(particles,npart,thS,mc)
#import profile
#%prun -D diff_pd_prof.prof pdyn.part_diffusion_binned_pd(particles,npart,thS,mc)
wdir='/beegfs/work/ka_oj4748/gen_tests'
drained=pd.DataFrame(np.array([]))
leftover=0
output=60. #mind to set also in TXstore.index definition
dummy=np.floor(t_end/output)
t=0.
ix=0
TSstore=np.zeros((int(dummy),mc.mgrid.cells[0],2))
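# Try to resume from a previously pickled run state: restore the particle
# ensemble plus the bookkeeping variables (leftover, drained, t, TSstore, ix)
# and continue at the next output step; if no pickle is found, start fresh.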
try:
#unpickle:
with open(''.join([wdir,'/results/Z',runname,'_Mstat.pick']),'rb') as handle:
pickle_l = pickle.load(handle)
dummyx = pickle.loads(pickle_l)
particles = pickle.loads(dummyx[0])
[leftover,drained,t,TSstore,ix] = pickle.loads(dummyx[1])
ix+=1
print('resuming into stored run at t='+str(t)+'...')
except:
print('starting new run...')
#loop through plot cycles
for i in np.arange(dummy.astype(int))[ix:]:
plotparticles_column(particles,mc,pdyn,vG,runname,t,i,saving=True,relative=False,wdir=wdir)
[particles,npart,thS,leftover,drained,t]=rE.CAOSpy_rundx1(i*output,(i+1)*output,mc,pdyn,cinf,precTS,particles,leftover,drained,6.,splitfac=4,prec_2D=False,maccoat=macscale,saveDT=saveDT,clogswitch=clogswitch,infilt_method=infiltmeth,exfilt_method=exfiltmeth,film=film,infiltscale=infiltscale)
TSstore[i,:,:]=rE.part_store(particles,mc)
#if i/5.==np.round(i/5.):
with open(''.join([wdir,'/results/X',runname,'_Mstat.pick']),'wb') as handle:
pickle.dump(pickle.dumps([leftover,drained,t,TSstore,i]), handle, protocol=2)
|
gpl-3.0
|
ilyes14/scikit-learn
|
sklearn/svm/tests/test_sparse.py
|
70
|
12992
|
from nose.tools import assert_raises, assert_true, assert_false
import numpy as np
from scipy import sparse
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_equal)
from sklearn import datasets, svm, linear_model, base
from sklearn.datasets import make_classification, load_digits, make_blobs
from sklearn.svm.tests import test_svm
from sklearn.utils import ConvergenceWarning
from sklearn.utils.extmath import safe_sparse_dot
from sklearn.utils.testing import assert_warns, assert_raise_message
# test sample 1
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
X_sp = sparse.lil_matrix(X)
Y = [1, 1, 1, 2, 2, 2]
T = np.array([[-1, -1], [2, 2], [3, 2]])
true_result = [1, 2, 2]
# test sample 2
X2 = np.array([[0, 0, 0], [1, 1, 1], [2, 0, 0, ],
[0, 0, 2], [3, 3, 3]])
X2_sp = sparse.dok_matrix(X2)
Y2 = [1, 2, 2, 2, 3]
T2 = np.array([[-1, -1, -1], [1, 1, 1], [2, 2, 2]])
true_result2 = [1, 2, 3]
iris = datasets.load_iris()
# permute
rng = np.random.RandomState(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# sparsify
iris.data = sparse.csr_matrix(iris.data)
def check_svm_model_equal(dense_svm, sparse_svm, X_train, y_train, X_test):
dense_svm.fit(X_train.toarray(), y_train)
if sparse.isspmatrix(X_test):
X_test_dense = X_test.toarray()
else:
X_test_dense = X_test
sparse_svm.fit(X_train, y_train)
assert_true(sparse.issparse(sparse_svm.support_vectors_))
assert_true(sparse.issparse(sparse_svm.dual_coef_))
assert_array_almost_equal(dense_svm.support_vectors_,
sparse_svm.support_vectors_.toarray())
assert_array_almost_equal(dense_svm.dual_coef_, sparse_svm.dual_coef_.toarray())
if dense_svm.kernel == "linear":
assert_true(sparse.issparse(sparse_svm.coef_))
assert_array_almost_equal(dense_svm.coef_, sparse_svm.coef_.toarray())
assert_array_almost_equal(dense_svm.support_, sparse_svm.support_)
assert_array_almost_equal(dense_svm.predict(X_test_dense), sparse_svm.predict(X_test))
assert_array_almost_equal(dense_svm.decision_function(X_test_dense),
sparse_svm.decision_function(X_test))
assert_array_almost_equal(dense_svm.decision_function(X_test_dense),
sparse_svm.decision_function(X_test_dense))
if isinstance(dense_svm, svm.OneClassSVM):
msg = "cannot use sparse input in 'OneClassSVM' trained on dense data"
else:
assert_array_almost_equal(dense_svm.predict_proba(X_test_dense),
sparse_svm.predict_proba(X_test), 4)
msg = "cannot use sparse input in 'SVC' trained on dense data"
if sparse.isspmatrix(X_test):
assert_raise_message(ValueError, msg, dense_svm.predict, X_test)
def test_svc():
"""Check that sparse SVC gives the same result as SVC"""
# many class dataset:
X_blobs, y_blobs = make_blobs(n_samples=100, centers=10, random_state=0)
X_blobs = sparse.csr_matrix(X_blobs)
datasets = [[X_sp, Y, T], [X2_sp, Y2, T2],
[X_blobs[:80], y_blobs[:80], X_blobs[80:]],
[iris.data, iris.target, iris.data]]
kernels = ["linear", "poly", "rbf", "sigmoid"]
for dataset in datasets:
for kernel in kernels:
clf = svm.SVC(kernel=kernel, probability=True, random_state=0)
sp_clf = svm.SVC(kernel=kernel, probability=True, random_state=0)
check_svm_model_equal(clf, sp_clf, *dataset)
def test_unsorted_indices():
# test that the result with sorted and unsorted indices in csr is the same
    # we use a subset of digits because iris, blobs or make_classification didn't
# show the problem
digits = load_digits()
X, y = digits.data[:50], digits.target[:50]
X_test = sparse.csr_matrix(digits.data[50:100])
X_sparse = sparse.csr_matrix(X)
coef_dense = svm.SVC(kernel='linear', probability=True,
random_state=0).fit(X, y).coef_
sparse_svc = svm.SVC(kernel='linear', probability=True,
random_state=0).fit(X_sparse, y)
coef_sorted = sparse_svc.coef_
# make sure dense and sparse SVM give the same result
assert_array_almost_equal(coef_dense, coef_sorted.toarray())
X_sparse_unsorted = X_sparse[np.arange(X.shape[0])]
X_test_unsorted = X_test[np.arange(X_test.shape[0])]
# make sure we scramble the indices
assert_false(X_sparse_unsorted.has_sorted_indices)
assert_false(X_test_unsorted.has_sorted_indices)
unsorted_svc = svm.SVC(kernel='linear', probability=True,
random_state=0).fit(X_sparse_unsorted, y)
coef_unsorted = unsorted_svc.coef_
# make sure unsorted indices give same result
assert_array_almost_equal(coef_unsorted.toarray(), coef_sorted.toarray())
assert_array_almost_equal(sparse_svc.predict_proba(X_test_unsorted),
sparse_svc.predict_proba(X_test))
def test_svc_with_custom_kernel():
kfunc = lambda x, y: safe_sparse_dot(x, y.T)
clf_lin = svm.SVC(kernel='linear').fit(X_sp, Y)
clf_mylin = svm.SVC(kernel=kfunc).fit(X_sp, Y)
assert_array_equal(clf_lin.predict(X_sp), clf_mylin.predict(X_sp))
def test_svc_iris():
# Test the sparse SVC with the iris dataset
for k in ('linear', 'poly', 'rbf'):
sp_clf = svm.SVC(kernel=k).fit(iris.data, iris.target)
clf = svm.SVC(kernel=k).fit(iris.data.toarray(), iris.target)
assert_array_almost_equal(clf.support_vectors_,
sp_clf.support_vectors_.toarray())
assert_array_almost_equal(clf.dual_coef_, sp_clf.dual_coef_.toarray())
assert_array_almost_equal(
clf.predict(iris.data.toarray()), sp_clf.predict(iris.data))
if k == 'linear':
assert_array_almost_equal(clf.coef_, sp_clf.coef_.toarray())
def test_sparse_decision_function():
#Test decision_function
#Sanity check, test that decision_function implemented in python
#returns the same as the one in libsvm
# multi class:
clf = svm.SVC(kernel='linear', C=0.1).fit(iris.data, iris.target)
dec = safe_sparse_dot(iris.data, clf.coef_.T) + clf.intercept_
assert_array_almost_equal(dec, clf.decision_function(iris.data))
# binary:
clf.fit(X, Y)
dec = np.dot(X, clf.coef_.T) + clf.intercept_
prediction = clf.predict(X)
assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
assert_array_almost_equal(
prediction,
clf.classes_[(clf.decision_function(X) > 0).astype(np.int).ravel()])
expected = np.array([-1., -0.66, -1., 0.66, 1., 1.])
assert_array_almost_equal(clf.decision_function(X), expected, 2)
def test_error():
# Test that it gives proper exception on deficient input
# impossible value of C
assert_raises(ValueError, svm.SVC(C=-1).fit, X, Y)
# impossible value of nu
clf = svm.NuSVC(nu=0.0)
assert_raises(ValueError, clf.fit, X_sp, Y)
Y2 = Y[:-1] # wrong dimensions for labels
assert_raises(ValueError, clf.fit, X_sp, Y2)
clf = svm.SVC()
clf.fit(X_sp, Y)
assert_array_equal(clf.predict(T), true_result)
def test_linearsvc():
# Similar to test_SVC
clf = svm.LinearSVC(random_state=0).fit(X, Y)
sp_clf = svm.LinearSVC(random_state=0).fit(X_sp, Y)
assert_true(sp_clf.fit_intercept)
assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=4)
assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=4)
assert_array_almost_equal(clf.predict(X), sp_clf.predict(X_sp))
clf.fit(X2, Y2)
sp_clf.fit(X2_sp, Y2)
assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=4)
assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=4)
def test_linearsvc_iris():
# Test the sparse LinearSVC with the iris dataset
sp_clf = svm.LinearSVC(random_state=0).fit(iris.data, iris.target)
clf = svm.LinearSVC(random_state=0).fit(iris.data.toarray(), iris.target)
assert_equal(clf.fit_intercept, sp_clf.fit_intercept)
assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=1)
assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=1)
assert_array_almost_equal(
clf.predict(iris.data.toarray()), sp_clf.predict(iris.data))
# check decision_function
pred = np.argmax(sp_clf.decision_function(iris.data), 1)
assert_array_almost_equal(pred, clf.predict(iris.data.toarray()))
# sparsify the coefficients on both models and check that they still
# produce the same results
clf.sparsify()
assert_array_equal(pred, clf.predict(iris.data))
sp_clf.sparsify()
assert_array_equal(pred, sp_clf.predict(iris.data))
def test_weight():
# Test class weights
X_, y_ = make_classification(n_samples=200, n_features=100,
weights=[0.833, 0.167], random_state=0)
X_ = sparse.csr_matrix(X_)
for clf in (linear_model.LogisticRegression(),
svm.LinearSVC(random_state=0),
svm.SVC()):
clf.set_params(class_weight={0: 5})
clf.fit(X_[:180], y_[:180])
y_pred = clf.predict(X_[180:])
assert_true(np.sum(y_pred == y_[180:]) >= 11)
def test_sample_weights():
# Test weights on individual samples
clf = svm.SVC()
clf.fit(X_sp, Y)
assert_array_equal(clf.predict([X[2]]), [1.])
sample_weight = [.1] * 3 + [10] * 3
clf.fit(X_sp, Y, sample_weight=sample_weight)
assert_array_equal(clf.predict([X[2]]), [2.])
def test_sparse_liblinear_intercept_handling():
# Test that sparse liblinear honours intercept_scaling param
test_svm.test_dense_liblinear_intercept_handling(svm.LinearSVC)
def test_sparse_oneclasssvm():
"""Check that sparse OneClassSVM gives the same result as dense OneClassSVM"""
# many class dataset:
X_blobs, _ = make_blobs(n_samples=100, centers=10, random_state=0)
X_blobs = sparse.csr_matrix(X_blobs)
datasets = [[X_sp, None, T], [X2_sp, None, T2],
[X_blobs[:80], None, X_blobs[80:]],
[iris.data, None, iris.data]]
kernels = ["linear", "poly", "rbf", "sigmoid"]
for dataset in datasets:
for kernel in kernels:
clf = svm.OneClassSVM(kernel=kernel, random_state=0)
sp_clf = svm.OneClassSVM(kernel=kernel, random_state=0)
check_svm_model_equal(clf, sp_clf, *dataset)
def test_sparse_realdata():
# Test on a subset from the 20newsgroups dataset.
    # This catches some bugs if input is not correctly converted into
# sparse format or weights are not correctly initialized.
data = np.array([0.03771744, 0.1003567, 0.01174647, 0.027069])
indices = np.array([6, 5, 35, 31])
indptr = np.array(
[0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 4, 4, 4])
X = sparse.csr_matrix((data, indices, indptr))
y = np.array(
[1., 0., 2., 2., 1., 1., 1., 2., 2., 0., 1., 2., 2.,
0., 2., 0., 3., 0., 3., 0., 1., 1., 3., 2., 3., 2.,
0., 3., 1., 0., 2., 1., 2., 0., 1., 0., 2., 3., 1.,
3., 0., 1., 0., 0., 2., 0., 1., 2., 2., 2., 3., 2.,
0., 3., 2., 1., 2., 3., 2., 2., 0., 1., 0., 1., 2.,
3., 0., 0., 2., 2., 1., 3., 1., 1., 0., 1., 2., 1.,
1., 3.])
clf = svm.SVC(kernel='linear').fit(X.toarray(), y)
sp_clf = svm.SVC(kernel='linear').fit(sparse.coo_matrix(X), y)
assert_array_equal(clf.support_vectors_, sp_clf.support_vectors_.toarray())
assert_array_equal(clf.dual_coef_, sp_clf.dual_coef_.toarray())
def test_sparse_svc_clone_with_callable_kernel():
# Test that the "dense_fit" is called even though we use sparse input
# meaning that everything works fine.
a = svm.SVC(C=1, kernel=lambda x, y: x * y.T, probability=True,
random_state=0)
b = base.clone(a)
b.fit(X_sp, Y)
pred = b.predict(X_sp)
b.predict_proba(X_sp)
dense_svm = svm.SVC(C=1, kernel=lambda x, y: np.dot(x, y.T),
probability=True, random_state=0)
pred_dense = dense_svm.fit(X, Y).predict(X)
assert_array_equal(pred_dense, pred)
# b.decision_function(X_sp) # XXX : should be supported
def test_timeout():
sp = svm.SVC(C=1, kernel=lambda x, y: x * y.T, probability=True,
random_state=0, max_iter=1)
assert_warns(ConvergenceWarning, sp.fit, X_sp, Y)
def test_consistent_proba():
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_1 = a.fit(X, Y).predict_proba(X)
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_2 = a.fit(X, Y).predict_proba(X)
assert_array_almost_equal(proba_1, proba_2)
|
bsd-3-clause
|
garyfeng/pybrain
|
examples/rl/environments/shipsteer/shipbench_sde.py
|
26
|
3454
|
#!/usr/bin/env python
from __future__ import print_function
#########################################################################
# Reinforcement Learning with SPE on the ShipSteering Environment
#
# Requirements:
# pybrain (tested on rev. 1195, ship env rev. 1202)
# Synopsis:
# shipbenchm.py [<True|False> [logfile]]
# (first argument is graphics flag)
#########################################################################
__author__ = "Martin Felder, Thomas Rueckstiess"
__version__ = '$Id$'
#---
# default backend GtkAgg does not plot properly on Ubuntu 8.04
import matplotlib
matplotlib.use('TkAgg')
#---
from pybrain.rl.environments.shipsteer import ShipSteeringEnvironment
from pybrain.rl.environments.shipsteer import GoNorthwardTask
from pybrain.rl.agents import LearningAgent
from pybrain.rl.learners.directsearch.enac import ENAC
from pybrain.rl.experiments.episodic import EpisodicExperiment
from pybrain.tools.shortcuts import buildNetwork
from pybrain.tools.plotting import MultilinePlotter
from pylab import figure, ion
from scipy import mean
import sys
if len(sys.argv) > 1:
useGraphics = eval(sys.argv[1])
else:
useGraphics = False
# create task
env=ShipSteeringEnvironment()
maxsteps = 500
task = GoNorthwardTask(env=env, maxsteps = maxsteps)
# task.env.setRenderer( CartPoleRenderer())
# create controller network
#net = buildNetwork(task.outdim, 7, task.indim, bias=True, outputbias=False)
net = buildNetwork(task.outdim, task.indim, bias=False)
#net.initParams(0.0)
# create agent
learner = ENAC()
learner.gd.rprop = True
# only relevant for RP
learner.gd.deltamin = 0.0001
#agent.learner.gd.deltanull = 0.05
# only relevant for BP
learner.gd.alpha = 0.01
learner.gd.momentum = 0.9
agent = LearningAgent(net, learner)
agent.actaspg = False
# create experiment
experiment = EpisodicExperiment(task, agent)
# print weights at beginning
print(agent.module.params)
rewards = []
if useGraphics:
figure()
ion()
pl = MultilinePlotter(autoscale=1.2, xlim=[0, 50], ylim=[0, 1])
pl.setLineStyle(linewidth=2)
# queued version
# experiment._fillQueue(30)
# while True:
# experiment._stepQueueLoop()
# # rewards.append(mean(agent.history.getSumOverSequences('reward')))
# print agent.module.getParameters(),
# print mean(agent.history.getSumOverSequences('reward'))
# clf()
# plot(rewards)
# episodic version
x = 0
batch = 30 #number of samples per gradient estimate (was: 20; more here due to stochastic setting)
while x<5000:
#while True:
experiment.doEpisodes(batch)
x += batch
reward = mean(agent.history.getSumOverSequences('reward'))*task.rewardscale
if useGraphics:
pl.addData(0,x,reward)
print(agent.module.params)
print(reward)
#if reward > 3:
# pass
agent.learn()
agent.reset()
if useGraphics:
pl.update()
if len(sys.argv) > 2:
        agent.history.saveToFile(sys.argv[2], protocol=-1, arraysonly=True)  # argv[2] is the logfile; argv[1] is the graphics flag
if useGraphics:
pl.show( popup = True)
#To view what the simulation is doing at the moment, set the environment with True, go to pybrain/rl/environments/ode/ and start viewer.py (python-openGL must be installed, see the PyBrain documentation)
## performance:
## experiment.doEpisodes(5) * 100 without weave:
## real 2m39.683s
## user 2m33.358s
## sys 0m5.960s
## experiment.doEpisodes(5) * 100 with weave:
##real 2m41.275s
##user 2m35.310s
##sys 0m5.192s
##
|
bsd-3-clause
|
seckcoder/lang-learn
|
python/sklearn/examples/svm/plot_oneclass.py
|
5
|
2219
|
"""
==========================================
One-class SVM with non-linear kernel (RBF)
==========================================
:ref:`One-class SVM <svm_outlier_detection>` is an unsupervised
algorithm that learns a decision function for novelty detection:
classifying new data as similar or different to the training set.
"""
print __doc__
import numpy as np
import pylab as pl
import matplotlib.font_manager
from sklearn import svm
xx, yy = np.meshgrid(np.linspace(-5, 5, 500), np.linspace(-5, 5, 500))
# Generate train data
X = 0.3 * np.random.randn(100, 2)
X_train = np.r_[X + 2, X - 2]
# Generate some regular novel observations
X = 0.3 * np.random.randn(20, 2)
X_test = np.r_[X + 2, X - 2]
# Generate some abnormal novel observations
X_outliers = np.random.uniform(low=-4, high=4, size=(20, 2))
# fit the model
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1)
clf.fit(X_train)
y_pred_train = clf.predict(X_train)
y_pred_test = clf.predict(X_test)
y_pred_outliers = clf.predict(X_outliers)
n_error_train = y_pred_train[y_pred_train == -1].size
n_error_test = y_pred_test[y_pred_test == -1].size
n_error_outliers = y_pred_outliers[y_pred_outliers == 1].size
# plot the line, the points, and the nearest vectors to the plane
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
pl.title("Novelty Detection")
pl.contourf(xx, yy, Z, levels=np.linspace(Z.min(), 0, 7), cmap=pl.cm.Blues_r)
a = pl.contour(xx, yy, Z, levels=[0], linewidths=2, colors='red')
pl.contourf(xx, yy, Z, levels=[0, Z.max()], colors='orange')
b1 = pl.scatter(X_train[:, 0], X_train[:, 1], c='white')
b2 = pl.scatter(X_test[:, 0], X_test[:, 1], c='green')
c = pl.scatter(X_outliers[:, 0], X_outliers[:, 1], c='red')
pl.axis('tight')
pl.xlim((-5, 5))
pl.ylim((-5, 5))
pl.legend([a.collections[0], b1, b2, c],
["learned frontier", "training observations",
"new regular observations", "new abnormal observations"],
loc="upper left",
prop=matplotlib.font_manager.FontProperties(size=11))
pl.xlabel(
"error train: %d/200 ; errors novel regular: %d/20 ; " \
"errors novel abnormal: %d/20"
% (n_error_train, n_error_test, n_error_outliers))
pl.show()
|
unlicense
|
nelango/ViralityAnalysis
|
model/lib/sklearn/linear_model/stochastic_gradient.py
|
31
|
50760
|
# Authors: Peter Prettenhofer <[email protected]> (main author)
# Mathieu Blondel (partial_fit support)
#
# License: BSD 3 clause
"""Classification and regression using Stochastic Gradient Descent (SGD)."""
import numpy as np
from abc import ABCMeta, abstractmethod
from ..externals.joblib import Parallel, delayed
from .base import LinearClassifierMixin, SparseCoefMixin
from .base import make_dataset
from ..base import BaseEstimator, RegressorMixin
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import (check_array, check_random_state, check_X_y,
deprecated)
from ..utils.extmath import safe_sparse_dot
from ..utils.multiclass import _check_partial_fit_first_call
from ..utils.validation import check_is_fitted
from ..externals import six
from .sgd_fast import plain_sgd, average_sgd
from ..utils.fixes import astype
from ..utils import compute_class_weight
from .sgd_fast import Hinge
from .sgd_fast import SquaredHinge
from .sgd_fast import Log
from .sgd_fast import ModifiedHuber
from .sgd_fast import SquaredLoss
from .sgd_fast import Huber
from .sgd_fast import EpsilonInsensitive
from .sgd_fast import SquaredEpsilonInsensitive
LEARNING_RATE_TYPES = {"constant": 1, "optimal": 2, "invscaling": 3,
"pa1": 4, "pa2": 5}
PENALTY_TYPES = {"none": 0, "l2": 2, "l1": 1, "elasticnet": 3}
DEFAULT_EPSILON = 0.1
# Default value of ``epsilon`` parameter.
class BaseSGD(six.with_metaclass(ABCMeta, BaseEstimator, SparseCoefMixin)):
"""Base class for SGD classification and regression."""
def __init__(self, loss, penalty='l2', alpha=0.0001, C=1.0,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=0.1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
warm_start=False, average=False):
self.loss = loss
self.penalty = penalty
self.learning_rate = learning_rate
self.epsilon = epsilon
self.alpha = alpha
self.C = C
self.l1_ratio = l1_ratio
self.fit_intercept = fit_intercept
self.n_iter = n_iter
self.shuffle = shuffle
self.random_state = random_state
self.verbose = verbose
self.eta0 = eta0
self.power_t = power_t
self.warm_start = warm_start
self.average = average
self._validate_params()
self.coef_ = None
if self.average > 0:
self.standard_coef_ = None
self.average_coef_ = None
# iteration count for learning rate schedule
# must not be int (e.g. if ``learning_rate=='optimal'``)
self.t_ = None
def set_params(self, *args, **kwargs):
super(BaseSGD, self).set_params(*args, **kwargs)
self._validate_params()
return self
@abstractmethod
def fit(self, X, y):
"""Fit model."""
def _validate_params(self):
"""Validate input params. """
if not isinstance(self.shuffle, bool):
raise ValueError("shuffle must be either True or False")
if self.n_iter <= 0:
raise ValueError("n_iter must be > zero")
if not (0.0 <= self.l1_ratio <= 1.0):
raise ValueError("l1_ratio must be in [0, 1]")
if self.alpha < 0.0:
raise ValueError("alpha must be >= 0")
if self.learning_rate in ("constant", "invscaling"):
if self.eta0 <= 0.0:
raise ValueError("eta0 must be > 0")
if self.learning_rate == "optimal" and self.alpha == 0:
raise ValueError("alpha must be > 0 since "
"learning_rate is 'optimal'. alpha is used "
"to compute the optimal learning rate.")
# raises ValueError if not registered
self._get_penalty_type(self.penalty)
self._get_learning_rate_type(self.learning_rate)
if self.loss not in self.loss_functions:
raise ValueError("The loss %s is not supported. " % self.loss)
def _get_loss_function(self, loss):
"""Get concrete ``LossFunction`` object for str ``loss``. """
try:
loss_ = self.loss_functions[loss]
loss_class, args = loss_[0], loss_[1:]
if loss in ('huber', 'epsilon_insensitive',
'squared_epsilon_insensitive'):
args = (self.epsilon, )
return loss_class(*args)
except KeyError:
raise ValueError("The loss %s is not supported. " % loss)
def _get_learning_rate_type(self, learning_rate):
try:
return LEARNING_RATE_TYPES[learning_rate]
except KeyError:
raise ValueError("learning rate %s "
"is not supported. " % learning_rate)
def _get_penalty_type(self, penalty):
penalty = str(penalty).lower()
try:
return PENALTY_TYPES[penalty]
except KeyError:
raise ValueError("Penalty %s is not supported. " % penalty)
def _validate_sample_weight(self, sample_weight, n_samples):
"""Set the sample weight array."""
if sample_weight is None:
# uniform sample weights
sample_weight = np.ones(n_samples, dtype=np.float64, order='C')
else:
# user-provided array
sample_weight = np.asarray(sample_weight, dtype=np.float64,
order="C")
if sample_weight.shape[0] != n_samples:
raise ValueError("Shapes of X and sample_weight do not match.")
return sample_weight
def _allocate_parameter_mem(self, n_classes, n_features, coef_init=None,
intercept_init=None):
"""Allocate mem for parameters; initialize if provided."""
if n_classes > 2:
# allocate coef_ for multi-class
if coef_init is not None:
coef_init = np.asarray(coef_init, order="C")
if coef_init.shape != (n_classes, n_features):
raise ValueError("Provided ``coef_`` does not match "
"dataset. ")
self.coef_ = coef_init
else:
self.coef_ = np.zeros((n_classes, n_features),
dtype=np.float64, order="C")
# allocate intercept_ for multi-class
if intercept_init is not None:
intercept_init = np.asarray(intercept_init, order="C")
if intercept_init.shape != (n_classes, ):
raise ValueError("Provided intercept_init "
"does not match dataset.")
self.intercept_ = intercept_init
else:
self.intercept_ = np.zeros(n_classes, dtype=np.float64,
order="C")
else:
# allocate coef_ for binary problem
if coef_init is not None:
coef_init = np.asarray(coef_init, dtype=np.float64,
order="C")
coef_init = coef_init.ravel()
if coef_init.shape != (n_features,):
raise ValueError("Provided coef_init does not "
"match dataset.")
self.coef_ = coef_init
else:
self.coef_ = np.zeros(n_features,
dtype=np.float64,
order="C")
# allocate intercept_ for binary problem
if intercept_init is not None:
intercept_init = np.asarray(intercept_init, dtype=np.float64)
if intercept_init.shape != (1,) and intercept_init.shape != ():
raise ValueError("Provided intercept_init "
"does not match dataset.")
self.intercept_ = intercept_init.reshape(1,)
else:
self.intercept_ = np.zeros(1, dtype=np.float64, order="C")
# initialize average parameters
if self.average > 0:
self.standard_coef_ = self.coef_
self.standard_intercept_ = self.intercept_
self.average_coef_ = np.zeros(self.coef_.shape,
dtype=np.float64,
order="C")
self.average_intercept_ = np.zeros(self.standard_intercept_.shape,
dtype=np.float64,
order="C")
def _prepare_fit_binary(est, y, i):
"""Initialization for fit_binary.
Returns y, coef, intercept.
"""
y_i = np.ones(y.shape, dtype=np.float64, order="C")
y_i[y != est.classes_[i]] = -1.0
average_intercept = 0
average_coef = None
if len(est.classes_) == 2:
if not est.average:
coef = est.coef_.ravel()
intercept = est.intercept_[0]
else:
coef = est.standard_coef_.ravel()
intercept = est.standard_intercept_[0]
average_coef = est.average_coef_.ravel()
average_intercept = est.average_intercept_[0]
else:
if not est.average:
coef = est.coef_[i]
intercept = est.intercept_[i]
else:
coef = est.standard_coef_[i]
intercept = est.standard_intercept_[i]
average_coef = est.average_coef_[i]
average_intercept = est.average_intercept_[i]
return y_i, coef, intercept, average_coef, average_intercept
def fit_binary(est, i, X, y, alpha, C, learning_rate, n_iter,
pos_weight, neg_weight, sample_weight):
"""Fit a single binary classifier.
The i'th class is considered the "positive" class.
"""
# if average is not true, average_coef, and average_intercept will be
# unused
y_i, coef, intercept, average_coef, average_intercept = \
_prepare_fit_binary(est, y, i)
assert y_i.shape[0] == y.shape[0] == sample_weight.shape[0]
dataset, intercept_decay = make_dataset(X, y_i, sample_weight)
penalty_type = est._get_penalty_type(est.penalty)
learning_rate_type = est._get_learning_rate_type(learning_rate)
# XXX should have random_state_!
random_state = check_random_state(est.random_state)
# numpy mtrand expects a C long which is a signed 32 bit integer under
# Windows
seed = random_state.randint(0, np.iinfo(np.int32).max)
if not est.average:
return plain_sgd(coef, intercept, est.loss_function,
penalty_type, alpha, C, est.l1_ratio,
dataset, n_iter, int(est.fit_intercept),
int(est.verbose), int(est.shuffle), seed,
pos_weight, neg_weight,
learning_rate_type, est.eta0,
est.power_t, est.t_, intercept_decay)
else:
standard_coef, standard_intercept, average_coef, \
average_intercept = average_sgd(coef, intercept, average_coef,
average_intercept,
est.loss_function, penalty_type,
alpha, C, est.l1_ratio, dataset,
n_iter, int(est.fit_intercept),
int(est.verbose), int(est.shuffle),
seed, pos_weight, neg_weight,
learning_rate_type, est.eta0,
est.power_t, est.t_,
intercept_decay,
est.average)
if len(est.classes_) == 2:
est.average_intercept_[0] = average_intercept
else:
est.average_intercept_[i] = average_intercept
return standard_coef, standard_intercept
class BaseSGDClassifier(six.with_metaclass(ABCMeta, BaseSGD,
LinearClassifierMixin)):
loss_functions = {
"hinge": (Hinge, 1.0),
"squared_hinge": (SquaredHinge, 1.0),
"perceptron": (Hinge, 0.0),
"log": (Log, ),
"modified_huber": (ModifiedHuber, ),
"squared_loss": (SquaredLoss, ),
"huber": (Huber, DEFAULT_EPSILON),
"epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
"squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
DEFAULT_EPSILON),
}
@abstractmethod
def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
fit_intercept=True, n_iter=5, shuffle=True, verbose=0,
epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
class_weight=None, warm_start=False, average=False):
super(BaseSGDClassifier, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
self.class_weight = class_weight
self.classes_ = None
self.n_jobs = int(n_jobs)
def _partial_fit(self, X, y, alpha, C,
loss, learning_rate, n_iter,
classes, sample_weight,
coef_init, intercept_init):
X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C")
n_samples, n_features = X.shape
self._validate_params()
_check_partial_fit_first_call(self, classes)
n_classes = self.classes_.shape[0]
# Allocate datastructures from input arguments
self._expanded_class_weight = compute_class_weight(self.class_weight,
self.classes_, y)
sample_weight = self._validate_sample_weight(sample_weight, n_samples)
if self.coef_ is None or coef_init is not None:
self._allocate_parameter_mem(n_classes, n_features,
coef_init, intercept_init)
elif n_features != self.coef_.shape[-1]:
raise ValueError("Number of features %d does not match previous "
"data %d." % (n_features, self.coef_.shape[-1]))
self.loss_function = self._get_loss_function(loss)
if self.t_ is None:
self.t_ = 1.0
# delegate to concrete training procedure
if n_classes > 2:
self._fit_multiclass(X, y, alpha=alpha, C=C,
learning_rate=learning_rate,
sample_weight=sample_weight, n_iter=n_iter)
elif n_classes == 2:
self._fit_binary(X, y, alpha=alpha, C=C,
learning_rate=learning_rate,
sample_weight=sample_weight, n_iter=n_iter)
else:
raise ValueError("The number of class labels must be "
"greater than one.")
return self
def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
intercept_init=None, sample_weight=None):
if hasattr(self, "classes_"):
self.classes_ = None
X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C")
n_samples, n_features = X.shape
# labels can be encoded as float, int, or string literals
# np.unique sorts in asc order; largest class id is positive class
classes = np.unique(y)
if self.warm_start and self.coef_ is not None:
if coef_init is None:
coef_init = self.coef_
if intercept_init is None:
intercept_init = self.intercept_
else:
self.coef_ = None
self.intercept_ = None
if self.average > 0:
self.standard_coef_ = self.coef_
self.standard_intercept_ = self.intercept_
self.average_coef_ = None
self.average_intercept_ = None
# Clear iteration count for multiple call to fit.
self.t_ = None
self._partial_fit(X, y, alpha, C, loss, learning_rate, self.n_iter,
classes, sample_weight, coef_init, intercept_init)
return self
def _fit_binary(self, X, y, alpha, C, sample_weight,
learning_rate, n_iter):
"""Fit a binary classifier on X and y. """
coef, intercept = fit_binary(self, 1, X, y, alpha, C,
learning_rate, n_iter,
self._expanded_class_weight[1],
self._expanded_class_weight[0],
sample_weight)
self.t_ += n_iter * X.shape[0]
# need to be 2d
if self.average > 0:
if self.average <= self.t_ - 1:
self.coef_ = self.average_coef_.reshape(1, -1)
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_.reshape(1, -1)
self.standard_intercept_ = np.atleast_1d(intercept)
self.intercept_ = self.standard_intercept_
else:
self.coef_ = coef.reshape(1, -1)
# intercept is a float, need to convert it to an array of length 1
self.intercept_ = np.atleast_1d(intercept)
def _fit_multiclass(self, X, y, alpha, C, learning_rate,
sample_weight, n_iter):
"""Fit a multi-class classifier by combining binary classifiers
Each binary classifier predicts one class versus all others. This
strategy is called OVA: One Versus All.
"""
# Use joblib to fit OvA in parallel.
result = Parallel(n_jobs=self.n_jobs, backend="threading",
verbose=self.verbose)(
delayed(fit_binary)(self, i, X, y, alpha, C, learning_rate,
n_iter, self._expanded_class_weight[i], 1.,
sample_weight)
for i in range(len(self.classes_)))
for i, (_, intercept) in enumerate(result):
self.intercept_[i] = intercept
self.t_ += n_iter * X.shape[0]
if self.average > 0:
if self.average <= self.t_ - 1.0:
self.coef_ = self.average_coef_
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_
self.standard_intercept_ = np.atleast_1d(self.intercept_)
self.intercept_ = self.standard_intercept_
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Subset of the training data
y : numpy array, shape (n_samples,)
Subset of the target values
classes : array, shape (n_classes,)
Classes across all calls to partial_fit.
            Can be obtained via `np.unique(y_all)`, where y_all is the
target vector of the entire dataset.
This argument is required for the first call to partial_fit
and can be omitted in the subsequent calls.
Note that y doesn't need to contain all labels in `classes`.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : returns an instance of self.
"""
if self.class_weight in ['balanced', 'auto']:
            raise ValueError("class_weight '{0}' is not supported for "
                             "partial_fit. In order to use 'balanced' weights,"
                             " use compute_class_weight('{0}', classes, y). "
                             "In place of y you can use a large enough sample "
"of the full training set target to properly "
"estimate the class frequency distributions. "
"Pass the resulting weights as the class_weight "
"parameter.".format(self.class_weight))
return self._partial_fit(X, y, alpha=self.alpha, C=1.0, loss=self.loss,
learning_rate=self.learning_rate, n_iter=1,
classes=classes, sample_weight=sample_weight,
coef_init=None, intercept_init=None)
def fit(self, X, y, coef_init=None, intercept_init=None,
sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values
coef_init : array, shape (n_classes, n_features)
The initial coefficients to warm-start the optimization.
intercept_init : array, shape (n_classes,)
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed. These weights will
be multiplied with class_weight (passed through the
            constructor) if class_weight is specified
Returns
-------
self : returns an instance of self.
"""
return self._fit(X, y, alpha=self.alpha, C=1.0,
loss=self.loss, learning_rate=self.learning_rate,
coef_init=coef_init, intercept_init=intercept_init,
sample_weight=sample_weight)
class SGDClassifier(BaseSGDClassifier, _LearntSelectorMixin):
"""Linear classifiers (SVM, logistic regression, a.o.) with SGD training.
This estimator implements regularized linear models with stochastic
gradient descent (SGD) learning: the gradient of the loss is estimated
    one sample at a time and the model is updated along the way with a
decreasing strength schedule (aka learning rate). SGD allows minibatch
(online/out-of-core) learning, see the partial_fit method.
For best results using the default learning rate schedule, the data should
have zero mean and unit variance.
This implementation works with data represented as dense or sparse arrays
of floating point values for the features. The model it fits can be
controlled with the loss parameter; by default, it fits a linear support
vector machine (SVM).
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using either the squared euclidean norm
L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
parameter update crosses the 0.0 value because of the regularizer, the
update is truncated to 0.0 to allow for learning sparse models and achieve
online feature selection.
Read more in the :ref:`User Guide <sgd>`.
Parameters
----------
loss : str, 'hinge', 'log', 'modified_huber', 'squared_hinge',\
'perceptron', or a regression loss: 'squared_loss', 'huber',\
'epsilon_insensitive', or 'squared_epsilon_insensitive'
The loss function to be used. Defaults to 'hinge', which gives a
linear SVM.
The 'log' loss gives logistic regression, a probabilistic classifier.
'modified_huber' is another smooth loss that brings tolerance to
outliers as well as probability estimates.
'squared_hinge' is like hinge but is quadratically penalized.
'perceptron' is the linear loss used by the perceptron algorithm.
The other losses are designed for regression but can be useful in
classification as well; see SGDRegressor for a description.
penalty : str, 'none', 'l2', 'l1', or 'elasticnet'
The penalty (aka regularization term) to be used. Defaults to 'l2'
which is the standard regularizer for linear SVM models. 'l1' and
'elasticnet' might bring sparsity to the model (feature selection)
not achievable with 'l2'.
alpha : float
Constant that multiplies the regularization term. Defaults to 0.0001
Also used to compute learning_rate when set to 'optimal'.
l1_ratio : float
The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
Defaults to 0.15.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs). The number
of iterations is set to 1 if using partial_fit.
Defaults to 5.
shuffle : bool, optional
Whether or not the training data should be shuffled after each epoch.
Defaults to True.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level
epsilon : float
Epsilon in the epsilon-insensitive loss functions; only if `loss` is
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
For 'huber', determines the threshold at which it becomes less
important to get the prediction exactly right.
For epsilon-insensitive, any differences between the current prediction
and the correct label are ignored if they are less than this threshold.
n_jobs : integer, optional
The number of CPUs to use to do the OVA (One Versus All, for
multi-class problems) computation. -1 means 'all CPUs'. Defaults
to 1.
learning_rate : string, optional
The learning rate schedule:
constant: eta = eta0
optimal: eta = 1.0 / (alpha * (t + t0)) [default]
invscaling: eta = eta0 / pow(t, power_t)
where t0 is chosen by a heuristic proposed by Leon Bottou.
eta0 : double
The initial learning rate for the 'constant' or 'invscaling'
schedules. The default value is 0.0 as eta0 is not used by the
default schedule 'optimal'.
power_t : double
The exponent for inverse scaling learning rate [default 0.5].
class_weight : dict, {class_label: weight} or "balanced" or None, optional
Preset for the class_weight fit parameter.
Weights associated with classes. If not given, all classes
are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
average : bool or int, optional
When set to True, computes the averaged SGD weights and stores the
result in the ``coef_`` attribute. If set to an int greater than 1,
averaging will begin once the total number of samples seen reaches
average. So average=10 will begin averaging after seeing 10 samples.
Attributes
----------
coef_ : array, shape (1, n_features) if n_classes == 2 else (n_classes,\
n_features)
Weights assigned to the features.
intercept_ : array, shape (1,) if n_classes == 2 else (n_classes,)
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> Y = np.array([1, 1, 2, 2])
>>> clf = linear_model.SGDClassifier()
>>> clf.fit(X, Y)
... #doctest: +NORMALIZE_WHITESPACE
SGDClassifier(alpha=0.0001, average=False, class_weight=None, epsilon=0.1,
eta0=0.0, fit_intercept=True, l1_ratio=0.15,
learning_rate='optimal', loss='hinge', n_iter=5, n_jobs=1,
penalty='l2', power_t=0.5, random_state=None, shuffle=True,
verbose=0, warm_start=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
LinearSVC, LogisticRegression, Perceptron
"""
def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
fit_intercept=True, n_iter=5, shuffle=True, verbose=0,
epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
class_weight=None, warm_start=False, average=False):
super(SGDClassifier, self).__init__(
loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept, n_iter=n_iter, shuffle=shuffle,
verbose=verbose, epsilon=epsilon, n_jobs=n_jobs,
random_state=random_state, learning_rate=learning_rate, eta0=eta0,
power_t=power_t, class_weight=class_weight, warm_start=warm_start,
average=average)
def _check_proba(self):
check_is_fitted(self, "t_")
if self.loss not in ("log", "modified_huber"):
raise AttributeError("probability estimates are not available for"
" loss=%r" % self.loss)
@property
def predict_proba(self):
"""Probability estimates.
This method is only available for log loss and modified Huber loss.
Multiclass probability estimates are derived from binary (one-vs.-rest)
estimates by simple normalization, as recommended by Zadrozny and
Elkan.
Binary probability estimates for loss="modified_huber" are given by
(clip(decision_function(X), -1, 1) + 1) / 2.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples, n_classes)
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in `self.classes_`.
References
----------
Zadrozny and Elkan, "Transforming classifier scores into multiclass
probability estimates", SIGKDD'02,
http://www.research.ibm.com/people/z/zadrozny/kdd2002-Transf.pdf
The justification for the formula in the loss="modified_huber"
case is in the appendix B in:
http://jmlr.csail.mit.edu/papers/volume2/zhang02c/zhang02c.pdf
"""
self._check_proba()
return self._predict_proba
def _predict_proba(self, X):
if self.loss == "log":
return self._predict_proba_lr(X)
elif self.loss == "modified_huber":
binary = (len(self.classes_) == 2)
scores = self.decision_function(X)
if binary:
prob2 = np.ones((scores.shape[0], 2))
prob = prob2[:, 1]
else:
prob = scores
np.clip(scores, -1, 1, prob)
prob += 1.
prob /= 2.
if binary:
prob2[:, 0] -= prob
prob = prob2
else:
# the above might assign zero to all classes, which doesn't
# normalize neatly; work around this to produce uniform
# probabilities
prob_sum = prob.sum(axis=1)
all_zero = (prob_sum == 0)
if np.any(all_zero):
prob[all_zero, :] = 1
prob_sum[all_zero] = len(self.classes_)
# normalize
prob /= prob_sum.reshape((prob.shape[0], -1))
return prob
else:
raise NotImplementedError("predict_(log_)proba only supported when"
" loss='log' or loss='modified_huber' "
"(%r given)" % self.loss)
@property
def predict_log_proba(self):
"""Log of probability estimates.
This method is only available for log loss and modified Huber loss.
When loss="modified_huber", probability estimates may be hard zeros
and ones, so taking the logarithm is not possible.
See ``predict_proba`` for details.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
T : array-like, shape (n_samples, n_classes)
Returns the log-probability of the sample for each class in the
model, where classes are ordered as they are in
`self.classes_`.
"""
self._check_proba()
return self._predict_log_proba
def _predict_log_proba(self, X):
return np.log(self.predict_proba(X))
class BaseSGDRegressor(BaseSGD, RegressorMixin):
loss_functions = {
"squared_loss": (SquaredLoss, ),
"huber": (Huber, DEFAULT_EPSILON),
"epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
"squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
DEFAULT_EPSILON),
}
@abstractmethod
def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=DEFAULT_EPSILON, random_state=None,
learning_rate="invscaling", eta0=0.01, power_t=0.25,
warm_start=False, average=False):
super(BaseSGDRegressor, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
def _partial_fit(self, X, y, alpha, C, loss, learning_rate,
n_iter, sample_weight,
coef_init, intercept_init):
X, y = check_X_y(X, y, "csr", copy=False, order='C', dtype=np.float64)
y = astype(y, np.float64, copy=False)
n_samples, n_features = X.shape
self._validate_params()
# Allocate datastructures from input arguments
sample_weight = self._validate_sample_weight(sample_weight, n_samples)
if self.coef_ is None:
self._allocate_parameter_mem(1, n_features,
coef_init, intercept_init)
elif n_features != self.coef_.shape[-1]:
raise ValueError("Number of features %d does not match previous "
"data %d." % (n_features, self.coef_.shape[-1]))
if self.average > 0 and self.average_coef_ is None:
self.average_coef_ = np.zeros(n_features,
dtype=np.float64,
order="C")
self.average_intercept_ = np.zeros(1,
dtype=np.float64,
order="C")
self._fit_regressor(X, y, alpha, C, loss, learning_rate,
sample_weight, n_iter)
return self
def partial_fit(self, X, y, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Subset of training data
y : numpy array of shape (n_samples,)
Subset of target values
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : returns an instance of self.
"""
return self._partial_fit(X, y, self.alpha, C=1.0,
loss=self.loss,
learning_rate=self.learning_rate, n_iter=1,
sample_weight=sample_weight,
coef_init=None, intercept_init=None)
def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
intercept_init=None, sample_weight=None):
if self.warm_start and self.coef_ is not None:
if coef_init is None:
coef_init = self.coef_
if intercept_init is None:
intercept_init = self.intercept_
else:
self.coef_ = None
self.intercept_ = None
if self.average > 0:
self.standard_intercept_ = self.intercept_
self.standard_coef_ = self.coef_
self.average_coef_ = None
self.average_intercept_ = None
# Clear iteration count for multiple call to fit.
self.t_ = None
return self._partial_fit(X, y, alpha, C, loss, learning_rate,
self.n_iter, sample_weight,
coef_init, intercept_init)
def fit(self, X, y, coef_init=None, intercept_init=None,
sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values
coef_init : array, shape (n_features,)
The initial coefficients to warm-start the optimization.
intercept_init : array, shape (1,)
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : returns an instance of self.
"""
return self._fit(X, y, alpha=self.alpha, C=1.0,
loss=self.loss, learning_rate=self.learning_rate,
coef_init=coef_init,
intercept_init=intercept_init,
sample_weight=sample_weight)
@deprecated(" and will be removed in 0.19.")
def decision_function(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
return self._decision_function(X)
def _decision_function(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
check_is_fitted(self, ["t_", "coef_", "intercept_"], all_or_any=all)
X = check_array(X, accept_sparse='csr')
scores = safe_sparse_dot(X, self.coef_.T,
dense_output=True) + self.intercept_
return scores.ravel()
def predict(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
return self._decision_function(X)
def _fit_regressor(self, X, y, alpha, C, loss, learning_rate,
sample_weight, n_iter):
dataset, intercept_decay = make_dataset(X, y, sample_weight)
loss_function = self._get_loss_function(loss)
penalty_type = self._get_penalty_type(self.penalty)
learning_rate_type = self._get_learning_rate_type(learning_rate)
if self.t_ is None:
self.t_ = 1.0
random_state = check_random_state(self.random_state)
# numpy mtrand expects a C long which is a signed 32 bit integer under
# Windows
seed = random_state.randint(0, np.iinfo(np.int32).max)
if self.average > 0:
self.standard_coef_, self.standard_intercept_, \
self.average_coef_, self.average_intercept_ =\
average_sgd(self.standard_coef_,
self.standard_intercept_[0],
self.average_coef_,
self.average_intercept_[0],
loss_function,
penalty_type,
alpha, C,
self.l1_ratio,
dataset,
n_iter,
int(self.fit_intercept),
int(self.verbose),
int(self.shuffle),
seed,
1.0, 1.0,
learning_rate_type,
self.eta0, self.power_t, self.t_,
intercept_decay, self.average)
self.average_intercept_ = np.atleast_1d(self.average_intercept_)
self.standard_intercept_ = np.atleast_1d(self.standard_intercept_)
self.t_ += n_iter * X.shape[0]
if self.average <= self.t_ - 1.0:
self.coef_ = self.average_coef_
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_
self.intercept_ = self.standard_intercept_
else:
self.coef_, self.intercept_ = \
plain_sgd(self.coef_,
self.intercept_[0],
loss_function,
penalty_type,
alpha, C,
self.l1_ratio,
dataset,
n_iter,
int(self.fit_intercept),
int(self.verbose),
int(self.shuffle),
seed,
1.0, 1.0,
learning_rate_type,
self.eta0, self.power_t, self.t_,
intercept_decay)
self.t_ += n_iter * X.shape[0]
self.intercept_ = np.atleast_1d(self.intercept_)
class SGDRegressor(BaseSGDRegressor, _LearntSelectorMixin):
"""Linear model fitted by minimizing a regularized empirical loss with SGD
SGD stands for Stochastic Gradient Descent: the gradient of the loss is
    estimated one sample at a time and the model is updated along the way with
a decreasing strength schedule (aka learning rate).
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using either the squared euclidean norm
L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
parameter update crosses the 0.0 value because of the regularizer, the
update is truncated to 0.0 to allow for learning sparse models and achieve
online feature selection.
This implementation works with data represented as dense numpy arrays of
floating point values for the features.
Read more in the :ref:`User Guide <sgd>`.
Parameters
----------
loss : str, 'squared_loss', 'huber', 'epsilon_insensitive', \
or 'squared_epsilon_insensitive'
The loss function to be used. Defaults to 'squared_loss' which refers
to the ordinary least squares fit. 'huber' modifies 'squared_loss' to
focus less on getting outliers correct by switching from squared to
linear loss past a distance of epsilon. 'epsilon_insensitive' ignores
errors less than epsilon and is linear past that; this is the loss
function used in SVR. 'squared_epsilon_insensitive' is the same but
becomes squared loss past a tolerance of epsilon.
penalty : str, 'none', 'l2', 'l1', or 'elasticnet'
The penalty (aka regularization term) to be used. Defaults to 'l2'
which is the standard regularizer for linear SVM models. 'l1' and
'elasticnet' might bring sparsity to the model (feature selection)
not achievable with 'l2'.
alpha : float
Constant that multiplies the regularization term. Defaults to 0.0001
Also used to compute learning_rate when set to 'optimal'.
l1_ratio : float
The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
Defaults to 0.15.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs). The number
of iterations is set to 1 if using partial_fit.
Defaults to 5.
shuffle : bool, optional
Whether or not the training data should be shuffled after each epoch.
Defaults to True.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level.
epsilon : float
Epsilon in the epsilon-insensitive loss functions; only if `loss` is
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
For 'huber', determines the threshold at which it becomes less
important to get the prediction exactly right.
For epsilon-insensitive, any differences between the current prediction
and the correct label are ignored if they are less than this threshold.
learning_rate : string, optional
The learning rate:
constant: eta = eta0
optimal: eta = 1.0/(alpha * t)
invscaling: eta = eta0 / pow(t, power_t) [default]
eta0 : double, optional
The initial learning rate [default 0.01].
power_t : double, optional
The exponent for inverse scaling learning rate [default 0.25].
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
average : bool or int, optional
When set to True, computes the averaged SGD weights and stores the
result in the ``coef_`` attribute. If set to an int greater than 1,
averaging will begin once the total number of samples seen reaches
        average. So ``average=10`` will begin averaging after seeing 10
samples.
Attributes
----------
coef_ : array, shape (n_features,)
Weights assigned to the features.
intercept_ : array, shape (1,)
The intercept term.
average_coef_ : array, shape (n_features,)
Averaged weights assigned to the features.
average_intercept_ : array, shape (1,)
The averaged intercept term.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = linear_model.SGDRegressor()
>>> clf.fit(X, y)
... #doctest: +NORMALIZE_WHITESPACE
SGDRegressor(alpha=0.0001, average=False, epsilon=0.1, eta0=0.01,
fit_intercept=True, l1_ratio=0.15, learning_rate='invscaling',
loss='squared_loss', n_iter=5, penalty='l2', power_t=0.25,
random_state=None, shuffle=True, verbose=0, warm_start=False)
See also
--------
Ridge, ElasticNet, Lasso, SVR
"""
def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=DEFAULT_EPSILON, random_state=None,
learning_rate="invscaling", eta0=0.01, power_t=0.25,
warm_start=False, average=False):
super(SGDRegressor, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
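# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the module): out-of-core/minibatch training
# with ``partial_fit`` as described in the SGDClassifier docstring above. The
# data and batch sizes below are made up for demonstration only.
#
#   import numpy as np
#   from sklearn.linear_model import SGDClassifier
#
#   clf = SGDClassifier(loss="log", random_state=0)
#   classes = np.array([0, 1])           # required on the first partial_fit call
#   for _ in range(5):                   # stream five small batches
#       X_batch = np.random.randn(20, 3)
#       y_batch = np.random.randint(0, 2, size=20)
#       clf.partial_fit(X_batch, y_batch, classes=classes)
#   print(clf.predict(np.zeros((1, 3))))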
|
mit
|
senthil10/scilifelab
|
scilifelab/io/pandas/picard.py
|
4
|
1924
|
"""pm picard lib"""
import os
import re
import pandas as pd
from scilifelab.io import index_containing_substring
import scilifelab.log
LOG = scilifelab.log.minimal_logger(__name__)
METRICS_TYPES=['align', 'hs', 'dup', 'insert']
def _raw(x):
return (x, None)
def _convert_input(x):
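    # coerce Picard metric fields: all-digit strings become int, strings of
    # digits/commas/dots become float (decimal comma -> dot), anything else stays str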
if re.match("^[0-9]+$", x):
return int(x)
elif re.match("^[0-9,.]+$", x):
return float(x.replace(",", "."))
else:
return str(x)
def _read_picard_metrics(f):
if not os.path.exists(f):
LOG.warn("IO failure: no such file {}".format(f))
return (None, None)
with open(f) as fh:
data = fh.readlines()
# Find histogram line
i_hist = index_containing_substring(data, "## HISTOGRAM")
if i_hist == -1:
i = len(data)
else:
i = i_hist
tmp = [[_convert_input(y) for y in x.rstrip("\n").split("\t")] for x in data[0:i] if not re.match("^[ #\n]", x)]
metrics = pd.DataFrame(tmp[1:], columns=tmp[0])
if i_hist == -1:
return (metrics, None)
tmp = [[_convert_input(y) for y in x.rstrip("\n").split("\t")] for x in data[i_hist:len(data)] if not re.match("^[ #\n]", x)]
hist = pd.DataFrame(tmp[1:], columns=tmp[0])
return (metrics, hist)
# For now: extension maps to a tuple (label, description, reader function).
# The label should be reused for analysis definitions.
EXTENSIONS={'.align_metrics':('align', 'alignment', _read_picard_metrics),
'.hs_metrics':('hs', 'hybrid selection', _read_picard_metrics),
'.dup_metrics':('dup', 'duplication metrics', _read_picard_metrics),
'.insert_metrics':('insert', 'insert size', _read_picard_metrics),
'.eval_metrics':('eval', 'snp evaluation', _raw)
}
def read_metrics(f):
"""Read metrics"""
(_, metrics_type) = os.path.splitext(f)
d = EXTENSIONS[metrics_type][2](f)
return d
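# Illustrative sketch (not part of the module): reading a hypothetical Picard
# duplication-metrics file; the filename below is made up.
#
#   metrics, hist = read_metrics("sample.dup_metrics")
#   # ``metrics`` is a DataFrame of the metric rows; ``hist`` holds the optional
#   # histogram section and is None when the file has no "## HISTOGRAM" block.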
|
mit
|
henrykironde/scikit-learn
|
examples/linear_model/plot_sgd_iris.py
|
286
|
2202
|
"""
========================================
Plot multi-class SGD on the iris dataset
========================================
Plot decision surface of multi-class SGD on iris dataset.
The hyperplanes corresponding to the three one-versus-all (OVA) classifiers
are represented by the dashed lines.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.linear_model import SGDClassifier
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
colors = "bry"
# shuffle
idx = np.arange(X.shape[0])
np.random.seed(13)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
h = .02 # step size in the mesh
clf = SGDClassifier(alpha=0.001, n_iter=100).fit(X, y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis('tight')
# Plot also the training points
for i, color in zip(clf.classes_, colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=color, label=iris.target_names[i],
cmap=plt.cm.Paired)
plt.title("Decision surface of multi-class SGD")
plt.axis('tight')
# Plot the three one-against-all classifiers
xmin, xmax = plt.xlim()
ymin, ymax = plt.ylim()
coef = clf.coef_
intercept = clf.intercept_
def plot_hyperplane(c, color):
def line(x0):
return (-(x0 * coef[c, 0]) - intercept[c]) / coef[c, 1]
plt.plot([xmin, xmax], [line(xmin), line(xmax)],
ls="--", color=color)
for i, color in zip(clf.classes_, colors):
plot_hyperplane(i, color)
plt.legend()
plt.show()
|
bsd-3-clause
|
benoitsteiner/tensorflow
|
tensorflow/contrib/learn/python/learn/estimators/estimator_input_test.py
|
72
|
12865
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Estimator input."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import tempfile
import numpy as np
from tensorflow.contrib.framework.python.ops import variables
from tensorflow.contrib.layers.python.layers import optimizers
from tensorflow.contrib.learn.python.learn import metric_spec
from tensorflow.contrib.learn.python.learn import models
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import queue_runner_impl
_BOSTON_INPUT_DIM = 13
_IRIS_INPUT_DIM = 4
def boston_input_fn(num_epochs=None):
boston = base.load_boston()
features = input_lib.limit_epochs(
array_ops.reshape(
constant_op.constant(boston.data), [-1, _BOSTON_INPUT_DIM]),
num_epochs=num_epochs)
labels = array_ops.reshape(constant_op.constant(boston.target), [-1, 1])
return features, labels
def boston_input_fn_with_queue(num_epochs=None):
features, labels = boston_input_fn(num_epochs=num_epochs)
# Create a minimal queue runner.
fake_queue = data_flow_ops.FIFOQueue(30, dtypes.int32)
queue_runner = queue_runner_impl.QueueRunner(fake_queue,
[constant_op.constant(0)])
queue_runner_impl.add_queue_runner(queue_runner)
return features, labels
def iris_input_fn():
iris = base.load_iris()
features = array_ops.reshape(
constant_op.constant(iris.data), [-1, _IRIS_INPUT_DIM])
labels = array_ops.reshape(constant_op.constant(iris.target), [-1])
return features, labels
def iris_input_fn_labels_dict():
iris = base.load_iris()
features = array_ops.reshape(
constant_op.constant(iris.data), [-1, _IRIS_INPUT_DIM])
labels = {
'labels': array_ops.reshape(constant_op.constant(iris.target), [-1])
}
return features, labels
def boston_eval_fn():
boston = base.load_boston()
n_examples = len(boston.target)
features = array_ops.reshape(
constant_op.constant(boston.data), [n_examples, _BOSTON_INPUT_DIM])
labels = array_ops.reshape(
constant_op.constant(boston.target), [n_examples, 1])
return array_ops.concat([features, features], 0), array_ops.concat(
[labels, labels], 0)
def extract(data, key):
if isinstance(data, dict):
assert key in data
return data[key]
else:
return data
def linear_model_params_fn(features, labels, mode, params):
features = extract(features, 'input')
labels = extract(labels, 'labels')
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss,
variables.get_global_step(),
optimizer='Adagrad',
learning_rate=params['learning_rate'])
return prediction, loss, train_op
def linear_model_fn(features, labels, mode):
features = extract(features, 'input')
labels = extract(labels, 'labels')
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
if isinstance(features, dict):
(_, features), = features.items()
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
return prediction, loss, train_op
def linear_model_fn_with_model_fn_ops(features, labels, mode):
"""Same as linear_model_fn, but returns `ModelFnOps`."""
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
return model_fn.ModelFnOps(
mode=mode, predictions=prediction, loss=loss, train_op=train_op)
def logistic_model_no_mode_fn(features, labels):
features = extract(features, 'input')
labels = extract(labels, 'labels')
labels = array_ops.one_hot(labels, 3, 1, 0)
prediction, loss = (models.logistic_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
return {
'class': math_ops.argmax(prediction, 1),
'prob': prediction
}, loss, train_op
VOCAB_FILE_CONTENT = 'emerson\nlake\npalmer\n'
EXTRA_FILE_CONTENT = 'kermit\npiggy\nralph\n'
class EstimatorInputTest(test.TestCase):
def testContinueTrainingDictionaryInput(self):
boston = base.load_boston()
output_dir = tempfile.mkdtemp()
est = estimator.Estimator(model_fn=linear_model_fn, model_dir=output_dir)
boston_input = {'input': boston.data}
float64_target = {'labels': boston.target.astype(np.float64)}
est.fit(x=boston_input, y=float64_target, steps=50)
scores = est.evaluate(
x=boston_input,
y=float64_target,
metrics={'MSE': metric_ops.streaming_mean_squared_error})
del est
# Create another estimator object with the same output dir.
est2 = estimator.Estimator(model_fn=linear_model_fn, model_dir=output_dir)
# Check we can evaluate and predict.
scores2 = est2.evaluate(
x=boston_input,
y=float64_target,
metrics={'MSE': metric_ops.streaming_mean_squared_error})
self.assertAllClose(scores2['MSE'], scores['MSE'])
predictions = np.array(list(est2.predict(x=boston_input)))
other_score = _sklearn.mean_squared_error(predictions,
float64_target['labels'])
self.assertAllClose(other_score, scores['MSE'])
def testBostonAll(self):
boston = base.load_boston()
est = estimator.SKCompat(estimator.Estimator(model_fn=linear_model_fn))
float64_labels = boston.target.astype(np.float64)
est.fit(x=boston.data, y=float64_labels, steps=100)
scores = est.score(
x=boston.data,
y=float64_labels,
metrics={'MSE': metric_ops.streaming_mean_squared_error})
predictions = np.array(list(est.predict(x=boston.data)))
other_score = _sklearn.mean_squared_error(predictions, boston.target)
self.assertAllClose(scores['MSE'], other_score)
self.assertTrue('global_step' in scores)
self.assertEqual(100, scores['global_step'])
def testBostonAllDictionaryInput(self):
boston = base.load_boston()
est = estimator.Estimator(model_fn=linear_model_fn)
boston_input = {'input': boston.data}
float64_target = {'labels': boston.target.astype(np.float64)}
est.fit(x=boston_input, y=float64_target, steps=100)
scores = est.evaluate(
x=boston_input,
y=float64_target,
metrics={'MSE': metric_ops.streaming_mean_squared_error})
predictions = np.array(list(est.predict(x=boston_input)))
other_score = _sklearn.mean_squared_error(predictions, boston.target)
self.assertAllClose(other_score, scores['MSE'])
self.assertTrue('global_step' in scores)
self.assertEqual(scores['global_step'], 100)
def testIrisAll(self):
iris = base.load_iris()
est = estimator.SKCompat(
estimator.Estimator(model_fn=logistic_model_no_mode_fn))
est.fit(iris.data, iris.target, steps=100)
scores = est.score(
x=iris.data,
y=iris.target,
metrics={('accuracy', 'class'): metric_ops.streaming_accuracy})
predictions = est.predict(x=iris.data)
predictions_class = est.predict(x=iris.data, outputs=['class'])['class']
self.assertEqual(predictions['prob'].shape[0], iris.target.shape[0])
self.assertAllClose(predictions['class'], predictions_class)
self.assertAllClose(
predictions['class'], np.argmax(
predictions['prob'], axis=1))
other_score = _sklearn.accuracy_score(iris.target, predictions['class'])
self.assertAllClose(scores['accuracy'], other_score)
self.assertTrue('global_step' in scores)
self.assertEqual(100, scores['global_step'])
def testIrisAllDictionaryInput(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
iris_data = {'input': iris.data}
iris_target = {'labels': iris.target}
est.fit(iris_data, iris_target, steps=100)
scores = est.evaluate(
x=iris_data,
y=iris_target,
metrics={('accuracy', 'class'): metric_ops.streaming_accuracy})
predictions = list(est.predict(x=iris_data))
predictions_class = list(est.predict(x=iris_data, outputs=['class']))
self.assertEqual(len(predictions), iris.target.shape[0])
classes_batch = np.array([p['class'] for p in predictions])
self.assertAllClose(classes_batch,
np.array([p['class'] for p in predictions_class]))
self.assertAllClose(
classes_batch,
np.argmax(
np.array([p['prob'] for p in predictions]), axis=1))
other_score = _sklearn.accuracy_score(iris.target, classes_batch)
self.assertAllClose(other_score, scores['accuracy'])
self.assertTrue('global_step' in scores)
self.assertEqual(scores['global_step'], 100)
def testIrisInputFn(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
est.fit(input_fn=iris_input_fn, steps=100)
_ = est.evaluate(input_fn=iris_input_fn, steps=1)
predictions = list(est.predict(x=iris.data))
self.assertEqual(len(predictions), iris.target.shape[0])
def testIrisInputFnLabelsDict(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
est.fit(input_fn=iris_input_fn_labels_dict, steps=100)
_ = est.evaluate(
input_fn=iris_input_fn_labels_dict,
steps=1,
metrics={
'accuracy':
metric_spec.MetricSpec(
metric_fn=metric_ops.streaming_accuracy,
prediction_key='class',
label_key='labels')
})
predictions = list(est.predict(x=iris.data))
self.assertEqual(len(predictions), iris.target.shape[0])
def testTrainInputFn(self):
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
_ = est.evaluate(input_fn=boston_eval_fn, steps=1)
def testPredictInputFn(self):
est = estimator.Estimator(model_fn=linear_model_fn)
boston = base.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
input_fn = functools.partial(boston_input_fn, num_epochs=1)
output = list(est.predict(input_fn=input_fn))
self.assertEqual(len(output), boston.target.shape[0])
def testPredictInputFnWithQueue(self):
est = estimator.Estimator(model_fn=linear_model_fn)
boston = base.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
input_fn = functools.partial(boston_input_fn_with_queue, num_epochs=2)
output = list(est.predict(input_fn=input_fn))
self.assertEqual(len(output), boston.target.shape[0] * 2)
def testPredictConstInputFn(self):
est = estimator.Estimator(model_fn=linear_model_fn)
boston = base.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
def input_fn():
features = array_ops.reshape(
constant_op.constant(boston.data), [-1, _BOSTON_INPUT_DIM])
labels = array_ops.reshape(constant_op.constant(boston.target), [-1, 1])
return features, labels
output = list(est.predict(input_fn=input_fn))
self.assertEqual(len(output), boston.target.shape[0])
if __name__ == '__main__':
test.main()
|
apache-2.0
|
kernc/scikit-learn
|
sklearn/naive_bayes.py
|
5
|
28895
|
# -*- coding: utf-8 -*-
"""
The :mod:`sklearn.naive_bayes` module implements Naive Bayes algorithms. These
are supervised learning methods based on applying Bayes' theorem with strong
(naive) feature independence assumptions.
"""
# Author: Vincent Michel <[email protected]>
# Minor fixes by Fabian Pedregosa
# Amit Aides <[email protected]>
# Yehuda Finkelstein <[email protected]>
# Lars Buitinck
# Jan Hendrik Metzen <[email protected]>
# (parts based on earlier work by Mathieu Blondel)
#
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from .base import BaseEstimator, ClassifierMixin
from .preprocessing import binarize
from .preprocessing import LabelBinarizer
from .preprocessing import label_binarize
from .utils import check_X_y, check_array
from .utils.extmath import safe_sparse_dot, logsumexp
from .utils.multiclass import _check_partial_fit_first_call
from .utils.fixes import in1d
from .utils.validation import check_is_fitted
from .externals import six
__all__ = ['BernoulliNB', 'GaussianNB', 'MultinomialNB']
class BaseNB(six.with_metaclass(ABCMeta, BaseEstimator, ClassifierMixin)):
"""Abstract base class for naive Bayes estimators"""
@abstractmethod
def _joint_log_likelihood(self, X):
"""Compute the unnormalized posterior log probability of X
        I.e. ``log P(c) + log P(x|c)`` for all rows x of X, as an array-like of
        shape [n_samples, n_classes].
Input is passed to _joint_log_likelihood as-is by predict,
predict_proba and predict_log_proba.
"""
def predict(self, X):
"""
Perform classification on an array of test vectors X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples]
Predicted target values for X
"""
jll = self._joint_log_likelihood(X)
return self.classes_[np.argmax(jll, axis=1)]
def predict_log_proba(self, X):
"""
Return log-probability estimates for the test vector X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array-like, shape = [n_samples, n_classes]
Returns the log-probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute `classes_`.
"""
jll = self._joint_log_likelihood(X)
# normalize by P(x) = P(f_1, ..., f_n)
log_prob_x = logsumexp(jll, axis=1)
return jll - np.atleast_2d(log_prob_x).T
def predict_proba(self, X):
"""
Return probability estimates for the test vector X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array-like, shape = [n_samples, n_classes]
Returns the probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute `classes_`.
"""
return np.exp(self.predict_log_proba(X))
class GaussianNB(BaseNB):
"""
Gaussian Naive Bayes (GaussianNB)
Can perform online updates to model parameters via `partial_fit` method.
For details on algorithm used to update feature means and variance online,
see Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque:
http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf
Read more in the :ref:`User Guide <gaussian_naive_bayes>`.
Attributes
----------
class_prior_ : array, shape (n_classes,)
probability of each class.
class_count_ : array, shape (n_classes,)
number of training samples observed in each class.
theta_ : array, shape (n_classes, n_features)
mean of each feature per class
sigma_ : array, shape (n_classes, n_features)
variance of each feature per class
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> Y = np.array([1, 1, 1, 2, 2, 2])
>>> from sklearn.naive_bayes import GaussianNB
>>> clf = GaussianNB()
>>> clf.fit(X, Y)
GaussianNB()
>>> print(clf.predict([[-0.8, -1]]))
[1]
>>> clf_pf = GaussianNB()
>>> clf_pf.partial_fit(X, Y, np.unique(Y))
GaussianNB()
>>> print(clf_pf.predict([[-0.8, -1]]))
[1]
"""
def fit(self, X, y, sample_weight=None):
"""Fit Gaussian Naive Bayes according to X, y
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
.. versionadded:: 0.17
Gaussian Naive Bayes supports fitting with *sample_weight*.
Returns
-------
self : object
Returns self.
"""
X, y = check_X_y(X, y)
return self._partial_fit(X, y, np.unique(y), _refit=True,
sample_weight=sample_weight)
@staticmethod
def _update_mean_variance(n_past, mu, var, X, sample_weight=None):
"""Compute online update of Gaussian mean and variance.
Given starting sample count, mean, and variance, a new set of
points X, and optionally sample weights, return the updated mean and
variance. (NB - each dimension (column) in X is treated as independent
-- you get variance, not covariance).
Can take scalar mean and variance, or vector mean and variance to
simultaneously update a number of independent Gaussians.
See Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque:
http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf
Parameters
----------
n_past : int
Number of samples represented in old mean and variance. If sample
weights were given, this should contain the sum of sample
weights represented in old mean and variance.
mu : array-like, shape (number of Gaussians,)
Means for Gaussians in original set.
        var : array-like, shape (number of Gaussians,)
            Variances for Gaussians in original set.
        X : array-like, shape (n_samples, n_features)
            New data points to fold into the running statistics.
        sample_weight : array-like, shape (n_samples,), optional
            Weights applied to individual samples (1. for unweighted).
Returns
-------
total_mu : array-like, shape (number of Gaussians,)
Updated mean for each Gaussian over the combined set.
total_var : array-like, shape (number of Gaussians,)
Updated variance for each Gaussian over the combined set.
"""
if X.shape[0] == 0:
return mu, var
# Compute (potentially weighted) mean and variance of new datapoints
if sample_weight is not None:
n_new = float(sample_weight.sum())
new_mu = np.average(X, axis=0, weights=sample_weight / n_new)
new_var = np.average((X - new_mu) ** 2, axis=0,
weights=sample_weight / n_new)
else:
n_new = X.shape[0]
new_var = np.var(X, axis=0)
new_mu = np.mean(X, axis=0)
if n_past == 0:
return new_mu, new_var
n_total = float(n_past + n_new)
# Combine mean of old and new data, taking into consideration
# (weighted) number of observations
total_mu = (n_new * new_mu + n_past * mu) / n_total
# Combine variance of old and new data, taking into consideration
# (weighted) number of observations. This is achieved by combining
# the sum-of-squared-differences (ssd)
old_ssd = n_past * var
new_ssd = n_new * new_var
total_ssd = (old_ssd + new_ssd +
(n_past / float(n_new * n_total)) *
(n_new * mu - n_new * new_mu) ** 2)
total_var = total_ssd / n_total
return total_mu, total_var
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
on different chunks of a dataset so as to implement out-of-core
or online learning.
This is especially useful when the whole dataset is too big to fit in
memory at once.
This method has some performance and numerical stability overhead,
hence it is better to call partial_fit on chunks of data that are
as large as possible (as long as fitting in the memory budget) to
hide the overhead.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
classes : array-like, shape (n_classes,)
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
.. versionadded:: 0.17
Returns
-------
self : object
Returns self.
"""
return self._partial_fit(X, y, classes, _refit=False,
sample_weight=sample_weight)
def _partial_fit(self, X, y, classes=None, _refit=False,
sample_weight=None):
"""Actual implementation of Gaussian NB fitting.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
classes : array-like, shape (n_classes,)
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
_refit: bool
If true, act as though this were the first time we called
_partial_fit (ie, throw away any past fitting and start over).
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns self.
"""
X, y = check_X_y(X, y)
# If the ratio of data variance between dimensions is too small, it
# will cause numerical errors. To address this, we artificially
# boost the variance by epsilon, a small fraction of the standard
# deviation of the largest dimension.
epsilon = 1e-9 * np.var(X, axis=0).max()
if _refit:
self.classes_ = None
if _check_partial_fit_first_call(self, classes):
# This is the first call to partial_fit:
# initialize various cumulative counters
n_features = X.shape[1]
n_classes = len(self.classes_)
self.theta_ = np.zeros((n_classes, n_features))
self.sigma_ = np.zeros((n_classes, n_features))
self.class_prior_ = np.zeros(n_classes)
self.class_count_ = np.zeros(n_classes)
else:
if X.shape[1] != self.theta_.shape[1]:
msg = "Number of features %d does not match previous data %d."
raise ValueError(msg % (X.shape[1], self.theta_.shape[1]))
# Put epsilon back in each time
self.sigma_[:, :] -= epsilon
classes = self.classes_
unique_y = np.unique(y)
unique_y_in_classes = in1d(unique_y, classes)
if not np.all(unique_y_in_classes):
raise ValueError("The target label(s) %s in y do not exist in the "
"initial classes %s" %
                             (unique_y[~unique_y_in_classes], classes))
for y_i in unique_y:
i = classes.searchsorted(y_i)
X_i = X[y == y_i, :]
if sample_weight is not None:
sw_i = sample_weight[y == y_i]
N_i = sw_i.sum()
else:
sw_i = None
N_i = X_i.shape[0]
new_theta, new_sigma = self._update_mean_variance(
self.class_count_[i], self.theta_[i, :], self.sigma_[i, :],
X_i, sw_i)
self.theta_[i, :] = new_theta
self.sigma_[i, :] = new_sigma
self.class_count_[i] += N_i
self.sigma_[:, :] += epsilon
self.class_prior_[:] = self.class_count_ / np.sum(self.class_count_)
return self
def _joint_log_likelihood(self, X):
check_is_fitted(self, "classes_")
X = check_array(X)
joint_log_likelihood = []
for i in range(np.size(self.classes_)):
jointi = np.log(self.class_prior_[i])
n_ij = - 0.5 * np.sum(np.log(2. * np.pi * self.sigma_[i, :]))
n_ij -= 0.5 * np.sum(((X - self.theta_[i, :]) ** 2) /
(self.sigma_[i, :]), 1)
joint_log_likelihood.append(jointi + n_ij)
joint_log_likelihood = np.array(joint_log_likelihood).T
return joint_log_likelihood
class BaseDiscreteNB(BaseNB):
"""Abstract base class for naive Bayes on discrete/categorical data
Any estimator based on this class should provide:
__init__
_joint_log_likelihood(X) as per BaseNB
"""
def _update_class_log_prior(self, class_prior=None):
n_classes = len(self.classes_)
if class_prior is not None:
if len(class_prior) != n_classes:
raise ValueError("Number of priors must match number of"
" classes.")
self.class_log_prior_ = np.log(class_prior)
elif self.fit_prior:
# empirical prior, with sample_weight taken into account
self.class_log_prior_ = (np.log(self.class_count_)
- np.log(self.class_count_.sum()))
else:
self.class_log_prior_ = np.zeros(n_classes) - np.log(n_classes)
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
on different chunks of a dataset so as to implement out-of-core
or online learning.
This is especially useful when the whole dataset is too big to fit in
memory at once.
This method has some performance overhead hence it is better to call
partial_fit on chunks of data that are as large as possible
(as long as fitting in the memory budget) to hide the overhead.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
classes : array-like, shape = [n_classes]
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
sample_weight : array-like, shape = [n_samples], optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns self.
"""
X = check_array(X, accept_sparse='csr', dtype=np.float64)
_, n_features = X.shape
if _check_partial_fit_first_call(self, classes):
# This is the first call to partial_fit:
# initialize various cumulative counters
n_effective_classes = len(classes) if len(classes) > 1 else 2
self.class_count_ = np.zeros(n_effective_classes, dtype=np.float64)
self.feature_count_ = np.zeros((n_effective_classes, n_features),
dtype=np.float64)
elif n_features != self.coef_.shape[1]:
msg = "Number of features %d does not match previous data %d."
raise ValueError(msg % (n_features, self.coef_.shape[-1]))
Y = label_binarize(y, classes=self.classes_)
if Y.shape[1] == 1:
Y = np.concatenate((1 - Y, Y), axis=1)
n_samples, n_classes = Y.shape
if X.shape[0] != Y.shape[0]:
msg = "X.shape[0]=%d and y.shape[0]=%d are incompatible."
raise ValueError(msg % (X.shape[0], y.shape[0]))
# label_binarize() returns arrays with dtype=np.int64.
# We convert it to np.float64 to support sample_weight consistently
Y = Y.astype(np.float64)
if sample_weight is not None:
sample_weight = np.atleast_2d(sample_weight)
Y *= check_array(sample_weight).T
class_prior = self.class_prior
# Count raw events from data before updating the class log prior
# and feature log probas
self._count(X, Y)
# XXX: OPTIM: we could introduce a public finalization method to
# be called by the user explicitly just once after several consecutive
# calls to partial_fit and prior any call to predict[_[log_]proba]
# to avoid computing the smooth log probas at each call to partial fit
self._update_feature_log_prob()
self._update_class_log_prior(class_prior=class_prior)
return self
def fit(self, X, y, sample_weight=None):
"""Fit Naive Bayes classifier according to X, y
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
sample_weight : array-like, shape = [n_samples], optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns self.
"""
X, y = check_X_y(X, y, 'csr')
_, n_features = X.shape
labelbin = LabelBinarizer()
Y = labelbin.fit_transform(y)
self.classes_ = labelbin.classes_
if Y.shape[1] == 1:
Y = np.concatenate((1 - Y, Y), axis=1)
# LabelBinarizer().fit_transform() returns arrays with dtype=np.int64.
# We convert it to np.float64 to support sample_weight consistently;
# this means we also don't have to cast X to floating point
Y = Y.astype(np.float64)
if sample_weight is not None:
sample_weight = np.atleast_2d(sample_weight)
Y *= check_array(sample_weight).T
class_prior = self.class_prior
# Count raw events from data before updating the class log prior
# and feature log probas
n_effective_classes = Y.shape[1]
self.class_count_ = np.zeros(n_effective_classes, dtype=np.float64)
self.feature_count_ = np.zeros((n_effective_classes, n_features),
dtype=np.float64)
self._count(X, Y)
self._update_feature_log_prob()
self._update_class_log_prior(class_prior=class_prior)
return self
# XXX The following is a stopgap measure; we need to set the dimensions
# of class_log_prior_ and feature_log_prob_ correctly.
def _get_coef(self):
return (self.feature_log_prob_[1:]
if len(self.classes_) == 2 else self.feature_log_prob_)
def _get_intercept(self):
return (self.class_log_prior_[1:]
if len(self.classes_) == 2 else self.class_log_prior_)
coef_ = property(_get_coef)
intercept_ = property(_get_intercept)
class MultinomialNB(BaseDiscreteNB):
"""
Naive Bayes classifier for multinomial models
The multinomial Naive Bayes classifier is suitable for classification with
discrete features (e.g., word counts for text classification). The
multinomial distribution normally requires integer feature counts. However,
in practice, fractional counts such as tf-idf may also work.
Read more in the :ref:`User Guide <multinomial_naive_bayes>`.
Parameters
----------
alpha : float, optional (default=1.0)
Additive (Laplace/Lidstone) smoothing parameter
(0 for no smoothing).
fit_prior : boolean
Whether to learn class prior probabilities or not.
If false, a uniform prior will be used.
class_prior : array-like, size (n_classes,)
Prior probabilities of the classes. If specified the priors are not
adjusted according to the data.
Attributes
----------
class_log_prior_ : array, shape (n_classes, )
Smoothed empirical log probability for each class.
intercept_ : property
Mirrors ``class_log_prior_`` for interpreting MultinomialNB
as a linear model.
feature_log_prob_ : array, shape (n_classes, n_features)
Empirical log probability of features
given a class, ``P(x_i|y)``.
coef_ : property
Mirrors ``feature_log_prob_`` for interpreting MultinomialNB
as a linear model.
class_count_ : array, shape (n_classes,)
Number of samples encountered for each class during fitting. This
value is weighted by the sample weight when provided.
feature_count_ : array, shape (n_classes, n_features)
Number of samples encountered for each (class, feature)
during fitting. This value is weighted by the sample weight when
provided.
Examples
--------
>>> import numpy as np
>>> X = np.random.randint(5, size=(6, 100))
>>> y = np.array([1, 2, 3, 4, 5, 6])
>>> from sklearn.naive_bayes import MultinomialNB
>>> clf = MultinomialNB()
>>> clf.fit(X, y)
MultinomialNB(alpha=1.0, class_prior=None, fit_prior=True)
>>> print(clf.predict(X[2:3]))
[3]
Notes
-----
For the rationale behind the names `coef_` and `intercept_`, i.e.
naive Bayes as a linear classifier, see J. Rennie et al. (2003),
Tackling the poor assumptions of naive Bayes text classifiers, ICML.
References
----------
C.D. Manning, P. Raghavan and H. Schuetze (2008). Introduction to
Information Retrieval. Cambridge University Press, pp. 234-265.
http://nlp.stanford.edu/IR-book/html/htmledition/naive-bayes-text-classification-1.html
"""
def __init__(self, alpha=1.0, fit_prior=True, class_prior=None):
self.alpha = alpha
self.fit_prior = fit_prior
self.class_prior = class_prior
def _count(self, X, Y):
"""Count and smooth feature occurrences."""
if np.any((X.data if issparse(X) else X) < 0):
raise ValueError("Input X must be non-negative")
self.feature_count_ += safe_sparse_dot(Y.T, X)
self.class_count_ += Y.sum(axis=0)
def _update_feature_log_prob(self):
"""Apply smoothing to raw counts and recompute log probabilities"""
smoothed_fc = self.feature_count_ + self.alpha
smoothed_cc = smoothed_fc.sum(axis=1)
self.feature_log_prob_ = (np.log(smoothed_fc)
- np.log(smoothed_cc.reshape(-1, 1)))
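    # Worked example (editor's note): with alpha=1.0, a class whose raw counts
    # for three features are [2, 0, 1] gets smoothed counts [3, 1, 2]; the
    # smoothed class total is 6, so feature_log_prob_ for that class is
    # log([3/6, 1/6, 2/6]).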
def _joint_log_likelihood(self, X):
"""Calculate the posterior log probability of the samples X"""
check_is_fitted(self, "classes_")
X = check_array(X, accept_sparse='csr')
return (safe_sparse_dot(X, self.feature_log_prob_.T)
+ self.class_log_prior_)
class BernoulliNB(BaseDiscreteNB):
"""Naive Bayes classifier for multivariate Bernoulli models.
Like MultinomialNB, this classifier is suitable for discrete data. The
difference is that while MultinomialNB works with occurrence counts,
BernoulliNB is designed for binary/boolean features.
Read more in the :ref:`User Guide <bernoulli_naive_bayes>`.
Parameters
----------
alpha : float, optional (default=1.0)
Additive (Laplace/Lidstone) smoothing parameter
(0 for no smoothing).
binarize : float or None, optional
Threshold for binarizing (mapping to booleans) of sample features.
If None, input is presumed to already consist of binary vectors.
fit_prior : boolean
Whether to learn class prior probabilities or not.
If false, a uniform prior will be used.
class_prior : array-like, size=[n_classes,]
Prior probabilities of the classes. If specified the priors are not
adjusted according to the data.
Attributes
----------
class_log_prior_ : array, shape = [n_classes]
Log probability of each class (smoothed).
feature_log_prob_ : array, shape = [n_classes, n_features]
Empirical log probability of features given a class, P(x_i|y).
class_count_ : array, shape = [n_classes]
Number of samples encountered for each class during fitting. This
value is weighted by the sample weight when provided.
feature_count_ : array, shape = [n_classes, n_features]
Number of samples encountered for each (class, feature)
during fitting. This value is weighted by the sample weight when
provided.
Examples
--------
>>> import numpy as np
>>> X = np.random.randint(2, size=(6, 100))
>>> Y = np.array([1, 2, 3, 4, 4, 5])
>>> from sklearn.naive_bayes import BernoulliNB
>>> clf = BernoulliNB()
>>> clf.fit(X, Y)
BernoulliNB(alpha=1.0, binarize=0.0, class_prior=None, fit_prior=True)
>>> print(clf.predict(X[2:3]))
[3]
References
----------
C.D. Manning, P. Raghavan and H. Schuetze (2008). Introduction to
Information Retrieval. Cambridge University Press, pp. 234-265.
http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
A. McCallum and K. Nigam (1998). A comparison of event models for naive
Bayes text classification. Proc. AAAI/ICML-98 Workshop on Learning for
Text Categorization, pp. 41-48.
V. Metsis, I. Androutsopoulos and G. Paliouras (2006). Spam filtering with
naive Bayes -- Which naive Bayes? 3rd Conf. on Email and Anti-Spam (CEAS).
"""
def __init__(self, alpha=1.0, binarize=.0, fit_prior=True,
class_prior=None):
self.alpha = alpha
self.binarize = binarize
self.fit_prior = fit_prior
self.class_prior = class_prior
def _count(self, X, Y):
"""Count and smooth feature occurrences."""
if self.binarize is not None:
X = binarize(X, threshold=self.binarize)
self.feature_count_ += safe_sparse_dot(Y.T, X)
self.class_count_ += Y.sum(axis=0)
def _update_feature_log_prob(self):
"""Apply smoothing to raw counts and recompute log probabilities"""
smoothed_fc = self.feature_count_ + self.alpha
smoothed_cc = self.class_count_ + self.alpha * 2
self.feature_log_prob_ = (np.log(smoothed_fc)
- np.log(smoothed_cc.reshape(-1, 1)))
def _joint_log_likelihood(self, X):
"""Calculate the posterior log probability of the samples X"""
check_is_fitted(self, "classes_")
X = check_array(X, accept_sparse='csr')
if self.binarize is not None:
X = binarize(X, threshold=self.binarize)
n_classes, n_features = self.feature_log_prob_.shape
n_samples, n_features_X = X.shape
if n_features_X != n_features:
raise ValueError("Expected input with %d features, got %d instead"
% (n_features, n_features_X))
neg_prob = np.log(1 - np.exp(self.feature_log_prob_))
# Compute neg_prob · (1 - X).T as ∑neg_prob - X · neg_prob
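        # Editor's note: each feature contributes x*log(p) + (1-x)*log(1-p)
        #   = x*(log(p) - log(1-p)) + log(1-p),
        # so summing over features gives X.dot((log p - log(1-p)).T) plus a
        # per-class constant sum of log(1-p), which is what the two lines
        # below compute.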
jll = safe_sparse_dot(X, (self.feature_log_prob_ - neg_prob).T)
jll += self.class_log_prior_ + neg_prob.sum(axis=1)
return jll
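# --- Verification sketch (editor's addition, not part of the original module) ---
# Demonstrates the online mean/variance combination documented in
# GaussianNB._update_mean_variance: folding a second chunk of data into the
# statistics of the first chunk reproduces the batch mean and (population)
# variance of the full data set. Synthetic data; a hedged sketch only.
def _demo_online_mean_variance():
    rng = np.random.RandomState(0)
    X = rng.randn(200, 3)
    first, second = X[:120], X[120:]
    mu, var = GaussianNB._update_mean_variance(
        first.shape[0], first.mean(axis=0), first.var(axis=0), second)
    assert np.allclose(mu, X.mean(axis=0))
    assert np.allclose(var, X.var(axis=0))
    return mu, var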
|
bsd-3-clause
|
kdheepak/psst
|
psst/plot_bq/bqplot_generators.py
|
1
|
8935
|
import warnings
import networkx as nx
import numpy as np
import pandas as pd
# Imports for bqplot
from IPython.display import display
from bqplot import (
OrdinalScale, LinearScale, Bars, Lines, Axis, Figure, Tooltip
)
from ipywidgets import HBox, VBox, Dropdown, Layout
class GenDispatchBars(Bars):
"""Creates bqplot Bars Mark from solved PSST model.
Parameters
----------
model : PSSTModel
The model must have been solved to be used.
selected_gen : String (Default: all)
The name of a specific generator you wish to display
bar_colors : :obj:`list` of :obj:`str` (Default: 'CATEGORY10')
List of valid HTML colors. Should be one color for each generator.
padding : Float (Default: 0.2)
Desired spacing between bars.
enable_tooltip : Boolean (Default: True)
Should info be displayed when bars are hovered over
    custom_tooltip : bqplot Tooltip
You can create a custom Ipywidgets Tooltip object and pass it as argument
Attributes
----------
selected_gen : String
The name of the generator currently being displayed.
gens : :obj:`list` of :obj:`str`
List of all generators in model, by name.
    x : numpy.ndarray
        Numpy array of x values (hour of day)
    y : numpy.ndarray
        Numpy array of y values (generator economic dispatch)
"""
def __init__(self, model, **kwargs):
# Get data from model
self._data = model.results.power_generated # type: Pandas dataframe
self._gens = list(self._data.columns) # type: list
# retrieve keyword args
self._bar_colors = kwargs.get('bar_colors')
self._selected_gen = kwargs.get('selected_gen')
self._enable_tooltip = kwargs.get('enable_tooltip', True)
self._custom_tooltip = kwargs.get('custom_tooltip')
# Set and adjust vars for bar chart
tt = self._make_tooltip()
self._x_data = range(1, len(self._data)+1)
self._y_data = []
for gen in self._gens:
self._y_data.append(self._data[gen].as_matrix())
x_sc = OrdinalScale()
y_sc = LinearScale()
# Set essential bar chart attributes
if self._bar_colors:
self.colors = self._bar_colors
self._all_colors = self.colors
self.display_legend=True
self.padding=0.2
self.scales={'x': x_sc, 'y':y_sc}
self.tooltip=tt
self.x = self._x_data
self.set_y()
# Construct bqplot Bars object
super(GenDispatchBars, self).__init__(**kwargs)
def set_y(self):
""" Called by change_selected_gen() and by __init__() to determine
the appropriate array of data to use for self.y, either all
generators or just the selected one.
"""
def _setup_y_all():
self.color_mode='auto'
self.labels = self._gens
self.y=self._y_data
self.colors = self._all_colors
def _setup_y_selected(gen_index):
self.color_mode='element'
self.labels = [self._gens[gen_index]]
self.y=self._y_data[gen_index]
try:
self.colors = [self._all_colors[gen_index]]
except IndexError:
self.colors = self._all_colors
if self._selected_gen:
try:
gen_index = self._gens.index(self._selected_gen)
_setup_y_selected(gen_index)
except ValueError:
                warnings.warn("You tried to select a non-existent generator. Displaying all.")
_setup_y_all()
else:
_setup_y_all()
def _make_tooltip(self):
"""If toolip is true, create it, either with default
settings or using custom_tooltip object."""
if self._enable_tooltip:
if self._custom_tooltip:
tt = self._custom_tooltip
else:
tt = Tooltip(fields=['x', 'y'], formats=['','.2f'],
labels=['Hour','ED'])
else:
tt = None
return tt
@property
def selected_gen(self):
return self._selected_gen
@selected_gen.setter
def selected_gen(self, gen_name):
self._selected_gen = gen_name
self.set_y()
@property
def gens(self):
return self._gens
@property
def data(self):
return self._data
@property
def enable_tooltip(self):
return self._enable_tooltip
@enable_tooltip.setter
def enable_tooltip(self, value):
if value == True:
self._enable_tooltip = True
elif value == False:
self._enable_tooltip = False
else:
print("Note: You tried setting enable_tooltip to something "
"other than a boolean, so the value did not change.")
tt = self._make_tooltip()
self.tooltip = tt
@property
def custom_tooltip(self):
return self._custom_tooltip
@custom_tooltip.setter
def custom_tooltip(self, custom_tt):
self._custom_tooltip = custom_tt
tt = self._make_tooltip()
self.tooltip = tt
class GenDispatchFigure(Figure):
"""Creates a bqplot Figure from solved PSST model, containing bars.
Parameters
----------
model : PSSTModel
The model must have been solved to be used.
selected_gen : String (Default: all)
The name of a specific generator you wish to display
bar_colors : :obj:`list` of :obj:`str` (Default: 'CATEGORY10')
List of valid HTML colors. Should be one color for each generator.
padding : Float (Default: 0.2)
Desired spacing between bars.
enable_tooltip : Boolean (Default: True)
Should info be displayed when bars are hovered over
    custom_tooltip : bqplot Tooltip
You can create a custom Ipywidgets Tooltip object and pass it as argument
x_label : String (Default: 'Hour')
Label for the figure's x axis
y_label : String (Default: '')
Label for the figure's y axis
additional_marks : :obj:`list` of :obj:`bqplot.Mark`
Add additional bqplot marks to integrate into the Figure
Attributes
----------
bars : bqplot Bars Mark
Can access attribtues like bars.selected_gen, bars.x, and bars.y.
"""
def __init__(self, model, **kwargs):
# Make Bars Marks
self.bars = GenDispatchBars(model, **kwargs)
# Get additional kwargs
x_label = kwargs.get('x_label', 'Hour')
y_label = kwargs.get('y_label', 'Power generated (MW)')
self.custom_title = kwargs.get('custom_title', None)
additional_marks = kwargs.get('additional_marks', [])
# Prepare values
x_sc = self.bars.scales['x']
y_sc = self.bars.scales['y']
ax_x = Axis(scale=x_sc, grid_lines='solid',
label=x_label, num_ticks=24)
ax_y = Axis(scale=y_sc, orientation='vertical',
tick_format='0.2f',
grid_lines='solid', label=y_label)
fig_title = self._make_title()
# Set key attribtues for Figure creation
self.axes = [ax_x, ax_y]
self.title = fig_title
self.marks = [self.bars] + additional_marks
self.animation_duration = 500
# Construct Figure object
super(GenDispatchFigure, self).__init__(**kwargs)
def _make_title(self):
selected_gen = self.bars.selected_gen
if self.custom_title:
fig_title = self.custom_title
else:
fig_title = 'Economic Dispatch for Generators'
if selected_gen:
fig_title = fig_title.replace('Generators', selected_gen)
return fig_title
def change_selected_gen(self, gen_name):
self.bars.selected_gen = gen_name
self.title = self._make_title()
class GenDispatchWidget(VBox):
""" Make intereactive dispatch plot
"""
def __init__(self, model, **kwargs):
# Make Figure with bar chart
self.figure = GenDispatchFigure(model, **kwargs)
# Prepare atts
gens = self.figure.bars.gens
options = ['All'] + gens
# Define Dropdown Menu And Callback Fcn
self.dropdown = Dropdown(
options=options,
value=options[0],
description='Generator:',
)
# Setup callback function, for dropdown selection
def gen_changed(change):
if change.new == 'All':
self.figure.change_selected_gen(None)
else:
self.figure.change_selected_gen(change.new)
self.dropdown.observe(gen_changed, 'value')
super(VBox, self).__init__(children=[self.dropdown, self.figure],
layout=Layout(align_items='center',width='100%',height='100%'),
**kwargs)
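# --- Usage sketch (editor's addition, not part of the original module) ---
# Shows how the widget above might be displayed in a Jupyter notebook.
# ``model`` is assumed to be a PSSTModel that has already been solved, as the
# class docstrings above require; nothing here is executed on import.
def _demo_dispatch_widget(model):
    widget = GenDispatchWidget(model)
    display(widget)
    return widget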
|
mit
|
cybernet14/scikit-learn
|
sklearn/ensemble/tests/test_partial_dependence.py
|
365
|
6996
|
"""
Testing for the partial dependence module.
"""
import numpy as np
from numpy.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import if_matplotlib
from sklearn.ensemble.partial_dependence import partial_dependence
from sklearn.ensemble.partial_dependence import plot_partial_dependence
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn import datasets
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the boston dataset
boston = datasets.load_boston()
# also load the iris dataset
iris = datasets.load_iris()
def test_partial_dependence_classifier():
# Test partial dependence for classifier
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
pdp, axes = partial_dependence(clf, [0], X=X, grid_resolution=5)
# only 4 grid points instead of 5 because only 4 unique X[:,0] vals
assert pdp.shape == (1, 4)
assert axes[0].shape[0] == 4
# now with our own grid
X_ = np.asarray(X)
grid = np.unique(X_[:, 0])
pdp_2, axes = partial_dependence(clf, [0], grid=grid)
assert axes is None
assert_array_equal(pdp, pdp_2)
def test_partial_dependence_multiclass():
# Test partial dependence for multi-class classifier
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, iris.target)
grid_resolution = 25
n_classes = clf.n_classes_
pdp, axes = partial_dependence(
clf, [0], X=iris.data, grid_resolution=grid_resolution)
assert pdp.shape == (n_classes, grid_resolution)
assert len(axes) == 1
assert axes[0].shape[0] == grid_resolution
def test_partial_dependence_regressor():
# Test partial dependence for regressor
clf = GradientBoostingRegressor(n_estimators=10, random_state=1)
clf.fit(boston.data, boston.target)
grid_resolution = 25
pdp, axes = partial_dependence(
clf, [0], X=boston.data, grid_resolution=grid_resolution)
assert pdp.shape == (1, grid_resolution)
assert axes[0].shape[0] == grid_resolution
def test_partial_dependecy_input():
# Test input validation of partial dependence.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
assert_raises(ValueError, partial_dependence,
clf, [0], grid=None, X=None)
assert_raises(ValueError, partial_dependence,
clf, [0], grid=[0, 1], X=X)
# first argument must be an instance of BaseGradientBoosting
assert_raises(ValueError, partial_dependence,
{}, [0], X=X)
# Gradient boosting estimator must be fit
assert_raises(ValueError, partial_dependence,
GradientBoostingClassifier(), [0], X=X)
assert_raises(ValueError, partial_dependence, clf, [-1], X=X)
assert_raises(ValueError, partial_dependence, clf, [100], X=X)
# wrong ndim for grid
grid = np.random.rand(10, 2, 1)
assert_raises(ValueError, partial_dependence, clf, [0], grid=grid)
@if_matplotlib
def test_plot_partial_dependence():
# Test partial dependence plot function.
clf = GradientBoostingRegressor(n_estimators=10, random_state=1)
clf.fit(boston.data, boston.target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, boston.data, [0, 1, (0, 1)],
grid_resolution=grid_resolution,
feature_names=boston.feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
# check with str features and array feature names
fig, axs = plot_partial_dependence(clf, boston.data, ['CRIM', 'ZN',
('CRIM', 'ZN')],
grid_resolution=grid_resolution,
feature_names=boston.feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
# check with list feature_names
feature_names = boston.feature_names.tolist()
fig, axs = plot_partial_dependence(clf, boston.data, ['CRIM', 'ZN',
('CRIM', 'ZN')],
grid_resolution=grid_resolution,
feature_names=feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
@if_matplotlib
def test_plot_partial_dependence_input():
# Test partial dependence plot function input checks.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
# not fitted yet
assert_raises(ValueError, plot_partial_dependence,
clf, X, [0])
clf.fit(X, y)
assert_raises(ValueError, plot_partial_dependence,
clf, np.array(X)[:, :0], [0])
# first argument must be an instance of BaseGradientBoosting
assert_raises(ValueError, plot_partial_dependence,
{}, X, [0])
# must be larger than -1
assert_raises(ValueError, plot_partial_dependence,
clf, X, [-1])
# too large feature value
assert_raises(ValueError, plot_partial_dependence,
clf, X, [100])
# str feature but no feature_names
assert_raises(ValueError, plot_partial_dependence,
clf, X, ['foobar'])
# not valid features value
assert_raises(ValueError, plot_partial_dependence,
clf, X, [{'foo': 'bar'}])
@if_matplotlib
def test_plot_partial_dependence_multiclass():
# Test partial dependence plot function on multi-class input.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, iris.target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, iris.data, [0, 1],
label=0,
grid_resolution=grid_resolution)
assert len(axs) == 2
assert all(ax.has_data for ax in axs)
# now with symbol labels
target = iris.target_names[iris.target]
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, iris.data, [0, 1],
label='setosa',
grid_resolution=grid_resolution)
assert len(axs) == 2
assert all(ax.has_data for ax in axs)
# label not in gbrt.classes_
assert_raises(ValueError, plot_partial_dependence,
clf, iris.data, [0, 1], label='foobar',
grid_resolution=grid_resolution)
# label not provided
assert_raises(ValueError, plot_partial_dependence,
clf, iris.data, [0, 1],
grid_resolution=grid_resolution)
|
bsd-3-clause
|
semio/ddf_utils
|
ddf_utils/factory/clio_infra.py
|
1
|
2872
|
# -*- coding: utf-8 -*-
"""functions for scraping data from clio infra website.
Source link: `Clio-infra website`_
.. _`Clio-infra website`: https://www.clio-infra.eu/index.html
"""
import os.path as osp
import pandas as pd
from lxml import etree
import requests
from urllib.parse import urljoin
from .common import DataFactory
class ClioInfraLoader(DataFactory):
url = 'https://clio-infra.eu/index.html'
def _get_home_page(self, url):
response = requests.get(url, verify=False)
content = response.content
tree = etree.fromstring(content, parser=etree.HTMLParser())
return tree
def has_newer_source(self, ver):
print('there is no version info in this site.')
raise NotImplementedError
def load_metadata(self):
tree = self._get_home_page(self.url)
elem = tree.xpath('//div[@class="col-sm-4"]/div[@class="list-group"]/p[@class="list-group-item"]')
res1 = {}
res2 = {}
for e in elem:
try:
name = e.find('a').text
link = e.find('*/a').attrib['href']
if '../data' in link: # it's indicator file
res1[name] = link
else: # it's country file
res2[name] = link
            except (AttributeError, KeyError):  # element without a usable link
name = e.text
res2[name] = ''
# create the metadata dataframe
md_dataset = pd.DataFrame(columns=['name', 'url', 'type'])
md_dataset['name'] = list(res1.keys())
md_dataset['url'] = list(res1.values())
md_dataset['type'] = 'dataset'
md_country = pd.DataFrame(columns=['name', 'url', 'type'])
md_country['name'] = list(res2.keys())
md_country['url'] = list(res2.values())
md_country['type'] = 'country'
self.metadata = pd.concat([md_dataset, md_country], ignore_index=True)
return self.metadata
def bulk_download(self, out_dir, data_type=None):
if self.metadata is None:
self.load_metadata()
metadata = self.metadata
if data_type:
to_download = metadata[metadata['type'] == data_type]
else:
to_download = metadata
for i, row in to_download.iterrows():
name = row['name']
path = row['url']
file_url = urljoin(self.url, path)
res = requests.get(file_url, stream=True, verify=False)
fn = osp.join(out_dir, f'{name}.xlsx')
print("downloading {} to {}".format(file_url, fn))
with open(fn, 'wb') as f:
for chunk in res.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
f.flush()
f.close()
print('Done downloading source files.')
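# --- Usage sketch (editor's addition, not part of the original module) ---
# Shows how the loader above might be used to fetch every indicator file. The
# no-argument construction is an assumption: ClioInfraLoader inherits its
# constructor from DataFactory, whose signature is not shown here.
def _demo_bulk_download(out_dir='./clio_infra_data'):
    loader = ClioInfraLoader()          # assumed no-arg construction
    metadata = loader.load_metadata()   # DataFrame with name / url / type columns
    print(metadata.head())
    loader.bulk_download(out_dir, data_type='dataset')  # indicator files only
    return metadata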
|
mit
|
mpatacchiola/dissecting-reinforcement-learning
|
src/6/inverted-pendulum/montecarlo_control_inverted_pendulum.py
|
1
|
11299
|
#!/usr/bin/env python
#MIT License
#Copyright (c) 2017 Massimiliano Patacchiola
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
#Example of Monte Carlo methods for control.
#In this example I will use the class InvertedPendulum to generate an
#inverted-pendulum environment to balance. Using the Monte Carlo method
#I will estimate the policy and the state-action matrix of each state.
import numpy as np
from inverted_pendulum import InvertedPendulum
import matplotlib.pyplot as plt
def print_policy(policy_matrix):
"""Print the policy using specific symbol.
O noop, < left, > right
"""
counter = 0
shape = policy_matrix.shape
policy_string = ""
for row in range(shape[0]):
for col in range(shape[1]):
if(policy_matrix[row,col] == 0): policy_string += " < "
elif(policy_matrix[row,col] == 1): policy_string += " O "
elif(policy_matrix[row,col] == 2): policy_string += " > "
counter += 1
policy_string += '\n'
print(policy_string)
def get_return(state_list, gamma):
"""Get the return for a list of action-state values.
@return get the Return
"""
counter = 0
return_value = 0
for visit in state_list:
reward = visit[2]
return_value += reward * np.power(gamma, counter)
counter += 1
return return_value
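# Worked example (editor's note): for an episode with rewards [1.0, 0.0, 1.0]
# and gamma=0.5, get_return gives 1.0*0.5**0 + 0.0*0.5**1 + 1.0*0.5**2 = 1.25.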
def update_policy(episode_list, policy_matrix, state_action_matrix, tot_bins):
"""Update a policy making it greedy in respect of the state-action matrix.
@return the updated policy
"""
for visit in episode_list:
observation = visit[0]
col = observation[1] + (observation[0]*tot_bins)
if(policy_matrix[observation[0], observation[1]] != -1):
policy_matrix[observation[0], observation[1]] = \
np.argmax(state_action_matrix[:,col])
return policy_matrix
def return_decayed_value(starting_value, minimum_value, global_step, decay_step):
"""Returns the decayed value.
decayed_value = starting_value * decay_rate ^ (global_step / decay_steps)
@param starting_value the value before decaying
@param global_step the global step to use for decay (positive integer)
@param decay_step the step at which the value is decayed
"""
decayed_value = starting_value * np.power(0.9, (global_step/decay_step))
if decayed_value < minimum_value:
return minimum_value
else:
return decayed_value
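# Worked example (editor's note): with starting_value=0.99, minimum_value=0.1
# and decay_step=10000, return_decayed_value gives 0.99*0.9**1 = 0.891 at
# global_step=10000 and 0.99*0.9**2 = 0.8019 at global_step=20000; once the
# decayed value would drop below 0.1, the function returns 0.1 instead.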
def return_epsilon_greedy_action(policy_matrix, observation, epsilon=0.1):
"""Return an action choosing it with epsilon-greedy
@param policy_matrix the matrix before the update
@param observation the state obsrved at t
@param epsilon the value used for computing the probabilities
@return the updated policy_matrix
"""
tot_actions = int(np.nanmax(policy_matrix) + 1)
action = int(policy_matrix[observation[0], observation[1]])
non_greedy_prob = epsilon / tot_actions
greedy_prob = 1 - epsilon + non_greedy_prob
weight_array = np.full((tot_actions), non_greedy_prob)
weight_array[action] = greedy_prob
return np.random.choice(tot_actions, 1, p=weight_array)
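# Worked example (editor's note): with 3 actions and epsilon=0.1 the two
# non-greedy actions each get probability 0.1/3 ~= 0.0333 and the greedy
# action gets 1 - 0.1 + 0.1/3 ~= 0.9333, so the weights sum to 1.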
def plot_curve(data_list, filepath="./my_plot.png",
x_label="X", y_label="Y",
x_range=(0, 1), y_range=(0,1), color="-r", kernel_size=50, alpha=0.4, grid=True):
"""Plot a graph using matplotlib
"""
if(len(data_list) <=1):
print("[WARNING] the data list is empty, no plot will be saved.")
return
fig = plt.figure()
ax = fig.add_subplot(111, autoscale_on=False, xlim=x_range, ylim=y_range)
ax.grid(grid)
ax.set_xlabel(x_label)
ax.set_ylabel(y_label)
    ax.plot(data_list, color, alpha=alpha) # The original data is shown in the background
kernel = np.ones(int(kernel_size))/float(kernel_size) # Smooth the graph using a convolution
tot_data = len(data_list)
lower_boundary = int(kernel_size/2.0)
upper_boundary = int(tot_data-(kernel_size/2.0))
data_convolved_array = np.convolve(data_list, kernel, 'same')[lower_boundary:upper_boundary]
#print("arange: " + str(np.arange(tot_data)[lower_boundary:upper_boundary]))
#print("Convolved: " + str(np.arange(tot_data).shape))
ax.plot(np.arange(tot_data)[lower_boundary:upper_boundary], data_convolved_array, color, alpha=1.0) # Convolved plot
fig.savefig(filepath)
fig.clear()
plt.close(fig)
# print(plt.get_fignums()) # print the number of figures opened in background
def main():
env = InvertedPendulum(pole_mass=2.0, cart_mass=8.0, pole_lenght=0.5, delta_t=0.1)
# Define the state arrays for velocity and position
tot_action = 3 # Three possible actions
tot_bins = 12 # the value used to discretize the space
velocity_state_array = np.linspace(-np.pi, np.pi, num=tot_bins-1, endpoint=False)
position_state_array = np.linspace(-np.pi/2.0, np.pi/2.0, num=tot_bins-1, endpoint=False)
#Random policy
policy_matrix = np.random.randint(low=0, high=tot_action, size=(tot_bins,tot_bins))
print("Policy Matrix:")
print_policy(policy_matrix)
state_action_matrix = np.zeros((tot_action, tot_bins*tot_bins))
#init with 1.0e-10 to avoid division by zero
running_mean_matrix = np.full((tot_action, tot_bins*tot_bins), 1.0e-10)
gamma = 0.999
tot_episode = 500000 # 500k
epsilon_start = 0.99 # those are the values for epsilon decay
epsilon_stop = 0.1
epsilon_decay_step = 10000
print_episode = 500 # print every...
movie_episode = 20000 # movie saved every...
reward_list = list()
step_list = list()
for episode in range(tot_episode):
epsilon = return_decayed_value(epsilon_start, epsilon_stop, episode, decay_step=epsilon_decay_step)
#Starting a new episode
episode_list = list()
#Reset and return the first observation and reward
observation = env.reset(exploring_starts=True)
observation = (np.digitize(observation[1], velocity_state_array),
np.digitize(observation[0], position_state_array))
#action = np.random.choice(4, 1)
#action = policy_matrix[observation[0], observation[1]]
#episode_list.append((observation, action, reward))
is_starting = True
cumulated_reward = 0
for step in range(100):
#Take the action from the action matrix
action = return_epsilon_greedy_action(policy_matrix, observation, epsilon=epsilon)
#If the episode just started then it is
#necessary to choose a random action (exploring starts)
if(is_starting):
action = np.random.randint(0, tot_action)
is_starting = False
#if(episode % print_episode == 0):
# print("Step: " + str(step) + "; Action: " + str(action) + "; Angle: " + str(observation[0]) + "; Velocity: " + str(observation[1]))
#Move one step in the environment and get obs and reward
new_observation, reward, done = env.step(action)
new_observation = (np.digitize(new_observation[1], velocity_state_array),
np.digitize(new_observation[0], position_state_array))
#Append the visit in the episode list
episode_list.append((observation, action, reward))
observation = new_observation
cumulated_reward += reward
if done: break
#The episode is finished, now estimating the utilities
counter = 0
#Checkup to identify if it is the first visit to a state
checkup_matrix = np.zeros((tot_action, tot_bins*tot_bins))
#This cycle is the implementation of First-Visit MC.
#For each state stored in the episode list check if it
        #is the first visit and then estimate the return.
for visit in episode_list:
observation = visit[0]
action = visit[1]
col = observation[1] + (observation[0]*tot_bins)
row = action
if(checkup_matrix[row, col] == 0):
return_value = get_return(episode_list[counter:], gamma)
running_mean_matrix[row, col] += 1
state_action_matrix[row, col] += return_value
checkup_matrix[row, col] = 1
counter += 1
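        # state_action_matrix accumulates the sum of first-visit returns and
        # running_mean_matrix counts the visits, so their element-wise ratio
        # passed to update_policy below is the Monte Carlo estimate of Q(s, a).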
#Policy Update
policy_matrix = update_policy(episode_list,
policy_matrix,
state_action_matrix/running_mean_matrix,
tot_bins)
# Store the data for statistics
reward_list.append(cumulated_reward)
step_list.append(step)
# Printing utilities
if(episode % print_episode == 0):
print("")
print("Episode: " + str(episode+1))
print("Epsilon: " + str(epsilon))
print("Episode steps: " + str(step+1))
print("Cumulated Reward: " + str(cumulated_reward))
print("Policy matrix: ")
print_policy(policy_matrix)
if(episode % movie_episode == 0):
print("Saving the reward plot in: ./reward.png")
plot_curve(reward_list, filepath="./reward.png",
x_label="Episode", y_label="Reward",
x_range=(0, len(reward_list)), y_range=(-0.1,100),
color="red", kernel_size=500,
alpha=0.4, grid=True)
print("Saving the step plot in: ./step.png")
plot_curve(step_list, filepath="./step.png",
x_label="Episode", y_label="Steps",
x_range=(0, len(step_list)), y_range=(-0.1,100),
color="blue", kernel_size=500,
alpha=0.4, grid=True)
print("Saving the gif in: ./inverted_pendulum.gif")
env.render(file_path='./inverted_pendulum.gif', mode='gif')
print("Complete!")
print("Policy matrix after " + str(tot_episode) + " episodes:")
print_policy(policy_matrix)
if __name__ == "__main__":
main()
|
mit
|
anntzer/scikit-learn
|
sklearn/feature_selection/tests/test_variance_threshold.py
|
14
|
2096
|
import numpy as np
import pytest
from sklearn.utils._testing import assert_array_equal
from scipy.sparse import bsr_matrix, csc_matrix, csr_matrix
from sklearn.feature_selection import VarianceThreshold
data = [[0, 1, 2, 3, 4],
[0, 2, 2, 3, 5],
[1, 1, 2, 4, 0]]
data2 = [[-0.13725701]] * 10
def test_zero_variance():
# Test VarianceThreshold with default setting, zero variance.
for X in [data, csr_matrix(data), csc_matrix(data), bsr_matrix(data)]:
sel = VarianceThreshold().fit(X)
assert_array_equal([0, 1, 3, 4], sel.get_support(indices=True))
with pytest.raises(ValueError):
VarianceThreshold().fit([[0, 1, 2, 3]])
with pytest.raises(ValueError):
VarianceThreshold().fit([[0, 1], [0, 1]])
def test_variance_threshold():
# Test VarianceThreshold with custom variance.
for X in [data, csr_matrix(data)]:
X = VarianceThreshold(threshold=.4).fit_transform(X)
assert (len(data), 1) == X.shape
@pytest.mark.skipif(np.var(data2) == 0,
reason=('This test is not valid for this platform, '
'as it relies on numerical instabilities.'))
def test_zero_variance_floating_point_error():
# Test that VarianceThreshold(0.0).fit eliminates features that have
# the same value in every sample, even when floating point errors
# cause np.var not to be 0 for the feature.
# See #13691
for X in [data2, csr_matrix(data2), csc_matrix(data2), bsr_matrix(data2)]:
msg = "No feature in X meets the variance threshold 0.00000"
with pytest.raises(ValueError, match=msg):
VarianceThreshold().fit(X)
def test_variance_nan():
arr = np.array(data, dtype=np.float64)
# add single NaN and feature should still be included
arr[0, 0] = np.NaN
# make all values in feature NaN and feature should be rejected
arr[:, 1] = np.NaN
for X in [arr, csr_matrix(arr), csc_matrix(arr), bsr_matrix(arr)]:
sel = VarianceThreshold().fit(X)
assert_array_equal([0, 3, 4], sel.get_support(indices=True))
|
bsd-3-clause
|
bsmurphy/PyKrige
|
tests/test_classification_krige.py
|
1
|
3274
|
from itertools import product
import pytest
import numpy as np
from pykrige.ck import ClassificationKriging
try:
from sklearn.svm import SVC
from sklearn.datasets import fetch_california_housing
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import KBinsDiscretizer
from sklearn.model_selection import train_test_split
SKLEARN_INSTALLED = True
except ImportError:
SKLEARN_INSTALLED = False
def _methods():
krige_methods = ["ordinary", "universal"]
ml_methods = [
SVC(C=0.01, gamma="auto", probability=True),
RandomForestClassifier(n_estimators=50),
]
return product(ml_methods, krige_methods)
@pytest.mark.skipif(not SKLEARN_INSTALLED, reason="requires scikit-learn")
def test_classification_krige():
np.random.seed(1)
x = np.linspace(-1.0, 1.0, 100)
# create a feature matrix with 5 features
X = np.tile(x, reps=(5, 1)).T
y = (
1
+ 5 * X[:, 0]
- 2 * X[:, 1]
- 2 * X[:, 2]
+ 3 * X[:, 3]
+ 4 * X[:, 4]
+ 2 * (np.random.rand(100) - 0.5)
)
# create lat/lon array
lon = np.linspace(-180.0, 180.0, 10)
lat = np.linspace(-90.0, 90.0, 10)
lon_lat = np.array(list(product(lon, lat)))
discretizer = KBinsDiscretizer(encode="ordinal")
y = discretizer.fit_transform(y.reshape(-1, 1))
X_train, X_test, y_train, y_test, lon_lat_train, lon_lat_test = train_test_split(
X, y, lon_lat, train_size=0.7, random_state=10
)
for ml_model, krige_method in _methods():
class_model = ClassificationKriging(
classification_model=ml_model, method=krige_method, n_closest_points=2
)
class_model.fit(X_train, lon_lat_train, y_train)
assert class_model.score(X_test, lon_lat_test, y_test) > 0.25
@pytest.mark.skipif(not SKLEARN_INSTALLED, reason="requires scikit-learn")
def test_krige_classification_housing():
import ssl
import urllib
try:
housing = fetch_california_housing()
except (ssl.SSLError, urllib.error.URLError):
ssl._create_default_https_context = ssl._create_unverified_context
try:
housing = fetch_california_housing()
except PermissionError:
# This can raise permission error on Appveyor
pytest.skip("Failed to load california housing dataset")
ssl._create_default_https_context = ssl.create_default_context
# take only first 1000
p = housing["data"][:1000, :-2]
x = housing["data"][:1000, -2:]
target = housing["target"][:1000]
discretizer = KBinsDiscretizer(encode="ordinal")
target = discretizer.fit_transform(target.reshape(-1, 1))
p_train, p_test, y_train, y_test, x_train, x_test = train_test_split(
p, target, x, train_size=0.7, random_state=10
)
for ml_model, krige_method in _methods():
class_model = ClassificationKriging(
classification_model=ml_model, method=krige_method, n_closest_points=2
)
class_model.fit(p_train, x_train, y_train)
if krige_method == "ordinary":
assert class_model.score(p_test, x_test, y_test) > 0.5
else:
assert class_model.score(p_test, x_test, y_test) > 0.0
|
bsd-3-clause
|
djgagne/scikit-learn
|
examples/plot_kernel_ridge_regression.py
|
230
|
6222
|
"""
=============================================
Comparison of kernel ridge regression and SVR
=============================================
Both kernel ridge regression (KRR) and SVR learn a non-linear function by
employing the kernel trick, i.e., they learn a linear function in the space
induced by the respective kernel which corresponds to a non-linear function in
the original space. They differ in the loss functions (ridge versus
epsilon-insensitive loss). In contrast to SVR, fitting a KRR can be done in
closed-form and is typically faster for medium-sized datasets. On the other
hand, the learned model is non-sparse and thus slower than SVR at
prediction-time.
This example illustrates both methods on an artificial dataset, which
consists of a sinusoidal target function and strong noise added to every fifth
datapoint. The first figure compares the learned model of KRR and SVR when both
complexity/regularization and bandwidth of the RBF kernel are optimized using
grid-search. The learned functions are very similar; however, fitting KRR is
approx. seven times faster than fitting SVR (both with grid-search). However,
prediction of 100000 target values is more than three times faster with SVR
since it has learned a sparse model using only approx. 1/3 of the 100 training
datapoints as support vectors.
The next figure compares the time for fitting and prediction of KRR and SVR for
different sizes of the training set. Fitting KRR is faster than SVR for medium-
sized training sets (less than 1000 samples); however, for larger training sets
SVR scales better. With regard to prediction time, SVR is faster than
KRR for all sizes of the training set because of the learned sparse
solution. Note that the degree of sparsity and thus the prediction time depends
on the parameters epsilon and C of the SVR.
"""
# Authors: Jan Hendrik Metzen <[email protected]>
# License: BSD 3 clause
from __future__ import division
import time
import numpy as np
from sklearn.svm import SVR
from sklearn.grid_search import GridSearchCV
from sklearn.learning_curve import learning_curve
from sklearn.kernel_ridge import KernelRidge
import matplotlib.pyplot as plt
rng = np.random.RandomState(0)
#############################################################################
# Generate sample data
X = 5 * rng.rand(10000, 1)
y = np.sin(X).ravel()
# Add noise to targets
y[::5] += 3 * (0.5 - rng.rand(X.shape[0]/5))
X_plot = np.linspace(0, 5, 100000)[:, None]
#############################################################################
# Fit regression model
train_size = 100
svr = GridSearchCV(SVR(kernel='rbf', gamma=0.1), cv=5,
param_grid={"C": [1e0, 1e1, 1e2, 1e3],
"gamma": np.logspace(-2, 2, 5)})
kr = GridSearchCV(KernelRidge(kernel='rbf', gamma=0.1), cv=5,
param_grid={"alpha": [1e0, 0.1, 1e-2, 1e-3],
"gamma": np.logspace(-2, 2, 5)})
t0 = time.time()
svr.fit(X[:train_size], y[:train_size])
svr_fit = time.time() - t0
print("SVR complexity and bandwidth selected and model fitted in %.3f s"
% svr_fit)
t0 = time.time()
kr.fit(X[:train_size], y[:train_size])
kr_fit = time.time() - t0
print("KRR complexity and bandwidth selected and model fitted in %.3f s"
% kr_fit)
sv_ratio = svr.best_estimator_.support_.shape[0] / train_size
print("Support vector ratio: %.3f" % sv_ratio)
t0 = time.time()
y_svr = svr.predict(X_plot)
svr_predict = time.time() - t0
print("SVR prediction for %d inputs in %.3f s"
% (X_plot.shape[0], svr_predict))
t0 = time.time()
y_kr = kr.predict(X_plot)
kr_predict = time.time() - t0
print("KRR prediction for %d inputs in %.3f s"
% (X_plot.shape[0], kr_predict))
#############################################################################
# look at the results
sv_ind = svr.best_estimator_.support_
plt.scatter(X[sv_ind], y[sv_ind], c='r', s=50, label='SVR support vectors')
plt.scatter(X[:100], y[:100], c='k', label='data')
plt.hold('on')
plt.plot(X_plot, y_svr, c='r',
label='SVR (fit: %.3fs, predict: %.3fs)' % (svr_fit, svr_predict))
plt.plot(X_plot, y_kr, c='g',
label='KRR (fit: %.3fs, predict: %.3fs)' % (kr_fit, kr_predict))
plt.xlabel('data')
plt.ylabel('target')
plt.title('SVR versus Kernel Ridge')
plt.legend()
# Visualize training and prediction time
plt.figure()
# Generate sample data
X = 5 * rng.rand(10000, 1)
y = np.sin(X).ravel()
y[::5] += 3 * (0.5 - rng.rand(X.shape[0]/5))
sizes = np.logspace(1, 4, 7)
for name, estimator in {"KRR": KernelRidge(kernel='rbf', alpha=0.1,
gamma=10),
"SVR": SVR(kernel='rbf', C=1e1, gamma=10)}.items():
train_time = []
test_time = []
for train_test_size in sizes:
t0 = time.time()
estimator.fit(X[:train_test_size], y[:train_test_size])
train_time.append(time.time() - t0)
t0 = time.time()
estimator.predict(X_plot[:1000])
test_time.append(time.time() - t0)
plt.plot(sizes, train_time, 'o-', color="r" if name == "SVR" else "g",
label="%s (train)" % name)
plt.plot(sizes, test_time, 'o--', color="r" if name == "SVR" else "g",
label="%s (test)" % name)
plt.xscale("log")
plt.yscale("log")
plt.xlabel("Train size")
plt.ylabel("Time (seconds)")
plt.title('Execution Time')
plt.legend(loc="best")
# Visualize learning curves
plt.figure()
svr = SVR(kernel='rbf', C=1e1, gamma=0.1)
kr = KernelRidge(kernel='rbf', alpha=0.1, gamma=0.1)
train_sizes, train_scores_svr, test_scores_svr = \
learning_curve(svr, X[:100], y[:100], train_sizes=np.linspace(0.1, 1, 10),
scoring="mean_squared_error", cv=10)
train_sizes_abs, train_scores_kr, test_scores_kr = \
learning_curve(kr, X[:100], y[:100], train_sizes=np.linspace(0.1, 1, 10),
scoring="mean_squared_error", cv=10)
plt.plot(train_sizes, test_scores_svr.mean(1), 'o-', color="r",
label="SVR")
plt.plot(train_sizes, test_scores_kr.mean(1), 'o-', color="g",
label="KRR")
plt.xlabel("Train size")
plt.ylabel("Mean Squared Error")
plt.title('Learning curves')
plt.legend(loc="best")
plt.show()
|
bsd-3-clause
|
ipashchenko/ml4vs
|
ml4vs/gb_nocv.py
|
1
|
8301
|
# -*- coding: utf-8 -*-
import os
import numpy as np
from sklearn.metrics import average_precision_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score
from sklearn.metrics import roc_auc_score, f1_score
import xgboost as xgb
from hyperopt import hp, fmin, tpe, STATUS_OK, Trials
from sklearn.model_selection import StratifiedKFold
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import Imputer
from data_load import load_data, load_data_tgt
# Load data
data_dir = '/home/ilya/code/ml4vs/data/LMC_SC20__corrected_list_of_variables/raw_index_values'
file_1 = 'vast_lightcurve_statistics_variables_only.log'
file_0 = 'vast_lightcurve_statistics_constant_only.log'
file_0 = os.path.join(data_dir, file_0)
file_1 = os.path.join(data_dir, file_1)
names = ['Magnitude', 'clipped_sigma', 'meaningless_1', 'meaningless_2',
'star_ID', 'weighted_sigma', 'skew', 'kurt', 'I', 'J', 'K', 'L',
'Npts', 'MAD', 'lag1', 'RoMS', 'rCh2', 'Isgn', 'Vp2p', 'Jclp', 'Lclp',
'Jtim', 'Ltim', 'CSSD', 'Ex', 'inv_eta', 'E_A', 'S_B', 'NXS', 'IQR']
# names_to_delete = ['Magnitude', 'meaningless_1', 'meaningless_2', 'star_ID',
# 'Npts', 'CSSD']
names_to_delete = ['meaningless_1', 'meaningless_2', 'star_ID',
'Npts', 'CSSD', 'clipped_sigma', 'lag1', 'L', 'Lclp', 'Jclp',
'MAD', 'Ltim']
X, y, df, features_names, delta = load_data([file_0, file_1], names, names_to_delete)
target = 'variable'
predictors = list(df)
predictors.remove(target)
dtrain = df
kfold = StratifiedKFold(n_splits=4, shuffle=True, random_state=123)
def xg_f1(y, t):
t = t.get_label()
    # Binarize the predicted probabilities at a 0.5 threshold
y_bin = [1. if y_cont > 0.5 else 0. for y_cont in y]
return 'f1', 1-f1_score(t, y_bin)
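# xg_f1 returns 1 - F1 so that smaller is better, since xgboost's early stopping
# treats a custom eval metric as something to minimize unless told otherwise.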
def objective(space):
clf = xgb.XGBClassifier(n_estimators=10000, learning_rate=0.1,
max_depth=space['max_depth'],
min_child_weight=space['min_child_weight'],
subsample=space['subsample'],
colsample_bytree=space['colsample_bytree'],
colsample_bylevel=space['colsample_bylevel'],
gamma=space['gamma'],
scale_pos_weight=space['scale_pos_weight'])
# scale_pos_weight=space['scale_pos_weight'])
# Try using pipeline
estimators = list()
estimators.append(('imputer', Imputer(missing_values='NaN', strategy='median',
axis=0, verbose=2)))
# estimators.append(('scaler', StandardScaler()))
estimators.append(('clf', clf))
pipeline = Pipeline(estimators)
best_n = ""
# CMs = list()
aprs = list()
for train_indx, test_indx in kfold.split(dtrain[predictors].index,
dtrain['variable']):
train = dtrain.iloc[train_indx]
valid = dtrain.iloc[test_indx]
# X_test
valid_ = valid[predictors]
# X_train
train_ = train[predictors]
for name, transform in pipeline.steps[:-1]:
transform.fit(train_)
# X_test
valid_ = transform.transform(valid_)
# X_train
train_ = transform.transform(train_)
eval_set = [(train_, train['variable']),
(valid_, valid['variable'])]
# TODO: Try ES on default eval. metric or AUC!!!
pipeline.fit(train[predictors], train['variable'],
clf__eval_set=eval_set, clf__eval_metric="map",
# clf__eval_set=eval_set, clf__eval_metric=xg_f1,
clf__early_stopping_rounds=50)
pred = pipeline.predict_proba(valid[predictors])[:, 1]
aps = average_precision_score(valid['variable'], pred)
aprs.append(aps)
# CMs.append(confusion_matrix(y[test_indx], pred))
best_n = best_n + " " + str(clf.best_ntree_limit)
# CM = np.sum(CMs, axis=0)
# FN = CM[1][0]
# TP = CM[1][1]
# FP = CM[0][1]
# print "TP = {}".format(TP)
# print "FP = {}".format(FP)
# print "FN = {}".format(FN)
# f1 = 2. * TP / (2. * TP + FP + FN)
APR = np.mean(aprs)
print "=== APR : {} ===".format(APR)
return{'loss': 1-APR, 'status': STATUS_OK ,
'attachments': {'best_n': best_n}}
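# The best_ntree_limit found in each CV fold is collected as a space-separated
# string and stored as a trial attachment, so the final model below can be refit
# with n_estimators derived from the largest of these values.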
space ={
'max_depth': hp.choice("x_max_depth", np.arange(5, 12, 1, dtype=int)),
'min_child_weight': hp.quniform('x_min_child', 1, 20, 1),
'subsample': hp.quniform('x_subsample', 0.5, 1, 0.025),
'colsample_bytree': hp.quniform('x_csbtree', 0.25, 1, 0.025),
'colsample_bylevel': hp.quniform('x_csblevel', 0.25, 1, 0.025),
'gamma': hp.quniform('x_gamma', 0.0, 1, 0.025),
'scale_pos_weight': hp.qloguniform('x_spweight', 0, 6, 1),
# 'lr': hp.quniform('lr', 0.001, 0.5, 0.025)
# 'lr': hp.loguniform('lr', -7, -1)
}
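# Note: for hp.choice parameters, fmin returns the index of the selected option
# rather than the option itself, which is why hyperopt.space_eval(space, best)
# is used below to recover the actual hyperparameter values.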
trials = Trials()
best = fmin(fn=objective,
space=space,
algo=tpe.suggest,
max_evals=500,
trials=trials)
import hyperopt
print hyperopt.space_eval(space, best)
best_pars = hyperopt.space_eval(space, best)
best_n = trials.attachments['ATTACH::{}::best_n'.format(trials.best_trial['tid'])]
best_n = max([int(n) for n in best_n.strip().split(' ')])
clf = xgb.XGBClassifier(n_estimators=int(1.25 * best_n),
learning_rate=0.1,
max_depth=best_pars['max_depth'],
min_child_weight=best_pars['min_child_weight'],
subsample=best_pars['subsample'],
colsample_bytree=best_pars['colsample_bytree'],
colsample_bylevel=best_pars['colsample_bylevel'],
gamma=best_pars['gamma'],
scale_pos_weight=best_pars['scale_pos_weight'])
estimators = list()
estimators.append(('imputer', Imputer(missing_values='NaN', strategy='median',
axis=0, verbose=2)))
estimators.append(('clf', clf))
pipeline = Pipeline(estimators)
# Fit classifier with best hyperparameters on whole data set
pipeline.fit(dtrain[predictors], dtrain['variable'])
# Load blind test data
file_tgt = 'LMC_SC19_PSF_Pgood98__vast_lightcurve_statistics.log'
file_tgt = os.path.join(data_dir, file_tgt)
X_tgt, feature_names, df, df_orig = load_data_tgt(file_tgt, names, names_to_delete,
delta)
y_probs = pipeline.predict_proba(df[predictors])[:, 1]
idx = y_probs > 0.5
idx_ = y_probs < 0.5
gb_no = list(df_orig['star_ID'][idx_])
print("Found {} variables".format(np.count_nonzero(idx)))
with open('gb_results.txt', 'w') as fo:
for line in list(df_orig['star_ID'][idx]):
fo.write(line + '\n')
# Check F1
with open('clean_list_of_new_variables.txt', 'r') as fo:
news = fo.readlines()
news = [line.strip().split(' ')[1] for line in news]
news = set(news)
with open('gb_results.txt', 'r') as fo:
gb = fo.readlines()
gb = [line.strip().split('_')[4].split('.')[0] for line in gb]
gb = set(gb)
print "Among new vars found {}".format(len(news.intersection(gb)))
with open('candidates_50perc_threshold.txt', 'r') as fo:
c50 = fo.readlines()
c50 = [line.strip("\", ', \", \n, }, {") for line in c50]
with open('variables_not_in_catalogs.txt', 'r') as fo:
not_in_cat = fo.readlines()
nic = [line.strip().split(' ')[1] for line in not_in_cat]
# Catalogue variables
cat_vars = set(c50).difference(set(nic))
# Non-catalogue variable
noncat_vars = set([line.strip().split(' ')[1] for line in not_in_cat if 'CST' not in line])
# All variables
all_vars = news.union(cat_vars).union(noncat_vars)
gb_no = set([line.strip().split('_')[4].split('.')[0] for line in gb_no])
found_bad = '181193' in gb
print "Found known variable : ", found_bad
FN = len(gb_no.intersection(all_vars))
TP = len(all_vars.intersection(gb))
TN = len(gb_no) - FN
FP = len(gb) - TP
recall = float(TP) / (TP + FN)
precision = float(TP) / (TP + FP)
F1 = 2 * precision * recall / (precision + recall)
print "precision: {}".format(precision)
print "recall: {}".format(recall)
print "F1: {}".format(F1)
print "TN={}, FP={}".format(TN, FP)
print "FN={}, TP={}".format(FN, TP)
|
mit
|
mjbrodzik/ipython_notebooks
|
charis/dehra_dun/review_ablation_surfaces.py
|
1
|
7254
|
# coding: utf-8
# ## Review ablation surfaces for 2015 team paper
# In[1]:
get_ipython().magic(u'pylab inline')
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
#pd.describe_option('display')
pd.set_option('display.max_rows', 370)
pd.set_option('display.max_columns', 70)
pd.set_option('display.width', 200)
# In[2]:
import os
os.chdir( "/Users/brodzik/ipython_notebooks/charis/dehra_dun" )
get_ipython().system(u'pwd')
# In[3]:
def get_surface_type( yyyy, surface_class, surface_type ):
import glob
list = glob.glob( "/Users/brodzik/projects/CHARIS/pdd_melt_model/2015_paper/*" + yyyy + "*" + surface_class + "*" + surface_type + ".dat")
list.sort()
from imp import reload
import hypsometry
reload(hypsometry)
import re
searchRegex = re.compile('(' + surface_class + '.\d{4})').search
labels = [ ( m.group(1) ) for l in list for m in (searchRegex(l),) if m]
# Read the first file to get the dates
area = hypsometry.Hypsometry()
area.read( list[0], verbose=True )
df = pd.DataFrame(index=area.data.index)
# Read each file in turn and total by doy
for label,file in zip(labels,list):
area.read( file, verbose=False )
df[ label ] = area.data_by_doy()
print "Done for " + yyyy + ", " + surface_class + ", " + surface_type
return( df )
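# get_surface_type globs the hypsometry .dat files matching the given year,
# surface class and surface type, labels each one by the '<CLASS>.NNNN' threshold
# string extracted from its filename, and returns a DataFrame with one column of
# daily totals per file.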
# In[4]:
def get_surfaces( yyyy, surface_type):
grsize_scag = get_surface_type( yyyy, 'GRSIZE_SCAG', surface_type )
grsize_drfs = get_surface_type( yyyy, 'GRSIZE_DRFS', surface_type )
albedo_mcd = get_surface_type( yyyy, 'ALBEDO_MCD', surface_type )
albedo_mod10a1 = get_surface_type( yyyy, 'ALBEDO_MOD10A1', surface_type )
return ( grsize_scag, grsize_drfs, albedo_mcd, albedo_mod10a1 )
# In[5]:
def display_surfaces( grsize_scag, grsize_drfs, albedo_mcd, albedo_mod10a1, yyyy, surface_type):
title = surface_type + ', ' + yyyy
fig, axes = plt.subplots(4, 1, figsize=(12,12) )
albedo_mcd.plot( title=title, ax=axes[0] ).legend(bbox_to_anchor=(1.3,1.0))
albedo_mod10a1.plot( title=title, ax=axes[1] ).legend(bbox_to_anchor=(1.3,1.0))
grsize_drfs.plot( title=title, ax=axes[2] ).legend(bbox_to_anchor=(1.3,1.0))
grsize_scag.plot( title=title, ax=axes[3] ).legend(bbox_to_anchor=(1.3,1.0))
# In[6]:
def first_and_last_column( df ):
return (df[[df.columns[0],df.columns[df.columns.size-1]]])
# In[7]:
(gscag_ablation_01, gdrfs_ablation_01, amcd_ablation_01, amod10_ablation_01 ) = get_surfaces( '2001', 'ablation_area' )
(gscag_on_ice_01, gdrfs_on_ice_01, amcd_on_ice_01, amod10_on_ice_01 ) = get_surfaces( '2001', 'snow_on_ice_area' )
(gscag_off_ice_01, gdrfs_off_ice_01, amcd_off_ice_01, amod10_off_ice_01 ) = get_surfaces( '2001', 'snow_off_ice_area' )
# In[8]:
(gscag_ablation_04, gdrfs_ablation_04, amcd_ablation_04, amod10_ablation_04 ) = get_surfaces( '2004', 'ablation_area' )
(gscag_on_ice_04, gdrfs_on_ice_04, amcd_on_ice_04, amod10_on_ice_04 ) = get_surfaces( '2004', 'snow_on_ice_area' )
(gscag_off_ice_04, gdrfs_off_ice_04, amcd_off_ice_04, amod10_off_ice_04 ) = get_surfaces( '2004', 'snow_off_ice_area' )
# In[9]:
#display_surfaces( gscag_ablation_01, gdrfs_ablation_01, amcd_ablation_01, amod10_ablation_01, '2001', 'Ablation Area')
#display_surfaces( gscag_on_ice_01, gdrfs_on_ice_01, amcd_on_ice_01, amod10_on_ice_01, '2001', 'Snow on Ice Area')
#display_surfaces( gscag_off_ice_01, gdrfs_off_ice_01, amcd_off_ice_01, amod10_ablation_01, '2001', 'Snow off Ice Area')
# In[10]:
#display_surfaces( gscag_ablation_04, gdrfs_ablation_04, amcd_ablation_04, amod10_ablation_04, '2004', 'Ablation Area')
#display_surfaces( gscag_on_ice_04, gdrfs_on_ice_04, amcd_on_ice_04, amod10_on_ice_04, '2004', 'Snow on Ice Area')
#display_surfaces( gscag_off_ice_04, gdrfs_off_ice_04, amcd_off_ice_04, amod10_ablation_04, '2004', 'Snow off Ice Area')
# In[11]:
#amcd_ablation_01[['ALBEDO_MCD.0030','ALBEDO_MCD.0047']]
#amcd_ablation_01.columns.size
#amcd_ablation_01[[amcd_ablation_01.columns[0],amcd_ablation_01.columns[amcd_ablation_01.columns.size-1]]]
#first_and_last_column(amcd_ablation_01)
#amcd_ablation_01[0:2]
# In[12]:
def mult_display_surfaces( d1, d2, d3, d4, d5, d6, d7, d8, yyyy1, yyyy2, surface_type):
title1 = surface_type + ', ' + yyyy1
title2 = surface_type + ', ' + yyyy2
fig, axes = plt.subplots(4, 2, figsize=(16,12) )
d1.plot( title=title1, ax=axes[0,0], legend=False )
d2.plot( title=title1, ax=axes[1,0], legend=False )
d3.plot( title=title1, ax=axes[2,0], legend=False )
d4.plot( title=title1, ax=axes[3,0], legend=False )
d5.plot( title=title2, ax=axes[0,1] ).legend(bbox_to_anchor=(1.5,1.0))
d6.plot( title=title2, ax=axes[1,1] ).legend(bbox_to_anchor=(1.5,1.0))
d7.plot( title=title2, ax=axes[2,1] ).legend(bbox_to_anchor=(1.5,1.0))
d8.plot( title=title2, ax=axes[3,1] ).legend(bbox_to_anchor=(1.5,1.0))
# In[13]:
mult_display_surfaces( amcd_ablation_01, amod10_ablation_01, gscag_ablation_01, gdrfs_ablation_01,
amcd_ablation_04, amod10_ablation_04, gscag_ablation_04, gdrfs_ablation_04,
'2001', '2004', 'Ablation Area')
# In[14]:
mult_display_surfaces( amcd_on_ice_01, amod10_on_ice_01, gscag_on_ice_01, gdrfs_on_ice_01,
amcd_on_ice_04, amod10_on_ice_04, gscag_on_ice_04, gdrfs_on_ice_04,
'2001', '2004', 'Snow on Ice Area')
# In[15]:
mult_display_surfaces( amcd_off_ice_01, amod10_off_ice_01, gscag_off_ice_01, gdrfs_off_ice_01,
amcd_off_ice_04, amod10_off_ice_04, gscag_off_ice_04, gdrfs_off_ice_04,
'2001', '2004', 'Snow off Ice Area')
# <h3>Values when we expanded the threshold ranges:</h3>
# In[16]:
mult_display_surfaces( first_and_last_column(amcd_ablation_01), first_and_last_column(amod10_ablation_01),
first_and_last_column(gscag_ablation_01), first_and_last_column(gdrfs_ablation_01),
first_and_last_column(amcd_ablation_04), first_and_last_column(amod10_ablation_04),
first_and_last_column(gscag_ablation_04), first_and_last_column(gdrfs_ablation_04),
'2001', '2004', 'Ablation Area')
# In[17]:
mult_display_surfaces( first_and_last_column(amcd_on_ice_01), first_and_last_column(amod10_on_ice_01),
first_and_last_column(gscag_on_ice_01), first_and_last_column(gdrfs_on_ice_01),
first_and_last_column(amcd_on_ice_04), first_and_last_column(amod10_on_ice_04),
first_and_last_column(gscag_on_ice_04), first_and_last_column(gdrfs_on_ice_04),
'2001', '2004', 'Snow on Ice Area')
# In[18]:
mult_display_surfaces( first_and_last_column(amcd_off_ice_01), first_and_last_column(amod10_off_ice_01),
first_and_last_column(gscag_off_ice_01), first_and_last_column(gdrfs_off_ice_01),
first_and_last_column(amcd_off_ice_04), first_and_last_column(amod10_off_ice_04),
first_and_last_column(gscag_off_ice_04), first_and_last_column(gdrfs_off_ice_04),
'2001', '2004', 'Snow off Ice Area')
# In[ ]:
|
apache-2.0
|
NANOGravDataManagement/bridge
|
res-plotting/ALAverage_Epochs.py
|
1
|
4179
|
# Filip Keri
# Average_Epochs.py
# A script that takes in a .par file, a .tim file, and an output directory
# As a result, it creates a new file with the TOAs compressed into epochs, stored in the output directory.
# sample input:
# python Average_Epochs.py /Users/fkeri/Desktop/B1855+09_NANOGrav_9yv0.par /Users/fkeri/Desktop/B1855+09_NANOGrav_9yv0.tim /Users/fkeri/Desktop/
# we can see that it takes 3 command-line arguments: [INPUT .par FILE], [INPUT .tim FILE], [OUTPUT DIRECTORY]
# the output file will have the same name as the input file, with "Output_for_plotting_" as a prefix: "Output_for_plotting_B1855+09_NANOGrav_9yv0.tim"
# it is possible to name the output file differently by putting the file name in [OUTPUT DIRECTORY]: /Users/fkeri/Desktop/filename.tim
import os
import sys
import math
import json
import numpy as N
import matplotlib.pyplot as P
import libstempo as T
def freq_idx( F ):
freq=[ [100, 360], [361, 600], [601, 999], [1000, 2100], [2101, 3000] ]
for i in range( 5 ):
if ( F >= freq[ i ][ 0 ] ) and ( F <= freq[ i ][ 1 ] ):
return i
return 0
F = [ 350, 450, 800, 1400, 2300 ]
output_dir = str(sys.argv[ 3 ])
psr = T.tempopulsar( parfile = str( sys.argv[ 1 ] ), timfile = str( sys.argv[ 2 ] ) )
sort_cmp = N.argsort( psr.stoas )
bin = [] # format: residuals, error, TOA, frequency
# sorting residuals, errors, frequencies, and TOAs by TOAs
Residuals = psr.residuals()[ sort_cmp ]
Errors = psr.toaerrs[ sort_cmp ]
TOA = psr.stoas[ sort_cmp ]
FREQ = psr.freqs[ sort_cmp ]
# Getting 5 numbers that we need in the grand table
minfreq = min(FREQ)
maxfreq = max(FREQ)
startMJD=min(TOA)
endMJD=max(TOA)
dataspan = (endMJD - startMJD)/(365.25)
curr = [ Residuals[ 0 ], Errors[ 0 ], TOA[ 0 ], FREQ[ 0 ] ]
for i in range( 1, len( TOA ) ):
if( TOA[ i ]-TOA[ i-1 ] > (3.0/144.0) ):
bin.append( curr )
curr = []
curr.append( Residuals[ i ] )
curr.append( Errors[ i ] )
curr.append( TOA[ i ] )
curr.append( FREQ[ i ] )
if len( curr ) > 0:
bin.append( curr )
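# TOAs are grouped into the same epoch as long as consecutive (time-sorted) TOAs
# are within 3/144 day of each other, i.e. roughly 30 minutes.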
avgResidual = []
avgError = []
avgTOA = []
avgFREQ = []
for i in range( len( bin ) ):
sum1 = [ 0.0, 0.0, 0.0, 0.0, 0.0 ]
sum2 = [ 0.0, 0.0, 0.0, 0.0, 0.0 ]
sum3 = [ 0.0, 0.0, 0.0, 0.0, 0.0 ]
sumTOA = [ 0, 0, 0, 0, 0 ]
for j in range( 0, len( bin[ i ] ), 4 ):
currRes = bin[ i ][ j ]
currErr = bin[ i ][ j+1 ]
currTOA = bin[ i ][ j+2 ]
idx = freq_idx( bin[ i ][ j+3 ] )
sum1[ idx ] += currRes/( currErr**2 )
sum2[ idx ] += 1/( currErr**2 )
sum3[ idx ] += currTOA
sumTOA[ idx ] += 1
for j in range( 5 ):
if sumTOA[ j ] > 0:
avgResidual.append( sum1[ j ]/sum2[ j ] )
avgError.append( math.sqrt( 1/sum2[ j ] ) )
avgTOA.append( sum3[ j ]/sumTOA[ j ] )
avgFREQ.append( F[ j ] )
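# For every epoch and frequency band the loops above form the inverse-variance
# weighted mean residual sum(r/err^2)/sum(1/err^2), its uncertainty
# sqrt(1/sum(1/err^2)), and the plain mean of the TOAs.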
pulsar_path = str( sys.argv[ 1 ] )
pulsar_file = pulsar_path.split('/')
pulsar_name = pulsar_file[-1].split('.')
# Get RMS of new averaged residuals
# Subtract mean
avgResidual = avgResidual - N.mean(avgResidual)
rms = 1.e6*N.sqrt(N.mean(N.square(avgResidual)))
# Put all 5 numbers into a json
#fivenumbersfile = open(psr.name+".5number", 'w')
#fivenumbersfile.write( "minfreq\t"+ str(minfreq)+ "\nmaxfreq\t"+ str(maxfreq)+ "\nstartMJD\t"+ str(startMJD)+ "\nendMJD\t"+ str(endMJD)+"\ndataspan\t" + str(dataspan)+ "\nrms"+ str(rms))
#fivenumbersfile.close()
# Make the JSON file
# THIS IS WHERE I'M WORKING.
x={}
x['minfreq'] = minfreq
x['maxfreq'] = maxfreq
x['startMJD'] = float(startMJD)
x['endMJD'] = float(endMJD)
x['dataspan'] = float(dataspan)
x['rms'] = float(rms)
fjson=open(psr.name+'.json', 'w')
json.dump(x,fjson)
# Done writing the json file
if sys.argv[3][-4] != '.':
outFile = open( os.path.join( str(sys.argv[ 3 ]), "Output_for_plotting_"+pulsar_name[0]+".tim"), "w" )
else:
outFile = open( sys.argv[3], "w" )
outFile.write( psr.name+"\n" )
outFile.write( "This program comes with ABSOLUTELY NO WARRANTY.\nThis is free software, and you are welcome to redistribute it\n" )
outFile.write( "under conditions of GPL license.\n\n\n\n" )
for i in range( len( avgTOA ) ):
outFile.write( str("{0:.15f}".format(avgTOA[ i ]))+"\t"+str("{0:.19f}".format(avgResidual[ i ]))+"\t"+str(avgFREQ[ i ])+"\t"+str("{0:.19f}".format(avgError[ i ]))+"\n" )
outFile.close()
|
apache-2.0
|
sysadminmatmoz/addons-yelizariev
|
sugarcrm_migration/import_sugarcrm.py
|
16
|
44410
|
# -*- coding: utf-8 -*-
import logging
_logger = logging.getLogger(__name__)
try:
import MySQLdb
import MySQLdb.cursors
from pandas import merge, DataFrame
except ImportError:
pass
from openerp.addons.import_framework.import_base import import_base, create_childs
from openerp.addons.import_framework.mapper import *
import subprocess
def fix_email(text):
return text.replace('\r', '<br>')
class import_sugarcrm(import_base):
TABLE_USER = 'users'
TABLE_ACCOUNT = 'accounts'
TABLE_ACCOUNT_LEAD = 'accounts_leads'
TABLE_ACCOUNT_TAG = 'accounts_tags_'
TABLE_CONTACT = 'contacts'
TABLE_CONTACT_COMPANY = 'contacts_companies_'
TABLE_CONTACT_TAG = 'contacts_tags_'
TABLE_CASE = 'cases'
TABLE_CASE_TAG = 'cases_tags_'
#TABLE_EMPLOYEE = 'Employees'
#TABLE_OPPORTUNITY = 'Opportunities'
#TABLE_LEAD = 'Leads'
#TABLE_STAGE = 'crm_stage'
#TABLE_ATTENDEE = 'calendar_attendee'
#TABLE_CALL = 'Calls'
#TABLE_MEETING = 'Meetings'
#TABLE_TASK = 'Tasks'
#TABLE_PROJECT = 'Project'
#TABLE_PROJECT_TASK = 'ProjectTask'
#TABLE_BUG = 'Bugs'
TABLE_NOTE = 'Notes'
TABLE_NOTE_INTERNAL = 'notes_internal'
TABLE_EMAIL = 'emails'
#TABLE_COMPAIGN = 'Campaigns'
#TABLE_DOCUMENT = 'Documents'
#TABLE_HISTORY_ATTACHMNET = 'history_attachment'
def initialize(self):
self.db = MySQLdb.connect(host=self.context.get('db_host'),
port=int(self.context.get('db_port')),
user=self.context.get('db_user'),
passwd=self.context.get('db_passwd'),
db=self.context.get('db_name'),
charset='utf8',
cursorclass=MySQLdb.cursors.DictCursor
)
db_dump_fies = self.context.get('db_dump_fies')
if db_dump_fies:
cur = self.db.cursor()
for f in db_dump_fies:
_logger.info('load dump %s' % f)
fd = open(f, 'r')
subprocess.Popen(['mysql',
'-u', self.context.get('db_user'),
'-p{}'.format(self.context.get('db_passwd')),
'-h', self.context.get('db_host'),
'-P', self.context.get('db_port'),
self.context.get('db_name')], stdin=fd).wait()
cur.close()
def finalize(self):
pass
def finalize_note(self):
mail_message_obj = self.pool['mail.message']
ids = self.pool['ir.attachment'].search(self.cr, self.uid, [('res_model_tmp','=','mail.message')])
for a in self.pool['ir.attachment'].read(self.cr, self.uid, ids, ['id', 'res_id_tmp'], context=self.context):
if not a['res_id_tmp']:
continue
mail_message_obj.write(self.cr, self.uid, [a['res_id_tmp']],
{'attachment_ids':[(4, a['id'])]})
def get_data(self, table):
cur = self.db.cursor()
query = "SELECT * FROM %s" % table
#query = query + ' order by rand()' # for debug
cur.execute(query)
res = cur.fetchall()
cur.close()
return list(res)
def get_mapping(self):
res = [
self.get_mapping_user(),
self.get_mapping_account(),
self.get_mapping_contact(),
self.get_mapping_case(),
self.get_mapping_email(),
self.get_mapping_note_internal(),
self.get_mapping_note(),
]
return res
def merge_table_email(self, df, id_on='id'):
#mysql> select bean_module, count(*) from email_addr_bean_rel group by bean_module;
#+-------------+----------+
#| bean_module | count(*) |
#+-------------+----------+
#| Contacts | 1048 |
#| Leads | 31 |
#| Prospects | 20391 |
#| Users | 33 |
#+-------------+----------+
#4 rows in set (0.21 sec)
t1 = merge(df,
DataFrame(self.get_data('email_addr_bean_rel')),
how='left',
left_on=id_on,
suffixes=('', '_email_addr_bean_rel'),
right_on='bean_id')
t2 = merge(t1,
DataFrame(self.get_data('email_addresses')),
how='left',
left_on = 'email_address_id',
suffixes=('', '_email_addresses'),
right_on = 'id')
return t2
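    # Note: merge_table_email left-joins the given DataFrame with SugarCRM's
    # email_addr_bean_rel table (id == bean_id) and then with email_addresses
    # (email_address_id == id), so each record gains its related e-mail address
    # columns.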
def table_user(self):
t1 = self.merge_table_email(DataFrame(self.get_data('users')))
return t1
def get_mapping_user(self):
return {
'name': self.TABLE_USER,
'table': self.table_user,
'models':[{
'model' : 'res.users',
'fields': {
'id': xml_id(self.TABLE_USER, 'id'),
'active': lambda record: not record['deleted'], # status == 'Active'
'name': concat('first_name', 'last_name'),
'login': value('user_name', fallback='last_name'),
'password' : 'user_hash',
'company_id/id': const('base.main_company'),
'alias_name': value('user_name', fallback='last_name', lower=True),
'email': 'email_address',
}
}]
}
def table_account(self):
t1 = merge(DataFrame(self.get_data('accounts')),
DataFrame(self.get_data('accounts_cstm')),
left_on='id',
right_on='id_c'
)
#t1 = t1[:100] # for debug
return t1
def get_hook_tag(self, field_name):
def f(external_values):
res = []
value = external_values.get(field_name)
value = value or ''
if not isinstance(value, basestring):
value = str(value)
for v in value.split(','):
v = do_clean_sugar(v)
if v:
res.append({field_name:v})
return res
return f
def tag(self, model, xml_id_prefix, field_name):
parent = xml_id_prefix + field_name
return {'model':model,
'hook':self.get_hook_tag(field_name),
'fields': {
'id': xml_id(parent, field_name),
'name': field_name,
'parent_id/id':const('sugarcrm_migration.'+parent),
}
}
def context_partner(self):
# see module description
return {"skip_addr_sync":True}
def get_mapping_account(self):
def partner(prefix, suffix):
return {'model' : 'res.partner',
'hook': self.get_hook_ignore_empty('%sfirst_name%s'%(prefix, suffix),
'%slast_name%s'%(prefix, suffix)),
'context':self.context_partner,
'fields': {
'id': xml_id(self.TABLE_ACCOUNT + '_%s%s'%(prefix, suffix), 'id'),
'name': concat('%sfirst_name%s'%(prefix, suffix), '%slast_name%s'%(prefix, suffix)),
'phone': '%sphone%s'%(prefix, suffix),
'mobile': '%smobile%s'%(prefix, suffix),
'fax': '%sfax%s'%(prefix, suffix),
'email': '%semail%s'%(prefix, suffix),
'parent_id/id': xml_id(self.TABLE_ACCOUNT, 'id'),
'function': '%sjob_title%s'%(prefix, suffix),
'customer': const('1'),
'supplier': const('0'),
},
}
partner_list = [
partner('finance_', ''),
partner('pa_', '_primary_c'),
partner('pa_', '_secondary_c'),
partner('', '_primary_c'),
partner('', '_secondary_c'),
partner('', '_quantenary_c'),
partner('', '_other_c'),
]
tag_list = [
self.tag('res.partner.category', self.TABLE_ACCOUNT_TAG, 'initial_source_of_referral_c'),
self.tag('res.partner.category', self.TABLE_ACCOUNT_TAG, 'private_sector_new_c'),
self.tag('res.partner.category', self.TABLE_ACCOUNT_TAG, 'rtw_organisation_type_c'),
self.tag('res.partner.category', self.TABLE_ACCOUNT_TAG, 'sales_funnel_c'),
self.tag('res.partner.category', self.TABLE_ACCOUNT_TAG, 'shenley_holdings_company_new_c'),
self.tag('res.partner.category', self.TABLE_ACCOUNT_TAG, 'source_of_referral_c'),
self.tag('res.partner.category', self.TABLE_ACCOUNT_TAG, 'status_c'),
self.tag('res.partner.category', self.TABLE_ACCOUNT_TAG, 'introduced_by_c'),
self.tag('res.partner.category', self.TABLE_ACCOUNT_TAG, 'introduced_by_customer_c'),
self.tag('res.partner.category', self.TABLE_ACCOUNT_TAG, 'sister_company_c'),
]
return {
'name': self.TABLE_ACCOUNT,
'table': self.table_account,
'dependencies' : [self.TABLE_USER],
'models': tag_list + [
# company
{
'model' : 'res.partner',
'context':self.context_partner,
'fields' :
{
'id': xml_id(self.TABLE_ACCOUNT, 'id'),
'name': concat('name', 'first_name_c', 'last_name_c'),
'is_company': const('1'),
'date': fixdate('date_entered'),
'active': lambda record: not record['deleted'],
'user_id/.id': user_by_login('account_manager_2_c'),
'website': first('website', 'website_c'),
'phone':'company_phone_c',
'email':first('email_address', 'email_c', lower=True),
'fax': first('phone_fax', 'fax_c', 'fax_primary_c'),
'city': 'company_city_c',
'zip': 'company_post_code_c',
#'state_id': 'company_region_c',
'street': 'company_street_c',
'street2': concat('company_street_2_c','company_street_3_c'),
'country_id/.id': country_by_name('europe_c'),
'opt_out': mapper_int('unsubscribe_c'),
'customer': const('1'),
'supplier': const('0'),
'category_id/id': tags_from_fields(self.TABLE_ACCOUNT_TAG, ['initial_source_of_referral_c', 'private_sector_new_c', 'rtw_organisation_type_c', 'sales_funnel_c', 'shenley_holdings_company_new_c', 'source_of_referral_c', 'status_c', 'introduced_by_c', 'introduced_by_customer_c', 'sister_company_c',]),
'comment': ppconcat('website_c'),
}},
            # related lead
{
'model' : 'crm.lead',
'fields': {
'id': xml_id(self.TABLE_ACCOUNT_LEAD, 'id'),
'partner_id/id': xml_id(self.TABLE_ACCOUNT, 'id'),
'name': concat('name', 'first_name_c', 'last_name_c'),
'active': lambda record: not record['deleted'],
#'user_id/id': xml_id(self.TABLE_USER, 'assigned_user_id'),
'phone':first('phone_office', 'telephone_c', 'company_phone_c'),
'email_from':first('email_address', 'email_c', lower=True),
'fax': first('phone_fax', 'fax_c', 'fax_primary_c'),
'probability': map_val('sales_funnel_c', self.map_lead_probability, 0),
'stage_id/id': map_val('status_c', self.map_lead_stage, 'crm.stage_lead1'),
'type': map_val('status_c', self.map_lead_type, 'lead'),
'section_id/id': const('sales_team.section_sales_department'),
}
}
] + partner_list # related contacts
}
map_lead_probability = {
'Lost': 0,
'Proposal Sent': 50,
'Prospect Identified': 1,
'Prospect Qualified': 20,
'Sales Won': 100,
'Scheduled': 100, #in sugarcrm: 150,
'Suspect': 0,
}
#mysql> select sales_funnel_c, count(*) from accounts_cstm group by sales_funnel_c;
#+---------------------+----------+
#| sales_funnel_c | count(*) |
#+---------------------+----------+
#| NULL | 4322 |
#| | 144 |
#| Lost | 1 |
#| Proposal Sent | 3 |
#| Prospect Identified | 5 |
#| Prospect Qualified | 20 |
#| Sales Won | 2 |
#| Scheduled | 1 |
#| Suspect | 62 |
map_lead_stage = {
'': 'crm.stage_lead7', # Lost
'Archived': 'crm.stage_lead2', # Dead
'Dorment': 'crm.stage_lead4', # Proposition
'Live Contact': 'crm.stage_lead6', # Won
'Pipeline': 'crm.stage_lead5', # Negotiation
'Prospect': 'crm.stage_lead1', # New
}
map_lead_type = {
'Dorment': 'opportunity',
'Live Contact': 'opportunity',
'Pipeline': 'opportunity',
}
#mysql> select status_c, count(*) from accounts_cstm group by status_c;
#+---------------+----------+
#| status_c | count(*) |
#+---------------+----------+
#| NULL | 210 |
#| | 655 |
#| Archived | 84 |
#| Dorment | 101 |
#| Live Contract | 73 |
#| Pipeline | 390 |
#| Prospect | 3047 |
#+---------------+----------+
def table_contact(self):
t1 = merge(DataFrame(self.get_data('contacts')),
DataFrame(self.get_data('contacts_cstm')),
left_on='id',
right_on='id_c'
)
t2 = self.merge_table_email(t1)
#t2 = t2[:10] # for debug
return t2
def get_mapping_contact(self):
tag_list = [
self.tag('res.partner.category', self.TABLE_CONTACT_TAG, 'agreed_commission_c'),
self.tag('res.partner.category', self.TABLE_CONTACT_TAG, 'agreed_introducer_commission_c'),
self.tag('res.partner.category', self.TABLE_CONTACT_TAG, 'ambassador_c'),
self.tag('res.partner.category', self.TABLE_CONTACT_TAG, 'consultant_type_c'),
self.tag('res.partner.category', self.TABLE_CONTACT_TAG, 'consultant_type_other_c'),
self.tag('res.partner.category', self.TABLE_CONTACT_TAG, 'england_c'),
self.tag('res.partner.category', self.TABLE_CONTACT_TAG, 'ethnicity_c'),
self.tag('res.partner.category', self.TABLE_CONTACT_TAG, 'europe_c'),
self.tag('res.partner.category', self.TABLE_CONTACT_TAG, 'first_language_c'),
self.tag('res.partner.category', self.TABLE_CONTACT_TAG, 'gender_c'),
self.tag('res.partner.category', self.TABLE_CONTACT_TAG, 'other_languages_c'),
self.tag('res.partner.category', self.TABLE_CONTACT_TAG, 'religion_c'),
self.tag('res.partner.category', self.TABLE_CONTACT_TAG, 'role_c'),
self.tag('res.partner.category', self.TABLE_CONTACT_TAG, 'role_type_c'),
self.tag('res.partner.category', self.TABLE_CONTACT_TAG, 'specialism_c'),
self.tag('res.partner.category', self.TABLE_CONTACT_TAG, 'status_live_c'),
self.tag('res.partner.category', self.TABLE_CONTACT_TAG, 'status_live_new_c'),
self.tag('res.partner.category', self.TABLE_CONTACT_TAG, 'trainer_type_c'),
self.tag('res.partner.category', self.TABLE_CONTACT_TAG, 'training_experience_c'),
self.tag('res.partner.category', self.TABLE_CONTACT_TAG, 'willing_to_travel_c'),
self.tag('res.partner.category', self.TABLE_CONTACT_TAG, 'skill_set_c'),
]
def company(field_name):
return {'model':'res.partner',
'context':self.context_partner,
'hook':self.get_hook_ignore_empty(field_name),
'fields': {
'id': xml_id(self.TABLE_CONTACT_COMPANY, field_name),
'name': field_name,
'is_company': const('1'),
'customer': const('0'),
'supplier': const('1'),
}
}
return {
'name': self.TABLE_CONTACT,
'table': self.table_contact,
'dependencies' : [self.TABLE_USER],
'models':tag_list + [company('company_name_c')] + [{
'model' : 'res.partner',
'context':self.context_partner,
'fields': {
'id': xml_id(self.TABLE_CONTACT, 'id'),
'name': concat('title', 'first_name', 'last_name'),
'parent_id/id': xml_id(self.TABLE_CONTACT_COMPANY, 'company_name_c'),
'create_date': 'date_entered',
'write_date': 'date_modified',
'active': lambda record: not record['deleted'],
#'user_id/id': xml_id(self.TABLE_USER, 'assigned_user_id'),
'city': 'city_c',
'street': 'company_street_c',
'street2': concat('company_street_2_c','company_street_3_c'),
'zip': 'company_post_code_c',
'phone':first('company_phone_c', 'home_phone_c', 'phone_home', 'phone_work', 'phone_other', 'home_telephone_c', 'business_telephone_c'),
'mobile':first('phone_mobile', 'personal_mobile_phone_c'),
'email':first('email_c', 'email_address', 'personal_email_c', 'business_email_c', 'other_email_c', 'email_2_c'),
'website': first('website', 'website_c'),
'fax': first('phone_fax', 'company_fax_c'),
'customer': const('0'),
'supplier': const('1'),
'category_id/id': tags_from_fields(self.TABLE_CONTACT_TAG, ['agreed_commission_c', 'agreed_introducer_commission_c', 'ambassador_c', 'consultant_type_c', 'consultant_type_other_c', 'england_c', 'ethnicity_c', 'europe_c', 'first_language_c', 'gender_c', 'other_languages_c', 'religion_c', 'role_c', 'role_type_c', 'skill_set_c', 'specialism_c', 'status_live_c', 'status_live_new_c', 'trainer_type_c', 'training_experience_c', 'willing_to_travel_c', ]),
'comment': ppconcat(
'description',
'phone_home',
'phone_mobile',
'phone_work',
'phone_other',
'phone_fax',
'personal_email_c',
'business_email_c',
'other_email_c',
'home_telephone_c',
'business_telephone_c',
'personal_mobile_phone_c',
'personal_telephone_c',
'home_phone_c',
'mobile_phone_c',
'other_phone_c',
'email_c',
'email_2_c',
'company_phone_c',
'company_mobile_phone_c',
'company_fax_c',
'company_phone_other_c',
'company_email_c',
'prg_email_issued_c',
'email_address_permanent_c',
'prg_email_c',
'cjsm_email_address_c',
)
}
}]
}
def table_case(self):
t1 = merge(DataFrame(self.get_data('cases')),
DataFrame(self.get_data('cases_cstm')),
left_on='id',
right_on='id_c'
)
#t1 = t1[:10] # for debug
return t1
case_priority_mapping = {
'P1': '0',
'P2': '1',
'P3': '2'
}
case_state_mapping = {
'Awaiting Payment':'awaiting_payment',
'Cancelled':'cancelled',
'Completed':'close',
'Deferred':'pending',
'Live':'open',
'Lost':'lost',
'Pipeline':'pipeline_reactive',
'Pipeline - Proactive':'pipeline_proactive',
'Provisional':'draft',
'To be Invoiced':'to_be_invoiced',
}
def field_estimated_close_date_c(self, external_values):
estimated_close_date_c = external_values.get('estimated_close_date_c')
date = external_values.get('end_date_c')
return ''
def finalize_case(self):
ids = self.pool['account.analytic.account'].search(self.cr, self.uid, [('user_id_tmp', '!=', False)])
for r in self.pool['account.analytic.account'].read(self.cr, self.uid, ids, ['id', 'user_id_tmp']):
project_id = self.pool['project.project'].search(self.cr, self.uid, [('analytic_account_id','=', int(r['id']))], context=self.context)
self.pool['project.project'].write(self.cr, self.uid, project_id, {'user_id':r['user_id_tmp'][0]}, context=self.context)
def get_mapping_case(self):
#mysql> select case_status_c, count(*) from cases_cstm group by case_status_c;
#+----------------------+----------+
#| case_status_c | count(*) |
#+----------------------+----------+
#| NULL | 2 |
#| | 40 |
#| Awaiting Payment | 10 |
#| Cancelled | 182 |
#| Completed | 339 |
#| Deferred | 125 |
#| Live | 25 |
#| Lost | 419 |
#| Pipeline | 60 |
#| Pipeline - Proactive | 73 |
#| Provisional | 2 |
#| To be Invoiced | 7 |
#+----------------------+----------+
def partner_participant(prefix, suffix):
return {'model' : 'res.partner',
'hook': self.get_hook_ignore_empty('%scase_participant%s'%(prefix, suffix)),
'context':self.context_partner,
'fields': {
'id': xml_id(self.TABLE_CASE + '_%s%s'%(prefix, suffix), 'id'),
'name': '%scase_participant%s'%(prefix, suffix),
'phone': '%sparticipant_phone%s'%(prefix, suffix),
'function': '%sparticipant_role%s'%(prefix, suffix),
'participate_in_contract_ids/id': xml_id(self.TABLE_CASE, 'id'),
'customer': const('0'),
'supplier': const('0'),
},
}
def partner(prefix, suffix):
return {'model' : 'res.partner',
'hook': self.get_hook_ignore_empty('%scontact%s'%(prefix, suffix)),
'context':self.context_partner,
'fields': {
'id': xml_id(self.TABLE_CASE + '_%s%s'%(prefix, suffix), 'id'),
'name': '%scontact%s'%(prefix, suffix),
'phone': '%sphone%s'%(prefix, suffix),
'mobile': '%smobile%s'%(prefix, suffix),
'function': '%srole%s'%(prefix, suffix),
'customer': const('0'),
'supplier': const('0'),
},
}
partner_participant_list = [
partner_participant('', '_c'),
partner_participant('', '_2_c'),
partner_participant('', '_3_c'),
]
partner_list = [
partner('primary_', '_c'),
partner('secondary_', '_c'),
]
tag_list = [
self.tag('contract.category', self.TABLE_CASE_TAG, 'business_type_c'),
self.tag('contract.category', self.TABLE_CASE_TAG, 'probability_of_closing_c'),
self.tag('contract.category', self.TABLE_CASE_TAG, 'production_funnel_c'),
self.tag('contract.category', self.TABLE_CASE_TAG, 'product_area_c'),
self.tag('contract.category', self.TABLE_CASE_TAG, 'product_type_c'),
self.tag('contract.category', self.TABLE_CASE_TAG, 'reason_lost_c'),
self.tag('contract.category', self.TABLE_CASE_TAG, 'source_of_referral_c'),
]
return {
'name': self.TABLE_CASE,
'table': self.table_case,
'dependencies' : [
self.TABLE_USER,
self.TABLE_ACCOUNT,
self.TABLE_CONTACT,
#self.TABLE_LEAD
],
'models': []+
tag_list+
partner_list+
[{
'model' : 'account.analytic.account',
'context': lambda : {'active_test':False},
'finalize': self.finalize_case,
'fields': {
'id': xml_id(self.TABLE_CASE, 'id'),
'name': concat('case_number_c', 'case_number', 'name', delimiter=' * '),
'type': const('contract'),
'use_tasks': const('1'),
'user_id_tmp/.id': user_by_login('case_manager_c'),
'support_manager_id/.id': user_by_login('support_case_manager_c'),
'notetaker_id/.id': res_id(const(self.TABLE_CONTACT), 'contact_id4_c', default=None),
'proof_reader_id/.id': res_id(const(self.TABLE_CONTACT), 'contact_id2_c', default=None),
'consultant_id/.id': res_id(const(self.TABLE_CONTACT), 'contact_id_c', default=None),
'business_manager_id/.id': res_id(const(self.TABLE_CASE + '_%s%s'%('secondary_', '_c')), 'id', default=None),
'commissioning_manager_id/.id': res_id(const(self.TABLE_CASE + '_%s%s'%('primary_', '_c')), 'id', default=None),
'category_id/id': tags_from_fields(self.TABLE_CASE_TAG, ['business_type_c', 'probability_of_closing_c', 'production_funnel_c', 'product_area_c', 'product_type_c', 'reason_lost_c', 'source_of_referral_c',]),
'create_date': 'date_entered',
'state': map_val('case_status_c', self.case_state_mapping, 'draft'),
'partner_id/id': xml_id(self.TABLE_ACCOUNT, 'account_id'),
'date_start':'end_date_c',
'date':call(self.field_estimated_close_date_c),
'description': ppconcat(
'invoiced_value_of_case_c',
),
}
}] +
partner_participant_list
}
def table_filter_modules(self, t, field_name='bean_module'):
newt = t[(t[field_name] == 'Accounts')|
(t[field_name] == 'Cases')|
(t[field_name] == 'Contacts')|
(t[field_name] == 'Notes')|
(t[field_name] == 'Emails')
]
return newt
def table_email(self):
t1 = merge(DataFrame(self.get_data('emails')),
DataFrame(self.get_data('emails_text')),
how='left',
left_on='id',
right_on='email_id'
)
t2 = merge(t1,
DataFrame(self.get_data('emails_beans')),
how='left',
left_on='id',
right_on='email_id',
suffixes = ('', '_emails_beans')
)
t3 = self.table_filter_modules(t2)
#t3 = t3[:100] # for debug
return t3
map_to_model = {
'Accounts': 'res.partner',
'Cases': 'project.project',
'Contacts': 'res.partner',
'Prospects': 'TODO',
'Emails': 'mail.message',
#'Notes': 'ir.attachment',
}
map_to_table = {
'Accounts': TABLE_ACCOUNT,
'Cases': TABLE_CASE,
'Contacts': TABLE_CONTACT,
'Prospects': 'TODO',
'Emails': TABLE_EMAIL,
#'Notes': TABLE_NOTE,
}
#mysql> select parent_type, count(*) from notes group by parent_type;
#+-------------+----------+
#| parent_type | count(*) |
#+-------------+----------+
#| NULL | 604 |
#| Accounts | 6385 |
#| Cases | 12149 |
#| Contacts | 41 |
#| Emails | 12445 |
#| Leads | 355 |
#| Meetings | 2 |
#+-------------+----------+
#7 rows in set (0.30 sec)
#
def get_mapping_email(self):
# mysql> select bean_module, count(*) from emails_beans group by bean_module;
# +---------------+----------+
# | bean_module | count(*) |
# +---------------+----------+
# | Accounts | 182 |
# | Cases | 1746 |
# | Contacts | 493 |
# | Leads | 102 |
# | Opportunities | 1 |
# | Prospects | 16819 |
# +---------------+----------+
# 6 rows in set (0.56 sec)
return {
'name': self.TABLE_EMAIL,
'table': self.table_email,
'dependencies' : [
self.TABLE_USER,
self.TABLE_ACCOUNT,
self.TABLE_CONTACT,
self.TABLE_CASE,
#self.TABLE_LEAD,
#self.TABLE_OPPORTUNITY,
#self.TABLE_MEETING,
#self.TABLE_CALL
],
'models':[{
'model' : 'mail.message',
'hook': self.hook_email,
'fields': {
'id': xml_id(self.TABLE_EMAIL, 'id'),
'type':const('email'),
#mysql> select type, count(*) from emails group by type;
#+----------+----------+
#| type | count(*) |
#+----------+----------+
#| archived | 17119 |
#| draft | 8 |
#| inbound | 3004 |
#| out | 75 |
#+----------+----------+
#4 rows in set (0.76 sec)
'email_from': 'from_addr_name',
'reply_to': 'reply_to_addr',
#'same_thread': 'TODO',
'author_id/id': user2partner(self.TABLE_USER, 'created_by'),
#'partner_ids' #many2many
#attachment_ids' #many2many
#'parent_id': 'TODO',
'model': 'model',
'res_id': 'res_id',
#record_name
'subject':'name',
'date':'date_sent',
'message_id': 'message_id',
'body': call(lambda vals, html, txt: fix_email(html or txt or ''),
value('description_html'), value('description')),
'subtype_id/id':const('mail.mt_comment'),
'notified_partner_ids/.id': emails2partners('to_addrs'),
#'state' : const('received'),
#'email_to': 'to_addrs_names',
#'email_cc': 'cc_addrs_names',
#'email_bcc': 'bcc_addrs_names',
#'partner_id/.id': 'partner_id/.id',
#'user_id/id': ref(self.TABLE_USER, 'assigned_user_id'),
}
}]
}
def table_note(self):
t = DataFrame(self.get_data('notes'))
t = self.table_filter_modules(t, 'parent_type')
t = t.dropna(subset=['filename'])
#t = t[:10] # for debug
return t
def table_note_internal(self):
t = DataFrame(self.get_data('notes'))
t = self.table_filter_modules(t, 'parent_type')
t = t[(t['parent_type'] != 'Emails')]
#t = t[:100] # for debug
return t
def get_id_model(self, external_values, field_name='parent_id', parent_field_name='parent_type'):
id = res_id(map_val(parent_field_name, self.map_to_table), field_name)
id.set_parent(self)
id = id(external_values)
model = map_val(parent_field_name, self.map_to_model)
model = model(external_values)
if model=='project.project':
id = self.pool['project.project'].search(self.cr, self.uid, [('analytic_account_id','=', int(id))], context=self.context)
if isinstance(id, list):
id=id[0]
return str(id),model
def hook_email(self, external_values):
id,model = self.get_id_model(external_values, field_name='bean_id', parent_field_name='bean_module')
external_values['res_id']=id
external_values['model']=model
return external_values
def hook_note(self, external_values):
parent_type = external_values.get('parent_type')
contact_id = external_values.get('contact_id')
if parent_type == 'Accounts' and contact_id:
external_values['parent_type'] = 'Contacts'
id,model = self.get_id_model(external_values, field_name='contact_id')
if id:
#print 'note Accounts fixed to Contacts'
external_values['res_id'] = id
external_values['res_model'] = model
return external_values
external_values['parent_type'] = parent_type
id,model = self.get_id_model(external_values)
if not id:
#print 'Note not found', parent_type, external_values.get('parent_id')
return None
else:
#print 'Note FOUND', parent_type, external_values.get('parent_id')
pass
external_values['res_id'] = id
external_values['res_model'] = model
return external_values
map_note_to_table = {
'Emails': TABLE_EMAIL
}
def get_mapping_note(self):
return {
'name': self.TABLE_NOTE,
'table': self.table_note,
'dependencies' : [self.TABLE_EMAIL,
self.TABLE_NOTE_INTERNAL,
],
'models':[{
'model': 'ir.attachment',
'context': lambda : {'active_test':False, 'quick_import':True},
'hook': self.hook_note,
'finalize': self.finalize_note,
'fields': {
'id': xml_id(self.TABLE_NOTE, 'id'),
'name':'filename',
'datas_fname':'filename',
'res_model': 'res_model',
'res_id': 'res_id',
'res_model_tmp': const('mail.message'),
'res_id_tmp': res_id(map_val('parent_type', self.map_note_to_table, default=self.TABLE_NOTE_INTERNAL), 'id'),
'store_fname': call(lambda external_values, id_value: 'sugarcrm_files/' + id_value,
value('id')),
'type':const('binary'),
#'description': 'description',
'description': const(''),
'create_date': 'date_entered',
                'create_uid/id': xml_id(self.TABLE_USER, 'created_by'),
'company_id/id': const('base.main_company'),
}
}]
}
def get_mapping_note_internal(self):
return {
'name': self.TABLE_NOTE_INTERNAL,
'table': self.table_note_internal,
'dependencies' : [self.TABLE_EMAIL,
],
'models':[{
'model': 'mail.message',
'hook': self.hook_note,
'fields': {
'id': xml_id(self.TABLE_NOTE_INTERNAL, 'id'),
'subject':concat('name', 'filename', 'date_entered', delimiter=' * '),
'body': call(lambda vals, body: fix_email(body or ''),
value('description')),
'model': 'res_model',
'res_id': 'res_id',
'type':const('email'),
'date': 'date_entered',
'author_id/id': user2partner(self.TABLE_USER, 'created_by'),
#'subtype_id/id':const('mail.mt_comment'),
}
}]
}
def get_mapping_history_attachment(self):
# is not used
        return {
'name': self.TABLE_HISTORY_ATTACHMNET,
'model' : 'ir.attachment',
'dependencies' : [self.TABLE_USER, self.TABLE_ACCOUNT, self.TABLE_CONTACT, self.TABLE_LEAD, self.TABLE_OPPORTUNITY, self.TABLE_MEETING, self.TABLE_CALL, self.TABLE_EMAIL],
'hook' : import_history,
'models':[{
'fields': {
'name':'name',
'user_id/id': ref(self.TABLE_USER, 'created_by'),
'description': ppconcat('description', 'description_html'),
'res_id': 'res_id',
'res_model': 'model',
'partner_id/.id' : 'partner_id/.id',
'datas' : 'datas',
'datas_fname' : 'datas_fname'
}
}]
        }
    def get_mapping_bug(self):
# is not used
return {
'name': self.TABLE_BUG,
'model' : 'project.issue',
'dependencies' : [self.TABLE_USER],
'models':[{
'fields': {
'name': concat('bug_number', 'name', delimiter='-'),
'project_id/id': call(get_bug_project_id, 'sugarcrm_bugs'),
'categ_id/id': call(get_category, 'project.issue', value('type')),
'description': ppconcat('description', 'source', 'resolution', 'work_log', 'found_in_release', 'release_name', 'fixed_in_release_name', 'fixed_in_release'),
'priority': get_project_issue_priority,
'state': map_val('status', project_issue_state),
'assigned_to/id' : ref(self.TABLE_USER, 'assigned_user_id'),
}
}]
}
def get_mapping_project(self):
# is not used
return {
'name': self.TABLE_PROJECT,
'model' : 'project.project',
'dependencies' : [self.TABLE_CONTACT, self.TABLE_ACCOUNT, self.TABLE_USER],
'hook' : import_project,
'models':[{
'fields': {
'name': 'name',
'date_start': 'estimated_start_date',
'date': 'estimated_end_date',
'user_id/id': ref(self.TABLE_USER, 'assigned_user_id'),
'partner_id/.id': 'partner_id/.id',
'contact_id/.id': 'contact_id/.id',
'state': map_val('status', project_state)
}
}]
}
def get_mapping_project_task(self):
# is not used
return {
'name': self.TABLE_PROJECT_TASK,
'model' : 'project.task',
'dependencies' : [self.TABLE_USER, self.TABLE_PROJECT],
'models':[{
'fields': {
'name': 'name',
'date_start': 'date_start',
'date_end': 'date_finish',
'project_id/id': ref(self.TABLE_PROJECT, 'project_id'),
'planned_hours': 'estimated_effort',
'priority': get_project_task_priority,
'description': ppconcat('description','milestone_flag', 'project_task_id', 'task_number', 'percent_complete'),
'user_id/id': ref(self.TABLE_USER, 'assigned_user_id'),
'partner_id/id': 'partner_id/id',
'contact_id/id': 'contact_id/id',
'state': map_val('status', project_task_state)
}
}]
}
def get_mapping_task(self):
# is not used
return {
'name': self.TABLE_TASK,
'model' : 'crm.meeting',
'dependencies' : [self.TABLE_CONTACT, self.TABLE_ACCOUNT, self.TABLE_USER],
'hook' : import_task,
'models':[{
'fields': {
'name': 'name',
'date': 'date',
'date_deadline': 'date_deadline',
'user_id/id': ref(self.TABLE_USER, 'assigned_user_id'),
'categ_id/id': call(get_category, 'crm.meeting', const('Tasks')),
'partner_id/id': related_ref(self.TABLE_ACCOUNT),
'partner_address_id/id': ref(self.TABLE_CONTACT,'contact_id'),
'state': map_val('status', task_state)
}
}]
}
def get_mapping_call(self):
# is not used
return {
'name': self.TABLE_CALL,
'model' : 'crm.phonecall',
'dependencies' : [self.TABLE_ACCOUNT, self.TABLE_CONTACT, self.TABLE_OPPORTUNITY, self.TABLE_LEAD],
'models':[{
'fields': {
'name': 'name',
'date': 'date_start',
'duration': call(get_float_time, value('duration_hours'), value('duration_minutes')),
'user_id/id': ref(self.TABLE_USER, 'assigned_user_id'),
'partner_id/id': related_ref(self.TABLE_ACCOUNT),
'partner_address_id/id': related_ref(self.TABLE_CONTACT),
'categ_id/id': call(get_category, 'crm.phonecall', value('direction')),
'opportunity_id/id': related_ref(self.TABLE_OPPORTUNITY),
'description': ppconcat('description'),
'state': map_val('status', call_state)
}
}]
}
def get_mapping_meeting(self):
# is not used
return {
'name': self.TABLE_MEETING,
'model' : 'crm.meeting',
'dependencies' : [self.TABLE_CONTACT, self.TABLE_OPPORTUNITY, self.TABLE_LEAD, self.TABLE_TASK],
'hook': import_meeting,
'models':[{
'fields': {
'name': 'name',
'date': 'date_start',
'duration': call(get_float_time, value('duration_hours'), value('duration_minutes')),
'location': 'location',
'attendee_ids/id':'attendee_ids/id',
'alarm_id/id': call(get_alarm_id, value('reminder_time')),
'user_id/id': ref(self.TABLE_USER, 'assigned_user_id'),
'partner_id/id': related_ref(self.TABLE_ACCOUNT),
'partner_address_id/id': related_ref(self.TABLE_CONTACT),
'state': map_val('status', meeting_state)
}
}]
}
def get_mapping_opportunity(self):
# is not used
return {
'name': self.TABLE_OPPORTUNITY,
'model' : 'crm.lead',
'dependencies' : [self.TABLE_USER, self.TABLE_ACCOUNT, self.TABLE_CONTACT,self.TABLE_COMPAIGN],
'hook' : import_opp,
'models':[{
'fields': {
'name': 'name',
'probability': 'probability',
'partner_id/id': refbyname(self.TABLE_ACCOUNT, 'account_name', 'res.partner'),
'title_action': 'next_step',
'partner_address_id/id': 'partner_address_id/id',
'planned_revenue': 'amount',
'date_deadline': 'date_closed',
'user_id/id' : ref(self.TABLE_USER, 'assigned_user_id'),
'stage_id/id' : get_opportunity_status,
'type' : const('opportunity'),
'categ_id/id': call(get_category, 'crm.lead', value('opportunity_type')),
'email_from': 'email_from',
'state': map_val('status', opp_state),
'description' : 'description',
}
}]
}
def get_mapping_compaign(self):
# is not used
return {
'name': self.TABLE_COMPAIGN,
'model' : 'crm.case.resource.type',
'models':[{
'fields': {
'name': 'name',
}
}]
}
def get_mapping_employee(self):
# is not used
return {
'name': self.TABLE_EMPLOYEE,
'model' : 'hr.employee',
'dependencies' : [self.TABLE_USER],
'models':[{
'fields': {
'resource_id/id': get_ressource,
'name': concat('first_name', 'last_name'),
'work_phone': 'phone_work',
'mobile_phone': 'phone_mobile',
'user_id/id': ref(self.TABLE_USER, 'id'),
'address_home_id/id': get_user_address,
'notes': ppconcat('messenger_type', 'messenger_id', 'description'),
'job_id/id': get_job_id,
'work_email' : 'email1',
'coach_id/id_parent' : 'reports_to_id',
}
}]
}
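# --- Editor's illustrative sketch (not part of the import_framework API) ---
# The email/note hooks above dispatch a SugarCRM parent module name to an OpenERP
# model plus a mapping table before resolving the record id. A minimal standalone
# version of that dispatch, assuming plain dicts and a caller-supplied id-lookup
# callable instead of map_val()/res_id(), could look like this:
ILLUSTRATIVE_MODULE_TO_MODEL = {
    'Accounts': 'res.partner',
    'Cases': 'project.project',
    'Contacts': 'res.partner',
    'Emails': 'mail.message',
}
def illustrative_resolve_parent(parent_type, parent_id, id_lookup):
    """Return (res_model, res_id) for a SugarCRM record, or None when unmapped."""
    model = ILLUSTRATIVE_MODULE_TO_MODEL.get(parent_type)
    if model is None:
        return None
    # id_lookup is any callable mapping (model, external id) -> database id
    return model, id_lookup(model, parent_id)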
|
lgpl-3.0
|
Novasoft-India/OperERP-AM-Motors
|
openerp/addons/resource/faces/timescale.py
|
170
|
3902
|
############################################################################
# Copyright (C) 2005 by Reithinger GmbH
# [email protected]
#
# This file is part of faces.
#
# faces is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# faces is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
############################################################################
import faces.pcalendar as pcal
import matplotlib.cbook as cbook
import datetime
import sys
class TimeScale(object):
def __init__(self, calendar):
self.data_calendar = calendar
self._create_chart_calendar()
self.now = self.to_num(self.data_calendar.now)
def to_datetime(self, xval):
return xval.to_datetime()
def to_num(self, date):
return self.chart_calendar.WorkingDate(date)
def is_free_slot(self, value):
dt1 = self.chart_calendar.to_starttime(value)
dt2 = self.data_calendar.to_starttime\
(self.data_calendar.from_datetime(dt1))
return dt1 != dt2
def is_free_day(self, value):
dt1 = self.chart_calendar.to_starttime(value)
dt2 = self.data_calendar.to_starttime\
(self.data_calendar.from_datetime(dt1))
return dt1.date() != dt2.date()
def _create_chart_calendar(self):
dcal = self.data_calendar
ccal = self.chart_calendar = pcal.Calendar()
ccal.minimum_time_unit = 1
#pad worktime slots of calendar (all days should be equally long)
slot_sum = lambda slots: sum(map(lambda slot: slot[1] - slot[0], slots))
day_sum = lambda day: slot_sum(dcal.get_working_times(day))
max_work_time = max(map(day_sum, range(7)))
        #working time should make up 2/3 of each padded chart day, hence sum_time = 1.5 * max_work_time
sum_time = 3 * max_work_time / 2
#now create timeslots for ccal
def create_time_slots(day):
src_slots = dcal.get_working_times(day)
slots = [0, src_slots, 24*60]
slots = tuple(cbook.flatten(slots))
slots = zip(slots[:-1], slots[1:])
#balance non working slots
work_time = slot_sum(src_slots)
non_work_time = sum_time - work_time
non_slots = filter(lambda s: s not in src_slots, slots)
non_slots = map(lambda s: (s[1] - s[0], s), non_slots)
non_slots.sort()
slots = []
i = 0
for l, s in non_slots:
delta = non_work_time / (len(non_slots) - i)
delta = min(l, delta)
non_work_time -= delta
slots.append((s[0], s[0] + delta))
i += 1
slots.extend(src_slots)
slots.sort()
return slots
min_delta = sys.maxint
for i in range(7):
slots = create_time_slots(i)
ccal.working_times[i] = slots
min_delta = min(min_delta, min(map(lambda s: s[1] - s[0], slots)))
ccal._recalc_working_time()
self.slot_delta = min_delta
self.day_delta = sum_time
self.week_delta = ccal.week_time
_default_scale = TimeScale(pcal._default_calendar)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
kjung/scikit-learn
|
sklearn/metrics/cluster/__init__.py
|
312
|
1322
|
"""
The :mod:`sklearn.metrics.cluster` submodule contains evaluation metrics for
cluster analysis results. There are two forms of evaluation:
- supervised, which uses a ground truth class values for each sample.
- unsupervised, which does not and measures the 'quality' of the model itself.
"""
from .supervised import adjusted_mutual_info_score
from .supervised import normalized_mutual_info_score
from .supervised import adjusted_rand_score
from .supervised import completeness_score
from .supervised import contingency_matrix
from .supervised import expected_mutual_information
from .supervised import homogeneity_completeness_v_measure
from .supervised import homogeneity_score
from .supervised import mutual_info_score
from .supervised import v_measure_score
from .supervised import entropy
from .unsupervised import silhouette_samples
from .unsupervised import silhouette_score
from .bicluster import consensus_score
__all__ = ["adjusted_mutual_info_score", "normalized_mutual_info_score",
"adjusted_rand_score", "completeness_score", "contingency_matrix",
"expected_mutual_information", "homogeneity_completeness_v_measure",
"homogeneity_score", "mutual_info_score", "v_measure_score",
"entropy", "silhouette_samples", "silhouette_score",
"consensus_score"]
|
bsd-3-clause
|
robcarver17/systematictradingexamples
|
plots_for_perhaps/divbenefits.py
|
1
|
2961
|
import Image
from random import gauss
import numpy as np
from matplotlib.pyplot import plot, show, xticks, xlabel, ylabel, legend, yscale, title, savefig, rcParams, figure, hist, scatter
import matplotlib.pylab as plt
from itertools import cycle
import pickle
import pandas as pd
lines = ["--","-","-."]
linecycler = cycle(lines)
def twocorrelatedseries(no_periods, period_mean, period_mean2, period_vol, corr):
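    """Draw `no_periods` samples from a bivariate normal with the given means,
    common volatility `period_vol` and correlation `corr`, and return the
    empirical (sample) correlation of the two simulated series."""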
means = [period_mean, period_mean2]
stds = [period_vol]*2
covs = [[stds[0]**2 , stds[0]*stds[1]*corr],
[stds[0]*stds[1]*corr, stds[1]**2]]
m = np.random.multivariate_normal(means, covs, no_periods).T
data1=m[0]
data2=m[1]
empirical_corr=np.corrcoef(data1, data2)[0][1]
return empirical_corr
## path to difference for one thing no correlation
months_in_year=12
annual_vol=0.270
monthly_vol=annual_vol/(months_in_year**.5)
annual_SR=0.05
annual_SR2=0.05
diffSR=annual_SR - annual_SR2
annual_return=annual_vol*annual_SR
annual_return2=annual_vol*annual_SR2
monthly_mean=annual_return/months_in_year
monthly_mean2=annual_return2/months_in_year
## Make sure these match!
no_periods=36
monte_carlos=1000000
corr=0.75
corrdist=[twocorrelatedseries(no_periods, monthly_mean, monthly_mean2, monthly_vol, corr=corr) for ii in range(monte_carlos)]
print np.percentile(corrdist, 75)
print np.percentile(corrdist, 50)
print np.mean(corrdist)
def linehist(x, color="blue", linestyle="-", bins=10, linewidth=1):
y,binEdges =np.histogram(x, bins=bins)
bincenters = 0.5*(binEdges[1:]+binEdges[:-1])
plot(bincenters,y,'-', color=color, linestyle=linestyle, linewidth=linewidth)
linehist(corrdist, bins=50, linewidth=2)
frame=plt.gca()
frame.get_yaxis().set_visible(False)
frame.set_xlim([0.5, 0.95])
frame.set_xticks([0.6, 0.7, 0.8, 0.9])
frame.set_ylim([0,100000])
frame.annotate("Average correlation", xy=(0.745, 85000),xytext=(0.6, 80000.0), arrowprops=dict(facecolor='black', shrink=0.05), size=18)
frame.annotate("75% confidence\n correlation", xy=(0.8, 83000),xytext=(0.84, 80000.0), arrowprops=dict(facecolor='black', shrink=0.05), size=18)
frame.annotate("Breakeven \n with costs", xy=(0.92, 500),xytext=(0.855, 40000.0), arrowprops=dict(facecolor='black', shrink=0.05), size=18)
plt.axvline(0.75, linestyle="--")
plt.axvline(0.8, linestyle="--")
plt.axvline(0.92, linestyle="--")
#xlabel("Difference in annual % returns between managers")
rcParams.update({'font.size': 18})
def file_process(filename):
fig = plt.gcf()
fig.set_size_inches(18.5,10.5)
fig.savefig("/home/rob/%s.png" % filename,dpi=300)
fig.savefig("/home/rob/%sLOWRES.png" % filename,dpi=50)
Image.open("/home/rob/%s.png" % filename).convert('L').save("/home/rob/%s.jpg" % filename)
Image.open("/home/rob/%sLOWRES.png" % filename).convert('L').save("/home/rob/%sLOWRES.jpg" % filename)
file_process("divbenefits")
show()
|
gpl-2.0
|
nvoron23/scipy
|
scipy/spatial/tests/test__plotutils.py
|
71
|
1463
|
from __future__ import division, print_function, absolute_import
from numpy.testing import dec, assert_, assert_array_equal
try:
import matplotlib
matplotlib.rcParams['backend'] = 'Agg'
import matplotlib.pyplot as plt
has_matplotlib = True
except:
has_matplotlib = False
from scipy.spatial import \
delaunay_plot_2d, voronoi_plot_2d, convex_hull_plot_2d, \
Delaunay, Voronoi, ConvexHull
class TestPlotting:
points = [(0,0), (0,1), (1,0), (1,1)]
@dec.skipif(not has_matplotlib, "Matplotlib not available")
def test_delaunay(self):
# Smoke test
fig = plt.figure()
obj = Delaunay(self.points)
s_before = obj.simplices.copy()
r = delaunay_plot_2d(obj, ax=fig.gca())
assert_array_equal(obj.simplices, s_before) # shouldn't modify
assert_(r is fig)
delaunay_plot_2d(obj, ax=fig.gca())
@dec.skipif(not has_matplotlib, "Matplotlib not available")
def test_voronoi(self):
# Smoke test
fig = plt.figure()
obj = Voronoi(self.points)
r = voronoi_plot_2d(obj, ax=fig.gca())
assert_(r is fig)
voronoi_plot_2d(obj)
@dec.skipif(not has_matplotlib, "Matplotlib not available")
def test_convex_hull(self):
# Smoke test
fig = plt.figure()
tri = ConvexHull(self.points)
r = convex_hull_plot_2d(tri, ax=fig.gca())
assert_(r is fig)
convex_hull_plot_2d(tri)
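# Editor's note: these are smoke tests -- they only check that each plotting helper runs
# and hands back the figure without error. Under the nose-based setup this file assumes,
# they would typically be run with something like `nosetests scipy.spatial.tests.test__plotutils`.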
|
bsd-3-clause
|
navigator8972/vae_hwmotion
|
pyrbf_funcapprox.py
|
2
|
7983
|
import numpy as np
import matplotlib.pyplot as plt
class PyRBF_FunctionApproximator():
"""
an RBF function approximator for mono input and mono output...
note the features are a series of RBF basis function and a constant offset term
this is used to make the model can be easily initialized as a linear model...
"""
def __init__(self, rbf_type='gaussian', K=9, normalize=True):
self.K_ = K
self.type_ = rbf_type
self.rbf_parms_ = dict()
self.prepare_rbf_parameters()
self.theta_ = np.concatenate([np.zeros(self.K_), [0]])
self.normalize_rbf_ = normalize
self.upper_limit_ = None
self.lower_limit_ = None
        #a function to map parameter theta to the linearly constrained space...
self.apply_lin_cons = None
return
def prepare_rbf_parameters(self):
#prepare rbf parameters
#gaussian
if self.type_ == 'gaussian':
self.rbf_parms_['mu'] = np.linspace(0.1, 0.9, self.K_)
self.rbf_parms_['sigma'] = 1. / self.K_
elif self.type_ == 'sigmoid':
#logistic curve, there might be other alternatives: e.g., erf, tanh
self.rbf_parms_['tau'] = self.K_ * 2
self.rbf_parms_['t0'] = np.linspace(1./self.K_, 1.0, self.K_)
else:
print 'Unknown RBF type'
return
def set_linear_equ_constraints(self, phases, target=None):
"""
        this function allows setting linear equality constraints of the form
\Phi(phases)^T \theta == target
target is zero vector if not specified...
"""
if target is None:
const_rhs = np.zeros(len(phases))
else:
const_rhs = target
if len(const_rhs) == len(phases):
#valid constraint
            #evaluate features at constrained phase points
self.cons_feats = self.get_features(phases)
self.cons_invmat = np.linalg.pinv(self.cons_feats.T.dot(self.cons_feats))
self.cons_offset = const_rhs
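            # Minimum-norm correction onto the affine constraint set:
            #   theta_new = theta - Phi (Phi^T Phi)^+ (Phi^T theta - target)
            # where the columns of Phi are the feature vectors evaluated at the
            # constrained phase points.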
self.apply_lin_cons = lambda theta_old: theta_old - self.cons_feats.dot(
self.cons_invmat.dot(self.cons_feats.T.dot(theta_old) - self.cons_offset))
return
def set_theta(self, theta):
self.theta_ = theta
return
def set_const_offset(self, offset):
#the last theta...
self.theta_[-1] = offset
return
def rbf_gaussian_evaluation(self, z, mu, sigma):
res = np.exp(-(z-mu)**2/sigma)
return res
def rbf_sigmoid_evaluation(self, z, t0, tau):
res = 1. / (1 + np.exp(-tau*(z - t0)))
return res
def set_limit(self, upper_limit=None, lower_limit=None):
if upper_limit is not None:
self.upper_limit_ = upper_limit
if lower_limit is not None:
self.lower_limit_ = lower_limit
return
def get_features(self, z):
def get_features_internal(z_var):
if self.type_ == 'gaussian':
res = np.array([ self.rbf_gaussian_evaluation(z_var, self.rbf_parms_['mu'][i], self.rbf_parms_['sigma']) for i in range(self.K_)])
if self.normalize_rbf_:
res = res / np.sum(res)
return np.concatenate([res, [1]])
elif self.type_ == 'sigmoid':
res = np.array([ self.rbf_sigmoid_evaluation(z_var, self.rbf_parms_['t0'][i], self.rbf_parms_['tau']) for i in range(self.K_)])
return np.concatenate([res, [1]])
else:
print 'Unknown RBF type'
res = [get_features_internal(z_var) for z_var in z]
return np.array(res).T
def fit(self, z, y, replace_theta=False):
"""
z: a series of phase variables...
y: function evaluation
"""
features = self.get_features(z)
U, s, V = np.linalg.svd(features.T)
significant_dims = len(np.where(s>1e-6)[0])
inv_feats = V.T[:, 0:significant_dims].dot(np.diag(1./s[0:significant_dims])).dot(U[:, 0:significant_dims].T)
res_theta = inv_feats.dot(y)
if replace_theta:
# print 'use fit parameters'
self.theta_ = res_theta
return res_theta
def evaluate(self, z, theta=None, trunc_limit=True):
"""
evaluate with given phase variables
"""
features = self.get_features(z)
if theta is None:
#use model parameters
res = features.T.dot(self.theta_)
else:
#use given parameters
res = features.T.dot(theta)
#truncate with limit if desired
if trunc_limit:
#are limits valid?
if self.upper_limit_ is not None and self.lower_limit_ is not None:
if self.upper_limit_ > self.lower_limit_:
res[res > self.upper_limit_] = self.upper_limit_
res[res < self.lower_limit_] = self.lower_limit_
return res
def gaussian_sampling(self, theta=None, noise=None, n_samples=10):
'''
conducting local gaussian sampling with the given mean theta and noise
        use the current theta if the mean theta is None
use unit noise if covariance matrix is not given
'''
if theta is None:
mean = self.theta_
else:
mean = theta
if noise is None:
covar = np.eye(len(self.theta_))
elif isinstance(noise, int) or isinstance(noise, float):
covar = np.eye(len(self.theta_)) * noise
else:
covar = noise
#make white gaussian because we might need to apply the linear constraints...
#<hyin/Feb-07th-2016> hmm, actually this is shifted noise, so remember not to apply that again
samples = np.random.multivariate_normal(mean, covar, n_samples)
if self.apply_lin_cons is None:
res = samples
else:
#apply linear constraint to apply the null-space perturbation
res = [self.apply_lin_cons(s) for s in samples]
return np.array(res)
def PyRBF_FuncApprox_Test():
#test
#fit sin
n_samples = 100
z = np.linspace(0.0, 1.0, 100)
y = np.cos(2*np.pi*z)
#feature parms
mu = np.arange(0.1, 1.0, 0.1)
sigma = 1./len(mu)
#model
rbf_mdl = PyRBF_FunctionApproximator(rbf_type='sigmoid', K=10, normalize=True)
#fit
res_theta = rbf_mdl.fit(z, y, True)
print 'fit parameters:', res_theta
y_hat = rbf_mdl.evaluate(z[n_samples/4:3*n_samples/4])
#draw the results
plt.ion()
fig = plt.figure()
ax = fig.add_subplot(111)
ax.hold(True)
ax.plot(z, y, linewidth=3.0)
ax.plot(z[n_samples/4:3*n_samples/4], y_hat, '.-', linewidth=3.0)
plt.draw()
#test for sampling and apply linear constrains
raw_input('Press ENTER to continue the test of random sampling')
rbf_mdl = PyRBF_FunctionApproximator(rbf_type='gaussian', K=10, normalize=True)
y = np.sin(np.linspace(0.0, np.pi, len(z)))
res_theta = rbf_mdl.fit(z, y, True)
print 'fit parameters:', res_theta
#anchoring the initial point...
rbf_mdl.set_linear_equ_constraints([z[0]], [y[0]])
#sampling...
init_fix_samples = rbf_mdl.gaussian_sampling()
init_fix_trajs = [rbf_mdl.evaluate(z, s) for s in init_fix_samples]
#anchoring both end points...
rbf_mdl.set_linear_equ_constraints([z[0], z[-1]], [y[0], y[-1]])
both_fix_samples = rbf_mdl.gaussian_sampling()
both_fix_trajs = [rbf_mdl.evaluate(z, s) for s in both_fix_samples]
print init_fix_samples, both_fix_samples
#show them...
fig = plt.figure()
ax1 = fig.add_subplot(211)
ax1.hold(True)
for traj in init_fix_trajs:
ax1.plot(z, traj, linewidth=3.0)
plt.draw()
ax2 = fig.add_subplot(212)
ax2.hold(True)
for traj in both_fix_trajs:
ax2.plot(z, traj, linewidth=3.0)
plt.draw()
return
|
gpl-3.0
|
hernick-qc/dRonin
|
python/ins/cins.py
|
11
|
3838
|
from sympy import symbols, lambdify, sqrt
from sympy import MatrixSymbol, Matrix
from numpy import cos, sin, power
from sympy.matrices import *
from quaternions import *
import numpy
import ins
# this is the set of (currently) recommended INS settings. modified from
# https://raw.githubusercontent.com/wiki/TauLabs/TauLabs/files/htfpv-sparky-nav_20130527.uav
default_mag_var = numpy.array([10.0, 10.0, 100.0])
default_gyro_var = numpy.array([1e-5, 1e-5, 1e-4])
default_accel_var = numpy.array([0.01, 0.01, 0.01])
default_baro_var = 0.1
default_gps_var=numpy.array([1e-3,1e-2,10])
class CINS:
GRAV = 9.805
def __init__(self):
""" Creates the CINS class.
Important variables are
* X - the vector of state variables
* Xd - the vector of state derivatives for state and inputs
* Y - the vector of outputs for current state value
"""
self.state = []
def configure(self, mag_var=None, gyro_var=None, accel_var=None, baro_var=None, gps_var=None):
""" configure the INS parameters """
if mag_var is not None:
ins.configure(mag_var=mag_var)
if gyro_var is not None:
ins.configure(gyro_var=gyro_var)
if accel_var is not None:
ins.configure(accel_var=accel_var)
if baro_var is not None:
ins.configure(baro_var=baro_var)
if gps_var is not None:
ins.configure(gps_var=gps_var)
def prepare(self):
""" prepare the C INS wrapper
"""
self.state = ins.init()
self.configure(
mag_var=default_mag_var,
gyro_var=default_gyro_var,
accel_var=default_accel_var,
baro_var=default_baro_var,
gps_var=default_gps_var
)
def predict(self, gyros, accels, dT = 1.0/666.0):
""" Perform the prediction step
"""
self.state = ins.prediction(gyros, accels, dT)
def correction(self, pos=None, vel=None, mag=None, baro=None):
""" Perform the INS correction based on the provided corrections
"""
sensors = 0
Z = numpy.zeros((10,),numpy.float64)
# the masks must match the values in insgps.h
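        # (bits 0-1 select the Z[0..1] position entries, bits 3-5 the Z[3..5] velocity,
        #  bits 6-8 the Z[6..8] magnetometer components, and bit 9 the Z[9] baro reading)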
if pos is not None:
sensors = sensors | 0x0003
Z[0] = pos[0]
Z[1] = pos[1]
if vel is not None:
sensors = sensors | 0x0038
Z[3] = vel[0]
Z[4] = vel[1]
Z[5] = vel[2]
if mag is not None:
sensors = sensors | 0x01C0
Z[6] = mag[0]
Z[7] = mag[1]
Z[8] = mag[2]
if baro is not None:
sensors = sensors | 0x0200
Z[9] = baro
self.state = ins.correction(Z, sensors)
def test():
""" test the INS with simulated data
"""
from numpy import cos, sin
import matplotlib.pyplot as plt
fig, ax = plt.subplots(2,2)
    sim = CINS()
sim.prepare()
dT = 1.0 / 666.0
STEPS = 100000
history = numpy.zeros((STEPS,16))
history_rpy = numpy.zeros((STEPS,3))
times = numpy.zeros((STEPS,1))
for k in range(STEPS):
ROLL = 0.1
YAW = 0.2
        sim.predict([0, 0, YAW], [0, CINS.GRAV*sin(ROLL), -CINS.GRAV*cos(ROLL) - 0.0], dT=dT)
history[k,:] = sim.state
history_rpy[k,:] = quat_rpy(sim.state[6:10])
times[k] = k * dT
angle = 0*numpy.pi/3 + YAW * dT * k # radians
height = 1.0 * k * dT
if True and k % 60 == 59:
sim.correction(pos=[[10],[5],[-height]])
if True and k % 60 == 59:
sim.correction(vel=[[0],[0],[-1]])
if k % 20 == 8:
sim.correction(baro=[height])
if True and k % 20 == 15:
sim.correction(mag=[[400 * cos(angle)], [-400 * sin(angle)], [1600]])
if k % 1000 == 0:
ax[0][0].cla()
ax[0][0].plot(times[0:k:4],history[0:k:4,0:3])
ax[0][0].set_title('Position')
ax[0][1].cla()
ax[0][1].plot(times[0:k:4],history[0:k:4,3:6])
ax[0][1].set_title('Velocity')
plt.sca(ax[0][1])
plt.ylim(-2,2)
ax[1][0].cla()
ax[1][0].plot(times[0:k:4],history_rpy[0:k:4,:])
ax[1][0].set_title('Attitude')
ax[1][1].cla()
ax[1][1].plot(times[0:k:4],history[0:k:4,10:])
ax[1][1].set_title('Biases')
plt.draw()
fig.show()
plt.show()
if __name__ =='__main__':
test()
|
gpl-3.0
|
mbayon/TFG-MachineLearning
|
venv/lib/python3.6/site-packages/sklearn/datasets/mlcomp.py
|
46
|
4089
|
# Copyright (c) 2010 Olivier Grisel <[email protected]>
# License: BSD 3 clause
"""Glue code to load http://mlcomp.org data as a scikit.learn dataset"""
import os
import numbers
from sklearn.datasets.base import load_files
from sklearn.utils import deprecated
def _load_document_classification(dataset_path, metadata, set_=None, **kwargs):
if set_ is not None:
dataset_path = os.path.join(dataset_path, set_)
return load_files(dataset_path, metadata.get('description'), **kwargs)
LOADERS = {
'DocumentClassification': _load_document_classification,
# TODO: implement the remaining domain formats
}
@deprecated("since the http://mlcomp.org/ website will shut down "
"in March 2017, the load_mlcomp function was deprecated "
"in version 0.19 and will be removed in 0.21.")
def load_mlcomp(name_or_id, set_="raw", mlcomp_root=None, **kwargs):
"""Load a datasets as downloaded from http://mlcomp.org
Parameters
----------
name_or_id : the integer id or the string name metadata of the MLComp
dataset to load
set_ : select the portion to load: 'train', 'test' or 'raw'
mlcomp_root : the filesystem path to the root folder where MLComp datasets
are stored, if mlcomp_root is None, the MLCOMP_DATASETS_HOME
environment variable is looked up instead.
**kwargs : domain specific kwargs to be passed to the dataset loader.
Read more in the :ref:`User Guide <datasets>`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
        'filenames', the files holding the raw data to learn, 'target', the
classification labels (integer index), 'target_names',
the meaning of the labels, and 'DESCR', the full description of the
dataset.
Note on the lookup process: depending on the type of name_or_id,
will choose between integer id lookup or metadata name lookup by
looking at the unzipped archives and metadata file.
TODO: implement zip dataset loading too
"""
if mlcomp_root is None:
try:
mlcomp_root = os.environ['MLCOMP_DATASETS_HOME']
except KeyError:
raise ValueError("MLCOMP_DATASETS_HOME env variable is undefined")
mlcomp_root = os.path.expanduser(mlcomp_root)
mlcomp_root = os.path.abspath(mlcomp_root)
mlcomp_root = os.path.normpath(mlcomp_root)
if not os.path.exists(mlcomp_root):
raise ValueError("Could not find folder: " + mlcomp_root)
# dataset lookup
if isinstance(name_or_id, numbers.Integral):
# id lookup
dataset_path = os.path.join(mlcomp_root, str(name_or_id))
else:
# assume name based lookup
dataset_path = None
expected_name_line = "name: " + name_or_id
for dataset in os.listdir(mlcomp_root):
metadata_file = os.path.join(mlcomp_root, dataset, 'metadata')
if not os.path.exists(metadata_file):
continue
with open(metadata_file) as f:
for line in f:
if line.strip() == expected_name_line:
dataset_path = os.path.join(mlcomp_root, dataset)
break
if dataset_path is None:
raise ValueError("Could not find dataset with metadata line: " +
expected_name_line)
# loading the dataset metadata
metadata = dict()
metadata_file = os.path.join(dataset_path, 'metadata')
if not os.path.exists(metadata_file):
raise ValueError(dataset_path + ' is not a valid MLComp dataset')
with open(metadata_file) as f:
for line in f:
if ":" in line:
key, value = line.split(":", 1)
metadata[key.strip()] = value.strip()
    format = metadata.get('format', 'unknown')
loader = LOADERS.get(format)
if loader is None:
raise ValueError("No loader implemented for format: " + format)
return loader(dataset_path, metadata, set_=set_, **kwargs)
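# Usage sketch (editor's addition; the loader is deprecated, so this stays a comment).
# Assuming the MLComp "20news-18828" dataset has been downloaded and unpacked under
# mlcomp_root (or $MLCOMP_DATASETS_HOME):
#
#     from sklearn.datasets import load_mlcomp
#     news_train = load_mlcomp('20news-18828', 'train', mlcomp_root='/data/mlcomp')
#     print(news_train.filenames[:5])
#     print(news_train.target_names)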
|
mit
|
hubwayPredict/main
|
Graph_Code/hubway2013_graph.py
|
1
|
7847
|
"""Part of Hubway Prediction project by Shane Kelly, William Lu, and
Kevin Crispie, Olin College of Engineering
"""
import csv
import matplotlib.pyplot as plt
import matplotlib
from pylab import *
def get_file(file_path):
""" reads a csv file and returns a file file_id
"""
f = open(file_path, 'r') #open with read
bike_reader = csv.reader(f)
return bike_reader
def get_month_days(file_id):
"""takes a file id and returns month_days, a list of all the individual
Hubway trips in a month in a list of lists. Each nested list has all the
dates for each trip in the given month_days
"""
standard_datetime = []
for line in file_id:
standard_datetime.append(line[4]) #appends trip start date to list
standard_datetime = standard_datetime [1::] #removes header text from list
month = []
day = []
year = []
    #creates lists of trip months, days, and years; multiplicity is the number of trips during
#that time period
for i in range(len(standard_datetime)):
only_date = standard_datetime[i].split(' ')
only_date_string = only_date[0]
split_date_string = only_date_string.split('/') #separate out parts of date
month.append(split_date_string[0])
day.append(split_date_string[1])
year.append(split_date_string[2])
#print day
april = day[month.index('4'):month.index('5')]
may = day[month.index('5'):month.index('6')]
june = day[month.index('6'):month.index('7')]
july = day[month.index('7'):month.index('8')]
august = day[month.index('8'):month.index('9')]
september = day[month.index('9'):month.index('10')]
october = day[month.index('10'):month.index('11')]
november = day[month.index('11')::]
return [april, may, june, july, august, september, october, november]
def fourierExtrapolation(x, n_predict):
n = x.size
n_harm = 4 # number of harmonics in model
t = np.arange(0, n)
p = np.polyfit(t, x, 1) # find linear trend in x
x_notrend = x - p[0] * t # detrended x
x_freqdom = fft.fft(x_notrend) # detrended x in frequency domain
f = fft.fftfreq(n) # frequencies
indexes = range(n)
# sort indexes by frequency, lower -> higher
indexes.sort(key = lambda i: np.absolute(f[i]))
t = np.arange(0, n + n_predict)
restored_sig = np.zeros(t.size)
for i in indexes[:1 + n_harm * 2]:
ampli = np.absolute(x_freqdom[i]) / n # amplitude
phase = np.angle(x_freqdom[i]) # phase
restored_sig += ampli * np.cos(2 * np.pi * f[i] * t + phase)
return restored_sig + p[0] * t
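# Editor's sketch of how the extrapolation above might be exercised; it is defined but
# never called, and it assumes `np` is available through the pylab star-import, exactly
# as fourierExtrapolation itself already does.
def _demo_fourier_extrapolation():
    t = np.arange(0, 56)
    #linear trend plus a weekly cycle, similar in shape to the ridership counts below
    x = 100.0 + 2.0 * t + 20.0 * np.sin(2 * np.pi * t / 7.0)
    #extend the signal by two more weeks using the low-order harmonics
    return fourierExtrapolation(x, 14)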
def day_of_week_classifier(data):
#Hubway opened on April 2nd, a Tuesday, in 2013
tues_start = 0
wed_start = 1
thurs_start = 2
fri_start = 3
sat_start = 4
sun_start = 5
mon_start = 6
mon = data[mon_start::7]
tues = data[tues_start::7]
wed = data[wed_start::7]
thurs = data[thurs_start::7]
fri = data[fri_start::7]
sat = data[sat_start::7]
sun = data[sun_start::7]
return (mon, tues, wed, thurs, fri, sat, sun)
def sum_daily_totals(daily_totals):
mon_sum = sum(daily_totals[0])
tues_sum = sum(daily_totals[1])
wed_sum = sum(daily_totals[2])
thurs_sum = sum(daily_totals[3])
fri_sum = sum(daily_totals[4])
sat_sum = sum(daily_totals[5])
sun_sum = sum(daily_totals[6])
return (mon_sum, tues_sum, wed_sum, thurs_sum, fri_sum, sat_sum, sun_sum)
def average_daily_totals(daily_totals):
mon_ave = sum(daily_totals[0])/len(daily_totals[0])
tues_ave = sum(daily_totals[1])/len(daily_totals[1])
wed_ave = sum(daily_totals[2])/len(daily_totals[2])
thurs_ave = sum(daily_totals[3])/len(daily_totals[3])
fri_ave = sum(daily_totals[4])/len(daily_totals[4])
sat_ave = sum(daily_totals[5])/len(daily_totals[5])
sun_ave = sum(daily_totals[6])/len(daily_totals[6])
return (mon_ave, tues_ave, wed_ave, thurs_ave, fri_ave, sat_ave, sun_ave)
def get_diff_average(averages):
"""Returns the difference between each datapoint and the average of the dataset
It is used to calculate the difference bewteen the daily totals and the
average totals for each day. It is returned as a ratio.
"""
all_averaged = np.mean(averages)
ratio_diffs = []
for x in range(len(averages)):
ratio_diffs.append((averages[x] - all_averaged) / all_averaged)
return ratio_diffs
def main():
file_path = 'HubwayData/2013_hubway_trips.csv'
bike_reader = get_file(file_path)
month_days = get_month_days(bike_reader)
#separate out trips by month
april = month_days[0]
may = month_days[1]
june = month_days[2]
july = month_days[3]
august = month_days[4]
september = month_days[5]
october = month_days[6]
november = month_days[7]
#count number of trips for each day, separated by month
april_count = []
for x in range(1,32):
april_count.append(april.count(str(x)))
april_count = april_count[2:-1]
may_count = []
for x in range(1,32):
may_count.append(may.count(str(x)))
june_count = []
for x in range(1,32):
june_count.append(june.count(str(x)))
june_count = june_count[:-1]
july_count = []
for x in range(1,32):
july_count.append(july.count(str(x)))
august_count = []
for x in range(1,32):
august_count.append(august.count(str(x)))
september_count = []
for x in range(1,32):
september_count.append(september.count(str(x)))
september_count = september_count[:-1]
october_count = []
for x in range(1,32):
october_count.append(october.count(str(x)))
november_count = []
for x in range(1,32):
november_count.append(november.count(str(x)))
november_count = november_count[:-1]
#get a list of number of trips for each month
all_months_count = april_count + may_count + june_count + july_count + august_count + september_count + october_count + november_count
#This code plots in 4 different graphs a polynomial regression,
#a bar chart of the total riders on each day of the week,
#the average riders per day of the week, and a bar chart
#of all the Mondays in the year.
#polynomial regression
fig1 = plt.figure(1)
yreg = all_months_count
xreg = range(len(yreg)) #each day counts up by 1
fit = polyfit(xreg,yreg,4) #regression
fit_fn = poly1d(fit) #generate polynomial from regression function
ax1 = fig1.add_subplot(111)
ax1.plot(xreg,yreg,'yo', xreg, fit_fn(xreg), '--k') #plot regression
#regular line plot
#plt.plot(all_months_count)
#Fourier Transform Regression
"""
xfour = np.array(yreg[70:70+21])
n_predict = len(xreg[70:70+21])
extrapolation = fourierExtrapolation(xfour, n_predict)
plt.plot(np.arange(0, extrapolation.size), extrapolation, 'r', label = 'extrapolation')
plt.plot(np.arange(0, xfour.size), xfour, 'b', label = 'x', linewidth = 3)
plt.plot(xreg[21:21+21],all_months_count[70+21:70+21+21])
plt.legend()
plt.show()
"""
ax1.set_xlabel('Day of Operation')
ax1.set_ylabel('Number of Riders')
ax1.set_title('Hubway Ridership in 2013')
daily_totals = day_of_week_classifier(all_months_count)
sum_totals = sum_daily_totals(daily_totals)
fig2 = plt.figure(2)
ax2 = fig2.add_subplot(111)
ax2.bar(range(7),sum_totals, 1/1.5, color = "blue")
ax2.set_xlabel('Day of Week')
ax2.set_ylabel('Amount of Riders')
ax2.set_title('Total Ridership by Day')
ave_totals = average_daily_totals(daily_totals)
fig3 = plt.figure(3)
ax3 = fig3.add_subplot(111)
ax3.bar(range(7),ave_totals, 1/1.5, color = "blue")
ax3.set_xlabel('Day of Week')
ax3.set_ylabel('Amount of Riders')
ax3.set_title('Average Ridership by Day')
fig4 = plt.figure(4)
ax4 = fig4.add_subplot(111)
ax4.bar(range(len(daily_totals[0])),daily_totals[0], 1/1.5, color = "blue")
ax4.set_xlabel('Time of Year')
ax4.set_ylabel('Amount of Riders')
ax4.set_title('Average Ridership for Mondays')
show()
#determine the ratio of the difference between the daily ridership and
#the average daily ridership and the average ridership
ratio_diffs = get_diff_average(ave_totals)
if __name__ == "__main__":
main()
|
mit
|
MartinThoma/algorithms
|
ML/gtsdb/analyze_model.py
|
2
|
3344
|
#!/usr/bin/env python
"""Analyze a cifar100 keras model."""
import io
import json
import gtsdb
import matplotlib.pyplot as plt
import numpy as np
from keras.models import load_model
from sklearn.model_selection import train_test_split
try:
to_unicode = unicode
except NameError:
to_unicode = str
n_classes = gtsdb.n_classes
def plot_cm(cm, zero_diagonal=False, labels=None):
"""Plot a confusion matrix."""
n = len(cm)
if zero_diagonal:
for i in range(n):
cm[i][i] = 0
size = int(n / 4.)
fig = plt.figure(figsize=(size, size), dpi=80, )
plt.clf()
ax = fig.add_subplot(111)
ax.set_aspect(1)
if labels is None:
labels = [i for i in range(len(cm))]
x = [i for i in range(len(cm))]
plt.xticks(x, labels, rotation='vertical')
y = [i for i in range(len(cm))]
plt.yticks(y, labels) # , rotation='vertical'
res = ax.imshow(np.array(cm), cmap=plt.cm.viridis,
interpolation='nearest')
width, height = cm.shape
fig.colorbar(res)
plt.savefig('confusion_matrix.png', format='png')
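# Editor's sketch (defined but not called): exercise plot_cm on a small random matrix.
# If invoked it writes confusion_matrix.png to the working directory, like main() does.
def _demo_plot_cm():
    rng = np.random.RandomState(0)
    cm = rng.randint(0, 50, size=(8, 8))
    plot_cm(cm, zero_diagonal=True, labels=list('ABCDEFGH'))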
def load_data():
"""Load data."""
data = gtsdb.load_data()
X_train = data['x_train']
y_train = data['y_train']
X_val = None
y_val = None
# X_train, X_val, y_train, y_val = train_test_split(X_train, y_train,
# test_size=0.10,
# random_state=42)
X_train = X_train.astype('float32')
# X_val = X_val.astype('float32')
# X_test = X_test.astype('float32')
X_train /= 255
# X_val /= 255
return X_train, X_val, y_train, y_val
def main(model_path):
# Load model
model = load_model(model_path)
X_train, X_val, y_train, y_val = load_data()
X = X_train
y = y_train
# Calculate confusion matrix
y_i = y.flatten()
y_pred = model.predict(X)
y_pred_i = y_pred.argmax(1)
cm = np.zeros((n_classes, n_classes), dtype=np.int)
for i, j in zip(y_i, y_pred_i):
cm[i][j] += 1
# Set "no sign" to 0
ignore_no_sign = False
if ignore_no_sign:
for i in range(n_classes):
cm[i][len(cm) - 1] = 0
cm[len(cm) - 1][i] = 0
acc = sum([cm[i][i] for i in range(n_classes)]) / float(cm.sum())
print("Accuracy: {:0.2f}%".format(acc * 100))
# Create plot
plot_cm(cm, zero_diagonal=True, labels=gtsdb.labels_short)
# Serialize confusion matrix
    with io.open('cm.json', 'w', encoding='utf8') as outfile:
str_ = json.dumps(cm.tolist(),
indent=4, sort_keys=True,
separators=(',', ':'), ensure_ascii=False)
outfile.write(to_unicode(str_))
def get_parser():
"""Get parser object for script xy.py."""
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser
parser = ArgumentParser(description=__doc__,
formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument("-f", "--file",
dest="model_path",
help="Path to a Keras model file",
metavar="model.h5",
required=True)
return parser
if __name__ == "__main__":
args = get_parser().parse_args()
main(args.model_path)
|
mit
|
cwu2011/scikit-learn
|
sklearn/metrics/tests/test_classification.py
|
42
|
52642
|
from __future__ import division, print_function
import numpy as np
from scipy import linalg
from functools import partial
from itertools import product
import warnings
from sklearn import datasets
from sklearn import svm
from sklearn.datasets import make_multilabel_classification
from sklearn.preprocessing import LabelBinarizer, MultiLabelBinarizer
from sklearn.preprocessing import label_binarize
from sklearn.utils.fixes import np_version
from sklearn.utils.validation import check_random_state
from sklearn.utils.testing import assert_raises, clean_warning_registry
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import ignore_warnings
from sklearn.metrics import accuracy_score
from sklearn.metrics import average_precision_score
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score
from sklearn.metrics import fbeta_score
from sklearn.metrics import hamming_loss
from sklearn.metrics import hinge_loss
from sklearn.metrics import jaccard_similarity_score
from sklearn.metrics import log_loss
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import precision_recall_fscore_support
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import zero_one_loss
from sklearn.metrics import brier_score_loss
from sklearn.metrics.classification import _check_targets
from sklearn.metrics.base import UndefinedMetricWarning
###############################################################################
# Utilities for testing
def make_prediction(dataset=None, binary=False):
"""Make some classification predictions on a toy dataset using a SVC
If binary is True restrict to a binary classification problem instead of a
multiclass classification problem
"""
if dataset is None:
# import some data to play with
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
if binary:
# restrict to a binary classification task
X, y = X[y < 2], y[y < 2]
n_samples, n_features = X.shape
p = np.arange(n_samples)
rng = check_random_state(37)
rng.shuffle(p)
X, y = X[p], y[p]
half = int(n_samples / 2)
# add noisy features to make the problem harder and avoid perfect results
rng = np.random.RandomState(0)
X = np.c_[X, rng.randn(n_samples, 200 * n_features)]
# run classifier, get class probabilities and label predictions
clf = svm.SVC(kernel='linear', probability=True, random_state=0)
probas_pred = clf.fit(X[:half], y[:half]).predict_proba(X[half:])
if binary:
# only interested in probabilities of the positive case
# XXX: do we really want a special API for the binary case?
probas_pred = probas_pred[:, 1]
y_pred = clf.predict(X[half:])
y_true = y[half:]
return y_true, y_pred, probas_pred
###############################################################################
# Tests
def test_multilabel_accuracy_score_subset_accuracy():
# Dense label indicator matrix format
y1 = np.array([[0, 1, 1], [1, 0, 1]])
y2 = np.array([[0, 0, 1], [1, 0, 1]])
assert_equal(accuracy_score(y1, y2), 0.5)
assert_equal(accuracy_score(y1, y1), 1)
assert_equal(accuracy_score(y2, y2), 1)
assert_equal(accuracy_score(y2, np.logical_not(y2)), 0)
assert_equal(accuracy_score(y1, np.logical_not(y1)), 0)
assert_equal(accuracy_score(y1, np.zeros(y1.shape)), 0)
assert_equal(accuracy_score(y2, np.zeros(y1.shape)), 0)
with ignore_warnings(): # sequence of sequences is deprecated
# List of tuple of label
y1 = [(1, 2,), (0, 2,)]
y2 = [(2,), (0, 2,)]
assert_equal(accuracy_score(y1, y2), 0.5)
assert_equal(accuracy_score(y1, y1), 1)
assert_equal(accuracy_score(y2, y2), 1)
assert_equal(accuracy_score(y2, [(), ()]), 0)
assert_equal(accuracy_score(y1, y2, normalize=False), 1)
assert_equal(accuracy_score(y1, y1, normalize=False), 2)
assert_equal(accuracy_score(y2, y2, normalize=False), 2)
assert_equal(accuracy_score(y2, [(), ()], normalize=False), 0)
def test_precision_recall_f1_score_binary():
# Test Precision Recall and F1 Score for binary classification task
y_true, y_pred, _ = make_prediction(binary=True)
# detailed measures for each class
p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None)
assert_array_almost_equal(p, [0.73, 0.85], 2)
assert_array_almost_equal(r, [0.88, 0.68], 2)
assert_array_almost_equal(f, [0.80, 0.76], 2)
assert_array_equal(s, [25, 25])
# individual scoring function that can be used for grid search: in the
# binary class case the score is the value of the measure for the positive
# class (e.g. label == 1). This is deprecated for average != 'binary'.
assert_dep_warning = partial(assert_warns, DeprecationWarning)
for kwargs, my_assert in [({}, assert_no_warnings),
({'average': 'binary'}, assert_no_warnings),
({'average': 'micro'}, assert_dep_warning)]:
ps = my_assert(precision_score, y_true, y_pred, **kwargs)
assert_array_almost_equal(ps, 0.85, 2)
rs = my_assert(recall_score, y_true, y_pred, **kwargs)
assert_array_almost_equal(rs, 0.68, 2)
fs = my_assert(f1_score, y_true, y_pred, **kwargs)
assert_array_almost_equal(fs, 0.76, 2)
assert_almost_equal(my_assert(fbeta_score, y_true, y_pred, beta=2,
**kwargs),
(1 + 2 ** 2) * ps * rs / (2 ** 2 * ps + rs), 2)
@ignore_warnings
def test_precision_recall_f_binary_single_class():
# Test precision, recall and F1 score behave with a single positive or
# negative class
# Such a case may occur with non-stratified cross-validation
assert_equal(1., precision_score([1, 1], [1, 1]))
assert_equal(1., recall_score([1, 1], [1, 1]))
assert_equal(1., f1_score([1, 1], [1, 1]))
assert_equal(0., precision_score([-1, -1], [-1, -1]))
assert_equal(0., recall_score([-1, -1], [-1, -1]))
assert_equal(0., f1_score([-1, -1], [-1, -1]))
@ignore_warnings
def test_precision_recall_f_extra_labels():
"""Test handling of explicit additional (not in input) labels to PRF
"""
y_true = [1, 3, 3, 2]
y_pred = [1, 1, 3, 2]
y_true_bin = label_binarize(y_true, classes=np.arange(5))
y_pred_bin = label_binarize(y_pred, classes=np.arange(5))
data = [(y_true, y_pred),
(y_true_bin, y_pred_bin)]
for i, (y_true, y_pred) in enumerate(data):
# No average: zeros in array
actual = recall_score(y_true, y_pred, labels=[0, 1, 2, 3, 4],
average=None)
assert_array_almost_equal([0., 1., 1., .5, 0.], actual)
# Macro average is changed
actual = recall_score(y_true, y_pred, labels=[0, 1, 2, 3, 4],
average='macro')
assert_array_almost_equal(np.mean([0., 1., 1., .5, 0.]), actual)
        # No effect otherwise
for average in ['micro', 'weighted', 'samples']:
if average == 'samples' and i == 0:
continue
assert_almost_equal(recall_score(y_true, y_pred,
labels=[0, 1, 2, 3, 4],
average=average),
recall_score(y_true, y_pred, labels=None,
average=average))
# Error when introducing invalid label in multilabel case
# (although it would only affect performance if average='macro'/None)
for average in [None, 'macro', 'micro', 'samples']:
assert_raises(ValueError, recall_score, y_true_bin, y_pred_bin,
labels=np.arange(6), average=average)
assert_raises(ValueError, recall_score, y_true_bin, y_pred_bin,
labels=np.arange(-1, 4), average=average)
@ignore_warnings
def test_precision_recall_f_ignored_labels():
"""Test a subset of labels may be requested for PRF"""
y_true = [1, 1, 2, 3]
y_pred = [1, 3, 3, 3]
y_true_bin = label_binarize(y_true, classes=np.arange(5))
y_pred_bin = label_binarize(y_pred, classes=np.arange(5))
data = [(y_true, y_pred),
(y_true_bin, y_pred_bin)]
for i, (y_true, y_pred) in enumerate(data):
recall_13 = partial(recall_score, y_true, y_pred, labels=[1, 3])
recall_all = partial(recall_score, y_true, y_pred, labels=None)
assert_array_almost_equal([.5, 1.], recall_13(average=None))
assert_almost_equal((.5 + 1.) / 2, recall_13(average='macro'))
assert_almost_equal((.5 * 2 + 1. * 1) / 3,
recall_13(average='weighted'))
assert_almost_equal(2. / 3, recall_13(average='micro'))
# ensure the above were meaningful tests:
for average in ['macro', 'weighted', 'micro']:
assert_not_equal(recall_13(average=average),
recall_all(average=average))
def test_average_precision_score_score_non_binary_class():
# Test that average_precision_score function returns an error when trying
# to compute average_precision_score for multiclass task.
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
average_precision_score, y_true, y_pred)
def test_average_precision_score_duplicate_values():
# Duplicate values with precision-recall require a different
# processing than when computing the AUC of a ROC, because the
# precision-recall curve is a decreasing curve
    # The following situation corresponds to a perfect
# test statistic, the average_precision_score should be 1
y_true = [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1]
y_score = [0, .1, .1, .4, .5, .6, .6, .9, .9, 1, 1]
assert_equal(average_precision_score(y_true, y_score), 1)
def test_average_precision_score_tied_values():
# Here if we go from left to right in y_true, the 0 values are
    # separated from the 1 values, so it appears that we've
# Correctly sorted our classifications. But in fact the first two
# values have the same score (0.5) and so the first two values
# could be swapped around, creating an imperfect sorting. This
# imperfection should come through in the end score, making it less
# than one.
y_true = [0, 1, 1]
y_score = [.5, .5, .6]
assert_not_equal(average_precision_score(y_true, y_score), 1.)
@ignore_warnings
def test_precision_recall_fscore_support_errors():
y_true, y_pred, _ = make_prediction(binary=True)
# Bad beta
assert_raises(ValueError, precision_recall_fscore_support,
y_true, y_pred, beta=0.0)
# Bad pos_label
assert_raises(ValueError, precision_recall_fscore_support,
y_true, y_pred, pos_label=2, average='macro')
# Bad average option
assert_raises(ValueError, precision_recall_fscore_support,
[0, 1, 2], [1, 2, 0], average='mega')
def test_confusion_matrix_binary():
# Test confusion matrix - binary classification case
y_true, y_pred, _ = make_prediction(binary=True)
def test(y_true, y_pred):
cm = confusion_matrix(y_true, y_pred)
assert_array_equal(cm, [[22, 3], [8, 17]])
tp, fp, fn, tn = cm.flatten()
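        # (the tp/fp/fn/tn names are just one relabeling of the 2x2 cells; the MCC
        #  formula below is invariant under that relabeling, so true_mcc is still correct)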
num = (tp * tn - fp * fn)
den = np.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
true_mcc = 0 if den == 0 else num / den
mcc = matthews_corrcoef(y_true, y_pred)
assert_array_almost_equal(mcc, true_mcc, decimal=2)
assert_array_almost_equal(mcc, 0.57, decimal=2)
test(y_true, y_pred)
test([str(y) for y in y_true],
[str(y) for y in y_pred])
@ignore_warnings
def test_matthews_corrcoef_nan():
assert_equal(matthews_corrcoef([0], [1]), 0.0)
assert_equal(matthews_corrcoef([0, 0], [0, 1]), 0.0)
def test_precision_recall_f1_score_multiclass():
# Test Precision Recall and F1 Score for multiclass classification task
y_true, y_pred, _ = make_prediction(binary=False)
# compute scores with default labels introspection
p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None)
assert_array_almost_equal(p, [0.83, 0.33, 0.42], 2)
assert_array_almost_equal(r, [0.79, 0.09, 0.90], 2)
assert_array_almost_equal(f, [0.81, 0.15, 0.57], 2)
assert_array_equal(s, [24, 31, 20])
# averaging tests
ps = precision_score(y_true, y_pred, pos_label=1, average='micro')
assert_array_almost_equal(ps, 0.53, 2)
rs = recall_score(y_true, y_pred, average='micro')
assert_array_almost_equal(rs, 0.53, 2)
fs = f1_score(y_true, y_pred, average='micro')
assert_array_almost_equal(fs, 0.53, 2)
ps = precision_score(y_true, y_pred, average='macro')
assert_array_almost_equal(ps, 0.53, 2)
rs = recall_score(y_true, y_pred, average='macro')
assert_array_almost_equal(rs, 0.60, 2)
fs = f1_score(y_true, y_pred, average='macro')
assert_array_almost_equal(fs, 0.51, 2)
ps = precision_score(y_true, y_pred, average='weighted')
assert_array_almost_equal(ps, 0.51, 2)
rs = recall_score(y_true, y_pred, average='weighted')
assert_array_almost_equal(rs, 0.53, 2)
fs = f1_score(y_true, y_pred, average='weighted')
assert_array_almost_equal(fs, 0.47, 2)
assert_raises(ValueError, precision_score, y_true, y_pred,
average="samples")
assert_raises(ValueError, recall_score, y_true, y_pred, average="samples")
assert_raises(ValueError, f1_score, y_true, y_pred, average="samples")
assert_raises(ValueError, fbeta_score, y_true, y_pred, average="samples",
beta=0.5)
    # same prediction but with an explicit label ordering
p, r, f, s = precision_recall_fscore_support(
y_true, y_pred, labels=[0, 2, 1], average=None)
assert_array_almost_equal(p, [0.83, 0.41, 0.33], 2)
assert_array_almost_equal(r, [0.79, 0.90, 0.10], 2)
assert_array_almost_equal(f, [0.81, 0.57, 0.15], 2)
assert_array_equal(s, [24, 20, 31])
def test_precision_refcall_f1_score_multilabel_unordered_labels():
# test that labels need not be sorted in the multilabel case
y_true = np.array([[1, 1, 0, 0]])
y_pred = np.array([[0, 0, 1, 1]])
for average in ['samples', 'micro', 'macro', 'weighted', None]:
p, r, f, s = precision_recall_fscore_support(
y_true, y_pred, labels=[3, 0, 1, 2], warn_for=[], average=average)
assert_array_equal(p, 0)
assert_array_equal(r, 0)
assert_array_equal(f, 0)
if average is None:
assert_array_equal(s, [0, 1, 1, 0])
def test_precision_recall_f1_score_multiclass_pos_label_none():
# Test Precision Recall and F1 Score for multiclass classification task
# GH Issue #1296
# initialize data
y_true = np.array([0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1])
y_pred = np.array([1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1])
# compute scores with default labels introspection
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
pos_label=None,
average='weighted')
def test_zero_precision_recall():
# Check that pathological cases do not produce NaNs
old_error_settings = np.seterr(all='raise')
try:
y_true = np.array([0, 1, 2, 0, 1, 2])
y_pred = np.array([2, 0, 1, 1, 2, 0])
assert_almost_equal(precision_score(y_true, y_pred,
average='weighted'), 0.0, 2)
assert_almost_equal(recall_score(y_true, y_pred, average='weighted'),
0.0, 2)
assert_almost_equal(f1_score(y_true, y_pred, average='weighted'),
0.0, 2)
finally:
np.seterr(**old_error_settings)
def test_confusion_matrix_multiclass():
# Test confusion matrix - multi-class case
y_true, y_pred, _ = make_prediction(binary=False)
def test(y_true, y_pred, string_type=False):
# compute confusion matrix with default labels introspection
cm = confusion_matrix(y_true, y_pred)
assert_array_equal(cm, [[19, 4, 1],
[4, 3, 24],
[0, 2, 18]])
# compute confusion matrix with explicit label ordering
labels = ['0', '2', '1'] if string_type else [0, 2, 1]
cm = confusion_matrix(y_true,
y_pred,
labels=labels)
assert_array_equal(cm, [[19, 1, 4],
[0, 18, 2],
[4, 24, 3]])
test(y_true, y_pred)
test(list(str(y) for y in y_true),
list(str(y) for y in y_pred),
string_type=True)
def test_confusion_matrix_multiclass_subset_labels():
# Test confusion matrix - multi-class case with subset of labels
y_true, y_pred, _ = make_prediction(binary=False)
# compute confusion matrix with only first two labels considered
cm = confusion_matrix(y_true, y_pred, labels=[0, 1])
assert_array_equal(cm, [[19, 4],
[4, 3]])
# compute confusion matrix with explicit label ordering for only subset
# of labels
cm = confusion_matrix(y_true, y_pred, labels=[2, 1])
assert_array_equal(cm, [[18, 2],
[24, 3]])
def test_classification_report_multiclass():
# Test performance report
iris = datasets.load_iris()
y_true, y_pred, _ = make_prediction(dataset=iris, binary=False)
# print classification report with class names
expected_report = """\
precision recall f1-score support
setosa 0.83 0.79 0.81 24
versicolor 0.33 0.10 0.15 31
virginica 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(
y_true, y_pred, labels=np.arange(len(iris.target_names)),
target_names=iris.target_names)
assert_equal(report, expected_report)
# print classification report with label detection
expected_report = """\
precision recall f1-score support
0 0.83 0.79 0.81 24
1 0.33 0.10 0.15 31
2 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
def test_classification_report_multiclass_with_digits():
# Test performance report with added digits in floating point values
iris = datasets.load_iris()
y_true, y_pred, _ = make_prediction(dataset=iris, binary=False)
# print classification report with class names
expected_report = """\
precision recall f1-score support
setosa 0.82609 0.79167 0.80851 24
versicolor 0.33333 0.09677 0.15000 31
virginica 0.41860 0.90000 0.57143 20
avg / total 0.51375 0.53333 0.47310 75
"""
report = classification_report(
y_true, y_pred, labels=np.arange(len(iris.target_names)),
target_names=iris.target_names, digits=5)
assert_equal(report, expected_report)
# print classification report with label detection
expected_report = """\
precision recall f1-score support
0 0.83 0.79 0.81 24
1 0.33 0.10 0.15 31
2 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
def test_classification_report_multiclass_with_string_label():
y_true, y_pred, _ = make_prediction(binary=False)
y_true = np.array(["blue", "green", "red"])[y_true]
y_pred = np.array(["blue", "green", "red"])[y_pred]
expected_report = """\
precision recall f1-score support
blue 0.83 0.79 0.81 24
green 0.33 0.10 0.15 31
red 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
expected_report = """\
precision recall f1-score support
a 0.83 0.79 0.81 24
b 0.33 0.10 0.15 31
c 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred,
target_names=["a", "b", "c"])
assert_equal(report, expected_report)
def test_classification_report_multiclass_with_unicode_label():
y_true, y_pred, _ = make_prediction(binary=False)
labels = np.array([u"blue\xa2", u"green\xa2", u"red\xa2"])
y_true = labels[y_true]
y_pred = labels[y_pred]
expected_report = u"""\
precision recall f1-score support
blue\xa2 0.83 0.79 0.81 24
green\xa2 0.33 0.10 0.15 31
red\xa2 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
if np_version[:3] < (1, 7, 0):
expected_message = ("NumPy < 1.7.0 does not implement"
" searchsorted on unicode data correctly.")
assert_raise_message(RuntimeError, expected_message,
classification_report, y_true, y_pred)
else:
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
@ignore_warnings # sequence of sequences is deprecated
def test_multilabel_classification_report():
n_classes = 4
n_samples = 50
make_ml = make_multilabel_classification
_, y_true_ll = make_ml(n_features=1, n_classes=n_classes, random_state=0,
n_samples=n_samples)
_, y_pred_ll = make_ml(n_features=1, n_classes=n_classes, random_state=1,
n_samples=n_samples)
expected_report = """\
precision recall f1-score support
0 0.50 0.67 0.57 24
1 0.51 0.74 0.61 27
2 0.29 0.08 0.12 26
3 0.52 0.56 0.54 27
avg / total 0.45 0.51 0.46 104
"""
lb = MultiLabelBinarizer()
lb.fit([range(4)])
y_true_bi = lb.transform(y_true_ll)
y_pred_bi = lb.transform(y_pred_ll)
for y_true, y_pred in [(y_true_ll, y_pred_ll), (y_true_bi, y_pred_bi)]:
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
def test_multilabel_zero_one_loss_subset():
# Dense label indicator matrix format
y1 = np.array([[0, 1, 1], [1, 0, 1]])
y2 = np.array([[0, 0, 1], [1, 0, 1]])
assert_equal(zero_one_loss(y1, y2), 0.5)
assert_equal(zero_one_loss(y1, y1), 0)
assert_equal(zero_one_loss(y2, y2), 0)
assert_equal(zero_one_loss(y2, np.logical_not(y2)), 1)
assert_equal(zero_one_loss(y1, np.logical_not(y1)), 1)
assert_equal(zero_one_loss(y1, np.zeros(y1.shape)), 1)
assert_equal(zero_one_loss(y2, np.zeros(y1.shape)), 1)
with ignore_warnings(): # sequence of sequences is deprecated
# List of tuples of labels
y1 = [(1, 2,), (0, 2,)]
y2 = [(2,), (0, 2,)]
assert_equal(zero_one_loss(y1, y2), 0.5)
assert_equal(zero_one_loss(y1, y1), 0)
assert_equal(zero_one_loss(y2, y2), 0)
assert_equal(zero_one_loss(y2, [(), ()]), 1)
assert_equal(zero_one_loss(y2, [tuple(), (10, )]), 1)
def test_multilabel_hamming_loss():
# Dense label indicator matrix format
y1 = np.array([[0, 1, 1], [1, 0, 1]])
y2 = np.array([[0, 0, 1], [1, 0, 1]])
assert_equal(hamming_loss(y1, y2), 1 / 6)
assert_equal(hamming_loss(y1, y1), 0)
assert_equal(hamming_loss(y2, y2), 0)
assert_equal(hamming_loss(y2, np.logical_not(y2)), 1)
assert_equal(hamming_loss(y1, np.logical_not(y1)), 1)
assert_equal(hamming_loss(y1, np.zeros(y1.shape)), 4 / 6)
assert_equal(hamming_loss(y2, np.zeros(y1.shape)), 0.5)
with ignore_warnings(): # sequence of sequences is deprecated
# List of tuples of labels
y1 = [(1, 2,), (0, 2,)]
y2 = [(2,), (0, 2,)]
assert_equal(hamming_loss(y1, y2), 1 / 6)
assert_equal(hamming_loss(y1, y1), 0)
assert_equal(hamming_loss(y2, y2), 0)
assert_equal(hamming_loss(y2, [(), ()]), 0.75)
assert_equal(hamming_loss(y1, [tuple(), (10, )]), 0.625)
assert_almost_equal(hamming_loss(y2, [tuple(), (10, )],
classes=np.arange(11)), 0.1818, 2)
def test_multilabel_jaccard_similarity_score():
# Dense label indicator matrix format
y1 = np.array([[0, 1, 1], [1, 0, 1]])
y2 = np.array([[0, 0, 1], [1, 0, 1]])
# size(y1 \inter y2) = [1, 2]
# size(y1 \union y2) = [2, 2]
assert_equal(jaccard_similarity_score(y1, y2), 0.75)
assert_equal(jaccard_similarity_score(y1, y1), 1)
assert_equal(jaccard_similarity_score(y2, y2), 1)
assert_equal(jaccard_similarity_score(y2, np.logical_not(y2)), 0)
assert_equal(jaccard_similarity_score(y1, np.logical_not(y1)), 0)
assert_equal(jaccard_similarity_score(y1, np.zeros(y1.shape)), 0)
assert_equal(jaccard_similarity_score(y2, np.zeros(y1.shape)), 0)
with ignore_warnings(): # sequence of sequences is deprecated
# List of tuples of labels
y1 = [(1, 2,), (0, 2,)]
y2 = [(2,), (0, 2,)]
assert_equal(jaccard_similarity_score(y1, y2), 0.75)
assert_equal(jaccard_similarity_score(y1, y1), 1)
assert_equal(jaccard_similarity_score(y2, y2), 1)
assert_equal(jaccard_similarity_score(y2, [(), ()]), 0)
# |y3 inter y4 | = [0, 0, 0]
# |y3 union y4 | = [2, 2, 3]
y3 = [(0,), (1,), (3,)]
y4 = [(4,), (4,), (5, 6)]
assert_almost_equal(jaccard_similarity_score(y3, y4), 0)
# |y5 inter y6 | = [0, 1, 1]
# |y5 union y6 | = [2, 1, 3]
y5 = [(0,), (1,), (2, 3)]
y6 = [(1,), (1,), (2, 0)]
assert_almost_equal(jaccard_similarity_score(y5, y6), (1 + 1 / 3) / 3)
@ignore_warnings
def test_precision_recall_f1_score_multilabel_1():
# Test precision_recall_f1_score on a crafted multilabel example
# First crafted example
y_true_ll = [(0,), (1,), (2, 3)]
y_pred_ll = [(1,), (1,), (2, 0)]
lb = LabelBinarizer()
lb.fit([range(4)])
y_true_bi = lb.transform(y_true_ll)
y_pred_bi = lb.transform(y_pred_ll)
for y_true, y_pred in [(y_true_ll, y_pred_ll), (y_true_bi, y_pred_bi)]:
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average=None)
# tp = [0, 1, 1, 0]
# fn = [1, 0, 0, 1]
# fp = [1, 1, 0, 0]
# Check per class
assert_array_almost_equal(p, [0.0, 0.5, 1.0, 0.0], 2)
assert_array_almost_equal(r, [0.0, 1.0, 1.0, 0.0], 2)
assert_array_almost_equal(f, [0.0, 1 / 1.5, 1, 0.0], 2)
assert_array_almost_equal(s, [1, 1, 1, 1], 2)
f2 = fbeta_score(y_true, y_pred, beta=2, average=None)
support = s
assert_array_almost_equal(f2, [0, 0.83, 1, 0], 2)
# Check macro
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="macro")
assert_almost_equal(p, 1.5 / 4)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 2.5 / 1.5 * 0.25)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="macro"),
np.mean(f2))
# Check micro
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="micro")
assert_almost_equal(p, 0.5)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 0.5)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="micro"),
(1 + 4) * p * r / (4 * p + r))
# Check weighted
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="weighted")
assert_almost_equal(p, 1.5 / 4)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 2.5 / 1.5 * 0.25)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="weighted"),
np.average(f2, weights=support))
# Check samples
# |h(x_i) inter y_i | = [0, 1, 1]
# |y_i| = [1, 1, 2]
# |h(x_i)| = [1, 1, 2]
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="samples")
assert_almost_equal(p, 0.5)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 0.5)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="samples"),
0.5)
@ignore_warnings
def test_precision_recall_f1_score_multilabel_2():
# Test precision_recall_f1_score on a crafted multilabel example 2
# Second crafted example
y_true_ll = [(1,), (2,), (2, 3)]
y_pred_ll = [(4,), (4,), (2, 1)]
lb = LabelBinarizer()
lb.fit([range(1, 5)])
y_true_bi = lb.transform(y_true_ll)
y_pred_bi = lb.transform(y_pred_ll)
for y_true, y_pred in [(y_true_ll, y_pred_ll), (y_true_bi, y_pred_bi)]:
# tp = [ 0. 1. 0. 0.]
# fp = [ 1. 0. 0. 2.]
# fn = [ 1. 1. 1. 0.]
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average=None)
assert_array_almost_equal(p, [0.0, 1.0, 0.0, 0.0], 2)
assert_array_almost_equal(r, [0.0, 0.5, 0.0, 0.0], 2)
assert_array_almost_equal(f, [0.0, 0.66, 0.0, 0.0], 2)
assert_array_almost_equal(s, [1, 2, 1, 0], 2)
f2 = fbeta_score(y_true, y_pred, beta=2, average=None)
support = s
assert_array_almost_equal(f2, [0, 0.55, 0, 0], 2)
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="micro")
assert_almost_equal(p, 0.25)
assert_almost_equal(r, 0.25)
assert_almost_equal(f, 2 * 0.25 * 0.25 / 0.5)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="micro"),
(1 + 4) * p * r / (4 * p + r))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="macro")
assert_almost_equal(p, 0.25)
assert_almost_equal(r, 0.125)
assert_almost_equal(f, 2 / 12)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="macro"),
np.mean(f2))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="weighted")
assert_almost_equal(p, 2 / 4)
assert_almost_equal(r, 1 / 4)
assert_almost_equal(f, 2 / 3 * 2 / 4)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="weighted"),
np.average(f2, weights=support))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="samples")
# Check samples
# |h(x_i) inter y_i | = [0, 0, 1]
# |y_i| = [1, 1, 2]
# |h(x_i)| = [1, 1, 2]
assert_almost_equal(p, 1 / 6)
assert_almost_equal(r, 1 / 6)
assert_almost_equal(f, 2 / 4 * 1 / 3)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="samples"),
0.1666, 2)
@ignore_warnings
def test_precision_recall_f1_score_with_an_empty_prediction():
y_true_ll = [(1,), (0,), (2, 1,)]
y_pred_ll = [tuple(), (3,), (2, 1)]
lb = LabelBinarizer()
lb.fit([range(4)])
y_true_bi = lb.transform(y_true_ll)
y_pred_bi = lb.transform(y_pred_ll)
for y_true, y_pred in [(y_true_ll, y_pred_ll), (y_true_bi, y_pred_bi)]:
# true_pos = [ 0. 1. 1. 0.]
# false_pos = [ 0. 0. 0. 1.]
# false_neg = [ 1. 1. 0. 0.]
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average=None)
assert_array_almost_equal(p, [0.0, 1.0, 1.0, 0.0], 2)
assert_array_almost_equal(r, [0.0, 0.5, 1.0, 0.0], 2)
assert_array_almost_equal(f, [0.0, 1 / 1.5, 1, 0.0], 2)
assert_array_almost_equal(s, [1, 2, 1, 0], 2)
f2 = fbeta_score(y_true, y_pred, beta=2, average=None)
support = s
assert_array_almost_equal(f2, [0, 0.55, 1, 0], 2)
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="macro")
assert_almost_equal(p, 0.5)
assert_almost_equal(r, 1.5 / 4)
assert_almost_equal(f, 2.5 / (4 * 1.5))
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="macro"),
np.mean(f2))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="micro")
assert_almost_equal(p, 2 / 3)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 2 / 3 / (2 / 3 + 0.5))
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="micro"),
(1 + 4) * p * r / (4 * p + r))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="weighted")
assert_almost_equal(p, 3 / 4)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, (2 / 1.5 + 1) / 4)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="weighted"),
np.average(f2, weights=support))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="samples")
# |h(x_i) inter y_i | = [0, 0, 2]
# |y_i| = [1, 1, 2]
# |h(x_i)| = [0, 1, 2]
assert_almost_equal(p, 1 / 3)
assert_almost_equal(r, 1 / 3)
assert_almost_equal(f, 1 / 3)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="samples"),
0.333, 2)
def test_precision_recall_f1_no_labels():
y_true = np.zeros((20, 3))
y_pred = np.zeros_like(y_true)
# tp = [0, 0, 0]
# fn = [0, 0, 0]
# fp = [0, 0, 0]
# support = [0, 0, 0]
# |y_hat_i inter y_i | = [0, 0, 0]
# |y_i| = [0, 0, 0]
# |y_hat_i| = [0, 0, 0]
for beta in [1]:
p, r, f, s = assert_warns(UndefinedMetricWarning,
precision_recall_fscore_support,
y_true, y_pred, average=None, beta=beta)
assert_array_almost_equal(p, [0, 0, 0], 2)
assert_array_almost_equal(r, [0, 0, 0], 2)
assert_array_almost_equal(f, [0, 0, 0], 2)
assert_array_almost_equal(s, [0, 0, 0], 2)
fbeta = assert_warns(UndefinedMetricWarning, fbeta_score,
y_true, y_pred, beta=beta, average=None)
assert_array_almost_equal(fbeta, [0, 0, 0], 2)
for average in ["macro", "micro", "weighted", "samples"]:
p, r, f, s = assert_warns(UndefinedMetricWarning,
precision_recall_fscore_support,
y_true, y_pred, average=average,
beta=beta)
assert_almost_equal(p, 0)
assert_almost_equal(r, 0)
assert_almost_equal(f, 0)
assert_equal(s, None)
fbeta = assert_warns(UndefinedMetricWarning, fbeta_score,
y_true, y_pred,
beta=beta, average=average)
assert_almost_equal(fbeta, 0)
def test_prf_warnings():
# average of per-label scores
f, w = precision_recall_fscore_support, UndefinedMetricWarning
my_assert = assert_warns_message
for average in [None, 'weighted', 'macro']:
msg = ('Precision and F-score are ill-defined and '
'being set to 0.0 in labels with no predicted samples.')
my_assert(w, msg, f, [0, 1, 2], [1, 1, 2], average=average)
msg = ('Recall and F-score are ill-defined and '
'being set to 0.0 in labels with no true samples.')
my_assert(w, msg, f, [1, 1, 2], [0, 1, 2], average=average)
# average of per-sample scores
msg = ('Precision and F-score are ill-defined and '
'being set to 0.0 in samples with no predicted labels.')
my_assert(w, msg, f, np.array([[1, 0], [1, 0]]),
np.array([[1, 0], [0, 0]]), average='samples')
msg = ('Recall and F-score are ill-defined and '
'being set to 0.0 in samples with no true labels.')
my_assert(w, msg, f, np.array([[1, 0], [0, 0]]),
np.array([[1, 0], [1, 0]]),
average='samples')
# single score: micro-average
msg = ('Precision and F-score are ill-defined and '
'being set to 0.0 due to no predicted samples.')
my_assert(w, msg, f, np.array([[1, 1], [1, 1]]),
np.array([[0, 0], [0, 0]]), average='micro')
msg = ('Recall and F-score are ill-defined and '
'being set to 0.0 due to no true samples.')
my_assert(w, msg, f, np.array([[0, 0], [0, 0]]),
np.array([[1, 1], [1, 1]]), average='micro')
# single positive label
msg = ('Precision and F-score are ill-defined and '
'being set to 0.0 due to no predicted samples.')
my_assert(w, msg, f, [1, 1], [-1, -1], average='macro')
msg = ('Recall and F-score are ill-defined and '
'being set to 0.0 due to no true samples.')
my_assert(w, msg, f, [-1, -1], [1, 1], average='macro')
def test_recall_warnings():
assert_no_warnings(recall_score,
np.array([[1, 1], [1, 1]]),
np.array([[0, 0], [0, 0]]),
average='micro')
clean_warning_registry()
with warnings.catch_warnings(record=True) as record:
warnings.simplefilter('always')
recall_score(np.array([[0, 0], [0, 0]]),
np.array([[1, 1], [1, 1]]),
average='micro')
assert_equal(str(record.pop().message),
'Recall is ill-defined and '
'being set to 0.0 due to no true samples.')
def test_precision_warnings():
clean_warning_registry()
with warnings.catch_warnings(record=True) as record:
warnings.simplefilter('always')
precision_score(np.array([[1, 1], [1, 1]]),
np.array([[0, 0], [0, 0]]),
average='micro')
assert_equal(str(record.pop().message),
'Precision is ill-defined and '
'being set to 0.0 due to no predicted samples.')
assert_no_warnings(precision_score,
np.array([[0, 0], [0, 0]]),
np.array([[1, 1], [1, 1]]),
average='micro')
def test_fscore_warnings():
clean_warning_registry()
with warnings.catch_warnings(record=True) as record:
warnings.simplefilter('always')
for score in [f1_score, partial(fbeta_score, beta=2)]:
score(np.array([[1, 1], [1, 1]]),
np.array([[0, 0], [0, 0]]),
average='micro')
assert_equal(str(record.pop().message),
'F-score is ill-defined and '
'being set to 0.0 due to no predicted samples.')
score(np.array([[0, 0], [0, 0]]),
np.array([[1, 1], [1, 1]]),
average='micro')
assert_equal(str(record.pop().message),
'F-score is ill-defined and '
'being set to 0.0 due to no true samples.')
def test_prf_average_compat():
# Ensure warning if f1_score et al.'s average is implicit for multiclass
y_true = [1, 2, 3, 3]
y_pred = [1, 2, 3, 1]
y_true_bin = [0, 1, 1]
y_pred_bin = [0, 1, 0]
for metric in [precision_score, recall_score, f1_score,
partial(fbeta_score, beta=2)]:
score = assert_warns(DeprecationWarning, metric, y_true, y_pred)
score_weighted = assert_no_warnings(metric, y_true, y_pred,
average='weighted')
assert_equal(score, score_weighted,
'average does not act like "weighted" by default')
# check binary passes without warning
assert_no_warnings(metric, y_true_bin, y_pred_bin)
# but binary with pos_label=None should behave like multiclass
score = assert_warns(DeprecationWarning, metric,
y_true_bin, y_pred_bin, pos_label=None)
score_weighted = assert_no_warnings(metric, y_true_bin, y_pred_bin,
pos_label=None, average='weighted')
assert_equal(score, score_weighted,
'average does not act like "weighted" by default with '
'binary data and pos_label=None')
@ignore_warnings # sequence of sequences is deprecated
def test__check_targets():
# Check that _check_targets correctly merges target types, squeezes
# output and fails if input lengths differ.
IND = 'multilabel-indicator'
SEQ = 'multilabel-sequences'
MC = 'multiclass'
BIN = 'binary'
CNT = 'continuous'
MMC = 'multiclass-multioutput'
MCN = 'continuous-multioutput'
# all of length 3
EXAMPLES = [
(IND, np.array([[0, 1, 1], [1, 0, 0], [0, 0, 1]])),
# must not be considered binary
(IND, np.array([[0, 1], [1, 0], [1, 1]])),
(SEQ, [[2, 3], [1], [3]]),
(MC, [2, 3, 1]),
(BIN, [0, 1, 1]),
(CNT, [0., 1.5, 1.]),
(MC, np.array([[2], [3], [1]])),
(BIN, np.array([[0], [1], [1]])),
(CNT, np.array([[0.], [1.5], [1.]])),
(MMC, np.array([[0, 2], [1, 3], [2, 3]])),
(MCN, np.array([[0.5, 2.], [1.1, 3.], [2., 3.]])),
]
# expected type given input types, or None for error
# (types will be tried in either order)
EXPECTED = {
(IND, IND): IND,
(SEQ, SEQ): IND,
(MC, MC): MC,
(BIN, BIN): BIN,
(IND, SEQ): None,
(MC, SEQ): None,
(BIN, SEQ): None,
(MC, IND): None,
(BIN, IND): None,
(BIN, MC): MC,
# Disallowed types
(CNT, CNT): None,
(MMC, MMC): None,
(MCN, MCN): None,
(IND, CNT): None,
(SEQ, CNT): None,
(MC, CNT): None,
(BIN, CNT): None,
(MMC, CNT): None,
(MCN, CNT): None,
(IND, MMC): None,
(SEQ, MMC): None,
(MC, MMC): None,
(BIN, MMC): None,
(MCN, MMC): None,
(IND, MCN): None,
(SEQ, MCN): None,
(MC, MCN): None,
(BIN, MCN): None,
}
for (type1, y1), (type2, y2) in product(EXAMPLES, repeat=2):
try:
expected = EXPECTED[type1, type2]
except KeyError:
expected = EXPECTED[type2, type1]
if expected is None:
assert_raises(ValueError, _check_targets, y1, y2)
if type1 != type2:
assert_raise_message(
ValueError,
"Can't handle mix of {0} and {1}".format(type1, type2),
_check_targets, y1, y2)
else:
if type1 not in (BIN, MC, SEQ, IND):
assert_raise_message(ValueError,
"{0} is not supported".format(type1),
_check_targets, y1, y2)
else:
merged_type, y1out, y2out = _check_targets(y1, y2)
assert_equal(merged_type, expected)
if merged_type.startswith('multilabel'):
assert_equal(y1out.format, 'csr')
assert_equal(y2out.format, 'csr')
else:
assert_array_equal(y1out, np.squeeze(y1))
assert_array_equal(y2out, np.squeeze(y2))
assert_raises(ValueError, _check_targets, y1[:-1], y2)
def test_hinge_loss_binary():
y_true = np.array([-1, 1, 1, -1])
pred_decision = np.array([-8.5, 0.5, 1.5, -0.3])
assert_equal(hinge_loss(y_true, pred_decision), 1.2 / 4)
y_true = np.array([0, 2, 2, 0])
pred_decision = np.array([-8.5, 0.5, 1.5, -0.3])
assert_equal(hinge_loss(y_true, pred_decision), 1.2 / 4)
def test_hinge_loss_multiclass():
pred_decision = np.array([
[0.36, -0.17, -0.58, -0.99],
[-0.54, -0.37, -0.48, -0.58],
[-1.45, -0.58, -0.38, -0.17],
[-0.54, -0.38, -0.48, -0.58],
[-2.36, -0.79, -0.27, 0.24],
[-1.45, -0.58, -0.38, -0.17]
])
y_true = np.array([0, 1, 2, 1, 3, 2])
dummy_losses = np.array([
1 - pred_decision[0][0] + pred_decision[0][1],
1 - pred_decision[1][1] + pred_decision[1][2],
1 - pred_decision[2][2] + pred_decision[2][3],
1 - pred_decision[3][1] + pred_decision[3][2],
1 - pred_decision[4][3] + pred_decision[4][2],
1 - pred_decision[5][2] + pred_decision[5][3]
])
dummy_losses[dummy_losses <= 0] = 0
dummy_hinge_loss = np.mean(dummy_losses)
assert_equal(hinge_loss(y_true, pred_decision),
dummy_hinge_loss)
def test_hinge_loss_multiclass_missing_labels_with_labels_none():
y_true = np.array([0, 1, 2, 2])
pred_decision = np.array([
[1.27, 0.034, -0.68, -1.40],
[-1.45, -0.58, -0.38, -0.17],
[-2.36, -0.79, -0.27, 0.24],
[-2.36, -0.79, -0.27, 0.24]
])
error_message = ("Please include all labels in y_true "
"or pass labels as third argument")
assert_raise_message(ValueError,
error_message,
hinge_loss, y_true, pred_decision)
def test_hinge_loss_multiclass_with_missing_labels():
pred_decision = np.array([
[0.36, -0.17, -0.58, -0.99],
[-0.55, -0.38, -0.48, -0.58],
[-1.45, -0.58, -0.38, -0.17],
[-0.55, -0.38, -0.48, -0.58],
[-1.45, -0.58, -0.38, -0.17]
])
y_true = np.array([0, 1, 2, 1, 2])
labels = np.array([0, 1, 2, 3])
dummy_losses = np.array([
1 - pred_decision[0][0] + pred_decision[0][1],
1 - pred_decision[1][1] + pred_decision[1][2],
1 - pred_decision[2][2] + pred_decision[2][3],
1 - pred_decision[3][1] + pred_decision[3][2],
1 - pred_decision[4][2] + pred_decision[4][3]
])
dummy_losses[dummy_losses <= 0] = 0
dummy_hinge_loss = np.mean(dummy_losses)
assert_equal(hinge_loss(y_true, pred_decision, labels=labels),
dummy_hinge_loss)
def test_hinge_loss_multiclass_invariance_lists():
# Currently, invariance of string and integer labels cannot be tested
# in common invariance tests because invariance tests for multiclass
# decision functions are not implemented yet.
y_true = ['blue', 'green', 'red',
'green', 'white', 'red']
pred_decision = [
[0.36, -0.17, -0.58, -0.99],
[-0.55, -0.38, -0.48, -0.58],
[-1.45, -0.58, -0.38, -0.17],
[-0.55, -0.38, -0.48, -0.58],
[-2.36, -0.79, -0.27, 0.24],
[-1.45, -0.58, -0.38, -0.17]]
dummy_losses = np.array([
1 - pred_decision[0][0] + pred_decision[0][1],
1 - pred_decision[1][1] + pred_decision[1][2],
1 - pred_decision[2][2] + pred_decision[2][3],
1 - pred_decision[3][1] + pred_decision[3][2],
1 - pred_decision[4][3] + pred_decision[4][2],
1 - pred_decision[5][2] + pred_decision[5][3]
])
dummy_losses[dummy_losses <= 0] = 0
dummy_hinge_loss = np.mean(dummy_losses)
assert_equal(hinge_loss(y_true, pred_decision),
dummy_hinge_loss)
def test_log_loss():
# binary case with symbolic labels ("no" < "yes")
y_true = ["no", "no", "no", "yes", "yes", "yes"]
y_pred = np.array([[0.5, 0.5], [0.1, 0.9], [0.01, 0.99],
[0.9, 0.1], [0.75, 0.25], [0.001, 0.999]])
loss = log_loss(y_true, y_pred)
assert_almost_equal(loss, 1.8817971)
# multiclass case; adapted from http://bit.ly/RJJHWA
y_true = [1, 0, 2]
y_pred = [[0.2, 0.7, 0.1], [0.6, 0.2, 0.2], [0.6, 0.1, 0.3]]
loss = log_loss(y_true, y_pred, normalize=True)
assert_almost_equal(loss, 0.6904911)
# check that we got all the shapes and axes right
# by doubling the length of y_true and y_pred
y_true *= 2
y_pred *= 2
loss = log_loss(y_true, y_pred, normalize=False)
assert_almost_equal(loss, 0.6904911 * 6, decimal=6)
# check eps and handling of absolute zero and one probabilities
y_pred = np.asarray(y_pred) > .5
loss = log_loss(y_true, y_pred, normalize=True, eps=.1)
assert_almost_equal(loss, log_loss(y_true, np.clip(y_pred, .1, .9)))
# raise error if number of classes are not equal.
y_true = [1, 0, 2]
y_pred = [[0.2, 0.7], [0.6, 0.5], [0.4, 0.1]]
assert_raises(ValueError, log_loss, y_true, y_pred)
# case when y_true is a string array object
y_true = ["ham", "spam", "spam", "ham"]
y_pred = [[0.2, 0.7], [0.6, 0.5], [0.4, 0.1], [0.7, 0.2]]
loss = log_loss(y_true, y_pred)
assert_almost_equal(loss, 1.0383217, decimal=6)
def test_brier_score_loss():
# Check brier_score_loss function
y_true = np.array([0, 1, 1, 0, 1, 1])
y_pred = np.array([0.1, 0.8, 0.9, 0.3, 1., 0.95])
true_score = linalg.norm(y_true - y_pred) ** 2 / len(y_true)
assert_almost_equal(brier_score_loss(y_true, y_true), 0.0)
assert_almost_equal(brier_score_loss(y_true, y_pred), true_score)
assert_almost_equal(brier_score_loss(1. + y_true, y_pred),
true_score)
assert_almost_equal(brier_score_loss(2 * y_true - 1, y_pred),
true_score)
assert_raises(ValueError, brier_score_loss, y_true, y_pred[1:])
assert_raises(ValueError, brier_score_loss, y_true, y_pred + 1.)
assert_raises(ValueError, brier_score_loss, y_true, y_pred - 1.)
|
bsd-3-clause
|
MartinDelzant/scikit-learn
|
examples/ensemble/plot_gradient_boosting_regression.py
|
227
|
2520
|
"""
============================
Gradient Boosting regression
============================
Demonstrate Gradient Boosting on the Boston housing dataset.
This example fits a Gradient Boosting model with least squares loss and
500 regression trees of depth 4.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn import datasets
from sklearn.utils import shuffle
from sklearn.metrics import mean_squared_error
###############################################################################
# Load data
boston = datasets.load_boston()
X, y = shuffle(boston.data, boston.target, random_state=13)
X = X.astype(np.float32)
offset = int(X.shape[0] * 0.9)
X_train, y_train = X[:offset], y[:offset]
X_test, y_test = X[offset:], y[offset:]
###############################################################################
# Fit regression model
params = {'n_estimators': 500, 'max_depth': 4, 'min_samples_split': 1,
'learning_rate': 0.01, 'loss': 'ls'}
clf = ensemble.GradientBoostingRegressor(**params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
print("MSE: %.4f" % mse)
###############################################################################
# Plot training deviance
# compute test set deviance
test_score = np.zeros((params['n_estimators'],), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
test_score[i] = clf.loss_(y_test, y_pred)
plt.figure(figsize=(12, 6))
plt.subplot(1, 2, 1)
plt.title('Deviance')
plt.plot(np.arange(params['n_estimators']) + 1, clf.train_score_, 'b-',
label='Training Set Deviance')
plt.plot(np.arange(params['n_estimators']) + 1, test_score, 'r-',
label='Test Set Deviance')
plt.legend(loc='upper right')
plt.xlabel('Boosting Iterations')
plt.ylabel('Deviance')
###############################################################################
# Plot feature importance
feature_importance = clf.feature_importances_
# make importances relative to max importance
feature_importance = 100.0 * (feature_importance / feature_importance.max())
sorted_idx = np.argsort(feature_importance)
pos = np.arange(sorted_idx.shape[0]) + .5
plt.subplot(1, 2, 2)
plt.barh(pos, feature_importance[sorted_idx], align='center')
plt.yticks(pos, boston.feature_names[sorted_idx])
plt.xlabel('Relative Importance')
plt.title('Variable Importance')
plt.show()
|
bsd-3-clause
|
google-research/google-research
|
graph_embedding/dmon/param_dgi.py
|
1
|
6071
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""TODO(tsitsulin): add headers, tests, and improve style."""
import os
from absl import app
from absl import flags
import numpy as np
from sklearn.cluster import KMeans
from sklearn.metrics import normalized_mutual_info_score
import tensorflow.compat.v2 as tf
from graph_embedding.dmon.layers.gcn import GCN
from graph_embedding.dmon.models.dgi import deep_graph_infomax
from graph_embedding.dmon.utilities.graph import load_kipf_data
from graph_embedding.dmon.utilities.graph import load_npz_to_sparse_graph
from graph_embedding.dmon.utilities.graph import normalize_graph
from graph_embedding.dmon.utilities.graph import scipy_to_tf
from graph_embedding.dmon.utilities.metrics import conductance
from graph_embedding.dmon.utilities.metrics import modularity
from graph_embedding.dmon.utilities.metrics import precision
from graph_embedding.dmon.utilities.metrics import recall
tf.compat.v1.enable_v2_behavior()
FLAGS = flags.FLAGS
flags.DEFINE_string('graph_path', None, 'Input graph path')
flags.DEFINE_string('output_path', None, 'Output results path')
flags.DEFINE_string('architecture', None, 'Network architecture')
flags.DEFINE_string('load_strategy', 'schur', 'Graph format')
flags.DEFINE_string('postfix', '', 'File postfix')
flags.DEFINE_integer('n_clusters', 16, 'Number of clusters', lower_bound=0)
flags.DEFINE_integer('n_epochs', 1000, 'Number of epochs', lower_bound=0)
flags.DEFINE_integer('patience', 20, 'Patience parameter', lower_bound=0)
flags.DEFINE_float('learning_rate', 0.001, 'Learning rate', lower_bound=0)
def format_filename():
graph_name = os.path.split(FLAGS.graph_path)[1]
architecture_str = FLAGS.architecture.strip('[]')
return (f'{FLAGS.output_path}/{graph_name}-'
f'nclusters-{FLAGS.n_clusters}-'
f'architecture-{architecture_str}-'
f'lr-{FLAGS.learning_rate}-'
f'epochs-{FLAGS.n_epochs}'
f'postfix-{FLAGS.postfix}'
'.txt')
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
print('Starting', format_filename())
if FLAGS.load_strategy == 'schur':
adjacency, features, labels, label_mask = load_npz_to_sparse_graph(
FLAGS.graph_path)
elif FLAGS.load_strategy == 'kipf':
adjacency, features, labels, label_mask = load_kipf_data(
*os.path.split(FLAGS.graph_path))
else:
raise Exception('Unknown loading strategy!')
n_nodes = adjacency.shape[0]
feature_size = features.shape[1]
architecture = [int(x) for x in FLAGS.architecture.strip('[]').split('_')]
graph_clean_normalized = scipy_to_tf(
normalize_graph(adjacency.copy(), normalized=True))
input_features = tf.keras.layers.Input(shape=(feature_size,))
input_features_corrupted = tf.keras.layers.Input(shape=(feature_size,))
input_graph = tf.keras.layers.Input((n_nodes,), sparse=True)
encoder = [GCN(512) for size in architecture]
model = deep_graph_infomax(
[input_features, input_features_corrupted, input_graph], encoder)
def loss(model, x, y, training):
_, y_ = model(x, training=training)
return loss_object(y_true=y, y_pred=y_)
def grad(model, inputs, targets):
with tf.GradientTape() as tape:
loss_value = loss(model, inputs, targets, training=True)
for loss_internal in model.losses:
loss_value += loss_internal
return loss_value, tape.gradient(loss_value, model.trainable_variables)
loss_object = tf.keras.losses.BinaryCrossentropy(from_logits=True)
optimizer = tf.keras.optimizers.Adam(FLAGS.learning_rate)
patience = FLAGS.patience
best_loss = 999
patience_counter = 0
for epoch in range(FLAGS.n_epochs):
features_corr = features.copy()
pseudolabels = tf.concat([tf.zeros([n_nodes, 1]), tf.ones([n_nodes, 1])], 0)
features_corr = features_corr.copy()
np.random.shuffle(features_corr)
loss_value, grads = grad(model,
[features, features_corr, graph_clean_normalized],
pseudolabels)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
loss_value = loss_value.numpy()
print(epoch, loss_value)
if loss_value > best_loss:
patience_counter += 1
if patience_counter == patience:
break
else:
best_loss = loss_value
patience_counter = 0
representations = model([features, features, graph_clean_normalized],
training=False)[0].numpy()
clf = KMeans(n_clusters=FLAGS.n_clusters)
clf.fit(representations)
clusters = clf.labels_
print('Conductance:', conductance(adjacency, clusters))
print('Modularity:', modularity(adjacency, clusters))
print(
'NMI:',
normalized_mutual_info_score(
labels, clusters[label_mask], average_method='arithmetic'))
print('Precision:', precision(labels, clusters[label_mask]))
print('Recall:', recall(labels, clusters[label_mask]))
with open(format_filename(), 'w') as out_file:
print('Conductance:', conductance(adjacency, clusters), file=out_file)
print('Modularity:', modularity(adjacency, clusters), file=out_file)
print(
'NMI:',
normalized_mutual_info_score(
labels, clusters[label_mask], average_method='arithmetic'),
file=out_file)
print('Precision:', precision(labels, clusters[label_mask]), file=out_file)
print('Recall:', recall(labels, clusters[label_mask]), file=out_file)
if __name__ == '__main__':
app.run(main)
|
apache-2.0
|
Leguark/pygeomod
|
pygeomod/struct_data.py
|
3
|
17593
|
"""Analysis and modification of structural data exported from GeoModeller
All structural data from an entire GeoModeller project can be exported into ASCII
files using the function in the GUI:
Export -> 3D Structural Data
This method generates files for defined geological parameters:
"Points" (i.e. formation contact points) and
"Foliations" (i.e. orientations/ potential field gradients).
Exported parameters include all those defined in sections as well as 3D data points.
This package contains methods to check, visualise, and extract/modify parts of these
exported data sets, for example to import them into a different Geomodeller project.
"""
# import os, sys
import numpy as np
import numpy.random
import matplotlib.pyplot as plt
# from mpl_toolkits.mplot3d import Axes3D
class Struct3DPoints():
"""Class container for 3D structural points data sets"""
def __init__(self, **kwds):
"""Structural points data set
**Optional keywords**:
- *filename* = string : filename of csv file with exported points to load
"""
# store point information in purpose defined numpy record
self.ptype = np.dtype([('x', np.float32),
('y', np.float32),
('z', np.float32),
('formation', np.str_, 32)])
if kwds.has_key("filename"):
self.filename = kwds['filename']
# read data
self.parse()
self.get_formation_names()
self.get_range()
def parse(self):
"""Parse filename and load data into numpy record
The point information is stored in a purpose defined numpy record
self.points
"""
f = open(self.filename, "r")
lines = f.readlines()
self.header = lines[0]
# determine position of elements in header (for extension to foliations, etc.)
h_elem = np.array(self.header.rstrip().split(','))
x_id = np.where(h_elem == 'X')[0]
y_id = np.where(h_elem == 'Y')[0]
z_id = np.where(h_elem == 'Z')[0]
form_id = np.where(h_elem == 'formation')[0]
# print x_id
# create numpy array for points
self.len = (len(lines)-1)
self.points = np.ndarray(self.len, dtype = self.ptype)
for i,line in enumerate(lines[1:]):
l = line.rstrip().split(',')
self.points[i]['x'] = float(l[x_id])
self.points[i]['y'] = float(l[y_id])
self.points[i]['z'] = float(l[z_id])
self.points[i]['formation'] = l[form_id]
def get_formation_names(self):
"""Get names of all formations that have a point in this data set
and store in:
self.formation_names
"""
# self.formation_names = np.unique(self.formations)
self.formation_names = np.unique(self.points[:]['formation'])
def get_range(self):
"""Update min, max for all coordinate axes and store in
self.xmin, self.xmax, ..."""
self.xmin = np.min(self.points['x'])
self.ymin = np.min(self.points['y'])
self.zmin = np.min(self.points['z'])
self.xmax = np.max(self.points['x'])
self.ymax = np.max(self.points['y'])
self.zmax = np.max(self.points['z'])
def create_formation_subset(self, formation_names):
"""Create a subset (as another Struct3DPoints object) with specified formations only
**Arguments**:
- *formation_names* : list of formation names
**Returns**:
Struct3DPoints object with subset of points
"""
# create new object
# reference to own class type for consistency with Struct3DFoliations
pts_subset = self.__class__()
# determine ids for all points of these formations:
ids = np.ndarray((self.len), dtype='bool')
ids[:] = False
if type(formation_names) == list:
for formation in formation_names:
ids[self.points['formation'] == formation] = True
else:
ids[self.points['formation'] == formation_names] = True
# new length is identical to sum of ids bool array (all True elements)
pts_subset.len = np.sum(ids)
# extract points
pts_subset.points = self.points[ids]
# update range
pts_subset.get_range()
# update formation names
pts_subset.get_formation_names()
# get header from original
pts_subset.header = self.header
return pts_subset
def remove_formations(self, formation_names):
"""Remove points for specified formations from the point set
This function can be useful, for example, to remove one formation, perform
a thinning operation, and then add it back in with the `combine_with` function.
**Arguments**:
- *formation_names* = list of formations to be removed (or a single string to
remove only one formation)
"""
# Note: implementation is very similar to create_formation_subset, only inverse
# and changes in original point set!
# determine ids for all points of these formations:
ids = np.ndarray((self.len), dtype='bool')
ids[:] = True
if type(formation_names) == list:
for formation in formation_names:
ids[self.points['formation'] == formation] = False
else:
ids[self.points['formation'] == formation_names] = False
self.len = np.sum(ids)
# extract points
self.points = self.points[ids]
# update range
self.get_range()
# update formation names
self.get_formation_names()
def rename_formations(self, rename_dict):
"""Rename formation according to assignments in dictionary
Mapping in dictionary is of the form:
old_name_1 : new_name_1, old_name_2 : new_name_2, ...
"""
for k,v in rename_dict.items():
print("Change name from %s to %s" % (k,v))
for p in self.points:
if p['formation'] == k: p['formation'] = v
# update formation names
self.get_formation_names()
def extract_range(self, **kwds):
"""Extract subset for defined ranges
Pass ranges as keywords: from_x, to_x, from_y, to_y, from_z, to_z
All not defined ranges are simply kept as before
**Returns**:
pts_subset : Struct3DPoints data subset
"""
from_x = kwds.get("from_x", self.xmin)
from_y = kwds.get("from_y", self.ymin)
from_z = kwds.get("from_z", self.zmin)
to_x = kwds.get("to_x", self.xmax)
to_y = kwds.get("to_y", self.ymax)
to_z = kwds.get("to_z", self.zmax)
# create new object
# pts_subset = Struct3DPoints()
pts_subset = self.__class__()
# determine ids for points in range
ids = np.ndarray((self.len), dtype='bool')
ids[:] = False
ids[(self.points['x'] >= from_x) *
(self.points['y'] >= from_y) *
(self.points['z'] >= from_z) *
(self.points['x'] <= to_x) *
(self.points['y'] <= to_y) *
(self.points['z'] <= to_z)] = True
# new length is identical to sum of ids bool array (all True elements)
pts_subset.len = np.sum(ids)
# extract points
pts_subset.points = self.points[ids]
# update range
pts_subset.get_range()
# update formation names
pts_subset.get_formation_names()
# get header from original
pts_subset.header = self.header
return pts_subset
def thin(self, nx, ny, nz, **kwds):
"""Thin data for one formations on grid with defined number of cells and store as subset
**Arguments**:
- *nx*, *ny*, *nz* = int : number of cells in each direction for thinning grid
The thinning is performed on a raster and not 'formation-aware',
following this simple procedure:
(1) Iterate through grid
(2) If multiple points for a formation fall in this cell: thin
(3a) If thinning: select one point in the cell at random and keep this one!
(3b) else: if exactly one point is in range, keep it!
Note: Thinning is performed for all formations, so make sure to create a subset
for a single formation first!
**Returns**:
pts_subset = Struct3DPoints : subset with thinned data for formation
"""
# DEVNOTE: This would be an awesome function to parallelise! Should be quite simple!
# first step: generate subset
# pts_subset = self.create_formation_subset([formation])
# create new pointset:
# reference to own class type for consistency with Struct3DFoliations
pts_subset = self.__class__()
# determine cell boundaries of subset for thinning:
delx = np.ones(nx) * (self.xmax - self.xmin) / nx
bound_x = self.xmin + np.cumsum(delx)
dely = np.ones(ny) * (self.ymax - self.ymin) / ny
bound_y = self.ymin + np.cumsum(dely)
delz = np.ones(nz) * (self.zmax - self.zmin) / nz
bound_z = self.zmin + np.cumsum(delz)
ids_to_keep = []
for i in range(nx-1):
for j in range(ny-1):
for k in range(nz-1):
# determine number of points in this cell
ids = np.ndarray((self.len), dtype='bool')
ids[:] = False
ids[(self.points['x'] > bound_x[i]) *
(self.points['y'] > bound_y[j]) *
(self.points['z'] > bound_z[k]) *
(self.points['x'] < bound_x[i+1]) *
(self.points['y'] < bound_y[j+1]) *
(self.points['z'] < bound_z[k+1])] = True
if np.sum(ids) > 1:
# Thinning required!
# keep random point
ids_to_keep.append(numpy.random.choice(np.where(ids)[0]))
# pts_subset.points[nx * ny * i + ny * j + k] = self.points[id_to_keep]
# assign to new pointset:
elif np.sum(ids) == 1:
# keep the one point, of course! (store the index of the single True entry)
# pts_subset.points[nx * ny * i + ny * j + k] = self.points[ids[0]]
ids_to_keep.append(np.where(ids)[0][0])
# now get points for all those ids:
# extract points
pts_subset.points = self.points[np.array(ids_to_keep)]
# update range
pts_subset.get_range()
# update length
pts_subset.len = len(pts_subset.points)
# update formation names
pts_subset.get_formation_names()
# get header from original
pts_subset.header = self.header
return pts_subset
def combine_with(self, pts_set):
"""Combine this point set with another point set
**Arguments**:
- *pts_set* = Struct3DPoints : points set to combine
"""
self.points = np.concatenate((self.points, pts_set.points))
# update range and everything
self.get_range()
self.get_formation_names()
self.len = len(self.points)
def plot_plane(self, plane=('x','y'), **kwds):
"""Create 2-D plots for point distribution
**Arguments**:
- *plane* = tuple of plane axes directions, e.g. ('x','y') (default)
**Optional Keywords**:
- *ax* = matplotlib axis object: if provided, plot is attached to this axis
- *formation_names* = list of formations : plot only points for specific formations
"""
color = kwds.get("color", 'b')
if kwds.has_key("ax"):
# axis is provided, attach here
ax = kwds['ax']
else:
fig = plt.figure()
ax = fig.add_subplot(111)
if kwds.has_key("formation_names"):
pts_subset = self.create_formation_subset(kwds['formation_names'])
ax.plot(pts_subset.points[:][plane[0]], pts_subset.points[:][plane[1]], '.', color = color)
else:
ax.plot(self.points[:][plane[0]], self.points[:][plane[1]], '.', color = color)
def plot_3D(self, **kwds):
"""Create a plot of points in 3-D
**Optional keywords**:
- *ax* = matplotlib axis object: if provided, plot is attached to this axis
- *formation_names* = list of formations : plot only points for specific formations
"""
if kwds.has_key("ax"):
# axis is provided, attach here
ax = kwds['ax']
else:
fig = plt.figure(figsize = (10,8))
ax = fig.add_subplot(111, projection='3d')
if kwds.has_key("formation_names"):
# create a subset with the specified formations only
pts_subset = self.create_formation_subset(kwds['formation_names'])
pts_subset.plot_3D(ax = ax)
else:
# plot all
ax.scatter(self.points['x'], self.points['y'], self.points['z'])
def save(self, filename):
"""Save points set to file
**Arguments**:
- *filename* = string : name of new file
"""
f = open(filename, 'w')
f.write(self.header)
for point in self.points:
f.write("%.2f,%.2f,%.3f,%s\n" % (point['x'], point['y'], point['z'], point['formation']))
f.close()
class Struct3DFoliations(Struct3DPoints):
"""Class container for foliations (i.e. orientations) exported from GeoModeller
Mainly based on Struct3DPoints, as most of the required functionality
for the location of elements is shared - some functions are overwritten, e.g. save and parse,
to handle the orientation data as well.
However, further methods might be added or adapted in the future, for example:
- downsampling according to (eigen)vector methods, e.g. the work from the Monash guys, etc.
- plotting of orientations in 2-D and 3-D
"""
def __init__(self, **kwds):
"""Structural points data set
**Optional keywords**:
- *filename* = string : filename of csv file with exported points to load
"""
# store point information in purpose defined numpy record
self.ftype = np.dtype([('x', np.float32),
('y', np.float32),
('z', np.float32),
('azimuth', np.float32),
('dip', np.float32),
('polarity', np.int),
('formation', np.str_, 32)])
if kwds.has_key("filename"):
self.filename = kwds['filename']
# read data
self.parse()
self.get_formation_names()
self.get_range()
def parse(self):
"""Parse filename and load data into numpy record
The point information is stored in a purpose defined numpy record
self.points
"""
f = open(self.filename, "r")
lines = f.readlines()
self.header = lines[0]
# determine position of elements in header (for extension to foliations, etc.)
h_elem = np.array(self.header.rstrip().split(','))
x_id = np.where(h_elem == 'X')[0]
y_id = np.where(h_elem == 'Y')[0]
z_id = np.where(h_elem == 'Z')[0]
azi_id = np.where(h_elem == 'azimuth')[0]
dip_id = np.where(h_elem == 'dip')[0]
pol_id = np.where(h_elem == 'polarity')[0]
form_id = np.where(h_elem == 'formation')[0]
# print x_id
# create numpy array for points
self.len = (len(lines)-1)
self.points = np.ndarray(self.len, dtype = self.ftype)
for i,line in enumerate(lines[1:]):
l = line.rstrip().split(',')
self.points[i]['x'] = float(l[x_id])
self.points[i]['y'] = float(l[y_id])
self.points[i]['z'] = float(l[z_id])
self.points[i]['azimuth'] = float(l[azi_id])
self.points[i]['dip'] = float(l[dip_id])
self.points[i]['polarity'] = float(l[pol_id])
self.points[i]['formation'] = l[form_id]
def save(self, filename):
"""Save points set to file
**Arguments**:
- *filename* = string : name of new file
"""
f = open(filename, 'w')
f.write(self.header)
for point in self.points:
f.write("%.2f,%.2f,%.3f,%.3f,%.3f,%d,%s\n" % (point['x'], point['y'], point['z'],
point['azimuth'], point['dip'], point['polarity'],
point['formation']))
f.close()
if __name__ == '__main__':
pass
|
mit
|
flightgong/scikit-learn
|
sklearn/feature_extraction/tests/test_dict_vectorizer.py
|
8
|
3217
|
# Author: Lars Buitinck <[email protected]>
# License: BSD 3 clause
from random import Random
import numpy as np
import scipy.sparse as sp
from numpy.testing import assert_array_equal
from sklearn.utils.testing import (assert_equal, assert_in,
assert_false, assert_true)
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_selection import SelectKBest, chi2
def test_dictvectorizer():
D = [{"foo": 1, "bar": 3},
{"bar": 4, "baz": 2},
{"bar": 1, "quux": 1, "quuux": 2}]
for sparse in (True, False):
for dtype in (int, np.float32, np.int16):
v = DictVectorizer(sparse=sparse, dtype=dtype)
X = v.fit_transform(D)
assert_equal(sp.issparse(X), sparse)
assert_equal(X.shape, (3, 5))
assert_equal(X.sum(), 14)
assert_equal(v.inverse_transform(X), D)
if sparse:
# CSR matrices can't be compared for equality
assert_array_equal(X.A, v.transform(D).A)
else:
assert_array_equal(X, v.transform(D))
def test_feature_selection():
# make two feature dicts with two useful features and a bunch of useless
# ones, in terms of chi2
d1 = dict([("useless%d" % i, 10) for i in range(20)],
useful1=1, useful2=20)
d2 = dict([("useless%d" % i, 10) for i in range(20)],
useful1=20, useful2=1)
for indices in (True, False):
v = DictVectorizer().fit([d1, d2])
X = v.transform([d1, d2])
sel = SelectKBest(chi2, k=2).fit(X, [0, 1])
v.restrict(sel.get_support(indices=indices), indices=indices)
assert_equal(v.get_feature_names(), ["useful1", "useful2"])
def test_one_of_k():
D_in = [{"version": "1", "ham": 2},
{"version": "2", "spam": .3},
{"version=3": True, "spam": -1}]
v = DictVectorizer()
X = v.fit_transform(D_in)
assert_equal(X.shape, (3, 5))
D_out = v.inverse_transform(X)
assert_equal(D_out[0], {"version=1": 1, "ham": 2})
names = v.get_feature_names()
assert_true("version=2" in names)
assert_false("version" in names)
def test_unseen_or_no_features():
D = [{"camelot": 0, "spamalot": 1}]
for sparse in [True, False]:
v = DictVectorizer(sparse=sparse).fit(D)
X = v.transform({"push the pram a lot": 2})
if sparse:
X = X.toarray()
assert_array_equal(X, np.zeros((1, 2)))
X = v.transform({})
if sparse:
X = X.toarray()
assert_array_equal(X, np.zeros((1, 2)))
try:
v.transform([])
except ValueError as e:
assert_in("empty", str(e))
def test_deterministic_vocabulary():
# Generate equal dictionaries with different memory layouts
items = [("%03d" % i, i) for i in range(1000)]
rng = Random(42)
d_sorted = dict(items)
rng.shuffle(items)
d_shuffled = dict(items)
# check that the memory layout does not impact the resulting vocabulary
v_1 = DictVectorizer().fit([d_sorted])
v_2 = DictVectorizer().fit([d_shuffled])
assert_equal(v_1.vocabulary_, v_2.vocabulary_)
|
bsd-3-clause
|
ucsd-ccbb/VAPr
|
VAPr/vapr_core.py
|
1
|
23692
|
from __future__ import division, print_function
# built-in libraries
import logging
import multiprocessing
import os
import pymongo
import re
import tqdm
import warnings
# third-party libraries
import pandas
import vcf
# project libraries
import VAPr.vcf_merging
import VAPr.annovar_running
import VAPr.filtering
import VAPr.chunk_processing
from VAPr.annovar_output_parsing import AnnovarTxtParser
class VaprDataset(object):
def __init__(self, mongo_db_name, mongo_collection_name, merged_vcf_path=None):
"""Class that contains methods to interact with a parsed database of variants
Args:
mongo_db_name(str): database name
mongo_collection_name(str): collection name
merged_vcf_path(str): path to merged vcf
"""
self._mongo_db_name = mongo_db_name
self._mongo_collection_name = mongo_collection_name
self._merged_vcf_path = merged_vcf_path
self._mongo_client = pymongo.MongoClient(maxPoolSize=None, waitQueueTimeoutMS=200)
self._mongo_db = getattr(self._mongo_client, self._mongo_db_name)
self._mongo_db_collection = getattr(self._mongo_db, self._mongo_collection_name)
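# Instantiation sketch (database and collection names are placeholders):
#   dataset = VaprDataset("vapr_db", "my_cohort")
#   rare = dataset.get_rare_deleterious_variants()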
@property
def full_name(self):
"""Full name of database and collection
Args:
Returns:
str: Full name of database and collection
"""
return self._mongo_db_collection.full_name
@property
def is_empty(self):
"""If there are no records in the collection, returns True
Args:
Returns:
bool: if there are no records in the collection, returns True
"""
return self._mongo_db_collection.count() == 0
@property
def num_records(self):
"""Number of records in MongoDB collection
Args:
Returns:
int: Number of records in MongoDB collection
"""
return self._mongo_db_collection.count()
def get_rare_deleterious_variants(self, sample_names_list=None):
"""See :ref:`rare-del-variants` for more information on how this is implemented
Args:
sample_names_list(list: list, optional): list of samples to draw variants from (Default value = None)
Returns:
list: list of variants
"""
return self._get_filtered_variants_by_sample(VAPr.filtering.make_rare_deleterious_variants_filter,
sample_names_list)
def get_known_disease_variants(self, sample_names_list=None):
"""See :ref:`known-disease` for more information on how this is implemented
Args:
sample_names_list(list: list, optional): list of samples to draw variants from (Default value = None)
Returns:
list: list of variants
"""
return self._get_filtered_variants_by_sample(VAPr.filtering.make_known_disease_variants_filter,
sample_names_list)
def get_deleterious_compound_heterozygous_variants(self, sample_names_list=None):
"""See :ref:`del-compound` for more information on how this is implemented
Args:
sample_names_list(list: list, optional): list of samples to draw variants from (Default value = None)
Returns:
list: list of variants
"""
return self._get_filtered_variants_by_sample(
VAPr.filtering.make_deleterious_compound_heterozygous_variants_filter, sample_names_list)
def get_de_novo_variants(self, proband, ancestor1, ancestor2):
"""See :ref:`de-novo` for more information on how this is implemented
Args:
proband(str): sample name of the proband
ancestor1(str): sample name of ancestor #1
ancestor2(str): sample name of ancestor #2
Returns:
list: list of variants
"""
filter_dict = VAPr.filtering.make_de_novo_variants_filter(proband, ancestor1, ancestor2)
return self.get_custom_filtered_variants(filter_dict)
def get_custom_filtered_variants(self, filter_dictionary):
"""See :ref:`custom-filter` for more information on how to implement
Args:
filter_dictionary(dictionary: dict): mongodb custom filter
Returns:
list: list of variants
"""
if self.is_empty:
warnings.warn("Dataset '{0}' is empty, so all filters return an empty list.".format(self.full_name))
return list(self._mongo_db_collection.find(filter_dictionary))
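# Custom-filter sketch: any pymongo query document can be passed; the field name
# and the `dataset` instance below are illustrative.
#   variants = dataset.get_custom_filtered_variants({"chr": "chr1"})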
def get_distinct_sample_ids(self):
"""Self-explanatory
Args:
Returns:
list: list of sample ids
"""
result = self._mongo_db_collection.distinct(VAPr.filtering.SAMPLE_ID_SELECTOR)
return result
def get_all_variants(self):
"""Self-explanatory
Args:
Returns:
list: list of variants
"""
return self.get_custom_filtered_variants({})
def get_variants_for_sample(self, sample_name):
"""Return variants for a specific sample
Args:
sample_name(str): name of sample
Returns:
list: list of variants
"""
filter_dict = VAPr.filtering.get_sample_id_filter(sample_name)
return self.get_custom_filtered_variants(filter_dict)
def get_variants_for_samples(self, specific_sample_names):
"""Return variants from multiple samples
Args:
specific_sample_names(list): name of samples
Returns:
list: list of variants
"""
filter_dict = VAPr.filtering.get_any_of_sample_ids_filter(specific_sample_names)
return self.get_custom_filtered_variants(filter_dict)
def get_variants_as_dataframe(self, filtered_variants=None):
"""Utility to get a dataframe from variants, either all of them or a filtered subset
Args:
filtered_variants: a list of variants (Default value = None)
Returns:
pandas.DataFrame
"""
if filtered_variants is None:
filtered_variants = self.get_all_variants()
result = pandas.DataFrame(filtered_variants)
# remove the object id as it is different every time and internal to database
result.drop('_id', axis=1, inplace=True)
return result
def write_unfiltered_annotated_csv(self, output_fp):
"""Full csv file containing annotations from both annovar and myvariant.info
Args:
output_fp(str): Output file path
Returns:
None
"""
all_variants = self.get_all_variants()
self._write_annotated_csv("write_unfiltered_annotated_csv", all_variants, output_fp)
def write_filtered_annotated_csv(self, filtered_variants, output_fp):
"""Filtered csv file containing annotations from a list passed to it, coming from MongoDB
Args:
filtered_variants(list): variants coming from MongoDB
output_fp(str): Output file path
Returns:
None
"""
self._write_annotated_csv("write_filtered_annotated_csv", filtered_variants, output_fp)
def write_unfiltered_annotated_vcf(self, vcf_output_path, info_out=True):
"""Filtered vcf file containing annotations from a list passed to it, coming from MongoDB
Args:
vcf_output_path(str): Output file path
            info_out(bool, optional): if True, extra annotation information will be written to the vcf file (Default value = True)
Returns:
None
"""
filtered_variants = self.get_all_variants()
self._write_annotated_vcf(filtered_variants, vcf_output_path, info_out=info_out)
def write_filtered_annotated_vcf(self, filtered_variants, vcf_output_path, info_out=True):
"""
Args:
filtered_variants(list): variants coming from MongoDB
vcf_output_path(str): Output file path
            info_out(bool, optional): if True, extra annotation information will be written to the vcf file (Default value = True)
Returns:
None
"""
self._write_annotated_vcf(filtered_variants, vcf_output_path, info_out=info_out)
def write_unfiltered_annotated_csvs_per_sample(self, output_dir):
"""
Args:
output_dir: return: None
Returns:
None
"""
sample_ids_list = self.get_distinct_sample_ids()
for curr_sample_id in sample_ids_list:
variant_dicts_list = self.get_variants_for_sample(curr_sample_id)
curr_output_fp = os.path.join(output_dir, curr_sample_id + 'unfiltered_annotated_variants.csv')
self.write_filtered_annotated_csv(variant_dicts_list, curr_output_fp)
self._warn_if_no_output("write_unfiltered_annotated_csvs_per_sample", sample_ids_list)
def _write_annotated_csv(self, func_name, filtered_variants, output_fp):
no_output = self._warn_if_no_output(func_name, filtered_variants)
if not no_output:
dataframe = self.get_variants_as_dataframe(filtered_variants)
dataframe.to_csv(output_fp)
def _get_filtered_variants_by_sample(self, filter_builder_func, sample_names=None):
if sample_names is not None and not isinstance(sample_names, list):
sample_names = [sample_names]
filter_dict = filter_builder_func(sample_names)
return self.get_custom_filtered_variants(filter_dict)
def _write_annotated_vcf(self, filtered_variants_dicts_list, vcf_output_path, info_out=True):
if self._merged_vcf_path is None:
raise ValueError("Original vcf file (to be used as template for output vcf) is not set.")
# match at least one character of anything but a : followed by :g. followed by at least one digit followed
# by at least one NOT digit followed by the end of the line
hgvs_regex = r"^([^:]+):g\.(\d+)[^\d].*$"
vcf_template_path = VAPr.vcf_merging.bgzip_and_index_vcf(self._merged_vcf_path)
# This open is done using the filename rather than passing a file handle directly (as is done elsewhere)
# because compressed files must be opened with 'rb' while regular files must be opened with 'r';
# vcf.Reader will work this out for itself if you pass the file name and let it do the opening.
# The slight drawback here is that vcf.Reader doesn't clean up after itself well: it leaves its file
# handle open after use, causing a niggling ResourceWarning: unclosed file warning.
vcf_reader = vcf.Reader(filename=vcf_template_path)
vcf_writer = vcf.Writer(open(vcf_output_path, 'w'), vcf_reader)
for curr_record_dict in filtered_variants_dicts_list:
curr_hgvs_id = curr_record_dict["hgvs_id"]
match_obj = re.match(hgvs_regex, curr_hgvs_id)
curr_chrom = match_obj.group(1).replace(AnnovarTxtParser.CHR_HEADER, "")
if curr_chrom == AnnovarTxtParser.STANDARDIZED_CHR_MT_VAL:
curr_chrom = AnnovarTxtParser.RAW_CHR_MT_VAL
curr_start = int(match_obj.group(2))
for record in vcf_reader.fetch(curr_chrom, curr_start - 1, curr_start + 1):
if info_out is True:
record.INFO.update(curr_record_dict)
vcf_writer.write_record(record)
vcf_writer.close()
self._warn_if_no_output("write_unfiltered_annotated_csvs_per_sample", filtered_variants_dicts_list)
def _warn_if_no_output(self, output_func_name, items_list):
no_output = False
if len(items_list) == 0:
no_output = True
warnings.warn("{0} wrote no file(s) because no relevant samples were found in dataset '{1}'.".format(
output_func_name, self._mongo_db_collection.full_name))
return no_output
class VaprAnnotator(object):
"""Class in charge of gathering requirements, finding files, downloading databases required
to run the annotation
Args:
input_dir(str): Input directory to vcf files
output_dir(str): Output directory to annotated vcf files
        mongo_db_name(str): Name of the database in which the collection of variants will be stored
        mongo_collection_name(str): Name of the collection in which the annotated variants will be stored
annovar_install_path(str): Path to locally installed annovar scripts
design_file(str): path to csv design file
        build_ver(str): genome build version against which annotation will be done; either `hg19` or `hg38`
vcfs_gzipped(bool): if the vcf files are gzipped, set to True
Returns:
"""
SAMPLE_NAMES_KEY = "Sample_Names"
HG19_VERSION = "hg19"
HG38_VERSION = "hg38"
DEFAULT_GENOME_VERSION = HG19_VERSION
SUPPORTED_GENOME_BUILD_VERSIONS = [HG19_VERSION, HG38_VERSION]
@staticmethod
def _get_num_lines_in_file(file_path):
with open(file_path) as file_obj:
result = sum(1 for _ in file_obj)
return result
@staticmethod
def _make_jobs_params_tuples_list(file_path, num_file_lines, chunk_size, db_name, collection_name,
genome_build_version, sample_names_list=None, verbose_level=1):
num_params = VAPr.chunk_processing.AnnotationJobParamsIndices.get_num_possible_indices()
if sample_names_list is not None:
shared_job_params = [None] * num_params
shared_job_params[VAPr.chunk_processing.AnnotationJobParamsIndices.SAMPLE_LIST_INDEX] = sample_names_list
else:
shared_job_params = [None] * (num_params - 1)
shared_job_params[VAPr.chunk_processing.AnnotationJobParamsIndices.CHUNK_SIZE_INDEX] = chunk_size
shared_job_params[VAPr.chunk_processing.AnnotationJobParamsIndices.FILE_PATH_INDEX] = file_path
shared_job_params[VAPr.chunk_processing.AnnotationJobParamsIndices.DB_NAME_INDEX] = db_name
shared_job_params[VAPr.chunk_processing.AnnotationJobParamsIndices.COLLECTION_NAME_INDEX] = collection_name
shared_job_params[VAPr.chunk_processing.AnnotationJobParamsIndices.GENOME_BUILD_VERSION_INDEX] = \
genome_build_version
shared_job_params[VAPr.chunk_processing.AnnotationJobParamsIndices.VERBOSE_LEVEL_INDEX] = verbose_level
jobs_params_tuples_list = []
num_steps = int(num_file_lines / chunk_size) + 1
for curr_chunk_index in range(num_steps):
shared_job_params[VAPr.chunk_processing.AnnotationJobParamsIndices.CHUNK_INDEX_INDEX] = curr_chunk_index
curr_job_params_tuple = tuple(shared_job_params)
jobs_params_tuples_list.append(curr_job_params_tuple)
return jobs_params_tuples_list
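    # Note on chunking: num_steps = int(num_file_lines / chunk_size) + 1, so e.g. a
    # 5,000-line annotation file with chunk_size=2000 yields 3 chunk jobs (indices 0-2).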
@classmethod
def _get_validated_genome_version(cls, input_genome_build_version):
if input_genome_build_version is None:
result = cls.DEFAULT_GENOME_VERSION
elif input_genome_build_version not in cls.SUPPORTED_GENOME_BUILD_VERSIONS:
str_of_acceptable_versions = ", ".join(cls.SUPPORTED_GENOME_BUILD_VERSIONS)
raise ValueError("Input genome build version '{0}' is not recognized. Supported builds are {1}".format(
input_genome_build_version, str_of_acceptable_versions))
else:
result = input_genome_build_version
return result
@classmethod
def _make_merged_vcf(cls, input_dir, output_dir, analysis_name, design_file, vcfs_gzipped):
vcf_file_paths_list = None
if design_file is not None:
design_df = pandas.read_csv(design_file)
vcf_file_paths_list = design_df[cls.SAMPLE_NAMES_KEY].tolist()
result = VAPr.vcf_merging.merge_vcfs(input_dir, output_dir, analysis_name,
vcf_file_paths_list, vcfs_gzipped)
return result
def __init__(self, input_dir, output_dir, mongo_db_name, mongo_collection_name, annovar_install_path=None,
design_file=None, build_ver=None, vcfs_gzipped=False):
self._input_dir = input_dir
self._output_dir = output_dir
self._mongo_db_name = mongo_db_name
self._mongo_collection_name = mongo_collection_name
self._analysis_name = mongo_db_name + "_" + mongo_collection_name
self._path_to_annovar_install = annovar_install_path
self._design_file = design_file
self._vcfs_gzipped = vcfs_gzipped
self._genome_build_version = self._get_validated_genome_version(build_ver)
self._single_vcf_path = self._make_merged_vcf(self._input_dir, self._output_dir, self._analysis_name,
self._design_file, self._vcfs_gzipped)
self._output_basename = os.path.splitext(os.path.basename(self._single_vcf_path))[0]
# This open is done using the filename rather than passing a file handle directly (as is done elsewhere)
# because compressed files must be opened with 'rb' while regular files must be opened with 'r';
# vcf.Reader will work this out for itself if you pass the file name and let it do the opening.
# The slight drawback here is that vcf.Reader doesn't clean up after itself well: it leaves its file
# handle open after use, causing a niggling ResourceWarning: unclosed file warning.
vcf_reader = vcf.Reader(filename=self._single_vcf_path)
self._sample_names_list = vcf_reader.samples
# TODO: someday: put back the functionality for custom annovar dbs?
self._annovar_wrapper = None
if self._path_to_annovar_install is not None:
self._annovar_wrapper = VAPr.annovar_running.AnnovarWrapper(
self._path_to_annovar_install, genome_build_version=self._genome_build_version,
custom_annovar_dbs_to_use=None)
try:
os.mkdir(output_dir)
except OSError:
logging.info('Output directory %s for analysis already exists; using existing directory' % output_dir)
def download_annovar_databases(self):
""" Needed for ANNOVAR to run, it will download the required databases
Args:
Returns:
"""
if self._path_to_annovar_install is None:
raise ValueError("No ANNOVAR install path provided.")
self._annovar_wrapper.download_databases()
def annotate_lite(self, num_processes=8, chunk_size=2000, verbose_level=1, allow_adds=False):
"""'Lite' Annotation: it will query `myvariant.info <myvariant.info>`_ only, without
generating annotations from Annovar. It requires solely VAPr to be installed.
The execution will grab the HGVS ids from the vcf files and query the variant data from MyVariant.info.
        .. warning:: It is subject to the issue of potentially having completely empty data for some of the variants,
and inability to run native VAPr queries on the data.
It will return the class :class:`~VAPr.vapr_core.VaprDataset`, which can then be used for downstream
filtering and analysis.
Args:
            num_processes(int, optional): number of parallel processes. Defaults to 8
            chunk_size(int, optional): number of variants to be processed at once. Defaults to 2000
            verbose_level(int, optional): higher verbosity gives more feedback; raise to 2 or 3 when debugging. Defaults to 1
            allow_adds(bool, optional): allow adding new variants to a pre-existing Mongo collection; if False and the collection already contains records, a ValueError is raised (Default value = False)
        Returns:
            :class:`~VAPr.vapr_core.VaprDataset`
"""
result = self._make_dataset_for_results("annotate_lite", allow_adds)
self._collect_annotations_and_store(self._single_vcf_path, chunk_size, num_processes, sample_names_list=None,
verbose_level=verbose_level)
return result
def annotate(self, num_processes=4, chunk_size=2000, verbose_level=1, allow_adds=False):
"""This is the main function of the package. It will run Annovar beforehand, and will kick-start the full
annotation functionality. Namely, it will collect all the variant data from Annovar annotations, combine
        it with data coming from MyVariant.info, and store it in MongoDB, in the database and collection specified
        when the :class:`VaprAnnotator` was created.
It will return the class :class:`~VAPr.vapr_core.VaprDataset`, which can then be used for downstream
filtering and analysis.
Args:
            num_processes(int, optional): number of parallel processes. Defaults to 4
            chunk_size(int, optional): number of variants to be processed at once. Defaults to 2000
            verbose_level(int, optional): higher verbosity gives more feedback; raise to 2 or 3 when debugging. Defaults to 1
            allow_adds(bool, optional): allow adding new variants to a pre-existing Mongo collection; if False and the collection already contains records, a ValueError is raised (Default value = False)
        Returns:
            :class:`~VAPr.vapr_core.VaprDataset`
"""
if self._path_to_annovar_install is None:
raise ValueError("No ANNOVAR install path provided.")
result = self._make_dataset_for_results("annotate", allow_adds)
annovar_output_fp = self._annovar_wrapper.run_annotation(self._single_vcf_path, self._output_basename,
self._output_dir)
self._collect_annotations_and_store(annovar_output_fp, chunk_size, num_processes,
sample_names_list=self._sample_names_list, verbose_level=verbose_level)
return result
def _make_dataset_for_results(self, func_name, allow_adds):
result = VaprDataset(self._mongo_db_name, self._mongo_collection_name, self._single_vcf_path)
if not result.is_empty:
msg_prefix = "Dataset '{0}' already contains {1} records".format(result.full_name, result.num_records)
if allow_adds:
logging.info("{0}; adding to this dataset.".format(msg_prefix))
else:
error_msg = "{0}, but writing into an already-filled dataset is disallowed by default. " \
"Either create a VaprAnnotator with a new collection name, clear your existing collection " \
"manually, or (if you definitely wish to add to an existing dataset), rerun {1} with the " \
"'allow_adds' parameter set to True.".format(msg_prefix, func_name)
raise ValueError(error_msg)
return result
# TODO: someday: extra_data from design file needs to come back in here
def _collect_annotations_and_store(self, file_path, chunk_size, num_processes, sample_names_list=None,
verbose_level=1):
num_file_lines = self._get_num_lines_in_file(file_path)
jobs_params_tuples_list = self._make_jobs_params_tuples_list(
file_path, num_file_lines, chunk_size, self._mongo_db_name, self._mongo_collection_name,
self._genome_build_version, sample_names_list, verbose_level)
pool = multiprocessing.Pool(num_processes)
for _ in tqdm.tqdm(
pool.imap_unordered(
VAPr.chunk_processing.collect_chunk_annotations_and_store, jobs_params_tuples_list),
total=len(jobs_params_tuples_list)):
pass
pool.close()
pool.join()
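

if __name__ == "__main__":
    # Minimal usage sketch only; every path, database name and collection name below is a
    # hypothetical placeholder, and a local ANNOVAR install is assumed for the full annotate() run.
    annotator = VaprAnnotator(input_dir="/path/to/vcf_dir",
                              output_dir="/path/to/output_dir",
                              mongo_db_name="VariantDatabase",
                              mongo_collection_name="collect",
                              build_ver="hg19",
                              vcfs_gzipped=False,
                              annovar_install_path="/path/to/annovar")
    annotator.download_annovar_databases()
    dataset = annotator.annotate(num_processes=4)
    # downstream filtering and export on the returned VaprDataset
    rare_deleterious = dataset.get_rare_deleterious_variants()
    dataset.write_filtered_annotated_csv(rare_deleterious, "/path/to/output_dir/rare_deleterious.csv")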
|
mit
|
petosegan/scikit-learn
|
sklearn/tree/tree.py
|
113
|
34767
|
"""
This module gathers tree-based methods, including decision, regression and
randomized trees. Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <[email protected]>
# Peter Prettenhofer <[email protected]>
# Brian Holt <[email protected]>
# Noel Dawe <[email protected]>
# Satrajit Gosh <[email protected]>
# Joly Arnaud <[email protected]>
# Fares Hedayati <[email protected]>
#
# Licence: BSD 3 clause
from __future__ import division
import numbers
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from ..base import BaseEstimator, ClassifierMixin, RegressorMixin
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import check_array, check_random_state, compute_sample_weight
from ..utils.validation import NotFittedError
from ._tree import Criterion
from ._tree import Splitter
from ._tree import DepthFirstTreeBuilder, BestFirstTreeBuilder
from ._tree import Tree
from . import _tree
__all__ = ["DecisionTreeClassifier",
"DecisionTreeRegressor",
"ExtraTreeClassifier",
"ExtraTreeRegressor"]
# =============================================================================
# Types and constants
# =============================================================================
DTYPE = _tree.DTYPE
DOUBLE = _tree.DOUBLE
CRITERIA_CLF = {"gini": _tree.Gini, "entropy": _tree.Entropy}
CRITERIA_REG = {"mse": _tree.MSE, "friedman_mse": _tree.FriedmanMSE}
DENSE_SPLITTERS = {"best": _tree.BestSplitter,
"presort-best": _tree.PresortBestSplitter,
"random": _tree.RandomSplitter}
SPARSE_SPLITTERS = {"best": _tree.BestSparseSplitter,
"random": _tree.RandomSparseSplitter}
# =============================================================================
# Base decision tree
# =============================================================================
class BaseDecisionTree(six.with_metaclass(ABCMeta, BaseEstimator,
_LearntSelectorMixin)):
"""Base class for decision trees.
Warning: This class should not be used directly.
Use derived classes instead.
"""
@abstractmethod
def __init__(self,
criterion,
splitter,
max_depth,
min_samples_split,
min_samples_leaf,
min_weight_fraction_leaf,
max_features,
max_leaf_nodes,
random_state,
class_weight=None):
self.criterion = criterion
self.splitter = splitter
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.random_state = random_state
self.max_leaf_nodes = max_leaf_nodes
self.class_weight = class_weight
self.n_features_ = None
self.n_outputs_ = None
self.classes_ = None
self.n_classes_ = None
self.tree_ = None
self.max_features_ = None
def fit(self, X, y, sample_weight=None, check_input=True):
"""Build a decision tree from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (class labels in classification, real numbers in
regression). In the regression case, use ``dtype=np.float64`` and
``order='C'`` for maximum efficiency.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Returns
-------
self : object
Returns self.
"""
random_state = check_random_state(self.random_state)
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csc")
if issparse(X):
X.sort_indices()
if X.indices.dtype != np.intc or X.indptr.dtype != np.intc:
raise ValueError("No support for np.int64 index based "
"sparse matrices")
# Determine output settings
n_samples, self.n_features_ = X.shape
is_classification = isinstance(self, ClassifierMixin)
y = np.atleast_1d(y)
expanded_class_weight = None
if y.ndim == 1:
            # np.reshape is used here (rather than y[:, np.newaxis]) because it
            # preserves the data contiguity of y.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
if is_classification:
y = np.copy(y)
self.classes_ = []
self.n_classes_ = []
if self.class_weight is not None:
y_original = np.copy(y)
y_store_unique_indices = np.zeros(y.shape, dtype=np.int)
for k in range(self.n_outputs_):
classes_k, y_store_unique_indices[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
y = y_store_unique_indices
if self.class_weight is not None:
expanded_class_weight = compute_sample_weight(
self.class_weight, y_original)
else:
self.classes_ = [None] * self.n_outputs_
self.n_classes_ = [1] * self.n_outputs_
self.n_classes_ = np.array(self.n_classes_, dtype=np.intp)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
# Check parameters
max_depth = ((2 ** 31) - 1 if self.max_depth is None
else self.max_depth)
max_leaf_nodes = (-1 if self.max_leaf_nodes is None
else self.max_leaf_nodes)
if isinstance(self.max_features, six.string_types):
if self.max_features == "auto":
if is_classification:
max_features = max(1, int(np.sqrt(self.n_features_)))
else:
max_features = self.n_features_
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features_)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features_)))
else:
raise ValueError(
'Invalid value for max_features. Allowed string '
'values are "auto", "sqrt" or "log2".')
elif self.max_features is None:
max_features = self.n_features_
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
if self.max_features > 0.0:
max_features = max(1, int(self.max_features * self.n_features_))
else:
max_features = 0
self.max_features_ = max_features
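        # e.g. with n_features_ == 100: "auto" gives 10 for classifiers (sqrt) but 100 for
        # regressors, "log2" gives 6, and a float such as 0.25 gives int(0.25 * 100) == 25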
if len(y) != n_samples:
raise ValueError("Number of labels=%d does not match "
"number of samples=%d" % (len(y), n_samples))
if self.min_samples_split <= 0:
raise ValueError("min_samples_split must be greater than zero.")
if self.min_samples_leaf <= 0:
raise ValueError("min_samples_leaf must be greater than zero.")
if not 0 <= self.min_weight_fraction_leaf <= 0.5:
raise ValueError("min_weight_fraction_leaf must in [0, 0.5]")
if max_depth <= 0:
raise ValueError("max_depth must be greater than zero. ")
if not (0 < max_features <= self.n_features_):
raise ValueError("max_features must be in (0, n_features]")
if not isinstance(max_leaf_nodes, (numbers.Integral, np.integer)):
raise ValueError("max_leaf_nodes must be integral number but was "
"%r" % max_leaf_nodes)
if -1 < max_leaf_nodes < 2:
raise ValueError(("max_leaf_nodes {0} must be either smaller than "
"0 or larger than 1").format(max_leaf_nodes))
if sample_weight is not None:
if (getattr(sample_weight, "dtype", None) != DOUBLE or
not sample_weight.flags.contiguous):
sample_weight = np.ascontiguousarray(
sample_weight, dtype=DOUBLE)
if len(sample_weight.shape) > 1:
raise ValueError("Sample weights array has more "
"than one dimension: %d" %
len(sample_weight.shape))
if len(sample_weight) != n_samples:
raise ValueError("Number of weights=%d does not match "
"number of samples=%d" %
(len(sample_weight), n_samples))
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
# Set min_weight_leaf from min_weight_fraction_leaf
if self.min_weight_fraction_leaf != 0. and sample_weight is not None:
min_weight_leaf = (self.min_weight_fraction_leaf *
np.sum(sample_weight))
else:
min_weight_leaf = 0.
# Set min_samples_split sensibly
min_samples_split = max(self.min_samples_split,
2 * self.min_samples_leaf)
# Build tree
criterion = self.criterion
if not isinstance(criterion, Criterion):
if is_classification:
criterion = CRITERIA_CLF[self.criterion](self.n_outputs_,
self.n_classes_)
else:
criterion = CRITERIA_REG[self.criterion](self.n_outputs_)
SPLITTERS = SPARSE_SPLITTERS if issparse(X) else DENSE_SPLITTERS
splitter = self.splitter
if not isinstance(self.splitter, Splitter):
splitter = SPLITTERS[self.splitter](criterion,
self.max_features_,
self.min_samples_leaf,
min_weight_leaf,
random_state)
self.tree_ = Tree(self.n_features_, self.n_classes_, self.n_outputs_)
# Use BestFirst if max_leaf_nodes given; use DepthFirst otherwise
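        # (max_leaf_nodes was mapped from None to -1 above, so "< 0" means "not given")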
if max_leaf_nodes < 0:
builder = DepthFirstTreeBuilder(splitter, min_samples_split,
self.min_samples_leaf,
min_weight_leaf,
max_depth)
else:
builder = BestFirstTreeBuilder(splitter, min_samples_split,
self.min_samples_leaf,
min_weight_leaf,
max_depth,
max_leaf_nodes)
builder.build(self.tree_, X, y, sample_weight)
if self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
def _validate_X_predict(self, X, check_input):
"""Validate X whenever one tries to predict, apply, predict_proba"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, "
"call `fit` before exploiting the model.")
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csr")
if issparse(X) and (X.indices.dtype != np.intc or
X.indptr.dtype != np.intc):
raise ValueError("No support for np.int64 index based "
"sparse matrices")
n_features = X.shape[1]
if self.n_features_ != n_features:
raise ValueError("Number of features of the model must "
" match the input. Model n_features is %s and "
" input n_features is %s "
% (self.n_features_, n_features))
return X
def predict(self, X, check_input=True):
"""Predict class or regression value for X.
For a classification model, the predicted class for each sample in X is
returned. For a regression model, the predicted value based on X is
returned.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes, or the predict values.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
n_samples = X.shape[0]
# Classification
if isinstance(self, ClassifierMixin):
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
predictions = np.zeros((n_samples, self.n_outputs_))
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(
np.argmax(proba[:, k], axis=1),
axis=0)
return predictions
# Regression
else:
if self.n_outputs_ == 1:
return proba[:, 0]
else:
return proba[:, :, 0]
def apply(self, X, check_input=True):
"""
Returns the index of the leaf that each sample is predicted as.
Parameters
----------
X : array_like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Returns
-------
X_leaves : array_like, shape = [n_samples,]
For each datapoint x in X, return the index of the leaf x
ends up in. Leaves are numbered within
``[0; self.tree_.node_count)``, possibly with gaps in the
numbering.
"""
X = self._validate_X_predict(X, check_input)
return self.tree_.apply(X)
@property
def feature_importances_(self):
"""Return the feature importances.
The importance of a feature is computed as the (normalized) total
reduction of the criterion brought by that feature.
It is also known as the Gini importance.
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, call `fit` before"
" `feature_importances_`.")
return self.tree_.compute_feature_importances()
# =============================================================================
# Public estimators
# =============================================================================
class DecisionTreeClassifier(BaseDecisionTree, ClassifierMixin):
"""A decision tree classifier.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : int, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
class_weight : dict, list of dicts, "balanced" or None, optional
(default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem),
or a list of arrays of class labels (multi-output problem).
feature_importances_ : array of shape = [n_features]
The feature importances. The higher, the more important the
feature. The importance of a feature is computed as the (normalized)
total reduction of the criterion brought by that feature. It is also
known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_classes_ : int or list
The number of classes (for single output problems),
or a list containing the number of classes for each
output (for multi-output problems).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
DecisionTreeRegressor
References
----------
.. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.cross_validation import cross_val_score
>>> from sklearn.tree import DecisionTreeClassifier
>>> clf = DecisionTreeClassifier(random_state=0)
>>> iris = load_iris()
>>> cross_val_score(clf, iris.data, iris.target, cv=10)
... # doctest: +SKIP
...
array([ 1. , 0.93..., 0.86..., 0.93..., 0.93...,
0.93..., 0.93..., 1. , 0.93..., 1. ])
"""
def __init__(self,
criterion="gini",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None,
class_weight=None):
super(DecisionTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
random_state=random_state)
def predict_proba(self, X, check_input=True):
"""Predict class probabilities of the input samples X.
The predicted class probability is the fraction of samples of the same
class in a leaf.
        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.
        check_input : boolean, (default=True)
            Allow to bypass several input checking.
            Don't use this parameter unless you know what you do.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
if self.n_outputs_ == 1:
proba = proba[:, :self.n_classes_]
normalizer = proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba /= normalizer
return proba
else:
all_proba = []
for k in range(self.n_outputs_):
proba_k = proba[:, k, :self.n_classes_[k]]
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
all_proba.append(proba_k)
return all_proba
def predict_log_proba(self, X):
"""Predict class log-probabilities of the input samples X.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
class DecisionTreeRegressor(BaseDecisionTree, RegressorMixin):
"""A decision tree regressor.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="mse")
The function to measure the quality of a split. The only supported
criterion is "mse" for the mean squared error, which is equal to
variance reduction as feature selection criterion.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : int, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
feature_importances_ : array of shape = [n_features]
The feature importances.
The higher, the more important the feature.
The importance of a feature is computed as the
(normalized) total reduction of the criterion brought
by that feature. It is also known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
DecisionTreeClassifier
References
----------
.. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_boston
>>> from sklearn.cross_validation import cross_val_score
>>> from sklearn.tree import DecisionTreeRegressor
>>> boston = load_boston()
>>> regressor = DecisionTreeRegressor(random_state=0)
>>> cross_val_score(regressor, boston.data, boston.target, cv=10)
... # doctest: +SKIP
...
array([ 0.61..., 0.57..., -0.34..., 0.41..., 0.75...,
0.07..., 0.29..., 0.33..., -1.42..., -1.77...])
"""
def __init__(self,
criterion="mse",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None):
super(DecisionTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state)
class ExtraTreeClassifier(DecisionTreeClassifier):
"""An extremely randomized tree classifier.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
    `max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
See also
--------
ExtraTreeRegressor, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="gini",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
max_leaf_nodes=None,
class_weight=None):
super(ExtraTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
random_state=random_state)
class ExtraTreeRegressor(DecisionTreeRegressor):
"""An extremely randomized tree regressor.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
    `max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
See also
--------
ExtraTreeClassifier, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="mse",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
max_leaf_nodes=None):
super(ExtraTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state)
|
bsd-3-clause
|
tku137/JPKay
|
JPKay/core/data_structures.py
|
1
|
16242
|
# coding=utf-8
import os
import re
from zipfile import ZipFile
import dateutil.parser as parser
import pytz
from struct import unpack
import numpy as np
import pandas as pd
class ForceArchive:
"""
Object to handle reading contents of a jpk-force zipped file.
- **Methods**
- ls: list archive contents
- read_properties: read utf-8 string decoded content of a property file, one property per list entry
- read_data: read encoded raw data, must be converted to appropriate physical quantity!
"""
# noinspection SpellCheckingInspection
def __init__(self, file_path):
self._zip_file = ZipFile(file_path)
if not self.read_properties('header.properties')['jpk-data-file'] == 'spm-forcefile':
raise ValueError("not a valid spm-forcefile!")
self.contents = self.ls()
def ls(self):
"""List all files contained in this force-archive"""
return self._zip_file.infolist()
def read_properties(self, content_path):
"""
Reads a property file form the force-archive.
The contents of the property file are elements of a list. Each entry is already decoded to utf-8.
:param content_path: internal path to the force-archive file
:type content_path: str
:return: property list
:rtype: dict
"""
if not os.path.basename(content_path).endswith(".properties"):
raise ValueError("this content path is not a property file")
try:
with self._zip_file.open(content_path) as file:
content = [line.decode('utf-8') for line in file.read().splitlines()]
# parse prop dictionary (without header date)
props = {}
for line in content[1:]:
key, value = line.split("=")
props[key] = value
# parse measurement date-time
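            # the first line of a java-style .properties file is assumed to be a '#'-prefixed timestamp
            # comment; the leading '#' is stripped before parsing and the datetime is localized to UTC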
fmt = '%Y-%m-%d %H:%M:%S %Z%z'
utc = pytz.utc
props["timestamp"] = utc.localize(parser.parse(content[0][1:], dayfirst=True)).strftime(fmt)
return props
except IOError:
print("can't read property file")
def read_data(self, content_path):
"""
Reads the raw integer-encoded data of the specified data file inside a force-archive.
:param content_path: internal path to the force-archive file
:type content_path: str
:return: raw data
:rtype: numpy.ndarray
"""
if not os.path.basename(content_path).endswith(".dat"):
raise ValueError("this content path is not a data file")
try:
# read binary data
data = self._zip_file.read(content_path)
# decode using big-endian integer
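            # '!i' unpacks one signed 32-bit integer in network (big-endian) byte order; a vectorized
            # equivalent would be np.frombuffer(data, dtype='>i4') (noted as an aside, not used here)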
result = []
for i in range(int(len(data) / 4)):
result.append(unpack('!i', data[i * 4:(i + 1) * 4]))
# returning integer-encoded raw data vector
return np.array(result)
except IOError:
print("can't read data file")
class Properties:
"""
Object to automatically extract and conveniently use relevant JPK force file header information.
This comprises things like conversion factors for raw data, units, and so on
- **attributes**
- vDeflection_channel_number: internal number of vDeflection channel raw data
- conversion_factors: dictionary containing important information
- units: dictionary containing channel units
- **example usage**::
>>> force_file = r"path/to/jpk-force-file"
>>> props = Properties(file_path=force_file)
>>> print(props.units["vDeflection"])
V
>>> print(props.conversion_factors["vDeflection"]["force multiplier"])
0.01529211140472191
"""
def __init__(self, file_path):
# parse file path to header.properties file
self.file_path = file_path
# load the property file (you have to instantiate and load subsequently)
self.general = self.load_general_props()
self.segments = self.extract_segment_props()
# set vDeflection channel number, always extract freshly because channel numbering seems to be inconsistent
self.channel_numbers = self.get_channel_numbers()
        # extract raw conversion factors and other specifications like units and the like
self.conversion_factors = self.extract_conversion_factors()
self.units = {}
self.extract_specs()
def load_general_props(self):
"""
        This actually loads the properties file from the jpk-force zip archive on disk. Parses all java-properties
        info and the timestamp from the header of the header.properties file.
:return: props dictionary
:rtype: dict
"""
# load general and shared header.properties file from zipfile
root = ForceArchive(self.file_path).read_properties('header.properties')
shared = ForceArchive(self.file_path).read_properties('shared-data/header.properties')
full = {}
full.update(root)
full.update(shared)
return full
# noinspection PyPep8Naming
def get_channel_numbers(self):
"""
Extracts the channel numbers for each channel.
:return: dictionary with channel numbers
:rtype: dict
"""
channel_numbers = {"vDeflection": None, "hDeflection": None, "height": None, "capacitiveSensorHeight": None}
        for key, value in self.general.items():
            if value in channel_numbers:
                channel_numbers[value] = re.search(r'(?<=lcd-info\.)\d(?=\.channel.name)', key).group()
return channel_numbers
# noinspection PyPep8Naming
def extract_conversion_factors(self):
"""
Extracts all conversion factors for the raw data channels. Currently, only vDeflection channel is
extracted, because it is the only one calibrated during AFM measurements
:return: dict with conversion factors
:rtype: dict
"""
# get some info to reduce ridiculously long java-prop names
vDeflection_channel = "lcd-info.{}.".format(self.channel_numbers["vDeflection"])
vDeflection_encoder = "{}encoder.scaling.".format(vDeflection_channel)
vDeflection_conversion = "{}conversion-set.conversion.".format(vDeflection_channel)
height_channel = "lcd-info.{}.".format(self.channel_numbers["height"])
height_encoder = "{}encoder.scaling.".format(height_channel)
height_conversion = "{}conversion-set.conversion.".format(height_channel)
factors = {"vDeflection": {}, "height": {}}
# parse vDeflection conversion factors
factors["vDeflection"]["raw multiplier"] = \
np.array(float(self.general["{}multiplier".format(vDeflection_encoder)]))
factors["vDeflection"]["raw offset"] = np.array(float(self.general["{}offset".format(vDeflection_encoder)]))
factors["vDeflection"]["distance multiplier"] = \
np.array(float(self.general["{}distance.scaling.multiplier".format(vDeflection_conversion)]))
factors["vDeflection"]["distance offset"] = \
np.array(float(self.general["{}distance.scaling.offset".format(vDeflection_conversion)]))
factors["vDeflection"]["force multiplier"] = \
np.array(float(self.general["{}force.scaling.multiplier".format(vDeflection_conversion)]))
factors["vDeflection"]["force offset"] = \
np.array(float(self.general["{}force.scaling.offset".format(vDeflection_conversion)]))
# parse height conversion factors
factors["height"]["raw multiplier"] = np.array(float(self.general["{}multiplier".format(height_encoder)]))
factors["height"]["raw offset"] = np.array(float(self.general["{}offset".format(height_encoder)]))
factors["height"]["calibrated multiplier"] = \
np.array(float(self.general["{}nominal.scaling.multiplier".format(height_conversion)]))
factors["height"]["calibrated offset"] = \
np.array(float(self.general["{}nominal.scaling.offset".format(height_conversion)]))
return factors
# noinspection SpellCheckingInspection,PyPep8Naming
def extract_specs(self):
"""Extracts any kind of infos from the header, like units and the like"""
vDeflection_unit = "lcd-info.{}.conversion-set.conversion.force.scaling.unit.unit".format(
self.channel_numbers["vDeflection"])
self.units["vDeflection"] = self.general[vDeflection_unit]
height_unit = "lcd-info.{}.conversion-set.conversion.nominal.scaling.unit.unit".format(
self.channel_numbers["height"])
self.units["height"] = self.general[height_unit]
def extract_segment_props(self):
"""
        Extract properties for each data segment. Additionally, JPK's segment names are converted to a more useful
        naming scheme: approach, contact, retract, pause. The segment number, which is needed during data loading,
        is also stored. Properties for each segment are stored in a dictionary keyed by the converted segment names.
:return: per-segment properties
:rtype: dict
"""
props = {}
num_segments = int(self.general['force-scan-series.force-segments.count'])
for segment in range(num_segments):
segment_props = ForceArchive(self.file_path).read_properties(
'segments/{}/segment-header.properties'.format(segment))
# noinspection SpellCheckingInspection
name_jpk = segment_props['force-segment-header.name.name'].replace('-cellhesion200', '')
normal_name = self.convert_segment_name(name_jpk)
props[normal_name] = segment_props
props[normal_name]["name_jpk"] = name_jpk
props[normal_name]["name"] = normal_name
props[normal_name]["segment_number"] = str(segment)
return props
@staticmethod
def convert_segment_name(jpk_name):
"""Convert JPKs segment names to useful ones"""
if jpk_name == 'extend':
real_name = 'approach'
elif jpk_name == 'pause-at-end':
real_name = 'contact'
elif jpk_name == 'pause-at-start':
real_name = 'pause'
else:
real_name = jpk_name
return real_name
class CellHesion:
# noinspection SpellCheckingInspection
"""
This is the main data-class that provides all functionality to load, analyze and display a single JPK
CellHesion200 force file archive.
**Attributes**
The following attributes are available:
- archive: an instance of :class:`.ForceArchive`
- properties: an instance of :class:`.Properties`
- data: :class:`pandas:pandas.DataFrame`
**Example Usage**
>>> jpk_file = r'path/to/jpk-force/file'
>>> sample = CellHesion(force_file=jpk_file)
>>> import matplotlib.pyplot as plt
>>> x = sample.data.retract.height * 10**6
>>> y = sample.data.retract.force * 10**12
>>> plt.plot(x, y)
>>> plt.xlabel("height [µm]"); plt.ylabel("force [pN]")
"""
def __init__(self, force_file):
# parse and check file path
if os.path.isfile(force_file):
self.file = force_file
else:
raise ValueError("file does not exist")
#
self.archive = ForceArchive(file_path=self.file)
self.properties = Properties(file_path=self.file)
#
self.data = self.load_data()
# noinspection PyPep8Naming
def load_encoded_data_segment(self, segment):
"""
Loads the raw, encoded vertical deflection and height data of the specified segment.
This has to be converted using :func:`convert_data` to make use of it.
:param segment: data segment to load
:type segment: str
:return: vDeflection and height
"""
# get data locations
segment_number = self.properties.segments[segment]['segment_number']
vDeflection_file = 'segments/{}/channels/vDeflection.dat'.format(segment_number)
height_file = 'segments/{}/channels/height.dat'.format(segment_number)
# load encoded data from archive
vDeflection = self.archive.read_data(vDeflection_file)
height = self.archive.read_data(height_file)
return vDeflection, height
# noinspection PyPep8Naming
def load_data(self):
"""
Load converted data to DataFrame. See :func:`construct_df` for DataFrame structure.
:return: force/height data
:rtype: pandas.DataFrame
"""
df = self.construct_df()
for segment in list(self.properties.segments.keys()):
# load raw data
vDeflection_raw, height_raw = self.load_encoded_data_segment(segment)
# convert data to normal physical units
vDeflection = self.convert_data('vDeflection', vDeflection_raw)
height = self.convert_data('height', height_raw)
df.loc[:, (segment, 'force')] = pd.Series(vDeflection.squeeze())
df.loc[:, (segment, 'height')] = pd.Series(height.squeeze())
return df
def convert_data(self, channel, data):
"""
Convert specific data from specific channel from encoded integer format to physical quantity.
        Each channel has its own conversion factors and formulas, so the correct channel has to be provided.
:param channel: data channel
:type channel: str
:param data: encoded data
:type data: numpy.ndarray
:return: converted data
        :rtype: numpy.ndarray
"""
if not isinstance(data, np.ndarray):
raise ValueError("data has to be numpy array")
# convert vDeflection from encoded to distance to force with linear conversion factors
# the returned object is already a numpy ndarray in unit Newton (N)
if channel == 'vDeflection':
raw_m = self.properties.conversion_factors[channel]["raw multiplier"]
raw_n = self.properties.conversion_factors[channel]["raw offset"]
dist_m = self.properties.conversion_factors[channel]["distance multiplier"]
dist_n = self.properties.conversion_factors[channel]["distance offset"]
force_m = self.properties.conversion_factors[channel]["force multiplier"]
force_n = self.properties.conversion_factors[channel]["force offset"]
converted_data = ((raw_m * data + raw_n) * dist_m + dist_n) * force_m + force_n
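            # i.e. three chained linear maps: encoder scaling -> distance scaling -> force scaling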
return converted_data
# convert height from encoded to calibrated height
# the returned object is already a numpy ndarray in unit Meter (m)
elif channel == 'height':
raw_m = self.properties.conversion_factors[channel]["raw multiplier"]
raw_n = self.properties.conversion_factors[channel]["raw offset"]
cal_m = self.properties.conversion_factors[channel]["calibrated multiplier"]
cal_n = self.properties.conversion_factors[channel]["calibrated offset"]
converted_data = (raw_m * data + raw_n) * cal_m + cal_n
return converted_data
else:
raise ValueError("not a valid channel")
@staticmethod
def construct_df():
"""
Construct a pandas DataFrame to store force and height data for each segment.
:return: DataFrame blueprint
:rtype: pandas.DataFrame
"""
iterable = [['approach', 'contact', 'retract', 'pause'], ['force', 'height']]
index = pd.MultiIndex.from_product(iterable, names=['segment', 'channel'])
return pd.DataFrame(columns=index)
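

if __name__ == "__main__":
    # Minimal structural sketch (no force file needed): inspect the empty MultiIndex
    # DataFrame blueprint that load_data() fills with per-segment force/height columns.
    blueprint = CellHesion.construct_df()
    print(blueprint.columns.tolist())
    # -> [('approach', 'force'), ('approach', 'height'), ('contact', 'force'), ...]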
|
mit
|