repo_name (stringlengths 6-112) | path (stringlengths 4-204) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 714-810k) | license (stringclasses, 15 values)
---|---|---|---|---|---|
jasanmiguel10/HackEvent | src/visualizacion_peque.py | 1 | 3124 | # -*- coding: utf-8 -*-
"""
Spyder editor
This is a temporary file
"""
from mpl_toolkits.basemap import Basemap,maskoceans
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
llcrnrlat=43
llcrnrlon=-116
urcrnrlat=50
urcrnrlon=-103
cobertura=0.2
m=Basemap(projection='mill',llcrnrlat=llcrnrlat,llcrnrlon=llcrnrlon,urcrnrlat=urcrnrlat,urcrnrlon=urcrnrlon,resolution='c')
x_min,y_min=m(llcrnrlon,llcrnrlat)
x_max,y_max=m(urcrnrlon,urcrnrlat)
# number of synthetic data points (for testing)
n=1000
# side length of the grid squares, in degrees
l=1
# grid dimensions
height=urcrnrlat-llcrnrlat
length=urcrnrlon-llcrnrlon
"""
tweets_array=np.array([np.random.uniform(low=llcrnrlat,high=height+llcrnrlat,size=n),
np.random.uniform(low=llcrnrlon,high=length+llcrnrlon,size=n),
np.random.binomial(np.ones(n).astype(int),np.ones(n)*0.5)]).transpose()
tweets=pd.DataFrame(tweets_array,columns=['lat','lon','clase'])
"""
tweets=pd.read_csv("cebo_final.tsv",sep="\t",names=['clase','lat','lon'])
tweets=tweets.iloc[np.where(tweets['lat']>=llcrnrlat)]
tweets=tweets.iloc[np.where(tweets['lon']>=llcrnrlon)]
tweets=tweets.iloc[np.where(tweets['lat']<=urcrnrlat)]
tweets=tweets.iloc[np.where(tweets['lon']<=urcrnrlon)]
tweets.reset_index(inplace=True)
# a region is defined by the coordinates of its center
ceros=np.where(tweets['clase']==0)[0]
tweets['clase'][np.where(tweets['clase']==2)[0]]=0
tweets['clase'][np.where(tweets['clase']==1)[0]]=1.1
tweets['clase'][ceros]=3.5
tweets[['lat','lon']]=tweets[['lat','lon']]-(tweets[['lat','lon']]%l)
regiones=tweets.groupby(['lat','lon']).agg(['sum','count']).clase
regiones['x'],regiones['y']=m(np.array(regiones.index.get_level_values('lon')),np.array(regiones.index.get_level_values('lat')))
#tweets['x'],tweets['y']=m(tweets['lon'],tweets['lat'])
regiones['avg']=regiones['sum']/regiones['count']
#ratio=(x_max-x_min)/(y_max-y_min)
#nbins=30
#heatmap, xedges, yedges = np.histogram2d(regiones['x'], regiones['y'],bins=[int(nbins*ratio),nbins],weights=regiones['avg'], range=[[x_min,x_max],[y_min,y_max]])
#heatmap, xedges, yedges = np.histogram2d(tweets['x'], tweets['y'],bins=[int(nbins*ratio),nbins],weights=tweets['clase'], range=[[x_min,x_max],[y_min,y_max]])
extent = [x_min, x_max, y_min, y_max]
M=np.zeros(((int((urcrnrlat-llcrnrlat)/l)),(int((urcrnrlon-llcrnrlon)/l))))
for i in range(regiones.shape[0]):
coords=regiones.iloc[i].name
try:
M[int((coords[0]-llcrnrlat)/l),int((coords[1]-llcrnrlon)/l)+1]=regiones.iloc[i].avg
except: pass
#plt.imshow(heatmap.T,interpolation='bicubic',extent=extent,cmap='coolwarm',origin='lower')
plt.imshow(M,interpolation='bicubic',extent=extent,cmap='coolwarm',origin='lower')
m.drawcountries()
m.drawcoastlines()
m.drawstates()
plt.colorbar(orientation='horizontal',shrink=0.9)
plt.savefig('testplot1.png',dpi=900)
plt.show()
hotspots=regiones[['avg']].sort_values(by='avg',ascending=False).iloc[range(int(regiones.shape[0]*0.2))]
hotspots['lat']=hotspots.index.get_level_values('lat')
hotspots['lon']=hotspots.index.get_level_values('lon') | mit |
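The script above snaps each point onto a 1-degree grid by flooring its coordinates (`coord - coord % l`) and then averages the class value per cell before plotting. Below is a minimal, self-contained sketch of just that binning step; it uses synthetic points rather than the `cebo_final.tsv` file the script expects, so the numbers are illustrative only.

```python
import numpy as np
import pandas as pd

l = 1.0  # grid cell size in degrees
rng = np.random.default_rng(0)
pts = pd.DataFrame({
    "lat": rng.uniform(43, 50, 500),
    "lon": rng.uniform(-116, -103, 500),
    "clase": rng.integers(0, 2, 500).astype(float),
})

# Snap each point to the lower-left corner of its grid cell
pts[["lat", "lon"]] = pts[["lat", "lon"]] - (pts[["lat", "lon"]] % l)

# One row per cell: sum, count and mean of the class value
regions = pts.groupby(["lat", "lon"])["clase"].agg(["sum", "count"])
regions["avg"] = regions["sum"] / regions["count"]
print(regions.head())
```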
THEdavehogue/punxsutawney_phil_predictor | gather_data.py | 1 | 9559 | import os
import json
import numpy as np
import pandas as pd
import requests
from time import sleep
from datetime import datetime, time, timedelta
from progressbar import ProgressBar
from pymongo import MongoClient
KEY = os.environ['DARK_SKY_KEY']
LAT = '40.9310'
LON = '-78.9572'
EARLY_SPRING_YEARS = ['1950', '1970', '1975', '1983', '1986', '1988', '1990',
'1995', '1997', '1999', '2007', '2011', '2013', '2016']
def API_call(url):
'''
Make API call for the given url
INPUT:
url: str, url for API call
OUTPUT:
response: HTTP response object
'''
try:
response = requests.get(url)
except:
sleep(5)
response = API_call(url)
return response
def scrape_weather_data(year, db_coll):
'''
Get Punxsutawney weather data for a given Groundhog Day
INPUT:
year: str, year of weather data
db_coll: pymongo collection for storing weather data
'''
time = '{}-02-02T12:00:00Z'.format(year) # Noon UTC = 7am Punxsutawney
url = 'https://api.darksky.net/forecast/{}/{},{},{}'.format(KEY, LAT, LON, time)
response = API_call(url)
if response.status_code == 200:
forecast = response.json()
if year in EARLY_SPRING_YEARS:
forecast[u'prediction'] = 1
else:
forecast[u'prediction'] = 0
db_coll.insert_one(forecast)
else:
scrape_weather_data(year, db_coll)
def populate_weather_db(db_coll):
years = map(str, np.arange(1944, 2018))
print 'Checking weather table . . .'
    if db_coll.count() < len(years):
        print 'Refreshing weather observations . . .'
        db_coll.drop()
        pbar = ProgressBar()
        for year in pbar(years):
            scrape_weather_data(year, db_coll)
else:
print 'Weather table already populated!'
def unix_to_datetime(unix_time):
'''
Convert UNIX time to datetime
INPUT:
unix_time: int, UNIX time (seconds since 1970-01-01 00:00:00)
OUTPUT:
new_dt: datetime object, datetime representation of unix_time
'''
orig = datetime(1970, 1, 1, 0, 0, 0, 0)
new_dt = orig + timedelta(seconds=unix_time)
return new_dt
def empty_df(hourly=False):
'''
Function to create an empty pandas DataFrame object (used in mongo_to_pandas)
    INPUT:
        hourly: bool, if True build the hourly-schema DataFrame, otherwise the daily one
OUTPUT: empty pandas DataFrame object
'''
if not hourly:
df = pd.DataFrame(columns=['date',
'max_temp',
'min_temp',
'dew_point',
'humidity',
'condition',
'moon_phase',
'precip_type',
'visibility',
'wind_bearing',
'wind_speed',
'prediction'])
else:
df = pd.DataFrame(columns=['date',
'time',
'feels_like_temp',
'dew_point',
'humidity',
'precip_type',
'summary',
'actual_temp',
'visibility',
'wind_bearing',
'wind_speed'])
return df
def parse_record_daily(rec):
'''
Function to parse Mongo record into a pandas Series object
INPUT:
rec: record from MongoDB
OUTPUT:
row: Mongo record converted to pandas Series
'''
daily = rec['daily']['data'][0]
date = unix_to_datetime(daily['time']).date()
if daily.get('icon', None) == 'partly-cloudy-day' or \
daily.get('icon', None) == 'partly-cloudy-night':
condition = 'partly-cloudy'
else:
condition = daily.get('icon', None)
row = {'date': date,
'max_temp': daily.get('temperatureMax', None),
'min_temp': daily.get('temperatureMin', None),
'dew_point': daily.get('dewPoint', None),
'humidity': daily.get('humidity', None),
'condition': condition,
'moon_phase': daily.get('moonPhase', None),
'precip_type': daily.get('precipType', 'None'),
'visibility': daily.get('visibility', None),
'wind_bearing': daily.get('windBearing', None),
'wind_speed': daily.get('windSpeed', None),
'prediction': rec.get('prediction', None)}
return pd.Series(row)
def parse_record_hourly(rec):
'''
Function to parse Mongo record into a pandas Series object
INPUT:
rec: record from MongoDB
OUTPUT:
row: Mongo record converted to pandas DataFrame
'''
rows = empty_df(hourly=True)
offset = rec['offset']
hourly = rec['hourly']['data']
date = unix_to_datetime(rec['daily']['data'][0]['time']).date()
for hour in hourly:
local_time = unix_to_datetime(hour['time']) + timedelta(hours=offset)
row = {'date': date,
'time': local_time.time(),
'feels_like_temp': hour.get('apparentTemperature'),
'dew_point': hour.get('dewPoint'),
'humidity': hour.get('humidity'),
'precip_type': hour.get('precipType'),
'summary': hour.get('summary'),
'actual_temp': hour.get('temperature'),
'visibility': hour.get('visibility'),
'wind_bearing': hour.get('windBearing', 0),
'wind_speed': hour.get('windSpeed', 0),
'prediction': rec.get('prediction')}
rows = rows.append(pd.Series(row), ignore_index=True)
return rows
def mongo_to_pandas(db_coll):
'''
Convert JSON records in MongoDB collection to pandas DataFrame
INPUT:
db_coll: pymongo collection
OUTPUT:
df: Pandas DataFrame
'''
c = db_coll.find()
records = list(c)
df_daily = empty_df(hourly=False)
df_hourly = empty_df(hourly=True)
pbar = ProgressBar()
for rec in pbar(records):
day = parse_record_daily(rec)
df_daily = df_daily.append(day, ignore_index=True)
hours = parse_record_hourly(rec)
df_hourly = df_hourly.append(hours, ignore_index=True)
for df in [df_daily, df_hourly]:
df['wind_bearing'] = df['wind_bearing'].astype(int)
df['prediction'] = df['prediction'].astype(int)
df_hourly['precip_type'] = df_hourly['precip_type'].fillna('None')
return df_daily, df_hourly
def scrub_data(df, hourly=False):
if not hourly:
df_daily = df
df_precip_dummies = pd.get_dummies(df['precip_type'], drop_first=True)
df_condition_dummies = pd.get_dummies(df['condition'], drop_first=True)
df_daily = df.drop(['date', 'condition', 'precip_type'], axis=1)
df_daily = pd.concat([df_daily, df_precip_dummies, df_condition_dummies], axis=1)
return df_daily
else:
df_hourly = df
mask_a = df_hourly['time'] >= time(7, 0)
mask_b = df_hourly['time'] <= time(9, 0)
df_morning = df_hourly[mask_a & mask_b]
dates = df_morning['date'].unique()
df_summaries = pd.DataFrame(columns = df_morning.columns)
for dt in dates:
new_row = {}
df_slice = df_morning[df_morning['date'] == dt]
new_row['actual_temp'] = df_slice['actual_temp'].mean()
new_row['date'] = dt
new_row['dew_point'] = df_slice['dew_point'].mean()
new_row['feels_like_temp'] = df_slice['feels_like_temp'].mean()
new_row['humidity'] = df_slice['humidity'].mean()
try:
new_row['precip_type'] = df_slice['precip_type'].mode()[0]
except:
new_row['precip_type'] = 'None'
new_row['prediction'] = df_slice['prediction'].mean()
try:
new_row['summary'] = df_slice['summary'].mode()[0]
except:
new_row['summary'] = 'Overcast'
new_row['time'] = 'morning avg'
new_row['visibility'] = df_slice['visibility'].mean()
new_row['wind_bearing'] = df_slice['wind_bearing'].mean()
new_row['wind_speed'] = df_slice['wind_speed'].mean()
df_summaries = df_summaries.append(pd.Series(new_row), ignore_index=True)
df_precip_dummies = pd.get_dummies(df_summaries['precip_type'], drop_first=True)
df_summary_dummies = pd.get_dummies(df_summaries['summary'], drop_first=True)
df_summaries = df_summaries.drop(['date', 'precip_type', 'summary', 'time'], axis=1)
df_summaries = pd.concat([df_summaries, df_precip_dummies, df_summary_dummies], axis=1)
return df_summaries
if __name__ == '__main__':
db_client = MongoClient()
db = db_client['groundhog_day']
pred_coll = db['predictions']
populate_weather_db(pred_coll)
df_daily, df_hourly = mongo_to_pandas(pred_coll)
df_daily_scrubbed = scrub_data(df_daily, hourly=False)
df_hourly_scrubbed = scrub_data(df_hourly, hourly=True)
df_daily.to_pickle('data/groundhog_daily.pkl')
df_hourly.to_pickle('data/groundhog_hourly.pkl')
df_daily_scrubbed.to_pickle('data/groundhog_daily_scrubbed.pkl')
df_hourly_scrubbed.to_pickle('data/groundhog_hourly_scrubbed.pkl')
| gpl-3.0 |
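`API_call` above retries a failed request recursively after a five-second sleep, with no bound on the number of retries. A bounded, iterative variant of the same idea is sketched below; the URL is a placeholder, not one of the Dark Sky endpoints used in the script.

```python
import requests
from time import sleep

def api_call(url, retries=3, wait=5):
    """Fetch a URL, retrying a fixed number of times on network errors."""
    for attempt in range(retries):
        try:
            return requests.get(url)
        except requests.RequestException:
            if attempt == retries - 1:
                raise
            sleep(wait)

# resp = api_call("https://api.example.com/forecast")  # placeholder URL
```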
lkilcommons/ssmtools | ssm_remove_step.py | 1 | 14695 | import numpy as np
import matplotlib
import matplotlib.pyplot as pp
from spacepy import pycdf
import os,time,argparse
#from scipy.ndimage.filters import gaussian_filter1d
import ssm_read_data
angbetween = lambda v1,v2: np.arccos((v1[0]*v2[0]+v1[1]*v2[1]+v1[2]*v2[2])/(np.sqrt(v1[0]**2+v1[1]**2+v1[2]**2)*np.sqrt(v2[0]**2+v2[1]**2+v2[2]**2)))
class ssm_step_remover(object):
"""
    Attempt to remove steps using Welch's/Student's t-test
"""
def __init__(self,ssmcdffn,plot_correction=False,modifycdf=False,validation_plots=False,reader=None):
#Option to load prexisting reader
if reader is None:
self.reader = ssm_read_data.ssm_cdf_reader(ssmcdffn)
self.cdffn = ssmcdffn
else:
print "Warning: Ignoring call to use cdffile %s, and using cdf reader instance passed as kwargs['reader'] instead." % (ssmcdffn)
self.reader = reader
self.cdffn = reader.cdffn
self.plot_correction = plot_correction
self.modifycdf = modifycdf
self.validation_plots = validation_plots
#Make copies so we can compare
self.orig_dB_along,self.orig_dB_across,self.orig_dB_up = self.reader.dB_along.copy(),self.reader.dB_across.copy(),self.reader.dB_up.copy()
self.orig_dBd1,self.orig_dBd2,self.orig_dBd3 = self.reader.dBd1.copy(),self.reader.dBd2.copy(),self.reader.dBd3.copy()
if self.plot_correction:
self.pauselen = 1
self.f = pp.figure()
self.a1 = self.f.add_subplot(111)
pp.ion()
def plot_difference(self,oi,hemi):
ms=3
inpass = self.reader.get_orbit_mask(oi,hemisphere=hemi,remove_nan=False)
t = self.reader.ut[inpass]
dB_along,dB_across,dB_up = self.reader.dB_along[inpass],self.reader.dB_across[inpass],self.reader.dB_up[inpass]
odB_along,odB_across,odB_up = self.orig_dB_along[inpass],self.orig_dB_across[inpass],self.orig_dB_up[inpass]
dBd1,dBd2,dBd3 = self.reader.dBd1[inpass],self.reader.dBd2[inpass],self.reader.dBd3[inpass]
odBd1,odBd2,odBd3 = self.orig_dBd1[inpass],self.orig_dBd2[inpass],self.orig_dBd3[inpass]
#Spacecraft coordinates
fsc = pp.figure(figsize=(11,8))
asc1 = fsc.add_subplot(311)
asc2 = fsc.add_subplot(312)
asc3 = fsc.add_subplot(313)
asc1.plot(t,odB_along,'b.',label='Along, Before',ms=ms)
asc1.plot(t,dB_along,'r.',label='Along, After',ms=ms)
asc1.legend()
asc2.plot(t,odB_across,'b.',label='Across, Before',ms=ms)
asc2.plot(t,dB_across,'r.',label='Across, After',ms=ms)
asc2.legend()
asc3.plot(t,odB_up,'b.',label='Up, Before',ms=ms)
asc3.plot(t,dB_up,'r.',label='Up, After',ms=ms)
asc3.legend()
        asc1.set_title('Magnetic Perturbations Before and After Step Removal')
fapx = pp.figure(figsize=(11,8))
apx1 = fapx.add_subplot(311)
apx2 = fapx.add_subplot(312)
apx3 = fapx.add_subplot(313)
apx1.plot(t,odBd1,'b.',label='dBd1, Before')
apx1.plot(t,dBd1,'r.',label='dBd1, After')
apx1.legend()
apx2.plot(t,odBd2,'b.',label='dBd2, Before')
apx2.plot(t,dBd2,'r.',label='dBd2, After')
apx2.legend()
apx3.plot(t,odBd3,'b.',label='dBd3, Before')
apx3.plot(t,dBd3,'r.',label='dBd3, After')
apx3.legend()
for ax in [asc1,asc2,asc3,apx1,apx2,apx3]:
ax.set_ylabel('nT')
        apx1.set_title('Apex Coordinates Magnetic Perturbations Before and After Step Removal')
parts = os.path.split(self.cdffn)
filename = parts[-1]
path = parts[0]
stem = os.path.splitext(filename)[0]
plotfolder = os.path.join(path,stem+'_validation')
if not os.path.exists(plotfolder):
os.makedirs(os.path.join(plotfolder,'sc'))
os.makedirs(os.path.join(plotfolder,'apx'))
fsc.suptitle('%s, Orbit %d, Hemisphere: %s' % (filename,oi,hemi))
fapx.suptitle('%s, Orbit %d, Hemisphere: %s' % (filename,oi,hemi))
fsc.savefig(os.path.join(plotfolder,'sc','%s_orbit%.2d_%s_sc.png' % (stem,oi,hemi)))
fapx.savefig(os.path.join(plotfolder,'apx','%s_orbit%.2d_%s_apx.png' % (stem,oi,hemi)))
def repair_pass(self,oi,hemi):
"""
oi - orbit index
hemi - 'N' or 'S', hemisphere
"""
ms = 3
inpass = self.reader.get_orbit_mask(oi,hemisphere=hemi,remove_nan=False)
passinds = np.flatnonzero(inpass)
t = self.reader.ut[inpass]
dB_along,dB_across,dB_up = self.reader.dB_along[inpass],self.reader.dB_across[inpass],self.reader.dB_up[inpass]
dB_d1,dB_d2,dB_d3 = self.reader.dBd1[inpass],self.reader.dBd2[inpass],self.reader.dBd3[inpass]
#Create the scalar field perturbation
dB = np.sqrt(dB_along**2+dB_across**2+dB_up**2)
if np.count_nonzero(np.isfinite(dB)) < 100.:
print "Less than 100 finite values in this pass (orbit %d, %s hemisphere)...skipping" % (oi,hemi)
return
#Determine what is valid data
g = np.isfinite(dB)
ginds = np.flatnonzero(g)
#Remove step-up discontinuities
done = False
origdB = dB.copy()
istep = 0
max_iters = 15
jump_inds = None
while not done:
if self.plot_correction:
#Plot where the jump was found
self.a1.cla()
#self.a1.plot(t,dB,'k.',label='dB',ms=ms)
self.a1.plot(t,dB_along,'m.',label='dB_along',ms=ms)
self.a1.plot(t,dB_across,'g.',label='dB_across',ms=ms)
self.a1.plot(t,dB_up,'b.',label='dB_up',ms=ms)
self.a1.legend()
self.a1.set_title("Step Detection Iteration %d, Orbit %d, %s Hemisphere" % (istep,oi,
hemi))
self.f.canvas.draw()
pp.pause(self.pauselen)
#Detect steps using the field-aligned component of apex coordinates perturbations
#theoretically there will be no perturbations in this component
jumped,done = self.find_one_step_t_test(dB_d3)
#Apply the correction to each component
if not done:
jump_inds = np.flatnonzero(jumped)
#Get the corrections for this particular jump
this_d1_correction = self.compute_correction(dB_d1,jump_inds)
this_d2_correction = self.compute_correction(dB_d2,jump_inds)
this_fa_correction = self.compute_correction(dB_d3,jump_inds)
this_along_correction = self.compute_correction(dB_along,jump_inds)
this_across_correction = self.compute_correction(dB_across,jump_inds)
this_up_correction = self.compute_correction(dB_up,jump_inds)
jump_size_d3 = this_fa_correction[np.nanargmax(np.abs(this_fa_correction))]
jump_size_along = np.nanmean(this_along_correction)
jump_size_across = np.nanmean(this_across_correction)
jump_size_up = np.nanmean(this_up_correction)
print "(Orbit %d)(Hemisphere: %s)(Iter %d): Removing %.3f minute long %.3f nT (field-aligned direction) step." % (oi,
hemi,istep,
(t[jump_inds[-1]]-t[jump_inds[0]])/60.,
jump_size_d3)
#Apply the correction to THIS PASS's data
dB_d1 = dB_d1 - this_d1_correction
dB_d2 = dB_d2 - this_d2_correction
dB_d3 = dB_d3 - this_fa_correction
dB_along = dB_along - this_along_correction
dB_across = dB_across - this_across_correction
dB_up = dB_up - this_up_correction
#Show Pass Correction
if self.plot_correction:
#Plot and draw
self.a1.cla()
#self.a1.plot(t,origdB,'k.',label='Scalar Pertrb',ms=ms)
#self.a1.plot(t,dB,'r.',label='Corr Scalar by %.3f nT' % (jump_size_scalar),ms=ms)
self.a1.plot(t,dB_along,'m.',label='Corr dB_along by %.3fnT' % (jump_size_along),ms=ms)
self.a1.plot(t,dB_across,'g.',label='Corr dB_across by %.3fnT' % (jump_size_across),ms=ms)
self.a1.plot(t,dB_up,'b.',label='Corr dB_up by %.3fnT' % (jump_size_up),ms=ms)
self.a1.axvspan(t[jump_inds[0]],t[jump_inds[-1]],alpha=.3,color='red')
self.a1.legend()
self.a1.set_title("Step Removal Iteration %d, Orbit %d, %s Hemisphere" % (istep,oi,
hemi))
self.f.canvas.draw()
pp.pause(self.pauselen)
#Apply the correction to the WHOLE DAY's data
self.reader.dB_along[passinds] = self.reader.dB_along[passinds] - this_along_correction
self.reader.dB_across[passinds] = self.reader.dB_across[passinds] - this_across_correction
self.reader.dB_up[passinds] = self.reader.dB_up[passinds] - this_up_correction
#Apply the correction to the WHOLE DAY's data
self.reader.dBd1[passinds] = self.reader.dBd1[passinds] - this_d1_correction
self.reader.dBd2[passinds] = self.reader.dBd2[passinds] - this_d2_correction
self.reader.dBd3[passinds] = self.reader.dBd3[passinds] - this_fa_correction
istep+=1
#Prevent possible infinite loop
if istep >= max_iters:
done = True
def repair_all_passes(self):
for oi,hemi in self.reader.iter_all_orbit_indices():
self.repair_pass(oi,hemi)
if self.validation_plots:
self.plot_difference(oi,hemi)
#We are done, update the data in the CDF unless we are doing a dry run
#Recall that SSM coordinates are a little counter-intuitive because the instrument boom is on the side
#of the spacecraft
#SSM coordinates x - down, y - along, z - across-right
if self.modifycdf:
#Create the final results array
            DELTA_B_SC = np.column_stack((-1*self.reader.dB_up,self.reader.dB_along,-1*self.reader.dB_across))
            DELTA_B_APX = np.column_stack((self.reader.dBd1,self.reader.dBd2,self.reader.dBd3))
            self.reader.cdf.readonly(False)
            self.reader.cdf['DELTA_B_SC_STEPCOR'] = DELTA_B_SC
            self.reader.cdf['DELTA_B_APX_STEPCOR'] = DELTA_B_APX
self.reader.cdf.save()
def compute_correction(self,dB,jumpinds,mode='interpolate'):
"""
Determines how to correct baseline for jump up and corresponding jump down
        The gist is that you determine how much the jump up was relative to the mean value before the jump
        and correspondingly, how much the jump back down was relative to the mean value after the jump.
        dB is the magnetic perturbation with NaNs NOT removed
        jumpinds are the indices of all points in dB between the jump up and jump down
        Returns a correction array of the same length as dB, but with zeros outside the range of the jump
The amount of the correction is linearly interpolated between the amount of the jump up
and the amount of the jump down
"""
#Get change amount (start of jump region)
deltadB_start = np.nanmean(dB[jumpinds[0]+1:jumpinds[0]+10]) - np.nanmean(dB[jumpinds[0]-10:jumpinds[0]-1])
#Get change amount (end of jump region)
deltadB_end = np.nanmean(dB[jumpinds[-1]-10:jumpinds[-1]-1]) - np.nanmean(dB[jumpinds[-1]+1:jumpinds[-1]+10])
#print "Start of step: %3.f nT" % (deltadB_start)
#print "End of step: %3.f nT" % (deltadB_end)
correction = np.zeros_like(dB)
if mode == 'interpolate':
#Compute difference in start and end deltas
se_diff = deltadB_end - deltadB_start
#Compute slope of linear interpolation
slope = se_diff/len(jumpinds)
x = jumpinds-jumpinds[0]+1
correction[jumpinds] = slope*x + deltadB_start
elif mode == 'smallest':
#Just use the smallest of the two possible corrections on the theory that it's better
#to undercorrect instead of overcorrect
correction[jumpinds] = deltadB_start if deltadB_start > deltadB_end else deltadB_end
return correction
def find_one_step_t_test(self,x,window_width=40):
"""Attempt to find step in some perturbation data by applying the student-t test to see if
one half of a sliding window is distributed differently than the other half, indicating that there
is a change in the mean.
Note:
            Is resilient to NaN in x
"""
jumped = np.zeros_like(x,dtype=bool)
#g = np.flatnonzero(np.isfinite(x))
#x = x[g]
half_width = int(window_width/2)+1
ts,dmeans = np.zeros_like(x),np.zeros_like(x)
jumps,jumpsizes = [],[]
i = half_width+1
while i + half_width < len(x):
wind_x = x[i-half_width:i+half_width]
#Don't do computation if more than half points in window are NaN
if np.count_nonzero(np.isfinite(wind_x)) < window_width/2:
ts[i] = np.nan
dmeans[i] = np.nan
i+=1
continue
else:
left_half = wind_x[:half_width-1]
right_half = wind_x[half_width+1:]
#Compute the T statistic
Xbarl,Xbarr = np.nanmean(left_half),np.nanmean(right_half)
sl,sr = np.nanstd(left_half),np.nanstd(right_half)
#Have to be careful, because nanmean and nanstd implicitly reduce
#N, the sample size if there are missing values
Nl,Nr = np.count_nonzero(np.isfinite(left_half)),np.count_nonzero(np.isfinite(right_half))
DeltaXbar = Xbarr-Xbarl
#Welch's T statistic
t = -1*DeltaXbar/np.sqrt(sl**2/Nl+sr**2/Nr)
#Results
ts[i] = t
dmeans[i] = DeltaXbar
i+=1
#This termination condition is a bit arbitrary. We
#could do better if we knew the approximate minimum size of the jump
if np.count_nonzero(ts) < window_width:
#Just return all false and done if there isn't any data
return jumped,True
#Find the index of the worst down and up steps
isplit_up = np.nanargmin(ts)
isplit_down = np.nanargmax(ts)
if isplit_up > isplit_down:
x_down = np.abs(np.nanmean(x[isplit_down-5:isplit_down-1])) # Mean of x before down
x_up = np.abs(np.nanmean(x[isplit_up+1:isplit_up+5])) # Mean of x after up
injump = np.arange(isplit_down,isplit_up)
elif isplit_down > isplit_up:
x_up = np.abs(np.nanmean(x[isplit_up-5:isplit_up-1])) # Mean of x before up
x_down = np.abs(np.nanmean(x[isplit_down+1:isplit_down+5])) # Mean of x after down
injump = np.arange(isplit_up,isplit_down)
        up_unambiguous = np.abs(ts[isplit_up]) > 3*x_up
        down_unambiguous = np.abs(ts[isplit_down]) > 3*x_down
        if up_unambiguous and down_unambiguous:
jumped[injump] = True
done = False
else:
done = True
if self.plot_correction:
self.a1.cla()
t = np.arange(len(x))
self.a1.plot(t,x,'b.',label='Original',ms=3)
#self.a1.plot(t,xcorr,'g.',label='Roughly Corrected',ms=3)
self.a1.plot(t,ts,'r.',label='t-Statistic',ms=3)
self.a1.plot(t,dmeans,'k.',label='Window Mean Diff',ms=3)
self.a1.set_title("Welch's t-statistic step detection (%d point window)" % (window_width))
self.a1.axvspan(t[injump[0]],t[injump[-1]],alpha=.3,color='red')
self.a1.plot(t[isplit_down],ts[isplit_down],'go')
self.a1.plot(t[isplit_up],ts[isplit_up],'go')
self.f.canvas.draw()
self.a1.legend()
pp.pause(self.pauselen)
return jumped,done
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="DMSP SSM step discontinuity detection and removal")
parser.add_argument("cdffile", help='Remove step discontinuities from this DMSP SSM CDF file',type=str,default=None)
parser.add_argument("--showplots",help="Show the step removal in action",action='store_true',default=False)
parser.add_argument("--modifycdf",help="Modify the CDF (create 2 new variables for corrected apex and spacecraft dB",default=False)
args = parser.parse_args()
if not os.path.exists(args.cdffile):
raise IOError("Sorry, but the specified CDF file %s appears to not exist!" % (args.cdffile))
remover = ssm_step_remover(args.cdffile,plot_correction=args.showplots,modifycdf=args.modifycdf)
remover.repair_all_passes() | mit |
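The core of `find_one_step_t_test` above is Welch's t-statistic between the left and right halves of a sliding window; a large |t| marks a likely level shift. The sketch below isolates just that statistic on a synthetic signal with one artificial step, and is not the module's full detection and correction logic.

```python
import numpy as np

def welch_t(left, right):
    """Welch's t-statistic between two samples (tolerates NaN values)."""
    nl = np.count_nonzero(np.isfinite(left))
    nr = np.count_nonzero(np.isfinite(right))
    dx = np.nanmean(right) - np.nanmean(left)
    return dx / np.sqrt(np.nanstd(left) ** 2 / nl + np.nanstd(right) ** 2 / nr)

x = np.random.normal(0.0, 2.0, 200)
x[100:150] += 50.0          # synthetic 50 nT step up and back down

half = 20
ts = np.array([welch_t(x[i - half:i], x[i:i + half])
               for i in range(half, x.size - half)])
print("largest |t| near index", half + np.argmax(np.abs(ts)))
```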
jeremiedecock/snippets | python/tkinter/python3/animation_canvas_rain.py | 1 | 3242 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2016 Jérémie DECOCK (http://www.jdhp.org)
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Inspired by http://www.labri.fr/perso/nrougier/teaching/matplotlib/matplotlib.html#animation
# See also: http://effbot.org/tkinterbook/widget.htm
import tkinter as tk
import random
SIZE = 500
MIN_RADIUS = 15
MAX_RADIUS = 30
NUM_CIRCLES = 50
FPS = 25
TIME_STEP_MS = int(1000 / FPS)
def main():
"""Main function"""
root = tk.Tk()
canvas = tk.Canvas(root, width=SIZE, height=SIZE, background="white")
canvas.pack()
for index in range(NUM_CIRCLES):
x = random.randint(0, SIZE)
y = random.randint(0, SIZE)
radius = random.randint(MIN_RADIUS, MAX_RADIUS)
canvas.create_oval(x - radius,
y - radius,
x + radius,
y + radius,
width=1)
def update_canvas():
# Update all circles
for tag in canvas.find_all():
coordinates = canvas.coords(tag) # Get the circle's coordinates
diameter = coordinates[2] - coordinates[0]
if diameter < 2 * MAX_RADIUS:
coordinates[0] -= 1
coordinates[1] -= 1
coordinates[2] += 2
coordinates[3] += 2
else:
x = random.randint(0, SIZE)
y = random.randint(0, SIZE)
coordinates[0] = x - MIN_RADIUS
coordinates[1] = y - MIN_RADIUS
coordinates[2] = x + MIN_RADIUS
coordinates[3] = y + MIN_RADIUS
diameter = coordinates[2] - coordinates[0]
# Redraw the ball
canvas.coords(tag, *coordinates) # Change coordinates
alpha = int(100. * diameter / (2. * MAX_RADIUS))
color = "gray" + str(alpha)
canvas.itemconfig(tag, outline=color) # Change color
# Reschedule event in TIME_STEP_MS ms
root.after(TIME_STEP_MS, update_canvas)
# Schedule event in TIME_STEP_MS ms
root.after(TIME_STEP_MS, update_canvas)
root.mainloop()
if __name__ == '__main__':
main()
| mit |
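The animation above is driven entirely by Tkinter's `after` scheduling: the callback redraws the canvas and then re-registers itself. The snippet below is the smallest version of that pattern, using a counter label instead of the rain canvas; it is purely illustrative.

```python
import tkinter as tk

root = tk.Tk()
label = tk.Label(root, text="0")
label.pack()

counter = [0]

def tick():
    counter[0] += 1
    label.config(text=str(counter[0]))
    root.after(40, tick)   # reschedule itself every 40 ms (~25 FPS)

root.after(40, tick)
root.mainloop()
```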
rdhyee/dlab-finance | basic-taq/raw_taq.py | 1 | 12459 | # This file currently depends on python 3.3+
from zipfile import ZipFile
from datetime import datetime
from pytz import timezone
import numpy as np
from numpy.lib import recfunctions
import tables as tb
# Note that the '|' character means byte order doesn't matter, which it never
# will for "bytes" (which is what 'S' stands for - it doesn't stand for
# "string")
initial_dtype = [('Time', 'S9'), # HHMMSSmmm, should be in Eastern Time (ET)
# ('hour', '|S2'),
# ('minute', '|S2'),
# ('second', '|S2'),
# ('msec', '|S3'),
('Exchange', 'S1'),
# Wikipedia has a nice explanation of symbols here:
# https://en.wikipedia.org/wiki/Ticker_symbol
('Symbol_root', 'S6'),
('Symbol_suffix', 'S10'),
('Bid_Price', 'S11'), # 7.4 (fixed point)
('Bid_Size', 'S7'),
('Ask_Price', 'S11'), # 7.4
('Ask_Size', 'S7'),
('Quote_Condition', 'S1'),
# Market_Maker ends up getting discarded, it should always be b' '
('Market_Maker', 'S4'),
('Bid_Exchange', 'S1'),
('Ask_Exchange', 'S1'),
('Sequence_Number', 'S16'),
('National_BBO_Ind', 'S1'),
('NASDAQ_BBO_Ind', 'S1'),
('Quote_Cancel_Correction', 'S1'),
('Source_of_Quote', 'S1'),
('Retail_Interest_Indicator_RPI', 'S1'),
('Short_Sale_Restriction_Indicator', 'S1'),
('LULD_BBO_Indicator_CQS', 'S1'),
('LULD_BBO_Indicator_UTP', 'S1'),
('FINRA_ADF_MPID_Indicator', 'S1'),
('SIP_generated_Message_Identifier', 'S1'),
('National_BBO_LULD_Indicator', 'S1'),
('newline', 'S2')]
# This could be computed from the above bytes? - Would be easy if we change the
# above to a more convenient format.
BYTES_PER_LINE = 98
# Justin and Pandas (I think) use time64, as does PyTables.
# We could use msec from beginning of day for now in an int16
# (maybe compare performance to datetime64? But dates should compress very well...)
time_col = 'Time'
convert_dtype = [
('Bid_Price', np.float64),
('Bid_Size', np.int32),
('Ask_Price', np.float64),
('Ask_Size', np.int32),
# ('Market_Maker', np.int8), # This is not currently used, and should always be b' '
('Sequence_Number', np.int64),
# ('National_BBO_Ind', np.int8), # The _Ind fields are actually categorical - leaving as strings
# ('NASDAQ_BBO_Ind', np.int8),
]
passthrough_strings = ['Exchange',
'Symbol_root',
'Symbol_suffix',
'Quote_Condition',
'Bid_Exchange',
'Ask_Exchange',
'National_BBO_Ind', # The _Ind fields are actually categorical - leaving as strings
'NASDAQ_BBO_Ind',
'Quote_Cancel_Correction',
'Source_of_Quote',
'Retail_Interest_Indicator_RPI',
'Short_Sale_Restriction_Indicator',
'LULD_BBO_Indicator_CQS',
'LULD_BBO_Indicator_UTP',
'FINRA_ADF_MPID_Indicator',
'SIP_generated_Message_Identifier',
'National_BBO_LULD_Indicator']
# Lifted from blaze.pytables
def dtype_to_pytables(dtype):
""" Convert NumPy dtype to PyTable descriptor
Examples
--------
>>> from tables import Int32Col, StringCol, Time64Col
>>> dt = np.dtype([('name', 'S7'), ('amount', 'i4'), ('time', 'M8[us]')])
>>> dtype_to_pytables(dt) # doctest: +SKIP
{'amount': Int32Col(shape=(), dflt=0, pos=1),
'name': StringCol(itemsize=7, shape=(), dflt='', pos=0),
'time': Time64Col(shape=(), dflt=0.0, pos=2)}
"""
d = {}
for pos, name in enumerate(dtype.names):
dt, _ = dtype.fields[name]
if issubclass(dt.type, np.datetime64):
tdtype = tb.Description({name: tb.Time64Col(pos=pos)}),
else:
tdtype = tb.descr_from_dtype(np.dtype([(name, dt)]))
el = tdtype[0] # removed dependency on toolz -DJC
getattr(el, name)._v_pos = pos
d.update(el._v_colobjects)
return d
def record_len_to_last_column(initial_dtype):
"""
initial_dtype of form:
[('Time', 'S9'),
('Exchange', 'S1'),
....
('newline', 'S2')]
Assumption is that the last field is a newline field that is present in all versions of BBO
"""
cum_len = 0
cum_lens = []
flens = [(field, int(dtype[1:])) for (field, dtype) in initial_dtype]
newline_len = flens[-1][1]
for (i,(field, flen)) in enumerate(flens[:-1]):
cum_len += flen
cum_lens.append((cum_len+newline_len, i))
return dict(cum_lens)
# The "easy" dtypes are the "not datetime" dtypes
easy_dtype = []
convert_dict = dict(convert_dtype)
for name, dtype in initial_dtype:
if name in convert_dict:
easy_dtype.append( (name, convert_dict[name]) )
elif name in passthrough_strings:
easy_dtype.append( (name, dtype) )
# PyTables will not accept np.datetime64, we hack below, but we use it to work
# with the blaze function above.
# We also shift Time to the end (while I'd rather maintain order), as it's more
# efficient for Dav given the technical debt he's already built up.
pytables_dtype = easy_dtype + [('Time', 'datetime64[ms]')]
pytables_desc = dtype_to_pytables( np.dtype(pytables_dtype) )
RECORD_LEN_TO_LAST_COLUMN_MAP = record_len_to_last_column(initial_dtype)
# TODO HDF5 will be broken for now
class TAQ2Chunks:
'''Read in raw TAQ BBO file, and return numpy chunks (cf. odo)'''
def __init__(self, taq_fname, chunksize=1000000, process_chunk=False):
self.taq_fname = taq_fname
self.chunksize = chunksize
        self.do_process_chunk = process_chunk  # stored separately so it does not shadow the process_chunk method
self.numlines = None
self.year = None
self.month = None
self.day = None
self.iter_ = self.convert_taq()
def __len__(self):
return self.numlines
def __iter__(self):
return self
def __next__(self):
return next(self.iter_)
def convert_taq(self):
'''Return a generator that yields chunks
chunksize : int
Number of rows in each chunk
'''
# The below doesn't work for pandas (and neither does `unzip` from the
# command line). Probably want to use something like `7z x -so
# my_file.zip 2> /dev/null` if we use pandas.
with ZipFile(self.taq_fname) as zfile:
for inside_f in zfile.filelist:
# The original filename is available as inside_f.filename
self.infile_name = inside_f.filename
with zfile.open(inside_f.filename) as infile:
first = infile.readline()
bytes_per_line = len(first)
dtype = (initial_dtype[:RECORD_LEN_TO_LAST_COLUMN_MAP[bytes_per_line]+1] +
[initial_dtype[-1]])
# You need to use bytes to split bytes
# some files (probably older files do not have a record count)
try:
dateish, numlines = first.split(b":")
self.numlines = int(numlines)
# Get dates to combine with times later
# This is a little over-trusting of the spec...
self.month = int(dateish[2:4])
self.day = int(dateish[4:6])
self.year = int(dateish[6:10])
except:
pass
                    if self.do_process_chunk:
yield from self.chunks(self.numlines, infile, self.chunksize) # noqa
else:
more_bytes = True
while (more_bytes):
raw_bytes = infile.read(bytes_per_line * self.chunksize)
all_strings = np.ndarray(len(raw_bytes) // bytes_per_line,
buffer=raw_bytes, dtype=dtype)
if raw_bytes:
yield (all_strings)
else:
more_bytes = False
def process_chunk(self, all_strings):
# This is unnecessary copying
easy_converted = all_strings.astype(easy_dtype)
# These don't have the decimal point in the TAQ file
for dollar_col in ['Bid_Price', 'Ask_Price']:
easy_converted[dollar_col] /= 10000
# Currently, there doesn't seem to be any utility to converting to
# numpy.datetime64 PyTables wants float64's corresponding to the POSIX
# Standard (relative to 1970-01-01, UTC)
converted_time = [datetime(self.year, self.month, self.day,
int(raw[:2]), int(raw[2:4]), int(raw[4:6]),
# msec must be converted to microsec
int(raw[6:9]) * 1000,
tzinfo=timezone('US/Eastern') ).timestamp()
for raw in all_strings['Time'] ]
# More unnecessary copying
records = recfunctions.append_fields(easy_converted, 'Time',
converted_time, usemask=False)
return records
def chunks(self, numlines, infile, chunksize=None):
'''Do the conversion of bytes to numpy "chunks"'''
# Should do check on numlines to make sure we get the right number
while(True):
raw_bytes = infile.read(BYTES_PER_LINE * chunksize)
if not raw_bytes:
break
# If we use asarray with this dtype, it crashes Python! (might not be true anymore)
# ndarray gives 'S' arrays instead of chararrays (as recarray does)
all_strings = np.ndarray(chunksize, buffer=raw_bytes, dtype=initial_dtype)
# This approach doesn't work...
# out[chunk_start:chunk_stop, 1:] = all_strings[:,1:-1]
yield self.process_chunk(all_strings)
# Everything from here down is HDF5 specific
# def setup_hdf5(self, h5_fname_root, numlines):
# # We're using aggressive compression and checksums, since this will
# # likely stick around Stopping one level short of max compression -
# # don't be greedy.
# self.h5 = tb.open_file(h5_fname_root + '.h5', title=h5_fname_root,
# mode='w', filters=tb.Filters(complevel=8,
# complib='blosc:lz4hc',
# fletcher32=True) )
# return self.h5.create_table('/', 'daily_quotes', description=pytables_desc, expectedrows=numlines)
# def finalize_hdf5(self):
# self.h5.close()
# def to_hdf5(self, numlines, infile, out, chunksize=None):
# '''Read raw bytes from TAQ, write to HDF5'''
# # Should I use a context manager here?
# h5_table = self.setup_hdf5(inside_f.filename, numlines)
# try:
# self.to_hdf5(numlines, infile, h5_table)
# finally:
# self.finalize_hdf5()
# # at some point, we might optimize chunksize. For now, assume PyTables is smart
# if chunksize is None:
# chunksize = out.chunkshape[0]
# for chunk in self.to_chunks(numlines, infile, chunksize):
# out.append(chunk)
if __name__ == '__main__':
from sys import argv
from glob import glob
try:
fname = argv[1]
except IndexError:
# Grab the first BBO file we can find
fname = glob('../local_data/EQY_US_ALL_BBO_*.zip')[0]
test_run = TAQ2Chunks(fname)
test_run.to_hdf5()
| isc |
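The reader above turns fixed-width text records into a NumPy structured array by handing the raw bytes straight to `np.ndarray` with a byte-string dtype, then converting selected fields. The toy example below shows the same trick with a made-up three-field record layout; the field names and widths are invented for illustration.

```python
import numpy as np

# Hypothetical 12-byte records: 4-byte symbol, 6-byte price, 2-byte newline
dtype = [("Symbol", "S4"), ("Price", "S6"), ("newline", "S2")]
raw = b"AAPL123.45\r\nMSFT067.80\r\n"

bytes_per_line = 12
records = np.ndarray(len(raw) // bytes_per_line, buffer=raw, dtype=dtype)

prices = records["Price"].astype(np.float64)   # b'123.45' -> 123.45
print(records["Symbol"], prices)
```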
etraiger/PCWG | turbine.py | 1 | 21137 | import math
import interpolators
import scipy.interpolate
import numpy as np
import scipy as sp
from scipy import stats
import sys
import pandas as pd
class PowerCurve:
def __init__(self, powerCurveLevels, referenceDensity, rotorGeometry, powerCol, turbCol, wsCol = None,
countCol = None, fixedTurbulence = None, ratedPower = None,turbulenceRenormalisation=True,
name = 'Undefined', interpolationMode = 'Cubic'):
self.actualPower = powerCol #strings defining column names
self.inputHubWindSpeed = wsCol
self.hubTurbulence = turbCol
self.dataCount = countCol
self.name = name
self.interpolationMode = interpolationMode
if (self.hubTurbulence is not None) and fixedTurbulence != None:
raise Exception("Cannot specify both turbulence levels and fixed turbulence")
self.availablePower = AvailablePower(rotorGeometry.area, referenceDensity)
self.powerCurveLevels = powerCurveLevels
self.referenceDensity = referenceDensity
self.rotorGeometry = rotorGeometry
has_pc = len(self.powerCurveLevels.index) != 0
self.firstWindSpeed = min(self.powerCurveLevels.index) if has_pc else None
self.cutInWindSpeed = self.calculateCutInWindSpeed(powerCurveLevels) if has_pc else None
self.cutOutWindSpeed = self.calculateCutOutWindSpeed(powerCurveLevels) if has_pc else None
if self.inputHubWindSpeed is None:
ws_data = None
else:
ws_data = powerCurveLevels[self.inputHubWindSpeed]
self.powerFunction = self.createFunction(powerCurveLevels[self.actualPower], ws_data) if has_pc else None
self.ratedPower = self.getRatedPower(ratedPower, powerCurveLevels[self.actualPower]) if has_pc else None
if 'Data Count' in self.powerCurveLevels.columns:
self.hours = self.powerCurveLevels['Data Count'].sum()*1.0/6.0
else:
self.hours = 0.0
self.turbulenceFunction = self.createFunction(powerCurveLevels[self.hubTurbulence], ws_data) if has_pc else None
if (turbulenceRenormalisation and has_pc):
print "Calculating zero turbulence curve for {0} Power Curve".format(self.name)
try:
self.calcZeroTurbulencePowerCurve()
print "Calculation of zero turbulence curve for {0} Power Curve successful".format(self.name)
except Exception as error:
print error
print "Calculation of zero turbulence curve for {0} Power Curve unsuccessful".format(self.name)
self.zeroTurbulencePowerCurve = None
self.simulatedPower = None
def calcZeroTurbulencePowerCurve(self):
keys = sorted(self.powerCurveLevels[self.actualPower].keys())
integrationRange = IntegrationRange(0.0, 100.0, 0.1)
self.zeroTurbulencePowerCurve = ZeroTurbulencePowerCurve(keys, self.getArray(self.powerCurveLevels[self.actualPower], keys), self.getArray(self.powerCurveLevels[self.hubTurbulence], keys), integrationRange, self.availablePower)
self.simulatedPower = SimulatedPower(self.zeroTurbulencePowerCurve, integrationRange)
def getRatedPower(self, ratedPower, powerCurveLevels):
if ratedPower == None:
return powerCurveLevels.max()
else:
return ratedPower
def getThresholdWindSpeed(self):
return float(interpolators.LinearPowerCurveInterpolator(self.powerCurveLevels[self.actualPower].as_matrix(), list(self.powerCurveLevels[self.actualPower].index))(0.85*self.ratedPower))
def getTurbulenceLevels(self, powerCurveLevels, turbulenceLevels, fixedTurbulence):
if fixedTurbulence != None:
turbulenceLevels = pd.Series(index = powerCurveLevels.index)
for level in powerCurveLevels.index:
turbulenceLevels[level] = fixedTurbulence
else:
turbulenceLevels = turbulenceLevels
return turbulenceLevels
def getArray(self, dictionary, keys):
array = []
for key in keys:
array.append(dictionary[key])
return array
def createFunction(self, y_data, x_data):
if x_data is None:
x_data = pd.Series(y_data.index, index = y_data.index)
x, y = [], []
for i in y_data.index:
if i in x_data.index:
x.append(x_data[i])
else:
x.append(i)
y.append(y_data[i])
if self.interpolationMode == 'Linear':
return interpolators.LinearPowerCurveInterpolator(x, y)
elif self.interpolationMode == 'Cubic':
return interpolators.CubicPowerCurveInterpolator(x, y, self.cutOutWindSpeed)
else:
raise Exception('Unknown interpolation mode: %s' % self.interpolationMode)
def power(self, windSpeed, turbulence = None, extraTurbCorrection = False):
referencePower = self.powerFunction(windSpeed)
if turbulence == None:
power = referencePower
else:
referenceTurbulence = self.referenceTurbulence(windSpeed)
power = referencePower + self.simulatedPower.power(windSpeed, turbulence) - self.simulatedPower.power(windSpeed, referenceTurbulence)
if extraTurbCorrection: power *= self.calculateExtraTurbulenceCorrection(windSpeed, turbulence, referenceTurbulence)
power = max([0.0, power])
power = min([self.ratedPower, power])
return power
def calculateExtraTurbulenceCorrection(self, windSpeed, turbulence, referenceTurbulence):
saddle = 9.0
xprime = saddle - windSpeed
tprime = (referenceTurbulence - turbulence) / referenceTurbulence
if xprime < 0.0 or tprime < 0.0: return 1.0
a = -0.02 * math.tanh(2.0 * tprime)
b = -0.03 * (math.exp(1.5 * tprime) - 1.0)
loss = a * xprime + b
return 1 + loss
def referenceTurbulence(self, windSpeed):
if windSpeed < self.firstWindSpeed:
return self.turbulenceFunction(self.firstWindSpeed)
elif windSpeed > self.cutOutWindSpeed:
return self.turbulenceFunction(self.cutOutWindSpeed)
else:
return self.turbulenceFunction(windSpeed)
def calculateCutInWindSpeed(self, powerCurveLevels):
return min(self.nonZeroLevels(powerCurveLevels))
def calculateCutOutWindSpeed(self, powerCurveLevels):
return max(self.nonZeroLevels(powerCurveLevels))
def nonZeroLevels(self, powerCurveLevels):
levels = []
for windSpeed in self.powerCurveLevels.index:
if self.powerCurveLevels[self.actualPower][windSpeed] > 0.0:
levels.append(windSpeed)
return levels
def __str__(self):
value = "Wind Speed\tPower\n"
for windSpeed in self.powerCurveLevels:
value += "%0.2f\t%0.2f\n" % (windSpeed, self.power(windSpeed))
return value
class RotorGeometry:
def __init__(self, diameter, hubHeight):
self.diameter = diameter
self.radius = diameter / 2
self.area = math.pi * self.radius ** 2
self.hubHeight = hubHeight
self.lowerTip = self.hubHeight - self.radius
self.upperTip = self.hubHeight + self.radius
def withinRotor(self, height):
return height > self.lowerTip and height < self.upperTip
class InterpolatedNormDist:
def __init__(self):
#speed optimisation
self.xstep = 0.05
self.xend = 5.0
self.xstart = -self.xend
self.steps = int((self.xend - self.xstart) / self.xstep) + 1
x = np.linspace(self.xstart, self.xend, self.steps)
y = []
normDist = NormDist()
for i in range(len(x)):
y.append(normDist.probability(x[i], 0.0, 1.0))
self.f = scipy.interpolate.interp1d(x, y, bounds_error = False, fill_value = 0.0)
def probability(self, windSpeed, windSpeedMean, windSpeedStandardDeviation):
oneOverStandardDeviation = 1.0 / windSpeedStandardDeviation
standardDeviationsFromMean = oneOverStandardDeviation * (windSpeed - windSpeedMean)
return self.f(standardDeviationsFromMean) * oneOverStandardDeviation
class DictionaryNormDist:
def __init__(self):
#speed optimisation
self.decimalPlaces = 2
self.xstep = 0.1 ** self.decimalPlaces
self.xend = 5.0
self.xstart = -self.xend
x = np.arange(self.xstart, self.xend + self.xstep, self.xstep)
self.dictionary = {}
normDist = NormDist()
for i in range(len(x)):
self.dictionary[self.key(x[i])] = normDist.probability(x[i], 0.0, 1.0)
def probability(self, windSpeed, windSpeedMean, windSpeedStandardDeviation):
oneOverStandardDeviation = self.oneOver(windSpeedStandardDeviation)
standardDeviationsFromMean = self.standardDeviationsFromMean(windSpeed, windSpeedMean, oneOverStandardDeviation)
if self.inDictionary(standardDeviationsFromMean):
return self.lookUpDictionary(standardDeviationsFromMean) * oneOverStandardDeviation
else:
return 0.0
def oneOver(self, value):
return 1.0 / value
def standardDeviationsFromMean(self, value, mean, oneOverStandardDeviation):
return oneOverStandardDeviation * (value - mean)
def inDictionary(self, value):
if value < self.xstart: return False
if value > self.xend: return False
return True
def lookUpDictionary(self, value):
return self.dictionary[self.key(value)]
def key(self, value):
return round(value, self.decimalPlaces)
class IntegrationProbabilities:
def __init__(self, windSpeeds, windSpeedStep):
        #speed optimised normal distribution
self.windSpeeds = windSpeeds
self.a = windSpeedStep / math.sqrt(2.0 * math.pi)
def probabilities(self, windSpeedMean, windSpeedStdDev):
if windSpeedStdDev == 0:
return np.nan
oneOverStandardDeviation = 1.0 / windSpeedStdDev
oneOverStandardDeviationSq = oneOverStandardDeviation * oneOverStandardDeviation
b = self.a * oneOverStandardDeviation
c = -0.5 * oneOverStandardDeviationSq
windSpeedMinusMeans = (self.windSpeeds - windSpeedMean)
windSpeedMinusMeanSq = windSpeedMinusMeans * windSpeedMinusMeans
d = c * windSpeedMinusMeanSq
return b * np.exp(d)
class IntegrationRange:
def __init__(self, minimumWindSpeed, maximumWindSpeed, windSpeedStep):
self.minimumWindSpeed = minimumWindSpeed
self.maximumWindSpeed = maximumWindSpeed
self.windSpeedStep = windSpeedStep
self.windSpeeds = np.arange(minimumWindSpeed, maximumWindSpeed, windSpeedStep)
self.integrationProbabilities = IntegrationProbabilities(self.windSpeeds, self.windSpeedStep)
def probabilities(self, windSpeedMean, windSpeedStdDev):
return self.integrationProbabilities.probabilities(windSpeedMean, windSpeedStdDev)
class AvailablePower:
def __init__(self, area, density):
self.area = area
self.density = density
def power(self, windSpeed):
return 0.5 * self.density * self.area * windSpeed * windSpeed * windSpeed / 1000.0
def powerCoefficient(self, windSpeed, actualPower):
return actualPower / self.power(windSpeed)
class ZeroTurbulencePowerCurve:
def __init__(self, referenceWindSpeeds, referencePowers, referenceTurbulences, integrationRange, availablePower):
self.integrationRange = integrationRange
self.initialZeroTurbulencePowerCurve = InitialZeroTurbulencePowerCurve(referenceWindSpeeds, referencePowers, referenceTurbulences, integrationRange, availablePower)
simulatedReferencePowerCurve = SimulatedPowerCurve(referenceWindSpeeds, self.initialZeroTurbulencePowerCurve, referenceTurbulences, integrationRange)
self.windSpeeds = referenceWindSpeeds
self.powers = []
for i in range(len(self.windSpeeds)):
power = referencePowers[i] - simulatedReferencePowerCurve.powers[i] + self.initialZeroTurbulencePowerCurve.powers[i]
self.powers.append(power)
#print "%f %f" % (self.windSpeeds[i], self.powers[i])
self.powerFunction = scipy.interpolate.interp1d(self.windSpeeds, self.powers)
self.minWindSpeed = min(self.windSpeeds)
self.maxWindSpeed = max(self.windSpeeds)
self.maxPower = max(self.powers)
self.dfPowerLevels = pd.DataFrame(self.powers, index = self.windSpeeds, columns = ['Power'])
def power(self, windSpeed):
if windSpeed <= self.minWindSpeed:
return 0.0
elif windSpeed >= self.maxWindSpeed:
return self.maxPower
else:
return self.powerFunction(windSpeed)
class InitialZeroTurbulencePowerCurve:
def __init__(self, referenceWindSpeeds, referencePowers, referenceTurbulences, integrationRange, availablePower):
self.maxIterations = 5
self.integrationRange = integrationRange
self.availablePower = availablePower
self.referenceWindSpeeds = referenceWindSpeeds
self.referencePowers = referencePowers
self.referenceTurbulences = referenceTurbulences
self.referencePowerCurveStats = IterationPowerCurveStats(referenceWindSpeeds, referencePowers, availablePower)
#print "%f %f %f" % (self.referencePowerCurveStats.ratedPower, self.referencePowerCurveStats.cutInWindSpeed, self.referencePowerCurveStats.cpMax)
self.selectedStats = self.solve(self.referencePowerCurveStats)
selectedIteration = InitialZeroTurbulencePowerCurveIteration(referenceWindSpeeds,
self.availablePower,
self.selectedStats.ratedPower,
self.selectedStats.cutInWindSpeed,
self.selectedStats.cpMax)
self.ratedWindSpeed = selectedIteration.ratedWindSpeed
self.windSpeeds = selectedIteration.windSpeeds
self.powers = selectedIteration.powers
self.power = selectedIteration.power
def solve(self, previousIterationStats, iterationCount = 1):
if iterationCount > self.maxIterations: raise Exception("Failed to solve initial zero turbulence curve in permitted number of iterations")
iterationZeroTurbCurve = InitialZeroTurbulencePowerCurveIteration(self.integrationRange.windSpeeds,
self.availablePower,
previousIterationStats.ratedPower,
previousIterationStats.cutInWindSpeed,
previousIterationStats.cpMax)
iterationSimulatedCurve = SimulatedPowerCurve(self.referenceWindSpeeds, iterationZeroTurbCurve, self.referenceTurbulences, self.integrationRange)
iterationSimulatedCurveStats = IterationPowerCurveStats(iterationSimulatedCurve.windSpeeds, iterationSimulatedCurve.powers, self.availablePower)
convergenceCheck = IterationPowerCurveConvergenceCheck(self.referencePowerCurveStats, iterationSimulatedCurveStats)
#print "%f %f %f" % (iterationSimulatedCurveStats.ratedPower, iterationSimulatedCurveStats.cutInWindSpeed, iterationSimulatedCurveStats.cpMax)
#print "%s %s %s" % (convergenceCheck.ratedPowerConverged, convergenceCheck.cutInConverged, convergenceCheck.cpMaxConverged)
if convergenceCheck.isConverged:
return previousIterationStats
else:
return self.solve(IncrementedPowerCurveStats(previousIterationStats, convergenceCheck), iterationCount + 1)
class IterationPowerCurveConvergenceCheck:
def __init__(self, referenceStats, iterationStats):
        self.thresholdPowerDiff = referenceStats.ratedPower * 0.001
        self.thresholdCutInWindSpeedDiff = 0.5
        self.thresholdCpMaxDiff = 0.01
        self.ratedPowerDiff = iterationStats.ratedPower - referenceStats.ratedPower
        self.cutInDiff = iterationStats.cutInWindSpeed - referenceStats.cutInWindSpeed
        self.cpMaxDiff = iterationStats.cpMax - referenceStats.cpMax
        self.ratedPowerConverged = abs(self.ratedPowerDiff) < self.thresholdPowerDiff
        self.cutInConverged = abs(self.cutInDiff) <= self.thresholdCutInWindSpeedDiff
        self.cpMaxConverged = abs(self.cpMaxDiff) <= self.thresholdCpMaxDiff
self.isConverged = self.ratedPowerConverged and self.cutInConverged and self.cpMaxConverged
class IncrementedPowerCurveStats:
def __init__(self, previousIterationStats, convergenceCheck):
if convergenceCheck.ratedPowerConverged:
self.ratedPower = previousIterationStats.ratedPower
else:
self.ratedPower = previousIterationStats.ratedPower - convergenceCheck.ratedPowerDiff
if convergenceCheck.cutInConverged:
self.cutInWindSpeed = previousIterationStats.cutInWindSpeed
else:
self.cutInWindSpeed = previousIterationStats.cutInWindSpeed - convergenceCheck.cutInDiff
if convergenceCheck.cpMaxConverged:
self.cpMax = previousIterationStats.cpMax
else:
self.cpMax = previousIterationStats.cpMax - convergenceCheck.cpMaxDiff
class InitialZeroTurbulencePowerCurveIteration:
def __init__(self, windSpeeds, availablePower, ratedPower, cutInWindSpeed, cpMax):
self.windSpeeds = windSpeeds
self.powers = []
self.ratedWindSpeed = ((2.0 * ratedPower * 1000.0)/(availablePower.density * cpMax * availablePower.area)) ** (1.0 / 3.0)
self.ratedPower = ratedPower
self.cutInWindSpeed = cutInWindSpeed
self.cpMax = cpMax
self.availablePower = availablePower
for windSpeed in self.windSpeeds:
self.powers.append(self.power(windSpeed))
def power(self, windSpeed):
if windSpeed > self.cutInWindSpeed:
if windSpeed < self.ratedWindSpeed:
return self.availablePower.power(windSpeed) * self.cpMax
else:
return self.ratedPower
else:
return 0.0
class IterationPowerCurveStats:
def __init__(self, windSpeeds, powers, availablePower):
self.ratedPower = max(powers)
thresholdPower = self.ratedPower * 0.001
operatingWindSpeeds = []
cps = []
for i in range(len(windSpeeds)):
windSpeed = windSpeeds[i]
power = powers[i]
cp = availablePower.powerCoefficient(windSpeed, power)
cps.append(availablePower.powerCoefficient(windSpeed, power))
if power >= thresholdPower: operatingWindSpeeds.append(windSpeed)
self.cpMax = max(cps)
if len(operatingWindSpeeds) > 0:
self.cutInWindSpeed = min(operatingWindSpeeds)
else:
self.cutInWindSpeed = 0.0
class SimulatedPower:
def __init__(self, zeroTurbulencePowerCurve, integrationRange):
self.zeroTurbulencePowerCurve = zeroTurbulencePowerCurve
self.integrationRange = integrationRange
integrationPowers = []
for windSpeed in np.nditer(self.integrationRange.windSpeeds):
integrationPowers.append(self.zeroTurbulencePowerCurve.power(windSpeed))
self.integrationPowers = np.array(integrationPowers)
def power(self, windSpeed, turbulence):
standardDeviation = windSpeed * turbulence
integrationProbabilities = self.integrationRange.probabilities(windSpeed, standardDeviation)
return np.sum(integrationProbabilities * self.integrationPowers) / np.sum(integrationProbabilities)
class SimulatedPowerCurve:
def __init__(self, windSpeeds, zeroTurbulencePowerCurve, turbulences, integrationRange):
simulatedPower = SimulatedPower(zeroTurbulencePowerCurve, integrationRange)
self.windSpeeds = windSpeeds
self.turbulences = turbulences
self.powers = []
for i in range(len(windSpeeds)):
windSpeed = windSpeeds[i]
turbulence = turbulences[i]
power = simulatedPower.power(windSpeed, turbulence)
self.powers.append(power)
| mit |
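`SimulatedPower` above estimates turbulence-affected power as a probability-weighted average of the zero-turbulence curve over a Gaussian wind-speed distribution (mean = hub wind speed, standard deviation = wind speed times turbulence intensity). The sketch below reproduces that weighting with an invented power curve, so the numbers are illustrative only.

```python
import numpy as np

speeds = np.arange(0.0, 30.0, 0.1)                           # integration range, m/s
toy_curve = np.clip(2.0 * (speeds - 3.0) ** 3, 0.0, 2000.0)  # made-up zero-turbulence curve, kW

def simulated_power(mean_ws, turbulence):
    """Gaussian-weighted average of the curve, mirroring SimulatedPower.power."""
    std = mean_ws * turbulence
    weights = np.exp(-0.5 * ((speeds - mean_ws) / std) ** 2)
    return np.sum(weights * toy_curve) / np.sum(weights)

print(simulated_power(8.0, 0.10))   # higher turbulence smooths the knee of the curve
print(simulated_power(8.0, 0.20))
```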
simon-pepin/scikit-learn | sklearn/preprocessing/data.py | 113 | 56747 | # Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Andreas Mueller <[email protected]>
# Eric Martin <[email protected]>
# License: BSD 3 clause
from itertools import chain, combinations
import numbers
import warnings
import numpy as np
from scipy import sparse
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..utils import check_array
from ..utils.extmath import row_norms
from ..utils.fixes import combinations_with_replacement as combinations_w_r
from ..utils.sparsefuncs_fast import (inplace_csr_row_normalize_l1,
inplace_csr_row_normalize_l2)
from ..utils.sparsefuncs import (inplace_column_scale, mean_variance_axis,
min_max_axis, inplace_row_scale)
from ..utils.validation import check_is_fitted, FLOAT_DTYPES
zip = six.moves.zip
map = six.moves.map
range = six.moves.range
__all__ = [
'Binarizer',
'KernelCenterer',
'MinMaxScaler',
'MaxAbsScaler',
'Normalizer',
'OneHotEncoder',
'RobustScaler',
'StandardScaler',
'add_dummy_feature',
'binarize',
'normalize',
'scale',
'robust_scale',
'maxabs_scale',
'minmax_scale',
]
def _mean_and_std(X, axis=0, with_mean=True, with_std=True):
"""Compute mean and std deviation for centering, scaling.
Zero valued std components are reset to 1.0 to avoid NaNs when scaling.
"""
X = np.asarray(X)
Xr = np.rollaxis(X, axis)
if with_mean:
mean_ = Xr.mean(axis=0)
else:
mean_ = None
if with_std:
std_ = Xr.std(axis=0)
std_ = _handle_zeros_in_scale(std_)
else:
std_ = None
return mean_, std_
def _handle_zeros_in_scale(scale):
''' Makes sure that whenever scale is zero, we handle it correctly.
This happens in most scalers when we have constant features.'''
# if we are fitting on 1D arrays, scale might be a scalar
if np.isscalar(scale):
if scale == 0:
scale = 1.
elif isinstance(scale, np.ndarray):
scale[scale == 0.0] = 1.0
scale[~np.isfinite(scale)] = 1.0
return scale
def scale(X, axis=0, with_mean=True, with_std=True, copy=True):
"""Standardize a dataset along any axis
Center to the mean and component wise scale to unit variance.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : array-like or CSR matrix.
The data to center and scale.
axis : int (0 by default)
axis used to compute the means and standard deviations along. If 0,
independently standardize each feature, otherwise (if 1) standardize
each sample.
with_mean : boolean, True by default
If True, center the data before scaling.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_mean=False` (in that case, only variance scaling will be
performed on the features of the CSR matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSR matrix.
See also
--------
:class:`sklearn.preprocessing.StandardScaler` to perform centering and
scaling using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
X = check_array(X, accept_sparse='csr', copy=copy, ensure_2d=False,
warn_on_dtype=True, estimator='the scale function',
dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` instead"
" See docstring for motivation and alternatives.")
if axis != 0:
raise ValueError("Can only scale sparse matrix on axis=0, "
" got axis=%d" % axis)
if not sparse.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
_, var = mean_variance_axis(X, axis=0)
var = _handle_zeros_in_scale(var)
inplace_column_scale(X, 1 / np.sqrt(var))
else:
X = np.asarray(X)
mean_, std_ = _mean_and_std(
X, axis, with_mean=with_mean, with_std=with_std)
if copy:
X = X.copy()
# Xr is a view on the original array that enables easy use of
# broadcasting on the axis in which we are interested in
Xr = np.rollaxis(X, axis)
if with_mean:
Xr -= mean_
mean_1 = Xr.mean(axis=0)
# Verify that mean_1 is 'close to zero'. If X contains very
# large values, mean_1 can also be very large, due to a lack of
# precision of mean_. In this case, a pre-scaling of the
# concerned feature is efficient, for instance by its mean or
# maximum.
if not np.allclose(mean_1, 0):
warnings.warn("Numerical issues were encountered "
"when centering the data "
"and might not be solved. Dataset may "
"contain too large values. You may need "
"to prescale your features.")
Xr -= mean_1
if with_std:
Xr /= std_
if with_mean:
mean_2 = Xr.mean(axis=0)
# If mean_2 is not 'close to zero', it comes from the fact that
# std_ is very small so that mean_2 = mean_1/std_ > 0, even if
# mean_1 was close to zero. The problem is thus essentially due
# to the lack of precision of mean_. A solution is then to
                # subtract the mean again:
if not np.allclose(mean_2, 0):
warnings.warn("Numerical issues were encountered "
"when scaling the data "
"and might not be solved. The standard "
"deviation of the data is probably "
"very close to 0. ")
Xr -= mean_2
return X
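# A minimal usage sketch for ``scale`` (toy values chosen only for illustration,
# not part of the module's test suite):
#
#     >>> import numpy as np
#     >>> from sklearn.preprocessing import scale
#     >>> X = np.array([[1., -1., 2.], [2., 0., 0.], [0., 1., -1.]])
#     >>> X_scaled = scale(X)
#     >>> X_scaled.mean(axis=0)   # approximately zero for every column
#     >>> X_scaled.std(axis=0)    # unit standard deviation for every column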
class MinMaxScaler(BaseEstimator, TransformerMixin):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
feature_range: tuple (min, max), default=(0, 1)
Desired range of transformed data.
copy : boolean, optional, default True
Set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array).
Attributes
----------
min_ : ndarray, shape (n_features,)
Per feature adjustment for minimum.
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
"""
def __init__(self, feature_range=(0, 1), copy=True):
self.feature_range = feature_range
self.copy = copy
def fit(self, X, y=None):
"""Compute the minimum and maximum to be used for later scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
X = check_array(X, copy=self.copy, ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
feature_range = self.feature_range
if feature_range[0] >= feature_range[1]:
raise ValueError("Minimum of desired feature range must be smaller"
" than maximum. Got %s." % str(feature_range))
data_min = np.min(X, axis=0)
data_range = np.max(X, axis=0) - data_min
data_range = _handle_zeros_in_scale(data_range)
self.scale_ = (feature_range[1] - feature_range[0]) / data_range
self.min_ = feature_range[0] - data_min * self.scale_
self.data_range = data_range
self.data_min = data_min
return self
def transform(self, X):
"""Scaling features of X according to feature_range.
Parameters
----------
X : array-like with shape [n_samples, n_features]
Input data that will be transformed.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, ensure_2d=False)
X *= self.scale_
X += self.min_
return X
def inverse_transform(self, X):
"""Undo the scaling of X according to feature_range.
Parameters
----------
X : array-like with shape [n_samples, n_features]
Input data that will be transformed.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, ensure_2d=False)
X -= self.min_
X /= self.scale_
return X
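# An illustrative fit/transform round trip for ``MinMaxScaler`` (toy data chosen
# only to make the formula in the class docstring concrete):
#
#     >>> from sklearn.preprocessing import MinMaxScaler
#     >>> data = [[-1., 2.], [-0.5, 6.], [0., 10.], [1., 18.]]
#     >>> scaler = MinMaxScaler()
#     >>> scaled = scaler.fit_transform(data)          # each column now spans [0, 1]
#     >>> original = scaler.inverse_transform(scaled)  # recovers the input values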
def minmax_scale(X, feature_range=(0, 1), axis=0, copy=True):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
feature_range: tuple (min, max), default=(0, 1)
Desired range of transformed data.
axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
"""
s = MinMaxScaler(feature_range=feature_range, copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
class StandardScaler(BaseEstimator, TransformerMixin):
"""Standardize features by removing the mean and scaling to unit variance
Centering and scaling happen independently on each feature by computing
the relevant statistics on the samples in the training set. Mean and
standard deviation are then stored to be used on later data using the
`transform` method.
Standardization of a dataset is a common requirement for many
machine learning estimators: they might behave badly if the
    individual features do not more or less look like standard normally
distributed data (e.g. Gaussian with 0 mean and unit variance).
For instance many elements used in the objective function of
a learning algorithm (such as the RBF kernel of Support Vector
Machines or the L1 and L2 regularizers of linear models) assume that
all features are centered around 0 and have variance in the same
order. If a feature has a variance that is orders of magnitude larger
    than others, it might dominate the objective function and make the
estimator unable to learn from other features correctly as expected.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
with_mean : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
mean_ : array of floats with shape [n_features]
The mean value for each feature in the training set.
std_ : array of floats with shape [n_features]
The standard deviation for each feature in the training set.
Set to one if the standard deviation is zero for a given feature.
See also
--------
:func:`sklearn.preprocessing.scale` to perform centering and
scaling without using the ``Transformer`` object oriented API
:class:`sklearn.decomposition.RandomizedPCA` with `whiten=True`
to further remove the linear correlation across features.
"""
def __init__(self, copy=True, with_mean=True, with_std=True):
self.with_mean = with_mean
self.with_std = with_std
self.copy = copy
def fit(self, X, y=None):
"""Compute the mean and std to be used for later scaling.
Parameters
----------
X : array-like or CSR matrix with shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
"""
X = check_array(X, accept_sparse='csr', copy=self.copy,
ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
self.mean_ = None
if self.with_std:
var = mean_variance_axis(X, axis=0)[1]
self.std_ = np.sqrt(var)
self.std_ = _handle_zeros_in_scale(self.std_)
else:
self.std_ = None
return self
else:
self.mean_, self.std_ = _mean_and_std(
X, axis=0, with_mean=self.with_mean, with_std=self.with_std)
return self
def transform(self, X, y=None, copy=None):
"""Perform standardization by centering and scaling
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to scale along the features axis.
"""
check_is_fitted(self, 'std_')
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr', copy=copy,
ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
if self.std_ is not None:
inplace_column_scale(X, 1 / self.std_)
else:
if self.with_mean:
X -= self.mean_
if self.with_std:
X /= self.std_
return X
def inverse_transform(self, X, copy=None):
"""Scale back the data to the original representation
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to scale along the features axis.
"""
check_is_fitted(self, 'std_')
copy = copy if copy is not None else self.copy
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot uncenter sparse matrices: pass `with_mean=False` "
"instead See docstring for motivation and alternatives.")
if not sparse.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
if self.std_ is not None:
inplace_column_scale(X, self.std_)
else:
X = np.asarray(X)
if copy:
X = X.copy()
if self.with_std:
X *= self.std_
if self.with_mean:
X += self.mean_
return X
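# An illustrative use of ``StandardScaler`` (toy data; the calls below only sketch
# the fit/transform workflow, they are not a reference result):
#
#     >>> import numpy as np
#     >>> from sklearn.preprocessing import StandardScaler
#     >>> X = np.array([[0., 0.], [0., 0.], [1., 1.], [1., 1.]])
#     >>> scaler = StandardScaler().fit(X)
#     >>> scaler.mean_                      # per-feature means learned from X
#     >>> scaler.transform([[2., 2.]])      # standardized with the fitted mean/std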
class MaxAbsScaler(BaseEstimator, TransformerMixin):
"""Scale each feature by its maximum absolute value.
This estimator scales and translates each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0. It does not shift/center the data, and
thus does not destroy any sparsity.
This scaler can also be applied to sparse CSR or CSC matrices.
Parameters
----------
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
Attributes
----------
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
"""
def __init__(self, copy=True):
self.copy = copy
def fit(self, X, y=None):
"""Compute the minimum and maximum to be used for later scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
mins, maxs = min_max_axis(X, axis=0)
scales = np.maximum(np.abs(mins), np.abs(maxs))
else:
scales = np.abs(X).max(axis=0)
scales = np.array(scales)
scales = scales.reshape(-1)
self.scale_ = _handle_zeros_in_scale(scales)
return self
def transform(self, X, y=None):
"""Scale the data
Parameters
----------
X : array-like or CSR matrix.
The data that should be scaled.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if X.shape[0] == 1:
inplace_row_scale(X, 1.0 / self.scale_)
else:
inplace_column_scale(X, 1.0 / self.scale_)
else:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : array-like or CSR matrix.
The data that should be transformed back.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if X.shape[0] == 1:
inplace_row_scale(X, self.scale_)
else:
inplace_column_scale(X, self.scale_)
else:
X *= self.scale_
return X
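# An illustrative use of ``MaxAbsScaler`` (toy dense matrix; sparse CSR/CSC input
# is scaled the same way without densifying):
#
#     >>> import numpy as np
#     >>> from sklearn.preprocessing import MaxAbsScaler
#     >>> X = np.array([[1., -2.], [2., 4.], [-4., 1.]])
#     >>> MaxAbsScaler().fit_transform(X)   # each column divided by its max absolute value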
def maxabs_scale(X, axis=0, copy=True):
"""Scale each feature to the [-1, 1] range without breaking the sparsity.
This estimator scales each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0.
This scaler can also be applied to sparse CSR or CSC matrices.
Parameters
----------
axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
"""
s = MaxAbsScaler(copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
class RobustScaler(BaseEstimator, TransformerMixin):
"""Scale features using statistics that are robust to outliers.
This Scaler removes the median and scales the data according to
the Interquartile Range (IQR). The IQR is the range between the 1st
quartile (25th quantile) and the 3rd quartile (75th quantile).
Centering and scaling happen independently on each feature (or each
sample, depending on the `axis` argument) by computing the relevant
statistics on the samples in the training set. Median and interquartile
range are then stored to be used on later data using the `transform`
method.
Standardization of a dataset is a common requirement for many
machine learning estimators. Typically this is done by removing the mean
and scaling to unit variance. However, outliers can often influence the
sample mean / variance in a negative way. In such cases, the median and
the interquartile range often give better results.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
with_centering : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_scaling : boolean, True by default
If True, scale the data to interquartile range.
copy : boolean, optional, default is True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
center_ : array of floats
The median value for each feature in the training set.
scale_ : array of floats
The (scaled) interquartile range for each feature in the training set.
See also
--------
:class:`sklearn.preprocessing.StandardScaler` to perform centering
and scaling using mean and variance.
:class:`sklearn.decomposition.RandomizedPCA` with `whiten=True`
to further remove the linear correlation across features.
Notes
-----
See examples/preprocessing/plot_robust_scaling.py for an example.
http://en.wikipedia.org/wiki/Median_(statistics)
http://en.wikipedia.org/wiki/Interquartile_range
"""
def __init__(self, with_centering=True, with_scaling=True, copy=True):
self.with_centering = with_centering
self.with_scaling = with_scaling
self.copy = copy
def _check_array(self, X, copy):
"""Makes sure centering is not enabled for sparse matrices."""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if self.with_centering:
raise ValueError(
"Cannot center sparse matrices: use `with_centering=False`"
" instead. See docstring for motivation and alternatives.")
return X
def fit(self, X, y=None):
"""Compute the median and quantiles to be used for scaling.
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to compute the median and quantiles
used for later scaling along the features axis.
"""
if sparse.issparse(X):
raise TypeError("RobustScaler cannot be fitted on sparse inputs")
X = self._check_array(X, self.copy)
if self.with_centering:
self.center_ = np.median(X, axis=0)
if self.with_scaling:
q = np.percentile(X, (25, 75), axis=0)
self.scale_ = (q[1] - q[0])
self.scale_ = _handle_zeros_in_scale(self.scale_)
return self
def transform(self, X, y=None):
"""Center and scale the data
Parameters
----------
X : array-like or CSR matrix.
The data used to scale along the specified axis.
"""
if self.with_centering:
check_is_fitted(self, 'center_')
if self.with_scaling:
check_is_fitted(self, 'scale_')
X = self._check_array(X, self.copy)
if sparse.issparse(X):
if self.with_scaling:
if X.shape[0] == 1:
inplace_row_scale(X, 1.0 / self.scale_)
                else:
inplace_column_scale(X, 1.0 / self.scale_)
else:
if self.with_centering:
X -= self.center_
if self.with_scaling:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : array-like or CSR matrix.
The data used to scale along the specified axis.
"""
if self.with_centering:
check_is_fitted(self, 'center_')
if self.with_scaling:
check_is_fitted(self, 'scale_')
X = self._check_array(X, self.copy)
if sparse.issparse(X):
if self.with_scaling:
if X.shape[0] == 1:
inplace_row_scale(X, self.scale_)
else:
inplace_column_scale(X, self.scale_)
else:
if self.with_scaling:
X *= self.scale_
if self.with_centering:
X += self.center_
return X
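# An illustrative use of ``RobustScaler`` on data containing an outlier (toy values):
#
#     >>> import numpy as np
#     >>> from sklearn.preprocessing import RobustScaler
#     >>> X = np.array([[1., 0.], [2., 0.], [3., 1.], [100., 1.]])  # 100. is an outlier
#     >>> RobustScaler().fit_transform(X)
#
# The data are centered on the median and scaled by the IQR, so the single
# outlier has little influence on the fitted statistics.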
def robust_scale(X, axis=0, with_centering=True, with_scaling=True, copy=True):
"""Standardize a dataset along any axis
Center to the median and component wise scale
according to the interquartile range.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : array-like.
The data to center and scale.
axis : int (0 by default)
axis used to compute the medians and IQR along. If 0,
independently scale each feature, otherwise (if 1) scale
each sample.
with_centering : boolean, True by default
If True, center the data before scaling.
with_scaling : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default is True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_centering=False` (in that case, only variance scaling will be
performed on the features of the CSR matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSR matrix.
See also
--------
:class:`sklearn.preprocessing.RobustScaler` to perform centering and
scaling using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
s = RobustScaler(with_centering=with_centering, with_scaling=with_scaling,
copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
class PolynomialFeatures(BaseEstimator, TransformerMixin):
"""Generate polynomial and interaction features.
Generate a new feature matrix consisting of all polynomial combinations
of the features with degree less than or equal to the specified degree.
For example, if an input sample is two dimensional and of the form
[a, b], the degree-2 polynomial features are [1, a, b, a^2, ab, b^2].
Parameters
----------
degree : integer
The degree of the polynomial features. Default = 2.
interaction_only : boolean, default = False
If true, only interaction features are produced: features that are
products of at most ``degree`` *distinct* input features (so not
``x[1] ** 2``, ``x[0] * x[2] ** 3``, etc.).
include_bias : boolean
If True (default), then include a bias column, the feature in which
all polynomial powers are zero (i.e. a column of ones - acts as an
intercept term in a linear model).
Examples
--------
>>> X = np.arange(6).reshape(3, 2)
>>> X
array([[0, 1],
[2, 3],
[4, 5]])
>>> poly = PolynomialFeatures(2)
>>> poly.fit_transform(X)
array([[ 1, 0, 1, 0, 0, 1],
[ 1, 2, 3, 4, 6, 9],
[ 1, 4, 5, 16, 20, 25]])
>>> poly = PolynomialFeatures(interaction_only=True)
>>> poly.fit_transform(X)
array([[ 1, 0, 1, 0],
[ 1, 2, 3, 6],
[ 1, 4, 5, 20]])
Attributes
----------
powers_ : array, shape (n_input_features, n_output_features)
powers_[i, j] is the exponent of the jth input in the ith output.
n_input_features_ : int
The total number of input features.
n_output_features_ : int
The total number of polynomial output features. The number of output
features is computed by iterating over all suitably sized combinations
of input features.
Notes
-----
Be aware that the number of features in the output array scales
polynomially in the number of features of the input array, and
exponentially in the degree. High degrees can cause overfitting.
See :ref:`examples/linear_model/plot_polynomial_interpolation.py
<example_linear_model_plot_polynomial_interpolation.py>`
"""
def __init__(self, degree=2, interaction_only=False, include_bias=True):
self.degree = degree
self.interaction_only = interaction_only
self.include_bias = include_bias
@staticmethod
def _combinations(n_features, degree, interaction_only, include_bias):
comb = (combinations if interaction_only else combinations_w_r)
start = int(not include_bias)
return chain.from_iterable(comb(range(n_features), i)
for i in range(start, degree + 1))
@property
def powers_(self):
check_is_fitted(self, 'n_input_features_')
combinations = self._combinations(self.n_input_features_, self.degree,
self.interaction_only,
self.include_bias)
return np.vstack(np.bincount(c, minlength=self.n_input_features_)
for c in combinations)
def fit(self, X, y=None):
"""
Compute number of output features.
"""
n_samples, n_features = check_array(X).shape
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
self.n_input_features_ = n_features
self.n_output_features_ = sum(1 for _ in combinations)
return self
def transform(self, X, y=None):
"""Transform data to polynomial features
Parameters
----------
X : array with shape [n_samples, n_features]
The data to transform, row by row.
Returns
-------
XP : np.ndarray shape [n_samples, NP]
The matrix of features, where NP is the number of polynomial
features generated from the combination of inputs.
"""
check_is_fitted(self, ['n_input_features_', 'n_output_features_'])
X = check_array(X)
n_samples, n_features = X.shape
if n_features != self.n_input_features_:
raise ValueError("X shape does not match training shape")
# allocate output data
XP = np.empty((n_samples, self.n_output_features_), dtype=X.dtype)
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
for i, c in enumerate(combinations):
XP[:, i] = X[:, c].prod(1)
return XP
def normalize(X, norm='l2', axis=1, copy=True):
"""Scale input vectors individually to unit norm (vector length).
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to normalize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
The norm to use to normalize each non zero sample (or each non-zero
feature if axis is 0).
axis : 0 or 1, optional (1 by default)
axis used to normalize the data along. If 1, independently normalize
each sample, otherwise (if 0) normalize each feature.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
See also
--------
:class:`sklearn.preprocessing.Normalizer` to perform normalization
using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
if norm not in ('l1', 'l2', 'max'):
raise ValueError("'%s' is not a supported norm" % norm)
if axis == 0:
sparse_format = 'csc'
elif axis == 1:
sparse_format = 'csr'
else:
raise ValueError("'%d' is not a supported axis" % axis)
X = check_array(X, sparse_format, copy=copy, warn_on_dtype=True,
estimator='the normalize function', dtype=FLOAT_DTYPES)
if axis == 0:
X = X.T
if sparse.issparse(X):
if norm == 'l1':
inplace_csr_row_normalize_l1(X)
elif norm == 'l2':
inplace_csr_row_normalize_l2(X)
elif norm == 'max':
_, norms = min_max_axis(X, 1)
norms = norms.repeat(np.diff(X.indptr))
mask = norms != 0
X.data[mask] /= norms[mask]
else:
if norm == 'l1':
norms = np.abs(X).sum(axis=1)
elif norm == 'l2':
norms = row_norms(X)
elif norm == 'max':
norms = np.max(X, axis=1)
norms = _handle_zeros_in_scale(norms)
X /= norms[:, np.newaxis]
if axis == 0:
X = X.T
return X
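# An illustrative use of ``normalize`` (toy rows; with the default axis=1 every
# sample is rescaled to unit norm):
#
#     >>> from sklearn.preprocessing import normalize
#     >>> X = [[4., 1., 2., 2.], [1., 3., 9., 3.], [5., 7., 5., 1.]]
#     >>> normalize(X, norm='l1')   # rows sum to one in absolute value
#     >>> normalize(X, norm='l2')   # rows have unit Euclidean length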
class Normalizer(BaseEstimator, TransformerMixin):
"""Normalize samples individually to unit norm.
Each sample (i.e. each row of the data matrix) with at least one
non zero component is rescaled independently of other samples so
that its norm (l1 or l2) equals one.
This transformer is able to work both with dense numpy arrays and
scipy.sparse matrix (use CSR format if you want to avoid the burden of
a copy / conversion).
Scaling inputs to unit norms is a common operation for text
classification or clustering for instance. For instance the dot
product of two l2-normalized TF-IDF vectors is the cosine similarity
of the vectors and is the base similarity metric for the Vector
Space Model commonly used by the Information Retrieval community.
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
The norm to use to normalize each non zero sample.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix).
Notes
-----
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
See also
--------
:func:`sklearn.preprocessing.normalize` equivalent function
without the object oriented API
"""
def __init__(self, norm='l2', copy=True):
self.norm = norm
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
X = check_array(X, accept_sparse='csr')
return self
def transform(self, X, y=None, copy=None):
"""Scale each non zero row of X to unit norm
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to normalize, row by row. scipy.sparse matrices should be
in CSR format to avoid an un-necessary copy.
"""
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr')
return normalize(X, norm=self.norm, axis=1, copy=copy)
def binarize(X, threshold=0.0, copy=True):
"""Boolean thresholding of array-like or scipy.sparse matrix
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR or CSC format to avoid an
un-necessary copy.
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy
(if the input is already a numpy array or a scipy.sparse CSR / CSC
matrix and if axis is 1).
See also
--------
:class:`sklearn.preprocessing.Binarizer` to perform binarization
using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
X = check_array(X, accept_sparse=['csr', 'csc'], copy=copy)
if sparse.issparse(X):
if threshold < 0:
raise ValueError('Cannot binarize a sparse matrix with threshold '
'< 0')
cond = X.data > threshold
not_cond = np.logical_not(cond)
X.data[cond] = 1
X.data[not_cond] = 0
X.eliminate_zeros()
else:
cond = X > threshold
not_cond = np.logical_not(cond)
X[cond] = 1
X[not_cond] = 0
return X
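# An illustrative use of ``binarize`` (toy matrix; entries strictly greater than
# the threshold become 1, everything else 0):
#
#     >>> from sklearn.preprocessing import binarize
#     >>> X = [[1., -1., 2.], [2., 0., 0.], [0., 1.1, 1.2]]
#     >>> binarize(X, threshold=1.0)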
class Binarizer(BaseEstimator, TransformerMixin):
"""Binarize data (set feature values to 0 or 1) according to a threshold
Values greater than the threshold map to 1, while values less than
or equal to the threshold map to 0. With the default threshold of 0,
only positive values map to 1.
Binarization is a common operation on text count data where the
analyst can decide to only consider the presence or absence of a
feature rather than a quantified number of occurrences for instance.
It can also be used as a pre-processing step for estimators that
consider boolean random variables (e.g. modelled using the Bernoulli
distribution in a Bayesian setting).
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy (if
the input is already a numpy array or a scipy.sparse CSR matrix).
Notes
-----
If the input is a sparse matrix, only the non-zero values are subject
to update by the Binarizer class.
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
"""
def __init__(self, threshold=0.0, copy=True):
self.threshold = threshold
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
check_array(X, accept_sparse='csr')
return self
def transform(self, X, y=None, copy=None):
"""Binarize each element of X
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
"""
copy = copy if copy is not None else self.copy
return binarize(X, threshold=self.threshold, copy=copy)
class KernelCenterer(BaseEstimator, TransformerMixin):
"""Center a kernel matrix
Let K(x, z) be a kernel defined by phi(x)^T phi(z), where phi is a
function mapping x to a Hilbert space. KernelCenterer centers (i.e.,
normalize to have zero mean) the data without explicitly computing phi(x).
It is equivalent to centering phi(x) with
sklearn.preprocessing.StandardScaler(with_std=False).
Read more in the :ref:`User Guide <kernel_centering>`.
"""
def fit(self, K, y=None):
"""Fit KernelCenterer
Parameters
----------
K : numpy array of shape [n_samples, n_samples]
Kernel matrix.
Returns
-------
self : returns an instance of self.
"""
K = check_array(K)
n_samples = K.shape[0]
self.K_fit_rows_ = np.sum(K, axis=0) / n_samples
self.K_fit_all_ = self.K_fit_rows_.sum() / n_samples
return self
def transform(self, K, y=None, copy=True):
"""Center kernel matrix.
Parameters
----------
K : numpy array of shape [n_samples1, n_samples2]
Kernel matrix.
copy : boolean, optional, default True
Set to False to perform inplace computation.
Returns
-------
K_new : numpy array of shape [n_samples1, n_samples2]
"""
check_is_fitted(self, 'K_fit_all_')
K = check_array(K)
if copy:
K = K.copy()
K_pred_cols = (np.sum(K, axis=1) /
self.K_fit_rows_.shape[0])[:, np.newaxis]
K -= self.K_fit_rows_
K -= K_pred_cols
K += self.K_fit_all_
return K
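# An illustrative use of ``KernelCenterer`` on a precomputed linear kernel (toy
# data; centering K this way matches computing the kernel on mean-centered X):
#
#     >>> import numpy as np
#     >>> from sklearn.preprocessing import KernelCenterer
#     >>> from sklearn.metrics.pairwise import linear_kernel
#     >>> X = np.array([[1., 2.], [2., 0.], [0., 1.]])
#     >>> K = linear_kernel(X)                  # K[i, j] = X[i].dot(X[j])
#     >>> K_centered = KernelCenterer().fit_transform(K)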
def add_dummy_feature(X, value=1.0):
"""Augment dataset with an additional dummy feature.
This is useful for fitting an intercept term with implementations which
cannot otherwise fit it directly.
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
Data.
value : float
Value to use for the dummy feature.
Returns
-------
X : array or scipy.sparse matrix with shape [n_samples, n_features + 1]
Same data with dummy feature added as first column.
Examples
--------
>>> from sklearn.preprocessing import add_dummy_feature
>>> add_dummy_feature([[0, 1], [1, 0]])
array([[ 1., 0., 1.],
[ 1., 1., 0.]])
"""
X = check_array(X, accept_sparse=['csc', 'csr', 'coo'])
n_samples, n_features = X.shape
shape = (n_samples, n_features + 1)
if sparse.issparse(X):
if sparse.isspmatrix_coo(X):
# Shift columns to the right.
col = X.col + 1
# Column indices of dummy feature are 0 everywhere.
col = np.concatenate((np.zeros(n_samples), col))
# Row indices of dummy feature are 0, ..., n_samples-1.
row = np.concatenate((np.arange(n_samples), X.row))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.coo_matrix((data, (row, col)), shape)
elif sparse.isspmatrix_csc(X):
# Shift index pointers since we need to add n_samples elements.
indptr = X.indptr + n_samples
# indptr[0] must be 0.
indptr = np.concatenate((np.array([0]), indptr))
# Row indices of dummy feature are 0, ..., n_samples-1.
indices = np.concatenate((np.arange(n_samples), X.indices))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.csc_matrix((data, indices, indptr), shape)
else:
klass = X.__class__
return klass(add_dummy_feature(X.tocoo(), value))
else:
return np.hstack((np.ones((n_samples, 1)) * value, X))
def _transform_selected(X, transform, selected="all", copy=True):
"""Apply a transform function to portion of selected features
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Dense array or sparse matrix.
transform : callable
A callable transform(X) -> X_transformed
copy : boolean, optional
Copy X even if it could be avoided.
selected: "all" or array of indices or mask
Specify which features to apply the transform to.
Returns
-------
X : array or sparse matrix, shape=(n_samples, n_features_new)
"""
if selected == "all":
return transform(X)
X = check_array(X, accept_sparse='csc', copy=copy)
if len(selected) == 0:
return X
n_features = X.shape[1]
ind = np.arange(n_features)
sel = np.zeros(n_features, dtype=bool)
sel[np.asarray(selected)] = True
not_sel = np.logical_not(sel)
n_selected = np.sum(sel)
if n_selected == 0:
# No features selected.
return X
elif n_selected == n_features:
# All features selected.
return transform(X)
else:
X_sel = transform(X[:, ind[sel]])
X_not_sel = X[:, ind[not_sel]]
if sparse.issparse(X_sel) or sparse.issparse(X_not_sel):
return sparse.hstack((X_sel, X_not_sel))
else:
return np.hstack((X_sel, X_not_sel))
class OneHotEncoder(BaseEstimator, TransformerMixin):
"""Encode categorical integer features using a one-hot aka one-of-K scheme.
The input to this transformer should be a matrix of integers, denoting
the values taken on by categorical (discrete) features. The output will be
a sparse matrix where each column corresponds to one possible value of one
feature. It is assumed that input features take on values in the range
[0, n_values).
This encoding is needed for feeding categorical data to many scikit-learn
estimators, notably linear models and SVMs with the standard kernels.
Read more in the :ref:`User Guide <preprocessing_categorical_features>`.
Parameters
----------
n_values : 'auto', int or array of ints
Number of values per feature.
- 'auto' : determine value range from training data.
- int : maximum value for all features.
- array : maximum value per feature.
categorical_features: "all" or array of indices or mask
Specify what features are treated as categorical.
- 'all' (default): All features are treated as categorical.
- array of indices: Array of categorical feature indices.
- mask: Array of length n_features and with dtype=bool.
Non-categorical features are always stacked to the right of the matrix.
dtype : number type, default=np.float
Desired dtype of output.
sparse : boolean, default=True
Will return sparse matrix if set True else will return an array.
handle_unknown : str, 'error' or 'ignore'
        Whether to raise an error or ignore if an unknown categorical feature is
present during transform.
Attributes
----------
active_features_ : array
Indices for active features, meaning values that actually occur
in the training set. Only available when n_values is ``'auto'``.
feature_indices_ : array of shape (n_features,)
Indices to feature ranges.
Feature ``i`` in the original data is mapped to features
from ``feature_indices_[i]`` to ``feature_indices_[i+1]``
(and then potentially masked by `active_features_` afterwards)
n_values_ : array of shape (n_features,)
Maximum number of values per feature.
Examples
--------
Given a dataset with three features and two samples, we let the encoder
find the maximum value per feature and transform the data to a binary
one-hot encoding.
>>> from sklearn.preprocessing import OneHotEncoder
>>> enc = OneHotEncoder()
>>> enc.fit([[0, 0, 3], [1, 1, 0], [0, 2, 1], \
[1, 0, 2]]) # doctest: +ELLIPSIS
OneHotEncoder(categorical_features='all', dtype=<... 'float'>,
handle_unknown='error', n_values='auto', sparse=True)
>>> enc.n_values_
array([2, 3, 4])
>>> enc.feature_indices_
array([0, 2, 5, 9])
>>> enc.transform([[0, 1, 1]]).toarray()
array([[ 1., 0., 0., 1., 0., 0., 1., 0., 0.]])
See also
--------
sklearn.feature_extraction.DictVectorizer : performs a one-hot encoding of
dictionary items (also handles string-valued features).
sklearn.feature_extraction.FeatureHasher : performs an approximate one-hot
encoding of dictionary items or strings.
"""
def __init__(self, n_values="auto", categorical_features="all",
dtype=np.float, sparse=True, handle_unknown='error'):
self.n_values = n_values
self.categorical_features = categorical_features
self.dtype = dtype
self.sparse = sparse
self.handle_unknown = handle_unknown
def fit(self, X, y=None):
"""Fit OneHotEncoder to X.
Parameters
----------
X : array-like, shape=(n_samples, n_feature)
Input array of type int.
Returns
-------
self
"""
self.fit_transform(X)
return self
def _fit_transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
if self.n_values == 'auto':
n_values = np.max(X, axis=0) + 1
elif isinstance(self.n_values, numbers.Integral):
if (np.max(X, axis=0) >= self.n_values).any():
raise ValueError("Feature out of bounds for n_values=%d"
% self.n_values)
n_values = np.empty(n_features, dtype=np.int)
n_values.fill(self.n_values)
else:
try:
n_values = np.asarray(self.n_values, dtype=int)
except (ValueError, TypeError):
raise TypeError("Wrong type for parameter `n_values`. Expected"
" 'auto', int or array of ints, got %r"
                                % type(self.n_values))
if n_values.ndim < 1 or n_values.shape[0] != X.shape[1]:
raise ValueError("Shape mismatch: if n_values is an array,"
" it has to be of shape (n_features,).")
self.n_values_ = n_values
n_values = np.hstack([[0], n_values])
indices = np.cumsum(n_values)
self.feature_indices_ = indices
column_indices = (X + indices[:-1]).ravel()
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)
data = np.ones(n_samples * n_features)
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if self.n_values == 'auto':
mask = np.array(out.sum(axis=0)).ravel() != 0
active_features = np.where(mask)[0]
out = out[:, active_features]
self.active_features_ = active_features
return out if self.sparse else out.toarray()
def fit_transform(self, X, y=None):
"""Fit OneHotEncoder to X, then transform X.
Equivalent to self.fit(X).transform(X), but more convenient and more
efficient. See fit for the parameters, transform for the return value.
"""
return _transform_selected(X, self._fit_transform,
self.categorical_features, copy=True)
def _transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
indices = self.feature_indices_
if n_features != indices.shape[0] - 1:
raise ValueError("X has different shape than during fitting."
" Expected %d, got %d."
% (indices.shape[0] - 1, n_features))
        # We use only those categorical features of X that are known from fit,
        # i.e. those less than n_values_, using a mask.
# This means, if self.handle_unknown is "ignore", the row_indices and
# col_indices corresponding to the unknown categorical feature are
# ignored.
mask = (X < self.n_values_).ravel()
if np.any(~mask):
if self.handle_unknown not in ['error', 'ignore']:
raise ValueError("handle_unknown should be either error or "
"unknown got %s" % self.handle_unknown)
if self.handle_unknown == 'error':
raise ValueError("unknown categorical feature present %s "
"during transform." % X[~mask])
column_indices = (X + indices[:-1]).ravel()[mask]
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)[mask]
data = np.ones(np.sum(mask))
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if self.n_values == 'auto':
out = out[:, self.active_features_]
return out if self.sparse else out.toarray()
def transform(self, X):
"""Transform X using one-hot encoding.
Parameters
----------
X : array-like, shape=(n_samples, n_features)
Input array of type int.
Returns
-------
X_out : sparse matrix if sparse=True else a 2-d array, dtype=int
Transformed input.
"""
return _transform_selected(X, self._transform,
self.categorical_features, copy=True)
| bsd-3-clause |
zooniverse/aggregation | experimental/paper/confidence/blankConfidence.py | 2 | 8294 | #!/usr/bin/env python
__author__ = 'greg'
import pymongo
import random
import os
import time
from time import mktime
from datetime import datetime,timedelta
import numpy as np
from scipy.stats import ks_2samp
import cPickle as pickle
from scipy.stats import beta
import matplotlib.pyplot as plt
from scipy.special import gamma as gammaf
from scipy.optimize import fmin
from scipy.stats import ks_2samp
project = "serengeti"
date = "2015-02-22"
# for Greg - which computer am I on?
if os.path.exists("/home/ggdhines"):
base_directory = "/home/ggdhines"
code_directory = base_directory + "/github"
elif os.path.exists("/Users/greg"):
base_directory = "/Users/greg"
code_directory = base_directory + "/Code"
else:
base_directory = "/home/greg"
code_directory = base_directory + "/github"
client = pymongo.MongoClient()
db = client[project+"_"+date]
classification_collection = db[project+"_classifications"]
subject_collection = db[project+"_subjects"]
user_collection = db[project+"_users"]
# user_names = []
# for user in user_collection.find({"classification_count":{"$gt":100}}):
# name = user["name"]
#
# if not(name in ["brian-c"]):
# user_names.append(name)
#
# sample_users = random.sample(list(user_names),min(2,len(list(user_names))))
# for classification in classification_collection.find({"tutorial":{"$ne":True},"user_name":{"$nin":["brian-c","parrish","arfon","kosmala"]}}):
# zooniverse_id = classification["subjects"][0]["zooniverse_id"]
# subject = subject_collection.find_one({"zooniverse_id":zooniverse_id})
#
# print classification
# break
# for jj,user in enumerate(sample_users):
# print user
# times = []
# correct_blanks = []
# false_blanks = []
# u = user_collection.find_one({"name":user})
names_to_skip = ["brian-c","parrish","arfon","kosmala","aliburchard","davidmill","laurawhyte","mschwamb","kellinora","ttfnrob"]
def classifications_to_retirement(zooniverse_id):
num_votes = 0
for num_votes, classification in enumerate(classification_collection.find({"subjects.0.zooniverse_id":zooniverse_id}).limit(25)):
classified = []
for ann in classification["annotations"]:
if "species" in ann:
classified.append(ann["species"])
classified.sort()
classified = tuple(classified)
if not(classified in all_classifications):
all_classifications[classified] = 1
else:
all_classifications[classified] += 1
#print all_classifications
if max(all_classifications.values()) == 10:
break
return num_votes+1
times = {}
baseline_votes = []
# for subject_count,subject in enumerate(subject_collection.find({"tutorial":{"$ne":True},"state":"complete","metadata.retire_reason":{"$nin":["blank"]}}).limit(100)):
# print subject_count
# zooniverse_id = subject["zooniverse_id"]
# #print zooniverse_id
#
# all_classifications = {}
# num_votes = classifications_to_retirement(zooniverse_id)
#
# baseline_votes.append(num_votes+1)
#print np.mean(baseline_votes),np.median(baseline_votes),np.std(votes)
#assert False
print "now reading in classifications"
for ii,classification in enumerate(classification_collection.find({"tutorial":{"$ne":True},"user_name":{"$nin":names_to_skip}}).limit(600000)):
try:
name = classification["user_name"]
except KeyError:
continue
print classification["annotations"]
zooniverse_id = classification["subjects"][0]["zooniverse_id"]
annotations = classification["annotations"]
keys = [ann.keys() for ann in annotations]
timing_index = keys.index([u'started'])
timing = annotations[timing_index]
started_at = timing["started"]
# u'Tue, 16 Sep 2014 16:11:58 GMT'
classify_time = time.strptime(started_at,"%a, %d %b %Y %H:%M:%S %Z")
user_nothing = ["nothing"] in keys
if not(name in times):
times[name] = [(datetime.fromtimestamp(mktime(classify_time)),user_nothing,zooniverse_id)]
else:
times[name].append((datetime.fromtimestamp(mktime(classify_time)),user_nothing,zooniverse_id))
def betaNLL(param,*args):
'''Negative log likelihood function for beta
<param>: list for parameters to be fitted.
<args>: 1-element array containing the sample data.
Return <nll>: negative log-likelihood to be minimized.
'''
a,b=param
data=args[0]
pdf=beta.pdf(data,a,b,loc=0,scale=1)
lg=np.log(pdf)
#-----Replace -inf with 0s------
lg=np.where(lg==-np.inf,0,lg)
nll=-1*np.sum(lg)
return nll
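# Illustrative (commented-out) sketch of how betaNLL could be minimised with
# scipy's fmin to fit a beta distribution; the synthetic sample and the (1, 1)
# starting guess are assumptions, not part of the analysis below.
# sample = np.random.beta(2.0, 5.0, size=500)
# a_fit, b_fit = fmin(betaNLL, [1.0, 1.0], args=(sample,))
# print a_fit, b_fit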
total = 0
totalError = 0
overall_errors = set()
print "now looking for false positives"
for name in times.keys()[:250]:
times[name].sort(key = lambda x:x[0])
correct = 0
incorrect = 0
correct_times = []
incorrect_list = []
for classification_index,(t,nothing,zooniverse_id) in enumerate(times[name][:-1]):
subject = subject_collection.find_one({"zooniverse_id":zooniverse_id})
if nothing:
#how long did it take them to classify?
next_t = times[name][classification_index+1][0]
time_to_classify = next_t - t
assert time_to_classify.total_seconds() >= 0
if time_to_classify.total_seconds() == 0:
#print "weird"
continue
if time_to_classify.total_seconds() > 45:
continue
true_nothing = subject["metadata"]["retire_reason"]
if true_nothing in ["blank","blank_consensus"]:
correct += 1
correct_times.append(time_to_classify.total_seconds())
else:
incorrect += 1
incorrect_list.append((time_to_classify.total_seconds(),zooniverse_id))
if (incorrect > 0) and (correct > 0):
# print name,len(times[name])
# print correct,incorrect
# print np.mean(correct_times),np.mean(incorrect_times)
incorrect_list.sort(key = lambda x:x[0])
incorrect_times = zip(*incorrect_list)[0]
ids = zip(*incorrect_list)[1]
# is this any overall difference?
s,starting_p = ks_2samp(correct_times,incorrect_times)
if starting_p < 0.01:
for ii in range(len(incorrect_times)-1,-1,-1):
s,p = ks_2samp(correct_times,incorrect_times[:ii])
if p > 0.01:
break
totalError+= len(incorrect_times)-ii
print (len(incorrect_times)-ii,len(incorrect_times))
errors = incorrect_times[ii:]
zooniverse_errors = [ids[incorrect_times.index(t)] for t in errors]
for id in zooniverse_errors:
overall_errors.add(id)
#for id in zooniverse_errors:
# print subject_collection.find_one({"zooniverse_id":id})
# print
else:
print 0,len(incorrect_times)
total += len(incorrect_times)
# #print sum([1 for c in correct_times if c >= min(incorrect_times)])/float(len(correct_times))
# #print
# max_time = max(max(correct_times),max(incorrect_times))
# min_time = min(min(correct_times),min(incorrect_times))
# data = correct_times
# data = [(t-min_time)/float(max_time-min_time) for t in data]
# #print data
# a,b,lower,scale = beta.fit(data)
# #print a,b,lower,scale
# #print
# #print beta.cdf(0.8,a,b)
# #----------------Fit using moments----------------
# mean=np.mean(data)
# var=np.var(data,ddof=1)
# alpha1=mean**2*(1-mean)/var-mean
# beta1=alpha1*(1-mean)/mean
#
#
# print beta.cdf((incorrect_times[-1]-min_time)/(max_time-min_time),alpha1,beta1)
#print
#break
#print correct_times
#print incorrect_times
print totalError,total
error_votes = []
for zooniverse_id in list(overall_errors):
num_votes = classifications_to_retirement(zooniverse_id)
error_votes.append(num_votes)
#print times.keys()
#print user_collection.find_one({"name":"kellinora"})
print np.mean(baseline_votes),np.median(baseline_votes)
print np.mean(error_votes),np.median(error_votes)
print ks_2samp(baseline_votes,error_votes) | apache-2.0 |
michigraber/scikit-learn | examples/linear_model/plot_ols_ridge_variance.py | 387 | 2060 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Ordinary Least Squares and Ridge Regression Variance
=========================================================
Due to the few points in each dimension and the straight
line that linear regression uses to follow these points
as well as it can, noise on the observations will cause
great variance as shown in the first plot. Every line's slope
can vary quite a bit for each prediction due to the noise
induced in the observations.
Ridge regression is basically minimizing a penalised version
of the least-squares function. The penalty `shrinks` the
value of the regression coefficients.
Despite the few data points in each dimension, the slope
of the prediction is much more stable and the variance
in the line itself is greatly reduced, in comparison to that
of the standard linear regression.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
X_train = np.c_[.5, 1].T
y_train = [.5, 1]
X_test = np.c_[0, 2].T
np.random.seed(0)
classifiers = dict(ols=linear_model.LinearRegression(),
ridge=linear_model.Ridge(alpha=.1))
fignum = 1
for name, clf in classifiers.items():
fig = plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.title(name)
ax = plt.axes([.12, .12, .8, .8])
for _ in range(6):
this_X = .1 * np.random.normal(size=(2, 1)) + X_train
clf.fit(this_X, y_train)
ax.plot(X_test, clf.predict(X_test), color='.5')
ax.scatter(this_X, y_train, s=3, c='.5', marker='o', zorder=10)
clf.fit(X_train, y_train)
ax.plot(X_test, clf.predict(X_test), linewidth=2, color='blue')
ax.scatter(X_train, y_train, s=30, c='r', marker='+', zorder=10)
ax.set_xticks(())
ax.set_yticks(())
ax.set_ylim((0, 1.6))
ax.set_xlabel('X')
ax.set_ylabel('y')
ax.set_xlim(0, 2)
fignum += 1
plt.show()
| bsd-3-clause |
MaxHalford/Prince | prince/pca.py | 1 | 8473 | """Principal Component Analysis (PCA)"""
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn import base
from sklearn import preprocessing
from sklearn import utils
from . import plot
from . import svd
class PCA(base.BaseEstimator, base.TransformerMixin):
"""
Args:
        rescale_with_mean (bool): Whether to subtract each column's mean or not.
rescale_with_std (bool): Whether to divide each column by it's standard deviation or not.
n_components (int): The number of principal components to compute.
n_iter (int): The number of iterations used for computing the SVD.
copy (bool): Whether to perform the computations inplace or not.
check_input (bool): Whether to check the consistency of the inputs or not.
"""
def __init__(self, rescale_with_mean=True, rescale_with_std=True, n_components=2, n_iter=3,
copy=True, check_input=True, random_state=None, engine='auto'):
self.n_components = n_components
self.n_iter = n_iter
self.rescale_with_mean = rescale_with_mean
self.rescale_with_std = rescale_with_std
self.copy = copy
self.check_input = check_input
self.random_state = random_state
self.engine = engine
def fit(self, X, y=None):
# Check input
if self.check_input:
utils.check_array(X)
# Convert pandas DataFrame to numpy array
if isinstance(X, pd.DataFrame):
X = X.to_numpy(dtype=np.float64)
# Copy data
if self.copy:
X = np.copy(X)
# Scale data
if self.rescale_with_mean or self.rescale_with_std:
self.scaler_ = preprocessing.StandardScaler(
copy=False,
with_mean=self.rescale_with_mean,
with_std=self.rescale_with_std
).fit(X)
X = self.scaler_.transform(X)
# Compute SVD
self.U_, self.s_, self.V_ = svd.compute_svd(
X=X,
n_components=self.n_components,
n_iter=self.n_iter,
random_state=self.random_state,
engine=self.engine
)
# Compute total inertia
self.total_inertia_ = np.sum(np.square(X))
return self
def transform(self, X):
"""Computes the row principal coordinates of a dataset.
Same as calling `row_coordinates`. In most cases you should be using the same
dataset as you did when calling the `fit` method. You might however also want to included
supplementary data.
"""
utils.validation.check_is_fitted(self, 's_')
if self.check_input:
utils.check_array(X)
return self.row_coordinates(X)
def row_coordinates(self, X):
"""Returns the row principal coordinates.
The row principal coordinates are obtained by projecting `X` on the right eigenvectors.
"""
utils.validation.check_is_fitted(self, 's_')
# Extract index
index = X.index if isinstance(X, pd.DataFrame) else None
# Copy data
if self.copy:
X = np.copy(X)
# Scale data
if hasattr(self, 'scaler_'):
X = self.scaler_.transform(X)
return pd.DataFrame(data=X.dot(self.V_.T), index=index)
def row_standard_coordinates(self, X):
"""Returns the row standard coordinates.
The row standard coordinates are obtained by dividing each row principal coordinate by it's
associated eigenvalue.
"""
utils.validation.check_is_fitted(self, 's_')
return self.row_coordinates(X).div(self.eigenvalues_, axis='columns')
def row_contributions(self, X):
"""Returns the row contributions towards each principal component.
Each row contribution towards each principal component is equivalent to the amount of
inertia it contributes. This is calculated by dividing the squared row coordinates by the
eigenvalue associated to each principal component.
"""
utils.validation.check_is_fitted(self, 's_')
return np.square(self.row_coordinates(X)).div(self.eigenvalues_, axis='columns')
def row_cosine_similarities(self, X):
"""Returns the cosine similarities between the rows and their principal components.
The row cosine similarities are obtained by calculating the cosine of the angle shaped by
the row principal coordinates and the row principal components. This is calculated by
squaring each row projection coordinate and dividing each squared coordinate by the sum of
the squared coordinates, which results in a ratio comprised between 0 and 1 representing the
squared cosine.
"""
utils.validation.check_is_fitted(self, 's_')
squared_coordinates = np.square(self.row_coordinates(X))
total_squares = squared_coordinates.sum(axis='columns')
return squared_coordinates.div(total_squares, axis='rows')
def column_correlations(self, X):
"""Returns the column correlations with each principal component."""
utils.validation.check_is_fitted(self, 's_')
# Convert numpy array to pandas DataFrame
if isinstance(X, np.ndarray):
X = pd.DataFrame(X)
row_pc = self.row_coordinates(X)
return pd.DataFrame({
component: {
feature: row_pc[component].corr(X[feature])
for feature in X.columns
}
for component in row_pc.columns
})
@property
def eigenvalues_(self):
"""Returns the eigenvalues associated with each principal component."""
utils.validation.check_is_fitted(self, 's_')
return np.square(self.s_).tolist()
@property
def explained_inertia_(self):
"""Returns the percentage of explained inertia per principal component."""
utils.validation.check_is_fitted(self, 's_')
return [eig / self.total_inertia_ for eig in self.eigenvalues_]
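# Worked note (illustrative): explained_inertia_[i] equals eigenvalues_[i] / total_inertia_,
# i.e. the i-th squared singular value divided by the total sum of squares of the
# (optionally rescaled) data, so the kept components sum to at most 1.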
def plot_row_coordinates(self, X, ax=None, figsize=(6, 6), x_component=0, y_component=1,
labels=None, color_labels=None, ellipse_outline=False,
ellipse_fill=True, show_points=True, **kwargs):
"""Plot the row principal coordinates."""
utils.validation.check_is_fitted(self, 's_')
if ax is None:
fig, ax = plt.subplots(figsize=figsize)
# Add style
ax = plot.stylize_axis(ax)
# Make sure X is a DataFrame
if not isinstance(X, pd.DataFrame):
X = pd.DataFrame(X)
# Retrieve principal coordinates
coordinates = self.row_coordinates(X)
x = coordinates[x_component].astype(float)
y = coordinates[y_component].astype(float)
# Plot
if color_labels is None:
ax.scatter(x, y, **kwargs)
else:
for color_label in sorted(list(set(color_labels))):
mask = np.array(color_labels) == color_label
color = ax._get_lines.get_next_color()
# Plot points
if show_points:
ax.scatter(x[mask], y[mask], color=color, **kwargs, label=color_label)
# Plot ellipse
if (ellipse_outline or ellipse_fill):
x_mean, y_mean, width, height, angle = plot.build_ellipse(x[mask], y[mask])
ax.add_patch(mpl.patches.Ellipse(
(x_mean, y_mean),
width,
height,
angle=angle,
linewidth=2 if ellipse_outline else 0,
color=color,
fill=ellipse_fill,
alpha=0.2 + (0.3 if not show_points else 0) if ellipse_fill else 1
))
# Add labels
if labels is not None:
for i, label in enumerate(labels):
ax.annotate(label, (x[i], y[i]))
# Legend
ax.legend()
# Text
ax.set_title('Row principal coordinates')
ei = self.explained_inertia_
ax.set_xlabel('Component {} ({:.2f}% inertia)'.format(x_component, 100 * ei[x_component]))
ax.set_ylabel('Component {} ({:.2f}% inertia)'.format(y_component, 100 * ei[y_component]))
return ax
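# Hedged usage sketch for the estimator defined above. The class name `PCA`, the
# constructor arguments and the toy data are assumptions for illustration only;
# they are not asserted to be part of this module's public API.
def _usage_sketch():
    rng = np.random.RandomState(0)
    X = pd.DataFrame(rng.normal(size=(20, 4)), columns=list('abcd'))
    pca = PCA(n_components=2)            # assumed constructor signature
    pca.fit(X)
    coords = pca.row_coordinates(X)      # (20, 2) row principal coordinates
    print(pca.explained_inertia_)        # fraction of inertia per kept component
    print(pca.row_cosine_similarities(X).sum(axis='columns'))  # <= 1 per row
    return coords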
| mit |
lhm30/PIDGINv2 | sim_to_train.py | 1 | 6260 | #Author : Lewis Mervin [email protected]
#Supervisor : Dr. A. Bender
#All rights reserved 2016
#Protein Target Prediction Tool trained on SARs from PubChem (Mined 21/06/16) and ChEMBL21
#Molecular Descriptors : 2048bit Morgan Binary Fingerprints (Rdkit) - ECFP4
#Dependencies : rdkit, sklearn, numpy
#libraries
from rdkit import Chem
from rdkit.Chem import AllChem
from rdkit import DataStructs
import cPickle
import zipfile
import glob
import os
import sys
import math
import numpy as np
from multiprocessing import Pool
import multiprocessing
multiprocessing.freeze_support()
def introMessage():
print '=============================================================================================='
print ' Author: Lewis Mervin\n Email: [email protected]\n Supervisor: Dr. A. Bender'
print ' Address: Centre For Molecular Informatics, Dept. Chemistry, Lensfield Road, Cambridge CB2 1EW'
print '==============================================================================================\n'
return
#calculate 2048bit morgan fingerprints, radius 2
def calcFingerprints(smiles):
m1 = Chem.MolFromSmiles(smiles)
fp = AllChem.GetMorganFingerprintAsBitVect(m1,2, nBits=2048)
return fp
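#illustrative helper (an assumption for demonstration, not part of the original
#pipeline): Tanimoto similarity between two SMILES using the function above
def tanimotoSketch(smiles_a, smiles_b):
	fp_a = calcFingerprints(smiles_a)
	fp_b = calcFingerprints(smiles_b)
	return DataStructs.TanimotoSimilarity(fp_a, fp_b)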
#calculate fingerprints for chunked array of smiles
def arrayFP(inp):
outfp = []
outsmi = []
for i in inp:
try:
outfp.append(calcFingerprints(i))
outsmi.append(i)
except:
print 'SMILES Parse Error: ' + i
return outfp,outsmi
#import user query
def importQuery(in_file):
query = open(in_file).read().splitlines()
#collect IDs, if present
if len(query[0].split()) > 1:
ids = [line.split()[1] for line in query]
query = [line.split()[0] for line in query]
else:
ids = None
smiles_per_core = int(math.ceil(len(query) / N_cores)+1)
chunked_smiles = [query[x:x+smiles_per_core] for x in xrange(0, len(query), smiles_per_core)]
pool = Pool(processes=N_cores) # set up resources
jobs = pool.imap(arrayFP, chunked_smiles)
processed_fp = []
processed_smi = []
for i, result in enumerate(jobs):
processed_fp += result[0]
processed_smi += result[1]
pool.close()
pool.join()
#remove IDs of SMILES parsing errors
if ids:
processed_ids = []
for idx, smi in enumerate(query):
if smi in processed_smi:
processed_ids.append(ids[idx])
ids = processed_ids
#if IDs weren't present, use SMILES as IDs
else:
ids = processed_smi
return processed_fp, processed_smi, ids
#get info for uniprots
def getUniprotInfo():
if os.name == 'nt': sep = '\\'
else: sep = '/'
model_info = [l.split('\t') for l in open(os.path.dirname(os.path.abspath(__file__)) + sep + 'classes_in_model.txt').read().splitlines()]
return_dict = {l[0] : l[0:7] for l in model_info}
return return_dict
#sim worker
def doSimSearch(model_name):
if os.name == 'nt': sep = '\\'
else: sep = '/'
mod = model_name.split(sep)[-1].split('.')[0]
try:
with zipfile.ZipFile(os.path.dirname(os.path.abspath(__file__)) + sep + 'actives' + sep + mod + '.smi.zip', 'r') as zfile:
comps = [i.split('\t') for i in zfile.open(mod + '.smi', 'r').read().splitlines()]
except IOError: return
comps2 = []
afp = []
for comp in comps:
try:
afp.append(calcFingerprints(comp[1]))
comps2.append(comp)
except: pass
ret = []
for i,fp in enumerate(querymatrix):
sims = DataStructs.BulkTanimotoSimilarity(fp,afp)
idx = sims.index(max(sims))
ret.append([sims[idx], mod] + comps2[idx] + [smiles[i]])
return ret
#prediction runner
def performSimSearch(models):
sims_results = []
pool = Pool(processes=N_cores, initializer=initPool, initargs=(querymatrix,smiles)) # set up resources
jobs = pool.imap_unordered(doSimSearch, models)
out_file2.write('Uniprot\tPref_Name\tGene ID\tTarget_Class\tOrganism\tPDB_ID\tDisGeNET_Diseases_0.06\t' + '\t'.join(map(str,ids)) + '\n')
for i, result in enumerate(jobs):
percent = (float(i)/float(len(models)))*100 + 1
sys.stdout.write(' Calculating Sims for Query Molecules: %3d%%\r' % percent)
sys.stdout.flush()
if result is not None:
sims_results += result
out_file2.write('\t'.join(map(str,model_info[result[0][1]])))
for sim in result:
out_file2.write('\t' + str(round(sim[0],3)))
out_file2.write('\n')
pool.close()
pool.join()
return sims_results
#initializer for the pool
def initPool(querymatrix_,smiles_):
global querymatrix, smiles
querymatrix = querymatrix_
smiles = smiles_
#main
if __name__ == '__main__':
if os.name == 'nt': sep = '\\'
else: sep = '/'
input_name = sys.argv[1]
N_cores = int(sys.argv[2])
try:
desired_organism = sys.argv[3]
except IndexError:
desired_organism = None
introMessage()
print ' Calculating Near-Neighbors for ' + input_name
print ' Using ' + str(N_cores) + ' Cores'
models = [modelfile for modelfile in glob.glob(os.path.dirname(os.path.abspath(__file__)) + sep + 'models' + sep + '*.zip')]
model_info = getUniprotInfo()
if desired_organism is not None:
models = [mod for mod in models if model_info[mod.split(sep)[-1].split('.')[0]][4] == desired_organism]
output_name = input_name + '_out_similarity_details' + desired_organism[:3] + '.txt'
output_name2 = input_name + '_out_similarity_matrix' + desired_organism[:3] + '.txt'
print ' Predicting for organism : ' + desired_organism
else:
output_name = input_name + '_out_similarity_details.txt'
output_name2 = input_name + '_out_similarity_matrix.txt'
print ' Total Number of Classes : ' + str(len(models))
out_file = open(output_name, 'w')
out_file2 = open(output_name2, 'w')
querymatrix,smiles,ids = importQuery(input_name)
print ' Total Number of Query Molecules : ' + str(len(querymatrix))
sims_results = performSimSearch(models)
out_file.write('Uniprot\tPref_Name\tGene ID\tTarget_Class\tOrganism\tNear_Neighbor_ChEMBLID\tNear_Neighbor_Smiles\tNear_Neighbor_Bioactive_organism\tNear_Neighbor_conf_score\tNN_activity\tNN_Units\tInput_Compound\tSimilarity\n')
for row in sorted(sims_results,reverse=True):
out_file.write('\t'.join(map(str,model_info[row[1]][:5])) + '\t' + '\t'.join(map(str,row[2:])) + '\t' + str(row[0]) + '\n')
print '\n Wrote Results to: ' + output_name
print ' Wrote Results to: ' + output_name2
out_file.close()
out_file2.close()
| mit |
jm-begon/scikit-learn | sklearn/tests/test_grid_search.py | 68 | 28778 | """
Testing for grid search module (sklearn.grid_search)
"""
from collections import Iterable, Sized
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.externals.six.moves import xrange
from itertools import chain, product
import pickle
import sys
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from scipy.stats import bernoulli, expon, uniform
from sklearn.externals.six.moves import zip
from sklearn.base import BaseEstimator
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_multilabel_classification
from sklearn.grid_search import (GridSearchCV, RandomizedSearchCV,
ParameterGrid, ParameterSampler,
ChangedBehaviorWarning)
from sklearn.svm import LinearSVC, SVC
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import DecisionTreeClassifier
from sklearn.cluster import KMeans
from sklearn.neighbors import KernelDensity
from sklearn.metrics import f1_score
from sklearn.metrics import make_scorer
from sklearn.metrics import roc_auc_score
from sklearn.cross_validation import KFold, StratifiedKFold, FitFailedWarning
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
# Neither of the following two estimators inherit from BaseEstimator,
# to test hyperparameter search on user-defined classifiers.
class MockClassifier(object):
"""Dummy classifier to test the cross-validation"""
def __init__(self, foo_param=0):
self.foo_param = foo_param
def fit(self, X, Y):
assert_true(len(X) == len(Y))
return self
def predict(self, T):
return T.shape[0]
predict_proba = predict
decision_function = predict
transform = predict
def score(self, X=None, Y=None):
if self.foo_param > 1:
score = 1.
else:
score = 0.
return score
def get_params(self, deep=False):
return {'foo_param': self.foo_param}
def set_params(self, **params):
self.foo_param = params['foo_param']
return self
class LinearSVCNoScore(LinearSVC):
"""An LinearSVC classifier that has no score method."""
@property
def score(self):
raise AttributeError
X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
y = np.array([1, 1, 2, 2])
def assert_grid_iter_equals_getitem(grid):
assert_equal(list(grid), [grid[i] for i in range(len(grid))])
def test_parameter_grid():
# Test basic properties of ParameterGrid.
params1 = {"foo": [1, 2, 3]}
grid1 = ParameterGrid(params1)
assert_true(isinstance(grid1, Iterable))
assert_true(isinstance(grid1, Sized))
assert_equal(len(grid1), 3)
assert_grid_iter_equals_getitem(grid1)
params2 = {"foo": [4, 2],
"bar": ["ham", "spam", "eggs"]}
grid2 = ParameterGrid(params2)
assert_equal(len(grid2), 6)
# loop to assert we can iterate over the grid multiple times
for i in xrange(2):
# tuple + chain transforms {"a": 1, "b": 2} to ("a", 1, "b", 2)
points = set(tuple(chain(*(sorted(p.items())))) for p in grid2)
assert_equal(points,
set(("bar", x, "foo", y)
for x, y in product(params2["bar"], params2["foo"])))
assert_grid_iter_equals_getitem(grid2)
# Special case: empty grid (useful to get default estimator settings)
empty = ParameterGrid({})
assert_equal(len(empty), 1)
assert_equal(list(empty), [{}])
assert_grid_iter_equals_getitem(empty)
assert_raises(IndexError, lambda: empty[1])
has_empty = ParameterGrid([{'C': [1, 10]}, {}, {'C': [.5]}])
assert_equal(len(has_empty), 4)
assert_equal(list(has_empty), [{'C': 1}, {'C': 10}, {}, {'C': .5}])
assert_grid_iter_equals_getitem(has_empty)
def test_grid_search():
# Test that the best estimator contains the right value for foo_param
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, verbose=3)
# make sure it selects the smallest parameter in case of ties
old_stdout = sys.stdout
sys.stdout = StringIO()
grid_search.fit(X, y)
sys.stdout = old_stdout
assert_equal(grid_search.best_estimator_.foo_param, 2)
for i, foo_i in enumerate([1, 2, 3]):
assert_true(grid_search.grid_scores_[i][0]
== {'foo_param': foo_i})
# Smoke test the score etc:
grid_search.score(X, y)
grid_search.predict_proba(X)
grid_search.decision_function(X)
grid_search.transform(X)
# Test exception handling on scoring
grid_search.scoring = 'sklearn'
assert_raises(ValueError, grid_search.fit, X, y)
@ignore_warnings
def test_grid_search_no_score():
# Test grid-search on classifier that has no score function.
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
clf_no_score = LinearSVCNoScore(random_state=0)
grid_search = GridSearchCV(clf, {'C': Cs}, scoring='accuracy')
grid_search.fit(X, y)
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs},
scoring='accuracy')
# smoketest grid search
grid_search_no_score.fit(X, y)
# check that best params are equal
assert_equal(grid_search_no_score.best_params_, grid_search.best_params_)
# check that we can call score and that it gives the correct result
assert_equal(grid_search.score(X, y), grid_search_no_score.score(X, y))
# giving no scoring function raises an error
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs})
assert_raise_message(TypeError, "no scoring", grid_search_no_score.fit,
[[1]])
def test_grid_search_score_method():
X, y = make_classification(n_samples=100, n_classes=2, flip_y=.2,
random_state=0)
clf = LinearSVC(random_state=0)
grid = {'C': [.1]}
search_no_scoring = GridSearchCV(clf, grid, scoring=None).fit(X, y)
search_accuracy = GridSearchCV(clf, grid, scoring='accuracy').fit(X, y)
search_no_score_method_auc = GridSearchCV(LinearSVCNoScore(), grid,
scoring='roc_auc').fit(X, y)
search_auc = GridSearchCV(clf, grid, scoring='roc_auc').fit(X, y)
# Check warning only occurs in situation where behavior changed:
# estimator requires score method to compete with scoring parameter
score_no_scoring = assert_no_warnings(search_no_scoring.score, X, y)
score_accuracy = assert_warns(ChangedBehaviorWarning,
search_accuracy.score, X, y)
score_no_score_auc = assert_no_warnings(search_no_score_method_auc.score,
X, y)
score_auc = assert_warns(ChangedBehaviorWarning,
search_auc.score, X, y)
# ensure the test is sane
assert_true(score_auc < 1.0)
assert_true(score_accuracy < 1.0)
assert_not_equal(score_auc, score_accuracy)
assert_almost_equal(score_accuracy, score_no_scoring)
assert_almost_equal(score_auc, score_no_score_auc)
def test_trivial_grid_scores():
# Test search over a "grid" with only one point.
# Non-regression test: grid_scores_ wouldn't be set by GridSearchCV.
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1]})
grid_search.fit(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
random_search = RandomizedSearchCV(clf, {'foo_param': [0]}, n_iter=1)
random_search.fit(X, y)
assert_true(hasattr(random_search, "grid_scores_"))
def test_no_refit():
# Test that grid search can be used for model selection only
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=False)
grid_search.fit(X, y)
assert_true(hasattr(grid_search, "best_params_"))
def test_grid_search_error():
# Test that grid search will capture errors on data with different
# length
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_[:180], y_)
def test_grid_search_iid():
# test the iid parameter
# noise-free simple 2d-data
X, y = make_blobs(centers=[[0, 0], [1, 0], [0, 1], [1, 1]], random_state=0,
cluster_std=0.1, shuffle=False, n_samples=80)
# split dataset into two folds that are not iid
# first one contains data of all 4 blobs, second only from two.
mask = np.ones(X.shape[0], dtype=bool)
mask[np.where(y == 1)[0][::2]] = 0
mask[np.where(y == 2)[0][::2]] = 0
# this leads to perfect classification on one fold and a score of 1/3 on
# the other
svm = SVC(kernel='linear')
# create "cv" for splits
cv = [[mask, ~mask], [~mask, mask]]
# once with iid=True (default)
grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv)
grid_search.fit(X, y)
first = grid_search.grid_scores_[0]
assert_equal(first.parameters['C'], 1)
assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
# for first split, 1/4 of dataset is in test, for second 3/4.
# take weighted average
assert_almost_equal(first.mean_validation_score,
1 * 1. / 4. + 1. / 3. * 3. / 4.)
# once with iid=False
grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv,
iid=False)
grid_search.fit(X, y)
first = grid_search.grid_scores_[0]
assert_equal(first.parameters['C'], 1)
# scores are the same as above
assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
# averaged score is just mean of scores
assert_almost_equal(first.mean_validation_score,
np.mean(first.cv_validation_scores))
def test_grid_search_one_grid_point():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
param_dict = {"C": [1.0], "kernel": ["rbf"], "gamma": [0.1]}
clf = SVC()
cv = GridSearchCV(clf, param_dict)
cv.fit(X_, y_)
clf = SVC(C=1.0, kernel="rbf", gamma=0.1)
clf.fit(X_, y_)
assert_array_equal(clf.dual_coef_, cv.best_estimator_.dual_coef_)
def test_grid_search_bad_param_grid():
param_dict = {"C": 1.0}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
param_dict = {"C": []}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
param_dict = {"C": np.ones(6).reshape(3, 2)}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
def test_grid_search_sparse():
# Test that grid search works with both dense and sparse matrices
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180].tocoo(), y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_true(np.mean(y_pred == y_pred2) >= .9)
assert_equal(C, C2)
def test_grid_search_sparse_scoring():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_array_equal(y_pred, y_pred2)
assert_equal(C, C2)
# Smoke test the score
# np.testing.assert_allclose(f1_score(cv.predict(X_[:180]), y[:180]),
# cv.score(X_[:180], y[:180]))
# test loss where greater is worse
def f1_loss(y_true_, y_pred_):
return -f1_score(y_true_, y_pred_)
F1Loss = make_scorer(f1_loss, greater_is_better=False)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring=F1Loss)
cv.fit(X_[:180], y_[:180])
y_pred3 = cv.predict(X_[180:])
C3 = cv.best_estimator_.C
assert_equal(C, C3)
assert_array_equal(y_pred, y_pred3)
def test_grid_search_precomputed_kernel():
# Test that grid search works when the input features are given in the
# form of a precomputed kernel matrix
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
# compute the training kernel matrix corresponding to the linear kernel
K_train = np.dot(X_[:180], X_[:180].T)
y_train = y_[:180]
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(K_train, y_train)
assert_true(cv.best_score_ >= 0)
# compute the test kernel matrix
K_test = np.dot(X_[180:], X_[:180].T)
y_test = y_[180:]
y_pred = cv.predict(K_test)
assert_true(np.mean(y_pred == y_test) >= 0)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cv.fit, K_train.tolist(), y_train)
def test_grid_search_precomputed_kernel_error_nonsquare():
# Test that grid search returns an error with a non-square precomputed
# training kernel matrix
K_train = np.zeros((10, 20))
y_train = np.ones((10, ))
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, K_train, y_train)
def test_grid_search_precomputed_kernel_error_kernel_function():
# Test that grid search returns an error when using a kernel_function
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
kernel_function = lambda x1, x2: np.dot(x1, x2.T)
clf = SVC(kernel=kernel_function)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_, y_)
class BrokenClassifier(BaseEstimator):
"""Broken classifier that cannot be fit twice"""
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y):
assert_true(not hasattr(self, 'has_been_fit_'))
self.has_been_fit_ = True
def predict(self, X):
return np.zeros(X.shape[0])
def test_refit():
# Regression test for bug in refitting
# Simulates re-fitting a broken estimator; this used to break with
# sparse SVMs.
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = GridSearchCV(BrokenClassifier(), [{'parameter': [0, 1]}],
scoring="precision", refit=True)
clf.fit(X, y)
def test_gridsearch_nd():
# Pass X as list in GridSearchCV
X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
check_X = lambda x: x.shape[1:] == (5, 3, 2)
check_y = lambda x: x.shape[1:] == (7, 11)
clf = CheckingClassifier(check_X=check_X, check_y=check_y)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_4d, y_3d).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_X_as_list():
# Pass X as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_X=lambda x: isinstance(x, list))
cv = KFold(n=len(X), n_folds=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X.tolist(), y).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_y_as_list():
# Pass y as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_y=lambda x: isinstance(x, list))
cv = KFold(n=len(X), n_folds=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X, y.tolist()).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_pandas_input():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((DataFrame, Series))
except ImportError:
pass
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
for InputFeatureType, TargetType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_df, y_ser).score(X_df, y_ser)
grid_search.predict(X_df)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_unsupervised_grid_search():
# test grid-search with unsupervised estimator
X, y = make_blobs(random_state=0)
km = KMeans(random_state=0)
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]),
scoring='adjusted_rand_score')
grid_search.fit(X, y)
# ARI can find the right number :)
assert_equal(grid_search.best_params_["n_clusters"], 3)
# Now without a score, and without y
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]))
grid_search.fit(X)
assert_equal(grid_search.best_params_["n_clusters"], 4)
def test_gridsearch_no_predict():
# test grid-search with an estimator without predict.
# slight duplication of a test from KDE
def custom_scoring(estimator, X):
return 42 if estimator.bandwidth == .1 else 0
X, _ = make_blobs(cluster_std=.1, random_state=1,
centers=[[0, 1], [1, 0], [0, 0]])
search = GridSearchCV(KernelDensity(),
param_grid=dict(bandwidth=[.01, .1, 1]),
scoring=custom_scoring)
search.fit(X)
assert_equal(search.best_params_['bandwidth'], .1)
assert_equal(search.best_score_, 42)
def test_param_sampler():
# test basic properties of param sampler
param_distributions = {"kernel": ["rbf", "linear"],
"C": uniform(0, 1)}
sampler = ParameterSampler(param_distributions=param_distributions,
n_iter=10, random_state=0)
samples = [x for x in sampler]
assert_equal(len(samples), 10)
for sample in samples:
assert_true(sample["kernel"] in ["rbf", "linear"])
assert_true(0 <= sample["C"] <= 1)
def test_randomized_search_grid_scores():
# Make a dataset with a lot of noise to get various kind of prediction
# errors across CV folds and parameter settings
X, y = make_classification(n_samples=200, n_features=100, n_informative=3,
random_state=0)
# XXX: as of today (scipy 0.12) it's not possible to set the random seed
# of scipy.stats distributions: the assertions in this test should thus
# not depend on the randomization
params = dict(C=expon(scale=10),
gamma=expon(scale=0.1))
n_cv_iter = 3
n_search_iter = 30
search = RandomizedSearchCV(SVC(), n_iter=n_search_iter, cv=n_cv_iter,
param_distributions=params, iid=False)
search.fit(X, y)
assert_equal(len(search.grid_scores_), n_search_iter)
# Check consistency of the structure of each cv_score item
for cv_score in search.grid_scores_:
assert_equal(len(cv_score.cv_validation_scores), n_cv_iter)
# Because we set iid to False, the mean_validation score is the
# mean of the fold mean scores instead of the aggregate sample-wise
# mean score
assert_almost_equal(np.mean(cv_score.cv_validation_scores),
cv_score.mean_validation_score)
assert_equal(list(sorted(cv_score.parameters.keys())),
list(sorted(params.keys())))
# Check the consistency with the best_score_ and best_params_ attributes
sorted_grid_scores = list(sorted(search.grid_scores_,
key=lambda x: x.mean_validation_score))
best_score = sorted_grid_scores[-1].mean_validation_score
assert_equal(search.best_score_, best_score)
tied_best_params = [s.parameters for s in sorted_grid_scores
if s.mean_validation_score == best_score]
assert_true(search.best_params_ in tied_best_params,
"best_params_={0} is not part of the"
" tied best models: {1}".format(
search.best_params_, tied_best_params))
def test_grid_search_score_consistency():
# test that correct scores are used
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
for score in ['f1', 'roc_auc']:
grid_search = GridSearchCV(clf, {'C': Cs}, scoring=score)
grid_search.fit(X, y)
cv = StratifiedKFold(n_folds=3, y=y)
for C, scores in zip(Cs, grid_search.grid_scores_):
clf.set_params(C=C)
scores = scores[2] # get the separate runs from grid scores
i = 0
for train, test in cv:
clf.fit(X[train], y[train])
if score == "f1":
correct_score = f1_score(y[test], clf.predict(X[test]))
elif score == "roc_auc":
dec = clf.decision_function(X[test])
correct_score = roc_auc_score(y[test], dec)
assert_almost_equal(correct_score, scores[i])
i += 1
def test_pickle():
# Test that a fit search can be pickled
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=True)
grid_search.fit(X, y)
pickle.dumps(grid_search) # smoke test
random_search = RandomizedSearchCV(clf, {'foo_param': [1, 2, 3]},
refit=True, n_iter=3)
random_search.fit(X, y)
pickle.dumps(random_search) # smoke test
def test_grid_search_with_multioutput_data():
# Test search with multi-output estimator
X, y = make_multilabel_classification(return_indicator=True,
random_state=0)
est_parameters = {"max_depth": [1, 2, 3, 4]}
cv = KFold(y.shape[0], random_state=0)
estimators = [DecisionTreeRegressor(random_state=0),
DecisionTreeClassifier(random_state=0)]
# Test with grid search cv
for est in estimators:
grid_search = GridSearchCV(est, est_parameters, cv=cv)
grid_search.fit(X, y)
for parameters, _, cv_validation_scores in grid_search.grid_scores_:
est.set_params(**parameters)
for i, (train, test) in enumerate(cv):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(correct_score,
cv_validation_scores[i])
# Test with a randomized search
for est in estimators:
random_search = RandomizedSearchCV(est, est_parameters,
cv=cv, n_iter=3)
random_search.fit(X, y)
for parameters, _, cv_validation_scores in random_search.grid_scores_:
est.set_params(**parameters)
for i, (train, test) in enumerate(cv):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(correct_score,
cv_validation_scores[i])
def test_predict_proba_disabled():
# Test predict_proba when disabled on estimator.
X = np.arange(20).reshape(5, -1)
y = [0, 0, 1, 1, 1]
clf = SVC(probability=False)
gs = GridSearchCV(clf, {}, cv=2).fit(X, y)
assert_false(hasattr(gs, "predict_proba"))
def test_grid_search_allows_nans():
# Test GridSearchCV with Imputer
X = np.arange(20, dtype=np.float64).reshape(5, -1)
X[2, :] = np.nan
y = [0, 0, 1, 1, 1]
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
GridSearchCV(p, {'classifier__foo_param': [1, 2, 3]}, cv=2).fit(X, y)
class FailingClassifier(BaseEstimator):
"""Classifier that raises a ValueError on fit()"""
FAILING_PARAMETER = 2
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y=None):
if self.parameter == FailingClassifier.FAILING_PARAMETER:
raise ValueError("Failing classifier failed as required")
def predict(self, X):
return np.zeros(X.shape[0])
def test_grid_search_failing_classifier():
# GridSearchCV with on_error != 'raise'
# Ensures that a warning is raised and score reset where appropriate.
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we only want to check that errors caused by fits
# to individual folds will be caught and warnings raised instead. If
# refit was done, then an exception would be raised on refit and not
# caught by grid_search (expected behavior), and this would cause an
# error in this test.
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=0.0)
assert_warns(FitFailedWarning, gs.fit, X, y)
# Ensure that grid scores were set to zero as required for those fits
# that are expected to fail.
assert all(np.all(this_point.cv_validation_scores == 0.0)
for this_point in gs.grid_scores_
if this_point.parameters['parameter'] ==
FailingClassifier.FAILING_PARAMETER)
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=float('nan'))
assert_warns(FitFailedWarning, gs.fit, X, y)
assert all(np.all(np.isnan(this_point.cv_validation_scores))
for this_point in gs.grid_scores_
if this_point.parameters['parameter'] ==
FailingClassifier.FAILING_PARAMETER)
def test_grid_search_failing_classifier_raise():
# GridSearchCV with on_error == 'raise' raises the error
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we want to test the behaviour of the grid search part
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score='raise')
# FailingClassifier issues a ValueError so this is what we look for.
assert_raises(ValueError, gs.fit, X, y)
def test_parameters_sampler_replacement():
# raise error if n_iter too large
params = {'first': [0, 1], 'second': ['a', 'b', 'c']}
sampler = ParameterSampler(params, n_iter=7)
assert_raises(ValueError, list, sampler)
# degenerates to GridSearchCV if n_iter the same as grid_size
sampler = ParameterSampler(params, n_iter=6)
samples = list(sampler)
assert_equal(len(samples), 6)
for values in ParameterGrid(params):
assert_true(values in samples)
# test sampling without replacement in a large grid
params = {'a': range(10), 'b': range(10), 'c': range(10)}
sampler = ParameterSampler(params, n_iter=99, random_state=42)
samples = list(sampler)
assert_equal(len(samples), 99)
hashable_samples = ["a%db%dc%d" % (p['a'], p['b'], p['c'])
for p in samples]
assert_equal(len(set(hashable_samples)), 99)
# doesn't go into infinite loops
params_distribution = {'first': bernoulli(.5), 'second': ['a', 'b', 'c']}
sampler = ParameterSampler(params_distribution, n_iter=7)
samples = list(sampler)
assert_equal(len(samples), 7)
| bsd-3-clause |
X-DataInitiative/tick | examples/plot_2d_linear_regression.py | 2 | 3541 | """
===================================
Linear regression in two dimensions
===================================
In this example, we try to predict the median price of houses in Boston's
neighborhoods by looking at two features: the average number of rooms per
dwelling and the percentage of low status in the population.
The linear regression is done using the `Boston Housing Dataset`_ which
contains 13 features used to predict the median price of houses.
The two features selected are those that perform best on the test set.
The 3D representation allows a better understanding of the prediction
mechanism with two features.
This example is inspired by the linear regression example from
`scikit-learn documentation`_.
.. _Boston Housing Dataset: https://www.kaggle.com/c/boston-housing
.. _scikit-learn documentation: http://scikit-learn.org/stable/auto_examples/linear_model/plot_ols.html#sphx-glr-auto-examples-linear-model-plot-ols-py
"""
import matplotlib.pyplot as plt
import numpy as np
from tick import linear_model
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.utils import shuffle
from sklearn.datasets import load_boston
from mpl_toolkits.mplot3d import axes3d
from matplotlib import cm
# Load the Boston Housing Dataset
features, label = load_boston(return_X_y=True)
features, label = shuffle(features, label, random_state=0)
# Use two features: the average of the number of rooms per dwelling and
# the pencentage of low status of the population
X = features[:, [5, 12]]
# Split the data into training/testing sets
n_train_data = int(0.8 * X.shape[0])
X_train = X[:n_train_data]
X_test = X[n_train_data:]
y_train = label[:n_train_data]
y_test = label[n_train_data:]
# Create linear regression and fit it on the training set
regr = linear_model.LinearRegression()
regr.fit(X_train, y_train)
# Make predictions using the testing set
y_pred = regr.predict(X_test)
print('Coefficients:')
print(' intercept: {:.2f}'.format(regr.intercept))
print(' average room per dwelling: {:.2f}'.format(regr.weights[0]))
print(' percentage of low status in population: {:.2f}'
.format(regr.weights[1]))
# The mean squared error
print('Mean squared error on test set: {:.2f}'.format(
mean_squared_error(y_test, y_pred)))
# Explained variance score: 1 is perfect prediction
print('Variance score on test set: {:.2f}'.format(r2_score(y_test, y_pred)))
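# Hedged cross-check (assumption: with no penalty, tick's LinearRegression solves
# ordinary least squares, so a direct least-squares solve on the augmented design
# matrix should give nearly the same weights and intercept):
X_design = np.hstack([X_train, np.ones((X_train.shape[0], 1))])  # add intercept column
w_ols = np.linalg.lstsq(X_design, y_train, rcond=None)[0]
print('Closed-form OLS check: weights {} intercept {:.2f}'.format(w_ols[:2], w_ols[2]))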
# To work in 3D
# We first generate a mesh grid
resolution = 10
x = X_test[:, 0]
y = X_test[:, 1]
z = y_test
x_surf = np.linspace(min(x), max(x), resolution)
y_surf = np.linspace(min(y), max(y), resolution)
x_surf, y_surf = np.meshgrid(x_surf, y_surf)
# and then predict the label for all values in the grid
z_surf = np.zeros_like(x_surf)
mesh_points = np.vstack((x_surf.ravel(), y_surf.ravel())).T
z_surf.ravel()[:] = regr.predict(mesh_points)
fig = plt.figure(figsize=(20, 5))
# 3D representation under different rotation angles for better visualization
xy_angles = [10, 35, 60, 85]
z_angle = 20
for i, angle in enumerate(xy_angles):
n_columns = len(xy_angles)
position = i + 1
ax = fig.add_subplot(1, n_columns, position, projection='3d')
ax.view_init(z_angle, angle)
ax.plot_surface(x_surf, y_surf, z_surf, cmap=cm.hot, rstride=1, cstride=1,
alpha=0.3, linewidth=0.2, edgecolors='black')
ax.scatter(x, y, z)
ax.set_title('angle: {}°'.format(angle))
ax.set_zlabel('median house pricing')
ax.set_xlabel('avg room per dwelling')
ax.set_ylabel('% low status population')
plt.show()
| bsd-3-clause |
saketkc/statsmodels | statsmodels/iolib/tests/test_summary.py | 31 | 1535 | '''examples to check summary, not converted to tests yet
'''
from __future__ import print_function
if __name__ == '__main__':
from statsmodels.regression.tests.test_regression import TestOLS
#def mytest():
aregression = TestOLS()
TestOLS.setupClass()
results = aregression.res1
r_summary = str(results.summary_old())
print(r_summary)
olsres = results
print('\n\n')
r_summary = str(results.summary())
print(r_summary)
print('\n\n')
from statsmodels.discrete.tests.test_discrete import TestProbitNewton
aregression = TestProbitNewton()
TestProbitNewton.setupClass()
results = aregression.res1
r_summary = str(results.summary())
print(r_summary)
print('\n\n')
probres = results
from statsmodels.robust.tests.test_rlm import TestHampel
aregression = TestHampel()
#TestHampel.setupClass()
results = aregression.res1
r_summary = str(results.summary())
print(r_summary)
rlmres = results
print('\n\n')
from statsmodels.genmod.tests.test_glm import TestGlmBinomial
aregression = TestGlmBinomial()
#TestGlmBinomial.setupClass()
results = aregression.res1
r_summary = str(results.summary())
print(r_summary)
#print(results.summary2(return_fmt='latex'))
#print(results.summary2(return_fmt='csv'))
smry = olsres.summary()
print(smry.as_csv())
# import matplotlib.pyplot as plt
# plt.plot(rlmres.model.endog,'o')
# plt.plot(rlmres.fittedvalues,'-')
#
# plt.show() | bsd-3-clause |
CoCoMol/CoCoPy | modules/analysis/util.py | 2 | 9941 | #!/usr/bin/python
# -*- coding: utf-8 -*-
################################################################################
#
# CoCoPy - A python toolkit for rotational spectroscopy
#
# Copyright (c) 2016 by David Schmitz ([email protected]).
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the “Software”), to deal in the
# Software without restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
# PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH
# THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# MIT Licence (http://mit-license.org/)
#
################################################################################
from __future__ import print_function
import numpy as np
import io
import scipy.constants as constants
import sys
import glob
data_path = '/Volumes/DATA/'
lines = dict()
lines['acetone'] = [5253.063, 5269.071, 5270.904, 5276.057, 7343.774, 7398.953, 7399.964, 7453.183]
lines['background'] = [
2050.00, 2060.00, 2070.00, 2090.00, 2100.00, 2187.51, 2200.01, 2250.00, 2260.00, 2270.00,
2280.00, 2290.00, 2310.00, 2330.00, 2339.99, 2349.99, 2359.99, 2369.99, 2379.99, 2390.01,
2409.99, 2420.01, 2437.51, 2450.00, 2460.00, 2470.00, 2480.00, 2500.00, 2540.02, 2549.99,
2559.99, 2569.99, 2600.01, 2650.01, 2660.00, 2670.00, 2690.00, 2700.00, 2750.00, 2754.99,
2799.99, 2810.01, 2812.51, 2890.00, 2900.00, 2920.00, 2960.00, 2964.99, 2969.99, 2979.99,
2989.99, 2999.99, 3005.01, 3010.01, 3020.01, 3030.01, 3045.01, 3050.01, 3060.01, 3090.00,
3100.00, 3110.00, 3125.00, 3187.49, 3245.01, 3250.01, 3360.00, 3399.99, 3437.51, 3490.00,
3495.00, 3500.00, 3515.00, 3520.00, 3710.00, 3750.00, 3799.99, 3809.99, 4125.00, 4687.51,
5840.00, 5860.00, 5870.00, 5874.99, 5879.99, 6000.00, 6010.00, 6020.00, 2010.00, 2020.00,
6089.99, 6130.01, 6170.01, 6249.98, 6250.00, 6330.01, 6370.01, 6410.00, 6620.00, 6630.00,
6640.00, 7812.51, 2300.00, 4500.00, 7500.00, 2130.00, 2240.00, 2110.00]
'''
Todo:
- 1 Write comments
- 2 Write parser for out file
- 3
'''
def stylePlot(data='spectrum'):
import matplotlib.pyplot as plt
ax = plt.gca()
if data == 'spectrum':
xlabel = r'Frequency [MHz]'
ylabel = r'Intensity [arb. u.]'
ax.ticklabel_format(axis='y', style='sci', scilimits=(-2,2))
elif data == 'fid':
xlabel = r'Time [$\mu s$]'
ylabel = r'Intensity [arb. u.]'
ax.ticklabel_format(axis='y', style='sci', scilimits=(-2,2))
ax.ticklabel_format(axis='x', style='plain', useOffset=False)
elif data == 'intensity':
xlabel = r'Frequency [MHz]'
ylabel = r'Intensity [arb. u.]'
ax.ticklabel_format(axis='y', style='sci', scilimits=(-2,2))
elif data == 'energy':
xlabel = r'Angle [$\degree$]'
ylabel = r'Energy [kcal/mol]'
ax.ticklabel_format(axis='y', style='sci', scilimits=(-4,4), useOffset=False)
elif data == 'phase':
xlabel = r'Frequency [MHz]'
ylabel = r'Phase [radians]'
plt.ylim(-3.5, 3.5)
plt.yticks([-np.pi, -np.pi/2, 0., np.pi/2, np.pi], [r'$-\pi$', r'$-\pi/2$', r'$0$', r'$\pi/2$', r'$\pi$'])
plt.title(r'Plot', fontsize=20, family='serif', position=(0.5, 1.05))
plt.xlabel(xlabel, fontsize=20, horizontalalignment='center')
plt.ylabel(ylabel, fontsize=20)
plt.yticks(fontsize=16)
plt.xticks(fontsize=16)
ax.yaxis.set_tick_params(pad=8)
ax.xaxis.set_tick_params(pad=8)
ax.xaxis.set_label_coords(0.5, -0.1)
ax.yaxis.set_label_coords(-0.07, .5)
def pubPlot(width = 384):
import matplotlib.pyplot as plt
fig_width_pt = width # Get this from LaTeX using \the\columnwidth
inches_per_pt = 1.0/72.27 # Convert pt to inches
golden_mean = (np.sqrt(5)-1.0)/2.0 # Aesthetic ratio
fig_width = fig_width_pt*inches_per_pt # width in inches
fig_height =fig_width*golden_mean # height in inches
fig_size = [fig_width,fig_height]
params = {'backend': 'pdf',
'axes.labelsize': 10,
'text.fontsize': 10,
'legend.fontsize': 10,
'xtick.labelsize': 8,
'ytick.labelsize': 8,
'text.usetex': True,
'figure.figsize': fig_size,
'font.size': 8,
'font.family': 'serif',
'font.serif': 'serif',
'pdf.fonttype':42}
plt.rcParams.update(params)
plt.figure(1)
plt.clf()
plt.axes([0.125,0.2,0.95-0.125,0.95-0.2])
def execute_notebook(nbfile):
# execute_notebook("loaddata.ipynb")
from IPython import nbformat
with io.open(nbfile) as f:
nb = nbformat.read(f, 'json')
ip = get_ipython()
for cell in nb.worksheets[0].cells:
if cell.cell_type != 'code':
continue
ip.run_cell(cell.input)
def convert_energy(energy, origin='hartree', dest='wavenumber'):
#http://docs.scipy.org/doc/scipy/reference/constants.html#module-scipy.constants
energy_h = 0.
if origin == 'hartree':
energy_h = constants.physical_constants['hartree-joule relationship'][0] * energy
elif origin == 'wavenumber' or origin == 'cm-1':
energy_h = constants.physical_constants['inverse meter-joule relationship'][0] * energy * 100
elif origin == 'joule' or origin == 'J':
energy_h = 1. * energy
elif origin == 'hertz' or origin == 'Hz':
energy_h = constants.physical_constants['hertz-joule relationship'][0] * energy
elif origin == 'eV':
energy_h = constants.physical_constants['electron volt-joule relationship'][0] * energy
elif origin == 'wavelength' or origin == 'm':
energy_h = constants.c * constants.h / energy
elif origin == 'kCal/mol':
energy_h = energy * 4184.0 / constants.Avogadro
elif origin == 'kJ/mol':
energy_h = energy * 1000. / constants.Avogadro
if dest == 'hartree':
energy_h = constants.physical_constants['joule-hartree relationship'][0] * energy_h
elif dest == 'wavenumber' or dest == 'cm-1':
energy_h = constants.physical_constants['joule-inverse meter relationship'][0] * energy_h / 100.
elif dest == 'joule' or dest == 'J':
energy_h = 1. * energy_h
elif dest == 'hertz' or dest == 'Hz':
energy_h = constants.physical_constants['joule-hertz relationship'][0] * energy_h
elif dest == 'eV':
energy_h = constants.physical_constants['joule-electron volt relationship'][0] * energy_h
elif dest == 'wavelength' or dest == 'm':
energy_h = constants.c * constants.h / energy_h
elif dest == 'kCal/mol':
energy_h = energy_h / 4184.0 * constants.Avogadro
elif dest == 'kJ/mol':
energy_h = energy_h / 1000. * constants.Avogadro
return energy_h
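# Usage sketch (rounded reference values, assuming the CODATA constants shipped
# with scipy.constants):
#   convert_energy(1.0, origin='hartree', dest='wavenumber') -> ~219474.63 (cm-1)
#   convert_energy(1.0, origin='eV', dest='kJ/mol')          -> ~96.485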
def convert_rot(value):
#http://docs.scipy.org/doc/scipy/reference/constants.html#module-scipy.constants
return constants.h / (8. * np.pi**2 * value * 1.E6 * constants.atomic_mass) * 1.E20
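# Usage sketch: convert_rot(B) maps a rotational constant in MHz to a moment of
# inertia in amu*Angstrom^2 (and back, since the relation is its own inverse),
# e.g. convert_rot(5000.0) -> ~101.08, consistent with B[MHz] ~ 505379 / I[amu A^2].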
class ProgressBar:
def __init__(self, iterations):
self.iterations = iterations
self.prog_bar = '[]'
self.fill_char = '*'
self.width = 50
self.__update_amount(0)
def animate(self, iter):
print('\r', self, end='')
sys.stdout.flush()
self.update_iteration(iter + 1)
def update_iteration(self, elapsed_iter):
self.__update_amount((elapsed_iter / float(self.iterations)) * 100.0)
self.prog_bar += ' %d of %s complete' % (elapsed_iter, self.iterations)
def __update_amount(self, new_amount):
percent_done = int(round((new_amount / 100.0) * 100.0))
all_full = self.width - 2
num_hashes = int(round((percent_done / 100.0) * all_full))
self.prog_bar = '[' + self.fill_char * num_hashes + ' ' * (all_full - num_hashes) + ']'
pct_place = (len(self.prog_bar) // 2) - len(str(percent_done))
pct_string = '%d%%' % percent_done
self.prog_bar = self.prog_bar[0:pct_place] + \
(pct_string + self.prog_bar[pct_place + len(pct_string):])
def __str__(self):
return str(self.prog_bar)
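# Usage sketch:
#   pb = ProgressBar(200)
#   for i in range(200):
#       do_work(i)        # placeholder for the actual per-iteration work
#       pb.animate(i)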
def draw_cube(ax, size, pos=[0,0,0], edges = False, color = 'red', alpha=0.1):
import matplotlib.pyplot as plt
import matplotlib.colors as co
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
from itertools import product
h = list(np.array([-1., 1.])*size)
tupleList=np.array([x for x in product(h,h,h)])
tupleList[:,0] +=pos[0]; tupleList[:,1] += pos[1]; tupleList[:,2] += pos[2]
vertices = [[0, 1, 3, 2], [0, 1, 5, 4], [0, 2, 6, 4], [2, 3, 7, 6], [4, 5, 7, 6], [1, 3, 7, 5]]
cc = lambda arg: co.colorConverter.to_rgba(arg, alpha=alpha)
poly3d = [[tupleList[vertices[ix][iy]] for iy in range(len(vertices[0]))] for ix in range(len(vertices))]
if edges == True:
ax.scatter(tupleList[:,0],tupleList[:,1],tupleList[:,2], c=color)
collection = Poly3DCollection(poly3d, linewidths=.2, linestyles='-', edgecolors=['grey'], facecolors = [cc('red')])
#j = co.ColorConverter()
#face_color = co.rgb2hex(j.to_rgb(color))
#collection.set_facecolor(face_color)
ax.add_collection3d(collection)
| mit |
gicsi/aap | src/machine_learning/nltk-trainer-master/train_classifier.py | 5 | 16940 | #!/usr/bin/env python
import argparse, collections, functools, itertools, math, operator, os.path, re, string, sys
import nltk.data
import nltk_trainer.classification.args
from nltk.classify import DecisionTreeClassifier, MaxentClassifier, NaiveBayesClassifier
from nltk.classify.util import accuracy
from nltk.corpus import stopwords
from nltk.corpus.reader import CategorizedPlaintextCorpusReader, CategorizedTaggedCorpusReader
from nltk.corpus.util import LazyCorpusLoader
from nltk.metrics import BigramAssocMeasures, f_measure, masi_distance, precision, recall
from nltk.probability import FreqDist, ConditionalFreqDist
from nltk.util import ngrams
from nltk_trainer import dump_object, import_attr, iteritems, load_corpus_reader
from nltk_trainer.classification import corpus, scoring
from nltk_trainer.classification.featx import (bag_of_words, bag_of_words_in_set,
word_counts, train_test_feats, word_counts_in_set)
from nltk_trainer.classification.multi import MultiBinaryClassifier
########################################
## command options & argument parsing ##
########################################
parser = argparse.ArgumentParser(description='Train a NLTK Classifier')
parser.add_argument('corpus', help='corpus name/path relative to an nltk_data directory')
parser.add_argument('--filename', help='''filename/path for where to store the
pickled classifier, the default is {corpus}_{algorithm}.pickle in
~/nltk_data/classifiers''')
parser.add_argument('--no-pickle', action='store_true', default=False,
help="don't pickle and save the classifier")
parser.add_argument('--classifier', '--algorithm', default=['NaiveBayes'], nargs='+',
choices=nltk_trainer.classification.args.classifier_choices,
help='''Classifier algorithm to use, defaults to %(default)s. Maxent uses the
default Maxent training algorithm, either CG or iis.''')
parser.add_argument('--trace', default=1, type=int,
help='How much trace output you want, defaults to 1. 0 is no trace output.')
parser.add_argument('--show-most-informative', default=0, type=int,
help='number of most informative features to show, works for all algorithms except DecisionTree')
corpus_group = parser.add_argument_group('Training Corpus')
corpus_group.add_argument('--reader',
default='nltk.corpus.reader.CategorizedPlaintextCorpusReader',
help='Full module path to a corpus reader class, such as %(default)s')
corpus_group.add_argument('--cat_pattern', default='(.+)/.+',
help='''A regular expression pattern to identify categories based on file paths.
If cat_file is also given, this pattern is used to identify corpus file ids.
The default is '(.+)/.+', which uses sub-directories as categories.''')
corpus_group.add_argument('--cat_file',
help='relative path to a file containing category listings')
corpus_group.add_argument('--delimiter', default=' ',
help='category delimiter for category file, defaults to space')
corpus_group.add_argument('--instances', default='files',
choices=('sents', 'paras', 'files'),
help='''the group of words that represents a single training instance,
the default is to use entire files''')
corpus_group.add_argument('--fraction', default=1.0, type=float,
help='''The fraction of the corpus to use for training a binary or
multi-class classifier; the rest will be used for evaluation.
The default is to use the entire corpus, and to test the classifier
against the same training data. Any number < 1 will test against
the remaining fraction.''')
corpus_group.add_argument('--train-prefix', default=None,
help='optional training fileid prefix for multi classifiers')
corpus_group.add_argument('--test-prefix', default=None,
help='optional testing fileid prefix for multi classifiers')
corpus_group.add_argument('--word-tokenizer', default='', help='Word Tokenizer class path')
corpus_group.add_argument('--sent-tokenizer', default='', help='Sent Tokenizer data.pickle path')
corpus_group.add_argument('--para-block-reader', default='', help='Block reader function path')
corpus_group.add_argument('--labels', default=[],
help='''If given a comma-separated list of labels, the categories defined by the corpus are ignored''')
classifier_group = parser.add_argument_group('Classifier Type',
'''A binary classifier has only 2 labels, and is the default classifier type.
A multi-class classifier chooses one of many possible labels.
A multi-binary classifier choose zero or more labels by combining multiple
binary classifiers, 1 for each label.''')
classifier_group.add_argument('--binary', action='store_true', default=False,
help='train a binary classifier, or a multi-binary classifier if --multi is also given')
classifier_group.add_argument('--multi', action='store_true', default=False,
help='train a multi-class classifier, or a multi-binary classifier if --binary is also given')
feat_group = parser.add_argument_group('Feature Extraction',
'The default is to lowercase every word, strip punctuation, and use stopwords')
feat_group.add_argument('--ngrams', nargs='+', type=int,
help='use n-grams as features.')
feat_group.add_argument('--no-lowercase', action='store_true', default=False,
help="don't lowercase every word")
feat_group.add_argument('--filter-stopwords', default='no',
choices=['no']+stopwords.fileids(),
help='language stopwords to filter, defaults to "no" to keep stopwords')
feat_group.add_argument('--punctuation', action='store_true', default=False,
help="don't strip punctuation")
feat_group.add_argument('--value-type', default='bool', choices=('bool', 'int', 'float'),
help='''Data type of values in featuresets. The default is bool, which ignores word counts.
Use int to get word and/or ngram counts.''')
score_group = parser.add_argument_group('Feature Scoring',
'The default is no scoring, all words are included as features')
score_group.add_argument('--score_fn', default='chi_sq',
choices=[f for f in dir(BigramAssocMeasures) if not f.startswith('_')],
help='scoring function for information gain and bigram collocations, defaults to chi_sq')
score_group.add_argument('--min_score', default=0, type=int,
help='minimum score for a word to be included, default is 0 to include all words')
score_group.add_argument('--max_feats', default=0, type=int,
help='maximum number of words to include, ordered by highest score, default is 0 to include all words')
eval_group = parser.add_argument_group('Classifier Evaluation',
'''The default is to test the classifier against the unused fraction of the
corpus, or against the entire corpus if the whole corpus is used for training.''')
eval_group.add_argument('--no-eval', action='store_true', default=False,
help="don't do any evaluation")
eval_group.add_argument('--no-accuracy', action='store_true', default=False,
help="don't evaluate accuracy")
eval_group.add_argument('--no-precision', action='store_true', default=False,
help="don't evaluate precision")
eval_group.add_argument('--no-recall', action='store_true', default=False,
help="don't evaluate recall")
eval_group.add_argument('--no-fmeasure', action='store_true', default=False,
help="don't evaluate f-measure")
eval_group.add_argument('--no-masi-distance', action='store_true', default=False,
help="don't evaluate masi distance (only applies to a multi binary classifier)")
eval_group.add_argument('--cross-fold', type=int, default=0,
help='''If given a number greater than 2, will do cross fold validation
instead of normal training and testing. This option implies --no-pickle,
is useless with --trace 0 and/or --no-eval, and currently does not work
with --multi --binary.
''')
nltk_trainer.classification.args.add_maxent_args(parser)
nltk_trainer.classification.args.add_decision_tree_args(parser)
nltk_trainer.classification.args.add_sklearn_args(parser)
args = parser.parse_args()
###################
## corpus reader ##
###################
reader_args = []
reader_kwargs = {}
if args.cat_file:
reader_kwargs['cat_file'] = args.cat_file
if args.delimiter and args.delimiter != ' ':
reader_kwargs['delimiter'] = args.delimiter
if args.cat_pattern:
reader_args.append(args.cat_pattern)
else:
reader_args.append('.+/.+')
elif args.cat_pattern:
reader_args.append(args.cat_pattern)
reader_kwargs['cat_pattern'] = re.compile(args.cat_pattern)
if args.word_tokenizer:
reader_kwargs['word_tokenizer'] = import_attr(args.word_tokenizer)()
if args.sent_tokenizer:
reader_kwargs['sent_tokenizer'] = nltk.data.LazyLoader(args.sent_tokenizer)
if args.para_block_reader:
reader_kwargs['para_block_reader'] = import_attr(args.para_block_reader)
if args.trace:
print('loading %s' % args.corpus)
categorized_corpus = load_corpus_reader(args.corpus, args.reader,
*reader_args, **reader_kwargs)
if not hasattr(categorized_corpus, 'categories'):
raise ValueError('%s does not have categories for classification' % args.corpus)
if len(args.labels) > 0:
labels = args.labels.split(",")
else:
labels = categorized_corpus.categories()
nlabels = len(labels)
if args.trace:
print('%d labels: %s' % (nlabels, labels))
if not nlabels:
raise ValueError('corpus does not have any categories')
elif nlabels == 1:
raise ValueError('corpus must have more than 1 category')
elif nlabels == 2 and args.multi:
raise ValueError('corpus must have more than 2 categories if --multi is specified')
########################
## text normalization ##
########################
if args.filter_stopwords == 'no':
stopset = set()
else:
stopset = set(stopwords.words(args.filter_stopwords))
def norm_words(words):
if not args.no_lowercase:
words = (w.lower() for w in words)
if not args.punctuation:
words = (w.strip(string.punctuation) for w in words)
words = (w for w in words if w)
if stopset:
words = (w for w in words if w.lower() not in stopset)
# in case we modified words in a generator, ensure it's a list so we can add together
if not isinstance(words, list):
words = list(words)
if args.ngrams:
return functools.reduce(operator.add, [words if n == 1 else list(ngrams(words, n)) for n in args.ngrams])
else:
return words
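# Worked example (defaults plus --ngrams 1 2): norm_words(['The', 'quick', 'fox!'])
# lowercases, strips punctuation and returns unigrams plus bigram tuples:
# ['the', 'quick', 'fox', ('the', 'quick'), ('quick', 'fox')]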
#####################
## text extraction ##
#####################
if args.multi and args.binary:
label_instance_function = {
'sents': corpus.multi_category_sent_words,
'paras': corpus.multi_category_para_words,
'files': corpus.multi_category_file_words
}
lif = label_instance_function[args.instances]
train_instances = lif(categorized_corpus, args.train_prefix)
test_instances = lif(categorized_corpus, args.test_prefix)
# if we need all the words by category for score_fn, use this method
def category_words():
'''
return an iteration of tuples of category and list of all words in instances of that category.
Used if we are scoring the words for correlation to categories for feature selection (i.e.,
score_fn and max_feats are set)
'''
		cat_words = defaultdict(list)
for (words, cats) in train_instances:
if isinstance(cats, collections.Iterable):
for cat in cats:
cat_words[cat].extend(words)
else:
cat_words[cats].extend(words)
return iteritems(cat_words)
else:
def split_list(lis, fraction):
'''split a list into 2 lists based on the fraction provided. Used to break the instances into
train and test sets'''
if fraction != 1.0:
l = len(lis)
cutoff = int(math.ceil(l * fraction))
return lis[0:cutoff], lis[cutoff:]
else:
return lis, []
label_instance_function = {
'sents': corpus.category_sent_words,
'paras': corpus.category_para_words,
'files': corpus.category_file_words
}
lif = label_instance_function[args.instances]
train_instances = {}
test_instances = {}
for label in labels:
instances = (norm_words(i) for i in lif(categorized_corpus, label))
instances = [i for i in instances if i]
train_instances[label], test_instances[label] = split_list(instances, args.fraction)
if args.trace > 1:
info = (label, len(train_instances[label]), len(test_instances[label]))
print('%s: %d training instances, %d testing instances' % info)
# if we need all the words by category for score_fn, use this method
def category_words():
'''
return an iteration of tuples of category and list of all words in instances of that category.
Used if we are scoring the words for correlation to categories for feature selection (i.e.,
score_fn and max_feats are set)
'''
return ((cat, (word for i in instance_list for word in i)) for cat, instance_list in iteritems(train_instances))
##################
## word scoring ##
##################
score_fn = getattr(BigramAssocMeasures, args.score_fn)
if args.min_score or args.max_feats:
if args.trace:
print('calculating word scores')
# flatten the list of instances to a single iteration of all the words
cat_words = category_words()
ws = scoring.sorted_word_scores(scoring.sum_category_word_scores(cat_words, score_fn))
if args.min_score:
ws = [(w, s) for (w, s) in ws if s >= args.min_score]
if args.max_feats:
ws = ws[:args.max_feats]
bestwords = set([w for (w, s) in ws])
if args.value_type == 'bool':
if args.trace:
print('using bag of words from known set feature extraction')
featx = lambda words: bag_of_words_in_set(words, bestwords)
else:
if args.trace:
print('using word counts from known set feature extraction')
featx = lambda words: word_counts_in_set(words, bestwords)
if args.trace:
print('%d words meet min_score and/or max_feats' % len(bestwords))
elif args.value_type == 'bool':
if args.trace:
print('using bag of words feature extraction')
featx = bag_of_words
else:
if args.trace:
print('using word counts feature extraction')
featx = word_counts
#########################
## extracting features ##
#########################
def extract_features(label_instances, featx):
if isinstance(label_instances, dict):
# for not (args.multi and args.binary)
# e.g., li = { 'spam': [ ['hello','world',...], ... ], 'ham': [ ['lorem','ipsum'...], ... ] }
feats = []
for label, instances in iteritems(label_instances):
feats.extend([(featx(i), label) for i in instances])
else:
# for arg.multi and args.binary
# e.g., li = [ (['hello','world',...],label1), (['lorem','ipsum'],label2) ]
feats = [(featx(i), label) for i, label in label_instances ]
return feats
train_feats = extract_features(train_instances, featx)
test_feats = extract_features(test_instances, featx)
# if there were no instances reserved for testing, test over the whole training set
if not test_feats:
test_feats = train_feats
if args.trace:
print('%d training feats, %d testing feats' % (len(train_feats), len(test_feats)))
##############
## training ##
##############
trainf = nltk_trainer.classification.args.make_classifier_builder(args)
if args.cross_fold:
if args.multi and args.binary:
raise NotImplementedError ("cross-fold is not supported for multi-binary classifiers")
scoring.cross_fold(train_feats, trainf, accuracy, folds=args.cross_fold,
trace=args.trace, metrics=not args.no_eval, informative=args.show_most_informative)
sys.exit(0)
if args.multi and args.binary:
if args.trace:
print('training multi-binary %s classifier' % args.classifier)
classifier = MultiBinaryClassifier.train(labels, train_feats, trainf)
else:
classifier = trainf(train_feats)
################
## evaluation ##
################
if not args.no_eval:
if not args.no_accuracy:
try:
print('accuracy: %f' % accuracy(classifier, test_feats))
except ZeroDivisionError:
print('accuracy: 0')
if args.multi and args.binary and not args.no_masi_distance:
print('average masi distance: %f' % (scoring.avg_masi_distance(classifier, test_feats)))
if not args.no_precision or not args.no_recall or not args.no_fmeasure:
if args.multi and args.binary:
refsets, testsets = scoring.multi_ref_test_sets(classifier, test_feats)
else:
refsets, testsets = scoring.ref_test_sets(classifier, test_feats)
for label in labels:
ref = refsets[label]
test = testsets[label]
if not args.no_precision:
print('%s precision: %f' % (label, precision(ref, test) or 0))
if not args.no_recall:
print('%s recall: %f' % (label, recall(ref, test) or 0))
if not args.no_fmeasure:
print('%s f-measure: %f' % (label, f_measure(ref, test) or 0))
if args.show_most_informative and hasattr(classifier, 'show_most_informative_features') and not (args.multi and args.binary) and not args.cross_fold:
print('%d most informative features' % args.show_most_informative)
classifier.show_most_informative_features(args.show_most_informative)
##############
## pickling ##
##############
if not args.no_pickle:
if args.filename:
fname = os.path.expanduser(args.filename)
else:
corpus_clean = os.path.split(args.corpus.rstrip('/'))[1]
name = '%s_%s.pickle' % (corpus_clean, '_'.join(args.classifier))
fname = os.path.join(os.path.expanduser('~/nltk_data/classifiers'), name)
dump_object(classifier, fname, trace=args.trace)
| gpl-3.0 |
bsipocz/scikit-image | doc/examples/plot_nonlocal_means.py | 10 | 1317 | """
=================================================
Non-local means denoising for preserving textures
=================================================
In this example, we denoise a detail of the astronaut image using the non-local
means filter. The non-local means algorithm replaces the value of a pixel by an
average of a selection of other pixels values: small patches centered on the
other pixels are compared to the patch centered on the pixel of interest, and
the average is performed only for pixels that have patches close to the current
patch. As a result, this algorithm can restore well textures, that would be
blurred by other denoising algoritm.
"""
import numpy as np
import matplotlib.pyplot as plt
from skimage import data, img_as_float
from skimage.restoration import nl_means_denoising
astro = img_as_float(data.astronaut())
astro = astro[30:180, 150:300]
noisy = astro + 0.3 * np.random.random(astro.shape)
noisy = np.clip(noisy, 0, 1)
denoise = nl_means_denoising(noisy, 7, 9, 0.08)
fig, ax = plt.subplots(ncols=2, figsize=(8, 4))
ax[0].imshow(noisy)
ax[0].axis('off')
ax[0].set_title('noisy')
ax[1].imshow(denoise)
ax[1].axis('off')
ax[1].set_title('non-local means')
fig.subplots_adjust(wspace=0.02, hspace=0.2,
top=0.9, bottom=0.05, left=0, right=1)
plt.show()
| bsd-3-clause |
talbrecht/pism_pik | examples/std-greenland/basemapfigs.py | 1 | 6454 | #!/usr/bin/env python
# generate figures in Getting Started section of User's Manual
# usage:
# $ python basemapfigs.py FILEROOT [FIELD] [DPI]
# where
# FILEROOT root of NetCDF filename and output .png figures
# FIELD optional: one of {velbase_mag, [velsurf_mag], mask, usurf} (edit script to add more)
# DPI optional: resolution in dots per inch [200]
#
# equivalent usages:
# $ python basemapfigs.py g20km_10ka_hy velsurf_mag 200
# $ python basemapfigs.py g20km_10ka_hy velsurf_mag
# $ python basemapfigs.py g20km_10ka_hy
#
# generate figs like those in Getting Started section of User's Manual:
# $ for FLD in velsurf_mag usurf velbase_mag mask; do ./basemapfigs.py g20km_10ka_hy ${FLD}; done
#
# crop out western Greenland with command like this (uses ImageMagick):
# $ ./basemapfigs.py g20km_10ka_hy velsurf_mag 500
# $ convert -crop 600x800+400+800 +repage g20km_10ka_hy-velsurf_mag.png g20km-detail.png
#
# batch generate figures from a parameter study like this:
# $ for QQ in 0.1 0.5 1.0; do for EE in 1 3 6; do ../basemapfigs.py p10km_q${QQ}_e${EE} velsurf_mag 100; done; done
# $ for QQ in 0.1 0.5 1.0; do for EE in 1 3 6; do convert -crop 274x486+50+6 +repage p10km_q${QQ}_e${EE}-velsurf_mag.png p10km-${QQ}-${EE}-csurf.png; done; done
from mpl_toolkits.basemap import Basemap
import sys
try:
    from netCDF4 import Dataset as NC
except ImportError:
    print "netCDF4 is not installed!"
    sys.exit(1)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import colors
if len(sys.argv) < 2:
print "ERROR: first argument must be root of filename ..."
sys.exit(1)
rootname = sys.argv[1]
try:
nc = NC(rootname + '.nc', 'r')
except:
print "ERROR: can't read from file %s.nc ..." % rootname
sys.exit(2)
if len(sys.argv) >= 3:
field = sys.argv[2]
else:
field = 'velsurf_mag'
if len(sys.argv) >= 4:
mydpi = float(sys.argv[3])
else:
mydpi = 200
bluemarble = False # if True, use Blue Marble background
if (field == 'velsurf_mag') | (field == 'velbase_mag'):
fill = nc.variables[field]._FillValue
logscale = True
contour100 = True
myvmin = 1.0
myvmax = 6.0e3
ticklist = [2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000, 5000]
elif field == 'surfvelmag':
fill = 0.0
logscale = True
contour100 = True
myvmin = 1.0
myvmax = 6.0e3
ticklist = [2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000, 5000]
elif field == 'usurf':
fill = 0.0
logscale = False
contour100 = False
myvmin = 1.0
myvmax = 3500.0
ticklist = [100, 500, 1000, 1500, 2000, 2500, 3000, 3500]
elif field == 'mask':
fill = -1.0
logscale = False
contour100 = False
myvmin = 0.0
myvmax = 4.0
ticklist = [0, 1, 2, 3, 4]
elif field == 'basal_melt_rate_grounded':
fill = -2.0e+09
logscale = True
contour100 = False
myvmin = 0.9e-4
myvmax = 1.1
ticklist = [0.0001, 0.001, 0.01, 0.1, 1.0]
elif field == 'tillwat':
fill = -2.0e+09
logscale = False
contour100 = False
myvmin = 0.0
myvmax = 2.0
ticklist = [0.0, 0.5, 1.0, 1.5, 2.0]
elif field == 'bwat':
fill = -2.0e+09
logscale = True
contour100 = False
myvmin = 0.9e-4
myvmax = 1.1
ticklist = [0.0001, 0.001, 0.01, 0.1, 1.0]
elif field == 'bwprel':
fill = -2.0e+09
logscale = False
contour100 = False
myvmin = 0.0
myvmax = 1.0
ticklist = [0.0, 0.2, 0.4, 0.6, 0.8, 1.0]
else:
print 'invalid choice for FIELD option'
sys.exit(3)
# we need to know longitudes and latitudes corresponding to grid
lon = nc.variables['lon'][:]
lat = nc.variables['lat'][:]
if field == 'surfvelmag':
lon = np.squeeze(lon).transpose()
lat = np.squeeze(lat).transpose()
# x and y *in the dataset* are only used to determine plotting domain
# dimensions
if field == 'surfvelmag':
x = nc.variables['x1'][:]
y = nc.variables['y1'][:]
else:
x = nc.variables['x'][:]
y = nc.variables['y'][:]
width = x.max() - x.min()
height = y.max() - y.min()
# load data
if field == 'bwprel':
thkvar = np.squeeze(nc.variables['thk'][:])
myvar = np.squeeze(nc.variables['bwp'][:])
myvar = np.ma.array(myvar, mask=(thkvar == 0.0))
thkvar = np.ma.array(thkvar, mask=(thkvar == 0.0))
myvar = myvar / (910.0 * 9.81 * thkvar)
else:
myvar = np.squeeze(nc.variables[field][:])
# mask out ice free etc.; note 'mask' does not get masked
if (field == 'surfvelmag'):
myvar = myvar.transpose()
thkvar = np.squeeze(nc.variables['thk'][:]).transpose()
myvar = np.ma.array(myvar, mask=(thkvar == 0.0))
elif (field != 'mask'):
maskvar = np.squeeze(nc.variables['mask'][:])
if (field == 'basal_melt_rate_grounded') | (field == 'bwat'):
myvar[myvar < myvmin] = myvmin
if (field == 'usurf'):
myvar = np.ma.array(myvar, mask=(maskvar == 4))
else:
myvar = np.ma.array(myvar, mask=(maskvar != 2))
m = Basemap(width=1.1 * width, # width in projection coordinates, in meters
height=1.05 * height, # height
resolution='l', # coastline resolution, can be 'l' (low), 'h'
# (high) and 'f' (full)
projection='stere', # stereographic projection
lat_ts=71, # latitude of true scale
lon_0=-41, # longitude of the plotting domain center
lat_0=72) # latitude of the plotting domain center
# m.drawcoastlines()
# draw the Blue Marble background (requires PIL, the Python Imaging Library)
if bluemarble: # seems to reverse N and S
m.bluemarble()
# convert longitudes and latitudes to x and y:
xx, yy = m(lon, lat)
if contour100:
# mark 100 m/a contour in black:
m.contour(xx, yy, myvar, [100], colors="black")
# plot log color scale or not
if logscale:
m.pcolormesh(xx, yy, myvar,
norm=colors.LogNorm(vmin=myvmin, vmax=myvmax))
else:
m.pcolormesh(xx, yy, myvar, vmin=myvmin, vmax=myvmax)
# add a colorbar:
plt.colorbar(extend='both',
ticks=ticklist,
format="%d")
# draw parallels and meridians
# labels kwarg is where to draw ticks: [left, right, top, bottom]
m.drawparallels(np.arange(-55., 90., 5.), labels=[1, 0, 0, 0])
m.drawmeridians(np.arange(-120., 30., 10.), labels=[0, 0, 0, 1])
outname = rootname + '-' + field + '.png'
print "saving image to file %s ..." % outname
plt.savefig(outname, dpi=mydpi, bbox_inches='tight')
| gpl-3.0 |
Diego-Ibarra/aquamod | aquamod/model_Embayment_PlanktonMussels.py | 1 | 19298 | '''
Fennel et al (2006) Nitrogen cycling in the Middle Atlantic Bight: Results from a
three-dimensional model and implications for the North Atlantic nitrogen budget.
GLOBAL BIOGEOCHEMICAL CYCLES, VOL. 20, GB3007, doi:10.1029/2005GB002456
'''
def load_defaults():
'''
This function creates a dictionaries called "par" and "InitCond"
and pre-loads them with all the default
parameters and initial conditions, respectively.
Also outputs days and dt
'''
# Framework
    days = 365 * 3 # Three years
dt = 0.01 # units: days
# Parameters
par = {}
par['mu0'] = 0.69
par['kNO3'] = 0.5
par['kNH4'] = 0.5
par['alpha'] = 0.125
par['gmax'] = 0.6 #Original 0.6
par['kP'] = 2
par['mP'] = 0.15
par['tau'] = 0.005
par['thetaMax']= 0.053
par['beta'] = 0.75
par['lBM'] = 0.01
par['lE'] = 0.01
par['mZ'] = 0.025
par['rSD'] = 0.3 # Original 0.03
par['rLD'] = 0.1 # # Original 0.01
par['nmax'] = 0.05
par['kI'] = 0.1
par['I0'] = 0.0095
par['wP'] = 0.1
par['wS'] = 0.1
par['wL'] = 1.
par['AE_P'] = 0.9
par['AE_D'] = 0.2
par['AE_Z'] = 0.3
par['Bpub'] = 0.43
par['Fmax_ref']= 0.03#0.025
par['GT'] = 0.44
par['KTempH']= 0.1
par['KTempL']= 0.5
par['KSaltL']= 0.25
par['KOxyL'] = 0.02
par['KFood'] = 1.
par['KRE'] = 0.86
par['OxyL'] = 17.5
par['Rm'] = 0.002
par['SaltL'] = 10.
par['TempH'] = 25.
par['TempL'] = -4.
par['beta'] = 0.12
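    # NOTE: this reuses the key 'beta' set to 0.75 above for zooplankton assimilation;
    # since the assignment here overwrites it, 0.12 is the value every term below sees.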
par['epsilonP'] = 1.
par['epsilonD'] = 0.5
par['epsilonZ'] = 0.3
par['lamda_nat'] = 0.0 # 0.00137
par['lamda_harvest'] = 0.0 # 0.001
# Physical characteristics of embayment
par['chi'] = 0.01
par['X'] = 2000#2000 # Basin length
par['Y'] = 200#200 # Basin width
par['Z'] = 10 # Basin depth
par['V'] = par['X'] * par['Y'] * par['Z']
par['uwind'] = 0.5
par['vwind'] = 0.5
# Initial conditions
InitCond = {}
InitCond['Phy'] = 0.5
InitCond['Zoo'] = 0.2
InitCond['SDet'] = 0.5
InitCond['LDet'] = 0.3
InitCond['NH4'] = 0.1
InitCond['NO3'] = 5.
InitCond['Oxy'] = 340. #Oxygen
InitCond['Soma'] = 3.0
InitCond['Gonad'] = 0.0
InitCond['conc_muss'] = 20.
InitCond['n_muss'] = InitCond['conc_muss'] * par['V']
return days, dt, par, InitCond
def run_model(days,dt,InitCond,par,forc):
'''
    Integrate the coupled plankton (NPZD2-type) and mussel ecosystem model forward
    in time for the given number of days with time step dt, using the initial
    conditions in InitCond, the parameters in par and the forcing time series in
    forc. Returns a dictionary of state-variable and diagnostic time series.
'''
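    # Sketch of the scheme used in the main loop below: each state variable X is
    # stepped with an explicit (forward Euler) update,
    #   X[t+1] = X[t] + dXdt * dt
    # where the dXdt terms are assembled from the biological and physical fluxes.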
# Import libraries
import numpy as np
print 'Starting model run with ' + str(InitCond['conc_muss']) + ' mussels/m3 ...'
# Make sure n_muss is correctly estimated
InitCond['n_muss'] = InitCond['conc_muss'] * par['V']
# Setup the framework (calculate timestemps, create zero vectors, create time vector)
NoSTEPS = int(days / dt) # Calculates the number of steps by dividing days by dt and rounding down
time = np.linspace(0,days,NoSTEPS) # Makes and vector array of equally spaced numbers from zero to "days"
# Create zero-vectors
Phy = np.zeros((NoSTEPS,),float) # makes a vector array of zeros (size: NoSTEPS rows by ONE column)
Zoo = np.zeros((NoSTEPS,),float) # same as above
SDet = np.zeros((NoSTEPS,),float) # Biomass - same as above
LDet = np.zeros((NoSTEPS,),float) # same as above
NH4 = np.zeros((NoSTEPS,),float) # same as above
NO3 = np.zeros((NoSTEPS,),float) # same as above
Oxy = np.zeros((NoSTEPS,),float) # same as above
mu = np.zeros((NoSTEPS,),float) # same as above
f_I = np.zeros((NoSTEPS,),float) # same as above
L_NO3 = np.zeros((NoSTEPS,),float) # same as above
L_NH4 = np.zeros((NoSTEPS,),float) # same as above
airwater_O2_flux = np.zeros((NoSTEPS,),float) # same as above
TotN = np.zeros((NoSTEPS,),float) # same as above
# Mussels
Soma = np.zeros((NoSTEPS,),float) # makes a vector array of zeros (size: NoSTEPS rows by ONE column)
Gonad = np.zeros((NoSTEPS,),float) # same as above
B = np.zeros((NoSTEPS,),float) # Biomass - same as above
B_conc = np.zeros((NoSTEPS,),float) # Biomass - same as above
L_Temp = np.zeros((NoSTEPS,),float) # same as above
L_Salt = np.zeros((NoSTEPS,),float) # same as above
L_Oxy = np.zeros((NoSTEPS,),float) # same as above
L_Food = np.zeros((NoSTEPS,),float) # same as above
F = np.zeros((NoSTEPS,),float) # same as above
A = np.zeros((NoSTEPS,),float) # same as above
R = np.zeros((NoSTEPS,),float) # same as above
RE = np.zeros((NoSTEPS,),float) # same as above
Spawning = np.zeros((NoSTEPS,),float) # same as above
n_muss = np.zeros((NoSTEPS,),float) # same as above
CumulativeHarvest = np.zeros((NoSTEPS,),float) # same as above
# Initializing with initial conditions
Phy[0] = InitCond['Phy']
Zoo[0] = InitCond['Zoo']
SDet[0] = InitCond['SDet']
LDet[0] = InitCond['LDet']
NH4[0] = InitCond['NH4']
NO3[0] = InitCond['NO3']
Oxy[0] = InitCond['Oxy']
# Mussels
Soma[0] = InitCond['Soma']
    Gonad[0] = InitCond['Gonad']
B[0] = InitCond['Soma'] + InitCond['Gonad']
Spawning[0] = 0.
n_muss[0] = InitCond['n_muss']
B_conc[0] = B[0] * n_muss[0] / par['V']
# Forcing
Salt = forc['Salt']
Temp = forc['Temp']
I = forc['I']
Phy0 = forc['Phy']
Zoo0 = forc['Zoo']
SDet0 = forc['SDet']
LDet0 = forc['LDet']
NH40 = forc['NH4']
NO30 = forc['NO3']
Oxy0 = forc['Oxy']
# *****************************************************************************
# MAIN MODEL LOOP *************************************************************
for t in range(0,NoSTEPS-1):
muMax = par['mu0'] * (1.066 ** Temp[t]) # text
f_I[t] = (par['alpha']*I[t])/(np.sqrt(muMax**2+((par['alpha']**2)*(I[t]**2)))) #Eq5
L_NO3[t] = (NO3[t]/(par['kNO3']+NO3[t])) * (1/(1+(NH4[t]/par['kNH4']))) #Eq3
L_NH4[t] = NH4[t]/(par['kNH4']+NH4[t]) # Eq 4
mu[t] =muMax * f_I[t] * (L_NO3[t] + L_NH4[t]) # Eq2
g = par['gmax'] * ((Phy[t]**2)/(par['kP']+(Phy[t]**2)))
n = par['nmax'] * (1 - max(0,(I[t]-par['I0'])/(par['kI']+I[t]-par['I0'])))
n_O2 = (Oxy[t]/(3.+Oxy[t]))
dPhydt = (mu[t] * Phy[t]) - \
(g * Zoo[t]) - \
(par['mP'] * Phy[t]) - \
(par['tau']*(SDet[t]+Phy[t])*Phy[t]) - \
(par['wP']*Phy[t]/par['Z']) # Eq1
dZoodt = (g * par['beta'] * Zoo[t]) - \
(par['lBM']*Zoo[t]) - \
(par['lE']*((Phy[t]**2)/(par['kP']+(Phy[t]**2)))*par['beta']*Zoo[t]) - \
(par['mZ']*(Zoo[t]**2))#Eq10
dSDetdt = (g * (1-par['beta']) * Zoo[t]) + \
(par['mZ']*(Zoo[t]**2)) + \
(par['mP'] * Phy[t]) - \
(par['tau']*(SDet[t]+Phy[t])*SDet[t]) - \
(par['rSD']*SDet[t]) - \
(par['wS']*SDet[t]/par['Z'])
dLDetdt = (par['tau']*((SDet[t]+Phy[t])**2)) - \
(par['rLD']*LDet[t]) - \
(par['wL']*LDet[t]/par['Z'])
dNO3dt = -(muMax * f_I[t] * L_NO3[t] * Phy[t]) + \
(n * n_O2 * NH4[t])
dNH4dt = -(muMax * f_I[t] * L_NH4[t] * Phy[t]) - \
(n * n_O2 * NH4[t]) + \
(par['lBM'] * Zoo[t]) + \
(par['lE']*((Phy[t]**2)/(par['kP']+(Phy[t]**2)))*par['beta']*Zoo[t]) + \
(par['rSD']*SDet[t]) + \
(par['rLD']*LDet[t]) + \
(par['wP']*Phy[t]/par['Z']) + \
(par['wS']*SDet[t]/par['Z']) + \
(par['wL']*LDet[t]/par['Z'])
# MUSSELS -------------------------------------------------------------
# Calculate Temperature Limitation
L_Temp[t] = min(max(0.,1.-np.exp(-par['KTempL']*(Temp[t]-par['TempL']))), \
max(0.,1.+((1.-np.exp(par['KTempH']*Temp[t]))/(np.exp(par['KTempH']*par['TempH'])-1.))))
# Calculate Salinity Limitation
L_Salt[t] = max(0.,1.-np.exp(-par['KSaltL']*(Salt[t]-par['SaltL'])))
# Calculate Oxygen Limitation
L_Oxy[t] = max(0.,1.-np.exp(-par['KOxyL']*(Oxy[t]-par['OxyL'])))
# Calculate Oxygen Limitation
L_Food[t] = (Phy[t]+Zoo[t]+SDet[t])/(par['KFood']+Phy[t]+Zoo[t]+SDet[t])
# Calculate Filtration rate
Fmax = par['Fmax_ref']*(B[t]**(2./3.))
F[t] = Fmax * L_Temp[t] * L_Salt[t] * L_Oxy[t] * L_Food[t]
A[t] = F[t] * ((par['epsilonP']*par['AE_P']*Phy[t])+ \
(par['epsilonZ']*par['AE_Z']*Zoo[t])+ \
(par['epsilonD']*par['AE_D']*SDet[t]))
R[t] = (par['Rm']*B[t]) + (par['beta']*A[t])
RE[t] = max(0., (B[t]-par['Bpub'])/(par['KRE'] + B[t] - (2.*par['Bpub'])))
# Spawning
if n_muss[t] == 0.:
Spawning[t] = 0.
dGonaddt = 0
dSomadt = 0.
elif Gonad[t]/B[t] < par['GT']:
Spawning[t] = 0.
dGonaddt = (A[t]-R[t]) * RE[t]
dSomadt = (A[t]-R[t]) * (1.-RE[t])
offset = Gonad[t] + (dGonaddt * dt)
if offset < 0 : # If Gonad is going to be negative... don't apply dynamic allocation
dGonaddt = 0.
dSomadt = A[t]-R[t]
elif Gonad[t]/B[t] >= par['GT']:
Spawning[t] = Gonad[t]
dGonaddt = 0.
dSomadt = A[t]-R[t]
#Feedback to NPZD2 model
# Faeces and Pseudofaeces
Fae = F[t] * ((par['epsilonP']*(1-par['AE_P'])*Phy[t])+ \
(par['epsilonZ']*(1-par['AE_Z'])*Zoo[t])+ \
(par['epsilonD']*(1-par['AE_D'])*SDet[t]))
dLDetdt = dLDetdt + (Fae*(n_muss[t]/par['V']))
# Remove eaten Phy, Zoo and SDet from water-column
dPhydt = dPhydt-((F[t] *par['epsilonP']*Phy[t])*(n_muss[t]/par['V']))
dZoodt = dZoodt-((F[t] *par['epsilonZ']*Zoo[t])*(n_muss[t]/par['V']))
dSDetdt = dSDetdt -((F[t] *par['epsilonD']*SDet[t])*(n_muss[t]/par['V']))
# Mussel population
Harvest = par['lamda_harvest'] * n_muss[t]
NatMortality = par['lamda_nat'] * n_muss[t]
dn_mussdt = -NatMortality - Harvest
# Excretion into Ammonia
dNH4dt = dNH4dt + ((R[t]*n_muss[t]/par['V']) + ((NatMortality*B[t])/par['V']))
# Oxygen sub-model =========================================
# Parameters
OA0 = 2.00907 # Oxygen
OA1 = 3.22014 # saturation
OA2 = 4.05010 # coefficients
OA3 = 4.94457
OA4 =-0.256847
OA5 = 3.88767
OB0 =-0.00624523
OB1 =-0.00737614
OB2 =-0.0103410
OB3 =-0.00817083
OC0 =-0.000000488682
rOxNO3= 8.625 # 138/16
rOxNH4= 6.625 # 106/16
l2mol = 1000.0/22.9316 # liter to mol
#-----------------------------------------------------------------------
# Surface O2 gas exchange.
#-----------------------------------------------------------------------
# Compute surface O2 gas exchange.
cff2=0.31*(24.0/100.0)
# Compute O2 transfer velocity : u10squared (u10 in m/s)
u10squ=(par['uwind']*par['uwind'])+(par['vwind']*par['vwind'])
# Calculate the Schmidt number for O2 in sea water (Wanninkhof, 1992).
SchmidtN_Ox=1953.4-Temp[t]*(128.0-Temp[t]*(3.9918-Temp[t]*0.050091))
cff3=cff2*u10squ*np.sqrt(660.0/SchmidtN_Ox)
# Calculate O2 saturation concentration using Garcia and Gordon
# L&O (1992) formula, (EXP(AA) is in ml/l).
TS=np.log((298.15-Temp[t])/(273.15+Temp[t]))
AA=OA0+TS*(OA1+TS*(OA2+TS*(OA3+TS*(OA4+TS*OA5))))+ \
Salt[t]*(OB0+TS*(OB1+TS*(OB2+TS*OB3)))+ \
OC0*Salt[t]*Salt[t]
# Convert from ml/l to mmol/m3.
O2satu=l2mol*np.exp(AA)
# Add in O2 gas exchange.
O2_Flux = cff3*(O2satu-Oxy[t])
airwater_O2_flux[t] = O2_Flux * (1./par['Z'])
dOxydt = airwater_O2_flux[t]
# Production via Photosynthesys
dOxydt = dOxydt + (muMax * f_I[t] * L_NO3[t] * Phy[t] * rOxNO3) # New production
dOxydt = dOxydt + (muMax * f_I[t] * L_NH4[t] * Phy[t] * rOxNH4) # Regenerated production
# Respiration
dOxydt = dOxydt - (((par['lBM']*Zoo[t]) - \
(par['lE']*((Phy[t]**2)/(par['kP']+(Phy[t]**2)))*par['beta']*Zoo[t]) - \
(par['mZ']*(Zoo[t]**2))) * rOxNH4) # Zooplankton
dOxydt = dOxydt - ((n * n_O2 * NH4[t])* rOxNH4 * 2) # Nitrification
dOxydt = dOxydt - (((par['rSD']*SDet[t])+(par['rLD']*LDet[t])) * rOxNH4) #S and L Detritus remineralization
dOxydt = dOxydt - (((par['wS']*SDet[t]/par['Z']) + \
(par['wL']*LDet[t]/par['Z'])) * rOxNH4) #S and L Detritus remineralization in sediments
dOxydt = dOxydt - ((((R[t]*n_muss[t])/par['V']) + (NatMortality/par['V'])) * rOxNH4) #Mussels
# Physical Model ======================================================
dPhydt = dPhydt + (par['chi'] * (Phy0[t] - Phy[t]))
dZoodt = dZoodt + (par['chi'] * (Zoo0[t] - Zoo[t]))
dNH4dt = dNH4dt + (par['chi'] * (NH40[t] - NH4[t]))
dNO3dt = dNO3dt + (par['chi'] * (NO30[t] - NO3[t]))
dSDetdt = dSDetdt + (par['chi'] * (SDet0[t] - SDet[t]))
dLDetdt = dLDetdt + (par['chi'] * (LDet0[t] - LDet[t]))
dOxydt = dOxydt + (par['chi'] * (Oxy0[t] - Oxy[t]))
# Update and step ----------------------------------------------------
Phy[t+1] = Phy[t] + (dPhydt * dt)
Zoo[t+1] = Zoo[t] + (dZoodt * dt) + ((Spawning[t]*n_muss[t])/par['V'])
SDet[t+1] = SDet[t] + (dSDetdt * dt)
LDet[t+1] = LDet[t] + (dLDetdt * dt)
Oxy[t+1] = max(0,Oxy[t] + (dOxydt * dt))
NH4[t+1] = NH4[t] + (dNH4dt * dt)
NO3[t+1] = NO3[t] + (dNO3dt * dt)
if NO3[t+1] <= 0.001:
offset = NO3[t+1]
NH4[t+1] = NH4[t+1] + offset
NO3[t+1] = NO3[t+1] - offset
# Mussels
Soma[t+1] = Soma[t] + (dSomadt * dt)
Gonad[t+1] = Gonad[t] + (dGonaddt * dt) - Spawning[t]
B[t+1] = Soma[t+1] + Gonad[t+1]
n_muss[t+1] = max(0,n_muss[t] + (dn_mussdt * dt))
CumulativeHarvest[t+1] = CumulativeHarvest[t] + Harvest
B_conc[t+1] = B[t+1] * n_muss[t+1] / par['V']
# Estimate Total Nitrogen
TotN[t+1] = Phy[t+1] + Zoo[t+1] + SDet[t+1] + LDet[t+1] + NH4[t+1] + NO3[t+1] + ((B[t+1]*n_muss[t+1])/par['V'])
# end of main model LOOP*******************************************************
# *****************************************************************************
# Pack output into dictionary
output = {}
output['par'] = par
output['InitCond'] = InitCond
output['time'] = time
output['Phy'] = Phy
output['Zoo'] = Zoo
output['SDet'] = SDet
output['LDet'] = LDet
output['NH4'] = NH4
output['NO3'] = NO3
output['Oxy'] = Oxy
output['I'] = I
output['mu'] = mu
output['f_I'] = f_I
output['L_NO3'] = L_NO3
output['L_NH4'] = L_NH4
output['TotN'] = TotN
output['airwater_O2_flux'] = airwater_O2_flux
output['Soma'] = Soma
output['Gonad'] = Gonad
output['B'] = B
output['n_muss'] = n_muss
output['Spawning'] = Spawning
output['CumulativeHarvest'] = CumulativeHarvest
output['F'] = F
output['L_Temp'] = L_Temp
output['L_Salt'] = L_Salt
output['L_Oxy'] = L_Oxy
output['L_Food'] = L_Food
output['B_conc'] = B_conc
print "Model run: DONE!!!"
return output
def plot_model(output):
'''
Script to make plots
'''
# Import libraries
import matplotlib.pyplot as plt
# Plotting
fig, (ax, ax2, ax3) = plt.subplots(3,1,figsize=(13,13))
ax.plot(output['time']/365,output['Phy'],'g-')
ax.plot(output['time']/365,output['Zoo'],'r-')
ax.plot(output['time']/365,output['SDet'],'k-')
ax.plot(output['time']/365,output['LDet'],'k-.')
ax.plot(output['time']/365,output['NH4'],'m-')
ax.plot(output['time']/365,output['NO3'],'c-')
ax.plot(output['time']/365,output['B_conc'],'r.')
ax.set_ylabel('Nitrogen \n (mmol N m$^{-3}$)')
ax.set_title('Ecosystem Model - Plankton Ecosystem')
ax.legend(['Phy','Zoo','SDet','LDet','NH4','NO3','B_conc'])
ax2.plot(output['time']/365,output['Oxy'],'b-')
ax2.set_ylabel('Oxygen \n (mmol O2 m$^{-3}$)')
    ax2.legend(['Oxy'])
ax3.plot(output['time']/365,output['f_I'],'r-')
ax3.plot(output['time']/365,output['mu'],'g-')
ax3.plot(output['time']/365,output['L_NO3'],'b-')
ax3.plot(output['time']/365,output['L_NH4'],'k-')
ax3.set_ylabel('Plankton Diagnostics \n (dimensionless)')
ax3.legend(['f_I','Mu','L_NO3','L_NH4'])
fig2, (ax, ax2, ax3, ax4) = plt.subplots(4,1,figsize=(13,13))
ax.plot(output['time']/365,output['B'],'r.')
ax.plot(output['time']/365,output['Gonad'],'b-')
ax.plot(output['time']/365,output['Soma'],'k-')
ax.legend(['B','Gonad','Soma'])
ax.set_title('Ecosystem Model - Mussels')
ax2.plot(output['time']/365,output['n_muss'],'g-')
# ax2.plot(output['time']/365,output['CumulativeHarvest'],'r-')
    ax2.set_ylabel('Number of mussels \n in bay')
ax2.legend(['Total Number of \n mussels in bay'])
ax3.plot(output['time']/365,output['F']*1000/24,'g-')
ax3.set_ylabel('Filtration rate \n (L ind$^{-1}$ h$^{-1}$)')
ax3.legend(['F'])
ax4.plot(output['time']/365,output['L_Temp'],'b-')
ax4.plot(output['time']/365,output['L_Salt'],'m-')
ax4.plot(output['time']/365,output['L_Oxy'],'k-')
ax4.plot(output['time']/365,output['L_Food'],'r-')
ax4.legend(['L_Temp','L_Salt','L_Oxy','L_Food'])
ax4.set_ylabel('Mussel Diagnostics \n (dimensionless)')
ax4.set_xlabel('Time (years)')
plt.show()
return
def plot_totalNitrogen(output):
import matplotlib.pyplot as plt
fig3, (ax) = plt.subplots(1,1)
ax.plot(output['time']/365,output['TotN'],'y.')
ax.legend(['TotN'])
ax.set_title('Ecosystem Model - Total Nitrogen')
ax.set_ylabel('Nitrogen \n (mmol N)')
ax.set_xlabel('Time (years)')
return
if __name__ == '__main__':
import load_forcing
days, dt, par, InitCond = load_defaults()
forc = load_forcing.get_forcing(dt,days)
output = run_model(days,dt,InitCond,par,forc)
# load_forcing.plot_forcing(dt,days,forc)
plot_model(output)
# plot_totalNitrogen(output)
| mit |
bigfishman/oceanbase | oceanbase_0.4/tools/deploy/perf/perf_base.py | 12 | 18473 | # -*- coding: utf-8 -*-
import time
import datetime
try:
import matplotlib.pyplot as plt
except ImportError:
plt = None
def perf_client_attrs():
prepare_data = '''sh: ssh $usr@$ip "${client_env_vars} client_idx=$idx $dir/$client/stress.sh prepare ${type} ${client_start_args}"'''
return locals()
def perf_ct_attrs():
prepare_data = 'all: client prepare_data type=all'
reboot_to_prepare_data = 'seq: stop conf rsync clear prepare_data'
return locals()
def perf_role_attrs():
save_profile = '''sh: scp $usr@$ip:$dir/log/$role.profile $_rest_'''
save_gprofile = '''sh: scp $usr@$ip:$gprofiler_output $_rest_'''
start_periodic_pstack = '''sh: ssh $usr@$ip "sh $dir/tools/periodic_pstack.sh $__self__ \`pgrep -f ^$exe\` $dir/log/pstacks/ $ptime > /dev/null 2>&1 < /dev/zero &"'''
save_periodic_pstack = '''sh: scp -r $usr@$ip:$dir/log/pstacks/$__self__ $_rest_'''
return locals()
def perf_obi_attrs():
perf_load_params = r'''call: obmysql extra="-e \"alter system set merge_delay_interval='1s' server_type=chunkserver;\""'''
perf_create_tables = r'''call: obmysql extra="< perf/${case}.create"'''
perf_init = 'seq: perf_load_params perf_create_tables sleep[sleep_time=2]'
perf = 'seq: reboot sleep[sleep_time=10] perf_init perf_prepare.reboot_to_prepare_data turn_on_perf perf_run.reboot sleep[sleep_time=$ptime] perf_run.stop turn_off_perf collect_perf'
perf_ups = 'seq: reboot sleep[sleep_time=10] perf_init turn_on_perf perf_run.reboot sleep[sleep_time=$ptime] perf_run.stop turn_off_perf collect_perf'
running_ts = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
local_tmp_dir = "tmp.%s" % (running_ts)
def get_cluster_ips(*args, **ob):
def get_server_ips(server_role):
server_list = get_match_child(ob, server_role)
return [find_attr(find_attr(ob, k), "ip") for k in server_list.keys()]
cluster_ips = get_server_ips("mergeserver") + get_server_ips("chunkserver") + get_server_ips("updateserver") + get_server_ips("rootserver")
seen = set()
seen_add = seen.add
return [ x for x in cluster_ips if x not in seen and not seen_add(x)]
def turn_on_perf(*args, **ob):
# turn on profile log and gprofiler
all_do(ob, 'mergeserver', 'kill', '-50')
all_do(ob, 'chunkserver', 'kill', '-50')
all_do(ob, 'mergeserver', 'kill', '-60')
all_do(ob, 'chunkserver', 'kill', '-60')
all_do(ob, 'updateserver', 'kill', '-60')
#call_(ob, 'ms0.kill', '-50')
#call_(ob, 'ms0.kill', '-60')
#call_(ob, 'cs0.kill', '-60')
#call_(ob, 'ups0.kill', '-60')
for ip in get_cluster_ips(*args, **ob):
ssh(ip, sub2("$dir/tools/linuxmon_x64.bin time=${ptime}s wait=1 back=yes > $dir/server_stats 2>&1 < /dev/zero &", ob))
#call_(ob, 'ms0.start_periodic_pstack')
#call_(ob, 'cs0.start_periodic_pstack')
#call_(ob, 'ups0.start_periodic_pstack')
def turn_off_perf(*args, **ob):
# turn off profile log and gprofiler
all_do(ob, 'mergeserver', 'kill', '-51')
all_do(ob, 'chunkserver', 'kill', '-51')
all_do(ob, 'mergeserver', 'kill', '-61')
all_do(ob, 'chunkserver', 'kill', '-61')
all_do(ob, 'updateserver', 'kill', '-61')
#call_(ob, 'ms0.kill', '-51')
#call_(ob, 'ms0.kill', '-61')
#call_(ob, 'cs0.kill', '-61')
#call_(ob, 'ups0.kill', '-61')
def pprof_gprofile_output(server_bin, profile_output, perf_result_dir, *args, **ob):
st = time.time()
cost_graph_name = "${role}_${ip}_cost_graph.pdf"
cost_graph_path = "%s/%s" % (perf_result_dir, cost_graph_name)
top50_cmd = sub2("pprof --text $server_bin $profile_output", locals())
graph_cmd = sub2("pprof --pdf $server_bin --edgefraction=0 $profile_output > $cost_graph_path", locals())
pro_res = popen(sub2(top50_cmd, ob)).splitlines()
i = 0
while i < len(pro_res):
if pro_res[i].startswith('Total: '):
i += 1
break
i += 1
func_top50 = '\n'.join(pro_res[i:i + 50])
sh(sub2(graph_cmd, ob))
info('drawing %s profile graph costs %ss' % (server_bin, time.time() - st))
output = """
    <p>$role $ip top 50 functions by cost:
<pre>%s</pre></p>
<p><a href="%s">函数消耗线框图</a></p>
""" % (func_top50, cost_graph_name)
return sub2(output, ob)
def parse_profile_log(profile_log, perf_result_dir, *args, **ob):
time_format = "%Y-%m-%d %H:%M:%S"
query_ratio = int(sub2("$ptime", ob), 10)
d = dict()
start_time = None
end_time = None
sql_count = 0
real_sql_count = 0
sql_time = 0
sql_time_dist = dict()
wait_time = 0
ud = dict(sql_count = 0, real_sql_count = 0, sql_time = 0, wait_time = 0)
qps2time = dict()
rpcs = dict()
rpcs_html = ""
wait_times = []
parse_log_st = time.time()
def get_packet_name(pcode):
if pcode == 4002: return "OB_PHY_PLAN_EXECUTE"
elif pcode == 409: return "OB_SQL_GET_REQUEST"
elif pcode == 405: return "OB_SQL_SCAN_REQUEST"
else: return "OB_UNKNOWN_PACKET"
def add_sql(trace_id, ts, sqlt, waitt, rpc_list):
if qps2time.has_key(ts):
qps2time[ts] += query_ratio
else:
qps2time[ts] = 0
ud['sql_count'] += query_ratio
ud['real_sql_count'] += 1
ud['sql_time'] += sqlt
if sql_time_dist.has_key(sqlt):
sql_time_dist[sqlt] += query_ratio
else:
sql_time_dist[sqlt] = 0
ud['wait_time'] += waitt
wait_times.append(waitt)
for rpc in rpc_list:
if rpcs.has_key(rpc['pcode']):
rpcs[rpc['pcode']]['rpc_time'] += rpc['latency']
rpcs[rpc['pcode']]['rpc_times'].append(rpc['latency'])
else:
rpcs[rpc['pcode']] = dict(rpc_time = rpc['latency'], rpc_times = [rpc['latency']])
with open(sub2(profile_log, ob)) as f:
for l in f:
m = re.search(r'trace_id=\[(\d+)\] sql=\[.*\] sql_to_logicalplan=\[\d+\] logicalplan_to_physicalplan=\[\d+\] handle_sql_time=\[(\d+)\] wait_sql_queue_time=\[(\d+)\] rpc:channel_id=\[\d+\] rpc_start=\[\d+\] latency=\[(\d+)\] pcode=\[(\d+)\] sql_queue_size=\[\d+\] print_time=\[(\d+)\]', l)
if m is not None:
end_time = int(m.group(6))
trace_id = m.group(1)
ts = m.group(6)[:-6]
sql_time = int(m.group(2))
wait_time = int(m.group(3))
rpc_list = [dict(pcode = int(m.group(5)), latency = int(m.group(4)))]
add_sql(trace_id, ts, sql_time, wait_time, rpc_list)
else:
m = re.search(r'trace_id=\[(\d+)\] sql=\[.*\] sql_to_logicalplan=\[\d+\] logicalplan_to_physicalplan=\[\d+\] handle_sql_time=\[(\d+)\] wait_sql_queue_time=\[(\d+)\] rpc:channel_id=\[\d+\] rpc_start=\[\d+\] latency=\[(\d+)\] pcode=\[(\d+)\] rpc:channel_id=\[\d+\] rpc_start=\[\d+\] latency=\[(\d+)\] pcode=\[(\d+)\] sql_queue_size=\[\d+\] print_time=\[(\d+)\]', l)
if m is not None:
end_time = int(m.group(8))
trace_id = m.group(1)
ts = m.group(8)[:-6]
sql_time = int(m.group(2))
wait_time = int(m.group(3))
rpc_list = [dict(pcode = int(m.group(5)), latency = int(m.group(4))),
dict(pcode = int(m.group(7)), latency = int(m.group(6))),]
add_sql(trace_id, ts, sql_time, wait_time, rpc_list)
else:
m = re.search(r'trace_id=\[(\d+)\] sql=\[.*\] sql_to_logicalplan=\[\d+\] logicalplan_to_physicalplan=\[\d+\] handle_sql_time=\[(\d+)\] wait_sql_queue_time=\[(\d+)\] rpc:channel_id=\[\d+\] rpc_start=\[\d+\] latency=\[(\d+)\] pcode=\[(\d+)\] rpc:channel_id=\[\d+\] rpc_start=\[\d+\] latency=\[(\d+)\] pcode=\[(\d+)\] rpc:channel_id=\[\d+\] rpc_start=\[\d+\] latency=\[(\d+)\] pcode=\[(\d+)\] sql_queue_size=\[\d+\] print_time=\[(\d+)\]', l)
if m is not None:
end_time = int(m.group(10))
trace_id = m.group(1)
ts = m.group(10)[:-6]
sql_time = int(m.group(2))
wait_time = int(m.group(3))
rpc_list = [dict(pcode = int(m.group(5)), latency = int(m.group(4))),
dict(pcode = int(m.group(7)), latency = int(m.group(6))),
dict(pcode = int(m.group(9)), latency = int(m.group(8))),]
add_sql(trace_id, ts, sql_time, wait_time, rpc_list)
if start_time is None and end_time is not None:
start_time = end_time
info("parsing log costs %ss" % (time.time() - parse_log_st))
sql_time = ud['sql_time']
sql_count = ud['sql_count']
real_sql_count = ud['real_sql_count']
wait_time = ud['wait_time']
drawing_st = time.time()
if end_time is None:
elapsed_seconds = 0
qps = 0
avg_sql_time = 0
avg_wait_time = 0
for pcode, rpc in rpcs.items():
rpc['avg_rpc_time'] = 0
else:
elapsed_seconds = (end_time - start_time) / 10**6
if elapsed_seconds > 0:
qps = sql_count / elapsed_seconds
avg_sql_time = float(sql_time) / real_sql_count
avg_wait_time = float(wait_time) / real_sql_count
else:
qps = 0
avg_sql_time = 0
avg_wait_time = 0
for pcode, rpc in rpcs.items():
rpc['avg_rpc_time'] = float(rpc['rpc_time']) / len(rpc['rpc_times'])
if plt is not None:
plt.plot([x for k,x in sorted(qps2time.items())], '-')
plt.xlabel('Timeline')
plt.ylabel('QPS')
plt.savefig(sub2("%s/${role}_${ip}_qps.png" % (perf_result_dir), ob))
plt.clf()
plt.bar(sql_time_dist.keys(), sql_time_dist.values())
plt.xlabel('Response Time (us)')
plt.ylabel('Number of Requests')
plt.savefig(sub2("%s/${role}_${ip}_total_time_dist.png" % (perf_result_dir), ob))
plt.clf()
plt.plot(wait_times, ',')
plt.xlabel('Timeline')
plt.ylabel('Wait Time in Mergeserver Queue (us)')
plt.savefig(sub2("%s/${role}_${ip}_queue_time.png" % (perf_result_dir), ob))
plt.clf()
for pcode, rpc in rpcs.items():
plt.plot(rpc['rpc_times'], ',')
plt.xlabel('Timeline')
plt.ylabel('Response Time (us)')
plt.savefig(sub2("%s/${role}_${ip}_%s.png" % (perf_result_dir, pcode), ob))
plt.clf()
            rpcs_html += sub2("""<p>$role $ip %s(%s) requests: %s, average latency: %sus<br><img src="${role}_${ip}_%s.png" /></p>"""
% (pcode, get_packet_name(pcode), len(rpc['rpc_times']), rpc['avg_rpc_time'], pcode), ob)
info("drawing performance graph costs %ss" % (time.time() - drawing_st))
parse_perf = sub2(sub2("""
    <p> ${role} ${ip} test duration: ${elapsed_seconds}s<br>
    ${role} ${ip} number of SQL requests: $sql_count<br>
    ${role} ${ip} MS QPS: $qps</p>
    <p>${role} ${ip} QPS:<br>
    <img src="${role}_${ip}_qps.png" /></p>
    <p>${role} ${ip} average response latency: ${avg_sql_time}us<br>
    <img src="${role}_${ip}_total_time_dist.png" /></p>
    <p>${role} ${ip} average time spent in the MS queue: ${avg_wait_time}us<br>
    <img src="${role}_${ip}_queue_time.png" /></p>
$rpcs_html
""", locals()), ob)
return dict(
parse_res = parse_perf,
stat = dict(
sql_count = sql_count,
real_sql_count = real_sql_count,
elapsed_seconds = elapsed_seconds,
sql_time = sql_time,
wait_time = wait_time,
qps2time = qps2time,
sql_time_dist = sql_time_dist,
wait_times = wait_times,
rpcs = rpcs,
)
)
def collect_perf(*args, **ob):
def get_server_list(server_role):
server_list = get_match_child(ob, server_role)
server_list_str = ' '.join('${%s.ip}:${%s.port}'%(k, k) for k in server_list.keys())
return server_list_str
perf_result_dir = "/home/yanran.hfs/public_html/ob_perf/not_published/%s" % (running_ts)
os.mkdir(perf_result_dir)
os.mkdir(local_tmp_dir)
ms_profile = '%s/$role.$ip.profile' % (local_tmp_dir)
all_do(ob, 'mergeserver', 'save_gprofile', local_tmp_dir)
all_do(ob, 'chunkserver', 'save_gprofile', local_tmp_dir)
all_do(ob, 'updateserver', 'save_gprofile', local_tmp_dir)
all_do(ob, 'mergeserver', 'save_profile', ms_profile)
#call_(ob, 'ms0.save_periodic_pstack', perf_result_dir)
#call_(ob, 'cs0.save_periodic_pstack', perf_result_dir)
#call_(ob, 'ups0.save_periodic_pstack', perf_result_dir)
ms_gprofile_output = "%s/%s" % (local_tmp_dir, str.split(find_attr(ob, "ms0.gprofiler_output"), '/')[-1])
cs_gprofile_output = "%s/%s" % (local_tmp_dir, str.split(find_attr(ob, "cs0.gprofiler_output"), '/')[-1])
ups_gprofile_output = "%s/%s" % (local_tmp_dir, str.split(find_attr(ob, "ups0.gprofiler_output"), '/')[-1])
ms_gprof = all_do(ob, 'mergeserver', 'pprof_gprofile_output', "bin/mergeserver", ms_gprofile_output, perf_result_dir)
cs_gprof = all_do(ob, 'chunkserver', 'pprof_gprofile_output', "bin/chunkserver", cs_gprofile_output, perf_result_dir)
ups_gprof = all_do(ob, 'updateserver', 'pprof_gprofile_output', "bin/updateserver", ups_gprofile_output, perf_result_dir)
ms_gprof = ''.join([x[1] for x in ms_gprof])
cs_gprof = ''.join([x[1] for x in cs_gprof])
ups_gprof = ''.join([x[1] for x in ups_gprof])
ms_prof = all_do(ob, 'mergeserver', 'parse_profile_log', ms_profile, perf_result_dir)
ms_prof_htmls = ''.join([x[1]['parse_res'] for x in ms_prof])
sql_count = 0
real_sql_count = 0
elapsed_seconds = 0
sql_time = 0
wait_time = 0
qps2time = None
sql_time_dist = None
for ms_tuple in ms_prof:
ms = ms_tuple[1]['stat']
if elapsed_seconds < ms['elapsed_seconds']:
elapsed_seconds = ms['elapsed_seconds']
sql_count += ms['sql_count']
real_sql_count += ms['real_sql_count']
sql_time += ms['sql_time']
wait_time += ms['wait_time']
qps2time = dict_add(qps2time, ms['qps2time'])
sql_time_dist = dict_add(sql_time_dist, ms['sql_time_dist'])
if elapsed_seconds == 0:
qps = 0
avg_sql_time = 0
else:
qps = sql_count / elapsed_seconds
avg_sql_time = float(sql_time) / real_sql_count
if plt is not None:
plt.plot([x for k,x in sorted(qps2time.items())], '-')
plt.xlabel('Timeline')
plt.ylabel('QPS')
plt.savefig("%s/cluster_qps.png" % (perf_result_dir))
plt.clf()
plt.bar(sql_time_dist.keys(), sql_time_dist.values())
plt.xlabel('Response Time (us)')
plt.ylabel('Number of Requests')
plt.savefig("%s/cluster_total_time_dist.png" % (perf_result_dir))
plt.clf()
user_name = find_attr(ob, "usr")
case_name = find_attr(ob, "case")
rs_list = get_server_list("rootserver")
ups_list = get_server_list("updateserver")
ms_list = get_server_list("mergeserver")
cs_list = get_server_list("chunkserver")
server_stats = ""
for ip in get_cluster_ips(*args, **ob):
sh(sub2('scp $usr@%s:$dir/server_stats %s/' % (ip, local_tmp_dir), ob))
        server_stats += "<p>%s monitoring info:<pre>%s</pre></p>" % (ip, read("%s/server_stats" % (local_tmp_dir)))
result_html_template = ("""
    <p>Tester: ${user_name}<br>
    Test case: ${case_name}<br>
    Environment:
<ul>
<li>RootServer: ${rs_list}</li>
<li>UpdateServer: ${ups_list}</li>
<li>MergeServer: ${ms_list}</li>
<li>ChunkServer: ${cs_list}</li>
</ul>
    Test duration: ${elapsed_seconds}s<br>
    Number of SQL requests: $sql_count<br>
    Cluster QPS: $qps</p>
<p>QPS:<br>
<img src="cluster_qps.png" /></p>
    <p>Average response latency: ${avg_sql_time}us<br>
<img src="cluster_total_time_dist.png" /></p>
${ms_prof_htmls}
${ms_gprof}
${cs_gprof}
${ups_gprof}
${server_stats}
    <p>Server stack traces:
<ul>
<li><a href="ms0">ms0</a></li>
<li><a href="cs0">cs0</a></li>
<li><a href="ups0">ups0</a></li>
</ul></p>
""")
all_vars = copy.copy(ob)
all_vars.update(locals())
with open("%s/index.html" % (perf_result_dir), "w") as f:
f.write(sub2(result_html_template, all_vars))
with open("/home/yanran.hfs/public_html/ob_perf/not_published/index.html", "a") as f:
f.write("""<li><a href="%s/">%s %s %s</a></li>\n""" % (running_ts, running_ts, user_name, case_name))
sh("rm -r %s" % (local_tmp_dir))
return locals()
def perf_environ_setup():
#ob['server_ld_preload'] = "$dir/lib/libprofiler_helper.so"
#ob['gprofiler_output'] = "$dir/$ip.gprofiler.output"
#ob['environ_extras'] = "PROFILE_SAMPLE_INTERVAL=$ptime"
#ob['ms0']['server_ld_preload'] = "$dir/lib/libprofiler_helper.so"
#ob['ms0']['gprofiler_output'] = "$dir/ms0.gprofiler.output"
#ob['ms0']['environ_extras'] = sub2("PROFILEOUTPUT=$gprofiler_output PROFILE_SAMPLE_INTERVAL=$ptime", ob)
#ob['cs0']['server_ld_preload'] = "$dir/lib/libprofiler_helper.so"
#ob['cs0']['gprofiler_output'] = "$dir/cs0.gprofiler.output"
#ob['cs0']['environ_extras'] = "PROFILEOUTPUT=$gprofiler_output"
#ob['ups0']['server_ld_preload'] = "$dir/lib/libprofiler_helper.so"
#ob['ups0']['gprofiler_output'] = "$dir/ups0.gprofiler.output"
#ob['ups0']['environ_extras'] = "PROFILEOUTPUT=$gprofiler_output"
#obi_vars.update(dict(environ_extras = "PROFILE_SAMPLE_INTERVAL=$ptime"))
obi_vars['gprofiler_output'] = "$dir/$ip.$role.gprofiler.output"
obi_vars['environ_extras'] = "PROFILEOUTPUT=$gprofiler_output PROFILE_SAMPLE_INTERVAL=$ptime"
obi_vars['server_ld_preload'] = "$dir/lib/libprofiler_helper.so"
#call_(ob, 'ms0.kill', '-50')
def perf_install():
client_vars.update(dict_filter_out_special_attrs(perf_client_attrs()))
ct_vars.update(dict_filter_out_special_attrs(perf_ct_attrs()))
role_vars.update(dict_filter_out_special_attrs(perf_role_attrs()))
obi_vars.update(dict_filter_out_special_attrs(perf_obi_attrs()))
perf_environ_setup()
perf_install()
| gpl-2.0 |
pianomania/scikit-learn | sklearn/cluster/__init__.py | 364 | 1228 | """
The :mod:`sklearn.cluster` module gathers popular unsupervised clustering
algorithms.
"""
from .spectral import spectral_clustering, SpectralClustering
from .mean_shift_ import (mean_shift, MeanShift,
estimate_bandwidth, get_bin_seeds)
from .affinity_propagation_ import affinity_propagation, AffinityPropagation
from .hierarchical import (ward_tree, AgglomerativeClustering, linkage_tree,
FeatureAgglomeration)
from .k_means_ import k_means, KMeans, MiniBatchKMeans
from .dbscan_ import dbscan, DBSCAN
from .bicluster import SpectralBiclustering, SpectralCoclustering
from .birch import Birch
__all__ = ['AffinityPropagation',
'AgglomerativeClustering',
'Birch',
'DBSCAN',
'KMeans',
'FeatureAgglomeration',
'MeanShift',
'MiniBatchKMeans',
'SpectralClustering',
'affinity_propagation',
'dbscan',
'estimate_bandwidth',
'get_bin_seeds',
'k_means',
'linkage_tree',
'mean_shift',
'spectral_clustering',
'ward_tree',
'SpectralBiclustering',
'SpectralCoclustering']
| bsd-3-clause |
wolfiex/DSMACC-testing | mechanisms/reformat.py | 1 | 3586 | import pandas as pd
import numpy as np
import os, sys, multiprocessing, re, glob
from sympy import Symbol, expand, N
try:
ncores = int(os.popen(r'echo $NCPUS').read())
except Exception as e :
print e
ncores=1
print ncores
available_cores=ncores
co2 =False
if co2:
smilesdf = pd.read_csv('../src/background/smiles_mined.csv')
smiles=pd.Series(smilesdf.smiles)
smiles.index=smilesdf.name
smiles['CO']='C'
smiles['CO2']='C'
smiles['DUMMY']=''
smiles['NA']=''
smiles =dict(zip(smiles.index,[str(i).upper().count('C') for i in smiles]))
    print 'THIS ADDS INORGANICS, DO NOT USE COMPLETE MECH'
print 'todo - check duplicate matches, then also check combinations'
try: filename1=sys.argv[1]
except:filename1 = '../src/background/mcm331complete.kpp'
full = tuple(open(filename1))
try: filename=sys.argv[2]
except: filename = '../src/background/inorganic_mcm.kpp'
inorganics = tuple(open(filename))
fullstr='~'.join(full+inorganics).replace('\n','').replace('\t','').replace(' ','')
eqn = [re.sub(r"[\r\s\n]*",'',i).split(':') for i in re.findall(r'(\{[\. \s\w\d]*\}.*\:*);\r*~' ,fullstr)]
combined = [i.replace('\t','').replace(' ','').replace('\n','') for i in full+inorganics]
def iseqn (x):
if (re.search(r'\{[\. \s\d]*\}', x)):
return True
combined1 = [i.split('}')[1].split(':') for i in filter(iseqn , combined)]
nocoeff = re.compile(r'\b\d*\.*\d')
def pool_eqn(x):
#sort the reactions
r,p=x[0].split('=')
p=p.split('+')
p.sort()
r=r.split('+')
r.sort()
if co2:
cdiff=sum([smiles[nocoeff.sub('',i)] for i in p])-sum([smiles[nocoeff.sub('',i)] for i in r])
if cdiff<0: p.extend(['CO2']*abs(cdiff))
else: p.extend(['CO2']*cdiff)
p='+'.join(p)
r='+'.join(r)
x[0] = r+'='+p
#replace D and exp for sympy re.sub(r'(\d)[dD]([+-\.\d])',r'\1e\2', x[1].split('//')[0].replace('EXP','exp')
x[1] = x[1].split('//')[0].replace(';','')
return x
eqn = multiprocessing.Pool(available_cores).map(pool_eqn,combined1)
nocoeff = re.compile(r'\b[\d\.]*(\w+)\b')
specs = []
if co2:specs=['CO2']
for e in eqn:
specs.extend(re.findall(r"[\w']+", e[0]))
specs = list(set([nocoeff.match(i).group(1) for i in specs]))
specs.sort()
string = '''// reformatted by reformat.py
// contact: [email protected]
// filedata: %s
// %s species %s reactions
''' %(filename1 + '+' + filename, len(specs),len(eqn))
print string
string += '''
#INLINE F90_GLOBAL
REAL(dp)::M, N2, O2, RO2, H2O
#ENDINLINE
#INLINE F90_RCONST
#ENDINLINE
#INCLUDE atoms
#DEFVAR
'''
for i in specs:
if i == 'DUMMY': continue
string += i+'=IGNORE;\n'
string +='''#INLINE F90_RCONST
USE constants
RO2 = 0'''
''' get RO2 from mechanism '''
dummy = False
ro2 = ''
for i in full:
if 'RO2 = &' in i: dummy = True
if 'CALL' in i: break
if dummy: ro2+= i
ro2 = re.findall('C\(ind_[A-z0-9]*\)',ro2)
r2=re.compile(r'_([A-z0-9]*)\)')
ro2 = [y for y in ro2 if r2.search(y).group(1) in specs]
for i in ro2:
string += '''&
+%s'''%i
string += '''
CALL mcm_constants(time, temp, M, N2, O2, RO2, H2O)
#ENDINLINE
#EQUATIONS
'''
for i,j in enumerate(eqn):
if j[0][-1]=='=':j[0]+='DUMMY'
string += '{%04d} %s : %s;\n'%(i,j[0],j[1].replace('\r',''))
string = re.sub(r';[ \t]*;', ';', string)
ic_file = filename1.replace('../InitCons/','').replace('.csv','').replace('../src/background/','')
with open("formatted_"+ic_file, 'w') as f:
f.write(string)
print "\n formatted_"+ic_file+' written'
| gpl-3.0 |
kgullikson88/General | AnalyseBstar.py | 1 | 45673 | """
This class performs the analysis of B stars to determine the following:
    - vsini
    - rv
    - Teff
    - logg
    - vmacro
    - vmicro
    - [Fe/H]
Example usage:
--------------
    A minimal sketch (the grid location and data file below are hypothetical):
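        analyzer = Analyse(gridlocation='/path/to/BSTAR06', SpT='B2V',
                           fname='mystar.fits')
        analyzer.FindVsini()
        analyzer.CorrectVelocity(rvstar=0.0, bary=True)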
"""
# import matplotlib
#matplotlib.use("GTKAgg")
import scipy
from scipy.interpolate import InterpolatedUnivariateSpline as spline
from scipy.interpolate import interp1d
from collections import defaultdict, namedtuple, deque
import os
from os.path import isfile
import warnings
import subprocess
import time
import matplotlib.pyplot as plt
import numpy as np
import DataStructures
import FittingUtilities
from astropy import units, constants
from astropy.io import fits
import HelperFunctions
import Correlate
import SpectralTypeRelations
import Broaden
import astrolib #Ian Crossfield's script for rv correction
# Define a structure to store my parameter information and associate chi-squared values
ParameterValues = namedtuple("ParameterValues", "Teff, logg, Q, beta, He, Si, vmacro, chisq")
class Analyse():
def __init__(self, gridlocation="/Volumes/DATADRIVE/Stellar_Models/BSTAR06", SpT=None, debug=False, fname=None,
Teff=None, logg=None, resolution=60000.0):
# Just define some class variables
self.gridlocation = gridlocation
self.debug = debug
self.windval2str = {-13.8: "A",
-14.0: "a",
-13.4: "B",
-13.6: "b",
-13.15: "C",
-12.7: "D",
-14.30: "O"}
self.grid = defaultdict(lambda: defaultdict(
lambda: defaultdict(
lambda: defaultdict(
lambda: defaultdict(
lambda: defaultdict(
lambda: defaultdict(
lambda: defaultdict(
lambda: defaultdict(DataStructures.xypoint)))))))))
self.Teffs = range(10000, 20000, 500) + range(20000, 31000, 1000)
self.Heliums = (0.1, 0.15, 0.2)
self.Silicons = (-4.19, -4.49, -4.79)
self.logQ = (-14.3, -14.0, -13.8, -13.6, -13.4, -13.15, -12.7)
self.betas = (0.9, 1.2, 1.5, 2.0, 3.0)
self.species = ['BR10', 'BR11', 'BRALPHA', 'BRBETA', 'BRGAMMA',
'HALPHA', 'HBETA', 'HDELTA', 'HEI170', 'HEI205',
'HEI211', 'HEI4010', 'HEI4026', 'HEI4120', 'HEI4140',
'HEI4387', 'HEI4471', 'HEI4713', 'HEI4922', 'HEI6678',
'HEII218', 'HEII4200', 'HEII4541', 'HEII4686', 'HEII57',
'HEII611', 'HEII6406', 'HEII6527', 'HEII6683', 'HEII67',
'HEII712', 'HEII713', 'HEPS', 'HGAMMA', 'PALPHA',
'PBETA', 'PF10', 'PF9', 'PFGAMMA', 'PGAMMA',
'SiII4128', 'SiII4130', 'SiII5041', 'SiII5056', 'SiIII4552',
'SiIII4567', 'SiIII4574', 'SiIII4716', 'SiIII4813', 'SiIII4819',
'SiIII4829', 'SiIII5739', 'SiIV4089', 'SiIV4116', 'SiIV4212',
'SiIV4950', 'SiIV6667', 'SiIV6701']
self.visible_species = {}
# Get spectral type if the user didn't enter it
if SpT is None:
SpT = raw_input("Enter Spectral Type: ")
self.SpT = SpT
#Use the spectral type to get effective temperature and log(g)
# (if those keywords were not given when calling this)
MS = SpectralTypeRelations.MainSequence()
if Teff is None:
Teff = MS.Interpolate(MS.Temperature, SpT)
if logg is None:
M = MS.Interpolate(MS.Mass, SpT)
R = MS.Interpolate(MS.Radius, SpT)
G = constants.G.cgs.value
Msun = constants.M_sun.cgs.value
Rsun = constants.R_sun.cgs.value
logg = np.log10(G * M * Msun / (R * Rsun ** 2))
self.Teff_guess = Teff
self.logg_guess = logg
self.vsini = 300
self.resolution = resolution
# Read the filename if it is given
self.data = None
if fname is not None:
self.InputData(fname)
# Initialize a figure for drawing
#plt.figure(1)
#plt.plot([1], [1], 'ro')
#plt.show(block=False)
#plt.cla()
def GetModel(self, Teff, logg, Q, beta, helium, silicon, species, vmacro, xspacing=None):
"""
This method takes the following values, and finds the closest match
in the grid. It will warn the user if the values are not actual
grid points, which they should be!
Parameters:
-----------
Teff: Effective temperature of the star (K)
Options: 10000-20000K in 500K steps, 20000-30000 in 1000K steps
logg: log of the surface gravity of the star (cgs)
Options: 4.5 to 4log(Teff) - 15.02 in 0.1 dex steps
        Q: log of the wind strength, logQ = log(Mdot * (R * v_inf)^-1.5)
Options: -14.3, -14.0, -13.8, -13.6, -13.4, -13.15, -12.7
beta: Wind velocity law for the outer expanding atmosphere
Options: 0.9, 1.2, 1.5, 2.0, 3.0
        helium: Helium fraction of the star's atmosphere
Options: 0.10, 0.15, 0.20
silicon: Relative silicon abundance as log(nSi/nH)
Options: -4.19, -4.49, -4.79
vmacro: Macroturbulent velocity (km/s)
Options: 3,6,10,12,15 for Teff<20000
6,10,12,15,20 for Teff>20000
species: The name of the spectral line you want
Options: Many. Just check the model grid.
xspacing: An optional argument. If provided, we will resample the line
to have the given x-axis spacing.
"""
# Check to see if the input is in the grid
if Teff not in self.Teffs:
warnings.warn("Teff value (%g) not in model grid!" % Teff)
Teff = HelperFunctions.GetSurrounding(self.Teffs, Teff)[0]
print "\tClosest value is %g\n\n" % Teff
# logg and vmacro depend on Teff, so make those lists now
loggs = [round(g, 2) for g in np.arange(4.5, 4 * np.log10(Teff) - 15.02, -0.1)]
if Teff < 20000:
self.vmacros = (3, 6, 10, 12, 15)
else:
self.vmacros = (6, 10, 12, 15, 20)
# Continue checking if the inputs are on a grid point
if logg not in loggs:
warnings.warn("log(g) value (%g) not in model grid!" % logg)
logg = HelperFunctions.GetSurrounding(loggs, logg)[0]
print "\tClosest value is %g\n\n" % logg
if Q not in self.logQ:
warnings.warn("log(Q) wind value (%g) not in model grid!" % Q)
Q = HelperFunctions.GetSurrounding(self.logQ, Q)[0]
print "\tClosest value is %g\n\n" % Q
if beta not in self.betas:
warnings.warn("Beta value (%g) not in model grid!" % beta)
beta = HelperFunctions.GetSurrounding(self.betas, beta)[0]
print "\tClosest value is %g\n\n" % beta
if helium not in self.Heliums:
warnings.warn("Helium fraction (%g) not in model grid!" % helium)
helium = HelperFunctions.GetSurrounding(self.Heliums, helium)[0]
print "\tClosest value is %g\n\n" % helium
if silicon not in self.Silicons:
warnings.warn("Silicon relative abundance (%g) not in model grid!" % silicon)
silicon = HelperFunctions.GetSurrounding(self.Silicons, silicon)[0]
print "\tClosest value is %g\n\n" % silicon
if species not in self.species:
raise ValueError("Desired species ( %s ) not in grid!" % species)
if vmacro not in self.vmacros:
warnings.warn("Macroturbulent velocity (%g) not in model grid!" % vmacro)
vmacro = HelperFunctions.GetSurrounding(self.vmacros, vmacro)[0]
print "\tClosest value is %g\n\n" % vmacro
# Now, make the filename that this model (or the closest one) is at
windstr = self.windval2str[Q]
abstr = "He%iSi%i" % (helium * 100, -silicon * 100)
fname = "%s/T%i/g%i/%s%.2i/%s/OUT.%s_VT%.3i.gz" % (
self.gridlocation, Teff, logg * 10, windstr, beta * 10, abstr, species, vmacro)
if not isfile(fname):
warnings.warn("File %s not found! Skipping. This could cause errors later!" % fname)
return DataStructures.xypoint(x=np.arange(300, 1000, 10))
# Gunzip that file to a temporary one.
tmpfile = "tmp%f" % time.time()
lines = subprocess.check_output(['gunzip', '-c', fname])
output = open(tmpfile, "w")
output.writelines(lines)
output.close()
with warnings.catch_warnings():
warnings.simplefilter("ignore")
x, y = np.genfromtxt(tmpfile, invalid_raise=False, usecols=(2, 4), unpack=True)
#Removes NaNs from random extra lines in the files...
while np.any(np.isnan(y)):
x = x[:-1]
y = y[:-1]
# Check for duplicate x points (why does this grid suck so much?!)
xdiff = np.array([x[i + 1] - x[i] for i in range(x.size - 1)])
goodindices = np.where(xdiff > 1e-7)[0]
x = x[goodindices]
y = y[goodindices]
#Convert from angstrom to nm, and switch to air wavelengths
x = x * units.angstrom.to(units.nm) / 1.00026
# delete the temporary file
subprocess.check_call(['rm', tmpfile])
if xspacing is not None:
modelfcn = spline(x, y, k=1)
x = np.arange(x[0], x[-1] + xspacing, xspacing)
y = modelfcn(x)
return DataStructures.xypoint(x=x, y=y)
def InputData(self, fname, resample=True):
"""
This takes a fits file, and reads it as a bunch of echelle orders.
It also saves the header for later use.
If resample==True, it will resample the x-axis to a constant spacing
"""
orders = HelperFunctions.ReadFits(fname, extensions=True, x="wavelength", y="flux", cont="continuum",
errors="error")
for i, order in enumerate(orders):
orders[i].err = np.sqrt(order.y)
self.data = orders
if resample:
for i, order in enumerate(self.data):
self.data[i] = self._resample(order)
self.fname = fname.split("/")[-1]
hdulist = fits.open(fname)
self.headers = []
for i in range(len(hdulist)):
self.headers.append(hdulist[i].header)
return
def FindVsini(self, vsini_lines="%s/School/Research/Useful_Datafiles/vsini.list" % os.environ["HOME"]):
"""
This function will read in the linelist useful for determining vsini.
For each one, it will ask the user if the line is usable by bringing up
a plot of the appropriate order in the data. If the user says it is, then
the vsini is determined as in Simon-Diaz (2007).
"""
# First, check to make sure the user entered a datafile
if self.data is None:
fname = raw_input("Enter filename for the data: ")
self.InputData(fname)
# Read in the vsini linelist file
center, left, right = np.loadtxt(vsini_lines, usecols=(1, 2, 3), unpack=True)
center /= 10.0
left /= 10.0
right /= 10.0
# Find each line in the data
plt.figure(1)
vsini_values = []
for c, l, r in zip(center, left, right):
found = False
for order in self.data:
if order.x[0] < c < order.x[-1]:
found = True
break
if not found:
continue
first = np.searchsorted(order.x, l)
last = np.searchsorted(order.x, r)
segment = order[first:last]
segment.cont = FittingUtilities.Continuum(segment.x, segment.y, fitorder=1, lowreject=1, highreject=5)
segment.y /= segment.cont
plt.plot(segment.x, segment.y)
yrange = plt.gca().get_ylim()
plt.plot((c, c), yrange, 'r--', lw=2)
plt.xlabel("Wavelength (nm)")
plt.ylabel("Relative flux")
plt.draw()
valid = raw_input("Is this line usable for vsini determination (y/n)? ")
if "n" in valid.lower():
plt.cla()
continue
# Ask if the continuum needs to be renormalized
done = False
while not done:
renorm = raw_input("Renormalize continuum (y/n)? ")
if "y" in renorm.lower():
plt.cla()
segment = self.UserContinuum(segment)
plt.plot(segment.x, segment.y)
plt.plot(segment.x, segment.cont)
plt.draw()
else:
segment.y /= segment.cont
done = True
# Fourier transform the line, and let the user click on the first minimum
plt.cla()
vsini = self.UserVsini(segment)
vsini_values.append(vsini)
plt.cla()
# Save the mean and standard deviation in the file 'vsini.dat'
outfile = open("vsini.dat", "a")
outfile.write("%s%.2f\t%.3f\n" % (self.fname.ljust(20), np.mean(vsini_values), np.std(vsini_values)))
self.vsini = np.mean(vsini_values)
return
def CorrectVelocity(self, rvstar=0.0, bary=True, resample=True):
"""
This function corrects for the radial velocity of the star.
- rvstar: the radial velocity of the star, in heliocentric velocity km/s
- bary: a bool variable to decide whether the barycentric velocity
should be corrected. If true, it uses the header from the
data most recently read in.
- resample: a bool variable to decide whether to resample
the data into a constant wavelength spacing
after doing the correction
"""
# First, check to make sure the user entered a datafile
if self.data is None:
fname = raw_input("Enter filename for the data: ")
self.InputData(fname)
rv = rvstar
if bary:
header = self.headers[0]
jd = header['jd']
observatory = header['observat']
if "MCDONALD" in observatory:
latitude = 30.6714
longitude = 104.0225
altitude = 2070.0
elif "CTIO" in observatory:
latitude = -30.1697
longitude = 70.8065
altitude = 2200.0
ra = header['ra']
dec = header['dec']
ra_seg = ra.split(":")
dec_seg = dec.split(":")
ra = float(ra_seg[0]) + float(ra_seg[1]) / 60.0 + float(ra_seg[2]) / 3600.0
dec = float(dec_seg[0]) + float(dec_seg[1]) / 60.0 + float(dec_seg[2]) / 3600.0
rv += astrolib.helcorr(longitude, latitude, altitude, ra, dec, jd, debug=self.debug)[0]
c = constants.c.cgs.value * units.cm.to(units.km)
for i, order in enumerate(self.data):
order.x *= (1.0 + rv / c)
if resample:
self.data[i] = self._resample(order)
else:
self.data[i] = order
return
def GridSearch(self, windguess=-14.3, betaguess=0.9):
"""
This method will do the actual search through the grid, tallying the chi-squared
value for each set of parameters. The guess parameters are determined from the
spectral type given in the __init__ call to this class.
It does the grid search in a few steps. First, it determines the best Teff
and logg for the given wind and metallicity guesses (which default to solar
metallicity and no wind). Then, it searches the subgrid near the best Teff/logg
to nail down the best metallicity, silicon value, and macroturbulent velocity
- windguess is the guess value for the wind. If not given, it defaults to no wind
- betaguess is the guess value for the wind velocity parameter 'beta'. Ignored
if windguess is None; otherwise it MUST be given!
It will return the best-fit parameters, as well as the list of
parameters tested and their associated chi-squared values
"""
# First, check to make sure the user entered a datafile
if self.data is None:
fname = raw_input("Enter filename for the data: ")
self.InputData(fname)
#Now, find the spectral lines that are visible in this data.
self._ConnectLineToOrder()
# Find the best log(g) for the guess temperature
bestlogg, dlogg, seplogg = self._FindBestLogg(self.Teff_guess, self.logg_guess, windguess, betaguess, 0.1,
-4.49, 10.0)
"""
Start loop for determination of Teff, Si-abundance, and microturbulence
"""
# Find the best Teff and log(g)
if windguess is None:
Teff, logg, parlist = self._FindBestTemperature(self.Teff_guess, self.logg_guess, -14.3, 0.9, 0.1, -4.49,
10.0)
else:
Teff, logg, parlist = self._FindBestTemperature(self.Teff_guess, self.logg_guess, windguess, betaguess, 0.1,
-4.49, 10.0)
print Teff, logg
print parlist
self.parlist = parlist #TEMPORARY! REMOVE WHEN I AM DONE WITH THIS FUNCTION!
#For Teff and logg close to the best ones, find the best other parameters (search them all?)
tidx = np.argmin(abs(np.array(self.Teffs) - Teff))
for i in range(max(0, tidx - 1), min(len(self.Teffs), tidx + 2)):
T = self.Teffs[i]
loggs = np.array([round(g, 2) for g in np.arange(4.5, 4 * np.log10(T) - 15.02, -0.1)])
gidx = np.argmin(abs(loggs - logg))
for j in range(max(0, gidx - 1), min(len(loggs), gidx + 2)):
pars = self._FitParameters(T, loggs[j], parlist)
self.parlist = parlist #TEMPORARY! REMOVE WHEN I AM DONE WITH THIS FUNCTION!
def _ConnectLineToOrder(self, force=False):
"""
This private method is to determine which lines exist in the data,
and in what spectral order they are. It is called right before starting
the parameter search, in order to minimize the number of models we need
to read in.
If force==True, then we will do this whether or not it was already done
"""
# Don't need to do this if we already have
if len(self.visible_species.keys()) > 0 and not force:
print "Already connected lines to spectral orders. Not repeating..."
return
species = {}
Teff = self.Teffs[4]
logg = [round(g, 2) for g in np.arange(4.5, 4 * np.log10(Teff) - 15.02, -0.1)][0]
for spec in self.species:
print "\nGetting model for %s" % spec
model = self.GetModel(Teff,
logg,
-14.3,
0.9,
0.1,
-4.49,
spec,
10)
# Find the appropriate order
w0 = (model.x[0] + model.x[-1]) / 2.0
idx = -1
diff = 9e9
for i, order in enumerate(self.data):
x0 = (order.x[0] + order.x[-1]) / 2.0
if abs(x0 - w0) < diff and order.x[0] < w0 < order.x[-1]:
diff = abs(x0 - w0)
idx = i
if idx < 0 or (idx == i and diff > 10.0):
continue
species[spec] = idx
self.visible_species = species
return
def _FindBestLogg(self, Teff, logg_values, wind, beta, He, Si, vmacro, vmicro):
"""
This semi-private method finds the best log(g) value for specific values of
the other parameters. It does so by fitting the Balmer line wings
"""
xlims = {"HGAMMA": [430.0, 434.047, 438.0],
"HDELTA": [406.0, 410.174, 414.0],
"HBETA": [480.0, 486.133, 492.0]}
species = ["HGAMMA", "HDELTA"]
if wind < -13.8:
species.append("HBETA")
# Begin chi-squared loop
chisquared = [[] for i in range(len(species))]
loggs_tested = [[] for i in range(len(species))]
separate_bestlogg = [0.0 for i in range(len(species))]  # best logg found for each line separately
for i, spec in enumerate(species):
if spec not in self.visible_species.keys():
continue
order = self.data[self.visible_species[spec]]
xlow, lambda0, xhigh = xlims[spec]
#We don't want to include the inner region in the fit.
delta = self._GetDelta(order, lambda0, Teff, self.vsini, vmacro, vmicro)
goodindices = np.where(np.logical_or(order.x < lambda0 - delta,
order.x > lambda0 + delta))[0]
waveobs = order.x[goodindices]
fluxobs = (order.y / order.cont)[goodindices]
errorobs = (order.err / order.cont)[goodindices]
# Further reduce the region to search so that it is between xlow and xhigh
goodindices = np.where(np.logical_and(waveobs > xlow,
waveobs < xhigh))[0]
waveobs = waveobs[goodindices]
fluxobs = fluxobs[goodindices]
errorobs = errorobs[goodindices]
# Loop over logg values
lineprofiles = []
for logg in logg_values:
model = self.GetModel(Teff,
logg,
wind,
beta,
He,
Si,
spec,
vmicro,
xspacing=order.x[1] - order.x[0])
model = Broaden.RotBroad(model, self.vsini * units.km.to(units.cm))
model = Broaden.MacroBroad(model, vmacro)
model = Broaden.ReduceResolution(model, self.resolution)
model = FittingUtilities.RebinData(model, waveobs)
lineprofiles.append(model)
# Get the chi-squared for this data
chi2 = self._GetChiSquared(waveobs, fluxobs, errorobs, model)
chisquared[i].append(chi2)
loggs_tested[i].append(logg)
# Find the best chi-squared, summed over the lines considered, and the best individual one
chisquared = np.array(chisquared)
bestlogg = logg_values[np.argmin(chisquared[i])]
separate_best = np.argmin(chisquared[i])
separate_bestlogg[i] = logg_values[separate_best]
# Find where there are large deviations (other lines)
modelflux = lineprofiles[separate_best].y
sigma = np.std(modelflux - fluxobs)
good = np.where(abs(modelflux - fluxobs) < 3.0 * sigma)
waveobs = waveobs[good]
fluxobs = fluxobs[good]
errorobs = errorobs[good]
j = 0
for logg, profile in zip(logg_values, lineprofiles):
chi2 = self._GetChiSquared(waveobs, fluxobs, errorobs, profile)
chisquared[i][j] = chi2
j += 1
bestlogg = logg_values[np.argmin(chisquared[i])]
separate_best = np.argmin(chisquared[i])
separate_bestlogg[i] = logg_values[separate_best]
total = np.sum(chisquared, axis=0)
separate_bestlogg = np.array(separate_bestlogg)
# Find the best logg over all lines considered
best = np.argmin(total)
bestgrav = logg_values[best]
loggmin = min(separate_bestlogg)
loggmax = max(separate_bestlogg)
# Determine the error of the logg-determination as the
# maximal deviation between the separately determined
# loggs and the general best matching one
deltalogg_minus = np.sqrt((bestgrav - loggmin) ** 2 + sigma ** 2)
deltalogg_plus = np.sqrt((bestgrav - loggmax) ** 2 + sigma ** 2)
deltalogg = max(0.5, deltalogg_minus, deltalogg_plus)
return [bestgrav, deltalogg, separate_bestlogg]
def _GetChiSquared(self, waveobs, fluxobs, errorobs, model):
"""
This private method determines the chi-squared of the data
given the model.
"""
# Make sure the observation and model overlap
goodindices = np.where(np.logical_and(waveobs > model.x[0], waveobs < model.x[-1]))[0]
wavecompare = waveobs[goodindices]
fluxcompare = fluxobs[goodindices]
errorcompare = errorobs[goodindices]
# Interpolate model onto the same wavelength grid as the data
model = FittingUtilities.RebinData(model, wavecompare)
# Let the x-axis shift by +/- 5 pixels
chisq = []
for shift in range(-5, 6):
flux = self._shift(fluxcompare, shift)
error = self._shift(errorcompare, shift)
chisq.append(np.sum(((flux - model.y) / error) ** 2))
return min(chisq)
def _shift(self, array, amount):
"""
Shifts array by amount indices. Uses collections.deque objects to do so efficiently
"""
array = deque(array)
array.rotate(amount)  # deque.rotate shifts in place and returns None
return list(array)
def _GetDelta(self, order, lambda0, Teff, vsini, vmacro, vmicro, minmax="min"):
"""
This private method finds the inner region of a line to ignore
in the logg fit to the line wings
"""
# FWHM
idx = np.argmin(abs(order.x - lambda0))
flux0 = order.y[idx] / order.cont[idx]
fluxhalf = 0.5 * (1.0 + flux0)
idx = max(np.where(np.logical_and(order.y / order.cont > fluxhalf, order.x < lambda0))[0])
waveblue = order.x[idx]
idx = min(np.where(np.logical_and(order.y / order.cont > fluxhalf, order.x > lambda0))[0])
wavered = order.x[idx]
delta1 = min(lambda0 - waveblue, wavered - lambda0)
# vsini and macroturbulent velocity
c = constants.c.cgs.value * units.cm.to(units.km)
delta2 = (vsini + vmacro) / (2.0 * c) * lambda0
# thermal velocity
mp = constants.m_p.cgs.value
kB = constants.k_B.cgs.value
vth_square = 2 * kB * Teff / mp
vmic = vmicro * 10 ** 5
vtherm = np.sqrt(vth_square + vmic ** 2)
delta3 = 3 * vtherm * 10 ** -5 / c * lambda0
if minmax.lower() == "min":
return min(delta1, delta2, delta3)
elif minmax.lower() == "max":
return max(delta1, delta2, delta3)
def _FindBestTemperature(self, Teff_guess, logg_guess, wind, beta, He, Si, vmicro, vmacro):
"""
This semi-private method determines the best temperature and log(g) values,
given specific values for the wind, metallicity, and macroturbulent velocity
parameters.
"""
# Keep a list of the parameters and associated chi-squared values
pars = []
# Set the range in temperature and log(g) to search
dT = 2000
dlogg = 1.5
# Determine range of temperatures to search
Teff_low = HelperFunctions.GetSurrounding(self.Teffs, Teff_guess - dT)[0]
Teff_high = HelperFunctions.GetSurrounding(self.Teffs, Teff_guess + dT)[0]
first = self.Teffs.index(Teff_low)
last = self.Teffs.index(Teff_high)
if last < len(self.Teffs) - 1:
last += 1
# Define the species to search
xlims = {"HGAMMA": [430.0, 434.047, 438.0],
"HDELTA": [406.0, 410.174, 414.0],
"HBETA": [480.0, 486.133, 492.0]}
species = ["HGAMMA", "HDELTA"]
if wind < -13.8:
species.append("HBETA")
# Begin loop over temperatures
chisquared = {spec: [] for spec in species}
loggvals = []
loggerrs = []
for Teff in self.Teffs[first:last]:
if self.debug:
print "T = %g" % Teff
loggs = [round(g, 2) for g in np.arange(4.5, 4 * np.log10(Teff) - 15.02, -0.1)][::-1]
logg_low = HelperFunctions.GetSurrounding(loggs, self.logg_guess - dlogg)[0]
logg_high = HelperFunctions.GetSurrounding(loggs, self.logg_guess + dlogg)[0]
first2 = loggs.index(logg_low)
last2 = loggs.index(logg_high)
if last2 < len(loggs) - 1:
last2 += 1
# Do the search over log(g) for this temperature
bestgrav, deltalogg, separate_bestlogg = self._FindBestLogg(Teff,
loggs[first2:last2],
wind,
beta,
He,
Si,
vmacro,
vmicro)
#pars_temp = self._FindBestLogg(Teff, loggs[first2:last2], wind, beta, He, Si, vmacro)
loggvals.append(bestgrav)
loggerrs.append(deltalogg)
for spec in species:
if spec not in self.visible_species.keys():
continue
order = self.data[self.visible_species[spec]]
xlow, lambda0, xhigh = xlims[spec]
#We want to include ONLY the inner region in the fit.
delta = self._GetDelta(order, lambda0, Teff, self.vsini, vmacro, vmicro, minmax="max")
goodindices = np.where(np.logical_and(order.x >= lambda0 - delta,
order.x <= lambda0 + delta))[0]
waveobs = order.x[goodindices]
fluxobs = (order.y / order.cont)[goodindices]
errorobs = (order.err / order.cont)[goodindices]
# Further reduce the region to search so that it is between xlow and xhigh
goodindices = np.where(np.logical_and(waveobs > xlow,
waveobs < xhigh))[0]
waveobs = waveobs[goodindices]
fluxobs = fluxobs[goodindices]
errorobs = errorobs[goodindices]
# Generate the model
model = self.GetModel(Teff,
bestgrav,
wind,
beta,
He,
Si,
spec,
vmicro,
xspacing=order.x[1] - order.x[0])
model = Broaden.RotBroad(model, self.vsini * units.km.to(units.cm))
model = Broaden.MacroBroad(model, vmacro)
model = Broaden.ReduceResolution(model, self.resolution)
model = FittingUtilities.RebinData(model, waveobs)
# Get the chi-squared for this data
chi2 = self._GetChiSquared(waveobs, fluxobs, errorobs, model)
chisquared[spec].append(chi2)
# Now, find the best chi-squared value
# First, the single best one:
chi2arr = []
for spec in species:
if spec not in self.visible_species.keys():
continue
chi2arr.append(chisquared[spec])
idx = np.argmin(chi2arr) % int(last - first)
bestindividual = self.Teffs[first + idx]
# Now, the best one summed over the lines
idx = np.argmin(np.sum(chi2arr, axis=0))
bestT = self.Teffs[first + idx]
return bestT, loggvals[idx], loggerrs[idx]
def _FitParameters(self, Teff, logg, parlist):
"""
This method takes a specific value of Teff and logg, and
searches through the wind parameters, the metallicities, and
the macroturbulent velocities.
-Teff: the effective temperature to search within
-logg: the log(g) to search within
-parlist: the list of parameters already searched. It will not
duplicate already searched parameters
"""
if Teff < 20000:
vmacros = (3, 6, 10, 12, 15)
else:
vmacros = (6, 10, 12, 15, 20)
for He in self.Heliums:
if self.debug:
print "Helium fraction = %g" % He
for Si in self.Silicons:
if self.debug:
print "Log(Silicon abundance) = %g" % Si
for Q in self.logQ[:4]:
if self.debug:
print "Wind speed parameter = %g" % Q
print "test"
for beta in self.betas[:4]:
if self.debug:
print "Wind velocity scale parameter (beta) = %g" % beta
for vmacro in vmacros:
if self.debug:
print "Macroturbulent velocity = %g" % vmacro
# Check if this is already in the parameter list
done = False
for p in parlist:
if p.Teff == Teff and p.logg == logg and p.He == He and p.Si == Si and p.Q == Q and p.beta == beta and p.vmacro == vmacro:
done = True
if done:
continue
chisq = 0.0
normalization = 0.0
for spec in self.visible_species.keys():
print "\t\t", spec
order = self.data[self.visible_species[spec]]
model = self.GetModel(Teff,
logg,
-14.3,
0.9,
0.1,
-4.49,
spec,
10,
xspacing=order.x[1] - order.x[0])
model = Broaden.RotBroad(model, self.vsini * units.km.to(units.cm))
model = Broaden.ReduceResolution(model, 60000.0)
model = FittingUtilities.RebinData(model, order.x)
chisq += np.sum((order.y - model.y * order.cont) ** 2 / order.err ** 2)
normalization += float(order.size())
p = ParameterValues(Teff, logg, Q, beta, He, Si, vmacro, chisq / (normalization - 7.0))
parlist.append(p)
return parlist
def GetRadialVelocity(self):
"""
DO NOT USE THIS! IT DOESN'T WORK VERY WELL, AND THE 'CorrectVelocity'
METHOD SHOULD WORK WELL ENOUGH FOR WHAT I NEED!
This function will get the radial velocity by cross-correlating a model
of the star against all orders of the data. The maximum of the CCF will
likely be broad due to rotational broadening, but will still encode the
rv of the star (plus Earth, if the observations are not barycentric-corrected)
"""
# Get all of the models with the appropriate temperature and log(g)
# We will assume solar abundances of everything, and no wind for this
xgrid = np.arange(self.data[0].x[0] - 20, self.data[-1].x[-1] + 20, 0.01)
full_model = DataStructures.xypoint(x=xgrid, y=np.ones(xgrid.size))
Teff = HelperFunctions.GetSurrounding(self.Teffs, self.Teff_guess)[0]
loggs = [round(g, 2) for g in np.arange(4.5, 4 * np.log10(Teff) - 15.02, -0.1)]
logg = HelperFunctions.GetSurrounding(loggs, self.logg_guess)[0]
corrlist = []
normalization = 0.0
species = ['BRALPHA', 'BRBETA', 'BRGAMMA',
'HALPHA', 'HBETA', 'HDELTA', 'HGAMMA']
for spec in species:
print "\nGetting model for %s" % spec
model = self.GetModel(Teff,
logg,
-14.3,
0.9,
0.1,
-4.49,
spec,
10)
# Find the appropriate order
w0 = (model.x[0] + model.x[-1]) / 2.0
idx = -1
diff = 9e9
for i, order in enumerate(self.data):
x0 = (order.x[0] + order.x[-1]) / 2.0
if abs(x0 - w0) < diff and order.x[0] < w0 < order.x[-1]:
diff = abs(x0 - w0)
idx = i
if idx < 0 or (idx == i and diff > 10.0):
continue
order = self.data[idx]
# Make sure the model is bigger than this order
if model.x[0] > order.x[0] - 5.0:
model.x = np.r_[(order.x[0] - 5.0,), model.x]
model.y = np.r_[(1.0,), model.y]
if model.x[-1] < order.x[-1] + 5.0:
model.x = np.r_[model.x, (order.x[-1] + 5.0,)]
model.y = np.r_[model.y, (1.0,)]
model.cont = np.ones(model.x.size)
# Rotationally broaden model
xgrid = np.arange(model.x[0], model.x[-1], 0.001)
model = FittingUtilities.RebinData(model, xgrid)
model = Broaden.RotBroad(model, self.vsini * units.km.to(units.cm))
# Find low point:
idx = np.argmin(model.y)
w0 = model.x[idx]
idx = np.argmin(order.y / order.cont)
x0 = order.x[idx]
print "Model wavelength = %.5f" % w0
print "Data wavelength = %.5f" % x0
print "Velocity shift = %g km/s" % (3e5 * (x0 - w0) / w0)
# Rebin data to constant (log) spacing
start = np.log(order.x[0])
end = np.log(order.x[-1])
neworder = order.copy()
neworder.x = np.logspace(start, end, order.size(), base=np.e)
neworder = FittingUtilities.RebinData(order, neworder.x)
# Rebin the model to the same spacing
logspacing = np.log(neworder.x[1] / neworder.x[0])
left = np.searchsorted(model.x, order.x[0] - 10)
right = np.searchsorted(model.x, order.x[-1] + 10)
right = min(right, model.size() - 2)
left, right = 0, -1
start = np.log(model.x[left])
end = np.log(model.x[right])
xgrid = np.exp(np.arange(start, end + logspacing * 1.1, logspacing))
segment = FittingUtilities.RebinData(model, xgrid)
plt.figure(3)
plt.plot(neworder.x, neworder.y / neworder.cont)
plt.plot(segment.x, segment.y / segment.cont)
corr = Correlate.Correlate([neworder, ], [segment, ], debug=True)
plt.figure(2)
plt.plot(corr.x, corr.y)
if not np.any(np.isnan(corr.y)):
corrlist.append(corr)
normalization += float(order.size())
#fcn = interp1d(model.x, model.y, kind='linear', bounds_error=False, fill_value=1.0)
#full_model.y *= fcn(full_model.x)
#plt.plot(model.x, model.y)
#plt.plot(full_model.x, full_model.y)
#plt.show()
#output = Correlate.GetCCF(self.data, full_model, vsini=0.0, resolution=60000, process_model=True, rebin_data=True, debug=True)
#ccf = output["CCF"]
#plt.plot(ccf.x, ccf.y)
#idx = np.argmax(ccf.y)
#print "Maximum CCF at %g km/s" %(ccf.x[idx])
#plt.show()
# Add up the individual CCFs (use the Maximum Likelihood method from Zucker 2003, MNRAS, 342, 1291)
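# Each order's CCF C_i(v) enters as (1 - C_i(v)^2) raised to its share of pixels,
# so the combined statistic is 1 - [prod_i (1 - C_i(v)^2)^(N_i/N_tot)]^(1/M),
# which peaks where the individual CCFs agree (M = number of orders used).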
total = corrlist[0].copy()
total.y = np.ones(total.size())
for i, corr in enumerate(corrlist):
correlation = spline(corr.x, corr.y, k=1)
N = self.data[i].size()
total.y *= np.power(1.0 - correlation(total.x) ** 2, float(N) / normalization)
master_corr = total.copy()
master_corr.y = 1.0 - np.power(total.y, 1.0 / float(len(corrlist)))
idx = np.argmax(master_corr.y)
rv = master_corr.x[idx]
print "Radial velocity = %g km/s" % rv
plt.figure(1)
plt.plot(master_corr.x, master_corr.y, 'k-')
plt.xlabel("Velocity (km/s)")
plt.ylabel("CCF")
plt.show()
return rv
def UserContinuum(self, spectrum):
"""
This will let the user click twice to define continuum points, and
will then fit a straight line through the points as the continuum vector.
Expects a short segment of spectrum, such that the continuum is quite linear.
"""
self.interactive_mode = "continuum"
fig = plt.figure(1)
cid = fig.canvas.mpl_connect('button_press_event', self.mouseclick)
self.clicks = []
plt.plot(spectrum.x, spectrum.y)
plt.draw()
plt.waitforbuttonpress()
plt.waitforbuttonpress()
fig.canvas.mpl_disconnect(cid)
plt.cla()
# Once we get here, the user has clicked twice
for click in self.clicks:
print click.xdata, "\t", click.ydata
slope = (self.clicks[1].ydata - self.clicks[0].ydata) / (self.clicks[1].xdata - self.clicks[0].xdata)
spectrum.cont = self.clicks[0].ydata + slope * (spectrum.x - self.clicks[0].xdata)
return spectrum
def UserVsini(self, spectrum):
"""
This does a Fourier transform on the spectrum, and then lets
the user click on the first minimum, which indicates the vsini of the star.
"""
# Set up plotting
self.interactive_mode = "vsini"
fig = plt.figure(1)
cid = fig.canvas.mpl_connect('button_press_event', self.mouseclick)
# Make wavelength spacing uniform
xgrid = np.linspace(spectrum.x[0], spectrum.x[-1], spectrum.size())
spectrum = FittingUtilities.RebinData(spectrum, xgrid)
extend = np.array(40 * spectrum.size() * [1, ])
spectrum.y = np.r_[extend, spectrum.y, extend]
# Do the Fourier transform and keep the positive frequencies
fft = np.fft.fft(spectrum.y - 1.0)
freq = np.fft.fftfreq(spectrum.y.size, d=spectrum.x[1] - spectrum.x[0])
good = np.where(freq > 0)[0]
fft = fft[good].real ** 2 + fft[good].imag ** 2
freq = freq[good]
# Plot inside a do loop, to let user try a few times
done = False
trials = []
plt.loglog(freq, fft)
plt.xlim((1e-2, 10))
plt.draw()
for i in range(10):
plt.waitforbuttonpress()
sigma_1 = self.click.xdata
if self.click.button == 1:
c = constants.c.cgs.value * units.cm.to(units.km)
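# The first zero sigma_1 of the Fourier transform of a rotational broadening
# profile satisfies sigma_1 ~= 0.66 / (lambda * vsini / c), hence the inversion
# below (lambda taken as the mean wavelength of the segment).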
vsini = 0.66 * c / (spectrum.x.mean() * sigma_1)
print "vsini = ", vsini, " km/s"
trials.append(vsini)
plt.cla()
else:
done = True
break
fig.canvas.mpl_disconnect(cid)
if len(trials) == 1:
return trials[0]
print "\n"
for i, vsini in enumerate(trials):
print "\t[%i]: vsini = %.1f km/s" % (i + 1, vsini)
inp = raw_input("\nWhich vsini do you want to use (choose from the options above)? ")
return trials[int(inp) - 1]
def mouseclick(self, event):
"""
This is a generic mouseclick method. It will act differently
based on what the value of self.interactive_mode is.
"""
if self.interactive_mode == "continuum":
if len(self.clicks) < 2:
plt.plot(event.xdata, event.ydata, 'rx', markersize=12)
plt.draw()
self.clicks.append(event)
elif self.interactive_mode == "vsini":
self.click = event
return
def _resample(self, order):
"""
Semi-private method to resample an order to a constant wavelength spacing
"""
xgrid = np.linspace(order.x[0], order.x[-1], order.size())
return FittingUtilities.RebinData(order, xgrid)
| gpl-3.0 |
vortex-ape/scikit-learn | examples/preprocessing/plot_all_scaling.py | 4 | 14019 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
=============================================================
Compare the effect of different scalers on data with outliers
=============================================================
Feature 0 (median income in a block) and feature 5 (number of households) of
the `California housing dataset
<http://www.dcc.fc.up.pt/~ltorgo/Regression/cal_housing.html>`_ have very
different scales and contain some very large outliers. These two
characteristics lead to difficulties to visualize the data and, more
importantly, they can degrade the predictive performance of many machine
learning algorithms. Unscaled data can also slow down or even prevent the
convergence of many gradient-based estimators.
Indeed many estimators are designed with the assumption that each feature takes
values close to zero or more importantly that all features vary on comparable
scales. In particular, metric-based and gradient-based estimators often assume
approximately standardized data (centered features with unit variances). A
notable exception are decision tree-based estimators that are robust to
arbitrary scaling of the data.
This example uses different scalers, transformers, and normalizers to bring the
data within a pre-defined range.
Scalers are linear (or more precisely affine) transformers and differ from each
other in the way to estimate the parameters used to shift and scale each
feature.
``QuantileTransformer`` provides non-linear transformations in which distances
between marginal outliers and inliers are shrunk. ``PowerTransformer`` provides
non-linear transformations in which data is mapped to a normal distribution to
stabilize variance and minimize skewness.
Unlike the previous transformations, normalization refers to a per sample
transformation instead of a per feature transformation.
The following code is a bit verbose, feel free to jump directly to the analysis
of the results_.
"""
# Author: Raghav RV <[email protected]>
# Guillaume Lemaitre <[email protected]>
# Thomas Unterthiner
# License: BSD 3 clause
from __future__ import print_function
import numpy as np
import matplotlib as mpl
from matplotlib import pyplot as plt
from matplotlib import cm
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import minmax_scale
from sklearn.preprocessing import MaxAbsScaler
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import RobustScaler
from sklearn.preprocessing import Normalizer
from sklearn.preprocessing import QuantileTransformer
from sklearn.preprocessing import PowerTransformer
from sklearn.datasets import fetch_california_housing
print(__doc__)
dataset = fetch_california_housing()
X_full, y_full = dataset.data, dataset.target
# Take only 2 features to make visualization easier
# Feature of 0 has a long tail distribution.
# Feature 5 has a few but very large outliers.
X = X_full[:, [0, 5]]
distributions = [
('Unscaled data', X),
('Data after standard scaling',
StandardScaler().fit_transform(X)),
('Data after min-max scaling',
MinMaxScaler().fit_transform(X)),
('Data after max-abs scaling',
MaxAbsScaler().fit_transform(X)),
('Data after robust scaling',
RobustScaler(quantile_range=(25, 75)).fit_transform(X)),
('Data after power transformation (Yeo-Johnson)',
PowerTransformer(method='yeo-johnson').fit_transform(X)),
('Data after power transformation (Box-Cox)',
PowerTransformer(method='box-cox').fit_transform(X)),
('Data after quantile transformation (gaussian pdf)',
QuantileTransformer(output_distribution='normal')
.fit_transform(X)),
('Data after quantile transformation (uniform pdf)',
QuantileTransformer(output_distribution='uniform')
.fit_transform(X)),
('Data after sample-wise L2 normalizing',
Normalizer().fit_transform(X)),
]
# scale the output between 0 and 1 for the colorbar
y = minmax_scale(y_full)
# plasma does not exist in matplotlib < 1.5
cmap = getattr(cm, 'plasma_r', cm.hot_r)
def create_axes(title, figsize=(16, 6)):
fig = plt.figure(figsize=figsize)
fig.suptitle(title)
# define the axis for the first plot
left, width = 0.1, 0.22
bottom, height = 0.1, 0.7
bottom_h = height + 0.15
left_h = left + width + 0.02
rect_scatter = [left, bottom, width, height]
rect_histx = [left, bottom_h, width, 0.1]
rect_histy = [left_h, bottom, 0.05, height]
ax_scatter = plt.axes(rect_scatter)
ax_histx = plt.axes(rect_histx)
ax_histy = plt.axes(rect_histy)
# define the axis for the zoomed-in plot
left = width + left + 0.2
left_h = left + width + 0.02
rect_scatter = [left, bottom, width, height]
rect_histx = [left, bottom_h, width, 0.1]
rect_histy = [left_h, bottom, 0.05, height]
ax_scatter_zoom = plt.axes(rect_scatter)
ax_histx_zoom = plt.axes(rect_histx)
ax_histy_zoom = plt.axes(rect_histy)
# define the axis for the colorbar
left, width = width + left + 0.13, 0.01
rect_colorbar = [left, bottom, width, height]
ax_colorbar = plt.axes(rect_colorbar)
return ((ax_scatter, ax_histy, ax_histx),
(ax_scatter_zoom, ax_histy_zoom, ax_histx_zoom),
ax_colorbar)
def plot_distribution(axes, X, y, hist_nbins=50, title="",
x0_label="", x1_label=""):
ax, hist_X1, hist_X0 = axes
ax.set_title(title)
ax.set_xlabel(x0_label)
ax.set_ylabel(x1_label)
# The scatter plot
colors = cmap(y)
ax.scatter(X[:, 0], X[:, 1], alpha=0.5, marker='o', s=5, lw=0, c=colors)
# Removing the top and the right spine for aesthetics
# make nice axis layout
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
ax.spines['left'].set_position(('outward', 10))
ax.spines['bottom'].set_position(('outward', 10))
# Histogram for axis X1 (feature 5)
hist_X1.set_ylim(ax.get_ylim())
hist_X1.hist(X[:, 1], bins=hist_nbins, orientation='horizontal',
color='grey', ec='grey')
hist_X1.axis('off')
# Histogram for axis X0 (feature 0)
hist_X0.set_xlim(ax.get_xlim())
hist_X0.hist(X[:, 0], bins=hist_nbins, orientation='vertical',
color='grey', ec='grey')
hist_X0.axis('off')
###############################################################################
# Two plots will be shown for each scaler/normalizer/transformer. The left
# figure will show a scatter plot of the full data set while the right figure
# will exclude the extreme values considering only 99 % of the data set,
# excluding marginal outliers. In addition, the marginal distributions for each
# feature will be shown on the side of the scatter plot.
def make_plot(item_idx):
title, X = distributions[item_idx]
ax_zoom_out, ax_zoom_in, ax_colorbar = create_axes(title)
axarr = (ax_zoom_out, ax_zoom_in)
plot_distribution(axarr[0], X, y, hist_nbins=200,
x0_label="Median Income",
x1_label="Number of households",
title="Full data")
# zoom-in
zoom_in_percentile_range = (0, 99)
cutoffs_X0 = np.percentile(X[:, 0], zoom_in_percentile_range)
cutoffs_X1 = np.percentile(X[:, 1], zoom_in_percentile_range)
non_outliers_mask = (
np.all(X > [cutoffs_X0[0], cutoffs_X1[0]], axis=1) &
np.all(X < [cutoffs_X0[1], cutoffs_X1[1]], axis=1))
plot_distribution(axarr[1], X[non_outliers_mask], y[non_outliers_mask],
hist_nbins=50,
x0_label="Median Income",
x1_label="Number of households",
title="Zoom-in")
norm = mpl.colors.Normalize(y_full.min(), y_full.max())
mpl.colorbar.ColorbarBase(ax_colorbar, cmap=cmap,
norm=norm, orientation='vertical',
label='Color mapping for values of y')
########################################################################
# .. _results:
#
# Original data
# -------------
#
# Each transformation is plotted showing two transformed features, with the
# left plot showing the entire dataset, and the right zoomed-in to show the
# dataset without the marginal outliers. A large majority of the samples are
# compacted to a specific range, [0, 10] for the median income and [0, 6] for
# the number of households. Note that there are some marginal outliers (some
# blocks have more than 1200 households). Therefore, a specific pre-processing
# can be very beneficial depending of the application. In the following, we
# present some insights and behaviors of those pre-processing methods in the
# presence of marginal outliers.
make_plot(0)
#######################################################################
# StandardScaler
# --------------
#
# ``StandardScaler`` removes the mean and scales the data to unit variance.
# However, the outliers have an influence when computing the empirical mean and
# standard deviation which shrink the range of the feature values as shown in
# the left figure below. Note in particular that because the outliers on each
# feature have different magnitudes, the spread of the transformed data on
# each feature is very different: most of the data lie in the [-2, 4] range for
# the transformed median income feature while the same data is squeezed in the
# smaller [-0.2, 0.2] range for the transformed number of households.
#
# ``StandardScaler`` therefore cannot guarantee balanced feature scales in the
# presence of outliers.
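# (Concretely, each feature is transformed as z = (x - u) / s, where u and s are
# that feature's training mean and standard deviation.)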
make_plot(1)
##########################################################################
# MinMaxScaler
# ------------
#
# ``MinMaxScaler`` rescales the data set such that all feature values are in
# the range [0, 1] as shown in the right panel below. However, this scaling
# compresses all inliers into the narrow range [0, 0.005] for the transformed
# number of households.
#
# As ``StandardScaler``, ``MinMaxScaler`` is very sensitive to the presence of
# outliers.
make_plot(2)
#############################################################################
# MaxAbsScaler
# ------------
#
# ``MaxAbsScaler`` differs from the previous scaler such that the absolute
# values are mapped in the range [0, 1]. On positive only data, this scaler
# behaves similarly to ``MinMaxScaler`` and therefore also suffers from the
# presence of large outliers.
make_plot(3)
##############################################################################
# RobustScaler
# ------------
#
# Unlike the previous scalers, the centering and scaling statistics of this
# scaler are based on percentiles and are therefore not influenced by a small
# number of very large marginal outliers. Consequently, the resulting range of
# the transformed feature values is larger than for the previous scalers and,
# more importantly, are approximately similar: for both features most of the
# transformed values lie in a [-2, 3] range as seen in the zoomed-in figure.
# Note that the outliers themselves are still present in the transformed data.
# If a separate outlier clipping is desirable, a non-linear transformation is
# required (see below).
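# (With ``quantile_range=(25, 75)`` as used above, each feature is transformed
# as x' = (x - median) / (q75 - q25), i.e. centred on the median and scaled by
# the interquartile range.)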
make_plot(4)
##############################################################################
# PowerTransformer
# ----------------
#
# ``PowerTransformer`` applies a power transformation to each feature to make
# the data more Gaussian-like. Currently, ``PowerTransformer`` implements the
# Yeo-Johnson and Box-Cox transforms. The power transform finds the optimal
# scaling factor to stabilize variance and minimize skewness through maximum
# likelihood estimation. By default, ``PowerTransformer`` also applies
# zero-mean, unit variance normalization to the transformed output. Note that
# Box-Cox can only be applied to strictly positive data. Income and number of
# households happen to be strictly positive, but if negative values are present
# the Yeo-Johnson transform is to be preferred.
make_plot(5)
make_plot(6)
##############################################################################
# QuantileTransformer (Gaussian output)
# -------------------------------------
#
# ``QuantileTransformer`` has an additional ``output_distribution`` parameter
# allowing one to match a Gaussian distribution instead of a uniform one.
# Note that this non-parametric transformer introduces saturation artifacts
# for extreme values.
make_plot(7)
###################################################################
# QuantileTransformer (uniform output)
# ------------------------------------
#
# ``QuantileTransformer`` applies a non-linear transformation such that the
# probability density function of each feature will be mapped to a uniform
# distribution. In this case, all the data will be mapped in the range [0, 1],
# even the outliers which cannot be distinguished anymore from the inliers.
#
# As ``RobustScaler``, ``QuantileTransformer`` is robust to outliers in the
# sense that adding or removing outliers in the training set will yield
# approximately the same transformation on held out data. But contrary to
# ``RobustScaler``, ``QuantileTransformer`` will also automatically collapse
# any outlier by setting them to the a priori defined range boundaries (0 and
# 1).
make_plot(8)
##############################################################################
# Normalizer
# ----------
#
# The ``Normalizer`` rescales the vector for each sample to have unit norm,
# independently of the distribution of the samples. It can be seen on both
# figures below where all samples are mapped onto the unit circle. In our
# example the two selected features have only positive values; therefore the
# transformed data only lie in the positive quadrant. This would not be the
# case if some original features had a mix of positive and negative values.
make_plot(9)
plt.show()
| bsd-3-clause |
NICTA/revrand | revrand/slm.py | 1 | 8074 | """
The standard Bayesian linear regression model.
By using the appropriate bases, this will also yield an implementation of the
"A la Carte" GP [1]_.
.. [1] Yang, Z., Smola, A. J., Song, L., & Wilson, A. G. "A la Carte --
Learning Fast Kernels". Proceedings of the Eighteenth International
Conference on Artificial Intelligence and Statistics, pp. 1098-1106,
2015.
"""
from __future__ import division
import logging
from functools import partial
import numpy as np
from scipy.stats import gamma
from scipy.optimize import minimize
from sklearn.base import BaseEstimator, RegressorMixin
from sklearn.utils.validation import check_is_fitted, check_X_y, check_array
from sklearn.utils import check_random_state
from .utils import atleast_list, issequence
from .mathfun.linalg import solve_posdef
from .optimize import structured_minimizer, logtrick_minimizer
from .btypes import Parameter, Positive
from .basis_functions import LinearBasis, apply_grad
# Set up logging
log = logging.getLogger(__name__)
class StandardLinearModel(BaseEstimator, RegressorMixin):
"""
Bayesian Standard linear model.
Parameters
----------
basis : Basis
A basis object, see the basis_functions module.
var : Parameter, optional
observation variance initial value.
tol : float, optional
optimiser function tolerance convergence criterion.
maxiter : int, optional
maximum number of iterations for the optimiser.
nstarts : int, optional
if there are any parameters with distributions as initial values, this
determines how many random candidate starts should be evaluated before
commencing optimisation at the best candidate.
random_state : None, int or RandomState, optional
random seed (mainly for random starts)
"""
def __init__(self,
basis=LinearBasis(),
var=Parameter(gamma(1.), Positive()),
tol=1e-8,
maxiter=1000,
nstarts=100,
random_state=None
):
"""See class docstring."""
self.basis = basis
self.var = var
self.tol = tol
self.maxiter = maxiter
self.nstarts = nstarts
self.random_state = random_state
self.random_ = check_random_state(random_state)
def fit(self, X, y):
"""
Learn the hyperparameters of a Bayesian linear regressor.
Parameters
----------
X : ndarray
(N, d) array input dataset (N samples, d dimensions).
y : ndarray
(N,) array targets (N samples)
Returns
-------
self
Notes
-----
This actually optimises the evidence lower bound on log marginal
likelihood, rather than log marginal likelihood directly. In the case
of a full posterior covariance matrix, this bound is tight and the
exact solution will be found (modulo local minima for the
hyperparameters).
This uses the python logging module for displaying learning status. To
view these messages have something like,
.. code ::
import logging
logging.basicConfig(level=logging.INFO)
log = logging.getLogger(__name__)
in your calling code.
"""
X, y = check_X_y(X, y)
self.obj_ = -np.inf
# Make list of parameters and decorate optimiser to understand this
params = [self.var, self.basis.regularizer, self.basis.params]
nmin = structured_minimizer(logtrick_minimizer(minimize))
# Close over objective and learn parameters
elbo = partial(StandardLinearModel._elbo, self, X, y)
res = nmin(elbo,
params,
method='L-BFGS-B',
jac=True,
tol=self.tol,
options={'maxiter': self.maxiter, 'maxcor': 100},
random_state=self.random_,
nstarts=self.nstarts
)
# Unpack learned parameters and report
self.var_, self.regularizer_, self.hypers_ = res.x
log.info("Done! ELBO = {}, var = {}, reg = {}, hypers = {}, "
"message = {}."
.format(-res['fun'],
self.var_,
self.regularizer_,
self.hypers_,
res.message)
)
return self
def _elbo(self, X, y, var, reg, hypers):
# Get Basis
Phi = self.basis.transform(X, *atleast_list(hypers)) # N x D
PhiPhi = Phi.T.dot(Phi)
N, D = Phi.shape
# Get regularizer
L, slices = self.basis.regularizer_diagonal(X, *atleast_list(reg))
iL = 1. / L
# Posterior Parameters
iC = np.diag(iL) + PhiPhi / var
C, logdetiC = solve_posdef(iC, np.eye(D))
logdetC = - logdetiC
m = C.dot(Phi.T.dot(y)) / var
# Common calcs
TrPhiPhiC = (PhiPhi * C).sum()
Err = y - Phi.dot(m)
sqErr = (Err**2).sum()
# Calculate ELBO
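# The bound is the usual one for a Gaussian posterior q(w) = N(m, C): the first
# three bracketed terms are the expected Gaussian log-likelihood, and the
# remaining ones are (twice) the KL divergence between q(w) and the
# N(0, diag(L)) prior, all collected under the leading -0.5.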
ELBO = -0.5 * (N * np.log(2 * np.pi * var)
+ sqErr / var
+ TrPhiPhiC / var
+ ((m**2 + C.diagonal()) * iL).sum()
- logdetC
+ np.log(L).sum()
- D)
# Cache optimal parameters so we don't have to recompute them later
if ELBO > self.obj_:
self.weights_ = m
self.covariance_ = C
self.obj_ = ELBO
log.info("ELBO = {}, var = {}, reg = {}, hypers = {}."
.format(ELBO, var, reg, hypers))
# Grad var
dvar = 0.5 * (-N + (sqErr + TrPhiPhiC) / var) / var
# Grad reg
def dreg(s):
return - 0.5 * (((m[s]**2 + C[s, s].diagonal()) * iL[s]**2).sum()
- iL[s].sum())
dL = list(map(dreg, slices)) if issequence(slices) else dreg(slices)
# Get structured basis function gradients
def dhyps(dPhi):
return - (m.T.dot(Err.dot(dPhi))
- (dPhi.T.dot(Phi) * C).sum()) / var
dhypers = apply_grad(dhyps, self.basis.grad(X, *atleast_list(hypers)))
return -ELBO, [-dvar, dL, dhypers]
def predict(self, X):
"""
Predict mean from Bayesian linear regression.
Parameters
----------
X : ndarray
(N*,d) array query input dataset (N* samples, d dimensions).
Returns
-------
Ey : ndarray
The expected value of y* for the query inputs, X* of shape (N*,).
"""
Ey, _ = self.predict_moments(X)
return Ey
def predict_moments(self, X):
"""
Full predictive distribution from Bayesian linear regression.
Parameters
----------
X : ndarray
(N*,d) array query input dataset (N* samples, d dimensions).
Returns
-------
Ey : ndarray
The expected value of y* for the query inputs, X* of shape (N*,).
Vy : ndarray
The expected variance of y* for the query inputs, X* of shape
(N*,).
"""
check_is_fitted(self, ['var_', 'regularizer_', 'weights_',
'covariance_', 'hypers_'])
X = check_array(X)
Phi = self.basis.transform(X, *atleast_list(self.hypers_))
Ey = Phi.dot(self.weights_)
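# Predictive moments: mean Phi* m and variance diag(Phi* C Phi*^T) plus the
# learned observation noise var_.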
Vf = (Phi.dot(self.covariance_) * Phi).sum(axis=1)
return Ey, Vf + self.var_
def __repr__(self):
"""Representation."""
return "{}(basis={}, var={}, tol={}, maxiter={}, nstarts={}, "\
"random_state={})".format(
self.__class__.__name__,
self.basis,
self.var,
self.tol,
self.maxiter,
self.nstarts,
self.random_state
)
| apache-2.0 |
graphchallenge/GraphChallenge | StochasticBlockPartition/data/graph_generator.py | 1 | 10926 | from partition_baseline_support import *
import graph_tool.all as gt
import pandas as pd # for writing output graph TSV files
import numpy as np
import random
import scipy.stats as stats
# Generate the graph according to the blockmodel and parameters
N = 200 # number of nodes
save_graph = True
file_name = './simulated_blockmodel_graph_{:d}_nodes'.format(N) # output file name
num_blocks = int(N ** 0.35) # number of blocks grows sub-linearly with number of nodes. Exponent is a parameter.
print('Number of blocks: {}'.format(num_blocks))
# parameters for the Power-Law degree distribution
powerlaw_exponent = -2.5
min_degree = min(10, N / (num_blocks * 4)) # node degree range is adjusted lower when the blocks have few nodes
max_degree = min(100, N / num_blocks)
# sparsity parameter (1-density fraction of the edges will be removed)
density = 1
# define discrete power law distribution
def discrete_power_law(a, min_v, max_v):
x = np.arange(min_v, max_v + 1, dtype='float')
pmf = x ** a
pmf /= pmf.sum()
return stats.rv_discrete(values=(range(min_v, max_v + 1), pmf))
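# e.g. discrete_power_law(-2.5, 10, 100).rvs(size=5) draws five integer degrees
# in [10, 100] with probability proportional to k ** -2.5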
# set in-degree and out-degree distribution
rv_indegree = discrete_power_law(powerlaw_exponent, min_degree, max_degree)
rv_outdegree = discrete_power_law(powerlaw_exponent, min_degree, max_degree)
# define the return function for in and out degrees
def degree_distribution_function(rv1, rv2):
return (rv1.rvs(size=1), rv2.rvs(size=1))
# this parameter adjusts the ratio between the total number of within-block edges and between-block edges
ratio_within_over_between = 5
# set the within-block and between-block edge strength accordingly
def inter_block_strength(a, b):
if a == b: # within block interaction strength
return 1
else: # between block interaction strength
avg_within_block_nodes = float(N) / num_blocks
avg_between_block_nodes = N - avg_within_block_nodes
return avg_within_block_nodes / avg_between_block_nodes / ratio_within_over_between
# draw block membership distribution from a Dirichlet random variable
block_size_heterogeneity = 1 # 3; # larger means the block sizes are more uneven
block_distribution = np.random.dirichlet(np.ones(num_blocks) * 10 / block_size_heterogeneity, 1)[0]
print('Block distribution: {}'.format(block_distribution))
# draw block membership for each node
block_membership_vector = np.where(np.random.multinomial(n=1, size=N, pvals=block_distribution))[1]
true_partition = block_membership_vector
# generate the graph
if (float(gt.__version__[0:4]) >= 2.20): # specify inter-block strength through edge_probs in later versions
g_sample, block_membership = gt.random_graph(N, lambda: degree_distribution_function(rv_indegree, rv_outdegree), \
directed=True, model="blockmodel",
block_membership=block_membership_vector,
edge_probs=inter_block_strength, n_iter=10, verbose=False)
else: # specify inter-block strength through vertex_corr in earlier versions
g_sample, block_membership = gt.random_graph(N, lambda: degree_distribution_function(rv_indegree, rv_outdegree), \
directed=True, model="blockmodel",
block_membership=block_membership_vector,
vertex_corr=inter_block_strength, n_iter=10, verbose=False)
# remove (1-density) percent of the edges
edge_filter = g_sample.new_edge_property('bool')
edge_filter.a = stats.bernoulli.rvs(density, size=edge_filter.a.shape)
g_sample.set_edge_filter(edge_filter)
g_sample.purge_edges()
# store the nodal block memberships in a vertex property
g_sample.vertex_properties["block_membership"] = block_membership
# compute and report basic statistics on the generated graph
bg, bb, vcount, ecount, avp, aep = gt.condensation_graph(g_sample, block_membership, self_loops=True)
edge_count_between_blocks = np.zeros((num_blocks, num_blocks))
for e in bg.edges():
edge_count_between_blocks[bg.vertex_index[e.source()], bg.vertex_index[e.target()]] = ecount[e]
num_within_block_edges = sum(edge_count_between_blocks.diagonal())
num_between_block_edges = g_sample.num_edges() - num_within_block_edges
# print count statistics
print('Number of nodes: {}'.format(N))
print('Number of edges: {}'.format(g_sample.num_edges()))
print('Avg. Number of nodes per block: {}'.format(N / num_blocks))
print('# Within-block edges / # Between-blocks edges: {}'.format(num_within_block_edges / num_between_block_edges))
if save_graph: # output the graph and truth partition to TSV files with standard format
g_sample.save('{}.gt.bz2'.format(file_name)) # save graph-tool graph object
# store edge list
edge_list = np.zeros((g_sample.num_edges(), 3), dtype=int)
# populate the edge list.
counter = 0;
for e in g_sample.edges(): # iterate through all edges (edge list access is not available in all versions of graph-tool)
edge_list[counter, 0] = int(e.source()) + 1; # nodes are indexed starting at 1 in the standard format
edge_list[counter, 1] = int(e.target()) + 1; # nodes are indexed starting at 1 in the standard format
edge_list[counter, 2] = 1; # all edges are weighted equally at 1 in this generator
counter += 1
# write graph TSV file using pandas DataFrame
df_graph = pd.DataFrame(edge_list)
df_graph.to_csv('{}.tsv'.format(file_name), sep='\t', header=False, index=False)
# write truth partition TSV file using pandas DataFrame; nodes and blocks are indexed starting at 1 in the standard format
df_partition = pd.DataFrame(np.column_stack((np.arange(N) + 1, true_partition + 1)))
df_partition.to_csv('{}_truePartition.tsv'.format(file_name), sep='\t', header=False, index=False)
# Divide graph into streaming parts
streaming_mode = 0; # 0 for random samples of edges over time. 1 for snow-ball sampling of graph over time.
num_stages = 10; # number of stages to divide the graph into
num_nodes_per_stage = int(g_sample.num_vertices() / num_stages)
# store edge list
edge_list = np.zeros((g_sample.num_edges(), 3), dtype=int)
# populate the edge list.
counter = 0;
for e in g_sample.edges(): # iterate through all edges (access edge list with .get_edges() in later versions of graph-tool)
edge_list[counter, 0] = int(e.source())
edge_list[counter, 1] = int(e.target())
edge_list[counter, 2] = 1; # all edges are weighted equally at 1 in this generator
counter += 1
if streaming_mode == 0: # randomly assign edges to each of the graph stage
mode = "edgeSample"
edge_assignment = np.random.randint(num_stages, size=g_sample.num_edges())
else: # snowball sample the graph
mode = "snowball"
degrees = np.array(g_sample.degree_property_map('total').a)
starting_node = np.argmax(degrees) # start from a node with highest degree
nodes_already_observed = set()
nodes_in_current_stage = set([g_sample.vertex(starting_node)])
nodes_to_expand_to = set(g_sample.vertex(starting_node).all_neighbours())
edge_assignment = np.ones(g_sample.num_edges(), dtype=int) * -1
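# -1 marks edges not yet assigned to a stage; anything still unassigned after
# the loop is swept into the final stage below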
for stage in range(num_stages - 1): # snowball sample to build each stage of the streaming graph
while len(
nodes_in_current_stage) < num_nodes_per_stage: # expand until the current stage has the right number of nodes
if (len(nodes_in_current_stage) + len(nodes_to_expand_to) <= num_nodes_per_stage):
nodes_expanded = nodes_to_expand_to.copy() # expand all the available nodes
else: # expand a randomly sampled fraction of the available nodes
nodes_expanded = set(
random.sample(nodes_to_expand_to, (num_nodes_per_stage - len(nodes_in_current_stage))))
nodes_in_current_stage.update(nodes_expanded)
nodes_to_expand_to.difference_update(nodes_expanded)  # remove every expanded node
# update the available nodes to expand to next
for v in nodes_expanded:
nodes_to_expand_to.update(v.all_neighbours())
nodes_to_expand_to = nodes_to_expand_to.difference(nodes_in_current_stage.union(nodes_already_observed))
nodes_already_observed = nodes_already_observed.union(nodes_in_current_stage)
node_idx_current_stage = np.array([g_sample.vertex_index[v] for v in nodes_in_current_stage])
node_idx_observed = np.array([g_sample.vertex_index[v] for v in nodes_already_observed])
# find all the edges in the current stage
edge_sel1 = np.logical_and(np.in1d(edge_list[:, 0], node_idx_current_stage),
np.in1d(edge_list[:, 1], node_idx_observed))
edge_sel2 = np.logical_and(np.in1d(edge_list[:, 1], node_idx_current_stage),
np.in1d(edge_list[:, 0], node_idx_observed))
edge_sel = np.logical_or(edge_sel1, edge_sel2)
edge_assignment[np.where(edge_sel)] = stage
nodes_in_current_stage = set()
edge_assignment[np.where(edge_assignment == -1)] = num_stages - 1 # label the remaining edges to the last stage
# output each stage of the graph and re-index the nodes so the observed nodes so far have consecutive indices
node_idx_map = np.ones(g_sample.num_vertices(), dtype=int) * -1
node_observed = np.zeros(g_sample.num_vertices(), dtype=bool)
num_nodes_so_far = 0;
for stage in range(num_stages):
edge_idx = (edge_assignment == stage).nonzero()[0]
node_idx = np.unique(edge_list[edge_idx, 0:2].ravel()) # find unique node indices in this stage
node_idx = np.delete(node_idx, node_observed[node_idx].nonzero()[0]) # find newly observed nodes
node_observed[node_idx] = 1 # mark these nodes as observed
# map the newly observed nodes with consecutive indices
node_idx_map[node_idx] = np.arange(start=num_nodes_so_far, stop=num_nodes_so_far + len(node_idx))
num_nodes_so_far += len(node_idx)
# write out the graph stage
edge_out = np.zeros((len(edge_idx), 3), dtype=int)
edge_out[:, 0] = node_idx_map[edge_list[edge_idx, 0]] + 1 # nodes are re-indexed. +1 for 1-indexed standard format
edge_out[:, 1] = node_idx_map[edge_list[edge_idx, 1]] + 1 # nodes are re-indexed. +1 for 1-indexed standard format
edge_out[:, 2] = edge_list[edge_idx, 2]
df_graph = pd.DataFrame(edge_out)
if save_graph:
df_graph.to_csv('{}_{}_{}.tsv'.format(file_name, mode, stage + 1), sep='\t', header=False, index=False)
if save_graph:
# write truth partition TSV file using pandas DataFrame; nodes and blocks are indexed starting at 1 in the standard format
df_partition = pd.DataFrame(np.column_stack((node_idx_map + 1, true_partition + 1)))
df_partition.to_csv('{}_{}_truePartition.tsv'.format(file_name, mode), sep='\t', header=False, index=False)
| mit |
DonBeo/scikit-learn | doc/tutorial/text_analytics/skeletons/exercise_01_language_train_model.py | 254 | 2005 | """Build a language detector model
The goal of this exercise is to train a linear classifier on text features
that represent sequences of up to 3 consecutive characters so as to be
recognize natural languages by using the frequencies of short character
sequences as 'fingerprints'.
"""
# Author: Olivier Grisel <[email protected]>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.datasets import load_files
from sklearn.cross_validation import train_test_split
from sklearn import metrics
# The training data folder must be passed as first argument
languages_data_folder = sys.argv[1]
dataset = load_files(languages_data_folder)
# Split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.5)
# TASK: Build a vectorizer that splits strings into sequences of 1 to 3
# characters instead of word tokens
# TASK: Build a vectorizer / classifier pipeline using the previous analyzer
# the pipeline instance should be stored in a variable named clf
# TASK: Fit the pipeline on the training set
# TASK: Predict the outcome on the testing set in a variable named y_predicted
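# One possible completion of the tasks above (a sketch, not the only valid
# answer; the hyper-parameters below are illustrative choices, not part of
# the exercise statement): a character n-gram TF-IDF vectorizer feeding a
# Perceptron.
vectorizer = TfidfVectorizer(analyzer='char', ngram_range=(1, 3))
clf = Pipeline([
    ('vec', vectorizer),
    ('clf', Perceptron()),
])
clf.fit(docs_train, y_train)
y_predicted = clf.predict(docs_test)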
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
#import pylab as pl
#pl.matshow(cm, cmap=pl.cm.jet)
#pl.show()
# Predict the result on some short new sentences:
sentences = [
u'This is a language detection test.',
u'Ceci est un test de d\xe9tection de la langue.',
u'Dies ist ein Test, um die Sprache zu erkennen.',
]
predicted = clf.predict(sentences)
for s, p in zip(sentences, predicted):
print(u'The language of "%s" is "%s"' % (s, dataset.target_names[p]))
| bsd-3-clause |
calatre/epidemics_network | plt/SIR 1 plot.py | 1 | 2458 | # Universidade de Aveiro - Physics Department
# 2016/2017 Project - Andre Calatre, 73207
# "Simulation of an epidemic" - 16/5/2017
# Plotting Multiple Simulations of a SIR Epidemic Model
#import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import rcParams
rcParams['lines.linewidth'] = 0.2
rcParams['axes.linewidth'] = 0.1 #set the value globally
#Choosing the values for c and r to study
cvalues = [0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.1,
0.25, 0.5, 0.75, 1]
rvalues = [0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.1,
0.25, 0.5, 0.75, 1]
#xlab = range(0,501,10)
i = 0
#box = {'facecolor':'yellow', 'alpha':0.9, 'pad':2}
#plt.figure(figsize = (20,30))
for cvar in cvalues:
for rvar in rvalues:
i += 1
print('Working...')
tblnm = 'c='+str(cvar)+'|r='+ str(rvar)
data = pd.read_excel('data/SIR_ns_data_shift.xlsx', sheetname = tblnm)
print('plotting...............................'+str(tblnm))
#print(data['S_Avg'])
plt.subplot(14,14,i)
y1 = data['Susceptible']
y2 = data['Infected']
y3 = data['Removed']
#e1 = data['nS_StD']
#e2 = data['nI_StD']
#e3 = data['nR_StD']
#ind = y1.index.values
plt.plot(y1,'g-')
#plt.fill_between(ind, y1-e1, y1+e1, linewidth=0,
#facecolor = 'g', alpha = 0.3, antialiased = True)
plt.plot(y2,'r-')
#plt.fill_between(ind, y2-e2, y2+e2, linewidth=0,
#facecolor = 'r', alpha = 0.3, antialiased = True)
plt.plot(y3,'b-')
# plt.fill_between(ind, y3-e3, y3+e3, linewidth=0,
#facecolor = 'b', alpha = 0.2, antialiased = True)
plt.axis([0,250,0,10000])
#plt.text(300,9500,tblnm, bbox= box)
plt.title('c*p='+str(cvar)+'|r='+ str(rvar), size=4, y=0.75)#, loc='right')
plt.subplots_adjust(bottom=0.01, right=0.99, top=0.97, left=0.01,
hspace=.4, wspace=.08)
#plt.xticks([])
#plt.yticks([])
plt.tick_params(labelbottom='off', labelleft='off', width = 0.05)
plt.grid(True, linewidth = 0.05)
#plt.tight_layout()
#plt.show()
plt.savefig('img/test.png', format='png', dpi=1200)  # figure size belongs in plt.figure(figsize=...), not savefig()
| apache-2.0 |
cbertinato/pandas | pandas/tests/dtypes/test_concat.py | 1 | 1976 | import pytest
import pandas.core.dtypes.concat as _concat
from pandas import (
DatetimeIndex, Index, Period, PeriodIndex, Series, TimedeltaIndex)
@pytest.mark.parametrize('to_concat, expected', [
# int/float/str
([['a'], [1, 2]], ['i', 'object']),
([[3, 4], [1, 2]], ['i']),
([[3, 4], [1, 2.1]], ['i', 'f']),
# datetimelike
([DatetimeIndex(['2011-01-01']), DatetimeIndex(['2011-01-02'])],
['datetime']),
([TimedeltaIndex(['1 days']), TimedeltaIndex(['2 days'])],
['timedelta']),
# datetimelike object
([DatetimeIndex(['2011-01-01']),
DatetimeIndex(['2011-01-02'], tz='US/Eastern')],
['datetime', 'datetime64[ns, US/Eastern]']),
([DatetimeIndex(['2011-01-01'], tz='Asia/Tokyo'),
DatetimeIndex(['2011-01-02'], tz='US/Eastern')],
['datetime64[ns, Asia/Tokyo]', 'datetime64[ns, US/Eastern]']),
([TimedeltaIndex(['1 days']), TimedeltaIndex(['2 hours'])],
['timedelta']),
([DatetimeIndex(['2011-01-01'], tz='Asia/Tokyo'),
TimedeltaIndex(['1 days'])],
['datetime64[ns, Asia/Tokyo]', 'timedelta'])])
@pytest.mark.parametrize('klass', [Index, Series])
def test_get_dtype_kinds(klass, to_concat, expected):
to_concat_klass = [klass(c) for c in to_concat]
result = _concat.get_dtype_kinds(to_concat_klass)
assert result == set(expected)
@pytest.mark.parametrize('to_concat, expected', [
([PeriodIndex(['2011-01'], freq='M'),
PeriodIndex(['2011-01'], freq='M')], ['period[M]']),
([Series([Period('2011-01', freq='M')]),
Series([Period('2011-02', freq='M')])], ['period[M]']),
([PeriodIndex(['2011-01'], freq='M'),
PeriodIndex(['2011-01'], freq='D')], ['period[M]', 'period[D]']),
([Series([Period('2011-01', freq='M')]),
Series([Period('2011-02', freq='D')])], ['period[M]', 'period[D]'])])
def test_get_dtype_kinds_period(to_concat, expected):
result = _concat.get_dtype_kinds(to_concat)
assert result == set(expected)
| bsd-3-clause |
manipopopo/tensorflow | tensorflow/contrib/learn/python/learn/preprocessing/tests/categorical_test.py | 137 | 2219 | # encoding: utf-8
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Categorical tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.learn.python.learn.learn_io import HAS_PANDAS
from tensorflow.contrib.learn.python.learn.preprocessing import categorical
from tensorflow.python.platform import test
class CategoricalTest(test.TestCase):
"""Categorical tests."""
def testSingleCategoricalProcessor(self):
cat_processor = categorical.CategoricalProcessor(min_frequency=1)
x = cat_processor.fit_transform([["0"], [1], [float("nan")], ["C"], ["C"],
[1], ["0"], [np.nan], [3]])
self.assertAllEqual(list(x), [[2], [1], [0], [3], [3], [1], [2], [0], [0]])
def testSingleCategoricalProcessorPandasSingleDF(self):
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
cat_processor = categorical.CategoricalProcessor()
data = pd.DataFrame({"Gender": ["Male", "Female", "Male"]})
x = list(cat_processor.fit_transform(data))
self.assertAllEqual(list(x), [[1], [2], [1]])
def testMultiCategoricalProcessor(self):
cat_processor = categorical.CategoricalProcessor(
min_frequency=0, share=False)
x = cat_processor.fit_transform([["0", "Male"], [1, "Female"],
["3", "Male"]])
self.assertAllEqual(list(x), [[1, 1], [2, 2], [3, 1]])
if __name__ == "__main__":
test.main()
| apache-2.0 |
piiswrong/mxnet | example/speech_recognition/stt_utils.py | 44 | 5892 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import os
import os.path
import numpy as np
import soundfile
from numpy.lib.stride_tricks import as_strided
logger = logging.getLogger(__name__)
def calc_feat_dim(window, max_freq):
return int(0.001 * window * max_freq) + 1
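# Illustrative example (the numbers are assumptions, not from the original
# code): a 20 ms window with max_freq = 8000 Hz gives
# int(0.001 * 20 * 8000) + 1 = 161 frequency bins per frame.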
def conv_output_length(input_length, filter_size, border_mode, stride,
dilation=1):
""" Compute the length of the output sequence after 1D convolution along
time. Note that this function is in line with the function used in
Convolution1D class from Keras.
Params:
input_length (int): Length of the input sequence.
filter_size (int): Width of the convolution kernel.
        border_mode (str): Only supports `same` or `valid`.
stride (int): Stride size used in 1D convolution.
dilation (int)
"""
if input_length is None:
return None
assert border_mode in {'same', 'valid'}
dilated_filter_size = filter_size + (filter_size - 1) * (dilation - 1)
if border_mode == 'same':
output_length = input_length
elif border_mode == 'valid':
output_length = input_length - dilated_filter_size + 1
return (output_length + stride - 1) // stride
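# Worked example (illustrative): conv_output_length(100, 11, 'valid', 2)
# returns ((100 - 11 + 1) + 2 - 1) // 2 = 45 frames, while
# conv_output_length(100, 11, 'same', 2) returns (100 + 2 - 1) // 2 = 50.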
def spectrogram(samples, fft_length=256, sample_rate=2, hop_length=128):
"""
Compute the spectrogram for a real signal.
The parameters follow the naming convention of
matplotlib.mlab.specgram
Args:
samples (1D array): input audio signal
fft_length (int): number of elements in fft window
sample_rate (scalar): sample rate
hop_length (int): hop length (relative offset between neighboring
fft windows).
Returns:
x (2D array): spectrogram [frequency x time]
freq (1D array): frequency of each row in x
Note:
This is a truncating computation e.g. if fft_length=10,
hop_length=5 and the signal has 23 elements, then the
last 3 elements will be truncated.
"""
assert not np.iscomplexobj(samples), "Must not pass in complex numbers"
window = np.hanning(fft_length)[:, None]
window_norm = np.sum(window ** 2)
# The scaling below follows the convention of
    # matplotlib.mlab.specgram, which is the same as
    # MATLAB's specgram.
scale = window_norm * sample_rate
trunc = (len(samples) - fft_length) % hop_length
x = samples[:len(samples) - trunc]
# "stride trick" reshape to include overlap
nshape = (fft_length, (len(x) - fft_length) // hop_length + 1)
nstrides = (x.strides[0], x.strides[0] * hop_length)
x = as_strided(x, shape=nshape, strides=nstrides)
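    # Illustrative example (not from the original source): with fft_length=4,
    # hop_length=2 and samples [0..9], the strided view has the columns
    # [0,1,2,3], [2,3,4,5], [4,5,6,7] and [6,7,8,9] -- each column is one
    # overlapping analysis frame, laid out frequency-major for the FFT below.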
# window stride sanity check
assert np.all(x[:, 1] == samples[hop_length:(hop_length + fft_length)])
# broadcast window, compute fft over columns and square mod
# This function computes the one-dimensional n-point discrete Fourier Transform (DFT) of a real-valued array by means of an efficient algorithm called the Fast Fourier Transform (FFT).
x = np.fft.rfft(x * window, axis=0)
x = np.absolute(x) ** 2
# scale, 2.0 for everything except dc and fft_length/2
x[1:-1, :] *= (2.0 / scale)
x[(0, -1), :] /= scale
freqs = float(sample_rate) / fft_length * np.arange(x.shape[0])
return x, freqs
def spectrogram_from_file(filename, step=10, window=20, max_freq=None,
eps=1e-14, overwrite=False, save_feature_as_csvfile=False):
""" Calculate the log of linear spectrogram from FFT energy
Params:
filename (str): Path to the audio file
step (int): Step size in milliseconds between windows
window (int): FFT window size in milliseconds
max_freq (int): Only FFT bins corresponding to frequencies between
[0, max_freq] are returned
eps (float): Small value to ensure numerical stability (for ln(x))
"""
csvfilename = filename.replace(".wav", ".csv")
if (os.path.isfile(csvfilename) is False) or overwrite:
with soundfile.SoundFile(filename) as sound_file:
audio = sound_file.read(dtype='float32')
sample_rate = sound_file.samplerate
if audio.ndim >= 2:
audio = np.mean(audio, 1)
if max_freq is None:
max_freq = sample_rate / 2
if max_freq > sample_rate / 2:
raise ValueError("max_freq must not be greater than half of "
" sample rate")
if step > window:
raise ValueError("step size must not be greater than window size")
hop_length = int(0.001 * step * sample_rate)
fft_length = int(0.001 * window * sample_rate)
pxx, freqs = spectrogram(
audio, fft_length=fft_length, sample_rate=sample_rate,
hop_length=hop_length)
ind = np.where(freqs <= max_freq)[0][-1] + 1
res = np.transpose(np.log(pxx[:ind, :] + eps))
if save_feature_as_csvfile:
np.savetxt(csvfilename, res)
return res
else:
return np.loadtxt(csvfilename)
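# Usage sketch (the file path below is hypothetical):
#   feats = spectrogram_from_file("utterance.wav", step=10, window=20,
#                                 max_freq=8000)
#   feats.shape -> (num_frames, num_freq_bins) of log-spectrogram values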
| apache-2.0 |
scikit-hep/uproot | tests/test_tree.py | 1 | 44382 | #!/usr/bin/env python
# BSD 3-Clause License; see https://github.com/scikit-hep/uproot3/blob/master/LICENSE
import os
from collections import namedtuple
import numpy
import pytest
import awkward0
import uproot3
def basest(array):
while getattr(array, "base", None) is not None:
array = array.base
return array
class Test(object):
###################################################### double32
def test_double32(self):
t = uproot3.open("tests/samples/demo-double32.root")["T"]
fD64 = t.array("fD64")
fF32 = t.array("fF32")
fI32 = t.array("fI32")
fI30 = t.array("fI30")
fI28 = t.array("fI28")
ratio_fF32 = fF32 / fD64
ratio_fI32 = fI32 / fD64
ratio_fI30 = fI30 / fD64
ratio_fI28 = fI28 / fD64
assert ratio_fF32.min() > 0.9999 and ratio_fF32.max() < 1.0001
assert ratio_fI32.min() > 0.9999 and ratio_fI32.max() < 1.0001
assert ratio_fI30.min() > 0.9999 and ratio_fI30.max() < 1.0001
assert ratio_fI28.min() > 0.9999 and ratio_fI28.max() < 1.0001
###################################################### basket
def test_flat_basket(self):
branch = uproot3.open("tests/samples/sample-6.10.05-uncompressed.root")["sample"]["i8"]
interpretation = branch._normalize_interpretation(None, awkward0)
entrystart, entrystop = uproot3.tree._normalize_entrystartstop(branch.numentries, None, None)
local_entrystart, local_entrystop = branch._localentries(0, entrystart, entrystop)
one = branch._basket(0, interpretation, local_entrystart, local_entrystop, awkward0, None, None)
two = branch._basket(0, interpretation, local_entrystart, local_entrystop, awkward0, None, None)
assert numpy.array_equal(one, numpy.array([-15, -14, -13], dtype=">i8"))
assert basest(one) is basest(two)
three = branch.basket(0)
assert numpy.array_equal(three, numpy.array([-15, -14, -13], dtype=">i8"))
assert basest(one) is not basest(three)
buf = numpy.zeros(10, dtype=numpy.float64)
four = branch.basket(0, interpretation.toarray(buf))
assert numpy.array_equal(four, numpy.array([-15, -14, -13], dtype=">i8"))
assert basest(four) is buf
def test_regular_basket(self):
branch = uproot3.open("tests/samples/sample-6.10.05-uncompressed.root")["sample"]["ai8"]
interpretation = branch._normalize_interpretation(None, awkward0)
entrystart, entrystop = uproot3.tree._normalize_entrystartstop(branch.numentries, None, None)
local_entrystart, local_entrystop = branch._localentries(0, entrystart, entrystop)
one = branch._basket(0, interpretation, local_entrystart, local_entrystop, awkward0, None, None)
two = branch._basket(0, interpretation, local_entrystart, local_entrystop, awkward0, None, None)
assert numpy.array_equal(one, numpy.array([[-14, -13, -12]], dtype=">i8"))
assert basest(one) is basest(two)
three = branch.basket(0)
assert numpy.array_equal(three, numpy.array([[-14, -13, -12]], dtype=">i8"))
assert basest(one) is not basest(three)
assert branch.basket(0, interpretation.to(todims=(3,))).shape == (1, 3)
assert branch.basket(0, interpretation.to(todims=())).shape == (3,)
assert branch.basket(0, interpretation.to(todims=(1,))).shape == (3, 1)
assert branch.basket(0, interpretation.to(todims=(1, 1))).shape == (3, 1, 1)
assert branch.basket(0, interpretation.to(todims=(1, 3))).shape == (1, 1, 3)
buf = numpy.zeros(10, dtype=numpy.float64)
four = branch.basket(0, interpretation.toarray(buf))
assert numpy.array_equal(four, numpy.array([-14, -13, -12], dtype=">i8"))
assert basest(four) is buf
def test_irregular_basket(self):
branch = uproot3.open("tests/samples/sample-6.10.05-uncompressed.root")["sample"]["Ai8"]
interpretation = branch._normalize_interpretation(None, awkward0)
entrystart, entrystop = uproot3.tree._normalize_entrystartstop(branch.numentries, None, None)
local_entrystart, local_entrystop = branch._localentries(0, entrystart, entrystop)
one = branch._basket(0, interpretation, local_entrystart, local_entrystop, awkward0, None, None)
two = branch._basket(0, interpretation, local_entrystart, local_entrystop, awkward0, None, None)
assert numpy.array_equal(one[0], numpy.array([], dtype=">i8"))
assert numpy.array_equal(one[1], numpy.array([-15], dtype=">i8"))
assert basest(one.content) is basest(two.content)
three = branch.basket(0)
assert numpy.array_equal(three[0], numpy.array([], dtype=">i8"))
assert numpy.array_equal(three[1], numpy.array([-15], dtype=">i8"))
def test_strings_basket(self):
branch = uproot3.open("tests/samples/sample-6.10.05-uncompressed.root")["sample"]["str"]
interpretation = branch._normalize_interpretation(None, awkward0)
entrystart, entrystop = uproot3.tree._normalize_entrystartstop(branch.numentries, None, None)
local_entrystart, local_entrystop = branch._localentries(0, entrystart, entrystop)
one = branch.basket(0, interpretation, local_entrystart, local_entrystop)
two = branch.basket(0, interpretation, local_entrystart, local_entrystop)
assert one.tolist() == [b"hey-0", b"hey-1", b"hey-2", b"hey-3", b"hey-4", b"hey-5"]
assert basest(one.content.content) is not basest(two.content.content)
three = branch.basket(0)
assert three.tolist() == [b"hey-0", b"hey-1", b"hey-2", b"hey-3", b"hey-4", b"hey-5"]
###################################################### baskets
def test_flat_baskets(self):
branch = uproot3.open("tests/samples/sample-6.10.05-uncompressed.root")["sample"]["i8"]
expectation = [[-15, -14, -13], [-12, -11, -10], [-9, -8, -7], [-6, -5, -4], [-3, -2, -1], [0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11], [12, 13, 14]]
assert [x.tolist() for x in branch.baskets()] == expectation
assert [x.tolist() for x in branch.iterate_baskets()] == expectation
def test_regular_baskets(self):
branch = uproot3.open("tests/samples/sample-6.10.05-uncompressed.root")["sample"]["ai8"]
expectation = [[[-14, -13, -12]], [[-13, -12, -11]], [[-12, -11, -10]], [[-11, -10, -9]], [[-10, -9, -8]], [[-9, -8, -7]], [[-8, -7, -6]], [[-7, -6, -5]], [[-6, -5, -4]], [[-5, -4, -3]], [[-4, -3, -2]], [[-3, -2, -1]], [[-2, -1, 0]], [[-1, 0, 1]], [[0, 1, 2]], [[1, 2, 3]], [[2, 3, 4]], [[3, 4, 5]], [[4, 5, 6]], [[5, 6, 7]], [[6, 7, 8]], [[7, 8, 9]], [[8, 9, 10]], [[9, 10, 11]], [[10, 11, 12]], [[11, 12, 13]], [[12, 13, 14]], [[13, 14, 15]], [[14, 15, 16]], [[15, 16, 17]]]
assert [x.tolist() for x in branch.baskets()] == expectation
assert [x.tolist() for x in branch.iterate_baskets()] == expectation
def test_irregular_baskets(self):
branch = uproot3.open("tests/samples/sample-6.10.05-uncompressed.root")["sample"]["Ai8"]
expectation = [[[], [-15]], [[-15, -13]], [[-15, -13, -11]], [[-15, -13, -11, -9]], [[], [-10]], [[-10, -8]], [[-10, -8, -6]], [[-10, -8, -6, -4]], [[], [-5]], [[-5, -3]], [[-5, -3, -1]], [[-5, -3, -1, 1]], [[], [0]], [[0, 2]], [[0, 2, 4]], [[0, 2, 4, 6]], [[], [5]], [[5, 7]], [[5, 7, 9]], [[5, 7, 9, 11]], [[], [10]], [[10, 12]], [[10, 12, 14]], [[10, 12, 14, 16]]]
assert [len(y) for x in expectation for y in x] == [0, 1, 2, 3, 4] * 6
assert [x.tolist() for x in branch.baskets()] == expectation
assert [x.tolist() for x in branch.iterate_baskets()] == expectation
def test_strings_baskets(self):
branch = uproot3.open("tests/samples/sample-6.10.05-uncompressed.root")["sample"]["str"]
expectation = [[b"hey-0", b"hey-1", b"hey-2", b"hey-3", b"hey-4", b"hey-5"], [b"hey-6", b"hey-7", b"hey-8", b"hey-9", b"hey-10"], [b"hey-11", b"hey-12", b"hey-13", b"hey-14", b"hey-15"], [b"hey-16", b"hey-17", b"hey-18", b"hey-19", b"hey-20"], [b"hey-21", b"hey-22", b"hey-23", b"hey-24", b"hey-25"], [b"hey-26", b"hey-27", b"hey-28", b"hey-29"]]
assert [x.tolist() for x in branch.baskets()] == expectation
assert [x.tolist() for x in branch.iterate_baskets()] == expectation
###################################################### array
def test_flat_array(self):
branch = uproot3.open("tests/samples/sample-6.10.05-uncompressed.root")["sample"]["i8"]
expectation = [-15, -14, -13, -12, -11, -10, -9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]
for entrystart, entrystop in [(None, None), (1, None), (1, 2), (1, 10), (10, 11), (10, 20), (6, 12), (6, 13)]:
assert branch.array(entrystart=entrystart, entrystop=entrystop).tolist() == expectation[entrystart:entrystop]
def test_regular_array(self):
branch = uproot3.open("tests/samples/sample-6.10.05-uncompressed.root")["sample"]["ai8"]
expectation = [[-14, -13, -12], [-13, -12, -11], [-12, -11, -10], [-11, -10, -9], [-10, -9, -8], [-9, -8, -7], [-8, -7, -6], [-7, -6, -5], [-6, -5, -4], [-5, -4, -3], [-4, -3, -2], [-3, -2, -1], [-2, -1, 0], [-1, 0, 1], [0, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 6], [5, 6, 7], [6, 7, 8], [7, 8, 9], [8, 9, 10], [9, 10, 11], [10, 11, 12], [11, 12, 13], [12, 13, 14], [13, 14, 15], [14, 15, 16], [15, 16, 17]]
for entrystart, entrystop in [(None, None), (1, None), (1, 2), (1, 10), (10, 11), (10, 20), (6, 12), (6, 13)]:
assert branch.array(entrystart=entrystart, entrystop=entrystop).tolist() == expectation[entrystart:entrystop]
def test_irregular_array(self):
branch = uproot3.open("tests/samples/sample-6.10.05-uncompressed.root")["sample"]["Ai8"]
expectation = [[], [-15], [-15, -13], [-15, -13, -11], [-15, -13, -11, -9], [], [-10], [-10, -8], [-10, -8, -6], [-10, -8, -6, -4], [], [-5], [-5, -3], [-5, -3, -1], [-5, -3, -1, 1], [], [0], [0, 2], [0, 2, 4], [0, 2, 4, 6], [], [5], [5, 7], [5, 7, 9], [5, 7, 9, 11], [], [10], [10, 12], [10, 12, 14], [10, 12, 14, 16]]
assert [len(x) for x in expectation] == [0, 1, 2, 3, 4] * 6
for entrystart, entrystop in [(None, None), (1, None), (1, 2), (1, 10), (10, 11), (10, 20), (6, 12), (6, 13)]:
assert branch.array(entrystart=entrystart, entrystop=entrystop).tolist() == expectation[entrystart:entrystop]
def test_strings_array(self):
branch = uproot3.open("tests/samples/sample-6.10.05-uncompressed.root")["sample"]["str"]
expectation = [b"hey-0", b"hey-1", b"hey-2", b"hey-3", b"hey-4", b"hey-5", b"hey-6", b"hey-7", b"hey-8", b"hey-9", b"hey-10", b"hey-11", b"hey-12", b"hey-13", b"hey-14", b"hey-15", b"hey-16", b"hey-17", b"hey-18", b"hey-19", b"hey-20", b"hey-21", b"hey-22", b"hey-23", b"hey-24", b"hey-25", b"hey-26", b"hey-27", b"hey-28", b"hey-29"]
for entrystart, entrystop in [(None, None), (1, None), (1, 2), (1, 10), (10, 11), (10, 20), (6, 12), (6, 13)]:
assert branch.array(entrystart=entrystart, entrystop=entrystop).tolist() == expectation[entrystart:entrystop]
###################################################### iterate
def test_flat_iterate(self):
tree = uproot3.open("tests/samples/sample-6.10.05-uncompressed.root")["sample"]
expectation = [-15, -14, -13, -12, -11, -10, -9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]
for n in 1000, 5, 6, 7:
assert [x.tolist() for (x,) in tree.iterate("i8", n, outputtype=tuple)] == [expectation[x : x + n] for x in range(0, len(expectation), n)]
def test_regular_iterate(self):
tree = uproot3.open("tests/samples/sample-6.10.05-uncompressed.root")["sample"]
expectation = [[-14, -13, -12], [-13, -12, -11], [-12, -11, -10], [-11, -10, -9], [-10, -9, -8], [-9, -8, -7], [-8, -7, -6], [-7, -6, -5], [-6, -5, -4], [-5, -4, -3], [-4, -3, -2], [-3, -2, -1], [-2, -1, 0], [-1, 0, 1], [0, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 6], [5, 6, 7], [6, 7, 8], [7, 8, 9], [8, 9, 10], [9, 10, 11], [10, 11, 12], [11, 12, 13], [12, 13, 14], [13, 14, 15], [14, 15, 16], [15, 16, 17]]
for n in 1000, 5, 6, 7:
assert [x.tolist() for (x,) in tree.iterate("ai8", n, outputtype=tuple)] == [expectation[x : x + n] for x in range(0, len(expectation), n)]
def test_irregular_iterate(self):
tree = uproot3.open("tests/samples/sample-6.10.05-uncompressed.root")["sample"]
expectation = [[], [-15], [-15, -13], [-15, -13, -11], [-15, -13, -11, -9], [], [-10], [-10, -8], [-10, -8, -6], [-10, -8, -6, -4], [], [-5], [-5, -3], [-5, -3, -1], [-5, -3, -1, 1], [], [0], [0, 2], [0, 2, 4], [0, 2, 4, 6], [], [5], [5, 7], [5, 7, 9], [5, 7, 9, 11], [], [10], [10, 12], [10, 12, 14], [10, 12, 14, 16]]
for n in 1000, 5, 6, 7:
assert [x.tolist() for (x,) in tree.iterate("Ai8", n, outputtype=tuple)] == [expectation[x : x + n] for x in range(0, len(expectation), n)]
def test_strings_iterate(self):
tree = uproot3.open("tests/samples/sample-6.10.05-uncompressed.root")["sample"]
expectation = [b"hey-0", b"hey-1", b"hey-2", b"hey-3", b"hey-4", b"hey-5", b"hey-6", b"hey-7", b"hey-8", b"hey-9", b"hey-10", b"hey-11", b"hey-12", b"hey-13", b"hey-14", b"hey-15", b"hey-16", b"hey-17", b"hey-18", b"hey-19", b"hey-20", b"hey-21", b"hey-22", b"hey-23", b"hey-24", b"hey-25", b"hey-26", b"hey-27", b"hey-28", b"hey-29"]
for n in 1000, 5, 6, 7:
assert [x.tolist() for (x,) in tree.iterate("str", n, outputtype=tuple)] == [expectation[x : x + n] for x in range(0, len(expectation), n)]
###################################################### old tests
def test_branch_array(self):
file = uproot3.open("tests/samples/simple.root")
repr(file)
tree = file["tree"]
repr(tree)
repr(tree["one"])
assert tree["one"].array().tolist() == [1, 2, 3, 4]
assert tree["two"].array().tolist() == numpy.array([1.1, 2.2, 3.3, 4.4], dtype=numpy.float32).tolist()
assert tree["three"].array().tolist() == [b"uno", b"dos", b"tres", b"quatro"]
assert tree["one"].array().tolist() == [1, 2, 3, 4]
assert tree["two"].array().tolist() == numpy.array([1.1, 2.2, 3.3, 4.4], dtype=numpy.float32).tolist()
assert tree["three"].array().tolist() == [b"uno", b"dos", b"tres", b"quatro"]
tree = file["tree"]
assert tree["one"].array().tolist() == [1, 2, 3, 4]
assert tree["two"].array().tolist() == numpy.array([1.1, 2.2, 3.3, 4.4], dtype=numpy.float32).tolist()
assert tree["three"].array().tolist() == [b"uno", b"dos", b"tres", b"quatro"]
def test_tree_arrays(self):
file = uproot3.open("tests/samples/simple.root")
tree = file["tree"]
arrays = tree.arrays()
assert arrays[b"one"].tolist() == [1, 2, 3, 4]
assert arrays[b"two"].tolist() == numpy.array([1.1, 2.2, 3.3, 4.4], dtype=numpy.float32).tolist()
assert arrays[b"three"].tolist() == [b"uno", b"dos", b"tres", b"quatro"]
# get arrays again
arrays = tree.arrays()
assert arrays[b"one"].tolist() == [1, 2, 3, 4]
assert arrays[b"two"].tolist() == numpy.array([1.1, 2.2, 3.3, 4.4], dtype=numpy.float32).tolist()
assert arrays[b"three"].tolist() == [b"uno", b"dos", b"tres", b"quatro"]
# get tree again
tree = file["tree"]
arrays = tree.arrays()
assert arrays[b"one"].tolist() == [1, 2, 3, 4]
assert arrays[b"two"].tolist() == numpy.array([1.1, 2.2, 3.3, 4.4], dtype=numpy.float32).tolist()
assert arrays[b"three"].tolist() == [b"uno", b"dos", b"tres", b"quatro"]
def test_tree_arrays_namedecode(self):
file = uproot3.open("tests/samples/simple.root")
tree = file["tree"]
arrays = tree.arrays(namedecode="utf-8")
assert arrays["one"].tolist() == [1, 2, 3, 4]
assert arrays["two"].tolist() == numpy.array([1.1, 2.2, 3.3, 4.4], dtype=numpy.float32).tolist()
assert arrays["three"].tolist() == [b"uno", b"dos", b"tres", b"quatro"]
def test_tree_iterator1(self):
# one big array
for arrays in uproot3.open("tests/samples/foriter.root")["foriter"].iterate(entrysteps=1000):
assert arrays[b"data"].tolist() == list(range(46))
# size is equal to basket size (for most baskets)
i = 0
for arrays in uproot3.open("tests/samples/foriter.root")["foriter"].iterate(entrysteps=6):
assert arrays[b"data"].tolist() == list(range(i, min(i + 6, 46)))
i += 6
# size is smaller
i = 0
for arrays in uproot3.open("tests/samples/foriter.root")["foriter"].iterate(entrysteps=3):
assert arrays[b"data"].tolist() == list(range(i, min(i + 3, 46)))
i += 3
i = 0
for arrays in uproot3.open("tests/samples/foriter.root")["foriter"].iterate(entrysteps=4):
assert arrays[b"data"].tolist() == list(range(i, min(i + 4, 46)))
i += 4
# size is larger
i = 0
for arrays in uproot3.open("tests/samples/foriter.root")["foriter"].iterate(entrysteps=12):
assert arrays[b"data"].tolist() == list(range(i, min(i + 12, 46)))
i += 12
i = 0
for arrays in uproot3.open("tests/samples/foriter.root")["foriter"].iterate(entrysteps=10):
assert arrays[b"data"].tolist() == list(range(i, min(i + 10, 46)))
i += 10
# singleton case
i = 0
for arrays in uproot3.open("tests/samples/foriter.root")["foriter"].iterate(entrysteps=1):
assert arrays[b"data"].tolist() == list(range(i, min(i + 1, 46)))
i += 1
def test_tree_iterator2(self):
words = [b"zero", b"one", b"two", b"three", b"four", b"five", b"six", b"seven", b"eight", b"nine", b"ten", b"eleven", b"twelve", b"thirteen", b"fourteen", b"fifteen", b"sixteen", b"seventeen", b"eighteen", b"ninteen", b"twenty", b"twenty-one", b"twenty-two", b"twenty-three", b"twenty-four", b"twenty-five", b"twenty-six", b"twenty-seven", b"twenty-eight", b"twenty-nine", b"thirty"]
# one big array
for arrays in uproot3.open("tests/samples/foriter2.root")["foriter2"].iterate(entrysteps=1000):
assert arrays[b"data"].tolist() == words
# size is equal to basket size (for most baskets)
i = 0
for arrays in uproot3.open("tests/samples/foriter2.root")["foriter2"].iterate(entrysteps=6):
assert arrays[b"data"].tolist() == words[i:i + 6]
i += 6
# size is smaller
i = 0
for arrays in uproot3.open("tests/samples/foriter2.root")["foriter2"].iterate(entrysteps=3):
assert arrays[b"data"].tolist() == words[i:i + 3]
i += 3
i = 0
for arrays in uproot3.open("tests/samples/foriter2.root")["foriter2"].iterate(entrysteps=4):
assert arrays[b"data"].tolist() == words[i:i + 4]
i += 4
# size is larger
i = 0
for arrays in uproot3.open("tests/samples/foriter2.root")["foriter2"].iterate(entrysteps=12):
assert arrays[b"data"].tolist() == words[i:i + 12]
i += 12
i = 0
for arrays in uproot3.open("tests/samples/foriter2.root")["foriter2"].iterate(entrysteps=10):
assert arrays[b"data"].tolist() == words[i:i + 10]
i += 10
# singleton case
i = 0
for arrays in uproot3.open("tests/samples/foriter2.root")["foriter2"].iterate(entrysteps=1):
assert arrays[b"data"].tolist() == words[i:i + 1]
i += 1
def test_tree_iterator3(self):
source = list(range(46))
# one big array
for arrays in uproot3.iterate(["tests/samples/foriter.root", "tests/samples/foriter.root"], "foriter", entrysteps=1000):
assert arrays[b"data"].tolist() == source
# size is equal to basket size (for most baskets)
i = 0
for arrays in uproot3.iterate(["tests/samples/foriter.root", "tests/samples/foriter.root"], "foriter", entrysteps=6):
assert arrays[b"data"].tolist() == source[i : i + 6]
i += 6
if i > 45: i = 0
# size is smaller
i = 0
for arrays in uproot3.iterate(["tests/samples/foriter.root", "tests/samples/foriter.root"], "foriter", entrysteps=3):
assert arrays[b"data"].tolist() == source[i : i + 3]
i += 3
if i > 45: i = 0
i = 0
for arrays in uproot3.iterate(["tests/samples/foriter.root", "tests/samples/foriter.root"], "foriter", entrysteps=4):
assert arrays[b"data"].tolist() == source[i : i + 4]
i += 4
if i > 45: i = 0
# size is larger
i = 0
for arrays in uproot3.iterate(["tests/samples/foriter.root", "tests/samples/foriter.root"], "foriter", entrysteps=12):
assert arrays[b"data"].tolist() == source[i : i + 12]
i += 12
if i > 45: i = 0
i = 0
for arrays in uproot3.iterate(["tests/samples/foriter.root", "tests/samples/foriter.root"], "foriter", entrysteps=10):
assert arrays[b"data"].tolist() == source[i : i + 10]
i += 10
if i > 45: i = 0
# singleton case
i = 0
for arrays in uproot3.iterate(["tests/samples/foriter.root", "tests/samples/foriter.root"], "foriter", entrysteps=1):
assert arrays[b"data"].tolist() == source[i : i + 1]
i += 1
if i > 45: i = 0
def test_tree_iterator4(self):
words2 = [b"zero", b"one", b"two", b"three", b"four", b"five", b"six", b"seven", b"eight", b"nine", b"ten", b"eleven", b"twelve", b"thirteen", b"fourteen", b"fifteen", b"sixteen", b"seventeen", b"eighteen", b"ninteen", b"twenty", b"twenty-one", b"twenty-two", b"twenty-three", b"twenty-four", b"twenty-five", b"twenty-six", b"twenty-seven", b"twenty-eight", b"twenty-nine", b"thirty"]
# one big array
for arrays in uproot3.iterate(["tests/samples/foriter2.root", "tests/samples/foriter2.root"], "foriter2", entrysteps=1000):
assert arrays[b"data"].tolist() == words2
# size is equal to basket size (for most baskets)
i = 0
for arrays in uproot3.iterate(["tests/samples/foriter2.root", "tests/samples/foriter2.root"], "foriter2", entrysteps=6):
assert arrays[b"data"].tolist() == words2[i : i + 6]
i += 6
if i > 30: i = 0
# size is smaller
i = 0
for arrays in uproot3.iterate(["tests/samples/foriter2.root", "tests/samples/foriter2.root"], "foriter2", entrysteps=3):
assert arrays[b"data"].tolist() == words2[i : i + 3]
i += 3
if i > 30: i = 0
i = 0
for arrays in uproot3.iterate(["tests/samples/foriter2.root", "tests/samples/foriter2.root"], "foriter2", entrysteps=4):
assert arrays[b"data"].tolist() == words2[i : i + 4]
i += 4
if i > 30: i = 0
# size is larger
i = 0
for arrays in uproot3.iterate(["tests/samples/foriter2.root", "tests/samples/foriter2.root"], "foriter2", entrysteps=12):
assert arrays[b"data"].tolist() == words2[i : i + 12]
i += 12
if i > 30: i = 0
i = 0
for arrays in uproot3.iterate(["tests/samples/foriter2.root", "tests/samples/foriter2.root"], "foriter2", entrysteps=10):
assert arrays[b"data"].tolist() == words2[i : i + 10]
i += 10
if i > 30: i = 0
# singleton case
i = 0
for arrays in uproot3.iterate(["tests/samples/foriter2.root", "tests/samples/foriter2.root"], "foriter2", entrysteps=1):
assert arrays[b"data"].tolist() == words2[i : i + 1]
i += 1
if i > 30: i = 0
def test_directories(self):
file = uproot3.open("tests/samples/nesteddirs.root")
assert [(n, cls._classname) for n, cls in file.classes()] == [(b"one;1", b"TDirectory"), (b"three;1", b"TDirectory")]
assert [(n, cls._classname) for n, cls in file.allclasses()] == [(b"one;1", b"TDirectory"), (b"one/two;1", b"TDirectory"), (b"one/two/tree;1", b"TTree"), (b"one/tree;1", b"TTree"), (b"three;1", b"TDirectory"), (b"three/tree;1", b"TTree")]
assert list(file["one"]["tree"].keys()) == [b"one", b"two", b"three"]
assert list(file["one"].get("tree", 1).keys()) == [b"one", b"two", b"three"]
assert list(file["one/tree;1"].keys()) == [b"one", b"two", b"three"]
assert list(file["one/two/tree;1"].keys()) == [b"Int32", b"Int64", b"UInt32", b"UInt64", b"Float32", b"Float64", b"Str", b"ArrayInt32", b"ArrayInt64", b"ArrayUInt32", b"ArrayUInt64", b"ArrayFloat32", b"ArrayFloat64", b"N", b"SliceInt32", b"SliceInt64", b"SliceUInt32", b"SliceUInt64", b"SliceFloat32", b"SliceFloat64"]
assert list(file["three/tree;1"].keys()) == [b"evt"]
assert dict((name, array.tolist()) for name, array in file["one/tree"].arrays(["one", "two", "three"]).items()) == {b"one": [1, 2, 3, 4], b"two": [1.100000023841858, 2.200000047683716, 3.299999952316284, 4.400000095367432], b"three": [b"uno", b"dos", b"tres", b"quatro"]}
assert file["one/two/tree"].array("Int32").shape == (100,)
assert file["three/tree"].array("I32").shape == (100,)
file = uproot3.open("tests/samples/nesteddirs.root")
assert list(file["one/tree"].keys()) == [b"one", b"two", b"three"]
assert list(file["one/two/tree"].keys()) == [b"Int32", b"Int64", b"UInt32", b"UInt64", b"Float32", b"Float64", b"Str", b"ArrayInt32", b"ArrayInt64", b"ArrayUInt32", b"ArrayUInt64", b"ArrayFloat32", b"ArrayFloat64", b"N", b"SliceInt32", b"SliceInt64", b"SliceUInt32", b"SliceUInt64", b"SliceFloat32", b"SliceFloat64"]
assert list(file["three/tree"].keys()) == [b"evt"]
assert dict((name, array.tolist()) for name, array in file["one/tree;1"].arrays(["one", "two", "three"]).items()) == {b"one": [1, 2, 3, 4], b"two": [1.100000023841858, 2.200000047683716, 3.299999952316284, 4.400000095367432], b"three": [b"uno", b"dos", b"tres", b"quatro"]}
assert file["one/two/tree;1"].array("Int32").shape == (100,)
assert file["three/tree;1"].array("I32").shape == (100,)
def test_cast(self):
tree = uproot3.open("tests/samples/Zmumu.root")["events"]
one = numpy.cast[numpy.int32](numpy.floor(tree.array("M")))
two = tree.array("M", numpy.int32)
assert one.dtype == two.dtype
assert one.shape == two.shape
assert numpy.array_equal(one, two)
for (one,) in tree.iterate("M", 10000, outputtype=tuple):
one = numpy.cast[numpy.int32](numpy.floor(one))
for (two,) in tree.iterate({"M": numpy.int32}, 10000, outputtype=tuple):
pass
assert one.dtype == two.dtype
assert one.shape == two.shape
assert numpy.array_equal(one, two)
def test_pass_array(self):
tree = uproot3.open("tests/samples/Zmumu.root")["events"]
one = numpy.cast[numpy.int32](numpy.floor(tree.array("M")))
two = numpy.zeros(one.shape, dtype=one.dtype)
tree.array("M", two)
assert numpy.array_equal(one, two)
for (one,) in tree.iterate("M", 10000, outputtype=tuple):
one = numpy.cast[numpy.int32](numpy.floor(one))
two = numpy.zeros(one.shape, dtype=one.dtype)
for (two,) in tree.iterate({"M": numpy.int32}, 10000, outputtype=tuple):
assert numpy.array_equal(one, two)
def test_outputtype(self):
tree = uproot3.open("tests/samples/simple.root")["tree"]
arrays = tree.arrays(["three", "two", "one"], outputtype=dict)
assert isinstance(arrays, dict)
assert arrays[b"one"].tolist() == [1, 2, 3, 4]
assert arrays[b"three"].tolist() == [b"uno", b"dos", b"tres", b"quatro"]
arrays = tree.arrays(["three", "two", "one"], outputtype=tuple)
assert isinstance(arrays, tuple)
assert arrays[2].tolist() == [1, 2, 3, 4]
assert arrays[0].tolist() == [b"uno", b"dos", b"tres", b"quatro"]
arrays = tree.arrays(["three", "two", "one"], outputtype=namedtuple)
assert arrays.one.tolist() == [1, 2, 3, 4]
assert arrays.three.tolist() == [b"uno", b"dos", b"tres", b"quatro"]
arrays = tree.arrays(["three", "two", "one"], outputtype=list)
assert isinstance(arrays, list)
assert arrays[2].tolist() == [1, 2, 3, 4]
assert arrays[0].tolist() == [b"uno", b"dos", b"tres", b"quatro"]
class Awesome(object):
def __init__(self, one, two, three):
self.one = one
self.two = two
self.three = three
arrays = tree.arrays(["one", "two", "three"], outputtype=Awesome)
assert isinstance(arrays, Awesome)
assert arrays.one.tolist() == [1, 2, 3, 4]
assert arrays.three.tolist() == [b"uno", b"dos", b"tres", b"quatro"]
class MyList(list):
pass
class MyTuple(tuple):
pass
arrays = tree.arrays(["three", "two", "one"], outputtype=MyList)
assert isinstance(arrays, MyList)
assert arrays[2].tolist() == [1, 2, 3, 4]
assert arrays[0].tolist() == [b"uno", b"dos", b"tres", b"quatro"]
arrays = tree.arrays(["three", "two", "one"], outputtype=MyTuple)
assert isinstance(arrays, MyTuple)
def test_tree_lazy(self):
tree = uproot3.open("tests/samples/sample-5.30.00-uncompressed.root")["sample"]
for branchname in b"u1", b"i8", b"Ai8", b"f4", b"af4":
strict = tree[branchname].array()
lazy = tree[branchname].lazyarray()
for i in range(len(lazy)):
assert lazy[i].tolist() == strict[i].tolist()
lazy = tree[branchname].lazyarray()
for i in range(len(lazy), 0, -1):
assert lazy[i - 1].tolist() == strict[i - 1].tolist()
lazy = tree[branchname].lazyarray()
for i in range(len(lazy)):
assert lazy[i : i + 3].tolist() == strict[i : i + 3].tolist()
lazy = tree[branchname].lazyarray()
for i in range(len(lazy), 0, -1):
assert lazy[i - 1 : i + 3].tolist() == strict[i - 1 : i + 3].tolist()
def test_tree_lazy2(self):
tree = uproot3.open("tests/samples/sample-5.30.00-uncompressed.root")["sample"]
lazy = tree.lazyarrays()
for branchname in "u1", "i8", "Ai8", "f4", "af4":
strict = tree[branchname.encode()].array()
for i in range(len(lazy)):
assert lazy[branchname][i].tolist() == strict[i].tolist()
for i in range(len(lazy), 0, -1):
assert lazy[branchname][i - 1].tolist() == strict[i - 1].tolist()
for i in range(len(lazy)):
assert lazy[branchname][i : i + 3].tolist() == strict[i : i + 3].tolist()
for i in range(len(lazy), 0, -1):
assert lazy[branchname][i - 1 : i + 3].tolist() == strict[i - 1 : i + 3].tolist()
def test_tree_lazy3(self):
lazy = uproot3.lazyarrays(["tests/samples/sample-5.29.02-uncompressed.root", "tests/samples/sample-5.30.00-uncompressed.root"], "sample")
assert lazy["u1"].tolist() == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29]
assert lazy["i8"].tolist() == [-15, -14, -13, -12, -11, -10, -9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, -15, -14, -13, -12, -11, -10, -9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]
assert lazy["Ai8"].tolist() == [[], [-15], [-15, -13], [-15, -13, -11], [-15, -13, -11, -9], [], [-10], [-10, -8], [-10, -8, -6], [-10, -8, -6, -4], [], [-5], [-5, -3], [-5, -3, -1], [-5, -3, -1, 1], [], [0], [0, 2], [0, 2, 4], [0, 2, 4, 6], [], [5], [5, 7], [5, 7, 9], [5, 7, 9, 11], [], [10], [10, 12], [10, 12, 14], [10, 12, 14, 16], [], [-15], [-15, -13], [-15, -13, -11], [-15, -13, -11, -9], [], [-10], [-10, -8], [-10, -8, -6], [-10, -8, -6, -4], [], [-5], [-5, -3], [-5, -3, -1], [-5, -3, -1, 1], [], [0], [0, 2], [0, 2, 4], [0, 2, 4, 6], [], [5], [5, 7], [5, 7, 9], [5, 7, 9, 11], [], [10], [10, 12], [10, 12, 14], [10, 12, 14, 16]]
assert lazy["f4"].tolist() == [-14.899999618530273, -13.899999618530273, -12.899999618530273, -11.899999618530273, -10.899999618530273, -9.899999618530273, -8.899999618530273, -7.900000095367432, -6.900000095367432, -5.900000095367432, -4.900000095367432, -3.9000000953674316, -2.9000000953674316, -1.899999976158142, -0.8999999761581421, 0.10000000149011612, 1.100000023841858, 2.0999999046325684, 3.0999999046325684, 4.099999904632568, 5.099999904632568, 6.099999904632568, 7.099999904632568, 8.100000381469727, 9.100000381469727, 10.100000381469727, 11.100000381469727, 12.100000381469727, 13.100000381469727, 14.100000381469727, -14.899999618530273, -13.899999618530273, -12.899999618530273, -11.899999618530273, -10.899999618530273, -9.899999618530273, -8.899999618530273, -7.900000095367432, -6.900000095367432, -5.900000095367432, -4.900000095367432, -3.9000000953674316, -2.9000000953674316, -1.899999976158142, -0.8999999761581421, 0.10000000149011612, 1.100000023841858, 2.0999999046325684, 3.0999999046325684, 4.099999904632568, 5.099999904632568, 6.099999904632568, 7.099999904632568, 8.100000381469727, 9.100000381469727, 10.100000381469727, 11.100000381469727, 12.100000381469727, 13.100000381469727, 14.100000381469727]
assert lazy["af4"].tolist() == [[-13.899999618530273, -12.899999618530273, -11.899999618530273], [-12.899999618530273, -11.899999618530273, -10.899999618530273], [-11.899999618530273, -10.899999618530273, -9.899999618530273], [-10.899999618530273, -9.899999618530273, -8.899999618530273], [-9.899999618530273, -8.899999618530273, -7.900000095367432], [-8.899999618530273, -7.900000095367432, -6.900000095367432], [-7.900000095367432, -6.900000095367432, -5.900000095367432], [-6.900000095367432, -5.900000095367432, -4.900000095367432], [-5.900000095367432, -4.900000095367432, -3.9000000953674316], [-4.900000095367432, -3.9000000953674316, -2.9000000953674316], [-3.9000000953674316, -2.9000000953674316, -1.899999976158142], [-2.9000000953674316, -1.899999976158142, -0.8999999761581421], [-1.899999976158142, -0.8999999761581421, 0.10000000149011612], [-0.8999999761581421, 0.10000000149011612, 1.100000023841858], [0.10000000149011612, 1.100000023841858, 2.0999999046325684], [1.100000023841858, 2.0999999046325684, 3.0999999046325684], [2.0999999046325684, 3.0999999046325684, 4.099999904632568], [3.0999999046325684, 4.099999904632568, 5.099999904632568], [4.099999904632568, 5.099999904632568, 6.099999904632568], [5.099999904632568, 6.099999904632568, 7.099999904632568], [6.099999904632568, 7.099999904632568, 8.100000381469727], [7.099999904632568, 8.100000381469727, 9.100000381469727], [8.100000381469727, 9.100000381469727, 10.100000381469727], [9.100000381469727, 10.100000381469727, 11.100000381469727], [10.100000381469727, 11.100000381469727, 12.100000381469727], [11.100000381469727, 12.100000381469727, 13.100000381469727], [12.100000381469727, 13.100000381469727, 14.100000381469727], [13.100000381469727, 14.100000381469727, 15.100000381469727], [14.100000381469727, 15.100000381469727, 16.100000381469727], [15.100000381469727, 16.100000381469727, 17.100000381469727], [-13.899999618530273, -12.899999618530273, -11.899999618530273], [-12.899999618530273, -11.899999618530273, -10.899999618530273], [-11.899999618530273, -10.899999618530273, -9.899999618530273], [-10.899999618530273, -9.899999618530273, -8.899999618530273], [-9.899999618530273, -8.899999618530273, -7.900000095367432], [-8.899999618530273, -7.900000095367432, -6.900000095367432], [-7.900000095367432, -6.900000095367432, -5.900000095367432], [-6.900000095367432, -5.900000095367432, -4.900000095367432], [-5.900000095367432, -4.900000095367432, -3.9000000953674316], [-4.900000095367432, -3.9000000953674316, -2.9000000953674316], [-3.9000000953674316, -2.9000000953674316, -1.899999976158142], [-2.9000000953674316, -1.899999976158142, -0.8999999761581421], [-1.899999976158142, -0.8999999761581421, 0.10000000149011612], [-0.8999999761581421, 0.10000000149011612, 1.100000023841858], [0.10000000149011612, 1.100000023841858, 2.0999999046325684], [1.100000023841858, 2.0999999046325684, 3.0999999046325684], [2.0999999046325684, 3.0999999046325684, 4.099999904632568], [3.0999999046325684, 4.099999904632568, 5.099999904632568], [4.099999904632568, 5.099999904632568, 6.099999904632568], [5.099999904632568, 6.099999904632568, 7.099999904632568], [6.099999904632568, 7.099999904632568, 8.100000381469727], [7.099999904632568, 8.100000381469727, 9.100000381469727], [8.100000381469727, 9.100000381469727, 10.100000381469727], [9.100000381469727, 10.100000381469727, 11.100000381469727], [10.100000381469727, 11.100000381469727, 12.100000381469727], [11.100000381469727, 12.100000381469727, 13.100000381469727], [12.100000381469727, 13.100000381469727, 
14.100000381469727], [13.100000381469727, 14.100000381469727, 15.100000381469727], [14.100000381469727, 15.100000381469727, 16.100000381469727], [15.100000381469727, 16.100000381469727, 17.100000381469727]]
def test_tree_lazy_cached(self):
tree = uproot3.open("tests/samples/sample-5.30.00-uncompressed.root")["sample"]
cache = {}
keycache = {}
basketcache = {}
for branchname in b"u1", b"i8", b"Ai8", b"f4", b"af4":
strict = tree[branchname].array()
lazy = tree[branchname].lazyarray(cache=cache, keycache=keycache, basketcache=basketcache)
for i in range(len(lazy)):
assert lazy[i].tolist() == strict[i].tolist()
lazy = tree[branchname].lazyarray(cache=cache, keycache=keycache, basketcache=basketcache)
for i in range(len(lazy), 0, -1):
assert lazy[i - 1].tolist() == strict[i - 1].tolist()
lazy = tree[branchname].lazyarray(cache=cache, keycache=keycache, basketcache=basketcache)
for i in range(len(lazy)):
assert lazy[i : i + 3].tolist() == strict[i : i + 3].tolist()
lazy = tree[branchname].lazyarray(cache=cache, keycache=keycache, basketcache=basketcache)
for i in range(len(lazy), 0, -1):
assert lazy[i - 1 : i + 3].tolist() == strict[i - 1 : i + 3].tolist()
@pytest.mark.parametrize("use_http", [False, True])
@pytest.mark.skip(reason="http://scikit-hep.org/uproot/examples/Event.root moved")
def test_hist_in_tree(self, use_http):
if use_http:
pytest.importorskip("requests")
tree = uproot3.open("http://scikit-hep.org/uproot3/examples/Event.root")["T"]
else:
path = os.path.join("tests", "samples", "Event.root")
if not os.path.exists(path):
raise pytest.skip()
tree = uproot3.open(path)["T"]
check = [0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0,
1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0,
1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 2.0, 0.0, 0.0, 1.0,
0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0]
assert tree.array("fH")[20].values.tolist() == check
@pytest.mark.parametrize("use_http", [False, True])
@pytest.mark.skip(reason="http://scikit-hep.org/uproot/examples/Event.root moved")
def test_branch_auto_interpretation(self, use_http):
# The aim is to reduce this list in a controlled manner
known_branches_without_interp = [
b'event',
b'TObject',
b'fClosestDistance',
b'fEvtHdr',
b'fTracks',
b'fTracks.fPointValue',
b'fTriggerBits',
b'fTriggerBits.TObject'
]
if use_http:
pytest.importorskip("requests")
tree = uproot3.open("http://scikit-hep.org/uproot3/examples/Event.root")["T"]
else:
path = os.path.join("tests", "samples", "Event.root")
if not os.path.exists(path):
raise pytest.skip()
tree = uproot3.open(path)["T"]
branches_without_interp = [b.name for b in tree.allvalues() if b.interpretation is None]
assert branches_without_interp == known_branches_without_interp
assert tree.array("fTracks.fTArray[3]", entrystop=10)[5][10].tolist() == [11.03951644897461, 19.40645980834961, 34.54059982299805]
assert tree.array("fTracks.fCharge", entrystop=10)[0][0:10].tolist() == [1.0, 1.0, 1.0, 1.0,-1.0, 0.0, 1.0, 0.0, 0.0, 0.0]
assert tree.array("fMatrix[4][4]", entrystop=10)[0][1].tolist() == [-0.13630907237529755, 0.8007842898368835, 1.706235647201538, 0.0]
assert tree.array("fTracks.fMass2", entrystop=10)[3][330:333].tolist() == [8.90625, 8.90625, 8.90625]
assert tree.array("fTracks.fBx", entrystop=10)[9][10:13].tolist() == [0.12298583984375, -0.2489013671875, -0.189697265625]
assert tree.array("fTracks.fBy", entrystop=10)[9][10:13].tolist() == [0.1998291015625, -0.0301513671875, 0.0736083984375]
assert tree.array("fTracks.fXfirst", entrystop=10)[1][11:16].tolist() == [-8.650390625, -2.8203125, -1.949951171875, 0.4859619140625, 3.0146484375]
assert tree.array("fTracks.fXlast", entrystop=10)[1][11:16].tolist() == [-2.18994140625, -2.64697265625, -8.4375, 1.594970703125, 6.40234375]
assert tree.array("fTracks.fYfirst", entrystop=10)[2][22:26].tolist() == [4.9921875, 8.46875, 1.679443359375, -6.927734375]
assert tree.array("fTracks.fYlast", entrystop=10)[2][22:26].tolist() == [-5.76171875, 13.7109375, 2.98583984375, -9.466796875]
assert tree.array("fTracks.fZfirst", entrystop=10)[3][33:36].tolist() == [53.84375, 52.3125, 48.296875]
assert tree.array("fTracks.fZlast", entrystop=10)[3][33:36].tolist() == [193.96875, 208.25, 228.40625]
assert tree.array("fTracks.fVertex[3]", entrystop=10)[1][2].tolist() == [0.245361328125, 0.029296875,-16.171875]
def test_leaflist(self):
tree = uproot3.open("tests/samples/leaflist.root")["tree"]
a = tree.array("leaflist")
assert a["x"].tolist() == [1.1, 2.2, 3.3, 4.0, 5.5] # yeah, I goofed up when making it
assert a["y"].tolist() == [1, 2, 3, 4, 5]
assert a["z"].tolist() == [ord("a"), ord("b"), ord("c"), ord("d"), ord("e")]
pytest.importorskip("pandas")
assert tree.pandas.df()["leaflist.x"].tolist() == [1.1, 2.2, 3.3, 4.0, 5.5]
tree = uproot3.open("tests/samples/HZZ-objects.root")["events"]
tree.pandas.df("muonp4")
tree.pandas.df("muonp4", flatten=False)
df = tree.pandas.df("eventweight", entrystart=100, entrystop=200)
index = df.index.tolist()
assert min(index) == 100
assert max(index) == 199
df = tree.pandas.df("muonp4", entrystart=100, entrystop=200)
index = df.index.get_level_values("entry").tolist()
assert min(index) == 100
assert max(index) == 199
def test_mempartitions(self):
t = uproot3.open("tests/samples/sample-5.23.02-zlib.root")["sample"]
assert list(t.mempartitions(500)) == [(0, 2), (2, 4), (4, 6), (6, 8), (8, 10), (10, 12), (12, 14), (14, 16), (16, 18), (18, 20), (20, 22), (22, 24), (24, 26), (26, 28), (28, 30)]
assert [sum(y.nbytes for y in x.values()) for x in t.iterate(entrysteps="0.5 kB")] == [693, 865, 822, 779, 951, 695, 867, 824, 781, 953, 695, 867, 824, 781, 953]
| bsd-3-clause |
gfyoung/pandas | pandas/tests/indexes/test_common.py | 1 | 15842 | """
Collection of tests asserting things that should be true for
any index subclass. Makes use of the `indices` fixture defined
in pandas/tests/indexes/conftest.py.
"""
import re
import numpy as np
import pytest
from pandas._libs.tslibs import iNaT
from pandas.core.dtypes.common import is_period_dtype, needs_i8_conversion
import pandas as pd
from pandas import (
CategoricalIndex,
DatetimeIndex,
MultiIndex,
PeriodIndex,
RangeIndex,
TimedeltaIndex,
)
import pandas._testing as tm
class TestCommon:
def test_droplevel(self, index):
# GH 21115
if isinstance(index, MultiIndex):
# Tested separately in test_multi.py
return
assert index.droplevel([]).equals(index)
for level in index.name, [index.name]:
if isinstance(index.name, tuple) and level is index.name:
# GH 21121 : droplevel with tuple name
continue
msg = (
"Cannot remove 1 levels from an index with 1 levels: at least one "
"level must be left."
)
with pytest.raises(ValueError, match=msg):
index.droplevel(level)
for level in "wrong", ["wrong"]:
with pytest.raises(
KeyError,
match=r"'Requested level \(wrong\) does not match index name \(None\)'",
):
index.droplevel(level)
def test_constructor_non_hashable_name(self, index):
# GH 20527
if isinstance(index, MultiIndex):
pytest.skip("multiindex handled in test_multi.py")
message = "Index.name must be a hashable type"
renamed = [["1"]]
# With .rename()
with pytest.raises(TypeError, match=message):
index.rename(name=renamed)
# With .set_names()
with pytest.raises(TypeError, match=message):
index.set_names(names=renamed)
def test_constructor_unwraps_index(self, index):
if isinstance(index, pd.MultiIndex):
raise pytest.skip("MultiIndex has no ._data")
a = index
b = type(a)(a)
tm.assert_equal(a._data, b._data)
@pytest.mark.parametrize("itm", [101, "no_int"])
# FutureWarning from non-tuple sequence of nd indexing
@pytest.mark.filterwarnings("ignore::FutureWarning")
def test_getitem_error(self, index, itm):
msg = r"index 101 is out of bounds for axis 0 with size [\d]+|" + re.escape(
"only integers, slices (`:`), ellipsis (`...`), numpy.newaxis (`None`) "
"and integer or boolean arrays are valid indices"
)
with pytest.raises(IndexError, match=msg):
index[itm]
def test_to_flat_index(self, index):
# 22866
if isinstance(index, MultiIndex):
pytest.skip("Separate expectation for MultiIndex")
result = index.to_flat_index()
tm.assert_index_equal(result, index)
def test_set_name_methods(self, index):
new_name = "This is the new name for this index"
        # don't test a MultiIndex here (as it's tested separately)
if isinstance(index, MultiIndex):
pytest.skip("Skip check for MultiIndex")
original_name = index.name
new_ind = index.set_names([new_name])
assert new_ind.name == new_name
assert index.name == original_name
res = index.rename(new_name, inplace=True)
# should return None
assert res is None
assert index.name == new_name
assert index.names == [new_name]
        # FIXME: don't leave commented-out
# with pytest.raises(TypeError, match="list-like"):
# # should still fail even if it would be the right length
# ind.set_names("a")
with pytest.raises(ValueError, match="Level must be None"):
index.set_names("a", level=0)
# rename in place just leaves tuples and other containers alone
name = ("A", "B")
index.rename(name, inplace=True)
assert index.name == name
assert index.names == [name]
def test_copy_and_deepcopy(self, index):
from copy import copy, deepcopy
if isinstance(index, MultiIndex):
pytest.skip("Skip check for MultiIndex")
for func in (copy, deepcopy):
idx_copy = func(index)
assert idx_copy is not index
assert idx_copy.equals(index)
new_copy = index.copy(deep=True, name="banana")
assert new_copy.name == "banana"
def test_unique(self, index):
        # don't test a MultiIndex here (as it's tested separately)
# don't test a CategoricalIndex because categories change (GH 18291)
if isinstance(index, (MultiIndex, CategoricalIndex)):
pytest.skip("Skip check for MultiIndex/CategoricalIndex")
# GH 17896
expected = index.drop_duplicates()
for level in 0, index.name, None:
result = index.unique(level=level)
tm.assert_index_equal(result, expected)
msg = "Too many levels: Index has only 1 level, not 4"
with pytest.raises(IndexError, match=msg):
index.unique(level=3)
msg = (
fr"Requested level \(wrong\) does not match index name "
fr"\({re.escape(index.name.__repr__())}\)"
)
with pytest.raises(KeyError, match=msg):
index.unique(level="wrong")
def test_get_unique_index(self, index):
# MultiIndex tested separately
if not len(index) or isinstance(index, MultiIndex):
pytest.skip("Skip check for empty Index and MultiIndex")
idx = index[[0] * 5]
idx_unique = index[[0]]
# We test against `idx_unique`, so first we make sure it's unique
# and doesn't contain nans.
assert idx_unique.is_unique is True
try:
assert idx_unique.hasnans is False
except NotImplementedError:
pass
for dropna in [False, True]:
result = idx._get_unique_index(dropna=dropna)
tm.assert_index_equal(result, idx_unique)
# nans:
if not index._can_hold_na:
pytest.skip("Skip na-check if index cannot hold na")
if is_period_dtype(index.dtype):
vals = index[[0] * 5]._data
vals[0] = pd.NaT
elif needs_i8_conversion(index.dtype):
vals = index.asi8[[0] * 5]
vals[0] = iNaT
else:
vals = index.values[[0] * 5]
vals[0] = np.nan
vals_unique = vals[:2]
if index.dtype.kind in ["m", "M"]:
# i.e. needs_i8_conversion but not period_dtype, as above
vals = type(index._data)._simple_new(vals, dtype=index.dtype)
vals_unique = type(index._data)._simple_new(vals_unique, dtype=index.dtype)
idx_nan = index._shallow_copy(vals)
idx_unique_nan = index._shallow_copy(vals_unique)
assert idx_unique_nan.is_unique is True
assert idx_nan.dtype == index.dtype
assert idx_unique_nan.dtype == index.dtype
for dropna, expected in zip([False, True], [idx_unique_nan, idx_unique]):
for i in [idx_nan, idx_unique_nan]:
result = i._get_unique_index(dropna=dropna)
tm.assert_index_equal(result, expected)
def test_view(self, index):
assert index.view().name == index.name
def test_searchsorted_monotonic(self, index):
# GH17271
# not implemented for tuple searches in MultiIndex
# or Intervals searches in IntervalIndex
if isinstance(index, (MultiIndex, pd.IntervalIndex)):
pytest.skip("Skip check for MultiIndex/IntervalIndex")
# nothing to test if the index is empty
if index.empty:
pytest.skip("Skip check for empty Index")
value = index[0]
# determine the expected results (handle dupes for 'right')
expected_left, expected_right = 0, (index == value).argmin()
if expected_right == 0:
# all values are the same, expected_right should be length
expected_right = len(index)
# test _searchsorted_monotonic in all cases
# test searchsorted only for increasing
if index.is_monotonic_increasing:
ssm_left = index._searchsorted_monotonic(value, side="left")
assert expected_left == ssm_left
ssm_right = index._searchsorted_monotonic(value, side="right")
assert expected_right == ssm_right
ss_left = index.searchsorted(value, side="left")
assert expected_left == ss_left
ss_right = index.searchsorted(value, side="right")
assert expected_right == ss_right
elif index.is_monotonic_decreasing:
ssm_left = index._searchsorted_monotonic(value, side="left")
assert expected_left == ssm_left
ssm_right = index._searchsorted_monotonic(value, side="right")
assert expected_right == ssm_right
else:
# non-monotonic should raise.
msg = "index must be monotonic increasing or decreasing"
with pytest.raises(ValueError, match=msg):
index._searchsorted_monotonic(value, side="left")
def test_pickle(self, index):
original_name, index.name = index.name, "foo"
unpickled = tm.round_trip_pickle(index)
assert index.equals(unpickled)
index.name = original_name
def test_drop_duplicates(self, index, keep):
if isinstance(index, MultiIndex):
pytest.skip("MultiIndex is tested separately")
if isinstance(index, RangeIndex):
pytest.skip(
"RangeIndex is tested in test_drop_duplicates_no_duplicates "
"as it cannot hold duplicates"
)
if len(index) == 0:
pytest.skip(
"empty index is tested in test_drop_duplicates_no_duplicates "
"as it cannot hold duplicates"
)
# make unique index
holder = type(index)
unique_values = list(set(index))
unique_idx = holder(unique_values)
# make duplicated index
n = len(unique_idx)
duplicated_selection = np.random.choice(n, int(n * 1.5))
idx = holder(unique_idx.values[duplicated_selection])
# Series.duplicated is tested separately
expected_duplicated = (
pd.Series(duplicated_selection).duplicated(keep=keep).values
)
tm.assert_numpy_array_equal(idx.duplicated(keep=keep), expected_duplicated)
# Series.drop_duplicates is tested separately
expected_dropped = holder(pd.Series(idx).drop_duplicates(keep=keep))
tm.assert_index_equal(idx.drop_duplicates(keep=keep), expected_dropped)
def test_drop_duplicates_no_duplicates(self, index):
if isinstance(index, MultiIndex):
pytest.skip("MultiIndex is tested separately")
# make unique index
if isinstance(index, RangeIndex):
# RangeIndex cannot have duplicates
unique_idx = index
else:
holder = type(index)
unique_values = list(set(index))
unique_idx = holder(unique_values)
# check on unique index
expected_duplicated = np.array([False] * len(unique_idx), dtype="bool")
tm.assert_numpy_array_equal(unique_idx.duplicated(), expected_duplicated)
result_dropped = unique_idx.drop_duplicates()
tm.assert_index_equal(result_dropped, unique_idx)
# validate shallow copy
assert result_dropped is not unique_idx
def test_drop_duplicates_inplace(self, index):
msg = r"drop_duplicates\(\) got an unexpected keyword argument"
with pytest.raises(TypeError, match=msg):
index.drop_duplicates(inplace=True)
def test_has_duplicates(self, index):
holder = type(index)
if not len(index) or isinstance(index, (MultiIndex, RangeIndex)):
# MultiIndex tested separately in:
# tests/indexes/multi/test_unique_and_duplicates.
# RangeIndex is unique by definition.
pytest.skip("Skip check for empty Index, MultiIndex, and RangeIndex")
idx = holder([index[0]] * 5)
assert idx.is_unique is False
assert idx.has_duplicates is True
@pytest.mark.parametrize(
"dtype",
["int64", "uint64", "float64", "category", "datetime64[ns]", "timedelta64[ns]"],
)
def test_astype_preserves_name(self, index, dtype):
# https://github.com/pandas-dev/pandas/issues/32013
if isinstance(index, MultiIndex):
index.names = ["idx" + str(i) for i in range(index.nlevels)]
else:
index.name = "idx"
warn = None
if dtype in ["int64", "uint64"]:
if needs_i8_conversion(index.dtype):
warn = FutureWarning
elif (
isinstance(index, DatetimeIndex)
and index.tz is not None
and dtype == "datetime64[ns]"
):
# This astype is deprecated in favor of tz_localize
warn = FutureWarning
try:
# Some of these conversions cannot succeed so we use a try / except
with tm.assert_produces_warning(warn, check_stacklevel=False):
result = index.astype(dtype)
except (ValueError, TypeError, NotImplementedError, SystemError):
return
if isinstance(index, MultiIndex):
assert result.names == index.names
else:
assert result.name == index.name
def test_ravel_deprecation(self, index):
# GH#19956 ravel returning ndarray is deprecated
with tm.assert_produces_warning(FutureWarning):
index.ravel()
def test_asi8_deprecation(self, index):
# GH#37877
if isinstance(index, (DatetimeIndex, TimedeltaIndex, PeriodIndex)):
warn = None
else:
warn = FutureWarning
with tm.assert_produces_warning(warn):
index.asi8
@pytest.mark.parametrize("na_position", [None, "middle"])
def test_sort_values_invalid_na_position(request, index_with_missing, na_position):
if isinstance(index_with_missing, MultiIndex):
request.node.add_marker(
pytest.mark.xfail(
reason="missing value sorting order not defined for index type"
)
)
if na_position not in ["first", "last"]:
with pytest.raises(ValueError, match=f"invalid na_position: {na_position}"):
index_with_missing.sort_values(na_position=na_position)
@pytest.mark.parametrize("na_position", ["first", "last"])
def test_sort_values_with_missing(request, index_with_missing, na_position):
# GH 35584. Test that sort_values works with missing values,
# sort non-missing and place missing according to na_position
if isinstance(index_with_missing, MultiIndex):
request.node.add_marker(
pytest.mark.xfail(reason="missing value sorting order not implemented")
)
elif isinstance(index_with_missing, CategoricalIndex):
pytest.skip("missing value sorting order not well-defined")
missing_count = np.sum(index_with_missing.isna())
not_na_vals = index_with_missing[index_with_missing.notna()].values
sorted_values = np.sort(not_na_vals)
if na_position == "first":
sorted_values = np.concatenate([[None] * missing_count, sorted_values])
else:
sorted_values = np.concatenate([sorted_values, [None] * missing_count])
expected = type(index_with_missing)(sorted_values)
result = index_with_missing.sort_values(na_position=na_position)
tm.assert_index_equal(result, expected)
| bsd-3-clause |
srowen/spark | python/pyspark/pandas/data_type_ops/binary_ops.py | 5 | 2976 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Any, Union, cast
from pandas.api.types import CategoricalDtype
from pyspark.pandas.base import column_op, IndexOpsMixin
from pyspark.pandas._typing import Dtype, IndexOpsLike, SeriesOrIndex
from pyspark.pandas.data_type_ops.base import (
DataTypeOps,
_as_bool_type,
_as_categorical_type,
_as_other_type,
_as_string_type,
)
from pyspark.pandas.spark import functions as SF
from pyspark.pandas.typedef import pandas_on_spark_type
from pyspark.sql import functions as F
from pyspark.sql.types import BinaryType, BooleanType, StringType
class BinaryOps(DataTypeOps):
"""
The class for binary operations of pandas-on-Spark objects with BinaryType.
"""
@property
def pretty_name(self) -> str:
return "binaries"
def add(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
if isinstance(right, IndexOpsMixin) and isinstance(right.spark.data_type, BinaryType):
return column_op(F.concat)(left, right)
elif isinstance(right, bytes):
return column_op(F.concat)(left, SF.lit(right))
else:
raise TypeError(
"Concatenation can not be applied to %s and the given type." % self.pretty_name
)
def radd(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
if isinstance(right, bytes):
return cast(
SeriesOrIndex, left._with_new_scol(F.concat(SF.lit(right), left.spark.column))
)
else:
raise TypeError(
"Concatenation can not be applied to %s and the given type." % self.pretty_name
)
def astype(self, index_ops: IndexOpsLike, dtype: Union[str, type, Dtype]) -> IndexOpsLike:
dtype, spark_type = pandas_on_spark_type(dtype)
if isinstance(dtype, CategoricalDtype):
return _as_categorical_type(index_ops, dtype, spark_type)
elif isinstance(spark_type, BooleanType):
return _as_bool_type(index_ops, dtype)
elif isinstance(spark_type, StringType):
return _as_string_type(index_ops, dtype)
else:
return _as_other_type(index_ops, dtype, spark_type)
| apache-2.0 |
ChanderG/scikit-learn | examples/applications/plot_tomography_l1_reconstruction.py | 204 | 5442 | """
======================================================================
Compressive sensing: tomography reconstruction with L1 prior (Lasso)
======================================================================
This example shows the reconstruction of an image from a set of parallel
projections, acquired along different angles. Such a dataset is acquired in
**computed tomography** (CT).
Without any prior information on the sample, the number of projections
required to reconstruct the image is of the order of the linear size
``l`` of the image (in pixels). For simplicity we consider here a sparse
image, where only pixels on the boundary of objects have a non-zero
value. Such data could correspond for example to a cellular material.
Note however that most images are sparse in a different basis, such as
the Haar wavelets. Only ``l/7`` projections are acquired, therefore it is
necessary to use prior information available on the sample (its
sparsity): this is an example of **compressive sensing**.
The tomography projection operation is a linear transformation. In
addition to the data-fidelity term corresponding to a linear regression,
we penalize the L1 norm of the image to account for its sparsity. The
resulting optimization problem is called the :ref:`lasso`. We use the
class :class:`sklearn.linear_model.Lasso`, that uses the coordinate descent
algorithm. Importantly, this implementation is more computationally efficient
on a sparse matrix, than the projection operator used here.
The reconstruction with L1 penalization gives a result with zero error
(all pixels are successfully labeled with 0 or 1), even if noise was
added to the projections. In comparison, an L2 penalization
(:class:`sklearn.linear_model.Ridge`) produces a large number of labeling
errors for the pixels. Important artifacts are observed on the
reconstructed image, contrary to the L1 penalization. Note in particular
the circular artifact separating the pixels in the corners, that have
contributed to fewer projections than the central disk.
"""
print(__doc__)
# Author: Emmanuelle Gouillart <[email protected]>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from scipy import ndimage
from sklearn.linear_model import Lasso
from sklearn.linear_model import Ridge
import matplotlib.pyplot as plt
def _weights(x, dx=1, orig=0):
x = np.ravel(x)
floor_x = np.floor((x - orig) / dx)
alpha = (x - orig - floor_x * dx) / dx
return np.hstack((floor_x, floor_x + 1)), np.hstack((1 - alpha, alpha))
def _generate_center_coordinates(l_x):
X, Y = np.mgrid[:l_x, :l_x]
center = l_x / 2.
X += 0.5 - center
Y += 0.5 - center
return X, Y
def build_projection_operator(l_x, n_dir):
""" Compute the tomography design matrix.
Parameters
----------
l_x : int
linear size of image array
n_dir : int
number of angles at which projections are acquired.
Returns
-------
p : sparse matrix of shape (n_dir l_x, l_x**2)
"""
X, Y = _generate_center_coordinates(l_x)
angles = np.linspace(0, np.pi, n_dir, endpoint=False)
data_inds, weights, camera_inds = [], [], []
data_unravel_indices = np.arange(l_x ** 2)
data_unravel_indices = np.hstack((data_unravel_indices,
data_unravel_indices))
for i, angle in enumerate(angles):
Xrot = np.cos(angle) * X - np.sin(angle) * Y
inds, w = _weights(Xrot, dx=1, orig=X.min())
mask = np.logical_and(inds >= 0, inds < l_x)
weights += list(w[mask])
camera_inds += list(inds[mask] + i * l_x)
data_inds += list(data_unravel_indices[mask])
proj_operator = sparse.coo_matrix((weights, (camera_inds, data_inds)))
return proj_operator
def generate_synthetic_data():
""" Synthetic binary data """
rs = np.random.RandomState(0)
n_pts = 36.
x, y = np.ogrid[0:l, 0:l]
mask_outer = (x - l / 2) ** 2 + (y - l / 2) ** 2 < (l / 2) ** 2
mask = np.zeros((l, l))
points = l * rs.rand(2, n_pts)
mask[(points[0]).astype(np.int), (points[1]).astype(np.int)] = 1
mask = ndimage.gaussian_filter(mask, sigma=l / n_pts)
res = np.logical_and(mask > mask.mean(), mask_outer)
return res - ndimage.binary_erosion(res)
# Generate synthetic images, and projections
l = 128
proj_operator = build_projection_operator(l, l / 7.)
data = generate_synthetic_data()
proj = proj_operator * data.ravel()[:, np.newaxis]
proj += 0.15 * np.random.randn(*proj.shape)
# Reconstruction with L2 (Ridge) penalization
rgr_ridge = Ridge(alpha=0.2)
rgr_ridge.fit(proj_operator, proj.ravel())
rec_l2 = rgr_ridge.coef_.reshape(l, l)
# Reconstruction with L1 (Lasso) penalization
# the best value of alpha was determined using cross validation
# with LassoCV
rgr_lasso = Lasso(alpha=0.001)
rgr_lasso.fit(proj_operator, proj.ravel())
rec_l1 = rgr_lasso.coef_.reshape(l, l)
plt.figure(figsize=(8, 3.3))
plt.subplot(131)
plt.imshow(data, cmap=plt.cm.gray, interpolation='nearest')
plt.axis('off')
plt.title('original image')
plt.subplot(132)
plt.imshow(rec_l2, cmap=plt.cm.gray, interpolation='nearest')
plt.title('L2 penalization')
plt.axis('off')
plt.subplot(133)
plt.imshow(rec_l1, cmap=plt.cm.gray, interpolation='nearest')
plt.title('L1 penalization')
plt.axis('off')
plt.subplots_adjust(hspace=0.01, wspace=0.01, top=1, bottom=0, left=0,
right=1)
plt.show()
| bsd-3-clause |
RomainBrault/scikit-learn | examples/cluster/plot_feature_agglomeration_vs_univariate_selection.py | 87 | 3903 | """
==============================================
Feature agglomeration vs. univariate selection
==============================================
This example compares 2 dimensionality reduction strategies:
- univariate feature selection with Anova
- feature agglomeration with Ward hierarchical clustering
Both methods are compared in a regression problem using
a BayesianRidge as supervised estimator.
"""
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
print(__doc__)
import shutil
import tempfile
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg, ndimage
from sklearn.feature_extraction.image import grid_to_graph
from sklearn import feature_selection
from sklearn.cluster import FeatureAgglomeration
from sklearn.linear_model import BayesianRidge
from sklearn.pipeline import Pipeline
from sklearn.externals.joblib import Memory
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import KFold
###############################################################################
# Generate data
n_samples = 200
size = 40 # image size
roi_size = 15
snr = 5.
np.random.seed(0)
mask = np.ones([size, size], dtype=np.bool)
coef = np.zeros((size, size))
coef[0:roi_size, 0:roi_size] = -1.
coef[-roi_size:, -roi_size:] = 1.
X = np.random.randn(n_samples, size ** 2)
for x in X: # smooth data
x[:] = ndimage.gaussian_filter(x.reshape(size, size), sigma=1.0).ravel()
X -= X.mean(axis=0)
X /= X.std(axis=0)
y = np.dot(X, coef.ravel())
noise = np.random.randn(y.shape[0])
noise_coef = (linalg.norm(y, 2) / np.exp(snr / 20.)) / linalg.norm(noise, 2)
y += noise_coef * noise # add noise
###############################################################################
# Compute the coefs of a Bayesian Ridge with GridSearch
cv = KFold(2) # cross-validation generator for model selection
ridge = BayesianRidge()
cachedir = tempfile.mkdtemp()
mem = Memory(cachedir=cachedir, verbose=1)
# Ward agglomeration followed by BayesianRidge
connectivity = grid_to_graph(n_x=size, n_y=size)
ward = FeatureAgglomeration(n_clusters=10, connectivity=connectivity,
memory=mem)
clf = Pipeline([('ward', ward), ('ridge', ridge)])
# Select the optimal number of parcels with grid search
clf = GridSearchCV(clf, {'ward__n_clusters': [10, 20, 30]}, n_jobs=1, cv=cv)
clf.fit(X, y) # set the best parameters
coef_ = clf.best_estimator_.steps[-1][1].coef_
coef_ = clf.best_estimator_.steps[0][1].inverse_transform(coef_)
coef_agglomeration_ = coef_.reshape(size, size)
# Anova univariate feature selection followed by BayesianRidge
f_regression = mem.cache(feature_selection.f_regression) # caching function
anova = feature_selection.SelectPercentile(f_regression)
clf = Pipeline([('anova', anova), ('ridge', ridge)])
# Select the optimal percentage of features with grid search
clf = GridSearchCV(clf, {'anova__percentile': [5, 10, 20]}, cv=cv)
clf.fit(X, y) # set the best parameters
coef_ = clf.best_estimator_.steps[-1][1].coef_
coef_ = clf.best_estimator_.steps[0][1].inverse_transform(coef_.reshape(1, -1))
coef_selection_ = coef_.reshape(size, size)
###############################################################################
# Inverse the transformation to plot the results on an image
plt.close('all')
plt.figure(figsize=(7.3, 2.7))
plt.subplot(1, 3, 1)
plt.imshow(coef, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("True weights")
plt.subplot(1, 3, 2)
plt.imshow(coef_selection_, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("Feature Selection")
plt.subplot(1, 3, 3)
plt.imshow(coef_agglomeration_, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("Feature Agglomeration")
plt.subplots_adjust(0.04, 0.0, 0.98, 0.94, 0.16, 0.26)
plt.show()
# Attempt to remove the temporary cachedir, but don't worry if it fails
shutil.rmtree(cachedir, ignore_errors=True)
| bsd-3-clause |
maxiee/MyCodes | KalmanAndBesianFiltersInPython/Chapter5_One_dimen_kalman/utils/mkf_internal.py | 1 | 8661 | # -*- coding: utf-8 -*-
"""
Created on Thu May 1 16:56:49 2014
@author: rlabbe
"""
import numpy as np
from matplotlib.patches import Ellipse
import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
from numpy.random import multivariate_normal
import stats
def show_residual_chart():
plt.xlim([0.9,2.5])
plt.ylim([1.5,3.5])
plt.scatter ([1,2,2],[2,3,2.3])
plt.scatter ([2],[2.8],marker='o')
ax = plt.axes()
ax.annotate('', xy=(2,3), xytext=(1,2),
arrowprops=dict(arrowstyle='->', ec='#004080',
lw=2,
shrinkA=3, shrinkB=4))
ax.annotate('prediction', xy=(2.04,3.), color='#004080')
ax.annotate('measurement', xy=(2.05, 2.28))
ax.annotate('prior estimate', xy=(1, 1.9))
ax.annotate('residual', xy=(2.04,2.6), color='#e24a33')
ax.annotate('new estimate', xy=(2,2.8),xytext=(2.1,2.8),
arrowprops=dict(arrowstyle='->', ec="k", shrinkA=3, shrinkB=4))
ax.annotate('', xy=(2,3), xytext=(2,2.3),
arrowprops=dict(arrowstyle="-",
ec="#e24a33",
lw=2,
shrinkA=5, shrinkB=5))
plt.title("Kalman Filter Predict and Update")
plt.axis('equal')
plt.show()
def show_position_chart():
""" Displays 3 measurements at t=1,2,3, with x=1,2,3"""
plt.scatter ([1,2,3], [1,2,3], s=128, color='#004080')
plt.xlim([0,4]);
plt.ylim([0,4])
plt.annotate('t=1', xy=(1,1), xytext=(0,-10),
textcoords='offset points', ha='center', va='top')
plt.annotate('t=2', xy=(2,2), xytext=(0,-10),
textcoords='offset points', ha='center', va='top')
plt.annotate('t=3', xy=(3,3), xytext=(0,-10),
textcoords='offset points', ha='center', va='top')
plt.xlabel("X")
plt.ylabel("Y")
plt.xticks(np.arange(1,4,1))
plt.yticks(np.arange(1,4,1))
plt.show()
def show_position_prediction_chart():
""" displays 3 measurements, with the next position predicted"""
plt.scatter ([1,2,3], [1,2,3], s=128, color='#004080')
plt.annotate('t=1', xy=(1,1), xytext=(0,-10),
textcoords='offset points', ha='center', va='top')
plt.annotate('t=2', xy=(2,2), xytext=(0,-10),
textcoords='offset points', ha='center', va='top')
plt.annotate('t=3', xy=(3,3), xytext=(0,-10),
textcoords='offset points', ha='center', va='top')
plt.xlim([0,5])
plt.ylim([0,5])
plt.xlabel("Position")
plt.ylabel("Time")
plt.xticks(np.arange(1,5,1))
plt.yticks(np.arange(1,5,1))
plt.scatter ([4], [4], c='g',s=128, color='#8EBA42')
ax = plt.axes()
ax.annotate('', xy=(4,4), xytext=(3,3),
arrowprops=dict(arrowstyle='->',
ec='g',
shrinkA=6, shrinkB=5,
lw=3))
plt.show()
def show_x_error_chart(count):
""" displays x=123 with covariances showing error"""
plt.cla()
plt.gca().autoscale(tight=True)
cov = np.array([[0.03,0], [0,8]])
e = stats.covariance_ellipse (cov)
cov2 = np.array([[0.03,0], [0,4]])
e2 = stats.covariance_ellipse (cov2)
cov3 = np.array([[12,11.95], [11.95,12]])
e3 = stats.covariance_ellipse (cov3)
sigma=[1, 4, 9]
if count >= 1:
stats.plot_covariance_ellipse ((0,0), ellipse=e, variance=sigma)
if count == 2 or count == 3:
stats.plot_covariance_ellipse ((5,5), ellipse=e, variance=sigma)
if count == 3:
stats.plot_covariance_ellipse ((5,5), ellipse=e3, variance=sigma,
edgecolor='r')
if count == 4:
M1 = np.array([[5, 5]]).T
m4, cov4 = stats.multivariate_multiply(M1, cov2, M1, cov3)
e4 = stats.covariance_ellipse (cov4)
stats.plot_covariance_ellipse ((5,5), ellipse=e, variance=sigma,
alpha=0.25)
stats.plot_covariance_ellipse ((5,5), ellipse=e3, variance=sigma,
edgecolor='r', alpha=0.25)
stats.plot_covariance_ellipse (m4[:,0], ellipse=e4, variance=sigma)
#plt.ylim([0,11])
#plt.xticks(np.arange(1,4,1))
plt.xlabel("Position")
plt.ylabel("Velocity")
plt.show()
def show_x_with_unobserved():
""" shows x=1,2,3 with velocity superimposed on top """
# plot velocity
sigma=[0.5,1.,1.5,2]
cov = np.array([[1,1],[1,1.1]])
stats.plot_covariance_ellipse ((2,2), cov=cov, variance=sigma, axis_equal=False)
# plot positions
cov = np.array([[0.003,0], [0,12]])
sigma=[0.5,1.,1.5,2]
e = stats.covariance_ellipse (cov)
stats.plot_covariance_ellipse ((1,1), ellipse=e, variance=sigma, axis_equal=False)
stats.plot_covariance_ellipse ((2,1), ellipse=e, variance=sigma, axis_equal=False)
stats.plot_covariance_ellipse ((3,1), ellipse=e, variance=sigma, axis_equal=False)
    # plot intersection circle
isct = Ellipse(xy=(2,2), width=.2, height=1.2, edgecolor='r', fc='None', lw=4)
plt.gca().add_artist(isct)
plt.ylim([0,11])
plt.xlim([0,4])
plt.xticks(np.arange(1,4,1))
plt.xlabel("Position")
plt.ylabel("Time")
plt.show()
def plot_3d_covariance(mean, cov):
""" plots a 2x2 covariance matrix positioned at mean. mean will be plotted
in x and y, and the probability in the z axis.
Parameters
----------
mean : 2x1 tuple-like object
mean for x and y coordinates. For example (2.3, 7.5)
cov : 2x2 nd.array
the covariance matrix
"""
# compute width and height of covariance ellipse so we can choose
# appropriate ranges for x and y
o,w,h = stats.covariance_ellipse(cov,3)
# rotate width and height to x,y axis
wx = abs(w*np.cos(o) + h*np.sin(o))*1.2
wy = abs(h*np.cos(o) - w*np.sin(o))*1.2
    # ensure axes are of the same size so everything is plotted with the same
# scale
if wx > wy:
w = wx
else:
w = wy
minx = mean[0] - w
maxx = mean[0] + w
miny = mean[1] - w
maxy = mean[1] + w
xs = np.arange(minx, maxx, (maxx-minx)/40.)
ys = np.arange(miny, maxy, (maxy-miny)/40.)
xv, yv = np.meshgrid (xs, ys)
zs = np.array([100.* stats.multivariate_gaussian(np.array([x,y]),mean,cov) \
for x,y in zip(np.ravel(xv), np.ravel(yv))])
zv = zs.reshape(xv.shape)
ax = plt.figure().add_subplot(111, projection='3d')
ax.plot_surface(xv, yv, zv, rstride=1, cstride=1, cmap=cm.autumn)
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.contour(xv, yv, zv, zdir='x', offset=minx-1, cmap=cm.autumn)
ax.contour(xv, yv, zv, zdir='y', offset=maxy, cmap=cm.BuGn)
def plot_3d_sampled_covariance(mean, cov):
""" plots a 2x2 covariance matrix positioned at mean. mean will be plotted
in x and y, and the probability in the z axis.
Parameters
----------
mean : 2x1 tuple-like object
mean for x and y coordinates. For example (2.3, 7.5)
cov : 2x2 nd.array
the covariance matrix
"""
# compute width and height of covariance ellipse so we can choose
# appropriate ranges for x and y
o,w,h = stats.covariance_ellipse(cov,3)
# rotate width and height to x,y axis
wx = abs(w*np.cos(o) + h*np.sin(o))*1.2
wy = abs(h*np.cos(o) - w*np.sin(o))*1.2
    # ensure axes are of the same size so everything is plotted with the same
# scale
if wx > wy:
w = wx
else:
w = wy
minx = mean[0] - w
maxx = mean[0] + w
miny = mean[1] - w
maxy = mean[1] + w
count = 1000
x,y = multivariate_normal(mean=mean, cov=cov, size=count).T
xs = np.arange(minx, maxx, (maxx-minx)/40.)
ys = np.arange(miny, maxy, (maxy-miny)/40.)
xv, yv = np.meshgrid (xs, ys)
zs = np.array([100.* stats.multivariate_gaussian(np.array([xx,yy]),mean,cov) \
for xx,yy in zip(np.ravel(xv), np.ravel(yv))])
zv = zs.reshape(xv.shape)
ax = plt.figure().add_subplot(111, projection='3d')
ax.scatter(x,y, [0]*count, marker='.')
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.contour(xv, yv, zv, zdir='x', offset=minx-1, cmap=cm.autumn)
ax.contour(xv, yv, zv, zdir='y', offset=maxy, cmap=cm.BuGn)
if __name__ == "__main__":
#show_position_chart()
#plot_3d_covariance((2,7), np.array([[8.,0],[0,4.]]))
#plot_3d_sampled_covariance([2,7], [[8.,0],[0,4.]])
#show_residual_chart()
#show_position_chart()
show_x_error_chart(4)
| gpl-3.0 |
ArteliaTelemac/PostTelemac | PostTelemac/meshlayerparsers/libs_telemac/samplers/meshes.py | 2 | 18033 | """@author Sebastien E. Bourban
"""
"""@note ... this work is based on a collaborative effort between
.________. ,--.
| | . ( (
|,-. / HR Wallingford EDF - LNHE / \_ \_/ .--.
/ \ / Howbery Park, 6, quai Watier \ ) /_ )
,. `' Wallingford, Oxfordshire 78401 Cedex `-'_ __ `--
/ \ / OX10 8BA, United Kingdom Chatou, France __/ \ \ `.
/ `-'| www.hrwallingford.com innovation.edf.com | ) ) )
!________! `--' `--
"""
"""@history 12/12/2012 -- Sebastien E. Bourban
Many methods developped for application to meshes. The latest
one being about subdivision of meshes.
"""
"""@brief
Tools for sampling and interpolating through triangular meshes
"""
"""@details
Contains ...
"""
"""@history 20/06/2013 -- Sebastien E. Bourban
A new method, sliceMesh, now replaces crossMesh and all of the
   Ray Tracing algorithms. The latter will remain for reference and
   maybe for future uses, but sliceMesh should now be used.
"""
# _____ ___________________________________________________
# ____/ Imports /__________________________________________________/
#
# ~~> dependencies towards standard python
import sys
from os import path
import numpy as np
import math
from scipy.spatial import cKDTree
import matplotlib.path as mplPath
# from scipy.spatial import Delaunay
from matplotlib.tri import Triangulation
sys.path.append(path.join(path.dirname(sys.argv[0]), ".."))
# ~~> dependencies towards other modules
from ..config import OptionParser
# ~~> dependencies towards other pytel/modules
from ..parsers.parserSELAFIN import SELAFIN
from ..utilstelemac.progressbar import ProgressBar
from ..utilstelemac.geometry import (
isCCW,
getSegmentIntersection,
getBarycentricWeights,
isInsideTriangle,
getDistancePointToLine,
)
from ..samplers.polygons import isClockwise, joinSegments
# _____ ___________________________________________
# ____/ General Toolbox /__________________________________________/
#
def nearLocateMesh(xyo, IKLE, MESHX, MESHY, tree=None):
"""
Requires the scipy.spatial and the matplotlib.tri packages to be loaded.
- Will use already computed tree or re-create it if necessary.
- Will use already computed neighbourhood or re-create it if necessary.
    This function returns the element number for the triangle including xyo=(xo,yo)
    or -1 if (xo,yo) is outside the mesh
    Return: the element, the barycentric weights, and the KDTree (re-used or newly computed)
"""
# ~~> Create the KDTree of the iso-barycentres
    if tree is None:
isoxy = np.column_stack((np.sum(MESHX[IKLE], axis=1) / 3.0, np.sum(MESHY[IKLE], axis=1) / 3.0))
tree = cKDTree(isoxy)
# ~~> Find the indices corresponding to the nearest elements to the points
inear = -1
for d, i in zip(*tree.query(xyo, 8)):
ax, bx, cx = MESHX[IKLE[i]]
ay, by, cy = MESHY[IKLE[i]]
w = isInsideTriangle(xyo, (ax, ay), (bx, by), (cx, cy), nomatter=True)
if w != []:
return i, w, tree
if inear < 0:
inear = i
dnear = d
if dnear > d:
inear = i
dnear = d
# ~~> Find the indices and weights corresponding to the element containing the point
ax, bx, cx = MESHX[IKLE[inear]]
ay, by, cy = MESHY[IKLE[inear]]
return inear, isInsideTriangle(xyo, (ax, ay), (bx, by), (cx, cy), nomatter=False), tree
def dichoLocateMesh(rank, e1, xy1, e2, xy2, IKLE, MESHX, MESHY, tree):
"""
Will find at least one point between xy1 and xy2 that is within the mesh
"""
# ~~ Position the middle point ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
xyo = [(xy1[0] + xy2[0]) / 2.0, (xy1[1] + xy2[1]) / 2.0]
eo, bo, tree = nearLocateMesh(xyo, IKLE, MESHX, MESHY, tree)
if bo != []:
return True, eo, xyo, bo
# ~~ Limit the number of useless dichotomies ~~~~~~~~~~~~~~~~~~~~
rank = rank + 1
if rank > 3:
return False, eo, xyo, bo
# ~~ Sub-segments ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
found, ej, xyj, bj = dichoLocateMesh(rank, e1, xy1, eo, xyo, IKLE, MESHX, MESHY, tree)
if found:
return found, ej, xyj, bj
found, ej, xyj, bj = dichoLocateMesh(rank, eo, xyo, e2, xy2, IKLE, MESHX, MESHY, tree)
if found:
return found, ej, xyj, bj
return False, eo, xyo, bo
def xyTraceMesh(inear, xyi, xyo, IKLE, MESHX, MESHY, neighbours=None):
"""
Requires the matplotlib.tri package to be loaded.
- Will use already computed neighbourhood or re-create it if necessary.
    This function returns the element number for the triangle including xyo=(xo,yo)
    or -1 if (xo,yo) is outside the mesh. It creates the neighbouring connectivity
    map and ray-traces from xyi to xyo
    Return: whether xyo was found within the mesh, the traced ray (crossing
    points, elements and barycentric weights), and the neighbourhood if computed
"""
    if neighbours is None:
neighbours = Triangulation(MESHX, MESHY, IKLE).get_cpp_triangulation().get_neighbors()
found, ray = traceRay2XY(IKLE, MESHX, MESHY, neighbours, inear, xyi, inear, xyo)
return found, ray, neighbours
def subdivideMesh(IKLE, MESHX, MESHY):
"""
Requires the matplotlib.tri package to be loaded.
- Will use already computed edges or re-create it if necessary.
    This function returns a new tuple IKLE,MESHX,MESHY where each triangle has been
    subdivided into 4.
"""
# ~~> Singling out edges
from matplotlib.tri import Triangulation
edges = Triangulation(MESHX, MESHY, IKLE).get_cpp_triangulation().get_edges()
# ~~> Memory allocation for new MESH
IELEM = len(IKLE)
IPOIN = len(MESHX)
IEDGE = len(edges)
JKLE = np.zeros((IELEM * 4, 3), dtype=np.int) # you subdivide every elements by 4
MESHJ = np.zeros((IEDGE, 2), dtype=np.int) # you add one point on every edges
# ~~> Lookup tables for node numbering on common edges
pa, pb = edges.T
k1b, k1a = np.sort(np.take(IKLE, [0, 1], axis=1)).T
indx1 = np.searchsorted(pa, k1a)
jndx1 = np.searchsorted(pa, k1a, side="right")
k2b, k2a = np.sort(np.take(IKLE, [1, 2], axis=1)).T
indx2 = np.searchsorted(pa, k2a)
jndx2 = np.searchsorted(pa, k2a, side="right")
k3b, k3a = np.sort(np.take(IKLE, [2, 0], axis=1)).T
indx3 = np.searchsorted(pa, k3a)
jndx3 = np.searchsorted(pa, k3a, side="right")
# ~~> Building one triangle at a time /!\ Please get this loop parallelised
j = 0
for i in range(IELEM):
k1 = indx1[i] + np.searchsorted(pb[indx1[i] : jndx1[i]], k1b[i])
k2 = indx2[i] + np.searchsorted(pb[indx2[i] : jndx2[i]], k2b[i])
k3 = indx3[i] + np.searchsorted(pb[indx3[i] : jndx3[i]], k3b[i])
# ~~> New connectivity JKLE
JKLE[j] = [IKLE[i][0], IPOIN + k1, IPOIN + k3]
JKLE[j + 1] = [IKLE[i][1], IPOIN + k2, IPOIN + k1]
JKLE[j + 2] = [IKLE[i][2], IPOIN + k3, IPOIN + k2]
JKLE[j + 3] = [IPOIN + k1, IPOIN + k2, IPOIN + k3]
# ~~> New interpolation references for values and coordinates
MESHJ[k1] = [IKLE[i][0], IKLE[i][1]]
MESHJ[k2] = [IKLE[i][1], IKLE[i][2]]
MESHJ[k3] = [IKLE[i][2], IKLE[i][0]]
j += 4
# ~~> Reset IPOBO while you are at it
MESHX = np.resize(MESHX, IPOIN + IEDGE)
MESHY = np.resize(MESHY, IPOIN + IEDGE)
MESHX[IPOIN:] = np.sum(MESHX[MESHJ], axis=1) / 2.0
MESHY[IPOIN:] = np.sum(MESHY[MESHJ], axis=1) / 2.0
neighbours = Triangulation(MESHX, MESHY, JKLE).get_cpp_triangulation().get_neighbors()
JPOBO = np.zeros(IPOIN + IEDGE, np.int)
for n in range(IELEM * 4):
s1, s2, s3 = neighbours[n]
e1, e2, e3 = JKLE[n]
if s1 < 0:
JPOBO[e1] = e1 + 1
JPOBO[e2] = e2 + 1
if s2 < 0:
JPOBO[e2] = e2 + 1
JPOBO[e3] = e3 + 1
if s3 < 0:
JPOBO[e3] = e3 + 1
JPOBO[e1] = e1 + 1
return JKLE, MESHX, MESHY, JPOBO, MESHJ
def traceRay2XY(IKLE, MESHX, MESHY, neighbours, ei, xyi, en, xyn):
"""
This assumes that you cannot go back on your ray.
"""
# ~~> latest addition to the ray
ax, bx, cx = MESHX[IKLE[en]]
ay, by, cy = MESHY[IKLE[en]]
bi = getBarycentricWeights(xyi, (ax, ay), (bx, by), (cx, cy))
pnt = {
"n": 1,
"xy": [xyi],
"e": [en],
"b": [bi],
"d": [np.power(xyi[0] - xyn[0], 2) + np.power(xyi[1] - xyn[1], 2)],
}
# ~~> convergence on distance to target xyn
accuracy = np.power(10.0, -5 + np.floor(np.log10(abs(ax + bx + cx + ay + by + cy))))
if pnt["d"][0] < accuracy:
return True, pnt
# ~~> get the ray through to the farthest neighbouring edges
ks = []
ds = []
for k in [0, 1, 2]:
xyj = getSegmentIntersection(
(MESHX[IKLE[en][k]], MESHY[IKLE[en][k]]),
(MESHX[IKLE[en][(k + 1) % 3]], MESHY[IKLE[en][(k + 1) % 3]]),
xyi,
xyn,
)
if xyj == []:
continue # there are no intersection with that edges
ej = neighbours[en][k]
if ej == ei:
continue # you should not back track on your ray
xyj = xyj[0]
dij = np.power(xyi[0] - xyj[0], 2) + np.power(xyi[1] - xyj[1], 2)
ks.append(k)
ds.append(dij)
if ds != []:
k = ks[np.argmax(ds)]
ej = neighbours[en][k]
xyj = getSegmentIntersection(
(MESHX[IKLE[en][k]], MESHY[IKLE[en][k]]),
(MESHX[IKLE[en][(k + 1) % 3]], MESHY[IKLE[en][(k + 1) % 3]]),
xyi,
xyn,
)[0]
djn = np.power(xyn[0] - xyj[0], 2) + np.power(xyn[1] - xyj[1], 2)
# ~~> Possible recursive call
if True or djn > accuracy: # /!\ this may be a problem
if ej < 0:
# you have reach the end of the line
bj = getBarycentricWeights(xyj, (ax, ay), (bx, by), (cx, cy))
pnt["n"] += 1
pnt["xy"].insert(0, xyj)
pnt["e"].insert(0, en)
pnt["b"].insert(0, bj)
pnt["d"].insert(0, djn)
return djn < accuracy, pnt
else:
found, ray = traceRay2XY(IKLE, MESHX, MESHY, neighbours, en, xyj, ej, xyn)
ray["n"] += 1
ray["xy"].append(xyi)
ray["e"].append(en)
ray["b"].append(bi)
ray["d"].append(dij)
return found, ray
# ~~> convergence on having found the appropriate triangle
bn = isInsideTriangle(xyn, (ax, ay), (bx, by), (cx, cy))
if bn != []:
pnt["n"] += 1
pnt["xy"].insert(0, xyn)
pnt["e"].insert(0, en)
pnt["b"].insert(0, bn)
pnt["d"].insert(0, 0.0)
return True, pnt
# ~~> you should not be here !
return False, pnt
def xysLocateMesh(xyo, IKLE, MESHX, MESHY, tree=None, neighbours=None):
# ~~> get to the nearest element
oet = -1
obr = [0.0, 0.0, 0.0]
eo, bo, tree = nearLocateMesh(np.array(xyo), IKLE, MESHX, MESHY, tree)
if bo == []:
found, ray, neighbours = xyTraceMesh(
eo, [np.sum(MESHX[IKLE[eo]]) / 3.0, np.sum(MESHY[IKLE[eo]]) / 3.0], xyo, IKLE, MESHX, MESHY, neighbours
)
if found:
obr = ray["b"][ray["n"]]
oet = ray["e"][ray["n"]]
else:
obr = bo
oet = eo
if oet == -1:
return [-1, -1, -1], obr
return IKLE[oet], obr
def crossMesh(polyline, IKLE, MESHX, MESHY, tree=None, neighbours=None):
"""
"""
# ~~ Intersection nodes ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
ipt = []
iet = []
ibr = []
# ~~ Locate nodes of the polyline ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
xyo = polyline[0]
eo, bo, tree = nearLocateMesh(xyo, IKLE, MESHX, MESHY, tree)
for i in range(len(polyline) - 1):
xyi = polyline[i + 1]
ei, bi, tree = nearLocateMesh(xyi, IKLE, MESHX, MESHY, tree)
if bo == [] and bi == []:
rank = 0
found, ej, xyj, bj = dichoLocateMesh(rank, eo, xyo, ei, xyi, IKLE, MESHX, MESHY, tree)
if not found:
print "... Could not find easily an intersection with the mesh"
sys.exit(1)
found, rayo, neighbours = xyTraceMesh(ej, xyj, xyo, IKLE, MESHX, MESHY, neighbours)
# print 'raya'
for j in range(rayo["n"])[:-1]:
# print rayo['e'][j],rayo['xy'][j]
ipt.append(rayo["xy"][j])
iet.append(rayo["e"][j])
ibr.append(rayo["b"][j])
found, rayi, neighbours = xyTraceMesh(ej, xyj, xyi, IKLE, MESHX, MESHY, neighbours)
# print 'rayb'
for j in range(rayi["n"])[(rayi["n"] - 1) :: -1]:
# print rayi['e'][j],rayi['xy'][j]
ipt.append(rayi["xy"][j])
iet.append(rayi["e"][j])
ibr.append(rayi["b"][j])
elif bi == [] and bo != []:
found, rayi, neighbours = xyTraceMesh(eo, xyo, xyi, IKLE, MESHX, MESHY, neighbours)
# print 'rayc'
for j in range(rayi["n"])[(rayi["n"] - 1) :: -1]:
# print rayi['e'][j],rayi['xy'][j]
ipt.append(rayi["xy"][j])
iet.append(rayi["e"][j])
ibr.append(rayi["b"][j])
elif bi != [] and bo == []:
# it is necessary to reverse the ray for a case with first end outside
found, rayo, neighbours = xyTraceMesh(ei, xyi, xyo, IKLE, MESHX, MESHY, neighbours)
# print 'rayd'
for j in range(rayo["n"]): # [(rayo['n']-1)::-1]:
# print rayo['e'][j],rayo['xy'][j]
ipt.append(rayo["xy"][j])
iet.append(rayo["e"][j])
ibr.append(rayo["b"][j])
else:
found, rayi, neighbours = xyTraceMesh(eo, xyo, xyi, IKLE, MESHX, MESHY, neighbours)
# print 'rayi',rayi
for j in range(rayi["n"])[(rayi["n"] - 1) :: -1]:
# print rayi['e'][j],rayi['xy'][j]
ipt.append(rayi["xy"][j])
iet.append(rayi["e"][j])
ibr.append(rayi["b"][j])
xyo = xyi
bo = bi
eo = ei
return (ipt, iet, ibr), tree, neighbours
def sliceMesh(polyline, IKLE, MESHX, MESHY, tree=None):
"""
A new method to slice through a triangular mesh (replaces crossMesh)
"""
from matplotlib.tri import Triangulation
xys = []
douplets = []
# ~~> Calculate the minimum mesh resolution
dxy = math.sqrt(
min(
np.square(np.sum(np.fabs(MESHX[IKLE] - MESHX[np.roll(IKLE, 1)]), axis=1) / 3.0)
+ np.square(np.sum(np.fabs(MESHY[IKLE] - MESHY[np.roll(IKLE, 1)]), axis=1) / 3.0)
)
)
accuracy = np.power(10.0, -8 + np.floor(np.log10(dxy)))
xyo = np.array(polyline[0])
for i in range(len(polyline) - 1):
xyi = np.array(polyline[i + 1])
dio = math.sqrt(sum(np.square(xyo - xyi)))
# ~~> Resample the line to that minimum mesh resolution
rsmpline = np.dstack(
(np.linspace(xyo[0], xyi[0], num=int(dio / dxy)), np.linspace(xyo[1], xyi[1], num=int(dio / dxy)))
)[0]
nbpoints = len(rsmpline)
nbneighs = min(8, len(IKLE))
# ~~> Filter closest 8 elements (please create a good mesh) as a halo around the polyline
halo = np.zeros((nbpoints, nbneighs), dtype=np.int)
for i in range(nbpoints):
d, e = tree.query(rsmpline[i], nbneighs)
halo[i] = e
halo = np.unique(halo)
# ~~> Get the intersecting halo (on a smaller mesh connectivity)
edges = Triangulation(MESHX, MESHY, IKLE[halo]).get_cpp_triangulation().get_edges()
# ~~> Last filter, all nodes that are on the polyline
olah = []
nodes = np.unique(edges)
for node in nodes: # TODO(jcp): replace by numpy calcs
if getDistancePointToLine((MESHX[node], MESHY[node]), xyo, xyi) < accuracy:
olah.append(node)
ijsect = zip(olah, olah)
xysect = [(MESHX[i], MESHY[i]) for i in olah]
lmsect = [(1.0, 0.0) for i in range(len(ijsect))]
mask = np.zeros((len(edges), 2), dtype=bool)
for i in olah:
mask = np.logical_or(edges == i, mask)
edges = np.compress(np.logical_not(np.any(mask, axis=1)), edges, axis=0)
# ~~> Intersection with remaining edges
for edge in edges:
xyj = getSegmentIntersection((MESHX[edge[0]], MESHY[edge[0]]), (MESHX[edge[1]], MESHY[edge[1]]), xyo, xyi)
if xyj != []:
ijsect.append(edge) # nodes from the mesh
xysect.append(tuple(xyj[0])) # intersection (xo,yo)
                lmsect.append((xyj[1], 1.0 - xyj[1]))  # weights along the edge
# ~~> Final sorting along keys x and y
xysect = np.array(xysect, dtype=[("x", "<f4"), ("y", "<f4")])
xysort = np.argsort(xysect, order=("x", "y"))
# ~~> Move on to next point
for i in xysort:
xys.append(xysect[i])
douplets.append((ijsect[i], lmsect[i]))
xyo = xyi
return xys, douplets
# _____ ________________________________________________
# ____/ MAIN CALL /_______________________________________________/
#
__author__ = "Sebastien E. Bourban"
__date__ = "$12-Dec-2012 08:51:29$"
if __name__ == "__main__":
# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
# ~~~~ Jenkins' success message ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
print "\n\nMy work is done\n\n"
sys.exit(0)
| gpl-3.0 |
Slayr/Data-Science-45min-Intros | choosing-k-in-kmeans/gap_stats.py | 25 | 7550 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
__author__="Josh Montague"
__license__="MIT License"
# Modified from:
# (c) 2014 Reid Johnson
#
# Modified from:
# (c) 2013 Mikael Vejdemo-Johansson
# BSD License
#
#
# The gap statistic is defined by Tibshirani, Walther, Hastie in:
# Estimating the number of clusters in a data set via the gap statistic
# J. R. Statist. Soc. B (2001) 63, Part 2, pp 411-423
import matplotlib.pyplot as plt
import numpy as np
import scipy as sp
import scipy.spatial.distance
import scipy.stats
from sklearn.cluster import KMeans
dst = sp.spatial.distance.euclidean
def gap_statistics(data, refs=None, nrefs=10, ks=range(1,11)):
"""Computes the gap statistics for an nxm dataset.
The gap statistic measures the difference between within-cluster dispersion on an input
dataset and that expected under an appropriate reference null distribution.
Computation of the gap statistic, then, requires a series of reference (null) distributions.
One may either input a precomputed set of reference distributions (via the parameter refs)
or specify the number of reference distributions (via the parameter nrefs) for automatic
generation of uniform distributions within the bounding box of the dataset (data).
Each computation of the gap statistic requires the clustering of the input dataset and of
several reference distributions. To identify the optimal number of clusters k, the gap
statistic is computed over a range of possible values of k (via the parameter ks).
For each value of k, within-cluster dispersion is calculated for the input dataset and each
reference distribution. The calculation of the within-cluster dispersion for the reference
distributions will have a degree of variation, which we measure by standard deviation or
standard error.
The estimated optimal number of clusters, then, is defined as the smallest value k such that
    gap_k is greater than or equal to gap_k+1 minus the expected error err_k+1.
Args:
data ((n,m) SciPy array): The dataset on which to compute the gap statistics.
refs ((n,m,k) SciPy array, optional): A precomputed set of reference distributions.
Defaults to None.
nrefs (int, optional): The number of reference distributions for automatic generation.
            Defaults to 10.
ks (list, optional): The list of values k for which to compute the gap statistics.
Defaults to range(1,11), which creates a list of values from 1 to 10.
Returns:
gaps: an array of gap statistics computed for each k.
errs: an array of standard errors (se), with one corresponding to each gap computation.
        difs: an array of differences between each gap_k and (gap_k+1 minus err_k+1).
"""
shape = data.shape
    if refs is None:
tops = data.max(axis=0) # maxima along the first axis (rows)
bots = data.min(axis=0) # minima along the first axis (rows)
dists = sp.matrix(sp.diag(tops-bots)) # the bounding box of the input dataset
# Generate nrefs uniform distributions each in the half-open interval [0.0, 1.0)
rands = sp.random.random_sample(size=(shape[0],shape[1], nrefs))
# Adjust each of the uniform distributions to the bounding box of the input dataset
for i in range(nrefs):
rands[:,:,i] = rands[:,:,i]*dists+bots
else:
rands = refs
gaps = sp.zeros((len(ks),)) # array for gap statistics (lenth ks)
errs = sp.zeros((len(ks),)) # array for model standard errors (length ks)
difs = sp.zeros((len(ks)-1,)) # array for differences between gaps (length ks-1)
for (i,k) in enumerate(ks): # iterate over the range of k values
# Cluster the input dataset via k-means clustering using the current value of k
kmeans = KMeans(n_clusters=k, n_init=2, n_jobs=-1).fit(data)
(kmc, kml) = kmeans.cluster_centers_, kmeans.labels_
# Generate within-dispersion measure for the clustering of the input dataset
disp = sum([dst(data[m,:],kmc[kml[m],:]) for m in range(shape[0])])
# Generate within-dispersion measures for the clusterings of the reference datasets
refdisps = sp.zeros((rands.shape[2],))
for j in range(rands.shape[2]):
# Cluster the reference dataset via k-means clustering using the current value of k
kmeans = KMeans(n_clusters=k, n_init=2, n_jobs=-1).fit(rands[:,:,j])
(kmc, kml) = kmeans.cluster_centers_, kmeans.labels_
refdisps[j] = sum([dst(rands[m,:,j],kmc[kml[m],:]) for m in range(shape[0])])
# Compute the (estimated) gap statistic for k
gaps[i] = sp.mean(sp.log(refdisps) - sp.log(disp))
# Compute the expected error for k
errs[i] = sp.sqrt(sum(((sp.log(refdisp)-sp.mean(sp.log(refdisps)))**2) \
for refdisp in refdisps)/float(nrefs)) * sp.sqrt(1+1/nrefs)
    # Compute the difference between gap_k and (gap_k+1 minus err_k+1)
difs = sp.array([gaps[k] - (gaps[k+1]-errs[k+1]) for k in range(len(gaps)-1)])
#print "Gaps: " + str(gaps)
#print "Errs: " + str(errs)
#print "Difs: " + str(difs)
return gaps, errs, difs
def plot_gap_statistics(gaps, errs, difs):
"""Generates and shows plots for the gap statistics.
A figure with two subplots is generated. The first subplot is an errorbar plot of the
estimated gap statistics computed for each value of k. The second subplot is a barplot
    of the differences in the computed gap statistics.
Args:
gaps (SciPy array): An array of gap statistics, one computed for each k.
errs (SciPy array): An array of standard errors (se), with one corresponding to each gap
computation.
        difs (SciPy array): An array of differences between each gap_k and
            (gap_k+1 minus err_k+1).
"""
# Create a figure
fig = plt.figure(figsize=(8,8))
#plt.subplots_adjust(wspace=0.35) # adjust the distance between figures
# Subplot 1
ax = fig.add_subplot(211)
ind = range(1,len(gaps)+1) # the x values for the gaps
# Create an errorbar plot
rects = ax.errorbar(ind, gaps, yerr=errs, xerr=None, linewidth=1.0)
# Add figure labels and ticks
ax.set_title('Clustering Gap Statistics', fontsize=16)
ax.set_xlabel('Number of clusters k', fontsize=14)
ax.set_ylabel('Gap Statistic', fontsize=14)
ax.set_xticks(ind)
# Add figure bounds
ax.set_ylim(0, max(gaps+errs)*1.1)
ax.set_xlim(0, len(gaps)+1.0)
# space b/w subplots
fig.subplots_adjust(hspace=.5)
# Subplot 2
ax = fig.add_subplot(212)
ind = range(1,len(difs)+1) # the x values for the difs
max_gap = None
if len(np.where(difs > 0)[0]) > 0:
max_gap = np.where(difs > 0)[0][0] + 1 # the k with the first positive dif
# Create a bar plot
ax.bar(ind, difs, alpha=0.5, color='g', align='center')
# Add figure labels and ticks
if max_gap:
ax.set_title('Clustering Gap Differences\n(k=%d Estimated as Optimal)' % (max_gap), \
fontsize=16)
else:
ax.set_title('Clustering Gap Differences\n', fontsize=16)
ax.set_xlabel('Number of clusters k', fontsize=14)
ax.set_ylabel('Gap Difference', fontsize=14)
ax.xaxis.set_ticks(range(1,len(difs)+1))
# Add figure bounds
ax.set_ylim(min(difs)*1.2, max(difs)*1.2)
ax.set_xlim(0, len(difs)+1.0)
# Show the figure
plt.show()
| unlicense |
verilylifesciences/purplequery | purplequery/bq_types_test.py | 1 | 10171 | # Copyright 2019 Verily Life Sciences LLC
#
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
import datetime
import unittest
from typing import List, Optional, Sequence, Union # noqa: F401
import numpy as np
import pandas as pd
from ddt import data, ddt, unpack
from google.cloud.bigquery.schema import SchemaField
from purplequery.bq_types import PythonType # noqa: F401
from purplequery.bq_types import (BQArray, BQScalarType, BQType, TypedDataFrame, TypedSeries,
_coerce_names, implicitly_coerce)
# The NumPy types that are used to read in data into Pandas.
NumPyType = Union[np.bool_, np.datetime64, np.float64, np.string_]
@ddt
class BqTypesTest(unittest.TestCase):
@data(
(BQScalarType.BOOLEAN, SchemaField(name='foo', field_type='BOOLEAN')),
(BQScalarType.DATE, SchemaField(name='foo', field_type='DATE')),
(BQScalarType.DATETIME, SchemaField(name='foo', field_type='DATETIME')),
(BQScalarType.INTEGER, SchemaField(name='foo', field_type='INTEGER')),
(BQScalarType.FLOAT, SchemaField(name='foo', field_type='FLOAT')),
(BQScalarType.STRING, SchemaField(name='foo', field_type='STRING')),
(BQScalarType.TIMESTAMP, SchemaField(name='foo', field_type='TIMESTAMP')),
)
@unpack
def test_convert_between_schema_field_and_bq_type(self, bq_type, schema_field):
# type: (BQScalarType, SchemaField) -> None
# Test scalar
self.assertEqual(BQType.from_schema_field(schema_field), bq_type)
self.assertEqual(bq_type.to_schema_field('foo'), schema_field)
# Test array
schema_array_field = SchemaField(
name=schema_field.name,
field_type=schema_field.field_type,
mode='REPEATED')
bq_array_type = BQArray(bq_type)
self.assertEqual(BQType.from_schema_field(schema_array_field), bq_array_type)
self.assertEqual(bq_array_type.to_schema_field('foo'), schema_array_field)
@data(
(BQScalarType.BOOLEAN, SchemaField(name='foo', field_type='BOOL')),
(BQScalarType.INTEGER, SchemaField(name='foo', field_type='INT64')),
(BQScalarType.FLOAT, SchemaField(name='foo', field_type='FLOAT64')),
)
@unpack
def test_convert_from_standard_schema_field_to_bq_type(self, bq_type, schema_field):
# type: (BQScalarType, SchemaField) -> None
self.assertEqual(BQType.from_schema_field(schema_field), bq_type)
@data(
(BQScalarType.BOOLEAN, 'BOOL'),
(BQScalarType.INTEGER, 'int64'),
(BQScalarType.FLOAT, 'FLOAT64'),
(BQScalarType.BOOLEAN, 'boolean'),
(BQScalarType.DATE, 'DATE'),
(BQScalarType.DATETIME, 'datetime'),
(BQScalarType.INTEGER, 'INTEGER'),
(BQScalarType.FLOAT, 'FlOaT'),
(BQScalarType.STRING, 'STRING'),
(BQScalarType.TIMESTAMP, 'timeSTAMP'),
)
@unpack
def test_convert_from_string_to_bq_scalar_type(self, bq_type, string):
# type: (BQScalarType, str) -> None
self.assertEqual(BQScalarType.from_string(string), bq_type)
self.assertEqual(BQScalarType.from_string(string.lower()), bq_type)
@data(
(BQScalarType.BOOLEAN,),
(BQScalarType.DATE,),
(BQScalarType.DATETIME,),
(BQScalarType.INTEGER,),
(BQScalarType.FLOAT,),
(BQScalarType.STRING,),
(BQScalarType.TIMESTAMP,),
)
@unpack
def test_two_arrays_of_same_type_are_same_object(self, bq_type):
# type: (BQScalarType) -> None
# Type objects are immutable, and we need to be able to compare them
# (an array of ints is an array of ints, but it's not a string or an array of floats).
# A way to achieve this is to ensure that all types, including arrays, are singletons.
# So we test that for each scalar type, creating two arrays of it yields the same object.
a1 = BQArray(bq_type)
a2 = BQArray(bq_type)
self.assertIs(a1, a2)
@data(
(BQScalarType.BOOLEAN, np.bool_(True), True),
(BQScalarType.DATE, np.datetime64('2019-01-07'), datetime.date(2019, 1, 7)),
(BQScalarType.DATETIME, np.datetime64('2019-01-07T10:32:05.123456'),
datetime.datetime(2019, 1, 7, 10, 32, 5, 123456)),
(BQScalarType.INTEGER, np.float64(35.0), 35),
(BQScalarType.FLOAT, np.float64(12.34), 12.34),
(BQScalarType.STRING, np.string_('hello'), 'hello'),
(BQScalarType.TIMESTAMP, np.datetime64('2019-01-07T10:32:05.123456'),
datetime.datetime(2019, 1, 7, 10, 32, 5, 123456))
)
@unpack
def test_convert(self, bq_type, np_object, py_object):
# type: (BQScalarType, NumPyType, PythonType) -> None
# First, convert from a NumPy-typed object to a Pandas-typed object.
# Types are mostly the same except for np.datetime64 becomes pd.Timestamp
# We do this by creating a Pandas Series containing the single object, and then
# converting it to a sequence and extracting its single element.
pd_object, = pd.Series(np_object)
self.assertEqual(bq_type.convert(pd_object), py_object)
# Test that for any type, a NaN converts to None
self.assertIsNone(bq_type.convert(np.nan))
# Now test the same conversion for a list (array) of objects.
# Construct a Series containing a single row which is a list of three objects.
pd_array_object, = pd.Series([(pd_object,)*3])
self.assertEqual(BQArray(bq_type).convert(pd_array_object), (py_object,)*3)
# Test that for any Array type, a NaN converts to None
self.assertIsNone(BQArray(bq_type).convert(np.nan))
@data(
(BQScalarType.BOOLEAN, np.bool_),
(BQScalarType.DATE, 'datetime64[ns]'),
(BQScalarType.DATETIME, 'datetime64[ns]'),
(BQScalarType.INTEGER, np.float64),
(BQScalarType.FLOAT, np.float64),
(BQScalarType.STRING, np.string_),
(BQScalarType.TIMESTAMP, 'datetime64[ns]'),
)
@unpack
def test_to_dtype(self, bq_type, np_type):
# type: (BQScalarType, NumPyType) -> None
self.assertEqual(bq_type.to_dtype(), np.dtype(np_type))
# NumPy doesn't know from cell elements that are lists, so it just leaves it as an
# uninterpreted Python object.
self.assertEqual(BQArray(bq_type).to_dtype(), np.dtype('object'))
def test_get_typed_series_as_list(self):
typed_series = TypedSeries(
pd.Series([(np.float64(1.5), np.float64(2.5), np.float64(3.0)),
(np.float64(2.5), np.float64(3.5), np.float64(4.0))]),
BQArray(BQScalarType.FLOAT))
self.assertEqual(typed_series.to_list(),
[(1.5, 2.5, 3.0),
(2.5, 3.5, 4.0)])
def test_get_typed_dataframe_schema(self):
typed_dataframe = TypedDataFrame(pd.DataFrame(columns=['a', 'b']),
[BQScalarType.BOOLEAN,
BQArray(BQScalarType.FLOAT)])
self.assertEqual(typed_dataframe.to_bq_schema(),
[SchemaField(name='a', field_type='BOOLEAN'),
SchemaField(name='b', field_type='FLOAT', mode='REPEATED')])
def test_get_typed_dataframe_as_list_of_lists(self):
typed_dataframe = TypedDataFrame(
pd.DataFrame(
[[np.bool_(True), (np.float64(1.5), np.float64(2.5), np.float64(3.0))],
[np.bool_(False), (np.float64(2.5), np.float64(3.5), np.float64(4.0))]],
columns=['a', 'b']),
[BQScalarType.BOOLEAN,
BQArray(BQScalarType.FLOAT)])
self.assertEqual(typed_dataframe.to_list_of_lists(),
[[True, (1.5, 2.5, 3.0)],
[False, (2.5, 3.5, 4.0)]])
@data(
dict(names=[],
expected_name=None),
dict(names=['foo'],
expected_name='foo'),
dict(names=['foo', None, 'foo'],
expected_name='foo'),
)
@unpack
def test_coerce_names(self, names, expected_name):
# type: (Sequence[str], Optional[str]) -> None
self.assertEqual(expected_name, _coerce_names(names))
@data(
dict(names=['foo', 'bar']),
)
@unpack
def test_coerce_names_error(self, names):
# type: (Sequence[str]) -> None
with self.assertRaisesRegexp(ValueError, 'field names .* do not match'):
_coerce_names(names)
@data(
([BQScalarType.INTEGER], BQScalarType.INTEGER),
([None, BQScalarType.INTEGER], BQScalarType.INTEGER),
([BQScalarType.FLOAT], BQScalarType.FLOAT),
([BQScalarType.FLOAT, None], BQScalarType.FLOAT),
([BQScalarType.FLOAT, BQScalarType.FLOAT], BQScalarType.FLOAT),
([BQScalarType.STRING, BQScalarType.STRING], BQScalarType.STRING),
([BQScalarType.STRING, None, BQScalarType.STRING], BQScalarType.STRING),
([BQScalarType.INTEGER, BQScalarType.FLOAT], BQScalarType.FLOAT),
([BQScalarType.STRING, BQScalarType.DATE], BQScalarType.DATE),
([BQScalarType.STRING, BQScalarType.TIMESTAMP], BQScalarType.TIMESTAMP),
)
@unpack
def test_implicitly_coerce(self, input_types, expected_supertype):
# type: (List[BQScalarType], BQScalarType) -> None
supertype = implicitly_coerce(*input_types)
self.assertEqual(supertype, expected_supertype)
@data(
([], "No types provided to merge"),
([BQScalarType.STRING, BQScalarType.INTEGER],
"Cannot implicitly coerce the given types:"),
([BQScalarType.STRING, BQScalarType.DATE, BQScalarType.TIMESTAMP],
"Cannot implicitly coerce the given types:"),
)
@unpack
def test_implicitly_coerce_error(self, input_types, error):
# type: (List[BQScalarType], str) -> None
with self.assertRaisesRegexp(ValueError, error):
implicitly_coerce(*input_types)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
xubenben/scikit-learn | examples/model_selection/plot_train_error_vs_test_error.py | 349 | 2577 | """
=========================
Train error vs Test error
=========================
Illustration of how the performance of an estimator on unseen data (test data)
is not the same as the performance on training data. As the regularization
increases, the performance on the training set decreases while the performance
on the test set is optimal within a range of values of the regularization
parameter. The example uses an Elastic-Net regression model and the performance
is measured using the explained variance, a.k.a. R^2.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
from sklearn import linear_model
###############################################################################
# Generate sample data
n_samples_train, n_samples_test, n_features = 75, 150, 500
np.random.seed(0)
coef = np.random.randn(n_features)
coef[50:] = 0.0  # only the top 50 features are impacting the model
X = np.random.randn(n_samples_train + n_samples_test, n_features)
y = np.dot(X, coef)
# Split train and test data
X_train, X_test = X[:n_samples_train], X[n_samples_train:]
y_train, y_test = y[:n_samples_train], y[n_samples_train:]
###############################################################################
# Compute train and test errors
alphas = np.logspace(-5, 1, 60)
enet = linear_model.ElasticNet(l1_ratio=0.7)
train_errors = list()
test_errors = list()
for alpha in alphas:
enet.set_params(alpha=alpha)
enet.fit(X_train, y_train)
train_errors.append(enet.score(X_train, y_train))
test_errors.append(enet.score(X_test, y_test))
i_alpha_optim = np.argmax(test_errors)
alpha_optim = alphas[i_alpha_optim]
print("Optimal regularization parameter : %s" % alpha_optim)
# Estimate the coef_ on full data with optimal regularization parameter
enet.set_params(alpha=alpha_optim)
coef_ = enet.fit(X, y).coef_
###############################################################################
# Plot results functions
import matplotlib.pyplot as plt
plt.subplot(2, 1, 1)
plt.semilogx(alphas, train_errors, label='Train')
plt.semilogx(alphas, test_errors, label='Test')
plt.vlines(alpha_optim, plt.ylim()[0], np.max(test_errors), color='k',
linewidth=3, label='Optimum on test')
plt.legend(loc='lower left')
plt.ylim([0, 1.2])
plt.xlabel('Regularization parameter')
plt.ylabel('Performance')
# Show estimated coef_ vs true coef
plt.subplot(2, 1, 2)
plt.plot(coef, label='True coef')
plt.plot(coef_, label='Estimated coef')
plt.legend()
plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.26)
plt.show()
| bsd-3-clause |
merenlab/anvio | anvio/variability.py | 1 | 18803 | # -*- coding: utf-8
# pylint: disable=line-too-long
"""Classes to make sense of single nucleotide variation"""
import copy
import numpy as np
import anvio
from anvio.errors import ConfigError
__author__ = "Developers of anvi'o (see AUTHORS.txt)"
__copyright__ = "Copyleft 2015-2018, the Meren Lab (http://merenlab.org/)"
__credits__ = []
__license__ = "GPL 3.0"
__version__ = anvio.__version__
__maintainer__ = "A. Murat Eren"
__email__ = "[email protected]"
class VariablityTestFactory:
def __init__(self, params={'b': 2, 'm': 1.45, 'c': 0.05}):
self.params = params
if self.params:
self.b, self.m, self.c = self.params['b'], self.params['m'], self.params['c']
else:
self.b, self.m, self.c = None, None, None
def get_min_acceptable_departure_from_reference(self, coverage):
"""Get minimum allowable departure from consensus
Notes
=====
- 0 returned if self.params is None
- https://i.imgur.com/oVoDcbT.png (b=2, m=1.45, c=0.05)
        - https://www.desmos.com/calculator/qwocua4zi5 (interactive plot to tune b, m, and c)
"""
if self.params is None:
if hasattr(coverage, '__len__'):
return np.zeros(len(coverage))
else:
return 0
return (1 / self.b) ** (coverage ** (1 / self.b) - self.m) + self.c
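# A minimal illustration (not part of anvi'o itself) of how the curve above
# behaves with the default parameters (b=2, m=1.45, c=0.05): the minimum
# acceptable departure from reference shrinks as coverage grows.
#
#     factory = VariablityTestFactory()
#     factory.get_min_acceptable_departure_from_reference(10)    # roughly 0.36
#     factory.get_min_acceptable_departure_from_reference(100)   # roughly 0.05
#     factory.get_min_acceptable_departure_from_reference(1000)  # approaches c = 0.05
#
# The numbers follow directly from (1 / b) ** (coverage ** (1 / b) - m) + c.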
class ProcessAlleleCounts:
def __init__(self, allele_counts, allele_to_array_index, sequence, sequence_as_index=None, min_coverage_for_variability=1, test_class=None, additional_per_position_data={}):
"""A class to process raw variability information for a given allele counts array
Creates self.d, a dictionary of equal-length arrays that describes information related to
variability.
Parameters
==========
allele_counts : array-like
An allele counts array. Each column is a position in the sequence, and each row is an
allele (e.g. A, C, T, G, N if alleles are nucleotides).
allele_to_array_index : dict
Which allele belongs at which row index? If A is row 0, C is row 1, etc, the dictionary
should be {'A': 0, 'C': 1, ...}.
sequence : str
What sequence is this for? It should have length equal to number of columns of
allele_counts
sequence_as_index : None
allele_to_array_index provides the means to convert sequence into its index-form.
However, this requires an expensive list comprehension. If you have already calculated
the sequence as an index, be sure you provide it here. If you don't provide anything, it
will be calculated at high cost
min_coverage_for_variability : int, 1
positions below this coverage value will be filtered out
test_class : VariablityTestFactory, None
If not None, positions will be filtered out if they are deemed not worth reporting
additional_per_position_data : dict, {}
This class creates self.d, a dictionary of equal length arrays that describes
information related to variability. If the user has _other_ data for each position in
this sequence, they can pass it with parameter. For example, if the user has a
True/False _array_ (not list) that states whether each position is an outlier position
relative to a contig, they could pass a dictionary {'cov_outlier_in_contig':
np.array([True, True, ...])}, where the array is the same length as `sequence`. This
array will be added to self.d, and will be appropriately filtered alongside the other
variables
Notes
=====
- Originally self.d was a pandas dataframe. While this approach made the code very
readable and simple to write, it was extremely slow.
- If you are analyzing nucleotide, amino acid, or codon variability, you should use the
inheriting classes ProcessNucleotideCounts, ProcessAminoAcidCounts, and ProcessCodonCounts
"""
self.d = copy.copy(additional_per_position_data)
for key in self.d:
if len(self.d[key]) != allele_counts.shape[1]:
raise ConfigError("ProcessAlleleCounts :: key '%s' in your passed data dictionary \
has %d positions, but sequence has %d." % (key, len(self.d[key]), len(sequence)))
if len(sequence) != allele_counts.shape[1]:
raise ConfigError("ProcessAlleleCounts :: allele_counts has %d positions, but sequence has %d." \
% (len(sequence), allele_counts.shape[1]))
if len(allele_to_array_index) != allele_counts.shape[0]:
raise ConfigError("ProcessAlleleCounts :: allele_counts has %d rows, but the allele_to_array_index dictionary has %d." \
% (allele_counts.shape[0], len(allele_to_array_index)))
self.min_coverage_for_variability = min_coverage_for_variability
self.test_class = test_class
# dictionaries to convert to/from array-row-index and allele
self.allele_to_array_index = allele_to_array_index
self.array_index_to_allele = {v: k for k, v in self.allele_to_array_index.items()}
self.d['pos'] = np.arange(len(sequence))
self.d['allele_counts'] = allele_counts
self.d['reference'] = np.array(list(sequence))
if sequence_as_index is not None:
self.sequence_as_index_provided = True
self.d['sequence_as_index'] = sequence_as_index
else:
self.sequence_as_index_provided = False
if self.min_coverage_for_variability < 1:
raise ConfigError("ProcessAlleleCounts :: self.min_coverage_for_variability must be at least 1, currently %d" % self.min_coverage_for_variability)
def process(self, skip_competing_items=False):
"""The main function call of this class. Populates self.d"""
if self.get_data_length() == 0:
return False
# remove positions that have non-allowed characters in the sequence
self.filter_or_dont(self.get_boolean_of_allowable_characters_in_reference(), kind='boolean')
if self.get_data_length() == 0:
return False
if not self.sequence_as_index_provided:
self.d['sequence_as_index'] = np.array([self.allele_to_array_index[item] for item in self.d['reference']])
self.d['coverage'] = self.get_coverage()
# Filter if some positions are not well-covered
indices_to_keep = self.get_indices_above_coverage_threshold(self.d['coverage'], self.min_coverage_for_variability)
self.filter_or_dont(indices_to_keep)
if self.get_data_length() == 0:
return False
self.d['reference_coverage'] = self.get_reference_coverage()
self.d['departure_from_reference'] = self.get_departure_from_reference(self.d['reference_coverage'], self.d['coverage'])
# Filter if some positions were not worth reporting
indices_to_keep = self.get_positions_worth_reporting(self.d['coverage'], self.d['departure_from_reference'])
self.filter_or_dont(indices_to_keep)
if self.get_data_length() == 0:
return False
if not skip_competing_items:
self.d['competing_items'] = self.get_competing_items(self.d['reference_coverage'], self.d['coverage'])
# Filter if any competing items are None
indices_to_keep = self.get_positions_with_competing_items(self.d['competing_items'])
self.filter_or_dont(indices_to_keep)
if self.get_data_length() == 0:
return False
# each allele gets its own key in self.d
for index, item in self.array_index_to_allele.items():
self.d[item] = self.d['allele_counts'][index, :]
# Delete intermediate keys
del self.d['allele_counts']
del self.d['sequence_as_index']
del self.d['reference_coverage']
return True
def get_data_length(self):
"""Get the length of data (number of positions with alleles)"""
return len(self.d['reference'])
def filter(self, keys):
"""Filters self.d. keys can be an array-like of indices or array-like of booleans"""
for key in self.d:
if self.d[key].ndim == 1:
self.d[key] = self.d[key][keys]
else:
self.d[key] = self.d[key][:, keys]
def filter_or_dont(self, keys, kind='indices'):
"""Filter self.d if it is required
Parameters
==========
keys : array-like
What should be used to filter? If kind == 'indices', it should be an array of indices to
            keep. If kind == 'boolean', it should be an array of booleans
kind : str, 'indices'
Either 'indices' or 'boolean'. See keys for what each means
"""
if kind == 'indices':
if len(keys) == len(self.d['pos']):
# Nothing to filter
return
elif kind == 'boolean':
if sum(keys) == len(keys):
# Nothing to filter
return
self.filter(keys)
def get_coverage(self):
return np.sum(self.d['allele_counts'], axis=0)
def get_reference_coverage(self):
return self.d['allele_counts'][self.d['sequence_as_index'], np.arange(self.d['allele_counts'].shape[1])]
def get_departure_from_reference(self, reference_coverage=None, coverage=None):
if reference_coverage is None:
reference_coverage = self.get_reference_coverage()
if coverage is None:
coverage = self.get_coverage()
return 1 - reference_coverage/coverage
def get_competing_items(self, reference_coverage=None, coverage=None):
if reference_coverage is None:
reference_coverage = self.get_reference_coverage()
if coverage is None:
coverage = self.get_coverage()
n = self.d['allele_counts'].shape[1]
        # as a first pass, sort the row indices (negating allele_counts sorts from highest -> lowest)
competing_items_as_index = np.argsort(-self.d['allele_counts'], axis=0)
# take the top 2 items
competing_items_as_index = competing_items_as_index[:2, :]
# get the coverage of the second item
coverage_second_item = self.d['allele_counts'][competing_items_as_index[1, :], np.arange(n)]
# if the coverage of the second item is 0, set the second index equal to the first
competing_items_as_index[1, :] = np.where(coverage_second_item == 0, competing_items_as_index[0, :], competing_items_as_index[1, :])
# sort the competing nts
competing_items_as_index = np.sort(competing_items_as_index, axis=0)
# make the competing nts list
nts_1 = [self.array_index_to_allele[index_1] for index_1 in competing_items_as_index[0, :]]
nts_2 = [self.array_index_to_allele[index_2] for index_2 in competing_items_as_index[1, :]]
competing_items = np.fromiter((nt_1 + nt_2 for nt_1, nt_2 in zip(nts_1, nts_2)), np.dtype('<U2'), count=n)
# If the second item is 0, and the reference is the first item, set competing_items to None.
# This can easily be checked by seeing if reference_coverage == coverage
competing_items = np.where(reference_coverage == coverage, None, competing_items)
return competing_items
def get_boolean_of_allowable_characters_in_reference(self, sequence=None):
if sequence is None:
sequence = self.d['reference']
items_in_sequence = set(sequence)
for item in items_in_sequence:
if item not in self.allele_to_array_index:
return [item in self.allele_to_array_index for item in sequence]
return [True] * len(sequence)
def get_indices_above_coverage_threshold(self, coverage=None, threshold=None):
if coverage is None:
coverage = self.get_coverage()
if threshold is None:
threshold = self.min_coverage_for_variability
return np.where(coverage >= threshold)[0]
def get_positions_worth_reporting(self, coverage, departure_from_reference):
worth_reporting = np.array([True] * len(coverage))
if not self.test_class:
return worth_reporting
threshold = self.test_class.get_min_acceptable_departure_from_reference(coverage)
return np.where(departure_from_reference >= threshold)[0]
def get_positions_with_competing_items(self, competing_items):
return np.where(competing_items != None)[0]
def rename_key(self, from_this, to_that):
if from_this in self.d:
self.d[to_that] = self.d.pop(from_this)
class ProcessNucleotideCounts(ProcessAlleleCounts):
def __init__(self, *args, **kwargs):
ProcessAlleleCounts.__init__(self, *args, **kwargs)
def process(self, *args, **kwargs):
p = ProcessAlleleCounts.process(self, *args, **kwargs)
self.rename_key('competing_items', 'competing_nts')
return p
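# A toy usage sketch (the counts below are made-up, not real anvi'o output):
# each column of the allele counts array is a position in the sequence and
# each row is an allele; process() then populates p.d with per-position arrays.
#
#     counts = np.array([[10,  2],   # A
#                        [ 0,  8],   # C
#                        [ 0,  0],   # T
#                        [ 0,  0]])  # G
#     p = ProcessNucleotideCounts(counts,
#                                 {'A': 0, 'C': 1, 'T': 2, 'G': 3},
#                                 'AC',
#                                 min_coverage_for_variability=1)
#     if p.process():
#         # p.d now holds 'pos', 'reference', 'coverage',
#         # 'departure_from_reference', 'competing_nts' and one key per allele
#         pass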
class ProcessAminoAcidCounts(ProcessAlleleCounts):
def __init__(self, *args, **kwargs):
ProcessAlleleCounts.__init__(self, *args, **kwargs)
def process(self, *args, **kwargs):
p = ProcessAlleleCounts.process(self, *args, **kwargs)
self.rename_key('competing_items', 'competing_aas')
return p
class ProcessCodonCounts(ProcessAlleleCounts):
def __init__(self, *args, **kwargs):
ProcessAlleleCounts.__init__(self, *args, **kwargs)
def process(self, *args, **kwargs):
p = ProcessAlleleCounts.process(self, *args, **kwargs)
self.rename_key('competing_items', 'competing_codons')
self.rename_key('pos', 'codon_order_in_gene')
return p
class ProcessIndelCounts(object):
def __init__(self, indels, coverage, min_indel_fraction=0, test_class=None, min_coverage_for_variability=1):
"""A class to process raw variability information for a given allele counts array
Creates self.d, a dictionary of equal-length arrays that describes information related to
variability.
Parameters
==========
indels : dictionary
A dictionary that looks like this:
{
6279666787066445523: OrderedDict([
('split_name', 'IGD_000000000532_split_00001'),
('pos', 2),
('pos_in_contig', 2),
('corresponding_gene_call', 25396),
('in_noncoding_gene_call', 0),
('in_coding_gene_call', 1),
('base_pos_in_codon', 3),
('codon_order_in_gene', 0),
('cov_outlier_in_split', 1),
('cov_outlier_in_contig', 1),
('reference', 'T'),
('type', 'INS'),
('sequence', 'CTGACGGCT'),
('length', 9),
('count', 1)
]),
-5035942137885303221: OrderedDict([
('split_name', 'IGD_000000000532_split_00001'),
('pos', 0),
('pos_in_contig', 0),
('corresponding_gene_call', 25396),
('in_noncoding_gene_call', 0),
('in_coding_gene_call', 1),
('base_pos_in_codon', 1),
('codon_order_in_gene', 0),
('cov_outlier_in_split', 1),
('cov_outlier_in_contig', 1),
('reference', 'G'),
('type', 'INS'),
('sequence', 'CTCACGG'),
('length', 7),
('count', 1)
]),
...
}
The keys are unique identifiers. The OrderedDicts should have at least the key `pos`,
but there are no restrictions on what other keys it may have.
coverage : array
            Coverage values for the sequence this is for. This should have the same length as the sequence
test_class : VariablityTestFactory, None
If not None, indels will be filtered out if they are deemed not worth reporting
min_coverage_for_variability : int, 1
positions below this coverage value will be filtered out
"""
self.indels = indels
self.coverage = coverage
self.test_class = test_class if test_class is not None else VariablityTestFactory(params=None)
self.min_coverage_for_variability = min_coverage_for_variability
def process(self):
"""Modify self.indels"""
indel_hashes_to_remove = set()
for indel_hash in self.indels:
indel = self.indels[indel_hash]
pos = indel['pos']
# Calculate coverage
if indel['type'] == 'INS':
if pos == len(self.coverage)-1:
                    # This is the last position in the sequence, so coverage is based only on the
                    # NT left of the indel
cov = self.coverage[pos]
else:
# The coverage is the average of the coverage left and right of the
# insertion
cov = (self.coverage[pos] + self.coverage[pos+1])/2
else:
# The coverage is the average of the NT coverages that the deletion occurs over
cov = np.mean(self.coverage[pos:pos+indel['length']])
# Filter the entry if need be
if cov < self.min_coverage_for_variability:
# coverage of corresponding position is not high enough
indel_hashes_to_remove.add(indel_hash)
continue
# NOTE We call get_min_acceptable_departure_from_reference, yet the threshold value it
# spits out is being compared to count/coverage, since there is no "departure from
# reference" for indels.
if indel['count']/cov < self.test_class.get_min_acceptable_departure_from_reference(cov):
# indel count is not high enough compared to coverage value
indel_hashes_to_remove.add(indel_hash)
continue
self.indels[indel_hash]['coverage'] = cov
self.indels = {k: v for k, v in self.indels.items() if k not in indel_hashes_to_remove}
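# A minimal sketch (toy values, not real anvi'o data) of how ProcessIndelCounts
# is driven: `indels` maps unique hashes to per-indel entries (shown as plain
# dicts for brevity) and `coverage` is the per-nucleotide coverage array of the
# same sequence.
#
#     indels = {123: {'pos': 4, 'type': 'DEL', 'length': 3, 'count': 6},
#               456: {'pos': 0, 'type': 'INS', 'length': 2, 'count': 1}}
#     p = ProcessIndelCounts(indels, np.array([20, 20, 21, 19, 18, 18, 17, 20]))
#     p.process()
#     # p.indels now contains only the entries that passed the coverage and
#     # count/coverage filters, each with an added 'coverage' key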
| gpl-3.0 |
bikong2/scikit-learn | sklearn/feature_selection/tests/test_rfe.py | 209 | 11733 | """
Testing Recursive feature elimination
"""
import warnings
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
from nose.tools import assert_equal, assert_true
from scipy import sparse
from sklearn.feature_selection.rfe import RFE, RFECV
from sklearn.datasets import load_iris, make_friedman1
from sklearn.metrics import zero_one_loss
from sklearn.svm import SVC, SVR
from sklearn.ensemble import RandomForestClassifier
from sklearn.cross_validation import cross_val_score
from sklearn.utils import check_random_state
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_greater
from sklearn.metrics import make_scorer
from sklearn.metrics import get_scorer
class MockClassifier(object):
"""
    Dummy classifier to test recursive feature elimination
"""
def __init__(self, foo_param=0):
self.foo_param = foo_param
def fit(self, X, Y):
assert_true(len(X) == len(Y))
self.coef_ = np.ones(X.shape[1], dtype=np.float64)
return self
def predict(self, T):
return T.shape[0]
predict_proba = predict
decision_function = predict
transform = predict
def score(self, X=None, Y=None):
if self.foo_param > 1:
score = 1.
else:
score = 0.
return score
def get_params(self, deep=True):
return {'foo_param': self.foo_param}
def set_params(self, **params):
return self
def test_rfe_set_params():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
clf = SVC(kernel="linear")
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
y_pred = rfe.fit(X, y).predict(X)
clf = SVC()
with warnings.catch_warnings(record=True):
# estimator_params is deprecated
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1,
estimator_params={'kernel': 'linear'})
y_pred2 = rfe.fit(X, y).predict(X)
assert_array_equal(y_pred, y_pred2)
def test_rfe_features_importance():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
clf = RandomForestClassifier(n_estimators=20,
random_state=generator, max_depth=2)
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
rfe.fit(X, y)
assert_equal(len(rfe.ranking_), X.shape[1])
clf_svc = SVC(kernel="linear")
rfe_svc = RFE(estimator=clf_svc, n_features_to_select=4, step=0.1)
rfe_svc.fit(X, y)
# Check if the supports are equal
assert_array_equal(rfe.get_support(), rfe_svc.get_support())
def test_rfe_deprecation_estimator_params():
deprecation_message = ("The parameter 'estimator_params' is deprecated as "
"of version 0.16 and will be removed in 0.18. The "
"parameter is no longer necessary because the "
"value is set via the estimator initialisation or "
"set_params method.")
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
assert_warns_message(DeprecationWarning, deprecation_message,
RFE(estimator=SVC(), n_features_to_select=4, step=0.1,
estimator_params={'kernel': 'linear'}).fit,
X=X,
y=y)
assert_warns_message(DeprecationWarning, deprecation_message,
RFECV(estimator=SVC(), step=1, cv=5,
estimator_params={'kernel': 'linear'}).fit,
X=X,
y=y)
def test_rfe():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
X_sparse = sparse.csr_matrix(X)
y = iris.target
# dense model
clf = SVC(kernel="linear")
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
rfe.fit(X, y)
X_r = rfe.transform(X)
clf.fit(X_r, y)
assert_equal(len(rfe.ranking_), X.shape[1])
# sparse model
clf_sparse = SVC(kernel="linear")
rfe_sparse = RFE(estimator=clf_sparse, n_features_to_select=4, step=0.1)
rfe_sparse.fit(X_sparse, y)
X_r_sparse = rfe_sparse.transform(X_sparse)
assert_equal(X_r.shape, iris.data.shape)
assert_array_almost_equal(X_r[:10], iris.data[:10])
assert_array_almost_equal(rfe.predict(X), clf.predict(iris.data))
assert_equal(rfe.score(X, y), clf.score(iris.data, iris.target))
assert_array_almost_equal(X_r, X_r_sparse.toarray())
def test_rfe_mockclassifier():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
# dense model
clf = MockClassifier()
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
rfe.fit(X, y)
X_r = rfe.transform(X)
clf.fit(X_r, y)
assert_equal(len(rfe.ranking_), X.shape[1])
assert_equal(X_r.shape, iris.data.shape)
def test_rfecv():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = list(iris.target) # regression test: list should be supported
# Test using the score function
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5)
rfecv.fit(X, y)
# non-regression test for missing worst feature:
assert_equal(len(rfecv.grid_scores_), X.shape[1])
assert_equal(len(rfecv.ranking_), X.shape[1])
X_r = rfecv.transform(X)
# All the noisy variable were filtered out
assert_array_equal(X_r, iris.data)
# same in sparse
rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5)
X_sparse = sparse.csr_matrix(X)
rfecv_sparse.fit(X_sparse, y)
X_r_sparse = rfecv_sparse.transform(X_sparse)
assert_array_equal(X_r_sparse.toarray(), iris.data)
# Test using a customized loss function
scoring = make_scorer(zero_one_loss, greater_is_better=False)
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
scoring=scoring)
ignore_warnings(rfecv.fit)(X, y)
X_r = rfecv.transform(X)
assert_array_equal(X_r, iris.data)
# Test using a scorer
scorer = get_scorer('accuracy')
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
scoring=scorer)
rfecv.fit(X, y)
X_r = rfecv.transform(X)
assert_array_equal(X_r, iris.data)
# Test fix on grid_scores
def test_scorer(estimator, X, y):
return 1.0
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
scoring=test_scorer)
rfecv.fit(X, y)
assert_array_equal(rfecv.grid_scores_, np.ones(len(rfecv.grid_scores_)))
# Same as the first two tests, but with step=2
rfecv = RFECV(estimator=SVC(kernel="linear"), step=2, cv=5)
rfecv.fit(X, y)
assert_equal(len(rfecv.grid_scores_), 6)
assert_equal(len(rfecv.ranking_), X.shape[1])
X_r = rfecv.transform(X)
assert_array_equal(X_r, iris.data)
rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=2, cv=5)
X_sparse = sparse.csr_matrix(X)
rfecv_sparse.fit(X_sparse, y)
X_r_sparse = rfecv_sparse.transform(X_sparse)
assert_array_equal(X_r_sparse.toarray(), iris.data)
def test_rfecv_mockclassifier():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = list(iris.target) # regression test: list should be supported
# Test using the score function
rfecv = RFECV(estimator=MockClassifier(), step=1, cv=5)
rfecv.fit(X, y)
# non-regression test for missing worst feature:
assert_equal(len(rfecv.grid_scores_), X.shape[1])
assert_equal(len(rfecv.ranking_), X.shape[1])
def test_rfe_estimator_tags():
rfe = RFE(SVC(kernel='linear'))
assert_equal(rfe._estimator_type, "classifier")
# make sure that cross-validation is stratified
iris = load_iris()
score = cross_val_score(rfe, iris.data, iris.target)
assert_greater(score.min(), .7)
def test_rfe_min_step():
n_features = 10
X, y = make_friedman1(n_samples=50, n_features=n_features, random_state=0)
n_samples, n_features = X.shape
estimator = SVR(kernel="linear")
# Test when floor(step * n_features) <= 0
selector = RFE(estimator, step=0.01)
sel = selector.fit(X, y)
assert_equal(sel.support_.sum(), n_features // 2)
# Test when step is between (0,1) and floor(step * n_features) > 0
selector = RFE(estimator, step=0.20)
sel = selector.fit(X, y)
assert_equal(sel.support_.sum(), n_features // 2)
# Test when step is an integer
selector = RFE(estimator, step=5)
sel = selector.fit(X, y)
assert_equal(sel.support_.sum(), n_features // 2)
def test_number_of_subsets_of_features():
# In RFE, 'number_of_subsets_of_features'
# = the number of iterations in '_fit'
# = max(ranking_)
# = 1 + (n_features + step - n_features_to_select - 1) // step
# After optimization #4534, this number
# = 1 + np.ceil((n_features - n_features_to_select) / float(step))
# This test case is to test their equivalence, refer to #4534 and #3824
def formula1(n_features, n_features_to_select, step):
return 1 + ((n_features + step - n_features_to_select - 1) // step)
def formula2(n_features, n_features_to_select, step):
return 1 + np.ceil((n_features - n_features_to_select) / float(step))
# RFE
# Case 1, n_features - n_features_to_select is divisible by step
# Case 2, n_features - n_features_to_select is not divisible by step
n_features_list = [11, 11]
n_features_to_select_list = [3, 3]
step_list = [2, 3]
for n_features, n_features_to_select, step in zip(
n_features_list, n_features_to_select_list, step_list):
generator = check_random_state(43)
X = generator.normal(size=(100, n_features))
y = generator.rand(100).round()
rfe = RFE(estimator=SVC(kernel="linear"),
n_features_to_select=n_features_to_select, step=step)
rfe.fit(X, y)
# this number also equals to the maximum of ranking_
assert_equal(np.max(rfe.ranking_),
formula1(n_features, n_features_to_select, step))
assert_equal(np.max(rfe.ranking_),
formula2(n_features, n_features_to_select, step))
# In RFECV, 'fit' calls 'RFE._fit'
# 'number_of_subsets_of_features' of RFE
# = the size of 'grid_scores' of RFECV
# = the number of iterations of the for loop before optimization #4534
# RFECV, n_features_to_select = 1
# Case 1, n_features - 1 is divisible by step
# Case 2, n_features - 1 is not divisible by step
n_features_to_select = 1
n_features_list = [11, 10]
step_list = [2, 2]
for n_features, step in zip(n_features_list, step_list):
generator = check_random_state(43)
X = generator.normal(size=(100, n_features))
y = generator.rand(100).round()
rfecv = RFECV(estimator=SVC(kernel="linear"), step=step, cv=5)
rfecv.fit(X, y)
assert_equal(rfecv.grid_scores_.shape[0],
formula1(n_features, n_features_to_select, step))
assert_equal(rfecv.grid_scores_.shape[0],
formula2(n_features, n_features_to_select, step))
| bsd-3-clause |
mhdella/scikit-learn | sklearn/neighbors/graph.py | 208 | 7031 | """Nearest Neighbors graph functions"""
# Author: Jake Vanderplas <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import warnings
from .base import KNeighborsMixin, RadiusNeighborsMixin
from .unsupervised import NearestNeighbors
def _check_params(X, metric, p, metric_params):
"""Check the validity of the input parameters"""
params = zip(['metric', 'p', 'metric_params'],
[metric, p, metric_params])
est_params = X.get_params()
for param_name, func_param in params:
if func_param != est_params[param_name]:
raise ValueError(
"Got %s for %s, while the estimator has %s for "
"the same parameter." % (
func_param, param_name, est_params[param_name]))
def _query_include_self(X, include_self, mode):
"""Return the query based on include_self param"""
# Done to preserve backward compatibility.
if include_self is None:
if mode == "connectivity":
warnings.warn(
"The behavior of 'kneighbors_graph' when mode='connectivity' "
"will change in version 0.18. Presently, the nearest neighbor "
"of each sample is the sample itself. Beginning in version "
"0.18, the default behavior will be to exclude each sample "
"from being its own nearest neighbor. To maintain the current "
"behavior, set include_self=True.", DeprecationWarning)
include_self = True
else:
include_self = False
if include_self:
query = X._fit_X
else:
query = None
return query
def kneighbors_graph(X, n_neighbors, mode='connectivity', metric='minkowski',
p=2, metric_params=None, include_self=None):
"""Computes the (weighted) graph of k-Neighbors for points in X
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
X : array-like or BallTree, shape = [n_samples, n_features]
Sample data, in the form of a numpy array or a precomputed
:class:`BallTree`.
n_neighbors : int
Number of neighbors for each sample.
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
metric : string, default 'minkowski'
The distance metric used to calculate the k-Neighbors for each sample
point. The DistanceMetric class gives a list of available metrics.
The default distance is 'euclidean' ('minkowski' metric with the p
param equal to 2.)
include_self: bool, default backward-compatible.
Whether or not to mark each sample as the first nearest neighbor to
itself. If `None`, then True is used for mode='connectivity' and False
for mode='distance' as this will preserve backwards compatibilty. From
version 0.18, the default value will be False, irrespective of the
value of `mode`.
p : int, default 2
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params: dict, optional
additional keyword arguments for the metric function.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import kneighbors_graph
>>> A = kneighbors_graph(X, 2)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 1.],
[ 1., 0., 1.]])
See also
--------
radius_neighbors_graph
"""
if not isinstance(X, KNeighborsMixin):
X = NearestNeighbors(n_neighbors, metric=metric, p=p,
metric_params=metric_params).fit(X)
else:
_check_params(X, metric, p, metric_params)
query = _query_include_self(X, include_self, mode)
return X.kneighbors_graph(X=query, n_neighbors=n_neighbors, mode=mode)
def radius_neighbors_graph(X, radius, mode='connectivity', metric='minkowski',
p=2, metric_params=None, include_self=None):
"""Computes the (weighted) graph of Neighbors for points in X
Neighborhoods are restricted the points at a distance lower than
radius.
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
X : array-like or BallTree, shape = [n_samples, n_features]
Sample data, in the form of a numpy array or a precomputed
:class:`BallTree`.
radius : float
Radius of neighborhoods.
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
metric : string, default 'minkowski'
The distance metric used to calculate the neighbors within a
given radius for each sample point. The DistanceMetric class
gives a list of available metrics. The default distance is
'euclidean' ('minkowski' metric with the param equal to 2.)
include_self: bool, default None
Whether or not to mark each sample as the first nearest neighbor to
itself. If `None`, then True is used for mode='connectivity' and False
for mode='distance' as this will preserve backwards compatibilty. From
version 0.18, the default value will be False, irrespective of the
value of `mode`.
p : int, default 2
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params: dict, optional
additional keyword arguments for the metric function.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import radius_neighbors_graph
>>> A = radius_neighbors_graph(X, 1.5)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 0.],
[ 1., 0., 1.]])
See also
--------
kneighbors_graph
"""
if not isinstance(X, RadiusNeighborsMixin):
X = NearestNeighbors(radius=radius, metric=metric, p=p,
metric_params=metric_params).fit(X)
else:
_check_params(X, metric, p, metric_params)
query = _query_include_self(X, include_self, mode)
return X.radius_neighbors_graph(query, radius, mode)
| bsd-3-clause |
pohzhiee/ghetto_omr | imgproc_dev1.py | 1 | 2510 | import numpy as np
import cv2
import matplotlib.pyplot as plt
import imgproc_funcfile as imgfunc
#OMS Info Input
n_col= 5
n_row= 9
path='img_data/omstest4.jpg'
#Settings
##Bubble
show_bubble=1 #0=No, #1=Yes
bubble_colour=(255,0,0)
bubble_linethickness=5 #integer value >0
##Centre Point
show_cntpt=1 #0=No, #1=Yes
cntpt_colour=(255,0,0)
cntpt_size=5 #integer value >0
##Matching Stringency
match_coeff=0.01
#-----------------------------------------------------------------
#Part 1: Image Loading
#load image
img = cv2.imread(path,cv2.IMREAD_GRAYSCALE)
img2= cv2.imread(path)
img3=cv2.cvtColor(img.copy(),cv2.COLOR_GRAY2RGB)
#Image Processing (bilateral filter, sharpen, thresh, negative, closed)
outline=imgfunc.outlining(img)
#---------------------------------------------------------------------------------------------
#Part 2: Finding Valid Contours
#maximum contour shape matching coefficient, valid mean Area
#obtain centre point,hor_dist,ver_dist,shape type,shape dimension
contour_trunc,sum_array,ave_sim_val = imgfunc.contouring(outline,match_coeff)
centpt_array,mean_hor_dist,mean_ver_dist,shape_type,shape_dimension=imgfunc.get_centre(contour_trunc,sum_array,ave_sim_val)
#---------------------------------------------------------------------------------------------
#Part3:Forming Grids
#Assumption: Bubbles are arranged in horizontal x vertical grid (but not oblique)
#form grid as matrix grid
matrix_grid = imgfunc.formgrid(centpt_array,mean_hor_dist,mean_ver_dist)
#form optimised matrix grid
opt_matrix_grid=imgfunc.OptMatFunc(matrix_grid,n_row,n_col)
print opt_matrix_grid
#---------------------------------------------------------------------------------------------
#converting optimised matrix grid into modified matrix for output
mod_matrix_grid=imgfunc.ModMatrGrid(opt_matrix_grid)
#---------------------------------------------------------------------------------------------
# Adding appropriate shapes and centre points into img3
if show_bubble==1:
imgoutput=imgfunc.DrawShape(img3,mod_matrix_grid,shape_type,shape_dimension,bubble_colour,bubble_linethickness)
if show_cntpt==1:
imgoutput=imgfunc.DrawCentrePoint(imgoutput,mod_matrix_grid,cntpt_colour,cntpt_size)
#---------------------------------------------------------------------------------------------
print "----------------------------------------"
#initialise plot
plt.subplot(111),plt.imshow(imgoutput)
plt.title('dilate1 Image'), plt.xticks([]), plt.yticks([])
plt.show()
cv2.waitKey(0)
| gpl-3.0 |
OshynSong/scikit-learn | sklearn/datasets/mlcomp.py | 289 | 3855 | # Copyright (c) 2010 Olivier Grisel <[email protected]>
# License: BSD 3 clause
"""Glue code to load http://mlcomp.org data as a scikit.learn dataset"""
import os
import numbers
from sklearn.datasets.base import load_files
def _load_document_classification(dataset_path, metadata, set_=None, **kwargs):
if set_ is not None:
dataset_path = os.path.join(dataset_path, set_)
return load_files(dataset_path, metadata.get('description'), **kwargs)
LOADERS = {
'DocumentClassification': _load_document_classification,
# TODO: implement the remaining domain formats
}
def load_mlcomp(name_or_id, set_="raw", mlcomp_root=None, **kwargs):
"""Load a datasets as downloaded from http://mlcomp.org
Parameters
----------
name_or_id : the integer id or the string name metadata of the MLComp
dataset to load
set_ : select the portion to load: 'train', 'test' or 'raw'
mlcomp_root : the filesystem path to the root folder where MLComp datasets
        are stored. If mlcomp_root is None, the MLCOMP_DATASETS_HOME
environment variable is looked up instead.
**kwargs : domain specific kwargs to be passed to the dataset loader.
Read more in the :ref:`User Guide <datasets>`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'filenames', the files holding the raw to learn, 'target', the
classification labels (integer index), 'target_names',
the meaning of the labels, and 'DESCR', the full description of the
dataset.
    Note on the lookup process: depending on the type of name_or_id, this
    function will choose between integer id lookup or metadata name lookup by
    looking at the unzipped archives and metadata file.
TODO: implement zip dataset loading too
"""
if mlcomp_root is None:
try:
mlcomp_root = os.environ['MLCOMP_DATASETS_HOME']
except KeyError:
raise ValueError("MLCOMP_DATASETS_HOME env variable is undefined")
mlcomp_root = os.path.expanduser(mlcomp_root)
mlcomp_root = os.path.abspath(mlcomp_root)
mlcomp_root = os.path.normpath(mlcomp_root)
if not os.path.exists(mlcomp_root):
raise ValueError("Could not find folder: " + mlcomp_root)
# dataset lookup
if isinstance(name_or_id, numbers.Integral):
# id lookup
dataset_path = os.path.join(mlcomp_root, str(name_or_id))
else:
# assume name based lookup
dataset_path = None
expected_name_line = "name: " + name_or_id
for dataset in os.listdir(mlcomp_root):
metadata_file = os.path.join(mlcomp_root, dataset, 'metadata')
if not os.path.exists(metadata_file):
continue
with open(metadata_file) as f:
for line in f:
if line.strip() == expected_name_line:
dataset_path = os.path.join(mlcomp_root, dataset)
break
if dataset_path is None:
raise ValueError("Could not find dataset with metadata line: " +
expected_name_line)
# loading the dataset metadata
metadata = dict()
metadata_file = os.path.join(dataset_path, 'metadata')
if not os.path.exists(metadata_file):
raise ValueError(dataset_path + ' is not a valid MLComp dataset')
with open(metadata_file) as f:
for line in f:
if ":" in line:
key, value = line.split(":", 1)
metadata[key.strip()] = value.strip()
    format = metadata.get('format', 'unknown')
loader = LOADERS.get(format)
if loader is None:
raise ValueError("No loader implemented for format: " + format)
return loader(dataset_path, metadata, set_=set_, **kwargs)
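# Illustrative call (the dataset name below is hypothetical; it has to match a
# dataset actually downloaded from mlcomp.org into MLCOMP_DATASETS_HOME):
#
#     news = load_mlcomp('20news-18828', set_='train')
#     print(news.target_names)
#     print(len(news.filenames))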
| bsd-3-clause |
BryanCutler/spark | python/pyspark/ml/feature.py | 2 | 212807 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pyspark import since, keyword_only, SparkContext
from pyspark.ml.linalg import _convert_to_vector
from pyspark.ml.param.shared import HasThreshold, HasThresholds, HasInputCol, HasOutputCol, \
HasInputCols, HasOutputCols, HasHandleInvalid, HasRelativeError, HasFeaturesCol, HasLabelCol, \
HasSeed, HasNumFeatures, HasStepSize, HasMaxIter, TypeConverters, Param, Params
from pyspark.ml.util import JavaMLReadable, JavaMLWritable
from pyspark.ml.wrapper import JavaEstimator, JavaModel, JavaParams, JavaTransformer, _jvm
from pyspark.ml.common import inherit_doc
__all__ = ['Binarizer',
'BucketedRandomProjectionLSH', 'BucketedRandomProjectionLSHModel',
'Bucketizer',
'ChiSqSelector', 'ChiSqSelectorModel',
'CountVectorizer', 'CountVectorizerModel',
'DCT',
'ElementwiseProduct',
'FeatureHasher',
'HashingTF',
'IDF', 'IDFModel',
'Imputer', 'ImputerModel',
'IndexToString',
'Interaction',
'MaxAbsScaler', 'MaxAbsScalerModel',
'MinHashLSH', 'MinHashLSHModel',
'MinMaxScaler', 'MinMaxScalerModel',
'NGram',
'Normalizer',
'OneHotEncoder', 'OneHotEncoderModel',
'PCA', 'PCAModel',
'PolynomialExpansion',
'QuantileDiscretizer',
'RobustScaler', 'RobustScalerModel',
'RegexTokenizer',
'RFormula', 'RFormulaModel',
'SQLTransformer',
'StandardScaler', 'StandardScalerModel',
'StopWordsRemover',
'StringIndexer', 'StringIndexerModel',
'Tokenizer',
'UnivariateFeatureSelector', 'UnivariateFeatureSelectorModel',
'VarianceThresholdSelector', 'VarianceThresholdSelectorModel',
'VectorAssembler',
'VectorIndexer', 'VectorIndexerModel',
'VectorSizeHint',
'VectorSlicer',
'Word2Vec', 'Word2VecModel']
@inherit_doc
class Binarizer(JavaTransformer, HasThreshold, HasThresholds, HasInputCol, HasOutputCol,
HasInputCols, HasOutputCols, JavaMLReadable, JavaMLWritable):
"""
Binarize a column of continuous features given a threshold. Since 3.0.0,
    :py:class:`Binarizer` can map multiple columns at once by setting the :py:attr:`inputCols`
parameter. Note that when both the :py:attr:`inputCol` and :py:attr:`inputCols` parameters
are set, an Exception will be thrown. The :py:attr:`threshold` parameter is used for
single column usage, and :py:attr:`thresholds` is for multiple columns.
.. versionadded:: 1.4.0
Examples
--------
>>> df = spark.createDataFrame([(0.5,)], ["values"])
>>> binarizer = Binarizer(threshold=1.0, inputCol="values", outputCol="features")
>>> binarizer.setThreshold(1.0)
Binarizer...
>>> binarizer.setInputCol("values")
Binarizer...
>>> binarizer.setOutputCol("features")
Binarizer...
>>> binarizer.transform(df).head().features
0.0
>>> binarizer.setParams(outputCol="freqs").transform(df).head().freqs
0.0
>>> params = {binarizer.threshold: -0.5, binarizer.outputCol: "vector"}
>>> binarizer.transform(df, params).head().vector
1.0
>>> binarizerPath = temp_path + "/binarizer"
>>> binarizer.save(binarizerPath)
>>> loadedBinarizer = Binarizer.load(binarizerPath)
>>> loadedBinarizer.getThreshold() == binarizer.getThreshold()
True
>>> loadedBinarizer.transform(df).take(1) == binarizer.transform(df).take(1)
True
>>> df2 = spark.createDataFrame([(0.5, 0.3)], ["values1", "values2"])
>>> binarizer2 = Binarizer(thresholds=[0.0, 1.0])
>>> binarizer2.setInputCols(["values1", "values2"]).setOutputCols(["output1", "output2"])
Binarizer...
>>> binarizer2.transform(df2).show()
+-------+-------+-------+-------+
|values1|values2|output1|output2|
+-------+-------+-------+-------+
| 0.5| 0.3| 1.0| 0.0|
+-------+-------+-------+-------+
...
"""
threshold = Param(Params._dummy(), "threshold",
"Param for threshold used to binarize continuous features. " +
"The features greater than the threshold will be binarized to 1.0. " +
"The features equal to or less than the threshold will be binarized to 0.0",
typeConverter=TypeConverters.toFloat)
thresholds = Param(Params._dummy(), "thresholds",
"Param for array of threshold used to binarize continuous features. " +
"This is for multiple columns input. If transforming multiple columns " +
"and thresholds is not set, but threshold is set, then threshold will " +
"be applied across all columns.",
typeConverter=TypeConverters.toListFloat)
@keyword_only
def __init__(self, *, threshold=0.0, inputCol=None, outputCol=None, thresholds=None,
inputCols=None, outputCols=None):
"""
__init__(self, \\*, threshold=0.0, inputCol=None, outputCol=None, thresholds=None, \
inputCols=None, outputCols=None)
"""
super(Binarizer, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.Binarizer", self.uid)
self._setDefault(threshold=0.0)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.4.0")
def setParams(self, *, threshold=0.0, inputCol=None, outputCol=None, thresholds=None,
inputCols=None, outputCols=None):
"""
setParams(self, \\*, threshold=0.0, inputCol=None, outputCol=None, thresholds=None, \
inputCols=None, outputCols=None)
Sets params for this Binarizer.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("1.4.0")
def setThreshold(self, value):
"""
Sets the value of :py:attr:`threshold`.
"""
return self._set(threshold=value)
@since("3.0.0")
def setThresholds(self, value):
"""
Sets the value of :py:attr:`thresholds`.
"""
return self._set(thresholds=value)
def setInputCol(self, value):
"""
Sets the value of :py:attr:`inputCol`.
"""
return self._set(inputCol=value)
@since("3.0.0")
def setInputCols(self, value):
"""
Sets the value of :py:attr:`inputCols`.
"""
return self._set(inputCols=value)
def setOutputCol(self, value):
"""
Sets the value of :py:attr:`outputCol`.
"""
return self._set(outputCol=value)
@since("3.0.0")
def setOutputCols(self, value):
"""
Sets the value of :py:attr:`outputCols`.
"""
return self._set(outputCols=value)
class _LSHParams(HasInputCol, HasOutputCol):
"""
Mixin for Locality Sensitive Hashing (LSH) algorithm parameters.
"""
numHashTables = Param(Params._dummy(), "numHashTables", "number of hash tables, where " +
"increasing number of hash tables lowers the false negative rate, " +
"and decreasing it improves the running performance.",
typeConverter=TypeConverters.toInt)
def __init__(self, *args):
super(_LSHParams, self).__init__(*args)
self._setDefault(numHashTables=1)
def getNumHashTables(self):
"""
Gets the value of numHashTables or its default value.
"""
return self.getOrDefault(self.numHashTables)
class _LSH(JavaEstimator, _LSHParams, JavaMLReadable, JavaMLWritable):
"""
Mixin for Locality Sensitive Hashing (LSH).
"""
def setNumHashTables(self, value):
"""
Sets the value of :py:attr:`numHashTables`.
"""
return self._set(numHashTables=value)
def setInputCol(self, value):
"""
Sets the value of :py:attr:`inputCol`.
"""
return self._set(inputCol=value)
def setOutputCol(self, value):
"""
Sets the value of :py:attr:`outputCol`.
"""
return self._set(outputCol=value)
class _LSHModel(JavaModel, _LSHParams):
"""
Mixin for Locality Sensitive Hashing (LSH) models.
"""
def setInputCol(self, value):
"""
Sets the value of :py:attr:`inputCol`.
"""
return self._set(inputCol=value)
def setOutputCol(self, value):
"""
Sets the value of :py:attr:`outputCol`.
"""
return self._set(outputCol=value)
def approxNearestNeighbors(self, dataset, key, numNearestNeighbors, distCol="distCol"):
"""
Given a large dataset and an item, approximately find at most k items which have the
closest distance to the item. If the :py:attr:`outputCol` is missing, the method will
transform the data; if the :py:attr:`outputCol` exists, it will use that. This allows
caching of the transformed data when necessary.
Notes
-----
This method is experimental and will likely change behavior in the next release.
Parameters
----------
dataset : :py:class:`pyspark.sql.DataFrame`
The dataset to search for nearest neighbors of the key.
key : :py:class:`pyspark.ml.linalg.Vector`
Feature vector representing the item to search for.
numNearestNeighbors : int
The maximum number of nearest neighbors.
distCol : str
Output column for storing the distance between each result row and the key.
Use "distCol" as default value if it's not specified.
Returns
-------
:py:class:`pyspark.sql.DataFrame`
A dataset containing at most k items closest to the key. A column "distCol" is
added to show the distance between each row and the key.
"""
return self._call_java("approxNearestNeighbors", dataset, key, numNearestNeighbors,
distCol)
def approxSimilarityJoin(self, datasetA, datasetB, threshold, distCol="distCol"):
"""
Join two datasets to approximately find all pairs of rows whose distance are smaller than
the threshold. If the :py:attr:`outputCol` is missing, the method will transform the data;
if the :py:attr:`outputCol` exists, it will use that. This allows caching of the
transformed data when necessary.
Parameters
----------
datasetA : :py:class:`pyspark.sql.DataFrame`
One of the datasets to join.
datasetB : :py:class:`pyspark.sql.DataFrame`
Another dataset to join.
threshold : float
The threshold for the distance of row pairs.
distCol : str, optional
Output column for storing the distance between each pair of rows. Use
"distCol" as default value if it's not specified.
Returns
-------
:py:class:`pyspark.sql.DataFrame`
A joined dataset containing pairs of rows. The original rows are in columns
"datasetA" and "datasetB", and a column "distCol" is added to show the distance
between each pair.
"""
threshold = TypeConverters.toFloat(threshold)
return self._call_java("approxSimilarityJoin", datasetA, datasetB, threshold, distCol)
class _BucketedRandomProjectionLSHParams():
"""
Params for :py:class:`BucketedRandomProjectionLSH` and
:py:class:`BucketedRandomProjectionLSHModel`.
.. versionadded:: 3.0.0
"""
bucketLength = Param(Params._dummy(), "bucketLength", "the length of each hash bucket, " +
"a larger bucket lowers the false negative rate.",
typeConverter=TypeConverters.toFloat)
@since("2.2.0")
def getBucketLength(self):
"""
Gets the value of bucketLength or its default value.
"""
return self.getOrDefault(self.bucketLength)
@inherit_doc
class BucketedRandomProjectionLSH(_LSH, _BucketedRandomProjectionLSHParams,
HasSeed, JavaMLReadable, JavaMLWritable):
"""
LSH class for Euclidean distance metrics.
The input is dense or sparse vectors, each of which represents a point in the Euclidean
distance space. The output will be vectors of configurable dimension. Hash values in the same
dimension are calculated by the same hash function.
.. versionadded:: 2.2.0
Notes
-----
- `Stable Distributions in Wikipedia article on Locality-sensitive hashing \
<https://en.wikipedia.org/wiki/Locality-sensitive_hashing#Stable_distributions>`_
- `Hashing for Similarity Search: A Survey <https://arxiv.org/abs/1408.2927>`_
Examples
--------
>>> from pyspark.ml.linalg import Vectors
>>> from pyspark.sql.functions import col
>>> data = [(0, Vectors.dense([-1.0, -1.0 ]),),
... (1, Vectors.dense([-1.0, 1.0 ]),),
... (2, Vectors.dense([1.0, -1.0 ]),),
... (3, Vectors.dense([1.0, 1.0]),)]
>>> df = spark.createDataFrame(data, ["id", "features"])
>>> brp = BucketedRandomProjectionLSH()
>>> brp.setInputCol("features")
BucketedRandomProjectionLSH...
>>> brp.setOutputCol("hashes")
BucketedRandomProjectionLSH...
>>> brp.setSeed(12345)
BucketedRandomProjectionLSH...
>>> brp.setBucketLength(1.0)
BucketedRandomProjectionLSH...
>>> model = brp.fit(df)
>>> model.getBucketLength()
1.0
>>> model.setOutputCol("hashes")
BucketedRandomProjectionLSHModel...
>>> model.transform(df).head()
Row(id=0, features=DenseVector([-1.0, -1.0]), hashes=[DenseVector([-1.0])])
>>> data2 = [(4, Vectors.dense([2.0, 2.0 ]),),
... (5, Vectors.dense([2.0, 3.0 ]),),
... (6, Vectors.dense([3.0, 2.0 ]),),
... (7, Vectors.dense([3.0, 3.0]),)]
>>> df2 = spark.createDataFrame(data2, ["id", "features"])
>>> model.approxNearestNeighbors(df2, Vectors.dense([1.0, 2.0]), 1).collect()
[Row(id=4, features=DenseVector([2.0, 2.0]), hashes=[DenseVector([1.0])], distCol=1.0)]
>>> model.approxSimilarityJoin(df, df2, 3.0, distCol="EuclideanDistance").select(
... col("datasetA.id").alias("idA"),
... col("datasetB.id").alias("idB"),
... col("EuclideanDistance")).show()
+---+---+-----------------+
|idA|idB|EuclideanDistance|
+---+---+-----------------+
| 3| 6| 2.23606797749979|
+---+---+-----------------+
...
>>> model.approxSimilarityJoin(df, df2, 3, distCol="EuclideanDistance").select(
... col("datasetA.id").alias("idA"),
... col("datasetB.id").alias("idB"),
... col("EuclideanDistance")).show()
+---+---+-----------------+
|idA|idB|EuclideanDistance|
+---+---+-----------------+
| 3| 6| 2.23606797749979|
+---+---+-----------------+
...
>>> brpPath = temp_path + "/brp"
>>> brp.save(brpPath)
>>> brp2 = BucketedRandomProjectionLSH.load(brpPath)
>>> brp2.getBucketLength() == brp.getBucketLength()
True
>>> modelPath = temp_path + "/brp-model"
>>> model.save(modelPath)
>>> model2 = BucketedRandomProjectionLSHModel.load(modelPath)
>>> model.transform(df).head().hashes == model2.transform(df).head().hashes
True
"""
@keyword_only
def __init__(self, *, inputCol=None, outputCol=None, seed=None, numHashTables=1,
bucketLength=None):
"""
__init__(self, \\*, inputCol=None, outputCol=None, seed=None, numHashTables=1, \
bucketLength=None)
"""
super(BucketedRandomProjectionLSH, self).__init__()
self._java_obj = \
self._new_java_obj("org.apache.spark.ml.feature.BucketedRandomProjectionLSH", self.uid)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("2.2.0")
def setParams(self, *, inputCol=None, outputCol=None, seed=None, numHashTables=1,
bucketLength=None):
"""
setParams(self, \\*, inputCol=None, outputCol=None, seed=None, numHashTables=1, \
bucketLength=None)
Sets params for this BucketedRandomProjectionLSH.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("2.2.0")
def setBucketLength(self, value):
"""
Sets the value of :py:attr:`bucketLength`.
"""
return self._set(bucketLength=value)
def setSeed(self, value):
"""
Sets the value of :py:attr:`seed`.
"""
return self._set(seed=value)
def _create_model(self, java_model):
return BucketedRandomProjectionLSHModel(java_model)
class BucketedRandomProjectionLSHModel(_LSHModel, _BucketedRandomProjectionLSHParams,
JavaMLReadable, JavaMLWritable):
r"""
Model fitted by :py:class:`BucketedRandomProjectionLSH`, where multiple random vectors are
stored. The vectors are normalized to be unit vectors and each vector is used in a hash
function: :math:`h_i(x) = floor(r_i \cdot x / bucketLength)` where :math:`r_i` is the
i-th random unit vector. The number of buckets will be `(max L2 norm of input vectors) /
bucketLength`.
.. versionadded:: 2.2.0
"""
@inherit_doc
class Bucketizer(JavaTransformer, HasInputCol, HasOutputCol, HasInputCols, HasOutputCols,
HasHandleInvalid, JavaMLReadable, JavaMLWritable):
"""
Maps a column of continuous features to a column of feature buckets. Since 3.0.0,
:py:class:`Bucketizer` can map multiple columns at once by setting the :py:attr:`inputCols`
parameter. Note that when both the :py:attr:`inputCol` and :py:attr:`inputCols` parameters
are set, an Exception will be thrown. The :py:attr:`splits` parameter is only used for single
column usage, and :py:attr:`splitsArray` is for multiple columns.
.. versionadded:: 1.4.0
Examples
--------
>>> values = [(0.1, 0.0), (0.4, 1.0), (1.2, 1.3), (1.5, float("nan")),
... (float("nan"), 1.0), (float("nan"), 0.0)]
>>> df = spark.createDataFrame(values, ["values1", "values2"])
>>> bucketizer = Bucketizer()
>>> bucketizer.setSplits([-float("inf"), 0.5, 1.4, float("inf")])
Bucketizer...
>>> bucketizer.setInputCol("values1")
Bucketizer...
>>> bucketizer.setOutputCol("buckets")
Bucketizer...
>>> bucketed = bucketizer.setHandleInvalid("keep").transform(df).collect()
>>> bucketed = bucketizer.setHandleInvalid("keep").transform(df.select("values1"))
>>> bucketed.show(truncate=False)
+-------+-------+
|values1|buckets|
+-------+-------+
|0.1 |0.0 |
|0.4 |0.0 |
|1.2 |1.0 |
|1.5 |2.0 |
|NaN |3.0 |
|NaN |3.0 |
+-------+-------+
...
>>> bucketizer.setParams(outputCol="b").transform(df).head().b
0.0
>>> bucketizerPath = temp_path + "/bucketizer"
>>> bucketizer.save(bucketizerPath)
>>> loadedBucketizer = Bucketizer.load(bucketizerPath)
>>> loadedBucketizer.getSplits() == bucketizer.getSplits()
True
>>> loadedBucketizer.transform(df).take(1) == bucketizer.transform(df).take(1)
True
>>> bucketed = bucketizer.setHandleInvalid("skip").transform(df).collect()
>>> len(bucketed)
4
>>> bucketizer2 = Bucketizer(splitsArray=
... [[-float("inf"), 0.5, 1.4, float("inf")], [-float("inf"), 0.5, float("inf")]],
... inputCols=["values1", "values2"], outputCols=["buckets1", "buckets2"])
>>> bucketed2 = bucketizer2.setHandleInvalid("keep").transform(df)
>>> bucketed2.show(truncate=False)
+-------+-------+--------+--------+
|values1|values2|buckets1|buckets2|
+-------+-------+--------+--------+
|0.1 |0.0 |0.0 |0.0 |
|0.4 |1.0 |0.0 |1.0 |
|1.2 |1.3 |1.0 |1.0 |
|1.5 |NaN |2.0 |2.0 |
|NaN |1.0 |3.0 |1.0 |
|NaN |0.0 |3.0 |0.0 |
+-------+-------+--------+--------+
...
"""
splits = \
Param(Params._dummy(), "splits",
"Split points for mapping continuous features into buckets. With n+1 splits, " +
"there are n buckets. A bucket defined by splits x,y holds values in the " +
"range [x,y) except the last bucket, which also includes y. The splits " +
"should be of length >= 3 and strictly increasing. Values at -inf, inf must be " +
"explicitly provided to cover all Double values; otherwise, values outside the " +
"splits specified will be treated as errors.",
typeConverter=TypeConverters.toListFloat)
handleInvalid = Param(Params._dummy(), "handleInvalid", "how to handle invalid entries "
"containing NaN values. Values outside the splits will always be treated "
"as errors. Options are 'skip' (filter out rows with invalid values), " +
"'error' (throw an error), or 'keep' (keep invalid values in a " +
"special additional bucket). Note that in the multiple column " +
"case, the invalid handling is applied to all columns. That said " +
"for 'error' it will throw an error if any invalids are found in " +
"any column, for 'skip' it will skip rows with any invalids in " +
"any columns, etc.",
typeConverter=TypeConverters.toString)
splitsArray = Param(Params._dummy(), "splitsArray", "The array of split points for mapping " +
"continuous features into buckets for multiple columns. For each input " +
"column, with n+1 splits, there are n buckets. A bucket defined by " +
"splits x,y holds values in the range [x,y) except the last bucket, " +
"which also includes y. The splits should be of length >= 3 and " +
"strictly increasing. Values at -inf, inf must be explicitly provided " +
"to cover all Double values; otherwise, values outside the splits " +
"specified will be treated as errors.",
typeConverter=TypeConverters.toListListFloat)
@keyword_only
def __init__(self, *, splits=None, inputCol=None, outputCol=None, handleInvalid="error",
splitsArray=None, inputCols=None, outputCols=None):
"""
__init__(self, \\*, splits=None, inputCol=None, outputCol=None, handleInvalid="error", \
splitsArray=None, inputCols=None, outputCols=None)
"""
super(Bucketizer, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.Bucketizer", self.uid)
self._setDefault(handleInvalid="error")
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.4.0")
def setParams(self, *, splits=None, inputCol=None, outputCol=None, handleInvalid="error",
splitsArray=None, inputCols=None, outputCols=None):
"""
setParams(self, \\*, splits=None, inputCol=None, outputCol=None, handleInvalid="error", \
splitsArray=None, inputCols=None, outputCols=None)
Sets params for this Bucketizer.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("1.4.0")
def setSplits(self, value):
"""
Sets the value of :py:attr:`splits`.
"""
return self._set(splits=value)
@since("1.4.0")
def getSplits(self):
"""
Gets the value of splits or its default value.
"""
return self.getOrDefault(self.splits)
@since("3.0.0")
def setSplitsArray(self, value):
"""
Sets the value of :py:attr:`splitsArray`.
"""
return self._set(splitsArray=value)
@since("3.0.0")
def getSplitsArray(self):
"""
Gets the array of split points or its default value.
"""
return self.getOrDefault(self.splitsArray)
def setInputCol(self, value):
"""
Sets the value of :py:attr:`inputCol`.
"""
return self._set(inputCol=value)
@since("3.0.0")
def setInputCols(self, value):
"""
Sets the value of :py:attr:`inputCols`.
"""
return self._set(inputCols=value)
def setOutputCol(self, value):
"""
Sets the value of :py:attr:`outputCol`.
"""
return self._set(outputCol=value)
@since("3.0.0")
def setOutputCols(self, value):
"""
Sets the value of :py:attr:`outputCols`.
"""
return self._set(outputCols=value)
def setHandleInvalid(self, value):
"""
Sets the value of :py:attr:`handleInvalid`.
"""
return self._set(handleInvalid=value)
class _CountVectorizerParams(JavaParams, HasInputCol, HasOutputCol):
"""
Params for :py:class:`CountVectorizer` and :py:class:`CountVectorizerModel`.
"""
minTF = Param(
Params._dummy(), "minTF", "Filter to ignore rare words in" +
" a document. For each document, terms with frequency/count less than the given" +
" threshold are ignored. If this is an integer >= 1, then this specifies a count (of" +
" times the term must appear in the document); if this is a double in [0,1), then this " +
"specifies a fraction (out of the document's token count). Note that the parameter is " +
"only used in transform of CountVectorizerModel and does not affect fitting. Default 1.0",
typeConverter=TypeConverters.toFloat)
minDF = Param(
Params._dummy(), "minDF", "Specifies the minimum number of" +
" different documents a term must appear in to be included in the vocabulary." +
" If this is an integer >= 1, this specifies the number of documents the term must" +
" appear in; if this is a double in [0,1), then this specifies the fraction of documents." +
" Default 1.0", typeConverter=TypeConverters.toFloat)
maxDF = Param(
Params._dummy(), "maxDF", "Specifies the maximum number of" +
" different documents a term could appear in to be included in the vocabulary." +
" A term that appears more than the threshold will be ignored. If this is an" +
" integer >= 1, this specifies the maximum number of documents the term could appear in;" +
" if this is a double in [0,1), then this specifies the maximum" +
" fraction of documents the term could appear in." +
" Default (2^63) - 1", typeConverter=TypeConverters.toFloat)
vocabSize = Param(
Params._dummy(), "vocabSize", "max size of the vocabulary. Default 1 << 18.",
typeConverter=TypeConverters.toInt)
binary = Param(
Params._dummy(), "binary", "Binary toggle to control the output vector values." +
" If True, all nonzero counts (after minTF filter applied) are set to 1. This is useful" +
" for discrete probabilistic models that model binary events rather than integer counts." +
" Default False", typeConverter=TypeConverters.toBoolean)
def __init__(self, *args):
super(_CountVectorizerParams, self).__init__(*args)
self._setDefault(minTF=1.0, minDF=1.0, maxDF=2 ** 63 - 1, vocabSize=1 << 18, binary=False)
@since("1.6.0")
def getMinTF(self):
"""
Gets the value of minTF or its default value.
"""
return self.getOrDefault(self.minTF)
@since("1.6.0")
def getMinDF(self):
"""
Gets the value of minDF or its default value.
"""
return self.getOrDefault(self.minDF)
@since("2.4.0")
def getMaxDF(self):
"""
Gets the value of maxDF or its default value.
"""
return self.getOrDefault(self.maxDF)
@since("1.6.0")
def getVocabSize(self):
"""
Gets the value of vocabSize or its default value.
"""
return self.getOrDefault(self.vocabSize)
@since("2.0.0")
def getBinary(self):
"""
Gets the value of binary or its default value.
"""
return self.getOrDefault(self.binary)
@inherit_doc
class CountVectorizer(JavaEstimator, _CountVectorizerParams, JavaMLReadable, JavaMLWritable):
"""
Extracts a vocabulary from document collections and generates a :py:class:`CountVectorizerModel`.
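As an illustration, the second document in the examples below, ["a", "b", "b", "c", "a"], has
term counts 2, 2 and 1 over the learned 3-term vocabulary, which appear as the sparse vector
(3,[0,1,2],[2.0,2.0,1.0]).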
.. versionadded:: 1.6.0
Examples
--------
>>> df = spark.createDataFrame(
... [(0, ["a", "b", "c"]), (1, ["a", "b", "b", "c", "a"])],
... ["label", "raw"])
>>> cv = CountVectorizer()
>>> cv.setInputCol("raw")
CountVectorizer...
>>> cv.setOutputCol("vectors")
CountVectorizer...
>>> model = cv.fit(df)
>>> model.setInputCol("raw")
CountVectorizerModel...
>>> model.transform(df).show(truncate=False)
+-----+---------------+-------------------------+
|label|raw |vectors |
+-----+---------------+-------------------------+
|0 |[a, b, c] |(3,[0,1,2],[1.0,1.0,1.0])|
|1 |[a, b, b, c, a]|(3,[0,1,2],[2.0,2.0,1.0])|
+-----+---------------+-------------------------+
...
>>> sorted(model.vocabulary) == ['a', 'b', 'c']
True
>>> countVectorizerPath = temp_path + "/count-vectorizer"
>>> cv.save(countVectorizerPath)
>>> loadedCv = CountVectorizer.load(countVectorizerPath)
>>> loadedCv.getMinDF() == cv.getMinDF()
True
>>> loadedCv.getMinTF() == cv.getMinTF()
True
>>> loadedCv.getVocabSize() == cv.getVocabSize()
True
>>> modelPath = temp_path + "/count-vectorizer-model"
>>> model.save(modelPath)
>>> loadedModel = CountVectorizerModel.load(modelPath)
>>> loadedModel.vocabulary == model.vocabulary
True
>>> loadedModel.transform(df).take(1) == model.transform(df).take(1)
True
>>> fromVocabModel = CountVectorizerModel.from_vocabulary(["a", "b", "c"],
... inputCol="raw", outputCol="vectors")
>>> fromVocabModel.transform(df).show(truncate=False)
+-----+---------------+-------------------------+
|label|raw |vectors |
+-----+---------------+-------------------------+
|0 |[a, b, c] |(3,[0,1,2],[1.0,1.0,1.0])|
|1 |[a, b, b, c, a]|(3,[0,1,2],[2.0,2.0,1.0])|
+-----+---------------+-------------------------+
...
"""
@keyword_only
def __init__(self, *, minTF=1.0, minDF=1.0, maxDF=2 ** 63 - 1, vocabSize=1 << 18,
binary=False, inputCol=None, outputCol=None):
"""
__init__(self, \\*, minTF=1.0, minDF=1.0, maxDF=2 ** 63 - 1, vocabSize=1 << 18,\
binary=False, inputCol=None,outputCol=None)
"""
super(CountVectorizer, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.CountVectorizer",
self.uid)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.6.0")
def setParams(self, *, minTF=1.0, minDF=1.0, maxDF=2 ** 63 - 1, vocabSize=1 << 18,
binary=False, inputCol=None, outputCol=None):
"""
setParams(self, \\*, minTF=1.0, minDF=1.0, maxDF=2 ** 63 - 1, vocabSize=1 << 18,\
binary=False, inputCol=None, outputCol=None)
Set the params for the CountVectorizer
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("1.6.0")
def setMinTF(self, value):
"""
Sets the value of :py:attr:`minTF`.
"""
return self._set(minTF=value)
@since("1.6.0")
def setMinDF(self, value):
"""
Sets the value of :py:attr:`minDF`.
"""
return self._set(minDF=value)
@since("2.4.0")
def setMaxDF(self, value):
"""
Sets the value of :py:attr:`maxDF`.
"""
return self._set(maxDF=value)
@since("1.6.0")
def setVocabSize(self, value):
"""
Sets the value of :py:attr:`vocabSize`.
"""
return self._set(vocabSize=value)
@since("2.0.0")
def setBinary(self, value):
"""
Sets the value of :py:attr:`binary`.
"""
return self._set(binary=value)
def setInputCol(self, value):
"""
Sets the value of :py:attr:`inputCol`.
"""
return self._set(inputCol=value)
def setOutputCol(self, value):
"""
Sets the value of :py:attr:`outputCol`.
"""
return self._set(outputCol=value)
def _create_model(self, java_model):
return CountVectorizerModel(java_model)
@inherit_doc
class CountVectorizerModel(JavaModel, _CountVectorizerParams, JavaMLReadable, JavaMLWritable):
"""
Model fitted by :py:class:`CountVectorizer`.
.. versionadded:: 1.6.0
"""
@since("3.0.0")
def setInputCol(self, value):
"""
Sets the value of :py:attr:`inputCol`.
"""
return self._set(inputCol=value)
@since("3.0.0")
def setOutputCol(self, value):
"""
Sets the value of :py:attr:`outputCol`.
"""
return self._set(outputCol=value)
@classmethod
@since("2.4.0")
def from_vocabulary(cls, vocabulary, inputCol, outputCol=None, minTF=None, binary=None):
"""
Construct the model directly from a vocabulary list of strings,
requires an active SparkContext.
"""
sc = SparkContext._active_spark_context
java_class = sc._gateway.jvm.java.lang.String
jvocab = CountVectorizerModel._new_java_array(vocabulary, java_class)
model = CountVectorizerModel._create_from_java_class(
"org.apache.spark.ml.feature.CountVectorizerModel", jvocab)
model.setInputCol(inputCol)
if outputCol is not None:
model.setOutputCol(outputCol)
if minTF is not None:
model.setMinTF(minTF)
if binary is not None:
model.setBinary(binary)
model._set(vocabSize=len(vocabulary))
return model
@property
@since("1.6.0")
def vocabulary(self):
"""
An array of terms in the vocabulary.
"""
return self._call_java("vocabulary")
@since("2.4.0")
def setMinTF(self, value):
"""
Sets the value of :py:attr:`minTF`.
"""
return self._set(minTF=value)
@since("2.4.0")
def setBinary(self, value):
"""
Sets the value of :py:attr:`binary`.
"""
return self._set(binary=value)
@inherit_doc
class DCT(JavaTransformer, HasInputCol, HasOutputCol, JavaMLReadable, JavaMLWritable):
"""
A feature transformer that takes the 1D discrete cosine transform
of a real vector. No zero padding is performed on the input vector.
It returns a real vector of the same length representing the DCT.
The return vector is scaled such that the transform matrix is
unitary (aka scaled DCT-II).
.. versionadded:: 1.6.0
Notes
-----
`More information on Wikipedia \
<https://en.wikipedia.org/wiki/Discrete_cosine_transform#DCT-II>`_.
Examples
--------
>>> from pyspark.ml.linalg import Vectors
>>> df1 = spark.createDataFrame([(Vectors.dense([5.0, 8.0, 6.0]),)], ["vec"])
>>> dct = DCT()
>>> dct.setInverse(False)
DCT...
>>> dct.setInputCol("vec")
DCT...
>>> dct.setOutputCol("resultVec")
DCT...
>>> df2 = dct.transform(df1)
>>> df2.head().resultVec
DenseVector([10.969..., -0.707..., -2.041...])
>>> df3 = DCT(inverse=True, inputCol="resultVec", outputCol="origVec").transform(df2)
>>> df3.head().origVec
DenseVector([5.0, 8.0, 6.0])
>>> dctPath = temp_path + "/dct"
>>> dct.save(dctPath)
>>> loadedDtc = DCT.load(dctPath)
>>> loadedDtc.transform(df1).take(1) == dct.transform(df1).take(1)
True
>>> loadedDtc.getInverse()
False
"""
inverse = Param(Params._dummy(), "inverse", "Set transformer to perform inverse DCT, " +
"default False.", typeConverter=TypeConverters.toBoolean)
@keyword_only
def __init__(self, *, inverse=False, inputCol=None, outputCol=None):
"""
__init__(self, \\*, inverse=False, inputCol=None, outputCol=None)
"""
super(DCT, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.DCT", self.uid)
self._setDefault(inverse=False)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.6.0")
def setParams(self, *, inverse=False, inputCol=None, outputCol=None):
"""
setParams(self, \\*, inverse=False, inputCol=None, outputCol=None)
Sets params for this DCT.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("1.6.0")
def setInverse(self, value):
"""
Sets the value of :py:attr:`inverse`.
"""
return self._set(inverse=value)
@since("1.6.0")
def getInverse(self):
"""
Gets the value of inverse or its default value.
"""
return self.getOrDefault(self.inverse)
def setInputCol(self, value):
"""
Sets the value of :py:attr:`inputCol`.
"""
return self._set(inputCol=value)
def setOutputCol(self, value):
"""
Sets the value of :py:attr:`outputCol`.
"""
return self._set(outputCol=value)
@inherit_doc
class ElementwiseProduct(JavaTransformer, HasInputCol, HasOutputCol, JavaMLReadable,
JavaMLWritable):
"""
Outputs the Hadamard product (i.e., the element-wise product) of each input vector
with a provided "weight" vector. In other words, it scales each column of the dataset
by a scalar multiplier.
.. versionadded:: 1.5.0
Examples
--------
>>> from pyspark.ml.linalg import Vectors
>>> df = spark.createDataFrame([(Vectors.dense([2.0, 1.0, 3.0]),)], ["values"])
>>> ep = ElementwiseProduct()
>>> ep.setScalingVec(Vectors.dense([1.0, 2.0, 3.0]))
ElementwiseProduct...
>>> ep.setInputCol("values")
ElementwiseProduct...
>>> ep.setOutputCol("eprod")
ElementwiseProduct...
>>> ep.transform(df).head().eprod
DenseVector([2.0, 2.0, 9.0])
>>> ep.setParams(scalingVec=Vectors.dense([2.0, 3.0, 5.0])).transform(df).head().eprod
DenseVector([4.0, 3.0, 15.0])
>>> elementwiseProductPath = temp_path + "/elementwise-product"
>>> ep.save(elementwiseProductPath)
>>> loadedEp = ElementwiseProduct.load(elementwiseProductPath)
>>> loadedEp.getScalingVec() == ep.getScalingVec()
True
>>> loadedEp.transform(df).take(1) == ep.transform(df).take(1)
True
"""
scalingVec = Param(Params._dummy(), "scalingVec", "Vector for hadamard product.",
typeConverter=TypeConverters.toVector)
@keyword_only
def __init__(self, *, scalingVec=None, inputCol=None, outputCol=None):
"""
__init__(self, \\*, scalingVec=None, inputCol=None, outputCol=None)
"""
super(ElementwiseProduct, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.ElementwiseProduct",
self.uid)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.5.0")
def setParams(self, *, scalingVec=None, inputCol=None, outputCol=None):
"""
setParams(self, \\*, scalingVec=None, inputCol=None, outputCol=None)
Sets params for this ElementwiseProduct.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("2.0.0")
def setScalingVec(self, value):
"""
Sets the value of :py:attr:`scalingVec`.
"""
return self._set(scalingVec=value)
@since("2.0.0")
def getScalingVec(self):
"""
Gets the value of scalingVec or its default value.
"""
return self.getOrDefault(self.scalingVec)
def setInputCol(self, value):
"""
Sets the value of :py:attr:`inputCol`.
"""
return self._set(inputCol=value)
def setOutputCol(self, value):
"""
Sets the value of :py:attr:`outputCol`.
"""
return self._set(outputCol=value)
@inherit_doc
class FeatureHasher(JavaTransformer, HasInputCols, HasOutputCol, HasNumFeatures, JavaMLReadable,
JavaMLWritable):
"""
Feature hashing projects a set of categorical or numerical features into a feature vector of
specified dimension (typically substantially smaller than that of the original feature
space). This is done using the hashing trick (https://en.wikipedia.org/wiki/Feature_hashing)
to map features to indices in the feature vector.
The FeatureHasher transformer operates on multiple columns. Each column may contain either
numeric or categorical features. Behavior and handling of column data types is as follows:
* Numeric columns:
For numeric features, the hash value of the column name is used to map the
feature value to its index in the feature vector. By default, numeric features
are not treated as categorical (even when they are integers). To treat them
as categorical, specify the relevant columns in `categoricalCols`.
* String columns:
For categorical features, the hash value of the string "column_name=value"
is used to map to the vector index, with an indicator value of `1.0`.
Thus, categorical features are "one-hot" encoded
(similarly to using :py:class:`OneHotEncoder` with `dropLast=false`).
* Boolean columns:
Boolean values are treated in the same way as string columns. That is,
boolean features are represented as "column_name=true" or "column_name=false",
with an indicator value of `1.0`.
Null (missing) values are ignored (implicitly zero in the resulting feature vector).
Since a simple modulo is used to transform the hash function to a vector index,
it is advisable to use a power of two as the `numFeatures` parameter;
otherwise the features will not be mapped evenly to the vector indices.
.. versionadded:: 2.3.0
Examples
--------
>>> data = [(2.0, True, "1", "foo"), (3.0, False, "2", "bar")]
>>> cols = ["real", "bool", "stringNum", "string"]
>>> df = spark.createDataFrame(data, cols)
>>> hasher = FeatureHasher()
>>> hasher.setInputCols(cols)
FeatureHasher...
>>> hasher.setOutputCol("features")
FeatureHasher...
>>> hasher.transform(df).head().features
SparseVector(262144, {174475: 2.0, 247670: 1.0, 257907: 1.0, 262126: 1.0})
>>> hasher.setCategoricalCols(["real"]).transform(df).head().features
SparseVector(262144, {171257: 1.0, 247670: 1.0, 257907: 1.0, 262126: 1.0})
>>> hasherPath = temp_path + "/hasher"
>>> hasher.save(hasherPath)
>>> loadedHasher = FeatureHasher.load(hasherPath)
>>> loadedHasher.getNumFeatures() == hasher.getNumFeatures()
True
>>> loadedHasher.transform(df).head().features == hasher.transform(df).head().features
True
"""
categoricalCols = Param(Params._dummy(), "categoricalCols",
"numeric columns to treat as categorical",
typeConverter=TypeConverters.toListString)
@keyword_only
def __init__(self, *, numFeatures=1 << 18, inputCols=None, outputCol=None,
categoricalCols=None):
"""
__init__(self, \\*, numFeatures=1 << 18, inputCols=None, outputCol=None, \
categoricalCols=None)
"""
super(FeatureHasher, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.FeatureHasher", self.uid)
self._setDefault(numFeatures=1 << 18)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("2.3.0")
def setParams(self, *, numFeatures=1 << 18, inputCols=None, outputCol=None,
categoricalCols=None):
"""
setParams(self, \\*, numFeatures=1 << 18, inputCols=None, outputCol=None, \
categoricalCols=None)
Sets params for this FeatureHasher.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("2.3.0")
def setCategoricalCols(self, value):
"""
Sets the value of :py:attr:`categoricalCols`.
"""
return self._set(categoricalCols=value)
@since("2.3.0")
def getCategoricalCols(self):
"""
Gets the value of categoricalCols or its default value.
"""
return self.getOrDefault(self.categoricalCols)
def setInputCols(self, value):
"""
Sets the value of :py:attr:`inputCols`.
"""
return self._set(inputCols=value)
def setOutputCol(self, value):
"""
Sets the value of :py:attr:`outputCol`.
"""
return self._set(outputCol=value)
def setNumFeatures(self, value):
"""
Sets the value of :py:attr:`numFeatures`.
"""
return self._set(numFeatures=value)
@inherit_doc
class HashingTF(JavaTransformer, HasInputCol, HasOutputCol, HasNumFeatures, JavaMLReadable,
JavaMLWritable):
"""
Maps a sequence of terms to their term frequencies using the hashing trick.
Currently we use Austin Appleby's MurmurHash 3 algorithm (MurmurHash3_x86_32)
to calculate the hash code value for the term object.
Since a simple modulo is used to transform the hash function to a column index,
it is advisable to use a power of two as the numFeatures parameter;
otherwise the features will not be mapped evenly to the columns.
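As an illustration, with numFeatures = 10 as in the examples below, each term's hash value
modulo 10 gives its column index; for instance the term "b" maps to index 5 (see ``indexOf``
below).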
.. versionadded:: 1.3.0
Examples
--------
>>> df = spark.createDataFrame([(["a", "b", "c"],)], ["words"])
>>> hashingTF = HashingTF(inputCol="words", outputCol="features")
>>> hashingTF.setNumFeatures(10)
HashingTF...
>>> hashingTF.transform(df).head().features
SparseVector(10, {5: 1.0, 7: 1.0, 8: 1.0})
>>> hashingTF.setParams(outputCol="freqs").transform(df).head().freqs
SparseVector(10, {5: 1.0, 7: 1.0, 8: 1.0})
>>> params = {hashingTF.numFeatures: 5, hashingTF.outputCol: "vector"}
>>> hashingTF.transform(df, params).head().vector
SparseVector(5, {0: 1.0, 2: 1.0, 3: 1.0})
>>> hashingTFPath = temp_path + "/hashing-tf"
>>> hashingTF.save(hashingTFPath)
>>> loadedHashingTF = HashingTF.load(hashingTFPath)
>>> loadedHashingTF.getNumFeatures() == hashingTF.getNumFeatures()
True
>>> loadedHashingTF.transform(df).take(1) == hashingTF.transform(df).take(1)
True
>>> hashingTF.indexOf("b")
5
"""
binary = Param(Params._dummy(), "binary", "If True, all non zero counts are set to 1. " +
"This is useful for discrete probabilistic models that model binary events " +
"rather than integer counts. Default False.",
typeConverter=TypeConverters.toBoolean)
@keyword_only
def __init__(self, *, numFeatures=1 << 18, binary=False, inputCol=None, outputCol=None):
"""
__init__(self, \\*, numFeatures=1 << 18, binary=False, inputCol=None, outputCol=None)
"""
super(HashingTF, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.HashingTF", self.uid)
self._setDefault(numFeatures=1 << 18, binary=False)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.3.0")
def setParams(self, *, numFeatures=1 << 18, binary=False, inputCol=None, outputCol=None):
"""
setParams(self, \\*, numFeatures=1 << 18, binary=False, inputCol=None, outputCol=None)
Sets params for this HashingTF.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("2.0.0")
def setBinary(self, value):
"""
Sets the value of :py:attr:`binary`.
"""
return self._set(binary=value)
@since("2.0.0")
def getBinary(self):
"""
Gets the value of binary or its default value.
"""
return self.getOrDefault(self.binary)
def setInputCol(self, value):
"""
Sets the value of :py:attr:`inputCol`.
"""
return self._set(inputCol=value)
def setOutputCol(self, value):
"""
Sets the value of :py:attr:`outputCol`.
"""
return self._set(outputCol=value)
def setNumFeatures(self, value):
"""
Sets the value of :py:attr:`numFeatures`.
"""
return self._set(numFeatures=value)
@since("3.0.0")
def indexOf(self, term):
"""
Returns the index of the input term.
"""
self._transfer_params_to_java()
return self._java_obj.indexOf(term)
class _IDFParams(HasInputCol, HasOutputCol):
"""
Params for :py:class:`IDF` and :py:class:`IDFModel`.
.. versionadded:: 3.0.0
"""
minDocFreq = Param(Params._dummy(), "minDocFreq",
"minimum number of documents in which a term should appear for filtering",
typeConverter=TypeConverters.toInt)
@since("1.4.0")
def getMinDocFreq(self):
"""
Gets the value of minDocFreq or its default value.
"""
return self.getOrDefault(self.minDocFreq)
def __init__(self, *args):
super(_IDFParams, self).__init__(*args)
self._setDefault(minDocFreq=0)
@inherit_doc
class IDF(JavaEstimator, _IDFParams, JavaMLReadable, JavaMLWritable):
"""
Compute the Inverse Document Frequency (IDF) given a collection of documents.
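As an illustration, the weights in the examples below are consistent with the smoothed formula
idf(t) = log((m + 1) / (d(t) + 1)), where m is the number of documents and d(t) is the number
of documents containing term t; e.g. log(4 / 3) = 0.2877... for a term appearing in 2 of the 3
documents when minDocFreq is 1.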
.. versionadded:: 1.4.0
Examples
--------
>>> from pyspark.ml.linalg import DenseVector
>>> df = spark.createDataFrame([(DenseVector([1.0, 2.0]),),
... (DenseVector([0.0, 1.0]),), (DenseVector([3.0, 0.2]),)], ["tf"])
>>> idf = IDF(minDocFreq=3)
>>> idf.setInputCol("tf")
IDF...
>>> idf.setOutputCol("idf")
IDF...
>>> model = idf.fit(df)
>>> model.setOutputCol("idf")
IDFModel...
>>> model.getMinDocFreq()
3
>>> model.idf
DenseVector([0.0, 0.0])
>>> model.docFreq
[0, 3]
>>> model.numDocs == df.count()
True
>>> model.transform(df).head().idf
DenseVector([0.0, 0.0])
>>> idf.setParams(outputCol="freqs").fit(df).transform(df).collect()[1].freqs
DenseVector([0.0, 0.0])
>>> params = {idf.minDocFreq: 1, idf.outputCol: "vector"}
>>> idf.fit(df, params).transform(df).head().vector
DenseVector([0.2877, 0.0])
>>> idfPath = temp_path + "/idf"
>>> idf.save(idfPath)
>>> loadedIdf = IDF.load(idfPath)
>>> loadedIdf.getMinDocFreq() == idf.getMinDocFreq()
True
>>> modelPath = temp_path + "/idf-model"
>>> model.save(modelPath)
>>> loadedModel = IDFModel.load(modelPath)
>>> loadedModel.transform(df).head().idf == model.transform(df).head().idf
True
"""
@keyword_only
def __init__(self, *, minDocFreq=0, inputCol=None, outputCol=None):
"""
__init__(self, \\*, minDocFreq=0, inputCol=None, outputCol=None)
"""
super(IDF, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.IDF", self.uid)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.4.0")
def setParams(self, *, minDocFreq=0, inputCol=None, outputCol=None):
"""
setParams(self, \\*, minDocFreq=0, inputCol=None, outputCol=None)
Sets params for this IDF.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("1.4.0")
def setMinDocFreq(self, value):
"""
Sets the value of :py:attr:`minDocFreq`.
"""
return self._set(minDocFreq=value)
def setInputCol(self, value):
"""
Sets the value of :py:attr:`inputCol`.
"""
return self._set(inputCol=value)
def setOutputCol(self, value):
"""
Sets the value of :py:attr:`outputCol`.
"""
return self._set(outputCol=value)
def _create_model(self, java_model):
return IDFModel(java_model)
class IDFModel(JavaModel, _IDFParams, JavaMLReadable, JavaMLWritable):
"""
Model fitted by :py:class:`IDF`.
.. versionadded:: 1.4.0
"""
@since("3.0.0")
def setInputCol(self, value):
"""
Sets the value of :py:attr:`inputCol`.
"""
return self._set(inputCol=value)
@since("3.0.0")
def setOutputCol(self, value):
"""
Sets the value of :py:attr:`outputCol`.
"""
return self._set(outputCol=value)
@property
@since("2.0.0")
def idf(self):
"""
Returns the IDF vector.
"""
return self._call_java("idf")
@property
@since("3.0.0")
def docFreq(self):
"""
Returns the document frequency.
"""
return self._call_java("docFreq")
@property
@since("3.0.0")
def numDocs(self):
"""
Returns the number of documents evaluated to compute the IDF.
"""
return self._call_java("numDocs")
class _ImputerParams(HasInputCol, HasInputCols, HasOutputCol, HasOutputCols, HasRelativeError):
"""
Params for :py:class:`Imputer` and :py:class:`ImputerModel`.
.. versionadded:: 3.0.0
"""
strategy = Param(Params._dummy(), "strategy",
"strategy for imputation. If mean, then replace missing values using the mean "
"value of the feature. If median, then replace missing values using the "
"median value of the feature. If mode, then replace missing using the most "
"frequent value of the feature.",
typeConverter=TypeConverters.toString)
missingValue = Param(Params._dummy(), "missingValue",
"The placeholder for the missing values. All occurrences of missingValue "
"will be imputed.", typeConverter=TypeConverters.toFloat)
def __init__(self, *args):
super(_ImputerParams, self).__init__(*args)
self._setDefault(strategy="mean", missingValue=float("nan"), relativeError=0.001)
@since("2.2.0")
def getStrategy(self):
"""
Gets the value of :py:attr:`strategy` or its default value.
"""
return self.getOrDefault(self.strategy)
@since("2.2.0")
def getMissingValue(self):
"""
Gets the value of :py:attr:`missingValue` or its default value.
"""
return self.getOrDefault(self.missingValue)
@inherit_doc
class Imputer(JavaEstimator, _ImputerParams, JavaMLReadable, JavaMLWritable):
"""
Imputation estimator for completing missing values, using the mean, median or mode
of the columns in which the missing values are located. The input columns should be of
numeric type. Currently Imputer does not support categorical features and
possibly creates incorrect values for a categorical feature.
Note that the mean/median/mode value is computed after filtering out missing values.
All Null values in the input columns are treated as missing, and so are also imputed. For
computing median, :py:meth:`pyspark.sql.DataFrame.approxQuantile` is used with a
relative error of `0.001`.
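As an illustration, with the default "mean" strategy and the DataFrame in the examples below,
the surrogate for column ``a`` is the mean of its non-missing values,
(1.0 + 2.0 + 4.0 + 5.0) / 4 = 3.0, which then replaces every missing entry in ``a``.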
.. versionadded:: 2.2.0
Examples
--------
>>> df = spark.createDataFrame([(1.0, float("nan")), (2.0, float("nan")), (float("nan"), 3.0),
... (4.0, 4.0), (5.0, 5.0)], ["a", "b"])
>>> imputer = Imputer()
>>> imputer.setInputCols(["a", "b"])
Imputer...
>>> imputer.setOutputCols(["out_a", "out_b"])
Imputer...
>>> imputer.getRelativeError()
0.001
>>> model = imputer.fit(df)
>>> model.setInputCols(["a", "b"])
ImputerModel...
>>> model.getStrategy()
'mean'
>>> model.surrogateDF.show()
+---+---+
| a| b|
+---+---+
|3.0|4.0|
+---+---+
...
>>> model.transform(df).show()
+---+---+-----+-----+
| a| b|out_a|out_b|
+---+---+-----+-----+
|1.0|NaN| 1.0| 4.0|
|2.0|NaN| 2.0| 4.0|
|NaN|3.0| 3.0| 3.0|
...
>>> imputer.setStrategy("median").setMissingValue(1.0).fit(df).transform(df).show()
+---+---+-----+-----+
| a| b|out_a|out_b|
+---+---+-----+-----+
|1.0|NaN| 4.0| NaN|
...
>>> df1 = spark.createDataFrame([(1.0,), (2.0,), (float("nan"),), (4.0,), (5.0,)], ["a"])
>>> imputer1 = Imputer(inputCol="a", outputCol="out_a")
>>> model1 = imputer1.fit(df1)
>>> model1.surrogateDF.show()
+---+
| a|
+---+
|3.0|
+---+
...
>>> model1.transform(df1).show()
+---+-----+
| a|out_a|
+---+-----+
|1.0| 1.0|
|2.0| 2.0|
|NaN| 3.0|
...
>>> imputer1.setStrategy("median").setMissingValue(1.0).fit(df1).transform(df1).show()
+---+-----+
| a|out_a|
+---+-----+
|1.0| 4.0|
...
>>> df2 = spark.createDataFrame([(float("nan"),), (float("nan"),), (3.0,), (4.0,), (5.0,)],
... ["b"])
>>> imputer2 = Imputer(inputCol="b", outputCol="out_b")
>>> model2 = imputer2.fit(df2)
>>> model2.surrogateDF.show()
+---+
| b|
+---+
|4.0|
+---+
...
>>> model2.transform(df2).show()
+---+-----+
| b|out_b|
+---+-----+
|NaN| 4.0|
|NaN| 4.0|
|3.0| 3.0|
...
>>> imputer2.setStrategy("median").setMissingValue(1.0).fit(df2).transform(df2).show()
+---+-----+
| b|out_b|
+---+-----+
|NaN| NaN|
...
>>> imputerPath = temp_path + "/imputer"
>>> imputer.save(imputerPath)
>>> loadedImputer = Imputer.load(imputerPath)
>>> loadedImputer.getStrategy() == imputer.getStrategy()
True
>>> loadedImputer.getMissingValue()
1.0
>>> modelPath = temp_path + "/imputer-model"
>>> model.save(modelPath)
>>> loadedModel = ImputerModel.load(modelPath)
>>> loadedModel.transform(df).head().out_a == model.transform(df).head().out_a
True
"""
@keyword_only
def __init__(self, *, strategy="mean", missingValue=float("nan"), inputCols=None,
outputCols=None, inputCol=None, outputCol=None, relativeError=0.001):
"""
__init__(self, \\*, strategy="mean", missingValue=float("nan"), inputCols=None, \
outputCols=None, inputCol=None, outputCol=None, relativeError=0.001):
"""
super(Imputer, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.Imputer", self.uid)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("2.2.0")
def setParams(self, *, strategy="mean", missingValue=float("nan"), inputCols=None,
outputCols=None, inputCol=None, outputCol=None, relativeError=0.001):
"""
setParams(self, \\*, strategy="mean", missingValue=float("nan"), inputCols=None, \
outputCols=None, inputCol=None, outputCol=None, relativeError=0.001)
Sets params for this Imputer.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("2.2.0")
def setStrategy(self, value):
"""
Sets the value of :py:attr:`strategy`.
"""
return self._set(strategy=value)
@since("2.2.0")
def setMissingValue(self, value):
"""
Sets the value of :py:attr:`missingValue`.
"""
return self._set(missingValue=value)
@since("2.2.0")
def setInputCols(self, value):
"""
Sets the value of :py:attr:`inputCols`.
"""
return self._set(inputCols=value)
@since("2.2.0")
def setOutputCols(self, value):
"""
Sets the value of :py:attr:`outputCols`.
"""
return self._set(outputCols=value)
@since("3.0.0")
def setInputCol(self, value):
"""
Sets the value of :py:attr:`inputCol`.
"""
return self._set(inputCol=value)
@since("3.0.0")
def setOutputCol(self, value):
"""
Sets the value of :py:attr:`outputCol`.
"""
return self._set(outputCol=value)
@since("3.0.0")
def setRelativeError(self, value):
"""
Sets the value of :py:attr:`relativeError`.
"""
return self._set(relativeError=value)
def _create_model(self, java_model):
return ImputerModel(java_model)
class ImputerModel(JavaModel, _ImputerParams, JavaMLReadable, JavaMLWritable):
"""
Model fitted by :py:class:`Imputer`.
.. versionadded:: 2.2.0
"""
@since("3.0.0")
def setInputCols(self, value):
"""
Sets the value of :py:attr:`inputCols`.
"""
return self._set(inputCols=value)
@since("3.0.0")
def setOutputCols(self, value):
"""
Sets the value of :py:attr:`outputCols`.
"""
return self._set(outputCols=value)
@since("3.0.0")
def setInputCol(self, value):
"""
Sets the value of :py:attr:`inputCol`.
"""
return self._set(inputCol=value)
@since("3.0.0")
def setOutputCol(self, value):
"""
Sets the value of :py:attr:`outputCol`.
"""
return self._set(outputCol=value)
@property
@since("2.2.0")
def surrogateDF(self):
"""
Returns a DataFrame containing inputCols and their corresponding surrogates,
which are used to replace the missing values in the input DataFrame.
"""
return self._call_java("surrogateDF")
@inherit_doc
class Interaction(JavaTransformer, HasInputCols, HasOutputCol, JavaMLReadable, JavaMLWritable):
"""
Implements the feature interaction transform. This transformer takes in Double and Vector type
columns and outputs a flattened vector of their feature interactions. To handle interaction,
we first one-hot encode any nominal features. Then, a vector of the feature cross-products is
produced.
For example, given the input feature values `Double(2)` and `Vector(3, 4)`, the output would be
`Vector(6, 8)` if all input features were numeric. If the first feature was instead nominal
with four categories, the output would then be `Vector(0, 0, 0, 0, 3, 4, 0, 0)`.
.. versionadded:: 3.0.0
Examples
--------
>>> df = spark.createDataFrame([(0.0, 1.0), (2.0, 3.0)], ["a", "b"])
>>> interaction = Interaction()
>>> interaction.setInputCols(["a", "b"])
Interaction...
>>> interaction.setOutputCol("ab")
Interaction...
>>> interaction.transform(df).show()
+---+---+-----+
| a| b| ab|
+---+---+-----+
|0.0|1.0|[0.0]|
|2.0|3.0|[6.0]|
+---+---+-----+
...
>>> interactionPath = temp_path + "/interaction"
>>> interaction.save(interactionPath)
>>> loadedInteraction = Interaction.load(interactionPath)
>>> loadedInteraction.transform(df).head().ab == interaction.transform(df).head().ab
True
"""
@keyword_only
def __init__(self, *, inputCols=None, outputCol=None):
"""
__init__(self, \\*, inputCols=None, outputCol=None):
"""
super(Interaction, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.Interaction", self.uid)
self._setDefault()
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("3.0.0")
def setParams(self, *, inputCols=None, outputCol=None):
"""
setParams(self, \\*, inputCols=None, outputCol=None)
Sets params for this Interaction.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("3.0.0")
def setInputCols(self, value):
"""
Sets the value of :py:attr:`inputCols`.
"""
return self._set(inputCols=value)
@since("3.0.0")
def setOutputCol(self, value):
"""
Sets the value of :py:attr:`outputCol`.
"""
return self._set(outputCol=value)
class _MaxAbsScalerParams(HasInputCol, HasOutputCol):
"""
Params for :py:class:`MaxAbsScaler` and :py:class:`MaxAbsScalerModel`.
.. versionadded:: 3.0.0
"""
pass
@inherit_doc
class MaxAbsScaler(JavaEstimator, _MaxAbsScalerParams, JavaMLReadable, JavaMLWritable):
"""
Rescale each feature individually to the range [-1, 1] by dividing through the maximum
absolute value in each feature. It does not shift/center the data, and thus does not destroy
any sparsity.
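As an illustration, for the single feature in the examples below with observed values 1.0 and
2.0, the maximum absolute value is 2.0, so the values are scaled to 1.0 / 2.0 = 0.5 and
2.0 / 2.0 = 1.0.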
.. versionadded:: 2.0.0
Examples
--------
>>> from pyspark.ml.linalg import Vectors
>>> df = spark.createDataFrame([(Vectors.dense([1.0]),), (Vectors.dense([2.0]),)], ["a"])
>>> maScaler = MaxAbsScaler(outputCol="scaled")
>>> maScaler.setInputCol("a")
MaxAbsScaler...
>>> model = maScaler.fit(df)
>>> model.setOutputCol("scaledOutput")
MaxAbsScalerModel...
>>> model.transform(df).show()
+-----+------------+
| a|scaledOutput|
+-----+------------+
|[1.0]| [0.5]|
|[2.0]| [1.0]|
+-----+------------+
...
>>> scalerPath = temp_path + "/max-abs-scaler"
>>> maScaler.save(scalerPath)
>>> loadedMAScaler = MaxAbsScaler.load(scalerPath)
>>> loadedMAScaler.getInputCol() == maScaler.getInputCol()
True
>>> loadedMAScaler.getOutputCol() == maScaler.getOutputCol()
True
>>> modelPath = temp_path + "/max-abs-scaler-model"
>>> model.save(modelPath)
>>> loadedModel = MaxAbsScalerModel.load(modelPath)
>>> loadedModel.maxAbs == model.maxAbs
True
>>> loadedModel.transform(df).take(1) == model.transform(df).take(1)
True
"""
@keyword_only
def __init__(self, *, inputCol=None, outputCol=None):
"""
__init__(self, \\*, inputCol=None, outputCol=None)
"""
super(MaxAbsScaler, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.MaxAbsScaler", self.uid)
self._setDefault()
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("2.0.0")
def setParams(self, *, inputCol=None, outputCol=None):
"""
setParams(self, \\*, inputCol=None, outputCol=None)
Sets params for this MaxAbsScaler.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
def setInputCol(self, value):
"""
Sets the value of :py:attr:`inputCol`.
"""
return self._set(inputCol=value)
def setOutputCol(self, value):
"""
Sets the value of :py:attr:`outputCol`.
"""
return self._set(outputCol=value)
def _create_model(self, java_model):
return MaxAbsScalerModel(java_model)
class MaxAbsScalerModel(JavaModel, _MaxAbsScalerParams, JavaMLReadable, JavaMLWritable):
"""
Model fitted by :py:class:`MaxAbsScaler`.
.. versionadded:: 2.0.0
"""
@since("3.0.0")
def setInputCol(self, value):
"""
Sets the value of :py:attr:`inputCol`.
"""
return self._set(inputCol=value)
@since("3.0.0")
def setOutputCol(self, value):
"""
Sets the value of :py:attr:`outputCol`.
"""
return self._set(outputCol=value)
@property
@since("2.0.0")
def maxAbs(self):
"""
Max Abs vector.
"""
return self._call_java("maxAbs")
@inherit_doc
class MinHashLSH(_LSH, HasInputCol, HasOutputCol, HasSeed, JavaMLReadable, JavaMLWritable):
"""
LSH class for Jaccard distance.
The input can be dense or sparse vectors, but it is more efficient if it is sparse.
For example, `Vectors.sparse(10, [(2, 1.0), (3, 1.0), (5, 1.0)])` means there are 10 elements
in the space. This set contains elements 2, 3, and 5. Also, any input vector must have at
least 1 non-zero index, and all non-zero values are treated as binary "1" values.
.. versionadded:: 2.2.0
Notes
-----
See `Wikipedia on MinHash <https://en.wikipedia.org/wiki/MinHash>`_
Examples
--------
>>> from pyspark.ml.linalg import Vectors
>>> from pyspark.sql.functions import col
>>> data = [(0, Vectors.sparse(6, [0, 1, 2], [1.0, 1.0, 1.0]),),
... (1, Vectors.sparse(6, [2, 3, 4], [1.0, 1.0, 1.0]),),
... (2, Vectors.sparse(6, [0, 2, 4], [1.0, 1.0, 1.0]),)]
>>> df = spark.createDataFrame(data, ["id", "features"])
>>> mh = MinHashLSH()
>>> mh.setInputCol("features")
MinHashLSH...
>>> mh.setOutputCol("hashes")
MinHashLSH...
>>> mh.setSeed(12345)
MinHashLSH...
>>> model = mh.fit(df)
>>> model.setInputCol("features")
MinHashLSHModel...
>>> model.transform(df).head()
Row(id=0, features=SparseVector(6, {0: 1.0, 1: 1.0, 2: 1.0}), hashes=[DenseVector([6179668...
>>> data2 = [(3, Vectors.sparse(6, [1, 3, 5], [1.0, 1.0, 1.0]),),
... (4, Vectors.sparse(6, [2, 3, 5], [1.0, 1.0, 1.0]),),
... (5, Vectors.sparse(6, [1, 2, 4], [1.0, 1.0, 1.0]),)]
>>> df2 = spark.createDataFrame(data2, ["id", "features"])
>>> key = Vectors.sparse(6, [1, 2], [1.0, 1.0])
>>> model.approxNearestNeighbors(df2, key, 1).collect()
[Row(id=5, features=SparseVector(6, {1: 1.0, 2: 1.0, 4: 1.0}), hashes=[DenseVector([6179668...
>>> model.approxSimilarityJoin(df, df2, 0.6, distCol="JaccardDistance").select(
... col("datasetA.id").alias("idA"),
... col("datasetB.id").alias("idB"),
... col("JaccardDistance")).show()
+---+---+---------------+
|idA|idB|JaccardDistance|
+---+---+---------------+
| 0| 5| 0.5|
| 1| 4| 0.5|
+---+---+---------------+
...
>>> mhPath = temp_path + "/mh"
>>> mh.save(mhPath)
>>> mh2 = MinHashLSH.load(mhPath)
>>> mh2.getOutputCol() == mh.getOutputCol()
True
>>> modelPath = temp_path + "/mh-model"
>>> model.save(modelPath)
>>> model2 = MinHashLSHModel.load(modelPath)
>>> model.transform(df).head().hashes == model2.transform(df).head().hashes
True
"""
@keyword_only
def __init__(self, *, inputCol=None, outputCol=None, seed=None, numHashTables=1):
"""
__init__(self, \\*, inputCol=None, outputCol=None, seed=None, numHashTables=1)
"""
super(MinHashLSH, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.MinHashLSH", self.uid)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("2.2.0")
def setParams(self, *, inputCol=None, outputCol=None, seed=None, numHashTables=1):
"""
setParams(self, \\*, inputCol=None, outputCol=None, seed=None, numHashTables=1)
Sets params for this MinHashLSH.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
def setSeed(self, value):
"""
Sets the value of :py:attr:`seed`.
"""
return self._set(seed=value)
def _create_model(self, java_model):
return MinHashLSHModel(java_model)
class MinHashLSHModel(_LSHModel, JavaMLReadable, JavaMLWritable):
r"""
Model produced by :py:class:`MinHashLSH`, where multiple hash functions are stored. Each
hash function is picked from the following family of hash functions, where :math:`a_i` and
:math:`b_i` are randomly chosen integers less than prime:
:math:`h_i(x) = ((x \cdot a_i + b_i) \mod prime)` This hash family is approximately min-wise
independent according to the reference.
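As an illustration (with values chosen purely for this example): for :math:`a_i = 3`,
:math:`b_i = 1` and prime = 7, an element at index :math:`x = 4` hashes to
:math:`((4 \cdot 3 + 1) \mod 7) = 6`.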
.. versionadded:: 2.2.0
Notes
-----
See Tom Bohman, Colin Cooper, and Alan Frieze. "Min-wise independent linear permutations."
Electronic Journal of Combinatorics 7 (2000): R26.
"""
class _MinMaxScalerParams(HasInputCol, HasOutputCol):
"""
Params for :py:class:`MinMaxScaler` and :py:class:`MinMaxScalerModel`.
.. versionadded:: 3.0.0
"""
min = Param(Params._dummy(), "min", "Lower bound of the output feature range",
typeConverter=TypeConverters.toFloat)
max = Param(Params._dummy(), "max", "Upper bound of the output feature range",
typeConverter=TypeConverters.toFloat)
def __init__(self, *args):
super(_MinMaxScalerParams, self).__init__(*args)
self._setDefault(min=0.0, max=1.0)
@since("1.6.0")
def getMin(self):
"""
Gets the value of min or its default value.
"""
return self.getOrDefault(self.min)
@since("1.6.0")
def getMax(self):
"""
Gets the value of max or its default value.
"""
return self.getOrDefault(self.max)
@inherit_doc
class MinMaxScaler(JavaEstimator, _MinMaxScalerParams, JavaMLReadable, JavaMLWritable):
"""
Rescale each feature individually to a common range [min, max] linearly using column summary
statistics, which is also known as min-max normalization or Rescaling. The rescaled value for
feature E is calculated as,
Rescaled(e_i) = (e_i - E_min) / (E_max - E_min) * (max - min) + min
For the case E_max == E_min, Rescaled(e_i) = 0.5 * (max + min)
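As an illustration, with the default range [0.0, 1.0] and a feature observed between
E_min = 0.0 and E_max = 2.0, the value e_i = 0.5 is rescaled to
(0.5 - 0.0) / (2.0 - 0.0) * (1.0 - 0.0) + 0.0 = 0.25.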
.. versionadded:: 1.6.0
Notes
-----
Since zero values will probably be transformed to non-zero values, the output of the
transformer will be a DenseVector even for sparse input.
Examples
--------
>>> from pyspark.ml.linalg import Vectors
>>> df = spark.createDataFrame([(Vectors.dense([0.0]),), (Vectors.dense([2.0]),)], ["a"])
>>> mmScaler = MinMaxScaler(outputCol="scaled")
>>> mmScaler.setInputCol("a")
MinMaxScaler...
>>> model = mmScaler.fit(df)
>>> model.setOutputCol("scaledOutput")
MinMaxScalerModel...
>>> model.originalMin
DenseVector([0.0])
>>> model.originalMax
DenseVector([2.0])
>>> model.transform(df).show()
+-----+------------+
| a|scaledOutput|
+-----+------------+
|[0.0]| [0.0]|
|[2.0]| [1.0]|
+-----+------------+
...
>>> minMaxScalerPath = temp_path + "/min-max-scaler"
>>> mmScaler.save(minMaxScalerPath)
>>> loadedMMScaler = MinMaxScaler.load(minMaxScalerPath)
>>> loadedMMScaler.getMin() == mmScaler.getMin()
True
>>> loadedMMScaler.getMax() == mmScaler.getMax()
True
>>> modelPath = temp_path + "/min-max-scaler-model"
>>> model.save(modelPath)
>>> loadedModel = MinMaxScalerModel.load(modelPath)
>>> loadedModel.originalMin == model.originalMin
True
>>> loadedModel.originalMax == model.originalMax
True
>>> loadedModel.transform(df).take(1) == model.transform(df).take(1)
True
"""
@keyword_only
def __init__(self, *, min=0.0, max=1.0, inputCol=None, outputCol=None):
"""
__init__(self, \\*, min=0.0, max=1.0, inputCol=None, outputCol=None)
"""
super(MinMaxScaler, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.MinMaxScaler", self.uid)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.6.0")
def setParams(self, *, min=0.0, max=1.0, inputCol=None, outputCol=None):
"""
setParams(self, \\*, min=0.0, max=1.0, inputCol=None, outputCol=None)
Sets params for this MinMaxScaler.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("1.6.0")
def setMin(self, value):
"""
Sets the value of :py:attr:`min`.
"""
return self._set(min=value)
@since("1.6.0")
def setMax(self, value):
"""
Sets the value of :py:attr:`max`.
"""
return self._set(max=value)
def setInputCol(self, value):
"""
Sets the value of :py:attr:`inputCol`.
"""
return self._set(inputCol=value)
def setOutputCol(self, value):
"""
Sets the value of :py:attr:`outputCol`.
"""
return self._set(outputCol=value)
def _create_model(self, java_model):
return MinMaxScalerModel(java_model)
class MinMaxScalerModel(JavaModel, _MinMaxScalerParams, JavaMLReadable, JavaMLWritable):
"""
Model fitted by :py:class:`MinMaxScaler`.
.. versionadded:: 1.6.0
"""
@since("3.0.0")
def setInputCol(self, value):
"""
Sets the value of :py:attr:`inputCol`.
"""
return self._set(inputCol=value)
@since("3.0.0")
def setOutputCol(self, value):
"""
Sets the value of :py:attr:`outputCol`.
"""
return self._set(outputCol=value)
@since("3.0.0")
def setMin(self, value):
"""
Sets the value of :py:attr:`min`.
"""
return self._set(min=value)
@since("3.0.0")
def setMax(self, value):
"""
Sets the value of :py:attr:`max`.
"""
return self._set(max=value)
@property
@since("2.0.0")
def originalMin(self):
"""
Min value for each original column during fitting.
"""
return self._call_java("originalMin")
@property
@since("2.0.0")
def originalMax(self):
"""
Max value for each original column during fitting.
"""
return self._call_java("originalMax")
@inherit_doc
class NGram(JavaTransformer, HasInputCol, HasOutputCol, JavaMLReadable, JavaMLWritable):
"""
A feature transformer that converts the input array of strings into an array of n-grams. Null
values in the input array are ignored.
It returns an array of n-grams where each n-gram is represented by a space-separated string of
words.
When the input is empty, an empty array is returned.
When the input array length is less than n (number of elements per n-gram), no n-grams are
returned.
.. versionadded:: 1.5.0
Examples
--------
>>> df = spark.createDataFrame([Row(inputTokens=["a", "b", "c", "d", "e"])])
>>> ngram = NGram(n=2)
>>> ngram.setInputCol("inputTokens")
NGram...
>>> ngram.setOutputCol("nGrams")
NGram...
>>> ngram.transform(df).head()
Row(inputTokens=['a', 'b', 'c', 'd', 'e'], nGrams=['a b', 'b c', 'c d', 'd e'])
>>> # Change n-gram length
>>> ngram.setParams(n=4).transform(df).head()
Row(inputTokens=['a', 'b', 'c', 'd', 'e'], nGrams=['a b c d', 'b c d e'])
>>> # Temporarily modify output column.
>>> ngram.transform(df, {ngram.outputCol: "output"}).head()
Row(inputTokens=['a', 'b', 'c', 'd', 'e'], output=['a b c d', 'b c d e'])
>>> ngram.transform(df).head()
Row(inputTokens=['a', 'b', 'c', 'd', 'e'], nGrams=['a b c d', 'b c d e'])
>>> # Must use keyword arguments to specify params.
>>> ngram.setParams("text")
Traceback (most recent call last):
...
TypeError: Method setParams forces keyword arguments.
>>> ngramPath = temp_path + "/ngram"
>>> ngram.save(ngramPath)
>>> loadedNGram = NGram.load(ngramPath)
>>> loadedNGram.getN() == ngram.getN()
True
>>> loadedNGram.transform(df).take(1) == ngram.transform(df).take(1)
True
"""
n = Param(Params._dummy(), "n", "number of elements per n-gram (>=1)",
typeConverter=TypeConverters.toInt)
@keyword_only
def __init__(self, *, n=2, inputCol=None, outputCol=None):
"""
__init__(self, \\*, n=2, inputCol=None, outputCol=None)
"""
super(NGram, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.NGram", self.uid)
self._setDefault(n=2)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.5.0")
def setParams(self, *, n=2, inputCol=None, outputCol=None):
"""
setParams(self, \\*, n=2, inputCol=None, outputCol=None)
Sets params for this NGram.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("1.5.0")
def setN(self, value):
"""
Sets the value of :py:attr:`n`.
"""
return self._set(n=value)
@since("1.5.0")
def getN(self):
"""
Gets the value of n or its default value.
"""
return self.getOrDefault(self.n)
def setInputCol(self, value):
"""
Sets the value of :py:attr:`inputCol`.
"""
return self._set(inputCol=value)
def setOutputCol(self, value):
"""
Sets the value of :py:attr:`outputCol`.
"""
return self._set(outputCol=value)
@inherit_doc
class Normalizer(JavaTransformer, HasInputCol, HasOutputCol, JavaMLReadable, JavaMLWritable):
"""
Normalize a vector to have unit norm using the given p-norm.
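As an illustration, with the default p = 2.0 the dense vector [3.0, -4.0] in the examples below
has L2 norm sqrt(3.0**2 + 4.0**2) = 5.0 and is therefore rescaled to [0.6, -0.8].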
.. versionadded:: 1.4.0
Examples
--------
>>> from pyspark.ml.linalg import Vectors
>>> svec = Vectors.sparse(4, {1: 4.0, 3: 3.0})
>>> df = spark.createDataFrame([(Vectors.dense([3.0, -4.0]), svec)], ["dense", "sparse"])
>>> normalizer = Normalizer(p=2.0)
>>> normalizer.setInputCol("dense")
Normalizer...
>>> normalizer.setOutputCol("features")
Normalizer...
>>> normalizer.transform(df).head().features
DenseVector([0.6, -0.8])
>>> normalizer.setParams(inputCol="sparse", outputCol="freqs").transform(df).head().freqs
SparseVector(4, {1: 0.8, 3: 0.6})
>>> params = {normalizer.p: 1.0, normalizer.inputCol: "dense", normalizer.outputCol: "vector"}
>>> normalizer.transform(df, params).head().vector
DenseVector([0.4286, -0.5714])
>>> normalizerPath = temp_path + "/normalizer"
>>> normalizer.save(normalizerPath)
>>> loadedNormalizer = Normalizer.load(normalizerPath)
>>> loadedNormalizer.getP() == normalizer.getP()
True
>>> loadedNormalizer.transform(df).take(1) == normalizer.transform(df).take(1)
True
"""
p = Param(Params._dummy(), "p", "the p norm value.",
typeConverter=TypeConverters.toFloat)
@keyword_only
def __init__(self, *, p=2.0, inputCol=None, outputCol=None):
"""
__init__(self, \\*, p=2.0, inputCol=None, outputCol=None)
"""
super(Normalizer, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.Normalizer", self.uid)
self._setDefault(p=2.0)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.4.0")
def setParams(self, *, p=2.0, inputCol=None, outputCol=None):
"""
setParams(self, \\*, p=2.0, inputCol=None, outputCol=None)
Sets params for this Normalizer.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("1.4.0")
def setP(self, value):
"""
Sets the value of :py:attr:`p`.
"""
return self._set(p=value)
@since("1.4.0")
def getP(self):
"""
Gets the value of p or its default value.
"""
return self.getOrDefault(self.p)
def setInputCol(self, value):
"""
Sets the value of :py:attr:`inputCol`.
"""
return self._set(inputCol=value)
def setOutputCol(self, value):
"""
Sets the value of :py:attr:`outputCol`.
"""
return self._set(outputCol=value)
class _OneHotEncoderParams(HasInputCol, HasInputCols, HasOutputCol, HasOutputCols,
HasHandleInvalid):
"""
Params for :py:class:`OneHotEncoder` and :py:class:`OneHotEncoderModel`.
.. versionadded:: 3.0.0
"""
handleInvalid = Param(Params._dummy(), "handleInvalid", "How to handle invalid data during " +
"transform(). Options are 'keep' (invalid data presented as an extra " +
"categorical feature) or error (throw an error). Note that this Param " +
"is only used during transform; during fitting, invalid data will " +
"result in an error.",
typeConverter=TypeConverters.toString)
dropLast = Param(Params._dummy(), "dropLast", "whether to drop the last category",
typeConverter=TypeConverters.toBoolean)
def __init__(self, *args):
super(_OneHotEncoderParams, self).__init__(*args)
self._setDefault(handleInvalid="error", dropLast=True)
@since("2.3.0")
def getDropLast(self):
"""
Gets the value of dropLast or its default value.
"""
return self.getOrDefault(self.dropLast)
@inherit_doc
class OneHotEncoder(JavaEstimator, _OneHotEncoderParams, JavaMLReadable, JavaMLWritable):
"""
A one-hot encoder that maps a column of category indices to a column of binary vectors, with
at most a single one-value per row that indicates the input category index.
For example with 5 categories, an input value of 2.0 would map to an output vector of
`[0.0, 0.0, 1.0, 0.0]`.
The last category is not included by default (configurable via :py:attr:`dropLast`),
because including it would make the vector entries sum up to one, and hence linearly dependent.
So an input value of 4.0 maps to `[0.0, 0.0, 0.0, 0.0]`.
When :py:attr:`handleInvalid` is configured to 'keep', an extra "category" indicating invalid
values is added as last category. So when :py:attr:`dropLast` is true, invalid values are
encoded as all-zeros vector.
.. versionadded:: 2.3.0
Notes
-----
This is different from scikit-learn's OneHotEncoder, which keeps all categories.
The output vectors are sparse.
When encoding multi-column by using :py:attr:`inputCols` and
:py:attr:`outputCols` params, input/output cols come in pairs, specified by the order in
the arrays, and each pair is treated independently.
See Also
--------
StringIndexer : for converting categorical values into category indices
Examples
--------
>>> from pyspark.ml.linalg import Vectors
>>> df = spark.createDataFrame([(0.0,), (1.0,), (2.0,)], ["input"])
>>> ohe = OneHotEncoder()
>>> ohe.setInputCols(["input"])
OneHotEncoder...
>>> ohe.setOutputCols(["output"])
OneHotEncoder...
>>> model = ohe.fit(df)
>>> model.setOutputCols(["output"])
OneHotEncoderModel...
>>> model.getHandleInvalid()
'error'
>>> model.transform(df).head().output
SparseVector(2, {0: 1.0})
>>> single_col_ohe = OneHotEncoder(inputCol="input", outputCol="output")
>>> single_col_model = single_col_ohe.fit(df)
>>> single_col_model.transform(df).head().output
SparseVector(2, {0: 1.0})
>>> ohePath = temp_path + "/ohe"
>>> ohe.save(ohePath)
>>> loadedOHE = OneHotEncoder.load(ohePath)
>>> loadedOHE.getInputCols() == ohe.getInputCols()
True
>>> modelPath = temp_path + "/ohe-model"
>>> model.save(modelPath)
>>> loadedModel = OneHotEncoderModel.load(modelPath)
>>> loadedModel.categorySizes == model.categorySizes
True
>>> loadedModel.transform(df).take(1) == model.transform(df).take(1)
True
"""
@keyword_only
def __init__(self, *, inputCols=None, outputCols=None, handleInvalid="error", dropLast=True,
inputCol=None, outputCol=None):
"""
__init__(self, \\*, inputCols=None, outputCols=None, handleInvalid="error", dropLast=True, \
inputCol=None, outputCol=None)
"""
super(OneHotEncoder, self).__init__()
self._java_obj = self._new_java_obj(
"org.apache.spark.ml.feature.OneHotEncoder", self.uid)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("2.3.0")
def setParams(self, *, inputCols=None, outputCols=None, handleInvalid="error",
dropLast=True, inputCol=None, outputCol=None):
"""
setParams(self, \\*, inputCols=None, outputCols=None, handleInvalid="error", \
dropLast=True, inputCol=None, outputCol=None)
Sets params for this OneHotEncoder.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("2.3.0")
def setDropLast(self, value):
"""
Sets the value of :py:attr:`dropLast`.
"""
return self._set(dropLast=value)
@since("3.0.0")
def setInputCols(self, value):
"""
Sets the value of :py:attr:`inputCols`.
"""
return self._set(inputCols=value)
@since("3.0.0")
def setOutputCols(self, value):
"""
Sets the value of :py:attr:`outputCols`.
"""
return self._set(outputCols=value)
@since("3.0.0")
def setHandleInvalid(self, value):
"""
Sets the value of :py:attr:`handleInvalid`.
"""
return self._set(handleInvalid=value)
@since("3.0.0")
def setInputCol(self, value):
"""
Sets the value of :py:attr:`inputCol`.
"""
return self._set(inputCol=value)
@since("3.0.0")
def setOutputCol(self, value):
"""
Sets the value of :py:attr:`outputCol`.
"""
return self._set(outputCol=value)
def _create_model(self, java_model):
return OneHotEncoderModel(java_model)
class OneHotEncoderModel(JavaModel, _OneHotEncoderParams, JavaMLReadable, JavaMLWritable):
"""
Model fitted by :py:class:`OneHotEncoder`.
.. versionadded:: 2.3.0
"""
@since("3.0.0")
def setDropLast(self, value):
"""
Sets the value of :py:attr:`dropLast`.
"""
return self._set(dropLast=value)
@since("3.0.0")
def setInputCols(self, value):
"""
Sets the value of :py:attr:`inputCols`.
"""
return self._set(inputCols=value)
@since("3.0.0")
def setOutputCols(self, value):
"""
Sets the value of :py:attr:`outputCols`.
"""
return self._set(outputCols=value)
@since("3.0.0")
def setInputCol(self, value):
"""
Sets the value of :py:attr:`inputCol`.
"""
return self._set(inputCol=value)
@since("3.0.0")
def setOutputCol(self, value):
"""
Sets the value of :py:attr:`outputCol`.
"""
return self._set(outputCol=value)
@since("3.0.0")
def setHandleInvalid(self, value):
"""
Sets the value of :py:attr:`handleInvalid`.
"""
return self._set(handleInvalid=value)
@property
@since("2.3.0")
def categorySizes(self):
"""
Original number of categories for each feature being encoded.
The array contains one value for each input column, in order.
"""
return self._call_java("categorySizes")
@inherit_doc
class PolynomialExpansion(JavaTransformer, HasInputCol, HasOutputCol, JavaMLReadable,
JavaMLWritable):
"""
Perform feature expansion in a polynomial space. As described in the Wikipedia article on `Polynomial expansion
<http://en.wikipedia.org/wiki/Polynomial_expansion>`_, "In mathematics, an
expansion of a product of sums expresses it as a sum of products by using the fact that
multiplication distributes over addition". Take a 2-variable feature vector as an example:
`(x, y)`. If we want to expand it with degree 2, we get `(x, x * x, y, x * y, y * y)`.
.. versionadded:: 1.4.0
Examples
--------
>>> from pyspark.ml.linalg import Vectors
>>> df = spark.createDataFrame([(Vectors.dense([0.5, 2.0]),)], ["dense"])
>>> px = PolynomialExpansion(degree=2)
>>> px.setInputCol("dense")
PolynomialExpansion...
>>> px.setOutputCol("expanded")
PolynomialExpansion...
>>> px.transform(df).head().expanded
DenseVector([0.5, 0.25, 2.0, 1.0, 4.0])
>>> px.setParams(outputCol="test").transform(df).head().test
DenseVector([0.5, 0.25, 2.0, 1.0, 4.0])
>>> polyExpansionPath = temp_path + "/poly-expansion"
>>> px.save(polyExpansionPath)
>>> loadedPx = PolynomialExpansion.load(polyExpansionPath)
>>> loadedPx.getDegree() == px.getDegree()
True
>>> loadedPx.transform(df).take(1) == px.transform(df).take(1)
True
"""
degree = Param(Params._dummy(), "degree", "the polynomial degree to expand (>= 1)",
typeConverter=TypeConverters.toInt)
@keyword_only
def __init__(self, *, degree=2, inputCol=None, outputCol=None):
"""
__init__(self, \\*, degree=2, inputCol=None, outputCol=None)
"""
super(PolynomialExpansion, self).__init__()
self._java_obj = self._new_java_obj(
"org.apache.spark.ml.feature.PolynomialExpansion", self.uid)
self._setDefault(degree=2)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.4.0")
def setParams(self, *, degree=2, inputCol=None, outputCol=None):
"""
setParams(self, \\*, degree=2, inputCol=None, outputCol=None)
Sets params for this PolynomialExpansion.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("1.4.0")
def setDegree(self, value):
"""
Sets the value of :py:attr:`degree`.
"""
return self._set(degree=value)
@since("1.4.0")
def getDegree(self):
"""
Gets the value of degree or its default value.
"""
return self.getOrDefault(self.degree)
def setInputCol(self, value):
"""
Sets the value of :py:attr:`inputCol`.
"""
return self._set(inputCol=value)
def setOutputCol(self, value):
"""
Sets the value of :py:attr:`outputCol`.
"""
return self._set(outputCol=value)
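# Hedged usage sketch, not part of the upstream API: expands a 2-variable feature vector
# with degree 2, following the (x, x*x, y, x*y, y*y) layout described in the docstring
# above. Assumes an active SparkSession is passed in; names and data are example-only.
def _example_polynomial_expansion(spark):
    from pyspark.ml.linalg import Vectors
    df = spark.createDataFrame([(Vectors.dense([2.0, 3.0]),)], ["features"])
    px = PolynomialExpansion(degree=2, inputCol="features", outputCol="expanded")
    # For the vector (2.0, 3.0) the expansion is (2.0, 4.0, 3.0, 6.0, 9.0).
    return px.transform(df)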
@inherit_doc
class QuantileDiscretizer(JavaEstimator, HasInputCol, HasOutputCol, HasInputCols, HasOutputCols,
HasHandleInvalid, HasRelativeError, JavaMLReadable, JavaMLWritable):
"""
:py:class:`QuantileDiscretizer` takes a column with continuous features and outputs a column
with binned categorical features. The number of bins can be set using the :py:attr:`numBuckets`
parameter. It is possible that the number of buckets used will be less than this value, for
example, if there are too few distinct values of the input to create enough distinct quantiles.
Since 3.0.0, :py:class:`QuantileDiscretizer` can map multiple columns at once by setting the
:py:attr:`inputCols` parameter. If both of the :py:attr:`inputCol` and :py:attr:`inputCols`
parameters are set, an Exception will be thrown. To specify the number of buckets for each
column, the :py:attr:`numBucketsArray` parameter can be set, or if the number of buckets
should be the same across columns, :py:attr:`numBuckets` can be set as a convenience.
.. versionadded:: 2.0.0
Notes
-----
NaN handling: Note that
:py:class:`QuantileDiscretizer` will raise an error when it finds NaN values in the dataset,
but the user can also choose to either keep or remove NaN values within the dataset by setting
:py:attr:`handleInvalid` parameter. If the user chooses to keep NaN values, they will be
handled specially and placed into their own bucket, for example, if 4 buckets are used, then
non-NaN data will be put into buckets[0-3], but NaNs will be counted in a special bucket[4].
Algorithm: The bin ranges are chosen using an approximate algorithm (see the documentation for
:py:meth:`~.DataFrameStatFunctions.approxQuantile` for a detailed description).
The precision of the approximation can be controlled with the
:py:attr:`relativeError` parameter.
The lower and upper bin bounds will be `-Infinity` and `+Infinity`, covering all real values.
Examples
--------
>>> values = [(0.1,), (0.4,), (1.2,), (1.5,), (float("nan"),), (float("nan"),)]
>>> df1 = spark.createDataFrame(values, ["values"])
>>> qds1 = QuantileDiscretizer(inputCol="values", outputCol="buckets")
>>> qds1.setNumBuckets(2)
QuantileDiscretizer...
>>> qds1.setRelativeError(0.01)
QuantileDiscretizer...
>>> qds1.setHandleInvalid("error")
QuantileDiscretizer...
>>> qds1.getRelativeError()
0.01
>>> bucketizer = qds1.fit(df1)
>>> qds1.setHandleInvalid("keep").fit(df1).transform(df1).count()
6
>>> qds1.setHandleInvalid("skip").fit(df1).transform(df1).count()
4
>>> splits = bucketizer.getSplits()
>>> splits[0]
-inf
>>> print("%2.1f" % round(splits[1], 1))
0.4
>>> bucketed = bucketizer.transform(df1).head()
>>> bucketed.buckets
0.0
>>> quantileDiscretizerPath = temp_path + "/quantile-discretizer"
>>> qds1.save(quantileDiscretizerPath)
>>> loadedQds = QuantileDiscretizer.load(quantileDiscretizerPath)
>>> loadedQds.getNumBuckets() == qds1.getNumBuckets()
True
>>> inputs = [(0.1, 0.0), (0.4, 1.0), (1.2, 1.3), (1.5, 1.5),
... (float("nan"), float("nan")), (float("nan"), float("nan"))]
>>> df2 = spark.createDataFrame(inputs, ["input1", "input2"])
>>> qds2 = QuantileDiscretizer(relativeError=0.01, handleInvalid="error", numBuckets=2,
... inputCols=["input1", "input2"], outputCols=["output1", "output2"])
>>> qds2.getRelativeError()
0.01
>>> qds2.setHandleInvalid("keep").fit(df2).transform(df2).show()
+------+------+-------+-------+
|input1|input2|output1|output2|
+------+------+-------+-------+
| 0.1| 0.0| 0.0| 0.0|
| 0.4| 1.0| 1.0| 1.0|
| 1.2| 1.3| 1.0| 1.0|
| 1.5| 1.5| 1.0| 1.0|
| NaN| NaN| 2.0| 2.0|
| NaN| NaN| 2.0| 2.0|
+------+------+-------+-------+
...
>>> qds3 = QuantileDiscretizer(relativeError=0.01, handleInvalid="error",
... numBucketsArray=[5, 10], inputCols=["input1", "input2"],
... outputCols=["output1", "output2"])
>>> qds3.setHandleInvalid("skip").fit(df2).transform(df2).show()
+------+------+-------+-------+
|input1|input2|output1|output2|
+------+------+-------+-------+
| 0.1| 0.0| 1.0| 1.0|
| 0.4| 1.0| 2.0| 2.0|
| 1.2| 1.3| 3.0| 3.0|
| 1.5| 1.5| 4.0| 4.0|
+------+------+-------+-------+
...
"""
numBuckets = Param(Params._dummy(), "numBuckets",
"Maximum number of buckets (quantiles, or " +
"categories) into which data points are grouped. Must be >= 2.",
typeConverter=TypeConverters.toInt)
handleInvalid = Param(Params._dummy(), "handleInvalid", "how to handle invalid entries. " +
"Options are skip (filter out rows with invalid values), " +
"error (throw an error), or keep (keep invalid values in a special " +
"additional bucket). Note that in the multiple columns " +
"case, the invalid handling is applied to all columns. That said " +
"for 'error' it will throw an error if any invalids are found in " +
"any columns, for 'skip' it will skip rows with any invalids in " +
"any columns, etc.",
typeConverter=TypeConverters.toString)
numBucketsArray = Param(Params._dummy(), "numBucketsArray", "Array of number of buckets " +
"(quantiles, or categories) into which data points are grouped. " +
"This is for multiple columns input. If transforming multiple " +
"columns and numBucketsArray is not set, but numBuckets is set, " +
"then numBuckets will be applied across all columns.",
typeConverter=TypeConverters.toListInt)
@keyword_only
def __init__(self, *, numBuckets=2, inputCol=None, outputCol=None, relativeError=0.001,
handleInvalid="error", numBucketsArray=None, inputCols=None, outputCols=None):
"""
__init__(self, \\*, numBuckets=2, inputCol=None, outputCol=None, relativeError=0.001, \
handleInvalid="error", numBucketsArray=None, inputCols=None, outputCols=None)
"""
super(QuantileDiscretizer, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.QuantileDiscretizer",
self.uid)
self._setDefault(numBuckets=2, relativeError=0.001, handleInvalid="error")
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("2.0.0")
def setParams(self, *, numBuckets=2, inputCol=None, outputCol=None, relativeError=0.001,
handleInvalid="error", numBucketsArray=None, inputCols=None, outputCols=None):
"""
setParams(self, \\*, numBuckets=2, inputCol=None, outputCol=None, relativeError=0.001, \
handleInvalid="error", numBucketsArray=None, inputCols=None, outputCols=None)
Sets params for this QuantileDiscretizer.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("2.0.0")
def setNumBuckets(self, value):
"""
Sets the value of :py:attr:`numBuckets`.
"""
return self._set(numBuckets=value)
@since("2.0.0")
def getNumBuckets(self):
"""
Gets the value of numBuckets or its default value.
"""
return self.getOrDefault(self.numBuckets)
@since("3.0.0")
def setNumBucketsArray(self, value):
"""
Sets the value of :py:attr:`numBucketsArray`.
"""
return self._set(numBucketsArray=value)
@since("3.0.0")
def getNumBucketsArray(self):
"""
Gets the value of numBucketsArray or its default value.
"""
return self.getOrDefault(self.numBucketsArray)
@since("2.0.0")
def setRelativeError(self, value):
"""
Sets the value of :py:attr:`relativeError`.
"""
return self._set(relativeError=value)
def setInputCol(self, value):
"""
Sets the value of :py:attr:`inputCol`.
"""
return self._set(inputCol=value)
@since("3.0.0")
def setInputCols(self, value):
"""
Sets the value of :py:attr:`inputCols`.
"""
return self._set(inputCols=value)
def setOutputCol(self, value):
"""
Sets the value of :py:attr:`outputCol`.
"""
return self._set(outputCol=value)
@since("3.0.0")
def setOutputCols(self, value):
"""
Sets the value of :py:attr:`outputCols`.
"""
return self._set(outputCols=value)
def setHandleInvalid(self, value):
"""
Sets the value of :py:attr:`handleInvalid`.
"""
return self._set(handleInvalid=value)
def _create_model(self, java_model):
"""
Private method to convert the java_model to a Python model.
"""
if (self.isSet(self.inputCol)):
return Bucketizer(splits=list(java_model.getSplits()),
inputCol=self.getInputCol(),
outputCol=self.getOutputCol(),
handleInvalid=self.getHandleInvalid())
else:
splitsArrayList = [list(x) for x in list(java_model.getSplitsArray())]
return Bucketizer(splitsArray=splitsArrayList,
inputCols=self.getInputCols(),
outputCols=self.getOutputCols(),
handleInvalid=self.getHandleInvalid())
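# Hedged usage sketch, not part of the upstream API: bins two columns at once via
# numBucketsArray and keeps NaN rows in their own extra bucket, as the class docstring
# describes. Assumes an active SparkSession; column names and data are example-only.
def _example_quantile_discretizer(spark):
    df = spark.createDataFrame(
        [(0.1, 0.0), (0.4, 1.0), (1.2, 1.3), (float("nan"), 1.5)], ["x1", "x2"])
    qds = QuantileDiscretizer(numBucketsArray=[2, 3], inputCols=["x1", "x2"],
                              outputCols=["b1", "b2"], handleInvalid="keep")
    # With handleInvalid="keep", the NaN in x1 is placed in an extra bucket after the
    # regular ones; the fitted model is a Bucketizer holding the learned splits.
    return qds.fit(df).transform(df)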
class _RobustScalerParams(HasInputCol, HasOutputCol, HasRelativeError):
"""
Params for :py:class:`RobustScaler` and :py:class:`RobustScalerModel`.
.. versionadded:: 3.0.0
"""
lower = Param(Params._dummy(), "lower", "Lower quantile to calculate quantile range",
typeConverter=TypeConverters.toFloat)
upper = Param(Params._dummy(), "upper", "Upper quantile to calculate quantile range",
typeConverter=TypeConverters.toFloat)
withCentering = Param(Params._dummy(), "withCentering", "Whether to center data with median",
typeConverter=TypeConverters.toBoolean)
withScaling = Param(Params._dummy(), "withScaling", "Whether to scale the data to "
"quantile range", typeConverter=TypeConverters.toBoolean)
def __init__(self, *args):
super(_RobustScalerParams, self).__init__(*args)
self._setDefault(lower=0.25, upper=0.75, withCentering=False, withScaling=True,
relativeError=0.001)
@since("3.0.0")
def getLower(self):
"""
Gets the value of lower or its default value.
"""
return self.getOrDefault(self.lower)
@since("3.0.0")
def getUpper(self):
"""
Gets the value of upper or its default value.
"""
return self.getOrDefault(self.upper)
@since("3.0.0")
def getWithCentering(self):
"""
Gets the value of withCentering or its default value.
"""
return self.getOrDefault(self.withCentering)
@since("3.0.0")
def getWithScaling(self):
"""
Gets the value of withScaling or its default value.
"""
return self.getOrDefault(self.withScaling)
@inherit_doc
class RobustScaler(JavaEstimator, _RobustScalerParams, JavaMLReadable, JavaMLWritable):
"""
RobustScaler removes the median and scales the data according to the quantile range.
The quantile range is by default IQR (Interquartile Range, quantile range between the
1st quartile = 25th percentile and the 3rd quartile = 75th percentile) but can be configured.
Centering and scaling happen independently on each feature by computing the relevant
statistics on the samples in the training set. Median and quantile range are then
stored to be used on later data using the transform method.
Note that NaN values are ignored in the computation of medians and ranges.
.. versionadded:: 3.0.0
Examples
--------
>>> from pyspark.ml.linalg import Vectors
>>> data = [(0, Vectors.dense([0.0, 0.0]),),
... (1, Vectors.dense([1.0, -1.0]),),
... (2, Vectors.dense([2.0, -2.0]),),
... (3, Vectors.dense([3.0, -3.0]),),
... (4, Vectors.dense([4.0, -4.0]),),]
>>> df = spark.createDataFrame(data, ["id", "features"])
>>> scaler = RobustScaler()
>>> scaler.setInputCol("features")
RobustScaler...
>>> scaler.setOutputCol("scaled")
RobustScaler...
>>> model = scaler.fit(df)
>>> model.setOutputCol("output")
RobustScalerModel...
>>> model.median
DenseVector([2.0, -2.0])
>>> model.range
DenseVector([2.0, 2.0])
>>> model.transform(df).collect()[1].output
DenseVector([0.5, -0.5])
>>> scalerPath = temp_path + "/robust-scaler"
>>> scaler.save(scalerPath)
>>> loadedScaler = RobustScaler.load(scalerPath)
>>> loadedScaler.getWithCentering() == scaler.getWithCentering()
True
>>> loadedScaler.getWithScaling() == scaler.getWithScaling()
True
>>> modelPath = temp_path + "/robust-scaler-model"
>>> model.save(modelPath)
>>> loadedModel = RobustScalerModel.load(modelPath)
>>> loadedModel.median == model.median
True
>>> loadedModel.range == model.range
True
>>> loadedModel.transform(df).take(1) == model.transform(df).take(1)
True
"""
@keyword_only
def __init__(self, *, lower=0.25, upper=0.75, withCentering=False, withScaling=True,
inputCol=None, outputCol=None, relativeError=0.001):
"""
__init__(self, \\*, lower=0.25, upper=0.75, withCentering=False, withScaling=True, \
inputCol=None, outputCol=None, relativeError=0.001)
"""
super(RobustScaler, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.RobustScaler", self.uid)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("3.0.0")
def setParams(self, *, lower=0.25, upper=0.75, withCentering=False, withScaling=True,
inputCol=None, outputCol=None, relativeError=0.001):
"""
setParams(self, \\*, lower=0.25, upper=0.75, withCentering=False, withScaling=True, \
inputCol=None, outputCol=None, relativeError=0.001)
Sets params for this RobustScaler.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("3.0.0")
def setLower(self, value):
"""
Sets the value of :py:attr:`lower`.
"""
return self._set(lower=value)
@since("3.0.0")
def setUpper(self, value):
"""
Sets the value of :py:attr:`upper`.
"""
return self._set(upper=value)
@since("3.0.0")
def setWithCentering(self, value):
"""
Sets the value of :py:attr:`withCentering`.
"""
return self._set(withCentering=value)
@since("3.0.0")
def setWithScaling(self, value):
"""
Sets the value of :py:attr:`withScaling`.
"""
return self._set(withScaling=value)
@since("3.0.0")
def setInputCol(self, value):
"""
Sets the value of :py:attr:`inputCol`.
"""
return self._set(inputCol=value)
@since("3.0.0")
def setOutputCol(self, value):
"""
Sets the value of :py:attr:`outputCol`.
"""
return self._set(outputCol=value)
@since("3.0.0")
def setRelativeError(self, value):
"""
Sets the value of :py:attr:`relativeError`.
"""
return self._set(relativeError=value)
def _create_model(self, java_model):
return RobustScalerModel(java_model)
class RobustScalerModel(JavaModel, _RobustScalerParams, JavaMLReadable, JavaMLWritable):
"""
Model fitted by :py:class:`RobustScaler`.
.. versionadded:: 3.0.0
"""
@since("3.0.0")
def setInputCol(self, value):
"""
Sets the value of :py:attr:`inputCol`.
"""
return self._set(inputCol=value)
@since("3.0.0")
def setOutputCol(self, value):
"""
Sets the value of :py:attr:`outputCol`.
"""
return self._set(outputCol=value)
@property
@since("3.0.0")
def median(self):
"""
Median of the RobustScalerModel.
"""
return self._call_java("median")
@property
@since("3.0.0")
def range(self):
"""
Quantile range of the RobustScalerModel.
"""
return self._call_java("range")
@inherit_doc
class RegexTokenizer(JavaTransformer, HasInputCol, HasOutputCol, JavaMLReadable, JavaMLWritable):
"""
A regex-based tokenizer that extracts tokens either by using the
provided regex pattern (in Java dialect) to split the text
(default) or repeatedly matching the regex (if gaps is false).
Optional parameters also allow filtering tokens by a minimum
length.
It returns an array of strings that can be empty.
.. versionadded:: 1.4.0
Examples
--------
>>> df = spark.createDataFrame([("A B c",)], ["text"])
>>> reTokenizer = RegexTokenizer()
>>> reTokenizer.setInputCol("text")
RegexTokenizer...
>>> reTokenizer.setOutputCol("words")
RegexTokenizer...
>>> reTokenizer.transform(df).head()
Row(text='A B c', words=['a', 'b', 'c'])
>>> # Change a parameter.
>>> reTokenizer.setParams(outputCol="tokens").transform(df).head()
Row(text='A B c', tokens=['a', 'b', 'c'])
>>> # Temporarily modify a parameter.
>>> reTokenizer.transform(df, {reTokenizer.outputCol: "words"}).head()
Row(text='A B c', words=['a', 'b', 'c'])
>>> reTokenizer.transform(df).head()
Row(text='A B c', tokens=['a', 'b', 'c'])
>>> # Must use keyword arguments to specify params.
>>> reTokenizer.setParams("text")
Traceback (most recent call last):
...
TypeError: Method setParams forces keyword arguments.
>>> regexTokenizerPath = temp_path + "/regex-tokenizer"
>>> reTokenizer.save(regexTokenizerPath)
>>> loadedReTokenizer = RegexTokenizer.load(regexTokenizerPath)
>>> loadedReTokenizer.getMinTokenLength() == reTokenizer.getMinTokenLength()
True
>>> loadedReTokenizer.getGaps() == reTokenizer.getGaps()
True
>>> loadedReTokenizer.transform(df).take(1) == reTokenizer.transform(df).take(1)
True
"""
minTokenLength = Param(Params._dummy(), "minTokenLength", "minimum token length (>= 0)",
typeConverter=TypeConverters.toInt)
gaps = Param(Params._dummy(), "gaps", "whether regex splits on gaps (True) or matches tokens " +
"(False)")
pattern = Param(Params._dummy(), "pattern", "regex pattern (Java dialect) used for tokenizing",
typeConverter=TypeConverters.toString)
toLowercase = Param(Params._dummy(), "toLowercase", "whether to convert all characters to " +
"lowercase before tokenizing", typeConverter=TypeConverters.toBoolean)
@keyword_only
def __init__(self, *, minTokenLength=1, gaps=True, pattern="\\s+", inputCol=None,
outputCol=None, toLowercase=True):
"""
__init__(self, \\*, minTokenLength=1, gaps=True, pattern="\\s+", inputCol=None, \
outputCol=None, toLowercase=True)
"""
super(RegexTokenizer, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.RegexTokenizer", self.uid)
self._setDefault(minTokenLength=1, gaps=True, pattern="\\s+", toLowercase=True)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.4.0")
def setParams(self, *, minTokenLength=1, gaps=True, pattern="\\s+", inputCol=None,
outputCol=None, toLowercase=True):
"""
setParams(self, \\*, minTokenLength=1, gaps=True, pattern="\\s+", inputCol=None, \
outputCol=None, toLowercase=True)
Sets params for this RegexTokenizer.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("1.4.0")
def setMinTokenLength(self, value):
"""
Sets the value of :py:attr:`minTokenLength`.
"""
return self._set(minTokenLength=value)
@since("1.4.0")
def getMinTokenLength(self):
"""
Gets the value of minTokenLength or its default value.
"""
return self.getOrDefault(self.minTokenLength)
@since("1.4.0")
def setGaps(self, value):
"""
Sets the value of :py:attr:`gaps`.
"""
return self._set(gaps=value)
@since("1.4.0")
def getGaps(self):
"""
Gets the value of gaps or its default value.
"""
return self.getOrDefault(self.gaps)
@since("1.4.0")
def setPattern(self, value):
"""
Sets the value of :py:attr:`pattern`.
"""
return self._set(pattern=value)
@since("1.4.0")
def getPattern(self):
"""
Gets the value of pattern or its default value.
"""
return self.getOrDefault(self.pattern)
@since("2.0.0")
def setToLowercase(self, value):
"""
Sets the value of :py:attr:`toLowercase`.
"""
return self._set(toLowercase=value)
@since("2.0.0")
def getToLowercase(self):
"""
Gets the value of toLowercase or its default value.
"""
return self.getOrDefault(self.toLowercase)
def setInputCol(self, value):
"""
Sets the value of :py:attr:`inputCol`.
"""
return self._set(inputCol=value)
def setOutputCol(self, value):
"""
Sets the value of :py:attr:`outputCol`.
"""
return self._set(outputCol=value)
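# Hedged usage sketch, not part of the upstream API: contrasts the two tokenizing modes
# described above -- splitting on the pattern (gaps=True) versus keeping the regex
# matches (gaps=False). Assumes an active SparkSession; the text column is example-only.
def _example_regex_tokenizer(spark):
    df = spark.createDataFrame([("Spark 3.0, PySpark ML",)], ["text"])
    splitter = RegexTokenizer(inputCol="text", outputCol="words",
                              pattern="[\\s,]+", gaps=True)   # split on whitespace/commas
    matcher = RegexTokenizer(inputCol="text", outputCol="words",
                             pattern="\\w+", gaps=False)      # keep the matched tokens
    return splitter.transform(df), matcher.transform(df)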
@inherit_doc
class SQLTransformer(JavaTransformer, JavaMLReadable, JavaMLWritable):
"""
Implements the transformations defined by a SQL statement.
Currently we only support SQL syntax like `SELECT ... FROM __THIS__`
where `__THIS__` represents the underlying table of the input dataset.
.. versionadded:: 1.6.0
Examples
--------
>>> df = spark.createDataFrame([(0, 1.0, 3.0), (2, 2.0, 5.0)], ["id", "v1", "v2"])
>>> sqlTrans = SQLTransformer(
... statement="SELECT *, (v1 + v2) AS v3, (v1 * v2) AS v4 FROM __THIS__")
>>> sqlTrans.transform(df).head()
Row(id=0, v1=1.0, v2=3.0, v3=4.0, v4=3.0)
>>> sqlTransformerPath = temp_path + "/sql-transformer"
>>> sqlTrans.save(sqlTransformerPath)
>>> loadedSqlTrans = SQLTransformer.load(sqlTransformerPath)
>>> loadedSqlTrans.getStatement() == sqlTrans.getStatement()
True
>>> loadedSqlTrans.transform(df).take(1) == sqlTrans.transform(df).take(1)
True
"""
statement = Param(Params._dummy(), "statement", "SQL statement",
typeConverter=TypeConverters.toString)
@keyword_only
def __init__(self, *, statement=None):
"""
__init__(self, \\*, statement=None)
"""
super(SQLTransformer, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.SQLTransformer", self.uid)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.6.0")
def setParams(self, *, statement=None):
"""
setParams(self, \\*, statement=None)
Sets params for this SQLTransformer.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("1.6.0")
def setStatement(self, value):
"""
Sets the value of :py:attr:`statement`.
"""
return self._set(statement=value)
@since("1.6.0")
def getStatement(self):
"""
Gets the value of statement or its default value.
"""
return self.getOrDefault(self.statement)
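# Hedged usage sketch, not part of the upstream API: derives new columns with a SELECT
# over the __THIS__ placeholder, which stands for the input dataset as described above.
# Assumes an active SparkSession; the column names are example-only.
def _example_sql_transformer(spark):
    df = spark.createDataFrame([(1, 2.0), (2, 5.0)], ["id", "v"])
    sql_trans = SQLTransformer(
        statement="SELECT *, v * 2 AS v_doubled FROM __THIS__ WHERE v > 1.0")
    return sql_trans.transform(df)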
class _StandardScalerParams(HasInputCol, HasOutputCol):
"""
Params for :py:class:`StandardScaler` and :py:class:`StandardScalerModel`.
.. versionadded:: 3.0.0
"""
withMean = Param(Params._dummy(), "withMean", "Center data with mean",
typeConverter=TypeConverters.toBoolean)
withStd = Param(Params._dummy(), "withStd", "Scale to unit standard deviation",
typeConverter=TypeConverters.toBoolean)
def __init__(self, *args):
super(_StandardScalerParams, self).__init__(*args)
self._setDefault(withMean=False, withStd=True)
@since("1.4.0")
def getWithMean(self):
"""
Gets the value of withMean or its default value.
"""
return self.getOrDefault(self.withMean)
@since("1.4.0")
def getWithStd(self):
"""
Gets the value of withStd or its default value.
"""
return self.getOrDefault(self.withStd)
@inherit_doc
class StandardScaler(JavaEstimator, _StandardScalerParams, JavaMLReadable, JavaMLWritable):
"""
Standardizes features by removing the mean and scaling to unit variance using column summary
statistics on the samples in the training set.
The "unit std" is computed using the `corrected sample standard deviation \
<https://en.wikipedia.org/wiki/Standard_deviation#Corrected_sample_standard_deviation>`_,
which is computed as the square root of the unbiased sample variance.
.. versionadded:: 1.4.0
Examples
--------
>>> from pyspark.ml.linalg import Vectors
>>> df = spark.createDataFrame([(Vectors.dense([0.0]),), (Vectors.dense([2.0]),)], ["a"])
>>> standardScaler = StandardScaler()
>>> standardScaler.setInputCol("a")
StandardScaler...
>>> standardScaler.setOutputCol("scaled")
StandardScaler...
>>> model = standardScaler.fit(df)
>>> model.getInputCol()
'a'
>>> model.setOutputCol("output")
StandardScalerModel...
>>> model.mean
DenseVector([1.0])
>>> model.std
DenseVector([1.4142])
>>> model.transform(df).collect()[1].output
DenseVector([1.4142])
>>> standardScalerPath = temp_path + "/standard-scaler"
>>> standardScaler.save(standardScalerPath)
>>> loadedStandardScaler = StandardScaler.load(standardScalerPath)
>>> loadedStandardScaler.getWithMean() == standardScaler.getWithMean()
True
>>> loadedStandardScaler.getWithStd() == standardScaler.getWithStd()
True
>>> modelPath = temp_path + "/standard-scaler-model"
>>> model.save(modelPath)
>>> loadedModel = StandardScalerModel.load(modelPath)
>>> loadedModel.std == model.std
True
>>> loadedModel.mean == model.mean
True
>>> loadedModel.transform(df).take(1) == model.transform(df).take(1)
True
"""
@keyword_only
def __init__(self, *, withMean=False, withStd=True, inputCol=None, outputCol=None):
"""
__init__(self, \\*, withMean=False, withStd=True, inputCol=None, outputCol=None)
"""
super(StandardScaler, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.StandardScaler", self.uid)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.4.0")
def setParams(self, *, withMean=False, withStd=True, inputCol=None, outputCol=None):
"""
setParams(self, \\*, withMean=False, withStd=True, inputCol=None, outputCol=None)
Sets params for this StandardScaler.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("1.4.0")
def setWithMean(self, value):
"""
Sets the value of :py:attr:`withMean`.
"""
return self._set(withMean=value)
@since("1.4.0")
def setWithStd(self, value):
"""
Sets the value of :py:attr:`withStd`.
"""
return self._set(withStd=value)
def setInputCol(self, value):
"""
Sets the value of :py:attr:`inputCol`.
"""
return self._set(inputCol=value)
def setOutputCol(self, value):
"""
Sets the value of :py:attr:`outputCol`.
"""
return self._set(outputCol=value)
def _create_model(self, java_model):
return StandardScalerModel(java_model)
class StandardScalerModel(JavaModel, _StandardScalerParams, JavaMLReadable, JavaMLWritable):
"""
Model fitted by :py:class:`StandardScaler`.
.. versionadded:: 1.4.0
"""
def setInputCol(self, value):
"""
Sets the value of :py:attr:`inputCol`.
"""
return self._set(inputCol=value)
def setOutputCol(self, value):
"""
Sets the value of :py:attr:`outputCol`.
"""
return self._set(outputCol=value)
@property
@since("2.0.0")
def std(self):
"""
Standard deviation of the StandardScalerModel.
"""
return self._call_java("std")
@property
@since("2.0.0")
def mean(self):
"""
Mean of the StandardScalerModel.
"""
return self._call_java("mean")
class _StringIndexerParams(JavaParams, HasHandleInvalid, HasInputCol, HasOutputCol,
HasInputCols, HasOutputCols):
"""
Params for :py:class:`StringIndexer` and :py:class:`StringIndexerModel`.
"""
stringOrderType = Param(Params._dummy(), "stringOrderType",
"How to order labels of string column. The first label after " +
"ordering is assigned an index of 0. Supported options: " +
"frequencyDesc, frequencyAsc, alphabetDesc, alphabetAsc. " +
"Default is frequencyDesc. In case of equal frequency when " +
"under frequencyDesc/Asc, the strings are further sorted " +
"alphabetically",
typeConverter=TypeConverters.toString)
handleInvalid = Param(Params._dummy(), "handleInvalid", "how to handle invalid data (unseen " +
"or NULL values) in features and label column of string type. " +
"Options are 'skip' (filter out rows with invalid data), " +
"error (throw an error), or 'keep' (put invalid data " +
"in a special additional bucket, at index numLabels).",
typeConverter=TypeConverters.toString)
def __init__(self, *args):
super(_StringIndexerParams, self).__init__(*args)
self._setDefault(handleInvalid="error", stringOrderType="frequencyDesc")
@since("2.3.0")
def getStringOrderType(self):
"""
Gets the value of :py:attr:`stringOrderType` or its default value 'frequencyDesc'.
"""
return self.getOrDefault(self.stringOrderType)
@inherit_doc
class StringIndexer(JavaEstimator, _StringIndexerParams, JavaMLReadable, JavaMLWritable):
"""
A label indexer that maps a string column of labels to an ML column of label indices.
If the input column is numeric, we cast it to string and index the string values.
The indices are in [0, numLabels). By default, this is ordered by label frequencies
so the most frequent label gets index 0. The ordering behavior is controlled by
setting :py:attr:`stringOrderType`. Its default value is 'frequencyDesc'.
.. versionadded:: 1.4.0
Examples
--------
>>> stringIndexer = StringIndexer(inputCol="label", outputCol="indexed",
... stringOrderType="frequencyDesc")
>>> stringIndexer.setHandleInvalid("error")
StringIndexer...
>>> model = stringIndexer.fit(stringIndDf)
>>> model.setHandleInvalid("error")
StringIndexerModel...
>>> td = model.transform(stringIndDf)
>>> sorted(set([(i[0], i[1]) for i in td.select(td.id, td.indexed).collect()]),
... key=lambda x: x[0])
[(0, 0.0), (1, 2.0), (2, 1.0), (3, 0.0), (4, 0.0), (5, 1.0)]
>>> inverter = IndexToString(inputCol="indexed", outputCol="label2", labels=model.labels)
>>> itd = inverter.transform(td)
>>> sorted(set([(i[0], str(i[1])) for i in itd.select(itd.id, itd.label2).collect()]),
... key=lambda x: x[0])
[(0, 'a'), (1, 'b'), (2, 'c'), (3, 'a'), (4, 'a'), (5, 'c')]
>>> stringIndexerPath = temp_path + "/string-indexer"
>>> stringIndexer.save(stringIndexerPath)
>>> loadedIndexer = StringIndexer.load(stringIndexerPath)
>>> loadedIndexer.getHandleInvalid() == stringIndexer.getHandleInvalid()
True
>>> modelPath = temp_path + "/string-indexer-model"
>>> model.save(modelPath)
>>> loadedModel = StringIndexerModel.load(modelPath)
>>> loadedModel.labels == model.labels
True
>>> indexToStringPath = temp_path + "/index-to-string"
>>> inverter.save(indexToStringPath)
>>> loadedInverter = IndexToString.load(indexToStringPath)
>>> loadedInverter.getLabels() == inverter.getLabels()
True
>>> loadedModel.transform(stringIndDf).take(1) == model.transform(stringIndDf).take(1)
True
>>> stringIndexer.getStringOrderType()
'frequencyDesc'
>>> stringIndexer = StringIndexer(inputCol="label", outputCol="indexed", handleInvalid="error",
... stringOrderType="alphabetDesc")
>>> model = stringIndexer.fit(stringIndDf)
>>> td = model.transform(stringIndDf)
>>> sorted(set([(i[0], i[1]) for i in td.select(td.id, td.indexed).collect()]),
... key=lambda x: x[0])
[(0, 2.0), (1, 1.0), (2, 0.0), (3, 2.0), (4, 2.0), (5, 0.0)]
>>> fromlabelsModel = StringIndexerModel.from_labels(["a", "b", "c"],
... inputCol="label", outputCol="indexed", handleInvalid="error")
>>> result = fromlabelsModel.transform(stringIndDf)
>>> sorted(set([(i[0], i[1]) for i in result.select(result.id, result.indexed).collect()]),
... key=lambda x: x[0])
[(0, 0.0), (1, 1.0), (2, 2.0), (3, 0.0), (4, 0.0), (5, 2.0)]
>>> testData = sc.parallelize([Row(id=0, label1="a", label2="e"),
... Row(id=1, label1="b", label2="f"),
... Row(id=2, label1="c", label2="e"),
... Row(id=3, label1="a", label2="f"),
... Row(id=4, label1="a", label2="f"),
... Row(id=5, label1="c", label2="f")], 3)
>>> multiRowDf = spark.createDataFrame(testData)
>>> inputs = ["label1", "label2"]
>>> outputs = ["index1", "index2"]
>>> stringIndexer = StringIndexer(inputCols=inputs, outputCols=outputs)
>>> model = stringIndexer.fit(multiRowDf)
>>> result = model.transform(multiRowDf)
>>> sorted(set([(i[0], i[1], i[2]) for i in result.select(result.id, result.index1,
... result.index2).collect()]), key=lambda x: x[0])
[(0, 0.0, 1.0), (1, 2.0, 0.0), (2, 1.0, 1.0), (3, 0.0, 0.0), (4, 0.0, 0.0), (5, 1.0, 0.0)]
>>> fromlabelsModel = StringIndexerModel.from_arrays_of_labels([["a", "b", "c"], ["e", "f"]],
... inputCols=inputs, outputCols=outputs)
>>> result = fromlabelsModel.transform(multiRowDf)
>>> sorted(set([(i[0], i[1], i[2]) for i in result.select(result.id, result.index1,
... result.index2).collect()]), key=lambda x: x[0])
[(0, 0.0, 0.0), (1, 1.0, 1.0), (2, 2.0, 0.0), (3, 0.0, 1.0), (4, 0.0, 1.0), (5, 2.0, 1.0)]
"""
@keyword_only
def __init__(self, *, inputCol=None, outputCol=None, inputCols=None, outputCols=None,
handleInvalid="error", stringOrderType="frequencyDesc"):
"""
__init__(self, \\*, inputCol=None, outputCol=None, inputCols=None, outputCols=None, \
handleInvalid="error", stringOrderType="frequencyDesc")
"""
super(StringIndexer, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.StringIndexer", self.uid)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.4.0")
def setParams(self, *, inputCol=None, outputCol=None, inputCols=None, outputCols=None,
handleInvalid="error", stringOrderType="frequencyDesc"):
"""
setParams(self, \\*, inputCol=None, outputCol=None, inputCols=None, outputCols=None, \
handleInvalid="error", stringOrderType="frequencyDesc")
Sets params for this StringIndexer.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
def _create_model(self, java_model):
return StringIndexerModel(java_model)
@since("2.3.0")
def setStringOrderType(self, value):
"""
Sets the value of :py:attr:`stringOrderType`.
"""
return self._set(stringOrderType=value)
def setInputCol(self, value):
"""
Sets the value of :py:attr:`inputCol`.
"""
return self._set(inputCol=value)
@since("3.0.0")
def setInputCols(self, value):
"""
Sets the value of :py:attr:`inputCols`.
"""
return self._set(inputCols=value)
def setOutputCol(self, value):
"""
Sets the value of :py:attr:`outputCol`.
"""
return self._set(outputCol=value)
@since("3.0.0")
def setOutputCols(self, value):
"""
Sets the value of :py:attr:`outputCols`.
"""
return self._set(outputCols=value)
def setHandleInvalid(self, value):
"""
Sets the value of :py:attr:`handleInvalid`.
"""
return self._set(handleInvalid=value)
class StringIndexerModel(JavaModel, _StringIndexerParams, JavaMLReadable, JavaMLWritable):
"""
Model fitted by :py:class:`StringIndexer`.
.. versionadded:: 1.4.0
"""
def setInputCol(self, value):
"""
Sets the value of :py:attr:`inputCol`.
"""
return self._set(inputCol=value)
@since("3.0.0")
def setInputCols(self, value):
"""
Sets the value of :py:attr:`inputCols`.
"""
return self._set(inputCols=value)
def setOutputCol(self, value):
"""
Sets the value of :py:attr:`outputCol`.
"""
return self._set(outputCol=value)
@since("3.0.0")
def setOutputCols(self, value):
"""
Sets the value of :py:attr:`outputCols`.
"""
return self._set(outputCols=value)
@since("2.4.0")
def setHandleInvalid(self, value):
"""
Sets the value of :py:attr:`handleInvalid`.
"""
return self._set(handleInvalid=value)
@classmethod
@since("2.4.0")
def from_labels(cls, labels, inputCol, outputCol=None, handleInvalid=None):
"""
Construct the model directly from an array of label strings,
requires an active SparkContext.
"""
sc = SparkContext._active_spark_context
java_class = sc._gateway.jvm.java.lang.String
jlabels = StringIndexerModel._new_java_array(labels, java_class)
model = StringIndexerModel._create_from_java_class(
"org.apache.spark.ml.feature.StringIndexerModel", jlabels)
model.setInputCol(inputCol)
if outputCol is not None:
model.setOutputCol(outputCol)
if handleInvalid is not None:
model.setHandleInvalid(handleInvalid)
return model
@classmethod
@since("3.0.0")
def from_arrays_of_labels(cls, arrayOfLabels, inputCols, outputCols=None,
handleInvalid=None):
"""
Construct the model directly from an array of array of label strings,
requires an active SparkContext.
"""
sc = SparkContext._active_spark_context
java_class = sc._gateway.jvm.java.lang.String
jlabels = StringIndexerModel._new_java_array(arrayOfLabels, java_class)
model = StringIndexerModel._create_from_java_class(
"org.apache.spark.ml.feature.StringIndexerModel", jlabels)
model.setInputCols(inputCols)
if outputCols is not None:
model.setOutputCols(outputCols)
if handleInvalid is not None:
model.setHandleInvalid(handleInvalid)
return model
@property
@since("1.5.0")
def labels(self):
"""
Ordered list of labels, corresponding to indices to be assigned.
.. deprecated:: 3.1.0
It will be removed in future versions. Use `labelsArray` method instead.
"""
return self._call_java("labels")
@property
@since("3.0.2")
def labelsArray(self):
"""
Array of ordered list of labels, corresponding to indices to be assigned
for each input column.
"""
return self._call_java("labelsArray")
@inherit_doc
class IndexToString(JavaTransformer, HasInputCol, HasOutputCol, JavaMLReadable, JavaMLWritable):
"""
A :py:class:`pyspark.ml.base.Transformer` that maps a column of indices back to a new column of
corresponding string values.
The index-string mapping is either from the ML attributes of the input column,
or from user-supplied labels (which take precedence over ML attributes).
.. versionadded:: 1.6.0
See Also
--------
StringIndexer : for converting categorical values into category indices
"""
labels = Param(Params._dummy(), "labels",
"Optional array of labels specifying index-string mapping." +
" If not provided or if empty, then metadata from inputCol is used instead.",
typeConverter=TypeConverters.toListString)
@keyword_only
def __init__(self, *, inputCol=None, outputCol=None, labels=None):
"""
__init__(self, \\*, inputCol=None, outputCol=None, labels=None)
"""
super(IndexToString, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.IndexToString",
self.uid)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.6.0")
def setParams(self, *, inputCol=None, outputCol=None, labels=None):
"""
setParams(self, \\*, inputCol=None, outputCol=None, labels=None)
Sets params for this IndexToString.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("1.6.0")
def setLabels(self, value):
"""
Sets the value of :py:attr:`labels`.
"""
return self._set(labels=value)
@since("1.6.0")
def getLabels(self):
"""
Gets the value of :py:attr:`labels` or its default value.
"""
return self.getOrDefault(self.labels)
def setInputCol(self, value):
"""
Sets the value of :py:attr:`inputCol`.
"""
return self._set(inputCol=value)
def setOutputCol(self, value):
"""
Sets the value of :py:attr:`outputCol`.
"""
return self._set(outputCol=value)
class StopWordsRemover(JavaTransformer, HasInputCol, HasOutputCol, HasInputCols, HasOutputCols,
JavaMLReadable, JavaMLWritable):
"""
A feature transformer that filters out stop words from input.
Since 3.0.0, :py:class:`StopWordsRemover` can filter out multiple columns at once by setting
the :py:attr:`inputCols` parameter. Note that when both the :py:attr:`inputCol` and
:py:attr:`inputCols` parameters are set, an Exception will be thrown.
.. versionadded:: 1.6.0
Notes
-----
Null values from the input array are preserved unless null is explicitly added to stopWords.
Examples
--------
>>> df = spark.createDataFrame([(["a", "b", "c"],)], ["text"])
>>> remover = StopWordsRemover(stopWords=["b"])
>>> remover.setInputCol("text")
StopWordsRemover...
>>> remover.setOutputCol("words")
StopWordsRemover...
>>> remover.transform(df).head().words == ['a', 'c']
True
>>> stopWordsRemoverPath = temp_path + "/stopwords-remover"
>>> remover.save(stopWordsRemoverPath)
>>> loadedRemover = StopWordsRemover.load(stopWordsRemoverPath)
>>> loadedRemover.getStopWords() == remover.getStopWords()
True
>>> loadedRemover.getCaseSensitive() == remover.getCaseSensitive()
True
>>> loadedRemover.transform(df).take(1) == remover.transform(df).take(1)
True
>>> df2 = spark.createDataFrame([(["a", "b", "c"], ["a", "b"])], ["text1", "text2"])
>>> remover2 = StopWordsRemover(stopWords=["b"])
>>> remover2.setInputCols(["text1", "text2"]).setOutputCols(["words1", "words2"])
StopWordsRemover...
>>> remover2.transform(df2).show()
+---------+------+------+------+
| text1| text2|words1|words2|
+---------+------+------+------+
|[a, b, c]|[a, b]|[a, c]| [a]|
+---------+------+------+------+
...
"""
stopWords = Param(Params._dummy(), "stopWords", "The words to be filtered out",
typeConverter=TypeConverters.toListString)
caseSensitive = Param(Params._dummy(), "caseSensitive", "whether to do a case sensitive " +
"comparison over the stop words", typeConverter=TypeConverters.toBoolean)
locale = Param(Params._dummy(), "locale", "locale of the input. ignored when case sensitive " +
"is true", typeConverter=TypeConverters.toString)
@keyword_only
def __init__(self, *, inputCol=None, outputCol=None, stopWords=None, caseSensitive=False,
locale=None, inputCols=None, outputCols=None):
"""
__init__(self, \\*, inputCol=None, outputCol=None, stopWords=None, caseSensitive=False, \
locale=None, inputCols=None, outputCols=None)
"""
super(StopWordsRemover, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.StopWordsRemover",
self.uid)
self._setDefault(stopWords=StopWordsRemover.loadDefaultStopWords("english"),
caseSensitive=False, locale=self._java_obj.getLocale())
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.6.0")
def setParams(self, *, inputCol=None, outputCol=None, stopWords=None, caseSensitive=False,
locale=None, inputCols=None, outputCols=None):
"""
setParams(self, \\*, inputCol=None, outputCol=None, stopWords=None, caseSensitive=False, \
locale=None, inputCols=None, outputCols=None)
Sets params for this StopWordsRemover.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("1.6.0")
def setStopWords(self, value):
"""
Sets the value of :py:attr:`stopWords`.
"""
return self._set(stopWords=value)
@since("1.6.0")
def getStopWords(self):
"""
Gets the value of :py:attr:`stopWords` or its default value.
"""
return self.getOrDefault(self.stopWords)
@since("1.6.0")
def setCaseSensitive(self, value):
"""
Sets the value of :py:attr:`caseSensitive`.
"""
return self._set(caseSensitive=value)
@since("1.6.0")
def getCaseSensitive(self):
"""
Gets the value of :py:attr:`caseSensitive` or its default value.
"""
return self.getOrDefault(self.caseSensitive)
@since("2.4.0")
def setLocale(self, value):
"""
Sets the value of :py:attr:`locale`.
"""
return self._set(locale=value)
@since("2.4.0")
def getLocale(self):
"""
Gets the value of :py:attr:`locale`.
"""
return self.getOrDefault(self.locale)
def setInputCol(self, value):
"""
Sets the value of :py:attr:`inputCol`.
"""
return self._set(inputCol=value)
def setOutputCol(self, value):
"""
Sets the value of :py:attr:`outputCol`.
"""
return self._set(outputCol=value)
@since("3.0.0")
def setInputCols(self, value):
"""
Sets the value of :py:attr:`inputCols`.
"""
return self._set(inputCols=value)
@since("3.0.0")
def setOutputCols(self, value):
"""
Sets the value of :py:attr:`outputCols`.
"""
return self._set(outputCols=value)
@staticmethod
@since("2.0.0")
def loadDefaultStopWords(language):
"""
Loads the default stop words for the given language.
Supported languages: danish, dutch, english, finnish, french, german, hungarian,
italian, norwegian, portuguese, russian, spanish, swedish, turkish
"""
stopWordsObj = _jvm().org.apache.spark.ml.feature.StopWordsRemover
return list(stopWordsObj.loadDefaultStopWords(language))
@inherit_doc
class Tokenizer(JavaTransformer, HasInputCol, HasOutputCol, JavaMLReadable, JavaMLWritable):
"""
A tokenizer that converts the input string to lowercase and then
splits it by white spaces.
.. versionadded:: 1.3.0
Examples
--------
>>> df = spark.createDataFrame([("a b c",)], ["text"])
>>> tokenizer = Tokenizer(outputCol="words")
>>> tokenizer.setInputCol("text")
Tokenizer...
>>> tokenizer.transform(df).head()
Row(text='a b c', words=['a', 'b', 'c'])
>>> # Change a parameter.
>>> tokenizer.setParams(outputCol="tokens").transform(df).head()
Row(text='a b c', tokens=['a', 'b', 'c'])
>>> # Temporarily modify a parameter.
>>> tokenizer.transform(df, {tokenizer.outputCol: "words"}).head()
Row(text='a b c', words=['a', 'b', 'c'])
>>> tokenizer.transform(df).head()
Row(text='a b c', tokens=['a', 'b', 'c'])
>>> # Must use keyword arguments to specify params.
>>> tokenizer.setParams("text")
Traceback (most recent call last):
...
TypeError: Method setParams forces keyword arguments.
>>> tokenizerPath = temp_path + "/tokenizer"
>>> tokenizer.save(tokenizerPath)
>>> loadedTokenizer = Tokenizer.load(tokenizerPath)
>>> loadedTokenizer.transform(df).head().tokens == tokenizer.transform(df).head().tokens
True
"""
@keyword_only
def __init__(self, *, inputCol=None, outputCol=None):
"""
__init__(self, \\*, inputCol=None, outputCol=None)
"""
super(Tokenizer, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.Tokenizer", self.uid)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.3.0")
def setParams(self, *, inputCol=None, outputCol=None):
"""
setParams(self, \\*, inputCol=None, outputCol=None)
Sets params for this Tokenizer.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
def setInputCol(self, value):
"""
Sets the value of :py:attr:`inputCol`.
"""
return self._set(inputCol=value)
def setOutputCol(self, value):
"""
Sets the value of :py:attr:`outputCol`.
"""
return self._set(outputCol=value)
@inherit_doc
class VectorAssembler(JavaTransformer, HasInputCols, HasOutputCol, HasHandleInvalid, JavaMLReadable,
JavaMLWritable):
"""
A feature transformer that merges multiple columns into a vector column.
.. versionadded:: 1.4.0
Examples
--------
>>> df = spark.createDataFrame([(1, 0, 3)], ["a", "b", "c"])
>>> vecAssembler = VectorAssembler(outputCol="features")
>>> vecAssembler.setInputCols(["a", "b", "c"])
VectorAssembler...
>>> vecAssembler.transform(df).head().features
DenseVector([1.0, 0.0, 3.0])
>>> vecAssembler.setParams(outputCol="freqs").transform(df).head().freqs
DenseVector([1.0, 0.0, 3.0])
>>> params = {vecAssembler.inputCols: ["b", "a"], vecAssembler.outputCol: "vector"}
>>> vecAssembler.transform(df, params).head().vector
DenseVector([0.0, 1.0])
>>> vectorAssemblerPath = temp_path + "/vector-assembler"
>>> vecAssembler.save(vectorAssemblerPath)
>>> loadedAssembler = VectorAssembler.load(vectorAssemblerPath)
>>> loadedAssembler.transform(df).head().freqs == vecAssembler.transform(df).head().freqs
True
>>> dfWithNullsAndNaNs = spark.createDataFrame(
... [(1.0, 2.0, None), (3.0, float("nan"), 4.0), (5.0, 6.0, 7.0)], ["a", "b", "c"])
>>> vecAssembler2 = VectorAssembler(inputCols=["a", "b", "c"], outputCol="features",
... handleInvalid="keep")
>>> vecAssembler2.transform(dfWithNullsAndNaNs).show()
+---+---+----+-------------+
| a| b| c| features|
+---+---+----+-------------+
|1.0|2.0|null|[1.0,2.0,NaN]|
|3.0|NaN| 4.0|[3.0,NaN,4.0]|
|5.0|6.0| 7.0|[5.0,6.0,7.0]|
+---+---+----+-------------+
...
>>> vecAssembler2.setParams(handleInvalid="skip").transform(dfWithNullsAndNaNs).show()
+---+---+---+-------------+
| a| b| c| features|
+---+---+---+-------------+
|5.0|6.0|7.0|[5.0,6.0,7.0]|
+---+---+---+-------------+
...
"""
handleInvalid = Param(Params._dummy(), "handleInvalid", "How to handle invalid data (NULL " +
"and NaN values). Options are 'skip' (filter out rows with invalid " +
"data), 'error' (throw an error), or 'keep' (return relevant number " +
"of NaN in the output). Column lengths are taken from the size of ML " +
"Attribute Group, which can be set using `VectorSizeHint` in a " +
"pipeline before `VectorAssembler`. Column lengths can also be " +
"inferred from first rows of the data since it is safe to do so but " +
"only in case of 'error' or 'skip').",
typeConverter=TypeConverters.toString)
@keyword_only
def __init__(self, *, inputCols=None, outputCol=None, handleInvalid="error"):
"""
__init__(self, \\*, inputCols=None, outputCol=None, handleInvalid="error")
"""
super(VectorAssembler, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.VectorAssembler", self.uid)
self._setDefault(handleInvalid="error")
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.4.0")
def setParams(self, *, inputCols=None, outputCol=None, handleInvalid="error"):
"""
setParams(self, \\*, inputCols=None, outputCol=None, handleInvalid="error")
Sets params for this VectorAssembler.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
def setInputCols(self, value):
"""
Sets the value of :py:attr:`inputCols`.
"""
return self._set(inputCols=value)
def setOutputCol(self, value):
"""
Sets the value of :py:attr:`outputCol`.
"""
return self._set(outputCol=value)
def setHandleInvalid(self, value):
"""
Sets the value of :py:attr:`handleInvalid`.
"""
return self._set(handleInvalid=value)
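# Hedged usage sketch, not part of the upstream API: merges numeric columns into one
# vector column and uses handleInvalid="skip" to drop rows containing nulls or NaNs,
# per the docstring above. Assumes an active SparkSession; column names are example-only.
def _example_vector_assembler(spark):
    df = spark.createDataFrame([(1.0, 2.0, None), (3.0, 4.0, 5.0)], ["a", "b", "c"])
    assembler = VectorAssembler(inputCols=["a", "b", "c"], outputCol="features",
                                handleInvalid="skip")
    return assembler.transform(df)  # only the fully valid row survives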
class _VectorIndexerParams(HasInputCol, HasOutputCol, HasHandleInvalid):
"""
Params for :py:class:`VectorIndexer` and :py:class:`VectorIndexerModel`.
.. versionadded:: 3.0.0
"""
maxCategories = Param(Params._dummy(), "maxCategories",
"Threshold for the number of values a categorical feature can take " +
"(>= 2). If a feature is found to have > maxCategories values, then " +
"it is declared continuous.", typeConverter=TypeConverters.toInt)
handleInvalid = Param(Params._dummy(), "handleInvalid", "How to handle invalid data " +
"(unseen labels or NULL values). Options are 'skip' (filter out " +
"rows with invalid data), 'error' (throw an error), or 'keep' (put " +
"invalid data in a special additional bucket, at index of the number " +
"of categories of the feature).",
typeConverter=TypeConverters.toString)
def __init__(self, *args):
super(_VectorIndexerParams, self).__init__(*args)
self._setDefault(maxCategories=20, handleInvalid="error")
@since("1.4.0")
def getMaxCategories(self):
"""
Gets the value of maxCategories or its default value.
"""
return self.getOrDefault(self.maxCategories)
@inherit_doc
class VectorIndexer(JavaEstimator, _VectorIndexerParams, JavaMLReadable, JavaMLWritable):
"""
Class for indexing categorical feature columns in a dataset of `Vector`.
This has 2 usage modes:
- Automatically identify categorical features (default behavior)
- This helps process a dataset of unknown vectors into a dataset with some continuous
features and some categorical features. The choice between continuous and categorical
is based upon a maxCategories parameter.
- Set maxCategories to the maximum number of categorical any categorical feature should
have.
- E.g.: Feature 0 has unique values {-1.0, 0.0}, and feature 1 values {1.0, 3.0, 5.0}.
If maxCategories = 2, then feature 0 will be declared categorical and use indices {0, 1},
and feature 1 will be declared continuous.
- Index all features, if all features are categorical
- If maxCategories is set to be very large, then this will build an index of unique
values for all features.
- Warning: This can cause problems if features are continuous since this will collect ALL
unique values to the driver.
- E.g.: Feature 0 has unique values {-1.0, 0.0}, and feature 1 values {1.0, 3.0, 5.0}.
If maxCategories >= 3, then both features will be declared categorical.
This returns a model which can transform categorical features to use 0-based indices.
Index stability:
- This is not guaranteed to choose the same category index across multiple runs.
- If a categorical feature includes value 0, then this is guaranteed to map value 0 to
index 0. This maintains vector sparsity.
- More stability may be added in the future.
TODO: Future extensions: The following functionality is planned for the future:
- Preserve metadata in transform; if a feature's metadata is already present,
do not recompute.
- Specify certain features to not index, either via a parameter or via existing metadata.
- Add warning if a categorical feature has only 1 category.
.. versionadded:: 1.4.0
Examples
--------
>>> from pyspark.ml.linalg import Vectors
>>> df = spark.createDataFrame([(Vectors.dense([-1.0, 0.0]),),
... (Vectors.dense([0.0, 1.0]),), (Vectors.dense([0.0, 2.0]),)], ["a"])
>>> indexer = VectorIndexer(maxCategories=2, inputCol="a")
>>> indexer.setOutputCol("indexed")
VectorIndexer...
>>> model = indexer.fit(df)
>>> indexer.getHandleInvalid()
'error'
>>> model.setOutputCol("output")
VectorIndexerModel...
>>> model.transform(df).head().output
DenseVector([1.0, 0.0])
>>> model.numFeatures
2
>>> model.categoryMaps
{0: {0.0: 0, -1.0: 1}}
>>> indexer.setParams(outputCol="test").fit(df).transform(df).collect()[1].test
DenseVector([0.0, 1.0])
>>> params = {indexer.maxCategories: 3, indexer.outputCol: "vector"}
>>> model2 = indexer.fit(df, params)
>>> model2.transform(df).head().vector
DenseVector([1.0, 0.0])
>>> vectorIndexerPath = temp_path + "/vector-indexer"
>>> indexer.save(vectorIndexerPath)
>>> loadedIndexer = VectorIndexer.load(vectorIndexerPath)
>>> loadedIndexer.getMaxCategories() == indexer.getMaxCategories()
True
>>> modelPath = temp_path + "/vector-indexer-model"
>>> model.save(modelPath)
>>> loadedModel = VectorIndexerModel.load(modelPath)
>>> loadedModel.numFeatures == model.numFeatures
True
>>> loadedModel.categoryMaps == model.categoryMaps
True
>>> loadedModel.transform(df).take(1) == model.transform(df).take(1)
True
>>> dfWithInvalid = spark.createDataFrame([(Vectors.dense([3.0, 1.0]),)], ["a"])
>>> indexer.getHandleInvalid()
'error'
>>> model3 = indexer.setHandleInvalid("skip").fit(df)
>>> model3.transform(dfWithInvalid).count()
0
>>> model4 = indexer.setParams(handleInvalid="keep", outputCol="indexed").fit(df)
>>> model4.transform(dfWithInvalid).head().indexed
DenseVector([2.0, 1.0])
"""
@keyword_only
def __init__(self, *, maxCategories=20, inputCol=None, outputCol=None, handleInvalid="error"):
"""
__init__(self, \\*, maxCategories=20, inputCol=None, outputCol=None, handleInvalid="error")
"""
super(VectorIndexer, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.VectorIndexer", self.uid)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.4.0")
def setParams(self, *, maxCategories=20, inputCol=None, outputCol=None, handleInvalid="error"):
"""
setParams(self, \\*, maxCategories=20, inputCol=None, outputCol=None, handleInvalid="error")
Sets params for this VectorIndexer.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("1.4.0")
def setMaxCategories(self, value):
"""
Sets the value of :py:attr:`maxCategories`.
"""
return self._set(maxCategories=value)
def setInputCol(self, value):
"""
Sets the value of :py:attr:`inputCol`.
"""
return self._set(inputCol=value)
def setOutputCol(self, value):
"""
Sets the value of :py:attr:`outputCol`.
"""
return self._set(outputCol=value)
def setHandleInvalid(self, value):
"""
Sets the value of :py:attr:`handleInvalid`.
"""
return self._set(handleInvalid=value)
def _create_model(self, java_model):
return VectorIndexerModel(java_model)
class VectorIndexerModel(JavaModel, _VectorIndexerParams, JavaMLReadable, JavaMLWritable):
"""
Model fitted by :py:class:`VectorIndexer`.
Transform categorical features to use 0-based indices instead of their original values.
- Categorical features are mapped to indices.
- Continuous features (columns) are left unchanged.
This also appends metadata to the output column, marking features as Numeric (continuous),
Nominal (categorical), or Binary (either continuous or categorical).
Non-ML metadata is not carried over from the input to the output column.
This maintains vector sparsity.
.. versionadded:: 1.4.0
"""
@since("3.0.0")
def setInputCol(self, value):
"""
Sets the value of :py:attr:`inputCol`.
"""
return self._set(inputCol=value)
@since("3.0.0")
def setOutputCol(self, value):
"""
Sets the value of :py:attr:`outputCol`.
"""
return self._set(outputCol=value)
@property
@since("1.4.0")
def numFeatures(self):
"""
Number of features, i.e., length of Vectors which this transforms.
"""
return self._call_java("numFeatures")
@property
@since("1.4.0")
def categoryMaps(self):
"""
Feature value index. Keys are categorical feature indices (column indices).
Values are maps from original feature values to 0-based category indices.
If a feature is not in this map, it is treated as continuous.
"""
return self._call_java("javaCategoryMaps")
@inherit_doc
class VectorSlicer(JavaTransformer, HasInputCol, HasOutputCol, JavaMLReadable, JavaMLWritable):
"""
This class takes a feature vector and outputs a new feature vector with a subarray
of the original features.
The subset of features can be specified with either indices (`setIndices()`)
or names (`setNames()`). At least one feature must be selected. Duplicate features
are not allowed, so there can be no overlap between selected indices and names.
The output vector will order features with the selected indices first (in the order given),
followed by the selected names (in the order given).
.. versionadded:: 1.6.0
Examples
--------
>>> from pyspark.ml.linalg import Vectors
>>> df = spark.createDataFrame([
... (Vectors.dense([-2.0, 2.3, 0.0, 0.0, 1.0]),),
... (Vectors.dense([0.0, 0.0, 0.0, 0.0, 0.0]),),
... (Vectors.dense([0.6, -1.1, -3.0, 4.5, 3.3]),)], ["features"])
>>> vs = VectorSlicer(outputCol="sliced", indices=[1, 4])
>>> vs.setInputCol("features")
VectorSlicer...
>>> vs.transform(df).head().sliced
DenseVector([2.3, 1.0])
>>> vectorSlicerPath = temp_path + "/vector-slicer"
>>> vs.save(vectorSlicerPath)
>>> loadedVs = VectorSlicer.load(vectorSlicerPath)
>>> loadedVs.getIndices() == vs.getIndices()
True
>>> loadedVs.getNames() == vs.getNames()
True
>>> loadedVs.transform(df).take(1) == vs.transform(df).take(1)
True
"""
indices = Param(Params._dummy(), "indices", "An array of indices to select features from " +
"a vector column. There can be no overlap with names.",
typeConverter=TypeConverters.toListInt)
names = Param(Params._dummy(), "names", "An array of feature names to select features from " +
"a vector column. These names must be specified by ML " +
"org.apache.spark.ml.attribute.Attribute. There can be no overlap with " +
"indices.", typeConverter=TypeConverters.toListString)
@keyword_only
def __init__(self, *, inputCol=None, outputCol=None, indices=None, names=None):
"""
__init__(self, \\*, inputCol=None, outputCol=None, indices=None, names=None)
"""
super(VectorSlicer, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.VectorSlicer", self.uid)
self._setDefault(indices=[], names=[])
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.6.0")
def setParams(self, *, inputCol=None, outputCol=None, indices=None, names=None):
"""
setParams(self, \\*, inputCol=None, outputCol=None, indices=None, names=None):
Sets params for this VectorSlicer.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("1.6.0")
def setIndices(self, value):
"""
Sets the value of :py:attr:`indices`.
"""
return self._set(indices=value)
@since("1.6.0")
def getIndices(self):
"""
Gets the value of indices or its default value.
"""
return self.getOrDefault(self.indices)
@since("1.6.0")
def setNames(self, value):
"""
Sets the value of :py:attr:`names`.
"""
return self._set(names=value)
@since("1.6.0")
def getNames(self):
"""
Gets the value of names or its default value.
"""
return self.getOrDefault(self.names)
def setInputCol(self, value):
"""
Sets the value of :py:attr:`inputCol`.
"""
return self._set(inputCol=value)
def setOutputCol(self, value):
"""
Sets the value of :py:attr:`outputCol`.
"""
return self._set(outputCol=value)
class _Word2VecParams(HasStepSize, HasMaxIter, HasSeed, HasInputCol, HasOutputCol):
"""
Params for :py:class:`Word2Vec` and :py:class:`Word2VecModel`.
.. versionadded:: 3.0.0
"""
vectorSize = Param(Params._dummy(), "vectorSize",
"the dimension of codes after transforming from words",
typeConverter=TypeConverters.toInt)
numPartitions = Param(Params._dummy(), "numPartitions",
"number of partitions for sentences of words",
typeConverter=TypeConverters.toInt)
minCount = Param(Params._dummy(), "minCount",
"the minimum number of times a token must appear to be included in the " +
"word2vec model's vocabulary", typeConverter=TypeConverters.toInt)
windowSize = Param(Params._dummy(), "windowSize",
"the window size (context words from [-window, window]). Default value is 5",
typeConverter=TypeConverters.toInt)
maxSentenceLength = Param(Params._dummy(), "maxSentenceLength",
"Maximum length (in words) of each sentence in the input data. " +
"Any sentence longer than this threshold will " +
"be divided into chunks up to the size.",
typeConverter=TypeConverters.toInt)
def __init__(self, *args):
super(_Word2VecParams, self).__init__(*args)
self._setDefault(vectorSize=100, minCount=5, numPartitions=1, stepSize=0.025, maxIter=1,
windowSize=5, maxSentenceLength=1000)
@since("1.4.0")
def getVectorSize(self):
"""
Gets the value of vectorSize or its default value.
"""
return self.getOrDefault(self.vectorSize)
@since("1.4.0")
def getNumPartitions(self):
"""
Gets the value of numPartitions or its default value.
"""
return self.getOrDefault(self.numPartitions)
@since("1.4.0")
def getMinCount(self):
"""
Gets the value of minCount or its default value.
"""
return self.getOrDefault(self.minCount)
@since("2.0.0")
def getWindowSize(self):
"""
Gets the value of windowSize or its default value.
"""
return self.getOrDefault(self.windowSize)
@since("2.0.0")
def getMaxSentenceLength(self):
"""
Gets the value of maxSentenceLength or its default value.
"""
return self.getOrDefault(self.maxSentenceLength)
@inherit_doc
class Word2Vec(JavaEstimator, _Word2VecParams, JavaMLReadable, JavaMLWritable):
"""
Word2Vec trains a model of `Map(String, Vector)`, i.e. it transforms each word into a
vector code that can be used in further natural language processing or machine learning.
.. versionadded:: 1.4.0
Examples
--------
>>> sent = ("a b " * 100 + "a c " * 10).split(" ")
>>> doc = spark.createDataFrame([(sent,), (sent,)], ["sentence"])
>>> word2Vec = Word2Vec(vectorSize=5, seed=42, inputCol="sentence", outputCol="model")
>>> word2Vec.setMaxIter(10)
Word2Vec...
>>> word2Vec.getMaxIter()
10
>>> word2Vec.clear(word2Vec.maxIter)
>>> model = word2Vec.fit(doc)
>>> model.getMinCount()
5
>>> model.setInputCol("sentence")
Word2VecModel...
>>> model.getVectors().show()
+----+--------------------+
|word| vector|
+----+--------------------+
| a|[0.09511678665876...|
| b|[-1.2028766870498...|
| c|[0.30153277516365...|
+----+--------------------+
...
>>> model.findSynonymsArray("a", 2)
[('b', 0.015859...), ('c', -0.568079...)]
>>> from pyspark.sql.functions import format_number as fmt
>>> model.findSynonyms("a", 2).select("word", fmt("similarity", 5).alias("similarity")).show()
+----+----------+
|word|similarity|
+----+----------+
| b| 0.01586|
| c| -0.56808|
+----+----------+
...
>>> model.transform(doc).head().model
DenseVector([-0.4833, 0.1855, -0.273, -0.0509, -0.4769])
>>> word2vecPath = temp_path + "/word2vec"
>>> word2Vec.save(word2vecPath)
>>> loadedWord2Vec = Word2Vec.load(word2vecPath)
>>> loadedWord2Vec.getVectorSize() == word2Vec.getVectorSize()
True
>>> loadedWord2Vec.getNumPartitions() == word2Vec.getNumPartitions()
True
>>> loadedWord2Vec.getMinCount() == word2Vec.getMinCount()
True
>>> modelPath = temp_path + "/word2vec-model"
>>> model.save(modelPath)
>>> loadedModel = Word2VecModel.load(modelPath)
>>> loadedModel.getVectors().first().word == model.getVectors().first().word
True
>>> loadedModel.getVectors().first().vector == model.getVectors().first().vector
True
>>> loadedModel.transform(doc).take(1) == model.transform(doc).take(1)
True
"""
@keyword_only
def __init__(self, *, vectorSize=100, minCount=5, numPartitions=1, stepSize=0.025,
maxIter=1, seed=None, inputCol=None, outputCol=None, windowSize=5,
maxSentenceLength=1000):
"""
__init__(self, \\*, vectorSize=100, minCount=5, numPartitions=1, stepSize=0.025, \
maxIter=1, seed=None, inputCol=None, outputCol=None, windowSize=5, \
maxSentenceLength=1000)
"""
super(Word2Vec, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.Word2Vec", self.uid)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.4.0")
def setParams(self, *, vectorSize=100, minCount=5, numPartitions=1, stepSize=0.025,
maxIter=1, seed=None, inputCol=None, outputCol=None, windowSize=5,
maxSentenceLength=1000):
"""
setParams(self, \\*, vectorSize=100, minCount=5, numPartitions=1, stepSize=0.025, \
          maxIter=1, seed=None, inputCol=None, outputCol=None, windowSize=5, \
          maxSentenceLength=1000)
Sets params for this Word2Vec.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("1.4.0")
def setVectorSize(self, value):
"""
Sets the value of :py:attr:`vectorSize`.
"""
return self._set(vectorSize=value)
@since("1.4.0")
def setNumPartitions(self, value):
"""
Sets the value of :py:attr:`numPartitions`.
"""
return self._set(numPartitions=value)
@since("1.4.0")
def setMinCount(self, value):
"""
Sets the value of :py:attr:`minCount`.
"""
return self._set(minCount=value)
@since("2.0.0")
def setWindowSize(self, value):
"""
Sets the value of :py:attr:`windowSize`.
"""
return self._set(windowSize=value)
@since("2.0.0")
def setMaxSentenceLength(self, value):
"""
Sets the value of :py:attr:`maxSentenceLength`.
"""
return self._set(maxSentenceLength=value)
def setMaxIter(self, value):
"""
Sets the value of :py:attr:`maxIter`.
"""
return self._set(maxIter=value)
def setInputCol(self, value):
"""
Sets the value of :py:attr:`inputCol`.
"""
return self._set(inputCol=value)
def setOutputCol(self, value):
"""
Sets the value of :py:attr:`outputCol`.
"""
return self._set(outputCol=value)
def setSeed(self, value):
"""
Sets the value of :py:attr:`seed`.
"""
return self._set(seed=value)
@since("1.4.0")
def setStepSize(self, value):
"""
Sets the value of :py:attr:`stepSize`.
"""
return self._set(stepSize=value)
def _create_model(self, java_model):
return Word2VecModel(java_model)
class Word2VecModel(JavaModel, _Word2VecParams, JavaMLReadable, JavaMLWritable):
"""
Model fitted by :py:class:`Word2Vec`.
.. versionadded:: 1.4.0
"""
@since("1.5.0")
def getVectors(self):
"""
Returns the vector representation of the words as a dataframe
with two fields, word and vector.
"""
return self._call_java("getVectors")
def setInputCol(self, value):
"""
Sets the value of :py:attr:`inputCol`.
"""
return self._set(inputCol=value)
def setOutputCol(self, value):
"""
Sets the value of :py:attr:`outputCol`.
"""
return self._set(outputCol=value)
@since("1.5.0")
def findSynonyms(self, word, num):
"""
Find "num" number of words closest in similarity to "word".
word can be a string or vector representation.
Returns a dataframe with two fields word and similarity (which
gives the cosine similarity).
"""
if not isinstance(word, str):
word = _convert_to_vector(word)
return self._call_java("findSynonyms", word, num)
@since("2.3.0")
def findSynonymsArray(self, word, num):
"""
Find "num" number of words closest in similarity to "word".
word can be a string or vector representation.
Returns an array with two fields word and similarity (which
gives the cosine similarity).
"""
if not isinstance(word, str):
word = _convert_to_vector(word)
tuples = self._java_obj.findSynonymsArray(word, num)
return list(map(lambda st: (st._1(), st._2()), list(tuples)))
class _PCAParams(HasInputCol, HasOutputCol):
"""
Params for :py:class:`PCA` and :py:class:`PCAModel`.
.. versionadded:: 3.0.0
"""
k = Param(Params._dummy(), "k", "the number of principal components",
typeConverter=TypeConverters.toInt)
@since("1.5.0")
def getK(self):
"""
Gets the value of k or its default value.
"""
return self.getOrDefault(self.k)
@inherit_doc
class PCA(JavaEstimator, _PCAParams, JavaMLReadable, JavaMLWritable):
"""
PCA trains a model to project vectors to a lower dimensional space of the
top :py:attr:`k` principal components.
.. versionadded:: 1.5.0
Examples
--------
>>> from pyspark.ml.linalg import Vectors
>>> data = [(Vectors.sparse(5, [(1, 1.0), (3, 7.0)]),),
... (Vectors.dense([2.0, 0.0, 3.0, 4.0, 5.0]),),
... (Vectors.dense([4.0, 0.0, 0.0, 6.0, 7.0]),)]
>>> df = spark.createDataFrame(data,["features"])
>>> pca = PCA(k=2, inputCol="features")
>>> pca.setOutputCol("pca_features")
PCA...
>>> model = pca.fit(df)
>>> model.getK()
2
>>> model.setOutputCol("output")
PCAModel...
>>> model.transform(df).collect()[0].output
DenseVector([1.648..., -4.013...])
>>> model.explainedVariance
DenseVector([0.794..., 0.205...])
>>> pcaPath = temp_path + "/pca"
>>> pca.save(pcaPath)
>>> loadedPca = PCA.load(pcaPath)
>>> loadedPca.getK() == pca.getK()
True
>>> modelPath = temp_path + "/pca-model"
>>> model.save(modelPath)
>>> loadedModel = PCAModel.load(modelPath)
>>> loadedModel.pc == model.pc
True
>>> loadedModel.explainedVariance == model.explainedVariance
True
>>> loadedModel.transform(df).take(1) == model.transform(df).take(1)
True
"""
@keyword_only
def __init__(self, *, k=None, inputCol=None, outputCol=None):
"""
__init__(self, \\*, k=None, inputCol=None, outputCol=None)
"""
super(PCA, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.PCA", self.uid)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.5.0")
def setParams(self, *, k=None, inputCol=None, outputCol=None):
"""
setParams(self, \\*, k=None, inputCol=None, outputCol=None)
Set params for this PCA.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("1.5.0")
def setK(self, value):
"""
Sets the value of :py:attr:`k`.
"""
return self._set(k=value)
def setInputCol(self, value):
"""
Sets the value of :py:attr:`inputCol`.
"""
return self._set(inputCol=value)
def setOutputCol(self, value):
"""
Sets the value of :py:attr:`outputCol`.
"""
return self._set(outputCol=value)
def _create_model(self, java_model):
return PCAModel(java_model)
class PCAModel(JavaModel, _PCAParams, JavaMLReadable, JavaMLWritable):
"""
Model fitted by :py:class:`PCA`. Transforms vectors to a lower dimensional space.
.. versionadded:: 1.5.0
"""
@since("3.0.0")
def setInputCol(self, value):
"""
Sets the value of :py:attr:`inputCol`.
"""
return self._set(inputCol=value)
@since("3.0.0")
def setOutputCol(self, value):
"""
Sets the value of :py:attr:`outputCol`.
"""
return self._set(outputCol=value)
@property
@since("2.0.0")
def pc(self):
"""
Returns a principal components Matrix.
Each column is one principal component.
"""
return self._call_java("pc")
@property
@since("2.0.0")
def explainedVariance(self):
"""
Returns a vector of proportions of variance
explained by each principal component.
"""
return self._call_java("explainedVariance")
class _RFormulaParams(HasFeaturesCol, HasLabelCol, HasHandleInvalid):
"""
Params for :py:class:`RFormula` and :py:class:`RFormulaModel`.
.. versionadded:: 3.0.0
"""
formula = Param(Params._dummy(), "formula", "R model formula",
typeConverter=TypeConverters.toString)
forceIndexLabel = Param(Params._dummy(), "forceIndexLabel",
"Force to index label whether it is numeric or string",
typeConverter=TypeConverters.toBoolean)
stringIndexerOrderType = Param(Params._dummy(), "stringIndexerOrderType",
"How to order categories of a string feature column used by " +
"StringIndexer. The last category after ordering is dropped " +
"when encoding strings. Supported options: frequencyDesc, " +
"frequencyAsc, alphabetDesc, alphabetAsc. The default value " +
"is frequencyDesc. When the ordering is set to alphabetDesc, " +
"RFormula drops the same category as R when encoding strings.",
typeConverter=TypeConverters.toString)
handleInvalid = Param(Params._dummy(), "handleInvalid", "how to handle invalid entries. " +
"Options are 'skip' (filter out rows with invalid values), " +
"'error' (throw an error), or 'keep' (put invalid data in a special " +
"additional bucket, at index numLabels).",
typeConverter=TypeConverters.toString)
def __init__(self, *args):
super(_RFormulaParams, self).__init__(*args)
self._setDefault(forceIndexLabel=False, stringIndexerOrderType="frequencyDesc",
handleInvalid="error")
@since("1.5.0")
def getFormula(self):
"""
Gets the value of :py:attr:`formula`.
"""
return self.getOrDefault(self.formula)
@since("2.1.0")
def getForceIndexLabel(self):
"""
Gets the value of :py:attr:`forceIndexLabel`.
"""
return self.getOrDefault(self.forceIndexLabel)
@since("2.3.0")
def getStringIndexerOrderType(self):
"""
Gets the value of :py:attr:`stringIndexerOrderType` or its default value 'frequencyDesc'.
"""
return self.getOrDefault(self.stringIndexerOrderType)
@inherit_doc
class RFormula(JavaEstimator, _RFormulaParams, JavaMLReadable, JavaMLWritable):
"""
Implements the transforms required for fitting a dataset against an
R model formula. Currently we support a limited subset of the R
operators, including '~', '.', ':', '+', '-', '*', and '^'.
.. versionadded:: 1.5.0
Notes
-----
Also see the `R formula docs
<http://stat.ethz.ch/R-manual/R-patched/library/stats/html/formula.html>`_.
Examples
--------
>>> df = spark.createDataFrame([
... (1.0, 1.0, "a"),
... (0.0, 2.0, "b"),
... (0.0, 0.0, "a")
... ], ["y", "x", "s"])
>>> rf = RFormula(formula="y ~ x + s")
>>> model = rf.fit(df)
>>> model.getLabelCol()
'label'
>>> model.transform(df).show()
+---+---+---+---------+-----+
| y| x| s| features|label|
+---+---+---+---------+-----+
|1.0|1.0| a|[1.0,1.0]| 1.0|
|0.0|2.0| b|[2.0,0.0]| 0.0|
|0.0|0.0| a|[0.0,1.0]| 0.0|
+---+---+---+---------+-----+
...
>>> rf.fit(df, {rf.formula: "y ~ . - s"}).transform(df).show()
+---+---+---+--------+-----+
| y| x| s|features|label|
+---+---+---+--------+-----+
|1.0|1.0| a| [1.0]| 1.0|
|0.0|2.0| b| [2.0]| 0.0|
|0.0|0.0| a| [0.0]| 0.0|
+---+---+---+--------+-----+
...
>>> rFormulaPath = temp_path + "/rFormula"
>>> rf.save(rFormulaPath)
>>> loadedRF = RFormula.load(rFormulaPath)
>>> loadedRF.getFormula() == rf.getFormula()
True
>>> loadedRF.getFeaturesCol() == rf.getFeaturesCol()
True
>>> loadedRF.getLabelCol() == rf.getLabelCol()
True
>>> loadedRF.getHandleInvalid() == rf.getHandleInvalid()
True
>>> str(loadedRF)
'RFormula(y ~ x + s) (uid=...)'
>>> modelPath = temp_path + "/rFormulaModel"
>>> model.save(modelPath)
>>> loadedModel = RFormulaModel.load(modelPath)
>>> loadedModel.uid == model.uid
True
>>> loadedModel.transform(df).show()
+---+---+---+---------+-----+
| y| x| s| features|label|
+---+---+---+---------+-----+
|1.0|1.0| a|[1.0,1.0]| 1.0|
|0.0|2.0| b|[2.0,0.0]| 0.0|
|0.0|0.0| a|[0.0,1.0]| 0.0|
+---+---+---+---------+-----+
...
>>> str(loadedModel)
'RFormulaModel(ResolvedRFormula(label=y, terms=[x,s], hasIntercept=true)) (uid=...)'
"""
@keyword_only
def __init__(self, *, formula=None, featuresCol="features", labelCol="label",
forceIndexLabel=False, stringIndexerOrderType="frequencyDesc",
handleInvalid="error"):
"""
__init__(self, \\*, formula=None, featuresCol="features", labelCol="label", \
forceIndexLabel=False, stringIndexerOrderType="frequencyDesc", \
handleInvalid="error")
"""
super(RFormula, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.RFormula", self.uid)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.5.0")
def setParams(self, *, formula=None, featuresCol="features", labelCol="label",
forceIndexLabel=False, stringIndexerOrderType="frequencyDesc",
handleInvalid="error"):
"""
setParams(self, \\*, formula=None, featuresCol="features", labelCol="label", \
forceIndexLabel=False, stringIndexerOrderType="frequencyDesc", \
handleInvalid="error")
Sets params for RFormula.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("1.5.0")
def setFormula(self, value):
"""
Sets the value of :py:attr:`formula`.
"""
return self._set(formula=value)
@since("2.1.0")
def setForceIndexLabel(self, value):
"""
Sets the value of :py:attr:`forceIndexLabel`.
"""
return self._set(forceIndexLabel=value)
@since("2.3.0")
def setStringIndexerOrderType(self, value):
"""
Sets the value of :py:attr:`stringIndexerOrderType`.
"""
return self._set(stringIndexerOrderType=value)
def setFeaturesCol(self, value):
"""
Sets the value of :py:attr:`featuresCol`.
"""
return self._set(featuresCol=value)
def setLabelCol(self, value):
"""
Sets the value of :py:attr:`labelCol`.
"""
return self._set(labelCol=value)
def setHandleInvalid(self, value):
"""
Sets the value of :py:attr:`handleInvalid`.
"""
return self._set(handleInvalid=value)
def _create_model(self, java_model):
return RFormulaModel(java_model)
def __str__(self):
formulaStr = self.getFormula() if self.isDefined(self.formula) else ""
return "RFormula(%s) (uid=%s)" % (formulaStr, self.uid)
class RFormulaModel(JavaModel, _RFormulaParams, JavaMLReadable, JavaMLWritable):
"""
Model fitted by :py:class:`RFormula`. Fitting is required to determine the
factor levels of formula terms.
.. versionadded:: 1.5.0
"""
def __str__(self):
resolvedFormula = self._call_java("resolvedFormula")
return "RFormulaModel(%s) (uid=%s)" % (resolvedFormula, self.uid)
class _SelectorParams(HasFeaturesCol, HasOutputCol, HasLabelCol):
"""
Params for :py:class:`Selector` and :py:class:`SelectorModel`.
.. versionadded:: 3.1.0
"""
selectorType = Param(Params._dummy(), "selectorType",
"The selector type. " +
"Supported options: numTopFeatures (default), percentile, fpr, fdr, fwe.",
typeConverter=TypeConverters.toString)
numTopFeatures = \
Param(Params._dummy(), "numTopFeatures",
"Number of features that selector will select, ordered by ascending p-value. " +
"If the number of features is < numTopFeatures, then this will select " +
"all features.", typeConverter=TypeConverters.toInt)
percentile = Param(Params._dummy(), "percentile", "Percentile of features that selector " +
"will select, ordered by ascending p-value.",
typeConverter=TypeConverters.toFloat)
fpr = Param(Params._dummy(), "fpr", "The highest p-value for features to be kept.",
typeConverter=TypeConverters.toFloat)
fdr = Param(Params._dummy(), "fdr", "The upper bound of the expected false discovery rate.",
typeConverter=TypeConverters.toFloat)
fwe = Param(Params._dummy(), "fwe", "The upper bound of the expected family-wise error rate.",
typeConverter=TypeConverters.toFloat)
def __init__(self, *args):
super(_SelectorParams, self).__init__(*args)
self._setDefault(numTopFeatures=50, selectorType="numTopFeatures", percentile=0.1,
fpr=0.05, fdr=0.05, fwe=0.05)
@since("2.1.0")
def getSelectorType(self):
"""
Gets the value of selectorType or its default value.
"""
return self.getOrDefault(self.selectorType)
@since("2.0.0")
def getNumTopFeatures(self):
"""
Gets the value of numTopFeatures or its default value.
"""
return self.getOrDefault(self.numTopFeatures)
@since("2.1.0")
def getPercentile(self):
"""
Gets the value of percentile or its default value.
"""
return self.getOrDefault(self.percentile)
@since("2.1.0")
def getFpr(self):
"""
Gets the value of fpr or its default value.
"""
return self.getOrDefault(self.fpr)
@since("2.2.0")
def getFdr(self):
"""
Gets the value of fdr or its default value.
"""
return self.getOrDefault(self.fdr)
@since("2.2.0")
def getFwe(self):
"""
Gets the value of fwe or its default value.
"""
return self.getOrDefault(self.fwe)
class _Selector(JavaEstimator, _SelectorParams, JavaMLReadable, JavaMLWritable):
"""
Mixin for Selectors.
"""
@since("2.1.0")
def setSelectorType(self, value):
"""
Sets the value of :py:attr:`selectorType`.
"""
return self._set(selectorType=value)
@since("2.0.0")
def setNumTopFeatures(self, value):
"""
Sets the value of :py:attr:`numTopFeatures`.
Only applicable when selectorType = "numTopFeatures".
"""
return self._set(numTopFeatures=value)
@since("2.1.0")
def setPercentile(self, value):
"""
Sets the value of :py:attr:`percentile`.
Only applicable when selectorType = "percentile".
"""
return self._set(percentile=value)
@since("2.1.0")
def setFpr(self, value):
"""
Sets the value of :py:attr:`fpr`.
Only applicable when selectorType = "fpr".
"""
return self._set(fpr=value)
@since("2.2.0")
def setFdr(self, value):
"""
Sets the value of :py:attr:`fdr`.
Only applicable when selectorType = "fdr".
"""
return self._set(fdr=value)
@since("2.2.0")
def setFwe(self, value):
"""
Sets the value of :py:attr:`fwe`.
Only applicable when selectorType = "fwe".
"""
return self._set(fwe=value)
def setFeaturesCol(self, value):
"""
Sets the value of :py:attr:`featuresCol`.
"""
return self._set(featuresCol=value)
def setOutputCol(self, value):
"""
Sets the value of :py:attr:`outputCol`.
"""
return self._set(outputCol=value)
def setLabelCol(self, value):
"""
Sets the value of :py:attr:`labelCol`.
"""
return self._set(labelCol=value)
class _SelectorModel(JavaModel, _SelectorParams):
"""
Mixin for Selector models.
"""
@since("3.0.0")
def setFeaturesCol(self, value):
"""
Sets the value of :py:attr:`featuresCol`.
"""
return self._set(featuresCol=value)
@since("3.0.0")
def setOutputCol(self, value):
"""
Sets the value of :py:attr:`outputCol`.
"""
return self._set(outputCol=value)
@property
@since("2.0.0")
def selectedFeatures(self):
"""
List of indices to select (filter).
"""
return self._call_java("selectedFeatures")
@inherit_doc
class ChiSqSelector(_Selector, JavaMLReadable, JavaMLWritable):
"""
Chi-Squared feature selection, which selects categorical features to use for predicting a
categorical label.
The selector supports different selection methods: `numTopFeatures`, `percentile`, `fpr`,
`fdr`, `fwe`.
* `numTopFeatures` chooses a fixed number of top features according to a chi-squared test.
* `percentile` is similar but chooses a fraction of all features
instead of a fixed number.
* `fpr` chooses all features whose p-values are below a threshold,
thus controlling the false positive rate of selection.
* `fdr` uses the `Benjamini-Hochberg procedure <https://en.wikipedia.org/wiki/
False_discovery_rate#Benjamini.E2.80.93Hochberg_procedure>`_
to choose all features whose false discovery rate is below a threshold.
* `fwe` chooses all features whose p-values are below a threshold. The threshold is scaled by
1/numFeatures, thus controlling the family-wise error rate of selection.
By default, the selection method is `numTopFeatures`, with the default number of top features
set to 50.
.. deprecated:: 3.1.0
Use UnivariateFeatureSelector
.. versionadded:: 2.0.0
Examples
--------
>>> from pyspark.ml.linalg import Vectors
>>> df = spark.createDataFrame(
... [(Vectors.dense([0.0, 0.0, 18.0, 1.0]), 1.0),
... (Vectors.dense([0.0, 1.0, 12.0, 0.0]), 0.0),
... (Vectors.dense([1.0, 0.0, 15.0, 0.1]), 0.0)],
... ["features", "label"])
>>> selector = ChiSqSelector(numTopFeatures=1, outputCol="selectedFeatures")
>>> model = selector.fit(df)
>>> model.getFeaturesCol()
'features'
>>> model.setFeaturesCol("features")
ChiSqSelectorModel...
>>> model.transform(df).head().selectedFeatures
DenseVector([18.0])
>>> model.selectedFeatures
[2]
>>> chiSqSelectorPath = temp_path + "/chi-sq-selector"
>>> selector.save(chiSqSelectorPath)
>>> loadedSelector = ChiSqSelector.load(chiSqSelectorPath)
>>> loadedSelector.getNumTopFeatures() == selector.getNumTopFeatures()
True
>>> modelPath = temp_path + "/chi-sq-selector-model"
>>> model.save(modelPath)
>>> loadedModel = ChiSqSelectorModel.load(modelPath)
>>> loadedModel.selectedFeatures == model.selectedFeatures
True
>>> loadedModel.transform(df).take(1) == model.transform(df).take(1)
True
"""
@keyword_only
def __init__(self, *, numTopFeatures=50, featuresCol="features", outputCol=None,
labelCol="label", selectorType="numTopFeatures", percentile=0.1, fpr=0.05,
fdr=0.05, fwe=0.05):
"""
__init__(self, \\*, numTopFeatures=50, featuresCol="features", outputCol=None, \
labelCol="label", selectorType="numTopFeatures", percentile=0.1, fpr=0.05, \
fdr=0.05, fwe=0.05)
"""
super(ChiSqSelector, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.ChiSqSelector", self.uid)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("2.0.0")
def setParams(self, *, numTopFeatures=50, featuresCol="features", outputCol=None,
labelCol="label", selectorType="numTopFeatures", percentile=0.1, fpr=0.05,
fdr=0.05, fwe=0.05):
"""
setParams(self, \\*, numTopFeatures=50, featuresCol="features", outputCol=None, \
labelCol="label", selectorType="numTopFeatures", percentile=0.1, fpr=0.05, \
fdr=0.05, fwe=0.05)
Sets params for this ChiSqSelector.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
def _create_model(self, java_model):
return ChiSqSelectorModel(java_model)
class ChiSqSelectorModel(_SelectorModel, JavaMLReadable, JavaMLWritable):
"""
Model fitted by :py:class:`ChiSqSelector`.
.. versionadded:: 2.0.0
"""
@inherit_doc
class VectorSizeHint(JavaTransformer, HasInputCol, HasHandleInvalid, JavaMLReadable,
JavaMLWritable):
"""
A feature transformer that adds size information to the metadata of a vector column.
VectorAssembler needs size information for its input columns and cannot be used on streaming
dataframes without this metadata.
.. versionadded:: 2.3.0
Notes
-----
VectorSizeHint modifies `inputCol` to include size metadata and does not have an outputCol.
Examples
--------
>>> from pyspark.ml.linalg import Vectors
>>> from pyspark.ml import Pipeline, PipelineModel
>>> data = [(Vectors.dense([1., 2., 3.]), 4.)]
>>> df = spark.createDataFrame(data, ["vector", "float"])
>>>
>>> sizeHint = VectorSizeHint(inputCol="vector", size=3, handleInvalid="skip")
>>> vecAssembler = VectorAssembler(inputCols=["vector", "float"], outputCol="assembled")
>>> pipeline = Pipeline(stages=[sizeHint, vecAssembler])
>>>
>>> pipelineModel = pipeline.fit(df)
>>> pipelineModel.transform(df).head().assembled
DenseVector([1.0, 2.0, 3.0, 4.0])
>>> vectorSizeHintPath = temp_path + "/vector-size-hint-pipeline"
>>> pipelineModel.save(vectorSizeHintPath)
>>> loadedPipeline = PipelineModel.load(vectorSizeHintPath)
>>> loaded = loadedPipeline.transform(df).head().assembled
>>> expected = pipelineModel.transform(df).head().assembled
>>> loaded == expected
True
"""
size = Param(Params._dummy(), "size", "Size of vectors in column.",
typeConverter=TypeConverters.toInt)
handleInvalid = Param(Params._dummy(), "handleInvalid",
"How to handle invalid vectors in inputCol. Invalid vectors include "
"nulls and vectors with the wrong size. The options are `skip` (filter "
"out rows with invalid vectors), `error` (throw an error) and "
"`optimistic` (do not check the vector size, and keep all rows). "
"`error` by default.",
TypeConverters.toString)
@keyword_only
def __init__(self, *, inputCol=None, size=None, handleInvalid="error"):
"""
__init__(self, \\*, inputCol=None, size=None, handleInvalid="error")
"""
super(VectorSizeHint, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.VectorSizeHint", self.uid)
self._setDefault(handleInvalid="error")
self.setParams(**self._input_kwargs)
@keyword_only
@since("2.3.0")
def setParams(self, *, inputCol=None, size=None, handleInvalid="error"):
"""
setParams(self, \\*, inputCol=None, size=None, handleInvalid="error")
Sets params for this VectorSizeHint.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("2.3.0")
def getSize(self):
""" Gets size param, the size of vectors in `inputCol`."""
return self.getOrDefault(self.size)
@since("2.3.0")
def setSize(self, value):
""" Sets size param, the size of vectors in `inputCol`."""
return self._set(size=value)
def setInputCol(self, value):
"""
Sets the value of :py:attr:`inputCol`.
"""
return self._set(inputCol=value)
def setHandleInvalid(self, value):
"""
Sets the value of :py:attr:`handleInvalid`.
"""
return self._set(handleInvalid=value)
class _VarianceThresholdSelectorParams(HasFeaturesCol, HasOutputCol):
"""
Params for :py:class:`VarianceThresholdSelector` and
:py:class:`VarianceThresholdSelectorModel`.
.. versionadded:: 3.1.0
"""
varianceThreshold = Param(Params._dummy(), "varianceThreshold",
"Param for variance threshold. Features with a variance not " +
"greater than this threshold will be removed. The default value " +
"is 0.0.", typeConverter=TypeConverters.toFloat)
@since("3.1.0")
def getVarianceThreshold(self):
"""
Gets the value of varianceThreshold or its default value.
"""
return self.getOrDefault(self.varianceThreshold)
@inherit_doc
class VarianceThresholdSelector(JavaEstimator, _VarianceThresholdSelectorParams, JavaMLReadable,
JavaMLWritable):
"""
Feature selector that removes all low-variance features. Features with a
variance not greater than the threshold will be removed. The default is to keep
all features with non-zero variance, i.e. remove the features that have the
same value in all samples.
.. versionadded:: 3.1.0
Examples
--------
>>> from pyspark.ml.linalg import Vectors
>>> df = spark.createDataFrame(
... [(Vectors.dense([6.0, 7.0, 0.0, 7.0, 6.0, 0.0]),),
... (Vectors.dense([0.0, 9.0, 6.0, 0.0, 5.0, 9.0]),),
... (Vectors.dense([0.0, 9.0, 3.0, 0.0, 5.0, 5.0]),),
... (Vectors.dense([0.0, 9.0, 8.0, 5.0, 6.0, 4.0]),),
... (Vectors.dense([8.0, 9.0, 6.0, 5.0, 4.0, 4.0]),),
... (Vectors.dense([8.0, 9.0, 6.0, 0.0, 0.0, 0.0]),)],
... ["features"])
>>> selector = VarianceThresholdSelector(varianceThreshold=8.2, outputCol="selectedFeatures")
>>> model = selector.fit(df)
>>> model.getFeaturesCol()
'features'
>>> model.setFeaturesCol("features")
VarianceThresholdSelectorModel...
>>> model.transform(df).head().selectedFeatures
DenseVector([6.0, 7.0, 0.0])
>>> model.selectedFeatures
[0, 3, 5]
>>> varianceThresholdSelectorPath = temp_path + "/variance-threshold-selector"
>>> selector.save(varianceThresholdSelectorPath)
>>> loadedSelector = VarianceThresholdSelector.load(varianceThresholdSelectorPath)
>>> loadedSelector.getVarianceThreshold() == selector.getVarianceThreshold()
True
>>> modelPath = temp_path + "/variance-threshold-selector-model"
>>> model.save(modelPath)
>>> loadedModel = VarianceThresholdSelectorModel.load(modelPath)
>>> loadedModel.selectedFeatures == model.selectedFeatures
True
>>> loadedModel.transform(df).take(1) == model.transform(df).take(1)
True
"""
@keyword_only
def __init__(self, *, featuresCol="features", outputCol=None, varianceThreshold=0.0):
"""
__init__(self, \\*, featuresCol="features", outputCol=None, varianceThreshold=0.0)
"""
super(VarianceThresholdSelector, self).__init__()
self._java_obj = self._new_java_obj(
"org.apache.spark.ml.feature.VarianceThresholdSelector", self.uid)
self._setDefault(varianceThreshold=0.0)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("3.1.0")
def setParams(self, *, featuresCol="features", outputCol=None, varianceThreshold=0.0):
"""
setParams(self, \\*, featuresCol="features", outputCol=None, varianceThreshold=0.0)
Sets params for this VarianceThresholdSelector.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("3.1.0")
def setVarianceThreshold(self, value):
"""
Sets the value of :py:attr:`varianceThreshold`.
"""
return self._set(varianceThreshold=value)
@since("3.1.0")
def setFeaturesCol(self, value):
"""
Sets the value of :py:attr:`featuresCol`.
"""
return self._set(featuresCol=value)
@since("3.1.0")
def setOutputCol(self, value):
"""
Sets the value of :py:attr:`outputCol`.
"""
return self._set(outputCol=value)
def _create_model(self, java_model):
return VarianceThresholdSelectorModel(java_model)
class VarianceThresholdSelectorModel(JavaModel, _VarianceThresholdSelectorParams, JavaMLReadable,
JavaMLWritable):
"""
Model fitted by :py:class:`VarianceThresholdSelector`.
.. versionadded:: 3.1.0
"""
@since("3.1.0")
def setFeaturesCol(self, value):
"""
Sets the value of :py:attr:`featuresCol`.
"""
return self._set(featuresCol=value)
@since("3.1.0")
def setOutputCol(self, value):
"""
Sets the value of :py:attr:`outputCol`.
"""
return self._set(outputCol=value)
@property
@since("3.1.0")
def selectedFeatures(self):
"""
List of indices to select (filter).
"""
return self._call_java("selectedFeatures")
class _UnivariateFeatureSelectorParams(HasFeaturesCol, HasOutputCol, HasLabelCol):
"""
Params for :py:class:`UnivariateFeatureSelector` and
:py:class:`UnivariateFeatureSelectorModel`.
.. versionadded:: 3.1.0
"""
featureType = Param(Params._dummy(), "featureType",
"The feature type. " +
"Supported options: categorical, continuous.",
typeConverter=TypeConverters.toString)
labelType = Param(Params._dummy(), "labelType",
"The label type. " +
"Supported options: categorical, continuous.",
typeConverter=TypeConverters.toString)
selectionMode = Param(Params._dummy(), "selectionMode",
"The selection mode. " +
"Supported options: numTopFeatures (default), percentile, fpr, " +
"fdr, fwe.",
typeConverter=TypeConverters.toString)
selectionThreshold = Param(Params._dummy(), "selectionThreshold", "The upper bound of the " +
                           "number of features that selector will select.",
                           typeConverter=TypeConverters.toFloat)
def __init__(self, *args):
super(_UnivariateFeatureSelectorParams, self).__init__(*args)
self._setDefault(selectionMode="numTopFeatures")
@since("3.1.1")
def getFeatureType(self):
"""
Gets the value of featureType or its default value.
"""
return self.getOrDefault(self.featureType)
@since("3.1.1")
def getLabelType(self):
"""
Gets the value of labelType or its default value.
"""
return self.getOrDefault(self.labelType)
@since("3.1.1")
def getSelectionMode(self):
"""
Gets the value of selectionMode or its default value.
"""
return self.getOrDefault(self.selectionMode)
@since("3.1.1")
def getSelectionThreshold(self):
"""
Gets the value of selectionThreshold or its default value.
"""
return self.getOrDefault(self.selectionThreshold)
@inherit_doc
class UnivariateFeatureSelector(JavaEstimator, _UnivariateFeatureSelectorParams, JavaMLReadable,
JavaMLWritable):
"""
UnivariateFeatureSelector
The user can set `featureType` and `labelType`, and Spark will pick the score function based on
the specified `featureType` and `labelType`.
The following combination of `featureType` and `labelType` are supported:
- `featureType` `categorical` and `labelType` `categorical`, Spark uses chi-squared,
i.e. chi2 in sklearn.
- `featureType` `continuous` and `labelType` `categorical`, Spark uses ANOVATest,
i.e. f_classif in sklearn.
- `featureType` `continuous` and `labelType` `continuous`, Spark uses F-value,
i.e. f_regression in sklearn.
The `UnivariateFeatureSelector` supports different selection modes: `numTopFeatures`,
`percentile`, `fpr`, `fdr`, `fwe`.
- `numTopFeatures` chooses a fixed number of top features according to a
hypothesis.
- `percentile` is similar but chooses a fraction of all features
instead of a fixed number.
- `fpr` chooses all features whose p-values are below a threshold,
thus controlling the false positive rate of selection.
- `fdr` uses the `Benjamini-Hochberg procedure \
<https://en.wikipedia.org/wiki/False_discovery_rate#Benjamini.E2.80.93Hochberg_procedure>`_
to choose all features whose false discovery rate is below a threshold.
- `fwe` chooses all features whose p-values are below a threshold. The threshold is scaled by
1 / `numFeatures`, thus controlling the family-wise error rate of selection.
By default, the selection mode is `numTopFeatures`.
.. versionadded:: 3.1.1
Examples
--------
>>> from pyspark.ml.linalg import Vectors
>>> df = spark.createDataFrame(
... [(Vectors.dense([1.7, 4.4, 7.6, 5.8, 9.6, 2.3]), 3.0),
... (Vectors.dense([8.8, 7.3, 5.7, 7.3, 2.2, 4.1]), 2.0),
... (Vectors.dense([1.2, 9.5, 2.5, 3.1, 8.7, 2.5]), 1.0),
... (Vectors.dense([3.7, 9.2, 6.1, 4.1, 7.5, 3.8]), 2.0),
... (Vectors.dense([8.9, 5.2, 7.8, 8.3, 5.2, 3.0]), 4.0),
... (Vectors.dense([7.9, 8.5, 9.2, 4.0, 9.4, 2.1]), 4.0)],
... ["features", "label"])
>>> selector = UnivariateFeatureSelector(outputCol="selectedFeatures")
>>> selector.setFeatureType("continuous").setLabelType("categorical").setSelectionThreshold(1)
UnivariateFeatureSelector...
>>> model = selector.fit(df)
>>> model.getFeaturesCol()
'features'
>>> model.setFeaturesCol("features")
UnivariateFeatureSelectorModel...
>>> model.transform(df).head().selectedFeatures
DenseVector([7.6])
>>> model.selectedFeatures
[2]
>>> selectorPath = temp_path + "/selector"
>>> selector.save(selectorPath)
>>> loadedSelector = UnivariateFeatureSelector.load(selectorPath)
>>> loadedSelector.getSelectionThreshold() == selector.getSelectionThreshold()
True
>>> modelPath = temp_path + "/selector-model"
>>> model.save(modelPath)
>>> loadedModel = UnivariateFeatureSelectorModel.load(modelPath)
>>> loadedModel.selectedFeatures == model.selectedFeatures
True
>>> loadedModel.transform(df).take(1) == model.transform(df).take(1)
True
"""
@keyword_only
def __init__(self, *, featuresCol="features", outputCol=None,
labelCol="label", selectionMode="numTopFeatures"):
"""
__init__(self, \\*, featuresCol="features", outputCol=None, \
labelCol="label", selectionMode="numTopFeatures")
"""
super(UnivariateFeatureSelector, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.UnivariateFeatureSelector",
self.uid)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("3.1.1")
def setParams(self, *, featuresCol="features", outputCol=None,
labelCol="label", selectionMode="numTopFeatures"):
"""
setParams(self, \\*, featuresCol="features", outputCol=None, \
labelCol="label", selectionMode="numTopFeatures")
Sets params for this UnivariateFeatureSelector.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("3.1.1")
def setFeatureType(self, value):
"""
Sets the value of :py:attr:`featureType`.
"""
return self._set(featureType=value)
@since("3.1.1")
def setLabelType(self, value):
"""
Sets the value of :py:attr:`labelType`.
"""
return self._set(labelType=value)
@since("3.1.1")
def setSelectionMode(self, value):
"""
Sets the value of :py:attr:`selectionMode`.
"""
return self._set(selectionMode=value)
@since("3.1.1")
def setSelectionThreshold(self, value):
"""
Sets the value of :py:attr:`selectionThreshold`.
"""
return self._set(selectionThreshold=value)
def setFeaturesCol(self, value):
"""
Sets the value of :py:attr:`featuresCol`.
"""
return self._set(featuresCol=value)
def setOutputCol(self, value):
"""
Sets the value of :py:attr:`outputCol`.
"""
return self._set(outputCol=value)
def setLabelCol(self, value):
"""
Sets the value of :py:attr:`labelCol`.
"""
return self._set(labelCol=value)
def _create_model(self, java_model):
return UnivariateFeatureSelectorModel(java_model)
class UnivariateFeatureSelectorModel(JavaModel, _UnivariateFeatureSelectorParams, JavaMLReadable,
JavaMLWritable):
"""
Model fitted by :py:class:`UnivariateFeatureSelector`.
.. versionadded:: 3.1.1
"""
@since("3.1.1")
def setFeaturesCol(self, value):
"""
Sets the value of :py:attr:`featuresCol`.
"""
return self._set(featuresCol=value)
@since("3.1.1")
def setOutputCol(self, value):
"""
Sets the value of :py:attr:`outputCol`.
"""
return self._set(outputCol=value)
@property
@since("3.1.1")
def selectedFeatures(self):
"""
List of indices to select (filter).
"""
return self._call_java("selectedFeatures")
if __name__ == "__main__":
import doctest
import sys
import tempfile
import pyspark.ml.feature
from pyspark.sql import Row, SparkSession
globs = globals().copy()
features = pyspark.ml.feature.__dict__.copy()
globs.update(features)
# The small batch size here ensures that we see multiple batches,
# even in these small test examples:
spark = SparkSession.builder\
.master("local[2]")\
.appName("ml.feature tests")\
.getOrCreate()
sc = spark.sparkContext
globs['sc'] = sc
globs['spark'] = spark
testData = sc.parallelize([Row(id=0, label="a"), Row(id=1, label="b"),
Row(id=2, label="c"), Row(id=3, label="a"),
Row(id=4, label="a"), Row(id=5, label="c")], 2)
globs['stringIndDf'] = spark.createDataFrame(testData)
temp_path = tempfile.mkdtemp()
globs['temp_path'] = temp_path
try:
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
spark.stop()
finally:
from shutil import rmtree
try:
rmtree(temp_path)
except OSError:
pass
if failure_count:
sys.exit(-1)
| apache-2.0 |
edublancas/sklearn-evaluation | src/sklearn_evaluation/SQLiteTracker.py | 1 | 5748 | from uuid import uuid4
import sqlite3
import json
import pandas as pd
from sklearn_evaluation.table import Table
class SQLiteTracker:
"""A simple experiment tracker using SQLite
:doc:`Click here <../user_guide/SQLiteTracker>` to see the user guide.
Parameters
----------
path
Database location
"""
def __init__(self, path: str):
self.conn = sqlite3.connect(path)
cur = self.conn.cursor()
cur.execute("""
CREATE TABLE IF NOT EXISTS experiments (
uuid TEXT NOT NULL UNIQUE,
created TIMESTAMP default current_timestamp,
parameters TEXT,
comment TEXT
)
""")
cur.close()
def __getitem__(self, uuid):
"""Get experiment with a given uuid
"""
# TODO: make it work for a list of uuids
return pd.read_sql('SELECT * FROM experiments WHERE uuid = ?',
self.conn,
params=[uuid],
index_col='uuid')
def recent(self, n=5, normalize=False):
"""Get most recent experiments as a pandas.DataFrame
"""
query = """
SELECT uuid, created, parameters, comment
FROM experiments
ORDER BY created DESC
LIMIT ?
"""
df = pd.read_sql(query, self.conn, params=[n], index_col='uuid')
if normalize:
# parse and normalize json
parameters = pd.json_normalize(
df.pop('parameters').apply(lambda s: json.loads(s))).set_index(
df.index)
df = df.join(parameters)
# re order columns to show "comment" at the end
comment = df.pop('comment')
df.insert(len(df.columns), 'comment', comment)
return df
def query(self, code):
"""Query the database, returns a pandas.DataFrame
Examples
--------
>>> from sklearn_evaluation import SQLiteTracker
>>> tracker = SQLiteTracker(':memory:') # example in-memory db
>>> tracker.insert('my_uuid', {'a': 1})
>>> df = tracker.query(
... "SELECT uuid, json_extract(parameters, '$.a') FROM experiments")
"""
df = pd.read_sql(code, self.conn)
if 'uuid' in df:
df = df.set_index('uuid')
return df
def new(self):
"""Create a new experiment, returns a uuid
"""
uuid = uuid4().hex
cur = self.conn.cursor()
cur.execute(
"""
INSERT INTO experiments (uuid)
VALUES(?)
""", [uuid])
cur.close()
self.conn.commit()
return uuid
def update(self, uuid, parameters):
"""Update the parameters of an empty experiment given its uuid
"""
self._can_update(uuid)
cur = self.conn.cursor()
cur.execute(
"""
UPDATE experiments
SET parameters = ?
WHERE uuid = ?
""", [json.dumps(parameters), uuid])
cur.close()
self.conn.commit()
def insert(self, uuid, parameters):
"""Insert a new experiment
"""
cur = self.conn.cursor()
cur.execute(
"""
INSERT INTO experiments (uuid, parameters)
VALUES(?, ?)
""", [uuid, json.dumps(parameters)])
cur.close()
self.conn.commit()
def comment(self, uuid, comment):
"""Add a comment to an experiment given its uuid
"""
# TODO: add overwrite (false by default) and append options
cur = self.conn.cursor()
cur.execute(
"""
UPDATE experiments
SET comment = ?
WHERE uuid = ?
""", [comment, uuid])
cur.close()
self.conn.commit()
def _recent(self, n=5, fmt='html'):
if fmt not in {'html', 'plain'}:
raise ValueError('fmt must be one of "html" or "plain"')
cur = self.conn.cursor()
cur.execute(
"""
SELECT uuid, created, parameters, comment
FROM experiments
ORDER BY created DESC
LIMIT ?
""", [n])
res = cur.fetchall()
table = Table(res, header=['uuid', 'created', 'parameters', 'comment'])
title_template = '<h4> {} </h4>' if fmt == 'html' else '{}\n'
title = title_template.format(type(self).__name__)
if not len(table):
title += '(No experiments saved yet)'
if fmt == 'plain':
title += '\n'
if len(table):
footer = (('<br>' if fmt == 'html' else '\n') +
'(Most recent experiments)')
else:
footer = ''
return (title + (table.to_html() if fmt == 'html' else str(table)) +
footer)
def _can_update(self, uuid):
"""Check if an experiment with a given uuid can be updated
"""
cur = self.conn.cursor()
cur.execute(
"""
SELECT parameters
FROM experiments
WHERE uuid = ?
""", [uuid])
row = cur.fetchone()
exists = row is not None
if exists:
empty = row[0] is None
if not empty:
raise ValueError('Cannot update non-empty experiment with '
'uuid "{}"'.format(uuid))
else:
raise ValueError('Cannot update experiment with '
'uuid "{}" because it does '
'not exist'.format(uuid))
def __repr__(self):
return self._recent(fmt='plain')
def _repr_html_(self):
return self._recent(fmt='html')
def __del__(self):
self.conn.close()
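# --- Illustrative usage sketch (not part of the original module) ---
# A minimal, hypothetical walk-through of the SQLiteTracker API defined above.
# It assumes an in-memory database; the helper name and parameter values below
# are made up for illustration only.
def _sqlite_tracker_example():  # hypothetical helper, not used by the library
    tracker = SQLiteTracker(':memory:')
    # create an empty experiment, then fill in its parameters and a comment
    uuid = tracker.new()
    tracker.update(uuid, {'model': 'random_forest', 'n_estimators': 100})
    tracker.comment(uuid, 'baseline run')
    # experiments can also be inserted in a single call with a caller-chosen id
    tracker.insert('manual-id', {'model': 'svm', 'C': 1.0})
    # recent() returns a pandas.DataFrame; normalize=True expands the JSON parameters
    return tracker.recent(normalize=True)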
| mit |
florian-f/sklearn | sklearn/ensemble/forest.py | 1 | 51881 | """Forest of trees-based ensemble methods
Those methods include random forests and extremely randomized trees.
The module structure is the following:
- The ``BaseForest`` base class implements a common ``fit`` method for all
the estimators in the module. The ``fit`` method of the base ``Forest``
class calls the ``fit`` method of each sub-estimator on random samples
(with replacement, a.k.a. bootstrap) of the training set.
The init of the sub-estimator is further delegated to the
``BaseEnsemble`` constructor.
- The ``ForestClassifier`` and ``ForestRegressor`` base classes further
implement the prediction logic by computing an average of the predicted
outcomes of the sub-estimators.
- The ``RandomForestClassifier`` and ``RandomForestRegressor`` derived
classes provide the user with concrete implementations of
the forest ensemble method using classical, deterministic
``DecisionTreeClassifier`` and ``DecisionTreeRegressor`` as
sub-estimator implementations.
- The ``ExtraTreesClassifier`` and ``ExtraTreesRegressor`` derived
classes provide the user with concrete implementations of the
forest ensemble method using the extremely randomized trees
``ExtraTreeClassifier`` and ``ExtraTreeRegressor`` as
sub-estimator implementations.
Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe, Brian Holt
# License: BSD 3
from __future__ import division
import itertools
import numpy as np
from warnings import warn
from abc import ABCMeta, abstractmethod
from ..base import ClassifierMixin, RegressorMixin
from ..externals.joblib import Parallel, delayed, cpu_count
from ..externals.six.moves import xrange
from ..feature_selection.selector_mixin import SelectorMixin
from ..metrics import r2_score
from ..preprocessing import OneHotEncoder
from ..tree import (DecisionTreeClassifier, DecisionTreeRegressor,
ExtraTreeClassifier, ExtraTreeRegressor)
from ..tree._tree import DTYPE, DOUBLE
from ..utils import array2d, check_random_state, check_arrays, safe_asarray
from ..utils.fixes import bincount
from .base import BaseEnsemble
__all__ = ["RandomForestClassifier",
"RandomForestRegressor",
"ExtraTreesClassifier",
"ExtraTreesRegressor"]
MAX_INT = np.iinfo(np.int32).max
def _parallel_build_trees(n_trees, forest, X, y, sample_weight,
sample_mask, X_argsorted, seeds, verbose):
"""Private function used to build a batch of trees within a job."""
trees = []
for i in range(n_trees):
random_state = check_random_state(seeds[i])
if verbose > 1:
print("building tree %d of %d" % (i + 1, n_trees))
seed = random_state.randint(MAX_INT)
tree = forest._make_estimator(append=False)
tree.set_params(random_state=check_random_state(seed))
if forest.bootstrap:
n_samples = X.shape[0]
if sample_weight is None:
curr_sample_weight = np.ones((n_samples,), dtype=np.float64)
else:
curr_sample_weight = sample_weight.copy()
indices = random_state.randint(0, n_samples, n_samples)
sample_counts = bincount(indices, minlength=n_samples)
curr_sample_weight *= sample_counts
curr_sample_mask = sample_mask.copy()
curr_sample_mask[sample_counts == 0] = False
tree.fit(X, y,
sample_weight=curr_sample_weight,
sample_mask=curr_sample_mask,
X_argsorted=X_argsorted,
check_input=False)
tree.indices_ = curr_sample_mask
else:
tree.fit(X, y,
sample_weight=sample_weight,
sample_mask=sample_mask,
X_argsorted=X_argsorted,
check_input=False)
trees.append(tree)
return trees
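# --- Illustrative sketch (not part of the original module) ---
# The bootstrap branch in _parallel_build_trees above draws n_samples indices
# with replacement and encodes the resample as per-sample weights instead of
# materialising a copy of X: rows drawn twice get weight 2, rows never drawn
# get weight 0 and are masked out (they become the out-of-bag rows used by
# oob_score). A self-contained numpy sketch of that trick follows; the helper
# name and the sizes are made up for illustration.
def _bootstrap_weights_sketch(n_samples=5, seed=0):
    rng = np.random.RandomState(seed)
    # bootstrap resample: indices into the original rows, drawn with replacement
    indices = rng.randint(0, n_samples, n_samples)
    # how many times each original row was drawn (0 means out-of-bag)
    counts = np.bincount(indices, minlength=n_samples)
    # weighting each row by its draw count is equivalent to fitting on the resample
    weights = np.ones((n_samples,), dtype=np.float64) * counts
    in_bag = counts > 0  # rows the tree actually sees; the rest are the OOB rows
    return weights, in_bag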
def _parallel_predict_proba(trees, X, n_classes, n_outputs):
"""Private function used to compute a batch of predictions within a job."""
n_samples = X.shape[0]
if n_outputs == 1:
proba = np.zeros((n_samples, n_classes))
for tree in trees:
proba_tree = tree.predict_proba(X)
if n_classes == tree.n_classes_:
proba += proba_tree
else:
for j, c in enumerate(tree.classes_):
proba[:, c] += proba_tree[:, j]
else:
proba = []
for k in xrange(n_outputs):
proba.append(np.zeros((n_samples, n_classes[k])))
for tree in trees:
proba_tree = tree.predict_proba(X)
for k in xrange(n_outputs):
if n_classes[k] == tree.n_classes_[k]:
proba[k] += proba_tree[k]
else:
for j, c in enumerate(tree.classes_[k]):
proba[k][:, c] += proba_tree[k][:, j]
return proba
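# --- Illustrative note (not part of the original module) ---
# Each job returns the *sum* of its trees' class-probability matrices; the
# calling classifier is then expected to divide by the total number of
# estimators to obtain the forest average. The per-class remapping above
# covers the case where an individual tree reports fewer classes than the
# forest as a whole, adding each tree column to the forest-wide column of the
# class it corresponds to.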
def _parallel_predict_regression(trees, X):
"""Private function used to compute a batch of predictions within a job."""
return sum(tree.predict(X) for tree in trees)
def _partition_trees(forest):
"""Private function used to partition trees between jobs."""
# Compute the number of jobs
if forest.n_jobs == -1:
n_jobs = min(cpu_count(), forest.n_estimators)
else:
n_jobs = min(forest.n_jobs, forest.n_estimators)
# Partition trees between jobs
n_trees = [forest.n_estimators // n_jobs] * n_jobs
for i in range(forest.n_estimators % n_jobs):
n_trees[i] += 1
starts = [0] * (n_jobs + 1)
for i in range(1, n_jobs + 1):
starts[i] = starts[i - 1] + n_trees[i - 1]
return n_jobs, n_trees, starts
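# --- Illustrative note (not part of the original module) ---
# A worked example of the partitioning above, with made-up numbers: for
# n_estimators=10 and n_jobs=3 the trees are split as evenly as possible,
# giving n_trees == [4, 3, 3] and cumulative starts == [0, 4, 7, 10]; each job
# i is then responsible for the slice starts[i]:starts[i + 1].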
def _parallel_X_argsort(X):
"""Private function used to sort the features of X."""
return np.asarray(np.argsort(X.T, axis=1).T, dtype=np.int32, order="F")
def _partition_features(forest, n_total_features):
"""Private function used to partition features between jobs."""
# Compute the number of jobs
if forest.n_jobs == -1:
n_jobs = min(cpu_count(), n_total_features)
else:
n_jobs = min(forest.n_jobs, n_total_features)
# Partition features between jobs
n_features = [n_total_features // n_jobs] * n_jobs
for i in xrange(n_total_features % n_jobs):
n_features[i] += 1
starts = [0] * (n_jobs + 1)
for i in xrange(1, n_jobs + 1):
starts[i] = starts[i - 1] + n_features[i - 1]
return n_jobs, n_features, starts
class BaseForest(BaseEnsemble, SelectorMixin):
"""Base class for forests of trees.
Warning: This class should not be used directly. Use derived classes
instead.
"""
__metaclass__ = ABCMeta
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
compute_importances=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0):
super(BaseForest, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params)
self.bootstrap = bootstrap
if compute_importances:
warn("Setting compute_importances=True is no longer "
"required. Variable importances are now computed on the fly "
"when accessing the feature_importances_ attribute. This "
"parameter will be removed in 0.15.", DeprecationWarning)
self.compute_importances = compute_importances
self.oob_score = oob_score
self.n_jobs = n_jobs
self.random_state = random_state
self.n_features_ = None
self.n_outputs_ = None
self.classes_ = None
self.n_classes_ = None
self.verbose = verbose
def apply(self, X):
"""Apply trees in the forest to X, return leaf indices.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Input data.
Returns
-------
X_leaves : array_like, shape = [n_samples, n_estimators]
For each datapoint x in X and for each tree in the forest,
return the index of the leaf x ends up in.
"""
X = array2d(X, dtype=np.float32, order='C')
return np.array([est.tree_.apply(X) for est in self.estimators_]).T
def fit(self, X, y, sample_weight=None):
"""Build a forest of trees from the training set (X, y).
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The training input samples.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (integers that correspond to classes in
classification, real numbers in regression).
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
Returns
-------
self : object
Returns self.
"""
random_state = check_random_state(self.random_state)
# Precompute some data
X, y = check_arrays(X, y, sparse_format="dense")
if (getattr(X, "dtype", None) != DTYPE or
X.ndim != 2 or
not X.flags.fortran):
X = array2d(X, dtype=DTYPE, order="F")
n_samples, self.n_features_ = X.shape
if not self.bootstrap and self.oob_score:
raise ValueError("Out of bag estimation only available"
" if bootstrap=True")
sample_mask = np.ones((n_samples,), dtype=np.bool)
n_jobs, _, starts = _partition_features(self, self.n_features_)
all_X_argsorted = Parallel(n_jobs=n_jobs, verbose=self.verbose)(
delayed(_parallel_X_argsort)(
X[:, starts[i]:starts[i + 1]])
for i in xrange(n_jobs))
X_argsorted = np.asfortranarray(np.hstack(all_X_argsorted))
y = np.atleast_1d(y)
if y.ndim == 1:
            # reshape is necessary to preserve the data contiguity;
            # indexing with [:, np.newaxis] would not preserve it.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
if isinstance(self.base_estimator, ClassifierMixin):
y = np.copy(y)
if self.n_outputs_ == 1:
self.classes_ = np.unique(y)
self.n_classes_ = len(self.classes_)
else:
self.classes_ = []
self.n_classes_ = []
for k in xrange(self.n_outputs_):
unique = np.unique(y[:, k])
self.classes_.append(unique)
self.n_classes_.append(unique.shape[0])
y[:, k] = np.searchsorted(unique, y[:, k])
else:
if self.n_outputs_ == 1:
self.classes_ = None
self.n_classes_ = 1
else:
self.classes_ = [None] * self.n_outputs_
self.n_classes_ = [1] * self.n_outputs_
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
# Assign chunk of trees to jobs
n_jobs, n_trees, _ = _partition_trees(self)
# Precalculate the random states
seeds = [random_state.randint(MAX_INT, size=i) for i in n_trees]
# Parallel loop
all_trees = Parallel(n_jobs=n_jobs, verbose=self.verbose)(
delayed(_parallel_build_trees)(
n_trees[i],
self,
X,
y,
sample_weight,
sample_mask,
X_argsorted,
seeds[i],
verbose=self.verbose)
for i in range(n_jobs))
# Reduce
self.estimators_ = [tree for tree in itertools.chain(*all_trees)]
# Calculate out of bag predictions and score
if self.oob_score:
if isinstance(self, ClassifierMixin):
self.oob_decision_function_ = []
self.oob_score_ = 0.0
n_classes_ = self.n_classes_
classes_ = self.classes_
if self.n_outputs_ == 1:
n_classes_ = [n_classes_]
classes_ = [classes_]
predictions = []
for k in xrange(self.n_outputs_):
predictions.append(np.zeros((n_samples,
n_classes_[k])))
for estimator in self.estimators_:
mask = np.ones(n_samples, dtype=np.bool)
mask[estimator.indices_] = False
p_estimator = estimator.predict_proba(X[mask, :])
if self.n_outputs_ == 1:
p_estimator = [p_estimator]
for k in xrange(self.n_outputs_):
predictions[k][mask, :] += p_estimator[k]
for k in xrange(self.n_outputs_):
if (predictions[k].sum(axis=1) == 0).any():
warn("Some inputs do not have OOB scores. "
"This probably means too few trees were used "
"to compute any reliable oob estimates.")
decision = (predictions[k] /
predictions[k].sum(axis=1)[:, np.newaxis])
self.oob_decision_function_.append(decision)
self.oob_score_ += np.mean(
(y[:, k] == classes_[k].take(
np.argmax(predictions[k], axis=1),
axis=0)))
if self.n_outputs_ == 1:
self.oob_decision_function_ = \
self.oob_decision_function_[0]
self.oob_score_ /= self.n_outputs_
else:
# Regression:
predictions = np.zeros((n_samples, self.n_outputs_))
n_predictions = np.zeros((n_samples, self.n_outputs_))
for estimator in self.estimators_:
mask = np.ones(n_samples, dtype=np.bool)
mask[estimator.indices_] = False
p_estimator = estimator.predict(X[mask, :])
if self.n_outputs_ == 1:
p_estimator = p_estimator[:, np.newaxis]
predictions[mask, :] += p_estimator
n_predictions[mask, :] += 1
if (n_predictions == 0).any():
warn("Some inputs do not have OOB scores. "
"This probably means too few trees were used "
"to compute any reliable oob estimates.")
n_predictions[n_predictions == 0] = 1
predictions /= n_predictions
self.oob_prediction_ = predictions
if self.n_outputs_ == 1:
self.oob_prediction_ = \
self.oob_prediction_.reshape((n_samples, ))
self.oob_score_ = 0.0
for k in xrange(self.n_outputs_):
self.oob_score_ += r2_score(y[:, k],
predictions[:, k])
self.oob_score_ /= self.n_outputs_
return self
@property
def feature_importances_(self):
"""Return the feature importances (the higher, the more important the
feature).
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise ValueError("Estimator not fitted, "
"call `fit` before `feature_importances_`.")
return sum(tree.feature_importances_
for tree in self.estimators_) / self.n_estimators
class ForestClassifier(BaseForest, ClassifierMixin):
"""Base class for forest of trees-based classifiers.
Warning: This class should not be used directly. Use derived classes
instead.
"""
__metaclass__ = ABCMeta
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
compute_importances=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0):
super(ForestClassifier, self).__init__(
base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params,
bootstrap=bootstrap,
compute_importances=compute_importances,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose)
def predict(self, X):
"""Predict class for X.
The predicted class of an input sample is computed as the majority
prediction of the trees in the forest.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes.
"""
n_samples = len(X)
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
predictions = np.zeros((n_samples, self.n_outputs_))
for k in xrange(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(np.argmax(proba[k],
axis=1),
axis=0)
return predictions
def predict_proba(self, X):
"""Predict class probabilities for X.
The predicted class probabilities of an input sample is computed as
the mean predicted class probabilities of the trees in the forest.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. Classes are
ordered by arithmetical order.
"""
# Check data
if getattr(X, "dtype", None) != DTYPE or X.ndim != 2:
X = array2d(X, dtype=DTYPE)
# Assign chunk of trees to jobs
n_jobs, n_trees, starts = _partition_trees(self)
# Parallel loop
all_proba = Parallel(n_jobs=n_jobs, verbose=self.verbose)(
delayed(_parallel_predict_proba)(
self.estimators_[starts[i]:starts[i + 1]],
X,
self.n_classes_,
self.n_outputs_)
for i in range(n_jobs))
# Reduce
proba = all_proba[0]
if self.n_outputs_ == 1:
for j in xrange(1, len(all_proba)):
proba += all_proba[j]
proba /= self.n_estimators
else:
for j in xrange(1, len(all_proba)):
for k in xrange(self.n_outputs_):
proba[k] += all_proba[j][k]
for k in xrange(self.n_outputs_):
proba[k] /= self.n_estimators
return proba
def predict_log_proba(self, X):
"""Predict class log-probabilities for X.
The predicted class log-probabilities of an input sample is computed as
the mean predicted class log-probabilities of the trees in the forest.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class log-probabilities of the input samples. Classes are
ordered by arithmetical order.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in xrange(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
class ForestRegressor(BaseForest, RegressorMixin):
"""Base class for forest of trees-based regressors.
Warning: This class should not be used directly. Use derived classes
instead.
"""
__metaclass__ = ABCMeta
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
compute_importances=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0):
super(ForestRegressor, self).__init__(
base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params,
bootstrap=bootstrap,
compute_importances=compute_importances,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose)
def predict(self, X):
"""Predict regression target for X.
The predicted regression target of an input sample is computed as the
mean predicted regression targets of the trees in the forest.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y: array of shape = [n_samples] or [n_samples, n_outputs]
The predicted values.
"""
# Check data
if getattr(X, "dtype", None) != DTYPE or X.ndim != 2:
X = array2d(X, dtype=DTYPE)
# Assign chunk of trees to jobs
n_jobs, n_trees, starts = _partition_trees(self)
# Parallel loop
all_y_hat = Parallel(n_jobs=n_jobs, verbose=self.verbose)(
delayed(_parallel_predict_regression)(
self.estimators_[starts[i]:starts[i + 1]], X)
for i in range(n_jobs))
# Reduce
y_hat = sum(all_y_hat) / self.n_estimators
return y_hat
class RandomForestClassifier(ForestClassifier):
"""A random forest classifier.
    A random forest is a meta estimator that fits a number of classical
    decision trees on various sub-samples of the dataset and uses averaging
to improve the predictive accuracy and control over-fitting.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
Note: this parameter is tree-specific.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: this parameter is tree-specific.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Note: this parameter is tree-specific.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
Note: this parameter is tree-specific.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
        discarded if after the split, one of the leaves would contain less than
``min_samples_leaf`` samples.
Note: this parameter is tree-specific.
min_density : float, optional (default=0.1)
This parameter controls a trade-off in an optimization heuristic. It
controls the minimum density of the `sample_mask` (i.e. the
fraction of samples in the mask). If the density falls below this
threshold the mask is recomputed and the input data is packed
which results in data copying. If `min_density` equals to one,
the partitions are always represented as copies of the original
data. Otherwise, partitions are represented as bit masks (aka
sample masks).
Note: this parameter is tree-specific.
bootstrap : boolean, optional (default=True)
Whether bootstrap samples are used when building trees.
oob_score : bool
Whether to use out-of-bag samples to estimate
the generalization error.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
Attributes
----------
`estimators_`: list of DecisionTreeClassifier
The collection of fitted sub-estimators.
`classes_`: array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem), or a list of arrays of
class labels (multi-output problem).
`n_classes_`: int or list
The number of classes (single output problem), or a list containing the
number of classes for each output (multi-output problem).
`feature_importances_` : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
`oob_score_` : float
Score of the training dataset obtained using an out-of-bag estimate.
`oob_decision_function_` : array of shape = [n_samples, n_classes]
Decision function computed with out-of-bag estimate on the training
set. If n_estimators is small it might be possible that a data point
was never left out during the bootstrap. In this case,
`oob_decision_function_` might contain NaN.
References
----------
.. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.
See also
--------
DecisionTreeClassifier, ExtraTreesClassifier
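    Examples
    --------
    A minimal illustrative sketch on synthetic data (the dataset and the
    variable names below are arbitrary, chosen only for demonstration):
    >>> from sklearn.datasets import make_classification
    >>> X, y = make_classification(n_samples=100, n_features=4,
    ...                            random_state=0)
    >>> clf = RandomForestClassifier(n_estimators=10, random_state=0)
    >>> clf = clf.fit(X, y)
    >>> predictions = clf.predict(X[:5])
    >>> probabilities = clf.predict_proba(X[:5])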
"""
def __init__(self,
n_estimators=10,
criterion="gini",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_density=0.1,
max_features="auto",
bootstrap=True,
compute_importances=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0):
super(RandomForestClassifier, self).__init__(
base_estimator=DecisionTreeClassifier(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_density",
"max_features", "random_state"),
bootstrap=bootstrap,
compute_importances=compute_importances,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_density = min_density
self.max_features = max_features
class RandomForestRegressor(ForestRegressor):
"""A random forest regressor.
    A random forest is a meta estimator that fits a number of classical
    decision trees on various sub-samples of the dataset and uses averaging
to improve the predictive accuracy and control over-fitting.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="mse")
The function to measure the quality of a split. The only supported
criterion is "mse" for the mean squared error.
Note: this parameter is tree-specific.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: this parameter is tree-specific.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Note: this parameter is tree-specific.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
Note: this parameter is tree-specific.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
        discarded if after the split, one of the leaves would contain less than
``min_samples_leaf`` samples.
Note: this parameter is tree-specific.
min_density : float, optional (default=0.1)
This parameter controls a trade-off in an optimization heuristic. It
controls the minimum density of the `sample_mask` (i.e. the
fraction of samples in the mask). If the density falls below this
threshold the mask is recomputed and the input data is packed
which results in data copying. If `min_density` equals to one,
the partitions are always represented as copies of the original
data. Otherwise, partitions are represented as bit masks (aka
sample masks).
Note: this parameter is tree-specific.
bootstrap : boolean, optional (default=True)
Whether bootstrap samples are used when building trees.
oob_score : bool
whether to use out-of-bag samples to estimate
the generalization error.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
Attributes
----------
`estimators_`: list of DecisionTreeRegressor
The collection of fitted sub-estimators.
`feature_importances_` : array of shape = [n_features]
        The feature importances (the higher, the more important the feature).
`oob_score_` : float
Score of the training dataset obtained using an out-of-bag estimate.
`oob_prediction_` : array of shape = [n_samples]
Prediction computed with out-of-bag estimate on the training set.
References
----------
.. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.
See also
--------
DecisionTreeRegressor, ExtraTreesRegressor
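    Examples
    --------
    A minimal illustrative sketch on synthetic data (dataset and variable
    names are arbitrary, for demonstration only):
    >>> from sklearn.datasets import make_regression
    >>> X, y = make_regression(n_samples=100, n_features=4, random_state=0)
    >>> regr = RandomForestRegressor(n_estimators=10, random_state=0)
    >>> regr = regr.fit(X, y)
    >>> y_pred = regr.predict(X[:5])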
"""
def __init__(self,
n_estimators=10,
criterion="mse",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_density=0.1,
max_features="auto",
bootstrap=True,
compute_importances=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0):
super(RandomForestRegressor, self).__init__(
base_estimator=DecisionTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_density",
"max_features", "random_state"),
bootstrap=bootstrap,
compute_importances=compute_importances,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_density = min_density
self.max_features = max_features
class ExtraTreesClassifier(ForestClassifier):
"""An extra-trees classifier.
This class implements a meta estimator that fits a number of
randomized decision trees (a.k.a. extra-trees) on various sub-samples
    of the dataset and uses averaging to improve the predictive accuracy
and control over-fitting.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
Note: this parameter is tree-specific.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: this parameter is tree-specific.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Note: this parameter is tree-specific.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
Note: this parameter is tree-specific.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
        discarded if after the split, one of the leaves would contain less than
``min_samples_leaf`` samples.
Note: this parameter is tree-specific.
min_density : float, optional (default=0.1)
This parameter controls a trade-off in an optimization heuristic. It
controls the minimum density of the `sample_mask` (i.e. the
fraction of samples in the mask). If the density falls below this
threshold the mask is recomputed and the input data is packed
which results in data copying. If `min_density` equals to one,
the partitions are always represented as copies of the original
data. Otherwise, partitions are represented as bit masks (aka
sample masks).
Note: this parameter is tree-specific.
bootstrap : boolean, optional (default=False)
Whether bootstrap samples are used when building trees.
oob_score : bool
Whether to use out-of-bag samples to estimate
the generalization error.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
Attributes
----------
`estimators_`: list of DecisionTreeClassifier
The collection of fitted sub-estimators.
`classes_`: array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem), or a list of arrays of
class labels (multi-output problem).
`n_classes_`: int or list
The number of classes (single output problem), or a list containing the
number of classes for each output (multi-output problem).
`feature_importances_` : array of shape = [n_features]
        The feature importances (the higher, the more important the feature).
`oob_score_` : float
Score of the training dataset obtained using an out-of-bag estimate.
`oob_decision_function_` : array of shape = [n_samples, n_classes]
Decision function computed with out-of-bag estimate on the training
set. If n_estimators is small it might be possible that a data point
was never left out during the bootstrap. In this case,
`oob_decision_function_` might contain NaN.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
See also
--------
sklearn.tree.ExtraTreeClassifier : Base classifier for this ensemble.
RandomForestClassifier : Ensemble Classifier based on trees with optimal
splits.
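    Examples
    --------
    An illustrative sketch, analogous to the random forest usage above
    (synthetic data; variable names are arbitrary):
    >>> from sklearn.datasets import make_classification
    >>> X, y = make_classification(n_samples=100, n_features=4,
    ...                            random_state=0)
    >>> clf = ExtraTreesClassifier(n_estimators=10, random_state=0)
    >>> clf = clf.fit(X, y)
    >>> predictions = clf.predict(X[:5])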
"""
def __init__(self,
n_estimators=10,
criterion="gini",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_density=0.1,
max_features="auto",
bootstrap=False,
compute_importances=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0):
super(ExtraTreesClassifier, self).__init__(
base_estimator=ExtraTreeClassifier(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_density",
"max_features", "random_state"),
bootstrap=bootstrap,
compute_importances=compute_importances,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_density = min_density
self.max_features = max_features
class ExtraTreesRegressor(ForestRegressor):
"""An extra-trees regressor.
This class implements a meta estimator that fits a number of
randomized decision trees (a.k.a. extra-trees) on various sub-samples
    of the dataset and uses averaging to improve the predictive accuracy
and control over-fitting.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="mse")
The function to measure the quality of a split. The only supported
criterion is "mse" for the mean squared error.
Note: this parameter is tree-specific.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: this parameter is tree-specific.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Note: this parameter is tree-specific.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
Note: this parameter is tree-specific.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
        discarded if after the split, one of the leaves would contain less than
``min_samples_leaf`` samples.
Note: this parameter is tree-specific.
min_density : float, optional (default=0.1)
This parameter controls a trade-off in an optimization heuristic. It
controls the minimum density of the `sample_mask` (i.e. the
fraction of samples in the mask). If the density falls below this
threshold the mask is recomputed and the input data is packed
which results in data copying. If `min_density` equals to one,
the partitions are always represented as copies of the original
data. Otherwise, partitions are represented as bit masks (aka
sample masks).
Note: this parameter is tree-specific.
bootstrap : boolean, optional (default=False)
Whether bootstrap samples are used when building trees.
Note: this parameter is tree-specific.
oob_score : bool
Whether to use out-of-bag samples to estimate
the generalization error.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
Attributes
----------
`estimators_`: list of DecisionTreeRegressor
The collection of fitted sub-estimators.
`feature_importances_` : array of shape = [n_features]
        The feature importances (the higher, the more important the feature).
`oob_score_` : float
Score of the training dataset obtained using an out-of-bag estimate.
`oob_prediction_` : array of shape = [n_samples]
Prediction computed with out-of-bag estimate on the training set.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
See also
--------
sklearn.tree.ExtraTreeRegressor: Base estimator for this ensemble.
RandomForestRegressor: Ensemble regressor using trees with optimal splits.
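    Examples
    --------
    An illustrative sketch on synthetic data (variable names are arbitrary):
    >>> from sklearn.datasets import make_regression
    >>> X, y = make_regression(n_samples=100, n_features=4, random_state=0)
    >>> regr = ExtraTreesRegressor(n_estimators=10, random_state=0)
    >>> regr = regr.fit(X, y)
    >>> y_pred = regr.predict(X[:5])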
"""
def __init__(self,
n_estimators=10,
criterion="mse",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_density=0.1,
max_features="auto",
bootstrap=False,
compute_importances=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0):
super(ExtraTreesRegressor, self).__init__(
base_estimator=ExtraTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_density",
"max_features", "random_state"),
bootstrap=bootstrap,
compute_importances=compute_importances,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_density = min_density
self.max_features = max_features
class RandomTreesEmbedding(BaseForest):
"""An ensemble of totally random trees.
An unsupervised transformation of a dataset to a high-dimensional
sparse representation. A datapoint is coded according to which leaf of
each tree it is sorted into. Using a one-hot encoding of the leaves,
this leads to a binary coding with as many ones as trees in the forest.
The dimensionality of the resulting representation is approximately
``n_estimators * 2 ** max_depth``.
Parameters
----------
n_estimators : int
Number of trees in the forest.
max_depth : int
Maximum depth of each tree.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
Note: this parameter is tree-specific.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
        discarded if after the split, one of the leaves would contain less than
``min_samples_leaf`` samples.
Note: this parameter is tree-specific.
min_density : float, optional (default=0.1)
This parameter controls a trade-off in an optimization heuristic. It
controls the minimum density of the `sample_mask` (i.e. the
fraction of samples in the mask). If the density falls below this
threshold the mask is recomputed and the input data is packed
which results in data copying. If `min_density` equals to one,
the partitions are always represented as copies of the original
data. Otherwise, partitions are represented as bit masks (aka
sample masks).
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
Attributes
----------
`estimators_`: list of DecisionTreeClassifier
The collection of fitted sub-estimators.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
.. [2] Moosmann, F. and Triggs, B. and Jurie, F. "Fast discriminative
visual codebooks using randomized clustering forests"
NIPS 2007
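    Examples
    --------
    An illustrative sketch of the unsupervised transformation described
    above (the tiny input matrix is arbitrary):
    >>> X = [[0, 0], [1, 1], [0, 1], [1, 0]]
    >>> hasher = RandomTreesEmbedding(n_estimators=5, random_state=0)
    >>> X_sparse = hasher.fit_transform(X)
    >>> X_sparse.shape[0]
    4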
"""
def __init__(self,
n_estimators=10,
max_depth=5,
min_samples_split=2,
min_samples_leaf=1,
min_density=0.1,
n_jobs=1,
random_state=None,
verbose=0):
super(RandomTreesEmbedding, self).__init__(
base_estimator=ExtraTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_density",
"max_features", "random_state"),
bootstrap=False,
compute_importances=False,
oob_score=False,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose)
self.criterion = 'mse'
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_density = min_density
self.max_features = 1
def fit(self, X, y=None):
"""Fit estimator.
Parameters
----------
X : array-like, shape=(n_samples, n_features)
Input data used to build forests.
"""
self.fit_transform(X, y)
return self
def fit_transform(self, X, y=None):
"""Fit estimator and transform dataset.
Parameters
----------
X : array-like, shape=(n_samples, n_features)
Input data used to build forests.
Returns
-------
X_transformed: sparse matrix, shape=(n_samples, n_out)
Transformed dataset.
"""
X = safe_asarray(X)
rnd = check_random_state(self.random_state)
y = rnd.uniform(size=X.shape[0])
super(RandomTreesEmbedding, self).fit(X, y)
self.one_hot_encoder_ = OneHotEncoder()
return self.one_hot_encoder_.fit_transform(self.apply(X))
def transform(self, X):
"""Transform dataset.
Parameters
----------
X : array-like, shape=(n_samples, n_features)
Input data to be transformed.
Returns
-------
X_transformed: sparse matrix, shape=(n_samples, n_out)
Transformed dataset.
"""
return self.one_hot_encoder_.transform(self.apply(X))
| bsd-3-clause |
mtconley/turntable | test/lib/python2.7/site-packages/scipy/interpolate/ndgriddata.py | 12 | 7174 | """
Convenience interface to N-D interpolation
.. versionadded:: 0.9
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from .interpnd import LinearNDInterpolator, NDInterpolatorBase, \
CloughTocher2DInterpolator, _ndim_coords_from_arrays
from scipy.spatial import cKDTree
__all__ = ['griddata', 'NearestNDInterpolator', 'LinearNDInterpolator',
'CloughTocher2DInterpolator']
#------------------------------------------------------------------------------
# Nearest-neighbour interpolation
#------------------------------------------------------------------------------
class NearestNDInterpolator(NDInterpolatorBase):
"""
NearestNDInterpolator(points, values)
Nearest-neighbour interpolation in N dimensions.
.. versionadded:: 0.9
Methods
-------
__call__
Parameters
----------
points : (Npoints, Ndims) ndarray of floats
Data point coordinates.
values : (Npoints,) ndarray of float or complex
Data values.
rescale : boolean, optional
Rescale points to unit cube before performing interpolation.
This is useful if some of the input dimensions have
incommensurable units and differ by many orders of magnitude.
.. versionadded:: 0.14.0
Notes
-----
Uses ``scipy.spatial.cKDTree``
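    Examples
    --------
    A small illustrative sketch (the sample points and values below are
    arbitrary):
    >>> import numpy as np
    >>> points = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0], [1.0, 1.0]])
    >>> values = np.array([0.0, 1.0, 2.0, 3.0])
    >>> interp = NearestNDInterpolator(points, values)
    >>> zi = interp(np.array([[0.1, 0.1], [0.9, 0.9]]))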
"""
def __init__(self, x, y, rescale=False):
NDInterpolatorBase.__init__(self, x, y, rescale=rescale,
need_contiguous=False,
need_values=False)
self.tree = cKDTree(self.points)
self.values = y
def __call__(self, *args):
"""
Evaluate interpolator at given points.
Parameters
----------
xi : ndarray of float, shape (..., ndim)
Points where to interpolate data at.
"""
xi = _ndim_coords_from_arrays(args, ndim=self.points.shape[1])
xi = self._check_call_shape(xi)
xi = self._scale_x(xi)
dist, i = self.tree.query(xi)
return self.values[i]
#------------------------------------------------------------------------------
# Convenience interface function
#------------------------------------------------------------------------------
def griddata(points, values, xi, method='linear', fill_value=np.nan,
rescale=False):
"""
Interpolate unstructured D-dimensional data.
Parameters
----------
points : ndarray of floats, shape (n, D)
Data point coordinates. Can either be an array of
shape (n, D), or a tuple of `ndim` arrays.
values : ndarray of float or complex, shape (n,)
Data values.
xi : ndarray of float, shape (M, D)
Points at which to interpolate data.
method : {'linear', 'nearest', 'cubic'}, optional
Method of interpolation. One of
``nearest``
return the value at the data point closest to
the point of interpolation. See `NearestNDInterpolator` for
more details.
``linear``
          tessellate the input point set into n-dimensional
simplices, and interpolate linearly on each simplex. See
`LinearNDInterpolator` for more details.
``cubic`` (1-D)
return the value determined from a cubic
spline.
``cubic`` (2-D)
return the value determined from a
piecewise cubic, continuously differentiable (C1), and
approximately curvature-minimizing polynomial surface. See
`CloughTocher2DInterpolator` for more details.
fill_value : float, optional
Value used to fill in for requested points outside of the
convex hull of the input points. If not provided, then the
default is ``nan``. This option has no effect for the
'nearest' method.
rescale : boolean, optional
Rescale points to unit cube before performing interpolation.
This is useful if some of the input dimensions have
incommensurable units and differ by many orders of magnitude.
.. versionadded:: 0.14.0
Notes
-----
.. versionadded:: 0.9
Examples
--------
Suppose we want to interpolate the 2-D function
>>> def func(x, y):
    ...     return x*(1-x)*np.cos(4*np.pi*x) * np.sin(4*np.pi*y**2)**2
on a grid in [0, 1]x[0, 1]
>>> grid_x, grid_y = np.mgrid[0:1:100j, 0:1:200j]
but we only know its values at 1000 data points:
>>> points = np.random.rand(1000, 2)
>>> values = func(points[:,0], points[:,1])
This can be done with `griddata` -- below we try out all of the
interpolation methods:
>>> from scipy.interpolate import griddata
>>> grid_z0 = griddata(points, values, (grid_x, grid_y), method='nearest')
>>> grid_z1 = griddata(points, values, (grid_x, grid_y), method='linear')
>>> grid_z2 = griddata(points, values, (grid_x, grid_y), method='cubic')
One can see that the exact result is reproduced by all of the
methods to some degree, but for this smooth function the piecewise
cubic interpolant gives the best results:
>>> import matplotlib.pyplot as plt
>>> plt.subplot(221)
>>> plt.imshow(func(grid_x, grid_y).T, extent=(0,1,0,1), origin='lower')
>>> plt.plot(points[:,0], points[:,1], 'k.', ms=1)
>>> plt.title('Original')
>>> plt.subplot(222)
>>> plt.imshow(grid_z0.T, extent=(0,1,0,1), origin='lower')
>>> plt.title('Nearest')
>>> plt.subplot(223)
>>> plt.imshow(grid_z1.T, extent=(0,1,0,1), origin='lower')
>>> plt.title('Linear')
>>> plt.subplot(224)
>>> plt.imshow(grid_z2.T, extent=(0,1,0,1), origin='lower')
>>> plt.title('Cubic')
>>> plt.gcf().set_size_inches(6, 6)
>>> plt.show()
"""
points = _ndim_coords_from_arrays(points)
if points.ndim < 2:
ndim = points.ndim
else:
ndim = points.shape[-1]
if ndim == 1 and method in ('nearest', 'linear', 'cubic'):
from .interpolate import interp1d
points = points.ravel()
if isinstance(xi, tuple):
if len(xi) != 1:
raise ValueError("invalid number of dimensions in xi")
xi, = xi
# Sort points/values together, necessary as input for interp1d
idx = np.argsort(points)
points = points[idx]
values = values[idx]
ip = interp1d(points, values, kind=method, axis=0, bounds_error=False,
fill_value=fill_value)
return ip(xi)
elif method == 'nearest':
ip = NearestNDInterpolator(points, values, rescale=rescale)
return ip(xi)
elif method == 'linear':
ip = LinearNDInterpolator(points, values, fill_value=fill_value,
rescale=rescale)
return ip(xi)
elif method == 'cubic' and ndim == 2:
ip = CloughTocher2DInterpolator(points, values, fill_value=fill_value,
rescale=rescale)
return ip(xi)
else:
raise ValueError("Unknown interpolation method %r for "
"%d dimensional data" % (method, ndim))
| mit |
pratapvardhan/scikit-learn | examples/neural_networks/plot_mnist_filters.py | 57 | 2195 | """
=====================================
Visualization of MLP weights on MNIST
=====================================
Sometimes looking at the learned coefficients of a neural network can provide
insight into the learning behavior. For example, if weights look unstructured,
maybe some were not used at all; if very large coefficients exist, the
regularization may have been too low or the learning rate too high.
This example shows how to plot some of the first layer weights in an
MLPClassifier trained on the MNIST dataset.
The input data consists of 28x28 pixel handwritten digits, leading to 784
features in the dataset. The first layer weight matrix therefore has the
shape (784, hidden_layer_sizes[0]), so we can visualize a single column of
the weight matrix as a 28x28 pixel image.
To make the example run faster, we use very few hidden units, and train only
for a very short time. Training longer would result in weights with a much
smoother spatial appearance.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_mldata
from sklearn.neural_network import MLPClassifier
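# fetch_mldata downloads the MNIST data on first use and caches it in the
# scikit-learn data home (by default ~/scikit_learn_data).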
mnist = fetch_mldata("MNIST original")
# rescale the data, use the traditional train/test split
X, y = mnist.data / 255., mnist.target
X_train, X_test = X[:60000], X[60000:]
y_train, y_test = y[:60000], y[60000:]
# mlp = MLPClassifier(hidden_layer_sizes=(100, 100), max_iter=400, alpha=1e-4,
# algorithm='sgd', verbose=10, tol=1e-4, random_state=1)
mlp = MLPClassifier(hidden_layer_sizes=(50,), max_iter=10, alpha=1e-4,
algorithm='sgd', verbose=10, tol=1e-4, random_state=1,
learning_rate_init=.1)
mlp.fit(X_train, y_train)
print("Training set score: %f" % mlp.score(X_train, y_train))
print("Test set score: %f" % mlp.score(X_test, y_test))
fig, axes = plt.subplots(4, 4)
# use global min / max to ensure all weights are shown on the same scale
vmin, vmax = mlp.coefs_[0].min(), mlp.coefs_[0].max()
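# each column of the (784, n_hidden) first-layer weight matrix corresponds to
# one hidden unit and is reshaped to a 28x28 image below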
for coef, ax in zip(mlp.coefs_[0].T, axes.ravel()):
ax.matshow(coef.reshape(28, 28), cmap=plt.cm.gray, vmin=.5 * vmin,
vmax=.5 * vmax)
ax.set_xticks(())
ax.set_yticks(())
plt.show()
| bsd-3-clause |
faneshion/MatchZoo | matchzoo/preprocessors/naive_preprocessor.py | 1 | 2195 | """Naive Preprocessor."""
from tqdm import tqdm
from matchzoo.engine.base_preprocessor import BasePreprocessor
from matchzoo import DataPack
from .chain_transform import chain_transform
from .build_vocab_unit import build_vocab_unit
from . import units
tqdm.pandas()
class NaivePreprocessor(BasePreprocessor):
"""
Naive preprocessor.
Example:
>>> import matchzoo as mz
>>> train_data = mz.datasets.toy.load_data()
>>> test_data = mz.datasets.toy.load_data(stage='test')
>>> preprocessor = mz.preprocessors.NaivePreprocessor()
>>> train_data_processed = preprocessor.fit_transform(train_data,
... verbose=0)
>>> type(train_data_processed)
<class 'matchzoo.data_pack.data_pack.DataPack'>
>>> test_data_transformed = preprocessor.transform(test_data,
... verbose=0)
>>> type(test_data_transformed)
<class 'matchzoo.data_pack.data_pack.DataPack'>
"""
def fit(self, data_pack: DataPack, verbose: int = 1):
"""
Fit pre-processing context for transformation.
:param data_pack: data_pack to be preprocessed.
:param verbose: Verbosity.
        :return: :class:`NaivePreprocessor` instance.
"""
func = chain_transform(self._default_units())
data_pack = data_pack.apply_on_text(func, verbose=verbose)
vocab_unit = build_vocab_unit(data_pack, verbose=verbose)
self._context['vocab_unit'] = vocab_unit
return self
def transform(self, data_pack: DataPack, verbose: int = 1) -> DataPack:
"""
Apply transformation on data, create `tri-letter` representation.
:param data_pack: Inputs to be preprocessed.
:param verbose: Verbosity.
:return: Transformed data as :class:`DataPack` object.
"""
units_ = self._default_units()
units_.append(self._context['vocab_unit'])
units_.append(units.FixedLength(text_length=30, pad_mode='post'))
func = chain_transform(units_)
return data_pack.apply_on_text(func, verbose=verbose)
| apache-2.0 |
NunoEdgarGub1/scikit-learn | sklearn/linear_model/setup.py | 169 | 1567 | import os
from os.path import join
import numpy
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('linear_model', parent_package, top_path)
cblas_libs, blas_info = get_blas_info()
if os.name == 'posix':
cblas_libs.append('m')
config.add_extension('cd_fast', sources=['cd_fast.c'],
libraries=cblas_libs,
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=blas_info.pop('extra_compile_args',
[]), **blas_info)
config.add_extension('sgd_fast',
sources=['sgd_fast.c'],
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
libraries=cblas_libs,
extra_compile_args=blas_info.pop('extra_compile_args',
[]),
**blas_info)
# add other directories
config.add_subpackage('tests')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
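# A typical invocation to compile these extensions in place (assuming a full
# scikit-learn source checkout) would be: python setup.py build_ext --inplace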
| bsd-3-clause |
gkunter/coquery | coquery/gui/textgridview.py | 1 | 13010 | # -*- coding: utf-8 -*-
"""
textgridview.py is part of Coquery.
Copyright (c) 2017 Gero Kunter ([email protected])
Coquery is released under the terms of the GNU General Public License (v3).
For details, see the file LICENSE that you should have received along
with Coquery. If not, see <http://www.gnu.org/licenses/>.
"""
from __future__ import division
import types
import numpy as np
import sys
import matplotlib as mpl
mpl.use("Qt5Agg")
mpl.rcParams["backend"] = "Qt5Agg"
import matplotlib.pyplot as plt
from matplotlib.figure import Figure
from matplotlib.widgets import SpanSelector
from matplotlib.patches import Rectangle
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
import seaborn as sns
from scipy import signal
from .pyqt_compat import QtWidgets, QtCore, QtGui
class LockedAxes(mpl.axes.Axes):
"""
Custom Axes class that allows only panning along the x axis. Based on
http://stackoverflow.com/a/16709952/5215507
"""
name = "LockedAxes"
def drag_pan(self, button, key, x, y):
mpl.axes.Axes.drag_pan(self, button, 'x', x, y)
mpl.projections.register_projection(LockedAxes)
def press_zoom(self, event):
"""
Method that is used to limit zoom to the x axis. Based on
http://stackoverflow.com/a/16709952/5215507
"""
event.key = 'x'
NavigationToolbar.press_zoom(self, event)
class CoqFigure(Figure):
def tight_layout(self, *args, **kwargs):
super(CoqFigure, self).tight_layout(*args, **kwargs)
self.subplots_adjust(hspace=0)
class CoqTextgridView(QtWidgets.QWidget):
def __init__(self, *args, **kwargs):
super(CoqTextgridView, self).__init__(*args, **kwargs)
self._dynamic_range = 50
self._window_length = 0.005
self._textgrid = None
self._sound = None
self._spectrogram = None
self._drag = False
self._start_pos = None
self.scrollbar = QtWidgets.QScrollBar(QtCore.Qt.Horizontal)
self.scrollbar.valueChanged.connect(self.change_position)
self.figure = CoqFigure()
self.canvas = FigureCanvas(self.figure)
self.canvas.setParent(self)
self.canvas.mpl_connect('key_press_event', self.on_key_press)
self.canvas.mpl_connect('button_release_event', self.on_button_release)
self.canvas.mpl_connect('button_press_event', self.on_button_press)
self.canvas.mpl_connect('motion_notify_event', self.on_mouse_move)
#self.toolbar = NavigationToolbar(self.canvas, self)
#self.toolbar.press_zoom = types.MethodType(press_zoom, self.toolbar)
self.canvas.setSizePolicy(QtWidgets.QSizePolicy.Expanding,
QtWidgets.QSizePolicy.Expanding)
self.canvas.updateGeometry()
gs = mpl.gridspec.GridSpec(3, 1, height_ratios=[2.5, 5, 2.5])
self.ax_waveform = self.figure.add_subplot(gs[0],
projection="LockedAxes")
self.ax_spectrogram = self.figure.add_subplot(gs[1],
sharex=self.ax_waveform,
projection="LockedAxes")
self.ax_textgrid = self.figure.add_subplot(gs[2],
sharex=self.ax_waveform,
projection="LockedAxes")
self.figure.subplots_adjust(hspace=0)
# prepare axes
self.ax_waveform.set_ylim([-1, 1])
self.ax_waveform.set_ylabel("Amplitude")
self.ax_waveform.get_xaxis().set_visible(False)
self.ax_spectrogram.set_ylabel("Frequency (Hz)")
self.ax_spectrogram.get_xaxis().set_visible(False)
self.ax_spectrogram.grid(False)
self.ax_spectrogram.set_ylim([0, 5000])
self.ax_textgrid.set_xlabel("Time (s)")
self.ax_textgrid.xaxis.get_offset_text().set_visible(False)
self.selector_waveform = SpanSelector(
self.ax_waveform, self.on_select, 'horizontal', useblit=True,
rectprops=dict(alpha=0.25, facecolor='red'), span_stays=False,
button=1)
self.selector_spectrogram = SpanSelector(
self.ax_spectrogram, self.on_select, 'horizontal', useblit=True,
rectprops=dict(alpha=0.25, facecolor='red'), span_stays=False,
button=1)
layout = QtWidgets.QVBoxLayout()
layout.setContentsMargins(0, 0, 0, 0)
layout.setSpacing(0)
self.setLayout(layout)
#self.layout().addWidget(self.toolbar)
self.layout().addWidget(self.canvas)
self.layout().addWidget(self.scrollbar)
def on_key_press(self, *args, **kwargs):
pass
def on_button_press(self, event):
print(event)
if event.button == 1:
if event.inaxes == self.ax_textgrid:
print("clicked", event.xdata)
tier = self._textgrid.get_tier_by_name(
self._textgrid.get_tier_names()[0])
print(tier.intervals)
print("nearest", tier.get_annotations_by_time(event.xdata))
interval = tier.get_annotations_by_time(event.xdata)[0]
self._sound.extract_sound(interval.start_time,
interval.end_time).play()
elif event.button == 3:
QtCore.QCoreApplication.instance().setOverrideCursor(
                QtGui.QCursor(QtCore.Qt.SizeHorCursor))
self._drag = True
self._start_pos = event.xdata
def on_mouse_move(self, event):
if not event.button:
return
if self._drag and event.xdata:
dist = self._start_pos - event.xdata
trans = self.ax_textgrid.transAxes.transform([dist, 0])[0]
if abs(dist) > 0:
xmin, xmax = self.ax_textgrid.get_xlim()
xmin = xmin - dist
xmax = xmax + dist
self.ax_textgrid.set_xlim(xmin, xmax)
self.check_zoom()
self.canvas.draw()
self.change_slider()
def on_button_release(self, event):
if event.button == 3:
QtCore.QCoreApplication.instance().restoreOverrideCursor()
self._drag = False
self.check_zoom()
def check_zoom(self, *args, **kwargs):
xmin, xmax = self.ax_textgrid.get_xlim()
if xmax - xmin > self._sound.duration():
xmin = 0
xmax = self._sound.duration()
self.ax_textgrid.set_xlim(xmin, xmax)
self.canvas.draw()
elif xmin < 0:
xmax = xmax - xmin
xmin = 0
self.ax_textgrid.set_xlim(xmin, xmax)
self.canvas.draw()
elif xmax > self._sound.duration():
xmin = self._sound.duration() - (xmax - xmin)
xmax = self._sound.duration()
self.ax_textgrid.set_xlim(xmin, xmax)
self.canvas.draw()
self.change_slider()
def change_slider(self):
xmin, xmax = self.ax_textgrid.get_xlim()
ps = max(0, int(1000 * (xmax - xmin) / self._sound.duration()))
val = min(1000 - ps, int(1000 * (xmin / self._sound.duration())))
new_max = 1000 - ps
print(xmin, xmax, self._sound.duration(), new_max, ps, val)
self.scrollbar.blockSignals(True)
self.scrollbar.setMinimum(0)
self.scrollbar.setMaximum(new_max)
self.scrollbar.setPageStep(ps)
self.scrollbar.setValue(val)
self.scrollbar.blockSignals(False)
print("NEW")
print(self.scrollbar.minimum(),
self.scrollbar.maximum(),
self.scrollbar.value(),
self.scrollbar.pageStep())
def change_position(self, i):
pos = self._sound.duration() * i / 1000
xmin, xmax = self.ax_textgrid.get_xlim()
self.ax_textgrid.set_xlim(pos, pos + xmax - xmin)
self.canvas.draw()
def on_select(self, xmin, xmax):
if xmin == xmax:
self._sound.extract_sound(*self.ax_textgrid.get_xlim()).play()
else:
self._sound.extract_sound(xmin, xmax).play()
def change_dynamic_range(self, x):
if x == self.dynamicRange():
return
self.setDynamicRange(int(x))
self.plotSpectrogram()
def change_window_length(self, x):
if x == self.windowLength():
return
self.setWindowLength(float(x))
# new window length requires recalculation of the spectrogram:
self._get_spectrogram()
self.plotSpectrogram()
def _get_spectrogram(self, **kwargs):
Fs = self.sound().framerate
NFFT = int(Fs * self.windowLength())
noverlap = int(Fs * self.windowLength() / 2)
data, self._ydim, self._xdim, _ = plt.specgram(
self.sound().astype(np.int32),
Fs=Fs,
NFFT=NFFT,
noverlap=noverlap,
window=signal.gaussian(M=NFFT, std=noverlap))
self._spectrogram = self.transform(data)
def transform(self, data):
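        # Convert raw spectrogram power values to a decibel scale (10 * log10);
        # the dynamic-range cut-off in normalize() operates on these dB values.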
return 10 * np.log10(data)
def normalize(self):
max_db = self._spectrogram.max()
return mpl.colors.SymLogNorm(linthresh=0.03,
vmin=max_db - self.dynamicRange(),
vmax=max_db)
def dynamicRange(self):
return self._dynamic_range
def setDynamicRange(self, x):
self._dynamic_range = x
def windowLength(self):
return self._window_length
def setWindowLength(self, x):
self._window_length = x
def sound(self):
return self._sound
def setSound(self, s):
self._sound = s
def setTextgrid(self, textgrid):
self._textgrid = textgrid
def textgrid(self):
return self._textgrid
def plotSpectrogram(self, cmap="gray_r"):
if self._spectrogram is None:
self._get_spectrogram()
self.ax_spectrogram.pcolormesh(self._xdim, self._ydim,
self._spectrogram,
cmap=cmap,
norm=self.normalize())
self.canvas.draw()
def plotWave(self):
t = np.linspace(0.0, self.sound().duration(), len(self.sound()))
amp = self.sound().astype(np.int32)
self.ax_waveform.plot(t, amp / abs(max(amp)))
def plotTextgrid(self):
tier_labels = []
n_tiers = len(self._textgrid.tiers)
for i, tier in enumerate(self._textgrid.tiers):
tier_labels.append(tier.name)
y_start = 1 - i / n_tiers
y_end = 1 - (i+1) / n_tiers
for interval in tier.intervals:
patch = Rectangle(
(interval.start_time, y_start),
interval.duration(),
y_end - y_start,
fill=False)
self.ax_textgrid.add_patch(patch)
x_pos = interval.start_time + 0.5 * (interval.duration())
self.ax_textgrid.text(
x_pos,
y_start + 0.5 * (y_end - y_start),
interval.text,
verticalalignment="center",
horizontalalignment="center")
self.ax_spectrogram.vlines((interval.start_time,
interval.end_time), 5000, 0)
self.ax_waveform.vlines((interval.start_time,
interval.end_time), -1, 1)
self.ax_textgrid.yaxis.set_ticks(
[(i + 0.5) / n_tiers for i in range(n_tiers)])
self.ax_textgrid.yaxis.set_ticklabels(reversed(tier_labels))
def plotTicks(self):
x_ticks = self.ax_textgrid.get_xticklabels()
x_ticks = [str(self.offset + float(x.get_text()))
if x.get_text() else "" for x in x_ticks]
lower = self.offset + self.ax_textgrid.get_xlim()[0]
upper = self.offset + self.ax_textgrid.get_xlim()[1]
self.ax_textgrid.set_xticklabels(x_ticks)
def display(self, **kwargs):
self.offset = kwargs.get("offset", 0)
if self._textgrid:
self.plotTextgrid()
if self.sound():
self.plotWave()
self.plotSpectrogram()
if self.offset:
self.plotTicks()
self.scrollbar.setMinimum(0)
self.scrollbar.setMaximum(0)
self.scrollbar.setPageStep(1000)
self.scrollbar.setSingleStep(10)
#print("START")
#print(self.scrollbar.minimum(),
#self.scrollbar.maximum(),
#self.scrollbar.value(),
#self.scrollbar.pageStep())
self.ax_textgrid.grid(False)
self.check_zoom()
self.figure.tight_layout()
| gpl-3.0 |
pjryan126/solid-start-careers | store/api/zillow/venv/lib/python2.7/site-packages/pandas/indexes/numeric.py | 1 | 12487 | import numpy as np
import pandas.lib as lib
import pandas.algos as _algos
import pandas.index as _index
from pandas import compat
from pandas.indexes.base import Index, InvalidIndexError
from pandas.util.decorators import Appender, cache_readonly
import pandas.core.common as com
from pandas.core.common import is_dtype_equal, isnull
import pandas.indexes.base as ibase
class NumericIndex(Index):
"""
Provide numeric type operations
This is an abstract class
"""
_is_numeric_dtype = True
def _maybe_cast_slice_bound(self, label, side, kind):
"""
This function should be overloaded in subclasses that allow non-trivial
casting on label-slice bounds, e.g. datetime-like indices allowing
strings containing formatted datetimes.
Parameters
----------
label : object
side : {'left', 'right'}
kind : {'ix', 'loc', 'getitem'}
Returns
-------
label : object
Notes
-----
Value of `side` parameter should be validated in caller.
"""
assert kind in ['ix', 'loc', 'getitem', None]
# we will try to coerce to integers
return self._maybe_cast_indexer(label)
def _convert_tolerance(self, tolerance):
try:
return float(tolerance)
except ValueError:
raise ValueError('tolerance argument for %s must be numeric: %r' %
(type(self).__name__, tolerance))
class Int64Index(NumericIndex):
"""
Immutable ndarray implementing an ordered, sliceable set. The basic object
storing axis labels for all pandas objects. Int64Index is a special case
of `Index` with purely integer labels. This is the default index type used
by the DataFrame and Series ctors when no explicit index is provided by the
user.
Parameters
----------
data : array-like (1-dimensional)
dtype : NumPy dtype (default: int64)
copy : bool
Make a copy of input ndarray
name : object
Name to be stored in the index
Notes
-----
An Index instance can **only** contain hashable objects
"""
_typ = 'int64index'
_groupby = _algos.groupby_int64
_arrmap = _algos.arrmap_int64
_left_indexer_unique = _algos.left_join_indexer_unique_int64
_left_indexer = _algos.left_join_indexer_int64
_inner_indexer = _algos.inner_join_indexer_int64
_outer_indexer = _algos.outer_join_indexer_int64
_can_hold_na = False
_engine_type = _index.Int64Engine
def __new__(cls, data=None, dtype=None, copy=False, name=None,
fastpath=False, **kwargs):
if fastpath:
return cls._simple_new(data, name=name)
# isscalar, generators handled in coerce_to_ndarray
data = cls._coerce_to_ndarray(data)
if issubclass(data.dtype.type, compat.string_types):
cls._string_data_error(data)
elif issubclass(data.dtype.type, np.integer):
# don't force the upcast as we may be dealing
# with a platform int
if (dtype is None or
not issubclass(np.dtype(dtype).type, np.integer)):
dtype = np.int64
subarr = np.array(data, dtype=dtype, copy=copy)
else:
subarr = np.array(data, dtype=np.int64, copy=copy)
if len(data) > 0:
if (subarr != data).any():
raise TypeError('Unsafe NumPy casting to integer, you must'
' explicitly cast')
return cls._simple_new(subarr, name=name)
@property
def inferred_type(self):
return 'integer'
@property
def asi8(self):
# do not cache or you'll create a memory leak
return self.values.view('i8')
@property
def is_all_dates(self):
"""
Checks that all the labels are datetime objects
"""
return False
def _convert_scalar_indexer(self, key, kind=None):
"""
convert a scalar indexer
Parameters
----------
key : label of the slice bound
kind : {'ix', 'loc', 'getitem'} or None
"""
assert kind in ['ix', 'loc', 'getitem', 'iloc', None]
# don't coerce ilocs to integers
if kind != 'iloc':
key = self._maybe_cast_indexer(key)
return (super(Int64Index, self)
._convert_scalar_indexer(key, kind=kind))
def equals(self, other):
"""
Determines if two Index objects contain the same elements.
"""
if self.is_(other):
return True
try:
return com.array_equivalent(com._values_from_object(self),
com._values_from_object(other))
except TypeError:
# e.g. fails in numpy 1.6 with DatetimeIndex #1681
return False
def _wrap_joined_index(self, joined, other):
name = self.name if self.name == other.name else None
return Int64Index(joined, name=name)
Int64Index._add_numeric_methods()
Int64Index._add_logical_methods()
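# Illustrative usage (editor's sketch, not part of the original module): an
# Int64Index is what Series/DataFrame build by default for integer labels, and
# lossy float input is rejected by the unsafe-cast check in __new__ above.
#
#   >>> Int64Index([1, 2, 3], name='id').inferred_type
#   'integer'
#   >>> Int64Index([1.5, 2.0])
#   TypeError: Unsafe NumPy casting to integer, you must explicitly cast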
class Float64Index(NumericIndex):
"""
Immutable ndarray implementing an ordered, sliceable set. The basic object
storing axis labels for all pandas objects. Float64Index is a special case
of `Index` with purely floating point labels.
Parameters
----------
data : array-like (1-dimensional)
dtype : NumPy dtype (default: object)
copy : bool
Make a copy of input ndarray
name : object
Name to be stored in the index
Notes
-----
    A Float64Index instance can **only** contain hashable objects
"""
_typ = 'float64index'
_engine_type = _index.Float64Engine
_groupby = _algos.groupby_float64
_arrmap = _algos.arrmap_float64
_left_indexer_unique = _algos.left_join_indexer_unique_float64
_left_indexer = _algos.left_join_indexer_float64
_inner_indexer = _algos.inner_join_indexer_float64
_outer_indexer = _algos.outer_join_indexer_float64
def __new__(cls, data=None, dtype=None, copy=False, name=None,
fastpath=False, **kwargs):
if fastpath:
return cls._simple_new(data, name)
data = cls._coerce_to_ndarray(data)
if issubclass(data.dtype.type, compat.string_types):
cls._string_data_error(data)
if dtype is None:
dtype = np.float64
dtype = np.dtype(dtype)
# allow integer / object dtypes to be passed, but coerce to float64
if dtype.kind in ['i', 'O']:
dtype = np.float64
elif dtype.kind in ['f']:
pass
else:
raise TypeError("cannot support {0} dtype in "
"Float64Index".format(dtype))
try:
subarr = np.array(data, dtype=dtype, copy=copy)
except:
raise TypeError('Unsafe NumPy casting, you must explicitly cast')
# coerce to float64 for storage
if subarr.dtype != np.float64:
subarr = subarr.astype(np.float64)
return cls._simple_new(subarr, name)
@property
def inferred_type(self):
return 'floating'
def astype(self, dtype):
if np.dtype(dtype) not in (np.object, np.float64):
raise TypeError('Setting %s dtype to anything other than '
'float64 or object is not supported' %
self.__class__)
return Index(self._values, name=self.name, dtype=dtype)
def _convert_scalar_indexer(self, key, kind=None):
"""
convert a scalar indexer
Parameters
----------
key : label of the slice bound
kind : {'ix', 'loc', 'getitem'} or None
"""
assert kind in ['ix', 'loc', 'getitem', 'iloc', None]
if kind == 'iloc':
return self._validate_indexer('positional', key, kind)
return key
def _convert_slice_indexer(self, key, kind=None):
"""
convert a slice indexer, by definition these are labels
unless we are iloc
Parameters
----------
key : label of the slice bound
kind : optional, type of the indexing operation (loc/ix/iloc/None)
"""
# if we are not a slice, then we are done
if not isinstance(key, slice):
return key
if kind == 'iloc':
return super(Float64Index, self)._convert_slice_indexer(key,
kind=kind)
# translate to locations
return self.slice_indexer(key.start, key.stop, key.step, kind=kind)
def _format_native_types(self, na_rep='', float_format=None, decimal='.',
quoting=None, **kwargs):
from pandas.core.format import FloatArrayFormatter
formatter = FloatArrayFormatter(self.values, na_rep=na_rep,
float_format=float_format,
decimal=decimal, quoting=quoting,
fixed_width=False)
return formatter.get_result_as_array()
def get_value(self, series, key):
""" we always want to get an index value, never a value """
if not lib.isscalar(key):
raise InvalidIndexError
from pandas.core.indexing import maybe_droplevels
from pandas.core.series import Series
k = com._values_from_object(key)
loc = self.get_loc(k)
new_values = com._values_from_object(series)[loc]
if lib.isscalar(new_values) or new_values is None:
return new_values
new_index = self[loc]
new_index = maybe_droplevels(new_index, k)
return Series(new_values, index=new_index, name=series.name)
def equals(self, other):
"""
Determines if two Index objects contain the same elements.
"""
if self is other:
return True
# need to compare nans locations and make sure that they are the same
# since nans don't compare equal this is a bit tricky
try:
if not isinstance(other, Float64Index):
other = self._constructor(other)
if (not is_dtype_equal(self.dtype, other.dtype) or
self.shape != other.shape):
return False
left, right = self._values, other._values
return ((left == right) | (self._isnan & other._isnan)).all()
except TypeError:
# e.g. fails in numpy 1.6 with DatetimeIndex #1681
return False
def __contains__(self, other):
if super(Float64Index, self).__contains__(other):
return True
try:
# if other is a sequence this throws a ValueError
return np.isnan(other) and self.hasnans
except ValueError:
try:
return len(other) <= 1 and ibase._try_get_item(other) in self
except TypeError:
return False
except:
return False
def get_loc(self, key, method=None, tolerance=None):
try:
if np.all(np.isnan(key)):
nan_idxs = self._nan_idxs
try:
return nan_idxs.item()
except (ValueError, IndexError):
# should only need to catch ValueError here but on numpy
# 1.7 .item() can raise IndexError when NaNs are present
return nan_idxs
except (TypeError, NotImplementedError):
pass
return super(Float64Index, self).get_loc(key, method=method,
tolerance=tolerance)
@property
def is_all_dates(self):
"""
Checks that all the labels are datetime objects
"""
return False
@cache_readonly
def is_unique(self):
return super(Float64Index, self).is_unique and self._nan_idxs.size < 2
@Appender(Index.isin.__doc__)
def isin(self, values, level=None):
value_set = set(values)
if level is not None:
self._validate_index_level(level)
return lib.ismember_nans(np.array(self), value_set,
isnull(list(value_set)).any())
Float64Index._add_numeric_methods()
Float64Index._add_logical_methods_disabled()
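# Illustrative usage (editor's sketch, not part of the original module): NaN
# membership and lookup are special-cased through hasnans / _nan_idxs above.
#
#   >>> idx = Float64Index([1.0, np.nan, 3.5])
#   >>> np.nan in idx
#   True
#   >>> idx.get_loc(np.nan)
#   1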
| gpl-2.0 |
adrienpacifico/openfisca-survey-manager | openfisca_survey_manager/scenarios.py | 1 | 10418 | # -*- coding: utf-8 -*-
# OpenFisca -- A versatile microsimulation software
# By: OpenFisca Team <[email protected]>
#
# Copyright (C) 2011, 2012, 2013, 2014, 2015 OpenFisca Team
# https://github.com/openfisca
#
# This file is part of OpenFisca.
#
# OpenFisca is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# OpenFisca is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
import numpy as np
import pandas
from openfisca_core import periods, simulations
from .surveys import Survey
log = logging.getLogger(__name__)
class AbstractSurveyScenario(object):
inflators = None
input_data_frame = None
legislation_json = None
simulation = None
tax_benefit_system = None
used_as_input_variables = None
year = None
weight_column_name_by_entity_key_plural = dict()
def init_from_data_frame(self, input_data_frame = None, tax_benefit_system = None, used_as_input_variables = None,
year = None):
assert input_data_frame is not None
self.input_data_frame = input_data_frame
if used_as_input_variables is None:
self.used_as_input_variables = []
else:
assert isinstance(used_as_input_variables, list)
self.used_as_input_variables = used_as_input_variables
assert tax_benefit_system is not None
self.tax_benefit_system = tax_benefit_system
assert year is not None
self.year = year
return self
def inflate(self, inflators = None):
if inflators is not None:
self.inflators = inflators
assert self.inflators is not None
assert self.simulation is not None
simulation = self.simulation
tax_benefit_system = self.tax_benefit_system
for column_name, inflator in inflators:
assert column_name in tax_benefit_system.column_by_name
holder = simulation.get_or_new_holder(column_name)
holder.array = inflator * holder.array
def new_simulation(self, debug = False, debug_all = False, reference = False, trace = False):
assert isinstance(reference, (bool, int)), \
'Parameter reference must be a boolean. When True, the reference tax-benefit system is used.'
tax_benefit_system = self.tax_benefit_system
if reference:
while True:
reference_tax_benefit_system = tax_benefit_system.reference
if reference_tax_benefit_system is None:
break
tax_benefit_system = reference_tax_benefit_system
        assert self.input_data_frame is not None
assert self.tax_benefit_system is not None
input_data_frame = self.input_data_frame
period = periods.period(self.year)
simulation = simulations.Simulation(
debug = debug,
debug_all = debug_all,
period = period,
tax_benefit_system = self.tax_benefit_system,
trace = trace,
)
id_variables = [
entity.index_for_person_variable_name for entity in simulation.entity_by_key_singular.values()
if not entity.is_persons_entity]
role_variables = [
entity.role_for_person_variable_name for entity in simulation.entity_by_key_singular.values()
if not entity.is_persons_entity]
for id_variable in id_variables + role_variables:
assert id_variable in self.input_data_frame.columns, \
"Variable {} is not present in input dataframe".format(id_variable)
column_by_name = self.tax_benefit_system.column_by_name
for column_name in input_data_frame:
if column_name not in column_by_name:
log.info('Unknown column "{}" in survey, dropped from input table'.format(column_name))
# waiting for the new pandas version to hit Travis repo
input_data_frame = input_data_frame.drop(column_name, axis = 1)
                # , inplace = True) # TODO: side effects?
for column_name in input_data_frame:
if column_name in id_variables + role_variables:
continue
if column_by_name[column_name].formula_class.function is not None:
if column_name in self.used_as_input_variables:
log.info(
'Column "{}" not dropped because present in used_as_input_variabels'.format(column_name))
continue
log.info('Column "{}" in survey set to be calculated, dropped from input table'.format(column_name))
input_data_frame = input_data_frame.drop(column_name, axis = 1)
            # , inplace = True) # TODO: side effects?
for entity in simulation.entity_by_key_singular.values():
if entity.is_persons_entity:
entity.count = entity.step_size = len(input_data_frame)
else:
entity.count = entity.step_size = (input_data_frame[entity.role_for_person_variable_name] == 0).sum()
entity.roles_count = input_data_frame[entity.role_for_person_variable_name].max() + 1
for column_name, column_serie in input_data_frame.iteritems():
holder = simulation.get_or_new_holder(column_name)
entity = holder.entity
if column_serie.values.dtype != holder.column.dtype:
log.info(
'Converting {} from dtype {} to {}'.format(
column_name, column_serie.values.dtype, holder.column.dtype)
)
if entity.is_persons_entity:
array = column_serie.values.astype(holder.column.dtype)
else:
array = column_serie.values[input_data_frame[entity.role_for_person_variable_name].values == 0].astype(
holder.column.dtype)
assert array.size == entity.count, 'Bad size for {}: {} instead of {}'.format(
column_name,
array.size,
entity.count)
holder.set_input(period, np.array(array, dtype = holder.column.dtype))
self.simulation = simulation
if 'initialize_weights' in dir(self):
self.initialize_weights()
if 'custom_initialize' in dir(self):
self.custom_initialize()
return simulation
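    # Illustrative call sequence (editor's sketch; the survey data frame, tax-benefit
    # system, variable name and year below are placeholders, not values shipped with
    # this module):
    #
    #     scenario = AbstractSurveyScenario().init_from_data_frame(
    #         input_data_frame = survey_data_frame,
    #         tax_benefit_system = tax_benefit_system,
    #         used_as_input_variables = ['salary'],
    #         year = 2014,
    #         )
    #     simulation = scenario.new_simulation()
    #     scenario.inflate(inflators = [('salary', 1.02)])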
# def new_simulation_bis(self, debug = False, debug_all = False, trace = False):
# assert self.init_from_data_frame is not None
# assert self.tax_benefit_system is not None
# input_data_frame_by_entity_key_plural = self.input_data_frame_by_entity_key_plural
# period = periods.period(self.year)
# simulation = simulations.Simulation(
# debug = debug,
# debug_all = debug_all,
# period = period,
# tax_benefit_system = self.tax_benefit_system,
# trace = trace,
# )
#
# id_variables = [
# entity.index_for_person_variable_name for entity in simulation.entity_by_key_singular.values()
# if not entity.is_persons_entity]
#
# role_variables = [
# entity.role_for_person_variable_name for entity in simulation.entity_by_key_singular.values()
# if not entity.is_persons_entity]
#
# TODO: finish for multiple data_frame
def create_data_frame_by_entity_key_plural(self, variables = None, indices = False, roles = False):
assert variables is not None or indices or roles
variables = list(
set(variables).union(set(self.index_variables(indices = indices, roles = roles)))
)
tax_benefit_system = self.tax_benefit_system
simulation = self.simulation
missing_variables = set(variables).difference(set(self.tax_benefit_system.column_by_name.keys()))
if missing_variables:
log.info("These variables aren't par of the tax-benefit system: {}".format(missing_variables))
columns_to_fetch = [
self.tax_benefit_system.column_by_name.get(variable_name) for variable_name in variables
if self.tax_benefit_system.column_by_name.get(variable_name) is not None
]
openfisca_data_frame_by_entity_key_plural = dict()
for entity_key_plural in tax_benefit_system.entity_class_by_key_plural.keys():
column_names = [
column.name for column in columns_to_fetch
if column.entity_key_plural == entity_key_plural
]
openfisca_data_frame_by_entity_key_plural[entity_key_plural] = pandas.DataFrame(
dict((column_name, simulation.calculate_add(column_name)) for column_name in column_names)
)
return openfisca_data_frame_by_entity_key_plural
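    # Illustrative usage (editor's sketch; the variable name is a placeholder):
    #
    #     data_frame_by_entity = scenario.create_data_frame_by_entity_key_plural(
    #         variables = ['salary'], indices = True, roles = True)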
def dump_data_frame_by_entity_key_plural(self, variables = None, survey_collection = None, survey_name = None):
assert survey_collection is not None
assert survey_name is not None
assert variables is not None
openfisca_data_frame_by_entity_key_plural = self.create_data_frame_by_entity_key_plural(variables = variables)
for entity_key_plural, data_frame in openfisca_data_frame_by_entity_key_plural.iteritems():
survey = Survey(name = survey_name)
survey.insert_table(name = entity_key_plural, data_frame = data_frame)
survey_collection.surveys.append(survey)
survey_collection.dump(collection = "openfisca")
def index_variables(self, indices = True, roles = True):
variables = list()
for entity in self.tax_benefit_system.entity_class_by_key_plural.values():
if indices:
variables.append(entity.index_for_person_variable_name)
if roles:
variables.append(entity.role_for_person_variable_name)
return variables
| agpl-3.0 |
cgre-aachen/gempy | gempy/utils/geomodeller_integration.py | 1 | 53224 | """Class definition for GeoModeller XML-Files
This version includes drillholes
Specific methods are defined for the uncertainty analysis (in combination
with Uncertainty_Obj module)
(c) J. Florian Wellmann, 2009-2013
"""
# try:
# import elementtree.ElementTree as ET
# except ImportError:
# try:
# import etree.ElementTree as ET
# except ImportError:
# try:
# import xml.etree.ElementTree as ET
# except ImportError:
# import ElementTree as ET
# import Latex_output_5 as LO
from pylab import *
import copy
import pandas as pn
import gempy as gp
import numpy as np
# python module to wrap GeoModeller XML file and perform all kinds of data
# procedures, e.g.:
# - Stochastic data modeling
# - TWT to depth conversion
# - Documentation module
#
# To Do:
# - exception handling and passing
# - log file?
# - check validity of xml-code, test, if past-processing with sax neccessary?
# - implement auto-documentation
# - clear-up spaghetti code!!!!! Check dependencies and other modules
# to get a consistent lay-out
try:
import xml.etree.cElementTree as ET
except ImportError:
import xml.etree.ElementTree as ET
# TODO fix this function and importing it to gempy.utils.create_from_geomodeller_xml
def create_from_geomodeller_xml(fp, resolution=(50, 50, 50), return_xml=False, **kwargs):
"""
EXPERIMENTAL
Creates InputData object from a GeoModeller xml file. Automatically extracts and sets model extent, interface
and orientation data as well as the stratigraphic pile.
Args:
fp (str): Filepath for the GeoModeller xml file to be read.
        resolution (tuple, optional): Tuple containing the number of voxels in dimensions (x,y,z). Defaults to (50, 50, 50).
        return_xml (bool, optional): Toggles returning the ReadGeoModellerXML instance to leverage further info from the
            xml file (e.g. for stratigraphic pile ordering). Defaults to False.
**kwargs: Keyword arguments for create_data function.
Returns:
gp.data_management.InputData
"""
    gmx = ReadGeoModellerXML(fp)  # instantiate parser class with filepath of xml
# instantiate InputData object with extent and resolution
    geo_data = gp.create_data(gmx.extent, resolution, **kwargs)
# set interface and orientation dataframes
    geo_data._surface_points = gmx.surface_points
    geo_data._orientations = gmx.orientations
if return_xml:
return geo_data, gmx
else:
return geo_data
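# Illustrative usage (editor's sketch; the function above is experimental and
# 'model.xml' is a placeholder path):
#
#     geo_data, gmx = create_from_geomodeller_xml('model.xml',
#                                                 resolution=(50, 50, 50),
#                                                 return_xml=True)
#     print(gmx.extent)                 # (xmin, xmax, ymin, ymax, zmin, zmax)
#     print(gmx.surface_points.head())  # GemPy-style interface dataframe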
class ReadGeoModellerXML:
def __init__(self, fp):
"""
Reads in and parses a GeoModeller XML file to extract interface and orientation data and the overall model
settings (e.g. extent and sequential pile). It uses ElementTree to parse the XML and the tree's root can
be accessed using self.root for more direct access to the file.
Args:
fp (str): Filepath for the GeoModeller xml file to be read.
"""
self.tree = ET.ElementTree(file=fp) # load xml as tree
self.root = self.tree.getroot()
self.xmlns = "http://www.geomodeller.com/geo"
self.gml = "http://www.opengis.net/gml"
self.extent = self._get_extent()
self.data = self.extract_data()
self.surface_points, self.orientations = self.get_dataframes()
# self.stratigraphic_column = self.get_stratigraphic_column()
# self.df = self.get_faults()
#
# self.series_info = self._get_series_fmt_dict()
# self.series_distribution = self.get_series_distribution()
#
# self.fault_matrix = self.get_fault_matrix()
def get_psc(self):
"""Returns the ProjectStratigraphicColumn tree element used for several data extractions."""
return self.root.find("{" + self.xmlns + "}GeologicalModel").find("{"+self.xmlns+"}ProjectStratigraphicColumn")
def extract_data(self):
"""
Extracts relevant data from the GeoModeller XML file ElementTree root (self.root) and returns it as a dictionary.
Returns:
(dict): Data dictionary
"""
data = {}
for s in self.get_psc():
sn = s.get("name")
data[sn] = {} # create a dict for each series
data[sn]["formations"] = []
data[sn]["InfluencedByFault"] = []
data[sn]["relation"] = s.get("relation") # add relation, whatever that is
for c in s:
if c.tag == "{"+self.xmlns+"}Data": # append formation names to list of formations
data[sn]["formations"].append(c.get("Name"))
if c.tag == "{"+self.xmlns+"}InfluencedByFault": # add fault influences
data[sn]["InfluencedByFault"].append(c.get("Name"))
if c.tag == "{"+self.xmlns+"}PotentialField":
data[sn]["gradients"] = []
data[sn]["surface_points"] = []
data[sn]["surface_points_counters"] = []
data[sn]["solutions"] = []
data[sn]["constraints"] = []
for cc in c:
# COVARIANCE
if cc.tag == "{" + self.xmlns + "}covariance":
data[sn]["covariance"] = cc.attrib
# GRADIENTS
if cc.tag == "{" + self.xmlns + "}Gradients":
for gr in cc:
data[sn]["gradients"].append([gr.get("Gx"), gr.get("Gy"), gr.get("Gz"),
gr.get("XGr"), gr.get("YGr"), gr.get("ZGr")])
# surface_points
if cc.tag == "{" + self.xmlns + "}Points":
for co in cc:
data[sn]["surface_points"].append([float(co[0].text), float(co[1].text), float(co[2].text)])
# INTERFACE COUNTERS
if cc.tag == "{" + self.xmlns + "}InterfacePoints":
for ip in cc:
data[sn]["surface_points_counters"].append([int(ip.get("npnt")), int(ip.get("pnt"))])
# CONSTRAINTS
if cc.tag == "{" + self.xmlns + "}Constraints":
for co in cc:
data[sn]["constraints"].append(float(co.get("value")))
# SOLUTIONS
if cc.tag == "{" + self.xmlns + "}Solutions":
for sol in cc:
data[sn]["solutions"].append(float(sol.get("sol")))
# convert from str to float
data[sn]["gradients"] = np.array(data[sn]["gradients"]).astype(float)
data[sn]["surface_points"] = np.array(data[sn]["surface_points"]).astype(float)
data[sn]["surface_points_counters"] = np.array(data[sn]["surface_points_counters"]).astype(float)
data[sn]["solutions"] = np.array(data[sn]["solutions"]).astype(float)
return data
def get_dataframes(self):
"""
Extracts dataframe information from the self.data dictionary and returns GemPy-compatible surface_points and
orientations dataframes.
Returns:
(tuple) of GemPy dataframes (surface_points, orientations)
"""
interf_formation = []
interf_series = []
orient_series = []
for i, s in enumerate(self.data.keys()): # loop over all series
if i == 0:
coords = self.data[s]["surface_points"]
grads = self.data[s]["gradients"]
else:
                coords = np.append(coords, self.data[s]["surface_points"], axis=0)
                grads = np.append(grads, self.data[s]["gradients"], axis=0)
for j, fmt in enumerate(self.data[s]["formations"]):
for n in range(int(self.data[s]["surface_points_counters"][j, 0])):
interf_formation.append(fmt)
interf_series.append(s)
            for k in range(len(self.data[s]["gradients"])):
orient_series.append(s)
surface_points = pn.DataFrame(coords, columns=['X', 'Y', 'Z'])
surface_points["formation"] = interf_formation
surface_points["series"] = interf_series
orientations = pn.DataFrame(grads, columns=['G_x', 'G_y', 'G_z', 'X', 'Y', 'Z'])
orientations["series"] = orient_series
dips = []
azs = []
pols = []
for i, row in orientations.iterrows():
dip, az, pol = gp.data_management.get_orientation((row["G_x"], row["G_y"], row["G_z"]))
dips.append(dip)
azs.append(az)
pols.append(pol)
orientations["dip"] = dips
orientations["azimuth"] = azs
orientations["polarity"] = pols
return surface_points, orientations
def get_stratigraphic_column(self):
"""
Extracts series names from ElementTree root.
Returns:
tuple: Series names (str) in stratigraphic order.
"""
stratigraphic_column = []
for s in self.get_psc():
stratigraphic_column.append(s.get("name"))
return tuple(stratigraphic_column)
def get_order_formations(self):
order_formations = []
for entry in self.series_distribution.values():
if type(entry) is str:
order_formations.append(entry)
elif type(entry) is tuple:
for e in entry:
order_formations.append(e)
return order_formations
def get_faults(self):
"""
Extracts fault names from ElementTree root.
Returns:
tuple: Fault names (str) ordered as in the GeoModeller XML.
"""
faults = []
for c in self.root[2]:
faults.append(c.get("Name"))
return tuple(faults)
def get_series_distribution(self):
"""
        Combines faults and stratigraphic series into an unordered dictionary as keys and maps the correct
formations to them as a list value. Faults series get a list of their own string assigned as formation.
Returns:
(dict): maps Series (str) -> Surfaces (list of str)
"""
series_distribution = {}
for key in self.series_info.keys():
fmts = self.series_info[key]["formations"]
if len(fmts) == 1:
series_distribution[key] = fmts[0]
else:
series_distribution[key] = tuple(fmts)
for f in self.stratigraphic_column:
if "Fault" in f or "fault" in f:
series_distribution[f] = f
return series_distribution
def _get_extent(self):
"""
Extracts model extent from ElementTree root and returns it as tuple of floats.
Returns:
tuple: Model extent as (xmin, xmax, ymin, ymax, zmin, zmax).
"""
xy = self.root[0][0][0][0].attrib
z = self.root[0][0][0][1].attrib
return tuple(np.array([xy["Xmin"], xy["Xmax"],
xy["Ymin"], xy["Ymax"],
z["Zmin"], z["Zmax"]]).astype(float))
def get_surface_points_df(self):
"""
Extracts the interface data points stored in the GeoModeller xml file and returns it as a GemPy surface_points
dataframe.
Returns:
pandas.DataFrame: InputData.surface_points dataframe
"""
if self.root.find("{" + self.xmlns + "}Structural3DData") is None:
print("No 3D data stored in given xml file.")
return None
else:
fmts = [c.attrib["Name"] for c in self.root.find("{" + self.xmlns + "}Structural3DData")[0]] # use formations
xyzf = []
for i, fmt in enumerate(fmts): # loop over all formations
for p in self.root[5][0][i]: # loop over every point
                    entry = p[0].text.split(",") # split the string by its separator into coord strings
entry.append(fmt)
for s in self.series_info.keys():
if fmt in self.series_info[s]["formations"]:
series = s
else:
series = fmt
entry.append(series)
xyzf.append(entry)
surface_points = pn.DataFrame(np.array(xyzf), columns=['X', 'Y', 'Z', "formation", "series"])
surface_points[["X", "Y", "Z"]] = surface_points[["X", "Y", "Z"]].astype(float)
return surface_points
def get_orientation_df(self):
"""
        Extracts the orientation data points stored in the GeoModeller xml file and returns it as a GemPy
orientations dataframe.
Returns:
pandas.DataFrame: InputData.orientations dataframe
"""
if self.root.find("{" + self.xmlns + "}Structural3DData") is None:
print("No 3D data stored in given xml file.")
return None
else:
fol = []
for i, s in enumerate(self.root.find("{" + self.xmlns + "}Structural3DData")[1]):
for c in self.root.find("{" + self.xmlns + "}Structural3DData")[1][i]:
entry = c[0][0].text.split(",")
entry.append(c.get("Dip"))
entry.append(c.get("Azimuth"))
# correct polarity from bool str to int
pol = c.get("NormalPolarity")
if pol == "true":
entry.append(1)
else:
entry.append(-1)
entry.append(s.get("Name"))
for series in self.series_distribution.keys():
if s.get("Name") in self.series_distribution[series]:
entry.append(series)
fol.append(entry)
orientations = pn.DataFrame(np.array(fol), columns=['X', 'Y', 'Z', 'dip', 'azimuth', 'polarity', 'formation', 'series'])
orientations[["X", "Y", "Z", "dip", "azimuth"]] = orientations[["X", "Y", "Z", "dip", "azimuth"]].astype(float)
orientations["polarity"] = orientations["polarity"].astype(int)
return orientations
def _get_series_fmt_dict(self):
sp = {}
for i, s in enumerate(self.stratigraphic_column): # loop over all series
fmts = [] # init formation storage list
influenced_by = [] # init influenced by list
for c in self.root.find("{" + self.xmlns + "}GeologicalModel").find("{"+self.xmlns+"}ProjectStratigraphicColumn")[i]:
if "Data" in c.tag:
fmts.append(c.attrib["Name"])
elif "InfluencedByFault" in c.tag:
influenced_by.append(c.attrib["Name"])
# print(fmts)
sp[s] = {}
sp[s]["formations"] = fmts
sp[s]["InfluencedByFault"] = influenced_by
return sp
def _where_do_faults_stop(self):
fstop = {}
for i, f in enumerate(self.root[2]):
stops_on = []
for c in self.root[2][i][2:]:
stops_on.append(c.get("Name"))
fstop[f.get("Name")] = stops_on
return fstop
def get_fault_matrix(self):
nf = len(self.faults)
fm = np.zeros((nf, nf)) # zero matrix of n_faults²
fstop = self._where_do_faults_stop()
for i, f in enumerate(self.faults):
for fs in fstop[f]:
j = np.where(np.array(self.faults) == fs)[0][0]
fm[i, j] = 1
return fm
# TODO think where this function should go
def read_vox(geo_data, path):
"""
read vox from geomodeller and transform it to gempy format
Returns:
numpy.array: block model
"""
import pandas as pn
geo_res = pn.read_csv(path)
geo_res = geo_res.iloc[9:]
# ip_addresses = geo_res['nx 50'].unique() # geo_model.surface_points["formation"].unique()
ip_dict = geo_data.get_formation_number()
# ip_dict = geo_model.surface_points['formation_number'].unique()
geo_res_num = geo_res.iloc[:, 0].replace(ip_dict)
block_geomodeller = np.ravel(geo_res_num.values.reshape(
geo_data.resolution[0], geo_data.resolution[1], geo_data.resolution[2], order='C').T)
return block_geomodeller
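# Illustrative usage (editor's sketch; 'geomodeller_export.vox' is a placeholder path):
#
#     lith_block = read_vox(geo_data, 'geomodeller_export.vox')
#     lith_block_3d = lith_block.reshape(geo_data.resolution)  # back to a 3D grid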
class GeomodellerClass:
"""Wrapper for GeoModeller XML-datafiles to perform all kinds of data
manipulation and analysis on low level basis, e.g.:
- Uncertainty Simulation
- TWT to depth conversion
- Data analysis, e.g. Auto-documentation"""
def __init__(self):
"""Wrapper for GeoModeller XML-datafiles to perform all kinds of data
manipulation and analysis on low level basis, e.g.:
- Uncertainty Simulation
- TWT to depth conversion
- Data analysis, e.g. Auto-documentation"""
def load_geomodeller_file(self, xml_file):
self.xml_file_name = xml_file
try:
tree = ET.parse(xml_file)
        except IOError as err:
            print(("Cannot open xml file " + xml_file + ": " + str(err)))
print ("Please check file name and directory and try again")
raise IOError
        # save tree in a local variable
self.tree = tree
# get rootelement
self.rootelement = tree.getroot()
# set other class variables
self.xmlns = "http://www.geomodeller.com/geo"
self.gml = "http://www.opengis.net/gml"
def load_deepcopy_tree(self, deepcopy_tree):
"""load tree information from deepcopied tree into object"""
self.tree = deepcopy_tree
self.rootelement = deepcopy_tree.getroot()
# set other class variables
self.xmlns = "http://www.geomodeller.com/geo"
self.gml = "http://www.opengis.net/gml"
def deepcopy_tree(self):
"""create a deep copy of original tree to restore later, e.g. for uncertainty evaluation"""
deepcopy_tree = copy.deepcopy(self.tree)
deepcopy_tree.parent = None
return deepcopy_tree
def reload_geomodeller_file(self, deepcopy_tree):
"""restore original tree root from deep copy of orignial tree
deep copy can be created (not automatically to save memory!) with
self.deepcopy_tree()
"""
try:
self.tree = deepcopy_tree
self.rootelement = self.tree.getroot()
except NameError:
print ("No deep copy of original tree available, please create with self.deepcopy_tree()")
def get_model_extent(self):
"""get extent of model
returns (x_min, x_max, y_min, y_max, z_min, z_max)
and saves extent in self.x_min, self.x_max, etc.
"""
extent_parent = self.rootelement.find("{"+self.xmlns+"}Extent3DOfProject")
extentbox3D = extent_parent.find("{"+self.xmlns+"}ExtentBox3D")
extent3D = extentbox3D.find("{"+self.xmlns+"}Extent3D")
extent_xy = extent3D.find("{"+self.xmlns+"}ExtentXY")
extent_z = extent3D.find("{"+self.xmlns+"}ExtentZ")
self.x_min = float(extent_xy.get("Xmin"))
self.x_max = float(extent_xy.get("Xmax"))
self.y_min = float(extent_xy.get("Ymin"))
self.y_max = float(extent_xy.get("Ymax"))
self.z_min = float(extent_z.get("Zmin"))
self.z_max = float(extent_z.get("Zmax"))
return (self.x_min, self.x_max, self.y_min, self.y_max, self.z_min, self.z_max)
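    # Illustrative usage (editor's sketch; 'project.xml' is a placeholder path):
    #
    #     gm = GeomodellerClass()
    #     gm.load_geomodeller_file('project.xml')
    #     print(gm.get_model_extent())       # (x_min, x_max, y_min, y_max, z_min, z_max)
    #     print(gm.get_stratigraphy_list())  # series names, lowermost formation first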
# def get_model_range(self):
# """get model range from model extent, e.g. for automatic mesh generation"""
# (x_min, x_max, y_min, y_max, z_min, z_max) = self.get_model_extent()
# from numpy import abs
# self.range_x = abs(x_max - x_min)
# self.range_y = abs(y_max - y_min)
# self.range_z = abs(z_max - z_min)
# return (self.range_x, self.range_y, self.range_z)
def get_sections(self):
"""get sections out of rootelement, safe array with section elements
in local variable"""
sections_parent = self.rootelement.findall("{"+self.xmlns+"}Sections")[0]
self.sections = sections_parent.findall("{"+self.xmlns+"}Section")
return self.sections
def get_faults(self):
"""get fault elements out of rootelement and safe as local list"""
try:
faults_parent = self.rootelement.findall("{"+self.xmlns+"}Faults")[0]
self.faults = faults_parent.findall("{"+self.xmlns+"}Fault")
except IndexError:
print("No df found in model")
return self.faults
def get_formations(self):
"""get formation elements out of rootelement and safe as local list"""
formations_parent = self.rootelement.findall("{"+self.xmlns+"}Surfaces")[0]
self.formations = formations_parent.findall("{"+self.xmlns+"}Formation")
def get_stratigraphy_list(self, **kwds):
"""get project stratigraphy and return as list; lowermost formation: 1
        for GeoModeller dll access (this is the formation_number that is returned with
the GetComputedLithologyXYZ function in the geomodeller dll
optional keywords:
out = string : set 'out' formation to this name (might be necessary for TOUGH2 simulation!)
"""
series_list = []
strati_column = self.rootelement.find("{"+self.xmlns+"}GeologicalModel").find("{"+self.xmlns+"}ProjectStratigraphicColumn")#.findall("{"+self.xmlns+"Series")
series = strati_column.findall("{"+self.xmlns+"}Series")
for s in series:
data = s.findall("{"+self.xmlns+"}Data")
for d in data:
series_list.append(d.get("Name"))
# append "out" as uppermost formation for "out values
if "tough2" in kwds:
if 'out' in kwds:
series_list.append(kwds['out'])
else:
series_list.append("out")
self.stratigraphy_list = series_list
return series_list
def get_section_names(self):
"""get all section names out of local variable self.sections"""
# todo: return section names as dictionary with element and name?
# test if self.sections defined, if not -> create
try:
self.sections
except AttributeError:
# print "Create sections Data array"
self.get_sections()
section_names = []
for section in self.sections:
section_names.append(section.get("Name"))
return section_names
def get_formation_names(self):
"""get formation names and return as list"""
forms=[]
try:
self.formations
except AttributeError:
self.get_formations()
for formation in self.formations:
forms.append(formation.get("Name"))
return forms
def get_fault_names(self):
"""get fault names and return as list"""
faults_list=[]
try:
self.faults
except AttributeError:
self.get_faults()
for fault in self.faults:
faults_list.append(fault.get("Name"))
return faults_list
def get_points_in_sections(self):
"""Create dictionary of all points (with obs-id) in all sections"""
self.create_sections_dict()
for sec in list(self.section_dict.keys()):
forms = self.get_formation_point_data(self.section_dict[sec])
if forms == None:
print ("\t\t\tNo Formation Points in this section")
else:
for form in forms:
#print form.get("ObservationID")
# if form.get("ObservationID") == None: continue
data = form.find("{"+self.xmlns+"}Data")
print(("\nObsID = %s" % form.get("ObservationID")))
print(("\tFormation name\t= %s" % data.get("Name")))
element_point = form.find("{"+self.gml+"}LineString")
element_coords = element_point.find("{"+self.gml+"}coordinates")
tmp = element_coords.text.split(" ")
for tmp1 in tmp:
if tmp1 == '': continue
tmp_cds = tmp1.split(",")
print(("\tX = %.1f, Y = %.1f" % (float(tmp_cds[0]), float(tmp_cds[1]))))
fol = form.find("{"+self.xmlns+"}FoliationObservation")
if fol is not None:
print(("\tFoliation defined: azimuth = %.1f, dip = %.1f" % (float(fol.get("Azimuth")), float(fol.get("Dip")))))
# get position of foliation (yet another point)
pt = fol.find("{"+self.gml+"}Point")
c = pt.find("{"+self.gml+"}coordinates")
cds = c.text.split(",")
print(("\t\tX = %.1f, Y = %.1f" % (float(cds[0]), float(cds[1]))))
print ("\n")
print((80*"-"))
print(("Foliations in section %s:" % sec))
print((80*"-"))
foliations = self.get_foliations(self.section_dict[sec])
if foliations == None:
print ("\t\t\tNo foliations in this section")
else:
for fol1 in foliations:
print(("\nObsID = %s" % fol1.get("ObservationID")))
data = fol1.find("{"+self.xmlns+"}Data")
fol = fol1.find("{"+self.xmlns+"}FoliationObservation")
print(( "\tFormation name\t= %s" % data.get("Name")))
print(("\tAzimuth = %.1f, dip = %.1f" % (float(fol.get("Azimuth")), float(fol.get("Dip")))))
pt = fol.find("{"+self.gml+"}Point")
c = pt.find("{"+self.gml+"}coordinates")
cds = c.text.split(",")
print(("\tX = %.1f, Y = %.1f" % (float(cds[0]), float(cds[1]))))
return
def get_formation_parameters(self):
"""read formation parameters; physical
properties, density, th. cond etc... store in dict"""
#
# To do: re-write in a more elegant way and keep original
# structure and key-words?
#
self.formation_params = {}
try:
self.formations
except AttributeError:
# print "Create sections Data array"
self.get_formations()
for formation in self.formations:
self.formation_params[formation.get("Name")] = {}
geophys = formation.find("{"+self.xmlns+"}Geophysics")
dens = geophys.find("{"+self.xmlns+"}DensityCompoundDistribution")
dens_simple = dens.find("{"+self.xmlns+"}SimpleDistribution")
self.formation_params[formation.get("Name")]["dens_mean"] = dens_simple.get("Mean")
self.formation_params[formation.get("Name")]["dens_law"] = dens_simple.get("LawType")
self.formation_params[formation.get("Name")]["dens_dev"] = dens_simple.get("Deviation")
# print geophys.getchildren()
mag = geophys.find("{"+self.xmlns+"}RemanantMagnetizationCompoundDistribution")
mag_simple = mag.find("{"+self.xmlns+"}SimpleDistributionVector")
self.formation_params[formation.get("Name")]["mag"] = mag_simple.get("Mean")
velocity = geophys.find("{"+self.xmlns+"}VelocityCompoundDistribution")
velocity_simple = velocity.find("{"+self.xmlns+"}SimpleDistribution")
self.formation_params[formation.get("Name")]["vel_mean"] = velocity_simple.get("Mean")
self.formation_params[formation.get("Name")]["vel_law"] = velocity_simple.get("LawType")
self.formation_params[formation.get("Name")]["vel_dev"] = velocity_simple.get("Deviation")
# Thermal properties are only defined in newer versions of GeoModeller! thus check!
th_cond = geophys.find("{"+self.xmlns+"}ThermalConductivityCompoundDistribution")
if th_cond == None: continue
th_cond_simple = th_cond.find("{"+self.xmlns+"}SimpleDistribution")
self.formation_params[formation.get("Name")]["th_cond_mean"] = th_cond_simple.get("Mean")
self.formation_params[formation.get("Name")]["th_cond_law"] = th_cond_simple.get("LawType")
self.formation_params[formation.get("Name")]["th_cond_dev"] = th_cond_simple.get("Deviation")
heat_prod = geophys.find("{"+self.xmlns+"}HeatProductionRateCompoundDistribution")
heat_prod_simple = heat_prod.find("{"+self.xmlns+"}SimpleDistribution")
self.formation_params[formation.get("Name")]["heat_prod_mean"] = heat_prod_simple.get("Mean")
self.formation_params[formation.get("Name")]["heat_prod_law"] = heat_prod_simple.get("LawType")
self.formation_params[formation.get("Name")]["heat_prod_dev"] = heat_prod_simple.get("Deviation")
# same for other properties
# print th_cond
#
# !!! only simple distributions yet impl.
#
def create_fault_dict(self):
"""create dictionary for fault elements with names as keys"""
        # test if self.faults defined, if not -> create
try:
self.faults
except AttributeError:
print ("Create Surfaces list")
self.get_faults()
self.fault_dict = {}
for fault in self.faults:
self.fault_dict[fault.get("Name")] = fault
return self.fault_dict
def create_formation_dict(self):
"""create dictionary for formation elements with formation names as keys"""
# test if self.formations defined, if not -> create
try:
self.formations
except AttributeError:
print ("Create formation dictionary")
self.get_formations()
self.formation_dict = {}
for formation in self.formations:
self.formation_dict[formation.get("Name")] = formation
return self.formation_dict
def create_sections_dict(self):
"""create dictionary for section elements with section names as keys
(for easier use...)"""
# test if self.sections defined, if not -> create
try:
self.sections
except AttributeError:
# print "Create sections dictionary"
self.get_sections()
self.section_dict = {}
for section in self.sections:
self.section_dict[section.get("Name")] = section
return self.section_dict
def get_foliations(self, section_element):
"""get all foliation data elements from a for section"""
tmp_element = section_element.find("{"+self.xmlns+"}Structural2DData")
# check in case there is no foliation defined in this section
# tmp_element2 = tmp_element.find("{"+self.xmlns+"}Foliations")
try:
tmp_element2 = tmp_element.find("{"+self.xmlns+"}Foliations")
except AttributeError:
return None
try:
foliations = tmp_element2.findall("{"+self.xmlns+"}Foliation")
except AttributeError:
return None
return foliations
def get_foliation_dip(self, foliation_element):
"""get dip of foliation element"""
return float(foliation_element.find("{"+self.xmlns+"}FoliationObservation").get("Dip"))
def get_foliation_azimuth(self, foliation_element):
"""get dip of foliation element"""
return float(foliation_element.find("{"+self.xmlns+"}FoliationObservation").get("Azimuth"))
def get_folation_polarity(self, foliation_element):
"""get polarity of foliation element; return true if Normal Polarity"""
return foliation_element.find("{"+self.xmlns+"}FoliationObservation").get("NormalPolarity")
def get_foliation_coordinates(self, foliation_element):
"""get coordinates of foliation element"""
element_fol = foliation_element.find("{"+self.xmlns+"}FoliationObservation")
element_point = element_fol.find("{"+self.gml+"}Point")
element_coords = element_point.find("{"+self.gml+"}coordinates")
return str(element_coords.text)
def get_formation_data(self, section_element):
"""not used any more! use get_formation_point_data(section_element) instead"""
print ("not used any more! use get_formation_point_data(section_element) instead")
return None
def get_formation_point_data(self, section_element):
"""get all formation point data elements from a for section"""
tmp_element = section_element.find("{"+self.xmlns+"}Structural2DData")
# check in case there is no formation points defined in this section
try:
tmp_element2 = tmp_element.find("{"+self.xmlns+"}SurfacePoints")
except AttributeError:
return None
return tmp_element2.findall("{"+self.xmlns+"}Interface")
def get_name(self, section_element):
"""get the name of any section element (if defined)"""
return section_element.find("{"+self.xmlns+"}Name")
def get_interface_name(self, interface_element):
"""get name of interface, i.e. the formation"""
return interface_element.find("{"+self.xmlns+"}Data").get("Name")
def get_point_coordinates(self, point_elements, **args):
"""get the coordinates of a specific point memory locations"""
point_list = list()
for element in point_elements:
name = element.find("{"+self.xmlns+"}Data").get("Name")
#if args.has_key("if_name"):
if "if_name" in args:
if args["if_name"] != name: continue
element_point = element.find("{"+self.gml+"}LineString")
element_coords = element_point.find("{"+self.gml+"}coordinates")
point_list.append((name+ " " + str(element_coords.text)))
return point_list
def change_formation_values_PyMC(self, **args):
""" -So far is ready only to changes points in coordinates y. It is not difficult to add a new
dimension
- The dips and azimuth ObservationID must contain _d or _a respectively"""
if "info" in args:
section_dict = self.create_sections_dict()
contact_points_dict = {}
foliation_dict = {}
for i in range(len(section_dict)):
print(("\n\n\n", list(section_dict.keys())[i], "\n"))
print ("Elements and their ID \n")
contact_points = self.get_formation_point_data(list(section_dict.values())[i])
try:
for contact_point in contact_points:
contact_points_dict[contact_point.get("ObservationID")] = contact_point
print((contact_point, contact_point.get("ObservationID")))
except TypeError:
print ("No contact points in the section")
#ObsID = contact_points.get("ObservationID")
foliations = self.get_foliations(list(section_dict.values())[i])
try:
for foliation in foliations:
# dictionary to access with azimth name
foliation_dict[foliation.get("ObservationID")+"_a"] = foliation
# dictionary to access with dip name
foliation_dict[foliation.get("ObservationID")+"_d"] = foliation
print((foliation, foliation.get("ObservationID")))
except TypeError:
print ("No foliation in the section")
try:
coord_interface = self.get_point_coordinates(contact_points)
except TypeError:
print ("Element does not have iterable objects")
print(("\nDictionaries:\n ", contact_points_dict, "\n", foliation_dict))
print(("\n Contact points", contact_points, "\n", coord_interface, "\n"))
print(("foliations" , foliations, "\n"))
try:
for i in range(len(foliations)):
print(("azimut:",self.get_foliation_azimuth(foliations[i])))
print(("dip",self.get_foliation_dip(foliations[i])))
print(("coordinates", self.get_foliation_coordinates(foliations[i])))
except TypeError:
print ("No foliation in the section")
return None
#========================
# change the stuff
#=======================
section_dict = self.create_sections_dict()
contact_points_dict = {}
foliation_dict = {}
#Creation of dictionaries according to the ObservationID
for i in range(len(section_dict)):
# Contact points:
try:
contact_points = self.get_formation_point_data(list(section_dict.values())[i])
for contact_point in contact_points:
contact_points_dict[contact_point.get("ObservationID")] = contact_point
except TypeError:
continue
# Foliation Points
try:
foliations = self.get_foliations(list(section_dict.values())[i])
for foliation in foliations:
# dictionary to access with azimth name
foliation_dict[foliation.get("ObservationID")+"_a"] = foliation
# dictionary to access with dip name
foliation_dict[foliation.get("ObservationID")+"_d"] = foliation
except TypeError:
continue
# Passing our chain values:
# Contact_points
if "contact_points_mc" in args:
for contac_point_mc in args["contact_points_mc"]:
try:
element = contact_points_dict[str(contac_point_mc)]
element_point = element.find("{"+self.gml+"}LineString")
element_coords = element_point.find("{"+self.gml+"}coordinates")
point_list = element_coords.text.split(" ")
if point_list[-1] == '':
point_list = point_list[0:-1]
if len(point_list) == 1:
self.change_formation_point_pos(element, y_coord = contac_point_mc.value)
#Specific case of the Graben:
elif len(point_list) == 2:
self.change_formation_point_pos(element, y_coord = [contac_point_mc.value, contac_point_mc.value])
else:
print ("The lenght of the points to change does not fit with the number of changes in the input (>2)")
except KeyError:
print(("The name of your PyMC variables (%s) does not agree with the ObservationID in the xml. Check misspellings." % str(contac_point_mc)))
continue
# Azimuths
if "azimuths_mc" in args:
for azimuth_mc in args["azimuths_mc"]:
#print azimuth_mc, type(azimuth_mc)
try:
self.change_foliation(foliation_dict[str(azimuth_mc)], azimuth = str(azimuth_mc.value))
except KeyError:
print(("The name of your PyMC variables (%s) does not agree with the ObservationID in the xml. Check misspellings." % str(azimuth_mc)))
continue
# Dips
if "dips_mc" in args:
for dip_mc in args["dips_mc"]:
try:
self.change_foliation(foliation_dict[str(dip_mc)], dip = str(dip_mc.value))
except KeyError:
print(("The name of your PyMC variables (%s) does not agree with the ObservationID in the xml. Check misspellings." % str(dip_mc)))
continue
# To do: vectorize this
def change_formation_point_pos(self, element, **args):
"""change position of formation point in section element
arguments:
x_coord, y_coord : set to this coordinates
add_x_coord, add_y_coord : add values to existing coordinates
use if_name = and if_provenance = to add conditions!
print_points = bool: print the list of points that will be modified (default: False)"""
# print "I am here"
#print_points = kwds.get('print_points', False)
prov = element.get("Provenance")
name = element.find("{"+self.xmlns+"}Data").get("Name")
#if args.has_key("if_name"):
if "if_name" in args:
if args["if_name"] != name: return
# if args.has_key("if_provenance"):
if "if_provenance" in args:
if args["if_provenance"] != prov: return
# element_fol = element.find("{"+self.xmlns+"}")
element_point = element.find("{"+self.gml+"}LineString")
element_coords = element_point.find("{"+self.gml+"}coordinates")
point_list = element_coords.text.split(" ")
# print "poitn lits", point_list
if point_list[-1] == '':
point_list = point_list[0:-1]
if len(point_list) > 1:
x_coords = []
y_coords = []
if "print_points" in args:
print (point_list)
for point in point_list:
# if point == '': continue
a = point.split(',')
#print a
[x_coord, y_coord] = [float(a[0]), float(a[1])]
x_coords.append(x_coord)
y_coords.append(y_coord)
# convert to arrays for calculation
x_coords = array(x_coords)
y_coords = array(y_coords)
# Here y_coord, and x_coord
if "x_coord" in args:
if shape(point_list) == shape(args["x_coord"]):
#except TypeError:
x_coords = array(args["x_coord"])
else:
print ("length of the points you want to change do not match with input dimensions")
if "y_coord" in args:
#print (args["y_coord"])
#print array(args["y_coord"])
if shape(point_list) == shape(args["y_coord"]):
y_coords = array(args["y_coord"])
# print "ycoords", y_coords
else:
print ("length of the points you want to change do not match with input dimensions")
#print "Coordenates", x_coords, y_coords
# Here add coords
if "add_x_coord" in args:
x_coords = x_coords + float(args["add_x_coord"])
if "add_y_coord" in args:
y_coords = y_coords + float(args["add_y_coord"])
# print y_coords
# now, reconstruct output format strings
out_text = ''
for (i, x_coord) in enumerate(x_coords):
out_text += "%f,%f " % (x_coords[i],y_coords[i])
element_coords.text = out_text
else:
[x_coord, y_coord] = point_list[0].split(",")
[x_coord, y_coord] = [float(x_coord), float(y_coord)]
if "x_coord" in args:
x_coord = float(args["x_coord"])
if "y_coord" in args:
y_coord = float(args["y_coord"])
if "add_x_coord" in args:
x_coord = x_coord + float(args["add_x_coord"])
if "add_y_coord" in args:
y_coord = y_coord + float(args["add_y_coord"])
element_coords.text = "%f,%f" % (x_coord, y_coord)
return None
def change_foliation_polarity(self, element):
"""change polarity of foliation element"""
if element.get("NormalPolarity") == "true":
element.set("NormalPolarity", "false")
else:
element.set("NormalPolarity", "true")
def change_foliation(self, element, **args):
"""change foliation data, argument one or more of: azimuth, dip,
normalpolarity = true/false, x_coord, y_coord" or: add_dip, add_azimuth,
add_x_coord, add_y_coord to add values to existing values!
use if_name = and if_provenance = to add conditions!"""
prov = element.get("Provenance")
name = element.find("{"+self.xmlns+"}Data").get("Name")
if "if_name" in args:
if args["if_name"] != name: return
if "if_provenance" in args:
if args["if_provenance"] != prov: return
element_fol = element.find("{"+self.xmlns+"}FoliationObservation")
if "dip" in args:
element_fol.set("Dip", args["dip"])
if "azimuth" in args:
element_fol.set("Azimuth", args["azimuth"])
if "nomalpolarity" in args:
element_fol.set("NormalPolarity", args["normalpolarity"])
#
# To Do: logfile, if dip >90, azi > 360, ...
#
if "add_dip" in args:
dip_org = float(element_fol.get("Dip"))
dip_new = dip_org + float(args["add_dip"])
if dip_new > 90:
dip_new = 180 - dip_new
self.change_foliation_polarity(element_fol)
azi_org = float(element_fol.get("Azimuth"))
if azi_org < 180:
element_fol.set("Azimuth", str(azi_org+180))
else:
element_fol.set("Azimuth", str(azi_org-180))
element_fol.set("Dip", str(dip_new))
if "add_azimuth" in args:
azi_org = float(element_fol.get("Azimuth"))
azi_new = azi_org + float(args["add_azimuth"])
if azi_new > 360.0: azi_new -= 360
element_fol.set("Azimuth", str(azi_new))
element_point = element_fol.find("{"+self.gml+"}Point")
element_coords = element_point.find("{"+self.gml+"}coordinates")
[x_coord, y_coord] = element_coords.text.split(",")
[x_coord, y_coord] = [float(x_coord), float(y_coord)]
if "x_coord" in args:
x_coord = float(args["x_coord"])
if "y_coord" in args:
y_coord = float(args["y_coord"])
if "add_x_coord" in args:
x_coord = x_coord + float(args["add_x_coord"])
if "add_y_coord" in args:
y_coord = y_coord + float(args["add_y_coord"])
element_coords.text = "%f,%f" % (x_coord, y_coord)
return None
def twt_to_depth(self, sec_element, formula, **args):
"""Convert all data within a section from twt to depth (including
orientation data!!
Input: section element with data points, conversion function as
string with 't' as placeholder for twt-time, e.g. '2 * (-t) ** 2 + 18 * (-t)'
ATTENTION: check if t negative
optional arguments:
change_dip (boolean) : change dip angle in foliation data
according to first derivative of twt-to-depth formula
create_plot (boolean) : create summary plot with twt and converted depth
for conversion formula control
"""
        # Idea: stochastic approach to twt -> depth conversion: apply several
# possible formulae for quality estimation of resulting model?
struct_data = sec_element.find("{"+self.xmlns+"}Structural2DData")
surface_points = struct_data.find("{"+self.xmlns+"}SurfacePoints").findall("{"+self.xmlns+"}Interface")
# save data in list to create a plot to check validity of conversion
t_list = []
v_list = []
for interface in surface_points:
gml_coords_element = interface.find("{"+self.gml+"}LineString").find("{"+self.gml+"}coordinates")
# check for correct decimal, column (cs) and text separator (ts)
ts = gml_coords_element.get("ts")
cs = gml_coords_element.get("cs")
data_array = gml_coords_element.text.split(ts)
# check if last entry is empty (seems to happen often), if so: delete!
# print gml_coords_element.text
if data_array[-1] == "": del data_array[-1]
text_new = ""
# apply conversion formula for z-direction, i.e. dv;
            # no change in x-direction -> du = 0 (but: maybe necessary for specific situations?)
for entry in data_array:
parts = entry.split(cs)
# get original values
                # t as variable, as defined in formula (input)
t = float(parts[1])
v_new = eval(formula)
du = 0
text_new += "%f%s%f%s" % (float(parts[0])+du, cs, v_new, ts)
# append to list for check-plot
t_list.append(-t)
v_list.append(v_new)
# print text_new
gml_coords_element.text = text_new
# print gml_coords_element.text
# now change foliation position and dip angle! (check: given as argument?)
# for dip angle: numerical determination of first derivative for
# twt to depth conversion formula?
if "change_dip" in args and args["change_dip"]:
print ("change dip in seismic profile")
# create check-plot
if "create_plot" in args and args["create_plot"]:
print ("Create plot with twt, converted depth pairs")
plot(t_list,v_list,'.', label = formula)
title("TWT to depth: Converted data points\nSection: " + sec_element.get("Name"))
xlabel("TWT [ms]")
ylabel("Converted depth [m]")
legend()
grid(True)
show()
def get_pole_points(self, element):
# function to plot data points in geomodeller element
u = []
v = []
poles = element.getiterator("{"+self.xmlns+"}Pole-Weight")
for pole in poles:
u.append(pole.get("U"))
v.append(pole.get("V"))
return (u,v)
def plot_points(self, element):
# plot u,v points in simple 2D plot
(u,v) = self.get_pole_points(element)
plot(u,v,'o')
name = element.get("Name")
title(name)
savefig(name+"_points.png")
def write_xml(self, save_dir, **args):
"""Write elementtree to xml-file
arguments:
print_path: Print the path where the xml is created"""
# to do: filename (and directory?) as optional argument"""
# flo, 10/2008
#file = "changed_model.xml"
tree_new = ET.ElementTree(self.rootelement)
if "print_path" in args:
print(("Write tree to file " + save_dir))
tree_new.write(save_dir)
#self.tree.write("changed_model.xml")
self.tree.write(save_dir)
def add_to_project_name(self, s):
"""add string s to project name, e.g. Number of uncertainty run"""
name = self.rootelement.get("projectName")
name_new = name + " " + s
self.rootelement.set("projectName",name_new)
def create_TOUGH_formation_names(self, **kwds):
"""create formation names that are compatible with format required by TOUGH,
i.e. String of length 5
returns and stores as dictionary with Geomodeller Names as key and TOUGH names as entry
(self.tough_formation_names)
simply cuts original formation name to a name length of 5;
if cut name already exists: create new name, three first string values followed by two integers
that are subsequently increased
        optional keywords:
out = string : set out formation to this name (for TOUGH2: no leading spaces allowed! set to 5 chars!)
"""
# import str
self.tough_formation_names = {}
# check if self.formation_names list already exists, if not: create
try: self.formation_names
except AttributeError:
self.formation_names = self.get_stratigraphy_list(**kwds)
# create list with tough names to check if name already exists
tough_name_list = []
for i,name in enumerate(self.formation_names):
# if self.formation_names[i] == ' out' or self.formation_names[i] == ' OUT':
# tough_name_list.append("OUT ")
# continue
cut_name = self.formation_names[i][0:5]
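# if the truncated 5-character name collides with an existing one, keep the first
# three characters and append a two-digit counter (00-99) until the name is unique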
if cut_name in tough_name_list:
for j in range(100):
if "%3s%02d" % (cut_name[0:3],j) in tough_name_list:
continue
else:
cut_name = "%3s%02d" % (cut_name[0:3],j)
tough_name_list.append(cut_name)
break
else:
tough_name_list.append(cut_name)
self.tough_formation_names[name] = "%5s" % str.upper(cut_name)
return self.tough_formation_names
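# Illustrative example (hypothetical formation names): ["Sandstone", "Sandstone_2", "Basement"]
# would map to {"Sandstone": "SANDS", "Sandstone_2": "SAN00", "Basement": "BASEM"}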
if __name__ == '__main__':
print ("main")
| lgpl-3.0 |
ueshin/apache-spark | python/pyspark/sql/dataframe.py | 9 | 102339 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import random
import warnings
from functools import reduce
from html import escape as html_escape
from pyspark import copy_func, since, _NoValue
from pyspark.rdd import RDD, _load_from_socket, _local_iterator_from_socket
from pyspark.serializers import BatchedSerializer, PickleSerializer, \
UTF8Deserializer
from pyspark.storagelevel import StorageLevel
from pyspark.traceback_utils import SCCallSiteSync
from pyspark.sql.types import _parse_datatype_json_string
from pyspark.sql.column import Column, _to_seq, _to_list, _to_java_column
from pyspark.sql.readwriter import DataFrameWriter, DataFrameWriterV2
from pyspark.sql.streaming import DataStreamWriter
from pyspark.sql.types import StructType, StructField, StringType, IntegerType
from pyspark.sql.pandas.conversion import PandasConversionMixin
from pyspark.sql.pandas.map_ops import PandasMapOpsMixin
__all__ = ["DataFrame", "DataFrameNaFunctions", "DataFrameStatFunctions"]
class DataFrame(PandasMapOpsMixin, PandasConversionMixin):
"""A distributed collection of data grouped into named columns.
A :class:`DataFrame` is equivalent to a relational table in Spark SQL,
and can be created using various functions in :class:`SparkSession`::
people = spark.read.parquet("...")
Once created, it can be manipulated using the various domain-specific-language
(DSL) functions defined in: :class:`DataFrame`, :class:`Column`.
To select a column from the :class:`DataFrame`, use the apply method::
ageCol = people.age
A more concrete example::
# To create DataFrame using SparkSession
people = spark.read.parquet("...")
department = spark.read.parquet("...")
people.filter(people.age > 30).join(department, people.deptId == department.id) \\
.groupBy(department.name, "gender").agg({"salary": "avg", "age": "max"})
.. versionadded:: 1.3.0
"""
def __init__(self, jdf, sql_ctx):
self._jdf = jdf
self.sql_ctx = sql_ctx
self._sc = sql_ctx and sql_ctx._sc
self.is_cached = False
self._schema = None # initialized lazily
self._lazy_rdd = None
# Check whether _repr_html is supported or not; we use it to avoid calling _jdf twice
# by __repr__ and _repr_html_ while eager evaluation is enabled.
self._support_repr_html = False
@property
@since(1.3)
def rdd(self):
"""Returns the content as an :class:`pyspark.RDD` of :class:`Row`.
"""
if self._lazy_rdd is None:
jrdd = self._jdf.javaToPython()
self._lazy_rdd = RDD(jrdd, self.sql_ctx._sc, BatchedSerializer(PickleSerializer()))
return self._lazy_rdd
@property
@since("1.3.1")
def na(self):
"""Returns a :class:`DataFrameNaFunctions` for handling missing values.
"""
return DataFrameNaFunctions(self)
@property
@since(1.4)
def stat(self):
"""Returns a :class:`DataFrameStatFunctions` for statistic functions.
"""
return DataFrameStatFunctions(self)
def toJSON(self, use_unicode=True):
"""Converts a :class:`DataFrame` into a :class:`RDD` of string.
Each row is turned into a JSON document as one element in the returned RDD.
.. versionadded:: 1.3.0
Examples
--------
>>> df.toJSON().first()
'{"age":2,"name":"Alice"}'
"""
rdd = self._jdf.toJSON()
return RDD(rdd.toJavaRDD(), self._sc, UTF8Deserializer(use_unicode))
def registerTempTable(self, name):
"""Registers this DataFrame as a temporary table using the given name.
The lifetime of this temporary table is tied to the :class:`SparkSession`
that was used to create this :class:`DataFrame`.
.. versionadded:: 1.3.0
.. deprecated:: 2.0.0
Use :meth:`DataFrame.createOrReplaceTempView` instead.
Examples
--------
>>> df.registerTempTable("people")
>>> df2 = spark.sql("select * from people")
>>> sorted(df.collect()) == sorted(df2.collect())
True
>>> spark.catalog.dropTempView("people")
"""
warnings.warn(
"Deprecated in 2.0, use createOrReplaceTempView instead.",
FutureWarning
)
self._jdf.createOrReplaceTempView(name)
def createTempView(self, name):
"""Creates a local temporary view with this :class:`DataFrame`.
The lifetime of this temporary table is tied to the :class:`SparkSession`
that was used to create this :class:`DataFrame`.
throws :class:`TempTableAlreadyExistsException`, if the view name already exists in the
catalog.
.. versionadded:: 2.0.0
Examples
--------
>>> df.createTempView("people")
>>> df2 = spark.sql("select * from people")
>>> sorted(df.collect()) == sorted(df2.collect())
True
>>> df.createTempView("people") # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
AnalysisException: u"Temporary table 'people' already exists;"
>>> spark.catalog.dropTempView("people")
"""
self._jdf.createTempView(name)
def createOrReplaceTempView(self, name):
"""Creates or replaces a local temporary view with this :class:`DataFrame`.
The lifetime of this temporary table is tied to the :class:`SparkSession`
that was used to create this :class:`DataFrame`.
.. versionadded:: 2.0.0
Examples
--------
>>> df.createOrReplaceTempView("people")
>>> df2 = df.filter(df.age > 3)
>>> df2.createOrReplaceTempView("people")
>>> df3 = spark.sql("select * from people")
>>> sorted(df3.collect()) == sorted(df2.collect())
True
>>> spark.catalog.dropTempView("people")
"""
self._jdf.createOrReplaceTempView(name)
def createGlobalTempView(self, name):
"""Creates a global temporary view with this :class:`DataFrame`.
The lifetime of this temporary view is tied to this Spark application.
throws :class:`TempTableAlreadyExistsException`, if the view name already exists in the
catalog.
.. versionadded:: 2.1.0
Examples
--------
>>> df.createGlobalTempView("people")
>>> df2 = spark.sql("select * from global_temp.people")
>>> sorted(df.collect()) == sorted(df2.collect())
True
>>> df.createGlobalTempView("people") # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
AnalysisException: u"Temporary table 'people' already exists;"
>>> spark.catalog.dropGlobalTempView("people")
"""
self._jdf.createGlobalTempView(name)
def createOrReplaceGlobalTempView(self, name):
"""Creates or replaces a global temporary view using the given name.
The lifetime of this temporary view is tied to this Spark application.
.. versionadded:: 2.2.0
Examples
--------
>>> df.createOrReplaceGlobalTempView("people")
>>> df2 = df.filter(df.age > 3)
>>> df2.createOrReplaceGlobalTempView("people")
>>> df3 = spark.sql("select * from global_temp.people")
>>> sorted(df3.collect()) == sorted(df2.collect())
True
>>> spark.catalog.dropGlobalTempView("people")
"""
self._jdf.createOrReplaceGlobalTempView(name)
@property
def write(self):
"""
Interface for saving the content of the non-streaming :class:`DataFrame` out into external
storage.
.. versionadded:: 1.4.0
Returns
-------
:class:`DataFrameWriter`
"""
return DataFrameWriter(self)
@property
def writeStream(self):
"""
Interface for saving the content of the streaming :class:`DataFrame` out into external
storage.
.. versionadded:: 2.0.0
Notes
-----
This API is evolving.
Returns
-------
:class:`DataStreamWriter`
"""
return DataStreamWriter(self)
@property
def schema(self):
"""Returns the schema of this :class:`DataFrame` as a :class:`pyspark.sql.types.StructType`.
.. versionadded:: 1.3.0
Examples
--------
>>> df.schema
StructType(List(StructField(age,IntegerType,true),StructField(name,StringType,true)))
"""
if self._schema is None:
try:
self._schema = _parse_datatype_json_string(self._jdf.schema().json())
except Exception as e:
raise ValueError(
"Unable to parse datatype from schema. %s" % e) from e
return self._schema
def printSchema(self):
"""Prints out the schema in the tree format.
.. versionadded:: 1.3.0
Examples
--------
>>> df.printSchema()
root
|-- age: integer (nullable = true)
|-- name: string (nullable = true)
<BLANKLINE>
"""
print(self._jdf.schema().treeString())
def explain(self, extended=None, mode=None):
"""Prints the (logical and physical) plans to the console for debugging purpose.
.. versionadded:: 1.3.0
Parameters
----------
extended : bool, optional
default ``False``. If ``False``, prints only the physical plan.
When this is a string without specifying the ``mode``, it works as the mode is
specified.
mode : str, optional
specifies the expected output format of plans.
* ``simple``: Print only a physical plan.
* ``extended``: Print both logical and physical plans.
* ``codegen``: Print a physical plan and generated codes if they are available.
* ``cost``: Print a logical plan and statistics if they are available.
* ``formatted``: Split explain output into two sections: a physical plan outline \
and node details.
.. versionchanged:: 3.0.0
Added optional argument `mode` to specify the expected output format of plans.
Examples
--------
>>> df.explain()
== Physical Plan ==
*(1) Scan ExistingRDD[age#0,name#1]
>>> df.explain(True)
== Parsed Logical Plan ==
...
== Analyzed Logical Plan ==
...
== Optimized Logical Plan ==
...
== Physical Plan ==
...
>>> df.explain(mode="formatted")
== Physical Plan ==
* Scan ExistingRDD (1)
(1) Scan ExistingRDD [codegen id : 1]
Output [2]: [age#0, name#1]
...
>>> df.explain("cost")
== Optimized Logical Plan ==
...Statistics...
...
"""
if extended is not None and mode is not None:
raise ValueError("extended and mode should not be set together.")
# For the no argument case: df.explain()
is_no_argument = extended is None and mode is None
# For the cases below:
# explain(True)
# explain(extended=False)
is_extended_case = isinstance(extended, bool) and mode is None
# For the case when extended is mode:
# df.explain("formatted")
is_extended_as_mode = isinstance(extended, str) and mode is None
# For the mode specified:
# df.explain(mode="formatted")
is_mode_case = extended is None and isinstance(mode, str)
if not (is_no_argument or is_extended_case or is_extended_as_mode or is_mode_case):
argtypes = [
str(type(arg)) for arg in [extended, mode] if arg is not None]
raise TypeError(
"extended (optional) and mode (optional) should be a string "
"and bool; however, got [%s]." % ", ".join(argtypes))
# Sets an explain mode depending on a given argument
if is_no_argument:
explain_mode = "simple"
elif is_extended_case:
explain_mode = "extended" if extended else "simple"
elif is_mode_case:
explain_mode = mode
elif is_extended_as_mode:
explain_mode = extended
print(self._sc._jvm.PythonSQLUtils.explainString(self._jdf.queryExecution(), explain_mode))
def exceptAll(self, other):
"""Return a new :class:`DataFrame` containing rows in this :class:`DataFrame` but
not in another :class:`DataFrame` while preserving duplicates.
This is equivalent to `EXCEPT ALL` in SQL.
As standard in SQL, this function resolves columns by position (not by name).
.. versionadded:: 2.4.0
Examples
--------
>>> df1 = spark.createDataFrame(
... [("a", 1), ("a", 1), ("a", 1), ("a", 2), ("b", 3), ("c", 4)], ["C1", "C2"])
>>> df2 = spark.createDataFrame([("a", 1), ("b", 3)], ["C1", "C2"])
>>> df1.exceptAll(df2).show()
+---+---+
| C1| C2|
+---+---+
| a| 1|
| a| 1|
| a| 2|
| c| 4|
+---+---+
"""
return DataFrame(self._jdf.exceptAll(other._jdf), self.sql_ctx)
@since(1.3)
def isLocal(self):
"""Returns ``True`` if the :func:`collect` and :func:`take` methods can be run locally
(without any Spark executors).
"""
return self._jdf.isLocal()
@property
def isStreaming(self):
"""Returns ``True`` if this :class:`Dataset` contains one or more sources that continuously
return data as it arrives. A :class:`Dataset` that reads data from a streaming source
must be executed as a :class:`StreamingQuery` using the :func:`start` method in
:class:`DataStreamWriter`. Methods that return a single answer, (e.g., :func:`count` or
:func:`collect`) will throw an :class:`AnalysisException` when there is a streaming
source present.
.. versionadded:: 2.0.0
Notes
-----
This API is evolving.
"""
return self._jdf.isStreaming()
def show(self, n=20, truncate=True, vertical=False):
"""Prints the first ``n`` rows to the console.
.. versionadded:: 1.3.0
Parameters
----------
n : int, optional
Number of rows to show.
truncate : bool or int, optional
If set to ``True``, truncate strings longer than 20 chars by default.
If set to a number greater than one, truncates long strings to length ``truncate``
and aligns cells right.
vertical : bool, optional
If set to ``True``, print output rows vertically (one line
per column value).
Examples
--------
>>> df
DataFrame[age: int, name: string]
>>> df.show()
+---+-----+
|age| name|
+---+-----+
| 2|Alice|
| 5| Bob|
+---+-----+
>>> df.show(truncate=3)
+---+----+
|age|name|
+---+----+
| 2| Ali|
| 5| Bob|
+---+----+
>>> df.show(vertical=True)
-RECORD 0-----
age | 2
name | Alice
-RECORD 1-----
age | 5
name | Bob
"""
if not isinstance(n, int) or isinstance(n, bool):
raise TypeError("Parameter 'n' (number of rows) must be an int")
if not isinstance(vertical, bool):
raise TypeError("Parameter 'vertical' must be a bool")
if isinstance(truncate, bool) and truncate:
print(self._jdf.showString(n, 20, vertical))
else:
try:
int_truncate = int(truncate)
except ValueError:
raise TypeError(
"Parameter 'truncate={}' should be either bool or int.".format(truncate))
print(self._jdf.showString(n, int_truncate, vertical))
def __repr__(self):
if not self._support_repr_html and self.sql_ctx._conf.isReplEagerEvalEnabled():
vertical = False
return self._jdf.showString(
self.sql_ctx._conf.replEagerEvalMaxNumRows(),
self.sql_ctx._conf.replEagerEvalTruncate(), vertical)
else:
return "DataFrame[%s]" % (", ".join("%s: %s" % c for c in self.dtypes))
def _repr_html_(self):
"""Returns a :class:`DataFrame` with html code when you enabled eager evaluation
by 'spark.sql.repl.eagerEval.enabled', this only called by REPL you are
using support eager evaluation with HTML.
"""
if not self._support_repr_html:
self._support_repr_html = True
if self.sql_ctx._conf.isReplEagerEvalEnabled():
max_num_rows = max(self.sql_ctx._conf.replEagerEvalMaxNumRows(), 0)
sock_info = self._jdf.getRowsToPython(
max_num_rows, self.sql_ctx._conf.replEagerEvalTruncate())
rows = list(_load_from_socket(sock_info, BatchedSerializer(PickleSerializer())))
head = rows[0]
row_data = rows[1:]
has_more_data = len(row_data) > max_num_rows
row_data = row_data[:max_num_rows]
html = "<table border='1'>\n"
# generate table head
html += "<tr><th>%s</th></tr>\n" % "</th><th>".join(map(lambda x: html_escape(x), head))
# generate table rows
for row in row_data:
html += "<tr><td>%s</td></tr>\n" % "</td><td>".join(
map(lambda x: html_escape(x), row))
html += "</table>\n"
if has_more_data:
html += "only showing top %d %s\n" % (
max_num_rows, "row" if max_num_rows == 1 else "rows")
return html
else:
return None
def checkpoint(self, eager=True):
"""Returns a checkpointed version of this Dataset. Checkpointing can be used to truncate the
logical plan of this :class:`DataFrame`, which is especially useful in iterative algorithms
where the plan may grow exponentially. It will be saved to files inside the checkpoint
directory set with :meth:`SparkContext.setCheckpointDir`.
.. versionadded:: 2.1.0
Parameters
----------
eager : bool, optional
Whether to checkpoint this :class:`DataFrame` immediately
Notes
-----
This API is experimental.
"""
jdf = self._jdf.checkpoint(eager)
return DataFrame(jdf, self.sql_ctx)
def localCheckpoint(self, eager=True):
"""Returns a locally checkpointed version of this Dataset. Checkpointing can be used to
truncate the logical plan of this :class:`DataFrame`, which is especially useful in
iterative algorithms where the plan may grow exponentially. Local checkpoints are
stored in the executors using the caching subsystem and therefore they are not reliable.
.. versionadded:: 2.3.0
Parameters
----------
eager : bool, optional
Whether to checkpoint this :class:`DataFrame` immediately
Notes
-----
This API is experimental.
"""
jdf = self._jdf.localCheckpoint(eager)
return DataFrame(jdf, self.sql_ctx)
def withWatermark(self, eventTime, delayThreshold):
"""Defines an event time watermark for this :class:`DataFrame`. A watermark tracks a point
in time before which we assume no more late data is going to arrive.
Spark will use this watermark for several purposes:
- To know when a given time window aggregation can be finalized and thus can be emitted
when using output modes that do not allow updates.
- To minimize the amount of state that we need to keep for on-going aggregations.
The current watermark is computed by looking at the `MAX(eventTime)` seen across
all of the partitions in the query minus a user specified `delayThreshold`. Due to the cost
of coordinating this value across partitions, the actual watermark used is only guaranteed
to be at least `delayThreshold` behind the actual event time. In some cases we may still
process records that arrive more than `delayThreshold` late.
.. versionadded:: 2.1.0
Parameters
----------
eventTime : str
the name of the column that contains the event time of the row.
delayThreshold : str
the minimum delay to wait for late data to arrive, relative to the
latest record that has been processed, given in the form of an interval
(e.g. "1 minute" or "5 hours").
Notes
-----
This API is evolving.
>>> from pyspark.sql.functions import timestamp_seconds
>>> sdf.select(
... 'name',
... timestamp_seconds(sdf.time).alias('time')).withWatermark('time', '10 minutes')
DataFrame[name: string, time: timestamp]
"""
if not eventTime or type(eventTime) is not str:
raise TypeError("eventTime should be provided as a string")
if not delayThreshold or type(delayThreshold) is not str:
raise TypeError("delayThreshold should be provided as a string interval")
jdf = self._jdf.withWatermark(eventTime, delayThreshold)
return DataFrame(jdf, self.sql_ctx)
def hint(self, name, *parameters):
"""Specifies some hint on the current :class:`DataFrame`.
.. versionadded:: 2.2.0
Parameters
----------
name : str
A name of the hint.
parameters : str, list, float or int
Optional parameters.
Returns
-------
:class:`DataFrame`
Examples
--------
>>> df.join(df2.hint("broadcast"), "name").show()
+----+---+------+
|name|age|height|
+----+---+------+
| Bob| 5| 85|
+----+---+------+
"""
if len(parameters) == 1 and isinstance(parameters[0], list):
parameters = parameters[0]
if not isinstance(name, str):
raise TypeError("name should be provided as str, got {0}".format(type(name)))
allowed_types = (str, list, float, int)
for p in parameters:
if not isinstance(p, allowed_types):
raise TypeError(
"all parameters should be in {0}, got {1} of type {2}".format(
allowed_types, p, type(p)))
jdf = self._jdf.hint(name, self._jseq(parameters))
return DataFrame(jdf, self.sql_ctx)
def count(self):
"""Returns the number of rows in this :class:`DataFrame`.
.. versionadded:: 1.3.0
Examples
--------
>>> df.count()
2
"""
return int(self._jdf.count())
def collect(self):
"""Returns all the records as a list of :class:`Row`.
.. versionadded:: 1.3.0
Examples
--------
>>> df.collect()
[Row(age=2, name='Alice'), Row(age=5, name='Bob')]
"""
with SCCallSiteSync(self._sc) as css:
sock_info = self._jdf.collectToPython()
return list(_load_from_socket(sock_info, BatchedSerializer(PickleSerializer())))
def toLocalIterator(self, prefetchPartitions=False):
"""
Returns an iterator that contains all of the rows in this :class:`DataFrame`.
The iterator will consume as much memory as the largest partition in this
:class:`DataFrame`. With prefetch it may consume up to the memory of the 2 largest
partitions.
.. versionadded:: 2.0.0
Parameters
----------
prefetchPartitions : bool, optional
If Spark should pre-fetch the next partition before it is needed.
Examples
--------
>>> list(df.toLocalIterator())
[Row(age=2, name='Alice'), Row(age=5, name='Bob')]
"""
with SCCallSiteSync(self._sc) as css:
sock_info = self._jdf.toPythonIterator(prefetchPartitions)
return _local_iterator_from_socket(sock_info, BatchedSerializer(PickleSerializer()))
def limit(self, num):
"""Limits the result count to the number specified.
.. versionadded:: 1.3.0
Examples
--------
>>> df.limit(1).collect()
[Row(age=2, name='Alice')]
>>> df.limit(0).collect()
[]
"""
jdf = self._jdf.limit(num)
return DataFrame(jdf, self.sql_ctx)
def take(self, num):
"""Returns the first ``num`` rows as a :class:`list` of :class:`Row`.
.. versionadded:: 1.3.0
Examples
--------
>>> df.take(2)
[Row(age=2, name='Alice'), Row(age=5, name='Bob')]
"""
return self.limit(num).collect()
def tail(self, num):
"""
Returns the last ``num`` rows as a :class:`list` of :class:`Row`.
Running tail requires moving data into the application's driver process, and doing so with
a very large ``num`` can crash the driver process with OutOfMemoryError.
.. versionadded:: 3.0.0
Examples
--------
>>> df.tail(1)
[Row(age=5, name='Bob')]
"""
with SCCallSiteSync(self._sc):
sock_info = self._jdf.tailToPython(num)
return list(_load_from_socket(sock_info, BatchedSerializer(PickleSerializer())))
def foreach(self, f):
"""Applies the ``f`` function to all :class:`Row` of this :class:`DataFrame`.
This is a shorthand for ``df.rdd.foreach()``.
.. versionadded:: 1.3.0
Examples
--------
>>> def f(person):
... print(person.name)
>>> df.foreach(f)
"""
self.rdd.foreach(f)
def foreachPartition(self, f):
"""Applies the ``f`` function to each partition of this :class:`DataFrame`.
This is a shorthand for ``df.rdd.foreachPartition()``.
.. versionadded:: 1.3.0
Examples
--------
>>> def f(people):
... for person in people:
... print(person.name)
>>> df.foreachPartition(f)
"""
self.rdd.foreachPartition(f)
def cache(self):
"""Persists the :class:`DataFrame` with the default storage level (`MEMORY_AND_DISK`).
.. versionadded:: 1.3.0
Notes
-----
The default storage level has changed to `MEMORY_AND_DISK` to match Scala in 2.0.
"""
self.is_cached = True
self._jdf.cache()
return self
def persist(self, storageLevel=StorageLevel.MEMORY_AND_DISK_DESER):
"""Sets the storage level to persist the contents of the :class:`DataFrame` across
operations after the first time it is computed. This can only be used to assign
a new storage level if the :class:`DataFrame` does not have a storage level set yet.
If no storage level is specified, it defaults to (`MEMORY_AND_DISK_DESER`)
.. versionadded:: 1.3.0
Notes
-----
The default storage level has changed to `MEMORY_AND_DISK_DESER` to match Scala in 3.0.
"""
self.is_cached = True
javaStorageLevel = self._sc._getJavaStorageLevel(storageLevel)
self._jdf.persist(javaStorageLevel)
return self
@property
def storageLevel(self):
"""Get the :class:`DataFrame`'s current storage level.
.. versionadded:: 2.1.0
Examples
--------
>>> df.storageLevel
StorageLevel(False, False, False, False, 1)
>>> df.cache().storageLevel
StorageLevel(True, True, False, True, 1)
>>> df2.persist(StorageLevel.DISK_ONLY_2).storageLevel
StorageLevel(True, False, False, False, 2)
"""
java_storage_level = self._jdf.storageLevel()
storage_level = StorageLevel(java_storage_level.useDisk(),
java_storage_level.useMemory(),
java_storage_level.useOffHeap(),
java_storage_level.deserialized(),
java_storage_level.replication())
return storage_level
def unpersist(self, blocking=False):
"""Marks the :class:`DataFrame` as non-persistent, and remove all blocks for it from
memory and disk.
.. versionadded:: 1.3.0
Notes
-----
`blocking` default has changed to ``False`` to match Scala in 2.0.
"""
self.is_cached = False
self._jdf.unpersist(blocking)
return self
def coalesce(self, numPartitions):
"""
Returns a new :class:`DataFrame` that has exactly `numPartitions` partitions.
Similar to coalesce defined on an :class:`RDD`, this operation results in a
narrow dependency, e.g. if you go from 1000 partitions to 100 partitions,
there will not be a shuffle, instead each of the 100 new partitions will
claim 10 of the current partitions. If a larger number of partitions is requested,
it will stay at the current number of partitions.
However, if you're doing a drastic coalesce, e.g. to numPartitions = 1,
this may result in your computation taking place on fewer nodes than
you like (e.g. one node in the case of numPartitions = 1). To avoid this,
you can call repartition(). This will add a shuffle step, but means the
current upstream partitions will be executed in parallel (per whatever
the current partitioning is).
.. versionadded:: 1.4.0
Parameters
----------
numPartitions : int
specify the target number of partitions
Examples
--------
>>> df.coalesce(1).rdd.getNumPartitions()
1
"""
return DataFrame(self._jdf.coalesce(numPartitions), self.sql_ctx)
def repartition(self, numPartitions, *cols):
"""
Returns a new :class:`DataFrame` partitioned by the given partitioning expressions. The
resulting :class:`DataFrame` is hash partitioned.
.. versionadded:: 1.3.0
Parameters
----------
numPartitions : int
can be an int to specify the target number of partitions or a Column.
If it is a Column, it will be used as the first partitioning column. If not specified,
the default number of partitions is used.
cols : str or :class:`Column`
partitioning columns.
.. versionchanged:: 1.6
Added optional arguments to specify the partitioning columns. Also made numPartitions
optional if partitioning columns are specified.
Examples
--------
>>> df.repartition(10).rdd.getNumPartitions()
10
>>> data = df.union(df).repartition("age")
>>> data.show()
+---+-----+
|age| name|
+---+-----+
| 2|Alice|
| 5| Bob|
| 2|Alice|
| 5| Bob|
+---+-----+
>>> data = data.repartition(7, "age")
>>> data.show()
+---+-----+
|age| name|
+---+-----+
| 2|Alice|
| 5| Bob|
| 2|Alice|
| 5| Bob|
+---+-----+
>>> data.rdd.getNumPartitions()
7
>>> data = data.repartition(3, "name", "age")
>>> data.show()
+---+-----+
|age| name|
+---+-----+
| 5| Bob|
| 5| Bob|
| 2|Alice|
| 2|Alice|
+---+-----+
"""
if isinstance(numPartitions, int):
if len(cols) == 0:
return DataFrame(self._jdf.repartition(numPartitions), self.sql_ctx)
else:
return DataFrame(
self._jdf.repartition(numPartitions, self._jcols(*cols)), self.sql_ctx)
elif isinstance(numPartitions, (str, Column)):
cols = (numPartitions, ) + cols
return DataFrame(self._jdf.repartition(self._jcols(*cols)), self.sql_ctx)
else:
raise TypeError("numPartitions should be an int or Column")
def repartitionByRange(self, numPartitions, *cols):
"""
Returns a new :class:`DataFrame` partitioned by the given partitioning expressions. The
resulting :class:`DataFrame` is range partitioned.
At least one partition-by expression must be specified.
When no explicit sort order is specified, "ascending nulls first" is assumed.
.. versionadded:: 2.4.0
Parameters
----------
numPartitions : int
can be an int to specify the target number of partitions or a Column.
If it is a Column, it will be used as the first partitioning column. If not specified,
the default number of partitions is used.
cols : str or :class:`Column`
partitioning columns.
Notes
-----
Due to performance reasons this method uses sampling to estimate the ranges.
Hence, the output may not be consistent, since sampling can return different values.
The sample size can be controlled by the config
`spark.sql.execution.rangeExchange.sampleSizePerPartition`.
Examples
--------
>>> df.repartitionByRange(2, "age").rdd.getNumPartitions()
2
>>> df.show()
+---+-----+
|age| name|
+---+-----+
| 2|Alice|
| 5| Bob|
+---+-----+
>>> df.repartitionByRange(1, "age").rdd.getNumPartitions()
1
>>> data = df.repartitionByRange("age")
>>> df.show()
+---+-----+
|age| name|
+---+-----+
| 2|Alice|
| 5| Bob|
+---+-----+
"""
if isinstance(numPartitions, int):
if len(cols) == 0:
raise ValueError("At least one partition-by expression must be specified.")
else:
return DataFrame(
self._jdf.repartitionByRange(numPartitions, self._jcols(*cols)), self.sql_ctx)
elif isinstance(numPartitions, (str, Column)):
cols = (numPartitions,) + cols
return DataFrame(self._jdf.repartitionByRange(self._jcols(*cols)), self.sql_ctx)
else:
raise TypeError("numPartitions should be an int, string or Column")
def distinct(self):
"""Returns a new :class:`DataFrame` containing the distinct rows in this :class:`DataFrame`.
.. versionadded:: 1.3.0
Examples
--------
>>> df.distinct().count()
2
"""
return DataFrame(self._jdf.distinct(), self.sql_ctx)
def sample(self, withReplacement=None, fraction=None, seed=None):
"""Returns a sampled subset of this :class:`DataFrame`.
.. versionadded:: 1.3.0
Parameters
----------
withReplacement : bool, optional
Sample with replacement or not (default ``False``).
fraction : float, optional
Fraction of rows to generate, range [0.0, 1.0].
seed : int, optional
Seed for sampling (default a random seed).
Notes
-----
This is not guaranteed to provide exactly the fraction specified of the total
count of the given :class:`DataFrame`.
`fraction` is required; `withReplacement` and `seed` are optional.
Examples
--------
>>> df = spark.range(10)
>>> df.sample(0.5, 3).count()
7
>>> df.sample(fraction=0.5, seed=3).count()
7
>>> df.sample(withReplacement=True, fraction=0.5, seed=3).count()
1
>>> df.sample(1.0).count()
10
>>> df.sample(fraction=1.0).count()
10
>>> df.sample(False, fraction=1.0).count()
10
"""
# For the cases below:
# sample(True, 0.5 [, seed])
# sample(True, fraction=0.5 [, seed])
# sample(withReplacement=False, fraction=0.5 [, seed])
is_withReplacement_set = \
type(withReplacement) == bool and isinstance(fraction, float)
# For the case below:
# sample(fraction=0.5 [, seed])
is_withReplacement_omitted_kwargs = \
withReplacement is None and isinstance(fraction, float)
# For the case below:
# sample(0.5 [, seed])
is_withReplacement_omitted_args = isinstance(withReplacement, float)
if not (is_withReplacement_set
or is_withReplacement_omitted_kwargs
or is_withReplacement_omitted_args):
argtypes = [
str(type(arg)) for arg in [withReplacement, fraction, seed] if arg is not None]
raise TypeError(
"withReplacement (optional), fraction (required) and seed (optional)"
" should be a bool, float and number; however, "
"got [%s]." % ", ".join(argtypes))
if is_withReplacement_omitted_args:
if fraction is not None:
seed = fraction
fraction = withReplacement
withReplacement = None
seed = int(seed) if seed is not None else None
args = [arg for arg in [withReplacement, fraction, seed] if arg is not None]
jdf = self._jdf.sample(*args)
return DataFrame(jdf, self.sql_ctx)
def sampleBy(self, col, fractions, seed=None):
"""
Returns a stratified sample without replacement based on the
fraction given on each stratum.
.. versionadded:: 1.5.0
Parameters
----------
col : :class:`Column` or str
column that defines strata
.. versionchanged:: 3.0
Added sampling by a column of :class:`Column`
fractions : dict
sampling fraction for each stratum. If a stratum is not
specified, we treat its fraction as zero.
seed : int, optional
random seed
Returns
-------
a new :class:`DataFrame` that represents the stratified sample
Examples
--------
>>> from pyspark.sql.functions import col
>>> dataset = sqlContext.range(0, 100).select((col("id") % 3).alias("key"))
>>> sampled = dataset.sampleBy("key", fractions={0: 0.1, 1: 0.2}, seed=0)
>>> sampled.groupBy("key").count().orderBy("key").show()
+---+-----+
|key|count|
+---+-----+
| 0| 3|
| 1| 6|
+---+-----+
>>> dataset.sampleBy(col("key"), fractions={2: 1.0}, seed=0).count()
33
"""
if isinstance(col, str):
col = Column(col)
elif not isinstance(col, Column):
raise TypeError("col must be a string or a column, but got %r" % type(col))
if not isinstance(fractions, dict):
raise TypeError("fractions must be a dict but got %r" % type(fractions))
for k, v in fractions.items():
if not isinstance(k, (float, int, str)):
raise TypeError("key must be float, int, or string, but got %r" % type(k))
fractions[k] = float(v)
col = col._jc
seed = seed if seed is not None else random.randint(0, sys.maxsize)
return DataFrame(self._jdf.stat().sampleBy(col, self._jmap(fractions), seed), self.sql_ctx)
def randomSplit(self, weights, seed=None):
"""Randomly splits this :class:`DataFrame` with the provided weights.
.. versionadded:: 1.4.0
Parameters
----------
weights : list
list of doubles as weights with which to split the :class:`DataFrame`.
Weights will be normalized if they don't sum up to 1.0.
seed : int, optional
The seed for sampling.
Examples
--------
>>> splits = df4.randomSplit([1.0, 2.0], 24)
>>> splits[0].count()
2
>>> splits[1].count()
2
"""
for w in weights:
if w < 0.0:
raise ValueError("Weights must be positive. Found weight value: %s" % w)
seed = seed if seed is not None else random.randint(0, sys.maxsize)
rdd_array = self._jdf.randomSplit(_to_list(self.sql_ctx._sc, weights), int(seed))
return [DataFrame(rdd, self.sql_ctx) for rdd in rdd_array]
@property
def dtypes(self):
"""Returns all column names and their data types as a list.
.. versionadded:: 1.3.0
Examples
--------
>>> df.dtypes
[('age', 'int'), ('name', 'string')]
"""
return [(str(f.name), f.dataType.simpleString()) for f in self.schema.fields]
@property
def columns(self):
"""Returns all column names as a list.
.. versionadded:: 1.3.0
Examples
--------
>>> df.columns
['age', 'name']
"""
return [f.name for f in self.schema.fields]
def colRegex(self, colName):
"""
Selects column based on the column name specified as a regex and returns it
as :class:`Column`.
.. versionadded:: 2.3.0
Parameters
----------
colName : str
string, column name specified as a regex.
Examples
--------
>>> df = spark.createDataFrame([("a", 1), ("b", 2), ("c", 3)], ["Col1", "Col2"])
>>> df.select(df.colRegex("`(Col1)?+.+`")).show()
+----+
|Col2|
+----+
| 1|
| 2|
| 3|
+----+
"""
if not isinstance(colName, str):
raise TypeError("colName should be provided as string")
jc = self._jdf.colRegex(colName)
return Column(jc)
def alias(self, alias):
"""Returns a new :class:`DataFrame` with an alias set.
.. versionadded:: 1.3.0
Parameters
----------
alias : str
an alias name to be set for the :class:`DataFrame`.
Examples
--------
>>> from pyspark.sql.functions import *
>>> df_as1 = df.alias("df_as1")
>>> df_as2 = df.alias("df_as2")
>>> joined_df = df_as1.join(df_as2, col("df_as1.name") == col("df_as2.name"), 'inner')
>>> joined_df.select("df_as1.name", "df_as2.name", "df_as2.age") \
.sort(desc("df_as1.name")).collect()
[Row(name='Bob', name='Bob', age=5), Row(name='Alice', name='Alice', age=2)]
"""
assert isinstance(alias, str), "alias should be a string"
return DataFrame(getattr(self._jdf, "as")(alias), self.sql_ctx)
def crossJoin(self, other):
"""Returns the cartesian product with another :class:`DataFrame`.
.. versionadded:: 2.1.0
Parameters
----------
other : :class:`DataFrame`
Right side of the cartesian product.
Examples
--------
>>> df.select("age", "name").collect()
[Row(age=2, name='Alice'), Row(age=5, name='Bob')]
>>> df2.select("name", "height").collect()
[Row(name='Tom', height=80), Row(name='Bob', height=85)]
>>> df.crossJoin(df2.select("height")).select("age", "name", "height").collect()
[Row(age=2, name='Alice', height=80), Row(age=2, name='Alice', height=85),
Row(age=5, name='Bob', height=80), Row(age=5, name='Bob', height=85)]
"""
jdf = self._jdf.crossJoin(other._jdf)
return DataFrame(jdf, self.sql_ctx)
def join(self, other, on=None, how=None):
"""Joins with another :class:`DataFrame`, using the given join expression.
.. versionadded:: 1.3.0
Parameters
----------
other : :class:`DataFrame`
Right side of the join
on : str, list or :class:`Column`, optional
a string for the join column name, a list of column names,
a join expression (Column), or a list of Columns.
If `on` is a string or a list of strings indicating the name of the join column(s),
the column(s) must exist on both sides, and this performs an equi-join.
how : str, optional
default ``inner``. Must be one of: ``inner``, ``cross``, ``outer``,
``full``, ``fullouter``, ``full_outer``, ``left``, ``leftouter``, ``left_outer``,
``right``, ``rightouter``, ``right_outer``, ``semi``, ``leftsemi``, ``left_semi``,
``anti``, ``leftanti`` and ``left_anti``.
Examples
--------
The following performs a full outer join between ``df1`` and ``df2``.
>>> from pyspark.sql.functions import desc
>>> df.join(df2, df.name == df2.name, 'outer').select(df.name, df2.height) \
.sort(desc("name")).collect()
[Row(name='Bob', height=85), Row(name='Alice', height=None), Row(name=None, height=80)]
>>> df.join(df2, 'name', 'outer').select('name', 'height').sort(desc("name")).collect()
[Row(name='Tom', height=80), Row(name='Bob', height=85), Row(name='Alice', height=None)]
>>> cond = [df.name == df3.name, df.age == df3.age]
>>> df.join(df3, cond, 'outer').select(df.name, df3.age).collect()
[Row(name='Alice', age=2), Row(name='Bob', age=5)]
>>> df.join(df2, 'name').select(df.name, df2.height).collect()
[Row(name='Bob', height=85)]
>>> df.join(df4, ['name', 'age']).select(df.name, df.age).collect()
[Row(name='Bob', age=5)]
"""
if on is not None and not isinstance(on, list):
on = [on]
if on is not None:
if isinstance(on[0], str):
on = self._jseq(on)
else:
assert isinstance(on[0], Column), "on should be Column or list of Column"
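# combine a list of Column join conditions into a single expression with logical AND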
on = reduce(lambda x, y: x.__and__(y), on)
on = on._jc
if on is None and how is None:
jdf = self._jdf.join(other._jdf)
else:
if how is None:
how = "inner"
if on is None:
on = self._jseq([])
assert isinstance(how, str), "how should be a string"
jdf = self._jdf.join(other._jdf, on, how)
return DataFrame(jdf, self.sql_ctx)
def sortWithinPartitions(self, *cols, **kwargs):
"""Returns a new :class:`DataFrame` with each partition sorted by the specified column(s).
.. versionadded:: 1.6.0
Parameters
----------
cols : str, list or :class:`Column`, optional
list of :class:`Column` or column names to sort by.
Other Parameters
----------------
ascending : bool or list, optional
boolean or list of boolean (default ``True``).
Sort ascending vs. descending. Specify list for multiple sort orders.
If a list is specified, length of the list must equal length of the `cols`.
Examples
--------
>>> df.sortWithinPartitions("age", ascending=False).show()
+---+-----+
|age| name|
+---+-----+
| 2|Alice|
| 5| Bob|
+---+-----+
"""
jdf = self._jdf.sortWithinPartitions(self._sort_cols(cols, kwargs))
return DataFrame(jdf, self.sql_ctx)
def sort(self, *cols, **kwargs):
"""Returns a new :class:`DataFrame` sorted by the specified column(s).
.. versionadded:: 1.3.0
Parameters
----------
cols : str, list, or :class:`Column`, optional
list of :class:`Column` or column names to sort by.
Other Parameters
----------------
ascending : bool or list, optional
boolean or list of boolean (default ``True``).
Sort ascending vs. descending. Specify list for multiple sort orders.
If a list is specified, length of the list must equal length of the `cols`.
Examples
--------
>>> df.sort(df.age.desc()).collect()
[Row(age=5, name='Bob'), Row(age=2, name='Alice')]
>>> df.sort("age", ascending=False).collect()
[Row(age=5, name='Bob'), Row(age=2, name='Alice')]
>>> df.orderBy(df.age.desc()).collect()
[Row(age=5, name='Bob'), Row(age=2, name='Alice')]
>>> from pyspark.sql.functions import *
>>> df.sort(asc("age")).collect()
[Row(age=2, name='Alice'), Row(age=5, name='Bob')]
>>> df.orderBy(desc("age"), "name").collect()
[Row(age=5, name='Bob'), Row(age=2, name='Alice')]
>>> df.orderBy(["age", "name"], ascending=[0, 1]).collect()
[Row(age=5, name='Bob'), Row(age=2, name='Alice')]
"""
jdf = self._jdf.sort(self._sort_cols(cols, kwargs))
return DataFrame(jdf, self.sql_ctx)
orderBy = sort
def _jseq(self, cols, converter=None):
"""Return a JVM Seq of Columns from a list of Column or names"""
return _to_seq(self.sql_ctx._sc, cols, converter)
def _jmap(self, jm):
"""Return a JVM Scala Map from a dict"""
return _to_scala_map(self.sql_ctx._sc, jm)
def _jcols(self, *cols):
"""Return a JVM Seq of Columns from a list of Column or column names
If `cols` has only one list in it, cols[0] will be used as the list.
"""
if len(cols) == 1 and isinstance(cols[0], list):
cols = cols[0]
return self._jseq(cols, _to_java_column)
def _sort_cols(self, cols, kwargs):
""" Return a JVM Seq of Columns that describes the sort order
"""
if not cols:
raise ValueError("should sort by at least one column")
if len(cols) == 1 and isinstance(cols[0], list):
cols = cols[0]
jcols = [_to_java_column(c) for c in cols]
ascending = kwargs.get('ascending', True)
if isinstance(ascending, (bool, int)):
if not ascending:
jcols = [jc.desc() for jc in jcols]
elif isinstance(ascending, list):
jcols = [jc if asc else jc.desc()
for asc, jc in zip(ascending, jcols)]
else:
raise TypeError("ascending can only be boolean or list, but got %s" % type(ascending))
return self._jseq(jcols)
def describe(self, *cols):
"""Computes basic statistics for numeric and string columns.
.. versionadded:: 1.3.1
This includes count, mean, stddev, min, and max. If no columns are
given, this function computes statistics for all numerical or string columns.
Notes
-----
This function is meant for exploratory data analysis, as we make no
guarantee about the backward compatibility of the schema of the resulting
:class:`DataFrame`.
Use summary for expanded statistics and control over which statistics to compute.
Examples
--------
>>> df.describe(['age']).show()
+-------+------------------+
|summary| age|
+-------+------------------+
| count| 2|
| mean| 3.5|
| stddev|2.1213203435596424|
| min| 2|
| max| 5|
+-------+------------------+
>>> df.describe().show()
+-------+------------------+-----+
|summary| age| name|
+-------+------------------+-----+
| count| 2| 2|
| mean| 3.5| null|
| stddev|2.1213203435596424| null|
| min| 2|Alice|
| max| 5| Bob|
+-------+------------------+-----+
See Also
--------
DataFrame.summary
"""
if len(cols) == 1 and isinstance(cols[0], list):
cols = cols[0]
jdf = self._jdf.describe(self._jseq(cols))
return DataFrame(jdf, self.sql_ctx)
def summary(self, *statistics):
"""Computes specified statistics for numeric and string columns. Available statistics are:
- count
- mean
- stddev
- min
- max
- arbitrary approximate percentiles specified as a percentage (e.g., 75%)
If no statistics are given, this function computes count, mean, stddev, min,
approximate quartiles (percentiles at 25%, 50%, and 75%), and max.
.. versionadded:: 2.3.0
Notes
-----
This function is meant for exploratory data analysis, as we make no
guarantee about the backward compatibility of the schema of the resulting
:class:`DataFrame`.
Examples
--------
>>> df.summary().show()
+-------+------------------+-----+
|summary| age| name|
+-------+------------------+-----+
| count| 2| 2|
| mean| 3.5| null|
| stddev|2.1213203435596424| null|
| min| 2|Alice|
| 25%| 2| null|
| 50%| 2| null|
| 75%| 5| null|
| max| 5| Bob|
+-------+------------------+-----+
>>> df.summary("count", "min", "25%", "75%", "max").show()
+-------+---+-----+
|summary|age| name|
+-------+---+-----+
| count| 2| 2|
| min| 2|Alice|
| 25%| 2| null|
| 75%| 5| null|
| max| 5| Bob|
+-------+---+-----+
To do a summary for specific columns, first select them:
>>> df.select("age", "name").summary("count").show()
+-------+---+----+
|summary|age|name|
+-------+---+----+
| count| 2| 2|
+-------+---+----+
See Also
--------
DataFrame.describe
"""
if len(statistics) == 1 and isinstance(statistics[0], list):
statistics = statistics[0]
jdf = self._jdf.summary(self._jseq(statistics))
return DataFrame(jdf, self.sql_ctx)
def head(self, n=None):
"""Returns the first ``n`` rows.
.. versionadded:: 1.3.0
Notes
-----
This method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
Parameters
----------
n : int, optional
default 1. Number of rows to return.
Returns
-------
If n is greater than 1, return a list of :class:`Row`.
If n is 1, return a single Row.
Examples
--------
>>> df.head()
Row(age=2, name='Alice')
>>> df.head(1)
[Row(age=2, name='Alice')]
"""
if n is None:
rs = self.head(1)
return rs[0] if rs else None
return self.take(n)
def first(self):
"""Returns the first row as a :class:`Row`.
.. versionadded:: 1.3.0
Examples
--------
>>> df.first()
Row(age=2, name='Alice')
"""
return self.head()
def __getitem__(self, item):
"""Returns the column as a :class:`Column`.
.. versionadded:: 1.3.0
Examples
--------
>>> df.select(df['age']).collect()
[Row(age=2), Row(age=5)]
>>> df[ ["name", "age"]].collect()
[Row(name='Alice', age=2), Row(name='Bob', age=5)]
>>> df[ df.age > 3 ].collect()
[Row(age=5, name='Bob')]
>>> df[df[0] > 3].collect()
[Row(age=5, name='Bob')]
"""
if isinstance(item, str):
jc = self._jdf.apply(item)
return Column(jc)
elif isinstance(item, Column):
return self.filter(item)
elif isinstance(item, (list, tuple)):
return self.select(*item)
elif isinstance(item, int):
jc = self._jdf.apply(self.columns[item])
return Column(jc)
else:
raise TypeError("unexpected item type: %s" % type(item))
def __getattr__(self, name):
"""Returns the :class:`Column` denoted by ``name``.
.. versionadded:: 1.3.0
Examples
--------
>>> df.select(df.age).collect()
[Row(age=2), Row(age=5)]
"""
if name not in self.columns:
raise AttributeError(
"'%s' object has no attribute '%s'" % (self.__class__.__name__, name))
jc = self._jdf.apply(name)
return Column(jc)
def select(self, *cols):
"""Projects a set of expressions and returns a new :class:`DataFrame`.
.. versionadded:: 1.3.0
Parameters
----------
cols : str, :class:`Column`, or list
column names (string) or expressions (:class:`Column`).
If one of the column names is '*', that column is expanded to include all columns
in the current :class:`DataFrame`.
Examples
--------
>>> df.select('*').collect()
[Row(age=2, name='Alice'), Row(age=5, name='Bob')]
>>> df.select('name', 'age').collect()
[Row(name='Alice', age=2), Row(name='Bob', age=5)]
>>> df.select(df.name, (df.age + 10).alias('age')).collect()
[Row(name='Alice', age=12), Row(name='Bob', age=15)]
"""
jdf = self._jdf.select(self._jcols(*cols))
return DataFrame(jdf, self.sql_ctx)
def selectExpr(self, *expr):
"""Projects a set of SQL expressions and returns a new :class:`DataFrame`.
This is a variant of :func:`select` that accepts SQL expressions.
.. versionadded:: 1.3.0
Examples
--------
>>> df.selectExpr("age * 2", "abs(age)").collect()
[Row((age * 2)=4, abs(age)=2), Row((age * 2)=10, abs(age)=5)]
"""
if len(expr) == 1 and isinstance(expr[0], list):
expr = expr[0]
jdf = self._jdf.selectExpr(self._jseq(expr))
return DataFrame(jdf, self.sql_ctx)
def filter(self, condition):
"""Filters rows using the given condition.
:func:`where` is an alias for :func:`filter`.
.. versionadded:: 1.3.0
Parameters
----------
condition : :class:`Column` or str
a :class:`Column` of :class:`types.BooleanType`
or a string of SQL expression.
Examples
--------
>>> df.filter(df.age > 3).collect()
[Row(age=5, name='Bob')]
>>> df.where(df.age == 2).collect()
[Row(age=2, name='Alice')]
>>> df.filter("age > 3").collect()
[Row(age=5, name='Bob')]
>>> df.where("age = 2").collect()
[Row(age=2, name='Alice')]
"""
if isinstance(condition, str):
jdf = self._jdf.filter(condition)
elif isinstance(condition, Column):
jdf = self._jdf.filter(condition._jc)
else:
raise TypeError("condition should be string or Column")
return DataFrame(jdf, self.sql_ctx)
def groupBy(self, *cols):
"""Groups the :class:`DataFrame` using the specified columns,
so we can run aggregation on them. See :class:`GroupedData`
for all the available aggregate functions.
:func:`groupby` is an alias for :func:`groupBy`.
.. versionadded:: 1.3.0
Parameters
----------
cols : list, str or :class:`Column`
columns to group by.
Each element should be a column name (string) or an expression (:class:`Column`).
Examples
--------
>>> df.groupBy().avg().collect()
[Row(avg(age)=3.5)]
>>> sorted(df.groupBy('name').agg({'age': 'mean'}).collect())
[Row(name='Alice', avg(age)=2.0), Row(name='Bob', avg(age)=5.0)]
>>> sorted(df.groupBy(df.name).avg().collect())
[Row(name='Alice', avg(age)=2.0), Row(name='Bob', avg(age)=5.0)]
>>> sorted(df.groupBy(['name', df.age]).count().collect())
[Row(name='Alice', age=2, count=1), Row(name='Bob', age=5, count=1)]
"""
jgd = self._jdf.groupBy(self._jcols(*cols))
from pyspark.sql.group import GroupedData
return GroupedData(jgd, self)
def rollup(self, *cols):
"""
Create a multi-dimensional rollup for the current :class:`DataFrame` using
the specified columns, so we can run aggregation on them.
.. versionadded:: 1.4.0
Examples
--------
>>> df.rollup("name", df.age).count().orderBy("name", "age").show()
+-----+----+-----+
| name| age|count|
+-----+----+-----+
| null|null| 2|
|Alice|null| 1|
|Alice| 2| 1|
| Bob|null| 1|
| Bob| 5| 1|
+-----+----+-----+
"""
jgd = self._jdf.rollup(self._jcols(*cols))
from pyspark.sql.group import GroupedData
return GroupedData(jgd, self)
def cube(self, *cols):
"""
Create a multi-dimensional cube for the current :class:`DataFrame` using
the specified columns, so we can run aggregations on them.
.. versionadded:: 1.4.0
Examples
--------
>>> df.cube("name", df.age).count().orderBy("name", "age").show()
+-----+----+-----+
| name| age|count|
+-----+----+-----+
| null|null| 2|
| null| 2| 1|
| null| 5| 1|
|Alice|null| 1|
|Alice| 2| 1|
| Bob|null| 1|
| Bob| 5| 1|
+-----+----+-----+
"""
jgd = self._jdf.cube(self._jcols(*cols))
from pyspark.sql.group import GroupedData
return GroupedData(jgd, self)
def agg(self, *exprs):
""" Aggregate on the entire :class:`DataFrame` without groups
(shorthand for ``df.groupBy().agg()``).
.. versionadded:: 1.3.0
Examples
--------
>>> df.agg({"age": "max"}).collect()
[Row(max(age)=5)]
>>> from pyspark.sql import functions as F
>>> df.agg(F.min(df.age)).collect()
[Row(min(age)=2)]
"""
return self.groupBy().agg(*exprs)
@since(2.0)
def union(self, other):
""" Return a new :class:`DataFrame` containing union of rows in this and another
:class:`DataFrame`.
This is equivalent to `UNION ALL` in SQL. To do a SQL-style set union
(that does deduplication of elements), use this function followed by :func:`distinct`.
Also as standard in SQL, this function resolves columns by position (not by name).
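Examples
--------
A small illustration using the doctest ``df`` defined in this module (columns
``age`` and ``name``); note that duplicate rows are kept:
>>> df.union(df).show()
+---+-----+
|age| name|
+---+-----+
| 2|Alice|
| 5| Bob|
| 2|Alice|
| 5| Bob|
+---+-----+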
"""
return DataFrame(self._jdf.union(other._jdf), self.sql_ctx)
@since(1.3)
def unionAll(self, other):
""" Return a new :class:`DataFrame` containing union of rows in this and another
:class:`DataFrame`.
This is equivalent to `UNION ALL` in SQL. To do a SQL-style set union
(that does deduplication of elements), use this function followed by :func:`distinct`.
Also as standard in SQL, this function resolves columns by position (not by name).
"""
return self.union(other)
def unionByName(self, other, allowMissingColumns=False):
""" Returns a new :class:`DataFrame` containing union of rows in this and another
:class:`DataFrame`.
This is different from both `UNION ALL` and `UNION DISTINCT` in SQL. To do a SQL-style set
union (that does deduplication of elements), use this function followed by :func:`distinct`.
.. versionadded:: 2.3.0
Examples
--------
The difference between this function and :func:`union` is that this function
resolves columns by name (not by position):
>>> df1 = spark.createDataFrame([[1, 2, 3]], ["col0", "col1", "col2"])
>>> df2 = spark.createDataFrame([[4, 5, 6]], ["col1", "col2", "col0"])
>>> df1.unionByName(df2).show()
+----+----+----+
|col0|col1|col2|
+----+----+----+
| 1| 2| 3|
| 6| 4| 5|
+----+----+----+
When the parameter `allowMissingColumns` is ``True``, the set of column names
in this and other :class:`DataFrame` can differ; missing columns will be filled with null.
Further, the missing columns of this :class:`DataFrame` will be added at the end
in the schema of the union result:
>>> df1 = spark.createDataFrame([[1, 2, 3]], ["col0", "col1", "col2"])
>>> df2 = spark.createDataFrame([[4, 5, 6]], ["col1", "col2", "col3"])
>>> df1.unionByName(df2, allowMissingColumns=True).show()
+----+----+----+----+
|col0|col1|col2|col3|
+----+----+----+----+
| 1| 2| 3|null|
|null| 4| 5| 6|
+----+----+----+----+
.. versionchanged:: 3.1.0
Added optional argument `allowMissingColumns` to specify whether to allow
missing columns.
"""
return DataFrame(self._jdf.unionByName(other._jdf, allowMissingColumns), self.sql_ctx)
@since(1.3)
def intersect(self, other):
""" Return a new :class:`DataFrame` containing rows only in
both this :class:`DataFrame` and another :class:`DataFrame`.
This is equivalent to `INTERSECT` in SQL.
"""
return DataFrame(self._jdf.intersect(other._jdf), self.sql_ctx)
def intersectAll(self, other):
""" Return a new :class:`DataFrame` containing rows in both this :class:`DataFrame`
and another :class:`DataFrame` while preserving duplicates.
This is equivalent to `INTERSECT ALL` in SQL. As standard in SQL, this function
resolves columns by position (not by name).
.. versionadded:: 2.4.0
Examples
--------
>>> df1 = spark.createDataFrame([("a", 1), ("a", 1), ("b", 3), ("c", 4)], ["C1", "C2"])
>>> df2 = spark.createDataFrame([("a", 1), ("a", 1), ("b", 3)], ["C1", "C2"])
>>> df1.intersectAll(df2).sort("C1", "C2").show()
+---+---+
| C1| C2|
+---+---+
| a| 1|
| a| 1|
| b| 3|
+---+---+
"""
return DataFrame(self._jdf.intersectAll(other._jdf), self.sql_ctx)
@since(1.3)
def subtract(self, other):
""" Return a new :class:`DataFrame` containing rows in this :class:`DataFrame`
but not in another :class:`DataFrame`.
This is equivalent to `EXCEPT DISTINCT` in SQL.
"""
return DataFrame(getattr(self._jdf, "except")(other._jdf), self.sql_ctx)
def dropDuplicates(self, subset=None):
"""Return a new :class:`DataFrame` with duplicate rows removed,
optionally only considering certain columns.
For a static batch :class:`DataFrame`, it just drops duplicate rows. For a streaming
:class:`DataFrame`, it will keep all data across triggers as intermediate state to drop
duplicates rows. You can use :func:`withWatermark` to limit how late the duplicate data can
be and system will accordingly limit the state. In addition, too late data older than
watermark will be dropped to avoid any possibility of duplicates.
:func:`drop_duplicates` is an alias for :func:`dropDuplicates`.
.. versionadded:: 1.4.0
Examples
--------
>>> from pyspark.sql import Row
>>> df = sc.parallelize([ \\
... Row(name='Alice', age=5, height=80), \\
... Row(name='Alice', age=5, height=80), \\
... Row(name='Alice', age=10, height=80)]).toDF()
>>> df.dropDuplicates().show()
+-----+---+------+
| name|age|height|
+-----+---+------+
|Alice| 5| 80|
|Alice| 10| 80|
+-----+---+------+
>>> df.dropDuplicates(['name', 'height']).show()
+-----+---+------+
| name|age|height|
+-----+---+------+
|Alice| 5| 80|
+-----+---+------+
"""
if subset is None:
jdf = self._jdf.dropDuplicates()
else:
jdf = self._jdf.dropDuplicates(self._jseq(subset))
return DataFrame(jdf, self.sql_ctx)
def dropna(self, how='any', thresh=None, subset=None):
"""Returns a new :class:`DataFrame` omitting rows with null values.
:func:`DataFrame.dropna` and :func:`DataFrameNaFunctions.drop` are aliases of each other.
.. versionadded:: 1.3.1
Parameters
----------
how : str, optional
'any' or 'all'.
If 'any', drop a row if it contains any nulls.
If 'all', drop a row only if all its values are null.
thresh: int, optional
default None
If specified, drop rows that have fewer than `thresh` non-null values.
This overrides the `how` parameter.
subset : str, tuple or list, optional
optional list of column names to consider.
Examples
--------
>>> df4.na.drop().show()
+---+------+-----+
|age|height| name|
+---+------+-----+
| 10| 80|Alice|
+---+------+-----+
"""
if how is not None and how not in ['any', 'all']:
raise ValueError("how ('" + how + "') should be 'any' or 'all'")
if subset is None:
subset = self.columns
elif isinstance(subset, str):
subset = [subset]
elif not isinstance(subset, (list, tuple)):
raise TypeError("subset should be a list or tuple of column names")
if thresh is None:
thresh = len(subset) if how == 'any' else 1
return DataFrame(self._jdf.na().drop(thresh, self._jseq(subset)), self.sql_ctx)
def fillna(self, value, subset=None):
"""Replace null values, alias for ``na.fill()``.
:func:`DataFrame.fillna` and :func:`DataFrameNaFunctions.fill` are aliases of each other.
.. versionadded:: 1.3.1
Parameters
----------
value : int, float, string, bool or dict
Value to replace null values with.
If the value is a dict, then `subset` is ignored and `value` must be a mapping
from column name (string) to replacement value. The replacement value must be
an int, float, boolean, or string.
subset : str, tuple or list, optional
optional list of column names to consider.
Columns specified in subset that do not have matching data type are ignored.
For example, if `value` is a string, and subset contains a non-string column,
then the non-string column is simply ignored.
Examples
--------
>>> df4.na.fill(50).show()
+---+------+-----+
|age|height| name|
+---+------+-----+
| 10| 80|Alice|
| 5| 50| Bob|
| 50| 50| Tom|
| 50| 50| null|
+---+------+-----+
>>> df5.na.fill(False).show()
+----+-------+-----+
| age| name| spy|
+----+-------+-----+
| 10| Alice|false|
| 5| Bob|false|
|null|Mallory| true|
+----+-------+-----+
>>> df4.na.fill({'age': 50, 'name': 'unknown'}).show()
+---+------+-------+
|age|height| name|
+---+------+-------+
| 10| 80| Alice|
| 5| null| Bob|
| 50| null| Tom|
| 50| null|unknown|
+---+------+-------+
"""
if not isinstance(value, (float, int, str, bool, dict)):
raise TypeError("value should be a float, int, string, bool or dict")
# Note that bool validates isinstance(int), but we don't want to
# convert bools to floats
if not isinstance(value, bool) and isinstance(value, int):
value = float(value)
if isinstance(value, dict):
return DataFrame(self._jdf.na().fill(value), self.sql_ctx)
elif subset is None:
return DataFrame(self._jdf.na().fill(value), self.sql_ctx)
else:
if isinstance(subset, str):
subset = [subset]
elif not isinstance(subset, (list, tuple)):
raise TypeError("subset should be a list or tuple of column names")
return DataFrame(self._jdf.na().fill(value, self._jseq(subset)), self.sql_ctx)
def replace(self, to_replace, value=_NoValue, subset=None):
"""Returns a new :class:`DataFrame` replacing a value with another value.
:func:`DataFrame.replace` and :func:`DataFrameNaFunctions.replace` are
aliases of each other.
Values to_replace and value must have the same type and can only be numerics, booleans,
or strings. Value can have None. When replacing, the new value will be cast
to the type of the existing column.
For numeric replacements all values to be replaced should have unique
floating point representation. In case of conflicts (for example with `{42: -1, 42.0: 1}`)
an arbitrary replacement will be used.
.. versionadded:: 1.4.0
Parameters
----------
to_replace : bool, int, float, string, list or dict
Value to be replaced.
If the value is a dict, then `value` is ignored or can be omitted, and `to_replace`
must be a mapping between a value and a replacement.
value : bool, int, float, string or None, optional
The replacement value must be a bool, int, float, string or None. If `value` is a
list, `value` should be of the same length and type as `to_replace`.
If `value` is a scalar and `to_replace` is a sequence, then `value` is
used as a replacement for each item in `to_replace`.
subset : list, optional
optional list of column names to consider.
Columns specified in subset that do not have matching data type are ignored.
For example, if `value` is a string, and subset contains a non-string column,
then the non-string column is simply ignored.
Examples
--------
>>> df4.na.replace(10, 20).show()
+----+------+-----+
| age|height| name|
+----+------+-----+
| 20| 80|Alice|
| 5| null| Bob|
|null| null| Tom|
|null| null| null|
+----+------+-----+
>>> df4.na.replace('Alice', None).show()
+----+------+----+
| age|height|name|
+----+------+----+
| 10| 80|null|
| 5| null| Bob|
|null| null| Tom|
|null| null|null|
+----+------+----+
>>> df4.na.replace({'Alice': None}).show()
+----+------+----+
| age|height|name|
+----+------+----+
| 10| 80|null|
| 5| null| Bob|
|null| null| Tom|
|null| null|null|
+----+------+----+
>>> df4.na.replace(['Alice', 'Bob'], ['A', 'B'], 'name').show()
+----+------+----+
| age|height|name|
+----+------+----+
| 10| 80| A|
| 5| null| B|
|null| null| Tom|
|null| null|null|
+----+------+----+
"""
if value is _NoValue:
if isinstance(to_replace, dict):
value = None
else:
raise TypeError("value argument is required when to_replace is not a dictionary.")
# Helper functions
def all_of(types):
"""Given a type or tuple of types and a sequence of xs
check if each x is instance of type(s)
>>> all_of(bool)([True, False])
True
>>> all_of(str)(["a", 1])
False
"""
def all_of_(xs):
return all(isinstance(x, types) for x in xs)
return all_of_
all_of_bool = all_of(bool)
all_of_str = all_of(str)
all_of_numeric = all_of((float, int))
# Validate input types
valid_types = (bool, float, int, str, list, tuple)
if not isinstance(to_replace, valid_types + (dict, )):
raise TypeError(
"to_replace should be a bool, float, int, string, list, tuple, or dict. "
"Got {0}".format(type(to_replace)))
if not isinstance(value, valid_types) and value is not None \
and not isinstance(to_replace, dict):
raise TypeError("If to_replace is not a dict, value should be "
"a bool, float, int, string, list, tuple or None. "
"Got {0}".format(type(value)))
if isinstance(to_replace, (list, tuple)) and isinstance(value, (list, tuple)):
if len(to_replace) != len(value):
raise ValueError("to_replace and value lists should be of the same length. "
"Got {0} and {1}".format(len(to_replace), len(value)))
if not (subset is None or isinstance(subset, (list, tuple, str))):
raise TypeError("subset should be a list or tuple of column names, "
"column name or None. Got {0}".format(type(subset)))
# Reshape input arguments if necessary
if isinstance(to_replace, (float, int, str)):
to_replace = [to_replace]
if isinstance(to_replace, dict):
rep_dict = to_replace
if value is not None:
warnings.warn("to_replace is a dict and value is not None. value will be ignored.")
else:
if isinstance(value, (float, int, str)) or value is None:
value = [value for _ in range(len(to_replace))]
rep_dict = dict(zip(to_replace, value))
if isinstance(subset, str):
subset = [subset]
# Verify we were not passed in mixed type generics.
if not any(all_of_type(rep_dict.keys())
and all_of_type(x for x in rep_dict.values() if x is not None)
for all_of_type in [all_of_bool, all_of_str, all_of_numeric]):
raise ValueError("Mixed type replacements are not supported")
if subset is None:
return DataFrame(self._jdf.na().replace('*', rep_dict), self.sql_ctx)
else:
return DataFrame(
self._jdf.na().replace(self._jseq(subset), self._jmap(rep_dict)), self.sql_ctx)
def approxQuantile(self, col, probabilities, relativeError):
"""
Calculates the approximate quantiles of numerical columns of a
:class:`DataFrame`.
The result of this algorithm has the following deterministic bound:
If the :class:`DataFrame` has N elements and if we request the quantile at
probability `p` up to error `err`, then the algorithm will return
a sample `x` from the :class:`DataFrame` so that the *exact* rank of `x` is
close to (p * N). More precisely,
floor((p - err) * N) <= rank(x) <= ceil((p + err) * N).
This method implements a variation of the Greenwald-Khanna
algorithm (with some speed optimizations). The algorithm was first
presented in [[https://doi.org/10.1145/375663.375670
Space-efficient Online Computation of Quantile Summaries]]
by Greenwald and Khanna.
Note that null values will be ignored in numerical columns before calculation.
For columns only containing null values, an empty list is returned.
.. versionadded:: 2.0.0
Parameters
----------
col: str, tuple or list
Can be a single column name, or a list of names for multiple columns.
.. versionchanged:: 2.2
Added support for multiple columns.
probabilities : list or tuple
a list of quantile probabilities
Each number must belong to [0, 1].
For example 0 is the minimum, 0.5 is the median, 1 is the maximum.
relativeError : float
The relative target precision to achieve
(>= 0). If set to zero, the exact quantiles are computed, which
could be very expensive. Note that values greater than 1 are
accepted but give the same result as 1.
Returns
-------
list
the approximate quantiles at the given probabilities. If
the input `col` is a string, the output is a list of floats. If the
input `col` is a list or tuple of strings, the output is also a
list, but each element in it is a list of floats, i.e., the output
is a list of list of floats.
"""
if not isinstance(col, (str, list, tuple)):
raise TypeError("col should be a string, list or tuple, but got %r" % type(col))
isStr = isinstance(col, str)
if isinstance(col, tuple):
col = list(col)
elif isStr:
col = [col]
for c in col:
if not isinstance(c, str):
raise TypeError("columns should be strings, but got %r" % type(c))
col = _to_list(self._sc, col)
if not isinstance(probabilities, (list, tuple)):
raise TypeError("probabilities should be a list or tuple")
if isinstance(probabilities, tuple):
probabilities = list(probabilities)
for p in probabilities:
if not isinstance(p, (float, int)) or p < 0 or p > 1:
raise ValueError("probabilities should be numerical (float, int) in [0,1].")
probabilities = _to_list(self._sc, probabilities)
if not isinstance(relativeError, (float, int)):
raise TypeError("relativeError should be numerical (float, int)")
if relativeError < 0:
raise ValueError("relativeError should be >= 0.")
relativeError = float(relativeError)
jaq = self._jdf.stat().approxQuantile(col, probabilities, relativeError)
jaq_list = [list(j) for j in jaq]
return jaq_list[0] if isStr else jaq_list
def corr(self, col1, col2, method=None):
"""
Calculates the correlation of two columns of a :class:`DataFrame` as a double value.
Currently only supports the Pearson Correlation Coefficient.
:func:`DataFrame.corr` and :func:`DataFrameStatFunctions.corr` are aliases of each other.
.. versionadded:: 1.4.0
Parameters
----------
col1 : str
The name of the first column
col2 : str
The name of the second column
method : str, optional
The correlation method. Currently only supports "pearson"
"""
if not isinstance(col1, str):
raise TypeError("col1 should be a string.")
if not isinstance(col2, str):
raise TypeError("col2 should be a string.")
if not method:
method = "pearson"
if not method == "pearson":
raise ValueError("Currently only the calculation of the Pearson Correlation " +
"coefficient is supported.")
return self._jdf.stat().corr(col1, col2, method)
def cov(self, col1, col2):
"""
Calculate the sample covariance for the given columns, specified by their names, as a
double value. :func:`DataFrame.cov` and :func:`DataFrameStatFunctions.cov` are aliases.
.. versionadded:: 1.4.0
Parameters
----------
col1 : str
The name of the first column
col2 : str
The name of the second column
"""
if not isinstance(col1, str):
raise TypeError("col1 should be a string.")
if not isinstance(col2, str):
raise TypeError("col2 should be a string.")
return self._jdf.stat().cov(col1, col2)
def crosstab(self, col1, col2):
"""
Computes a pair-wise frequency table of the given columns. Also known as a contingency
table. The number of distinct values for each column should be less than 1e4. At most 1e6
non-zero pair frequencies will be returned.
The first column of each row will be the distinct values of `col1` and the column names
will be the distinct values of `col2`. The name of the first column will be `$col1_$col2`.
Pairs that have no occurrences will have zero as their counts.
:func:`DataFrame.crosstab` and :func:`DataFrameStatFunctions.crosstab` are aliases.
.. versionadded:: 1.4.0
Parameters
----------
col1 : str
The name of the first column. Distinct items will make the first item of
each row.
col2 : str
The name of the second column. Distinct items will make the column names
of the :class:`DataFrame`.
"""
if not isinstance(col1, str):
raise TypeError("col1 should be a string.")
if not isinstance(col2, str):
raise TypeError("col2 should be a string.")
return DataFrame(self._jdf.stat().crosstab(col1, col2), self.sql_ctx)
def freqItems(self, cols, support=None):
"""
Finding frequent items for columns, possibly with false positives. Using the
frequent element count algorithm described in
"https://doi.org/10.1145/762471.762473, proposed by Karp, Schenker, and Papadimitriou".
:func:`DataFrame.freqItems` and :func:`DataFrameStatFunctions.freqItems` are aliases.
.. versionadded:: 1.4.0
Parameters
----------
cols : list or tuple
Names of the columns to calculate frequent items for as a list or tuple of
strings.
support : float, optional
The frequency with which to consider an item 'frequent'. Default is 1%.
The support must be greater than 1e-4.
Notes
-----
This function is meant for exploratory data analysis, as we make no
guarantee about the backward compatibility of the schema of the resulting
:class:`DataFrame`.
"""
if isinstance(cols, tuple):
cols = list(cols)
if not isinstance(cols, list):
raise TypeError("cols must be a list or tuple of column names as strings.")
if not support:
support = 0.01
return DataFrame(self._jdf.stat().freqItems(_to_seq(self._sc, cols), support), self.sql_ctx)
def withColumn(self, colName, col):
"""
Returns a new :class:`DataFrame` by adding a column or replacing the
existing column that has the same name.
The column expression must be an expression over this :class:`DataFrame`; attempting to add
a column from some other :class:`DataFrame` will raise an error.
.. versionadded:: 1.3.0
Parameters
----------
colName : str
string, name of the new column.
col : :class:`Column`
a :class:`Column` expression for the new column.
Notes
-----
This method introduces a projection internally. Therefore, calling it multiple
times, for instance, via loops in order to add multiple columns can generate big
plans which can cause performance issues and even `StackOverflowException`.
To avoid this, use :func:`select` with the multiple columns at once.
Examples
--------
>>> df.withColumn('age2', df.age + 2).collect()
[Row(age=2, name='Alice', age2=4), Row(age=5, name='Bob', age2=7)]
"""
if not isinstance(col, Column):
raise TypeError("col should be Column")
return DataFrame(self._jdf.withColumn(colName, col._jc), self.sql_ctx)
def withColumnRenamed(self, existing, new):
"""Returns a new :class:`DataFrame` by renaming an existing column.
This is a no-op if schema doesn't contain the given column name.
.. versionadded:: 1.3.0
Parameters
----------
existing : str
string, name of the existing column to rename.
new : str
string, new name of the column.
Examples
--------
>>> df.withColumnRenamed('age', 'age2').collect()
[Row(age2=2, name='Alice'), Row(age2=5, name='Bob')]
"""
return DataFrame(self._jdf.withColumnRenamed(existing, new), self.sql_ctx)
def drop(self, *cols):
"""Returns a new :class:`DataFrame` that drops the specified column.
This is a no-op if schema doesn't contain the given column name(s).
.. versionadded:: 1.4.0
Parameters
----------
cols: str or :class:`Column`
a name of the column, or the :class:`Column` to drop
Examples
--------
>>> df.drop('age').collect()
[Row(name='Alice'), Row(name='Bob')]
>>> df.drop(df.age).collect()
[Row(name='Alice'), Row(name='Bob')]
>>> df.join(df2, df.name == df2.name, 'inner').drop(df.name).collect()
[Row(age=5, height=85, name='Bob')]
>>> df.join(df2, df.name == df2.name, 'inner').drop(df2.name).collect()
[Row(age=5, name='Bob', height=85)]
>>> df.join(df2, 'name', 'inner').drop('age', 'height').collect()
[Row(name='Bob')]
"""
if len(cols) == 1:
col = cols[0]
if isinstance(col, str):
jdf = self._jdf.drop(col)
elif isinstance(col, Column):
jdf = self._jdf.drop(col._jc)
else:
raise TypeError("col should be a string or a Column")
else:
for col in cols:
if not isinstance(col, str):
raise TypeError("each col in the param list should be a string")
jdf = self._jdf.drop(self._jseq(cols))
return DataFrame(jdf, self.sql_ctx)
def toDF(self, *cols):
"""Returns a new :class:`DataFrame` with the specified column names
Parameters
----------
cols : str
new column names
Examples
--------
>>> df.toDF('f1', 'f2').collect()
[Row(f1=2, f2='Alice'), Row(f1=5, f2='Bob')]
"""
jdf = self._jdf.toDF(self._jseq(cols))
return DataFrame(jdf, self.sql_ctx)
def transform(self, func):
"""Returns a new :class:`DataFrame`. Concise syntax for chaining custom transformations.
.. versionadded:: 3.0.0
Parameters
----------
func : function
a function that takes and returns a :class:`DataFrame`.
Examples
--------
>>> from pyspark.sql.functions import col
>>> df = spark.createDataFrame([(1, 1.0), (2, 2.0)], ["int", "float"])
>>> def cast_all_to_int(input_df):
... return input_df.select([col(col_name).cast("int") for col_name in input_df.columns])
>>> def sort_columns_asc(input_df):
... return input_df.select(*sorted(input_df.columns))
>>> df.transform(cast_all_to_int).transform(sort_columns_asc).show()
+-----+---+
|float|int|
+-----+---+
| 1| 1|
| 2| 2|
+-----+---+
"""
result = func(self)
assert isinstance(result, DataFrame), "Func returned an instance of type [%s], " \
"should have been DataFrame." % type(result)
return result
def sameSemantics(self, other):
"""
Returns `True` when the logical query plans inside both :class:`DataFrame`\\s are equal and
therefore return the same results.
.. versionadded:: 3.1.0
Notes
-----
The equality comparison here is simplified by tolerating the cosmetic differences
such as attribute names.
This API can compare both :class:`DataFrame`\\s very quickly but can still return
`False` for :class:`DataFrame`\\s that return the same results, for instance, from
different plans. Such false negatives can be acceptable, e.g., when deciding whether a cached result can be reused.
This API is a developer API.
Examples
--------
>>> df1 = spark.range(10)
>>> df2 = spark.range(10)
>>> df1.withColumn("col1", df1.id * 2).sameSemantics(df2.withColumn("col1", df2.id * 2))
True
>>> df1.withColumn("col1", df1.id * 2).sameSemantics(df2.withColumn("col1", df2.id + 2))
False
>>> df1.withColumn("col1", df1.id * 2).sameSemantics(df2.withColumn("col0", df2.id * 2))
True
"""
if not isinstance(other, DataFrame):
raise TypeError("other parameter should be of DataFrame; however, got %s"
% type(other))
return self._jdf.sameSemantics(other._jdf)
def semanticHash(self):
"""
Returns a hash code of the logical query plan against this :class:`DataFrame`.
.. versionadded:: 3.1.0
Notes
-----
Unlike the standard hash code, the hash is calculated against the query plan
simplified by tolerating the cosmetic differences such as attribute names.
This API is a developer API.
Examples
--------
>>> spark.range(10).selectExpr("id as col0").semanticHash() # doctest: +SKIP
1855039936
>>> spark.range(10).selectExpr("id as col1").semanticHash() # doctest: +SKIP
1855039936
"""
return self._jdf.semanticHash()
def inputFiles(self):
"""
Returns a best-effort snapshot of the files that compose this :class:`DataFrame`.
This method simply asks each constituent BaseRelation for its respective files and
takes the union of all results. Depending on the source relations, this may not find
all input files. Duplicates are removed.
.. versionadded:: 3.1.0
Examples
--------
>>> df = spark.read.load("examples/src/main/resources/people.json", format="json")
>>> len(df.inputFiles())
1
"""
return list(self._jdf.inputFiles())
where = copy_func(
filter,
sinceversion=1.3,
doc=":func:`where` is an alias for :func:`filter`.")
# Two aliases below were added for pandas compatibility many years ago.
# There are too many differences compared to pandas and we cannot just
# make it "compatible" by adding aliases. Therefore, we stop adding such
# aliases as of Spark 3.0. Two methods below remain just
# for legacy users currently.
groupby = copy_func(
groupBy,
sinceversion=1.4,
doc=":func:`groupby` is an alias for :func:`groupBy`.")
drop_duplicates = copy_func(
dropDuplicates,
sinceversion=1.4,
doc=":func:`drop_duplicates` is an alias for :func:`dropDuplicates`.")
def writeTo(self, table):
"""
Create a write configuration builder for v2 sources.
This builder is used to configure and execute write operations.
For example, to append or create or replace existing tables.
.. versionadded:: 3.1.0
Examples
--------
>>> df.writeTo("catalog.db.table").append() # doctest: +SKIP
>>> df.writeTo( # doctest: +SKIP
... "catalog.db.table"
... ).partitionedBy("col").createOrReplace()
"""
return DataFrameWriterV2(self, table)
def to_pandas_on_spark(self, index_col=None):
"""
Converts the existing DataFrame into a pandas-on-Spark DataFrame.
If a pandas-on-Spark DataFrame is converted to a Spark DataFrame and then back
to pandas-on-Spark, it will lose the index information and the original index
will be turned into a normal column.
This is only available if Pandas is installed and available.
Parameters
----------
index_col: str or list of str, optional, default: None
Index column of table in Spark.
See Also
--------
pyspark.pandas.frame.DataFrame.to_spark
Examples
--------
>>> df.show() # doctest: +SKIP
+----+----+
|Col1|Col2|
+----+----+
| a| 1|
| b| 2|
| c| 3|
+----+----+
>>> df.to_pandas_on_spark() # doctest: +SKIP
Col1 Col2
0 a 1
1 b 2
2 c 3
We can specify the index columns.
>>> df.to_pandas_on_spark(index_col="Col1") # doctest: +SKIP
Col2
Col1
a 1
b 2
c 3
"""
from pyspark.pandas.namespace import _get_index_map
from pyspark.pandas.frame import DataFrame
from pyspark.pandas.internal import InternalFrame
index_spark_columns, index_names = _get_index_map(self, index_col)
internal = InternalFrame(
spark_frame=self, index_spark_columns=index_spark_columns, index_names=index_names
)
return DataFrame(internal)
# Keep to_koalas for backward compatibility for now.
def to_koalas(self, index_col=None):
warnings.warn(
"DataFrame.to_koalas is deprecated. Use DataFrame.to_pandas_on_spark instead.",
FutureWarning,
)
return self.to_pandas_on_spark(index_col)
def _to_scala_map(sc, jm):
"""
Convert a dict into a JVM Map.
"""
return sc._jvm.PythonUtils.toScalaMap(jm)
class DataFrameNaFunctions(object):
"""Functionality for working with missing data in :class:`DataFrame`.
.. versionadded:: 1.4
"""
def __init__(self, df):
self.df = df
def drop(self, how='any', thresh=None, subset=None):
return self.df.dropna(how=how, thresh=thresh, subset=subset)
drop.__doc__ = DataFrame.dropna.__doc__
def fill(self, value, subset=None):
return self.df.fillna(value=value, subset=subset)
fill.__doc__ = DataFrame.fillna.__doc__
def replace(self, to_replace, value=_NoValue, subset=None):
return self.df.replace(to_replace, value, subset)
replace.__doc__ = DataFrame.replace.__doc__
class DataFrameStatFunctions(object):
"""Functionality for statistic functions with :class:`DataFrame`.
.. versionadded:: 1.4
"""
def __init__(self, df):
self.df = df
def approxQuantile(self, col, probabilities, relativeError):
return self.df.approxQuantile(col, probabilities, relativeError)
approxQuantile.__doc__ = DataFrame.approxQuantile.__doc__
def corr(self, col1, col2, method=None):
return self.df.corr(col1, col2, method)
corr.__doc__ = DataFrame.corr.__doc__
def cov(self, col1, col2):
return self.df.cov(col1, col2)
cov.__doc__ = DataFrame.cov.__doc__
def crosstab(self, col1, col2):
return self.df.crosstab(col1, col2)
crosstab.__doc__ = DataFrame.crosstab.__doc__
def freqItems(self, cols, support=None):
return self.df.freqItems(cols, support)
freqItems.__doc__ = DataFrame.freqItems.__doc__
def sampleBy(self, col, fractions, seed=None):
return self.df.sampleBy(col, fractions, seed)
sampleBy.__doc__ = DataFrame.sampleBy.__doc__
def _test():
import doctest
from pyspark.context import SparkContext
from pyspark.sql import Row, SQLContext, SparkSession
import pyspark.sql.dataframe
globs = pyspark.sql.dataframe.__dict__.copy()
sc = SparkContext('local[4]', 'PythonTest')
globs['sc'] = sc
globs['sqlContext'] = SQLContext(sc)
globs['spark'] = SparkSession(sc)
globs['df'] = sc.parallelize([(2, 'Alice'), (5, 'Bob')])\
.toDF(StructType([StructField('age', IntegerType()),
StructField('name', StringType())]))
globs['df2'] = sc.parallelize([Row(height=80, name='Tom'), Row(height=85, name='Bob')]).toDF()
globs['df3'] = sc.parallelize([Row(age=2, name='Alice'),
Row(age=5, name='Bob')]).toDF()
globs['df4'] = sc.parallelize([Row(age=10, height=80, name='Alice'),
Row(age=5, height=None, name='Bob'),
Row(age=None, height=None, name='Tom'),
Row(age=None, height=None, name=None)]).toDF()
globs['df5'] = sc.parallelize([Row(age=10, name='Alice', spy=False),
Row(age=5, name='Bob', spy=None),
Row(age=None, name='Mallory', spy=True)]).toDF()
globs['sdf'] = sc.parallelize([Row(name='Tom', time=1479441846),
Row(name='Bob', time=1479442946)]).toDF()
(failure_count, test_count) = doctest.testmod(
pyspark.sql.dataframe, globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE | doctest.REPORT_NDIFF)
globs['sc'].stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
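# --- Editor-added illustrative sketch (not part of the original PySpark source) ---
# The approxQuantile docstring above states the rank guarantee
#     floor((p - err) * N) <= rank(x) <= ceil((p + err) * N).
# The helper below is a hypothetical illustration of that trade-off; it assumes a
# live SparkSession is passed in and is not exercised by the doctests.
def _approx_quantile_sketch(spark):
    """Compare exact quantiles (relativeError=0) with approximate ones."""
    df = spark.range(1000).selectExpr("id AS x")
    exact = df.approxQuantile("x", [0.25, 0.5, 0.75], 0.0)    # exact, but expensive
    approx = df.approxQuantile("x", [0.25, 0.5, 0.75], 0.05)  # each rank may be off by ~50
    return exact, approx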
| apache-2.0 |
endolith/scikit-image | skimage/viewer/plugins/color_histogram.py | 40 | 3271 | import numpy as np
import matplotlib.pyplot as plt
from ... import color, exposure
from .plotplugin import PlotPlugin
from ..canvastools import RectangleTool
class ColorHistogram(PlotPlugin):
name = 'Color Histogram'
def __init__(self, max_pct=0.99, **kwargs):
super(ColorHistogram, self).__init__(height=400, **kwargs)
self.max_pct = max_pct
print(self.help())
def attach(self, image_viewer):
super(ColorHistogram, self).attach(image_viewer)
self.rect_tool = RectangleTool(self,
on_release=self.ab_selected)
self._on_new_image(image_viewer.image)
def _on_new_image(self, image):
self.lab_image = color.rgb2lab(image)
# Calculate color histogram in the Lab colorspace:
L, a, b = self.lab_image.T
left, right = -100, 100
ab_extents = [left, right, right, left]
self.mask = np.ones(L.shape, bool)
bins = np.arange(left, right)
hist, x_edges, y_edges = np.histogram2d(a.flatten(), b.flatten(),
bins, normed=True)
self.data = {'bins': bins, 'hist': hist, 'edges': (x_edges, y_edges),
'extents': (left, right, left, right)}
# Clip bin heights that dominate a-b histogram
max_val = pct_total_area(hist, percentile=self.max_pct)
hist = exposure.rescale_intensity(hist, in_range=(0, max_val))
self.ax.imshow(hist, extent=ab_extents, cmap=plt.cm.gray)
self.ax.set_title('Color Histogram')
self.ax.set_xlabel('b')
self.ax.set_ylabel('a')
def help(self):
helpstr = ("Color Histogram tool:",
"Select region of a-b colorspace to highlight on image.")
return '\n'.join(helpstr)
def ab_selected(self, extents):
x0, x1, y0, y1 = extents
self.data['extents'] = extents
lab_masked = self.lab_image.copy()
L, a, b = lab_masked.T
self.mask = ((a > y0) & (a < y1)) & ((b > x0) & (b < x1))
lab_masked[..., 1:][~self.mask.T] = 0
self.image_viewer.image = color.lab2rgb(lab_masked)
def output(self):
"""Return the image mask and the histogram data.
Returns
-------
mask : array of bool, same shape as image
The selected pixels.
data : dict
The data describing the histogram and the selected region.
The dictionary contains:
- 'bins' : array of float
The bin boundaries for both `a` and `b` channels.
- 'hist' : 2D array of float
The normalized histogram.
- 'edges' : tuple of array of float
The bin edges along each dimension
- 'extents' : tuple of float
The left and right and top and bottom of the selected region.
"""
return (self.mask, self.data)
def pct_total_area(image, percentile=0.80):
"""Return threshold value based on percentage of total area.
The returned intensity is the value at the given percentile of the sorted pixel
values, i.e. roughly the specified fraction of pixels lies at or below it.
"""
idx = int((image.size - 1) * percentile)
sorted_pixels = np.sort(image.flat)
return sorted_pixels[idx]
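# --- Editor-added illustrative sketch (not part of the original scikit-image file) ---
# pct_total_area picks the intensity at the given percentile of the sorted pixel
# values; on a 10-pixel image with percentile=0.80 the index is int(9 * 0.8) == 7.
def _pct_total_area_sketch():
    """Hypothetical helper showing pct_total_area on a tiny array."""
    image = np.arange(10, dtype=float).reshape(2, 5)
    threshold = pct_total_area(image, percentile=0.80)  # sorted pixels are 0..9 -> 7.0
    return threshold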
| bsd-3-clause |
mayblue9/scikit-learn | examples/classification/plot_lda_qda.py | 78 | 5046 | """
====================================================================
Linear and Quadratic Discriminant Analysis with confidence ellipsoid
====================================================================
Plot the confidence ellipsoids of each class and decision boundary
"""
print(__doc__)
from scipy import linalg
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib import colors
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
###############################################################################
# colormap
cmap = colors.LinearSegmentedColormap(
'red_blue_classes',
{'red': [(0, 1, 1), (1, 0.7, 0.7)],
'green': [(0, 0.7, 0.7), (1, 0.7, 0.7)],
'blue': [(0, 0.7, 0.7), (1, 1, 1)]})
plt.cm.register_cmap(cmap=cmap)
###############################################################################
# generate datasets
def dataset_fixed_cov():
'''Generate 2 Gaussians samples with the same covariance matrix'''
n, dim = 300, 2
np.random.seed(0)
C = np.array([[0., -0.23], [0.83, .23]])
X = np.r_[np.dot(np.random.randn(n, dim), C),
np.dot(np.random.randn(n, dim), C) + np.array([1, 1])]
y = np.hstack((np.zeros(n), np.ones(n)))
return X, y
def dataset_cov():
'''Generate 2 Gaussians samples with different covariance matrices'''
n, dim = 300, 2
np.random.seed(0)
C = np.array([[0., -1.], [2.5, .7]]) * 2.
X = np.r_[np.dot(np.random.randn(n, dim), C),
np.dot(np.random.randn(n, dim), C.T) + np.array([1, 4])]
y = np.hstack((np.zeros(n), np.ones(n)))
return X, y
###############################################################################
# plot functions
def plot_data(lda, X, y, y_pred, fig_index):
splot = plt.subplot(2, 2, fig_index)
if fig_index == 1:
plt.title('Linear Discriminant Analysis')
plt.ylabel('Data with fixed covariance')
elif fig_index == 2:
plt.title('Quadratic Discriminant Analysis')
elif fig_index == 3:
plt.ylabel('Data with varying covariances')
tp = (y == y_pred) # True Positive
tp0, tp1 = tp[y == 0], tp[y == 1]
X0, X1 = X[y == 0], X[y == 1]
X0_tp, X0_fp = X0[tp0], X0[~tp0]
X1_tp, X1_fp = X1[tp1], X1[~tp1]
xmin, xmax = X[:, 0].min(), X[:, 0].max()
ymin, ymax = X[:, 1].min(), X[:, 1].max()
# class 0: dots
plt.plot(X0_tp[:, 0], X0_tp[:, 1], 'o', color='red')
plt.plot(X0_fp[:, 0], X0_fp[:, 1], '.', color='#990000') # dark red
# class 1: dots
plt.plot(X1_tp[:, 0], X1_tp[:, 1], 'o', color='blue')
plt.plot(X1_fp[:, 0], X1_fp[:, 1], '.', color='#000099') # dark blue
# class 0 and 1 : areas
nx, ny = 200, 100
x_min, x_max = plt.xlim()
y_min, y_max = plt.ylim()
xx, yy = np.meshgrid(np.linspace(x_min, x_max, nx),
np.linspace(y_min, y_max, ny))
Z = lda.predict_proba(np.c_[xx.ravel(), yy.ravel()])
Z = Z[:, 1].reshape(xx.shape)
plt.pcolormesh(xx, yy, Z, cmap='red_blue_classes',
norm=colors.Normalize(0., 1.))
plt.contour(xx, yy, Z, [0.5], linewidths=2., colors='k')
# means
plt.plot(lda.means_[0][0], lda.means_[0][1],
'o', color='black', markersize=10)
plt.plot(lda.means_[1][0], lda.means_[1][1],
'o', color='black', markersize=10)
return splot
def plot_ellipse(splot, mean, cov, color):
v, w = linalg.eigh(cov)
u = w[0] / linalg.norm(w[0])
angle = np.arctan(u[1] / u[0])
angle = 180 * angle / np.pi # convert to degrees
# filled Gaussian at 2 standard deviation
ell = mpl.patches.Ellipse(mean, 2 * v[0] ** 0.5, 2 * v[1] ** 0.5,
180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.5)
splot.add_artist(ell)
splot.set_xticks(())
splot.set_yticks(())
def plot_lda_cov(lda, splot):
plot_ellipse(splot, lda.means_[0], lda.covariance_, 'red')
plot_ellipse(splot, lda.means_[1], lda.covariance_, 'blue')
def plot_qda_cov(qda, splot):
plot_ellipse(splot, qda.means_[0], qda.covariances_[0], 'red')
plot_ellipse(splot, qda.means_[1], qda.covariances_[1], 'blue')
###############################################################################
for i, (X, y) in enumerate([dataset_fixed_cov(), dataset_cov()]):
# Linear Discriminant Analysis
lda = LinearDiscriminantAnalysis(solver="svd", store_covariance=True)
y_pred = lda.fit(X, y).predict(X)
splot = plot_data(lda, X, y, y_pred, fig_index=2 * i + 1)
plot_lda_cov(lda, splot)
plt.axis('tight')
# Quadratic Discriminant Analysis
qda = QuadraticDiscriminantAnalysis()
y_pred = qda.fit(X, y, store_covariances=True).predict(X)
splot = plot_data(qda, X, y, y_pred, fig_index=2 * i + 2)
plot_qda_cov(qda, splot)
plt.axis('tight')
plt.suptitle('Linear Discriminant Analysis vs Quadratic Discriminant Analysis')
plt.show()
| bsd-3-clause |
l1ll1/cvl-fabric-launcher | pyinstaller-2.1/PyInstaller/loader/rthooks/pyi_rth_mplconfig.py | 10 | 1430 | #-----------------------------------------------------------------------------
# Copyright (c) 2013, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License with exception
# for distributing bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
# matplotlib will create $HOME/.matplotlib folder in user's home directory.
# In this directory there is fontList.cache file which lists paths
# to matplotlib fonts.
#
# When you run your onefile exe for the first time it is extracted to, for example,
# a "_MEIxxxxx" temp directory, and the fontList.cache file is created with font paths
# pointing to this directory.
#
# The second time you run your exe a new directory "_MEIyyyyy" is created, but the
# fontList.cache file still points to the previous directory, which has been deleted.
# You will then get an error like:
#
# RuntimeError: Could not open facefile
#
# We need to force matplotlib to recreate config directory every time you run
# your app.
import atexit
import os
import shutil
import tempfile
# Point the matplotlib config dir to a temp directory.
configdir = tempfile.mkdtemp()
os.environ['MPLCONFIGDIR'] = configdir
try:
# Remove temp directory at application exit and ignore any errors.
atexit.register(shutil.rmtree, configdir, ignore_errors=True)
except OSError:
pass
| gpl-3.0 |
gzampieri/Scuba | prioritizer.py | 1 | 7833 |
"""
This file is part of Scuba.
Copyright (C) 2016 by Guido Zampieri, Dinh Tran, Michele Donini.
Scuba is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
Scuba is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with Scuba. If not, see <http://www.gnu.org/licenses/>.
@author: Michele Donini
"""
from cvxopt import matrix, solvers
import numpy as np
import pandas as pd
class EasyMKL():
""" EasyMKL algorithm.
The parameter lambda_p has to be validated from 0 to 1.
For more information:
EasyMKL: a scalable multiple kernel learning algorithm
by Fabio Aiolli and Michele Donini
Paper @ http://www.math.unipd.it/~mdonini/publications.html
Attributes:
Ktr_sum -- Sum of training kernel matrices.
q -- Column-wise means of Ktr_sum (only computed when large_data is True).
large_data -- Boolean variable, indicates whether the training data is large.
lambda_p -- Regularization parameter (float, range [0,1]).
tracenorm -- Boolean variable, indicates whether kernels traces have to be normalized.
labels -- List of labels of training examples.
gammas -- List of weights for examples.
weights -- List of weights for kernel matrices.
r -- List of scores for training examples.
"""
def __init__(self, Ktr_list = None, lambda_p = 0.1, tracenorm = True, large_data = False):
if Ktr_list == None:
self.Ktr_sum = None
self.q = None
self.large_data = large_data
else:
self.set_Ktr_list(Ktr_list, large_data)
self.lambda_p = lambda_p
self.tracenorm = tracenorm
self.labels = None
self.gammas = None
self.weights = None
self.r = None
def set_lambda(self, lambda_p):
""" Set lambda_p. """
self.lambda_p = lambda_p
return self
def set_Ktr_list(self, Ktr_list, large_data):
""" Set Ktr_sum, q and large_data. """
if large_data:
self.Ktr_sum = self.sum_kernels(Ktr_list)
self.q = self.sum_rows(self.Ktr_sum)
self.large_data = True
else:
self.Ktr_sum = self.sum_kernels(self.normalize_kernels(Ktr_list))
self.q = None
self.large_data = False
return
def get_trace(self, K):
""" Get the mean of the diagonal entries (trace divided by the matrix size). """
return sum([K.iloc[i,i] for i in range(K.shape[0])]) / K.shape[0]
def normalize_kernels(self, K_list):
""" Divide kernel matrices by their traces. """
traces = [self.get_trace(K) for K in K_list]
# Divide each matrix by its trace
if self.tracenorm:
for matrix_idx in range(len(K_list)):
K_list[matrix_idx] = K_list[matrix_idx].divide(traces[matrix_idx])
return K_list
def sum_kernels(self, K_list, weights = None):
""" Compute sum of kernels.
Parameters:
K_list -- List of kernel matrices.
weights -- List of linear coefficients (non specified by default).
Return:
A -- Kernel matrix created by summing all the kernels.
Notes:
If weights is None, the sum is the ordinary sum.
Otherwise, each kernel is multiplied by its corresponding weight and then summed.
"""
M = K_list[0].shape[0]
N = K_list[0].shape[1]
A = pd.DataFrame(data=np.zeros((M,N)), index=K_list[0].index, columns=K_list[0].columns)
if weights == None:
for K in K_list:
A = A.add(K)
else:
for w,K in zip(weights, K_list):
A = A.add(w*K)
return A
def sum_rows(self, K):
""" Get the column-wise mean (sum over rows divided by the number of rows). """
q = K.sum(axis=0).divide(K.shape[0])
return q
def train(self, Ktr_list, labels):
""" Train the model.
Parameters:
Ktr_list --- List of kernel matrices for training examples.
labels --- List of labels of training examples.
"""
# Set labels as +1/-1
set_labels = set(labels)
if len(set_labels) != 2:
raise ValueError('The different labels are not 2')
elif (-1 in set_labels and 1 in set_labels):
self.labels = pd.Series(data=labels, index=self.Ktr_sum.index)
else:
poslab = np.max(list(set_labels))
self.labels = pd.Series(data=[1 if i==poslab else -1 for i in labels], index=self.Ktr_sum.index)
Xp = list(self.labels[self.labels == 1].index)
Xn = list(self.labels[self.labels == -1].index)
# Compute gammas
self.gammas = self.komd_train(self.Ktr_sum, self.labels, Xp, Xn)
# Compute weights
self.weights = self.get_weights(Ktr_list, self.gammas, self.labels, Xp, Xn)
# Compute final gammas
Ktr_weighted_sum = self.sum_kernels(Ktr_list, self.weights)
self.gammas = self.komd_train(Ktr_weighted_sum, self.labels, Xp, Xn)
yg = self.gammas*self.labels
self.r = Ktr_weighted_sum.dot(yg).values
return self.weights
def komd_train(self, K, labels, Xp, Xn):
""" Train KOMD. """
YY = pd.DataFrame(data=np.diag(list(labels)), index=self.Ktr_sum.index, columns=self.Ktr_sum.columns)
P = matrix(2*((1.0-self.lambda_p) * YY.dot(K).dot(YY).as_matrix() + np.diag([self.lambda_p]*len(labels))))
q = matrix([0.0]*len(labels))
G = -matrix(np.diag([1.0]*len(labels)))
h = matrix([0.0]*len(labels),(len(labels),1))
A = matrix([[1.0 if lab==+1 else 0 for lab in labels],[1.0 if lab2==-1 else 0 for lab2 in labels]]).T
b = matrix([[1.0],[1.0]],(2,1))
solvers.options['show_progress']=False
sol = solvers.qp(P,q,G,h,A,b)
gammas = pd.Series(data=sol['x'], index=self.Ktr_sum.index)
return gammas
def get_weights(self, Ktr_list, gammas, labels, Xp, Xn):
""" Get weights of kernels. """
weights = []
if self.large_data:
X = Xp + [Xn[np.random.randint(len(Xn))] for i in range(int((len(Xp)+len(Xn))/10))]
g = gammas.loc[X]
l = labels.loc[X]
yg = g*l
for K in Ktr_list:
k = K.loc[X,X]
b = yg.dot(k).dot(yg)
weights.append(b)
else:
yg = gammas*labels
for K in Ktr_list:
b = yg.dot(K).dot(yg)
weights.append(b)
norm2 = sum([w for w in weights])
weights = [w / norm2 for w in weights]
return weights
def rank(self, Ktest_list = None):
""" Compute the probability distribution for positive examples.
Parameters:
Ktest_list --- List of kernel matrices of test examples.
Return:
r -- List of scores for test examples.
"""
if self.weights == None:
raise ValueError('EasyMKL has to be trained first!')
if Ktest_list == None:
r = self.r
else:
yg = self.gammas*self.labels
Ktest_weighted_sum = self.sum_kernels(Ktest_list, self.weights)
r = Ktest_weighted_sum.dot(yg).as_matrix()
return list(r)
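# --- Editor-added usage sketch (hypothetical data, not part of the original file) ---
# EasyMKL expects a list of kernel matrices as pandas DataFrames sharing one index,
# plus binary labels; train() returns the learned kernel weights and rank() returns
# a score per training example. All numbers below are arbitrary toy values.
def _easymkl_usage_sketch():
    """Minimal illustration with two toy kernels."""
    rng = np.random.RandomState(0)
    X = rng.randn(20, 5)
    K_linear = pd.DataFrame(X.dot(X.T))
    K_rbf = pd.DataFrame(np.exp(-0.5 * ((X[:, None, :] - X[None, :, :]) ** 2).sum(-1)))
    labels = [1] * 10 + [-1] * 10
    model = EasyMKL(Ktr_list=[K_linear, K_rbf], lambda_p=0.1)
    kernel_weights = model.train([K_linear, K_rbf], labels)
    scores = model.rank()  # scores for the training examples themselves
    return kernel_weights, scores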
class Scuba(EasyMKL):
""" EasyMKL algorithm with unbalanced regularization.
Attributes:
lambda_p -- Regularization parameter for positive examples (float, range [0,1]).
"""
def __init__(self, Ktr_list = None, lambda_p = 0.1, tracenorm = True, large_data = False):
EasyMKL.__init__(self, Ktr_list, lambda_p, tracenorm, large_data)
def komd_train(self, K, labels, Xp, Xn):
""" Train KOMD with unbalanced regularization. """
P = matrix((1.0-self.lambda_p)*K.loc[Xp,Xp].values + np.diag([self.lambda_p]*len(Xp)))
if self.large_data:
q = matrix(-(1.0-self.lambda_p)*self.q.loc[Xp].values)
else:
q = matrix(-(1.0-self.lambda_p)*K.loc[Xp,Xn].values.dot([1.0/len(Xn)]*len(Xn)))
G = -matrix(np.diag([1.0]*len(Xp)))
h = matrix(0.0, (len(Xp),1))
A = matrix(1.0, (len(Xp),1)).T
b = matrix(1.0)
solvers.options['show_progress']=False
sol = solvers.qp(P,q,G,h,A,b)
gammas = pd.Series(data=[1.0/len(Xn)]*(len(Xp)+len(Xn)), index=self.Ktr_sum.index)
for idx,v in enumerate(Xp):
gammas[v] = sol['x'][idx]
return gammas
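# --- Editor note (added for clarity, not part of the original file) ---
# Scuba overrides only komd_train: the quadratic program is solved over the positive
# examples while every negative example keeps the fixed weight 1/|Xn|; this is the
# "unbalanced regularization" mentioned in the class docstring. The public interface
# matches EasyMKL, so the usage sketch above applies verbatim, e.g.
#     model = Scuba(Ktr_list=[K_linear, K_rbf], lambda_p=0.1)
#     model.train([K_linear, K_rbf], labels)
#     scores = model.rank()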
| gpl-2.0 |
JanetMatsen/Machine_Learning_CSE_546 | HW3/code/not_updated/ridge_regression.py | 2 | 12001 | import numpy as np
import pandas as pd
import scipy.sparse as sp
import scipy.sparse.linalg as splin
import time;
from classification_base import ClassificationBase
class RidgeMulti(ClassificationBase):
"""
Train multiple ridge models.
"""
def __init__(self, X, y, lam, W=None, verbose=False, sparse=True,
test_X=None, test_y = None, kernelized=False):
"""
test_X, test_y are for compatibility only, because the questions for
other methods require knowing test data during fitting.
"""
super(RidgeMulti, self).__init__(X=X, y=y, W=W, sparse=sparse)
self.sparse = sparse
if self.sparse:
assert lam != 0, "can't invert the big stuff with lambda = 0."
self.X = sp.csc_matrix(self.X)
self.Y = sp.csc_matrix(self.Y)
self.lam = lam
self.W = None # don't want to have W before solving!
self.matrix_work = None
self.verbose = verbose
self.kernelized = kernelized
def get_weights(self):
if self.sparse:
return self.W.toarray()
if not self.sparse:
return self.W
def apply_weights(self):
if self.verbose:
print("Apply weights to H(X): {}".format(time.asctime(time.localtime(time.time()))))
# Apply weights
if self.sparse:
assert type(self.W) == sp.csc_matrix or type(self.W) == sp.csr_matrix, \
"type of W is {}".format(type(self.W))
assert type(self.X) == sp.csc_matrix, \
"type of W is {}".format(type(self.X))
prod = self.X.dot(self.W)
if self.verbose:
print("Done applying weights to H(X): {}".format(time.asctime(time.localtime(time.time()))))
if type(prod) == sp.csc_matrix:
return prod.toarray()
else:
return prod
else:
if self.verbose:
print("Done applying weights to H(X): {}".format(time.asctime(time.localtime(time.time()))))
return self.X.dot(self.W)
def optimize(self):
# When solving multiclass, (X^TX + lambdaI)-1X^T is shared
# solve it once and share it with all the regressors.
# find lambda*I_D + X^T*X
if self.verbose: print("optimize: multiply matrices before inversion.")
# Get (X^TX + lambdaI)
if self.sparse:
piece_to_invert = sp.csc_matrix(sp.identity(self.d)*self.lam) + \
self.X.T.dot(self.X)
else:
piece_to_invert = np.identity(self.d)*self.lam + self.X.T.dot(self.X)
assert piece_to_invert.shape == (self.d, self.d)
# Invert (X^TX + lambdaI)
if self.verbose:
print("invert matrix:")
print("time: {}".format(time.asctime(time.localtime(time.time()))))
if self.sparse:
inverted_piece = splin.inv(piece_to_invert)
else:
inverted_piece = np.linalg.inv(piece_to_invert)
# Dot with X^T
if self.verbose:
print("time: {}".format(time.asctime(time.localtime(time.time()))))
print("dot with X^T:")
self.matrix_work = inverted_piece.dot(self.X.T)
assert self.matrix_work.shape == (self.d, self.N)
if self.verbose:
print("train the {} classifiers:".format(self.C))
# Train C classifiers.
self.W = self.matrix_work.dot(self.Y)
if self.verbose:
print("done generating weights.")
assert self.W.shape == (self.d, self.C)
return self.W
def kernelized_optimize(self):
# fact: H^T(HH^T + lambda*I_N) == (lambda*I_d + H^TH)H^T
# instead of inverting a dxd matrix, we invert an nxn matrix.
# So our ridge formula becomes:
# (lambda*I_d + H^TH)^(-1)H^T = H^T(HH^T + lambdaI_N)^(-1)
if self.sparse:
piece_to_invert = self.X.dot(self.X.T) + sp.identity(self.N)*self.lam
else:
piece_to_invert = self.X.dot(self.X.T) + np.identity(self.N)*self.lam
assert piece_to_invert.shape == (self.N, self.N) # yay!
# invert this NxN matrix.
if self.verbose:
print("invert matrix:")
print("time: {}".format(time.asctime(time.localtime(time.time()))))
if self.sparse:
inverted_piece = splin.inv(piece_to_invert)
else:
inverted_piece = np.linalg.inv(piece_to_invert)
if self.verbose:
print("done inverting via kernel trick at time: {}".format(time.asctime(time.localtime(time.time()))))
# dot with H^T.dot(y)
if self.verbose:
print("dot with H^T at time: {}".format(time.asctime(time.localtime(time.time()))))
self.W = self.X.T.dot(inverted_piece).dot(self.Y)
if self.verbose:
print("done dotting with H^T at time: {}".format(time.asctime(time.localtime(time.time()))))
assert self.W.shape == (self.d, self.C)
def predict(self):
if self.verbose:
print("prediction time.")
if self.W is None:
if self.kernelized:
self.kernelized_optimize()
else:
self.optimize()
Yhat = self.apply_weights()
assert type(Yhat) == np.ndarray
classes = np.argmax(Yhat, axis=1)
if self.sparse:
yhat = np.multiply(self.Y.toarray(), Yhat)
else:
yhat = np.multiply(self.Y, Yhat)
# collapse it into an Nx1 array:
self.yhat = np.amax(yhat, axis=1)
return classes
def run(self):
self.predict()
self.results = pd.DataFrame(self.results_row())
def loss_01(self):
return self.pred_to_01_loss(self.predict())
def results_row(self):
"""
Return a dictionary that can be put into a Pandas DataFrame.
"""
results_row = super(RidgeMulti, self).results_row()
# append on Ridge regression-specific results
more_details = {
"lambda":[self.lam],
"training SSE":[self.sse()],
"training RMSE":[self.rmse()],
"kernelized solving":[self.kernelized]
}
results_row.update(more_details)
return results_row
def sse(self):
"""
Calculate the sum of squared errors.
In class on 10/26, Sham coached us to include errors for all
classifications in our RMSE (and thus SSE) calculations.
For y = [0, 1], Y=[[0, 1], [1, 0]], Yhat = [[0.01, 0.95], [0.99, 0.03]],
SSE = sum(0.01**2 + 0.05**2 + 0.01**2 + 0.03**2) = RSS
Note: this would not be equivalent to the binary classifier, which
would only sum (0.05**2 + 0.03**2)
My formula before only used the errors for the correct class:
error = self.apply_weights() - self.Y
error = np.multiply(error, self.Y)
error = np.amax(np.abs(error), axis=1)
return error.T.dot(error)
:return: sum of squared errors for all classes for each point (float)
"""
if self.sparse:
error = self.apply_weights() - self.Y.toarray()
assert type(error) == np.ndarray
else:
error = self.apply_weights() - self.Y
return np.multiply(error, error).sum()
def rmse(self):
"""
For the binary classifier, RMSE = (SSE/N)**0.5.
For the multiclass one, SSE is counting errors for all classifiers.
We could use (self.sse()/self.N/self.C)**0.5 to make the RMSE
calcs more similar between the binary and multi-class classifiers,
but they still are not the same, so I won't.
:return: RMSE (float)
"""
return(self.sse()/self.N)**0.5
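# --- Editor-added sanity check (illustrative, not part of the original file) ---
# RidgeMulti.kernelized_optimize relies on the identity
#     (lambda*I_d + X^T X)^(-1) X^T  ==  X^T (X X^T + lambda*I_N)^(-1),
# trading a d x d inversion for an N x N one. A quick dense-numpy verification:
def _kernel_trick_identity_check(n=5, d=8, lam=0.3):
    """Verify the primal/dual ridge identity on random data (hypothetical sizes)."""
    rng = np.random.RandomState(0)
    X = rng.randn(n, d)
    primal = np.linalg.inv(lam * np.eye(d) + X.T.dot(X)).dot(X.T)
    dual = X.T.dot(np.linalg.inv(X.dot(X.T) + lam * np.eye(n)))
    return np.allclose(primal, dual)  # expected to be True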
class RidgeBinary(ClassificationBase):
"""
Train *one* ridge model.
"""
def __init__(self, X, y, lam, w=None, test_X=None, test_y = None):
"""
test_X, test_y are for compatibility only, because the questions for
other methods require knowing test data during fitting.
"""
self.X = X
self.N, self.d = X.shape
self.y = y
self.lam = lam
if w is None:
self.w = np.zeros(self.d)
assert self.w.shape == (self.d, )
self.threshold = None
self.verbose = False  # predict() checks this flag, but it was never initialized
def get_weights(self):
return self.w
def apply_weights(self):
return self.X.dot(self.w)
def run(self):
# find lambda*I_D + X^T*X
piece_to_invert = np.identity(self.d)*self.lam + self.X.T.dot(self.X)
inverted_piece = np.linalg.inv(piece_to_invert)
solution = inverted_piece.dot(self.X.T)
solution = solution.dot(self.y)
solution = np.squeeze(np.asarray(solution))
assert solution.shape == (self.d, )
self.w = solution
self.results = pd.DataFrame(self.results_row())
def predict(self, threshold):
if self.verbose:
print("dot X with W to make predictions. {}".format(time.asctime(time.localtime(time.time()))))
# TODO: having a default cutoff is a terrible idea!
Yhat = self.X.dot(self.w)
if self.verbose:
print("done dotting. {}".format(time.asctime(time.localtime(time.time()))))
classes = np.zeros(self.N)
classes[Yhat > threshold] = 1
return classes
def loss_01(self, threshold=None):
if threshold is None:
threshold=0.5
print("WARNING: 0/1 loss is calculated for threshold=0.5, which "
"is very likely to be a poor choice!!")
return self.pred_to_01_loss(self.predict(threshold))
def results_row(self):
"""
Return a dictionary that can be put into a Pandas DataFrame.
"""
results_row = super(RidgeBinary, self).results_row()
# append on logistic regression-specific results
more_details = {
"lambda":[self.lam],
"SSE":[self.sse()],
"RMSE":[self.rmse()],
}
results_row.update(more_details)
return results_row
def sse(self):
# sse = RSS
error = self.apply_weights() - self.y
return error.T.dot(error)
def rmse(self):
return(self.sse()/self.N)**0.5
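# --- Editor-added illustrative sketch (hypothetical data, not part of the original file) ---
# RidgeBinary.run computes the closed-form solution w = (lambda*I_d + X^T X)^(-1) X^T y.
# The explicit inverse is kept above for clarity; np.linalg.solve gives the same
# weights and is usually the numerically safer route:
def _ridge_binary_closed_form_sketch():
    """Compare the explicit-inverse formula with np.linalg.solve on toy data."""
    rng = np.random.RandomState(0)
    X = rng.randn(50, 4)
    y = rng.randint(0, 2, size=50).astype(float)
    lam = 1.0
    w_inv = np.linalg.inv(lam * np.eye(4) + X.T.dot(X)).dot(X.T).dot(y)
    w_solve = np.linalg.solve(lam * np.eye(4) + X.T.dot(X), X.T.dot(y))
    return np.allclose(w_inv, w_solve)  # expected to be True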
class RidgeRegularizationPath:
""" DEPRECATED """
# TODO: refactor so it uses HyperparameterSweep class
def __init__(self, train_X, train_y, lam_max, frac_decrease, steps,
val_X, val_y):
self.train_X = train_X
self.train_y = train_y
self.train_N, self.train_d = train_X.shape
self.lam_max = lam_max
self.frac_decrease = frac_decrease
self.steps = steps
self.val_X = val_X
self.val_y = val_y
def train_with_lam(self, lam):
rr = RidgeBinary(self.train_X, self.train_y, lam=lam)
rr.run()  # RidgeBinary defines run(), not solve()
sse_train = rr.sse()
# replace the y values with the validation y and get the val sse
rr.X = self.val_X
rr.y = self.val_y
sse_val = rr.sse()
assert rr.w.shape == (self.train_d,)  # w is a dense 1-d numpy array
return rr.w, sse_train, sse_val
def walk_path(self):
# protect the first value of lambda.
lam = self.lam_max/self.frac_decrease
# initialize a dataframe to store results in
results = pd.DataFrame()
for c in range(0, self.steps):
lam = lam*self.frac_decrease
print("Loop {}: solving weights. Lambda = {}".format(c+1, lam))
w, sse_train, sse_val = self.train_with_lam(lam)
one_val = pd.DataFrame({"lam":[lam],
"weights":[w],
"SSE (training)": [sse_train],
"SSE (validaton)": [sse_val]})
results = pd.concat([results, one_val])
self.results_df = results
| mit |
JosPolfliet/pandas-profiling | pandas_profiling/describe.py | 1 | 15004 | # -*- coding: utf-8 -*-
"""Compute statistical description of datasets"""
import multiprocessing
import itertools
from functools import partial
import numpy as np
import pandas as pd
import matplotlib
from pkg_resources import resource_filename
import pandas_profiling.formatters as formatters
import pandas_profiling.base as base
from pandas_profiling.plot import histogram, mini_histogram
def describe_numeric_1d(series, **kwargs):
"""Compute summary statistics of a numerical (`TYPE_NUM`) variable (a Series).
Also create histograms (mini an full) of its distribution.
Parameters
----------
series : Series
The variable to describe.
Returns
-------
Series
The description of the variable as a Series with index being stats keys.
"""
# Format a number as a percentage. For example 0.25 will be turned to 25%.
_percentile_format = "{:.0%}"
stats = dict()
stats['type'] = base.TYPE_NUM
stats['mean'] = series.mean()
stats['std'] = series.std()
stats['variance'] = series.var()
stats['min'] = series.min()
stats['max'] = series.max()
stats['range'] = stats['max'] - stats['min']
# To avoid to compute it several times
_series_no_na = series.dropna()
for percentile in np.array([0.05, 0.25, 0.5, 0.75, 0.95]):
# The dropna() is a workaround for https://github.com/pydata/pandas/issues/13098
stats[_percentile_format.format(percentile)] = _series_no_na.quantile(percentile)
stats['iqr'] = stats['75%'] - stats['25%']
stats['kurtosis'] = series.kurt()
stats['skewness'] = series.skew()
stats['sum'] = series.sum()
stats['mad'] = series.mad()
stats['cv'] = stats['std'] / stats['mean'] if stats['mean'] else np.NaN
stats['n_zeros'] = (len(series) - np.count_nonzero(series))
stats['p_zeros'] = stats['n_zeros'] * 1.0 / len(series)
# Histograms
stats['histogram'] = histogram(series, **kwargs)
stats['mini_histogram'] = mini_histogram(series, **kwargs)
return pd.Series(stats, name=series.name)
def describe_date_1d(series):
"""Compute summary statistics of a date (`TYPE_DATE`) variable (a Series).
Also create histograms (mini an full) of its distribution.
Parameters
----------
series : Series
The variable to describe.
Returns
-------
Series
The description of the variable as a Series with index being stats keys.
"""
stats = dict()
stats['type'] = base.TYPE_DATE
stats['min'] = series.min()
stats['max'] = series.max()
stats['range'] = stats['max'] - stats['min']
# Histograms
stats['histogram'] = histogram(series)
stats['mini_histogram'] = mini_histogram(series)
return pd.Series(stats, name=series.name)
def describe_categorical_1d(series):
"""Compute summary statistics of a categorical (`TYPE_CAT`) variable (a Series).
Parameters
----------
series : Series
The variable to describe.
Returns
-------
Series
The description of the variable as a Series with index being stats keys.
"""
# Only run if at least 1 non-missing value
value_counts, distinct_count = base.get_groupby_statistic(series)
top, freq = value_counts.index[0], value_counts.iloc[0]
names = []
result = []
if base.get_vartype(series) == base.TYPE_CAT:
names += ['top', 'freq', 'type']
result += [top, freq, base.TYPE_CAT]
return pd.Series(result, index=names, name=series.name)
def describe_boolean_1d(series):
"""Compute summary statistics of a boolean (`TYPE_BOOL`) variable (a Series).
Parameters
----------
series : Series
The variable to describe.
Returns
-------
Series
The description of the variable as a Series with index being stats keys.
"""
value_counts, distinct_count = base.get_groupby_statistic(series)
top, freq = value_counts.index[0], value_counts.iloc[0]
# The mean of boolean is an interesting information
mean = series.mean()
names = []
result = []
names += ['top', 'freq', 'type', 'mean']
result += [top, freq, base.TYPE_BOOL, mean]
return pd.Series(result, index=names, name=series.name)
def describe_constant_1d(series):
"""Compute summary statistics of a constant (`S_TYPE_CONST`) variable (a Series).
Parameters
----------
series : Series
The variable to describe.
Returns
-------
Series
The description of the variable as a Series with index being stats keys.
"""
return pd.Series([base.S_TYPE_CONST], index=['type'], name=series.name)
def describe_unique_1d(series):
"""Compute summary statistics of a unique (`S_TYPE_UNIQUE`) variable (a Series).
Parameters
----------
series : Series
The variable to describe.
Returns
-------
Series
The description of the variable as a Series with index being stats keys.
"""
return pd.Series([base.S_TYPE_UNIQUE], index=['type'], name=series.name)
def describe_supported(series, **kwargs):
"""Compute summary statistics of a supported variable (a Series).
Parameters
----------
series : Series
The variable to describe.
Returns
-------
Series
The description of the variable as a Series with index being stats keys.
"""
leng = len(series) # number of observations in the Series
count = series.count() # number of non-NaN observations in the Series
n_infinite = count - series.count() # number of infinite observations in the Series
value_counts, distinct_count = base.get_groupby_statistic(series)
if count > distinct_count > 1:
mode = series.mode().iloc[0]
else:
mode = series[0]
results_data = {'count': count,
'distinct_count': distinct_count,
'p_missing': 1 - count * 1.0 / leng,
'n_missing': leng - count,
'p_infinite': n_infinite * 1.0 / leng,
'n_infinite': n_infinite,
'is_unique': distinct_count == leng,
'mode': mode,
'p_unique': distinct_count * 1.0 / leng}
try:
# pandas 0.17 onwards
results_data['memorysize'] = series.memory_usage()
except:
results_data['memorysize'] = 0
return pd.Series(results_data, name=series.name)
def describe_unsupported(series, **kwargs):
"""Compute summary statistics of a unsupported (`S_TYPE_UNSUPPORTED`) variable (a Series).
Parameters
----------
series : Series
The variable to describe.
Returns
-------
Series
The description of the variable as a Series with index being stats keys.
"""
leng = len(series) # number of observations in the Series
count = series.count() # number of non-NaN observations in the Series
n_infinite = count - series.count() # number of infinite observations in the Series
results_data = {'count': count,
'p_missing': 1 - count * 1.0 / leng,
'n_missing': leng - count,
'p_infinite': n_infinite * 1.0 / leng,
'n_infinite': n_infinite,
'type': base.S_TYPE_UNSUPPORTED}
try:
# pandas 0.17 onwards
results_data['memorysize'] = series.memory_usage()
except:
results_data['memorysize'] = 0
return pd.Series(results_data, name=series.name)
def describe_1d(data, **kwargs):
"""Compute summary statistics of a variable (a Series).
The description is different according to the type of the variable.
However a set of common stats is also computed.
Parameters
----------
series : Series
The variable to describe.
Returns
-------
Series
The description of the variable as a Series with index being stats keys.
"""
# Replace infinite values with NaNs to avoid issues with
# histograms later.
data.replace(to_replace=[np.inf, np.NINF, np.PINF], value=np.nan, inplace=True)
result = pd.Series({}, name=data.name)
vartype = base.get_vartype(data)
if vartype == base.S_TYPE_UNSUPPORTED:
result = result.append(describe_unsupported(data))
else:
result = result.append(describe_supported(data))
if vartype == base.S_TYPE_CONST:
result = result.append(describe_constant_1d(data))
elif vartype == base.TYPE_BOOL:
result = result.append(describe_boolean_1d(data))
elif vartype == base.TYPE_NUM:
result = result.append(describe_numeric_1d(data, **kwargs))
elif vartype == base.TYPE_DATE:
result = result.append(describe_date_1d(data))
elif vartype == base.S_TYPE_UNIQUE:
result = result.append(describe_unique_1d(data))
else:
# TYPE_CAT
result = result.append(describe_categorical_1d(data))
return result
def multiprocess_func(x, **kwargs):
return x[0], describe_1d(x[1], **kwargs)
def describe(df, bins=10, check_correlation=True, correlation_threshold=0.9, correlation_overrides=None, check_recoded=False, pool_size=multiprocessing.cpu_count(), **kwargs):
"""Generates a dict containing summary statistics for a given dataset stored as a pandas `DataFrame`.
    Used as is, it will output its content as an HTML report in a Jupyter notebook.
Parameters
----------
df : DataFrame
Data to be analyzed
bins : int
Number of bins in histogram.
The default is 10.
check_correlation : boolean
Whether or not to check correlation.
It's `True` by default.
correlation_threshold: float
Threshold to determine if the variable pair is correlated.
The default is 0.9.
correlation_overrides : list
Variable names not to be rejected because they are correlated.
There is no variable in the list (`None`) by default.
check_recoded : boolean
Whether or not to check recoded correlation (memory heavy feature).
Since it's an expensive computation it can be activated for small datasets.
        `check_correlation` must be true for this check to be performed.
It's `False` by default.
pool_size : int
Number of workers in thread pool
The default is equal to the number of CPU.
Returns
-------
dict
Containing the following keys:
* table: general statistics on the dataset
* variables: summary statistics for each variable
* freq: frequency table
Notes:
------
* The section dedicated to check the correlation should be externalized
"""
if not isinstance(df, pd.DataFrame):
raise TypeError("df must be of type pandas.DataFrame")
if df.empty:
raise ValueError("df can not be empty")
try:
# reset matplotlib style before use
# Fails in matplotlib 1.4.x so plot might look bad
matplotlib.style.use("default")
except:
pass
matplotlib.style.use(resource_filename(__name__, "pandas_profiling.mplstyle"))
# Clearing the cache before computing stats
base.clear_cache()
if not pd.Index(np.arange(0, len(df))).equals(df.index):
# Treat index as any other column
df = df.reset_index()
kwargs.update({'bins': bins})
# Describe all variables in a univariate way
if pool_size == 1:
local_multiprocess_func = partial(multiprocess_func, **kwargs)
ldesc = {col: s for col, s in map(local_multiprocess_func, df.iteritems())}
else:
pool = multiprocessing.Pool(pool_size)
local_multiprocess_func = partial(multiprocess_func, **kwargs)
ldesc = {col: s for col, s in pool.map(local_multiprocess_func, df.iteritems())}
pool.close()
# Get correlations
dfcorrPear = df.corr(method="pearson")
dfcorrSpear = df.corr(method="spearman")
# Check correlations between variable
if check_correlation is True:
''' TODO: corr(x,y) > 0.9 and corr(y,z) > 0.9 does not imply corr(x,z) > 0.9
If x~y and y~z but not x~z, it would be better to delete only y
Better way would be to find out which variable causes the highest increase in multicollinearity.
'''
corr = dfcorrPear.copy()
for x, corr_x in corr.iterrows():
if correlation_overrides and x in correlation_overrides:
continue
for y, corr in corr_x.iteritems():
if x == y: break
if corr > correlation_threshold:
ldesc[x] = pd.Series(['CORR', y, corr], index=['type', 'correlation_var', 'correlation'])
if check_recoded:
categorical_variables = [(name, data) for (name, data) in df.iteritems() if base.get_vartype(data)=='CAT']
for (name1, data1), (name2, data2) in itertools.combinations(categorical_variables, 2):
if correlation_overrides and name1 in correlation_overrides:
continue
confusion_matrix=pd.crosstab(data1,data2)
if confusion_matrix.values.diagonal().sum() == len(df):
ldesc[name1] = pd.Series(['RECODED', name2], index=['type', 'correlation_var'])
# Convert ldesc to a DataFrame
names = []
ldesc_indexes = sorted([x.index for x in ldesc.values()], key=len)
for idxnames in ldesc_indexes:
for name in idxnames:
if name not in names:
names.append(name)
variable_stats = pd.concat(ldesc, join_axes=pd.Index([names]), axis=1)
variable_stats.columns.names = df.columns.names
# General statistics
table_stats = {}
table_stats['n'] = len(df)
table_stats['nvar'] = len(df.columns)
table_stats['total_missing'] = variable_stats.loc['n_missing'].sum() / (table_stats['n'] * table_stats['nvar'])
unsupported_columns = variable_stats.transpose()[variable_stats.transpose().type != base.S_TYPE_UNSUPPORTED].index.tolist()
table_stats['n_duplicates'] = sum(df.duplicated(subset=unsupported_columns)) if len(unsupported_columns) > 0 else 0
memsize = df.memory_usage(index=True).sum()
table_stats['memsize'] = formatters.fmt_bytesize(memsize)
table_stats['recordsize'] = formatters.fmt_bytesize(memsize / table_stats['n'])
table_stats.update({k: 0 for k in ("NUM", "DATE", "CONST", "CAT", "UNIQUE", "CORR", "RECODED", "BOOL", "UNSUPPORTED")})
table_stats.update(dict(variable_stats.loc['type'].value_counts()))
table_stats['REJECTED'] = table_stats['CONST'] + table_stats['CORR'] + table_stats['RECODED']
return {
'table': table_stats,
'variables': variable_stats.T,
'freq': {k: (base.get_groupby_statistic(df[k])[0] if variable_stats[k].type != base.S_TYPE_UNSUPPORTED else None) for k in df.columns},
'correlations': {'pearson': dfcorrPear, 'spearman': dfcorrSpear}
}
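if __name__ == "__main__":
    # Minimal usage sketch (illustrative only). The column names below are
    # invented for the example; running this requires the pandas_profiling
    # package resources (the bundled mplstyle file) to be importable.
    _demo = pd.DataFrame({"x": [1.0, 2.0, 2.0, np.nan],
                          "y": ["a", "b", "a", "b"]})
    _report = describe(_demo, bins=5, check_correlation=False, pool_size=1)
    print(_report["table"])      # dataset-level statistics
    print(_report["variables"])  # per-variable summary statistics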
| mit |
pradyu1993/scikit-learn | sklearn/ensemble/tests/test_base.py | 12 | 1222 | """
Testing for the base module (sklearn.ensemble.base).
"""
# Authors: Gilles Louppe
# License: BSD 3
from numpy.testing import assert_equal
from nose.tools import assert_raises, assert_true
from sklearn.ensemble import BaseEnsemble
from sklearn.tree import DecisionTreeClassifier
def test_base():
"""Check BaseEnsemble methods."""
tree = DecisionTreeClassifier()
ensemble = BaseEnsemble(base_estimator=tree, n_estimators=3)
ensemble._make_estimator()
ensemble._make_estimator()
ensemble._make_estimator()
ensemble._make_estimator(append=False)
assert_equal(3, len(ensemble))
assert_equal(3, len(ensemble.estimators_))
assert_true(isinstance(ensemble[0], DecisionTreeClassifier))
def test_error():
"""Check that proper errors are triggered."""
def instantiate(class_name, **params):
return class_name(**params)
base_estimator = object()
assert_raises(TypeError, instantiate, class_name=BaseEnsemble,
base_estimator=base_estimator, n_estimators=1)
base_estimator = DecisionTreeClassifier()
assert_raises(ValueError, instantiate, class_name=BaseEnsemble,
base_estimator=base_estimator, n_estimators=-1)
| bsd-3-clause |
ccauet/scikit-optimize | skopt/optimizer/gp.py | 1 | 11181 | """Gaussian process-based minimization algorithms."""
import numpy as np
from sklearn.utils import check_random_state
from .base import base_minimize
from ..learning import GaussianProcessRegressor
from ..learning.gaussian_process.kernels import ConstantKernel
from ..learning.gaussian_process.kernels import HammingKernel
from ..learning.gaussian_process.kernels import Matern
from ..space import check_dimension
from ..space import Categorical
from ..space import Space
def gp_minimize(func, dimensions, base_estimator=None,
n_calls=100, n_random_starts=10,
acq_func="gp_hedge", acq_optimizer="lbfgs", x0=None, y0=None,
random_state=None, verbose=False, callback=None,
n_points=10000, n_restarts_optimizer=5, xi=0.01, kappa=1.96,
noise="gaussian", n_jobs=1):
"""Bayesian optimization using Gaussian Processes.
If every function evaluation is expensive, for instance
when the parameters are the hyperparameters of a neural network
and the function evaluation is the mean cross-validation score across
ten folds, optimizing the hyperparameters by standard optimization
    routines would take forever!
The idea is to approximate the function using a Gaussian process.
In other words the function values are assumed to follow a multivariate
gaussian. The covariance of the function values are given by a
GP kernel between the parameters. Then a smart choice to choose the
next parameter to evaluate can be made by the acquisition function
over the Gaussian prior which is much quicker to evaluate.
The total number of evaluations, `n_calls`, are performed like the
following. If `x0` is provided but not `y0`, then the elements of `x0`
are first evaluated, followed by `n_random_starts` evaluations.
Finally, `n_calls - len(x0) - n_random_starts` evaluations are
made guided by the surrogate model. If `x0` and `y0` are both
provided then `n_random_starts` evaluations are first made then
`n_calls - n_random_starts` subsequent evaluations are made
guided by the surrogate model.
Parameters
----------
* `func` [callable]:
Function to minimize. Should take a array of parameters and
return the function values.
* `dimensions` [list, shape=(n_dims,)]:
List of search space dimensions.
Each search dimension can be defined either as
        - a `(lower_bound, upper_bound)` tuple (for `Real` or `Integer`
          dimensions),
        - a `(lower_bound, upper_bound, "prior")` tuple (for `Real`
dimensions),
- as a list of categories (for `Categorical` dimensions), or
- an instance of a `Dimension` object (`Real`, `Integer` or
`Categorical`).
NOTE: The upper and lower bounds are inclusive for `Integer`
dimensions.
* `base_estimator` [a Gaussian process estimator]:
The Gaussian process estimator to use for optimization.
By default, a Matern kernel is used with the following
hyperparameters tuned.
- All the length scales of the Matern kernel.
- The covariance amplitude that each element is multiplied with.
- Noise that is added to the matern kernel. The noise is assumed
to be iid gaussian.
* `n_calls` [int, default=100]:
Number of calls to `func`.
* `n_random_starts` [int, default=10]:
Number of evaluations of `func` with random initialization points
before approximating the `func` with `base_estimator`.
    * `acq_func` [string, default=`"gp_hedge"`]:
Function to minimize over the gaussian prior. Can be either
- `"LCB"` for lower confidence bound.
- `"EI"` for negative expected improvement.
- `"PI"` for negative probability of improvement.
- `"gp_hedge"` Probabilistically choose one of the above three
acquisition functions at every iteration. The weightage
given to these gains can be set by `\eta` through `acq_func_kwargs`.
- The gains `g_i` are initialized to zero.
- At every iteration,
            - Each acquisition function is optimised independently to propose a
candidate point `X_i`.
- Out of all these candidate points, the next point `X_best` is
chosen by `softmax(\eta g_i)`
- After fitting the surrogate model with `(X_best, y_best)`,
the gains are updated such that `g_i -= \mu(X_i)`
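              For example, with gains `g = (0, -1, -2)` and `\eta = 1`, the
              softmax weights are roughly `(0.67, 0.24, 0.09)`, so the
              acquisition function with the largest gain is the most likely
              to propose the next point.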
Reference: https://dslpitt.org/uai/papers/11/p327-hoffman.pdf
* `acq_optimizer` [string, `"sampling"` or `"lbfgs"`, default=`"lbfgs"`]:
        Method to minimize the acquisition function. The fit model
is updated with the optimal value obtained by optimizing `acq_func`
with `acq_optimizer`.
The `acq_func` is computed at `n_points` sampled randomly.
- If set to `"sampling"`, then the point among these `n_points`
where the `acq_func` is minimum is the next candidate minimum.
- If set to `"lbfgs"`, then
          - The `n_restarts_optimizer` points at which the acquisition
            function is lowest are taken as start points.
- `"lbfgs"` is run for 20 iterations with these points as initial
points to find local minima.
- The optimal of these local minima is used to update the prior.
* `x0` [list, list of lists or `None`]:
Initial input points.
- If it is a list of lists, use it as a list of input points.
- If it is a list, use it as a single initial input point.
- If it is `None`, no initial input points are used.
* `y0` [list, scalar or `None`]
Evaluation of initial input points.
- If it is a list, then it corresponds to evaluations of the function
at each element of `x0` : the i-th element of `y0` corresponds
to the function evaluated at the i-th element of `x0`.
- If it is a scalar, then it corresponds to the evaluation of the
function at `x0`.
- If it is None and `x0` is provided, then the function is evaluated
at each element of `x0`.
* `random_state` [int, RandomState instance, or None (default)]:
Set random state to something other than None for reproducible
results.
* `verbose` [boolean, default=False]:
Control the verbosity. It is advised to set the verbosity to True
for long optimization runs.
* `callback` [callable, list of callables, optional]
If callable then `callback(res)` is called after each call to `func`.
If list of callables, then each callable in the list is called.
* `n_points` [int, default=10000]:
Number of points to sample to determine the next "best" point.
Useless if acq_optimizer is set to `"lbfgs"`.
* `n_restarts_optimizer` [int, default=5]:
The number of restarts of the optimizer when `acq_optimizer`
is `"lbfgs"`.
* `kappa` [float, default=1.96]:
Controls how much of the variance in the predicted values should be
taken into account. If set to be very high, then we are favouring
exploration over exploitation and vice versa.
Used when the acquisition is `"LCB"`.
* `xi` [float, default=0.01]:
Controls how much improvement one wants over the previous best
values. Used when the acquisition is either `"EI"` or `"PI"`.
* `noise` [float, default="gaussian"]:
- Use noise="gaussian" if the objective returns noisy observations.
The noise of each observation is assumed to be iid with
mean zero and a fixed variance.
- If the variance is known before-hand, this can be set directly
to the variance of the noise.
- Set this to a value close to zero (1e-10) if the function is
noise-free. Setting to zero might cause stability issues.
* `n_jobs` [int, default=1]
Number of cores to run in parallel while running the lbfgs
optimizations over the acquisition function. Valid only
when `acq_optimizer` is set to "lbfgs."
Defaults to 1 core. If `n_jobs=-1`, then number of jobs is set
to number of cores.
Returns
-------
* `res` [`OptimizeResult`, scipy object]:
The optimization result returned as a OptimizeResult object.
Important attributes are:
- `x` [list]: location of the minimum.
- `fun` [float]: function value at the minimum.
- `models`: surrogate models used for each iteration.
- `x_iters` [list of lists]: location of function evaluation for each
iteration.
- `func_vals` [array]: function value for each iteration.
- `space` [Space]: the optimization space.
        - `specs` [dict]: the call specifications.
- `rng` [RandomState instance]: State of the random state
at the end of minimization.
For more details related to the OptimizeResult object, refer
http://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.OptimizeResult.html
"""
# Check params
rng = check_random_state(random_state)
dim_types = [check_dimension(d) for d in dimensions]
is_cat = all([isinstance(check_dimension(d), Categorical)
for d in dim_types])
if is_cat:
transformed_dims = [check_dimension(d, transform="identity")
for d in dimensions]
else:
transformed_dims = []
for dim_type, dim in zip(dim_types, dimensions):
if isinstance(dim_type, Categorical):
transformed_dims.append(
check_dimension(dim, transform="onehot")
)
# To make sure that GP operates in the [0, 1] space
else:
transformed_dims.append(
check_dimension(dim, transform="normalize")
)
space = Space(transformed_dims)
# Default GP
if base_estimator is None:
cov_amplitude = ConstantKernel(1.0, (0.01, 1000.0))
if is_cat:
other_kernel = HammingKernel(
length_scale=np.ones(space.transformed_n_dims))
acq_optimizer = "sampling"
else:
other_kernel = Matern(
length_scale=np.ones(space.transformed_n_dims),
length_scale_bounds=[(0.01, 100)] * space.transformed_n_dims,
nu=2.5)
base_estimator = GaussianProcessRegressor(
kernel=cov_amplitude * other_kernel,
normalize_y=True, random_state=rng, alpha=0.0, noise=noise,
n_restarts_optimizer=2)
return base_minimize(
func, dimensions, base_estimator=base_estimator,
acq_func=acq_func,
xi=xi, kappa=kappa, acq_optimizer=acq_optimizer, n_calls=n_calls,
n_points=n_points, n_random_starts=n_random_starts,
n_restarts_optimizer=n_restarts_optimizer,
x0=x0, y0=y0, random_state=random_state, verbose=verbose,
callback=callback, n_jobs=n_jobs)
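if __name__ == "__main__":
    # Minimal usage sketch (illustrative only): minimize a noise-free 1-d
    # quadratic over the real interval [-2.0, 2.0]. The objective function is
    # an arbitrary example, not part of scikit-optimize itself.
    res = gp_minimize(lambda x: (x[0] - 1.0) ** 2,
                      dimensions=[(-2.0, 2.0)],
                      n_calls=15,
                      n_random_starts=5,
                      noise=1e-10,
                      random_state=0)
    print("best x:", res.x, "best objective:", res.fun)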
| bsd-3-clause |
Varentsov/servo | tests/heartbeats/process_logs.py | 139 | 16143 | #!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import argparse
import matplotlib.pyplot as plt
import numpy as np
import os
from os import path
import sys
import warnings
HB_LOG_IDX_START_TIME = 7
HB_LOG_IDX_END_TIME = HB_LOG_IDX_START_TIME + 1
HB_LOG_IDX_START_ENERGY = 14
HB_LOG_IDX_END_ENERGY = HB_LOG_IDX_START_ENERGY + 1
ENERGY_PROFILER_NAME = 'ApplicationHeartbeat'
SUMMARY_OUTPUT = "summary.txt"
SUMMARY_TIME_IDX = 8
SUMMARY_ENERGY_IDX = SUMMARY_TIME_IDX + 1
SUMMARY_POWER_IDX = SUMMARY_ENERGY_IDX + 1
def autolabel(rects, ax):
"""Attach some text labels.
"""
for rect in rects:
ax.text(rect.get_x() + rect.get_width() / 2., 1.05 * rect.get_height(), '', ha='center', va='bottom')
def plot_raw_totals(config, plot_data, max_time, max_time_std, max_energy, max_energy_std, output_dir, normalize):
"""Plot the raw totals for a configuration.
Keyword arguments:
config -- configuration name
plot_data -- (profiler name, total_time, total_time_std, total_energy, total_energy_std)
max_time, max_time_std, max_energy, max_energy_std -- single values
normalize -- True/False
"""
plot_data = sorted(plot_data)
keys = [p for (p, tt, tts, te, tes) in plot_data]
total_times = [tt for (p, tt, tts, te, tes) in plot_data]
total_times_std = [tts for (p, tt, tts, te, tes) in plot_data]
total_energies = [te for (p, tt, tts, te, tes) in plot_data]
total_energies_std = [tes for (p, tt, tts, te, tes) in plot_data]
fig, ax1 = plt.subplots()
ind = np.arange(len(keys)) # the x locations for the groups
width = 0.35 # the width of the bars
# add some text for labels, title and axes ticks
ax1.set_title('Time/Energy Data for Configuration ' + config)
ax1.set_xticks(ind + width)
ax1.set_xticklabels(keys, rotation=45)
fig.set_tight_layout(True)
fig.set_size_inches(len(plot_data) / 1.5, 8)
ax2 = ax1.twinx()
# Normalize
if normalize:
total_times_std /= np.sum(total_times)
total_times /= np.sum(total_times)
total_energies_std /= np.sum(total_energies)
total_energies /= np.sum(total_energies)
ax1.set_ylabel('Time (Normalized)')
ax2.set_ylabel('Energy (Normalized)')
else:
# set time in us instead of ns
total_times_std /= np.array(1000000.0)
total_times /= np.array(1000000.0)
total_energies_std /= np.array(1000000.0)
total_energies /= np.array(1000000.0)
ax1.set_ylabel('Time (ms)')
ax2.set_ylabel('Energy (Joules)')
rects1 = ax1.bar(ind, total_times, width, color='r', yerr=total_times_std)
rects2 = ax2.bar(ind + width, total_energies, width, color='y', yerr=total_energies_std)
ax1.legend([rects1[0], rects2[0]], ['Time', 'Energy'])
# set axis
x1, x2, y1, y2 = plt.axis()
if normalize:
ax1.set_ylim(ymin=0, ymax=1)
ax2.set_ylim(ymin=0, ymax=1)
else:
ax1.set_ylim(ymin=0, ymax=((max_time + max_time_std) * 1.25 / 1000000.0))
ax2.set_ylim(ymin=0, ymax=((max_energy + max_energy_std) * 1.25 / 1000000.0))
autolabel(rects1, ax1)
autolabel(rects2, ax2)
# plt.show()
plt.savefig(path.join(output_dir, config + ".png"))
plt.close(fig)
def create_raw_total_data(config_data):
"""Get the raw data to plot for a configuration
Return: [(profiler, time_mean, time_stddev, energy_mean, energy_stddev)]
Keyword arguments:
config_data -- (trial, trial_data)
"""
# We can't assume that the same number of heartbeats are always issued across trials
# key: profiler name; value: list of timing sums for each trial
profiler_total_times = {}
# key: profiler name; value: list of energy sums for each trial
profiler_total_energies = {}
for (t, td) in config_data:
for (profiler, ts, te, es, ee) in td:
# sum the total times and energies for each profiler in this trial
total_time = np.sum(te - ts)
total_energy = np.sum(ee - es)
# add to list to be averaged later
time_list = profiler_total_times.get(profiler, [])
time_list.append(total_time)
profiler_total_times[profiler] = time_list
energy_list = profiler_total_energies.get(profiler, [])
energy_list.append(total_energy)
profiler_total_energies[profiler] = energy_list
# Get mean and stddev for time and energy totals
return [(profiler,
np.mean(profiler_total_times[profiler]),
np.std(profiler_total_times[profiler]),
np.mean(profiler_total_energies[profiler]),
np.std(profiler_total_energies[profiler]))
for profiler in profiler_total_times.keys()]
def plot_all_raw_totals(config_list, output_dir):
"""Plot column charts of the raw total time/energy spent in each profiler category.
Keyword arguments:
config_list -- [(config, result of process_config_dir(...))]
output_dir -- where to write plots to
"""
raw_total_norm_out_dir = path.join(output_dir, 'raw_totals_normalized')
os.makedirs(raw_total_norm_out_dir)
raw_total_out_dir = path.join(output_dir, 'raw_totals')
os.makedirs(raw_total_out_dir)
# (name, (profiler, (time_mean, time_stddev, energy_mean, energy_stddev)))
raw_totals_data = [(config, create_raw_total_data(config_data)) for (config, config_data) in config_list]
mean_times = []
mean_times_std = []
mean_energies = []
mean_energies_std = []
for profiler_tup in [config_tup[1] for config_tup in raw_totals_data]:
for (p, tt, tts, te, tes) in profiler_tup:
mean_times.append(tt)
mean_times_std.append(tts)
mean_energies.append(te)
mean_energies_std.append(tes)
# get consistent max time/energy values across plots
max_t = np.max(mean_times)
max_t_std = np.max(mean_times_std)
max_e = np.max(mean_energies)
max_e_std = np.max(mean_energies_std)
[plot_raw_totals(data[0], data[1], max_t, max_t_std, max_e, max_e_std, raw_total_norm_out_dir, True)
for data in raw_totals_data]
[plot_raw_totals(data[0], data[1], max_t, max_t_std, max_e, max_e_std, raw_total_out_dir, False)
for data in raw_totals_data]
def plot_trial_time_series(config, trial, trial_data, max_end_time, max_power, output_dir):
"""Plot time series for a single trial.
Keyword arguments:
config -- the config name
trial -- the trial name
trial_data -- [(profiler, [start times], [end times], [start energies], [end energies])]
max_end_time -- single value to use as max X axis value (for consistency across trials)
output_dir -- the output directory
"""
# TODO: Some profilers may have parallel tasks - need to identify this on plots
max_end_time = max_end_time / 1000000.0
trial_data = sorted(trial_data)
fig, ax1 = plt.subplots()
keys = [p for (p, ts, te, es, ee) in trial_data]
# add some text for labels, title and axes ticks
ax1.set_title('Profiler Activity for ' + config + ', ' + trial)
ax1.set_xlabel('Time (ms)')
ax1.grid(True)
width = 8 # the width of the bars
ax1.set_yticks(10 * np.arange(1, len(keys) + 2))
ax1.set_yticklabels(keys)
ax1.set_ylim(ymin=0, ymax=((len(trial_data) + 1) * 10))
ax1.set_xlim(xmin=0, xmax=max_end_time)
fig.set_tight_layout(True)
fig.set_size_inches(16, len(trial_data) / 3)
i = 10
for (p, ts, te, es, ee) in trial_data:
xranges = [(ts[j] / 1000000.0, (te[j] - ts[j]) / 1000000.0) for j in xrange(len(ts))]
ax1.broken_barh(xranges, (i - 0.5 * width, width))
i += 10
# place a vbar at the final time for this trial
last_profiler_times = map(np.nanmax, filter(lambda x: len(x) > 0, [te for (p, ts, te, es, ee) in trial_data]))
plt.axvline(np.max(last_profiler_times) / 1000000.0, color='black')
power_times = []
power_values = []
for (p, ts, te, es, ee) in trial_data:
if p == ENERGY_PROFILER_NAME:
power_times = te / 1000000.0
power_values = (ee - es) / ((te - ts) / 1000.0)
ax2 = ax1.twinx()
ax2.set_xlim(xmin=0, xmax=max_end_time)
ax2.set_ylim(ymin=0, ymax=max_power)
ax2.set_ylabel('Power (Watts)')
ax2.plot(power_times, power_values, color='r')
# plt.show()
plt.savefig(path.join(output_dir, "ts_" + config + "_" + trial + ".png"))
plt.close(fig)
def hb_energy_times_to_power(es, ee, ts, te):
"""Compute power from start and end energy and times.
Return: power values
"""
return (ee - es) / ((te - ts) / 1000.0)
def plot_all_time_series(config_list, output_dir):
"""Plot column charts of the raw total time/energy spent in each profiler category.
Keyword arguments:
config_list -- [(config, result of process_config_dir(...))]
output_dir -- where to write plots to
"""
time_series_out_dir = path.join(output_dir, 'time_series')
os.makedirs(time_series_out_dir)
max_end_times = []
max_power_values = []
for (c, cd) in config_list:
for (t, td) in cd:
trial_max_end_times = map(np.nanmax, filter(lambda x: len(x) > 0, [te for (p, ts, te, es, ee) in td]))
max_end_times.append(np.nanmax(trial_max_end_times))
for (p, ts, te, es, ee) in td:
# We only care about the energy profiler (others aren't reliable for instant power anyway)
if p == ENERGY_PROFILER_NAME and len(te) > 0:
max_power_values.append(np.nanmax(hb_energy_times_to_power(es, ee, ts, te)))
max_time = np.nanmax(max_end_times)
max_power = np.nanmax(np.array(max_power_values)) * 1.2 # leave a little space at the top
for (config, config_data) in config_list:
[plot_trial_time_series(config, trial, trial_data, max_time, max_power, time_series_out_dir)
for (trial, trial_data) in config_data]
def read_heartbeat_log(profiler_hb_log):
"""Read a heartbeat log file.
Return: (profiler name, [start times], [end times], [start energies], [end energies], [instant powers])
Keyword arguments:
profiler_hb_log -- the file to read
"""
with warnings.catch_warnings():
try:
warnings.simplefilter("ignore")
time_start, time_end, energy_start, energy_end = \
np.loadtxt(profiler_hb_log,
dtype=np.dtype('uint64'),
skiprows=1,
usecols=(HB_LOG_IDX_START_TIME,
HB_LOG_IDX_END_TIME,
HB_LOG_IDX_START_ENERGY,
HB_LOG_IDX_END_ENERGY),
unpack=True,
ndmin=1)
except ValueError:
time_start, time_end, energy_start, energy_end = [], [], [], []
name = path.split(profiler_hb_log)[1].split('-')[1].split('.')[0]
return (name,
np.atleast_1d(time_start),
np.atleast_1d(time_end),
np.atleast_1d(energy_start),
np.atleast_1d(energy_end))
def process_trial_dir(trial_dir):
"""Process trial directory.
Return: [(profiler name, [start times], [end times], [start energies], [end energies])]
Time and energy are normalized to 0 start values.
Keyword arguments:
trial_dir -- the directory for this trial
"""
log_data = map(lambda h: read_heartbeat_log(path.join(trial_dir, h)),
filter(lambda f: f.endswith(".log"), os.listdir(trial_dir)))
# Find the earliest timestamps and energy readings
min_t = np.nanmin(map(np.nanmin, filter(lambda x: len(x) > 0, [ts for (profiler, ts, te, es, ee) in log_data])))
min_e = np.nanmin(map(np.nanmin, filter(lambda x: len(x) > 0, [es for (profiler, ts, te, es, ee) in log_data])))
# Normalize timing/energy data to start values of 0
return [(profiler, ts - min_t, te - min_t, es - min_e, ee - min_e) for (profiler, ts, te, es, ee) in log_data]
def process_config_dir(config_dir):
"""Process a configuration directory.
Return: [(trial, [(profiler name, [start times], [end times], [start energies], [end energies])])]
Keyword arguments:
config_dir -- the directory for this configuration - contains subdirectories for each trial
"""
return [(trial_dir, process_trial_dir(path.join(config_dir, trial_dir))) for trial_dir in os.listdir(config_dir)]
def process_logs(log_dir):
"""Process log directory.
Return: [(config, [(trial, [(profiler name, [start times], [end times], [start energies], [end energies])])])]
Keyword arguments:
log_dir -- the log directory to process - contains subdirectories for each configuration
"""
return [((config_dir.split('_')[1], process_config_dir(path.join(log_dir, config_dir))))
for config_dir in os.listdir(log_dir)]
def find_best_executions(log_dir):
"""Get the best time, energy, and power from the characterization summaries.
Return: ((config, trial, min_time), (config, trial, min_energy), (config, trial, min_power))
Keyword arguments:
results -- the results from process_logs(...).
"""
DEFAULT = ('', '', 1000000000.0)
min_time = DEFAULT
min_energy = DEFAULT
min_power = DEFAULT
for config_dir in os.listdir(log_dir):
for trial_dir in os.listdir(path.join(log_dir, config_dir)):
with open(path.join(log_dir, config_dir, trial_dir, SUMMARY_OUTPUT), "r") as s:
lines = s.readlines()
time = float(lines[SUMMARY_TIME_IDX].split(':')[1])
energy = int(lines[SUMMARY_ENERGY_IDX].split(':')[1])
power = float(lines[SUMMARY_POWER_IDX].split(':')[1])
if time < min_time[2]:
min_time = (config_dir, trial_dir, time)
if energy < min_energy[2]:
min_energy = (config_dir, trial_dir, energy)
                if power < min_power[2]:
min_power = (config_dir, trial_dir, power)
return (min_time, min_energy, min_power)
def main():
"""This script processes the log files from the "characterize.py" script and produces visualizations.
"""
# Default log directory
directory = 'heartbeat_logs'
# Default output directory
output_dir = 'plots'
# Default android
android = False
# Parsing the input of the script
parser = argparse.ArgumentParser(description="Process Heartbeat log files from characterization")
parser.add_argument("-d", "--directory",
default=directory,
help="Heartbeat log directory \"-d heartbeat_logs\"")
parser.add_argument("-o", "--output",
default=output_dir,
help="Specify the log output directory, for example \"-o plots\"")
parser.add_argument("--android",
action="store_true",
dest="android",
default=False,
help="Specify if processing results from Android")
args = parser.parse_args()
if args.directory:
directory = args.directory
if args.output:
output_dir = args.output
if args.android:
android = args.android
if not os.path.exists(directory):
print "Input directory does not exist: " + directory
sys.exit(1)
if os.path.exists(output_dir):
print "Output directory already exists: " + output_dir
sys.exit(1)
res = process_logs(directory)
if not android:
best = find_best_executions(directory)
print 'Best time:', best[0]
print 'Best energy:', best[1]
print 'Best power:', best[2]
os.makedirs(output_dir)
plot_all_raw_totals(res, output_dir)
plot_all_time_series(res, output_dir)
if __name__ == "__main__":
main()
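# Example invocation (illustrative only), matching the argparse options above:
#   python process_logs.py -d heartbeat_logs -o plots
# This reads per-trial Heartbeat logs from heartbeat_logs/ and writes the raw
# totals and time-series figures under plots/.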
| mpl-2.0 |
collbb/ThinkStats2 | code/hinc_soln.py | 67 | 4296 | """This file contains code used in "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2014 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import numpy as np
import pandas
import hinc
import thinkplot
import thinkstats2
"""This file contains a solution to an exercise in Think Stats:
The distributions of wealth and income are sometimes modeled using
lognormal and Pareto distributions. To see which is better, let's
look at some data.
The Current Population Survey (CPS) is a joint effort of the Bureau
of Labor Statistics and the Census Bureau to study income and related
variables. Data collected in 2013 is available from
http://www.census.gov/hhes/www/cpstables/032013/hhinc/toc.htm.
I downloaded hinc06.xls, which is an Excel spreadsheet with
information about household income, and converted it to hinc06.csv,
a CSV file you will find in the repository for this book. You
will also find hinc.py, which reads the CSV file.
Extract the distribution of incomes from this dataset. Are any of the
analytic distributions in this chapter a good model of the data? A
solution to this exercise is in hinc_soln.py.
My solution generates three figures:
1) The CDF of income on a linear scale.
2) The CCDF on a log-log scale along with a Pareto model intended
to match the tail behavior.
3) The CDF on a log-x scale along with a lognormal model chosen to
match the median and inter-quartile range.
My conclusions based on these figures are:
1) The Pareto model is probably a reasonable choice for the top
10-20% of incomes.
2) The lognormal model captures the shape of the distribution better,
but the data deviate substantially from the model. With different
choices for sigma, you could match the upper or lower tail, but not
both at the same time.
In summary I would say that neither model captures the whole distribution,
so you might have to
1) look for another analytic model,
2) choose one that captures the part of the distribution that is most
relevant, or
3) avoid using an analytic model altogether.
"""
class SmoothCdf(thinkstats2.Cdf):
"""Represents a CDF based on calculated quantiles.
"""
def Render(self):
"""Because this CDF was not computed from a sample, it
should not be rendered as a step function.
"""
return self.xs, self.ps
def Prob(self, x):
"""Compute CDF(x), interpolating between known values.
"""
return np.interp(x, self.xs, self.ps)
def Value(self, p):
"""Compute inverse CDF(x), interpolating between probabilities.
"""
return np.interp(p, self.ps, self.xs)
def MakeFigures(df):
"""Plots the CDF of income in several forms.
"""
xs, ps = df.income.values, df.ps.values
cdf = SmoothCdf(xs, ps, label='data')
cdf_log = SmoothCdf(np.log10(xs), ps, label='data')
# linear plot
thinkplot.Cdf(cdf)
thinkplot.Save(root='hinc_linear',
xlabel='household income',
ylabel='CDF')
# pareto plot
# for the model I chose parameters by hand to fit the tail
xs, ys = thinkstats2.RenderParetoCdf(xmin=55000, alpha=2.5,
low=0, high=250000)
thinkplot.Plot(xs, 1-ys, label='model', color='0.8')
thinkplot.Cdf(cdf, complement=True)
thinkplot.Save(root='hinc_pareto',
xlabel='log10 household income',
ylabel='CCDF',
xscale='log',
yscale='log')
# lognormal plot
# for the model I estimate mu and sigma using
# percentile-based statistics
median = cdf_log.Percentile(50)
iqr = cdf_log.Percentile(75) - cdf_log.Percentile(25)
std = iqr / 1.349
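    # Why 1.349: for a normal distribution the inter-quartile range spans
    # norm.ppf(0.75) - norm.ppf(0.25) ~= 1.349 standard deviations, so
    # dividing the observed IQR by 1.349 gives a robust estimate of sigma.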
# choose std to match the upper tail
std = 0.35
print(median, std)
xs, ps = thinkstats2.RenderNormalCdf(median, std, low=3.5, high=5.5)
thinkplot.Plot(xs, ps, label='model', color='0.8')
thinkplot.Cdf(cdf_log)
thinkplot.Save(root='hinc_normal',
xlabel='log10 household income',
ylabel='CDF')
def main():
df = hinc.ReadData()
MakeFigures(df)
if __name__ == "__main__":
main()
| gpl-3.0 |
TomAugspurger/pandas | pandas/tests/frame/methods/test_align.py | 1 | 9220 | import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, Index, Series
import pandas._testing as tm
class TestDataFrameAlign:
def test_align_float(self, float_frame):
af, bf = float_frame.align(float_frame)
assert af._mgr is not float_frame._mgr
af, bf = float_frame.align(float_frame, copy=False)
assert af._mgr is float_frame._mgr
# axis = 0
other = float_frame.iloc[:-5, :3]
af, bf = float_frame.align(other, axis=0, fill_value=-1)
tm.assert_index_equal(bf.columns, other.columns)
# test fill value
join_idx = float_frame.index.join(other.index)
diff_a = float_frame.index.difference(join_idx)
diff_b = other.index.difference(join_idx)
diff_a_vals = af.reindex(diff_a).values
diff_b_vals = bf.reindex(diff_b).values
assert (diff_a_vals == -1).all()
af, bf = float_frame.align(other, join="right", axis=0)
tm.assert_index_equal(bf.columns, other.columns)
tm.assert_index_equal(bf.index, other.index)
tm.assert_index_equal(af.index, other.index)
# axis = 1
other = float_frame.iloc[:-5, :3].copy()
af, bf = float_frame.align(other, axis=1)
tm.assert_index_equal(bf.columns, float_frame.columns)
tm.assert_index_equal(bf.index, other.index)
# test fill value
join_idx = float_frame.index.join(other.index)
diff_a = float_frame.index.difference(join_idx)
diff_b = other.index.difference(join_idx)
diff_a_vals = af.reindex(diff_a).values
# TODO(wesm): unused?
diff_b_vals = bf.reindex(diff_b).values # noqa
assert (diff_a_vals == -1).all()
af, bf = float_frame.align(other, join="inner", axis=1)
tm.assert_index_equal(bf.columns, other.columns)
af, bf = float_frame.align(other, join="inner", axis=1, method="pad")
tm.assert_index_equal(bf.columns, other.columns)
af, bf = float_frame.align(
other.iloc[:, 0], join="inner", axis=1, method=None, fill_value=None
)
tm.assert_index_equal(bf.index, Index([]))
af, bf = float_frame.align(
other.iloc[:, 0], join="inner", axis=1, method=None, fill_value=0
)
tm.assert_index_equal(bf.index, Index([]))
# Try to align DataFrame to Series along bad axis
msg = "No axis named 2 for object type DataFrame"
with pytest.raises(ValueError, match=msg):
float_frame.align(af.iloc[0, :3], join="inner", axis=2)
# align dataframe to series with broadcast or not
idx = float_frame.index
s = Series(range(len(idx)), index=idx)
left, right = float_frame.align(s, axis=0)
tm.assert_index_equal(left.index, float_frame.index)
tm.assert_index_equal(right.index, float_frame.index)
assert isinstance(right, Series)
left, right = float_frame.align(s, broadcast_axis=1)
tm.assert_index_equal(left.index, float_frame.index)
expected = {c: s for c in float_frame.columns}
expected = DataFrame(
expected, index=float_frame.index, columns=float_frame.columns
)
tm.assert_frame_equal(right, expected)
# see gh-9558
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
result = df[df["a"] == 2]
expected = DataFrame([[2, 5]], index=[1], columns=["a", "b"])
tm.assert_frame_equal(result, expected)
result = df.where(df["a"] == 2, 0)
expected = DataFrame({"a": [0, 2, 0], "b": [0, 5, 0]})
tm.assert_frame_equal(result, expected)
def test_align_int(self, int_frame):
# test other non-float types
other = DataFrame(index=range(5), columns=["A", "B", "C"])
af, bf = int_frame.align(other, join="inner", axis=1, method="pad")
tm.assert_index_equal(bf.columns, other.columns)
def test_align_mixed_type(self, float_string_frame):
af, bf = float_string_frame.align(
float_string_frame, join="inner", axis=1, method="pad"
)
tm.assert_index_equal(bf.columns, float_string_frame.columns)
def test_align_mixed_float(self, mixed_float_frame):
# mixed floats/ints
other = DataFrame(index=range(5), columns=["A", "B", "C"])
af, bf = mixed_float_frame.align(
other.iloc[:, 0], join="inner", axis=1, method=None, fill_value=0
)
tm.assert_index_equal(bf.index, Index([]))
def test_align_mixed_int(self, mixed_int_frame):
other = DataFrame(index=range(5), columns=["A", "B", "C"])
af, bf = mixed_int_frame.align(
other.iloc[:, 0], join="inner", axis=1, method=None, fill_value=0
)
tm.assert_index_equal(bf.index, Index([]))
def test_align_multiindex(self):
# GH#10665
# same test cases as test_align_multiindex in test_series.py
midx = pd.MultiIndex.from_product(
[range(2), range(3), range(2)], names=("a", "b", "c")
)
idx = pd.Index(range(2), name="b")
df1 = pd.DataFrame(np.arange(12, dtype="int64"), index=midx)
df2 = pd.DataFrame(np.arange(2, dtype="int64"), index=idx)
# these must be the same results (but flipped)
res1l, res1r = df1.align(df2, join="left")
res2l, res2r = df2.align(df1, join="right")
expl = df1
tm.assert_frame_equal(expl, res1l)
tm.assert_frame_equal(expl, res2r)
expr = pd.DataFrame([0, 0, 1, 1, np.nan, np.nan] * 2, index=midx)
tm.assert_frame_equal(expr, res1r)
tm.assert_frame_equal(expr, res2l)
res1l, res1r = df1.align(df2, join="right")
res2l, res2r = df2.align(df1, join="left")
exp_idx = pd.MultiIndex.from_product(
[range(2), range(2), range(2)], names=("a", "b", "c")
)
expl = pd.DataFrame([0, 1, 2, 3, 6, 7, 8, 9], index=exp_idx)
tm.assert_frame_equal(expl, res1l)
tm.assert_frame_equal(expl, res2r)
expr = pd.DataFrame([0, 0, 1, 1] * 2, index=exp_idx)
tm.assert_frame_equal(expr, res1r)
tm.assert_frame_equal(expr, res2l)
def test_align_series_combinations(self):
df = pd.DataFrame({"a": [1, 3, 5], "b": [1, 3, 5]}, index=list("ACE"))
s = pd.Series([1, 2, 4], index=list("ABD"), name="x")
# frame + series
res1, res2 = df.align(s, axis=0)
exp1 = pd.DataFrame(
{"a": [1, np.nan, 3, np.nan, 5], "b": [1, np.nan, 3, np.nan, 5]},
index=list("ABCDE"),
)
exp2 = pd.Series([1, 2, np.nan, 4, np.nan], index=list("ABCDE"), name="x")
tm.assert_frame_equal(res1, exp1)
tm.assert_series_equal(res2, exp2)
# series + frame
res1, res2 = s.align(df)
tm.assert_series_equal(res1, exp2)
tm.assert_frame_equal(res2, exp1)
def _check_align(self, a, b, axis, fill_axis, how, method, limit=None):
aa, ab = a.align(
b, axis=axis, join=how, method=method, limit=limit, fill_axis=fill_axis
)
join_index, join_columns = None, None
ea, eb = a, b
if axis is None or axis == 0:
join_index = a.index.join(b.index, how=how)
ea = ea.reindex(index=join_index)
eb = eb.reindex(index=join_index)
if axis is None or axis == 1:
join_columns = a.columns.join(b.columns, how=how)
ea = ea.reindex(columns=join_columns)
eb = eb.reindex(columns=join_columns)
ea = ea.fillna(axis=fill_axis, method=method, limit=limit)
eb = eb.fillna(axis=fill_axis, method=method, limit=limit)
tm.assert_frame_equal(aa, ea)
tm.assert_frame_equal(ab, eb)
@pytest.mark.parametrize("meth", ["pad", "bfill"])
@pytest.mark.parametrize("ax", [0, 1, None])
@pytest.mark.parametrize("fax", [0, 1])
@pytest.mark.parametrize("how", ["inner", "outer", "left", "right"])
def test_align_fill_method(self, how, meth, ax, fax, float_frame):
df = float_frame
self._check_align_fill(df, how, meth, ax, fax)
def _check_align_fill(self, frame, kind, meth, ax, fax):
left = frame.iloc[0:4, :10]
right = frame.iloc[2:, 6:]
empty = frame.iloc[:0, :0]
self._check_align(left, right, axis=ax, fill_axis=fax, how=kind, method=meth)
self._check_align(
left, right, axis=ax, fill_axis=fax, how=kind, method=meth, limit=1
)
# empty left
self._check_align(empty, right, axis=ax, fill_axis=fax, how=kind, method=meth)
self._check_align(
empty, right, axis=ax, fill_axis=fax, how=kind, method=meth, limit=1
)
# empty right
self._check_align(left, empty, axis=ax, fill_axis=fax, how=kind, method=meth)
self._check_align(
left, empty, axis=ax, fill_axis=fax, how=kind, method=meth, limit=1
)
# both empty
self._check_align(empty, empty, axis=ax, fill_axis=fax, how=kind, method=meth)
self._check_align(
empty, empty, axis=ax, fill_axis=fax, how=kind, method=meth, limit=1
)
| bsd-3-clause |
djgagne/scikit-learn | sklearn/feature_selection/tests/test_chi2.py | 221 | 2398 | """
Tests for chi2, currently the only feature selection function designed
specifically to work with sparse matrices.
"""
import numpy as np
from scipy.sparse import coo_matrix, csr_matrix
import scipy.stats
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.feature_selection.univariate_selection import _chisquare
from nose.tools import assert_raises
from numpy.testing import assert_equal, assert_array_almost_equal
# Feature 0 is highly informative for class 1;
# feature 1 is the same everywhere;
# feature 2 is a bit informative for class 2.
X = [[2, 1, 2],
[9, 1, 1],
[6, 1, 2],
[0, 1, 2]]
y = [0, 1, 2, 2]
def mkchi2(k):
"""Make k-best chi2 selector"""
return SelectKBest(chi2, k=k)
def test_chi2():
# Test Chi2 feature extraction
chi2 = mkchi2(k=1).fit(X, y)
chi2 = mkchi2(k=1).fit(X, y)
assert_equal(chi2.get_support(indices=True), [0])
assert_equal(chi2.transform(X), np.array(X)[:, [0]])
chi2 = mkchi2(k=2).fit(X, y)
assert_equal(sorted(chi2.get_support(indices=True)), [0, 2])
Xsp = csr_matrix(X, dtype=np.float)
chi2 = mkchi2(k=2).fit(Xsp, y)
assert_equal(sorted(chi2.get_support(indices=True)), [0, 2])
Xtrans = chi2.transform(Xsp)
assert_equal(Xtrans.shape, [Xsp.shape[0], 2])
# == doesn't work on scipy.sparse matrices
Xtrans = Xtrans.toarray()
Xtrans2 = mkchi2(k=2).fit_transform(Xsp, y).toarray()
assert_equal(Xtrans, Xtrans2)
def test_chi2_coo():
# Check that chi2 works with a COO matrix
# (as returned by CountVectorizer, DictVectorizer)
Xcoo = coo_matrix(X)
mkchi2(k=2).fit_transform(Xcoo, y)
# if we got here without an exception, we're safe
def test_chi2_negative():
# Check for proper error on negative numbers in the input X.
X, y = [[0, 1], [-1e-20, 1]], [0, 1]
for X in (X, np.array(X), csr_matrix(X)):
assert_raises(ValueError, chi2, X, y)
def test_chisquare():
# Test replacement for scipy.stats.chisquare against the original.
obs = np.array([[2., 2.],
[1., 1.]])
exp = np.array([[1.5, 1.5],
[1.5, 1.5]])
# call SciPy first because our version overwrites obs
chi_scp, p_scp = scipy.stats.chisquare(obs, exp)
chi_our, p_our = _chisquare(obs, exp)
assert_array_almost_equal(chi_scp, chi_our)
assert_array_almost_equal(p_scp, p_our)
| bsd-3-clause |
eriksonJAguiar/TCC-UENP-Codigos | My_codes/coleta/search_streaming.py | 1 | 1407 | import pandas as pd
import numpy as np
import tweepy
import time
from datetime import datetime
import csv
from unicodedata import normalize
# Twitter app access credentials
consumer_key = "NBL0CtVrn2ajbpaGEWC1GBY2c"
consumer_secret = "2F5Uz5VYg0ONu4xTYYZsWkAGfc3TYXCkXLCsXMJ1eCKOfhBTfS"
access_token = "2345718031-we2K2PETQXkz7NCexjdGuvE2L2rnd5KfouzN3Up"
access_token_secret = "aEQPKGifu1y29Wbh3u6Z0YIcjAsBC8VeD4Y75CDL2r12o"
# OAuth access
# API reference: https://dev.twitter.com/rest/reference
authentication = tweepy.OAuthHandler(consumer_key, consumer_secret)
authentication.set_access_token(access_token, access_token_secret)
api = tweepy.API(authentication)
def write_file(datas,filename):
with open('%s.csv'%(filename), 'a', newline='') as csvfile:
spamwriter = csv.writer(csvfile, delimiter=';')
for row in datas:
spamwriter.writerow(row)
def acents(text):
return normalize('NFKD',text).encode('ASCII','ignore').decode('ASCII')
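# e.g. acents('São Paulo') -> 'Sao Paulo': NFKD-decompose, then drop the
# non-ASCII combining marks so trend names are ASCII-safe for the CSV output.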
if __name__ == '__main__':
log = []
while True:
tags = []
line = []
trends = api.trends_place(23424768)
data = trends[0]
trend = data['trends']
for item in trend:
name = str(item['name'])
name = acents(name)
if name not in log:
l = name,str(datetime.now())
line.append(l)
tags.append(name)
log.extend(tags)
print(log)
print(len(log))
write_file(line,'tags')
print('-------------')
time.sleep(60)
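# Each pass appends rows of the form "<trend name>;<timestamp>" to tags.csv
# (write_file uses ';' as the CSV delimiter). The WOEID passed to
# trends_place above (23424768) appears to correspond to Brazil.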
| gpl-3.0 |
xiaoxiamii/scikit-learn | sklearn/metrics/scorer.py | 211 | 13141 | """
The :mod:`sklearn.metrics.scorer` submodule implements a flexible
interface for model selection and evaluation using
arbitrary score functions.
A scorer object is a callable that can be passed to
:class:`sklearn.grid_search.GridSearchCV` or
:func:`sklearn.cross_validation.cross_val_score` as the ``scoring`` parameter,
to specify how a model should be evaluated.
The signature of the call is ``(estimator, X, y)`` where ``estimator``
is the model to be evaluated, ``X`` is the test data and ``y`` is the
ground truth labeling (or ``None`` in the case of unsupervised models).
"""
# Authors: Andreas Mueller <[email protected]>
# Lars Buitinck <[email protected]>
# Arnaud Joly <[email protected]>
# License: Simplified BSD
from abc import ABCMeta, abstractmethod
from functools import partial
import numpy as np
from . import (r2_score, median_absolute_error, mean_absolute_error,
mean_squared_error, accuracy_score, f1_score,
roc_auc_score, average_precision_score,
precision_score, recall_score, log_loss)
from .cluster import adjusted_rand_score
from ..utils.multiclass import type_of_target
from ..externals import six
from ..base import is_regressor
class _BaseScorer(six.with_metaclass(ABCMeta, object)):
def __init__(self, score_func, sign, kwargs):
self._kwargs = kwargs
self._score_func = score_func
self._sign = sign
@abstractmethod
def __call__(self, estimator, X, y, sample_weight=None):
pass
def __repr__(self):
kwargs_string = "".join([", %s=%s" % (str(k), str(v))
for k, v in self._kwargs.items()])
return ("make_scorer(%s%s%s%s)"
% (self._score_func.__name__,
"" if self._sign > 0 else ", greater_is_better=False",
self._factory_args(), kwargs_string))
def _factory_args(self):
"""Return non-default make_scorer arguments for repr."""
return ""
class _PredictScorer(_BaseScorer):
def __call__(self, estimator, X, y_true, sample_weight=None):
"""Evaluate predicted target values for X relative to y_true.
Parameters
----------
estimator : object
            Trained estimator to use for scoring. Must have a predict
            method; the output of that is used to compute the score.
X : array-like or sparse matrix
Test data that will be fed to estimator.predict.
y_true : array-like
Gold standard target values for X.
sample_weight : array-like, optional (default=None)
Sample weights.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
y_pred = estimator.predict(X)
if sample_weight is not None:
return self._sign * self._score_func(y_true, y_pred,
sample_weight=sample_weight,
**self._kwargs)
else:
return self._sign * self._score_func(y_true, y_pred,
**self._kwargs)
class _ProbaScorer(_BaseScorer):
def __call__(self, clf, X, y, sample_weight=None):
"""Evaluate predicted probabilities for X relative to y_true.
Parameters
----------
clf : object
Trained classifier to use for scoring. Must have a predict_proba
method; the output of that is used to compute the score.
X : array-like or sparse matrix
Test data that will be fed to clf.predict_proba.
y : array-like
Gold standard target values for X. These must be class labels,
not probabilities.
sample_weight : array-like, optional (default=None)
Sample weights.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
y_pred = clf.predict_proba(X)
if sample_weight is not None:
return self._sign * self._score_func(y, y_pred,
sample_weight=sample_weight,
**self._kwargs)
else:
return self._sign * self._score_func(y, y_pred, **self._kwargs)
def _factory_args(self):
return ", needs_proba=True"
class _ThresholdScorer(_BaseScorer):
def __call__(self, clf, X, y, sample_weight=None):
"""Evaluate decision function output for X relative to y_true.
Parameters
----------
clf : object
Trained classifier to use for scoring. Must have either a
decision_function method or a predict_proba method; the output of
that is used to compute the score.
X : array-like or sparse matrix
Test data that will be fed to clf.decision_function or
clf.predict_proba.
y : array-like
Gold standard target values for X. These must be class labels,
not decision function values.
sample_weight : array-like, optional (default=None)
Sample weights.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
y_type = type_of_target(y)
if y_type not in ("binary", "multilabel-indicator"):
raise ValueError("{0} format is not supported".format(y_type))
if is_regressor(clf):
y_pred = clf.predict(X)
else:
try:
y_pred = clf.decision_function(X)
# For multi-output multi-class estimator
if isinstance(y_pred, list):
y_pred = np.vstack(p for p in y_pred).T
except (NotImplementedError, AttributeError):
y_pred = clf.predict_proba(X)
if y_type == "binary":
y_pred = y_pred[:, 1]
elif isinstance(y_pred, list):
y_pred = np.vstack([p[:, -1] for p in y_pred]).T
if sample_weight is not None:
return self._sign * self._score_func(y, y_pred,
sample_weight=sample_weight,
**self._kwargs)
else:
return self._sign * self._score_func(y, y_pred, **self._kwargs)
def _factory_args(self):
return ", needs_threshold=True"
def get_scorer(scoring):
if isinstance(scoring, six.string_types):
try:
scorer = SCORERS[scoring]
except KeyError:
raise ValueError('%r is not a valid scoring value. '
'Valid options are %s'
% (scoring, sorted(SCORERS.keys())))
else:
scorer = scoring
return scorer
def _passthrough_scorer(estimator, *args, **kwargs):
"""Function that wraps estimator.score"""
return estimator.score(*args, **kwargs)
def check_scoring(estimator, scoring=None, allow_none=False):
"""Determine scorer from user options.
A TypeError will be thrown if the estimator cannot be scored.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
allow_none : boolean, optional, default: False
If no scoring is specified and the estimator has no score function, we
can either return None or raise an exception.
Returns
-------
scoring : callable
A scorer callable object / function with signature
``scorer(estimator, X, y)``.
"""
has_scoring = scoring is not None
if not hasattr(estimator, 'fit'):
raise TypeError("estimator should a be an estimator implementing "
"'fit' method, %r was passed" % estimator)
elif has_scoring:
return get_scorer(scoring)
elif hasattr(estimator, 'score'):
return _passthrough_scorer
elif allow_none:
return None
else:
raise TypeError(
"If no scoring is specified, the estimator passed should "
"have a 'score' method. The estimator %r does not." % estimator)
def make_scorer(score_func, greater_is_better=True, needs_proba=False,
needs_threshold=False, **kwargs):
"""Make a scorer from a performance metric or loss function.
This factory function wraps scoring functions for use in GridSearchCV
and cross_val_score. It takes a score function, such as ``accuracy_score``,
``mean_squared_error``, ``adjusted_rand_index`` or ``average_precision``
and returns a callable that scores an estimator's output.
Read more in the :ref:`User Guide <scoring>`.
Parameters
----------
score_func : callable,
Score function (or loss function) with signature
``score_func(y, y_pred, **kwargs)``.
greater_is_better : boolean, default=True
Whether score_func is a score function (default), meaning high is good,
or a loss function, meaning low is good. In the latter case, the
scorer object will sign-flip the outcome of the score_func.
needs_proba : boolean, default=False
Whether score_func requires predict_proba to get probability estimates
out of a classifier.
needs_threshold : boolean, default=False
Whether score_func takes a continuous decision certainty.
This only works for binary classification using estimators that
have either a decision_function or predict_proba method.
For example ``average_precision`` or the area under the roc curve
can not be computed using discrete predictions alone.
**kwargs : additional arguments
Additional parameters to be passed to score_func.
Returns
-------
scorer : callable
Callable object that returns a scalar score; greater is better.
Examples
--------
>>> from sklearn.metrics import fbeta_score, make_scorer
>>> ftwo_scorer = make_scorer(fbeta_score, beta=2)
>>> ftwo_scorer
make_scorer(fbeta_score, beta=2)
>>> from sklearn.grid_search import GridSearchCV
>>> from sklearn.svm import LinearSVC
>>> grid = GridSearchCV(LinearSVC(), param_grid={'C': [1, 10]},
... scoring=ftwo_scorer)
"""
sign = 1 if greater_is_better else -1
if needs_proba and needs_threshold:
raise ValueError("Set either needs_proba or needs_threshold to True,"
" but not both.")
if needs_proba:
cls = _ProbaScorer
elif needs_threshold:
cls = _ThresholdScorer
else:
cls = _PredictScorer
return cls(score_func, sign, kwargs)
# Standard regression scores
r2_scorer = make_scorer(r2_score)
mean_squared_error_scorer = make_scorer(mean_squared_error,
greater_is_better=False)
mean_absolute_error_scorer = make_scorer(mean_absolute_error,
greater_is_better=False)
median_absolute_error_scorer = make_scorer(median_absolute_error,
greater_is_better=False)
# Standard Classification Scores
accuracy_scorer = make_scorer(accuracy_score)
f1_scorer = make_scorer(f1_score)
# Score functions that need decision values
roc_auc_scorer = make_scorer(roc_auc_score, greater_is_better=True,
needs_threshold=True)
average_precision_scorer = make_scorer(average_precision_score,
needs_threshold=True)
precision_scorer = make_scorer(precision_score)
recall_scorer = make_scorer(recall_score)
# Score function for probabilistic classification
log_loss_scorer = make_scorer(log_loss, greater_is_better=False,
needs_proba=True)
# Clustering scores
adjusted_rand_scorer = make_scorer(adjusted_rand_score)
SCORERS = dict(r2=r2_scorer,
median_absolute_error=median_absolute_error_scorer,
mean_absolute_error=mean_absolute_error_scorer,
mean_squared_error=mean_squared_error_scorer,
accuracy=accuracy_scorer, roc_auc=roc_auc_scorer,
average_precision=average_precision_scorer,
log_loss=log_loss_scorer,
adjusted_rand_score=adjusted_rand_scorer)
for name, metric in [('precision', precision_score),
('recall', recall_score), ('f1', f1_score)]:
SCORERS[name] = make_scorer(metric)
for average in ['macro', 'micro', 'samples', 'weighted']:
qualified_name = '{0}_{1}'.format(name, average)
SCORERS[qualified_name] = make_scorer(partial(metric, pos_label=None,
average=average))
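# Illustrative helper, not part of the original module: the loops above also
# register averaged variants such as 'precision_macro' or 'f1_weighted'; any
# of those names can be looked up here (or passed as ``scoring=...`` to
# GridSearchCV / cross_val_score).
def _example_lookup_scorer(name='f1_macro'):
    """Sketch only: return a scorer registered in SCORERS by name."""
    return SCORERS[name]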
| bsd-3-clause |
trankmichael/scikit-learn | sklearn/datasets/tests/test_lfw.py | 230 | 7880 | """This test for the LFW requires medium-size data downloading and processing
If the data has not already been downloaded by running the examples,
the tests won't run (skipped).
If the tests are run, the first execution will be long (typically a bit
more than a couple of minutes) but, as the dataset loader leverages
joblib, successive runs will be fast (less than 200ms).
"""
import random
import os
import shutil
import tempfile
import numpy as np
from sklearn.externals import six
try:
try:
from scipy.misc import imsave
except ImportError:
from scipy.misc.pilutil import imsave
except ImportError:
imsave = None
from sklearn.datasets import load_lfw_pairs
from sklearn.datasets import load_lfw_people
from sklearn.datasets import fetch_lfw_pairs
from sklearn.datasets import fetch_lfw_people
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import raises
SCIKIT_LEARN_DATA = tempfile.mkdtemp(prefix="scikit_learn_lfw_test_")
SCIKIT_LEARN_EMPTY_DATA = tempfile.mkdtemp(prefix="scikit_learn_empty_test_")
LFW_HOME = os.path.join(SCIKIT_LEARN_DATA, 'lfw_home')
FAKE_NAMES = [
'Abdelatif_Smith',
'Abhati_Kepler',
'Camara_Alvaro',
'Chen_Dupont',
'John_Lee',
'Lin_Bauman',
'Onur_Lopez',
]
def setup_module():
"""Test fixture run once and common to all tests of this module"""
if imsave is None:
raise SkipTest("PIL not installed.")
if not os.path.exists(LFW_HOME):
os.makedirs(LFW_HOME)
random_state = random.Random(42)
np_rng = np.random.RandomState(42)
# generate some random jpeg files for each person
counts = {}
for name in FAKE_NAMES:
folder_name = os.path.join(LFW_HOME, 'lfw_funneled', name)
if not os.path.exists(folder_name):
os.makedirs(folder_name)
n_faces = np_rng.randint(1, 5)
counts[name] = n_faces
for i in range(n_faces):
file_path = os.path.join(folder_name, name + '_%04d.jpg' % i)
uniface = np_rng.randint(0, 255, size=(250, 250, 3))
try:
imsave(file_path, uniface)
except ImportError:
raise SkipTest("PIL not installed")
# add some random file pollution to test robustness
with open(os.path.join(LFW_HOME, 'lfw_funneled', '.test.swp'), 'wb') as f:
f.write(six.b('Text file to be ignored by the dataset loader.'))
# generate some pairing metadata files using the same format as LFW
with open(os.path.join(LFW_HOME, 'pairsDevTrain.txt'), 'wb') as f:
f.write(six.b("10\n"))
more_than_two = [name for name, count in six.iteritems(counts)
if count >= 2]
for i in range(5):
name = random_state.choice(more_than_two)
first, second = random_state.sample(range(counts[name]), 2)
f.write(six.b('%s\t%d\t%d\n' % (name, first, second)))
for i in range(5):
first_name, second_name = random_state.sample(FAKE_NAMES, 2)
first_index = random_state.choice(np.arange(counts[first_name]))
second_index = random_state.choice(np.arange(counts[second_name]))
f.write(six.b('%s\t%d\t%s\t%d\n' % (first_name, first_index,
second_name, second_index)))
with open(os.path.join(LFW_HOME, 'pairsDevTest.txt'), 'wb') as f:
f.write(six.b("Fake place holder that won't be tested"))
with open(os.path.join(LFW_HOME, 'pairs.txt'), 'wb') as f:
f.write(six.b("Fake place holder that won't be tested"))
def teardown_module():
"""Test fixture (clean up) run once after all tests of this module"""
if os.path.isdir(SCIKIT_LEARN_DATA):
shutil.rmtree(SCIKIT_LEARN_DATA)
if os.path.isdir(SCIKIT_LEARN_EMPTY_DATA):
shutil.rmtree(SCIKIT_LEARN_EMPTY_DATA)
@raises(IOError)
def test_load_empty_lfw_people():
fetch_lfw_people(data_home=SCIKIT_LEARN_EMPTY_DATA, download_if_missing=False)
def test_load_lfw_people_deprecation():
msg = ("Function 'load_lfw_people' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_people(download_if_missing=False) instead.")
assert_warns_message(DeprecationWarning, msg, load_lfw_people,
data_home=SCIKIT_LEARN_DATA)
def test_load_fake_lfw_people():
lfw_people = fetch_lfw_people(data_home=SCIKIT_LEARN_DATA,
min_faces_per_person=3, download_if_missing=False)
# The data is cropped around the center as a rectangular bounding box
# around the face. Colors are converted to gray levels:
assert_equal(lfw_people.images.shape, (10, 62, 47))
assert_equal(lfw_people.data.shape, (10, 2914))
# the target is an array of person integer ids
assert_array_equal(lfw_people.target, [2, 0, 1, 0, 2, 0, 2, 1, 1, 2])
# names of the persons can be found using the target_names array
expected_classes = ['Abdelatif Smith', 'Abhati Kepler', 'Onur Lopez']
assert_array_equal(lfw_people.target_names, expected_classes)
# It is possible to ask for the original data without any cropping or color
# conversion and no limit on the number of pictures per person
lfw_people = fetch_lfw_people(data_home=SCIKIT_LEARN_DATA,
resize=None, slice_=None, color=True, download_if_missing=False)
assert_equal(lfw_people.images.shape, (17, 250, 250, 3))
# the ids and class names are the same as previously
assert_array_equal(lfw_people.target,
[0, 0, 1, 6, 5, 6, 3, 6, 0, 3, 6, 1, 2, 4, 5, 1, 2])
assert_array_equal(lfw_people.target_names,
['Abdelatif Smith', 'Abhati Kepler', 'Camara Alvaro',
'Chen Dupont', 'John Lee', 'Lin Bauman', 'Onur Lopez'])
@raises(ValueError)
def test_load_fake_lfw_people_too_restrictive():
fetch_lfw_people(data_home=SCIKIT_LEARN_DATA, min_faces_per_person=100, download_if_missing=False)
@raises(IOError)
def test_load_empty_lfw_pairs():
fetch_lfw_pairs(data_home=SCIKIT_LEARN_EMPTY_DATA, download_if_missing=False)
def test_load_lfw_pairs_deprecation():
msg = ("Function 'load_lfw_pairs' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_pairs(download_if_missing=False) instead.")
assert_warns_message(DeprecationWarning, msg, load_lfw_pairs,
data_home=SCIKIT_LEARN_DATA)
def test_load_fake_lfw_pairs():
lfw_pairs_train = fetch_lfw_pairs(data_home=SCIKIT_LEARN_DATA, download_if_missing=False)
# The data is cropped around the center as a rectangular bounding box
# around the face. Colors are converted to gray levels:
assert_equal(lfw_pairs_train.pairs.shape, (10, 2, 62, 47))
# the target is whether the person is the same or not
assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
# names of the persons can be found using the target_names array
expected_classes = ['Different persons', 'Same person']
assert_array_equal(lfw_pairs_train.target_names, expected_classes)
# It is possible to ask for the original data without any cropping or color
# conversion
lfw_pairs_train = fetch_lfw_pairs(data_home=SCIKIT_LEARN_DATA,
resize=None, slice_=None, color=True, download_if_missing=False)
assert_equal(lfw_pairs_train.pairs.shape, (10, 2, 250, 250, 3))
# the ids and class names are the same as previously
assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
assert_array_equal(lfw_pairs_train.target_names, expected_classes)
| bsd-3-clause |
nhuntwalker/astroML | book_figures/chapter1/fig_moving_objects_multicolor.py | 3 | 4580 | """
SDSS Stripe 82 Moving Object Catalog
------------------------------------
Figure 1.12.
A multicolor scatter plot of the properties of asteroids from the SDSS Moving
Object Catalog (cf. figure 1.8). The left panel shows observational markers
of the chemical properties of the asteroids: two colors a* and i-z. The
right panel shows the orbital parameters: semimajor axis a vs. the sine of
the inclination. The color of points in the right panel reflects their
position in the left panel. This plot is similar to that used in
figures 3-4 of Parker et al. 2008.
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
from matplotlib import pyplot as plt
from astroML.datasets import fetch_moving_objects
from astroML.plotting.tools import devectorize_axes
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
def black_bg_subplot(*args, **kwargs):
"""Create a subplot with black background"""
kwargs['axisbg'] = 'k'
ax = plt.subplot(*args, **kwargs)
# set ticks and labels to white
for spine in ax.spines.values():
spine.set_color('w')
for tick in ax.xaxis.get_major_ticks() + ax.yaxis.get_major_ticks():
for child in tick.get_children():
child.set_color('w')
return ax
def compute_color(mag_a, mag_i, mag_z, a_crit=-0.1):
"""
Compute the scatter-plot color using code adapted from
TCL source used in Parker 2008.
"""
# define the base color scalings
R = np.ones_like(mag_i)
G = 0.5 * 10 ** (-2 * (mag_i - mag_z - 0.01))
B = 1.5 * 10 ** (-8 * (mag_a + 0.0))
# enhance green beyond the a_crit cutoff
G += 10. / (1 + np.exp((mag_a - a_crit) / 0.02))
# normalize color of each point to its maximum component
RGB = np.vstack([R, G, B])
RGB /= RGB.max(0)
# return an array of RGB colors, which is shape (n_points, 3)
return RGB.T
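# Small sketch, not part of the original figure script: compute_color can be
# exercised on a couple of synthetic points before running it over the full
# Moving Object Catalog; it returns one RGB triple per input point.
def _demo_compute_color():
    demo_a = np.array([-0.2, 0.1])   # a* colors
    demo_i = np.array([16.0, 17.0])  # i magnitudes
    demo_z = np.array([15.9, 16.8])  # z magnitudes
    rgb = compute_color(demo_a, demo_i, demo_z)
    assert rgb.shape == (2, 3)
    return rgb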
#------------------------------------------------------------
# Fetch data and extract the desired quantities
data = fetch_moving_objects(Parker2008_cuts=True)
mag_a = data['mag_a']
mag_i = data['mag_i']
mag_z = data['mag_z']
a = data['aprime']
sini = data['sin_iprime']
# dither: magnitudes are recorded only to +/- 0.01
np.random.seed(0)
mag_a += -0.005 + 0.01 * np.random.random(size=mag_a.shape)
mag_i += -0.005 + 0.01 * np.random.random(size=mag_i.shape)
mag_z += -0.005 + 0.01 * np.random.random(size=mag_z.shape)
# compute RGB color based on magnitudes
color = compute_color(mag_a, mag_i, mag_z)
#------------------------------------------------------------
# set up the plot
fig = plt.figure(figsize=(5, 2.2), facecolor='k')
fig.subplots_adjust(left=0.1, right=0.95, wspace=0.3,
bottom=0.2, top=0.93)
# plot the color-magnitude plot
ax = black_bg_subplot(121)
ax.scatter(mag_a, mag_i - mag_z,
c=color, s=0.5, lw=0)
devectorize_axes(ax, dpi=400)
ax.plot([0, 0], [-0.8, 0.6], '--w', lw=1)
ax.plot([0, 0.4], [-0.15, -0.15], '--w', lw=1)
ax.set_xlim(-0.3, 0.4)
ax.set_ylim(-0.8, 0.6)
ax.set_xlabel(r'${\rm a*}$', color='w')
ax.set_ylabel(r'${\rm i-z}$', color='w')
# plot the orbital parameters plot
ax = black_bg_subplot(122)
ax.scatter(a, sini,
c=color, s=0.5, lw=0, edgecolor='none')
devectorize_axes(ax, dpi=400)
ax.plot([2.5, 2.5], [-0.02, 0.3], '--w', lw=1)
ax.plot([2.82, 2.82], [-0.02, 0.3], '--w', lw=1)
ax.set_xlim(2.0, 3.3)
ax.set_ylim(-0.02, 0.3)
ax.set_xlabel(r'${\rm a (AU)}$', color='w')
ax.set_ylabel(r'${\rm sin(i)}$', color='w')
# label the plot
text_kwargs = dict(color='w', transform=plt.gca().transAxes,
ha='center', va='bottom')
ax.text(0.25, 1.02, 'Inner', **text_kwargs)
ax.text(0.53, 1.02, 'Mid', **text_kwargs)
ax.text(0.83, 1.02, 'Outer', **text_kwargs)
# Saving the black-background figure requires some extra arguments:
#fig.savefig('moving_objects.png',
# facecolor='black',
# edgecolor='none')
plt.show()
| bsd-2-clause |
PX4/Firmware | Tools/models/sdp3x_pitot_model.py | 8 | 3344 | """
Copyright (c) 2017, Sensirion AG
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Sensirion AG nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
# formula for metal pitot tube with round tip as here: https://drotek.com/shop/2986-large_default/sdp3x-airspeed-sensor-kit-sdp31.jpg
# and tubing as provided by px4/drotek (1.5 mm diameter)
import numpy as np
import matplotlib.pyplot as plt
P_cal=96600. #Pa
P_amb=96600. #dummy-value, use absolute pressure sensor!!
## differential pressure, sensor values in Pascal
dp_SDP33_raw=np.linspace(0,80,100)
dp_SDP33=dp_SDP33_raw*P_cal/P_amb
## total tube length in mm = dynamic port length + static port length; compensation only valid for an inner diameter of 1.5 mm
l_tube=450
## density of air in kg/m3
rho_air=1.29
## flow through sensor
flow_SDP33=(300.805 - 300.878/(0.00344205*dp_SDP33**0.68698 + 1))*1.29/rho_air
## additional dp through pitot tube
dp_Pitot=(0.0032*flow_SDP33**2 + 0.0123*flow_SDP33+1.)*1.29/rho_air
## pressure drop through tube
dp_Tube=(flow_SDP33*0.674)/450*l_tube*rho_air/1.29
## speed at pitot-tube tip due to flow through sensor
dv=0.125*flow_SDP33
## sum of all pressure drops
dp_tot=dp_SDP33+dp_Tube+dp_Pitot
## computed airspeed without correction for inflow-speed at tip of pitot-tube
airspeed_uncorrected=np.sqrt(2*dp_tot/rho_air)
## corrected airspeed
airspeed_corrected=airspeed_uncorrected+dv
## just to compare to value without compensation
airspeed_raw=np.sqrt(2*dp_SDP33/rho_air)
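## illustrative scalar version (not part of the original model script): the
## same correction chain applied to a single raw differential-pressure
## reading, reusing the constants defined above; handy for spot checks
def corrected_airspeed_single(dp_raw, p_amb=P_amb, p_cal=P_cal,
                              rho=rho_air, tube_len=l_tube):
    dp = dp_raw * p_cal / p_amb
    flow = (300.805 - 300.878 / (0.00344205 * dp**0.68698 + 1)) * 1.29 / rho
    dp_pitot = (0.0032 * flow**2 + 0.0123 * flow + 1.) * 1.29 / rho
    dp_tube = (flow * 0.674) / 450 * tube_len * rho / 1.29
    dv = 0.125 * flow
    return np.sqrt(2 * (dp + dp_tube + dp_pitot) / rho) + dv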
plt.figure()
plt.plot(dp_SDP33,airspeed_corrected)
plt.xlabel('differential pressure raw value [Pa]')
plt.ylabel('airspeed_corrected [m/s]')
plt.show()
##plt.figure()
##plt.plot(dp_SDP33,airspeed_corrected/airspeed_raw)
##plt.xlabel('differential pressure raw value [Pa]')
##plt.ylabel('correction factor [-]')
##plt.show()
##
##
##
##plt.figure()
##plt.plot(airspeed_corrected,(airspeed_corrected-airspeed_raw)/airspeed_corrected)
##plt.xlabel('airspeed [m/s]')
##plt.ylabel('relative error [-]')
##plt.show() | bsd-3-clause |
deepfield/ibis | ibis/sql/tests/test_compiler.py | 1 | 67764 | # Copyright 2014 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import pytest
import ibis
import ibis.expr.api as api
import ibis.expr.operations as ops
from ibis.expr.tests.mocks import MockConnection
pytest.importorskip('sqlalchemy')
pytest.importorskip('impala.dbapi')
from ibis.impala.compiler import build_ast, to_sql, ImpalaDialect # noqa: E402
from ibis import impala # noqa: E402
class TestASTBuilder(unittest.TestCase):
def setUp(self):
self.con = MockConnection()
def test_ast_with_projection_join_filter(self):
table = self.con.table('test1')
table2 = self.con.table('test2')
filter_pred = table['f'] > 0
table3 = table[filter_pred]
join_pred = table3['g'] == table2['key']
joined = table2.inner_join(table3, [join_pred])
result = joined[[table3, table2['value']]]
stmt = _get_query(result)
def foo():
table3 = table[filter_pred]
joined = table2.inner_join(table3, [join_pred])
result = joined[[table3, table2['value']]]
return result
assert len(stmt.select_set) == 2
# #790, make sure the filter stays put
assert len(stmt.where) == 0
# Check that the joined tables are not altered
tbl = stmt.table_set
tbl_node = tbl.op()
assert isinstance(tbl_node, ops.InnerJoin)
assert tbl_node.left is table2
assert tbl_node.right is table3
def test_ast_with_aggregation_join_filter(self):
table = self.con.table('test1')
table2 = self.con.table('test2')
filter_pred = table['f'] > 0
table3 = table[filter_pred]
join_pred = table3['g'] == table2['key']
joined = table2.inner_join(table3, [join_pred])
met1 = (table3['f'] - table2['value']).mean().name('foo')
result = joined.aggregate([met1, table3['f'].sum().name('bar')],
by=[table3['g'], table2['key']])
stmt = _get_query(result)
# #790, this behavior was different before
ex_pred = [table3['g'] == table2['key']]
expected_table_set = \
table2.inner_join(table3, ex_pred)
assert stmt.table_set.equals(expected_table_set)
# Check various exprs
ex_metrics = [(table3['f'] - table2['value']).mean().name('foo'),
table3['f'].sum().name('bar')]
ex_by = [table3['g'], table2['key']]
for res, ex in zip(stmt.select_set, ex_by + ex_metrics):
assert res.equals(ex)
for res, ex in zip(stmt.group_by, ex_by):
assert stmt.select_set[res].equals(ex)
# The filter is in the joined subtable
assert len(stmt.where) == 0
class TestNonTabularResults(unittest.TestCase):
"""
"""
def setUp(self):
self.con = MockConnection()
self.table = self.con.table('alltypes')
def test_simple_scalar_aggregates(self):
from pandas import DataFrame
# Things like table.column.{sum, mean, ...}()
table = self.con.table('alltypes')
expr = table[table.c > 0].f.sum()
query = _get_query(expr)
sql_query = query.compile()
expected = """SELECT sum(`f`) AS `sum`
FROM alltypes
WHERE `c` > 0"""
assert sql_query == expected
# Maybe the result handler should act on the cursor. Not sure.
handler = query.result_handler
output = DataFrame({'sum': [5]})
assert handler(output) == 5
def test_scalar_aggregates_multiple_tables(self):
# #740
table = ibis.table([('flag', 'string'),
('value', 'double')],
'tbl')
flagged = table[table.flag == '1']
unflagged = table[table.flag == '0']
expr = flagged.value.mean() / unflagged.value.mean() - 1
result = to_sql(expr)
expected = """\
SELECT (t0.`mean` / t1.`mean`) - 1 AS `tmp`
FROM (
SELECT avg(`value`) AS `mean`
FROM tbl
WHERE `flag` = '1'
) t0
CROSS JOIN (
SELECT avg(`value`) AS `mean`
FROM tbl
WHERE `flag` = '0'
) t1"""
assert result == expected
fv = flagged.value
uv = unflagged.value
expr = (fv.mean() / fv.sum()) - (uv.mean() / uv.sum())
result = to_sql(expr)
expected = """\
SELECT t0.`tmp` - t1.`tmp` AS `tmp`
FROM (
SELECT avg(`value`) / sum(`value`) AS `tmp`
FROM tbl
WHERE `flag` = '1'
) t0
CROSS JOIN (
SELECT avg(`value`) / sum(`value`) AS `tmp`
FROM tbl
WHERE `flag` = '0'
) t1"""
assert result == expected
def test_table_column_unbox(self):
from pandas import DataFrame
table = self.table
m = table.f.sum().name('total')
agged = table[table.c > 0].group_by('g').aggregate([m])
expr = agged.g
query = _get_query(expr)
sql_query = query.compile()
expected = """\
SELECT `g`
FROM (
SELECT `g`, sum(`f`) AS `total`
FROM alltypes
WHERE `c` > 0
GROUP BY 1
) t0"""
assert sql_query == expected
# Maybe the result handler should act on the cursor. Not sure.
handler = query.result_handler
output = DataFrame({'g': ['foo', 'bar', 'baz']})
assert (handler(output) == output['g']).all()
def test_complex_array_expr_projection(self):
# May require finding the base table and forming a projection.
expr = (self.table.group_by('g')
.aggregate([self.table.count().name('count')]))
expr2 = expr.g.cast('double')
query = impala.compile(expr2)
expected = """SELECT CAST(`g` AS double) AS `tmp`
FROM (
SELECT `g`, count(*) AS `count`
FROM alltypes
GROUP BY 1
) t0"""
assert query == expected
def test_scalar_exprs_no_table_refs(self):
expr1 = ibis.now()
expected1 = """\
SELECT now() AS `tmp`"""
expr2 = ibis.literal(1) + ibis.literal(2)
expected2 = """\
SELECT 1 + 2 AS `tmp`"""
cases = [
(expr1, expected1),
(expr2, expected2)
]
for expr, expected in cases:
result = impala.compile(expr)
assert result == expected
def test_expr_list_no_table_refs(self):
exlist = ibis.api.expr_list([ibis.literal(1).name('a'),
ibis.now().name('b'),
ibis.literal(2).log().name('c')])
result = impala.compile(exlist)
expected = """\
SELECT 1 AS `a`, now() AS `b`, ln(2) AS `c`"""
assert result == expected
def test_isnull_case_expr_rewrite_failure(self):
# #172, case expression that was not being properly converted into an
# aggregation
reduction = self.table.g.isnull().ifelse(1, 0).sum()
result = impala.compile(reduction)
expected = """\
SELECT sum(CASE WHEN `g` IS NULL THEN 1 ELSE 0 END) AS `sum`
FROM alltypes"""
assert result == expected
def _get_query(expr):
ast = build_ast(expr, ImpalaDialect.make_context())
return ast.queries[0]
nation = api.table([
('n_regionkey', 'int32'),
('n_nationkey', 'int32'),
('n_name', 'string')
], 'nation')
region = api.table([
('r_regionkey', 'int32'),
('r_name', 'string')
], 'region')
customer = api.table([
('c_nationkey', 'int32'),
('c_name', 'string'),
('c_acctbal', 'double')
], 'customer')
def _table_wrapper(name, tname=None):
@property
def f(self):
return self._table_from_schema(name, tname)
return f
class ExprTestCases(object):
_schemas = {
'foo': [
('job', 'string'),
('dept_id', 'string'),
('year', 'int32'),
('y', 'double')
],
'bar': [
('x', 'double'),
('job', 'string')
],
't1': [
('key1', 'string'),
('key2', 'string'),
('value1', 'double')
],
't2': [
('key1', 'string'),
('key2', 'string')
]
}
def _table_from_schema(self, name, tname=None):
tname = tname or name
return api.table(self._schemas[name], tname)
def _case_multiple_joins(self):
t1 = self.con.table('star1')
t2 = self.con.table('star2')
t3 = self.con.table('star3')
predA = t1['foo_id'] == t2['foo_id']
predB = t1['bar_id'] == t3['bar_id']
what = (t1.left_join(t2, [predA])
.inner_join(t3, [predB])
.projection([t1, t2['value1'], t3['value2']]))
return what
def _case_join_between_joins(self):
t1 = api.table([
('key1', 'string'),
('key2', 'string'),
('value1', 'double'),
], 'first')
t2 = api.table([
('key1', 'string'),
('value2', 'double'),
], 'second')
t3 = api.table([
('key2', 'string'),
('key3', 'string'),
('value3', 'double'),
], 'third')
t4 = api.table([
('key3', 'string'),
('value4', 'double')
], 'fourth')
left = t1.inner_join(t2, [('key1', 'key1')])[t1, t2.value2]
right = t3.inner_join(t4, [('key3', 'key3')])[t3, t4.value4]
joined = left.inner_join(right, [('key2', 'key2')])
# At one point, the expression simplification was resulting in bad refs
# here (right.value3 referencing the table inside the right join)
exprs = [left, right.value3, right.value4]
projected = joined.projection(exprs)
return projected
def _case_join_just_materialized(self):
t1 = self.con.table('tpch_nation')
t2 = self.con.table('tpch_region')
t3 = self.con.table('tpch_customer')
# GH #491
return (t1.inner_join(t2, t1.n_regionkey == t2.r_regionkey)
.inner_join(t3, t1.n_nationkey == t3.c_nationkey))
def _case_semi_anti_joins(self):
t1 = self.con.table('star1')
t2 = self.con.table('star2')
sj = t1.semi_join(t2, [t1.foo_id == t2.foo_id])[[t1]]
aj = t1.anti_join(t2, [t1.foo_id == t2.foo_id])[[t1]]
return sj, aj
def _case_self_reference_simple(self):
t1 = self.con.table('star1')
return t1.view()
def _case_self_reference_join(self):
t1 = self.con.table('star1')
t2 = t1.view()
return t1.inner_join(t2, [t1.foo_id == t2.bar_id])[[t1]]
def _case_join_projection_subquery_bug(self):
# From an observed bug, derived from tpch tables
geo = (nation.inner_join(region, [('n_regionkey', 'r_regionkey')])
[nation.n_nationkey,
nation.n_name.name('nation'),
region.r_name.name('region')])
expr = (geo.inner_join(customer, [('n_nationkey', 'c_nationkey')])
[customer, geo])
return expr
def _case_where_simple_comparisons(self):
t1 = self.con.table('star1')
what = t1.filter([t1.f > 0, t1.c < t1.f * 2])
return what
def _case_where_with_join(self):
t1 = self.con.table('star1')
t2 = self.con.table('star2')
# This also tests some cases of predicate pushdown
e1 = (t1.inner_join(t2, [t1.foo_id == t2.foo_id])
.projection([t1, t2.value1, t2.value3])
.filter([t1.f > 0, t2.value3 < 1000]))
# e2 = (t1.inner_join(t2, [t1.foo_id == t2.foo_id])
# .filter([t1.f > 0, t2.value3 < 1000])
# .projection([t1, t2.value1, t2.value3]))
# return e1, e2
return e1
def _case_subquery_used_for_self_join(self):
# There could be cases that should look in SQL like
# WITH t0 as (some subquery)
# select ...
# from t0 t1
# join t0 t2
# on t1.kind = t2.subkind
# ...
# However, the Ibis code will simply have an expression (projection or
# aggregation, say) built on top of the subquery expression, so we need
# to extract the subquery unit (we see that it appears multiple times
# in the tree).
t = self.con.table('alltypes')
agged = t.aggregate([t.f.sum().name('total')], by=['g', 'a', 'b'])
view = agged.view()
metrics = [(agged.total - view.total).max().name('metric')]
expr = (agged.inner_join(view, [agged.a == view.b])
.aggregate(metrics, by=[agged.g]))
return expr
def _case_subquery_factor_correlated_subquery(self):
region = self.con.table('tpch_region')
nation = self.con.table('tpch_nation')
customer = self.con.table('tpch_customer')
orders = self.con.table('tpch_orders')
fields_of_interest = [customer,
region.r_name.name('region'),
orders.o_totalprice.name('amount'),
orders.o_orderdate
.cast('timestamp').name('odate')]
tpch = (region.join(nation, region.r_regionkey == nation.n_regionkey)
.join(customer, customer.c_nationkey == nation.n_nationkey)
.join(orders, orders.o_custkey == customer.c_custkey)
[fields_of_interest])
# Self-reference + correlated subquery complicates things
t2 = tpch.view()
conditional_avg = t2[t2.region == tpch.region].amount.mean()
amount_filter = tpch.amount > conditional_avg
return tpch[amount_filter].limit(10)
def _case_self_join_subquery_distinct_equal(self):
region = self.con.table('tpch_region')
nation = self.con.table('tpch_nation')
j1 = (region.join(nation, region.r_regionkey == nation.n_regionkey)
[region, nation])
j2 = (region.join(nation, region.r_regionkey == nation.n_regionkey)
[region, nation].view())
expr = (j1.join(j2, j1.r_regionkey == j2.r_regionkey)
[j1.r_name, j2.n_name])
return expr
def _case_cte_factor_distinct_but_equal(self):
t = self.con.table('alltypes')
tt = self.con.table('alltypes')
expr1 = t.group_by('g').aggregate(t.f.sum().name('metric'))
expr2 = tt.group_by('g').aggregate(tt.f.sum().name('metric')).view()
expr = expr1.join(expr2, expr1.g == expr2.g)[[expr1]]
return expr
def _case_tpch_self_join_failure(self):
# duplicating the integration test here
region = self.con.table('tpch_region')
nation = self.con.table('tpch_nation')
customer = self.con.table('tpch_customer')
orders = self.con.table('tpch_orders')
fields_of_interest = [
region.r_name.name('region'),
nation.n_name.name('nation'),
orders.o_totalprice.name('amount'),
orders.o_orderdate.cast('timestamp').name('odate')]
joined_all = (
region.join(nation, region.r_regionkey == nation.n_regionkey)
.join(customer, customer.c_nationkey == nation.n_nationkey)
.join(orders, orders.o_custkey == customer.c_custkey)
[fields_of_interest])
year = joined_all.odate.year().name('year')
total = joined_all.amount.sum().cast('double').name('total')
annual_amounts = (joined_all
.group_by(['region', year])
.aggregate(total))
current = annual_amounts
prior = annual_amounts.view()
yoy_change = (current.total - prior.total).name('yoy_change')
yoy = (current.join(prior, current.year == (prior.year - 1))
[current.region, current.year, yoy_change])
return yoy
def _case_subquery_in_filter_predicate(self):
# E.g. comparing against some scalar aggregate value. See Ibis #43
t1 = self.con.table('star1')
pred = t1.f > t1.f.mean()
expr = t1[pred]
# This brought out another expression rewriting bug, since the filtered
# table isn't found elsewhere in the expression.
pred2 = t1.f > t1[t1.foo_id == 'foo'].f.mean()
expr2 = t1[pred2]
return expr, expr2
def _case_filter_subquery_derived_reduction(self):
t1 = self.con.table('star1')
# Reduction can be nested inside some scalar expression
pred3 = t1.f > t1[t1.foo_id == 'foo'].f.mean().log()
pred4 = t1.f > (t1[t1.foo_id == 'foo'].f.mean().log() + 1)
expr3 = t1[pred3]
expr4 = t1[pred4]
return expr3, expr4
def _case_topk_operation(self):
# TODO: top K with filter in place
table = api.table([
('foo', 'string'),
('bar', 'string'),
('city', 'string'),
('v1', 'double'),
('v2', 'double'),
], 'tbl')
what = table.city.topk(10, by=table.v2.mean())
e1 = table[what]
# Test the default metric (count)
what = table.city.topk(10)
e2 = table[what]
return e1, e2
def _case_simple_aggregate_query(self):
t1 = self.con.table('star1')
cases = [
t1.aggregate([t1['f'].sum().name('total')],
[t1['foo_id']]),
t1.aggregate([t1['f'].sum().name('total')],
['foo_id', 'bar_id'])
]
return cases
def _case_aggregate_having(self):
# Filtering post-aggregation predicate
t1 = self.con.table('star1')
total = t1.f.sum().name('total')
metrics = [total]
e1 = t1.aggregate(metrics, by=['foo_id'], having=[total > 10])
e2 = t1.aggregate(metrics, by=['foo_id'], having=[t1.count() > 100])
return e1, e2
def _case_aggregate_count_joined(self):
# count on more complicated table
region = self.con.table('tpch_region')
nation = self.con.table('tpch_nation')
join_expr = region.r_regionkey == nation.n_regionkey
joined = region.inner_join(nation, join_expr)
table_ref = joined[nation, region.r_name.name('region')]
return table_ref.count()
def _case_sort_by(self):
table = self.con.table('star1')
return [
table.sort_by('f'),
table.sort_by(('f', 0)),
table.sort_by(['c', ('f', 0)])
]
def _case_limit(self):
star1 = self.con.table('star1')
cases = [
star1.limit(10),
star1.limit(10, offset=5),
star1[star1.f > 0].limit(10),
# Semantically, this should produce a subquery
star1.limit(10)[lambda x: x.f > 0]
]
return cases
foo = _table_wrapper('foo')
bar = _table_wrapper('bar')
t1 = _table_wrapper('t1', 'foo')
t2 = _table_wrapper('t2', 'bar')
def _case_where_uncorrelated_subquery(self):
return self.foo[self.foo.job.isin(self.bar.job)]
def _case_where_correlated_subquery(self):
t1 = self.foo
t2 = t1.view()
stat = t2[t1.dept_id == t2.dept_id].y.mean()
return t1[t1.y > stat]
def _case_exists(self):
t1, t2 = self.t1, self.t2
cond = (t1.key1 == t2.key1).any()
expr = t1[cond]
cond2 = ((t1.key1 == t2.key1) & (t2.key2 == 'foo')).any()
expr2 = t1[cond2]
return expr, expr2
def _case_not_exists(self):
t1, t2 = self.t1, self.t2
cond = (t1.key1 == t2.key1).any()
return t1[-cond]
def _case_join_with_limited_table(self):
t1 = self.con.table('star1')
t2 = self.con.table('star2')
limited = t1.limit(100)
joined = (limited.inner_join(t2, [limited.foo_id == t2.foo_id])
[[limited]])
return joined
def _case_union(self, distinct=False):
table = self.con.table('functional_alltypes')
t1 = (table[table.int_col > 0]
[table.string_col.name('key'),
table.float_col.cast('double').name('value')])
t2 = (table[table.int_col <= 0]
[table.string_col.name('key'),
table.double_col.name('value')])
expr = t1.union(t2, distinct=distinct)
return expr
def _case_simple_case(self):
t = self.con.table('alltypes')
return (t.g.case()
.when('foo', 'bar')
.when('baz', 'qux')
.else_('default')
.end())
def _case_search_case(self):
t = self.con.table('alltypes')
return (ibis.case()
.when(t.f > 0, t.d * 2)
.when(t.c < 0, t.a * 2)
.end())
def _case_self_reference_in_exists(self):
t = self.con.table('functional_alltypes')
t2 = t.view()
cond = (t.string_col == t2.string_col).any()
semi = t[cond]
anti = t[-cond]
return semi, anti
def _case_self_reference_limit_exists(self):
alltypes = self.con.table('functional_alltypes')
t = alltypes.limit(100)
t2 = t.view()
return t[-(t.string_col == t2.string_col).any()]
def _case_limit_cte_extract(self):
alltypes = self.con.table('functional_alltypes')
t = alltypes.limit(100)
t2 = t.view()
return t.join(t2).projection(t)
def _case_subquery_aliased(self):
t1 = self.con.table('star1')
t2 = self.con.table('star2')
agged = t1.aggregate([t1.f.sum().name('total')], by=['foo_id'])
what = (agged.inner_join(t2, [agged.foo_id == t2.foo_id])
[agged, t2.value1])
return what
def _case_filter_self_join_analysis_bug(self):
purchases = ibis.table([('region', 'string'),
('kind', 'string'),
('user', 'int64'),
('amount', 'double')], 'purchases')
metric = purchases.amount.sum().name('total')
agged = (purchases.group_by(['region', 'kind'])
.aggregate(metric))
left = agged[agged.kind == 'foo']
right = agged[agged.kind == 'bar']
joined = left.join(right, left.region == right.region)
result = joined[left.region,
(left.total - right.total).name('diff')]
return result, purchases
def _case_projection_fuse_filter(self):
# Probably test this during the evaluation phase. In SQL, "fusable"
# table operations will be combined together into a single select
# statement
#
# see ibis #71 for more on this
t = ibis.table([
('a', 'int8'),
('b', 'int16'),
('c', 'int32'),
('d', 'int64'),
('e', 'float'),
('f', 'double'),
('g', 'string'),
('h', 'boolean')
], 'foo')
proj = t['a', 'b', 'c']
# Rewrite a little more aggressively here
expr1 = proj[t.a > 0]
# at one point these yielded different results
filtered = t[t.a > 0]
expr2 = filtered[t.a, t.b, t.c]
expr3 = filtered.projection(['a', 'b', 'c'])
return expr1, expr2, expr3
class TestSelectSQL(unittest.TestCase, ExprTestCases):
@classmethod
def setUpClass(cls):
cls.con = MockConnection()
def _compare_sql(self, expr, expected):
result = to_sql(expr)
assert result == expected
def test_nameless_table(self):
# Generate a unique table name when we haven't passed one
nameless = api.table([('key', 'string')])
assert to_sql(nameless) == 'SELECT *\nFROM {}'.format(
nameless.op().name
)
with_name = api.table([('key', 'string')], name='baz')
result = to_sql(with_name)
assert result == 'SELECT *\nFROM baz'
def test_physical_table_reference_translate(self):
# If all of an expression's table leaves reference database tables,
# verify that we translate correctly
table = self.con.table('alltypes')
query = _get_query(table)
sql_string = query.compile()
expected = "SELECT *\nFROM alltypes"
assert sql_string == expected
def test_simple_joins(self):
t1 = self.con.table('star1')
t2 = self.con.table('star2')
pred = t1['foo_id'] == t2['foo_id']
pred2 = t1['bar_id'] == t2['foo_id']
cases = [
(t1.inner_join(t2, [pred])[[t1]],
"""SELECT t0.*
FROM star1 t0
INNER JOIN star2 t1
ON t0.`foo_id` = t1.`foo_id`"""),
(t1.left_join(t2, [pred])[[t1]],
"""SELECT t0.*
FROM star1 t0
LEFT OUTER JOIN star2 t1
ON t0.`foo_id` = t1.`foo_id`"""),
(t1.outer_join(t2, [pred])[[t1]],
"""SELECT t0.*
FROM star1 t0
FULL OUTER JOIN star2 t1
ON t0.`foo_id` = t1.`foo_id`"""),
# multiple predicates
(t1.inner_join(t2, [pred, pred2])[[t1]],
"""SELECT t0.*
FROM star1 t0
INNER JOIN star2 t1
ON (t0.`foo_id` = t1.`foo_id`) AND
(t0.`bar_id` = t1.`foo_id`)"""),
]
for expr, expected_sql in cases:
result_sql = to_sql(expr)
assert result_sql == expected_sql
def test_multiple_joins(self):
what = self._case_multiple_joins()
result_sql = to_sql(what)
expected_sql = """SELECT t0.*, t1.`value1`, t2.`value2`
FROM star1 t0
LEFT OUTER JOIN star2 t1
ON t0.`foo_id` = t1.`foo_id`
INNER JOIN star3 t2
ON t0.`bar_id` = t2.`bar_id`"""
assert result_sql == expected_sql
def test_join_between_joins(self):
projected = self._case_join_between_joins()
result = to_sql(projected)
expected = """SELECT t0.*, t1.`value3`, t1.`value4`
FROM (
SELECT t2.*, t3.`value2`
FROM `first` t2
INNER JOIN second t3
ON t2.`key1` = t3.`key1`
) t0
INNER JOIN (
SELECT t2.*, t3.`value4`
FROM third t2
INNER JOIN fourth t3
ON t2.`key3` = t3.`key3`
) t1
ON t0.`key2` = t1.`key2`"""
assert result == expected
def test_join_just_materialized(self):
joined = self._case_join_just_materialized()
result = to_sql(joined)
expected = """SELECT *
FROM tpch_nation t0
INNER JOIN tpch_region t1
ON t0.`n_regionkey` = t1.`r_regionkey`
INNER JOIN tpch_customer t2
ON t0.`n_nationkey` = t2.`c_nationkey`"""
assert result == expected
result = to_sql(joined.materialize())
assert result == expected
def test_join_no_predicates_for_impala(self):
# Impala requires that joins without predicates be written explicitly
# as CROSS JOIN, since result sets can accidentally get too large if a
# query is executed before predicates are written
t1 = self.con.table('star1')
t2 = self.con.table('star2')
joined2 = t1.cross_join(t2)[[t1]]
expected = """SELECT t0.*
FROM star1 t0
CROSS JOIN star2 t1"""
result2 = to_sql(joined2)
assert result2 == expected
for jtype in ['inner_join', 'left_join', 'outer_join']:
joined = getattr(t1, jtype)(t2)[[t1]]
result = to_sql(joined)
assert result == expected
def test_semi_anti_joins(self):
sj, aj = self._case_semi_anti_joins()
result = to_sql(sj)
expected = """SELECT t0.*
FROM star1 t0
LEFT SEMI JOIN star2 t1
ON t0.`foo_id` = t1.`foo_id`"""
assert result == expected
result = to_sql(aj)
expected = """SELECT t0.*
FROM star1 t0
LEFT ANTI JOIN star2 t1
ON t0.`foo_id` = t1.`foo_id`"""
assert result == expected
def test_self_reference_simple(self):
expr = self._case_self_reference_simple()
result_sql = to_sql(expr)
expected_sql = "SELECT *\nFROM star1"
assert result_sql == expected_sql
def test_join_self_reference(self):
result = self._case_self_reference_join()
result_sql = to_sql(result)
expected_sql = """SELECT t0.*
FROM star1 t0
INNER JOIN star1 t1
ON t0.`foo_id` = t1.`bar_id`"""
assert result_sql == expected_sql
def test_join_projection_subquery_broken_alias(self):
expr = self._case_join_projection_subquery_bug()
result = to_sql(expr)
expected = """SELECT t1.*, t0.*
FROM (
SELECT t2.`n_nationkey`, t2.`n_name` AS `nation`, t3.`r_name` AS `region`
FROM nation t2
INNER JOIN region t3
ON t2.`n_regionkey` = t3.`r_regionkey`
) t0
INNER JOIN customer t1
ON t0.`n_nationkey` = t1.`c_nationkey`"""
assert result == expected
def test_where_simple_comparisons(self):
what = self._case_where_simple_comparisons()
result = to_sql(what)
expected = """SELECT *
FROM star1
WHERE (`f` > 0) AND
(`c` < (`f` * 2))"""
assert result == expected
def test_where_in_array_literal(self):
# e.g.
# where string_col in (v1, v2, v3)
raise unittest.SkipTest
def test_where_with_join(self):
e1 = self._case_where_with_join()
expected_sql = """SELECT t0.*, t1.`value1`, t1.`value3`
FROM star1 t0
INNER JOIN star2 t1
ON t0.`foo_id` = t1.`foo_id`
WHERE (t0.`f` > 0) AND
(t1.`value3` < 1000)"""
result_sql = to_sql(e1)
assert result_sql == expected_sql
# result2_sql = to_sql(e2)
# assert result2_sql == expected_sql
def test_where_no_pushdown_possible(self):
t1 = self.con.table('star1')
t2 = self.con.table('star2')
joined = (t1.inner_join(t2, [t1.foo_id == t2.foo_id])
[t1, (t1.f - t2.value1).name('diff')])
filtered = joined[joined.diff > 1]
# TODO: I'm not sure if this is exactly what we want
expected_sql = """SELECT *
FROM (
SELECT t0.*, t0.`f` - t1.`value1` AS `diff`
FROM star1 t0
INNER JOIN star2 t1
ON t0.`foo_id` = t1.`foo_id`
WHERE t0.`f` > 0 AND
t1.`value3` < 1000
)
WHERE `diff` > 1"""
raise unittest.SkipTest
result_sql = to_sql(filtered)
assert result_sql == expected_sql
def test_where_with_between(self):
t = self.con.table('alltypes')
what = t.filter([t.a > 0, t.f.between(0, 1)])
result = to_sql(what)
expected = """SELECT *
FROM alltypes
WHERE (`a` > 0) AND
(`f` BETWEEN 0 AND 1)"""
assert result == expected
def test_where_analyze_scalar_op(self):
# root cause of #310
table = self.con.table('functional_alltypes')
expr = (table.filter([table.timestamp_col <
(ibis.timestamp('2010-01-01') + ibis.month(3)),
table.timestamp_col < (ibis.now() +
ibis.day(10))])
.count())
result = to_sql(expr)
expected = """\
SELECT count(*) AS `count`
FROM functional_alltypes
WHERE (`timestamp_col` < date_add(cast({} as timestamp), INTERVAL 3 MONTH)) AND
(`timestamp_col` < date_add(cast(now() as timestamp), INTERVAL 10 DAY))""" # noqa: E501
assert result == expected.format("'2010-01-01 00:00:00'")
def test_bug_duplicated_where(self):
# GH #539
table = self.con.table('airlines')
t = table['arrdelay', 'dest']
expr = (t.group_by('dest')
.mutate(dest_avg=t.arrdelay.mean(),
dev=t.arrdelay - t.arrdelay.mean()))
tmp1 = expr[expr.dev.notnull()]
tmp2 = tmp1.sort_by(ibis.desc('dev'))
worst = tmp2.limit(10)
result = to_sql(worst)
# TODO(cpcloud): We should be able to flatten the second subquery into
# the first
expected = """\
SELECT t0.*
FROM (
SELECT *, avg(`arrdelay`) OVER (PARTITION BY `dest`) AS `dest_avg`,
`arrdelay` - avg(`arrdelay`) OVER (PARTITION BY `dest`) AS `dev`
FROM (
SELECT `arrdelay`, `dest`
FROM airlines
) t2
) t0
WHERE t0.`dev` IS NOT NULL
ORDER BY t0.`dev` DESC
LIMIT 10"""
assert result == expected
def test_simple_aggregate_query(self):
expected = [
"""SELECT `foo_id`, sum(`f`) AS `total`
FROM star1
GROUP BY 1""",
"""SELECT `foo_id`, `bar_id`, sum(`f`) AS `total`
FROM star1
GROUP BY 1, 2"""
]
cases = self._case_simple_aggregate_query()
for expr, expected_sql in zip(cases, expected):
result_sql = to_sql(expr)
assert result_sql == expected_sql
def test_aggregate_having(self):
e1, e2 = self._case_aggregate_having()
result = to_sql(e1)
expected = """SELECT `foo_id`, sum(`f`) AS `total`
FROM star1
GROUP BY 1
HAVING sum(`f`) > 10"""
assert result == expected
result = to_sql(e2)
expected = """SELECT `foo_id`, sum(`f`) AS `total`
FROM star1
GROUP BY 1
HAVING count(*) > 100"""
assert result == expected
def test_aggregate_table_count_metric(self):
expr = self.con.table('star1').count()
result = to_sql(expr)
expected = """SELECT count(*) AS `count`
FROM star1"""
assert result == expected
def test_aggregate_count_joined(self):
expr = self._case_aggregate_count_joined()
result = to_sql(expr)
expected = """SELECT count(*) AS `count`
FROM (
SELECT t2.*, t1.`r_name` AS `region`
FROM tpch_region t1
INNER JOIN tpch_nation t2
ON t1.`r_regionkey` = t2.`n_regionkey`
) t0"""
assert result == expected
@pytest.mark.xfail(raises=AssertionError, reason='NYT')
def test_expr_template_field_name_binding(self):
# Given an expression with no concrete links to actual database tables,
# indicate a mapping between the distinct unbound table leaves of the
# expression and some database tables with compatible schemas but
# potentially different column names
assert False
def test_no_aliases_needed(self):
table = api.table([
('key1', 'string'),
('key2', 'string'),
('value', 'double')
])
expr = table.aggregate([table['value'].sum().name('total')],
by=['key1', 'key2'])
query = _get_query(expr)
context = query.context
assert not context.need_aliases()
def test_table_names_overlap_default_aliases(self):
# see discussion in #104; this actually is not needed for query
# correctness, and only makes the generated SQL nicer
raise unittest.SkipTest
t0 = api.table([
('key', 'string'),
('v1', 'double')
], 't1')
t1 = api.table([
('key', 'string'),
('v2', 'double')
], 't0')
expr = t0.join(t1, t0.key == t1.key)[t0.key, t0.v1, t1.v2]
result = to_sql(expr)
expected = """\
SELECT t2.`key`, t2.`v1`, t3.`v2`
FROM t0 t2
INNER JOIN t1 t3
ON t2.`key` = t3.`key`"""
assert result == expected
def test_context_aliases_multiple_join(self):
t1 = self.con.table('star1')
t2 = self.con.table('star2')
t3 = self.con.table('star3')
expr = (t1.left_join(t2, [t1['foo_id'] == t2['foo_id']])
.inner_join(t3, [t1['bar_id'] == t3['bar_id']])
[[t1, t2['value1'], t3['value2']]])
query = _get_query(expr)
context = query.context
assert context.get_ref(t1) == 't0'
assert context.get_ref(t2) == 't1'
assert context.get_ref(t3) == 't2'
def test_fuse_projections(self):
table = api.table([
('foo', 'int32'),
('bar', 'int64'),
('value', 'double')
], name='tbl')
# Cases where we project using the base table reference in both cases
f1 = (table['foo'] + table['bar']).name('baz')
pred = table['value'] > 0
table2 = table[table, f1]
table2_filtered = table2[pred]
f2 = (table2['foo'] * 2).name('qux')
f3 = (table['foo'] * 2).name('qux')
table3 = table2.projection([table2, f2])
# fusion works even if there's a filter
table3_filtered = table2_filtered.projection([table2, f2])
expected = table[table, f1, f3]
expected2 = table[pred][table, f1, f3]
assert table3.equals(expected)
assert table3_filtered.equals(expected2)
ex_sql = """SELECT *, `foo` + `bar` AS `baz`, `foo` * 2 AS `qux`
FROM tbl"""
ex_sql2 = """SELECT *, `foo` + `bar` AS `baz`, `foo` * 2 AS `qux`
FROM tbl
WHERE `value` > 0"""
table3_sql = to_sql(table3)
table3_filt_sql = to_sql(table3_filtered)
assert table3_sql == ex_sql
assert table3_filt_sql == ex_sql2
# Use the intermediate table refs
table3 = table2.projection([table2, f2])
# fusion works even if there's a filter
table3_filtered = table2_filtered.projection([table2, f2])
expected = table[table, f1, f3]
expected2 = table[pred][table, f1, f3]
assert table3.equals(expected)
assert table3_filtered.equals(expected2)
def test_projection_filter_fuse(self):
expr1, expr2, expr3 = self._case_projection_fuse_filter()
sql1 = to_sql(expr1)
sql2 = to_sql(expr2)
sql3 = to_sql(expr3)
assert sql1 == sql2
assert sql1 == sql3
def test_bug_project_multiple_times(self):
# 108
con = self.con
customer = con.table('tpch_customer')
nation = con.table('tpch_nation')
region = con.table('tpch_region')
joined = (
customer.inner_join(nation,
[customer.c_nationkey == nation.n_nationkey])
.inner_join(region,
[nation.n_regionkey == region.r_regionkey])
)
proj1 = [customer, nation.n_name, region.r_name]
step1 = joined[proj1]
topk_by = step1.c_acctbal.cast('double').sum()
pred = step1.n_name.topk(10, by=topk_by)
proj_exprs = [step1.c_name, step1.r_name, step1.n_name]
step2 = step1[pred]
expr = step2.projection(proj_exprs)
# it works!
result = to_sql(expr)
expected = """\
WITH t0 AS (
SELECT t2.*, t3.`n_name`, t4.`r_name`
FROM tpch_customer t2
INNER JOIN tpch_nation t3
ON t2.`c_nationkey` = t3.`n_nationkey`
INNER JOIN tpch_region t4
ON t3.`n_regionkey` = t4.`r_regionkey`
)
SELECT `c_name`, `r_name`, `n_name`
FROM t0
LEFT SEMI JOIN (
SELECT *
FROM (
SELECT `n_name`, sum(CAST(`c_acctbal` AS double)) AS `sum`
FROM t0
GROUP BY 1
) t2
ORDER BY `sum` DESC
LIMIT 10
) t1
ON t0.`n_name` = t1.`n_name`"""
assert result == expected
def test_aggregate_projection_subquery(self):
t = self.con.table('alltypes')
proj = t[t.f > 0][t, (t.a + t.b).name('foo')]
result = to_sql(proj)
expected = """SELECT *, `a` + `b` AS `foo`
FROM alltypes
WHERE `f` > 0"""
assert result == expected
def agg(x):
return x.aggregate([x.foo.sum().name('foo total')], by=['g'])
# predicate gets pushed down
filtered = proj[proj.g == 'bar']
result = to_sql(filtered)
expected = """SELECT *, `a` + `b` AS `foo`
FROM alltypes
WHERE (`f` > 0) AND
(`g` = 'bar')"""
assert result == expected
agged = agg(filtered)
result = to_sql(agged)
expected = """SELECT `g`, sum(`foo`) AS `foo total`
FROM (
SELECT *, `a` + `b` AS `foo`
FROM alltypes
WHERE (`f` > 0) AND
(`g` = 'bar')
) t0
GROUP BY 1"""
assert result == expected
# Pushdown is not possible (in Impala, Postgres, others)
agged2 = agg(proj[proj.foo < 10])
result = to_sql(agged2)
expected = """SELECT `g`, sum(`foo`) AS `foo total`
FROM (
SELECT *, `a` + `b` AS `foo`
FROM alltypes
WHERE `f` > 0
) t0
WHERE `foo` < 10
GROUP BY 1"""
assert result == expected
def test_subquery_aliased(self):
case = self._case_subquery_aliased()
expected = """SELECT t0.*, t1.`value1`
FROM (
SELECT `foo_id`, sum(`f`) AS `total`
FROM star1
GROUP BY 1
) t0
INNER JOIN star2 t1
ON t0.`foo_id` = t1.`foo_id`"""
self._compare_sql(case, expected)
def test_double_nested_subquery_no_aliases(self):
# We don't require any table aliasing anywhere
t = api.table([
('key1', 'string'),
('key2', 'string'),
('key3', 'string'),
('value', 'double')
], 'foo_table')
agg1 = t.aggregate([t.value.sum().name('total')],
by=['key1', 'key2', 'key3'])
agg2 = agg1.aggregate([agg1.total.sum().name('total')],
by=['key1', 'key2'])
agg3 = agg2.aggregate([agg2.total.sum().name('total')],
by=['key1'])
result = to_sql(agg3)
expected = """SELECT `key1`, sum(`total`) AS `total`
FROM (
SELECT `key1`, `key2`, sum(`total`) AS `total`
FROM (
SELECT `key1`, `key2`, `key3`, sum(`value`) AS `total`
FROM foo_table
GROUP BY 1, 2, 3
) t1
GROUP BY 1, 2
) t0
GROUP BY 1"""
assert result == expected
def test_aggregate_projection_alias_bug(self):
# Observed in use
t1 = self.con.table('star1')
t2 = self.con.table('star2')
what = (t1.inner_join(t2, [t1.foo_id == t2.foo_id])
[[t1, t2.value1]])
what = what.aggregate([what.value1.sum().name('total')],
by=[what.foo_id])
# TODO: Not fusing the aggregation with the projection yet
result = to_sql(what)
expected = """SELECT `foo_id`, sum(`value1`) AS `total`
FROM (
SELECT t1.*, t2.`value1`
FROM star1 t1
INNER JOIN star2 t2
ON t1.`foo_id` = t2.`foo_id`
) t0
GROUP BY 1"""
assert result == expected
@pytest.mark.xfail(raises=AssertionError, reason='NYT')
def test_aggregate_fuse_with_projection(self):
# see above test case
assert False
def test_subquery_used_for_self_join(self):
expr = self._case_subquery_used_for_self_join()
result = to_sql(expr)
expected = """WITH t0 AS (
SELECT `g`, `a`, `b`, sum(`f`) AS `total`
FROM alltypes
GROUP BY 1, 2, 3
)
SELECT t0.`g`, max(t0.`total` - t1.`total`) AS `metric`
FROM t0
INNER JOIN t0 t1
ON t0.`a` = t1.`b`
GROUP BY 1"""
assert result == expected
def test_subquery_in_union(self):
t = self.con.table('alltypes')
expr1 = t.group_by(['a', 'g']).aggregate(t.f.sum().name('metric'))
expr2 = expr1.view()
join1 = expr1.join(expr2, expr1.g == expr2.g)[[expr1]]
join2 = join1.view()
expr = join1.union(join2)
result = to_sql(expr)
expected = """\
WITH t0 AS (
SELECT `a`, `g`, sum(`f`) AS `metric`
FROM alltypes
GROUP BY 1, 2
),
t1 AS (
SELECT t0.*
FROM t0
INNER JOIN t0 t3
ON t0.`g` = t3.`g`
)
SELECT *
FROM t1
UNION ALL
SELECT t0.*
FROM t0
INNER JOIN t0 t3
ON t0.`g` = t3.`g`"""
assert result == expected
def test_subquery_factor_correlated_subquery(self):
# #173, #183 and other issues
expr = self._case_subquery_factor_correlated_subquery()
result = to_sql(expr)
expected = """\
WITH t0 AS (
SELECT t6.*, t1.`r_name` AS `region`, t3.`o_totalprice` AS `amount`,
CAST(t3.`o_orderdate` AS timestamp) AS `odate`
FROM tpch_region t1
INNER JOIN tpch_nation t2
ON t1.`r_regionkey` = t2.`n_regionkey`
INNER JOIN tpch_customer t6
ON t6.`c_nationkey` = t2.`n_nationkey`
INNER JOIN tpch_orders t3
ON t3.`o_custkey` = t6.`c_custkey`
)
SELECT t0.*
FROM t0
WHERE t0.`amount` > (
SELECT avg(t4.`amount`) AS `mean`
FROM t0 t4
WHERE t4.`region` = t0.`region`
)
LIMIT 10"""
assert result == expected
def test_self_join_subquery_distinct_equal(self):
expr = self._case_self_join_subquery_distinct_equal()
result = to_sql(expr)
expected = """\
WITH t0 AS (
SELECT t2.*, t3.*
FROM tpch_region t2
INNER JOIN tpch_nation t3
ON t2.`r_regionkey` = t3.`n_regionkey`
)
SELECT t0.`r_name`, t1.`n_name`
FROM t0
INNER JOIN t0 t1
ON t0.`r_regionkey` = t1.`r_regionkey`"""
assert result == expected
def test_limit_with_self_join(self):
t = self.con.table('functional_alltypes')
t2 = t.view()
expr = t.join(t2, t.tinyint_col < t2.timestamp_col.minute()).count()
# it works
result = to_sql(expr)
expected = """\
SELECT count(*) AS `count`
FROM functional_alltypes t0
INNER JOIN functional_alltypes t1
ON t0.`tinyint_col` < extract(t1.`timestamp_col`, 'minute')"""
assert result == expected
def test_cte_factor_distinct_but_equal(self):
expr = self._case_cte_factor_distinct_but_equal()
result = to_sql(expr)
expected = """\
WITH t0 AS (
SELECT `g`, sum(`f`) AS `metric`
FROM alltypes
GROUP BY 1
)
SELECT t0.*
FROM t0
INNER JOIN t0 t1
ON t0.`g` = t1.`g`"""
assert result == expected
def test_tpch_self_join_failure(self):
yoy = self._case_tpch_self_join_failure()
to_sql(yoy)
@pytest.mark.xfail(raises=AssertionError, reason='NYT')
def test_extract_subquery_nested_lower(self):
# We may have a join between two tables requiring subqueries, and
# buried inside these there may be a common subquery. Let's test that
# we find it and pull it out to the top level to avoid repeating
# ourselves.
assert False
def test_subquery_in_filter_predicate(self):
expr, expr2 = self._case_subquery_in_filter_predicate()
result = to_sql(expr)
expected = """SELECT *
FROM star1
WHERE `f` > (
SELECT avg(`f`) AS `mean`
FROM star1
)"""
assert result == expected
result = to_sql(expr2)
expected = """SELECT *
FROM star1
WHERE `f` > (
SELECT avg(`f`) AS `mean`
FROM star1
WHERE `foo_id` = 'foo'
)"""
assert result == expected
def test_filter_subquery_derived_reduction(self):
expr3, expr4 = self._case_filter_subquery_derived_reduction()
result = to_sql(expr3)
expected = """SELECT *
FROM star1
WHERE `f` > (
SELECT ln(avg(`f`)) AS `tmp`
FROM star1
WHERE `foo_id` = 'foo'
)"""
assert result == expected
result = to_sql(expr4)
expected = """SELECT *
FROM star1
WHERE `f` > (
SELECT ln(avg(`f`)) + 1 AS `tmp`
FROM star1
WHERE `foo_id` = 'foo'
)"""
assert result == expected
def test_topk_operation(self):
filtered, filtered2 = self._case_topk_operation()
query = to_sql(filtered)
expected = """SELECT t0.*
FROM tbl t0
LEFT SEMI JOIN (
SELECT *
FROM (
SELECT `city`, avg(`v2`) AS `mean`
FROM tbl
GROUP BY 1
) t2
ORDER BY `mean` DESC
LIMIT 10
) t1
ON t0.`city` = t1.`city`"""
assert query == expected
query = to_sql(filtered2)
expected = """SELECT t0.*
FROM tbl t0
LEFT SEMI JOIN (
SELECT *
FROM (
SELECT `city`, count(`city`) AS `count`
FROM tbl
GROUP BY 1
) t2
ORDER BY `count` DESC
LIMIT 10
) t1
ON t0.`city` = t1.`city`"""
assert query == expected
def test_topk_predicate_pushdown_bug(self):
# Observed on TPCH data
cplusgeo = (
customer.inner_join(nation, [customer.c_nationkey ==
nation.n_nationkey])
.inner_join(region, [nation.n_regionkey ==
region.r_regionkey])
[customer, nation.n_name, region.r_name])
pred = cplusgeo.n_name.topk(10, by=cplusgeo.c_acctbal.sum())
expr = cplusgeo.filter([pred])
result = to_sql(expr)
expected = """\
WITH t0 AS (
SELECT t2.*, t3.`n_name`, t4.`r_name`
FROM customer t2
INNER JOIN nation t3
ON t2.`c_nationkey` = t3.`n_nationkey`
INNER JOIN region t4
ON t3.`n_regionkey` = t4.`r_regionkey`
)
SELECT t0.*
FROM t0
LEFT SEMI JOIN (
SELECT *
FROM (
SELECT `n_name`, sum(`c_acctbal`) AS `sum`
FROM t0
GROUP BY 1
) t2
ORDER BY `sum` DESC
LIMIT 10
) t1
ON t0.`n_name` = t1.`n_name`"""
assert result == expected
def test_topk_analysis_bug(self):
# GH #398
airlines = ibis.table([('dest', 'string'),
('origin', 'string'),
('arrdelay', 'int32')], 'airlines')
dests = ['ORD', 'JFK', 'SFO']
dests_formatted = repr(tuple(set(dests)))
delay_filter = airlines.dest.topk(10, by=airlines.arrdelay.mean())
t = airlines[airlines.dest.isin(dests)]
expr = t[delay_filter].group_by('origin').size()
result = to_sql(expr)
expected = """\
SELECT t0.`origin`, count(*) AS `count`
FROM airlines t0
LEFT SEMI JOIN (
SELECT *
FROM (
SELECT `dest`, avg(`arrdelay`) AS `mean`
FROM airlines
GROUP BY 1
) t2
ORDER BY `mean` DESC
LIMIT 10
) t1
ON t0.`dest` = t1.`dest`
WHERE t0.`dest` IN {}
GROUP BY 1""".format(dests_formatted)
assert result == expected
def test_topk_to_aggregate(self):
t = ibis.table([('dest', 'string'),
('origin', 'string'),
('arrdelay', 'int32')], 'airlines')
top = t.dest.topk(10, by=t.arrdelay.mean())
result = to_sql(top)
expected = to_sql(top.to_aggregation())
assert result == expected
@pytest.mark.xfail(raises=AssertionError, reason='NYT')
def test_bottomk(self):
assert False
@pytest.mark.xfail(raises=AssertionError, reason='NYT')
def test_topk_antijoin(self):
# Get the "other" category somehow
assert False
def test_case_in_projection(self):
t = self.con.table('alltypes')
expr = (t.g.case()
.when('foo', 'bar')
.when('baz', 'qux')
.else_('default').end())
expr2 = (api.case()
.when(t.g == 'foo', 'bar')
.when(t.g == 'baz', t.g)
.end())
proj = t[expr.name('col1'), expr2.name('col2'), t]
result = to_sql(proj)
expected = """SELECT
CASE `g`
WHEN 'foo' THEN 'bar'
WHEN 'baz' THEN 'qux'
ELSE 'default'
END AS `col1`,
CASE
WHEN `g` = 'foo' THEN 'bar'
WHEN `g` = 'baz' THEN `g`
ELSE NULL
END AS `col2`, *
FROM alltypes"""
assert result == expected
def test_identifier_quoting(self):
data = api.table([
('date', 'int32'),
('explain', 'string')
], 'table')
expr = data[data.date.name('else'), data.explain.name('join')]
result = to_sql(expr)
expected = """SELECT `date` AS `else`, `explain` AS `join`
FROM `table`"""
assert result == expected
def test_scalar_subquery_different_table(self):
t1, t2 = self.foo, self.bar
expr = t1[t1.y > t2.x.max()]
result = to_sql(expr)
expected = """SELECT *
FROM foo
WHERE `y` > (
SELECT max(`x`) AS `max`
FROM bar
)"""
assert result == expected
def test_where_uncorrelated_subquery(self):
expr = self._case_where_uncorrelated_subquery()
result = to_sql(expr)
expected = """SELECT *
FROM foo
WHERE `job` IN (
SELECT `job`
FROM bar
)"""
assert result == expected
def test_where_correlated_subquery(self):
expr = self._case_where_correlated_subquery()
result = to_sql(expr)
expected = """SELECT t0.*
FROM foo t0
WHERE t0.`y` > (
SELECT avg(t1.`y`) AS `mean`
FROM foo t1
WHERE t0.`dept_id` = t1.`dept_id`
)"""
assert result == expected
@pytest.mark.xfail(raises=AssertionError, reason='NYT')
def test_where_array_correlated(self):
# Test membership in some record-dependent values, if this is supported
assert False
def test_exists(self):
e1, e2 = self._case_exists()
result = to_sql(e1)
expected = """SELECT t0.*
FROM foo t0
WHERE EXISTS (
SELECT 1
FROM bar t1
WHERE t0.`key1` = t1.`key1`
)"""
assert result == expected
result = to_sql(e2)
expected = """SELECT t0.*
FROM foo t0
WHERE EXISTS (
SELECT 1
FROM bar t1
WHERE (t0.`key1` = t1.`key1`) AND
(t1.`key2` = 'foo')
)"""
assert result == expected
def test_exists_subquery_repr(self):
# GH #660
t1, t2 = self.t1, self.t2
cond = t1.key1 == t2.key1
expr = t1[cond.any()]
stmt = _get_query(expr)
repr(stmt.where[0])
def test_not_exists(self):
expr = self._case_not_exists()
result = to_sql(expr)
expected = """SELECT t0.*
FROM foo t0
WHERE NOT EXISTS (
SELECT 1
FROM bar t1
WHERE t0.`key1` = t1.`key1`
)"""
assert result == expected
def test_filter_inside_exists(self):
events = ibis.table([('session_id', 'int64'),
('user_id', 'int64'),
('event_type', 'int32'),
('ts', 'timestamp')], 'events')
purchases = ibis.table([('item_id', 'int64'),
('user_id', 'int64'),
('price', 'double'),
('ts', 'timestamp')], 'purchases')
filt = purchases.ts > '2015-08-15'
cond = (events.user_id == purchases[filt].user_id).any()
expr = events[cond]
result = to_sql(expr)
expected = """\
SELECT t0.*
FROM events t0
WHERE EXISTS (
SELECT 1
FROM (
SELECT *
FROM purchases
WHERE `ts` > '2015-08-15'
) t1
WHERE t0.`user_id` = t1.`user_id`
)"""
assert result == expected
def test_self_reference_in_exists(self):
semi, anti = self._case_self_reference_in_exists()
result = to_sql(semi)
expected = """\
SELECT t0.*
FROM functional_alltypes t0
WHERE EXISTS (
SELECT 1
FROM functional_alltypes t1
WHERE t0.`string_col` = t1.`string_col`
)"""
assert result == expected
result = to_sql(anti)
expected = """\
SELECT t0.*
FROM functional_alltypes t0
WHERE NOT EXISTS (
SELECT 1
FROM functional_alltypes t1
WHERE t0.`string_col` = t1.`string_col`
)"""
assert result == expected
def test_self_reference_limit_exists(self):
case = self._case_self_reference_limit_exists()
expected = """\
WITH t0 AS (
SELECT *
FROM functional_alltypes
LIMIT 100
)
SELECT *
FROM t0
WHERE NOT EXISTS (
SELECT 1
FROM t0 t1
WHERE t0.`string_col` = t1.`string_col`
)"""
self._compare_sql(case, expected)
def test_limit_cte_extract(self):
case = self._case_limit_cte_extract()
expected = """\
WITH t0 AS (
SELECT *
FROM functional_alltypes
LIMIT 100
)
SELECT t0.*
FROM t0
CROSS JOIN t0 t1"""
self._compare_sql(case, expected)
def test_sort_by(self):
cases = self._case_sort_by()
expected = [
"""SELECT *
FROM star1
ORDER BY `f`""",
"""SELECT *
FROM star1
ORDER BY `f` DESC""",
"""SELECT *
FROM star1
ORDER BY `c`, `f` DESC"""
]
for case, ex in zip(cases, expected):
result = to_sql(case)
assert result == ex
def test_limit(self):
cases = self._case_limit()
expected = [
"""SELECT *
FROM star1
LIMIT 10""",
"""SELECT *
FROM star1
LIMIT 10 OFFSET 5""",
"""SELECT *
FROM star1
WHERE `f` > 0
LIMIT 10""",
"""SELECT *
FROM (
SELECT *
FROM star1
LIMIT 10
) t0
WHERE `f` > 0"""
]
for case, ex in zip(cases, expected):
result = to_sql(case)
assert result == ex
def test_join_with_limited_table(self):
joined = self._case_join_with_limited_table()
result = to_sql(joined)
expected = """SELECT t0.*
FROM (
SELECT *
FROM star1
LIMIT 100
) t0
INNER JOIN star2 t1
ON t0.`foo_id` = t1.`foo_id`"""
assert result == expected
def test_sort_by_on_limit_yield_subquery(self):
# x.limit(...).sort_by(...)
# is semantically different from
# x.sort_by(...).limit(...)
# and will often yield different results
t = self.con.table('functional_alltypes')
expr = (t.group_by('string_col')
.aggregate([t.count().name('nrows')])
.limit(5)
.sort_by('string_col'))
result = to_sql(expr)
expected = """SELECT *
FROM (
SELECT `string_col`, count(*) AS `nrows`
FROM functional_alltypes
GROUP BY 1
LIMIT 5
) t0
ORDER BY `string_col`"""
assert result == expected
def test_multiple_limits(self):
t = self.con.table('functional_alltypes')
expr = t.limit(20).limit(10)
stmt = _get_query(expr)
assert stmt.limit['n'] == 10
@pytest.mark.xfail(raises=AssertionError, reason='NYT')
def test_top_convenience(self):
# x.top(10, by=field)
# x.top(10, by=[field1, field2])
assert False
@pytest.mark.xfail(raises=AssertionError, reason='NYT')
def test_self_aggregate_in_predicate(self):
# Per ibis #43
assert False
def test_self_join_filter_analysis_bug(self):
expr, _ = self._case_filter_self_join_analysis_bug()
expected = """\
SELECT t0.`region`, t0.`total` - t1.`total` AS `diff`
FROM (
SELECT `region`, `kind`, sum(`amount`) AS `total`
FROM purchases
WHERE `kind` = 'foo'
GROUP BY 1, 2
) t0
INNER JOIN (
SELECT `region`, `kind`, sum(`amount`) AS `total`
FROM purchases
WHERE `kind` = 'bar'
GROUP BY 1, 2
) t1
ON t0.`region` = t1.`region`"""
self._compare_sql(expr, expected)
def test_join_filtered_tables_no_pushdown(self):
# #790, #781
tbl_a = ibis.table([('year', 'int32'),
('month', 'int32'),
('day', 'int32'),
('value_a', 'double')], 'a')
tbl_b = ibis.table([('year', 'int32'),
('month', 'int32'),
('day', 'int32'),
('value_b', 'double')], 'b')
tbl_a_filter = tbl_a.filter([
tbl_a.year == 2016,
tbl_a.month == 2,
tbl_a.day == 29
])
tbl_b_filter = tbl_b.filter([
tbl_b.year == 2016,
tbl_b.month == 2,
tbl_b.day == 29
])
joined = tbl_a_filter.left_join(tbl_b_filter, ['year', 'month', 'day'])
result = joined[tbl_a_filter.value_a, tbl_b_filter.value_b]
join_op = result.op().table.op()
assert join_op.left.equals(tbl_a_filter)
assert join_op.right.equals(tbl_b_filter)
result_sql = ibis.impala.compile(result)
expected_sql = """\
SELECT t0.`value_a`, t1.`value_b`
FROM (
SELECT *
FROM a
WHERE (`year` = 2016) AND
(`month` = 2) AND
(`day` = 29)
) t0
LEFT OUTER JOIN (
SELECT *
FROM b
WHERE (`year` = 2016) AND
(`month` = 2) AND
(`day` = 29)
) t1
ON (t0.`year` = t1.`year`) AND
(t0.`month` = t1.`month`) AND
(t0.`day` = t1.`day`)"""
assert result_sql == expected_sql
def test_loj_subquery_filter_handling(self):
# #781
left = ibis.table([('id', 'int32'), ('desc', 'string')], 'foo')
right = ibis.table([('id', 'int32'), ('desc', 'string')], 'bar')
left = left[left.id < 2]
right = right[right.id < 3]
joined = left.left_join(right, ['id', 'desc'])
joined = joined[
[left[name].name('left_' + name) for name in left.columns] +
[right[name].name('right_' + name) for name in right.columns]
]
result = to_sql(joined)
expected = """\
SELECT t0.`id` AS `left_id`, t0.`desc` AS `left_desc`, t1.`id` AS `right_id`,
t1.`desc` AS `right_desc`
FROM (
SELECT *
FROM foo
WHERE `id` < 2
) t0
LEFT OUTER JOIN (
SELECT *
FROM bar
WHERE `id` < 3
) t1
ON (t0.`id` = t1.`id`) AND
(t0.`desc` = t1.`desc`)"""
assert result == expected
class TestUnions(unittest.TestCase, ExprTestCases):
def setUp(self):
self.con = MockConnection()
def test_union(self):
union1 = self._case_union()
result = to_sql(union1)
expected = """\
SELECT `string_col` AS `key`, CAST(`float_col` AS double) AS `value`
FROM functional_alltypes
WHERE `int_col` > 0
UNION ALL
SELECT `string_col` AS `key`, `double_col` AS `value`
FROM functional_alltypes
WHERE `int_col` <= 0"""
assert result == expected
def test_union_distinct(self):
union = self._case_union(distinct=True)
result = to_sql(union)
expected = """\
SELECT `string_col` AS `key`, CAST(`float_col` AS double) AS `value`
FROM functional_alltypes
WHERE `int_col` > 0
UNION
SELECT `string_col` AS `key`, `double_col` AS `value`
FROM functional_alltypes
WHERE `int_col` <= 0"""
assert result == expected
def test_union_project_column(self):
# select a column, get a subquery
union1 = self._case_union()
expr = union1[[union1.key]]
result = to_sql(expr)
expected = """SELECT `key`
FROM (
SELECT `string_col` AS `key`, CAST(`float_col` AS double) AS `value`
FROM functional_alltypes
WHERE `int_col` > 0
UNION ALL
SELECT `string_col` AS `key`, `double_col` AS `value`
FROM functional_alltypes
WHERE `int_col` <= 0
) t0"""
assert result == expected
class TestDistinct(unittest.TestCase):
def setUp(self):
self.con = MockConnection()
def test_table_distinct(self):
t = self.con.table('functional_alltypes')
expr = t[t.string_col, t.int_col].distinct()
result = to_sql(expr)
expected = """SELECT DISTINCT `string_col`, `int_col`
FROM functional_alltypes"""
assert result == expected
def test_array_distinct(self):
t = self.con.table('functional_alltypes')
expr = t.string_col.distinct()
result = to_sql(expr)
expected = """SELECT DISTINCT `string_col`
FROM functional_alltypes"""
assert result == expected
def test_count_distinct(self):
t = self.con.table('functional_alltypes')
metric = t.int_col.nunique().name('nunique')
expr = t[t.bigint_col > 0].group_by('string_col').aggregate([metric])
result = to_sql(expr)
expected = """\
SELECT `string_col`, count(DISTINCT `int_col`) AS `nunique`
FROM functional_alltypes
WHERE `bigint_col` > 0
GROUP BY 1"""
assert result == expected
def test_multiple_count_distinct(self):
# Impala and some other databases will not execute multiple
# count-distincts in a single aggregation query. This error reporting
# will be left to the database itself, for now.
t = self.con.table('functional_alltypes')
metrics = [t.int_col.nunique().name('int_card'),
t.smallint_col.nunique().name('smallint_card')]
expr = t.group_by('string_col').aggregate(metrics)
result = to_sql(expr)
expected = """\
SELECT `string_col`, count(DISTINCT `int_col`) AS `int_card`,
count(DISTINCT `smallint_col`) AS `smallint_card`
FROM functional_alltypes
GROUP BY 1"""
assert result == expected
def test_pushdown_with_or():
t = ibis.table(
[('double_col', 'double'),
('string_col', 'string'),
('int_col', 'int32'),
('float_col', 'float')],
'functional_alltypes',
)
subset = t[(t.double_col > 3.14) & t.string_col.contains('foo')]
filt = subset[(subset.int_col - 1 == 0) | (subset.float_col <= 1.34)]
result = to_sql(filt)
expected = """\
SELECT *
FROM functional_alltypes
WHERE (`double_col` > 3.14) AND
(locate('foo', `string_col`) - 1 >= 0) AND
(((`int_col` - 1) = 0) OR (`float_col` <= 1.34))"""
assert result == expected
def test_having_size():
t = ibis.table(
[('double_col', 'double'),
('string_col', 'string'),
('int_col', 'int32'),
('float_col', 'float')],
'functional_alltypes',
)
expr = t.group_by(t.string_col).having(t.double_col.max() == 1).size()
result = to_sql(expr)
assert result == """\
SELECT `string_col`, count(*) AS `count`
FROM functional_alltypes
GROUP BY 1
HAVING max(`double_col`) = 1"""
def test_having_from_filter():
t = ibis.table([('a', 'int64'), ('b', 'string')], 't')
filt = t[t.b == 'm']
gb = filt.group_by(filt.b)
having = gb.having(filt.a.max() == 2)
agg = having.aggregate(filt.a.sum().name('sum'))
result = to_sql(agg)
expected = """\
SELECT `b`, sum(`a`) AS `sum`
FROM t
WHERE `b` = 'm'
GROUP BY 1
HAVING max(`a`) = 2"""
assert result == expected
def test_simple_agg_filter():
t = ibis.table([('a', 'int64'), ('b', 'string')], name='my_table')
filt = t[t.a < 100]
expr = filt[filt.a == filt.a.max()]
result = to_sql(expr)
expected = """\
SELECT *
FROM (
SELECT *
FROM my_table
WHERE `a` < 100
) t0
WHERE `a` = (
SELECT max(`a`) AS `max`
FROM my_table
WHERE `a` < 100
)"""
assert result == expected
def test_agg_and_non_agg_filter():
t = ibis.table([('a', 'int64'), ('b', 'string')], name='my_table')
filt = t[t.a < 100]
expr = filt[filt.a == filt.a.max()]
expr = expr[expr.b == 'a']
result = to_sql(expr)
expected = """\
SELECT *
FROM (
SELECT *
FROM my_table
WHERE `a` < 100
) t0
WHERE (`a` = (
SELECT max(`a`) AS `max`
FROM my_table
WHERE `a` < 100
)) AND
(`b` = 'a')"""
assert result == expected
def test_agg_filter():
t = ibis.table([('a', 'int64'), ('b', 'int64')], name='my_table')
t = t.mutate(b2=t.b * 2)
t = t[['a', 'b2']]
filt = t[t.a < 100]
expr = filt[filt.a == filt.a.max().name('blah')]
result = to_sql(expr)
expected = """\
WITH t0 AS (
SELECT *, `b` * 2 AS `b2`
FROM my_table
),
t1 AS (
SELECT t0.`a`, t0.`b2`
FROM t0
WHERE t0.`a` < 100
)
SELECT t1.*
FROM t1
WHERE t1.`a` = (
SELECT max(`a`) AS `blah`
FROM t1
)"""
assert result == expected
def test_agg_filter_with_alias():
t = ibis.table([('a', 'int64'), ('b', 'int64')], name='my_table')
t = t.mutate(b2=t.b * 2)
t = t[['a', 'b2']]
filt = t[t.a < 100]
expr = filt[filt.a.name('A') == filt.a.max().name('blah')]
result = to_sql(expr)
expected = """\
WITH t0 AS (
SELECT *, `b` * 2 AS `b2`
FROM my_table
),
t1 AS (
SELECT t0.`a`, t0.`b2`
FROM t0
WHERE t0.`a` < 100
)
SELECT t1.*
FROM t1
WHERE t1.`a` = (
SELECT max(`a`) AS `blah`
FROM t1
)"""
assert result == expected
| apache-2.0 |
hyperspy/hyperspy | hyperspy/drawing/_widgets/range.py | 2 | 22610 | # -*- coding: utf-8 -*-
# Copyright 2007-2021 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import inspect
import logging
import numpy as np
from matplotlib.widgets import SpanSelector
from hyperspy.drawing.widgets import ResizableDraggableWidgetBase
from hyperspy.events import Events, Event
from hyperspy.defaults_parser import preferences
_logger = logging.getLogger(__name__)
# Track if we have already warned when the widget is out of range
already_warn_out_of_range = False
def in_interval(number, interval):
if interval[0] <= number <= interval[1]:
return True
else:
return False
class RangeWidget(ResizableDraggableWidgetBase):
"""RangeWidget is a span-patch based widget, which can be
dragged and resized by mouse/keys. Basically a wrapper for
    ModifiableSpanSelector so that it conforms to the common widget interface.
For optimized changes of geometry, the class implements two methods
'set_bounds' and 'set_ibounds', to set the geometry of the rectangle by
    value and index space coordinates, respectively.
Implements the internal method _validate_geometry to make sure the patch
will always stay within bounds.
"""
def __init__(self, axes_manager, ax=None, alpha=0.5, **kwargs):
# Parse all kwargs for the matplotlib SpanSelector
self._SpanSelector_kwargs = {}
for key in inspect.signature(SpanSelector).parameters.keys():
if key in kwargs:
self._SpanSelector_kwargs[key] = kwargs.pop(key)
super(RangeWidget, self).__init__(axes_manager, alpha=alpha, **kwargs)
self.span = None
def set_on(self, value, render_figure=True):
if value is not self.is_on and self.ax is not None:
if value is True:
self._add_patch_to(self.ax)
self.connect(self.ax)
elif value is False:
self.disconnect()
if render_figure:
self.draw_patch()
if value is False:
self.ax = None
self._is_on = value
def _add_patch_to(self, ax):
self.span = ModifiableSpanSelector(ax, **self._SpanSelector_kwargs)
self.span.set_initial(self._get_range())
self.span.bounds_check = True
self.span.snap_position = self.snap_position
self.span.snap_size = self.snap_size
self.span.can_switch = True
self.span.events.changed.connect(self._span_changed, {'obj': 'widget'})
self.span.step_ax = self.axes[0]
self.span.tolerance = preferences.Plot.pick_tolerance
self.patch = [self.span.rect]
self.patch[0].set_color(self.color)
self.patch[0].set_alpha(self.alpha)
def _span_changed(self, widget):
r = self._get_range()
pr = widget.range
if r != pr:
dx = self.axes[0].scale
x = pr[0] + 0.5 * dx
w = pr[1] + 0.5 * dx - x
old_position, old_size = self.position, self.size
self._pos = np.array([x])
self._size = np.array([w])
self._validate_geometry()
if self._pos != np.array([x]) or self._size != np.array([w]):
self._update_patch_size()
self._apply_changes(old_size=old_size, old_position=old_position)
def _get_range(self):
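        # Convert from the widget's (position, size) to the span's (left, right):
        # the left edge is shifted back by half an axis step, mirroring the
        # forward shift applied in _span_changed.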
p = self._pos[0]
w = self._size[0]
offset = self.axes[0].scale
p -= 0.5 * offset
return (p, p + w)
def _parse_bounds_args(self, args, kwargs):
if len(args) == 1:
return args[0]
elif len(args) == 4:
return args
elif len(kwargs) == 1 and 'bounds' in kwargs:
return kwargs.values()[0]
else:
x = kwargs.pop('x', kwargs.pop('left', self._pos[0]))
if 'right' in kwargs:
w = kwargs.pop('right') - x
else:
w = kwargs.pop('w', kwargs.pop('width', self._size[0]))
return x, w
def set_ibounds(self, *args, **kwargs):
"""
        Set bounds by indices. Bounds can either be specified in order
        (left, width); or by keywords:
* 'bounds': tuple (left, width)
OR
* 'x'/'left'
* 'w'/'width', alternatively 'right'
If specifying with keywords, any unspecified dimensions will be kept
constant (note: width will be kept, not right).
"""
ix, iw = self._parse_bounds_args(args, kwargs)
x = self.axes[0].index2value(ix)
w = self._i2v(self.axes[0], ix + iw) - x
old_position, old_size = self.position, self.size
self._pos = np.array([x])
self._size = np.array([w])
self._apply_changes(old_size=old_size, old_position=old_position)
def set_bounds(self, *args, **kwargs):
"""
        Set bounds by values. Bounds can either be specified in order
        (left, width); or by keywords:
* 'bounds': tuple (left, width)
OR
* 'x'/'left'
* 'w'/'width', alternatively 'right' (x+w)
If specifying with keywords, any unspecified dimensions will be kept
constant (note: width will be kept, not right).
"""
global already_warn_out_of_range
def warn(obj, parameter, value):
global already_warn_out_of_range
if not already_warn_out_of_range:
_logger.info('{}: {} is out of range. It is therefore set '
'to the value of {}'.format(obj, parameter, value))
already_warn_out_of_range = True
x, w = self._parse_bounds_args(args, kwargs)
l0, h0 = self.axes[0].low_value, self.axes[0].high_value
scale = self.axes[0].scale
in_range = 0
if x < l0:
x = l0
warn(self, '`x` or `left`', x)
elif h0 <= x:
x = h0 - scale
warn(self, '`x` or `left`', x)
else:
in_range += 1
if w < scale:
w = scale
warn(self, '`width` or `right`', w)
elif not (l0 + scale <= x + w <= h0 + scale):
if self.size != np.array([w]): # resize
w = h0 + scale - self.position[0]
warn(self, '`width` or `right`', w)
if self.position != np.array([x]): # moved
x = h0 + scale - self.size[0]
warn(self, '`x` or `left`', x)
else:
in_range += 1
# if we are in range again, reset `already_warn_out_of_range` to False
if in_range == 2 and already_warn_out_of_range:
_logger.info('{} back in range.'.format(self.__class__.__name__))
already_warn_out_of_range = False
old_position, old_size = self.position, self.size
self._pos = np.array([x])
self._size = np.array([w])
self._apply_changes(old_size=old_size, old_position=old_position)
def _update_patch_position(self):
self._update_patch_geometry()
def _update_patch_size(self):
self._update_patch_geometry()
def _update_patch_geometry(self):
if self.is_on and self.span is not None:
self.span.range = self._get_range()
def disconnect(self):
super(RangeWidget, self).disconnect()
if self.span:
self.span.turn_off()
self.span = None
def _set_snap_position(self, value):
super(RangeWidget, self)._set_snap_position(value)
if self.span is not None:
self.span.snap_position = value
self._update_patch_geometry()
def _set_snap_size(self, value):
super(RangeWidget, self)._set_snap_size(value)
if self.span is not None:
self.span.snap_size = value
self._update_patch_size()
def _validate_geometry(self, x1=None):
"""Make sure the entire patch always stays within bounds. First the
position (either from position property or from x1 argument), is
        limited within the bounds. Then, if the right edge is out of
        bounds, the position is changed so that it will be at the limit.
The modified geometry is stored, but no change checks are performed.
Call _apply_changes after this in order to process any changes (the
size might change if it is set larger than the bounds size).
"""
xaxis = self.axes[0]
# Make sure widget size is not larger than axes
self._size[0] = min(self._size[0], xaxis.size * xaxis.scale)
# Make sure x1 is within bounds
if x1 is None:
x1 = self._pos[0] # Get it if not supplied
if x1 < xaxis.low_value:
x1 = xaxis.low_value
elif x1 > xaxis.high_value:
x1 = xaxis.high_value
        # Make sure x2 is within the upper bound.
# If not, keep dims, and change x1!
x2 = x1 + self._size[0]
if x2 > xaxis.high_value + xaxis.scale:
x2 = xaxis.high_value + xaxis.scale
x1 = x2 - self._size[0]
self._pos = np.array([x1])
# Apply snaps if appropriate
if self.snap_position:
self._do_snap_position()
if self.snap_size:
self._do_snap_size()
class ModifiableSpanSelector(SpanSelector):
def __init__(self, ax, **kwargs):
onselect = kwargs.pop('onselect', self.dummy)
direction = kwargs.pop('direction', 'horizontal')
useblit = kwargs.pop('useblit', ax.figure.canvas.supports_blit)
SpanSelector.__init__(self, ax, onselect, direction=direction,
useblit=useblit, span_stays=False, **kwargs)
# The tolerance in points to pick the rectangle sizes
self.tolerance = preferences.Plot.pick_tolerance
self.on_move_cid = None
self._range = None
self.step_ax = None
self.bounds_check = False
self._button_down = False
self.snap_size = False
self.snap_position = False
self.events = Events()
self.events.changed = Event(doc="""
Event that triggers when the widget was changed.
Arguments:
----------
obj:
The widget that changed
""", arguments=['obj'])
self.events.moved = Event(doc="""
Event that triggers when the widget was moved.
Arguments:
----------
obj:
The widget that changed
""", arguments=['obj'])
self.events.resized = Event(doc="""
Event that triggers when the widget was resized.
Arguments:
----------
obj:
The widget that changed
""", arguments=['obj'])
self.can_switch = False
def dummy(self, *args, **kwargs):
pass
def _get_range(self):
self.update_range()
return self._range
def _set_range(self, value):
self.update_range()
if self._range != value:
resized = (
self._range[1] -
self._range[0]) != (
value[1] -
value[0])
moved = self._range[0] != value[0]
self._range = value
if moved:
self._set_span_x(value[0])
self.events.moved.trigger(self)
if resized:
self._set_span_width(value[1] - value[0])
self.events.resized.trigger(self)
if moved or resized:
self.draw_patch()
self.events.changed.trigger(self)
range = property(_get_range, _set_range)
def _set_span_x(self, value):
if self.direction == 'horizontal':
self.rect.set_x(value)
else:
self.rect.set_y(value)
def _set_span_width(self, value):
if self.direction == 'horizontal':
self.rect.set_width(value)
else:
self.rect.set_height(value)
def _get_span_x(self):
if self.direction == 'horizontal':
return self.rect.get_x()
else:
return self.rect.get_y()
def _get_span_width(self):
if self.direction == 'horizontal':
return self.rect.get_width()
else:
return self.rect.get_height()
def _get_mouse_position(self, event):
if self.direction == 'horizontal':
return event.xdata
else:
return event.ydata
def set_initial(self, initial_range=None):
"""
Remove selection events, set the spanner, and go to modify mode.
"""
if initial_range is not None:
self.range = initial_range
self.disconnect_events()
# And connect to the new ones
self.connect_event('button_press_event', self.mm_on_press)
self.connect_event('button_release_event', self.mm_on_release)
self.rect.set_visible(True)
self.rect.contains = self.contains
def update(self, *args):
# Override the SpanSelector `update` method to blit properly all
        # artists before we go to "modify mode" in `set_initial`.
self.set_visible(True)
def draw_patch(self, *args):
"""Update the patch drawing.
"""
try:
if hasattr(self.ax, 'hspy_fig'):
self.ax.hspy_fig.render_figure()
elif self.ax.figure is not None:
self.ax.figure.canvas.draw_idle()
except AttributeError: # pragma: no cover
pass # When figure is None, typically when closing
def contains(self, mouseevent):
x, y = self.rect.get_transform().inverted().transform_point(
(mouseevent.x, mouseevent.y))
v = x if self.direction == 'vertical' else y
# Assert y is correct first
if not (0.0 <= v <= 1.0):
return False, {}
x_pt = self._get_point_size_in_data_units()
hit = self._range[0] - x_pt, self._range[1] + x_pt
        if hit[0] < self._get_mouse_position(mouseevent) < hit[1]:
return True, {}
return False, {}
def release(self, event):
"""When the button is released, the span stays in the screen and the
        interactivity machinery passes to modify mode"""
if self.pressv is None or (self.ignore(
event) and not self._button_down):
return
self._button_down = False
self.update_range()
self.set_initial()
def _get_point_size_in_data_units(self):
# Calculate the point size in data units
invtrans = self.ax.transData.inverted()
(x, y) = (1, 0) if self.direction == 'horizontal' else (0, 1)
x_pt = self.tolerance * abs((invtrans.transform((x, y)) -
invtrans.transform((0, 0)))[y])
return x_pt
def mm_on_press(self, event):
if self.ignore(event) and not self._button_down:
return
self._button_down = True
x_pt = self._get_point_size_in_data_units()
# Determine the size of the regions for moving and stretching
self.update_range()
left_region = self._range[0] - x_pt, self._range[0] + x_pt
right_region = self._range[1] - x_pt, self._range[1] + x_pt
middle_region = self._range[0] + x_pt, self._range[1] - x_pt
if in_interval(self._get_mouse_position(event), left_region) is True:
self.on_move_cid = \
self.canvas.mpl_connect('motion_notify_event',
self.move_left)
elif in_interval(self._get_mouse_position(event), right_region):
self.on_move_cid = \
self.canvas.mpl_connect('motion_notify_event',
self.move_right)
elif in_interval(self._get_mouse_position(event), middle_region):
self.pressv = self._get_mouse_position(event)
self.on_move_cid = \
self.canvas.mpl_connect('motion_notify_event',
self.move_rect)
else:
return
def update_range(self):
self._range = (self._get_span_x(),
self._get_span_x() + self._get_span_width())
def switch_left_right(self, x, left_to_right):
if left_to_right:
if self.step_ax is not None:
if x > self.step_ax.high_value + self.step_ax.scale:
return
w = self._range[1] - self._range[0]
r0 = self._range[1]
self._set_span_x(r0)
r1 = r0 + w
self.canvas.mpl_disconnect(self.on_move_cid)
self.on_move_cid = \
self.canvas.mpl_connect('motion_notify_event',
self.move_right)
else:
if self.step_ax is not None:
if x < self.step_ax.low_value - self.step_ax.scale:
return
w = self._range[1] - self._range[0]
r1 = self._range[0]
r0 = r1 - w
self.canvas.mpl_disconnect(self.on_move_cid)
self.on_move_cid = \
self.canvas.mpl_connect('motion_notify_event',
self.move_left)
self._range = (r0, r1)
def move_left(self, event):
if self._button_down is False or self.ignore(event):
return
x = self._get_mouse_position(event)
if self.step_ax is not None:
if (self.bounds_check and
x < self.step_ax.low_value - self.step_ax.scale):
return
if self.snap_position:
snap_offset = self.step_ax.offset - 0.5 * self.step_ax.scale
elif self.snap_size:
snap_offset = self._range[1]
if self.snap_position or self.snap_size:
rem = (x - snap_offset) % self.step_ax.scale
if rem / self.step_ax.scale < 0.5:
rem = -rem
else:
rem = self.step_ax.scale - rem
x += rem
# Do not move the left edge beyond the right one.
if x >= self._range[1]:
if self.can_switch and x > self._range[1]:
self.switch_left_right(x, True)
self.move_right(event)
return
width_increment = self._range[0] - x
if self._get_span_width() + width_increment <= 0:
return
self._set_span_x(x)
self._set_span_width(self._get_span_width() + width_increment)
self.update_range()
self.events.moved.trigger(self)
self.events.resized.trigger(self)
self.events.changed.trigger(self)
if self.onmove_callback is not None:
self.onmove_callback(*self._range)
self.draw_patch()
def move_right(self, event):
if self._button_down is False or self.ignore(event):
return
x = self._get_mouse_position(event)
if self.step_ax is not None:
if (self.bounds_check and
x > self.step_ax.high_value + self.step_ax.scale):
return
if self.snap_size:
snap_offset = self._range[0]
rem = (x - snap_offset) % self.step_ax.scale
if rem / self.step_ax.scale < 0.5:
rem = -rem
else:
rem = self.step_ax.scale - rem
x += rem
# Do not move the right edge beyond the left one.
if x <= self._range[0]:
if self.can_switch and x < self._range[0]:
self.switch_left_right(x, False)
self.move_left(event)
return
width_increment = x - self._range[1]
if self._get_span_width() + width_increment <= 0:
return
self._set_span_width(self._get_span_width() + width_increment)
self.update_range()
self.events.resized.trigger(self)
self.events.changed.trigger(self)
if self.onmove_callback is not None:
self.onmove_callback(*self._range)
self.draw_patch()
def move_rect(self, event):
if (self._button_down is False or self.ignore(event) or
self._get_mouse_position(event) is None):
return
x_increment = self._get_mouse_position(event) - self.pressv
if self.step_ax is not None:
if (self.bounds_check
and self._range[0] <= self.step_ax.low_value
and self._get_mouse_position(event) <= self.pressv):
return
if (self.bounds_check
and self._range[1] >= self.step_ax.high_value
and self._get_mouse_position(event) >= self.pressv):
return
if self.snap_position:
rem = x_increment % self.step_ax.scale
if rem / self.step_ax.scale < 0.5:
rem = -rem
else:
rem = self.step_ax.scale - rem
x_increment += rem
self._set_span_x(self._get_span_x() + x_increment)
self.update_range()
self.pressv += x_increment
self.events.moved.trigger(self)
self.events.changed.trigger(self)
if self.onmove_callback is not None:
self.onmove_callback(*self._range)
self.draw_patch()
def mm_on_release(self, event):
if self._button_down is False or self.ignore(event):
return
self._button_down = False
self.canvas.mpl_disconnect(self.on_move_cid)
self.on_move_cid = None
def turn_off(self):
self.disconnect_events()
if self.on_move_cid is not None:
self.canvas.mpl_disconnect(self.on_move_cid)
self.ax.patches.remove(self.rect)
self.ax.figure.canvas.draw_idle()
| gpl-3.0 |
amolsharma99/PredictingJobEntityTypeFromString | code.py | 1 | 4568 | import pandas as pd
import numpy as np
import json
import os
import tqdm
import re
import xgboost as xgb
from sklearn import preprocessing
from sklearn import cross_validation
def extract_date(row):
return row['postedDate']['$date']
def get_df_from_json(filename, mode):
with open(filename) as json_data:
data = json_data.readlines()
data = map(lambda x: x.rstrip(), data)
data_json_str = "[" + ','.join(data) + "]"
df = pd.read_json(data_json_str)
if mode != 'test':
df = df.drop('_id', axis = 1)
df.postedDate = df.apply(lambda row: extract_date(row), axis = 1)
return df
df = get_df_from_json('jobs_huge.json', mode = 'train')
new_df = pd.DataFrame(columns=['string', 'class'])
columns = df.columns
#wanted to name it extract
def extra_string_n_class(row, new_df):
for column in tqdm.tqdm(columns):
new_df.loc[len(new_df)]=[row[column], column]
df.apply(lambda row: extra_string_n_class(row, new_df), axis = 1)
# to save time if my ipython starts again.
new_df.to_csv('transformed_jobs.csv', encoding='utf-8', index=False)
new_df = pd.read_csv('transformed_jobs.csv', encoding='utf-8')
######Feature Engineering#######
def all_digits(key):
try:
x = int(key)
except ValueError:
return 0
return 1
def num_digits(key):
try:
count = sum(c.isdigit() for c in key)
return count
except TypeError:
print "error while counting digts in", key
return 10
def has_html_tags(key):
try:
pat = re.compile('<.*?>')
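        # Note: re.match anchors at the start of the string, so only strings
        # that *begin* with a tag are flagged (re.search would match anywhere).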
match = re.match(pat, key)
if match:
return 1
else:
return 0
except TypeError:
print "error while has_html_tags in", key
return 10
def len_str(key):
return len(key)
def occurance_count(df, key, keyname):
return len(df[df[keyname] == key])
#save occurrence count ('occurance_count' column) as a feature and then drop duplicates
new_df['occurance_count'] = new_df.apply(lambda row: occurance_count(new_df, row['string'], 'string'), axis = 1)
new_df = new_df.drop_duplicates()
# New feature columns 'all_digits', 'num_digits', 'has_html_tags', 'len_str', 'is_known_country', 'occurance_count'
new_df['all_digits'] = new_df.apply(lambda row: all_digits(row['string']), axis = 1)
new_df['num_digits'] = new_df.apply(lambda row: num_digits(row['string']), axis = 1)
new_df['has_html_tags'] = new_df.apply(lambda row: has_html_tags(row['string']), axis = 1)
new_df['len_str'] = new_df.apply(lambda row: len_str(row['string']), axis = 1)
###########Classification############
le_class = preprocessing.LabelEncoder()
le_class.fit(new_df['class'])
print le_class.classes_
new_df['en_class'] = le_class.transform(new_df['class'])
Y = new_df.en_class
X = new_df.drop(['string','class', 'en_class'], axis = 1)
Y = Y.reshape(6048, 1)
clf = xgb.XGBClassifier(objective='reg:logistic', nthread=4, seed=0)
clf.fit(X,Y)
Y = Y.reshape(6048,)
#by default 3 fold cross_validation
scores = cross_validation.cross_val_score(clf, X, Y)
print "3 fold scores: ", scores
print "training set score: ", clf.score(X,Y)
#accuracy 99% on training set
test_df = get_df_from_json('test/test_tiny_1.txt', mode = 'test')
test_new_df = test_df[test_df['key']!='']
#5112 non-empty keys, 882 empty keys.
# to save time if my ipython starts again.
test_new_df.to_csv('transformed_test.csv', encoding='utf-8', index=False)
test_new_df = pd.read_csv('transformed_test.csv', encoding='utf-8')
test_new_df['occurance_count'] = test_new_df.apply(lambda row: occurance_count(test_new_df, row['key'], 'key'), axis = 1)
test_new_df = test_new_df.drop_duplicates()
strings = test_new_df['key']
# New feature columns 'all_digits', 'num_digits', 'has_html_tags', 'len_str', 'is_known_country', 'occurance_count'
test_new_df['all_digits'] = test_new_df.apply(lambda row: all_digits(row['key']), axis = 1)
test_new_df['num_digits'] = test_new_df.apply(lambda row: num_digits(row['key']), axis = 1)
test_new_df['has_html_tags'] = test_new_df.apply(lambda row: has_html_tags(row['key']), axis = 1)
test_new_df['len_str'] = test_new_df.apply(lambda row: len_str(row['key']), axis = 1)
id = test_new_df.id
X = test_new_df.drop(['actual', 'id', 'key'], axis = 1)
Y_predict = clf.predict(X)
Y_predict = le_class.inverse_transform(Y_predict)
#dropped empty keys and dropped duplicates.
print len(id), len(Y_predict), len(strings)
ans_df = pd.DataFrame({'id': id, 'actual': Y_predict, 'key': strings})
ans_df.to_csv('test_tiny_1_out.csv', index=False, encoding='utf-8') | mit |
pradyu1993/scikit-learn | sklearn/feature_extraction/text.py | 1 | 35696 | # -*- coding: utf-8 -*-
# Authors: Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Lars Buitinck <[email protected]>
# Robert Layton <[email protected]>
#
# License: BSD Style.
"""
The :mod:`sklearn.feature_extraction.text` submodule gathers utilities to
build feature vectors from text documents.
"""
from collections import Mapping
from operator import itemgetter
import re
import unicodedata
import warnings
import numbers
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..preprocessing import normalize
from ..utils.fixes import Counter
from .stop_words import ENGLISH_STOP_WORDS
__all__ = ['CountVectorizer',
'ENGLISH_STOP_WORDS',
'TfidfTransformer',
'TfidfVectorizer',
'strip_accents_ascii',
'strip_accents_unicode',
'strip_tags']
def strip_accents_unicode(s):
"""Transform accentuated unicode symbols into their simple counterpart
Warning: the python-level loop and join operations make this
implementation 20 times slower than the strip_accents_ascii basic
normalization.
See also
--------
strip_accents_ascii
Remove accentuated char for any unicode symbol that has a direct
ASCII equivalent.
"""
return u''.join([c for c in unicodedata.normalize('NFKD', s)
if not unicodedata.combining(c)])
def strip_accents_ascii(s):
"""Transform accentuated unicode symbols into ascii or nothing
Warning: this solution is only suited for languages that have a direct
transliteration to ASCII symbols.
See also
--------
strip_accents_unicode
Remove accentuated char for any unicode symbol.
"""
nkfd_form = unicodedata.normalize('NFKD', s)
return nkfd_form.encode('ASCII', 'ignore').decode('ASCII')
def strip_tags(s):
"""Basic regexp based HTML / XML tag stripper function
For serious HTML/XML preprocessing you should rather use an external
library such as lxml or BeautifulSoup.
"""
return re.compile(ur"<([^>]+)>", flags=re.UNICODE).sub(u" ", s)
def _check_stop_list(stop):
if stop == "english":
return ENGLISH_STOP_WORDS
elif isinstance(stop, str) or isinstance(stop, unicode):
raise ValueError("not a built-in stop list: %s" % stop)
else: # assume it's a collection
return stop
class CountVectorizer(BaseEstimator):
"""Convert a collection of raw documents to a matrix of token counts
This implementation produces a sparse representation of the counts using
scipy.sparse.coo_matrix.
If you do not provide an a-priori dictionary and you do not use an analyzer
that does some kind of feature selection then the number of features will
be equal to the vocabulary size found by analysing the data. The default
analyzer does simple stop word filtering for English.
Parameters
----------
input: string {'filename', 'file', 'content'}
If filename, the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have 'read' method (file-like
object) it is called to fetch the bytes in memory.
Otherwise the input is expected to be the sequence strings or
bytes items are expected to be analyzed directly.
charset: string, 'utf-8' by default.
If bytes or files are given to analyze, this charset is used to
decode.
charset_error: {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `charset`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents: {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
        a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer: string, {'word', 'char', 'char_wb'} or callable
Whether the feature should be made of word or character n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor: callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer: callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
ngram_range: tuple (min_n, max_n)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words: string {'english'}, list, or None (default)
If a string, it is passed to _check_stop_list and the appropriate stop
        list is returned. 'english' is currently the only
supported string value.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
If None, no stop words will be used. max_df can be set to a value
in the range [0.7, 1.0) to automatically detect and filter stop
words based on intra corpus document frequency of terms.
lowercase: boolean, default True
        Convert all characters to lowercase before tokenizing.
token_pattern: string
Regular expression denoting what constitutes a "token", only used
        if `analyzer == 'word'`. The default regexp selects tokens of 2
        or more letter characters (punctuation is completely ignored
and always treated as a token separator).
max_df : float in range [0.0, 1.0] or int, optional, 1.0 by default
When building the vocabulary ignore terms that have a term frequency
strictly higher than the given threshold (corpus specific stop words).
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
min_df : float in range [0.0, 1.0] or int, optional, 2 by default
When building the vocabulary ignore terms that have a term frequency
strictly lower than the given threshold. This value is also called
cut-off in the literature.
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
max_features : optional, None by default
If not None, build a vocabulary that only consider the top
max_features ordered by term frequency across the corpus.
This parameter is ignored if vocabulary is not None.
vocabulary: Mapping or iterable, optional
Either a Mapping (e.g., a dict) where keys are terms and values are
indices in the feature matrix, or an iterable over terms. If not
given, a vocabulary is determined from the input documents.
binary: boolean, False by default.
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
dtype: type, optional
Type of the matrix returned by fit_transform() or transform().
"""
_white_spaces = re.compile(ur"\s\s+")
def __init__(self, input='content', charset='utf-8',
charset_error='strict', strip_accents=None,
lowercase=True, preprocessor=None, tokenizer=None,
stop_words=None, token_pattern=ur"(?u)\b\w\w+\b",
ngram_range=(1, 1),
min_n=None, max_n=None, analyzer='word',
max_df=1.0, min_df=2, max_features=None,
vocabulary=None, binary=False, dtype=long):
self.input = input
self.charset = charset
self.charset_error = charset_error
self.strip_accents = strip_accents
self.preprocessor = preprocessor
self.tokenizer = tokenizer
self.analyzer = analyzer
self.lowercase = lowercase
self.token_pattern = token_pattern
self.stop_words = stop_words
self.max_df = max_df
self.min_df = min_df
self.max_features = max_features
if not (max_n is None) or not (min_n is None):
warnings.warn('Parameters max_n and min_n are deprecated. Use '
'ngram_range instead. This will be removed in 0.14.',
DeprecationWarning, stacklevel=2)
if min_n is None:
min_n = 1
if max_n is None:
max_n = min_n
ngram_range = (min_n, max_n)
self.ngram_range = ngram_range
if vocabulary is not None:
if not isinstance(vocabulary, Mapping):
vocabulary = dict((t, i) for i, t in enumerate(vocabulary))
if not vocabulary:
raise ValueError("empty vocabulary passed to fit")
self.fixed_vocabulary = True
self.vocabulary_ = vocabulary
else:
self.fixed_vocabulary = False
self.binary = binary
self.dtype = dtype
def decode(self, doc):
"""Decode the input into a string of unicode symbols
The decoding strategy depends on the vectorizer parameters.
"""
if self.input == 'filename':
doc = open(doc, 'rb').read()
elif self.input == 'file':
doc = doc.read()
if isinstance(doc, bytes):
doc = doc.decode(self.charset, self.charset_error)
return doc
def _word_ngrams(self, tokens, stop_words=None):
"""Turn tokens into a sequence of n-grams after stop words filtering"""
# handle stop words
if stop_words is not None:
tokens = [w for w in tokens if w not in stop_words]
# handle token n-grams
min_n, max_n = self.ngram_range
if max_n != 1:
original_tokens = tokens
tokens = []
n_original_tokens = len(original_tokens)
for n in xrange(min_n,
min(max_n + 1, n_original_tokens + 1)):
for i in xrange(n_original_tokens - n + 1):
tokens.append(u" ".join(original_tokens[i: i + n]))
return tokens
def _char_ngrams(self, text_document):
"""Tokenize text_document into a sequence of character n-grams"""
# normalize white spaces
text_document = self._white_spaces.sub(u" ", text_document)
text_len = len(text_document)
ngrams = []
min_n, max_n = self.ngram_range
for n in xrange(min_n, min(max_n + 1, text_len + 1)):
for i in xrange(text_len - n + 1):
ngrams.append(text_document[i: i + n])
return ngrams
def _char_wb_ngrams(self, text_document):
"""Whitespace sensitive char-n-gram tokenization.
Tokenize text_document into a sequence of character n-grams
excluding any whitespace (operating only inside word boundaries)"""
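        # Illustrative: with ngram_range=(2, 2) the word "cat" is padded to
        # " cat " and yields the 2-grams ' c', 'ca', 'at', 't '.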
# normalize white spaces
text_document = self._white_spaces.sub(u" ", text_document)
min_n, max_n = self.ngram_range
ngrams = []
for w in text_document.split():
w = u' ' + w + u' '
w_len = len(w)
for n in xrange(min_n, max_n + 1):
offset = 0
ngrams.append(w[offset:offset + n])
while offset + n < w_len:
offset += 1
ngrams.append(w[offset:offset + n])
if offset == 0: # count a short word (w_len < n) only once
break
return ngrams
def build_preprocessor(self):
"""Return a function to preprocess the text before tokenization"""
if self.preprocessor is not None:
return self.preprocessor
# unfortunately python functools package does not have an efficient
# `compose` function that would have allowed us to chain a dynamic
        # number of functions. However the overhead of a lambda call is a few
# hundreds of nanoseconds which is negligible when compared to the
# cost of tokenizing a string of 1000 chars for instance.
noop = lambda x: x
# accent stripping
if not self.strip_accents:
strip_accents = noop
elif hasattr(self.strip_accents, '__call__'):
strip_accents = self.strip_accents
elif self.strip_accents == 'ascii':
strip_accents = strip_accents_ascii
elif self.strip_accents == 'unicode':
strip_accents = strip_accents_unicode
else:
raise ValueError('Invalid value for "strip_accents": %s' %
self.strip_accents)
if self.lowercase:
return lambda x: strip_accents(x.lower())
else:
return strip_accents
def build_tokenizer(self):
"""Return a function that split a string in sequence of tokens"""
if self.tokenizer is not None:
return self.tokenizer
token_pattern = re.compile(self.token_pattern)
return lambda doc: token_pattern.findall(doc)
def get_stop_words(self):
"""Build or fetch the effective stop words list"""
return _check_stop_list(self.stop_words)
def build_analyzer(self):
"""Return a callable that handles preprocessing and tokenization"""
if hasattr(self.analyzer, '__call__'):
return self.analyzer
preprocess = self.build_preprocessor()
if self.analyzer == 'char':
return lambda doc: self._char_ngrams(preprocess(self.decode(doc)))
elif self.analyzer == 'char_wb':
return lambda doc: self._char_wb_ngrams(
preprocess(self.decode(doc)))
elif self.analyzer == 'word':
stop_words = self.get_stop_words()
tokenize = self.build_tokenizer()
return lambda doc: self._word_ngrams(
tokenize(preprocess(self.decode(doc))), stop_words)
else:
raise ValueError('%s is not a valid tokenization scheme/analyzer' %
self.analyzer)
def _term_count_dicts_to_matrix(self, term_count_dicts):
i_indices = []
j_indices = []
values = []
vocabulary = self.vocabulary_
for i, term_count_dict in enumerate(term_count_dicts):
for term, count in term_count_dict.iteritems():
j = vocabulary.get(term)
if j is not None:
i_indices.append(i)
j_indices.append(j)
values.append(count)
# free memory as we go
term_count_dict.clear()
shape = (len(term_count_dicts), max(vocabulary.itervalues()) + 1)
spmatrix = sp.coo_matrix((values, (i_indices, j_indices)),
shape=shape, dtype=self.dtype)
if self.binary:
spmatrix.data.fill(1)
return spmatrix
def fit(self, raw_documents, y=None):
"""Learn a vocabulary dictionary of all tokens in the raw documents
Parameters
----------
raw_documents: iterable
an iterable which yields either str, unicode or file objects
Returns
-------
self
"""
self.fit_transform(raw_documents)
return self
def fit_transform(self, raw_documents, y=None):
"""Learn the vocabulary dictionary and return the count vectors
This is more efficient than calling fit followed by transform.
Parameters
----------
raw_documents: iterable
an iterable which yields either str, unicode or file objects
Returns
-------
vectors: array, [n_samples, n_features]
"""
if self.fixed_vocabulary:
# No need to fit anything, directly perform the transformation.
# We intentionally don't call the transform method to make it
# fit_transform overridable without unwanted side effects in
# TfidfVectorizer
analyze = self.build_analyzer()
term_counts_per_doc = [Counter(analyze(doc))
for doc in raw_documents]
return self._term_count_dicts_to_matrix(term_counts_per_doc)
self.vocabulary_ = {}
# result of document conversion to term count dicts
term_counts_per_doc = []
term_counts = Counter()
# term counts across entire corpus (count each term maximum once per
# document)
document_counts = Counter()
analyze = self.build_analyzer()
# TODO: parallelize the following loop with joblib?
# (see XXX up ahead)
for doc in raw_documents:
term_count_current = Counter(analyze(doc))
term_counts.update(term_count_current)
document_counts.update(term_count_current.iterkeys())
term_counts_per_doc.append(term_count_current)
n_doc = len(term_counts_per_doc)
max_features = self.max_features
max_df = self.max_df
min_df = self.min_df
max_doc_count = (max_df if isinstance(max_df, numbers.Integral)
else max_df * n_doc)
min_doc_count = (min_df if isinstance(min_df, numbers.Integral)
else min_df * n_doc)
# filter out stop words: terms that occur in almost all documents
if max_doc_count < n_doc or min_doc_count > 1:
stop_words = set(t for t, dc in document_counts.iteritems()
if dc > max_doc_count or dc < min_doc_count)
else:
stop_words = set()
# list the terms that should be part of the vocabulary
if max_features is None:
terms = set(term_counts) - stop_words
else:
# extract the most frequent terms for the vocabulary
terms = set()
for t, tc in term_counts.most_common():
if t not in stop_words:
terms.add(t)
if len(terms) >= max_features:
break
# store the learned stop words to make it easier to debug the value of
# max_df
self.max_df_stop_words_ = stop_words
# store map from term name to feature integer index: we sort the term
# to have reproducible outcome for the vocabulary structure: otherwise
# the mapping from feature name to indices might depend on the memory
# layout of the machine. Furthermore sorted terms might make it
# possible to perform binary search in the feature names array.
vocab = dict(((t, i) for i, t in enumerate(sorted(terms))))
if not vocab:
raise ValueError("empty vocabulary; training set may have"
" contained only stop words")
self.vocabulary_ = vocab
# the term_counts and document_counts might be useful statistics, are
        # we really sure we want to drop them? They take some memory but
# can be useful for corpus introspection
return self._term_count_dicts_to_matrix(term_counts_per_doc)
def transform(self, raw_documents):
"""Extract token counts out of raw text documents using the vocabulary
fitted with fit or the one provided in the constructor.
Parameters
----------
raw_documents: iterable
an iterable which yields either str, unicode or file objects
Returns
-------
vectors: sparse matrix, [n_samples, n_features]
"""
if not hasattr(self, 'vocabulary_') or len(self.vocabulary_) == 0:
raise ValueError("Vocabulary wasn't fitted or is empty!")
# raw_documents can be an iterable so we don't know its size in
# advance
# XXX @larsmans tried to parallelize the following loop with joblib.
# The result was some 20% slower than the serial version.
analyze = self.build_analyzer()
term_counts_per_doc = [Counter(analyze(doc)) for doc in raw_documents]
return self._term_count_dicts_to_matrix(term_counts_per_doc)
def inverse_transform(self, X):
"""Return terms per document with nonzero entries in X.
Parameters
----------
X : {array, sparse matrix}, shape = [n_samples, n_features]
Returns
-------
X_inv : list of arrays, len = n_samples
List of arrays of terms.
"""
if sp.isspmatrix_coo(X): # COO matrix is not indexable
X = X.tocsr()
elif not sp.issparse(X):
# We need to convert X to a matrix, so that the indexing
# returns 2D objects
X = np.asmatrix(X)
n_samples = X.shape[0]
terms = np.array(self.vocabulary_.keys())
indices = np.array(self.vocabulary_.values())
inverse_vocabulary = terms[np.argsort(indices)]
return [inverse_vocabulary[X[i, :].nonzero()[1]].ravel()
for i in xrange(n_samples)]
def get_feature_names(self):
"""Array mapping from feature integer indices to feature name"""
if not hasattr(self, 'vocabulary_') or len(self.vocabulary_) == 0:
raise ValueError("Vocabulary wasn't fitted or is empty!")
return [t for t, i in sorted(self.vocabulary_.iteritems(),
key=itemgetter(1))]
class TfidfTransformer(BaseEstimator, TransformerMixin):
"""Transform a count matrix to a normalized tf or tf–idf representation
Tf means term-frequency while tf–idf means term-frequency times inverse
document-frequency. This is a common term weighting scheme in information
retrieval, that has also found good use in document classification.
The goal of using tf–idf instead of the raw frequencies of occurrence of a
token in a given document is to scale down the impact of tokens that occur
very frequently in a given corpus and that are hence empirically less
informative than features that occur in a small fraction of the training
corpus.
In the SMART notation used in IR, this class implements several tf–idf
variants. Tf is always "n" (natural), idf is "t" iff use_idf is given,
"n" otherwise, and normalization is "c" iff norm='l2', "n" iff norm=None.
Parameters
----------
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
use_idf : boolean, optional
Enable inverse-document-frequency reweighting.
smooth_idf : boolean, optional
Smooth idf weights by adding one to document frequencies, as if an
extra document was seen containing every term in the collection
exactly once. Prevents zero divisions.
sublinear_tf : boolean, optional
Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
References
----------
.. [Yates2011] `R. Baeza-Yates and B. Ribeiro-Neto (2011). Modern
Information Retrieval. Addison Wesley, pp. 68–74.`
.. [MSR2008] `C.D. Manning, H. Schütze and P. Raghavan (2008). Introduction
to Information Retrieval. Cambridge University Press,
pp. 121–125.`
"""
def __init__(self, norm='l2', use_idf=True, smooth_idf=True,
sublinear_tf=False):
self.norm = norm
self.use_idf = use_idf
self.smooth_idf = smooth_idf
self.sublinear_tf = sublinear_tf
def fit(self, X, y=None):
"""Learn the idf vector (global term weights)
Parameters
----------
X: sparse matrix, [n_samples, n_features]
a matrix of term/token counts
"""
if self.use_idf:
if not hasattr(X, 'nonzero'):
X = sp.csr_matrix(X)
n_samples, n_features = X.shape
df = np.bincount(X.nonzero()[1])
if df.shape[0] < n_features:
# bincount might return fewer bins than there are features
df = np.concatenate([df, np.zeros(n_features - df.shape[0])])
# perform idf smoothing if required
df += int(self.smooth_idf)
n_samples += int(self.smooth_idf)
            # avoid division by zero for features that occur in all documents
idf = np.log(float(n_samples) / df) + 1.0
idf_diag = sp.lil_matrix((n_features, n_features))
idf_diag.setdiag(idf)
self._idf_diag = sp.csc_matrix(idf_diag)
return self
def transform(self, X, copy=True):
"""Transform a count matrix to a tf or tf–idf representation
Parameters
----------
X: sparse matrix, [n_samples, n_features]
a matrix of term/token counts
Returns
-------
vectors: sparse matrix, [n_samples, n_features]
"""
if hasattr(X, 'dtype') and np.issubdtype(X.dtype, np.float):
# preserve float family dtype
X = sp.csr_matrix(X, copy=copy)
else:
# convert counts or binary occurrences to floats
X = sp.csr_matrix(X, dtype=np.float64, copy=copy)
n_samples, n_features = X.shape
if self.sublinear_tf:
np.log(X.data, X.data)
X.data += 1
if self.use_idf:
expected_n_features = self._idf_diag.shape[0]
if n_features != expected_n_features:
raise ValueError("Input has n_features=%d while the model"
" has been trained with n_features=%d" % (
n_features, expected_n_features))
# *= doesn't work
X = X * self._idf_diag
if self.norm:
X = normalize(X, norm=self.norm, copy=False)
return X
@property
def idf_(self):
if hasattr(self, "_idf_diag"):
return np.ravel(self._idf_diag.sum(axis=0))
else:
return None
class TfidfVectorizer(CountVectorizer):
"""Convert a collection of raw documents to a matrix of TF-IDF features.
Equivalent to CountVectorizer followed by TfidfTransformer.
Parameters
----------
input: string {'filename', 'file', 'content'}
If filename, the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have 'read' method (file-like
object) it is called to fetch the bytes in memory.
Otherwise the input is expected to be the sequence strings or
bytes items are expected to be analyzed directly.
charset: string, 'utf-8' by default.
If bytes or files are given to analyze, this charset is used to
decode.
charset_error: {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `charset`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents: {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer: string, {'word', 'char'} or callable
Whether the feature should be made of word or character n-grams.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor: callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer: callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
ngram_range: tuple (min_n, max_n)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words: string {'english'}, list, or None (default)
If a string, it is passed to _check_stop_list and the appropriate stop
list is returned. 'english' is currently the only
supported string value.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
If None, no stop words will be used. max_df can be set to a value
in the range [0.7, 1.0) to automatically detect and filter stop
words based on intra corpus document frequency of terms.
lowercase: boolean, default True
Convert all characters to lowercase before tokenizing.
token_pattern: string
Regular expression denoting what constitutes a "token", only used
if `analyzer == 'word'`. The default regexp selects tokens of 2
or more letter characters (punctuation is completely ignored
and always treated as a token separator).
max_df : float in range [0.0, 1.0] or int, optional, 1.0 by default
When building the vocabulary ignore terms that have a document frequency
strictly higher than the given threshold (corpus-specific stop words).
If float, the parameter represents a proportion of documents; if integer,
absolute counts.
This parameter is ignored if vocabulary is not None.
min_df : float in range [0.0, 1.0] or int, optional, 2 by default
When building the vocabulary ignore terms that have a document frequency
strictly lower than the given threshold.
This value is also called cut-off in the literature.
If float, the parameter represents a proportion of documents; if integer,
absolute counts.
This parameter is ignored if vocabulary is not None.
max_features : optional, None by default
If not None, build a vocabulary that only considers the top
max_features ordered by term frequency across the corpus.
This parameter is ignored if vocabulary is not None.
vocabulary: Mapping or iterable, optional
Either a Mapping (e.g., a dict) where keys are terms and values are
indices in the feature matrix, or an iterable over terms. If not
given, a vocabulary is determined from the input documents.
binary: boolean, False by default.
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
dtype: type, optional
Type of the matrix returned by fit_transform() or transform().
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
use_idf : boolean, optional
Enable inverse-document-frequency reweighting.
smooth_idf : boolean, optional
Smooth idf weights by adding one to document frequencies, as if an
extra document was seen containing every term in the collection
exactly once. Prevents zero divisions.
sublinear_tf : boolean, optional
Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
See also
--------
CountVectorizer
Tokenize the documents and count the occurrences of token and return
them as a sparse matrix
TfidfTransformer
Apply Term Frequency Inverse Document Frequency normalization to a
sparse matrix of occurrence counts.
"""
def __init__(self, input='content', charset='utf-8',
charset_error='strict', strip_accents=None, lowercase=True,
preprocessor=None, tokenizer=None, analyzer='word',
stop_words=None, token_pattern=ur"(?u)\b\w\w+\b", min_n=None,
max_n=None, ngram_range=(1, 1), max_df=1.0, min_df=2,
max_features=None, vocabulary=None, binary=False, dtype=long,
norm='l2', use_idf=True, smooth_idf=True, sublinear_tf=False):
super(TfidfVectorizer, self).__init__(
input=input, charset=charset, charset_error=charset_error,
strip_accents=strip_accents, lowercase=lowercase,
preprocessor=preprocessor, tokenizer=tokenizer, analyzer=analyzer,
stop_words=stop_words, token_pattern=token_pattern, min_n=min_n,
max_n=max_n, ngram_range=ngram_range, max_df=max_df, min_df=min_df,
max_features=max_features, vocabulary=vocabulary, binary=False,
dtype=dtype)
self._tfidf = TfidfTransformer(norm=norm, use_idf=use_idf,
smooth_idf=smooth_idf,
sublinear_tf=sublinear_tf)
# Broadcast the TF-IDF parameters to the underlying transformer instance
# for easy grid search and repr
@property
def norm(self):
return self._tfidf.norm
@norm.setter
def norm(self, value):
self._tfidf.norm = value
@property
def use_idf(self):
return self._tfidf.use_idf
@use_idf.setter
def use_idf(self, value):
self._tfidf.use_idf = value
@property
def smooth_idf(self):
return self._tfidf.smooth_idf
@smooth_idf.setter
def smooth_idf(self, value):
self._tfidf.smooth_idf = value
@property
def sublinear_tf(self):
return self._tfidf.sublinear_tf
@sublinear_tf.setter
def sublinear_tf(self, value):
self._tfidf.sublinear_tf = value
def fit(self, raw_documents, y=None):
"""Learn a conversion law from documents to array data"""
X = super(TfidfVectorizer, self).fit_transform(raw_documents)
self._tfidf.fit(X)
return self
def fit_transform(self, raw_documents, y=None):
"""Learn the representation and return the vectors.
Parameters
----------
raw_documents: iterable
an iterable which yields either str, unicode or file objects
Returns
-------
vectors: sparse matrix, [n_samples, n_features]
"""
X = super(TfidfVectorizer, self).fit_transform(raw_documents)
self._tfidf.fit(X)
# X is already a transformed view of raw_documents so
# we set copy to False
return self._tfidf.transform(X, copy=False)
def transform(self, raw_documents, copy=True):
"""Transform raw text documents to tf–idf vectors
Parameters
----------
raw_documents: iterable
an iterable which yields either str, unicode or file objects
Returns
-------
vectors: sparse matrix, [n_samples, n_features]
"""
X = super(TfidfVectorizer, self).transform(raw_documents)
return self._tfidf.transform(X, copy)
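# Minimal usage sketch (illustrative only, not part of the original module).
# It assumes CountVectorizer is defined earlier in this module, as implied by
# the TfidfVectorizer implementation above; the documents are made-up toy data.
if __name__ == "__main__":
    docs = ["the cat sat on the mat",
            "the dog sat on the log",
            "cats and dogs and logs"]

    # explicit two-step pipeline: token counts first, then tf-idf reweighting
    counts = CountVectorizer(min_df=1).fit_transform(docs)
    tfidf = TfidfTransformer(smooth_idf=True, sublinear_tf=False).fit(counts)
    print(tfidf.transform(counts).toarray())

    # TfidfVectorizer performs both steps with a single estimator
    vectorizer = TfidfVectorizer(min_df=1)
    print(vectorizer.fit_transform(docs).toarray())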
| bsd-3-clause |
bhargavvader/pycobra | pycobra/classifiercobra.py | 1 | 12911 | # Licensed under the MIT License - https://opensource.org/licenses/MIT
from sklearn import neighbors, tree, svm
from sklearn.linear_model import SGDClassifier, LogisticRegression
from sklearn.utils import shuffle
from sklearn.base import BaseEstimator
from sklearn.utils.validation import check_X_y, check_array, check_is_fitted
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.neural_network import MLPClassifier
import math
import numpy as np
import random
import logging
import numbers
logger = logging.getLogger('pycobra.classifiercobra')
class ClassifierCobra(BaseEstimator):
"""
Classification algorithm as introduced by
Mojirsheibani [1999] Combining Classifiers via Discretization,
Journal of the American Statistical Association.
Parameters
----------
random_state: integer or a numpy.random.RandomState object.
Set the state of the random number generator to pass on to shuffle and loading machines, to ensure
reproducibility of your experiments, for example.
Attributes
----------
machines: A dictionary which maps machine names to the machine objects.
The machine object must have a predict method for it to be used during aggregation.
machine_predictions: A dictionary which maps machine name to its predictions over X_l
This value is used to determine which points from y_l are used to aggregate.
"""
def __init__(self, random_state=None, machine_list='basic'):
self.random_state = random_state
self.machine_list = machine_list
def fit(self, X, y, default=True, X_k=None, X_l=None, y_k=None, y_l=None):
"""
Parameters
----------
X: array-like, [n_samples, n_features]
Training data which will be used to create ClassifierCobra.
y: array-like [n_samples]
Training labels for classification.
default: bool, optional
If set as true then sets up COBRA with default machines and splitting.
X_k : shape = [n_samples, n_features]
Training data which is used to train the machines loaded into COBRA.
y_k : array-like, shape = [n_samples]
Target values used to train the machines loaded into COBRA.
X_l : shape = [n_samples, n_features]
Training data which is used during the aggregation of COBRA.
y_l : array-like, shape = [n_samples]
Target values which are actually used in the aggregation of COBRA.
"""
X, y = check_X_y(X, y)
self.X_ = X
self.y_ = y
self.X_k_ = X_k
self.X_l_ = X_l
self.y_k_ = y_k
self.y_l_ = y_l
self.estimators_ = {}
# try block to pass scikit-learn estimator check.
try:
# set-up COBRA with default machines
if default:
self.split_data()
self.load_default(machine_list=self.machine_list)
self.load_machine_predictions()
except ValueError:
return self
return self
def pred(self, X, M, info=False):
"""
Performs the ClassifierCobra aggregation scheme; used in the predict method.
Parameters
----------
X: array-like, [n_features]
M: int, optional
M refers to the number of machines that must select a point for it to be used during aggregation.
info: boolean, optional
If info is true the list of points selected in the aggregation is returned.
Returns
-------
result: prediction
"""
# dictionary mapping machine to points selected
select = {}
for machine in self.estimators_:
# machine prediction
label = self.estimators_[machine].predict(X)
select[machine] = set()
# iterate over the points in D_l
# replace with numpy iteration
for count in range(0, len(self.X_l_)):
if self.machine_predictions_[machine][count] == label:
select[machine].add(count)
points = []
# count is the index number.
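# a point from D_l is retained only when exactly M machines assign it the same label they predicted for the query point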
for count in range(0, len(self.X_l_)):
# row check is number of machines which picked up a particular point
row_check = 0
for machine in select:
if count in select[machine]:
row_check += 1
if row_check == M:
points.append(count)
# if no points are selected, return 0
if len(points) == 0:
if info:
logger.info("No points were selected, prediction is 0")
return (0, 0)
logger.info("No points were selected, prediction is 0")
return 0
# aggregate
classes = {}
for label in np.unique(self.y_l_):
classes[label] = 0
for point in points:
classes[self.y_l_[point]] += 1
result = int(max(classes, key=classes.get))
if info:
return result, points
return result
def predict(self, X, M=None, info=False):
"""
Performs the ClassifierCobra aggregation scheme, calls pred.
ClassifierCobra performs a majority vote among all points which are retained by the COBRA procedure.
Parameters
----------
X: array-like, [n_features]
M: int, optional
M refers to the number of machines that must select a point for it to be used during aggregation.
info: boolean, optional
If info is true the list of points selected in the aggregation is returned.
Returns
-------
result: prediction
"""
X = check_array(X)
if M is None:
M = len(self.estimators_)
if X.ndim == 1:
return self.pred(X.reshape(1, -1), M=M)
result = np.zeros(len(X))
avg_points = 0
index = 0
for vector in X:
if info:
result[index], points = self.pred(vector.reshape(1, -1), M=M, info=info)
avg_points += len(points)
else:
result[index] = self.pred(vector.reshape(1, -1), M=M)
index += 1
if info:
avg_points = avg_points / len(X)
return result, avg_points
return result
def predict_proba(self, X, kernel=None, metric=None, bandwidth=1, **kwargs):
"""
Performs the ClassifierCobra aggregation scheme and calculates probability of a point being in a particular class.
ClassifierCobra performs a majority vote among all points which are retained by the COBRA procedure.
NOTE: this method exists mainly to visualise decision boundaries.
The current implementation is just the mean of the constituent machines' probabilities, as that kind of predicted
probability doesn't exist (yet) for ClassifierCobra.
Parameters
----------
X: array-like, [n_features]
"""
probs = []
for machine in self.estimators_:
try:
probs.append(self.estimators_[machine].predict_proba(X))
except AttributeError:
continue
prob = np.mean(probs, axis=0)
return prob
def split_data(self, k=None, l=None, shuffle_data=True):
"""
Split the data into different parts for training machines and for aggregation.
Parameters
----------
k : int, optional
k is the number of points used to train the machines.
Those are the first k points of the data provided.
l: int, optional
l is the number of points used to form the ClassifierCobra aggregate.
shuffle_data: bool, optional
Boolean value to decide to shuffle the data before splitting.
Returns
-------
self : returns an instance of self.
"""
if shuffle_data:
self.X_, self.y_ = shuffle(self.X_, self.y_, random_state=self.random_state)
if k is None and l is None:
k = int(len(self.X_) / 2)
l = int(len(self.X_))
if k is not None and l is None:
l = len(self.X_) - k
if l is not None and k is None:
k = len(self.X_) - l
self.X_k_ = self.X_[:k]
self.X_l_ = self.X_[k:l]
self.y_k_ = self.y_[:k]
self.y_l_ = self.y_[k:l]
return self
def load_default(self, machine_list='basic'):
"""
Loads 4 different scikit-learn classifiers by default. The advanced list adds more machines.
As of current release SGD algorithm is not included in the advanced list.
Parameters
----------
machine_list: optional, list of strings
List of default machine names to be loaded.
Returns
-------
self : returns an instance of self.
"""
if machine_list == 'basic':
machine_list = ['sgd', 'tree', 'knn', 'svm']
if machine_list == 'advanced':
machine_list = ['tree', 'knn', 'svm', 'logreg', 'naive_bayes', 'lda', 'neural_network']
for machine in machine_list:
try:
if machine == 'sgd':
# 'sgd' is listed in the basic machine list above; log loss keeps predict_proba available
self.estimators_['sgd'] = SGDClassifier(loss='log', random_state=self.random_state).fit(self.X_k_, self.y_k_)
if machine == 'svm':
self.estimators_['svm'] = svm.SVC().fit(self.X_k_, self.y_k_)
if machine == 'knn':
self.estimators_['knn'] = neighbors.KNeighborsClassifier().fit(self.X_k_, self.y_k_)
if machine == 'tree':
self.estimators_['tree'] = tree.DecisionTreeClassifier().fit(self.X_k_, self.y_k_)
if machine == 'logreg':
self.estimators_['logreg'] = LogisticRegression(random_state=self.random_state).fit(self.X_k_, self.y_k_)
if machine == 'naive_bayes':
self.estimators_['naive_bayes'] = GaussianNB().fit(self.X_k_, self.y_k_)
if machine == 'lda':
self.estimators_['lda'] = LinearDiscriminantAnalysis().fit(self.X_k_, self.y_k_)
if machine == 'neural_network':
self.estimators_['neural_network'] = MLPClassifier(random_state=self.random_state).fit(self.X_k_, self.y_k_)
except ValueError:
continue
return self
def load_machine(self, machine_name, machine):
"""
Adds a machine to be used during the aggregation strategy.
The machine object must have been trained using X_k and y_k, and must have a 'predict()' method.
After the machine is loaded, for it to be used during aggregation, load_machine_predictions must be run.
Parameters
----------
machine_name : string
Name of the machine you are loading
machine: machine/classifier object
The classifier machine object which is mapped to the machine_name
Returns
-------
self : returns an instance of self.
"""
self.estimators_[machine_name] = machine
return self
def load_machine_predictions(self, predictions=None):
"""
Stores the trained machines' predictions on D_l in a dictionary, to be used for predictions.
Should be run after all the machines to be used for aggregation are loaded.
Parameters
----------
predictions: dictionary, optional
A pre-existing machine:predictions dictionary can also be loaded.
Returns
-------
self : returns an instance of self.
"""
self.machine_predictions_ = {}
if predictions is None:
for machine in self.estimators_:
self.machine_predictions_[machine] = self.estimators_[machine].predict(self.X_l_)
return self
def load_machine_proba_predictions(self, predictions=None):
"""
Stores the trained machines' probability predictions on D_l in a dictionary, to be used for predictions.
Should be run after all the machines to be used for aggregation are loaded.
Parameters
----------
predictions: dictionary, optional
A pre-existing machine:predictions dictionary can also be loaded.
Returns
-------
self : returns an instance of self.
"""
self.machine_proba_predictions_ = {}
if predictions is None:
for machine in self.estimators_:
try:
self.machine_proba_predictions_[machine] = self.estimators_[machine].predict_proba(self.X_l_)
except AttributeError:
self.machine_proba_predictions_[machine] = self.estimators_[machine].decision_function(self.X_l_)
return self
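# Minimal usage sketch (illustrative only, not part of the original module).
# scikit-learn's make_classification is used purely to generate toy data; any
# (X, y) classification dataset would work the same way.
if __name__ == "__main__":
    from sklearn.datasets import make_classification

    X, y = make_classification(n_samples=200, n_features=5, random_state=0)
    cc = ClassifierCobra(random_state=0, machine_list='basic')
    cc.fit(X, y)

    # default M: a point is retained only if every loaded machine agrees on it
    print(cc.predict(X[:5]))

    # looser aggregate: retain points selected by exactly two machines
    print(cc.predict(X[:5], M=2))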
| mit |
Petr-Kovalev/nupic-win32 | external/linux32/lib/python2.6/site-packages/matplotlib/_cm.py | 70 | 375423 | """
Color data and pre-defined cmap objects.
This is a helper for cm.py, originally part of that file.
Separating the data (this file) from cm.py makes both easier
to deal with.
Objects visible in cm.py are the individual cmap objects ('autumn',
etc.) and a dictionary, 'datad', including all of these objects.
"""
import matplotlib as mpl
import matplotlib.colors as colors
LUTSIZE = mpl.rcParams['image.lut']
_binary_data = {
'red' : ((0., 1., 1.), (1., 0., 0.)),
'green': ((0., 1., 1.), (1., 0., 0.)),
'blue' : ((0., 1., 1.), (1., 0., 0.))
}
_bone_data = {'red': ((0., 0., 0.),(1.0, 1.0, 1.0)),
'green': ((0., 0., 0.),(1.0, 1.0, 1.0)),
'blue': ((0., 0., 0.),(1.0, 1.0, 1.0))}
_autumn_data = {'red': ((0., 1.0, 1.0),(1.0, 1.0, 1.0)),
'green': ((0., 0., 0.),(1.0, 1.0, 1.0)),
'blue': ((0., 0., 0.),(1.0, 0., 0.))}
_bone_data = {'red': ((0., 0., 0.),(0.746032, 0.652778, 0.652778),(1.0, 1.0, 1.0)),
'green': ((0., 0., 0.),(0.365079, 0.319444, 0.319444),
(0.746032, 0.777778, 0.777778),(1.0, 1.0, 1.0)),
'blue': ((0., 0., 0.),(0.365079, 0.444444, 0.444444),(1.0, 1.0, 1.0))}
_cool_data = {'red': ((0., 0., 0.), (1.0, 1.0, 1.0)),
'green': ((0., 1., 1.), (1.0, 0., 0.)),
'blue': ((0., 1., 1.), (1.0, 1., 1.))}
_copper_data = {'red': ((0., 0., 0.),(0.809524, 1.000000, 1.000000),(1.0, 1.0, 1.0)),
'green': ((0., 0., 0.),(1.0, 0.7812, 0.7812)),
'blue': ((0., 0., 0.),(1.0, 0.4975, 0.4975))}
_flag_data = {'red': ((0., 1., 1.),(0.015873, 1.000000, 1.000000),
(0.031746, 0.000000, 0.000000),(0.047619, 0.000000, 0.000000),
(0.063492, 1.000000, 1.000000),(0.079365, 1.000000, 1.000000),
(0.095238, 0.000000, 0.000000),(0.111111, 0.000000, 0.000000),
(0.126984, 1.000000, 1.000000),(0.142857, 1.000000, 1.000000),
(0.158730, 0.000000, 0.000000),(0.174603, 0.000000, 0.000000),
(0.190476, 1.000000, 1.000000),(0.206349, 1.000000, 1.000000),
(0.222222, 0.000000, 0.000000),(0.238095, 0.000000, 0.000000),
(0.253968, 1.000000, 1.000000),(0.269841, 1.000000, 1.000000),
(0.285714, 0.000000, 0.000000),(0.301587, 0.000000, 0.000000),
(0.317460, 1.000000, 1.000000),(0.333333, 1.000000, 1.000000),
(0.349206, 0.000000, 0.000000),(0.365079, 0.000000, 0.000000),
(0.380952, 1.000000, 1.000000),(0.396825, 1.000000, 1.000000),
(0.412698, 0.000000, 0.000000),(0.428571, 0.000000, 0.000000),
(0.444444, 1.000000, 1.000000),(0.460317, 1.000000, 1.000000),
(0.476190, 0.000000, 0.000000),(0.492063, 0.000000, 0.000000),
(0.507937, 1.000000, 1.000000),(0.523810, 1.000000, 1.000000),
(0.539683, 0.000000, 0.000000),(0.555556, 0.000000, 0.000000),
(0.571429, 1.000000, 1.000000),(0.587302, 1.000000, 1.000000),
(0.603175, 0.000000, 0.000000),(0.619048, 0.000000, 0.000000),
(0.634921, 1.000000, 1.000000),(0.650794, 1.000000, 1.000000),
(0.666667, 0.000000, 0.000000),(0.682540, 0.000000, 0.000000),
(0.698413, 1.000000, 1.000000),(0.714286, 1.000000, 1.000000),
(0.730159, 0.000000, 0.000000),(0.746032, 0.000000, 0.000000),
(0.761905, 1.000000, 1.000000),(0.777778, 1.000000, 1.000000),
(0.793651, 0.000000, 0.000000),(0.809524, 0.000000, 0.000000),
(0.825397, 1.000000, 1.000000),(0.841270, 1.000000, 1.000000),
(0.857143, 0.000000, 0.000000),(0.873016, 0.000000, 0.000000),
(0.888889, 1.000000, 1.000000),(0.904762, 1.000000, 1.000000),
(0.920635, 0.000000, 0.000000),(0.936508, 0.000000, 0.000000),
(0.952381, 1.000000, 1.000000),(0.968254, 1.000000, 1.000000),
(0.984127, 0.000000, 0.000000),(1.0, 0., 0.)),
'green': ((0., 0., 0.),(0.015873, 1.000000, 1.000000),
(0.031746, 0.000000, 0.000000),(0.063492, 0.000000, 0.000000),
(0.079365, 1.000000, 1.000000),(0.095238, 0.000000, 0.000000),
(0.126984, 0.000000, 0.000000),(0.142857, 1.000000, 1.000000),
(0.158730, 0.000000, 0.000000),(0.190476, 0.000000, 0.000000),
(0.206349, 1.000000, 1.000000),(0.222222, 0.000000, 0.000000),
(0.253968, 0.000000, 0.000000),(0.269841, 1.000000, 1.000000),
(0.285714, 0.000000, 0.000000),(0.317460, 0.000000, 0.000000),
(0.333333, 1.000000, 1.000000),(0.349206, 0.000000, 0.000000),
(0.380952, 0.000000, 0.000000),(0.396825, 1.000000, 1.000000),
(0.412698, 0.000000, 0.000000),(0.444444, 0.000000, 0.000000),
(0.460317, 1.000000, 1.000000),(0.476190, 0.000000, 0.000000),
(0.507937, 0.000000, 0.000000),(0.523810, 1.000000, 1.000000),
(0.539683, 0.000000, 0.000000),(0.571429, 0.000000, 0.000000),
(0.587302, 1.000000, 1.000000),(0.603175, 0.000000, 0.000000),
(0.634921, 0.000000, 0.000000),(0.650794, 1.000000, 1.000000),
(0.666667, 0.000000, 0.000000),(0.698413, 0.000000, 0.000000),
(0.714286, 1.000000, 1.000000),(0.730159, 0.000000, 0.000000),
(0.761905, 0.000000, 0.000000),(0.777778, 1.000000, 1.000000),
(0.793651, 0.000000, 0.000000),(0.825397, 0.000000, 0.000000),
(0.841270, 1.000000, 1.000000),(0.857143, 0.000000, 0.000000),
(0.888889, 0.000000, 0.000000),(0.904762, 1.000000, 1.000000),
(0.920635, 0.000000, 0.000000),(0.952381, 0.000000, 0.000000),
(0.968254, 1.000000, 1.000000),(0.984127, 0.000000, 0.000000),
(1.0, 0., 0.)),
'blue': ((0., 0., 0.),(0.015873, 1.000000, 1.000000),
(0.031746, 1.000000, 1.000000),(0.047619, 0.000000, 0.000000),
(0.063492, 0.000000, 0.000000),(0.079365, 1.000000, 1.000000),
(0.095238, 1.000000, 1.000000),(0.111111, 0.000000, 0.000000),
(0.126984, 0.000000, 0.000000),(0.142857, 1.000000, 1.000000),
(0.158730, 1.000000, 1.000000),(0.174603, 0.000000, 0.000000),
(0.190476, 0.000000, 0.000000),(0.206349, 1.000000, 1.000000),
(0.222222, 1.000000, 1.000000),(0.238095, 0.000000, 0.000000),
(0.253968, 0.000000, 0.000000),(0.269841, 1.000000, 1.000000),
(0.285714, 1.000000, 1.000000),(0.301587, 0.000000, 0.000000),
(0.317460, 0.000000, 0.000000),(0.333333, 1.000000, 1.000000),
(0.349206, 1.000000, 1.000000),(0.365079, 0.000000, 0.000000),
(0.380952, 0.000000, 0.000000),(0.396825, 1.000000, 1.000000),
(0.412698, 1.000000, 1.000000),(0.428571, 0.000000, 0.000000),
(0.444444, 0.000000, 0.000000),(0.460317, 1.000000, 1.000000),
(0.476190, 1.000000, 1.000000),(0.492063, 0.000000, 0.000000),
(0.507937, 0.000000, 0.000000),(0.523810, 1.000000, 1.000000),
(0.539683, 1.000000, 1.000000),(0.555556, 0.000000, 0.000000),
(0.571429, 0.000000, 0.000000),(0.587302, 1.000000, 1.000000),
(0.603175, 1.000000, 1.000000),(0.619048, 0.000000, 0.000000),
(0.634921, 0.000000, 0.000000),(0.650794, 1.000000, 1.000000),
(0.666667, 1.000000, 1.000000),(0.682540, 0.000000, 0.000000),
(0.698413, 0.000000, 0.000000),(0.714286, 1.000000, 1.000000),
(0.730159, 1.000000, 1.000000),(0.746032, 0.000000, 0.000000),
(0.761905, 0.000000, 0.000000),(0.777778, 1.000000, 1.000000),
(0.793651, 1.000000, 1.000000),(0.809524, 0.000000, 0.000000),
(0.825397, 0.000000, 0.000000),(0.841270, 1.000000, 1.000000),
(0.857143, 1.000000, 1.000000),(0.873016, 0.000000, 0.000000),
(0.888889, 0.000000, 0.000000),(0.904762, 1.000000, 1.000000),
(0.920635, 1.000000, 1.000000),(0.936508, 0.000000, 0.000000),
(0.952381, 0.000000, 0.000000),(0.968254, 1.000000, 1.000000),
(0.984127, 1.000000, 1.000000),(1.0, 0., 0.))}
_gray_data = {'red': ((0., 0, 0), (1., 1, 1)),
'green': ((0., 0, 0), (1., 1, 1)),
'blue': ((0., 0, 0), (1., 1, 1))}
_hot_data = {'red': ((0., 0.0416, 0.0416),(0.365079, 1.000000, 1.000000),(1.0, 1.0, 1.0)),
'green': ((0., 0., 0.),(0.365079, 0.000000, 0.000000),
(0.746032, 1.000000, 1.000000),(1.0, 1.0, 1.0)),
'blue': ((0., 0., 0.),(0.746032, 0.000000, 0.000000),(1.0, 1.0, 1.0))}
_hsv_data = {'red': ((0., 1., 1.),(0.158730, 1.000000, 1.000000),
(0.174603, 0.968750, 0.968750),(0.333333, 0.031250, 0.031250),
(0.349206, 0.000000, 0.000000),(0.666667, 0.000000, 0.000000),
(0.682540, 0.031250, 0.031250),(0.841270, 0.968750, 0.968750),
(0.857143, 1.000000, 1.000000),(1.0, 1.0, 1.0)),
'green': ((0., 0., 0.),(0.158730, 0.937500, 0.937500),
(0.174603, 1.000000, 1.000000),(0.507937, 1.000000, 1.000000),
(0.666667, 0.062500, 0.062500),(0.682540, 0.000000, 0.000000),
(1.0, 0., 0.)),
'blue': ((0., 0., 0.),(0.333333, 0.000000, 0.000000),
(0.349206, 0.062500, 0.062500),(0.507937, 1.000000, 1.000000),
(0.841270, 1.000000, 1.000000),(0.857143, 0.937500, 0.937500),
(1.0, 0.09375, 0.09375))}
_jet_data = {'red': ((0., 0, 0), (0.35, 0, 0), (0.66, 1, 1), (0.89,1, 1),
(1, 0.5, 0.5)),
'green': ((0., 0, 0), (0.125,0, 0), (0.375,1, 1), (0.64,1, 1),
(0.91,0,0), (1, 0, 0)),
'blue': ((0., 0.5, 0.5), (0.11, 1, 1), (0.34, 1, 1), (0.65,0, 0),
(1, 0, 0))}
_pink_data = {'red': ((0., 0.1178, 0.1178),(0.015873, 0.195857, 0.195857),
(0.031746, 0.250661, 0.250661),(0.047619, 0.295468, 0.295468),
(0.063492, 0.334324, 0.334324),(0.079365, 0.369112, 0.369112),
(0.095238, 0.400892, 0.400892),(0.111111, 0.430331, 0.430331),
(0.126984, 0.457882, 0.457882),(0.142857, 0.483867, 0.483867),
(0.158730, 0.508525, 0.508525),(0.174603, 0.532042, 0.532042),
(0.190476, 0.554563, 0.554563),(0.206349, 0.576204, 0.576204),
(0.222222, 0.597061, 0.597061),(0.238095, 0.617213, 0.617213),
(0.253968, 0.636729, 0.636729),(0.269841, 0.655663, 0.655663),
(0.285714, 0.674066, 0.674066),(0.301587, 0.691980, 0.691980),
(0.317460, 0.709441, 0.709441),(0.333333, 0.726483, 0.726483),
(0.349206, 0.743134, 0.743134),(0.365079, 0.759421, 0.759421),
(0.380952, 0.766356, 0.766356),(0.396825, 0.773229, 0.773229),
(0.412698, 0.780042, 0.780042),(0.428571, 0.786796, 0.786796),
(0.444444, 0.793492, 0.793492),(0.460317, 0.800132, 0.800132),
(0.476190, 0.806718, 0.806718),(0.492063, 0.813250, 0.813250),
(0.507937, 0.819730, 0.819730),(0.523810, 0.826160, 0.826160),
(0.539683, 0.832539, 0.832539),(0.555556, 0.838870, 0.838870),
(0.571429, 0.845154, 0.845154),(0.587302, 0.851392, 0.851392),
(0.603175, 0.857584, 0.857584),(0.619048, 0.863731, 0.863731),
(0.634921, 0.869835, 0.869835),(0.650794, 0.875897, 0.875897),
(0.666667, 0.881917, 0.881917),(0.682540, 0.887896, 0.887896),
(0.698413, 0.893835, 0.893835),(0.714286, 0.899735, 0.899735),
(0.730159, 0.905597, 0.905597),(0.746032, 0.911421, 0.911421),
(0.761905, 0.917208, 0.917208),(0.777778, 0.922958, 0.922958),
(0.793651, 0.928673, 0.928673),(0.809524, 0.934353, 0.934353),
(0.825397, 0.939999, 0.939999),(0.841270, 0.945611, 0.945611),
(0.857143, 0.951190, 0.951190),(0.873016, 0.956736, 0.956736),
(0.888889, 0.962250, 0.962250),(0.904762, 0.967733, 0.967733),
(0.920635, 0.973185, 0.973185),(0.936508, 0.978607, 0.978607),
(0.952381, 0.983999, 0.983999),(0.968254, 0.989361, 0.989361),
(0.984127, 0.994695, 0.994695),(1.0, 1.0, 1.0)),
'green': ((0., 0., 0.),(0.015873, 0.102869, 0.102869),
(0.031746, 0.145479, 0.145479),(0.047619, 0.178174, 0.178174),
(0.063492, 0.205738, 0.205738),(0.079365, 0.230022, 0.230022),
(0.095238, 0.251976, 0.251976),(0.111111, 0.272166, 0.272166),
(0.126984, 0.290957, 0.290957),(0.142857, 0.308607, 0.308607),
(0.158730, 0.325300, 0.325300),(0.174603, 0.341178, 0.341178),
(0.190476, 0.356348, 0.356348),(0.206349, 0.370899, 0.370899),
(0.222222, 0.384900, 0.384900),(0.238095, 0.398410, 0.398410),
(0.253968, 0.411476, 0.411476),(0.269841, 0.424139, 0.424139),
(0.285714, 0.436436, 0.436436),(0.301587, 0.448395, 0.448395),
(0.317460, 0.460044, 0.460044),(0.333333, 0.471405, 0.471405),
(0.349206, 0.482498, 0.482498),(0.365079, 0.493342, 0.493342),
(0.380952, 0.517549, 0.517549),(0.396825, 0.540674, 0.540674),
(0.412698, 0.562849, 0.562849),(0.428571, 0.584183, 0.584183),
(0.444444, 0.604765, 0.604765),(0.460317, 0.624669, 0.624669),
(0.476190, 0.643958, 0.643958),(0.492063, 0.662687, 0.662687),
(0.507937, 0.680900, 0.680900),(0.523810, 0.698638, 0.698638),
(0.539683, 0.715937, 0.715937),(0.555556, 0.732828, 0.732828),
(0.571429, 0.749338, 0.749338),(0.587302, 0.765493, 0.765493),
(0.603175, 0.781313, 0.781313),(0.619048, 0.796819, 0.796819),
(0.634921, 0.812029, 0.812029),(0.650794, 0.826960, 0.826960),
(0.666667, 0.841625, 0.841625),(0.682540, 0.856040, 0.856040),
(0.698413, 0.870216, 0.870216),(0.714286, 0.884164, 0.884164),
(0.730159, 0.897896, 0.897896),(0.746032, 0.911421, 0.911421),
(0.761905, 0.917208, 0.917208),(0.777778, 0.922958, 0.922958),
(0.793651, 0.928673, 0.928673),(0.809524, 0.934353, 0.934353),
(0.825397, 0.939999, 0.939999),(0.841270, 0.945611, 0.945611),
(0.857143, 0.951190, 0.951190),(0.873016, 0.956736, 0.956736),
(0.888889, 0.962250, 0.962250),(0.904762, 0.967733, 0.967733),
(0.920635, 0.973185, 0.973185),(0.936508, 0.978607, 0.978607),
(0.952381, 0.983999, 0.983999),(0.968254, 0.989361, 0.989361),
(0.984127, 0.994695, 0.994695),(1.0, 1.0, 1.0)),
'blue': ((0., 0., 0.),(0.015873, 0.102869, 0.102869),
(0.031746, 0.145479, 0.145479),(0.047619, 0.178174, 0.178174),
(0.063492, 0.205738, 0.205738),(0.079365, 0.230022, 0.230022),
(0.095238, 0.251976, 0.251976),(0.111111, 0.272166, 0.272166),
(0.126984, 0.290957, 0.290957),(0.142857, 0.308607, 0.308607),
(0.158730, 0.325300, 0.325300),(0.174603, 0.341178, 0.341178),
(0.190476, 0.356348, 0.356348),(0.206349, 0.370899, 0.370899),
(0.222222, 0.384900, 0.384900),(0.238095, 0.398410, 0.398410),
(0.253968, 0.411476, 0.411476),(0.269841, 0.424139, 0.424139),
(0.285714, 0.436436, 0.436436),(0.301587, 0.448395, 0.448395),
(0.317460, 0.460044, 0.460044),(0.333333, 0.471405, 0.471405),
(0.349206, 0.482498, 0.482498),(0.365079, 0.493342, 0.493342),
(0.380952, 0.503953, 0.503953),(0.396825, 0.514344, 0.514344),
(0.412698, 0.524531, 0.524531),(0.428571, 0.534522, 0.534522),
(0.444444, 0.544331, 0.544331),(0.460317, 0.553966, 0.553966),
(0.476190, 0.563436, 0.563436),(0.492063, 0.572750, 0.572750),
(0.507937, 0.581914, 0.581914),(0.523810, 0.590937, 0.590937),
(0.539683, 0.599824, 0.599824),(0.555556, 0.608581, 0.608581),
(0.571429, 0.617213, 0.617213),(0.587302, 0.625727, 0.625727),
(0.603175, 0.634126, 0.634126),(0.619048, 0.642416, 0.642416),
(0.634921, 0.650600, 0.650600),(0.650794, 0.658682, 0.658682),
(0.666667, 0.666667, 0.666667),(0.682540, 0.674556, 0.674556),
(0.698413, 0.682355, 0.682355),(0.714286, 0.690066, 0.690066),
(0.730159, 0.697691, 0.697691),(0.746032, 0.705234, 0.705234),
(0.761905, 0.727166, 0.727166),(0.777778, 0.748455, 0.748455),
(0.793651, 0.769156, 0.769156),(0.809524, 0.789314, 0.789314),
(0.825397, 0.808969, 0.808969),(0.841270, 0.828159, 0.828159),
(0.857143, 0.846913, 0.846913),(0.873016, 0.865261, 0.865261),
(0.888889, 0.883229, 0.883229),(0.904762, 0.900837, 0.900837),
(0.920635, 0.918109, 0.918109),(0.936508, 0.935061, 0.935061),
(0.952381, 0.951711, 0.951711),(0.968254, 0.968075, 0.968075),
(0.984127, 0.984167, 0.984167),(1.0, 1.0, 1.0))}
_prism_data = {'red': ((0., 1., 1.),(0.031746, 1.000000, 1.000000),
(0.047619, 0.000000, 0.000000),(0.063492, 0.000000, 0.000000),
(0.079365, 0.666667, 0.666667),(0.095238, 1.000000, 1.000000),
(0.126984, 1.000000, 1.000000),(0.142857, 0.000000, 0.000000),
(0.158730, 0.000000, 0.000000),(0.174603, 0.666667, 0.666667),
(0.190476, 1.000000, 1.000000),(0.222222, 1.000000, 1.000000),
(0.238095, 0.000000, 0.000000),(0.253968, 0.000000, 0.000000),
(0.269841, 0.666667, 0.666667),(0.285714, 1.000000, 1.000000),
(0.317460, 1.000000, 1.000000),(0.333333, 0.000000, 0.000000),
(0.349206, 0.000000, 0.000000),(0.365079, 0.666667, 0.666667),
(0.380952, 1.000000, 1.000000),(0.412698, 1.000000, 1.000000),
(0.428571, 0.000000, 0.000000),(0.444444, 0.000000, 0.000000),
(0.460317, 0.666667, 0.666667),(0.476190, 1.000000, 1.000000),
(0.507937, 1.000000, 1.000000),(0.523810, 0.000000, 0.000000),
(0.539683, 0.000000, 0.000000),(0.555556, 0.666667, 0.666667),
(0.571429, 1.000000, 1.000000),(0.603175, 1.000000, 1.000000),
(0.619048, 0.000000, 0.000000),(0.634921, 0.000000, 0.000000),
(0.650794, 0.666667, 0.666667),(0.666667, 1.000000, 1.000000),
(0.698413, 1.000000, 1.000000),(0.714286, 0.000000, 0.000000),
(0.730159, 0.000000, 0.000000),(0.746032, 0.666667, 0.666667),
(0.761905, 1.000000, 1.000000),(0.793651, 1.000000, 1.000000),
(0.809524, 0.000000, 0.000000),(0.825397, 0.000000, 0.000000),
(0.841270, 0.666667, 0.666667),(0.857143, 1.000000, 1.000000),
(0.888889, 1.000000, 1.000000),(0.904762, 0.000000, 0.000000),
(0.920635, 0.000000, 0.000000),(0.936508, 0.666667, 0.666667),
(0.952381, 1.000000, 1.000000),(0.984127, 1.000000, 1.000000),
(1.0, 0.0, 0.0)),
'green': ((0., 0., 0.),(0.031746, 1.000000, 1.000000),
(0.047619, 1.000000, 1.000000),(0.063492, 0.000000, 0.000000),
(0.095238, 0.000000, 0.000000),(0.126984, 1.000000, 1.000000),
(0.142857, 1.000000, 1.000000),(0.158730, 0.000000, 0.000000),
(0.190476, 0.000000, 0.000000),(0.222222, 1.000000, 1.000000),
(0.238095, 1.000000, 1.000000),(0.253968, 0.000000, 0.000000),
(0.285714, 0.000000, 0.000000),(0.317460, 1.000000, 1.000000),
(0.333333, 1.000000, 1.000000),(0.349206, 0.000000, 0.000000),
(0.380952, 0.000000, 0.000000),(0.412698, 1.000000, 1.000000),
(0.428571, 1.000000, 1.000000),(0.444444, 0.000000, 0.000000),
(0.476190, 0.000000, 0.000000),(0.507937, 1.000000, 1.000000),
(0.523810, 1.000000, 1.000000),(0.539683, 0.000000, 0.000000),
(0.571429, 0.000000, 0.000000),(0.603175, 1.000000, 1.000000),
(0.619048, 1.000000, 1.000000),(0.634921, 0.000000, 0.000000),
(0.666667, 0.000000, 0.000000),(0.698413, 1.000000, 1.000000),
(0.714286, 1.000000, 1.000000),(0.730159, 0.000000, 0.000000),
(0.761905, 0.000000, 0.000000),(0.793651, 1.000000, 1.000000),
(0.809524, 1.000000, 1.000000),(0.825397, 0.000000, 0.000000),
(0.857143, 0.000000, 0.000000),(0.888889, 1.000000, 1.000000),
(0.904762, 1.000000, 1.000000),(0.920635, 0.000000, 0.000000),
(0.952381, 0.000000, 0.000000),(0.984127, 1.000000, 1.000000),
(1.0, 1.0, 1.0)),
'blue': ((0., 0., 0.),(0.047619, 0.000000, 0.000000),
(0.063492, 1.000000, 1.000000),(0.079365, 1.000000, 1.000000),
(0.095238, 0.000000, 0.000000),(0.142857, 0.000000, 0.000000),
(0.158730, 1.000000, 1.000000),(0.174603, 1.000000, 1.000000),
(0.190476, 0.000000, 0.000000),(0.238095, 0.000000, 0.000000),
(0.253968, 1.000000, 1.000000),(0.269841, 1.000000, 1.000000),
(0.285714, 0.000000, 0.000000),(0.333333, 0.000000, 0.000000),
(0.349206, 1.000000, 1.000000),(0.365079, 1.000000, 1.000000),
(0.380952, 0.000000, 0.000000),(0.428571, 0.000000, 0.000000),
(0.444444, 1.000000, 1.000000),(0.460317, 1.000000, 1.000000),
(0.476190, 0.000000, 0.000000),(0.523810, 0.000000, 0.000000),
(0.539683, 1.000000, 1.000000),(0.555556, 1.000000, 1.000000),
(0.571429, 0.000000, 0.000000),(0.619048, 0.000000, 0.000000),
(0.634921, 1.000000, 1.000000),(0.650794, 1.000000, 1.000000),
(0.666667, 0.000000, 0.000000),(0.714286, 0.000000, 0.000000),
(0.730159, 1.000000, 1.000000),(0.746032, 1.000000, 1.000000),
(0.761905, 0.000000, 0.000000),(0.809524, 0.000000, 0.000000),
(0.825397, 1.000000, 1.000000),(0.841270, 1.000000, 1.000000),
(0.857143, 0.000000, 0.000000),(0.904762, 0.000000, 0.000000),
(0.920635, 1.000000, 1.000000),(0.936508, 1.000000, 1.000000),
(0.952381, 0.000000, 0.000000),(1.0, 0.0, 0.0))}
_spring_data = {'red': ((0., 1., 1.),(1.0, 1.0, 1.0)),
'green': ((0., 0., 0.),(1.0, 1.0, 1.0)),
'blue': ((0., 1., 1.),(1.0, 0.0, 0.0))}
_summer_data = {'red': ((0., 0., 0.),(1.0, 1.0, 1.0)),
'green': ((0., 0.5, 0.5),(1.0, 1.0, 1.0)),
'blue': ((0., 0.4, 0.4),(1.0, 0.4, 0.4))}
_winter_data = {'red': ((0., 0., 0.),(1.0, 0.0, 0.0)),
'green': ((0., 0., 0.),(1.0, 1.0, 1.0)),
'blue': ((0., 1., 1.),(1.0, 0.5, 0.5))}
_spectral_data = {'red': [(0.0, 0.0, 0.0), (0.05, 0.4667, 0.4667),
(0.10, 0.5333, 0.5333), (0.15, 0.0, 0.0),
(0.20, 0.0, 0.0), (0.25, 0.0, 0.0),
(0.30, 0.0, 0.0), (0.35, 0.0, 0.0),
(0.40, 0.0, 0.0), (0.45, 0.0, 0.0),
(0.50, 0.0, 0.0), (0.55, 0.0, 0.0),
(0.60, 0.0, 0.0), (0.65, 0.7333, 0.7333),
(0.70, 0.9333, 0.9333), (0.75, 1.0, 1.0),
(0.80, 1.0, 1.0), (0.85, 1.0, 1.0),
(0.90, 0.8667, 0.8667), (0.95, 0.80, 0.80),
(1.0, 0.80, 0.80)],
'green': [(0.0, 0.0, 0.0), (0.05, 0.0, 0.0),
(0.10, 0.0, 0.0), (0.15, 0.0, 0.0),
(0.20, 0.0, 0.0), (0.25, 0.4667, 0.4667),
(0.30, 0.6000, 0.6000), (0.35, 0.6667, 0.6667),
(0.40, 0.6667, 0.6667), (0.45, 0.6000, 0.6000),
(0.50, 0.7333, 0.7333), (0.55, 0.8667, 0.8667),
(0.60, 1.0, 1.0), (0.65, 1.0, 1.0),
(0.70, 0.9333, 0.9333), (0.75, 0.8000, 0.8000),
(0.80, 0.6000, 0.6000), (0.85, 0.0, 0.0),
(0.90, 0.0, 0.0), (0.95, 0.0, 0.0),
(1.0, 0.80, 0.80)],
'blue': [(0.0, 0.0, 0.0), (0.05, 0.5333, 0.5333),
(0.10, 0.6000, 0.6000), (0.15, 0.6667, 0.6667),
(0.20, 0.8667, 0.8667), (0.25, 0.8667, 0.8667),
(0.30, 0.8667, 0.8667), (0.35, 0.6667, 0.6667),
(0.40, 0.5333, 0.5333), (0.45, 0.0, 0.0),
(0.5, 0.0, 0.0), (0.55, 0.0, 0.0),
(0.60, 0.0, 0.0), (0.65, 0.0, 0.0),
(0.70, 0.0, 0.0), (0.75, 0.0, 0.0),
(0.80, 0.0, 0.0), (0.85, 0.0, 0.0),
(0.90, 0.0, 0.0), (0.95, 0.0, 0.0),
(1.0, 0.80, 0.80)]}
autumn = colors.LinearSegmentedColormap('autumn', _autumn_data, LUTSIZE)
bone = colors.LinearSegmentedColormap('bone ', _bone_data, LUTSIZE)
binary = colors.LinearSegmentedColormap('binary ', _binary_data, LUTSIZE)
cool = colors.LinearSegmentedColormap('cool', _cool_data, LUTSIZE)
copper = colors.LinearSegmentedColormap('copper', _copper_data, LUTSIZE)
flag = colors.LinearSegmentedColormap('flag', _flag_data, LUTSIZE)
gray = colors.LinearSegmentedColormap('gray', _gray_data, LUTSIZE)
hot = colors.LinearSegmentedColormap('hot', _hot_data, LUTSIZE)
hsv = colors.LinearSegmentedColormap('hsv', _hsv_data, LUTSIZE)
jet = colors.LinearSegmentedColormap('jet', _jet_data, LUTSIZE)
pink = colors.LinearSegmentedColormap('pink', _pink_data, LUTSIZE)
prism = colors.LinearSegmentedColormap('prism', _prism_data, LUTSIZE)
spring = colors.LinearSegmentedColormap('spring', _spring_data, LUTSIZE)
summer = colors.LinearSegmentedColormap('summer', _summer_data, LUTSIZE)
winter = colors.LinearSegmentedColormap('winter', _winter_data, LUTSIZE)
spectral = colors.LinearSegmentedColormap('spectral', _spectral_data, LUTSIZE)
datad = {
'autumn': _autumn_data,
'bone': _bone_data,
'binary': _binary_data,
'cool': _cool_data,
'copper': _copper_data,
'flag': _flag_data,
'gray' : _gray_data,
'hot': _hot_data,
'hsv': _hsv_data,
'jet' : _jet_data,
'pink': _pink_data,
'prism': _prism_data,
'spring': _spring_data,
'summer': _summer_data,
'winter': _winter_data,
'spectral': _spectral_data
}
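# Minimal sketch (illustrative only, not part of the original module): any
# entry of `datad` can be rebuilt into a colormap exactly as the named
# instances above were, e.g. with a custom LUT size.
if __name__ == "__main__":
    demo_cmap = colors.LinearSegmentedColormap('jet_demo', datad['jet'], 256)
    # calling a colormap with values in [0, 1] returns RGBA tuples
    print(demo_cmap([0.0, 0.25, 0.5, 0.75, 1.0]))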
# 34 colormaps based on color specifications and designs
# developed by Cynthia Brewer (http://colorbrewer.org).
# The ColorBrewer palettes have been included under the terms
of an Apache-style license (for details, see the file
# LICENSE_COLORBREWER in the license directory of the matplotlib
# source distribution).
_Accent_data = {'blue': [(0.0, 0.49803921580314636,
0.49803921580314636), (0.14285714285714285, 0.83137255907058716,
0.83137255907058716), (0.2857142857142857, 0.52549022436141968,
0.52549022436141968), (0.42857142857142855, 0.60000002384185791,
0.60000002384185791), (0.5714285714285714, 0.69019609689712524,
0.69019609689712524), (0.7142857142857143, 0.49803921580314636,
0.49803921580314636), (0.8571428571428571, 0.090196080505847931,
0.090196080505847931), (1.0, 0.40000000596046448,
0.40000000596046448)],
'green': [(0.0, 0.78823530673980713, 0.78823530673980713),
(0.14285714285714285, 0.68235296010971069, 0.68235296010971069),
(0.2857142857142857, 0.75294119119644165, 0.75294119119644165),
(0.42857142857142855, 1.0, 1.0), (0.5714285714285714,
0.42352941632270813, 0.42352941632270813), (0.7142857142857143,
0.0078431377187371254, 0.0078431377187371254),
(0.8571428571428571, 0.35686275362968445, 0.35686275362968445),
(1.0, 0.40000000596046448, 0.40000000596046448)],
'red': [(0.0, 0.49803921580314636, 0.49803921580314636),
(0.14285714285714285, 0.7450980544090271, 0.7450980544090271),
(0.2857142857142857, 0.99215686321258545, 0.99215686321258545),
(0.42857142857142855, 1.0, 1.0), (0.5714285714285714,
0.21960784494876862, 0.21960784494876862), (0.7142857142857143,
0.94117647409439087, 0.94117647409439087), (0.8571428571428571,
0.74901962280273438, 0.74901962280273438), (1.0,
0.40000000596046448, 0.40000000596046448)]}
_Blues_data = {'blue': [(0.0, 1.0, 1.0), (0.125, 0.9686274528503418,
0.9686274528503418), (0.25, 0.93725490570068359, 0.93725490570068359),
(0.375, 0.88235294818878174, 0.88235294818878174), (0.5,
0.83921569585800171, 0.83921569585800171), (0.625, 0.7764706015586853,
0.7764706015586853), (0.75, 0.70980393886566162, 0.70980393886566162),
(0.875, 0.61176472902297974, 0.61176472902297974), (1.0,
0.41960784792900085, 0.41960784792900085)],
'green': [(0.0, 0.9843137264251709, 0.9843137264251709), (0.125,
0.92156863212585449, 0.92156863212585449), (0.25,
0.85882353782653809, 0.85882353782653809), (0.375,
0.7921568751335144, 0.7921568751335144), (0.5,
0.68235296010971069, 0.68235296010971069), (0.625,
0.57254904508590698, 0.57254904508590698), (0.75,
0.44313725829124451, 0.44313725829124451), (0.875,
0.31764706969261169, 0.31764706969261169), (1.0,
0.18823529779911041, 0.18823529779911041)],
'red': [(0.0, 0.9686274528503418, 0.9686274528503418), (0.125,
0.87058824300765991, 0.87058824300765991), (0.25,
0.7764706015586853, 0.7764706015586853), (0.375,
0.61960786581039429, 0.61960786581039429), (0.5,
0.41960784792900085, 0.41960784792900085), (0.625,
0.25882354378700256, 0.25882354378700256), (0.75,
0.12941177189350128, 0.12941177189350128), (0.875,
0.031372550874948502, 0.031372550874948502), (1.0,
0.031372550874948502, 0.031372550874948502)]}
_BrBG_data = {'blue': [(0.0, 0.019607843831181526,
0.019607843831181526), (0.10000000000000001, 0.039215687662363052,
0.039215687662363052), (0.20000000000000001, 0.17647059261798859,
0.17647059261798859), (0.29999999999999999, 0.49019607901573181,
0.49019607901573181), (0.40000000000000002, 0.76470589637756348,
0.76470589637756348), (0.5, 0.96078431606292725, 0.96078431606292725),
(0.59999999999999998, 0.89803922176361084, 0.89803922176361084),
(0.69999999999999996, 0.75686275959014893, 0.75686275959014893),
(0.80000000000000004, 0.56078433990478516, 0.56078433990478516),
(0.90000000000000002, 0.36862745881080627, 0.36862745881080627), (1.0,
0.18823529779911041, 0.18823529779911041)],
'green': [(0.0, 0.18823529779911041, 0.18823529779911041),
(0.10000000000000001, 0.31764706969261169, 0.31764706969261169),
(0.20000000000000001, 0.5058823823928833, 0.5058823823928833),
(0.29999999999999999, 0.7607843279838562, 0.7607843279838562),
(0.40000000000000002, 0.90980392694473267, 0.90980392694473267),
(0.5, 0.96078431606292725, 0.96078431606292725),
(0.59999999999999998, 0.91764706373214722, 0.91764706373214722),
(0.69999999999999996, 0.80392158031463623, 0.80392158031463623),
(0.80000000000000004, 0.59215688705444336, 0.59215688705444336),
(0.90000000000000002, 0.40000000596046448, 0.40000000596046448),
(1.0, 0.23529411852359772, 0.23529411852359772)],
'red': [(0.0, 0.32941177487373352, 0.32941177487373352),
(0.10000000000000001, 0.54901963472366333, 0.54901963472366333),
(0.20000000000000001, 0.74901962280273438, 0.74901962280273438),
(0.29999999999999999, 0.87450981140136719, 0.87450981140136719),
(0.40000000000000002, 0.96470588445663452, 0.96470588445663452),
(0.5, 0.96078431606292725, 0.96078431606292725),
(0.59999999999999998, 0.78039216995239258, 0.78039216995239258),
(0.69999999999999996, 0.50196081399917603, 0.50196081399917603),
(0.80000000000000004, 0.20784313976764679, 0.20784313976764679),
(0.90000000000000002, 0.0039215688593685627,
0.0039215688593685627), (1.0, 0.0, 0.0)]}
_BuGn_data = {'blue': [(0.0, 0.99215686321258545,
0.99215686321258545), (0.125, 0.97647058963775635,
0.97647058963775635), (0.25, 0.90196079015731812,
0.90196079015731812), (0.375, 0.78823530673980713,
0.78823530673980713), (0.5, 0.64313727617263794, 0.64313727617263794),
(0.625, 0.46274510025978088, 0.46274510025978088), (0.75,
0.27058824896812439, 0.27058824896812439), (0.875,
0.17254902422428131, 0.17254902422428131), (1.0, 0.10588235408067703,
0.10588235408067703)],
'green': [(0.0, 0.98823529481887817, 0.98823529481887817), (0.125,
0.96078431606292725, 0.96078431606292725), (0.25,
0.92549020051956177, 0.92549020051956177), (0.375,
0.84705883264541626, 0.84705883264541626), (0.5,
0.7607843279838562, 0.7607843279838562), (0.625,
0.68235296010971069, 0.68235296010971069), (0.75,
0.54509806632995605, 0.54509806632995605), (0.875,
0.42745098471641541, 0.42745098471641541), (1.0,
0.26666668057441711, 0.26666668057441711)], 'red': [(0.0,
0.9686274528503418, 0.9686274528503418), (0.125,
0.89803922176361084, 0.89803922176361084), (0.25,
0.80000001192092896, 0.80000001192092896), (0.375,
0.60000002384185791, 0.60000002384185791), (0.5,
0.40000000596046448, 0.40000000596046448), (0.625,
0.25490197539329529, 0.25490197539329529), (0.75,
0.13725490868091583, 0.13725490868091583), (0.875, 0.0, 0.0),
(1.0, 0.0, 0.0)]}
_BuPu_data = {'blue': [(0.0, 0.99215686321258545,
0.99215686321258545), (0.125, 0.95686274766921997,
0.95686274766921997), (0.25, 0.90196079015731812,
0.90196079015731812), (0.375, 0.85490196943283081,
0.85490196943283081), (0.5, 0.7764706015586853, 0.7764706015586853),
(0.625, 0.69411766529083252, 0.69411766529083252), (0.75,
0.61568629741668701, 0.61568629741668701), (0.875,
0.48627451062202454, 0.48627451062202454), (1.0, 0.29411765933036804,
0.29411765933036804)],
'green': [(0.0, 0.98823529481887817, 0.98823529481887817), (0.125,
0.92549020051956177, 0.92549020051956177), (0.25,
0.82745099067687988, 0.82745099067687988), (0.375,
0.73725491762161255, 0.73725491762161255), (0.5,
0.58823531866073608, 0.58823531866073608), (0.625,
0.41960784792900085, 0.41960784792900085), (0.75,
0.25490197539329529, 0.25490197539329529), (0.875,
0.058823529630899429, 0.058823529630899429), (1.0, 0.0, 0.0)],
'red': [(0.0, 0.9686274528503418, 0.9686274528503418), (0.125,
0.87843137979507446, 0.87843137979507446), (0.25,
0.74901962280273438, 0.74901962280273438), (0.375,
0.61960786581039429, 0.61960786581039429), (0.5,
0.54901963472366333, 0.54901963472366333), (0.625,
0.54901963472366333, 0.54901963472366333), (0.75,
0.53333336114883423, 0.53333336114883423), (0.875,
0.5058823823928833, 0.5058823823928833), (1.0,
0.30196079611778259, 0.30196079611778259)]}
_Dark2_data = {'blue': [(0.0, 0.46666666865348816,
0.46666666865348816), (0.14285714285714285, 0.0078431377187371254,
0.0078431377187371254), (0.2857142857142857, 0.70196080207824707,
0.70196080207824707), (0.42857142857142855, 0.54117649793624878,
0.54117649793624878), (0.5714285714285714, 0.11764705926179886,
0.11764705926179886), (0.7142857142857143, 0.0078431377187371254,
0.0078431377187371254), (0.8571428571428571, 0.11372549086809158,
0.11372549086809158), (1.0, 0.40000000596046448,
0.40000000596046448)],
'green': [(0.0, 0.61960786581039429, 0.61960786581039429),
(0.14285714285714285, 0.37254902720451355, 0.37254902720451355),
(0.2857142857142857, 0.43921568989753723, 0.43921568989753723),
(0.42857142857142855, 0.16078431904315948, 0.16078431904315948),
(0.5714285714285714, 0.65098041296005249, 0.65098041296005249),
(0.7142857142857143, 0.67058825492858887, 0.67058825492858887),
(0.8571428571428571, 0.46274510025978088, 0.46274510025978088),
(1.0, 0.40000000596046448, 0.40000000596046448)],
'red': [(0.0, 0.10588235408067703, 0.10588235408067703),
(0.14285714285714285, 0.85098040103912354, 0.85098040103912354),
(0.2857142857142857, 0.45882353186607361, 0.45882353186607361),
(0.42857142857142855, 0.90588235855102539, 0.90588235855102539),
(0.5714285714285714, 0.40000000596046448, 0.40000000596046448),
(0.7142857142857143, 0.90196079015731812, 0.90196079015731812),
(0.8571428571428571, 0.65098041296005249, 0.65098041296005249),
(1.0, 0.40000000596046448, 0.40000000596046448)]}
_GnBu_data = {'blue': [(0.0, 0.94117647409439087,
0.94117647409439087), (0.125, 0.85882353782653809,
0.85882353782653809), (0.25, 0.77254903316497803,
0.77254903316497803), (0.375, 0.70980393886566162,
0.70980393886566162), (0.5, 0.76862746477127075, 0.76862746477127075),
(0.625, 0.82745099067687988, 0.82745099067687988), (0.75,
0.7450980544090271, 0.7450980544090271), (0.875, 0.67450982332229614,
0.67450982332229614), (1.0, 0.5058823823928833, 0.5058823823928833)],
'green': [(0.0, 0.98823529481887817, 0.98823529481887817), (0.125,
0.9529411792755127, 0.9529411792755127), (0.25,
0.92156863212585449, 0.92156863212585449), (0.375,
0.86666667461395264, 0.86666667461395264), (0.5,
0.80000001192092896, 0.80000001192092896), (0.625,
0.70196080207824707, 0.70196080207824707), (0.75,
0.54901963472366333, 0.54901963472366333), (0.875,
0.40784314274787903, 0.40784314274787903), (1.0,
0.25098040699958801, 0.25098040699958801)],
'red': [(0.0, 0.9686274528503418, 0.9686274528503418), (0.125,
0.87843137979507446, 0.87843137979507446), (0.25,
0.80000001192092896, 0.80000001192092896), (0.375,
0.65882354974746704, 0.65882354974746704), (0.5,
0.48235294222831726, 0.48235294222831726), (0.625,
0.30588236451148987, 0.30588236451148987), (0.75,
0.16862745583057404, 0.16862745583057404), (0.875,
0.031372550874948502, 0.031372550874948502), (1.0,
0.031372550874948502, 0.031372550874948502)]}
_Greens_data = {'blue': [(0.0, 0.96078431606292725,
0.96078431606292725), (0.125, 0.87843137979507446,
0.87843137979507446), (0.25, 0.75294119119644165,
0.75294119119644165), (0.375, 0.60784316062927246,
0.60784316062927246), (0.5, 0.46274510025978088, 0.46274510025978088),
(0.625, 0.364705890417099, 0.364705890417099), (0.75,
0.27058824896812439, 0.27058824896812439), (0.875,
0.17254902422428131, 0.17254902422428131), (1.0, 0.10588235408067703,
0.10588235408067703)],
'green': [(0.0, 0.98823529481887817, 0.98823529481887817), (0.125,
0.96078431606292725, 0.96078431606292725), (0.25,
0.91372549533843994, 0.91372549533843994), (0.375,
0.85098040103912354, 0.85098040103912354), (0.5,
0.76862746477127075, 0.76862746477127075), (0.625,
0.67058825492858887, 0.67058825492858887), (0.75,
0.54509806632995605, 0.54509806632995605), (0.875,
0.42745098471641541, 0.42745098471641541), (1.0,
0.26666668057441711, 0.26666668057441711)],
'red': [(0.0, 0.9686274528503418, 0.9686274528503418), (0.125,
0.89803922176361084, 0.89803922176361084), (0.25,
0.78039216995239258, 0.78039216995239258), (0.375,
0.63137257099151611, 0.63137257099151611), (0.5,
0.45490196347236633, 0.45490196347236633), (0.625,
0.25490197539329529, 0.25490197539329529), (0.75,
0.13725490868091583, 0.13725490868091583), (0.875, 0.0, 0.0),
(1.0, 0.0, 0.0)]}
_Greys_data = {'blue': [(0.0, 1.0, 1.0), (0.125, 0.94117647409439087,
0.94117647409439087), (0.25, 0.85098040103912354,
0.85098040103912354), (0.375, 0.74117648601531982,
0.74117648601531982), (0.5, 0.58823531866073608, 0.58823531866073608),
(0.625, 0.45098039507865906, 0.45098039507865906), (0.75,
0.32156863808631897, 0.32156863808631897), (0.875,
0.14509804546833038, 0.14509804546833038), (1.0, 0.0, 0.0)],
'green': [(0.0, 1.0, 1.0), (0.125, 0.94117647409439087,
0.94117647409439087), (0.25, 0.85098040103912354,
0.85098040103912354), (0.375, 0.74117648601531982,
0.74117648601531982), (0.5, 0.58823531866073608,
0.58823531866073608), (0.625, 0.45098039507865906,
0.45098039507865906), (0.75, 0.32156863808631897,
0.32156863808631897), (0.875, 0.14509804546833038,
0.14509804546833038), (1.0, 0.0, 0.0)],
'red': [(0.0, 1.0, 1.0), (0.125, 0.94117647409439087,
0.94117647409439087), (0.25, 0.85098040103912354,
0.85098040103912354), (0.375, 0.74117648601531982,
0.74117648601531982), (0.5, 0.58823531866073608,
0.58823531866073608), (0.625, 0.45098039507865906,
0.45098039507865906), (0.75, 0.32156863808631897,
0.32156863808631897), (0.875, 0.14509804546833038,
0.14509804546833038), (1.0, 0.0, 0.0)]}
_Oranges_data = {'blue': [(0.0, 0.92156863212585449,
0.92156863212585449), (0.125, 0.80784314870834351,
0.80784314870834351), (0.25, 0.63529413938522339,
0.63529413938522339), (0.375, 0.41960784792900085,
0.41960784792900085), (0.5, 0.23529411852359772, 0.23529411852359772),
(0.625, 0.074509806931018829, 0.074509806931018829), (0.75,
0.0039215688593685627, 0.0039215688593685627), (0.875,
0.011764706112444401, 0.011764706112444401), (1.0,
0.015686275437474251, 0.015686275437474251)],
'green': [(0.0, 0.96078431606292725, 0.96078431606292725), (0.125,
0.90196079015731812, 0.90196079015731812), (0.25,
0.81568628549575806, 0.81568628549575806), (0.375,
0.68235296010971069, 0.68235296010971069), (0.5,
0.55294120311737061, 0.55294120311737061), (0.625,
0.4117647111415863, 0.4117647111415863), (0.75,
0.28235295414924622, 0.28235295414924622), (0.875,
0.21176470816135406, 0.21176470816135406), (1.0,
0.15294118225574493, 0.15294118225574493)],
'red': [(0.0, 1.0, 1.0), (0.125, 0.99607843160629272,
0.99607843160629272), (0.25, 0.99215686321258545,
0.99215686321258545), (0.375, 0.99215686321258545,
0.99215686321258545), (0.5, 0.99215686321258545,
0.99215686321258545), (0.625, 0.94509804248809814,
0.94509804248809814), (0.75, 0.85098040103912354,
0.85098040103912354), (0.875, 0.65098041296005249,
0.65098041296005249), (1.0, 0.49803921580314636,
0.49803921580314636)]}
_OrRd_data = {'blue': [(0.0, 0.92549020051956177,
0.92549020051956177), (0.125, 0.78431373834609985,
0.78431373834609985), (0.25, 0.61960786581039429,
0.61960786581039429), (0.375, 0.51764708757400513,
0.51764708757400513), (0.5, 0.3490196168422699, 0.3490196168422699),
(0.625, 0.28235295414924622, 0.28235295414924622), (0.75,
0.12156862765550613, 0.12156862765550613), (0.875, 0.0, 0.0), (1.0,
0.0, 0.0)],
'green': [(0.0, 0.9686274528503418, 0.9686274528503418), (0.125,
0.90980392694473267, 0.90980392694473267), (0.25,
0.83137255907058716, 0.83137255907058716), (0.375,
0.73333334922790527, 0.73333334922790527), (0.5,
0.55294120311737061, 0.55294120311737061), (0.625,
0.3960784375667572, 0.3960784375667572), (0.75,
0.18823529779911041, 0.18823529779911041), (0.875, 0.0, 0.0),
(1.0, 0.0, 0.0)],
'red': [(0.0, 1.0, 1.0), (0.125, 0.99607843160629272,
0.99607843160629272), (0.25, 0.99215686321258545,
0.99215686321258545), (0.375, 0.99215686321258545,
0.99215686321258545), (0.5, 0.98823529481887817,
0.98823529481887817), (0.625, 0.93725490570068359,
0.93725490570068359), (0.75, 0.84313726425170898,
0.84313726425170898), (0.875, 0.70196080207824707,
0.70196080207824707), (1.0, 0.49803921580314636,
0.49803921580314636)]}
_Paired_data = {'blue': [(0.0, 0.89019608497619629,
0.89019608497619629), (0.090909090909090912, 0.70588237047195435,
0.70588237047195435), (0.18181818181818182, 0.54117649793624878,
0.54117649793624878), (0.27272727272727271, 0.17254902422428131,
0.17254902422428131), (0.36363636363636365, 0.60000002384185791,
0.60000002384185791), (0.45454545454545453, 0.10980392247438431,
0.10980392247438431), (0.54545454545454541, 0.43529412150382996,
0.43529412150382996), (0.63636363636363635, 0.0, 0.0),
(0.72727272727272729, 0.83921569585800171, 0.83921569585800171),
(0.81818181818181823, 0.60392159223556519, 0.60392159223556519),
(0.90909090909090906, 0.60000002384185791, 0.60000002384185791), (1.0,
0.15686275064945221, 0.15686275064945221)],
'green': [(0.0, 0.80784314870834351, 0.80784314870834351),
(0.090909090909090912, 0.47058823704719543, 0.47058823704719543),
(0.18181818181818182, 0.87450981140136719, 0.87450981140136719),
(0.27272727272727271, 0.62745100259780884, 0.62745100259780884),
(0.36363636363636365, 0.60392159223556519, 0.60392159223556519),
(0.45454545454545453, 0.10196078568696976, 0.10196078568696976),
(0.54545454545454541, 0.74901962280273438, 0.74901962280273438),
(0.63636363636363635, 0.49803921580314636, 0.49803921580314636),
(0.72727272727272729, 0.69803923368453979, 0.69803923368453979),
(0.81818181818181823, 0.23921568691730499, 0.23921568691730499),
(0.90909090909090906, 1.0, 1.0), (1.0, 0.3490196168422699,
0.3490196168422699)],
'red': [(0.0, 0.65098041296005249, 0.65098041296005249),
(0.090909090909090912, 0.12156862765550613, 0.12156862765550613),
(0.18181818181818182, 0.69803923368453979, 0.69803923368453979),
(0.27272727272727271, 0.20000000298023224, 0.20000000298023224),
(0.36363636363636365, 0.9843137264251709, 0.9843137264251709),
(0.45454545454545453, 0.89019608497619629, 0.89019608497619629),
(0.54545454545454541, 0.99215686321258545, 0.99215686321258545),
(0.63636363636363635, 1.0, 1.0), (0.72727272727272729,
0.7921568751335144, 0.7921568751335144), (0.81818181818181823,
0.41568627953529358, 0.41568627953529358), (0.90909090909090906,
1.0, 1.0), (1.0, 0.69411766529083252, 0.69411766529083252)]}
_Pastel1_data = {'blue': [(0.0, 0.68235296010971069,
0.68235296010971069), (0.125, 0.89019608497619629,
0.89019608497619629), (0.25, 0.77254903316497803,
0.77254903316497803), (0.375, 0.89411765336990356,
0.89411765336990356), (0.5, 0.65098041296005249, 0.65098041296005249),
(0.625, 0.80000001192092896, 0.80000001192092896), (0.75,
0.74117648601531982, 0.74117648601531982), (0.875,
0.92549020051956177, 0.92549020051956177), (1.0, 0.94901961088180542,
0.94901961088180542)],
'green': [(0.0, 0.70588237047195435, 0.70588237047195435), (0.125,
0.80392158031463623, 0.80392158031463623), (0.25,
0.92156863212585449, 0.92156863212585449), (0.375,
0.79607844352722168, 0.79607844352722168), (0.5,
0.85098040103912354, 0.85098040103912354), (0.625, 1.0, 1.0),
(0.75, 0.84705883264541626, 0.84705883264541626), (0.875,
0.85490196943283081, 0.85490196943283081), (1.0,
0.94901961088180542, 0.94901961088180542)],
'red': [(0.0, 0.9843137264251709, 0.9843137264251709), (0.125,
0.70196080207824707, 0.70196080207824707), (0.25,
0.80000001192092896, 0.80000001192092896), (0.375,
0.87058824300765991, 0.87058824300765991), (0.5,
0.99607843160629272, 0.99607843160629272), (0.625, 1.0, 1.0),
(0.75, 0.89803922176361084, 0.89803922176361084), (0.875,
0.99215686321258545, 0.99215686321258545), (1.0,
0.94901961088180542, 0.94901961088180542)]}
_Pastel2_data = {'blue': [(0.0, 0.80392158031463623,
0.80392158031463623), (0.14285714285714285, 0.67450982332229614,
0.67450982332229614), (0.2857142857142857, 0.90980392694473267,
0.90980392694473267), (0.42857142857142855, 0.89411765336990356,
0.89411765336990356), (0.5714285714285714, 0.78823530673980713,
0.78823530673980713), (0.7142857142857143, 0.68235296010971069,
0.68235296010971069), (0.8571428571428571, 0.80000001192092896,
0.80000001192092896), (1.0, 0.80000001192092896,
0.80000001192092896)],
'green': [(0.0, 0.88627451658248901, 0.88627451658248901),
(0.14285714285714285, 0.80392158031463623, 0.80392158031463623),
(0.2857142857142857, 0.83529412746429443, 0.83529412746429443),
(0.42857142857142855, 0.7921568751335144, 0.7921568751335144),
(0.5714285714285714, 0.96078431606292725, 0.96078431606292725),
(0.7142857142857143, 0.94901961088180542, 0.94901961088180542),
(0.8571428571428571, 0.88627451658248901, 0.88627451658248901),
(1.0, 0.80000001192092896, 0.80000001192092896)],
'red': [(0.0, 0.70196080207824707, 0.70196080207824707),
(0.14285714285714285, 0.99215686321258545, 0.99215686321258545),
(0.2857142857142857, 0.79607844352722168, 0.79607844352722168),
(0.42857142857142855, 0.95686274766921997, 0.95686274766921997),
(0.5714285714285714, 0.90196079015731812, 0.90196079015731812),
(0.7142857142857143, 1.0, 1.0), (0.8571428571428571,
0.94509804248809814, 0.94509804248809814), (1.0,
0.80000001192092896, 0.80000001192092896)]}
_PiYG_data = {'blue': [(0.0, 0.32156863808631897,
0.32156863808631897), (0.10000000000000001, 0.49019607901573181,
0.49019607901573181), (0.20000000000000001, 0.68235296010971069,
0.68235296010971069), (0.29999999999999999, 0.85490196943283081,
0.85490196943283081), (0.40000000000000002, 0.93725490570068359,
0.93725490570068359), (0.5, 0.9686274528503418, 0.9686274528503418),
(0.59999999999999998, 0.81568628549575806, 0.81568628549575806),
(0.69999999999999996, 0.52549022436141968, 0.52549022436141968),
(0.80000000000000004, 0.25490197539329529, 0.25490197539329529),
(0.90000000000000002, 0.12941177189350128, 0.12941177189350128), (1.0,
0.098039217293262482, 0.098039217293262482)],
'green': [(0.0, 0.0039215688593685627, 0.0039215688593685627),
(0.10000000000000001, 0.10588235408067703, 0.10588235408067703),
(0.20000000000000001, 0.46666666865348816, 0.46666666865348816),
(0.29999999999999999, 0.7137255072593689, 0.7137255072593689),
(0.40000000000000002, 0.87843137979507446, 0.87843137979507446),
(0.5, 0.9686274528503418, 0.9686274528503418),
(0.59999999999999998, 0.96078431606292725, 0.96078431606292725),
(0.69999999999999996, 0.88235294818878174, 0.88235294818878174),
(0.80000000000000004, 0.73725491762161255, 0.73725491762161255),
(0.90000000000000002, 0.57254904508590698, 0.57254904508590698),
(1.0, 0.39215686917304993, 0.39215686917304993)],
'red': [(0.0, 0.55686277151107788, 0.55686277151107788),
(0.10000000000000001, 0.77254903316497803, 0.77254903316497803),
(0.20000000000000001, 0.87058824300765991, 0.87058824300765991),
(0.29999999999999999, 0.94509804248809814, 0.94509804248809814),
(0.40000000000000002, 0.99215686321258545, 0.99215686321258545),
(0.5, 0.9686274528503418, 0.9686274528503418),
(0.59999999999999998, 0.90196079015731812, 0.90196079015731812),
(0.69999999999999996, 0.72156864404678345, 0.72156864404678345),
(0.80000000000000004, 0.49803921580314636, 0.49803921580314636),
(0.90000000000000002, 0.30196079611778259, 0.30196079611778259),
(1.0, 0.15294118225574493, 0.15294118225574493)]}
_PRGn_data = {'blue': [(0.0, 0.29411765933036804,
0.29411765933036804), (0.10000000000000001, 0.51372551918029785,
0.51372551918029785), (0.20000000000000001, 0.67058825492858887,
0.67058825492858887), (0.29999999999999999, 0.81176471710205078,
0.81176471710205078), (0.40000000000000002, 0.90980392694473267,
0.90980392694473267), (0.5, 0.9686274528503418, 0.9686274528503418),
(0.59999999999999998, 0.82745099067687988, 0.82745099067687988),
(0.69999999999999996, 0.62745100259780884, 0.62745100259780884),
(0.80000000000000004, 0.3803921639919281, 0.3803921639919281),
(0.90000000000000002, 0.21568627655506134, 0.21568627655506134), (1.0,
0.10588235408067703, 0.10588235408067703)],
'green': [(0.0, 0.0, 0.0), (0.10000000000000001,
0.16470588743686676, 0.16470588743686676), (0.20000000000000001,
0.43921568989753723, 0.43921568989753723), (0.29999999999999999,
0.64705884456634521, 0.64705884456634521), (0.40000000000000002,
0.83137255907058716, 0.83137255907058716), (0.5,
0.9686274528503418, 0.9686274528503418), (0.59999999999999998,
0.94117647409439087, 0.94117647409439087), (0.69999999999999996,
0.85882353782653809, 0.85882353782653809), (0.80000000000000004,
0.68235296010971069, 0.68235296010971069), (0.90000000000000002,
0.47058823704719543, 0.47058823704719543), (1.0,
0.26666668057441711, 0.26666668057441711)],
'red': [(0.0, 0.25098040699958801, 0.25098040699958801),
(0.10000000000000001, 0.46274510025978088, 0.46274510025978088),
(0.20000000000000001, 0.60000002384185791, 0.60000002384185791),
(0.29999999999999999, 0.7607843279838562, 0.7607843279838562),
(0.40000000000000002, 0.90588235855102539, 0.90588235855102539),
(0.5, 0.9686274528503418, 0.9686274528503418),
(0.59999999999999998, 0.85098040103912354, 0.85098040103912354),
(0.69999999999999996, 0.65098041296005249, 0.65098041296005249),
(0.80000000000000004, 0.35294118523597717, 0.35294118523597717),
(0.90000000000000002, 0.10588235408067703, 0.10588235408067703),
(1.0, 0.0, 0.0)]}
_PuBu_data = {'blue': [(0.0, 0.9843137264251709, 0.9843137264251709),
(0.125, 0.94901961088180542, 0.94901961088180542), (0.25,
0.90196079015731812, 0.90196079015731812), (0.375,
0.85882353782653809, 0.85882353782653809), (0.5, 0.81176471710205078,
0.81176471710205078), (0.625, 0.75294119119644165,
0.75294119119644165), (0.75, 0.69019609689712524,
0.69019609689712524), (0.875, 0.55294120311737061,
0.55294120311737061), (1.0, 0.34509804844856262,
0.34509804844856262)],
'green': [(0.0, 0.9686274528503418, 0.9686274528503418), (0.125,
0.90588235855102539, 0.90588235855102539), (0.25,
0.81960785388946533, 0.81960785388946533), (0.375,
0.74117648601531982, 0.74117648601531982), (0.5,
0.66274511814117432, 0.66274511814117432), (0.625,
0.56470590829849243, 0.56470590829849243), (0.75,
0.43921568989753723, 0.43921568989753723), (0.875,
0.35294118523597717, 0.35294118523597717), (1.0,
0.21960784494876862, 0.21960784494876862)],
'red': [(0.0, 1.0, 1.0), (0.125, 0.92549020051956177,
0.92549020051956177), (0.25, 0.81568628549575806,
0.81568628549575806), (0.375, 0.65098041296005249,
0.65098041296005249), (0.5, 0.45490196347236633,
0.45490196347236633), (0.625, 0.21176470816135406,
0.21176470816135406), (0.75, 0.019607843831181526,
0.019607843831181526), (0.875, 0.015686275437474251,
0.015686275437474251), (1.0, 0.0078431377187371254,
0.0078431377187371254)]}
_PuBuGn_data = {'blue': [(0.0, 0.9843137264251709,
0.9843137264251709), (0.125, 0.94117647409439087,
0.94117647409439087), (0.25, 0.90196079015731812,
0.90196079015731812), (0.375, 0.85882353782653809,
0.85882353782653809), (0.5, 0.81176471710205078, 0.81176471710205078),
(0.625, 0.75294119119644165, 0.75294119119644165), (0.75,
0.54117649793624878, 0.54117649793624878), (0.875, 0.3490196168422699,
0.3490196168422699), (1.0, 0.21176470816135406, 0.21176470816135406)],
'green': [(0.0, 0.9686274528503418, 0.9686274528503418), (0.125,
0.88627451658248901, 0.88627451658248901), (0.25,
0.81960785388946533, 0.81960785388946533), (0.375,
0.74117648601531982, 0.74117648601531982), (0.5,
0.66274511814117432, 0.66274511814117432), (0.625,
0.56470590829849243, 0.56470590829849243), (0.75,
0.5058823823928833, 0.5058823823928833), (0.875,
0.42352941632270813, 0.42352941632270813), (1.0,
0.27450981736183167, 0.27450981736183167)],
'red': [(0.0, 1.0, 1.0), (0.125, 0.92549020051956177,
0.92549020051956177), (0.25, 0.81568628549575806,
0.81568628549575806), (0.375, 0.65098041296005249,
0.65098041296005249), (0.5, 0.40392157435417175,
0.40392157435417175), (0.625, 0.21176470816135406,
0.21176470816135406), (0.75, 0.0078431377187371254,
0.0078431377187371254), (0.875, 0.0039215688593685627,
0.0039215688593685627), (1.0, 0.0039215688593685627,
0.0039215688593685627)]}
_PuOr_data = {'blue': [(0.0, 0.031372550874948502,
0.031372550874948502), (0.10000000000000001, 0.023529412224888802,
0.023529412224888802), (0.20000000000000001, 0.078431375324726105,
0.078431375324726105), (0.29999999999999999, 0.38823530077934265,
0.38823530077934265), (0.40000000000000002, 0.7137255072593689,
0.7137255072593689), (0.5, 0.9686274528503418, 0.9686274528503418),
(0.59999999999999998, 0.92156863212585449, 0.92156863212585449),
(0.69999999999999996, 0.82352942228317261, 0.82352942228317261),
(0.80000000000000004, 0.67450982332229614, 0.67450982332229614),
(0.90000000000000002, 0.53333336114883423, 0.53333336114883423), (1.0,
0.29411765933036804, 0.29411765933036804)],
'green': [(0.0, 0.23137255012989044, 0.23137255012989044),
(0.10000000000000001, 0.34509804844856262, 0.34509804844856262),
(0.20000000000000001, 0.50980395078659058, 0.50980395078659058),
(0.29999999999999999, 0.72156864404678345, 0.72156864404678345),
(0.40000000000000002, 0.87843137979507446, 0.87843137979507446),
(0.5, 0.9686274528503418, 0.9686274528503418),
(0.59999999999999998, 0.85490196943283081, 0.85490196943283081),
(0.69999999999999996, 0.67058825492858887, 0.67058825492858887),
(0.80000000000000004, 0.45098039507865906, 0.45098039507865906),
(0.90000000000000002, 0.15294118225574493, 0.15294118225574493),
(1.0, 0.0, 0.0)],
'red': [(0.0, 0.49803921580314636, 0.49803921580314636),
(0.10000000000000001, 0.70196080207824707, 0.70196080207824707),
(0.20000000000000001, 0.87843137979507446, 0.87843137979507446),
(0.29999999999999999, 0.99215686321258545, 0.99215686321258545),
(0.40000000000000002, 0.99607843160629272, 0.99607843160629272),
(0.5, 0.9686274528503418, 0.9686274528503418),
(0.59999999999999998, 0.84705883264541626, 0.84705883264541626),
(0.69999999999999996, 0.69803923368453979, 0.69803923368453979),
(0.80000000000000004, 0.50196081399917603, 0.50196081399917603),
(0.90000000000000002, 0.32941177487373352, 0.32941177487373352),
(1.0, 0.17647059261798859, 0.17647059261798859)]}
_PuRd_data = {'blue': [(0.0, 0.97647058963775635,
0.97647058963775635), (0.125, 0.93725490570068359,
0.93725490570068359), (0.25, 0.85490196943283081,
0.85490196943283081), (0.375, 0.78039216995239258,
0.78039216995239258), (0.5, 0.69019609689712524, 0.69019609689712524),
(0.625, 0.54117649793624878, 0.54117649793624878), (0.75,
0.33725491166114807, 0.33725491166114807), (0.875,
0.26274511218070984, 0.26274511218070984), (1.0, 0.12156862765550613,
0.12156862765550613)],
'green': [(0.0, 0.95686274766921997, 0.95686274766921997), (0.125,
0.88235294818878174, 0.88235294818878174), (0.25,
0.72549021244049072, 0.72549021244049072), (0.375,
0.58039218187332153, 0.58039218187332153), (0.5,
0.3960784375667572, 0.3960784375667572), (0.625,
0.16078431904315948, 0.16078431904315948), (0.75,
0.070588238537311554, 0.070588238537311554), (0.875, 0.0, 0.0),
(1.0, 0.0, 0.0)],
'red': [(0.0, 0.9686274528503418, 0.9686274528503418), (0.125,
0.90588235855102539, 0.90588235855102539), (0.25,
0.83137255907058716, 0.83137255907058716), (0.375,
0.78823530673980713, 0.78823530673980713), (0.5,
0.87450981140136719, 0.87450981140136719), (0.625,
0.90588235855102539, 0.90588235855102539), (0.75,
0.80784314870834351, 0.80784314870834351), (0.875,
0.59607845544815063, 0.59607845544815063), (1.0,
0.40392157435417175, 0.40392157435417175)]}
_Purples_data = {'blue': [(0.0, 0.99215686321258545,
0.99215686321258545), (0.125, 0.96078431606292725,
0.96078431606292725), (0.25, 0.92156863212585449,
0.92156863212585449), (0.375, 0.86274510622024536,
0.86274510622024536), (0.5, 0.78431373834609985, 0.78431373834609985),
(0.625, 0.729411780834198, 0.729411780834198), (0.75,
0.63921570777893066, 0.63921570777893066), (0.875,
0.56078433990478516, 0.56078433990478516), (1.0, 0.49019607901573181,
0.49019607901573181)],
'green': [(0.0, 0.9843137264251709, 0.9843137264251709), (0.125,
0.92941176891326904, 0.92941176891326904), (0.25,
0.85490196943283081, 0.85490196943283081), (0.375,
0.74117648601531982, 0.74117648601531982), (0.5,
0.60392159223556519, 0.60392159223556519), (0.625,
0.49019607901573181, 0.49019607901573181), (0.75,
0.31764706969261169, 0.31764706969261169), (0.875,
0.15294118225574493, 0.15294118225574493), (1.0, 0.0, 0.0)],
'red': [(0.0, 0.98823529481887817, 0.98823529481887817), (0.125,
0.93725490570068359, 0.93725490570068359), (0.25,
0.85490196943283081, 0.85490196943283081), (0.375,
0.73725491762161255, 0.73725491762161255), (0.5,
0.61960786581039429, 0.61960786581039429), (0.625,
0.50196081399917603, 0.50196081399917603), (0.75,
0.41568627953529358, 0.41568627953529358), (0.875,
0.32941177487373352, 0.32941177487373352), (1.0,
0.24705882370471954, 0.24705882370471954)]}
_RdBu_data = {'blue': [(0.0, 0.12156862765550613,
0.12156862765550613), (0.10000000000000001, 0.16862745583057404,
0.16862745583057404), (0.20000000000000001, 0.30196079611778259,
0.30196079611778259), (0.29999999999999999, 0.50980395078659058,
0.50980395078659058), (0.40000000000000002, 0.78039216995239258,
0.78039216995239258), (0.5, 0.9686274528503418, 0.9686274528503418),
(0.59999999999999998, 0.94117647409439087, 0.94117647409439087),
(0.69999999999999996, 0.87058824300765991, 0.87058824300765991),
(0.80000000000000004, 0.76470589637756348, 0.76470589637756348),
(0.90000000000000002, 0.67450982332229614, 0.67450982332229614), (1.0,
0.3803921639919281, 0.3803921639919281)],
'green': [(0.0, 0.0, 0.0), (0.10000000000000001,
0.094117648899555206, 0.094117648899555206), (0.20000000000000001,
0.37647059559822083, 0.37647059559822083), (0.29999999999999999,
0.64705884456634521, 0.64705884456634521), (0.40000000000000002,
0.85882353782653809, 0.85882353782653809), (0.5,
0.9686274528503418, 0.9686274528503418), (0.59999999999999998,
0.89803922176361084, 0.89803922176361084), (0.69999999999999996,
0.77254903316497803, 0.77254903316497803), (0.80000000000000004,
0.57647061347961426, 0.57647061347961426), (0.90000000000000002,
0.40000000596046448, 0.40000000596046448), (1.0,
0.18823529779911041, 0.18823529779911041)],
'red': [(0.0, 0.40392157435417175, 0.40392157435417175),
(0.10000000000000001, 0.69803923368453979, 0.69803923368453979),
(0.20000000000000001, 0.83921569585800171, 0.83921569585800171),
(0.29999999999999999, 0.95686274766921997, 0.95686274766921997),
(0.40000000000000002, 0.99215686321258545, 0.99215686321258545),
(0.5, 0.9686274528503418, 0.9686274528503418),
(0.59999999999999998, 0.81960785388946533, 0.81960785388946533),
(0.69999999999999996, 0.57254904508590698, 0.57254904508590698),
(0.80000000000000004, 0.26274511218070984, 0.26274511218070984),
(0.90000000000000002, 0.12941177189350128, 0.12941177189350128),
(1.0, 0.019607843831181526, 0.019607843831181526)]}
_RdGy_data = {'blue': [(0.0, 0.12156862765550613,
0.12156862765550613), (0.10000000000000001, 0.16862745583057404,
0.16862745583057404), (0.20000000000000001, 0.30196079611778259,
0.30196079611778259), (0.29999999999999999, 0.50980395078659058,
0.50980395078659058), (0.40000000000000002, 0.78039216995239258,
0.78039216995239258), (0.5, 1.0, 1.0), (0.59999999999999998,
0.87843137979507446, 0.87843137979507446), (0.69999999999999996,
0.729411780834198, 0.729411780834198), (0.80000000000000004,
0.52941179275512695, 0.52941179275512695), (0.90000000000000002,
0.30196079611778259, 0.30196079611778259), (1.0, 0.10196078568696976,
0.10196078568696976)],
'green': [(0.0, 0.0, 0.0), (0.10000000000000001,
0.094117648899555206, 0.094117648899555206), (0.20000000000000001,
0.37647059559822083, 0.37647059559822083), (0.29999999999999999,
0.64705884456634521, 0.64705884456634521), (0.40000000000000002,
0.85882353782653809, 0.85882353782653809), (0.5, 1.0, 1.0),
(0.59999999999999998, 0.87843137979507446, 0.87843137979507446),
(0.69999999999999996, 0.729411780834198, 0.729411780834198),
(0.80000000000000004, 0.52941179275512695, 0.52941179275512695),
(0.90000000000000002, 0.30196079611778259, 0.30196079611778259),
(1.0, 0.10196078568696976, 0.10196078568696976)],
'red': [(0.0, 0.40392157435417175, 0.40392157435417175),
(0.10000000000000001, 0.69803923368453979, 0.69803923368453979),
(0.20000000000000001, 0.83921569585800171, 0.83921569585800171),
(0.29999999999999999, 0.95686274766921997, 0.95686274766921997),
(0.40000000000000002, 0.99215686321258545, 0.99215686321258545),
(0.5, 1.0, 1.0), (0.59999999999999998, 0.87843137979507446,
0.87843137979507446), (0.69999999999999996, 0.729411780834198,
0.729411780834198), (0.80000000000000004, 0.52941179275512695,
0.52941179275512695), (0.90000000000000002, 0.30196079611778259,
0.30196079611778259), (1.0, 0.10196078568696976,
0.10196078568696976)]}
_RdPu_data = {'blue': [(0.0, 0.9529411792755127, 0.9529411792755127),
(0.125, 0.86666667461395264, 0.86666667461395264), (0.25,
0.75294119119644165, 0.75294119119644165), (0.375,
0.70980393886566162, 0.70980393886566162), (0.5, 0.63137257099151611,
0.63137257099151611), (0.625, 0.59215688705444336,
0.59215688705444336), (0.75, 0.49411764740943909,
0.49411764740943909), (0.875, 0.46666666865348816,
0.46666666865348816), (1.0, 0.41568627953529358,
0.41568627953529358)],
'green': [(0.0, 0.9686274528503418, 0.9686274528503418), (0.125,
0.87843137979507446, 0.87843137979507446), (0.25,
0.77254903316497803, 0.77254903316497803), (0.375,
0.62352943420410156, 0.62352943420410156), (0.5,
0.40784314274787903, 0.40784314274787903), (0.625,
0.20392157137393951, 0.20392157137393951), (0.75,
0.0039215688593685627, 0.0039215688593685627), (0.875,
0.0039215688593685627, 0.0039215688593685627), (1.0, 0.0, 0.0)],
'red': [(0.0, 1.0, 1.0), (0.125, 0.99215686321258545,
0.99215686321258545), (0.25, 0.98823529481887817,
0.98823529481887817), (0.375, 0.98039215803146362,
0.98039215803146362), (0.5, 0.9686274528503418,
0.9686274528503418), (0.625, 0.86666667461395264,
0.86666667461395264), (0.75, 0.68235296010971069,
0.68235296010971069), (0.875, 0.47843137383460999,
0.47843137383460999), (1.0, 0.28627452254295349,
0.28627452254295349)]}
_RdYlBu_data = {'blue': [(0.0, 0.14901961386203766,
0.14901961386203766), (0.10000000149011612,
0.15294118225574493, 0.15294118225574493),
(0.20000000298023224, 0.26274511218070984,
0.26274511218070984), (0.30000001192092896,
0.3803921639919281, 0.3803921639919281),
(0.40000000596046448, 0.56470590829849243,
0.56470590829849243), (0.5, 0.74901962280273438,
0.74901962280273438), (0.60000002384185791,
0.97254902124404907, 0.97254902124404907),
(0.69999998807907104, 0.91372549533843994,
0.91372549533843994), (0.80000001192092896,
0.81960785388946533, 0.81960785388946533),
(0.89999997615814209, 0.70588237047195435,
0.70588237047195435), (1.0, 0.58431375026702881,
0.58431375026702881)], 'green': [(0.0, 0.0, 0.0),
(0.10000000149011612, 0.18823529779911041,
0.18823529779911041), (0.20000000298023224,
0.42745098471641541, 0.42745098471641541),
(0.30000001192092896, 0.68235296010971069,
0.68235296010971069), (0.40000000596046448,
0.87843137979507446, 0.87843137979507446), (0.5, 1.0,
1.0), (0.60000002384185791, 0.9529411792755127,
0.9529411792755127), (0.69999998807907104,
0.85098040103912354, 0.85098040103912354),
(0.80000001192092896, 0.67843139171600342,
0.67843139171600342), (0.89999997615814209,
0.45882353186607361, 0.45882353186607361), (1.0,
0.21176470816135406, 0.21176470816135406)], 'red':
[(0.0, 0.64705884456634521, 0.64705884456634521),
(0.10000000149011612, 0.84313726425170898,
0.84313726425170898), (0.20000000298023224,
0.95686274766921997, 0.95686274766921997),
(0.30000001192092896, 0.99215686321258545,
0.99215686321258545), (0.40000000596046448,
0.99607843160629272, 0.99607843160629272), (0.5, 1.0,
1.0), (0.60000002384185791, 0.87843137979507446,
0.87843137979507446), (0.69999998807907104,
0.67058825492858887, 0.67058825492858887),
(0.80000001192092896, 0.45490196347236633,
0.45490196347236633), (0.89999997615814209,
0.27058824896812439, 0.27058824896812439), (1.0,
0.19215686619281769, 0.19215686619281769)]}
_RdYlGn_data = {'blue': [(0.0, 0.14901961386203766,
0.14901961386203766), (0.10000000000000001, 0.15294118225574493,
0.15294118225574493), (0.20000000000000001, 0.26274511218070984,
0.26274511218070984), (0.29999999999999999, 0.3803921639919281,
0.3803921639919281), (0.40000000000000002, 0.54509806632995605,
0.54509806632995605), (0.5, 0.74901962280273438, 0.74901962280273438),
(0.59999999999999998, 0.54509806632995605, 0.54509806632995605),
(0.69999999999999996, 0.41568627953529358, 0.41568627953529358),
(0.80000000000000004, 0.38823530077934265, 0.38823530077934265),
(0.90000000000000002, 0.31372550129890442, 0.31372550129890442), (1.0,
0.21568627655506134, 0.21568627655506134)],
'green': [(0.0, 0.0, 0.0), (0.10000000000000001,
0.18823529779911041, 0.18823529779911041), (0.20000000000000001,
0.42745098471641541, 0.42745098471641541), (0.29999999999999999,
0.68235296010971069, 0.68235296010971069), (0.40000000000000002,
0.87843137979507446, 0.87843137979507446), (0.5, 1.0, 1.0),
(0.59999999999999998, 0.93725490570068359, 0.93725490570068359),
(0.69999999999999996, 0.85098040103912354, 0.85098040103912354),
(0.80000000000000004, 0.74117648601531982, 0.74117648601531982),
(0.90000000000000002, 0.59607845544815063, 0.59607845544815063),
(1.0, 0.40784314274787903, 0.40784314274787903)],
'red': [(0.0, 0.64705884456634521, 0.64705884456634521),
(0.10000000000000001, 0.84313726425170898, 0.84313726425170898),
(0.20000000000000001, 0.95686274766921997, 0.95686274766921997),
(0.29999999999999999, 0.99215686321258545, 0.99215686321258545),
(0.40000000000000002, 0.99607843160629272, 0.99607843160629272),
(0.5, 1.0, 1.0), (0.59999999999999998, 0.85098040103912354,
0.85098040103912354), (0.69999999999999996, 0.65098041296005249,
0.65098041296005249), (0.80000000000000004, 0.40000000596046448,
0.40000000596046448), (0.90000000000000002, 0.10196078568696976,
0.10196078568696976), (1.0, 0.0, 0.0)]}
_Reds_data = {'blue': [(0.0, 0.94117647409439087,
0.94117647409439087), (0.125, 0.82352942228317261,
0.82352942228317261), (0.25, 0.63137257099151611,
0.63137257099151611), (0.375, 0.44705882668495178,
0.44705882668495178), (0.5, 0.29019609093666077, 0.29019609093666077),
(0.625, 0.17254902422428131, 0.17254902422428131), (0.75,
0.11372549086809158, 0.11372549086809158), (0.875,
0.08235294371843338, 0.08235294371843338), (1.0, 0.050980392843484879,
0.050980392843484879)],
'green': [(0.0, 0.96078431606292725, 0.96078431606292725), (0.125,
0.87843137979507446, 0.87843137979507446), (0.25,
0.73333334922790527, 0.73333334922790527), (0.375,
0.57254904508590698, 0.57254904508590698), (0.5,
0.41568627953529358, 0.41568627953529358), (0.625,
0.23137255012989044, 0.23137255012989044), (0.75,
0.094117648899555206, 0.094117648899555206), (0.875,
0.058823529630899429, 0.058823529630899429), (1.0, 0.0, 0.0)],
'red': [(0.0, 1.0, 1.0), (0.125, 0.99607843160629272,
0.99607843160629272), (0.25, 0.98823529481887817,
0.98823529481887817), (0.375, 0.98823529481887817,
0.98823529481887817), (0.5, 0.9843137264251709,
0.9843137264251709), (0.625, 0.93725490570068359,
0.93725490570068359), (0.75, 0.79607844352722168,
0.79607844352722168), (0.875, 0.64705884456634521,
0.64705884456634521), (1.0, 0.40392157435417175,
0.40392157435417175)]}
_Set1_data = {'blue': [(0.0, 0.10980392247438431,
0.10980392247438431), (0.125, 0.72156864404678345,
0.72156864404678345), (0.25, 0.29019609093666077,
0.29019609093666077), (0.375, 0.63921570777893066,
0.63921570777893066), (0.5, 0.0, 0.0), (0.625, 0.20000000298023224,
0.20000000298023224), (0.75, 0.15686275064945221,
0.15686275064945221), (0.875, 0.74901962280273438,
0.74901962280273438), (1.0, 0.60000002384185791,
0.60000002384185791)],
'green': [(0.0, 0.10196078568696976, 0.10196078568696976), (0.125,
0.49411764740943909, 0.49411764740943909), (0.25,
0.68627452850341797, 0.68627452850341797), (0.375,
0.30588236451148987, 0.30588236451148987), (0.5,
0.49803921580314636, 0.49803921580314636), (0.625, 1.0, 1.0),
(0.75, 0.33725491166114807, 0.33725491166114807), (0.875,
0.5058823823928833, 0.5058823823928833), (1.0,
0.60000002384185791, 0.60000002384185791)],
'red': [(0.0, 0.89411765336990356, 0.89411765336990356), (0.125,
0.21568627655506134, 0.21568627655506134), (0.25,
0.30196079611778259, 0.30196079611778259), (0.375,
0.59607845544815063, 0.59607845544815063), (0.5, 1.0, 1.0),
(0.625, 1.0, 1.0), (0.75, 0.65098041296005249,
0.65098041296005249), (0.875, 0.9686274528503418,
0.9686274528503418), (1.0, 0.60000002384185791,
0.60000002384185791)]}
_Set2_data = {'blue': [(0.0, 0.64705884456634521,
0.64705884456634521), (0.14285714285714285, 0.38431373238563538,
0.38431373238563538), (0.2857142857142857, 0.79607844352722168,
0.79607844352722168), (0.42857142857142855, 0.76470589637756348,
0.76470589637756348), (0.5714285714285714, 0.32941177487373352,
0.32941177487373352), (0.7142857142857143, 0.18431372940540314,
0.18431372940540314), (0.8571428571428571, 0.58039218187332153,
0.58039218187332153), (1.0, 0.70196080207824707,
0.70196080207824707)],
'green': [(0.0, 0.7607843279838562, 0.7607843279838562),
(0.14285714285714285, 0.55294120311737061, 0.55294120311737061),
(0.2857142857142857, 0.62745100259780884, 0.62745100259780884),
(0.42857142857142855, 0.54117649793624878, 0.54117649793624878),
(0.5714285714285714, 0.84705883264541626, 0.84705883264541626),
(0.7142857142857143, 0.85098040103912354, 0.85098040103912354),
(0.8571428571428571, 0.76862746477127075, 0.76862746477127075),
(1.0, 0.70196080207824707, 0.70196080207824707)],
'red': [(0.0, 0.40000000596046448, 0.40000000596046448),
(0.14285714285714285, 0.98823529481887817, 0.98823529481887817),
(0.2857142857142857, 0.55294120311737061, 0.55294120311737061),
(0.42857142857142855, 0.90588235855102539, 0.90588235855102539),
(0.5714285714285714, 0.65098041296005249, 0.65098041296005249),
(0.7142857142857143, 1.0, 1.0), (0.8571428571428571,
0.89803922176361084, 0.89803922176361084), (1.0,
0.70196080207824707, 0.70196080207824707)]}
_Set3_data = {'blue': [(0.0, 0.78039216995239258,
0.78039216995239258), (0.090909090909090912, 0.70196080207824707,
0.70196080207824707), (0.18181818181818182, 0.85490196943283081,
0.85490196943283081), (0.27272727272727271, 0.44705882668495178,
0.44705882668495178), (0.36363636363636365, 0.82745099067687988,
0.82745099067687988), (0.45454545454545453, 0.38431373238563538,
0.38431373238563538), (0.54545454545454541, 0.4117647111415863,
0.4117647111415863), (0.63636363636363635, 0.89803922176361084,
0.89803922176361084), (0.72727272727272729, 0.85098040103912354,
0.85098040103912354), (0.81818181818181823, 0.74117648601531982,
0.74117648601531982), (0.90909090909090906, 0.77254903316497803,
0.77254903316497803), (1.0, 0.43529412150382996,
0.43529412150382996)],
'green': [(0.0, 0.82745099067687988, 0.82745099067687988),
(0.090909090909090912, 1.0, 1.0), (0.18181818181818182,
0.729411780834198, 0.729411780834198), (0.27272727272727271,
0.50196081399917603, 0.50196081399917603), (0.36363636363636365,
0.69411766529083252, 0.69411766529083252), (0.45454545454545453,
0.70588237047195435, 0.70588237047195435), (0.54545454545454541,
0.87058824300765991, 0.87058824300765991), (0.63636363636363635,
0.80392158031463623, 0.80392158031463623), (0.72727272727272729,
0.85098040103912354, 0.85098040103912354), (0.81818181818181823,
0.50196081399917603, 0.50196081399917603), (0.90909090909090906,
0.92156863212585449, 0.92156863212585449), (1.0,
0.92941176891326904, 0.92941176891326904)],
'red': [(0.0, 0.55294120311737061, 0.55294120311737061),
(0.090909090909090912, 1.0, 1.0), (0.18181818181818182,
0.7450980544090271, 0.7450980544090271), (0.27272727272727271,
0.9843137264251709, 0.9843137264251709), (0.36363636363636365,
0.50196081399917603, 0.50196081399917603), (0.45454545454545453,
0.99215686321258545, 0.99215686321258545), (0.54545454545454541,
0.70196080207824707, 0.70196080207824707), (0.63636363636363635,
0.98823529481887817, 0.98823529481887817), (0.72727272727272729,
0.85098040103912354, 0.85098040103912354), (0.81818181818181823,
0.73725491762161255, 0.73725491762161255), (0.90909090909090906,
0.80000001192092896, 0.80000001192092896), (1.0, 1.0, 1.0)]}
_Spectral_data = {'blue': [(0.0, 0.25882354378700256,
0.25882354378700256), (0.10000000000000001, 0.30980393290519714,
0.30980393290519714), (0.20000000000000001, 0.26274511218070984,
0.26274511218070984), (0.29999999999999999, 0.3803921639919281,
0.3803921639919281), (0.40000000000000002, 0.54509806632995605,
0.54509806632995605), (0.5, 0.74901962280273438, 0.74901962280273438),
(0.59999999999999998, 0.59607845544815063, 0.59607845544815063),
(0.69999999999999996, 0.64313727617263794, 0.64313727617263794),
(0.80000000000000004, 0.64705884456634521, 0.64705884456634521),
(0.90000000000000002, 0.74117648601531982, 0.74117648601531982), (1.0,
0.63529413938522339, 0.63529413938522339)],
'green': [(0.0, 0.0039215688593685627, 0.0039215688593685627),
(0.10000000000000001, 0.24313725531101227, 0.24313725531101227),
(0.20000000000000001, 0.42745098471641541, 0.42745098471641541),
(0.29999999999999999, 0.68235296010971069, 0.68235296010971069),
(0.40000000000000002, 0.87843137979507446, 0.87843137979507446),
(0.5, 1.0, 1.0), (0.59999999999999998, 0.96078431606292725,
0.96078431606292725), (0.69999999999999996, 0.86666667461395264,
0.86666667461395264), (0.80000000000000004, 0.7607843279838562,
0.7607843279838562), (0.90000000000000002, 0.53333336114883423,
0.53333336114883423), (1.0, 0.30980393290519714,
0.30980393290519714)],
'red': [(0.0, 0.61960786581039429, 0.61960786581039429),
(0.10000000000000001, 0.83529412746429443, 0.83529412746429443),
(0.20000000000000001, 0.95686274766921997, 0.95686274766921997),
(0.29999999999999999, 0.99215686321258545, 0.99215686321258545),
(0.40000000000000002, 0.99607843160629272, 0.99607843160629272),
(0.5, 1.0, 1.0), (0.59999999999999998, 0.90196079015731812,
0.90196079015731812), (0.69999999999999996, 0.67058825492858887,
0.67058825492858887), (0.80000000000000004, 0.40000000596046448,
0.40000000596046448), (0.90000000000000002, 0.19607843458652496,
0.19607843458652496), (1.0, 0.36862745881080627,
0.36862745881080627)]}
_YlGn_data = {'blue': [(0.0, 0.89803922176361084,
0.89803922176361084), (0.125, 0.72549021244049072,
0.72549021244049072), (0.25, 0.63921570777893066,
0.63921570777893066), (0.375, 0.55686277151107788,
0.55686277151107788), (0.5, 0.47450980544090271, 0.47450980544090271),
(0.625, 0.364705890417099, 0.364705890417099), (0.75,
0.26274511218070984, 0.26274511218070984), (0.875,
0.21568627655506134, 0.21568627655506134), (1.0, 0.16078431904315948,
0.16078431904315948)],
'green': [(0.0, 1.0, 1.0), (0.125, 0.98823529481887817,
0.98823529481887817), (0.25, 0.94117647409439087,
0.94117647409439087), (0.375, 0.86666667461395264,
0.86666667461395264), (0.5, 0.7764706015586853,
0.7764706015586853), (0.625, 0.67058825492858887,
0.67058825492858887), (0.75, 0.51764708757400513,
0.51764708757400513), (0.875, 0.40784314274787903,
0.40784314274787903), (1.0, 0.27058824896812439,
0.27058824896812439)],
'red': [(0.0, 1.0, 1.0), (0.125, 0.9686274528503418,
0.9686274528503418), (0.25, 0.85098040103912354,
0.85098040103912354), (0.375, 0.67843139171600342,
0.67843139171600342), (0.5, 0.47058823704719543,
0.47058823704719543), (0.625, 0.25490197539329529,
0.25490197539329529), (0.75, 0.13725490868091583,
0.13725490868091583), (0.875, 0.0, 0.0), (1.0, 0.0, 0.0)]}
_YlGnBu_data = {'blue': [(0.0, 0.85098040103912354,
0.85098040103912354), (0.125, 0.69411766529083252,
0.69411766529083252), (0.25, 0.70588237047195435,
0.70588237047195435), (0.375, 0.73333334922790527,
0.73333334922790527), (0.5, 0.76862746477127075, 0.76862746477127075),
(0.625, 0.75294119119644165, 0.75294119119644165), (0.75,
0.65882354974746704, 0.65882354974746704), (0.875,
0.58039218187332153, 0.58039218187332153), (1.0, 0.34509804844856262,
0.34509804844856262)],
'green': [(0.0, 1.0, 1.0), (0.125, 0.97254902124404907,
0.97254902124404907), (0.25, 0.91372549533843994,
0.91372549533843994), (0.375, 0.80392158031463623,
0.80392158031463623), (0.5, 0.7137255072593689,
0.7137255072593689), (0.625, 0.56862747669219971,
0.56862747669219971), (0.75, 0.36862745881080627,
0.36862745881080627), (0.875, 0.20392157137393951,
0.20392157137393951), (1.0, 0.11372549086809158,
0.11372549086809158)],
'red': [(0.0, 1.0, 1.0), (0.125, 0.92941176891326904,
0.92941176891326904), (0.25, 0.78039216995239258,
0.78039216995239258), (0.375, 0.49803921580314636,
0.49803921580314636), (0.5, 0.25490197539329529,
0.25490197539329529), (0.625, 0.11372549086809158,
0.11372549086809158), (0.75, 0.13333334028720856,
0.13333334028720856), (0.875, 0.14509804546833038,
0.14509804546833038), (1.0, 0.031372550874948502,
0.031372550874948502)]}
_YlOrBr_data = {'blue': [(0.0, 0.89803922176361084,
0.89803922176361084), (0.125, 0.73725491762161255,
0.73725491762161255), (0.25, 0.56862747669219971,
0.56862747669219971), (0.375, 0.30980393290519714,
0.30980393290519714), (0.5, 0.16078431904315948, 0.16078431904315948),
(0.625, 0.078431375324726105, 0.078431375324726105), (0.75,
0.0078431377187371254, 0.0078431377187371254), (0.875,
0.015686275437474251, 0.015686275437474251), (1.0,
0.023529412224888802, 0.023529412224888802)],
'green': [(0.0, 1.0, 1.0), (0.125, 0.9686274528503418,
0.9686274528503418), (0.25, 0.89019608497619629,
0.89019608497619629), (0.375, 0.76862746477127075,
0.76862746477127075), (0.5, 0.60000002384185791,
0.60000002384185791), (0.625, 0.43921568989753723,
0.43921568989753723), (0.75, 0.29803922772407532,
0.29803922772407532), (0.875, 0.20392157137393951,
0.20392157137393951), (1.0, 0.14509804546833038,
0.14509804546833038)],
'red': [(0.0, 1.0, 1.0), (0.125, 1.0, 1.0), (0.25,
0.99607843160629272, 0.99607843160629272), (0.375,
0.99607843160629272, 0.99607843160629272), (0.5,
0.99607843160629272, 0.99607843160629272), (0.625,
0.92549020051956177, 0.92549020051956177), (0.75,
0.80000001192092896, 0.80000001192092896), (0.875,
0.60000002384185791, 0.60000002384185791), (1.0,
0.40000000596046448, 0.40000000596046448)]}
_YlOrRd_data = {'blue': [(0.0, 0.80000001192092896,
0.80000001192092896), (0.125, 0.62745100259780884,
0.62745100259780884), (0.25, 0.46274510025978088,
0.46274510025978088), (0.375, 0.29803922772407532,
0.29803922772407532), (0.5, 0.23529411852359772, 0.23529411852359772),
(0.625, 0.16470588743686676, 0.16470588743686676), (0.75,
0.10980392247438431, 0.10980392247438431), (0.875,
0.14901961386203766, 0.14901961386203766), (1.0, 0.14901961386203766,
0.14901961386203766)],
'green': [(0.0, 1.0, 1.0), (0.125, 0.92941176891326904,
0.92941176891326904), (0.25, 0.85098040103912354,
0.85098040103912354), (0.375, 0.69803923368453979,
0.69803923368453979), (0.5, 0.55294120311737061,
0.55294120311737061), (0.625, 0.30588236451148987,
0.30588236451148987), (0.75, 0.10196078568696976,
0.10196078568696976), (0.875, 0.0, 0.0), (1.0, 0.0, 0.0)],
'red': [(0.0, 1.0, 1.0), (0.125, 1.0, 1.0), (0.25,
0.99607843160629272, 0.99607843160629272), (0.375,
0.99607843160629272, 0.99607843160629272), (0.5,
0.99215686321258545, 0.99215686321258545), (0.625,
0.98823529481887817, 0.98823529481887817), (0.75,
0.89019608497619629, 0.89019608497619629), (0.875,
0.74117648601531982, 0.74117648601531982), (1.0,
0.50196081399917603, 0.50196081399917603)]}
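# Note (illustrative sketch, not part of the original module): each of the
# segmentdata dictionaries above maps a channel name ('red', 'green', 'blue')
# to a list of (x, y_left, y_right) anchor tuples, and matplotlib interpolates
# linearly between anchors to build the colormap.  Assuming the standard
# matplotlib.colors API, a dictionary such as _YlOrRd_data would typically be
# consumed roughly like this:
#
#     from matplotlib.colors import LinearSegmentedColormap
#     ylorrd = LinearSegmentedColormap('YlOrRd', _YlOrRd_data, N=256)
#     rgba = ylorrd(0.5)   # RGBA colour at the midpoint of the map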
# The next 7 palettes are from the Yorick scientific visualisation package,
# an evolution of the GIST package, both by David H. Munro.
# They are released under a BSD-like license (see LICENSE_YORICK in
# the license directory of the matplotlib source distribution).
_gist_earth_data = {'blue': [(0.0, 0.0, 0.0), (0.0042016808874905109,
0.18039216101169586, 0.18039216101169586), (0.0084033617749810219,
0.22745098173618317, 0.22745098173618317), (0.012605042196810246,
0.27058824896812439, 0.27058824896812439), (0.016806723549962044,
0.31764706969261169, 0.31764706969261169), (0.021008403971791267,
0.36078432202339172, 0.36078432202339172), (0.025210084393620491,
0.40784314274787903, 0.40784314274787903), (0.029411764815449715,
0.45490196347236633, 0.45490196347236633), (0.033613447099924088,
0.45490196347236633, 0.45490196347236633), (0.037815127521753311,
0.45490196347236633, 0.45490196347236633), (0.042016807943582535,
0.45490196347236633, 0.45490196347236633), (0.046218488365411758,
0.45490196347236633, 0.45490196347236633), (0.050420168787240982,
0.45882353186607361, 0.45882353186607361), (0.054621849209070206,
0.45882353186607361, 0.45882353186607361), (0.058823529630899429,
0.45882353186607361, 0.45882353186607361), (0.063025213778018951,
0.45882353186607361, 0.45882353186607361), (0.067226894199848175,
0.45882353186607361, 0.45882353186607361), (0.071428574621677399,
0.46274510025978088, 0.46274510025978088), (0.075630255043506622,
0.46274510025978088, 0.46274510025978088), (0.079831935465335846,
0.46274510025978088, 0.46274510025978088), (0.08403361588716507,
0.46274510025978088, 0.46274510025978088), (0.088235296308994293,
0.46274510025978088, 0.46274510025978088), (0.092436976730823517,
0.46666666865348816, 0.46666666865348816), (0.09663865715265274,
0.46666666865348816, 0.46666666865348816), (0.10084033757448196,
0.46666666865348816, 0.46666666865348816), (0.10504201799631119,
0.46666666865348816, 0.46666666865348816), (0.10924369841814041,
0.46666666865348816, 0.46666666865348816), (0.11344537883996964,
0.47058823704719543, 0.47058823704719543), (0.11764705926179886,
0.47058823704719543, 0.47058823704719543), (0.12184873968362808,
0.47058823704719543, 0.47058823704719543), (0.1260504275560379,
0.47058823704719543, 0.47058823704719543), (0.13025210797786713,
0.47058823704719543, 0.47058823704719543), (0.13445378839969635,
0.47450980544090271, 0.47450980544090271), (0.13865546882152557,
0.47450980544090271, 0.47450980544090271), (0.1428571492433548,
0.47450980544090271, 0.47450980544090271), (0.14705882966518402,
0.47450980544090271, 0.47450980544090271), (0.15126051008701324,
0.47450980544090271, 0.47450980544090271), (0.15546219050884247,
0.47843137383460999, 0.47843137383460999), (0.15966387093067169,
0.47843137383460999, 0.47843137383460999), (0.16386555135250092,
0.47843137383460999, 0.47843137383460999), (0.16806723177433014,
0.47843137383460999, 0.47843137383460999), (0.17226891219615936,
0.47843137383460999, 0.47843137383460999), (0.17647059261798859,
0.48235294222831726, 0.48235294222831726), (0.18067227303981781,
0.48235294222831726, 0.48235294222831726), (0.18487395346164703,
0.48235294222831726, 0.48235294222831726), (0.18907563388347626,
0.48235294222831726, 0.48235294222831726), (0.19327731430530548,
0.48235294222831726, 0.48235294222831726), (0.1974789947271347,
0.48627451062202454, 0.48627451062202454), (0.20168067514896393,
0.48627451062202454, 0.48627451062202454), (0.20588235557079315,
0.48627451062202454, 0.48627451062202454), (0.21008403599262238,
0.48627451062202454, 0.48627451062202454), (0.2142857164144516,
0.48627451062202454, 0.48627451062202454), (0.21848739683628082,
0.49019607901573181, 0.49019607901573181), (0.22268907725811005,
0.49019607901573181, 0.49019607901573181), (0.22689075767993927,
0.49019607901573181, 0.49019607901573181), (0.23109243810176849,
0.49019607901573181, 0.49019607901573181), (0.23529411852359772,
0.49019607901573181, 0.49019607901573181), (0.23949579894542694,
0.49411764740943909, 0.49411764740943909), (0.24369747936725616,
0.49411764740943909, 0.49411764740943909), (0.24789915978908539,
0.49411764740943909, 0.49411764740943909), (0.25210085511207581,
0.49411764740943909, 0.49411764740943909), (0.25630253553390503,
0.49411764740943909, 0.49411764740943909), (0.26050421595573425,
0.49803921580314636, 0.49803921580314636), (0.26470589637756348,
0.49803921580314636, 0.49803921580314636), (0.2689075767993927,
0.49803921580314636, 0.49803921580314636), (0.27310925722122192,
0.49803921580314636, 0.49803921580314636), (0.27731093764305115,
0.49803921580314636, 0.49803921580314636), (0.28151261806488037,
0.50196081399917603, 0.50196081399917603), (0.28571429848670959,
0.49411764740943909, 0.49411764740943909), (0.28991597890853882,
0.49019607901573181, 0.49019607901573181), (0.29411765933036804,
0.48627451062202454, 0.48627451062202454), (0.29831933975219727,
0.48235294222831726, 0.48235294222831726), (0.30252102017402649,
0.47843137383460999, 0.47843137383460999), (0.30672270059585571,
0.47058823704719543, 0.47058823704719543), (0.31092438101768494,
0.46666666865348816, 0.46666666865348816), (0.31512606143951416,
0.46274510025978088, 0.46274510025978088), (0.31932774186134338,
0.45882353186607361, 0.45882353186607361), (0.32352942228317261,
0.45098039507865906, 0.45098039507865906), (0.32773110270500183,
0.44705882668495178, 0.44705882668495178), (0.33193278312683105,
0.44313725829124451, 0.44313725829124451), (0.33613446354866028,
0.43529412150382996, 0.43529412150382996), (0.3403361439704895,
0.43137255311012268, 0.43137255311012268), (0.34453782439231873,
0.42745098471641541, 0.42745098471641541), (0.34873950481414795,
0.42352941632270813, 0.42352941632270813), (0.35294118523597717,
0.41568627953529358, 0.41568627953529358), (0.3571428656578064,
0.4117647111415863, 0.4117647111415863), (0.36134454607963562,
0.40784314274787903, 0.40784314274787903), (0.36554622650146484,
0.40000000596046448, 0.40000000596046448), (0.36974790692329407,
0.3960784375667572, 0.3960784375667572), (0.37394958734512329,
0.39215686917304993, 0.39215686917304993), (0.37815126776695251,
0.38431373238563538, 0.38431373238563538), (0.38235294818878174,
0.3803921639919281, 0.3803921639919281), (0.38655462861061096,
0.37647059559822083, 0.37647059559822083), (0.39075630903244019,
0.36862745881080627, 0.36862745881080627), (0.39495798945426941,
0.364705890417099, 0.364705890417099), (0.39915966987609863,
0.36078432202339172, 0.36078432202339172), (0.40336135029792786,
0.35294118523597717, 0.35294118523597717), (0.40756303071975708,
0.3490196168422699, 0.3490196168422699), (0.4117647111415863,
0.34509804844856262, 0.34509804844856262), (0.41596639156341553,
0.33725491166114807, 0.33725491166114807), (0.42016807198524475,
0.3333333432674408, 0.3333333432674408), (0.42436975240707397,
0.32941177487373352, 0.32941177487373352), (0.4285714328289032,
0.32156863808631897, 0.32156863808631897), (0.43277311325073242,
0.31764706969261169, 0.31764706969261169), (0.43697479367256165,
0.31372550129890442, 0.31372550129890442), (0.44117647409439087,
0.30588236451148987, 0.30588236451148987), (0.44537815451622009,
0.30196079611778259, 0.30196079611778259), (0.44957983493804932,
0.29803922772407532, 0.29803922772407532), (0.45378151535987854,
0.29019609093666077, 0.29019609093666077), (0.45798319578170776,
0.28627452254295349, 0.28627452254295349), (0.46218487620353699,
0.27843138575553894, 0.27843138575553894), (0.46638655662536621,
0.27450981736183167, 0.27450981736183167), (0.47058823704719543,
0.27843138575553894, 0.27843138575553894), (0.47478991746902466,
0.28235295414924622, 0.28235295414924622), (0.47899159789085388,
0.28235295414924622, 0.28235295414924622), (0.48319327831268311,
0.28627452254295349, 0.28627452254295349), (0.48739495873451233,
0.28627452254295349, 0.28627452254295349), (0.49159663915634155,
0.29019609093666077, 0.29019609093666077), (0.49579831957817078,
0.29411765933036804, 0.29411765933036804), (0.5, 0.29411765933036804,
0.29411765933036804), (0.50420171022415161, 0.29803922772407532,
0.29803922772407532), (0.50840336084365845, 0.29803922772407532,
0.29803922772407532), (0.51260507106781006, 0.30196079611778259,
0.30196079611778259), (0.51680672168731689, 0.30196079611778259,
0.30196079611778259), (0.52100843191146851, 0.30588236451148987,
0.30588236451148987), (0.52521008253097534, 0.30980393290519714,
0.30980393290519714), (0.52941179275512695, 0.30980393290519714,
0.30980393290519714), (0.53361344337463379, 0.31372550129890442,
0.31372550129890442), (0.5378151535987854, 0.31372550129890442,
0.31372550129890442), (0.54201680421829224, 0.31764706969261169,
0.31764706969261169), (0.54621851444244385, 0.32156863808631897,
0.32156863808631897), (0.55042016506195068, 0.32156863808631897,
0.32156863808631897), (0.55462187528610229, 0.32156863808631897,
0.32156863808631897), (0.55882352590560913, 0.32549020648002625,
0.32549020648002625), (0.56302523612976074, 0.32549020648002625,
0.32549020648002625), (0.56722688674926758, 0.32549020648002625,
0.32549020648002625), (0.57142859697341919, 0.32941177487373352,
0.32941177487373352), (0.57563024759292603, 0.32941177487373352,
0.32941177487373352), (0.57983195781707764, 0.32941177487373352,
0.32941177487373352), (0.58403360843658447, 0.3333333432674408,
0.3333333432674408), (0.58823531866073608, 0.3333333432674408,
0.3333333432674408), (0.59243696928024292, 0.3333333432674408,
0.3333333432674408), (0.59663867950439453, 0.33725491166114807,
0.33725491166114807), (0.60084033012390137, 0.33725491166114807,
0.33725491166114807), (0.60504204034805298, 0.33725491166114807,
0.33725491166114807), (0.60924369096755981, 0.34117648005485535,
0.34117648005485535), (0.61344540119171143, 0.34117648005485535,
0.34117648005485535), (0.61764705181121826, 0.34117648005485535,
0.34117648005485535), (0.62184876203536987, 0.34509804844856262,
0.34509804844856262), (0.62605041265487671, 0.34509804844856262,
0.34509804844856262), (0.63025212287902832, 0.34509804844856262,
0.34509804844856262), (0.63445377349853516, 0.3490196168422699,
0.3490196168422699), (0.63865548372268677, 0.3490196168422699,
0.3490196168422699), (0.6428571343421936, 0.3490196168422699,
0.3490196168422699), (0.64705884456634521, 0.35294118523597717,
0.35294118523597717), (0.65126049518585205, 0.35294118523597717,
0.35294118523597717), (0.65546220541000366, 0.35294118523597717,
0.35294118523597717), (0.6596638560295105, 0.35686275362968445,
0.35686275362968445), (0.66386556625366211, 0.35686275362968445,
0.35686275362968445), (0.66806721687316895, 0.35686275362968445,
0.35686275362968445), (0.67226892709732056, 0.36078432202339172,
0.36078432202339172), (0.67647057771682739, 0.36078432202339172,
0.36078432202339172), (0.680672287940979, 0.36078432202339172,
0.36078432202339172), (0.68487393856048584, 0.364705890417099,
0.364705890417099), (0.68907564878463745, 0.364705890417099,
0.364705890417099), (0.69327729940414429, 0.364705890417099,
0.364705890417099), (0.6974790096282959, 0.36862745881080627,
0.36862745881080627), (0.70168066024780273, 0.36862745881080627,
0.36862745881080627), (0.70588237047195435, 0.36862745881080627,
0.36862745881080627), (0.71008402109146118, 0.37254902720451355,
0.37254902720451355), (0.71428573131561279, 0.37254902720451355,
0.37254902720451355), (0.71848738193511963, 0.37254902720451355,
0.37254902720451355), (0.72268909215927124, 0.37647059559822083,
0.37647059559822083), (0.72689074277877808, 0.37647059559822083,
0.37647059559822083), (0.73109245300292969, 0.3803921639919281,
0.3803921639919281), (0.73529410362243652, 0.3803921639919281,
0.3803921639919281), (0.73949581384658813, 0.3803921639919281,
0.3803921639919281), (0.74369746446609497, 0.38431373238563538,
0.38431373238563538), (0.74789917469024658, 0.38431373238563538,
0.38431373238563538), (0.75210082530975342, 0.38431373238563538,
0.38431373238563538), (0.75630253553390503, 0.38823530077934265,
0.38823530077934265), (0.76050418615341187, 0.38823530077934265,
0.38823530077934265), (0.76470589637756348, 0.38823530077934265,
0.38823530077934265), (0.76890754699707031, 0.39215686917304993,
0.39215686917304993), (0.77310925722122192, 0.39215686917304993,
0.39215686917304993), (0.77731090784072876, 0.39215686917304993,
0.39215686917304993), (0.78151261806488037, 0.3960784375667572,
0.3960784375667572), (0.78571426868438721, 0.3960784375667572,
0.3960784375667572), (0.78991597890853882, 0.40784314274787903,
0.40784314274787903), (0.79411762952804565, 0.41568627953529358,
0.41568627953529358), (0.79831933975219727, 0.42352941632270813,
0.42352941632270813), (0.8025209903717041, 0.43529412150382996,
0.43529412150382996), (0.80672270059585571, 0.44313725829124451,
0.44313725829124451), (0.81092435121536255, 0.45490196347236633,
0.45490196347236633), (0.81512606143951416, 0.46274510025978088,
0.46274510025978088), (0.819327712059021, 0.47450980544090271,
0.47450980544090271), (0.82352942228317261, 0.48235294222831726,
0.48235294222831726), (0.82773107290267944, 0.49411764740943909,
0.49411764740943909), (0.83193278312683105, 0.5058823823928833,
0.5058823823928833), (0.83613443374633789, 0.51372551918029785,
0.51372551918029785), (0.8403361439704895, 0.52549022436141968,
0.52549022436141968), (0.84453779458999634, 0.5372549295425415,
0.5372549295425415), (0.84873950481414795, 0.54509806632995605,
0.54509806632995605), (0.85294115543365479, 0.55686277151107788,
0.55686277151107788), (0.8571428656578064, 0.56862747669219971,
0.56862747669219971), (0.86134451627731323, 0.58039218187332153,
0.58039218187332153), (0.86554622650146484, 0.58823531866073608,
0.58823531866073608), (0.86974787712097168, 0.60000002384185791,
0.60000002384185791), (0.87394958734512329, 0.61176472902297974,
0.61176472902297974), (0.87815123796463013, 0.62352943420410156,
0.62352943420410156), (0.88235294818878174, 0.63529413938522339,
0.63529413938522339), (0.88655459880828857, 0.64705884456634521,
0.64705884456634521), (0.89075630903244019, 0.65882354974746704,
0.65882354974746704), (0.89495795965194702, 0.66666668653488159,
0.66666668653488159), (0.89915966987609863, 0.67843139171600342,
0.67843139171600342), (0.90336132049560547, 0.69019609689712524,
0.69019609689712524), (0.90756303071975708, 0.70196080207824707,
0.70196080207824707), (0.91176468133926392, 0.7137255072593689,
0.7137255072593689), (0.91596639156341553, 0.72549021244049072,
0.72549021244049072), (0.92016804218292236, 0.74117648601531982,
0.74117648601531982), (0.92436975240707397, 0.75294119119644165,
0.75294119119644165), (0.92857140302658081, 0.76470589637756348,
0.76470589637756348), (0.93277311325073242, 0.7764706015586853,
0.7764706015586853), (0.93697476387023926, 0.78823530673980713,
0.78823530673980713), (0.94117647409439087, 0.80000001192092896,
0.80000001192092896), (0.94537812471389771, 0.81176471710205078,
0.81176471710205078), (0.94957983493804932, 0.82745099067687988,
0.82745099067687988), (0.95378148555755615, 0.83921569585800171,
0.83921569585800171), (0.95798319578170776, 0.85098040103912354,
0.85098040103912354), (0.9621848464012146, 0.86274510622024536,
0.86274510622024536), (0.96638655662536621, 0.87843137979507446,
0.87843137979507446), (0.97058820724487305, 0.89019608497619629,
0.89019608497619629), (0.97478991746902466, 0.90196079015731812,
0.90196079015731812), (0.97899156808853149, 0.91764706373214722,
0.91764706373214722), (0.98319327831268311, 0.92941176891326904,
0.92941176891326904), (0.98739492893218994, 0.94509804248809814,
0.94509804248809814), (0.99159663915634155, 0.95686274766921997,
0.95686274766921997), (0.99579828977584839, 0.97254902124404907,
0.97254902124404907), (1.0, 0.9843137264251709, 0.9843137264251709)],
'green': [(0.0, 0.0, 0.0), (0.0042016808874905109, 0.0, 0.0),
(0.0084033617749810219, 0.0, 0.0), (0.012605042196810246, 0.0, 0.0),
(0.016806723549962044, 0.0, 0.0), (0.021008403971791267, 0.0, 0.0),
(0.025210084393620491, 0.0, 0.0), (0.029411764815449715, 0.0, 0.0),
(0.033613447099924088, 0.011764706112444401, 0.011764706112444401),
(0.037815127521753311, 0.023529412224888802, 0.023529412224888802),
(0.042016807943582535, 0.031372550874948502, 0.031372550874948502),
(0.046218488365411758, 0.043137256056070328, 0.043137256056070328),
(0.050420168787240982, 0.050980392843484879, 0.050980392843484879),
(0.054621849209070206, 0.062745101749897003, 0.062745101749897003),
(0.058823529630899429, 0.070588238537311554, 0.070588238537311554),
(0.063025213778018951, 0.08235294371843338, 0.08235294371843338),
(0.067226894199848175, 0.090196080505847931, 0.090196080505847931),
(0.071428574621677399, 0.10196078568696976, 0.10196078568696976),
(0.075630255043506622, 0.10980392247438431, 0.10980392247438431),
(0.079831935465335846, 0.12156862765550613, 0.12156862765550613),
(0.08403361588716507, 0.12941177189350128, 0.12941177189350128),
(0.088235296308994293, 0.14117647707462311, 0.14117647707462311),
(0.092436976730823517, 0.14901961386203766, 0.14901961386203766),
(0.09663865715265274, 0.16078431904315948, 0.16078431904315948),
(0.10084033757448196, 0.16862745583057404, 0.16862745583057404),
(0.10504201799631119, 0.17647059261798859, 0.17647059261798859),
(0.10924369841814041, 0.18823529779911041, 0.18823529779911041),
(0.11344537883996964, 0.19607843458652496, 0.19607843458652496),
(0.11764705926179886, 0.20392157137393951, 0.20392157137393951),
(0.12184873968362808, 0.21568627655506134, 0.21568627655506134),
(0.1260504275560379, 0.22352941334247589, 0.22352941334247589),
(0.13025210797786713, 0.23137255012989044, 0.23137255012989044),
(0.13445378839969635, 0.23921568691730499, 0.23921568691730499),
(0.13865546882152557, 0.25098040699958801, 0.25098040699958801),
(0.1428571492433548, 0.25882354378700256, 0.25882354378700256),
(0.14705882966518402, 0.26666668057441711, 0.26666668057441711),
(0.15126051008701324, 0.27450981736183167, 0.27450981736183167),
(0.15546219050884247, 0.28235295414924622, 0.28235295414924622),
(0.15966387093067169, 0.29019609093666077, 0.29019609093666077),
(0.16386555135250092, 0.30196079611778259, 0.30196079611778259),
(0.16806723177433014, 0.30980393290519714, 0.30980393290519714),
(0.17226891219615936, 0.31764706969261169, 0.31764706969261169),
(0.17647059261798859, 0.32549020648002625, 0.32549020648002625),
(0.18067227303981781, 0.3333333432674408, 0.3333333432674408),
(0.18487395346164703, 0.34117648005485535, 0.34117648005485535),
(0.18907563388347626, 0.3490196168422699, 0.3490196168422699),
(0.19327731430530548, 0.35686275362968445, 0.35686275362968445),
(0.1974789947271347, 0.364705890417099, 0.364705890417099),
(0.20168067514896393, 0.37254902720451355, 0.37254902720451355),
(0.20588235557079315, 0.3803921639919281, 0.3803921639919281),
(0.21008403599262238, 0.38823530077934265, 0.38823530077934265),
(0.2142857164144516, 0.39215686917304993, 0.39215686917304993),
(0.21848739683628082, 0.40000000596046448, 0.40000000596046448),
(0.22268907725811005, 0.40784314274787903, 0.40784314274787903),
(0.22689075767993927, 0.41568627953529358, 0.41568627953529358),
(0.23109243810176849, 0.42352941632270813, 0.42352941632270813),
(0.23529411852359772, 0.42745098471641541, 0.42745098471641541),
(0.23949579894542694, 0.43529412150382996, 0.43529412150382996),
(0.24369747936725616, 0.44313725829124451, 0.44313725829124451),
(0.24789915978908539, 0.45098039507865906, 0.45098039507865906),
(0.25210085511207581, 0.45490196347236633, 0.45490196347236633),
(0.25630253553390503, 0.46274510025978088, 0.46274510025978088),
(0.26050421595573425, 0.47058823704719543, 0.47058823704719543),
(0.26470589637756348, 0.47450980544090271, 0.47450980544090271),
(0.2689075767993927, 0.48235294222831726, 0.48235294222831726),
(0.27310925722122192, 0.49019607901573181, 0.49019607901573181),
(0.27731093764305115, 0.49411764740943909, 0.49411764740943909),
(0.28151261806488037, 0.50196081399917603, 0.50196081399917603),
(0.28571429848670959, 0.50196081399917603, 0.50196081399917603),
(0.28991597890853882, 0.5058823823928833, 0.5058823823928833),
(0.29411765933036804, 0.5058823823928833, 0.5058823823928833),
(0.29831933975219727, 0.50980395078659058, 0.50980395078659058),
(0.30252102017402649, 0.51372551918029785, 0.51372551918029785),
(0.30672270059585571, 0.51372551918029785, 0.51372551918029785),
(0.31092438101768494, 0.51764708757400513, 0.51764708757400513),
(0.31512606143951416, 0.5215686559677124, 0.5215686559677124),
(0.31932774186134338, 0.5215686559677124, 0.5215686559677124),
(0.32352942228317261, 0.52549022436141968, 0.52549022436141968),
(0.32773110270500183, 0.52549022436141968, 0.52549022436141968),
(0.33193278312683105, 0.52941179275512695, 0.52941179275512695),
(0.33613446354866028, 0.53333336114883423, 0.53333336114883423),
(0.3403361439704895, 0.53333336114883423, 0.53333336114883423),
(0.34453782439231873, 0.5372549295425415, 0.5372549295425415),
(0.34873950481414795, 0.54117649793624878, 0.54117649793624878),
(0.35294118523597717, 0.54117649793624878, 0.54117649793624878),
(0.3571428656578064, 0.54509806632995605, 0.54509806632995605),
(0.36134454607963562, 0.54901963472366333, 0.54901963472366333),
(0.36554622650146484, 0.54901963472366333, 0.54901963472366333),
(0.36974790692329407, 0.55294120311737061, 0.55294120311737061),
(0.37394958734512329, 0.55294120311737061, 0.55294120311737061),
(0.37815126776695251, 0.55686277151107788, 0.55686277151107788),
(0.38235294818878174, 0.56078433990478516, 0.56078433990478516),
(0.38655462861061096, 0.56078433990478516, 0.56078433990478516),
(0.39075630903244019, 0.56470590829849243, 0.56470590829849243),
(0.39495798945426941, 0.56862747669219971, 0.56862747669219971),
(0.39915966987609863, 0.56862747669219971, 0.56862747669219971),
(0.40336135029792786, 0.57254904508590698, 0.57254904508590698),
(0.40756303071975708, 0.57254904508590698, 0.57254904508590698),
(0.4117647111415863, 0.57647061347961426, 0.57647061347961426),
(0.41596639156341553, 0.58039218187332153, 0.58039218187332153),
(0.42016807198524475, 0.58039218187332153, 0.58039218187332153),
(0.42436975240707397, 0.58431375026702881, 0.58431375026702881),
(0.4285714328289032, 0.58823531866073608, 0.58823531866073608),
(0.43277311325073242, 0.58823531866073608, 0.58823531866073608),
(0.43697479367256165, 0.59215688705444336, 0.59215688705444336),
(0.44117647409439087, 0.59215688705444336, 0.59215688705444336),
(0.44537815451622009, 0.59607845544815063, 0.59607845544815063),
(0.44957983493804932, 0.60000002384185791, 0.60000002384185791),
(0.45378151535987854, 0.60000002384185791, 0.60000002384185791),
(0.45798319578170776, 0.60392159223556519, 0.60392159223556519),
(0.46218487620353699, 0.60784316062927246, 0.60784316062927246),
(0.46638655662536621, 0.60784316062927246, 0.60784316062927246),
(0.47058823704719543, 0.61176472902297974, 0.61176472902297974),
(0.47478991746902466, 0.61176472902297974, 0.61176472902297974),
(0.47899159789085388, 0.61568629741668701, 0.61568629741668701),
(0.48319327831268311, 0.61960786581039429, 0.61960786581039429),
(0.48739495873451233, 0.61960786581039429, 0.61960786581039429),
(0.49159663915634155, 0.62352943420410156, 0.62352943420410156),
(0.49579831957817078, 0.62745100259780884, 0.62745100259780884), (0.5,
0.62745100259780884, 0.62745100259780884), (0.50420171022415161,
0.63137257099151611, 0.63137257099151611), (0.50840336084365845,
0.63137257099151611, 0.63137257099151611), (0.51260507106781006,
0.63529413938522339, 0.63529413938522339), (0.51680672168731689,
0.63921570777893066, 0.63921570777893066), (0.52100843191146851,
0.63921570777893066, 0.63921570777893066), (0.52521008253097534,
0.64313727617263794, 0.64313727617263794), (0.52941179275512695,
0.64705884456634521, 0.64705884456634521), (0.53361344337463379,
0.64705884456634521, 0.64705884456634521), (0.5378151535987854,
0.65098041296005249, 0.65098041296005249), (0.54201680421829224,
0.65098041296005249, 0.65098041296005249), (0.54621851444244385,
0.65490198135375977, 0.65490198135375977), (0.55042016506195068,
0.65882354974746704, 0.65882354974746704), (0.55462187528610229,
0.65882354974746704, 0.65882354974746704), (0.55882352590560913,
0.65882354974746704, 0.65882354974746704), (0.56302523612976074,
0.66274511814117432, 0.66274511814117432), (0.56722688674926758,
0.66274511814117432, 0.66274511814117432), (0.57142859697341919,
0.66666668653488159, 0.66666668653488159), (0.57563024759292603,
0.66666668653488159, 0.66666668653488159), (0.57983195781707764,
0.67058825492858887, 0.67058825492858887), (0.58403360843658447,
0.67058825492858887, 0.67058825492858887), (0.58823531866073608,
0.67450982332229614, 0.67450982332229614), (0.59243696928024292,
0.67450982332229614, 0.67450982332229614), (0.59663867950439453,
0.67450982332229614, 0.67450982332229614), (0.60084033012390137,
0.67843139171600342, 0.67843139171600342), (0.60504204034805298,
0.67843139171600342, 0.67843139171600342), (0.60924369096755981,
0.68235296010971069, 0.68235296010971069), (0.61344540119171143,
0.68235296010971069, 0.68235296010971069), (0.61764705181121826,
0.68627452850341797, 0.68627452850341797), (0.62184876203536987,
0.68627452850341797, 0.68627452850341797), (0.62605041265487671,
0.68627452850341797, 0.68627452850341797), (0.63025212287902832,
0.69019609689712524, 0.69019609689712524), (0.63445377349853516,
0.69019609689712524, 0.69019609689712524), (0.63865548372268677,
0.69411766529083252, 0.69411766529083252), (0.6428571343421936,
0.69411766529083252, 0.69411766529083252), (0.64705884456634521,
0.69803923368453979, 0.69803923368453979), (0.65126049518585205,
0.69803923368453979, 0.69803923368453979), (0.65546220541000366,
0.70196080207824707, 0.70196080207824707), (0.6596638560295105,
0.70196080207824707, 0.70196080207824707), (0.66386556625366211,
0.70196080207824707, 0.70196080207824707), (0.66806721687316895,
0.70588237047195435, 0.70588237047195435), (0.67226892709732056,
0.70588237047195435, 0.70588237047195435), (0.67647057771682739,
0.70980393886566162, 0.70980393886566162), (0.680672287940979,
0.70980393886566162, 0.70980393886566162), (0.68487393856048584,
0.7137255072593689, 0.7137255072593689), (0.68907564878463745,
0.7137255072593689, 0.7137255072593689), (0.69327729940414429,
0.71764707565307617, 0.71764707565307617), (0.6974790096282959,
0.71764707565307617, 0.71764707565307617), (0.70168066024780273,
0.7137255072593689, 0.7137255072593689), (0.70588237047195435,
0.70980393886566162, 0.70980393886566162), (0.71008402109146118,
0.70980393886566162, 0.70980393886566162), (0.71428573131561279,
0.70588237047195435, 0.70588237047195435), (0.71848738193511963,
0.70196080207824707, 0.70196080207824707), (0.72268909215927124,
0.69803923368453979, 0.69803923368453979), (0.72689074277877808,
0.69411766529083252, 0.69411766529083252), (0.73109245300292969,
0.69019609689712524, 0.69019609689712524), (0.73529410362243652,
0.68627452850341797, 0.68627452850341797), (0.73949581384658813,
0.68235296010971069, 0.68235296010971069), (0.74369746446609497,
0.67843139171600342, 0.67843139171600342), (0.74789917469024658,
0.67450982332229614, 0.67450982332229614), (0.75210082530975342,
0.67058825492858887, 0.67058825492858887), (0.75630253553390503,
0.66666668653488159, 0.66666668653488159), (0.76050418615341187,
0.66274511814117432, 0.66274511814117432), (0.76470589637756348,
0.65882354974746704, 0.65882354974746704), (0.76890754699707031,
0.65490198135375977, 0.65490198135375977), (0.77310925722122192,
0.65098041296005249, 0.65098041296005249), (0.77731090784072876,
0.64705884456634521, 0.64705884456634521), (0.78151261806488037,
0.64313727617263794, 0.64313727617263794), (0.78571426868438721,
0.63921570777893066, 0.63921570777893066), (0.78991597890853882,
0.63921570777893066, 0.63921570777893066), (0.79411762952804565,
0.64313727617263794, 0.64313727617263794), (0.79831933975219727,
0.64313727617263794, 0.64313727617263794), (0.8025209903717041,
0.64705884456634521, 0.64705884456634521), (0.80672270059585571,
0.64705884456634521, 0.64705884456634521), (0.81092435121536255,
0.65098041296005249, 0.65098041296005249), (0.81512606143951416,
0.65490198135375977, 0.65490198135375977), (0.819327712059021,
0.65490198135375977, 0.65490198135375977), (0.82352942228317261,
0.65882354974746704, 0.65882354974746704), (0.82773107290267944,
0.66274511814117432, 0.66274511814117432), (0.83193278312683105,
0.66666668653488159, 0.66666668653488159), (0.83613443374633789,
0.67058825492858887, 0.67058825492858887), (0.8403361439704895,
0.67450982332229614, 0.67450982332229614), (0.84453779458999634,
0.67843139171600342, 0.67843139171600342), (0.84873950481414795,
0.68235296010971069, 0.68235296010971069), (0.85294115543365479,
0.68627452850341797, 0.68627452850341797), (0.8571428656578064,
0.69019609689712524, 0.69019609689712524), (0.86134451627731323,
0.69411766529083252, 0.69411766529083252), (0.86554622650146484,
0.69803923368453979, 0.69803923368453979), (0.86974787712097168,
0.70196080207824707, 0.70196080207824707), (0.87394958734512329,
0.70980393886566162, 0.70980393886566162), (0.87815123796463013,
0.7137255072593689, 0.7137255072593689), (0.88235294818878174,
0.72156864404678345, 0.72156864404678345), (0.88655459880828857,
0.72549021244049072, 0.72549021244049072), (0.89075630903244019,
0.73333334922790527, 0.73333334922790527), (0.89495795965194702,
0.73725491762161255, 0.73725491762161255), (0.89915966987609863,
0.7450980544090271, 0.7450980544090271), (0.90336132049560547,
0.75294119119644165, 0.75294119119644165), (0.90756303071975708,
0.7607843279838562, 0.7607843279838562), (0.91176468133926392,
0.76862746477127075, 0.76862746477127075), (0.91596639156341553,
0.7764706015586853, 0.7764706015586853), (0.92016804218292236,
0.78431373834609985, 0.78431373834609985), (0.92436975240707397,
0.7921568751335144, 0.7921568751335144), (0.92857140302658081,
0.80000001192092896, 0.80000001192092896), (0.93277311325073242,
0.80784314870834351, 0.80784314870834351), (0.93697476387023926,
0.81568628549575806, 0.81568628549575806), (0.94117647409439087,
0.82745099067687988, 0.82745099067687988), (0.94537812471389771,
0.83529412746429443, 0.83529412746429443), (0.94957983493804932,
0.84313726425170898, 0.84313726425170898), (0.95378148555755615,
0.85490196943283081, 0.85490196943283081), (0.95798319578170776,
0.86666667461395264, 0.86666667461395264), (0.9621848464012146,
0.87450981140136719, 0.87450981140136719), (0.96638655662536621,
0.88627451658248901, 0.88627451658248901), (0.97058820724487305,
0.89803922176361084, 0.89803922176361084), (0.97478991746902466,
0.90980392694473267, 0.90980392694473267), (0.97899156808853149,
0.92156863212585449, 0.92156863212585449), (0.98319327831268311,
0.93333333730697632, 0.93333333730697632), (0.98739492893218994,
0.94509804248809814, 0.94509804248809814), (0.99159663915634155,
0.95686274766921997, 0.95686274766921997), (0.99579828977584839,
0.97254902124404907, 0.97254902124404907), (1.0, 0.9843137264251709,
0.9843137264251709)], 'red': [(0.0, 0.0, 0.0), (0.0042016808874905109,
0.0, 0.0), (0.0084033617749810219, 0.0, 0.0), (0.012605042196810246, 0.0,
0.0), (0.016806723549962044, 0.0, 0.0), (0.021008403971791267, 0.0, 0.0),
(0.025210084393620491, 0.0, 0.0), (0.029411764815449715, 0.0, 0.0),
(0.033613447099924088, 0.0, 0.0), (0.037815127521753311,
0.0039215688593685627, 0.0039215688593685627), (0.042016807943582535,
0.0078431377187371254, 0.0078431377187371254), (0.046218488365411758,
0.0078431377187371254, 0.0078431377187371254), (0.050420168787240982,
0.011764706112444401, 0.011764706112444401), (0.054621849209070206,
0.015686275437474251, 0.015686275437474251), (0.058823529630899429,
0.019607843831181526, 0.019607843831181526), (0.063025213778018951,
0.019607843831181526, 0.019607843831181526), (0.067226894199848175,
0.023529412224888802, 0.023529412224888802), (0.071428574621677399,
0.027450980618596077, 0.027450980618596077), (0.075630255043506622,
0.031372550874948502, 0.031372550874948502), (0.079831935465335846,
0.031372550874948502, 0.031372550874948502), (0.08403361588716507,
0.035294119268655777, 0.035294119268655777), (0.088235296308994293,
0.039215687662363052, 0.039215687662363052), (0.092436976730823517,
0.043137256056070328, 0.043137256056070328), (0.09663865715265274,
0.043137256056070328, 0.043137256056070328), (0.10084033757448196,
0.047058824449777603, 0.047058824449777603), (0.10504201799631119,
0.050980392843484879, 0.050980392843484879), (0.10924369841814041,
0.054901961237192154, 0.054901961237192154), (0.11344537883996964,
0.058823529630899429, 0.058823529630899429), (0.11764705926179886,
0.058823529630899429, 0.058823529630899429), (0.12184873968362808,
0.062745101749897003, 0.062745101749897003), (0.1260504275560379,
0.066666670143604279, 0.066666670143604279), (0.13025210797786713,
0.070588238537311554, 0.070588238537311554), (0.13445378839969635,
0.070588238537311554, 0.070588238537311554), (0.13865546882152557,
0.074509806931018829, 0.074509806931018829), (0.1428571492433548,
0.078431375324726105, 0.078431375324726105), (0.14705882966518402,
0.08235294371843338, 0.08235294371843338), (0.15126051008701324,
0.086274512112140656, 0.086274512112140656), (0.15546219050884247,
0.086274512112140656, 0.086274512112140656), (0.15966387093067169,
0.090196080505847931, 0.090196080505847931), (0.16386555135250092,
0.094117648899555206, 0.094117648899555206), (0.16806723177433014,
0.098039217293262482, 0.098039217293262482), (0.17226891219615936,
0.10196078568696976, 0.10196078568696976), (0.17647059261798859,
0.10196078568696976, 0.10196078568696976), (0.18067227303981781,
0.10588235408067703, 0.10588235408067703), (0.18487395346164703,
0.10980392247438431, 0.10980392247438431), (0.18907563388347626,
0.11372549086809158, 0.11372549086809158), (0.19327731430530548,
0.11764705926179886, 0.11764705926179886), (0.1974789947271347,
0.12156862765550613, 0.12156862765550613), (0.20168067514896393,
0.12156862765550613, 0.12156862765550613), (0.20588235557079315,
0.12549020349979401, 0.12549020349979401), (0.21008403599262238,
0.12941177189350128, 0.12941177189350128), (0.2142857164144516,
0.13333334028720856, 0.13333334028720856), (0.21848739683628082,
0.13725490868091583, 0.13725490868091583), (0.22268907725811005,
0.14117647707462311, 0.14117647707462311), (0.22689075767993927,
0.14117647707462311, 0.14117647707462311), (0.23109243810176849,
0.14509804546833038, 0.14509804546833038), (0.23529411852359772,
0.14901961386203766, 0.14901961386203766), (0.23949579894542694,
0.15294118225574493, 0.15294118225574493), (0.24369747936725616,
0.15686275064945221, 0.15686275064945221), (0.24789915978908539,
0.16078431904315948, 0.16078431904315948), (0.25210085511207581,
0.16078431904315948, 0.16078431904315948), (0.25630253553390503,
0.16470588743686676, 0.16470588743686676), (0.26050421595573425,
0.16862745583057404, 0.16862745583057404), (0.26470589637756348,
0.17254902422428131, 0.17254902422428131), (0.2689075767993927,
0.17647059261798859, 0.17647059261798859), (0.27310925722122192,
0.18039216101169586, 0.18039216101169586), (0.27731093764305115,
0.18431372940540314, 0.18431372940540314), (0.28151261806488037,
0.18823529779911041, 0.18823529779911041), (0.28571429848670959,
0.18823529779911041, 0.18823529779911041), (0.28991597890853882,
0.18823529779911041, 0.18823529779911041), (0.29411765933036804,
0.19215686619281769, 0.19215686619281769), (0.29831933975219727,
0.19215686619281769, 0.19215686619281769), (0.30252102017402649,
0.19607843458652496, 0.19607843458652496), (0.30672270059585571,
0.19607843458652496, 0.19607843458652496), (0.31092438101768494,
0.20000000298023224, 0.20000000298023224), (0.31512606143951416,
0.20000000298023224, 0.20000000298023224), (0.31932774186134338,
0.20392157137393951, 0.20392157137393951), (0.32352942228317261,
0.20392157137393951, 0.20392157137393951), (0.32773110270500183,
0.20784313976764679, 0.20784313976764679), (0.33193278312683105,
0.20784313976764679, 0.20784313976764679), (0.33613446354866028,
0.21176470816135406, 0.21176470816135406), (0.3403361439704895,
0.21176470816135406, 0.21176470816135406), (0.34453782439231873,
0.21568627655506134, 0.21568627655506134), (0.34873950481414795,
0.21568627655506134, 0.21568627655506134), (0.35294118523597717,
0.21960784494876862, 0.21960784494876862), (0.3571428656578064,
0.21960784494876862, 0.21960784494876862), (0.36134454607963562,
0.22352941334247589, 0.22352941334247589), (0.36554622650146484,
0.22352941334247589, 0.22352941334247589), (0.36974790692329407,
0.22745098173618317, 0.22745098173618317), (0.37394958734512329,
0.22745098173618317, 0.22745098173618317), (0.37815126776695251,
0.23137255012989044, 0.23137255012989044), (0.38235294818878174,
0.23137255012989044, 0.23137255012989044), (0.38655462861061096,
0.23529411852359772, 0.23529411852359772), (0.39075630903244019,
0.23921568691730499, 0.23921568691730499), (0.39495798945426941,
0.23921568691730499, 0.23921568691730499), (0.39915966987609863,
0.24313725531101227, 0.24313725531101227), (0.40336135029792786,
0.24313725531101227, 0.24313725531101227), (0.40756303071975708,
0.24705882370471954, 0.24705882370471954), (0.4117647111415863,
0.24705882370471954, 0.24705882370471954), (0.41596639156341553,
0.25098040699958801, 0.25098040699958801), (0.42016807198524475,
0.25098040699958801, 0.25098040699958801), (0.42436975240707397,
0.25490197539329529, 0.25490197539329529), (0.4285714328289032,
0.25490197539329529, 0.25490197539329529), (0.43277311325073242,
0.25882354378700256, 0.25882354378700256), (0.43697479367256165,
0.26274511218070984, 0.26274511218070984), (0.44117647409439087,
0.26274511218070984, 0.26274511218070984), (0.44537815451622009,
0.26666668057441711, 0.26666668057441711), (0.44957983493804932,
0.26666668057441711, 0.26666668057441711), (0.45378151535987854,
0.27058824896812439, 0.27058824896812439), (0.45798319578170776,
0.27058824896812439, 0.27058824896812439), (0.46218487620353699,
0.27450981736183167, 0.27450981736183167), (0.46638655662536621,
0.27843138575553894, 0.27843138575553894), (0.47058823704719543,
0.28627452254295349, 0.28627452254295349), (0.47478991746902466,
0.29803922772407532, 0.29803922772407532), (0.47899159789085388,
0.30588236451148987, 0.30588236451148987), (0.48319327831268311,
0.31764706969261169, 0.31764706969261169), (0.48739495873451233,
0.32549020648002625, 0.32549020648002625), (0.49159663915634155,
0.33725491166114807, 0.33725491166114807), (0.49579831957817078,
0.34509804844856262, 0.34509804844856262), (0.5, 0.35686275362968445,
0.35686275362968445), (0.50420171022415161, 0.36862745881080627,
0.36862745881080627), (0.50840336084365845, 0.37647059559822083,
0.37647059559822083), (0.51260507106781006, 0.38823530077934265,
0.38823530077934265), (0.51680672168731689, 0.3960784375667572,
0.3960784375667572), (0.52100843191146851, 0.40784314274787903,
0.40784314274787903), (0.52521008253097534, 0.41568627953529358,
0.41568627953529358), (0.52941179275512695, 0.42745098471641541,
0.42745098471641541), (0.53361344337463379, 0.43529412150382996,
0.43529412150382996), (0.5378151535987854, 0.44705882668495178,
0.44705882668495178), (0.54201680421829224, 0.45882353186607361,
0.45882353186607361), (0.54621851444244385, 0.46666666865348816,
0.46666666865348816), (0.55042016506195068, 0.47450980544090271,
0.47450980544090271), (0.55462187528610229, 0.47843137383460999,
0.47843137383460999), (0.55882352590560913, 0.48627451062202454,
0.48627451062202454), (0.56302523612976074, 0.49411764740943909,
0.49411764740943909), (0.56722688674926758, 0.50196081399917603,
0.50196081399917603), (0.57142859697341919, 0.5058823823928833,
0.5058823823928833), (0.57563024759292603, 0.51372551918029785,
0.51372551918029785), (0.57983195781707764, 0.5215686559677124,
0.5215686559677124), (0.58403360843658447, 0.52941179275512695,
0.52941179275512695), (0.58823531866073608, 0.53333336114883423,
0.53333336114883423), (0.59243696928024292, 0.54117649793624878,
0.54117649793624878), (0.59663867950439453, 0.54901963472366333,
0.54901963472366333), (0.60084033012390137, 0.55294120311737061,
0.55294120311737061), (0.60504204034805298, 0.56078433990478516,
0.56078433990478516), (0.60924369096755981, 0.56862747669219971,
0.56862747669219971), (0.61344540119171143, 0.57647061347961426,
0.57647061347961426), (0.61764705181121826, 0.58431375026702881,
0.58431375026702881), (0.62184876203536987, 0.58823531866073608,
0.58823531866073608), (0.62605041265487671, 0.59607845544815063,
0.59607845544815063), (0.63025212287902832, 0.60392159223556519,
0.60392159223556519), (0.63445377349853516, 0.61176472902297974,
0.61176472902297974), (0.63865548372268677, 0.61568629741668701,
0.61568629741668701), (0.6428571343421936, 0.62352943420410156,
0.62352943420410156), (0.64705884456634521, 0.63137257099151611,
0.63137257099151611), (0.65126049518585205, 0.63921570777893066,
0.63921570777893066), (0.65546220541000366, 0.64705884456634521,
0.64705884456634521), (0.6596638560295105, 0.65098041296005249,
0.65098041296005249), (0.66386556625366211, 0.65882354974746704,
0.65882354974746704), (0.66806721687316895, 0.66666668653488159,
0.66666668653488159), (0.67226892709732056, 0.67450982332229614,
0.67450982332229614), (0.67647057771682739, 0.68235296010971069,
0.68235296010971069), (0.680672287940979, 0.68627452850341797,
0.68627452850341797), (0.68487393856048584, 0.69411766529083252,
0.69411766529083252), (0.68907564878463745, 0.70196080207824707,
0.70196080207824707), (0.69327729940414429, 0.70980393886566162,
0.70980393886566162), (0.6974790096282959, 0.71764707565307617,
0.71764707565307617), (0.70168066024780273, 0.71764707565307617,
0.71764707565307617), (0.70588237047195435, 0.72156864404678345,
0.72156864404678345), (0.71008402109146118, 0.72156864404678345,
0.72156864404678345), (0.71428573131561279, 0.72549021244049072,
0.72549021244049072), (0.71848738193511963, 0.72549021244049072,
0.72549021244049072), (0.72268909215927124, 0.729411780834198,
0.729411780834198), (0.72689074277877808, 0.729411780834198,
0.729411780834198), (0.73109245300292969, 0.73333334922790527,
0.73333334922790527), (0.73529410362243652, 0.73333334922790527,
0.73333334922790527), (0.73949581384658813, 0.73333334922790527,
0.73333334922790527), (0.74369746446609497, 0.73725491762161255,
0.73725491762161255), (0.74789917469024658, 0.73725491762161255,
0.73725491762161255), (0.75210082530975342, 0.74117648601531982,
0.74117648601531982), (0.75630253553390503, 0.74117648601531982,
0.74117648601531982), (0.76050418615341187, 0.7450980544090271,
0.7450980544090271), (0.76470589637756348, 0.7450980544090271,
0.7450980544090271), (0.76890754699707031, 0.7450980544090271,
0.7450980544090271), (0.77310925722122192, 0.74901962280273438,
0.74901962280273438), (0.77731090784072876, 0.74901962280273438,
0.74901962280273438), (0.78151261806488037, 0.75294119119644165,
0.75294119119644165), (0.78571426868438721, 0.75294119119644165,
0.75294119119644165), (0.78991597890853882, 0.75686275959014893,
0.75686275959014893), (0.79411762952804565, 0.76470589637756348,
0.76470589637756348), (0.79831933975219727, 0.76862746477127075,
0.76862746477127075), (0.8025209903717041, 0.77254903316497803,
0.77254903316497803), (0.80672270059585571, 0.7764706015586853,
0.7764706015586853), (0.81092435121536255, 0.78039216995239258,
0.78039216995239258), (0.81512606143951416, 0.78823530673980713,
0.78823530673980713), (0.819327712059021, 0.7921568751335144,
0.7921568751335144), (0.82352942228317261, 0.79607844352722168,
0.79607844352722168), (0.82773107290267944, 0.80000001192092896,
0.80000001192092896), (0.83193278312683105, 0.80392158031463623,
0.80392158031463623), (0.83613443374633789, 0.81176471710205078,
0.81176471710205078), (0.8403361439704895, 0.81568628549575806,
0.81568628549575806), (0.84453779458999634, 0.81960785388946533,
0.81960785388946533), (0.84873950481414795, 0.82352942228317261,
0.82352942228317261), (0.85294115543365479, 0.82745099067687988,
0.82745099067687988), (0.8571428656578064, 0.83529412746429443,
0.83529412746429443), (0.86134451627731323, 0.83921569585800171,
0.83921569585800171), (0.86554622650146484, 0.84313726425170898,
0.84313726425170898), (0.86974787712097168, 0.84705883264541626,
0.84705883264541626), (0.87394958734512329, 0.85098040103912354,
0.85098040103912354), (0.87815123796463013, 0.85882353782653809,
0.85882353782653809), (0.88235294818878174, 0.86274510622024536,
0.86274510622024536), (0.88655459880828857, 0.86666667461395264,
0.86666667461395264), (0.89075630903244019, 0.87058824300765991,
0.87058824300765991), (0.89495795965194702, 0.87450981140136719,
0.87450981140136719), (0.89915966987609863, 0.88235294818878174,
0.88235294818878174), (0.90336132049560547, 0.88627451658248901,
0.88627451658248901), (0.90756303071975708, 0.89019608497619629,
0.89019608497619629), (0.91176468133926392, 0.89411765336990356,
0.89411765336990356), (0.91596639156341553, 0.89803922176361084,
0.89803922176361084), (0.92016804218292236, 0.90588235855102539,
0.90588235855102539), (0.92436975240707397, 0.90980392694473267,
0.90980392694473267), (0.92857140302658081, 0.91372549533843994,
0.91372549533843994), (0.93277311325073242, 0.91764706373214722,
0.91764706373214722), (0.93697476387023926, 0.92156863212585449,
0.92156863212585449), (0.94117647409439087, 0.92941176891326904,
0.92941176891326904), (0.94537812471389771, 0.93333333730697632,
0.93333333730697632), (0.94957983493804932, 0.93725490570068359,
0.93725490570068359), (0.95378148555755615, 0.94117647409439087,
0.94117647409439087), (0.95798319578170776, 0.94509804248809814,
0.94509804248809814), (0.9621848464012146, 0.9529411792755127,
0.9529411792755127), (0.96638655662536621, 0.95686274766921997,
0.95686274766921997), (0.97058820724487305, 0.96078431606292725,
0.96078431606292725), (0.97478991746902466, 0.96470588445663452,
0.96470588445663452), (0.97899156808853149, 0.9686274528503418,
0.9686274528503418), (0.98319327831268311, 0.97647058963775635,
0.97647058963775635), (0.98739492893218994, 0.98039215803146362,
0.98039215803146362), (0.99159663915634155, 0.9843137264251709,
0.9843137264251709), (0.99579828977584839, 0.98823529481887817,
0.98823529481887817), (1.0, 0.99215686321258545, 0.99215686321258545)]}
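
# Illustrative sketch (editor's addition, not part of the original colormap data):
# each channel list above maps a normalized position x in [0, 1] to an
# (x, y_below, y_above) triple; matplotlib's LinearSegmentedColormap interpolates
# linearly between consecutive entries to build the lookup table.  The names
# ``_demo_gray_data`` and ``_demo_gray`` below are hypothetical and exist only to
# show how segment dictionaries of this shape are consumed.
from matplotlib.colors import LinearSegmentedColormap

_demo_gray_data = {'red':   [(0.0, 0.0, 0.0), (1.0, 1.0, 1.0)],
                   'green': [(0.0, 0.0, 0.0), (1.0, 1.0, 1.0)],
                   'blue':  [(0.0, 0.0, 0.0), (1.0, 1.0, 1.0)]}
_demo_gray = LinearSegmentedColormap('demo_gray', _demo_gray_data, N=256)
# Sampling the colormap at the midpoint returns an RGBA tuple close to
# (0.5, 0.5, 0.5, 1.0):
# _demo_gray(0.5)
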
_gist_gray_data = {'blue': [(0.0, 0.0, 0.0), (0.0042016808874905109,
0.0039215688593685627, 0.0039215688593685627), (0.0084033617749810219,
0.0078431377187371254, 0.0078431377187371254), (0.012605042196810246,
0.011764706112444401, 0.011764706112444401), (0.016806723549962044,
0.015686275437474251, 0.015686275437474251), (0.021008403971791267,
0.019607843831181526, 0.019607843831181526), (0.025210084393620491,
0.023529412224888802, 0.023529412224888802), (0.029411764815449715,
0.027450980618596077, 0.027450980618596077), (0.033613447099924088,
0.035294119268655777, 0.035294119268655777), (0.037815127521753311,
0.039215687662363052, 0.039215687662363052), (0.042016807943582535,
0.043137256056070328, 0.043137256056070328), (0.046218488365411758,
0.047058824449777603, 0.047058824449777603), (0.050420168787240982,
0.050980392843484879, 0.050980392843484879), (0.054621849209070206,
0.054901961237192154, 0.054901961237192154), (0.058823529630899429,
0.058823529630899429, 0.058823529630899429), (0.063025213778018951,
0.062745101749897003, 0.062745101749897003), (0.067226894199848175,
0.066666670143604279, 0.066666670143604279), (0.071428574621677399,
0.070588238537311554, 0.070588238537311554), (0.075630255043506622,
0.074509806931018829, 0.074509806931018829), (0.079831935465335846,
0.078431375324726105, 0.078431375324726105), (0.08403361588716507,
0.08235294371843338, 0.08235294371843338), (0.088235296308994293,
0.086274512112140656, 0.086274512112140656), (0.092436976730823517,
0.090196080505847931, 0.090196080505847931), (0.09663865715265274,
0.098039217293262482, 0.098039217293262482), (0.10084033757448196,
0.10196078568696976, 0.10196078568696976), (0.10504201799631119,
0.10588235408067703, 0.10588235408067703), (0.10924369841814041,
0.10980392247438431, 0.10980392247438431), (0.11344537883996964,
0.11372549086809158, 0.11372549086809158), (0.11764705926179886,
0.11764705926179886, 0.11764705926179886), (0.12184873968362808,
0.12156862765550613, 0.12156862765550613), (0.1260504275560379,
0.12549020349979401, 0.12549020349979401), (0.13025210797786713,
0.12941177189350128, 0.12941177189350128), (0.13445378839969635,
0.13333334028720856, 0.13333334028720856), (0.13865546882152557,
0.13725490868091583, 0.13725490868091583), (0.1428571492433548,
0.14117647707462311, 0.14117647707462311), (0.14705882966518402,
0.14509804546833038, 0.14509804546833038), (0.15126051008701324,
0.14901961386203766, 0.14901961386203766), (0.15546219050884247,
0.15294118225574493, 0.15294118225574493), (0.15966387093067169,
0.16078431904315948, 0.16078431904315948), (0.16386555135250092,
0.16470588743686676, 0.16470588743686676), (0.16806723177433014,
0.16862745583057404, 0.16862745583057404), (0.17226891219615936,
0.17254902422428131, 0.17254902422428131), (0.17647059261798859,
0.17647059261798859, 0.17647059261798859), (0.18067227303981781,
0.18039216101169586, 0.18039216101169586), (0.18487395346164703,
0.18431372940540314, 0.18431372940540314), (0.18907563388347626,
0.18823529779911041, 0.18823529779911041), (0.19327731430530548,
0.19215686619281769, 0.19215686619281769), (0.1974789947271347,
0.19607843458652496, 0.19607843458652496), (0.20168067514896393,
0.20000000298023224, 0.20000000298023224), (0.20588235557079315,
0.20392157137393951, 0.20392157137393951), (0.21008403599262238,
0.20784313976764679, 0.20784313976764679), (0.2142857164144516,
0.21176470816135406, 0.21176470816135406), (0.21848739683628082,
0.21568627655506134, 0.21568627655506134), (0.22268907725811005,
0.22352941334247589, 0.22352941334247589), (0.22689075767993927,
0.22745098173618317, 0.22745098173618317), (0.23109243810176849,
0.23137255012989044, 0.23137255012989044), (0.23529411852359772,
0.23529411852359772, 0.23529411852359772), (0.23949579894542694,
0.23921568691730499, 0.23921568691730499), (0.24369747936725616,
0.24313725531101227, 0.24313725531101227), (0.24789915978908539,
0.24705882370471954, 0.24705882370471954), (0.25210085511207581,
0.25098040699958801, 0.25098040699958801), (0.25630253553390503,
0.25490197539329529, 0.25490197539329529), (0.26050421595573425,
0.25882354378700256, 0.25882354378700256), (0.26470589637756348,
0.26274511218070984, 0.26274511218070984), (0.2689075767993927,
0.26666668057441711, 0.26666668057441711), (0.27310925722122192,
0.27058824896812439, 0.27058824896812439), (0.27731093764305115,
0.27450981736183167, 0.27450981736183167), (0.28151261806488037,
0.27843138575553894, 0.27843138575553894), (0.28571429848670959,
0.28627452254295349, 0.28627452254295349), (0.28991597890853882,
0.29019609093666077, 0.29019609093666077), (0.29411765933036804,
0.29411765933036804, 0.29411765933036804), (0.29831933975219727,
0.29803922772407532, 0.29803922772407532), (0.30252102017402649,
0.30196079611778259, 0.30196079611778259), (0.30672270059585571,
0.30588236451148987, 0.30588236451148987), (0.31092438101768494,
0.30980393290519714, 0.30980393290519714), (0.31512606143951416,
0.31372550129890442, 0.31372550129890442), (0.31932774186134338,
0.31764706969261169, 0.31764706969261169), (0.32352942228317261,
0.32156863808631897, 0.32156863808631897), (0.32773110270500183,
0.32549020648002625, 0.32549020648002625), (0.33193278312683105,
0.32941177487373352, 0.32941177487373352), (0.33613446354866028,
0.3333333432674408, 0.3333333432674408), (0.3403361439704895,
0.33725491166114807, 0.33725491166114807), (0.34453782439231873,
0.34117648005485535, 0.34117648005485535), (0.34873950481414795,
0.3490196168422699, 0.3490196168422699), (0.35294118523597717,
0.35294118523597717, 0.35294118523597717), (0.3571428656578064,
0.35686275362968445, 0.35686275362968445), (0.36134454607963562,
0.36078432202339172, 0.36078432202339172), (0.36554622650146484,
0.364705890417099, 0.364705890417099), (0.36974790692329407,
0.36862745881080627, 0.36862745881080627), (0.37394958734512329,
0.37254902720451355, 0.37254902720451355), (0.37815126776695251,
0.37647059559822083, 0.37647059559822083), (0.38235294818878174,
0.3803921639919281, 0.3803921639919281), (0.38655462861061096,
0.38431373238563538, 0.38431373238563538), (0.39075630903244019,
0.38823530077934265, 0.38823530077934265), (0.39495798945426941,
0.39215686917304993, 0.39215686917304993), (0.39915966987609863,
0.3960784375667572, 0.3960784375667572), (0.40336135029792786,
0.40000000596046448, 0.40000000596046448), (0.40756303071975708,
0.40392157435417175, 0.40392157435417175), (0.4117647111415863,
0.4117647111415863, 0.4117647111415863), (0.41596639156341553,
0.41568627953529358, 0.41568627953529358), (0.42016807198524475,
0.41960784792900085, 0.41960784792900085), (0.42436975240707397,
0.42352941632270813, 0.42352941632270813), (0.4285714328289032,
0.42745098471641541, 0.42745098471641541), (0.43277311325073242,
0.43137255311012268, 0.43137255311012268), (0.43697479367256165,
0.43529412150382996, 0.43529412150382996), (0.44117647409439087,
0.43921568989753723, 0.43921568989753723), (0.44537815451622009,
0.44313725829124451, 0.44313725829124451), (0.44957983493804932,
0.44705882668495178, 0.44705882668495178), (0.45378151535987854,
0.45098039507865906, 0.45098039507865906), (0.45798319578170776,
0.45490196347236633, 0.45490196347236633), (0.46218487620353699,
0.45882353186607361, 0.45882353186607361), (0.46638655662536621,
0.46274510025978088, 0.46274510025978088), (0.47058823704719543,
0.46666666865348816, 0.46666666865348816), (0.47478991746902466,
0.47450980544090271, 0.47450980544090271), (0.47899159789085388,
0.47843137383460999, 0.47843137383460999), (0.48319327831268311,
0.48235294222831726, 0.48235294222831726), (0.48739495873451233,
0.48627451062202454, 0.48627451062202454), (0.49159663915634155,
0.49019607901573181, 0.49019607901573181), (0.49579831957817078,
0.49411764740943909, 0.49411764740943909), (0.5, 0.49803921580314636,
0.49803921580314636), (0.50420171022415161, 0.50196081399917603,
0.50196081399917603), (0.50840336084365845, 0.5058823823928833,
0.5058823823928833), (0.51260507106781006, 0.50980395078659058,
0.50980395078659058), (0.51680672168731689, 0.51372551918029785,
0.51372551918029785), (0.52100843191146851, 0.51764708757400513,
0.51764708757400513), (0.52521008253097534, 0.5215686559677124,
0.5215686559677124), (0.52941179275512695, 0.52549022436141968,
0.52549022436141968), (0.53361344337463379, 0.52941179275512695,
0.52941179275512695), (0.5378151535987854, 0.5372549295425415,
0.5372549295425415), (0.54201680421829224, 0.54117649793624878,
0.54117649793624878), (0.54621851444244385, 0.54509806632995605,
0.54509806632995605), (0.55042016506195068, 0.54901963472366333,
0.54901963472366333), (0.55462187528610229, 0.55294120311737061,
0.55294120311737061), (0.55882352590560913, 0.55686277151107788,
0.55686277151107788), (0.56302523612976074, 0.56078433990478516,
0.56078433990478516), (0.56722688674926758, 0.56470590829849243,
0.56470590829849243), (0.57142859697341919, 0.56862747669219971,
0.56862747669219971), (0.57563024759292603, 0.57254904508590698,
0.57254904508590698), (0.57983195781707764, 0.57647061347961426,
0.57647061347961426), (0.58403360843658447, 0.58039218187332153,
0.58039218187332153), (0.58823531866073608, 0.58431375026702881,
0.58431375026702881), (0.59243696928024292, 0.58823531866073608,
0.58823531866073608), (0.59663867950439453, 0.59215688705444336,
0.59215688705444336), (0.60084033012390137, 0.60000002384185791,
0.60000002384185791), (0.60504204034805298, 0.60392159223556519,
0.60392159223556519), (0.60924369096755981, 0.60784316062927246,
0.60784316062927246), (0.61344540119171143, 0.61176472902297974,
0.61176472902297974), (0.61764705181121826, 0.61568629741668701,
0.61568629741668701), (0.62184876203536987, 0.61960786581039429,
0.61960786581039429), (0.62605041265487671, 0.62352943420410156,
0.62352943420410156), (0.63025212287902832, 0.62745100259780884,
0.62745100259780884), (0.63445377349853516, 0.63137257099151611,
0.63137257099151611), (0.63865548372268677, 0.63529413938522339,
0.63529413938522339), (0.6428571343421936, 0.63921570777893066,
0.63921570777893066), (0.64705884456634521, 0.64313727617263794,
0.64313727617263794), (0.65126049518585205, 0.64705884456634521,
0.64705884456634521), (0.65546220541000366, 0.65098041296005249,
0.65098041296005249), (0.6596638560295105, 0.65490198135375977,
0.65490198135375977), (0.66386556625366211, 0.66274511814117432,
0.66274511814117432), (0.66806721687316895, 0.66666668653488159,
0.66666668653488159), (0.67226892709732056, 0.67058825492858887,
0.67058825492858887), (0.67647057771682739, 0.67450982332229614,
0.67450982332229614), (0.680672287940979, 0.67843139171600342,
0.67843139171600342), (0.68487393856048584, 0.68235296010971069,
0.68235296010971069), (0.68907564878463745, 0.68627452850341797,
0.68627452850341797), (0.69327729940414429, 0.69019609689712524,
0.69019609689712524), (0.6974790096282959, 0.69411766529083252,
0.69411766529083252), (0.70168066024780273, 0.69803923368453979,
0.69803923368453979), (0.70588237047195435, 0.70196080207824707,
0.70196080207824707), (0.71008402109146118, 0.70588237047195435,
0.70588237047195435), (0.71428573131561279, 0.70980393886566162,
0.70980393886566162), (0.71848738193511963, 0.7137255072593689,
0.7137255072593689), (0.72268909215927124, 0.71764707565307617,
0.71764707565307617), (0.72689074277877808, 0.72549021244049072,
0.72549021244049072), (0.73109245300292969, 0.729411780834198,
0.729411780834198), (0.73529410362243652, 0.73333334922790527,
0.73333334922790527), (0.73949581384658813, 0.73725491762161255,
0.73725491762161255), (0.74369746446609497, 0.74117648601531982,
0.74117648601531982), (0.74789917469024658, 0.7450980544090271,
0.7450980544090271), (0.75210082530975342, 0.74901962280273438,
0.74901962280273438), (0.75630253553390503, 0.75294119119644165,
0.75294119119644165), (0.76050418615341187, 0.75686275959014893,
0.75686275959014893), (0.76470589637756348, 0.7607843279838562,
0.7607843279838562), (0.76890754699707031, 0.76470589637756348,
0.76470589637756348), (0.77310925722122192, 0.76862746477127075,
0.76862746477127075), (0.77731090784072876, 0.77254903316497803,
0.77254903316497803), (0.78151261806488037, 0.7764706015586853,
0.7764706015586853), (0.78571426868438721, 0.78039216995239258,
0.78039216995239258), (0.78991597890853882, 0.78823530673980713,
0.78823530673980713), (0.79411762952804565, 0.7921568751335144,
0.7921568751335144), (0.79831933975219727, 0.79607844352722168,
0.79607844352722168), (0.8025209903717041, 0.80000001192092896,
0.80000001192092896), (0.80672270059585571, 0.80392158031463623,
0.80392158031463623), (0.81092435121536255, 0.80784314870834351,
0.80784314870834351), (0.81512606143951416, 0.81176471710205078,
0.81176471710205078), (0.819327712059021, 0.81568628549575806,
0.81568628549575806), (0.82352942228317261, 0.81960785388946533,
0.81960785388946533), (0.82773107290267944, 0.82352942228317261,
0.82352942228317261), (0.83193278312683105, 0.82745099067687988,
0.82745099067687988), (0.83613443374633789, 0.83137255907058716,
0.83137255907058716), (0.8403361439704895, 0.83529412746429443,
0.83529412746429443), (0.84453779458999634, 0.83921569585800171,
0.83921569585800171), (0.84873950481414795, 0.84313726425170898,
0.84313726425170898), (0.85294115543365479, 0.85098040103912354,
0.85098040103912354), (0.8571428656578064, 0.85490196943283081,
0.85490196943283081), (0.86134451627731323, 0.85882353782653809,
0.85882353782653809), (0.86554622650146484, 0.86274510622024536,
0.86274510622024536), (0.86974787712097168, 0.86666667461395264,
0.86666667461395264), (0.87394958734512329, 0.87058824300765991,
0.87058824300765991), (0.87815123796463013, 0.87450981140136719,
0.87450981140136719), (0.88235294818878174, 0.87843137979507446,
0.87843137979507446), (0.88655459880828857, 0.88235294818878174,
0.88235294818878174), (0.89075630903244019, 0.88627451658248901,
0.88627451658248901), (0.89495795965194702, 0.89019608497619629,
0.89019608497619629), (0.89915966987609863, 0.89411765336990356,
0.89411765336990356), (0.90336132049560547, 0.89803922176361084,
0.89803922176361084), (0.90756303071975708, 0.90196079015731812,
0.90196079015731812), (0.91176468133926392, 0.90588235855102539,
0.90588235855102539), (0.91596639156341553, 0.91372549533843994,
0.91372549533843994), (0.92016804218292236, 0.91764706373214722,
0.91764706373214722), (0.92436975240707397, 0.92156863212585449,
0.92156863212585449), (0.92857140302658081, 0.92549020051956177,
0.92549020051956177), (0.93277311325073242, 0.92941176891326904,
0.92941176891326904), (0.93697476387023926, 0.93333333730697632,
0.93333333730697632), (0.94117647409439087, 0.93725490570068359,
0.93725490570068359), (0.94537812471389771, 0.94117647409439087,
0.94117647409439087), (0.94957983493804932, 0.94509804248809814,
0.94509804248809814), (0.95378148555755615, 0.94901961088180542,
0.94901961088180542), (0.95798319578170776, 0.9529411792755127,
0.9529411792755127), (0.9621848464012146, 0.95686274766921997,
0.95686274766921997), (0.96638655662536621, 0.96078431606292725,
0.96078431606292725), (0.97058820724487305, 0.96470588445663452,
0.96470588445663452), (0.97478991746902466, 0.9686274528503418,
0.9686274528503418), (0.97899156808853149, 0.97647058963775635,
0.97647058963775635), (0.98319327831268311, 0.98039215803146362,
0.98039215803146362), (0.98739492893218994, 0.9843137264251709,
0.9843137264251709), (0.99159663915634155, 0.98823529481887817,
0.98823529481887817), (0.99579828977584839, 0.99215686321258545,
0.99215686321258545), (1.0, 0.99607843160629272, 0.99607843160629272)],
'green': [(0.0, 0.0, 0.0), (0.0042016808874905109, 0.0039215688593685627,
0.0039215688593685627), (0.0084033617749810219, 0.0078431377187371254,
0.0078431377187371254), (0.012605042196810246, 0.011764706112444401,
0.011764706112444401), (0.016806723549962044, 0.015686275437474251,
0.015686275437474251), (0.021008403971791267, 0.019607843831181526,
0.019607843831181526), (0.025210084393620491, 0.023529412224888802,
0.023529412224888802), (0.029411764815449715, 0.027450980618596077,
0.027450980618596077), (0.033613447099924088, 0.035294119268655777,
0.035294119268655777), (0.037815127521753311, 0.039215687662363052,
0.039215687662363052), (0.042016807943582535, 0.043137256056070328,
0.043137256056070328), (0.046218488365411758, 0.047058824449777603,
0.047058824449777603), (0.050420168787240982, 0.050980392843484879,
0.050980392843484879), (0.054621849209070206, 0.054901961237192154,
0.054901961237192154), (0.058823529630899429, 0.058823529630899429,
0.058823529630899429), (0.063025213778018951, 0.062745101749897003,
0.062745101749897003), (0.067226894199848175, 0.066666670143604279,
0.066666670143604279), (0.071428574621677399, 0.070588238537311554,
0.070588238537311554), (0.075630255043506622, 0.074509806931018829,
0.074509806931018829), (0.079831935465335846, 0.078431375324726105,
0.078431375324726105), (0.08403361588716507, 0.08235294371843338,
0.08235294371843338), (0.088235296308994293, 0.086274512112140656,
0.086274512112140656), (0.092436976730823517, 0.090196080505847931,
0.090196080505847931), (0.09663865715265274, 0.098039217293262482,
0.098039217293262482), (0.10084033757448196, 0.10196078568696976,
0.10196078568696976), (0.10504201799631119, 0.10588235408067703,
0.10588235408067703), (0.10924369841814041, 0.10980392247438431,
0.10980392247438431), (0.11344537883996964, 0.11372549086809158,
0.11372549086809158), (0.11764705926179886, 0.11764705926179886,
0.11764705926179886), (0.12184873968362808, 0.12156862765550613,
0.12156862765550613), (0.1260504275560379, 0.12549020349979401,
0.12549020349979401), (0.13025210797786713, 0.12941177189350128,
0.12941177189350128), (0.13445378839969635, 0.13333334028720856,
0.13333334028720856), (0.13865546882152557, 0.13725490868091583,
0.13725490868091583), (0.1428571492433548, 0.14117647707462311,
0.14117647707462311), (0.14705882966518402, 0.14509804546833038,
0.14509804546833038), (0.15126051008701324, 0.14901961386203766,
0.14901961386203766), (0.15546219050884247, 0.15294118225574493,
0.15294118225574493), (0.15966387093067169, 0.16078431904315948,
0.16078431904315948), (0.16386555135250092, 0.16470588743686676,
0.16470588743686676), (0.16806723177433014, 0.16862745583057404,
0.16862745583057404), (0.17226891219615936, 0.17254902422428131,
0.17254902422428131), (0.17647059261798859, 0.17647059261798859,
0.17647059261798859), (0.18067227303981781, 0.18039216101169586,
0.18039216101169586), (0.18487395346164703, 0.18431372940540314,
0.18431372940540314), (0.18907563388347626, 0.18823529779911041,
0.18823529779911041), (0.19327731430530548, 0.19215686619281769,
0.19215686619281769), (0.1974789947271347, 0.19607843458652496,
0.19607843458652496), (0.20168067514896393, 0.20000000298023224,
0.20000000298023224), (0.20588235557079315, 0.20392157137393951,
0.20392157137393951), (0.21008403599262238, 0.20784313976764679,
0.20784313976764679), (0.2142857164144516, 0.21176470816135406,
0.21176470816135406), (0.21848739683628082, 0.21568627655506134,
0.21568627655506134), (0.22268907725811005, 0.22352941334247589,
0.22352941334247589), (0.22689075767993927, 0.22745098173618317,
0.22745098173618317), (0.23109243810176849, 0.23137255012989044,
0.23137255012989044), (0.23529411852359772, 0.23529411852359772,
0.23529411852359772), (0.23949579894542694, 0.23921568691730499,
0.23921568691730499), (0.24369747936725616, 0.24313725531101227,
0.24313725531101227), (0.24789915978908539, 0.24705882370471954,
0.24705882370471954), (0.25210085511207581, 0.25098040699958801,
0.25098040699958801), (0.25630253553390503, 0.25490197539329529,
0.25490197539329529), (0.26050421595573425, 0.25882354378700256,
0.25882354378700256), (0.26470589637756348, 0.26274511218070984,
0.26274511218070984), (0.2689075767993927, 0.26666668057441711,
0.26666668057441711), (0.27310925722122192, 0.27058824896812439,
0.27058824896812439), (0.27731093764305115, 0.27450981736183167,
0.27450981736183167), (0.28151261806488037, 0.27843138575553894,
0.27843138575553894), (0.28571429848670959, 0.28627452254295349,
0.28627452254295349), (0.28991597890853882, 0.29019609093666077,
0.29019609093666077), (0.29411765933036804, 0.29411765933036804,
0.29411765933036804), (0.29831933975219727, 0.29803922772407532,
0.29803922772407532), (0.30252102017402649, 0.30196079611778259,
0.30196079611778259), (0.30672270059585571, 0.30588236451148987,
0.30588236451148987), (0.31092438101768494, 0.30980393290519714,
0.30980393290519714), (0.31512606143951416, 0.31372550129890442,
0.31372550129890442), (0.31932774186134338, 0.31764706969261169,
0.31764706969261169), (0.32352942228317261, 0.32156863808631897,
0.32156863808631897), (0.32773110270500183, 0.32549020648002625,
0.32549020648002625), (0.33193278312683105, 0.32941177487373352,
0.32941177487373352), (0.33613446354866028, 0.3333333432674408,
0.3333333432674408), (0.3403361439704895, 0.33725491166114807,
0.33725491166114807), (0.34453782439231873, 0.34117648005485535,
0.34117648005485535), (0.34873950481414795, 0.3490196168422699,
0.3490196168422699), (0.35294118523597717, 0.35294118523597717,
0.35294118523597717), (0.3571428656578064, 0.35686275362968445,
0.35686275362968445), (0.36134454607963562, 0.36078432202339172,
0.36078432202339172), (0.36554622650146484, 0.364705890417099,
0.364705890417099), (0.36974790692329407, 0.36862745881080627,
0.36862745881080627), (0.37394958734512329, 0.37254902720451355,
0.37254902720451355), (0.37815126776695251, 0.37647059559822083,
0.37647059559822083), (0.38235294818878174, 0.3803921639919281,
0.3803921639919281), (0.38655462861061096, 0.38431373238563538,
0.38431373238563538), (0.39075630903244019, 0.38823530077934265,
0.38823530077934265), (0.39495798945426941, 0.39215686917304993,
0.39215686917304993), (0.39915966987609863, 0.3960784375667572,
0.3960784375667572), (0.40336135029792786, 0.40000000596046448,
0.40000000596046448), (0.40756303071975708, 0.40392157435417175,
0.40392157435417175), (0.4117647111415863, 0.4117647111415863,
0.4117647111415863), (0.41596639156341553, 0.41568627953529358,
0.41568627953529358), (0.42016807198524475, 0.41960784792900085,
0.41960784792900085), (0.42436975240707397, 0.42352941632270813,
0.42352941632270813), (0.4285714328289032, 0.42745098471641541,
0.42745098471641541), (0.43277311325073242, 0.43137255311012268,
0.43137255311012268), (0.43697479367256165, 0.43529412150382996,
0.43529412150382996), (0.44117647409439087, 0.43921568989753723,
0.43921568989753723), (0.44537815451622009, 0.44313725829124451,
0.44313725829124451), (0.44957983493804932, 0.44705882668495178,
0.44705882668495178), (0.45378151535987854, 0.45098039507865906,
0.45098039507865906), (0.45798319578170776, 0.45490196347236633,
0.45490196347236633), (0.46218487620353699, 0.45882353186607361,
0.45882353186607361), (0.46638655662536621, 0.46274510025978088,
0.46274510025978088), (0.47058823704719543, 0.46666666865348816,
0.46666666865348816), (0.47478991746902466, 0.47450980544090271,
0.47450980544090271), (0.47899159789085388, 0.47843137383460999,
0.47843137383460999), (0.48319327831268311, 0.48235294222831726,
0.48235294222831726), (0.48739495873451233, 0.48627451062202454,
0.48627451062202454), (0.49159663915634155, 0.49019607901573181,
0.49019607901573181), (0.49579831957817078, 0.49411764740943909,
0.49411764740943909), (0.5, 0.49803921580314636, 0.49803921580314636),
(0.50420171022415161, 0.50196081399917603, 0.50196081399917603),
(0.50840336084365845, 0.5058823823928833, 0.5058823823928833),
(0.51260507106781006, 0.50980395078659058, 0.50980395078659058),
(0.51680672168731689, 0.51372551918029785, 0.51372551918029785),
(0.52100843191146851, 0.51764708757400513, 0.51764708757400513),
(0.52521008253097534, 0.5215686559677124, 0.5215686559677124),
(0.52941179275512695, 0.52549022436141968, 0.52549022436141968),
(0.53361344337463379, 0.52941179275512695, 0.52941179275512695),
(0.5378151535987854, 0.5372549295425415, 0.5372549295425415),
(0.54201680421829224, 0.54117649793624878, 0.54117649793624878),
(0.54621851444244385, 0.54509806632995605, 0.54509806632995605),
(0.55042016506195068, 0.54901963472366333, 0.54901963472366333),
(0.55462187528610229, 0.55294120311737061, 0.55294120311737061),
(0.55882352590560913, 0.55686277151107788, 0.55686277151107788),
(0.56302523612976074, 0.56078433990478516, 0.56078433990478516),
(0.56722688674926758, 0.56470590829849243, 0.56470590829849243),
(0.57142859697341919, 0.56862747669219971, 0.56862747669219971),
(0.57563024759292603, 0.57254904508590698, 0.57254904508590698),
(0.57983195781707764, 0.57647061347961426, 0.57647061347961426),
(0.58403360843658447, 0.58039218187332153, 0.58039218187332153),
(0.58823531866073608, 0.58431375026702881, 0.58431375026702881),
(0.59243696928024292, 0.58823531866073608, 0.58823531866073608),
(0.59663867950439453, 0.59215688705444336, 0.59215688705444336),
(0.60084033012390137, 0.60000002384185791, 0.60000002384185791),
(0.60504204034805298, 0.60392159223556519, 0.60392159223556519),
(0.60924369096755981, 0.60784316062927246, 0.60784316062927246),
(0.61344540119171143, 0.61176472902297974, 0.61176472902297974),
(0.61764705181121826, 0.61568629741668701, 0.61568629741668701),
(0.62184876203536987, 0.61960786581039429, 0.61960786581039429),
(0.62605041265487671, 0.62352943420410156, 0.62352943420410156),
(0.63025212287902832, 0.62745100259780884, 0.62745100259780884),
(0.63445377349853516, 0.63137257099151611, 0.63137257099151611),
(0.63865548372268677, 0.63529413938522339, 0.63529413938522339),
(0.6428571343421936, 0.63921570777893066, 0.63921570777893066),
(0.64705884456634521, 0.64313727617263794, 0.64313727617263794),
(0.65126049518585205, 0.64705884456634521, 0.64705884456634521),
(0.65546220541000366, 0.65098041296005249, 0.65098041296005249),
(0.6596638560295105, 0.65490198135375977, 0.65490198135375977),
(0.66386556625366211, 0.66274511814117432, 0.66274511814117432),
(0.66806721687316895, 0.66666668653488159, 0.66666668653488159),
(0.67226892709732056, 0.67058825492858887, 0.67058825492858887),
(0.67647057771682739, 0.67450982332229614, 0.67450982332229614),
(0.680672287940979, 0.67843139171600342, 0.67843139171600342),
(0.68487393856048584, 0.68235296010971069, 0.68235296010971069),
(0.68907564878463745, 0.68627452850341797, 0.68627452850341797),
(0.69327729940414429, 0.69019609689712524, 0.69019609689712524),
(0.6974790096282959, 0.69411766529083252, 0.69411766529083252),
(0.70168066024780273, 0.69803923368453979, 0.69803923368453979),
(0.70588237047195435, 0.70196080207824707, 0.70196080207824707),
(0.71008402109146118, 0.70588237047195435, 0.70588237047195435),
(0.71428573131561279, 0.70980393886566162, 0.70980393886566162),
(0.71848738193511963, 0.7137255072593689, 0.7137255072593689),
(0.72268909215927124, 0.71764707565307617, 0.71764707565307617),
(0.72689074277877808, 0.72549021244049072, 0.72549021244049072),
(0.73109245300292969, 0.729411780834198, 0.729411780834198),
(0.73529410362243652, 0.73333334922790527, 0.73333334922790527),
(0.73949581384658813, 0.73725491762161255, 0.73725491762161255),
(0.74369746446609497, 0.74117648601531982, 0.74117648601531982),
(0.74789917469024658, 0.7450980544090271, 0.7450980544090271),
(0.75210082530975342, 0.74901962280273438, 0.74901962280273438),
(0.75630253553390503, 0.75294119119644165, 0.75294119119644165),
(0.76050418615341187, 0.75686275959014893, 0.75686275959014893),
(0.76470589637756348, 0.7607843279838562, 0.7607843279838562),
(0.76890754699707031, 0.76470589637756348, 0.76470589637756348),
(0.77310925722122192, 0.76862746477127075, 0.76862746477127075),
(0.77731090784072876, 0.77254903316497803, 0.77254903316497803),
(0.78151261806488037, 0.7764706015586853, 0.7764706015586853),
(0.78571426868438721, 0.78039216995239258, 0.78039216995239258),
(0.78991597890853882, 0.78823530673980713, 0.78823530673980713),
(0.79411762952804565, 0.7921568751335144, 0.7921568751335144),
(0.79831933975219727, 0.79607844352722168, 0.79607844352722168),
(0.8025209903717041, 0.80000001192092896, 0.80000001192092896),
(0.80672270059585571, 0.80392158031463623, 0.80392158031463623),
(0.81092435121536255, 0.80784314870834351, 0.80784314870834351),
(0.81512606143951416, 0.81176471710205078, 0.81176471710205078),
(0.819327712059021, 0.81568628549575806, 0.81568628549575806),
(0.82352942228317261, 0.81960785388946533, 0.81960785388946533),
(0.82773107290267944, 0.82352942228317261, 0.82352942228317261),
(0.83193278312683105, 0.82745099067687988, 0.82745099067687988),
(0.83613443374633789, 0.83137255907058716, 0.83137255907058716),
(0.8403361439704895, 0.83529412746429443, 0.83529412746429443),
(0.84453779458999634, 0.83921569585800171, 0.83921569585800171),
(0.84873950481414795, 0.84313726425170898, 0.84313726425170898),
(0.85294115543365479, 0.85098040103912354, 0.85098040103912354),
(0.8571428656578064, 0.85490196943283081, 0.85490196943283081),
(0.86134451627731323, 0.85882353782653809, 0.85882353782653809),
(0.86554622650146484, 0.86274510622024536, 0.86274510622024536),
(0.86974787712097168, 0.86666667461395264, 0.86666667461395264),
(0.87394958734512329, 0.87058824300765991, 0.87058824300765991),
(0.87815123796463013, 0.87450981140136719, 0.87450981140136719),
(0.88235294818878174, 0.87843137979507446, 0.87843137979507446),
(0.88655459880828857, 0.88235294818878174, 0.88235294818878174),
(0.89075630903244019, 0.88627451658248901, 0.88627451658248901),
(0.89495795965194702, 0.89019608497619629, 0.89019608497619629),
(0.89915966987609863, 0.89411765336990356, 0.89411765336990356),
(0.90336132049560547, 0.89803922176361084, 0.89803922176361084),
(0.90756303071975708, 0.90196079015731812, 0.90196079015731812),
(0.91176468133926392, 0.90588235855102539, 0.90588235855102539),
(0.91596639156341553, 0.91372549533843994, 0.91372549533843994),
(0.92016804218292236, 0.91764706373214722, 0.91764706373214722),
(0.92436975240707397, 0.92156863212585449, 0.92156863212585449),
(0.92857140302658081, 0.92549020051956177, 0.92549020051956177),
(0.93277311325073242, 0.92941176891326904, 0.92941176891326904),
(0.93697476387023926, 0.93333333730697632, 0.93333333730697632),
(0.94117647409439087, 0.93725490570068359, 0.93725490570068359),
(0.94537812471389771, 0.94117647409439087, 0.94117647409439087),
(0.94957983493804932, 0.94509804248809814, 0.94509804248809814),
(0.95378148555755615, 0.94901961088180542, 0.94901961088180542),
(0.95798319578170776, 0.9529411792755127, 0.9529411792755127),
(0.9621848464012146, 0.95686274766921997, 0.95686274766921997),
(0.96638655662536621, 0.96078431606292725, 0.96078431606292725),
(0.97058820724487305, 0.96470588445663452, 0.96470588445663452),
(0.97478991746902466, 0.9686274528503418, 0.9686274528503418),
(0.97899156808853149, 0.97647058963775635, 0.97647058963775635),
(0.98319327831268311, 0.98039215803146362, 0.98039215803146362),
(0.98739492893218994, 0.9843137264251709, 0.9843137264251709),
(0.99159663915634155, 0.98823529481887817, 0.98823529481887817),
(0.99579828977584839, 0.99215686321258545, 0.99215686321258545), (1.0,
0.99607843160629272, 0.99607843160629272)], 'red': [(0.0, 0.0, 0.0),
(0.0042016808874905109, 0.0039215688593685627, 0.0039215688593685627),
(0.0084033617749810219, 0.0078431377187371254, 0.0078431377187371254),
(0.012605042196810246, 0.011764706112444401, 0.011764706112444401),
(0.016806723549962044, 0.015686275437474251, 0.015686275437474251),
(0.021008403971791267, 0.019607843831181526, 0.019607843831181526),
(0.025210084393620491, 0.023529412224888802, 0.023529412224888802),
(0.029411764815449715, 0.027450980618596077, 0.027450980618596077),
(0.033613447099924088, 0.035294119268655777, 0.035294119268655777),
(0.037815127521753311, 0.039215687662363052, 0.039215687662363052),
(0.042016807943582535, 0.043137256056070328, 0.043137256056070328),
(0.046218488365411758, 0.047058824449777603, 0.047058824449777603),
(0.050420168787240982, 0.050980392843484879, 0.050980392843484879),
(0.054621849209070206, 0.054901961237192154, 0.054901961237192154),
(0.058823529630899429, 0.058823529630899429, 0.058823529630899429),
(0.063025213778018951, 0.062745101749897003, 0.062745101749897003),
(0.067226894199848175, 0.066666670143604279, 0.066666670143604279),
(0.071428574621677399, 0.070588238537311554, 0.070588238537311554),
(0.075630255043506622, 0.074509806931018829, 0.074509806931018829),
(0.079831935465335846, 0.078431375324726105, 0.078431375324726105),
(0.08403361588716507, 0.08235294371843338, 0.08235294371843338),
(0.088235296308994293, 0.086274512112140656, 0.086274512112140656),
(0.092436976730823517, 0.090196080505847931, 0.090196080505847931),
(0.09663865715265274, 0.098039217293262482, 0.098039217293262482),
(0.10084033757448196, 0.10196078568696976, 0.10196078568696976),
(0.10504201799631119, 0.10588235408067703, 0.10588235408067703),
(0.10924369841814041, 0.10980392247438431, 0.10980392247438431),
(0.11344537883996964, 0.11372549086809158, 0.11372549086809158),
(0.11764705926179886, 0.11764705926179886, 0.11764705926179886),
(0.12184873968362808, 0.12156862765550613, 0.12156862765550613),
(0.1260504275560379, 0.12549020349979401, 0.12549020349979401),
(0.13025210797786713, 0.12941177189350128, 0.12941177189350128),
(0.13445378839969635, 0.13333334028720856, 0.13333334028720856),
(0.13865546882152557, 0.13725490868091583, 0.13725490868091583),
(0.1428571492433548, 0.14117647707462311, 0.14117647707462311),
(0.14705882966518402, 0.14509804546833038, 0.14509804546833038),
(0.15126051008701324, 0.14901961386203766, 0.14901961386203766),
(0.15546219050884247, 0.15294118225574493, 0.15294118225574493),
(0.15966387093067169, 0.16078431904315948, 0.16078431904315948),
(0.16386555135250092, 0.16470588743686676, 0.16470588743686676),
(0.16806723177433014, 0.16862745583057404, 0.16862745583057404),
(0.17226891219615936, 0.17254902422428131, 0.17254902422428131),
(0.17647059261798859, 0.17647059261798859, 0.17647059261798859),
(0.18067227303981781, 0.18039216101169586, 0.18039216101169586),
(0.18487395346164703, 0.18431372940540314, 0.18431372940540314),
(0.18907563388347626, 0.18823529779911041, 0.18823529779911041),
(0.19327731430530548, 0.19215686619281769, 0.19215686619281769),
(0.1974789947271347, 0.19607843458652496, 0.19607843458652496),
(0.20168067514896393, 0.20000000298023224, 0.20000000298023224),
(0.20588235557079315, 0.20392157137393951, 0.20392157137393951),
(0.21008403599262238, 0.20784313976764679, 0.20784313976764679),
(0.2142857164144516, 0.21176470816135406, 0.21176470816135406),
(0.21848739683628082, 0.21568627655506134, 0.21568627655506134),
(0.22268907725811005, 0.22352941334247589, 0.22352941334247589),
(0.22689075767993927, 0.22745098173618317, 0.22745098173618317),
(0.23109243810176849, 0.23137255012989044, 0.23137255012989044),
(0.23529411852359772, 0.23529411852359772, 0.23529411852359772),
(0.23949579894542694, 0.23921568691730499, 0.23921568691730499),
(0.24369747936725616, 0.24313725531101227, 0.24313725531101227),
(0.24789915978908539, 0.24705882370471954, 0.24705882370471954),
(0.25210085511207581, 0.25098040699958801, 0.25098040699958801),
(0.25630253553390503, 0.25490197539329529, 0.25490197539329529),
(0.26050421595573425, 0.25882354378700256, 0.25882354378700256),
(0.26470589637756348, 0.26274511218070984, 0.26274511218070984),
(0.2689075767993927, 0.26666668057441711, 0.26666668057441711),
(0.27310925722122192, 0.27058824896812439, 0.27058824896812439),
(0.27731093764305115, 0.27450981736183167, 0.27450981736183167),
(0.28151261806488037, 0.27843138575553894, 0.27843138575553894),
(0.28571429848670959, 0.28627452254295349, 0.28627452254295349),
(0.28991597890853882, 0.29019609093666077, 0.29019609093666077),
(0.29411765933036804, 0.29411765933036804, 0.29411765933036804),
(0.29831933975219727, 0.29803922772407532, 0.29803922772407532),
(0.30252102017402649, 0.30196079611778259, 0.30196079611778259),
(0.30672270059585571, 0.30588236451148987, 0.30588236451148987),
(0.31092438101768494, 0.30980393290519714, 0.30980393290519714),
(0.31512606143951416, 0.31372550129890442, 0.31372550129890442),
(0.31932774186134338, 0.31764706969261169, 0.31764706969261169),
(0.32352942228317261, 0.32156863808631897, 0.32156863808631897),
(0.32773110270500183, 0.32549020648002625, 0.32549020648002625),
(0.33193278312683105, 0.32941177487373352, 0.32941177487373352),
(0.33613446354866028, 0.3333333432674408, 0.3333333432674408),
(0.3403361439704895, 0.33725491166114807, 0.33725491166114807),
(0.34453782439231873, 0.34117648005485535, 0.34117648005485535),
(0.34873950481414795, 0.3490196168422699, 0.3490196168422699),
(0.35294118523597717, 0.35294118523597717, 0.35294118523597717),
(0.3571428656578064, 0.35686275362968445, 0.35686275362968445),
(0.36134454607963562, 0.36078432202339172, 0.36078432202339172),
(0.36554622650146484, 0.364705890417099, 0.364705890417099),
(0.36974790692329407, 0.36862745881080627, 0.36862745881080627),
(0.37394958734512329, 0.37254902720451355, 0.37254902720451355),
(0.37815126776695251, 0.37647059559822083, 0.37647059559822083),
(0.38235294818878174, 0.3803921639919281, 0.3803921639919281),
(0.38655462861061096, 0.38431373238563538, 0.38431373238563538),
(0.39075630903244019, 0.38823530077934265, 0.38823530077934265),
(0.39495798945426941, 0.39215686917304993, 0.39215686917304993),
(0.39915966987609863, 0.3960784375667572, 0.3960784375667572),
(0.40336135029792786, 0.40000000596046448, 0.40000000596046448),
(0.40756303071975708, 0.40392157435417175, 0.40392157435417175),
(0.4117647111415863, 0.4117647111415863, 0.4117647111415863),
(0.41596639156341553, 0.41568627953529358, 0.41568627953529358),
(0.42016807198524475, 0.41960784792900085, 0.41960784792900085),
(0.42436975240707397, 0.42352941632270813, 0.42352941632270813),
(0.4285714328289032, 0.42745098471641541, 0.42745098471641541),
(0.43277311325073242, 0.43137255311012268, 0.43137255311012268),
(0.43697479367256165, 0.43529412150382996, 0.43529412150382996),
(0.44117647409439087, 0.43921568989753723, 0.43921568989753723),
(0.44537815451622009, 0.44313725829124451, 0.44313725829124451),
(0.44957983493804932, 0.44705882668495178, 0.44705882668495178),
(0.45378151535987854, 0.45098039507865906, 0.45098039507865906),
(0.45798319578170776, 0.45490196347236633, 0.45490196347236633),
(0.46218487620353699, 0.45882353186607361, 0.45882353186607361),
(0.46638655662536621, 0.46274510025978088, 0.46274510025978088),
(0.47058823704719543, 0.46666666865348816, 0.46666666865348816),
(0.47478991746902466, 0.47450980544090271, 0.47450980544090271),
(0.47899159789085388, 0.47843137383460999, 0.47843137383460999),
(0.48319327831268311, 0.48235294222831726, 0.48235294222831726),
(0.48739495873451233, 0.48627451062202454, 0.48627451062202454),
(0.49159663915634155, 0.49019607901573181, 0.49019607901573181),
(0.49579831957817078, 0.49411764740943909, 0.49411764740943909), (0.5,
0.49803921580314636, 0.49803921580314636), (0.50420171022415161,
0.50196081399917603, 0.50196081399917603), (0.50840336084365845,
0.5058823823928833, 0.5058823823928833), (0.51260507106781006,
0.50980395078659058, 0.50980395078659058), (0.51680672168731689,
0.51372551918029785, 0.51372551918029785), (0.52100843191146851,
0.51764708757400513, 0.51764708757400513), (0.52521008253097534,
0.5215686559677124, 0.5215686559677124), (0.52941179275512695,
0.52549022436141968, 0.52549022436141968), (0.53361344337463379,
0.52941179275512695, 0.52941179275512695), (0.5378151535987854,
0.5372549295425415, 0.5372549295425415), (0.54201680421829224,
0.54117649793624878, 0.54117649793624878), (0.54621851444244385,
0.54509806632995605, 0.54509806632995605), (0.55042016506195068,
0.54901963472366333, 0.54901963472366333), (0.55462187528610229,
0.55294120311737061, 0.55294120311737061), (0.55882352590560913,
0.55686277151107788, 0.55686277151107788), (0.56302523612976074,
0.56078433990478516, 0.56078433990478516), (0.56722688674926758,
0.56470590829849243, 0.56470590829849243), (0.57142859697341919,
0.56862747669219971, 0.56862747669219971), (0.57563024759292603,
0.57254904508590698, 0.57254904508590698), (0.57983195781707764,
0.57647061347961426, 0.57647061347961426), (0.58403360843658447,
0.58039218187332153, 0.58039218187332153), (0.58823531866073608,
0.58431375026702881, 0.58431375026702881), (0.59243696928024292,
0.58823531866073608, 0.58823531866073608), (0.59663867950439453,
0.59215688705444336, 0.59215688705444336), (0.60084033012390137,
0.60000002384185791, 0.60000002384185791), (0.60504204034805298,
0.60392159223556519, 0.60392159223556519), (0.60924369096755981,
0.60784316062927246, 0.60784316062927246), (0.61344540119171143,
0.61176472902297974, 0.61176472902297974), (0.61764705181121826,
0.61568629741668701, 0.61568629741668701), (0.62184876203536987,
0.61960786581039429, 0.61960786581039429), (0.62605041265487671,
0.62352943420410156, 0.62352943420410156), (0.63025212287902832,
0.62745100259780884, 0.62745100259780884), (0.63445377349853516,
0.63137257099151611, 0.63137257099151611), (0.63865548372268677,
0.63529413938522339, 0.63529413938522339), (0.6428571343421936,
0.63921570777893066, 0.63921570777893066), (0.64705884456634521,
0.64313727617263794, 0.64313727617263794), (0.65126049518585205,
0.64705884456634521, 0.64705884456634521), (0.65546220541000366,
0.65098041296005249, 0.65098041296005249), (0.6596638560295105,
0.65490198135375977, 0.65490198135375977), (0.66386556625366211,
0.66274511814117432, 0.66274511814117432), (0.66806721687316895,
0.66666668653488159, 0.66666668653488159), (0.67226892709732056,
0.67058825492858887, 0.67058825492858887), (0.67647057771682739,
0.67450982332229614, 0.67450982332229614), (0.680672287940979,
0.67843139171600342, 0.67843139171600342), (0.68487393856048584,
0.68235296010971069, 0.68235296010971069), (0.68907564878463745,
0.68627452850341797, 0.68627452850341797), (0.69327729940414429,
0.69019609689712524, 0.69019609689712524), (0.6974790096282959,
0.69411766529083252, 0.69411766529083252), (0.70168066024780273,
0.69803923368453979, 0.69803923368453979), (0.70588237047195435,
0.70196080207824707, 0.70196080207824707), (0.71008402109146118,
0.70588237047195435, 0.70588237047195435), (0.71428573131561279,
0.70980393886566162, 0.70980393886566162), (0.71848738193511963,
0.7137255072593689, 0.7137255072593689), (0.72268909215927124,
0.71764707565307617, 0.71764707565307617), (0.72689074277877808,
0.72549021244049072, 0.72549021244049072), (0.73109245300292969,
0.729411780834198, 0.729411780834198), (0.73529410362243652,
0.73333334922790527, 0.73333334922790527), (0.73949581384658813,
0.73725491762161255, 0.73725491762161255), (0.74369746446609497,
0.74117648601531982, 0.74117648601531982), (0.74789917469024658,
0.7450980544090271, 0.7450980544090271), (0.75210082530975342,
0.74901962280273438, 0.74901962280273438), (0.75630253553390503,
0.75294119119644165, 0.75294119119644165), (0.76050418615341187,
0.75686275959014893, 0.75686275959014893), (0.76470589637756348,
0.7607843279838562, 0.7607843279838562), (0.76890754699707031,
0.76470589637756348, 0.76470589637756348), (0.77310925722122192,
0.76862746477127075, 0.76862746477127075), (0.77731090784072876,
0.77254903316497803, 0.77254903316497803), (0.78151261806488037,
0.7764706015586853, 0.7764706015586853), (0.78571426868438721,
0.78039216995239258, 0.78039216995239258), (0.78991597890853882,
0.78823530673980713, 0.78823530673980713), (0.79411762952804565,
0.7921568751335144, 0.7921568751335144), (0.79831933975219727,
0.79607844352722168, 0.79607844352722168), (0.8025209903717041,
0.80000001192092896, 0.80000001192092896), (0.80672270059585571,
0.80392158031463623, 0.80392158031463623), (0.81092435121536255,
0.80784314870834351, 0.80784314870834351), (0.81512606143951416,
0.81176471710205078, 0.81176471710205078), (0.819327712059021,
0.81568628549575806, 0.81568628549575806), (0.82352942228317261,
0.81960785388946533, 0.81960785388946533), (0.82773107290267944,
0.82352942228317261, 0.82352942228317261), (0.83193278312683105,
0.82745099067687988, 0.82745099067687988), (0.83613443374633789,
0.83137255907058716, 0.83137255907058716), (0.8403361439704895,
0.83529412746429443, 0.83529412746429443), (0.84453779458999634,
0.83921569585800171, 0.83921569585800171), (0.84873950481414795,
0.84313726425170898, 0.84313726425170898), (0.85294115543365479,
0.85098040103912354, 0.85098040103912354), (0.8571428656578064,
0.85490196943283081, 0.85490196943283081), (0.86134451627731323,
0.85882353782653809, 0.85882353782653809), (0.86554622650146484,
0.86274510622024536, 0.86274510622024536), (0.86974787712097168,
0.86666667461395264, 0.86666667461395264), (0.87394958734512329,
0.87058824300765991, 0.87058824300765991), (0.87815123796463013,
0.87450981140136719, 0.87450981140136719), (0.88235294818878174,
0.87843137979507446, 0.87843137979507446), (0.88655459880828857,
0.88235294818878174, 0.88235294818878174), (0.89075630903244019,
0.88627451658248901, 0.88627451658248901), (0.89495795965194702,
0.89019608497619629, 0.89019608497619629), (0.89915966987609863,
0.89411765336990356, 0.89411765336990356), (0.90336132049560547,
0.89803922176361084, 0.89803922176361084), (0.90756303071975708,
0.90196079015731812, 0.90196079015731812), (0.91176468133926392,
0.90588235855102539, 0.90588235855102539), (0.91596639156341553,
0.91372549533843994, 0.91372549533843994), (0.92016804218292236,
0.91764706373214722, 0.91764706373214722), (0.92436975240707397,
0.92156863212585449, 0.92156863212585449), (0.92857140302658081,
0.92549020051956177, 0.92549020051956177), (0.93277311325073242,
0.92941176891326904, 0.92941176891326904), (0.93697476387023926,
0.93333333730697632, 0.93333333730697632), (0.94117647409439087,
0.93725490570068359, 0.93725490570068359), (0.94537812471389771,
0.94117647409439087, 0.94117647409439087), (0.94957983493804932,
0.94509804248809814, 0.94509804248809814), (0.95378148555755615,
0.94901961088180542, 0.94901961088180542), (0.95798319578170776,
0.9529411792755127, 0.9529411792755127), (0.9621848464012146,
0.95686274766921997, 0.95686274766921997), (0.96638655662536621,
0.96078431606292725, 0.96078431606292725), (0.97058820724487305,
0.96470588445663452, 0.96470588445663452), (0.97478991746902466,
0.9686274528503418, 0.9686274528503418), (0.97899156808853149,
0.97647058963775635, 0.97647058963775635), (0.98319327831268311,
0.98039215803146362, 0.98039215803146362), (0.98739492893218994,
0.9843137264251709, 0.9843137264251709), (0.99159663915634155,
0.98823529481887817, 0.98823529481887817), (0.99579828977584839,
0.99215686321258545, 0.99215686321258545), (1.0, 0.99607843160629272,
0.99607843160629272)]}
_gist_heat_data = {'blue': [(0.0, 0.0, 0.0),
(0.0042016808874905109, 0.0, 0.0), (0.0084033617749810219, 0.0, 0.0),
(0.012605042196810246, 0.0, 0.0), (0.016806723549962044, 0.0, 0.0),
(0.021008403971791267, 0.0, 0.0), (0.025210084393620491, 0.0, 0.0),
(0.029411764815449715, 0.0, 0.0), (0.033613447099924088, 0.0, 0.0),
(0.037815127521753311, 0.0, 0.0), (0.042016807943582535, 0.0, 0.0),
(0.046218488365411758, 0.0, 0.0), (0.050420168787240982, 0.0, 0.0),
(0.054621849209070206, 0.0, 0.0), (0.058823529630899429, 0.0, 0.0),
(0.063025213778018951, 0.0, 0.0), (0.067226894199848175, 0.0, 0.0),
(0.071428574621677399, 0.0, 0.0), (0.075630255043506622, 0.0, 0.0),
(0.079831935465335846, 0.0, 0.0), (0.08403361588716507, 0.0, 0.0),
(0.088235296308994293, 0.0, 0.0), (0.092436976730823517, 0.0, 0.0),
(0.09663865715265274, 0.0, 0.0), (0.10084033757448196, 0.0, 0.0),
(0.10504201799631119, 0.0, 0.0), (0.10924369841814041, 0.0, 0.0),
(0.11344537883996964, 0.0, 0.0), (0.11764705926179886, 0.0, 0.0),
(0.12184873968362808, 0.0, 0.0), (0.1260504275560379, 0.0, 0.0),
(0.13025210797786713, 0.0, 0.0), (0.13445378839969635, 0.0, 0.0),
(0.13865546882152557, 0.0, 0.0), (0.1428571492433548, 0.0, 0.0),
(0.14705882966518402, 0.0, 0.0), (0.15126051008701324, 0.0, 0.0),
(0.15546219050884247, 0.0, 0.0), (0.15966387093067169, 0.0, 0.0),
(0.16386555135250092, 0.0, 0.0), (0.16806723177433014, 0.0, 0.0),
(0.17226891219615936, 0.0, 0.0), (0.17647059261798859, 0.0, 0.0),
(0.18067227303981781, 0.0, 0.0), (0.18487395346164703, 0.0, 0.0),
(0.18907563388347626, 0.0, 0.0), (0.19327731430530548, 0.0, 0.0),
(0.1974789947271347, 0.0, 0.0), (0.20168067514896393, 0.0, 0.0),
(0.20588235557079315, 0.0, 0.0), (0.21008403599262238, 0.0, 0.0),
(0.2142857164144516, 0.0, 0.0), (0.21848739683628082, 0.0, 0.0),
(0.22268907725811005, 0.0, 0.0), (0.22689075767993927, 0.0, 0.0),
(0.23109243810176849, 0.0, 0.0), (0.23529411852359772, 0.0, 0.0),
(0.23949579894542694, 0.0, 0.0), (0.24369747936725616, 0.0, 0.0),
(0.24789915978908539, 0.0, 0.0), (0.25210085511207581, 0.0, 0.0),
(0.25630253553390503, 0.0, 0.0), (0.26050421595573425, 0.0, 0.0),
(0.26470589637756348, 0.0, 0.0), (0.2689075767993927, 0.0, 0.0),
(0.27310925722122192, 0.0, 0.0), (0.27731093764305115, 0.0, 0.0),
(0.28151261806488037, 0.0, 0.0), (0.28571429848670959, 0.0, 0.0),
(0.28991597890853882, 0.0, 0.0), (0.29411765933036804, 0.0, 0.0),
(0.29831933975219727, 0.0, 0.0), (0.30252102017402649, 0.0, 0.0),
(0.30672270059585571, 0.0, 0.0), (0.31092438101768494, 0.0, 0.0),
(0.31512606143951416, 0.0, 0.0), (0.31932774186134338, 0.0, 0.0),
(0.32352942228317261, 0.0, 0.0), (0.32773110270500183, 0.0, 0.0),
(0.33193278312683105, 0.0, 0.0), (0.33613446354866028, 0.0, 0.0),
(0.3403361439704895, 0.0, 0.0), (0.34453782439231873, 0.0, 0.0),
(0.34873950481414795, 0.0, 0.0), (0.35294118523597717, 0.0, 0.0),
(0.3571428656578064, 0.0, 0.0), (0.36134454607963562, 0.0, 0.0),
(0.36554622650146484, 0.0, 0.0), (0.36974790692329407, 0.0, 0.0),
(0.37394958734512329, 0.0, 0.0), (0.37815126776695251, 0.0, 0.0),
(0.38235294818878174, 0.0, 0.0), (0.38655462861061096, 0.0, 0.0),
(0.39075630903244019, 0.0, 0.0), (0.39495798945426941, 0.0, 0.0),
(0.39915966987609863, 0.0, 0.0), (0.40336135029792786, 0.0, 0.0),
(0.40756303071975708, 0.0, 0.0), (0.4117647111415863, 0.0, 0.0),
(0.41596639156341553, 0.0, 0.0), (0.42016807198524475, 0.0, 0.0),
(0.42436975240707397, 0.0, 0.0), (0.4285714328289032, 0.0, 0.0),
(0.43277311325073242, 0.0, 0.0), (0.43697479367256165, 0.0, 0.0),
(0.44117647409439087, 0.0, 0.0), (0.44537815451622009, 0.0, 0.0),
(0.44957983493804932, 0.0, 0.0), (0.45378151535987854, 0.0, 0.0),
(0.45798319578170776, 0.0, 0.0), (0.46218487620353699, 0.0, 0.0),
(0.46638655662536621, 0.0, 0.0), (0.47058823704719543, 0.0, 0.0),
(0.47478991746902466, 0.0, 0.0), (0.47899159789085388, 0.0, 0.0),
(0.48319327831268311, 0.0, 0.0), (0.48739495873451233, 0.0, 0.0),
(0.49159663915634155, 0.0, 0.0), (0.49579831957817078, 0.0, 0.0), (0.5,
0.0, 0.0), (0.50420171022415161, 0.0, 0.0), (0.50840336084365845, 0.0,
0.0), (0.51260507106781006, 0.0, 0.0), (0.51680672168731689, 0.0, 0.0),
(0.52100843191146851, 0.0, 0.0), (0.52521008253097534, 0.0, 0.0),
(0.52941179275512695, 0.0, 0.0), (0.53361344337463379, 0.0, 0.0),
(0.5378151535987854, 0.0, 0.0), (0.54201680421829224, 0.0, 0.0),
(0.54621851444244385, 0.0, 0.0), (0.55042016506195068, 0.0, 0.0),
(0.55462187528610229, 0.0, 0.0), (0.55882352590560913, 0.0, 0.0),
(0.56302523612976074, 0.0, 0.0), (0.56722688674926758, 0.0, 0.0),
(0.57142859697341919, 0.0, 0.0), (0.57563024759292603, 0.0, 0.0),
(0.57983195781707764, 0.0, 0.0), (0.58403360843658447, 0.0, 0.0),
(0.58823531866073608, 0.0, 0.0), (0.59243696928024292, 0.0, 0.0),
(0.59663867950439453, 0.0, 0.0), (0.60084033012390137, 0.0, 0.0),
(0.60504204034805298, 0.0, 0.0), (0.60924369096755981, 0.0, 0.0),
(0.61344540119171143, 0.0, 0.0), (0.61764705181121826, 0.0, 0.0),
(0.62184876203536987, 0.0, 0.0), (0.62605041265487671, 0.0, 0.0),
(0.63025212287902832, 0.0, 0.0), (0.63445377349853516, 0.0, 0.0),
(0.63865548372268677, 0.0, 0.0), (0.6428571343421936, 0.0, 0.0),
(0.64705884456634521, 0.0, 0.0), (0.65126049518585205, 0.0, 0.0),
(0.65546220541000366, 0.0, 0.0), (0.6596638560295105, 0.0, 0.0),
(0.66386556625366211, 0.0, 0.0), (0.66806721687316895, 0.0, 0.0),
(0.67226892709732056, 0.0, 0.0), (0.67647057771682739, 0.0, 0.0),
(0.680672287940979, 0.0, 0.0), (0.68487393856048584, 0.0, 0.0),
(0.68907564878463745, 0.0, 0.0), (0.69327729940414429, 0.0, 0.0),
(0.6974790096282959, 0.0, 0.0), (0.70168066024780273, 0.0, 0.0),
(0.70588237047195435, 0.0, 0.0), (0.71008402109146118, 0.0, 0.0),
(0.71428573131561279, 0.0, 0.0), (0.71848738193511963, 0.0, 0.0),
(0.72268909215927124, 0.0, 0.0), (0.72689074277877808, 0.0, 0.0),
(0.73109245300292969, 0.0, 0.0), (0.73529410362243652, 0.0, 0.0),
(0.73949581384658813, 0.0, 0.0), (0.74369746446609497, 0.0, 0.0),
(0.74789917469024658, 0.0, 0.0), (0.75210082530975342, 0.0, 0.0),
(0.75630253553390503, 0.027450980618596077, 0.027450980618596077),
(0.76050418615341187, 0.043137256056070328, 0.043137256056070328),
(0.76470589637756348, 0.058823529630899429, 0.058823529630899429),
(0.76890754699707031, 0.074509806931018829, 0.074509806931018829),
(0.77310925722122192, 0.090196080505847931, 0.090196080505847931),
(0.77731090784072876, 0.10588235408067703, 0.10588235408067703),
(0.78151261806488037, 0.12156862765550613, 0.12156862765550613),
(0.78571426868438721, 0.13725490868091583, 0.13725490868091583),
(0.78991597890853882, 0.15294118225574493, 0.15294118225574493),
(0.79411762952804565, 0.16862745583057404, 0.16862745583057404),
(0.79831933975219727, 0.20000000298023224, 0.20000000298023224),
(0.8025209903717041, 0.21176470816135406, 0.21176470816135406),
(0.80672270059585571, 0.22745098173618317, 0.22745098173618317),
(0.81092435121536255, 0.24313725531101227, 0.24313725531101227),
(0.81512606143951416, 0.25882354378700256, 0.25882354378700256),
(0.819327712059021, 0.27450981736183167, 0.27450981736183167),
(0.82352942228317261, 0.29019609093666077, 0.29019609093666077),
(0.82773107290267944, 0.30588236451148987, 0.30588236451148987),
(0.83193278312683105, 0.32156863808631897, 0.32156863808631897),
(0.83613443374633789, 0.33725491166114807, 0.33725491166114807),
(0.8403361439704895, 0.35294118523597717, 0.35294118523597717),
(0.84453779458999634, 0.36862745881080627, 0.36862745881080627),
(0.84873950481414795, 0.38431373238563538, 0.38431373238563538),
(0.85294115543365479, 0.40000000596046448, 0.40000000596046448),
(0.8571428656578064, 0.4117647111415863, 0.4117647111415863),
(0.86134451627731323, 0.42745098471641541, 0.42745098471641541),
(0.86554622650146484, 0.44313725829124451, 0.44313725829124451),
(0.86974787712097168, 0.45882353186607361, 0.45882353186607361),
(0.87394958734512329, 0.47450980544090271, 0.47450980544090271),
(0.87815123796463013, 0.49019607901573181, 0.49019607901573181),
(0.88235294818878174, 0.5215686559677124, 0.5215686559677124),
(0.88655459880828857, 0.5372549295425415, 0.5372549295425415),
(0.89075630903244019, 0.55294120311737061, 0.55294120311737061),
(0.89495795965194702, 0.56862747669219971, 0.56862747669219971),
(0.89915966987609863, 0.58431375026702881, 0.58431375026702881),
(0.90336132049560547, 0.60000002384185791, 0.60000002384185791),
(0.90756303071975708, 0.61176472902297974, 0.61176472902297974),
(0.91176468133926392, 0.62745100259780884, 0.62745100259780884),
(0.91596639156341553, 0.64313727617263794, 0.64313727617263794),
(0.92016804218292236, 0.65882354974746704, 0.65882354974746704),
(0.92436975240707397, 0.67450982332229614, 0.67450982332229614),
(0.92857140302658081, 0.69019609689712524, 0.69019609689712524),
(0.93277311325073242, 0.70588237047195435, 0.70588237047195435),
(0.93697476387023926, 0.72156864404678345, 0.72156864404678345),
(0.94117647409439087, 0.73725491762161255, 0.73725491762161255),
(0.94537812471389771, 0.75294119119644165, 0.75294119119644165),
(0.94957983493804932, 0.76862746477127075, 0.76862746477127075),
(0.95378148555755615, 0.78431373834609985, 0.78431373834609985),
(0.95798319578170776, 0.80000001192092896, 0.80000001192092896),
(0.9621848464012146, 0.81176471710205078, 0.81176471710205078),
(0.96638655662536621, 0.84313726425170898, 0.84313726425170898),
(0.97058820724487305, 0.85882353782653809, 0.85882353782653809),
(0.97478991746902466, 0.87450981140136719, 0.87450981140136719),
(0.97899156808853149, 0.89019608497619629, 0.89019608497619629),
(0.98319327831268311, 0.90588235855102539, 0.90588235855102539),
(0.98739492893218994, 0.92156863212585449, 0.92156863212585449),
(0.99159663915634155, 0.93725490570068359, 0.93725490570068359),
(0.99579828977584839, 0.9529411792755127, 0.9529411792755127), (1.0,
0.9686274528503418, 0.9686274528503418)], 'green': [(0.0, 0.0, 0.0),
(0.0042016808874905109, 0.0, 0.0), (0.0084033617749810219, 0.0, 0.0),
(0.012605042196810246, 0.0, 0.0), (0.016806723549962044, 0.0, 0.0),
(0.021008403971791267, 0.0, 0.0), (0.025210084393620491, 0.0, 0.0),
(0.029411764815449715, 0.0, 0.0), (0.033613447099924088, 0.0, 0.0),
(0.037815127521753311, 0.0, 0.0), (0.042016807943582535, 0.0, 0.0),
(0.046218488365411758, 0.0, 0.0), (0.050420168787240982, 0.0, 0.0),
(0.054621849209070206, 0.0, 0.0), (0.058823529630899429, 0.0, 0.0),
(0.063025213778018951, 0.0, 0.0), (0.067226894199848175, 0.0, 0.0),
(0.071428574621677399, 0.0, 0.0), (0.075630255043506622, 0.0, 0.0),
(0.079831935465335846, 0.0, 0.0), (0.08403361588716507, 0.0, 0.0),
(0.088235296308994293, 0.0, 0.0), (0.092436976730823517, 0.0, 0.0),
(0.09663865715265274, 0.0, 0.0), (0.10084033757448196, 0.0, 0.0),
(0.10504201799631119, 0.0, 0.0), (0.10924369841814041, 0.0, 0.0),
(0.11344537883996964, 0.0, 0.0), (0.11764705926179886, 0.0, 0.0),
(0.12184873968362808, 0.0, 0.0), (0.1260504275560379, 0.0, 0.0),
(0.13025210797786713, 0.0, 0.0), (0.13445378839969635, 0.0, 0.0),
(0.13865546882152557, 0.0, 0.0), (0.1428571492433548, 0.0, 0.0),
(0.14705882966518402, 0.0, 0.0), (0.15126051008701324, 0.0, 0.0),
(0.15546219050884247, 0.0, 0.0), (0.15966387093067169, 0.0, 0.0),
(0.16386555135250092, 0.0, 0.0), (0.16806723177433014, 0.0, 0.0),
(0.17226891219615936, 0.0, 0.0), (0.17647059261798859, 0.0, 0.0),
(0.18067227303981781, 0.0, 0.0), (0.18487395346164703, 0.0, 0.0),
(0.18907563388347626, 0.0, 0.0), (0.19327731430530548, 0.0, 0.0),
(0.1974789947271347, 0.0, 0.0), (0.20168067514896393, 0.0, 0.0),
(0.20588235557079315, 0.0, 0.0), (0.21008403599262238, 0.0, 0.0),
(0.2142857164144516, 0.0, 0.0), (0.21848739683628082, 0.0, 0.0),
(0.22268907725811005, 0.0, 0.0), (0.22689075767993927, 0.0, 0.0),
(0.23109243810176849, 0.0, 0.0), (0.23529411852359772, 0.0, 0.0),
(0.23949579894542694, 0.0, 0.0), (0.24369747936725616, 0.0, 0.0),
(0.24789915978908539, 0.0, 0.0), (0.25210085511207581, 0.0, 0.0),
(0.25630253553390503, 0.0, 0.0), (0.26050421595573425, 0.0, 0.0),
(0.26470589637756348, 0.0, 0.0), (0.2689075767993927, 0.0, 0.0),
(0.27310925722122192, 0.0, 0.0), (0.27731093764305115, 0.0, 0.0),
(0.28151261806488037, 0.0, 0.0), (0.28571429848670959, 0.0, 0.0),
(0.28991597890853882, 0.0, 0.0), (0.29411765933036804, 0.0, 0.0),
(0.29831933975219727, 0.0, 0.0), (0.30252102017402649, 0.0, 0.0),
(0.30672270059585571, 0.0, 0.0), (0.31092438101768494, 0.0, 0.0),
(0.31512606143951416, 0.0, 0.0), (0.31932774186134338, 0.0, 0.0),
(0.32352942228317261, 0.0, 0.0), (0.32773110270500183, 0.0, 0.0),
(0.33193278312683105, 0.0, 0.0), (0.33613446354866028, 0.0, 0.0),
(0.3403361439704895, 0.0, 0.0), (0.34453782439231873, 0.0, 0.0),
(0.34873950481414795, 0.0, 0.0), (0.35294118523597717, 0.0, 0.0),
(0.3571428656578064, 0.0, 0.0), (0.36134454607963562, 0.0, 0.0),
(0.36554622650146484, 0.0, 0.0), (0.36974790692329407, 0.0, 0.0),
(0.37394958734512329, 0.0, 0.0), (0.37815126776695251, 0.0, 0.0),
(0.38235294818878174, 0.0, 0.0), (0.38655462861061096, 0.0, 0.0),
(0.39075630903244019, 0.0, 0.0), (0.39495798945426941, 0.0, 0.0),
(0.39915966987609863, 0.0, 0.0), (0.40336135029792786, 0.0, 0.0),
(0.40756303071975708, 0.0, 0.0), (0.4117647111415863, 0.0, 0.0),
(0.41596639156341553, 0.0, 0.0), (0.42016807198524475, 0.0, 0.0),
(0.42436975240707397, 0.0, 0.0), (0.4285714328289032, 0.0, 0.0),
(0.43277311325073242, 0.0, 0.0), (0.43697479367256165, 0.0, 0.0),
(0.44117647409439087, 0.0, 0.0), (0.44537815451622009, 0.0, 0.0),
(0.44957983493804932, 0.0, 0.0), (0.45378151535987854, 0.0, 0.0),
(0.45798319578170776, 0.0, 0.0), (0.46218487620353699, 0.0, 0.0),
(0.46638655662536621, 0.0, 0.0), (0.47058823704719543, 0.0, 0.0),
(0.47478991746902466, 0.0, 0.0), (0.47899159789085388,
0.0039215688593685627, 0.0039215688593685627), (0.48319327831268311,
0.011764706112444401, 0.011764706112444401), (0.48739495873451233,
0.019607843831181526, 0.019607843831181526), (0.49159663915634155,
0.027450980618596077, 0.027450980618596077), (0.49579831957817078,
0.035294119268655777, 0.035294119268655777), (0.5, 0.043137256056070328,
0.043137256056070328), (0.50420171022415161, 0.058823529630899429,
0.058823529630899429), (0.50840336084365845, 0.066666670143604279,
0.066666670143604279), (0.51260507106781006, 0.070588238537311554,
0.070588238537311554), (0.51680672168731689, 0.078431375324726105,
0.078431375324726105), (0.52100843191146851, 0.086274512112140656,
0.086274512112140656), (0.52521008253097534, 0.094117648899555206,
0.094117648899555206), (0.52941179275512695, 0.10196078568696976,
0.10196078568696976), (0.53361344337463379, 0.10980392247438431,
0.10980392247438431), (0.5378151535987854, 0.11764705926179886,
0.11764705926179886), (0.54201680421829224, 0.12549020349979401,
0.12549020349979401), (0.54621851444244385, 0.13725490868091583,
0.13725490868091583), (0.55042016506195068, 0.14509804546833038,
0.14509804546833038), (0.55462187528610229, 0.15294118225574493,
0.15294118225574493), (0.55882352590560913, 0.16078431904315948,
0.16078431904315948), (0.56302523612976074, 0.16862745583057404,
0.16862745583057404), (0.56722688674926758, 0.17647059261798859,
0.17647059261798859), (0.57142859697341919, 0.18431372940540314,
0.18431372940540314), (0.57563024759292603, 0.19215686619281769,
0.19215686619281769), (0.57983195781707764, 0.20000000298023224,
0.20000000298023224), (0.58403360843658447, 0.20392157137393951,
0.20392157137393951), (0.58823531866073608, 0.21176470816135406,
0.21176470816135406), (0.59243696928024292, 0.21960784494876862,
0.21960784494876862), (0.59663867950439453, 0.22745098173618317,
0.22745098173618317), (0.60084033012390137, 0.23529411852359772,
0.23529411852359772), (0.60504204034805298, 0.24313725531101227,
0.24313725531101227), (0.60924369096755981, 0.25098040699958801,
0.25098040699958801), (0.61344540119171143, 0.25882354378700256,
0.25882354378700256), (0.61764705181121826, 0.26666668057441711,
0.26666668057441711), (0.62184876203536987, 0.27058824896812439,
0.27058824896812439), (0.62605041265487671, 0.27843138575553894,
0.27843138575553894), (0.63025212287902832, 0.29411765933036804,
0.29411765933036804), (0.63445377349853516, 0.30196079611778259,
0.30196079611778259), (0.63865548372268677, 0.30980393290519714,
0.30980393290519714), (0.6428571343421936, 0.31764706969261169,
0.31764706969261169), (0.64705884456634521, 0.32549020648002625,
0.32549020648002625), (0.65126049518585205, 0.3333333432674408,
0.3333333432674408), (0.65546220541000366, 0.33725491166114807,
0.33725491166114807), (0.6596638560295105, 0.34509804844856262,
0.34509804844856262), (0.66386556625366211, 0.35294118523597717,
0.35294118523597717), (0.66806721687316895, 0.36078432202339172,
0.36078432202339172), (0.67226892709732056, 0.36862745881080627,
0.36862745881080627), (0.67647057771682739, 0.37647059559822083,
0.37647059559822083), (0.680672287940979, 0.38431373238563538,
0.38431373238563538), (0.68487393856048584, 0.39215686917304993,
0.39215686917304993), (0.68907564878463745, 0.40000000596046448,
0.40000000596046448), (0.69327729940414429, 0.40392157435417175,
0.40392157435417175), (0.6974790096282959, 0.4117647111415863,
0.4117647111415863), (0.70168066024780273, 0.41960784792900085,
0.41960784792900085), (0.70588237047195435, 0.42745098471641541,
0.42745098471641541), (0.71008402109146118, 0.43529412150382996,
0.43529412150382996), (0.71428573131561279, 0.45098039507865906,
0.45098039507865906), (0.71848738193511963, 0.45882353186607361,
0.45882353186607361), (0.72268909215927124, 0.46666666865348816,
0.46666666865348816), (0.72689074277877808, 0.47058823704719543,
0.47058823704719543), (0.73109245300292969, 0.47843137383460999,
0.47843137383460999), (0.73529410362243652, 0.48627451062202454,
0.48627451062202454), (0.73949581384658813, 0.49411764740943909,
0.49411764740943909), (0.74369746446609497, 0.50196081399917603,
0.50196081399917603), (0.74789917469024658, 0.50980395078659058,
0.50980395078659058), (0.75210082530975342, 0.51764708757400513,
0.51764708757400513), (0.75630253553390503, 0.53333336114883423,
0.53333336114883423), (0.76050418615341187, 0.5372549295425415,
0.5372549295425415), (0.76470589637756348, 0.54509806632995605,
0.54509806632995605), (0.76890754699707031, 0.55294120311737061,
0.55294120311737061), (0.77310925722122192, 0.56078433990478516,
0.56078433990478516), (0.77731090784072876, 0.56862747669219971,
0.56862747669219971), (0.78151261806488037, 0.57647061347961426,
0.57647061347961426), (0.78571426868438721, 0.58431375026702881,
0.58431375026702881), (0.78991597890853882, 0.59215688705444336,
0.59215688705444336), (0.79411762952804565, 0.60000002384185791,
0.60000002384185791), (0.79831933975219727, 0.61176472902297974,
0.61176472902297974), (0.8025209903717041, 0.61960786581039429,
0.61960786581039429), (0.80672270059585571, 0.62745100259780884,
0.62745100259780884), (0.81092435121536255, 0.63529413938522339,
0.63529413938522339), (0.81512606143951416, 0.64313727617263794,
0.64313727617263794), (0.819327712059021, 0.65098041296005249,
0.65098041296005249), (0.82352942228317261, 0.65882354974746704,
0.65882354974746704), (0.82773107290267944, 0.66666668653488159,
0.66666668653488159), (0.83193278312683105, 0.67058825492858887,
0.67058825492858887), (0.83613443374633789, 0.67843139171600342,
0.67843139171600342), (0.8403361439704895, 0.68627452850341797,
0.68627452850341797), (0.84453779458999634, 0.69411766529083252,
0.69411766529083252), (0.84873950481414795, 0.70196080207824707,
0.70196080207824707), (0.85294115543365479, 0.70980393886566162,
0.70980393886566162), (0.8571428656578064, 0.71764707565307617,
0.71764707565307617), (0.86134451627731323, 0.72549021244049072,
0.72549021244049072), (0.86554622650146484, 0.73333334922790527,
0.73333334922790527), (0.86974787712097168, 0.73725491762161255,
0.73725491762161255), (0.87394958734512329, 0.7450980544090271,
0.7450980544090271), (0.87815123796463013, 0.75294119119644165,
0.75294119119644165), (0.88235294818878174, 0.76862746477127075,
0.76862746477127075), (0.88655459880828857, 0.7764706015586853,
0.7764706015586853), (0.89075630903244019, 0.78431373834609985,
0.78431373834609985), (0.89495795965194702, 0.7921568751335144,
0.7921568751335144), (0.89915966987609863, 0.80000001192092896,
0.80000001192092896), (0.90336132049560547, 0.80392158031463623,
0.80392158031463623), (0.90756303071975708, 0.81176471710205078,
0.81176471710205078), (0.91176468133926392, 0.81960785388946533,
0.81960785388946533), (0.91596639156341553, 0.82745099067687988,
0.82745099067687988), (0.92016804218292236, 0.83529412746429443,
0.83529412746429443), (0.92436975240707397, 0.84313726425170898,
0.84313726425170898), (0.92857140302658081, 0.85098040103912354,
0.85098040103912354), (0.93277311325073242, 0.85882353782653809,
0.85882353782653809), (0.93697476387023926, 0.86666667461395264,
0.86666667461395264), (0.94117647409439087, 0.87058824300765991,
0.87058824300765991), (0.94537812471389771, 0.87843137979507446,
0.87843137979507446), (0.94957983493804932, 0.88627451658248901,
0.88627451658248901), (0.95378148555755615, 0.89411765336990356,
0.89411765336990356), (0.95798319578170776, 0.90196079015731812,
0.90196079015731812), (0.9621848464012146, 0.90980392694473267,
0.90980392694473267), (0.96638655662536621, 0.92549020051956177,
0.92549020051956177), (0.97058820724487305, 0.93333333730697632,
0.93333333730697632), (0.97478991746902466, 0.93725490570068359,
0.93725490570068359), (0.97899156808853149, 0.94509804248809814,
0.94509804248809814), (0.98319327831268311, 0.9529411792755127,
0.9529411792755127), (0.98739492893218994, 0.96078431606292725,
0.96078431606292725), (0.99159663915634155, 0.9686274528503418,
0.9686274528503418), (0.99579828977584839, 0.97647058963775635,
0.97647058963775635), (1.0, 0.9843137264251709, 0.9843137264251709)],
'red': [(0.0, 0.0, 0.0), (0.0042016808874905109, 0.0039215688593685627,
0.0039215688593685627), (0.0084033617749810219, 0.0078431377187371254,
0.0078431377187371254), (0.012605042196810246, 0.015686275437474251,
0.015686275437474251), (0.016806723549962044, 0.019607843831181526,
0.019607843831181526), (0.021008403971791267, 0.027450980618596077,
0.027450980618596077), (0.025210084393620491, 0.031372550874948502,
0.031372550874948502), (0.029411764815449715, 0.039215687662363052,
0.039215687662363052), (0.033613447099924088, 0.043137256056070328,
0.043137256056070328), (0.037815127521753311, 0.050980392843484879,
0.050980392843484879), (0.042016807943582535, 0.058823529630899429,
0.058823529630899429), (0.046218488365411758, 0.066666670143604279,
0.066666670143604279), (0.050420168787240982, 0.070588238537311554,
0.070588238537311554), (0.054621849209070206, 0.078431375324726105,
0.078431375324726105), (0.058823529630899429, 0.08235294371843338,
0.08235294371843338), (0.063025213778018951, 0.090196080505847931,
0.090196080505847931), (0.067226894199848175, 0.094117648899555206,
0.094117648899555206), (0.071428574621677399, 0.10196078568696976,
0.10196078568696976), (0.075630255043506622, 0.10588235408067703,
0.10588235408067703), (0.079831935465335846, 0.10980392247438431,
0.10980392247438431), (0.08403361588716507, 0.11764705926179886,
0.11764705926179886), (0.088235296308994293, 0.12156862765550613,
0.12156862765550613), (0.092436976730823517, 0.12941177189350128,
0.12941177189350128), (0.09663865715265274, 0.13333334028720856,
0.13333334028720856), (0.10084033757448196, 0.14117647707462311,
0.14117647707462311), (0.10504201799631119, 0.14509804546833038,
0.14509804546833038), (0.10924369841814041, 0.15294118225574493,
0.15294118225574493), (0.11344537883996964, 0.15686275064945221,
0.15686275064945221), (0.11764705926179886, 0.16470588743686676,
0.16470588743686676), (0.12184873968362808, 0.16862745583057404,
0.16862745583057404), (0.1260504275560379, 0.18039216101169586,
0.18039216101169586), (0.13025210797786713, 0.18431372940540314,
0.18431372940540314), (0.13445378839969635, 0.19215686619281769,
0.19215686619281769), (0.13865546882152557, 0.19607843458652496,
0.19607843458652496), (0.1428571492433548, 0.20392157137393951,
0.20392157137393951), (0.14705882966518402, 0.20784313976764679,
0.20784313976764679), (0.15126051008701324, 0.21568627655506134,
0.21568627655506134), (0.15546219050884247, 0.21960784494876862,
0.21960784494876862), (0.15966387093067169, 0.22352941334247589,
0.22352941334247589), (0.16386555135250092, 0.23137255012989044,
0.23137255012989044), (0.16806723177433014, 0.23529411852359772,
0.23529411852359772), (0.17226891219615936, 0.24313725531101227,
0.24313725531101227), (0.17647059261798859, 0.24705882370471954,
0.24705882370471954), (0.18067227303981781, 0.25490197539329529,
0.25490197539329529), (0.18487395346164703, 0.25882354378700256,
0.25882354378700256), (0.18907563388347626, 0.26666668057441711,
0.26666668057441711), (0.19327731430530548, 0.27058824896812439,
0.27058824896812439), (0.1974789947271347, 0.27450981736183167,
0.27450981736183167), (0.20168067514896393, 0.28235295414924622,
0.28235295414924622), (0.20588235557079315, 0.28627452254295349,
0.28627452254295349), (0.21008403599262238, 0.29803922772407532,
0.29803922772407532), (0.2142857164144516, 0.30588236451148987,
0.30588236451148987), (0.21848739683628082, 0.30980393290519714,
0.30980393290519714), (0.22268907725811005, 0.31764706969261169,
0.31764706969261169), (0.22689075767993927, 0.32156863808631897,
0.32156863808631897), (0.23109243810176849, 0.32941177487373352,
0.32941177487373352), (0.23529411852359772, 0.3333333432674408,
0.3333333432674408), (0.23949579894542694, 0.33725491166114807,
0.33725491166114807), (0.24369747936725616, 0.34509804844856262,
0.34509804844856262), (0.24789915978908539, 0.3490196168422699,
0.3490196168422699), (0.25210085511207581, 0.36078432202339172,
0.36078432202339172), (0.25630253553390503, 0.36862745881080627,
0.36862745881080627), (0.26050421595573425, 0.37254902720451355,
0.37254902720451355), (0.26470589637756348, 0.3803921639919281,
0.3803921639919281), (0.2689075767993927, 0.38431373238563538,
0.38431373238563538), (0.27310925722122192, 0.38823530077934265,
0.38823530077934265), (0.27731093764305115, 0.3960784375667572,
0.3960784375667572), (0.28151261806488037, 0.40000000596046448,
0.40000000596046448), (0.28571429848670959, 0.40784314274787903,
0.40784314274787903), (0.28991597890853882, 0.4117647111415863,
0.4117647111415863), (0.29411765933036804, 0.42352941632270813,
0.42352941632270813), (0.29831933975219727, 0.43137255311012268,
0.43137255311012268), (0.30252102017402649, 0.43529412150382996,
0.43529412150382996), (0.30672270059585571, 0.44313725829124451,
0.44313725829124451), (0.31092438101768494, 0.44705882668495178,
0.44705882668495178), (0.31512606143951416, 0.45098039507865906,
0.45098039507865906), (0.31932774186134338, 0.45882353186607361,
0.45882353186607361), (0.32352942228317261, 0.46274510025978088,
0.46274510025978088), (0.32773110270500183, 0.47058823704719543,
0.47058823704719543), (0.33193278312683105, 0.47450980544090271,
0.47450980544090271), (0.33613446354866028, 0.48235294222831726,
0.48235294222831726), (0.3403361439704895, 0.48627451062202454,
0.48627451062202454), (0.34453782439231873, 0.49411764740943909,
0.49411764740943909), (0.34873950481414795, 0.49803921580314636,
0.49803921580314636), (0.35294118523597717, 0.50196081399917603,
0.50196081399917603), (0.3571428656578064, 0.50980395078659058,
0.50980395078659058), (0.36134454607963562, 0.51372551918029785,
0.51372551918029785), (0.36554622650146484, 0.5215686559677124,
0.5215686559677124), (0.36974790692329407, 0.52549022436141968,
0.52549022436141968), (0.37394958734512329, 0.53333336114883423,
0.53333336114883423), (0.37815126776695251, 0.54509806632995605,
0.54509806632995605), (0.38235294818878174, 0.54901963472366333,
0.54901963472366333), (0.38655462861061096, 0.55294120311737061,
0.55294120311737061), (0.39075630903244019, 0.56078433990478516,
0.56078433990478516), (0.39495798945426941, 0.56470590829849243,
0.56470590829849243), (0.39915966987609863, 0.57254904508590698,
0.57254904508590698), (0.40336135029792786, 0.57647061347961426,
0.57647061347961426), (0.40756303071975708, 0.58431375026702881,
0.58431375026702881), (0.4117647111415863, 0.58823531866073608,
0.58823531866073608), (0.41596639156341553, 0.59607845544815063,
0.59607845544815063), (0.42016807198524475, 0.60000002384185791,
0.60000002384185791), (0.42436975240707397, 0.60784316062927246,
0.60784316062927246), (0.4285714328289032, 0.61176472902297974,
0.61176472902297974), (0.43277311325073242, 0.61568629741668701,
0.61568629741668701), (0.43697479367256165, 0.62352943420410156,
0.62352943420410156), (0.44117647409439087, 0.62745100259780884,
0.62745100259780884), (0.44537815451622009, 0.63529413938522339,
0.63529413938522339), (0.44957983493804932, 0.63921570777893066,
0.63921570777893066), (0.45378151535987854, 0.64705884456634521,
0.64705884456634521), (0.45798319578170776, 0.65098041296005249,
0.65098041296005249), (0.46218487620353699, 0.66274511814117432,
0.66274511814117432), (0.46638655662536621, 0.66666668653488159,
0.66666668653488159), (0.47058823704719543, 0.67450982332229614,
0.67450982332229614), (0.47478991746902466, 0.67843139171600342,
0.67843139171600342), (0.47899159789085388, 0.68627452850341797,
0.68627452850341797), (0.48319327831268311, 0.69019609689712524,
0.69019609689712524), (0.48739495873451233, 0.69803923368453979,
0.69803923368453979), (0.49159663915634155, 0.70196080207824707,
0.70196080207824707), (0.49579831957817078, 0.70980393886566162,
0.70980393886566162), (0.5, 0.7137255072593689, 0.7137255072593689),
(0.50420171022415161, 0.72549021244049072, 0.72549021244049072),
(0.50840336084365845, 0.729411780834198, 0.729411780834198),
(0.51260507106781006, 0.73725491762161255, 0.73725491762161255),
(0.51680672168731689, 0.74117648601531982, 0.74117648601531982),
(0.52100843191146851, 0.74901962280273438, 0.74901962280273438),
(0.52521008253097534, 0.75294119119644165, 0.75294119119644165),
(0.52941179275512695, 0.7607843279838562, 0.7607843279838562),
(0.53361344337463379, 0.76470589637756348, 0.76470589637756348),
(0.5378151535987854, 0.77254903316497803, 0.77254903316497803),
(0.54201680421829224, 0.7764706015586853, 0.7764706015586853),
(0.54621851444244385, 0.78823530673980713, 0.78823530673980713),
(0.55042016506195068, 0.7921568751335144, 0.7921568751335144),
(0.55462187528610229, 0.80000001192092896, 0.80000001192092896),
(0.55882352590560913, 0.80392158031463623, 0.80392158031463623),
(0.56302523612976074, 0.81176471710205078, 0.81176471710205078),
(0.56722688674926758, 0.81568628549575806, 0.81568628549575806),
(0.57142859697341919, 0.82352942228317261, 0.82352942228317261),
(0.57563024759292603, 0.82745099067687988, 0.82745099067687988),
(0.57983195781707764, 0.83137255907058716, 0.83137255907058716),
(0.58403360843658447, 0.83921569585800171, 0.83921569585800171),
(0.58823531866073608, 0.84313726425170898, 0.84313726425170898),
(0.59243696928024292, 0.85098040103912354, 0.85098040103912354),
(0.59663867950439453, 0.85490196943283081, 0.85490196943283081),
(0.60084033012390137, 0.86274510622024536, 0.86274510622024536),
(0.60504204034805298, 0.86666667461395264, 0.86666667461395264),
(0.60924369096755981, 0.87450981140136719, 0.87450981140136719),
(0.61344540119171143, 0.87843137979507446, 0.87843137979507446),
(0.61764705181121826, 0.88627451658248901, 0.88627451658248901),
(0.62184876203536987, 0.89019608497619629, 0.89019608497619629),
(0.62605041265487671, 0.89411765336990356, 0.89411765336990356),
(0.63025212287902832, 0.90588235855102539, 0.90588235855102539),
(0.63445377349853516, 0.91372549533843994, 0.91372549533843994),
(0.63865548372268677, 0.91764706373214722, 0.91764706373214722),
(0.6428571343421936, 0.92549020051956177, 0.92549020051956177),
(0.64705884456634521, 0.92941176891326904, 0.92941176891326904),
(0.65126049518585205, 0.93725490570068359, 0.93725490570068359),
(0.65546220541000366, 0.94117647409439087, 0.94117647409439087),
(0.6596638560295105, 0.94509804248809814, 0.94509804248809814),
(0.66386556625366211, 0.9529411792755127, 0.9529411792755127),
(0.66806721687316895, 0.95686274766921997, 0.95686274766921997),
(0.67226892709732056, 0.96470588445663452, 0.96470588445663452),
(0.67647057771682739, 0.9686274528503418, 0.9686274528503418),
(0.680672287940979, 0.97647058963775635, 0.97647058963775635),
(0.68487393856048584, 0.98039215803146362, 0.98039215803146362),
(0.68907564878463745, 0.98823529481887817, 0.98823529481887817),
(0.69327729940414429, 0.99215686321258545, 0.99215686321258545),
(0.6974790096282959, 1.0, 1.0), (0.70168066024780273, 1.0, 1.0),
(0.70588237047195435, 1.0, 1.0), (0.71008402109146118, 1.0, 1.0),
(0.71428573131561279, 1.0, 1.0), (0.71848738193511963, 1.0, 1.0),
(0.72268909215927124, 1.0, 1.0), (0.72689074277877808, 1.0, 1.0),
(0.73109245300292969, 1.0, 1.0), (0.73529410362243652, 1.0, 1.0),
(0.73949581384658813, 1.0, 1.0), (0.74369746446609497, 1.0, 1.0),
(0.74789917469024658, 1.0, 1.0), (0.75210082530975342, 1.0, 1.0),
(0.75630253553390503, 1.0, 1.0), (0.76050418615341187, 1.0, 1.0),
(0.76470589637756348, 1.0, 1.0), (0.76890754699707031, 1.0, 1.0),
(0.77310925722122192, 1.0, 1.0), (0.77731090784072876, 1.0, 1.0),
(0.78151261806488037, 1.0, 1.0), (0.78571426868438721, 1.0, 1.0),
(0.78991597890853882, 1.0, 1.0), (0.79411762952804565, 1.0, 1.0),
(0.79831933975219727, 1.0, 1.0), (0.8025209903717041, 1.0, 1.0),
(0.80672270059585571, 1.0, 1.0), (0.81092435121536255, 1.0, 1.0),
(0.81512606143951416, 1.0, 1.0), (0.819327712059021, 1.0, 1.0),
(0.82352942228317261, 1.0, 1.0), (0.82773107290267944, 1.0, 1.0),
(0.83193278312683105, 1.0, 1.0), (0.83613443374633789, 1.0, 1.0),
(0.8403361439704895, 1.0, 1.0), (0.84453779458999634, 1.0, 1.0),
(0.84873950481414795, 1.0, 1.0), (0.85294115543365479, 1.0, 1.0),
(0.8571428656578064, 1.0, 1.0), (0.86134451627731323, 1.0, 1.0),
(0.86554622650146484, 1.0, 1.0), (0.86974787712097168, 1.0, 1.0),
(0.87394958734512329, 1.0, 1.0), (0.87815123796463013, 1.0, 1.0),
(0.88235294818878174, 1.0, 1.0), (0.88655459880828857, 1.0, 1.0),
(0.89075630903244019, 1.0, 1.0), (0.89495795965194702, 1.0, 1.0),
(0.89915966987609863, 1.0, 1.0), (0.90336132049560547, 1.0, 1.0),
(0.90756303071975708, 1.0, 1.0), (0.91176468133926392, 1.0, 1.0),
(0.91596639156341553, 1.0, 1.0), (0.92016804218292236, 1.0, 1.0),
(0.92436975240707397, 1.0, 1.0), (0.92857140302658081, 1.0, 1.0),
(0.93277311325073242, 1.0, 1.0), (0.93697476387023926, 1.0, 1.0),
(0.94117647409439087, 1.0, 1.0), (0.94537812471389771, 1.0, 1.0),
(0.94957983493804932, 1.0, 1.0), (0.95378148555755615, 1.0, 1.0),
(0.95798319578170776, 1.0, 1.0), (0.9621848464012146, 1.0, 1.0),
(0.96638655662536621, 1.0, 1.0), (0.97058820724487305, 1.0, 1.0),
(0.97478991746902466, 1.0, 1.0), (0.97899156808853149, 1.0, 1.0),
(0.98319327831268311, 1.0, 1.0), (0.98739492893218994, 1.0, 1.0),
(0.99159663915634155, 1.0, 1.0), (0.99579828977584839, 1.0, 1.0), (1.0,
1.0, 1.0)]}
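# Illustrative sketch (not part of the original file): a segment-data dictionary
# like _gist_heat_data above maps a normalized position in [0, 1] to per-channel
# (position, value_below, value_above) anchor points. Assuming matplotlib is
# available, such a dict can be turned into a usable colormap with the public
# LinearSegmentedColormap constructor; the name 'my_gist_heat' is a hypothetical
# example, not something defined elsewhere in this file.
from matplotlib.colors import LinearSegmentedColormap

my_gist_heat = LinearSegmentedColormap('my_gist_heat',
                                       segmentdata=_gist_heat_data,
                                       N=256)
# Colormaps are callable: sampling at 0.5 returns the RGBA tuple halfway
# along the ramp defined by the anchor points above.
mid_color = my_gist_heat(0.5)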
_gist_ncar_data = {'blue': [(0.0, 0.50196081399917603,
0.50196081399917603), (0.0050505050458014011, 0.45098039507865906,
0.45098039507865906), (0.010101010091602802, 0.40392157435417175,
0.40392157435417175), (0.015151515603065491, 0.35686275362968445,
0.35686275362968445), (0.020202020183205605, 0.30980393290519714,
0.30980393290519714), (0.025252524763345718, 0.25882354378700256,
0.25882354378700256), (0.030303031206130981, 0.21176470816135406,
0.21176470816135406), (0.035353533923625946, 0.16470588743686676,
0.16470588743686676), (0.040404040366411209, 0.11764705926179886,
0.11764705926179886), (0.045454546809196472, 0.070588238537311554,
0.070588238537311554), (0.050505049526691437, 0.019607843831181526,
0.019607843831181526), (0.0555555559694767, 0.047058824449777603,
0.047058824449777603), (0.060606062412261963, 0.14509804546833038,
0.14509804546833038), (0.065656565129756927, 0.23921568691730499,
0.23921568691730499), (0.070707067847251892, 0.3333333432674408,
0.3333333432674408), (0.075757578015327454, 0.43137255311012268,
0.43137255311012268), (0.080808080732822418, 0.52549022436141968,
0.52549022436141968), (0.085858583450317383, 0.61960786581039429,
0.61960786581039429), (0.090909093618392944, 0.71764707565307617,
0.71764707565307617), (0.095959596335887909, 0.81176471710205078,
0.81176471710205078), (0.10101009905338287, 0.90588235855102539,
0.90588235855102539), (0.10606060922145844, 1.0, 1.0),
(0.1111111119389534, 1.0, 1.0), (0.11616161465644836, 1.0, 1.0),
(0.12121212482452393, 1.0, 1.0), (0.12626262009143829, 1.0, 1.0),
(0.13131313025951385, 1.0, 1.0), (0.13636364042758942, 1.0, 1.0),
(0.14141413569450378, 1.0, 1.0), (0.14646464586257935, 1.0, 1.0),
(0.15151515603065491, 1.0, 1.0), (0.15656565129756927, 1.0, 1.0),
(0.16161616146564484, 1.0, 1.0), (0.1666666716337204, 1.0, 1.0),
(0.17171716690063477, 1.0, 1.0), (0.17676767706871033, 1.0, 1.0),
(0.18181818723678589, 1.0, 1.0), (0.18686868250370026, 1.0, 1.0),
(0.19191919267177582, 1.0, 1.0), (0.19696970283985138, 1.0, 1.0),
(0.20202019810676575, 1.0, 1.0), (0.20707070827484131, 1.0, 1.0),
(0.21212121844291687, 0.99215686321258545, 0.99215686321258545),
(0.21717171370983124, 0.95686274766921997, 0.95686274766921997),
(0.2222222238779068, 0.91764706373214722, 0.91764706373214722),
(0.22727273404598236, 0.88235294818878174, 0.88235294818878174),
(0.23232322931289673, 0.84313726425170898, 0.84313726425170898),
(0.23737373948097229, 0.80392158031463623, 0.80392158031463623),
(0.24242424964904785, 0.76862746477127075, 0.76862746477127075),
(0.24747474491596222, 0.729411780834198, 0.729411780834198),
(0.25252524018287659, 0.69019609689712524, 0.69019609689712524),
(0.25757575035095215, 0.65490198135375977, 0.65490198135375977),
(0.26262626051902771, 0.61568629741668701, 0.61568629741668701),
(0.26767677068710327, 0.56470590829849243, 0.56470590829849243),
(0.27272728085517883, 0.50980395078659058, 0.50980395078659058),
(0.27777779102325439, 0.45098039507865906, 0.45098039507865906),
(0.28282827138900757, 0.39215686917304993, 0.39215686917304993),
(0.28787878155708313, 0.3333333432674408, 0.3333333432674408),
(0.29292929172515869, 0.27843138575553894, 0.27843138575553894),
(0.29797980189323425, 0.21960784494876862, 0.21960784494876862),
(0.30303031206130981, 0.16078431904315948, 0.16078431904315948),
(0.30808082222938538, 0.10588235408067703, 0.10588235408067703),
(0.31313130259513855, 0.047058824449777603, 0.047058824449777603),
(0.31818181276321411, 0.0, 0.0), (0.32323232293128967, 0.0, 0.0),
(0.32828283309936523, 0.0, 0.0), (0.3333333432674408, 0.0, 0.0),
(0.33838382363319397, 0.0, 0.0), (0.34343433380126953, 0.0, 0.0),
(0.34848484396934509, 0.0, 0.0), (0.35353535413742065, 0.0, 0.0),
(0.35858586430549622, 0.0, 0.0), (0.36363637447357178, 0.0, 0.0),
(0.36868685483932495, 0.0, 0.0), (0.37373736500740051, 0.0, 0.0),
(0.37878787517547607, 0.0, 0.0), (0.38383838534355164, 0.0, 0.0),
(0.3888888955116272, 0.0, 0.0), (0.39393940567970276, 0.0, 0.0),
(0.39898988604545593, 0.0, 0.0), (0.40404039621353149, 0.0, 0.0),
(0.40909090638160706, 0.0, 0.0), (0.41414141654968262, 0.0, 0.0),
(0.41919192671775818, 0.0, 0.0), (0.42424243688583374,
0.0039215688593685627, 0.0039215688593685627), (0.42929291725158691,
0.027450980618596077, 0.027450980618596077), (0.43434342741966248,
0.050980392843484879, 0.050980392843484879), (0.43939393758773804,
0.074509806931018829, 0.074509806931018829), (0.4444444477558136,
0.094117648899555206, 0.094117648899555206), (0.44949495792388916,
0.11764705926179886, 0.11764705926179886), (0.45454546809196472,
0.14117647707462311, 0.14117647707462311), (0.4595959484577179,
0.16470588743686676, 0.16470588743686676), (0.46464645862579346,
0.18823529779911041, 0.18823529779911041), (0.46969696879386902,
0.21176470816135406, 0.21176470816135406), (0.47474747896194458,
0.23529411852359772, 0.23529411852359772), (0.47979798913002014,
0.22352941334247589, 0.22352941334247589), (0.4848484992980957,
0.20000000298023224, 0.20000000298023224), (0.48989897966384888,
0.17647059261798859, 0.17647059261798859), (0.49494948983192444,
0.15294118225574493, 0.15294118225574493), (0.5, 0.12941177189350128,
0.12941177189350128), (0.50505048036575317, 0.10980392247438431,
0.10980392247438431), (0.51010102033615112, 0.086274512112140656,
0.086274512112140656), (0.5151515007019043, 0.062745101749897003,
0.062745101749897003), (0.52020204067230225, 0.039215687662363052,
0.039215687662363052), (0.52525252103805542, 0.015686275437474251,
0.015686275437474251), (0.53030300140380859, 0.0, 0.0),
(0.53535354137420654, 0.0, 0.0), (0.54040402173995972, 0.0, 0.0),
(0.54545456171035767, 0.0, 0.0), (0.55050504207611084, 0.0, 0.0),
(0.55555558204650879, 0.0, 0.0), (0.56060606241226196, 0.0, 0.0),
(0.56565654277801514, 0.0, 0.0), (0.57070708274841309, 0.0, 0.0),
(0.57575756311416626, 0.0, 0.0), (0.58080810308456421, 0.0, 0.0),
(0.58585858345031738, 0.0039215688593685627, 0.0039215688593685627),
(0.59090906381607056, 0.0078431377187371254, 0.0078431377187371254),
(0.59595960378646851, 0.011764706112444401, 0.011764706112444401),
(0.60101008415222168, 0.019607843831181526, 0.019607843831181526),
(0.60606062412261963, 0.023529412224888802, 0.023529412224888802),
(0.6111111044883728, 0.031372550874948502, 0.031372550874948502),
(0.61616164445877075, 0.035294119268655777, 0.035294119268655777),
(0.62121212482452393, 0.043137256056070328, 0.043137256056070328),
(0.6262626051902771, 0.047058824449777603, 0.047058824449777603),
(0.63131314516067505, 0.054901961237192154, 0.054901961237192154),
(0.63636362552642822, 0.054901961237192154, 0.054901961237192154),
(0.64141416549682617, 0.050980392843484879, 0.050980392843484879),
(0.64646464586257935, 0.043137256056070328, 0.043137256056070328),
(0.65151512622833252, 0.039215687662363052, 0.039215687662363052),
(0.65656566619873047, 0.031372550874948502, 0.031372550874948502),
(0.66161614656448364, 0.027450980618596077, 0.027450980618596077),
(0.66666668653488159, 0.019607843831181526, 0.019607843831181526),
(0.67171716690063477, 0.015686275437474251, 0.015686275437474251),
(0.67676764726638794, 0.011764706112444401, 0.011764706112444401),
(0.68181818723678589, 0.0039215688593685627, 0.0039215688593685627),
(0.68686866760253906, 0.0, 0.0), (0.69191920757293701, 0.0, 0.0),
(0.69696968793869019, 0.0, 0.0), (0.70202022790908813, 0.0, 0.0),
(0.70707070827484131, 0.0, 0.0), (0.71212118864059448, 0.0, 0.0),
(0.71717172861099243, 0.0, 0.0), (0.72222220897674561, 0.0, 0.0),
(0.72727274894714355, 0.0, 0.0), (0.73232322931289673, 0.0, 0.0),
(0.7373737096786499, 0.0, 0.0), (0.74242424964904785,
0.031372550874948502, 0.031372550874948502), (0.74747473001480103,
0.12941177189350128, 0.12941177189350128), (0.75252526998519897,
0.22352941334247589, 0.22352941334247589), (0.75757575035095215,
0.32156863808631897, 0.32156863808631897), (0.7626262903213501,
0.41568627953529358, 0.41568627953529358), (0.76767677068710327,
0.50980395078659058, 0.50980395078659058), (0.77272725105285645,
0.60784316062927246, 0.60784316062927246), (0.77777779102325439,
0.70196080207824707, 0.70196080207824707), (0.78282827138900757,
0.79607844352722168, 0.79607844352722168), (0.78787881135940552,
0.89411765336990356, 0.89411765336990356), (0.79292929172515869,
0.98823529481887817, 0.98823529481887817), (0.79797977209091187, 1.0,
1.0), (0.80303031206130981, 1.0, 1.0), (0.80808079242706299, 1.0, 1.0),
(0.81313133239746094, 1.0, 1.0), (0.81818181276321411, 1.0, 1.0),
(0.82323235273361206, 1.0, 1.0), (0.82828283309936523, 1.0, 1.0),
(0.83333331346511841, 1.0, 1.0), (0.83838385343551636, 1.0, 1.0),
(0.84343433380126953, 1.0, 1.0), (0.84848487377166748,
0.99607843160629272, 0.99607843160629272), (0.85353535413742065,
0.98823529481887817, 0.98823529481887817), (0.85858583450317383,
0.9843137264251709, 0.9843137264251709), (0.86363637447357178,
0.97647058963775635, 0.97647058963775635), (0.86868685483932495,
0.9686274528503418, 0.9686274528503418), (0.8737373948097229,
0.96470588445663452, 0.96470588445663452), (0.87878787517547607,
0.95686274766921997, 0.95686274766921997), (0.88383835554122925,
0.94901961088180542, 0.94901961088180542), (0.8888888955116272,
0.94509804248809814, 0.94509804248809814), (0.89393937587738037,
0.93725490570068359, 0.93725490570068359), (0.89898991584777832,
0.93333333730697632, 0.93333333730697632), (0.90404039621353149,
0.93333333730697632, 0.93333333730697632), (0.90909093618392944,
0.93725490570068359, 0.93725490570068359), (0.91414141654968262,
0.93725490570068359, 0.93725490570068359), (0.91919189691543579,
0.94117647409439087, 0.94117647409439087), (0.92424243688583374,
0.94509804248809814, 0.94509804248809814), (0.92929291725158691,
0.94509804248809814, 0.94509804248809814), (0.93434345722198486,
0.94901961088180542, 0.94901961088180542), (0.93939393758773804,
0.9529411792755127, 0.9529411792755127), (0.94444441795349121,
0.9529411792755127, 0.9529411792755127), (0.94949495792388916,
0.95686274766921997, 0.95686274766921997), (0.95454543828964233,
0.96078431606292725, 0.96078431606292725), (0.95959597826004028,
0.96470588445663452, 0.96470588445663452), (0.96464645862579346,
0.9686274528503418, 0.9686274528503418), (0.96969699859619141,
0.97254902124404907, 0.97254902124404907), (0.97474747896194458,
0.97647058963775635, 0.97647058963775635), (0.97979795932769775,
0.98039215803146362, 0.98039215803146362), (0.9848484992980957,
0.9843137264251709, 0.9843137264251709), (0.98989897966384888,
0.98823529481887817, 0.98823529481887817), (0.99494951963424683,
0.99215686321258545, 0.99215686321258545), (1.0, 0.99607843160629272,
0.99607843160629272)], 'green': [(0.0, 0.0, 0.0), (0.0050505050458014011,
0.035294119268655777, 0.035294119268655777), (0.010101010091602802,
0.074509806931018829, 0.074509806931018829), (0.015151515603065491,
0.10980392247438431, 0.10980392247438431), (0.020202020183205605,
0.14901961386203766, 0.14901961386203766), (0.025252524763345718,
0.18431372940540314, 0.18431372940540314), (0.030303031206130981,
0.22352941334247589, 0.22352941334247589), (0.035353533923625946,
0.25882354378700256, 0.25882354378700256), (0.040404040366411209,
0.29803922772407532, 0.29803922772407532), (0.045454546809196472,
0.3333333432674408, 0.3333333432674408), (0.050505049526691437,
0.37254902720451355, 0.37254902720451355), (0.0555555559694767,
0.36862745881080627, 0.36862745881080627), (0.060606062412261963,
0.3333333432674408, 0.3333333432674408), (0.065656565129756927,
0.29411765933036804, 0.29411765933036804), (0.070707067847251892,
0.25882354378700256, 0.25882354378700256), (0.075757578015327454,
0.21960784494876862, 0.21960784494876862), (0.080808080732822418,
0.18431372940540314, 0.18431372940540314), (0.085858583450317383,
0.14509804546833038, 0.14509804546833038), (0.090909093618392944,
0.10980392247438431, 0.10980392247438431), (0.095959596335887909,
0.070588238537311554, 0.070588238537311554), (0.10101009905338287,
0.035294119268655777, 0.035294119268655777), (0.10606060922145844, 0.0,
0.0), (0.1111111119389534, 0.074509806931018829, 0.074509806931018829),
(0.11616161465644836, 0.14509804546833038, 0.14509804546833038),
(0.12121212482452393, 0.21568627655506134, 0.21568627655506134),
(0.12626262009143829, 0.28627452254295349, 0.28627452254295349),
(0.13131313025951385, 0.36078432202339172, 0.36078432202339172),
(0.13636364042758942, 0.43137255311012268, 0.43137255311012268),
(0.14141413569450378, 0.50196081399917603, 0.50196081399917603),
(0.14646464586257935, 0.57254904508590698, 0.57254904508590698),
(0.15151515603065491, 0.64705884456634521, 0.64705884456634521),
(0.15656565129756927, 0.71764707565307617, 0.71764707565307617),
(0.16161616146564484, 0.7607843279838562, 0.7607843279838562),
(0.1666666716337204, 0.78431373834609985, 0.78431373834609985),
(0.17171716690063477, 0.80784314870834351, 0.80784314870834351),
(0.17676767706871033, 0.83137255907058716, 0.83137255907058716),
(0.18181818723678589, 0.85490196943283081, 0.85490196943283081),
(0.18686868250370026, 0.88235294818878174, 0.88235294818878174),
(0.19191919267177582, 0.90588235855102539, 0.90588235855102539),
(0.19696970283985138, 0.92941176891326904, 0.92941176891326904),
(0.20202019810676575, 0.9529411792755127, 0.9529411792755127),
(0.20707070827484131, 0.97647058963775635, 0.97647058963775635),
(0.21212121844291687, 0.99607843160629272, 0.99607843160629272),
(0.21717171370983124, 0.99607843160629272, 0.99607843160629272),
(0.2222222238779068, 0.99215686321258545, 0.99215686321258545),
(0.22727273404598236, 0.99215686321258545, 0.99215686321258545),
(0.23232322931289673, 0.99215686321258545, 0.99215686321258545),
(0.23737373948097229, 0.98823529481887817, 0.98823529481887817),
(0.24242424964904785, 0.98823529481887817, 0.98823529481887817),
(0.24747474491596222, 0.9843137264251709, 0.9843137264251709),
(0.25252524018287659, 0.9843137264251709, 0.9843137264251709),
(0.25757575035095215, 0.98039215803146362, 0.98039215803146362),
(0.26262626051902771, 0.98039215803146362, 0.98039215803146362),
(0.26767677068710327, 0.98039215803146362, 0.98039215803146362),
(0.27272728085517883, 0.98039215803146362, 0.98039215803146362),
(0.27777779102325439, 0.9843137264251709, 0.9843137264251709),
(0.28282827138900757, 0.9843137264251709, 0.9843137264251709),
(0.28787878155708313, 0.98823529481887817, 0.98823529481887817),
(0.29292929172515869, 0.98823529481887817, 0.98823529481887817),
(0.29797980189323425, 0.99215686321258545, 0.99215686321258545),
(0.30303031206130981, 0.99215686321258545, 0.99215686321258545),
(0.30808082222938538, 0.99607843160629272, 0.99607843160629272),
(0.31313130259513855, 0.99607843160629272, 0.99607843160629272),
(0.31818181276321411, 0.99607843160629272, 0.99607843160629272),
(0.32323232293128967, 0.97647058963775635, 0.97647058963775635),
(0.32828283309936523, 0.95686274766921997, 0.95686274766921997),
(0.3333333432674408, 0.93725490570068359, 0.93725490570068359),
(0.33838382363319397, 0.92156863212585449, 0.92156863212585449),
(0.34343433380126953, 0.90196079015731812, 0.90196079015731812),
(0.34848484396934509, 0.88235294818878174, 0.88235294818878174),
(0.35353535413742065, 0.86274510622024536, 0.86274510622024536),
(0.35858586430549622, 0.84705883264541626, 0.84705883264541626),
(0.36363637447357178, 0.82745099067687988, 0.82745099067687988),
(0.36868685483932495, 0.80784314870834351, 0.80784314870834351),
(0.37373736500740051, 0.81568628549575806, 0.81568628549575806),
(0.37878787517547607, 0.83529412746429443, 0.83529412746429443),
(0.38383838534355164, 0.85098040103912354, 0.85098040103912354),
(0.3888888955116272, 0.87058824300765991, 0.87058824300765991),
(0.39393940567970276, 0.89019608497619629, 0.89019608497619629),
(0.39898988604545593, 0.90980392694473267, 0.90980392694473267),
(0.40404039621353149, 0.92549020051956177, 0.92549020051956177),
(0.40909090638160706, 0.94509804248809814, 0.94509804248809814),
(0.41414141654968262, 0.96470588445663452, 0.96470588445663452),
(0.41919192671775818, 0.9843137264251709, 0.9843137264251709),
(0.42424243688583374, 1.0, 1.0), (0.42929291725158691, 1.0, 1.0),
(0.43434342741966248, 1.0, 1.0), (0.43939393758773804, 1.0, 1.0),
(0.4444444477558136, 1.0, 1.0), (0.44949495792388916, 1.0, 1.0),
(0.45454546809196472, 1.0, 1.0), (0.4595959484577179, 1.0, 1.0),
(0.46464645862579346, 1.0, 1.0), (0.46969696879386902, 1.0, 1.0),
(0.47474747896194458, 1.0, 1.0), (0.47979798913002014, 1.0, 1.0),
(0.4848484992980957, 1.0, 1.0), (0.48989897966384888, 1.0, 1.0),
(0.49494948983192444, 1.0, 1.0), (0.5, 1.0, 1.0), (0.50505048036575317,
1.0, 1.0), (0.51010102033615112, 1.0, 1.0), (0.5151515007019043, 1.0,
1.0), (0.52020204067230225, 1.0, 1.0), (0.52525252103805542, 1.0, 1.0),
(0.53030300140380859, 0.99215686321258545, 0.99215686321258545),
(0.53535354137420654, 0.98039215803146362, 0.98039215803146362),
(0.54040402173995972, 0.96470588445663452, 0.96470588445663452),
(0.54545456171035767, 0.94901961088180542, 0.94901961088180542),
(0.55050504207611084, 0.93333333730697632, 0.93333333730697632),
(0.55555558204650879, 0.91764706373214722, 0.91764706373214722),
(0.56060606241226196, 0.90588235855102539, 0.90588235855102539),
(0.56565654277801514, 0.89019608497619629, 0.89019608497619629),
(0.57070708274841309, 0.87450981140136719, 0.87450981140136719),
(0.57575756311416626, 0.85882353782653809, 0.85882353782653809),
(0.58080810308456421, 0.84313726425170898, 0.84313726425170898),
(0.58585858345031738, 0.83137255907058716, 0.83137255907058716),
(0.59090906381607056, 0.81960785388946533, 0.81960785388946533),
(0.59595960378646851, 0.81176471710205078, 0.81176471710205078),
(0.60101008415222168, 0.80000001192092896, 0.80000001192092896),
(0.60606062412261963, 0.78823530673980713, 0.78823530673980713),
(0.6111111044883728, 0.7764706015586853, 0.7764706015586853),
(0.61616164445877075, 0.76470589637756348, 0.76470589637756348),
(0.62121212482452393, 0.75294119119644165, 0.75294119119644165),
(0.6262626051902771, 0.74117648601531982, 0.74117648601531982),
(0.63131314516067505, 0.729411780834198, 0.729411780834198),
(0.63636362552642822, 0.70980393886566162, 0.70980393886566162),
(0.64141416549682617, 0.66666668653488159, 0.66666668653488159),
(0.64646464586257935, 0.62352943420410156, 0.62352943420410156),
(0.65151512622833252, 0.58039218187332153, 0.58039218187332153),
(0.65656566619873047, 0.5372549295425415, 0.5372549295425415),
(0.66161614656448364, 0.49411764740943909, 0.49411764740943909),
(0.66666668653488159, 0.45098039507865906, 0.45098039507865906),
(0.67171716690063477, 0.40392157435417175, 0.40392157435417175),
(0.67676764726638794, 0.36078432202339172, 0.36078432202339172),
(0.68181818723678589, 0.31764706969261169, 0.31764706969261169),
(0.68686866760253906, 0.27450981736183167, 0.27450981736183167),
(0.69191920757293701, 0.24705882370471954, 0.24705882370471954),
(0.69696968793869019, 0.21960784494876862, 0.21960784494876862),
(0.70202022790908813, 0.19607843458652496, 0.19607843458652496),
(0.70707070827484131, 0.16862745583057404, 0.16862745583057404),
(0.71212118864059448, 0.14509804546833038, 0.14509804546833038),
(0.71717172861099243, 0.11764705926179886, 0.11764705926179886),
(0.72222220897674561, 0.090196080505847931, 0.090196080505847931),
(0.72727274894714355, 0.066666670143604279, 0.066666670143604279),
(0.73232322931289673, 0.039215687662363052, 0.039215687662363052),
(0.7373737096786499, 0.015686275437474251, 0.015686275437474251),
(0.74242424964904785, 0.0, 0.0), (0.74747473001480103, 0.0, 0.0),
(0.75252526998519897, 0.0, 0.0), (0.75757575035095215, 0.0, 0.0),
(0.7626262903213501, 0.0, 0.0), (0.76767677068710327, 0.0, 0.0),
(0.77272725105285645, 0.0, 0.0), (0.77777779102325439, 0.0, 0.0),
(0.78282827138900757, 0.0, 0.0), (0.78787881135940552, 0.0, 0.0),
(0.79292929172515869, 0.0, 0.0), (0.79797977209091187,
0.015686275437474251, 0.015686275437474251), (0.80303031206130981,
0.031372550874948502, 0.031372550874948502), (0.80808079242706299,
0.050980392843484879, 0.050980392843484879), (0.81313133239746094,
0.066666670143604279, 0.066666670143604279), (0.81818181276321411,
0.086274512112140656, 0.086274512112140656), (0.82323235273361206,
0.10588235408067703, 0.10588235408067703), (0.82828283309936523,
0.12156862765550613, 0.12156862765550613), (0.83333331346511841,
0.14117647707462311, 0.14117647707462311), (0.83838385343551636,
0.15686275064945221, 0.15686275064945221), (0.84343433380126953,
0.17647059261798859, 0.17647059261798859), (0.84848487377166748,
0.20000000298023224, 0.20000000298023224), (0.85353535413742065,
0.23137255012989044, 0.23137255012989044), (0.85858583450317383,
0.25882354378700256, 0.25882354378700256), (0.86363637447357178,
0.29019609093666077, 0.29019609093666077), (0.86868685483932495,
0.32156863808631897, 0.32156863808631897), (0.8737373948097229,
0.35294118523597717, 0.35294118523597717), (0.87878787517547607,
0.38431373238563538, 0.38431373238563538), (0.88383835554122925,
0.41568627953529358, 0.41568627953529358), (0.8888888955116272,
0.44313725829124451, 0.44313725829124451), (0.89393937587738037,
0.47450980544090271, 0.47450980544090271), (0.89898991584777832,
0.5058823823928833, 0.5058823823928833), (0.90404039621353149,
0.52941179275512695, 0.52941179275512695), (0.90909093618392944,
0.55294120311737061, 0.55294120311737061), (0.91414141654968262,
0.57254904508590698, 0.57254904508590698), (0.91919189691543579,
0.59607845544815063, 0.59607845544815063), (0.92424243688583374,
0.61960786581039429, 0.61960786581039429), (0.92929291725158691,
0.64313727617263794, 0.64313727617263794), (0.93434345722198486,
0.66274511814117432, 0.66274511814117432), (0.93939393758773804,
0.68627452850341797, 0.68627452850341797), (0.94444441795349121,
0.70980393886566162, 0.70980393886566162), (0.94949495792388916,
0.729411780834198, 0.729411780834198), (0.95454543828964233,
0.75294119119644165, 0.75294119119644165), (0.95959597826004028,
0.78039216995239258, 0.78039216995239258), (0.96464645862579346,
0.80392158031463623, 0.80392158031463623), (0.96969699859619141,
0.82745099067687988, 0.82745099067687988), (0.97474747896194458,
0.85098040103912354, 0.85098040103912354), (0.97979795932769775,
0.87450981140136719, 0.87450981140136719), (0.9848484992980957,
0.90196079015731812, 0.90196079015731812), (0.98989897966384888,
0.92549020051956177, 0.92549020051956177), (0.99494951963424683,
0.94901961088180542, 0.94901961088180542), (1.0, 0.97254902124404907,
0.97254902124404907)], 'red': [(0.0, 0.0, 0.0), (0.0050505050458014011,
0.0, 0.0), (0.010101010091602802, 0.0, 0.0), (0.015151515603065491, 0.0,
0.0), (0.020202020183205605, 0.0, 0.0), (0.025252524763345718, 0.0, 0.0),
(0.030303031206130981, 0.0, 0.0), (0.035353533923625946, 0.0, 0.0),
(0.040404040366411209, 0.0, 0.0), (0.045454546809196472, 0.0, 0.0),
(0.050505049526691437, 0.0, 0.0), (0.0555555559694767, 0.0, 0.0),
(0.060606062412261963, 0.0, 0.0), (0.065656565129756927, 0.0, 0.0),
(0.070707067847251892, 0.0, 0.0), (0.075757578015327454, 0.0, 0.0),
(0.080808080732822418, 0.0, 0.0), (0.085858583450317383, 0.0, 0.0),
(0.090909093618392944, 0.0, 0.0), (0.095959596335887909, 0.0, 0.0),
(0.10101009905338287, 0.0, 0.0), (0.10606060922145844, 0.0, 0.0),
(0.1111111119389534, 0.0, 0.0), (0.11616161465644836, 0.0, 0.0),
(0.12121212482452393, 0.0, 0.0), (0.12626262009143829, 0.0, 0.0),
(0.13131313025951385, 0.0, 0.0), (0.13636364042758942, 0.0, 0.0),
(0.14141413569450378, 0.0, 0.0), (0.14646464586257935, 0.0, 0.0),
(0.15151515603065491, 0.0, 0.0), (0.15656565129756927, 0.0, 0.0),
(0.16161616146564484, 0.0, 0.0), (0.1666666716337204, 0.0, 0.0),
(0.17171716690063477, 0.0, 0.0), (0.17676767706871033, 0.0, 0.0),
(0.18181818723678589, 0.0, 0.0), (0.18686868250370026, 0.0, 0.0),
(0.19191919267177582, 0.0, 0.0), (0.19696970283985138, 0.0, 0.0),
(0.20202019810676575, 0.0, 0.0), (0.20707070827484131, 0.0, 0.0),
(0.21212121844291687, 0.0, 0.0), (0.21717171370983124, 0.0, 0.0),
(0.2222222238779068, 0.0, 0.0), (0.22727273404598236, 0.0, 0.0),
(0.23232322931289673, 0.0, 0.0), (0.23737373948097229, 0.0, 0.0),
(0.24242424964904785, 0.0, 0.0), (0.24747474491596222, 0.0, 0.0),
(0.25252524018287659, 0.0, 0.0), (0.25757575035095215, 0.0, 0.0),
(0.26262626051902771, 0.0, 0.0), (0.26767677068710327, 0.0, 0.0),
(0.27272728085517883, 0.0, 0.0), (0.27777779102325439, 0.0, 0.0),
(0.28282827138900757, 0.0, 0.0), (0.28787878155708313, 0.0, 0.0),
(0.29292929172515869, 0.0, 0.0), (0.29797980189323425, 0.0, 0.0),
(0.30303031206130981, 0.0, 0.0), (0.30808082222938538, 0.0, 0.0),
(0.31313130259513855, 0.0, 0.0), (0.31818181276321411,
0.0039215688593685627, 0.0039215688593685627), (0.32323232293128967,
0.043137256056070328, 0.043137256056070328), (0.32828283309936523,
0.08235294371843338, 0.08235294371843338), (0.3333333432674408,
0.11764705926179886, 0.11764705926179886), (0.33838382363319397,
0.15686275064945221, 0.15686275064945221), (0.34343433380126953,
0.19607843458652496, 0.19607843458652496), (0.34848484396934509,
0.23137255012989044, 0.23137255012989044), (0.35353535413742065,
0.27058824896812439, 0.27058824896812439), (0.35858586430549622,
0.30980393290519714, 0.30980393290519714), (0.36363637447357178,
0.3490196168422699, 0.3490196168422699), (0.36868685483932495,
0.38431373238563538, 0.38431373238563538), (0.37373736500740051,
0.40392157435417175, 0.40392157435417175), (0.37878787517547607,
0.41568627953529358, 0.41568627953529358), (0.38383838534355164,
0.42352941632270813, 0.42352941632270813), (0.3888888955116272,
0.43137255311012268, 0.43137255311012268), (0.39393940567970276,
0.44313725829124451, 0.44313725829124451), (0.39898988604545593,
0.45098039507865906, 0.45098039507865906), (0.40404039621353149,
0.45882353186607361, 0.45882353186607361), (0.40909090638160706,
0.47058823704719543, 0.47058823704719543), (0.41414141654968262,
0.47843137383460999, 0.47843137383460999), (0.41919192671775818,
0.49019607901573181, 0.49019607901573181), (0.42424243688583374,
0.50196081399917603, 0.50196081399917603), (0.42929291725158691,
0.52549022436141968, 0.52549022436141968), (0.43434342741966248,
0.54901963472366333, 0.54901963472366333), (0.43939393758773804,
0.57254904508590698, 0.57254904508590698), (0.4444444477558136,
0.60000002384185791, 0.60000002384185791), (0.44949495792388916,
0.62352943420410156, 0.62352943420410156), (0.45454546809196472,
0.64705884456634521, 0.64705884456634521), (0.4595959484577179,
0.67058825492858887, 0.67058825492858887), (0.46464645862579346,
0.69411766529083252, 0.69411766529083252), (0.46969696879386902,
0.72156864404678345, 0.72156864404678345), (0.47474747896194458,
0.7450980544090271, 0.7450980544090271), (0.47979798913002014,
0.76862746477127075, 0.76862746477127075), (0.4848484992980957,
0.7921568751335144, 0.7921568751335144), (0.48989897966384888,
0.81568628549575806, 0.81568628549575806), (0.49494948983192444,
0.83921569585800171, 0.83921569585800171), (0.5, 0.86274510622024536,
0.86274510622024536), (0.50505048036575317, 0.88627451658248901,
0.88627451658248901), (0.51010102033615112, 0.90980392694473267,
0.90980392694473267), (0.5151515007019043, 0.93333333730697632,
0.93333333730697632), (0.52020204067230225, 0.95686274766921997,
0.95686274766921997), (0.52525252103805542, 0.98039215803146362,
0.98039215803146362), (0.53030300140380859, 1.0, 1.0),
(0.53535354137420654, 1.0, 1.0), (0.54040402173995972, 1.0, 1.0),
(0.54545456171035767, 1.0, 1.0), (0.55050504207611084, 1.0, 1.0),
(0.55555558204650879, 1.0, 1.0), (0.56060606241226196, 1.0, 1.0),
(0.56565654277801514, 1.0, 1.0), (0.57070708274841309, 1.0, 1.0),
(0.57575756311416626, 1.0, 1.0), (0.58080810308456421, 1.0, 1.0),
(0.58585858345031738, 1.0, 1.0), (0.59090906381607056, 1.0, 1.0),
(0.59595960378646851, 1.0, 1.0), (0.60101008415222168, 1.0, 1.0),
(0.60606062412261963, 1.0, 1.0), (0.6111111044883728, 1.0, 1.0),
(0.61616164445877075, 1.0, 1.0), (0.62121212482452393, 1.0, 1.0),
(0.6262626051902771, 1.0, 1.0), (0.63131314516067505, 1.0, 1.0),
(0.63636362552642822, 1.0, 1.0), (0.64141416549682617, 1.0, 1.0),
(0.64646464586257935, 1.0, 1.0), (0.65151512622833252, 1.0, 1.0),
(0.65656566619873047, 1.0, 1.0), (0.66161614656448364, 1.0, 1.0),
(0.66666668653488159, 1.0, 1.0), (0.67171716690063477, 1.0, 1.0),
(0.67676764726638794, 1.0, 1.0), (0.68181818723678589, 1.0, 1.0),
(0.68686866760253906, 1.0, 1.0), (0.69191920757293701, 1.0, 1.0),
(0.69696968793869019, 1.0, 1.0), (0.70202022790908813, 1.0, 1.0),
(0.70707070827484131, 1.0, 1.0), (0.71212118864059448, 1.0, 1.0),
(0.71717172861099243, 1.0, 1.0), (0.72222220897674561, 1.0, 1.0),
(0.72727274894714355, 1.0, 1.0), (0.73232322931289673, 1.0, 1.0),
(0.7373737096786499, 1.0, 1.0), (0.74242424964904785, 1.0, 1.0),
(0.74747473001480103, 1.0, 1.0), (0.75252526998519897, 1.0, 1.0),
(0.75757575035095215, 1.0, 1.0), (0.7626262903213501, 1.0, 1.0),
(0.76767677068710327, 1.0, 1.0), (0.77272725105285645, 1.0, 1.0),
(0.77777779102325439, 1.0, 1.0), (0.78282827138900757, 1.0, 1.0),
(0.78787881135940552, 1.0, 1.0), (0.79292929172515869, 1.0, 1.0),
(0.79797977209091187, 0.96470588445663452, 0.96470588445663452),
(0.80303031206130981, 0.92549020051956177, 0.92549020051956177),
(0.80808079242706299, 0.89019608497619629, 0.89019608497619629),
(0.81313133239746094, 0.85098040103912354, 0.85098040103912354),
(0.81818181276321411, 0.81568628549575806, 0.81568628549575806),
(0.82323235273361206, 0.7764706015586853, 0.7764706015586853),
(0.82828283309936523, 0.74117648601531982, 0.74117648601531982),
(0.83333331346511841, 0.70196080207824707, 0.70196080207824707),
(0.83838385343551636, 0.66666668653488159, 0.66666668653488159),
(0.84343433380126953, 0.62745100259780884, 0.62745100259780884),
(0.84848487377166748, 0.61960786581039429, 0.61960786581039429),
(0.85353535413742065, 0.65098041296005249, 0.65098041296005249),
(0.85858583450317383, 0.68235296010971069, 0.68235296010971069),
(0.86363637447357178, 0.7137255072593689, 0.7137255072593689),
(0.86868685483932495, 0.7450980544090271, 0.7450980544090271),
(0.8737373948097229, 0.77254903316497803, 0.77254903316497803),
(0.87878787517547607, 0.80392158031463623, 0.80392158031463623),
(0.88383835554122925, 0.83529412746429443, 0.83529412746429443),
(0.8888888955116272, 0.86666667461395264, 0.86666667461395264),
(0.89393937587738037, 0.89803922176361084, 0.89803922176361084),
(0.89898991584777832, 0.92941176891326904, 0.92941176891326904),
(0.90404039621353149, 0.93333333730697632, 0.93333333730697632),
(0.90909093618392944, 0.93725490570068359, 0.93725490570068359),
(0.91414141654968262, 0.93725490570068359, 0.93725490570068359),
(0.91919189691543579, 0.94117647409439087, 0.94117647409439087),
(0.92424243688583374, 0.94509804248809814, 0.94509804248809814),
(0.92929291725158691, 0.94509804248809814, 0.94509804248809814),
(0.93434345722198486, 0.94901961088180542, 0.94901961088180542),
(0.93939393758773804, 0.9529411792755127, 0.9529411792755127),
(0.94444441795349121, 0.9529411792755127, 0.9529411792755127),
(0.94949495792388916, 0.95686274766921997, 0.95686274766921997),
(0.95454543828964233, 0.96078431606292725, 0.96078431606292725),
(0.95959597826004028, 0.96470588445663452, 0.96470588445663452),
(0.96464645862579346, 0.9686274528503418, 0.9686274528503418),
(0.96969699859619141, 0.97254902124404907, 0.97254902124404907),
(0.97474747896194458, 0.97647058963775635, 0.97647058963775635),
(0.97979795932769775, 0.98039215803146362, 0.98039215803146362),
(0.9848484992980957, 0.9843137264251709, 0.9843137264251709),
(0.98989897966384888, 0.98823529481887817, 0.98823529481887817),
(0.99494951963424683, 0.99215686321258545, 0.99215686321258545), (1.0,
0.99607843160629272, 0.99607843160629272)]}
_gist_rainbow_data = {'blue':
[(0.0, 0.16470588743686676, 0.16470588743686676), (0.0042016808874905109,
0.14117647707462311, 0.14117647707462311), (0.0084033617749810219,
0.12156862765550613, 0.12156862765550613), (0.012605042196810246,
0.10196078568696976, 0.10196078568696976), (0.016806723549962044,
0.078431375324726105, 0.078431375324726105), (0.021008403971791267,
0.058823529630899429, 0.058823529630899429), (0.025210084393620491,
0.039215687662363052, 0.039215687662363052), (0.029411764815449715,
0.015686275437474251, 0.015686275437474251), (0.033613447099924088, 0.0,
0.0), (0.037815127521753311, 0.0, 0.0), (0.042016807943582535, 0.0, 0.0),
(0.046218488365411758, 0.0, 0.0), (0.050420168787240982, 0.0, 0.0),
(0.054621849209070206, 0.0, 0.0), (0.058823529630899429, 0.0, 0.0),
(0.063025213778018951, 0.0, 0.0), (0.067226894199848175, 0.0, 0.0),
(0.071428574621677399, 0.0, 0.0), (0.075630255043506622, 0.0, 0.0),
(0.079831935465335846, 0.0, 0.0), (0.08403361588716507, 0.0, 0.0),
(0.088235296308994293, 0.0, 0.0), (0.092436976730823517, 0.0, 0.0),
(0.09663865715265274, 0.0, 0.0), (0.10084033757448196, 0.0, 0.0),
(0.10504201799631119, 0.0, 0.0), (0.10924369841814041, 0.0, 0.0),
(0.11344537883996964, 0.0, 0.0), (0.11764705926179886, 0.0, 0.0),
(0.12184873968362808, 0.0, 0.0), (0.1260504275560379, 0.0, 0.0),
(0.13025210797786713, 0.0, 0.0), (0.13445378839969635, 0.0, 0.0),
(0.13865546882152557, 0.0, 0.0), (0.1428571492433548, 0.0, 0.0),
(0.14705882966518402, 0.0, 0.0), (0.15126051008701324, 0.0, 0.0),
(0.15546219050884247, 0.0, 0.0), (0.15966387093067169, 0.0, 0.0),
(0.16386555135250092, 0.0, 0.0), (0.16806723177433014, 0.0, 0.0),
(0.17226891219615936, 0.0, 0.0), (0.17647059261798859, 0.0, 0.0),
(0.18067227303981781, 0.0, 0.0), (0.18487395346164703, 0.0, 0.0),
(0.18907563388347626, 0.0, 0.0), (0.19327731430530548, 0.0, 0.0),
(0.1974789947271347, 0.0, 0.0), (0.20168067514896393, 0.0, 0.0),
(0.20588235557079315, 0.0, 0.0), (0.21008403599262238, 0.0, 0.0),
(0.2142857164144516, 0.0, 0.0), (0.21848739683628082, 0.0, 0.0),
(0.22268907725811005, 0.0, 0.0), (0.22689075767993927, 0.0, 0.0),
(0.23109243810176849, 0.0, 0.0), (0.23529411852359772, 0.0, 0.0),
(0.23949579894542694, 0.0, 0.0), (0.24369747936725616, 0.0, 0.0),
(0.24789915978908539, 0.0, 0.0), (0.25210085511207581, 0.0, 0.0),
(0.25630253553390503, 0.0, 0.0), (0.26050421595573425, 0.0, 0.0),
(0.26470589637756348, 0.0, 0.0), (0.2689075767993927, 0.0, 0.0),
(0.27310925722122192, 0.0, 0.0), (0.27731093764305115, 0.0, 0.0),
(0.28151261806488037, 0.0, 0.0), (0.28571429848670959, 0.0, 0.0),
(0.28991597890853882, 0.0, 0.0), (0.29411765933036804, 0.0, 0.0),
(0.29831933975219727, 0.0, 0.0), (0.30252102017402649, 0.0, 0.0),
(0.30672270059585571, 0.0, 0.0), (0.31092438101768494, 0.0, 0.0),
(0.31512606143951416, 0.0, 0.0), (0.31932774186134338, 0.0, 0.0),
(0.32352942228317261, 0.0, 0.0), (0.32773110270500183, 0.0, 0.0),
(0.33193278312683105, 0.0, 0.0), (0.33613446354866028, 0.0, 0.0),
(0.3403361439704895, 0.0, 0.0), (0.34453782439231873, 0.0, 0.0),
(0.34873950481414795, 0.0, 0.0), (0.35294118523597717, 0.0, 0.0),
(0.3571428656578064, 0.0, 0.0), (0.36134454607963562, 0.0, 0.0),
(0.36554622650146484, 0.0, 0.0), (0.36974790692329407, 0.0, 0.0),
(0.37394958734512329, 0.0, 0.0), (0.37815126776695251, 0.0, 0.0),
(0.38235294818878174, 0.0, 0.0), (0.38655462861061096, 0.0, 0.0),
(0.39075630903244019, 0.0, 0.0), (0.39495798945426941, 0.0, 0.0),
(0.39915966987609863, 0.0, 0.0), (0.40336135029792786, 0.0, 0.0),
(0.40756303071975708, 0.0039215688593685627, 0.0039215688593685627),
(0.4117647111415863, 0.047058824449777603, 0.047058824449777603),
(0.41596639156341553, 0.066666670143604279, 0.066666670143604279),
(0.42016807198524475, 0.090196080505847931, 0.090196080505847931),
(0.42436975240707397, 0.10980392247438431, 0.10980392247438431),
(0.4285714328289032, 0.12941177189350128, 0.12941177189350128),
(0.43277311325073242, 0.15294118225574493, 0.15294118225574493),
(0.43697479367256165, 0.17254902422428131, 0.17254902422428131),
(0.44117647409439087, 0.19215686619281769, 0.19215686619281769),
(0.44537815451622009, 0.21568627655506134, 0.21568627655506134),
(0.44957983493804932, 0.23529411852359772, 0.23529411852359772),
(0.45378151535987854, 0.25882354378700256, 0.25882354378700256),
(0.45798319578170776, 0.27843138575553894, 0.27843138575553894),
(0.46218487620353699, 0.29803922772407532, 0.29803922772407532),
(0.46638655662536621, 0.32156863808631897, 0.32156863808631897),
(0.47058823704719543, 0.34117648005485535, 0.34117648005485535),
(0.47478991746902466, 0.38431373238563538, 0.38431373238563538),
(0.47899159789085388, 0.40392157435417175, 0.40392157435417175),
(0.48319327831268311, 0.42745098471641541, 0.42745098471641541),
(0.48739495873451233, 0.44705882668495178, 0.44705882668495178),
(0.49159663915634155, 0.46666666865348816, 0.46666666865348816),
(0.49579831957817078, 0.49019607901573181, 0.49019607901573181), (0.5,
0.50980395078659058, 0.50980395078659058), (0.50420171022415161,
0.52941179275512695, 0.52941179275512695), (0.50840336084365845,
0.55294120311737061, 0.55294120311737061), (0.51260507106781006,
0.57254904508590698, 0.57254904508590698), (0.51680672168731689,
0.59607845544815063, 0.59607845544815063), (0.52100843191146851,
0.61568629741668701, 0.61568629741668701), (0.52521008253097534,
0.63529413938522339, 0.63529413938522339), (0.52941179275512695,
0.65882354974746704, 0.65882354974746704), (0.53361344337463379,
0.67843139171600342, 0.67843139171600342), (0.5378151535987854,
0.72156864404678345, 0.72156864404678345), (0.54201680421829224,
0.74117648601531982, 0.74117648601531982), (0.54621851444244385,
0.76470589637756348, 0.76470589637756348), (0.55042016506195068,
0.78431373834609985, 0.78431373834609985), (0.55462187528610229,
0.80392158031463623, 0.80392158031463623), (0.55882352590560913,
0.82745099067687988, 0.82745099067687988), (0.56302523612976074,
0.84705883264541626, 0.84705883264541626), (0.56722688674926758,
0.87058824300765991, 0.87058824300765991), (0.57142859697341919,
0.89019608497619629, 0.89019608497619629), (0.57563024759292603,
0.90980392694473267, 0.90980392694473267), (0.57983195781707764,
0.93333333730697632, 0.93333333730697632), (0.58403360843658447,
0.9529411792755127, 0.9529411792755127), (0.58823531866073608,
0.97254902124404907, 0.97254902124404907), (0.59243696928024292,
0.99607843160629272, 0.99607843160629272), (0.59663867950439453, 1.0,
1.0), (0.60084033012390137, 1.0, 1.0), (0.60504204034805298, 1.0, 1.0),
(0.60924369096755981, 1.0, 1.0), (0.61344540119171143, 1.0, 1.0),
(0.61764705181121826, 1.0, 1.0), (0.62184876203536987, 1.0, 1.0),
(0.62605041265487671, 1.0, 1.0), (0.63025212287902832, 1.0, 1.0),
(0.63445377349853516, 1.0, 1.0), (0.63865548372268677, 1.0, 1.0),
(0.6428571343421936, 1.0, 1.0), (0.64705884456634521, 1.0, 1.0),
(0.65126049518585205, 1.0, 1.0), (0.65546220541000366, 1.0, 1.0),
(0.6596638560295105, 1.0, 1.0), (0.66386556625366211, 1.0, 1.0),
(0.66806721687316895, 1.0, 1.0), (0.67226892709732056, 1.0, 1.0),
(0.67647057771682739, 1.0, 1.0), (0.680672287940979, 1.0, 1.0),
(0.68487393856048584, 1.0, 1.0), (0.68907564878463745, 1.0, 1.0),
(0.69327729940414429, 1.0, 1.0), (0.6974790096282959, 1.0, 1.0),
(0.70168066024780273, 1.0, 1.0), (0.70588237047195435, 1.0, 1.0),
(0.71008402109146118, 1.0, 1.0), (0.71428573131561279, 1.0, 1.0),
(0.71848738193511963, 1.0, 1.0), (0.72268909215927124, 1.0, 1.0),
(0.72689074277877808, 1.0, 1.0), (0.73109245300292969, 1.0, 1.0),
(0.73529410362243652, 1.0, 1.0), (0.73949581384658813, 1.0, 1.0),
(0.74369746446609497, 1.0, 1.0), (0.74789917469024658, 1.0, 1.0),
(0.75210082530975342, 1.0, 1.0), (0.75630253553390503, 1.0, 1.0),
(0.76050418615341187, 1.0, 1.0), (0.76470589637756348, 1.0, 1.0),
(0.76890754699707031, 1.0, 1.0), (0.77310925722122192, 1.0, 1.0),
(0.77731090784072876, 1.0, 1.0), (0.78151261806488037, 1.0, 1.0),
(0.78571426868438721, 1.0, 1.0), (0.78991597890853882, 1.0, 1.0),
(0.79411762952804565, 1.0, 1.0), (0.79831933975219727, 1.0, 1.0),
(0.8025209903717041, 1.0, 1.0), (0.80672270059585571, 1.0, 1.0),
(0.81092435121536255, 1.0, 1.0), (0.81512606143951416, 1.0, 1.0),
(0.819327712059021, 1.0, 1.0), (0.82352942228317261, 1.0, 1.0),
(0.82773107290267944, 1.0, 1.0), (0.83193278312683105, 1.0, 1.0),
(0.83613443374633789, 1.0, 1.0), (0.8403361439704895, 1.0, 1.0),
(0.84453779458999634, 1.0, 1.0), (0.84873950481414795, 1.0, 1.0),
(0.85294115543365479, 1.0, 1.0), (0.8571428656578064, 1.0, 1.0),
(0.86134451627731323, 1.0, 1.0), (0.86554622650146484, 1.0, 1.0),
(0.86974787712097168, 1.0, 1.0), (0.87394958734512329, 1.0, 1.0),
(0.87815123796463013, 1.0, 1.0), (0.88235294818878174, 1.0, 1.0),
(0.88655459880828857, 1.0, 1.0), (0.89075630903244019, 1.0, 1.0),
(0.89495795965194702, 1.0, 1.0), (0.89915966987609863, 1.0, 1.0),
(0.90336132049560547, 1.0, 1.0), (0.90756303071975708, 1.0, 1.0),
(0.91176468133926392, 1.0, 1.0), (0.91596639156341553, 1.0, 1.0),
(0.92016804218292236, 1.0, 1.0), (0.92436975240707397, 1.0, 1.0),
(0.92857140302658081, 1.0, 1.0), (0.93277311325073242, 1.0, 1.0),
(0.93697476387023926, 1.0, 1.0), (0.94117647409439087, 1.0, 1.0),
(0.94537812471389771, 1.0, 1.0), (0.94957983493804932, 1.0, 1.0),
(0.95378148555755615, 1.0, 1.0), (0.95798319578170776, 1.0, 1.0),
(0.9621848464012146, 1.0, 1.0), (0.96638655662536621, 0.99607843160629272,
0.99607843160629272), (0.97058820724487305, 0.97647058963775635,
0.97647058963775635), (0.97478991746902466, 0.9529411792755127,
0.9529411792755127), (0.97899156808853149, 0.91372549533843994,
0.91372549533843994), (0.98319327831268311, 0.89019608497619629,
0.89019608497619629), (0.98739492893218994, 0.87058824300765991,
0.87058824300765991), (0.99159663915634155, 0.85098040103912354,
0.85098040103912354), (0.99579828977584839, 0.82745099067687988,
0.82745099067687988), (1.0, 0.80784314870834351, 0.80784314870834351)],
'green': [(0.0, 0.0, 0.0), (0.0042016808874905109, 0.0, 0.0),
(0.0084033617749810219, 0.0, 0.0), (0.012605042196810246, 0.0, 0.0),
(0.016806723549962044, 0.0, 0.0), (0.021008403971791267, 0.0, 0.0),
(0.025210084393620491, 0.0, 0.0), (0.029411764815449715, 0.0, 0.0),
(0.033613447099924088, 0.019607843831181526, 0.019607843831181526),
(0.037815127521753311, 0.043137256056070328, 0.043137256056070328),
(0.042016807943582535, 0.062745101749897003, 0.062745101749897003),
(0.046218488365411758, 0.086274512112140656, 0.086274512112140656),
(0.050420168787240982, 0.10588235408067703, 0.10588235408067703),
(0.054621849209070206, 0.12549020349979401, 0.12549020349979401),
(0.058823529630899429, 0.14901961386203766, 0.14901961386203766),
(0.063025213778018951, 0.16862745583057404, 0.16862745583057404),
(0.067226894199848175, 0.18823529779911041, 0.18823529779911041),
(0.071428574621677399, 0.21176470816135406, 0.21176470816135406),
(0.075630255043506622, 0.23137255012989044, 0.23137255012989044),
(0.079831935465335846, 0.25490197539329529, 0.25490197539329529),
(0.08403361588716507, 0.27450981736183167, 0.27450981736183167),
(0.088235296308994293, 0.29411765933036804, 0.29411765933036804),
(0.092436976730823517, 0.31764706969261169, 0.31764706969261169),
(0.09663865715265274, 0.35686275362968445, 0.35686275362968445),
(0.10084033757448196, 0.3803921639919281, 0.3803921639919281),
(0.10504201799631119, 0.40000000596046448, 0.40000000596046448),
(0.10924369841814041, 0.42352941632270813, 0.42352941632270813),
(0.11344537883996964, 0.44313725829124451, 0.44313725829124451),
(0.11764705926179886, 0.46274510025978088, 0.46274510025978088),
(0.12184873968362808, 0.48627451062202454, 0.48627451062202454),
(0.1260504275560379, 0.5058823823928833, 0.5058823823928833),
(0.13025210797786713, 0.52941179275512695, 0.52941179275512695),
(0.13445378839969635, 0.54901963472366333, 0.54901963472366333),
(0.13865546882152557, 0.56862747669219971, 0.56862747669219971),
(0.1428571492433548, 0.59215688705444336, 0.59215688705444336),
(0.14705882966518402, 0.61176472902297974, 0.61176472902297974),
(0.15126051008701324, 0.63137257099151611, 0.63137257099151611),
(0.15546219050884247, 0.65490198135375977, 0.65490198135375977),
(0.15966387093067169, 0.69803923368453979, 0.69803923368453979),
(0.16386555135250092, 0.71764707565307617, 0.71764707565307617),
(0.16806723177433014, 0.73725491762161255, 0.73725491762161255),
(0.17226891219615936, 0.7607843279838562, 0.7607843279838562),
(0.17647059261798859, 0.78039216995239258, 0.78039216995239258),
(0.18067227303981781, 0.80000001192092896, 0.80000001192092896),
(0.18487395346164703, 0.82352942228317261, 0.82352942228317261),
(0.18907563388347626, 0.84313726425170898, 0.84313726425170898),
(0.19327731430530548, 0.86666667461395264, 0.86666667461395264),
(0.1974789947271347, 0.88627451658248901, 0.88627451658248901),
(0.20168067514896393, 0.90588235855102539, 0.90588235855102539),
(0.20588235557079315, 0.92941176891326904, 0.92941176891326904),
(0.21008403599262238, 0.94901961088180542, 0.94901961088180542),
(0.2142857164144516, 0.9686274528503418, 0.9686274528503418),
(0.21848739683628082, 0.99215686321258545, 0.99215686321258545),
(0.22268907725811005, 1.0, 1.0), (0.22689075767993927, 1.0, 1.0),
(0.23109243810176849, 1.0, 1.0), (0.23529411852359772, 1.0, 1.0),
(0.23949579894542694, 1.0, 1.0), (0.24369747936725616, 1.0, 1.0),
(0.24789915978908539, 1.0, 1.0), (0.25210085511207581, 1.0, 1.0),
(0.25630253553390503, 1.0, 1.0), (0.26050421595573425, 1.0, 1.0),
(0.26470589637756348, 1.0, 1.0), (0.2689075767993927, 1.0, 1.0),
(0.27310925722122192, 1.0, 1.0), (0.27731093764305115, 1.0, 1.0),
(0.28151261806488037, 1.0, 1.0), (0.28571429848670959, 1.0, 1.0),
(0.28991597890853882, 1.0, 1.0), (0.29411765933036804, 1.0, 1.0),
(0.29831933975219727, 1.0, 1.0), (0.30252102017402649, 1.0, 1.0),
(0.30672270059585571, 1.0, 1.0), (0.31092438101768494, 1.0, 1.0),
(0.31512606143951416, 1.0, 1.0), (0.31932774186134338, 1.0, 1.0),
(0.32352942228317261, 1.0, 1.0), (0.32773110270500183, 1.0, 1.0),
(0.33193278312683105, 1.0, 1.0), (0.33613446354866028, 1.0, 1.0),
(0.3403361439704895, 1.0, 1.0), (0.34453782439231873, 1.0, 1.0),
(0.34873950481414795, 1.0, 1.0), (0.35294118523597717, 1.0, 1.0),
(0.3571428656578064, 1.0, 1.0), (0.36134454607963562, 1.0, 1.0),
(0.36554622650146484, 1.0, 1.0), (0.36974790692329407, 1.0, 1.0),
(0.37394958734512329, 1.0, 1.0), (0.37815126776695251, 1.0, 1.0),
(0.38235294818878174, 1.0, 1.0), (0.38655462861061096, 1.0, 1.0),
(0.39075630903244019, 1.0, 1.0), (0.39495798945426941, 1.0, 1.0),
(0.39915966987609863, 1.0, 1.0), (0.40336135029792786, 1.0, 1.0),
(0.40756303071975708, 1.0, 1.0), (0.4117647111415863, 1.0, 1.0),
(0.41596639156341553, 1.0, 1.0), (0.42016807198524475, 1.0, 1.0),
(0.42436975240707397, 1.0, 1.0), (0.4285714328289032, 1.0, 1.0),
(0.43277311325073242, 1.0, 1.0), (0.43697479367256165, 1.0, 1.0),
(0.44117647409439087, 1.0, 1.0), (0.44537815451622009, 1.0, 1.0),
(0.44957983493804932, 1.0, 1.0), (0.45378151535987854, 1.0, 1.0),
(0.45798319578170776, 1.0, 1.0), (0.46218487620353699, 1.0, 1.0),
(0.46638655662536621, 1.0, 1.0), (0.47058823704719543, 1.0, 1.0),
(0.47478991746902466, 1.0, 1.0), (0.47899159789085388, 1.0, 1.0),
(0.48319327831268311, 1.0, 1.0), (0.48739495873451233, 1.0, 1.0),
(0.49159663915634155, 1.0, 1.0), (0.49579831957817078, 1.0, 1.0), (0.5,
1.0, 1.0), (0.50420171022415161, 1.0, 1.0), (0.50840336084365845, 1.0,
1.0), (0.51260507106781006, 1.0, 1.0), (0.51680672168731689, 1.0, 1.0),
(0.52100843191146851, 1.0, 1.0), (0.52521008253097534, 1.0, 1.0),
(0.52941179275512695, 1.0, 1.0), (0.53361344337463379, 1.0, 1.0),
(0.5378151535987854, 1.0, 1.0), (0.54201680421829224, 1.0, 1.0),
(0.54621851444244385, 1.0, 1.0), (0.55042016506195068, 1.0, 1.0),
(0.55462187528610229, 1.0, 1.0), (0.55882352590560913, 1.0, 1.0),
(0.56302523612976074, 1.0, 1.0), (0.56722688674926758, 1.0, 1.0),
(0.57142859697341919, 1.0, 1.0), (0.57563024759292603, 1.0, 1.0),
(0.57983195781707764, 1.0, 1.0), (0.58403360843658447, 1.0, 1.0),
(0.58823531866073608, 1.0, 1.0), (0.59243696928024292, 1.0, 1.0),
(0.59663867950439453, 0.98039215803146362, 0.98039215803146362),
(0.60084033012390137, 0.93725490570068359, 0.93725490570068359),
(0.60504204034805298, 0.91764706373214722, 0.91764706373214722),
(0.60924369096755981, 0.89411765336990356, 0.89411765336990356),
(0.61344540119171143, 0.87450981140136719, 0.87450981140136719),
(0.61764705181121826, 0.85490196943283081, 0.85490196943283081),
(0.62184876203536987, 0.83137255907058716, 0.83137255907058716),
(0.62605041265487671, 0.81176471710205078, 0.81176471710205078),
(0.63025212287902832, 0.78823530673980713, 0.78823530673980713),
(0.63445377349853516, 0.76862746477127075, 0.76862746477127075),
(0.63865548372268677, 0.74901962280273438, 0.74901962280273438),
(0.6428571343421936, 0.72549021244049072, 0.72549021244049072),
(0.64705884456634521, 0.70588237047195435, 0.70588237047195435),
(0.65126049518585205, 0.68235296010971069, 0.68235296010971069),
(0.65546220541000366, 0.66274511814117432, 0.66274511814117432),
(0.6596638560295105, 0.64313727617263794, 0.64313727617263794),
(0.66386556625366211, 0.60000002384185791, 0.60000002384185791),
(0.66806721687316895, 0.58039218187332153, 0.58039218187332153),
(0.67226892709732056, 0.55686277151107788, 0.55686277151107788),
(0.67647057771682739, 0.5372549295425415, 0.5372549295425415),
(0.680672287940979, 0.51372551918029785, 0.51372551918029785),
(0.68487393856048584, 0.49411764740943909, 0.49411764740943909),
(0.68907564878463745, 0.47450980544090271, 0.47450980544090271),
(0.69327729940414429, 0.45098039507865906, 0.45098039507865906),
(0.6974790096282959, 0.43137255311012268, 0.43137255311012268),
(0.70168066024780273, 0.4117647111415863, 0.4117647111415863),
(0.70588237047195435, 0.38823530077934265, 0.38823530077934265),
(0.71008402109146118, 0.36862745881080627, 0.36862745881080627),
(0.71428573131561279, 0.34509804844856262, 0.34509804844856262),
(0.71848738193511963, 0.32549020648002625, 0.32549020648002625),
(0.72268909215927124, 0.30588236451148987, 0.30588236451148987),
(0.72689074277877808, 0.26274511218070984, 0.26274511218070984),
(0.73109245300292969, 0.24313725531101227, 0.24313725531101227),
(0.73529410362243652, 0.21960784494876862, 0.21960784494876862),
(0.73949581384658813, 0.20000000298023224, 0.20000000298023224),
(0.74369746446609497, 0.17647059261798859, 0.17647059261798859),
(0.74789917469024658, 0.15686275064945221, 0.15686275064945221),
(0.75210082530975342, 0.13725490868091583, 0.13725490868091583),
(0.75630253553390503, 0.11372549086809158, 0.11372549086809158),
(0.76050418615341187, 0.094117648899555206, 0.094117648899555206),
(0.76470589637756348, 0.070588238537311554, 0.070588238537311554),
(0.76890754699707031, 0.050980392843484879, 0.050980392843484879),
(0.77310925722122192, 0.031372550874948502, 0.031372550874948502),
(0.77731090784072876, 0.0078431377187371254, 0.0078431377187371254),
(0.78151261806488037, 0.0, 0.0), (0.78571426868438721, 0.0, 0.0),
(0.78991597890853882, 0.0, 0.0), (0.79411762952804565, 0.0, 0.0),
(0.79831933975219727, 0.0, 0.0), (0.8025209903717041, 0.0, 0.0),
(0.80672270059585571, 0.0, 0.0), (0.81092435121536255, 0.0, 0.0),
(0.81512606143951416, 0.0, 0.0), (0.819327712059021, 0.0, 0.0),
(0.82352942228317261, 0.0, 0.0), (0.82773107290267944, 0.0, 0.0),
(0.83193278312683105, 0.0, 0.0), (0.83613443374633789, 0.0, 0.0),
(0.8403361439704895, 0.0, 0.0), (0.84453779458999634, 0.0, 0.0),
(0.84873950481414795, 0.0, 0.0), (0.85294115543365479, 0.0, 0.0),
(0.8571428656578064, 0.0, 0.0), (0.86134451627731323, 0.0, 0.0),
(0.86554622650146484, 0.0, 0.0), (0.86974787712097168, 0.0, 0.0),
(0.87394958734512329, 0.0, 0.0), (0.87815123796463013, 0.0, 0.0),
(0.88235294818878174, 0.0, 0.0), (0.88655459880828857, 0.0, 0.0),
(0.89075630903244019, 0.0, 0.0), (0.89495795965194702, 0.0, 0.0),
(0.89915966987609863, 0.0, 0.0), (0.90336132049560547, 0.0, 0.0),
(0.90756303071975708, 0.0, 0.0), (0.91176468133926392, 0.0, 0.0),
(0.91596639156341553, 0.0, 0.0), (0.92016804218292236, 0.0, 0.0),
(0.92436975240707397, 0.0, 0.0), (0.92857140302658081, 0.0, 0.0),
(0.93277311325073242, 0.0, 0.0), (0.93697476387023926, 0.0, 0.0),
(0.94117647409439087, 0.0, 0.0), (0.94537812471389771, 0.0, 0.0),
(0.94957983493804932, 0.0, 0.0), (0.95378148555755615, 0.0, 0.0),
(0.95798319578170776, 0.0, 0.0), (0.9621848464012146, 0.0, 0.0),
(0.96638655662536621, 0.0, 0.0), (0.97058820724487305, 0.0, 0.0),
(0.97478991746902466, 0.0, 0.0), (0.97899156808853149, 0.0, 0.0),
(0.98319327831268311, 0.0, 0.0), (0.98739492893218994, 0.0, 0.0),
(0.99159663915634155, 0.0, 0.0), (0.99579828977584839, 0.0, 0.0), (1.0,
0.0, 0.0)], 'red': [(0.0, 1.0, 1.0), (0.0042016808874905109, 1.0, 1.0),
(0.0084033617749810219, 1.0, 1.0), (0.012605042196810246, 1.0, 1.0),
(0.016806723549962044, 1.0, 1.0), (0.021008403971791267, 1.0, 1.0),
(0.025210084393620491, 1.0, 1.0), (0.029411764815449715, 1.0, 1.0),
(0.033613447099924088, 1.0, 1.0), (0.037815127521753311, 1.0, 1.0),
(0.042016807943582535, 1.0, 1.0), (0.046218488365411758, 1.0, 1.0),
(0.050420168787240982, 1.0, 1.0), (0.054621849209070206, 1.0, 1.0),
(0.058823529630899429, 1.0, 1.0), (0.063025213778018951, 1.0, 1.0),
(0.067226894199848175, 1.0, 1.0), (0.071428574621677399, 1.0, 1.0),
(0.075630255043506622, 1.0, 1.0), (0.079831935465335846, 1.0, 1.0),
(0.08403361588716507, 1.0, 1.0), (0.088235296308994293, 1.0, 1.0),
(0.092436976730823517, 1.0, 1.0), (0.09663865715265274, 1.0, 1.0),
(0.10084033757448196, 1.0, 1.0), (0.10504201799631119, 1.0, 1.0),
(0.10924369841814041, 1.0, 1.0), (0.11344537883996964, 1.0, 1.0),
(0.11764705926179886, 1.0, 1.0), (0.12184873968362808, 1.0, 1.0),
(0.1260504275560379, 1.0, 1.0), (0.13025210797786713, 1.0, 1.0),
(0.13445378839969635, 1.0, 1.0), (0.13865546882152557, 1.0, 1.0),
(0.1428571492433548, 1.0, 1.0), (0.14705882966518402, 1.0, 1.0),
(0.15126051008701324, 1.0, 1.0), (0.15546219050884247, 1.0, 1.0),
(0.15966387093067169, 1.0, 1.0), (0.16386555135250092, 1.0, 1.0),
(0.16806723177433014, 1.0, 1.0), (0.17226891219615936, 1.0, 1.0),
(0.17647059261798859, 1.0, 1.0), (0.18067227303981781, 1.0, 1.0),
(0.18487395346164703, 1.0, 1.0), (0.18907563388347626, 1.0, 1.0),
(0.19327731430530548, 1.0, 1.0), (0.1974789947271347, 1.0, 1.0),
(0.20168067514896393, 1.0, 1.0), (0.20588235557079315, 1.0, 1.0),
(0.21008403599262238, 1.0, 1.0), (0.2142857164144516, 1.0, 1.0),
(0.21848739683628082, 1.0, 1.0), (0.22268907725811005,
0.96078431606292725, 0.96078431606292725), (0.22689075767993927,
0.94117647409439087, 0.94117647409439087), (0.23109243810176849,
0.92156863212585449, 0.92156863212585449), (0.23529411852359772,
0.89803922176361084, 0.89803922176361084), (0.23949579894542694,
0.87843137979507446, 0.87843137979507446), (0.24369747936725616,
0.85882353782653809, 0.85882353782653809), (0.24789915978908539,
0.83529412746429443, 0.83529412746429443), (0.25210085511207581,
0.81568628549575806, 0.81568628549575806), (0.25630253553390503,
0.7921568751335144, 0.7921568751335144), (0.26050421595573425,
0.77254903316497803, 0.77254903316497803), (0.26470589637756348,
0.75294119119644165, 0.75294119119644165), (0.2689075767993927,
0.729411780834198, 0.729411780834198), (0.27310925722122192,
0.70980393886566162, 0.70980393886566162), (0.27731093764305115,
0.68627452850341797, 0.68627452850341797), (0.28151261806488037,
0.66666668653488159, 0.66666668653488159), (0.28571429848670959,
0.62352943420410156, 0.62352943420410156), (0.28991597890853882,
0.60392159223556519, 0.60392159223556519), (0.29411765933036804,
0.58431375026702881, 0.58431375026702881), (0.29831933975219727,
0.56078433990478516, 0.56078433990478516), (0.30252102017402649,
0.54117649793624878, 0.54117649793624878), (0.30672270059585571,
0.51764708757400513, 0.51764708757400513), (0.31092438101768494,
0.49803921580314636, 0.49803921580314636), (0.31512606143951416,
0.47843137383460999, 0.47843137383460999), (0.31932774186134338,
0.45490196347236633, 0.45490196347236633), (0.32352942228317261,
0.43529412150382996, 0.43529412150382996), (0.32773110270500183,
0.41568627953529358, 0.41568627953529358), (0.33193278312683105,
0.39215686917304993, 0.39215686917304993), (0.33613446354866028,
0.37254902720451355, 0.37254902720451355), (0.3403361439704895,
0.3490196168422699, 0.3490196168422699), (0.34453782439231873,
0.32941177487373352, 0.32941177487373352), (0.34873950481414795,
0.28627452254295349, 0.28627452254295349), (0.35294118523597717,
0.26666668057441711, 0.26666668057441711), (0.3571428656578064,
0.24705882370471954, 0.24705882370471954), (0.36134454607963562,
0.22352941334247589, 0.22352941334247589), (0.36554622650146484,
0.20392157137393951, 0.20392157137393951), (0.36974790692329407,
0.18039216101169586, 0.18039216101169586), (0.37394958734512329,
0.16078431904315948, 0.16078431904315948), (0.37815126776695251,
0.14117647707462311, 0.14117647707462311), (0.38235294818878174,
0.11764705926179886, 0.11764705926179886), (0.38655462861061096,
0.098039217293262482, 0.098039217293262482), (0.39075630903244019,
0.074509806931018829, 0.074509806931018829), (0.39495798945426941,
0.054901961237192154, 0.054901961237192154), (0.39915966987609863,
0.035294119268655777, 0.035294119268655777), (0.40336135029792786,
0.011764706112444401, 0.011764706112444401), (0.40756303071975708, 0.0,
0.0), (0.4117647111415863, 0.0, 0.0), (0.41596639156341553, 0.0, 0.0),
(0.42016807198524475, 0.0, 0.0), (0.42436975240707397, 0.0, 0.0),
(0.4285714328289032, 0.0, 0.0), (0.43277311325073242, 0.0, 0.0),
(0.43697479367256165, 0.0, 0.0), (0.44117647409439087, 0.0, 0.0),
(0.44537815451622009, 0.0, 0.0), (0.44957983493804932, 0.0, 0.0),
(0.45378151535987854, 0.0, 0.0), (0.45798319578170776, 0.0, 0.0),
(0.46218487620353699, 0.0, 0.0), (0.46638655662536621, 0.0, 0.0),
(0.47058823704719543, 0.0, 0.0), (0.47478991746902466, 0.0, 0.0),
(0.47899159789085388, 0.0, 0.0), (0.48319327831268311, 0.0, 0.0),
(0.48739495873451233, 0.0, 0.0), (0.49159663915634155, 0.0, 0.0),
(0.49579831957817078, 0.0, 0.0), (0.5, 0.0, 0.0), (0.50420171022415161,
0.0, 0.0), (0.50840336084365845, 0.0, 0.0), (0.51260507106781006, 0.0,
0.0), (0.51680672168731689, 0.0, 0.0), (0.52100843191146851, 0.0, 0.0),
(0.52521008253097534, 0.0, 0.0), (0.52941179275512695, 0.0, 0.0),
(0.53361344337463379, 0.0, 0.0), (0.5378151535987854, 0.0, 0.0),
(0.54201680421829224, 0.0, 0.0), (0.54621851444244385, 0.0, 0.0),
(0.55042016506195068, 0.0, 0.0), (0.55462187528610229, 0.0, 0.0),
(0.55882352590560913, 0.0, 0.0), (0.56302523612976074, 0.0, 0.0),
(0.56722688674926758, 0.0, 0.0), (0.57142859697341919, 0.0, 0.0),
(0.57563024759292603, 0.0, 0.0), (0.57983195781707764, 0.0, 0.0),
(0.58403360843658447, 0.0, 0.0), (0.58823531866073608, 0.0, 0.0),
(0.59243696928024292, 0.0, 0.0), (0.59663867950439453, 0.0, 0.0),
(0.60084033012390137, 0.0, 0.0), (0.60504204034805298, 0.0, 0.0),
(0.60924369096755981, 0.0, 0.0), (0.61344540119171143, 0.0, 0.0),
(0.61764705181121826, 0.0, 0.0), (0.62184876203536987, 0.0, 0.0),
(0.62605041265487671, 0.0, 0.0), (0.63025212287902832, 0.0, 0.0),
(0.63445377349853516, 0.0, 0.0), (0.63865548372268677, 0.0, 0.0),
(0.6428571343421936, 0.0, 0.0), (0.64705884456634521, 0.0, 0.0),
(0.65126049518585205, 0.0, 0.0), (0.65546220541000366, 0.0, 0.0),
(0.6596638560295105, 0.0, 0.0), (0.66386556625366211, 0.0, 0.0),
(0.66806721687316895, 0.0, 0.0), (0.67226892709732056, 0.0, 0.0),
(0.67647057771682739, 0.0, 0.0), (0.680672287940979, 0.0, 0.0),
(0.68487393856048584, 0.0, 0.0), (0.68907564878463745, 0.0, 0.0),
(0.69327729940414429, 0.0, 0.0), (0.6974790096282959, 0.0, 0.0),
(0.70168066024780273, 0.0, 0.0), (0.70588237047195435, 0.0, 0.0),
(0.71008402109146118, 0.0, 0.0), (0.71428573131561279, 0.0, 0.0),
(0.71848738193511963, 0.0, 0.0), (0.72268909215927124, 0.0, 0.0),
(0.72689074277877808, 0.0, 0.0), (0.73109245300292969, 0.0, 0.0),
(0.73529410362243652, 0.0, 0.0), (0.73949581384658813, 0.0, 0.0),
(0.74369746446609497, 0.0, 0.0), (0.74789917469024658, 0.0, 0.0),
(0.75210082530975342, 0.0, 0.0), (0.75630253553390503, 0.0, 0.0),
(0.76050418615341187, 0.0, 0.0), (0.76470589637756348, 0.0, 0.0),
(0.76890754699707031, 0.0, 0.0), (0.77310925722122192, 0.0, 0.0),
(0.77731090784072876, 0.0, 0.0), (0.78151261806488037,
0.0078431377187371254, 0.0078431377187371254), (0.78571426868438721,
0.027450980618596077, 0.027450980618596077), (0.78991597890853882,
0.070588238537311554, 0.070588238537311554), (0.79411762952804565,
0.094117648899555206, 0.094117648899555206), (0.79831933975219727,
0.11372549086809158, 0.11372549086809158), (0.8025209903717041,
0.13333334028720856, 0.13333334028720856), (0.80672270059585571,
0.15686275064945221, 0.15686275064945221), (0.81092435121536255,
0.17647059261798859, 0.17647059261798859), (0.81512606143951416,
0.19607843458652496, 0.19607843458652496), (0.819327712059021,
0.21960784494876862, 0.21960784494876862), (0.82352942228317261,
0.23921568691730499, 0.23921568691730499), (0.82773107290267944,
0.26274511218070984, 0.26274511218070984), (0.83193278312683105,
0.28235295414924622, 0.28235295414924622), (0.83613443374633789,
0.30196079611778259, 0.30196079611778259), (0.8403361439704895,
0.32549020648002625, 0.32549020648002625), (0.84453779458999634,
0.34509804844856262, 0.34509804844856262), (0.84873950481414795,
0.364705890417099, 0.364705890417099), (0.85294115543365479,
0.40784314274787903, 0.40784314274787903), (0.8571428656578064,
0.43137255311012268, 0.43137255311012268), (0.86134451627731323,
0.45098039507865906, 0.45098039507865906), (0.86554622650146484,
0.47058823704719543, 0.47058823704719543), (0.86974787712097168,
0.49411764740943909, 0.49411764740943909), (0.87394958734512329,
0.51372551918029785, 0.51372551918029785), (0.87815123796463013,
0.53333336114883423, 0.53333336114883423), (0.88235294818878174,
0.55686277151107788, 0.55686277151107788), (0.88655459880828857,
0.57647061347961426, 0.57647061347961426), (0.89075630903244019,
0.60000002384185791, 0.60000002384185791), (0.89495795965194702,
0.61960786581039429, 0.61960786581039429), (0.89915966987609863,
0.63921570777893066, 0.63921570777893066), (0.90336132049560547,
0.66274511814117432, 0.66274511814117432), (0.90756303071975708,
0.68235296010971069, 0.68235296010971069), (0.91176468133926392,
0.70588237047195435, 0.70588237047195435), (0.91596639156341553,
0.7450980544090271, 0.7450980544090271), (0.92016804218292236,
0.76862746477127075, 0.76862746477127075), (0.92436975240707397,
0.78823530673980713, 0.78823530673980713), (0.92857140302658081,
0.80784314870834351, 0.80784314870834351), (0.93277311325073242,
0.83137255907058716, 0.83137255907058716), (0.93697476387023926,
0.85098040103912354, 0.85098040103912354), (0.94117647409439087,
0.87450981140136719, 0.87450981140136719), (0.94537812471389771,
0.89411765336990356, 0.89411765336990356), (0.94957983493804932,
0.91372549533843994, 0.91372549533843994), (0.95378148555755615,
0.93725490570068359, 0.93725490570068359), (0.95798319578170776,
0.95686274766921997, 0.95686274766921997), (0.9621848464012146,
0.97647058963775635, 0.97647058963775635), (0.96638655662536621, 1.0,
1.0), (0.97058820724487305, 1.0, 1.0), (0.97478991746902466, 1.0, 1.0),
(0.97899156808853149, 1.0, 1.0), (0.98319327831268311, 1.0, 1.0),
(0.98739492893218994, 1.0, 1.0), (0.99159663915634155, 1.0, 1.0),
(0.99579828977584839, 1.0, 1.0), (1.0, 1.0, 1.0)]}
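# Editorial sketch (not part of the original module): a minimal, illustrative
# example of how a segmentdata dict such as _gist_rainbow_data can be turned
# into a usable colormap.  It is kept commented out so this data-only module's
# behaviour is unchanged; the name `demo_cmap` and the 8x8 random image are
# invented for illustration, and only the public matplotlib.colors / pyplot
# APIs are assumed.
#
#     import numpy as np
#     import matplotlib.pyplot as plt
#     from matplotlib.colors import LinearSegmentedColormap
#
#     # Build a 256-entry colormap by piecewise-linear interpolation of the
#     # (x, y0, y1) control points defined above.
#     demo_cmap = LinearSegmentedColormap('gist_rainbow_demo',
#                                         _gist_rainbow_data, N=256)
#
#     # Sampling the map with values in [0, 1] returns RGBA rows.
#     print(demo_cmap(np.linspace(0.0, 1.0, 5)))
#
#     # The instance can be passed anywhere a named colormap is accepted.
#     plt.imshow(np.random.rand(8, 8), cmap=demo_cmap)
#     plt.colorbar()
#     plt.show()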
_gist_stern_data = {'blue': [(0.0, 0.0, 0.0),
(0.0042016808874905109, 0.0039215688593685627,
0.0039215688593685627), (0.0084033617749810219, 0.011764706112444401,
0.011764706112444401), (0.012605042196810246, 0.019607843831181526,
0.019607843831181526), (0.016806723549962044, 0.027450980618596077,
0.027450980618596077), (0.021008403971791267, 0.035294119268655777,
0.035294119268655777), (0.025210084393620491, 0.043137256056070328,
0.043137256056070328), (0.029411764815449715, 0.050980392843484879,
0.050980392843484879), (0.033613447099924088, 0.058823529630899429,
0.058823529630899429), (0.037815127521753311, 0.066666670143604279,
0.066666670143604279), (0.042016807943582535, 0.08235294371843338,
0.08235294371843338), (0.046218488365411758, 0.090196080505847931,
0.090196080505847931), (0.050420168787240982, 0.098039217293262482,
0.098039217293262482), (0.054621849209070206, 0.10588235408067703,
0.10588235408067703), (0.058823529630899429, 0.11372549086809158,
0.11372549086809158), (0.063025213778018951, 0.12156862765550613,
0.12156862765550613), (0.067226894199848175, 0.12941177189350128,
0.12941177189350128), (0.071428574621677399, 0.13725490868091583,
0.13725490868091583), (0.075630255043506622, 0.14509804546833038,
0.14509804546833038), (0.079831935465335846, 0.15294118225574493,
0.15294118225574493), (0.08403361588716507, 0.16078431904315948,
0.16078431904315948), (0.088235296308994293, 0.16862745583057404,
0.16862745583057404), (0.092436976730823517, 0.17647059261798859,
0.17647059261798859), (0.09663865715265274, 0.18431372940540314,
0.18431372940540314), (0.10084033757448196, 0.19215686619281769,
0.19215686619281769), (0.10504201799631119, 0.20000000298023224,
0.20000000298023224), (0.10924369841814041, 0.20784313976764679,
0.20784313976764679), (0.11344537883996964, 0.21568627655506134,
0.21568627655506134), (0.11764705926179886, 0.22352941334247589,
0.22352941334247589), (0.12184873968362808, 0.23137255012989044,
0.23137255012989044), (0.1260504275560379, 0.24705882370471954,
0.24705882370471954), (0.13025210797786713, 0.25490197539329529,
0.25490197539329529), (0.13445378839969635, 0.26274511218070984,
0.26274511218070984), (0.13865546882152557, 0.27058824896812439,
0.27058824896812439), (0.1428571492433548, 0.27843138575553894,
0.27843138575553894), (0.14705882966518402, 0.28627452254295349,
0.28627452254295349), (0.15126051008701324, 0.29411765933036804,
0.29411765933036804), (0.15546219050884247, 0.30196079611778259,
0.30196079611778259), (0.15966387093067169, 0.30980393290519714,
0.30980393290519714), (0.16386555135250092, 0.31764706969261169,
0.31764706969261169), (0.16806723177433014, 0.32549020648002625,
0.32549020648002625), (0.17226891219615936, 0.3333333432674408,
0.3333333432674408), (0.17647059261798859, 0.34117648005485535,
0.34117648005485535), (0.18067227303981781, 0.3490196168422699,
0.3490196168422699), (0.18487395346164703, 0.35686275362968445,
0.35686275362968445), (0.18907563388347626, 0.364705890417099,
0.364705890417099), (0.19327731430530548, 0.37254902720451355,
0.37254902720451355), (0.1974789947271347, 0.3803921639919281,
0.3803921639919281), (0.20168067514896393, 0.38823530077934265,
0.38823530077934265), (0.20588235557079315, 0.3960784375667572,
0.3960784375667572), (0.21008403599262238, 0.4117647111415863,
0.4117647111415863), (0.2142857164144516, 0.41960784792900085,
0.41960784792900085), (0.21848739683628082, 0.42745098471641541,
0.42745098471641541), (0.22268907725811005, 0.43529412150382996,
0.43529412150382996), (0.22689075767993927, 0.44313725829124451,
0.44313725829124451), (0.23109243810176849, 0.45098039507865906,
0.45098039507865906), (0.23529411852359772, 0.45882353186607361,
0.45882353186607361), (0.23949579894542694, 0.46666666865348816,
0.46666666865348816), (0.24369747936725616, 0.47450980544090271,
0.47450980544090271), (0.24789915978908539, 0.48235294222831726,
0.48235294222831726), (0.25210085511207581, 0.49803921580314636,
0.49803921580314636), (0.25630253553390503, 0.5058823823928833,
0.5058823823928833), (0.26050421595573425, 0.51372551918029785,
0.51372551918029785), (0.26470589637756348, 0.5215686559677124,
0.5215686559677124), (0.2689075767993927, 0.52941179275512695,
0.52941179275512695), (0.27310925722122192, 0.5372549295425415,
0.5372549295425415), (0.27731093764305115, 0.54509806632995605,
0.54509806632995605), (0.28151261806488037, 0.55294120311737061,
0.55294120311737061), (0.28571429848670959, 0.56078433990478516,
0.56078433990478516), (0.28991597890853882, 0.56862747669219971,
0.56862747669219971), (0.29411765933036804, 0.58431375026702881,
0.58431375026702881), (0.29831933975219727, 0.59215688705444336,
0.59215688705444336), (0.30252102017402649, 0.60000002384185791,
0.60000002384185791), (0.30672270059585571, 0.60784316062927246,
0.60784316062927246), (0.31092438101768494, 0.61568629741668701,
0.61568629741668701), (0.31512606143951416, 0.62352943420410156,
0.62352943420410156), (0.31932774186134338, 0.63137257099151611,
0.63137257099151611), (0.32352942228317261, 0.63921570777893066,
0.63921570777893066), (0.32773110270500183, 0.64705884456634521,
0.64705884456634521), (0.33193278312683105, 0.65490198135375977,
0.65490198135375977), (0.33613446354866028, 0.66274511814117432,
0.66274511814117432), (0.3403361439704895, 0.67058825492858887,
0.67058825492858887), (0.34453782439231873, 0.67843139171600342,
0.67843139171600342), (0.34873950481414795, 0.68627452850341797,
0.68627452850341797), (0.35294118523597717, 0.69411766529083252,
0.69411766529083252), (0.3571428656578064, 0.70196080207824707,
0.70196080207824707), (0.36134454607963562, 0.70980393886566162,
0.70980393886566162), (0.36554622650146484, 0.71764707565307617,
0.71764707565307617), (0.36974790692329407, 0.72549021244049072,
0.72549021244049072), (0.37394958734512329, 0.73333334922790527,
0.73333334922790527), (0.37815126776695251, 0.74901962280273438,
0.74901962280273438), (0.38235294818878174, 0.75686275959014893,
0.75686275959014893), (0.38655462861061096, 0.76470589637756348,
0.76470589637756348), (0.39075630903244019, 0.77254903316497803,
0.77254903316497803), (0.39495798945426941, 0.78039216995239258,
0.78039216995239258), (0.39915966987609863, 0.78823530673980713,
0.78823530673980713), (0.40336135029792786, 0.79607844352722168,
0.79607844352722168), (0.40756303071975708, 0.80392158031463623,
0.80392158031463623), (0.4117647111415863, 0.81176471710205078,
0.81176471710205078), (0.41596639156341553, 0.81960785388946533,
0.81960785388946533), (0.42016807198524475, 0.82745099067687988,
0.82745099067687988), (0.42436975240707397, 0.83529412746429443,
0.83529412746429443), (0.4285714328289032, 0.84313726425170898,
0.84313726425170898), (0.43277311325073242, 0.85098040103912354,
0.85098040103912354), (0.43697479367256165, 0.85882353782653809,
0.85882353782653809), (0.44117647409439087, 0.86666667461395264,
0.86666667461395264), (0.44537815451622009, 0.87450981140136719,
0.87450981140136719), (0.44957983493804932, 0.88235294818878174,
0.88235294818878174), (0.45378151535987854, 0.89019608497619629,
0.89019608497619629), (0.45798319578170776, 0.89803922176361084,
0.89803922176361084), (0.46218487620353699, 0.91372549533843994,
0.91372549533843994), (0.46638655662536621, 0.92156863212585449,
0.92156863212585449), (0.47058823704719543, 0.92941176891326904,
0.92941176891326904), (0.47478991746902466, 0.93725490570068359,
0.93725490570068359), (0.47899159789085388, 0.94509804248809814,
0.94509804248809814), (0.48319327831268311, 0.9529411792755127,
0.9529411792755127), (0.48739495873451233, 0.96078431606292725,
0.96078431606292725), (0.49159663915634155, 0.9686274528503418,
0.9686274528503418), (0.49579831957817078, 0.97647058963775635,
0.97647058963775635), (0.5, 0.9843137264251709, 0.9843137264251709),
(0.50420171022415161, 1.0, 1.0), (0.50840336084365845, 0.9843137264251709,
0.9843137264251709), (0.51260507106781006, 0.9686274528503418,
0.9686274528503418), (0.51680672168731689, 0.9529411792755127,
0.9529411792755127), (0.52100843191146851, 0.93333333730697632,
0.93333333730697632), (0.52521008253097534, 0.91764706373214722,
0.91764706373214722), (0.52941179275512695, 0.90196079015731812,
0.90196079015731812), (0.53361344337463379, 0.88627451658248901,
0.88627451658248901), (0.5378151535987854, 0.86666667461395264,
0.86666667461395264), (0.54201680421829224, 0.85098040103912354,
0.85098040103912354), (0.54621851444244385, 0.81960785388946533,
0.81960785388946533), (0.55042016506195068, 0.80000001192092896,
0.80000001192092896), (0.55462187528610229, 0.78431373834609985,
0.78431373834609985), (0.55882352590560913, 0.76862746477127075,
0.76862746477127075), (0.56302523612976074, 0.75294119119644165,
0.75294119119644165), (0.56722688674926758, 0.73333334922790527,
0.73333334922790527), (0.57142859697341919, 0.71764707565307617,
0.71764707565307617), (0.57563024759292603, 0.70196080207824707,
0.70196080207824707), (0.57983195781707764, 0.68627452850341797,
0.68627452850341797), (0.58403360843658447, 0.66666668653488159,
0.66666668653488159), (0.58823531866073608, 0.65098041296005249,
0.65098041296005249), (0.59243696928024292, 0.63529413938522339,
0.63529413938522339), (0.59663867950439453, 0.61960786581039429,
0.61960786581039429), (0.60084033012390137, 0.60000002384185791,
0.60000002384185791), (0.60504204034805298, 0.58431375026702881,
0.58431375026702881), (0.60924369096755981, 0.56862747669219971,
0.56862747669219971), (0.61344540119171143, 0.55294120311737061,
0.55294120311737061), (0.61764705181121826, 0.53333336114883423,
0.53333336114883423), (0.62184876203536987, 0.51764708757400513,
0.51764708757400513), (0.62605041265487671, 0.50196081399917603,
0.50196081399917603), (0.63025212287902832, 0.46666666865348816,
0.46666666865348816), (0.63445377349853516, 0.45098039507865906,
0.45098039507865906), (0.63865548372268677, 0.43529412150382996,
0.43529412150382996), (0.6428571343421936, 0.41960784792900085,
0.41960784792900085), (0.64705884456634521, 0.40000000596046448,
0.40000000596046448), (0.65126049518585205, 0.38431373238563538,
0.38431373238563538), (0.65546220541000366, 0.36862745881080627,
0.36862745881080627), (0.6596638560295105, 0.35294118523597717,
0.35294118523597717), (0.66386556625366211, 0.3333333432674408,
0.3333333432674408), (0.66806721687316895, 0.31764706969261169,
0.31764706969261169), (0.67226892709732056, 0.30196079611778259,
0.30196079611778259), (0.67647057771682739, 0.28627452254295349,
0.28627452254295349), (0.680672287940979, 0.26666668057441711,
0.26666668057441711), (0.68487393856048584, 0.25098040699958801,
0.25098040699958801), (0.68907564878463745, 0.23529411852359772,
0.23529411852359772), (0.69327729940414429, 0.21960784494876862,
0.21960784494876862), (0.6974790096282959, 0.20000000298023224,
0.20000000298023224), (0.70168066024780273, 0.18431372940540314,
0.18431372940540314), (0.70588237047195435, 0.16862745583057404,
0.16862745583057404), (0.71008402109146118, 0.15294118225574493,
0.15294118225574493), (0.71428573131561279, 0.11764705926179886,
0.11764705926179886), (0.71848738193511963, 0.10196078568696976,
0.10196078568696976), (0.72268909215927124, 0.086274512112140656,
0.086274512112140656), (0.72689074277877808, 0.066666670143604279,
0.066666670143604279), (0.73109245300292969, 0.050980392843484879,
0.050980392843484879), (0.73529410362243652, 0.035294119268655777,
0.035294119268655777), (0.73949581384658813, 0.019607843831181526,
0.019607843831181526), (0.74369746446609497, 0.0, 0.0),
(0.74789917469024658, 0.011764706112444401, 0.011764706112444401),
(0.75210082530975342, 0.027450980618596077, 0.027450980618596077),
(0.75630253553390503, 0.058823529630899429, 0.058823529630899429),
(0.76050418615341187, 0.074509806931018829, 0.074509806931018829),
(0.76470589637756348, 0.086274512112140656, 0.086274512112140656),
(0.76890754699707031, 0.10196078568696976, 0.10196078568696976),
(0.77310925722122192, 0.11764705926179886, 0.11764705926179886),
(0.77731090784072876, 0.13333334028720856, 0.13333334028720856),
(0.78151261806488037, 0.14901961386203766, 0.14901961386203766),
(0.78571426868438721, 0.16078431904315948, 0.16078431904315948),
(0.78991597890853882, 0.17647059261798859, 0.17647059261798859),
(0.79411762952804565, 0.19215686619281769, 0.19215686619281769),
(0.79831933975219727, 0.22352941334247589, 0.22352941334247589),
(0.8025209903717041, 0.23529411852359772, 0.23529411852359772),
(0.80672270059585571, 0.25098040699958801, 0.25098040699958801),
(0.81092435121536255, 0.26666668057441711, 0.26666668057441711),
(0.81512606143951416, 0.28235295414924622, 0.28235295414924622),
(0.819327712059021, 0.29803922772407532, 0.29803922772407532),
(0.82352942228317261, 0.30980393290519714, 0.30980393290519714),
(0.82773107290267944, 0.32549020648002625, 0.32549020648002625),
(0.83193278312683105, 0.34117648005485535, 0.34117648005485535),
(0.83613443374633789, 0.35686275362968445, 0.35686275362968445),
(0.8403361439704895, 0.37254902720451355, 0.37254902720451355),
(0.84453779458999634, 0.38431373238563538, 0.38431373238563538),
(0.84873950481414795, 0.40000000596046448, 0.40000000596046448),
(0.85294115543365479, 0.41568627953529358, 0.41568627953529358),
(0.8571428656578064, 0.43137255311012268, 0.43137255311012268),
(0.86134451627731323, 0.44705882668495178, 0.44705882668495178),
(0.86554622650146484, 0.45882353186607361, 0.45882353186607361),
(0.86974787712097168, 0.47450980544090271, 0.47450980544090271),
(0.87394958734512329, 0.49019607901573181, 0.49019607901573181),
(0.87815123796463013, 0.5058823823928833, 0.5058823823928833),
(0.88235294818878174, 0.5372549295425415, 0.5372549295425415),
(0.88655459880828857, 0.54901963472366333, 0.54901963472366333),
(0.89075630903244019, 0.56470590829849243, 0.56470590829849243),
(0.89495795965194702, 0.58039218187332153, 0.58039218187332153),
(0.89915966987609863, 0.59607845544815063, 0.59607845544815063),
(0.90336132049560547, 0.61176472902297974, 0.61176472902297974),
(0.90756303071975708, 0.62352943420410156, 0.62352943420410156),
(0.91176468133926392, 0.63921570777893066, 0.63921570777893066),
(0.91596639156341553, 0.65490198135375977, 0.65490198135375977),
(0.92016804218292236, 0.67058825492858887, 0.67058825492858887),
(0.92436975240707397, 0.68627452850341797, 0.68627452850341797),
(0.92857140302658081, 0.69803923368453979, 0.69803923368453979),
(0.93277311325073242, 0.7137255072593689, 0.7137255072593689),
(0.93697476387023926, 0.729411780834198, 0.729411780834198),
(0.94117647409439087, 0.7450980544090271, 0.7450980544090271),
(0.94537812471389771, 0.7607843279838562, 0.7607843279838562),
(0.94957983493804932, 0.77254903316497803, 0.77254903316497803),
(0.95378148555755615, 0.78823530673980713, 0.78823530673980713),
(0.95798319578170776, 0.80392158031463623, 0.80392158031463623),
(0.9621848464012146, 0.81960785388946533, 0.81960785388946533),
(0.96638655662536621, 0.84705883264541626, 0.84705883264541626),
(0.97058820724487305, 0.86274510622024536, 0.86274510622024536),
(0.97478991746902466, 0.87843137979507446, 0.87843137979507446),
(0.97899156808853149, 0.89411765336990356, 0.89411765336990356),
(0.98319327831268311, 0.90980392694473267, 0.90980392694473267),
(0.98739492893218994, 0.92156863212585449, 0.92156863212585449),
(0.99159663915634155, 0.93725490570068359, 0.93725490570068359),
(0.99579828977584839, 0.9529411792755127, 0.9529411792755127), (1.0,
0.9686274528503418, 0.9686274528503418)], 'green': [(0.0, 0.0, 0.0),
(0.0042016808874905109, 0.0039215688593685627, 0.0039215688593685627),
(0.0084033617749810219, 0.0078431377187371254, 0.0078431377187371254),
(0.012605042196810246, 0.011764706112444401, 0.011764706112444401),
(0.016806723549962044, 0.015686275437474251, 0.015686275437474251),
(0.021008403971791267, 0.019607843831181526, 0.019607843831181526),
(0.025210084393620491, 0.023529412224888802, 0.023529412224888802),
(0.029411764815449715, 0.027450980618596077, 0.027450980618596077),
(0.033613447099924088, 0.031372550874948502, 0.031372550874948502),
(0.037815127521753311, 0.035294119268655777, 0.035294119268655777),
(0.042016807943582535, 0.043137256056070328, 0.043137256056070328),
(0.046218488365411758, 0.047058824449777603, 0.047058824449777603),
(0.050420168787240982, 0.050980392843484879, 0.050980392843484879),
(0.054621849209070206, 0.054901961237192154, 0.054901961237192154),
(0.058823529630899429, 0.058823529630899429, 0.058823529630899429),
(0.063025213778018951, 0.062745101749897003, 0.062745101749897003),
(0.067226894199848175, 0.066666670143604279, 0.066666670143604279),
(0.071428574621677399, 0.070588238537311554, 0.070588238537311554),
(0.075630255043506622, 0.074509806931018829, 0.074509806931018829),
(0.079831935465335846, 0.078431375324726105, 0.078431375324726105),
(0.08403361588716507, 0.08235294371843338, 0.08235294371843338),
(0.088235296308994293, 0.086274512112140656, 0.086274512112140656),
(0.092436976730823517, 0.090196080505847931, 0.090196080505847931),
(0.09663865715265274, 0.094117648899555206, 0.094117648899555206),
(0.10084033757448196, 0.098039217293262482, 0.098039217293262482),
(0.10504201799631119, 0.10196078568696976, 0.10196078568696976),
(0.10924369841814041, 0.10588235408067703, 0.10588235408067703),
(0.11344537883996964, 0.10980392247438431, 0.10980392247438431),
(0.11764705926179886, 0.11372549086809158, 0.11372549086809158),
(0.12184873968362808, 0.11764705926179886, 0.11764705926179886),
(0.1260504275560379, 0.12549020349979401, 0.12549020349979401),
(0.13025210797786713, 0.12941177189350128, 0.12941177189350128),
(0.13445378839969635, 0.13333334028720856, 0.13333334028720856),
(0.13865546882152557, 0.13725490868091583, 0.13725490868091583),
(0.1428571492433548, 0.14117647707462311, 0.14117647707462311),
(0.14705882966518402, 0.14509804546833038, 0.14509804546833038),
(0.15126051008701324, 0.14901961386203766, 0.14901961386203766),
(0.15546219050884247, 0.15294118225574493, 0.15294118225574493),
(0.15966387093067169, 0.15686275064945221, 0.15686275064945221),
(0.16386555135250092, 0.16078431904315948, 0.16078431904315948),
(0.16806723177433014, 0.16470588743686676, 0.16470588743686676),
(0.17226891219615936, 0.16862745583057404, 0.16862745583057404),
(0.17647059261798859, 0.17254902422428131, 0.17254902422428131),
(0.18067227303981781, 0.17647059261798859, 0.17647059261798859),
(0.18487395346164703, 0.18039216101169586, 0.18039216101169586),
(0.18907563388347626, 0.18431372940540314, 0.18431372940540314),
(0.19327731430530548, 0.18823529779911041, 0.18823529779911041),
(0.1974789947271347, 0.19215686619281769, 0.19215686619281769),
(0.20168067514896393, 0.19607843458652496, 0.19607843458652496),
(0.20588235557079315, 0.20000000298023224, 0.20000000298023224),
(0.21008403599262238, 0.20784313976764679, 0.20784313976764679),
(0.2142857164144516, 0.21176470816135406, 0.21176470816135406),
(0.21848739683628082, 0.21568627655506134, 0.21568627655506134),
(0.22268907725811005, 0.21960784494876862, 0.21960784494876862),
(0.22689075767993927, 0.22352941334247589, 0.22352941334247589),
(0.23109243810176849, 0.22745098173618317, 0.22745098173618317),
(0.23529411852359772, 0.23137255012989044, 0.23137255012989044),
(0.23949579894542694, 0.23529411852359772, 0.23529411852359772),
(0.24369747936725616, 0.23921568691730499, 0.23921568691730499),
(0.24789915978908539, 0.24313725531101227, 0.24313725531101227),
(0.25210085511207581, 0.25098040699958801, 0.25098040699958801),
(0.25630253553390503, 0.25490197539329529, 0.25490197539329529),
(0.26050421595573425, 0.25882354378700256, 0.25882354378700256),
(0.26470589637756348, 0.26274511218070984, 0.26274511218070984),
(0.2689075767993927, 0.26666668057441711, 0.26666668057441711),
(0.27310925722122192, 0.27058824896812439, 0.27058824896812439),
(0.27731093764305115, 0.27450981736183167, 0.27450981736183167),
(0.28151261806488037, 0.27843138575553894, 0.27843138575553894),
(0.28571429848670959, 0.28235295414924622, 0.28235295414924622),
(0.28991597890853882, 0.28627452254295349, 0.28627452254295349),
(0.29411765933036804, 0.29411765933036804, 0.29411765933036804),
(0.29831933975219727, 0.29803922772407532, 0.29803922772407532),
(0.30252102017402649, 0.30196079611778259, 0.30196079611778259),
(0.30672270059585571, 0.30588236451148987, 0.30588236451148987),
(0.31092438101768494, 0.30980393290519714, 0.30980393290519714),
(0.31512606143951416, 0.31372550129890442, 0.31372550129890442),
(0.31932774186134338, 0.31764706969261169, 0.31764706969261169),
(0.32352942228317261, 0.32156863808631897, 0.32156863808631897),
(0.32773110270500183, 0.32549020648002625, 0.32549020648002625),
(0.33193278312683105, 0.32941177487373352, 0.32941177487373352),
(0.33613446354866028, 0.3333333432674408, 0.3333333432674408),
(0.3403361439704895, 0.33725491166114807, 0.33725491166114807),
(0.34453782439231873, 0.34117648005485535, 0.34117648005485535),
(0.34873950481414795, 0.34509804844856262, 0.34509804844856262),
(0.35294118523597717, 0.3490196168422699, 0.3490196168422699),
(0.3571428656578064, 0.35294118523597717, 0.35294118523597717),
(0.36134454607963562, 0.35686275362968445, 0.35686275362968445),
(0.36554622650146484, 0.36078432202339172, 0.36078432202339172),
(0.36974790692329407, 0.364705890417099, 0.364705890417099),
(0.37394958734512329, 0.36862745881080627, 0.36862745881080627),
(0.37815126776695251, 0.37647059559822083, 0.37647059559822083),
(0.38235294818878174, 0.3803921639919281, 0.3803921639919281),
(0.38655462861061096, 0.38431373238563538, 0.38431373238563538),
(0.39075630903244019, 0.38823530077934265, 0.38823530077934265),
(0.39495798945426941, 0.39215686917304993, 0.39215686917304993),
(0.39915966987609863, 0.3960784375667572, 0.3960784375667572),
(0.40336135029792786, 0.40000000596046448, 0.40000000596046448),
(0.40756303071975708, 0.40392157435417175, 0.40392157435417175),
(0.4117647111415863, 0.40784314274787903, 0.40784314274787903),
(0.41596639156341553, 0.4117647111415863, 0.4117647111415863),
(0.42016807198524475, 0.41568627953529358, 0.41568627953529358),
(0.42436975240707397, 0.41960784792900085, 0.41960784792900085),
(0.4285714328289032, 0.42352941632270813, 0.42352941632270813),
(0.43277311325073242, 0.42745098471641541, 0.42745098471641541),
(0.43697479367256165, 0.43137255311012268, 0.43137255311012268),
(0.44117647409439087, 0.43529412150382996, 0.43529412150382996),
(0.44537815451622009, 0.43921568989753723, 0.43921568989753723),
(0.44957983493804932, 0.44313725829124451, 0.44313725829124451),
(0.45378151535987854, 0.44705882668495178, 0.44705882668495178),
(0.45798319578170776, 0.45098039507865906, 0.45098039507865906),
(0.46218487620353699, 0.45882353186607361, 0.45882353186607361),
(0.46638655662536621, 0.46274510025978088, 0.46274510025978088),
(0.47058823704719543, 0.46666666865348816, 0.46666666865348816),
(0.47478991746902466, 0.47058823704719543, 0.47058823704719543),
(0.47899159789085388, 0.47450980544090271, 0.47450980544090271),
(0.48319327831268311, 0.47843137383460999, 0.47843137383460999),
(0.48739495873451233, 0.48235294222831726, 0.48235294222831726),
(0.49159663915634155, 0.48627451062202454, 0.48627451062202454),
(0.49579831957817078, 0.49019607901573181, 0.49019607901573181), (0.5,
0.49411764740943909, 0.49411764740943909), (0.50420171022415161,
0.50196081399917603, 0.50196081399917603), (0.50840336084365845,
0.5058823823928833, 0.5058823823928833), (0.51260507106781006,
0.50980395078659058, 0.50980395078659058), (0.51680672168731689,
0.51372551918029785, 0.51372551918029785), (0.52100843191146851,
0.51764708757400513, 0.51764708757400513), (0.52521008253097534,
0.5215686559677124, 0.5215686559677124), (0.52941179275512695,
0.52549022436141968, 0.52549022436141968), (0.53361344337463379,
0.52941179275512695, 0.52941179275512695), (0.5378151535987854,
0.53333336114883423, 0.53333336114883423), (0.54201680421829224,
0.5372549295425415, 0.5372549295425415), (0.54621851444244385,
0.54509806632995605, 0.54509806632995605), (0.55042016506195068,
0.54901963472366333, 0.54901963472366333), (0.55462187528610229,
0.55294120311737061, 0.55294120311737061), (0.55882352590560913,
0.55686277151107788, 0.55686277151107788), (0.56302523612976074,
0.56078433990478516, 0.56078433990478516), (0.56722688674926758,
0.56470590829849243, 0.56470590829849243), (0.57142859697341919,
0.56862747669219971, 0.56862747669219971), (0.57563024759292603,
0.57254904508590698, 0.57254904508590698), (0.57983195781707764,
0.57647061347961426, 0.57647061347961426), (0.58403360843658447,
0.58039218187332153, 0.58039218187332153), (0.58823531866073608,
0.58431375026702881, 0.58431375026702881), (0.59243696928024292,
0.58823531866073608, 0.58823531866073608), (0.59663867950439453,
0.59215688705444336, 0.59215688705444336), (0.60084033012390137,
0.59607845544815063, 0.59607845544815063), (0.60504204034805298,
0.60000002384185791, 0.60000002384185791), (0.60924369096755981,
0.60392159223556519, 0.60392159223556519), (0.61344540119171143,
0.60784316062927246, 0.60784316062927246), (0.61764705181121826,
0.61176472902297974, 0.61176472902297974), (0.62184876203536987,
0.61568629741668701, 0.61568629741668701), (0.62605041265487671,
0.61960786581039429, 0.61960786581039429), (0.63025212287902832,
0.62745100259780884, 0.62745100259780884), (0.63445377349853516,
0.63137257099151611, 0.63137257099151611), (0.63865548372268677,
0.63529413938522339, 0.63529413938522339), (0.6428571343421936,
0.63921570777893066, 0.63921570777893066), (0.64705884456634521,
0.64313727617263794, 0.64313727617263794), (0.65126049518585205,
0.64705884456634521, 0.64705884456634521), (0.65546220541000366,
0.65098041296005249, 0.65098041296005249), (0.6596638560295105,
0.65490198135375977, 0.65490198135375977), (0.66386556625366211,
0.65882354974746704, 0.65882354974746704), (0.66806721687316895,
0.66274511814117432, 0.66274511814117432), (0.67226892709732056,
0.66666668653488159, 0.66666668653488159), (0.67647057771682739,
0.67058825492858887, 0.67058825492858887), (0.680672287940979,
0.67450982332229614, 0.67450982332229614), (0.68487393856048584,
0.67843139171600342, 0.67843139171600342), (0.68907564878463745,
0.68235296010971069, 0.68235296010971069), (0.69327729940414429,
0.68627452850341797, 0.68627452850341797), (0.6974790096282959,
0.69019609689712524, 0.69019609689712524), (0.70168066024780273,
0.69411766529083252, 0.69411766529083252), (0.70588237047195435,
0.69803923368453979, 0.69803923368453979), (0.71008402109146118,
0.70196080207824707, 0.70196080207824707), (0.71428573131561279,
0.70980393886566162, 0.70980393886566162), (0.71848738193511963,
0.7137255072593689, 0.7137255072593689), (0.72268909215927124,
0.71764707565307617, 0.71764707565307617), (0.72689074277877808,
0.72156864404678345, 0.72156864404678345), (0.73109245300292969,
0.72549021244049072, 0.72549021244049072), (0.73529410362243652,
0.729411780834198, 0.729411780834198), (0.73949581384658813,
0.73333334922790527, 0.73333334922790527), (0.74369746446609497,
0.73725491762161255, 0.73725491762161255), (0.74789917469024658,
0.74117648601531982, 0.74117648601531982), (0.75210082530975342,
0.7450980544090271, 0.7450980544090271), (0.75630253553390503,
0.75294119119644165, 0.75294119119644165), (0.76050418615341187,
0.75686275959014893, 0.75686275959014893), (0.76470589637756348,
0.7607843279838562, 0.7607843279838562), (0.76890754699707031,
0.76470589637756348, 0.76470589637756348), (0.77310925722122192,
0.76862746477127075, 0.76862746477127075), (0.77731090784072876,
0.77254903316497803, 0.77254903316497803), (0.78151261806488037,
0.7764706015586853, 0.7764706015586853), (0.78571426868438721,
0.78039216995239258, 0.78039216995239258), (0.78991597890853882,
0.78431373834609985, 0.78431373834609985), (0.79411762952804565,
0.78823530673980713, 0.78823530673980713), (0.79831933975219727,
0.79607844352722168, 0.79607844352722168), (0.8025209903717041,
0.80000001192092896, 0.80000001192092896), (0.80672270059585571,
0.80392158031463623, 0.80392158031463623), (0.81092435121536255,
0.80784314870834351, 0.80784314870834351), (0.81512606143951416,
0.81176471710205078, 0.81176471710205078), (0.819327712059021,
0.81568628549575806, 0.81568628549575806), (0.82352942228317261,
0.81960785388946533, 0.81960785388946533), (0.82773107290267944,
0.82352942228317261, 0.82352942228317261), (0.83193278312683105,
0.82745099067687988, 0.82745099067687988), (0.83613443374633789,
0.83137255907058716, 0.83137255907058716), (0.8403361439704895,
0.83529412746429443, 0.83529412746429443), (0.84453779458999634,
0.83921569585800171, 0.83921569585800171), (0.84873950481414795,
0.84313726425170898, 0.84313726425170898), (0.85294115543365479,
0.84705883264541626, 0.84705883264541626), (0.8571428656578064,
0.85098040103912354, 0.85098040103912354), (0.86134451627731323,
0.85490196943283081, 0.85490196943283081), (0.86554622650146484,
0.85882353782653809, 0.85882353782653809), (0.86974787712097168,
0.86274510622024536, 0.86274510622024536), (0.87394958734512329,
0.86666667461395264, 0.86666667461395264), (0.87815123796463013,
0.87058824300765991, 0.87058824300765991), (0.88235294818878174,
0.87843137979507446, 0.87843137979507446), (0.88655459880828857,
0.88235294818878174, 0.88235294818878174), (0.89075630903244019,
0.88627451658248901, 0.88627451658248901), (0.89495795965194702,
0.89019608497619629, 0.89019608497619629), (0.89915966987609863,
0.89411765336990356, 0.89411765336990356), (0.90336132049560547,
0.89803922176361084, 0.89803922176361084), (0.90756303071975708,
0.90196079015731812, 0.90196079015731812), (0.91176468133926392,
0.90588235855102539, 0.90588235855102539), (0.91596639156341553,
0.90980392694473267, 0.90980392694473267), (0.92016804218292236,
0.91372549533843994, 0.91372549533843994), (0.92436975240707397,
0.91764706373214722, 0.91764706373214722), (0.92857140302658081,
0.92156863212585449, 0.92156863212585449), (0.93277311325073242,
0.92549020051956177, 0.92549020051956177), (0.93697476387023926,
0.92941176891326904, 0.92941176891326904), (0.94117647409439087,
0.93333333730697632, 0.93333333730697632), (0.94537812471389771,
0.93725490570068359, 0.93725490570068359), (0.94957983493804932,
0.94117647409439087, 0.94117647409439087), (0.95378148555755615,
0.94509804248809814, 0.94509804248809814), (0.95798319578170776,
0.94901961088180542, 0.94901961088180542), (0.9621848464012146,
0.9529411792755127, 0.9529411792755127), (0.96638655662536621,
0.96078431606292725, 0.96078431606292725), (0.97058820724487305,
0.96470588445663452, 0.96470588445663452), (0.97478991746902466,
0.9686274528503418, 0.9686274528503418), (0.97899156808853149,
0.97254902124404907, 0.97254902124404907), (0.98319327831268311,
0.97647058963775635, 0.97647058963775635), (0.98739492893218994,
0.98039215803146362, 0.98039215803146362), (0.99159663915634155,
0.9843137264251709, 0.9843137264251709), (0.99579828977584839,
0.98823529481887817, 0.98823529481887817), (1.0, 0.99215686321258545,
0.99215686321258545)], 'red': [(0.0, 0.0, 0.0), (0.0042016808874905109,
0.070588238537311554, 0.070588238537311554), (0.0084033617749810219,
0.14117647707462311, 0.14117647707462311), (0.012605042196810246,
0.21176470816135406, 0.21176470816135406), (0.016806723549962044,
0.28235295414924622, 0.28235295414924622), (0.021008403971791267,
0.35294118523597717, 0.35294118523597717), (0.025210084393620491,
0.42352941632270813, 0.42352941632270813), (0.029411764815449715,
0.49803921580314636, 0.49803921580314636), (0.033613447099924088,
0.56862747669219971, 0.56862747669219971), (0.037815127521753311,
0.63921570777893066, 0.63921570777893066), (0.042016807943582535,
0.78039216995239258, 0.78039216995239258), (0.046218488365411758,
0.85098040103912354, 0.85098040103912354), (0.050420168787240982,
0.92156863212585449, 0.92156863212585449), (0.054621849209070206,
0.99607843160629272, 0.99607843160629272), (0.058823529630899429,
0.97647058963775635, 0.97647058963775635), (0.063025213778018951,
0.95686274766921997, 0.95686274766921997), (0.067226894199848175,
0.93725490570068359, 0.93725490570068359), (0.071428574621677399,
0.91764706373214722, 0.91764706373214722), (0.075630255043506622,
0.89803922176361084, 0.89803922176361084), (0.079831935465335846,
0.87450981140136719, 0.87450981140136719), (0.08403361588716507,
0.85490196943283081, 0.85490196943283081), (0.088235296308994293,
0.83529412746429443, 0.83529412746429443), (0.092436976730823517,
0.81568628549575806, 0.81568628549575806), (0.09663865715265274,
0.79607844352722168, 0.79607844352722168), (0.10084033757448196,
0.77254903316497803, 0.77254903316497803), (0.10504201799631119,
0.75294119119644165, 0.75294119119644165), (0.10924369841814041,
0.73333334922790527, 0.73333334922790527), (0.11344537883996964,
0.7137255072593689, 0.7137255072593689), (0.11764705926179886,
0.69411766529083252, 0.69411766529083252), (0.12184873968362808,
0.67450982332229614, 0.67450982332229614), (0.1260504275560379,
0.63137257099151611, 0.63137257099151611), (0.13025210797786713,
0.61176472902297974, 0.61176472902297974), (0.13445378839969635,
0.59215688705444336, 0.59215688705444336), (0.13865546882152557,
0.57254904508590698, 0.57254904508590698), (0.1428571492433548,
0.54901963472366333, 0.54901963472366333), (0.14705882966518402,
0.52941179275512695, 0.52941179275512695), (0.15126051008701324,
0.50980395078659058, 0.50980395078659058), (0.15546219050884247,
0.49019607901573181, 0.49019607901573181), (0.15966387093067169,
0.47058823704719543, 0.47058823704719543), (0.16386555135250092,
0.45098039507865906, 0.45098039507865906), (0.16806723177433014,
0.42745098471641541, 0.42745098471641541), (0.17226891219615936,
0.40784314274787903, 0.40784314274787903), (0.17647059261798859,
0.38823530077934265, 0.38823530077934265), (0.18067227303981781,
0.36862745881080627, 0.36862745881080627), (0.18487395346164703,
0.3490196168422699, 0.3490196168422699), (0.18907563388347626,
0.32549020648002625, 0.32549020648002625), (0.19327731430530548,
0.30588236451148987, 0.30588236451148987), (0.1974789947271347,
0.28627452254295349, 0.28627452254295349), (0.20168067514896393,
0.26666668057441711, 0.26666668057441711), (0.20588235557079315,
0.24705882370471954, 0.24705882370471954), (0.21008403599262238,
0.20392157137393951, 0.20392157137393951), (0.2142857164144516,
0.18431372940540314, 0.18431372940540314), (0.21848739683628082,
0.16470588743686676, 0.16470588743686676), (0.22268907725811005,
0.14509804546833038, 0.14509804546833038), (0.22689075767993927,
0.12549020349979401, 0.12549020349979401), (0.23109243810176849,
0.10196078568696976, 0.10196078568696976), (0.23529411852359772,
0.08235294371843338, 0.08235294371843338), (0.23949579894542694,
0.062745101749897003, 0.062745101749897003), (0.24369747936725616,
0.043137256056070328, 0.043137256056070328), (0.24789915978908539,
0.023529412224888802, 0.023529412224888802), (0.25210085511207581,
0.25098040699958801, 0.25098040699958801), (0.25630253553390503,
0.25490197539329529, 0.25490197539329529), (0.26050421595573425,
0.25882354378700256, 0.25882354378700256), (0.26470589637756348,
0.26274511218070984, 0.26274511218070984), (0.2689075767993927,
0.26666668057441711, 0.26666668057441711), (0.27310925722122192,
0.27058824896812439, 0.27058824896812439), (0.27731093764305115,
0.27450981736183167, 0.27450981736183167), (0.28151261806488037,
0.27843138575553894, 0.27843138575553894), (0.28571429848670959,
0.28235295414924622, 0.28235295414924622), (0.28991597890853882,
0.28627452254295349, 0.28627452254295349), (0.29411765933036804,
0.29411765933036804, 0.29411765933036804), (0.29831933975219727,
0.29803922772407532, 0.29803922772407532), (0.30252102017402649,
0.30196079611778259, 0.30196079611778259), (0.30672270059585571,
0.30588236451148987, 0.30588236451148987), (0.31092438101768494,
0.30980393290519714, 0.30980393290519714), (0.31512606143951416,
0.31372550129890442, 0.31372550129890442), (0.31932774186134338,
0.31764706969261169, 0.31764706969261169), (0.32352942228317261,
0.32156863808631897, 0.32156863808631897), (0.32773110270500183,
0.32549020648002625, 0.32549020648002625), (0.33193278312683105,
0.32941177487373352, 0.32941177487373352), (0.33613446354866028,
0.3333333432674408, 0.3333333432674408), (0.3403361439704895,
0.33725491166114807, 0.33725491166114807), (0.34453782439231873,
0.34117648005485535, 0.34117648005485535), (0.34873950481414795,
0.34509804844856262, 0.34509804844856262), (0.35294118523597717,
0.3490196168422699, 0.3490196168422699), (0.3571428656578064,
0.35294118523597717, 0.35294118523597717), (0.36134454607963562,
0.35686275362968445, 0.35686275362968445), (0.36554622650146484,
0.36078432202339172, 0.36078432202339172), (0.36974790692329407,
0.364705890417099, 0.364705890417099), (0.37394958734512329,
0.36862745881080627, 0.36862745881080627), (0.37815126776695251,
0.37647059559822083, 0.37647059559822083), (0.38235294818878174,
0.3803921639919281, 0.3803921639919281), (0.38655462861061096,
0.38431373238563538, 0.38431373238563538), (0.39075630903244019,
0.38823530077934265, 0.38823530077934265), (0.39495798945426941,
0.39215686917304993, 0.39215686917304993), (0.39915966987609863,
0.3960784375667572, 0.3960784375667572), (0.40336135029792786,
0.40000000596046448, 0.40000000596046448), (0.40756303071975708,
0.40392157435417175, 0.40392157435417175), (0.4117647111415863,
0.40784314274787903, 0.40784314274787903), (0.41596639156341553,
0.4117647111415863, 0.4117647111415863), (0.42016807198524475,
0.41568627953529358, 0.41568627953529358), (0.42436975240707397,
0.41960784792900085, 0.41960784792900085), (0.4285714328289032,
0.42352941632270813, 0.42352941632270813), (0.43277311325073242,
0.42745098471641541, 0.42745098471641541), (0.43697479367256165,
0.43137255311012268, 0.43137255311012268), (0.44117647409439087,
0.43529412150382996, 0.43529412150382996), (0.44537815451622009,
0.43921568989753723, 0.43921568989753723), (0.44957983493804932,
0.44313725829124451, 0.44313725829124451), (0.45378151535987854,
0.44705882668495178, 0.44705882668495178), (0.45798319578170776,
0.45098039507865906, 0.45098039507865906), (0.46218487620353699,
0.45882353186607361, 0.45882353186607361), (0.46638655662536621,
0.46274510025978088, 0.46274510025978088), (0.47058823704719543,
0.46666666865348816, 0.46666666865348816), (0.47478991746902466,
0.47058823704719543, 0.47058823704719543), (0.47899159789085388,
0.47450980544090271, 0.47450980544090271), (0.48319327831268311,
0.47843137383460999, 0.47843137383460999), (0.48739495873451233,
0.48235294222831726, 0.48235294222831726), (0.49159663915634155,
0.48627451062202454, 0.48627451062202454), (0.49579831957817078,
0.49019607901573181, 0.49019607901573181), (0.5, 0.49411764740943909,
0.49411764740943909), (0.50420171022415161, 0.50196081399917603,
0.50196081399917603), (0.50840336084365845, 0.5058823823928833,
0.5058823823928833), (0.51260507106781006, 0.50980395078659058,
0.50980395078659058), (0.51680672168731689, 0.51372551918029785,
0.51372551918029785), (0.52100843191146851, 0.51764708757400513,
0.51764708757400513), (0.52521008253097534, 0.5215686559677124,
0.5215686559677124), (0.52941179275512695, 0.52549022436141968,
0.52549022436141968), (0.53361344337463379, 0.52941179275512695,
0.52941179275512695), (0.5378151535987854, 0.53333336114883423,
0.53333336114883423), (0.54201680421829224, 0.5372549295425415,
0.5372549295425415), (0.54621851444244385, 0.54509806632995605,
0.54509806632995605), (0.55042016506195068, 0.54901963472366333,
0.54901963472366333), (0.55462187528610229, 0.55294120311737061,
0.55294120311737061), (0.55882352590560913, 0.55686277151107788,
0.55686277151107788), (0.56302523612976074, 0.56078433990478516,
0.56078433990478516), (0.56722688674926758, 0.56470590829849243,
0.56470590829849243), (0.57142859697341919, 0.56862747669219971,
0.56862747669219971), (0.57563024759292603, 0.57254904508590698,
0.57254904508590698), (0.57983195781707764, 0.57647061347961426,
0.57647061347961426), (0.58403360843658447, 0.58039218187332153,
0.58039218187332153), (0.58823531866073608, 0.58431375026702881,
0.58431375026702881), (0.59243696928024292, 0.58823531866073608,
0.58823531866073608), (0.59663867950439453, 0.59215688705444336,
0.59215688705444336), (0.60084033012390137, 0.59607845544815063,
0.59607845544815063), (0.60504204034805298, 0.60000002384185791,
0.60000002384185791), (0.60924369096755981, 0.60392159223556519,
0.60392159223556519), (0.61344540119171143, 0.60784316062927246,
0.60784316062927246), (0.61764705181121826, 0.61176472902297974,
0.61176472902297974), (0.62184876203536987, 0.61568629741668701,
0.61568629741668701), (0.62605041265487671, 0.61960786581039429,
0.61960786581039429), (0.63025212287902832, 0.62745100259780884,
0.62745100259780884), (0.63445377349853516, 0.63137257099151611,
0.63137257099151611), (0.63865548372268677, 0.63529413938522339,
0.63529413938522339), (0.6428571343421936, 0.63921570777893066,
0.63921570777893066), (0.64705884456634521, 0.64313727617263794,
0.64313727617263794), (0.65126049518585205, 0.64705884456634521,
0.64705884456634521), (0.65546220541000366, 0.65098041296005249,
0.65098041296005249), (0.6596638560295105, 0.65490198135375977,
0.65490198135375977), (0.66386556625366211, 0.65882354974746704,
0.65882354974746704), (0.66806721687316895, 0.66274511814117432,
0.66274511814117432), (0.67226892709732056, 0.66666668653488159,
0.66666668653488159), (0.67647057771682739, 0.67058825492858887,
0.67058825492858887), (0.680672287940979, 0.67450982332229614,
0.67450982332229614), (0.68487393856048584, 0.67843139171600342,
0.67843139171600342), (0.68907564878463745, 0.68235296010971069,
0.68235296010971069), (0.69327729940414429, 0.68627452850341797,
0.68627452850341797), (0.6974790096282959, 0.69019609689712524,
0.69019609689712524), (0.70168066024780273, 0.69411766529083252,
0.69411766529083252), (0.70588237047195435, 0.69803923368453979,
0.69803923368453979), (0.71008402109146118, 0.70196080207824707,
0.70196080207824707), (0.71428573131561279, 0.70980393886566162,
0.70980393886566162), (0.71848738193511963, 0.7137255072593689,
0.7137255072593689), (0.72268909215927124, 0.71764707565307617,
0.71764707565307617), (0.72689074277877808, 0.72156864404678345,
0.72156864404678345), (0.73109245300292969, 0.72549021244049072,
0.72549021244049072), (0.73529410362243652, 0.729411780834198,
0.729411780834198), (0.73949581384658813, 0.73333334922790527,
0.73333334922790527), (0.74369746446609497, 0.73725491762161255,
0.73725491762161255), (0.74789917469024658, 0.74117648601531982,
0.74117648601531982), (0.75210082530975342, 0.7450980544090271,
0.7450980544090271), (0.75630253553390503, 0.75294119119644165,
0.75294119119644165), (0.76050418615341187, 0.75686275959014893,
0.75686275959014893), (0.76470589637756348, 0.7607843279838562,
0.7607843279838562), (0.76890754699707031, 0.76470589637756348,
0.76470589637756348), (0.77310925722122192, 0.76862746477127075,
0.76862746477127075), (0.77731090784072876, 0.77254903316497803,
0.77254903316497803), (0.78151261806488037, 0.7764706015586853,
0.7764706015586853), (0.78571426868438721, 0.78039216995239258,
0.78039216995239258), (0.78991597890853882, 0.78431373834609985,
0.78431373834609985), (0.79411762952804565, 0.78823530673980713,
0.78823530673980713), (0.79831933975219727, 0.79607844352722168,
0.79607844352722168), (0.8025209903717041, 0.80000001192092896,
0.80000001192092896), (0.80672270059585571, 0.80392158031463623,
0.80392158031463623), (0.81092435121536255, 0.80784314870834351,
0.80784314870834351), (0.81512606143951416, 0.81176471710205078,
0.81176471710205078), (0.819327712059021, 0.81568628549575806,
0.81568628549575806), (0.82352942228317261, 0.81960785388946533,
0.81960785388946533), (0.82773107290267944, 0.82352942228317261,
0.82352942228317261), (0.83193278312683105, 0.82745099067687988,
0.82745099067687988), (0.83613443374633789, 0.83137255907058716,
0.83137255907058716), (0.8403361439704895, 0.83529412746429443,
0.83529412746429443), (0.84453779458999634, 0.83921569585800171,
0.83921569585800171), (0.84873950481414795, 0.84313726425170898,
0.84313726425170898), (0.85294115543365479, 0.84705883264541626,
0.84705883264541626), (0.8571428656578064, 0.85098040103912354,
0.85098040103912354), (0.86134451627731323, 0.85490196943283081,
0.85490196943283081), (0.86554622650146484, 0.85882353782653809,
0.85882353782653809), (0.86974787712097168, 0.86274510622024536,
0.86274510622024536), (0.87394958734512329, 0.86666667461395264,
0.86666667461395264), (0.87815123796463013, 0.87058824300765991,
0.87058824300765991), (0.88235294818878174, 0.87843137979507446,
0.87843137979507446), (0.88655459880828857, 0.88235294818878174,
0.88235294818878174), (0.89075630903244019, 0.88627451658248901,
0.88627451658248901), (0.89495795965194702, 0.89019608497619629,
0.89019608497619629), (0.89915966987609863, 0.89411765336990356,
0.89411765336990356), (0.90336132049560547, 0.89803922176361084,
0.89803922176361084), (0.90756303071975708, 0.90196079015731812,
0.90196079015731812), (0.91176468133926392, 0.90588235855102539,
0.90588235855102539), (0.91596639156341553, 0.90980392694473267,
0.90980392694473267), (0.92016804218292236, 0.91372549533843994,
0.91372549533843994), (0.92436975240707397, 0.91764706373214722,
0.91764706373214722), (0.92857140302658081, 0.92156863212585449,
0.92156863212585449), (0.93277311325073242, 0.92549020051956177,
0.92549020051956177), (0.93697476387023926, 0.92941176891326904,
0.92941176891326904), (0.94117647409439087, 0.93333333730697632,
0.93333333730697632), (0.94537812471389771, 0.93725490570068359,
0.93725490570068359), (0.94957983493804932, 0.94117647409439087,
0.94117647409439087), (0.95378148555755615, 0.94509804248809814,
0.94509804248809814), (0.95798319578170776, 0.94901961088180542,
0.94901961088180542), (0.9621848464012146, 0.9529411792755127,
0.9529411792755127), (0.96638655662536621, 0.96078431606292725,
0.96078431606292725), (0.97058820724487305, 0.96470588445663452,
0.96470588445663452), (0.97478991746902466, 0.9686274528503418,
0.9686274528503418), (0.97899156808853149, 0.97254902124404907,
0.97254902124404907), (0.98319327831268311, 0.97647058963775635,
0.97647058963775635), (0.98739492893218994, 0.98039215803146362,
0.98039215803146362), (0.99159663915634155, 0.9843137264251709,
0.9843137264251709), (0.99579828977584839, 0.98823529481887817,
0.98823529481887817), (1.0, 0.99215686321258545, 0.99215686321258545)]}
_gist_yarg_data = {'blue': [(0.0, 1.0, 1.0), (0.0042016808874905109,
0.99607843160629272, 0.99607843160629272), (0.0084033617749810219,
0.99215686321258545, 0.99215686321258545), (0.012605042196810246,
0.98823529481887817, 0.98823529481887817), (0.016806723549962044,
0.9843137264251709, 0.9843137264251709), (0.021008403971791267,
0.98039215803146362, 0.98039215803146362), (0.025210084393620491,
0.97647058963775635, 0.97647058963775635), (0.029411764815449715,
0.97254902124404907, 0.97254902124404907), (0.033613447099924088,
0.96470588445663452, 0.96470588445663452), (0.037815127521753311,
0.96078431606292725, 0.96078431606292725), (0.042016807943582535,
0.95686274766921997, 0.95686274766921997), (0.046218488365411758,
0.9529411792755127, 0.9529411792755127), (0.050420168787240982,
0.94901961088180542, 0.94901961088180542), (0.054621849209070206,
0.94509804248809814, 0.94509804248809814), (0.058823529630899429,
0.94117647409439087, 0.94117647409439087), (0.063025213778018951,
0.93725490570068359, 0.93725490570068359), (0.067226894199848175,
0.93333333730697632, 0.93333333730697632), (0.071428574621677399,
0.92941176891326904, 0.92941176891326904), (0.075630255043506622,
0.92549020051956177, 0.92549020051956177), (0.079831935465335846,
0.92156863212585449, 0.92156863212585449), (0.08403361588716507,
0.91764706373214722, 0.91764706373214722), (0.088235296308994293,
0.91372549533843994, 0.91372549533843994), (0.092436976730823517,
0.90980392694473267, 0.90980392694473267), (0.09663865715265274,
0.90196079015731812, 0.90196079015731812), (0.10084033757448196,
0.89803922176361084, 0.89803922176361084), (0.10504201799631119,
0.89411765336990356, 0.89411765336990356), (0.10924369841814041,
0.89019608497619629, 0.89019608497619629), (0.11344537883996964,
0.88627451658248901, 0.88627451658248901), (0.11764705926179886,
0.88235294818878174, 0.88235294818878174), (0.12184873968362808,
0.87843137979507446, 0.87843137979507446), (0.1260504275560379,
0.87450981140136719, 0.87450981140136719), (0.13025210797786713,
0.87058824300765991, 0.87058824300765991), (0.13445378839969635,
0.86666667461395264, 0.86666667461395264), (0.13865546882152557,
0.86274510622024536, 0.86274510622024536), (0.1428571492433548,
0.85882353782653809, 0.85882353782653809), (0.14705882966518402,
0.85490196943283081, 0.85490196943283081), (0.15126051008701324,
0.85098040103912354, 0.85098040103912354), (0.15546219050884247,
0.84705883264541626, 0.84705883264541626), (0.15966387093067169,
0.83921569585800171, 0.83921569585800171), (0.16386555135250092,
0.83529412746429443, 0.83529412746429443), (0.16806723177433014,
0.83137255907058716, 0.83137255907058716), (0.17226891219615936,
0.82745099067687988, 0.82745099067687988), (0.17647059261798859,
0.82352942228317261, 0.82352942228317261), (0.18067227303981781,
0.81960785388946533, 0.81960785388946533), (0.18487395346164703,
0.81568628549575806, 0.81568628549575806), (0.18907563388347626,
0.81176471710205078, 0.81176471710205078), (0.19327731430530548,
0.80784314870834351, 0.80784314870834351), (0.1974789947271347,
0.80392158031463623, 0.80392158031463623), (0.20168067514896393,
0.80000001192092896, 0.80000001192092896), (0.20588235557079315,
0.79607844352722168, 0.79607844352722168), (0.21008403599262238,
0.7921568751335144, 0.7921568751335144), (0.2142857164144516,
0.78823530673980713, 0.78823530673980713), (0.21848739683628082,
0.78431373834609985, 0.78431373834609985), (0.22268907725811005,
0.7764706015586853, 0.7764706015586853), (0.22689075767993927,
0.77254903316497803, 0.77254903316497803), (0.23109243810176849,
0.76862746477127075, 0.76862746477127075), (0.23529411852359772,
0.76470589637756348, 0.76470589637756348), (0.23949579894542694,
0.7607843279838562, 0.7607843279838562), (0.24369747936725616,
0.75686275959014893, 0.75686275959014893), (0.24789915978908539,
0.75294119119644165, 0.75294119119644165), (0.25210085511207581,
0.74901962280273438, 0.74901962280273438), (0.25630253553390503,
0.7450980544090271, 0.7450980544090271), (0.26050421595573425,
0.74117648601531982, 0.74117648601531982), (0.26470589637756348,
0.73725491762161255, 0.73725491762161255), (0.2689075767993927,
0.73333334922790527, 0.73333334922790527), (0.27310925722122192,
0.729411780834198, 0.729411780834198), (0.27731093764305115,
0.72549021244049072, 0.72549021244049072), (0.28151261806488037,
0.72156864404678345, 0.72156864404678345), (0.28571429848670959,
0.7137255072593689, 0.7137255072593689), (0.28991597890853882,
0.70980393886566162, 0.70980393886566162), (0.29411765933036804,
0.70588237047195435, 0.70588237047195435), (0.29831933975219727,
0.70196080207824707, 0.70196080207824707), (0.30252102017402649,
0.69803923368453979, 0.69803923368453979), (0.30672270059585571,
0.69411766529083252, 0.69411766529083252), (0.31092438101768494,
0.69019609689712524, 0.69019609689712524), (0.31512606143951416,
0.68627452850341797, 0.68627452850341797), (0.31932774186134338,
0.68235296010971069, 0.68235296010971069), (0.32352942228317261,
0.67843139171600342, 0.67843139171600342), (0.32773110270500183,
0.67450982332229614, 0.67450982332229614), (0.33193278312683105,
0.67058825492858887, 0.67058825492858887), (0.33613446354866028,
0.66666668653488159, 0.66666668653488159), (0.3403361439704895,
0.66274511814117432, 0.66274511814117432), (0.34453782439231873,
0.65882354974746704, 0.65882354974746704), (0.34873950481414795,
0.65098041296005249, 0.65098041296005249), (0.35294118523597717,
0.64705884456634521, 0.64705884456634521), (0.3571428656578064,
0.64313727617263794, 0.64313727617263794), (0.36134454607963562,
0.63921570777893066, 0.63921570777893066), (0.36554622650146484,
0.63529413938522339, 0.63529413938522339), (0.36974790692329407,
0.63137257099151611, 0.63137257099151611), (0.37394958734512329,
0.62745100259780884, 0.62745100259780884), (0.37815126776695251,
0.62352943420410156, 0.62352943420410156), (0.38235294818878174,
0.61960786581039429, 0.61960786581039429), (0.38655462861061096,
0.61568629741668701, 0.61568629741668701), (0.39075630903244019,
0.61176472902297974, 0.61176472902297974), (0.39495798945426941,
0.60784316062927246, 0.60784316062927246), (0.39915966987609863,
0.60392159223556519, 0.60392159223556519), (0.40336135029792786,
0.60000002384185791, 0.60000002384185791), (0.40756303071975708,
0.59607845544815063, 0.59607845544815063), (0.4117647111415863,
0.58823531866073608, 0.58823531866073608), (0.41596639156341553,
0.58431375026702881, 0.58431375026702881), (0.42016807198524475,
0.58039218187332153, 0.58039218187332153), (0.42436975240707397,
0.57647061347961426, 0.57647061347961426), (0.4285714328289032,
0.57254904508590698, 0.57254904508590698), (0.43277311325073242,
0.56862747669219971, 0.56862747669219971), (0.43697479367256165,
0.56470590829849243, 0.56470590829849243), (0.44117647409439087,
0.56078433990478516, 0.56078433990478516), (0.44537815451622009,
0.55686277151107788, 0.55686277151107788), (0.44957983493804932,
0.55294120311737061, 0.55294120311737061), (0.45378151535987854,
0.54901963472366333, 0.54901963472366333), (0.45798319578170776,
0.54509806632995605, 0.54509806632995605), (0.46218487620353699,
0.54117649793624878, 0.54117649793624878), (0.46638655662536621,
0.5372549295425415, 0.5372549295425415), (0.47058823704719543,
0.53333336114883423, 0.53333336114883423), (0.47478991746902466,
0.52549022436141968, 0.52549022436141968), (0.47899159789085388,
0.5215686559677124, 0.5215686559677124), (0.48319327831268311,
0.51764708757400513, 0.51764708757400513), (0.48739495873451233,
0.51372551918029785, 0.51372551918029785), (0.49159663915634155,
0.50980395078659058, 0.50980395078659058), (0.49579831957817078,
0.5058823823928833, 0.5058823823928833), (0.5, 0.50196081399917603,
0.50196081399917603), (0.50420171022415161, 0.49803921580314636,
0.49803921580314636), (0.50840336084365845, 0.49411764740943909,
0.49411764740943909), (0.51260507106781006, 0.49019607901573181,
0.49019607901573181), (0.51680672168731689, 0.48627451062202454,
0.48627451062202454), (0.52100843191146851, 0.48235294222831726,
0.48235294222831726), (0.52521008253097534, 0.47843137383460999,
0.47843137383460999), (0.52941179275512695, 0.47450980544090271,
0.47450980544090271), (0.53361344337463379, 0.47058823704719543,
0.47058823704719543), (0.5378151535987854, 0.46274510025978088,
0.46274510025978088), (0.54201680421829224, 0.45882353186607361,
0.45882353186607361), (0.54621851444244385, 0.45490196347236633,
0.45490196347236633), (0.55042016506195068, 0.45098039507865906,
0.45098039507865906), (0.55462187528610229, 0.44705882668495178,
0.44705882668495178), (0.55882352590560913, 0.44313725829124451,
0.44313725829124451), (0.56302523612976074, 0.43921568989753723,
0.43921568989753723), (0.56722688674926758, 0.43529412150382996,
0.43529412150382996), (0.57142859697341919, 0.43137255311012268,
0.43137255311012268), (0.57563024759292603, 0.42745098471641541,
0.42745098471641541), (0.57983195781707764, 0.42352941632270813,
0.42352941632270813), (0.58403360843658447, 0.41960784792900085,
0.41960784792900085), (0.58823531866073608, 0.41568627953529358,
0.41568627953529358), (0.59243696928024292, 0.4117647111415863,
0.4117647111415863), (0.59663867950439453, 0.40784314274787903,
0.40784314274787903), (0.60084033012390137, 0.40000000596046448,
0.40000000596046448), (0.60504204034805298, 0.3960784375667572,
0.3960784375667572), (0.60924369096755981, 0.39215686917304993,
0.39215686917304993), (0.61344540119171143, 0.38823530077934265,
0.38823530077934265), (0.61764705181121826, 0.38431373238563538,
0.38431373238563538), (0.62184876203536987, 0.3803921639919281,
0.3803921639919281), (0.62605041265487671, 0.37647059559822083,
0.37647059559822083), (0.63025212287902832, 0.37254902720451355,
0.37254902720451355), (0.63445377349853516, 0.36862745881080627,
0.36862745881080627), (0.63865548372268677, 0.364705890417099,
0.364705890417099), (0.6428571343421936, 0.36078432202339172,
0.36078432202339172), (0.64705884456634521, 0.35686275362968445,
0.35686275362968445), (0.65126049518585205, 0.35294118523597717,
0.35294118523597717), (0.65546220541000366, 0.3490196168422699,
0.3490196168422699), (0.6596638560295105, 0.34509804844856262,
0.34509804844856262), (0.66386556625366211, 0.33725491166114807,
0.33725491166114807), (0.66806721687316895, 0.3333333432674408,
0.3333333432674408), (0.67226892709732056, 0.32941177487373352,
0.32941177487373352), (0.67647057771682739, 0.32549020648002625,
0.32549020648002625), (0.680672287940979, 0.32156863808631897,
0.32156863808631897), (0.68487393856048584, 0.31764706969261169,
0.31764706969261169), (0.68907564878463745, 0.31372550129890442,
0.31372550129890442), (0.69327729940414429, 0.30980393290519714,
0.30980393290519714), (0.6974790096282959, 0.30588236451148987,
0.30588236451148987), (0.70168066024780273, 0.30196079611778259,
0.30196079611778259), (0.70588237047195435, 0.29803922772407532,
0.29803922772407532), (0.71008402109146118, 0.29411765933036804,
0.29411765933036804), (0.71428573131561279, 0.29019609093666077,
0.29019609093666077), (0.71848738193511963, 0.28627452254295349,
0.28627452254295349), (0.72268909215927124, 0.28235295414924622,
0.28235295414924622), (0.72689074277877808, 0.27450981736183167,
0.27450981736183167), (0.73109245300292969, 0.27058824896812439,
0.27058824896812439), (0.73529410362243652, 0.26666668057441711,
0.26666668057441711), (0.73949581384658813, 0.26274511218070984,
0.26274511218070984), (0.74369746446609497, 0.25882354378700256,
0.25882354378700256), (0.74789917469024658, 0.25490197539329529,
0.25490197539329529), (0.75210082530975342, 0.25098040699958801,
0.25098040699958801), (0.75630253553390503, 0.24705882370471954,
0.24705882370471954), (0.76050418615341187, 0.24313725531101227,
0.24313725531101227), (0.76470589637756348, 0.23921568691730499,
0.23921568691730499), (0.76890754699707031, 0.23529411852359772,
0.23529411852359772), (0.77310925722122192, 0.23137255012989044,
0.23137255012989044), (0.77731090784072876, 0.22745098173618317,
0.22745098173618317), (0.78151261806488037, 0.22352941334247589,
0.22352941334247589), (0.78571426868438721, 0.21960784494876862,
0.21960784494876862), (0.78991597890853882, 0.21176470816135406,
0.21176470816135406), (0.79411762952804565, 0.20784313976764679,
0.20784313976764679), (0.79831933975219727, 0.20392157137393951,
0.20392157137393951), (0.8025209903717041, 0.20000000298023224,
0.20000000298023224), (0.80672270059585571, 0.19607843458652496,
0.19607843458652496), (0.81092435121536255, 0.19215686619281769,
0.19215686619281769), (0.81512606143951416, 0.18823529779911041,
0.18823529779911041), (0.819327712059021, 0.18431372940540314,
0.18431372940540314), (0.82352942228317261, 0.18039216101169586,
0.18039216101169586), (0.82773107290267944, 0.17647059261798859,
0.17647059261798859), (0.83193278312683105, 0.17254902422428131,
0.17254902422428131), (0.83613443374633789, 0.16862745583057404,
0.16862745583057404), (0.8403361439704895, 0.16470588743686676,
0.16470588743686676), (0.84453779458999634, 0.16078431904315948,
0.16078431904315948), (0.84873950481414795, 0.15686275064945221,
0.15686275064945221), (0.85294115543365479, 0.14901961386203766,
0.14901961386203766), (0.8571428656578064, 0.14509804546833038,
0.14509804546833038), (0.86134451627731323, 0.14117647707462311,
0.14117647707462311), (0.86554622650146484, 0.13725490868091583,
0.13725490868091583), (0.86974787712097168, 0.13333334028720856,
0.13333334028720856), (0.87394958734512329, 0.12941177189350128,
0.12941177189350128), (0.87815123796463013, 0.12549020349979401,
0.12549020349979401), (0.88235294818878174, 0.12156862765550613,
0.12156862765550613), (0.88655459880828857, 0.11764705926179886,
0.11764705926179886), (0.89075630903244019, 0.11372549086809158,
0.11372549086809158), (0.89495795965194702, 0.10980392247438431,
0.10980392247438431), (0.89915966987609863, 0.10588235408067703,
0.10588235408067703), (0.90336132049560547, 0.10196078568696976,
0.10196078568696976), (0.90756303071975708, 0.098039217293262482,
0.098039217293262482), (0.91176468133926392, 0.094117648899555206,
0.094117648899555206), (0.91596639156341553, 0.086274512112140656,
0.086274512112140656), (0.92016804218292236, 0.08235294371843338,
0.08235294371843338), (0.92436975240707397, 0.078431375324726105,
0.078431375324726105), (0.92857140302658081, 0.074509806931018829,
0.074509806931018829), (0.93277311325073242, 0.070588238537311554,
0.070588238537311554), (0.93697476387023926, 0.066666670143604279,
0.066666670143604279), (0.94117647409439087, 0.062745101749897003,
0.062745101749897003), (0.94537812471389771, 0.058823529630899429,
0.058823529630899429), (0.94957983493804932, 0.054901961237192154,
0.054901961237192154), (0.95378148555755615, 0.050980392843484879,
0.050980392843484879), (0.95798319578170776, 0.047058824449777603,
0.047058824449777603), (0.9621848464012146, 0.043137256056070328,
0.043137256056070328), (0.96638655662536621, 0.039215687662363052,
0.039215687662363052), (0.97058820724487305, 0.035294119268655777,
0.035294119268655777), (0.97478991746902466, 0.031372550874948502,
0.031372550874948502), (0.97899156808853149, 0.023529412224888802,
0.023529412224888802), (0.98319327831268311, 0.019607843831181526,
0.019607843831181526), (0.98739492893218994, 0.015686275437474251,
0.015686275437474251), (0.99159663915634155, 0.011764706112444401,
0.011764706112444401), (0.99579828977584839, 0.0078431377187371254,
0.0078431377187371254), (1.0, 0.0039215688593685627,
0.0039215688593685627)], 'green': [(0.0, 1.0, 1.0),
(0.0042016808874905109, 0.99607843160629272, 0.99607843160629272),
(0.0084033617749810219, 0.99215686321258545, 0.99215686321258545),
(0.012605042196810246, 0.98823529481887817, 0.98823529481887817),
(0.016806723549962044, 0.9843137264251709, 0.9843137264251709),
(0.021008403971791267, 0.98039215803146362, 0.98039215803146362),
(0.025210084393620491, 0.97647058963775635, 0.97647058963775635),
(0.029411764815449715, 0.97254902124404907, 0.97254902124404907),
(0.033613447099924088, 0.96470588445663452, 0.96470588445663452),
(0.037815127521753311, 0.96078431606292725, 0.96078431606292725),
(0.042016807943582535, 0.95686274766921997, 0.95686274766921997),
(0.046218488365411758, 0.9529411792755127, 0.9529411792755127),
(0.050420168787240982, 0.94901961088180542, 0.94901961088180542),
(0.054621849209070206, 0.94509804248809814, 0.94509804248809814),
(0.058823529630899429, 0.94117647409439087, 0.94117647409439087),
(0.063025213778018951, 0.93725490570068359, 0.93725490570068359),
(0.067226894199848175, 0.93333333730697632, 0.93333333730697632),
(0.071428574621677399, 0.92941176891326904, 0.92941176891326904),
(0.075630255043506622, 0.92549020051956177, 0.92549020051956177),
(0.079831935465335846, 0.92156863212585449, 0.92156863212585449),
(0.08403361588716507, 0.91764706373214722, 0.91764706373214722),
(0.088235296308994293, 0.91372549533843994, 0.91372549533843994),
(0.092436976730823517, 0.90980392694473267, 0.90980392694473267),
(0.09663865715265274, 0.90196079015731812, 0.90196079015731812),
(0.10084033757448196, 0.89803922176361084, 0.89803922176361084),
(0.10504201799631119, 0.89411765336990356, 0.89411765336990356),
(0.10924369841814041, 0.89019608497619629, 0.89019608497619629),
(0.11344537883996964, 0.88627451658248901, 0.88627451658248901),
(0.11764705926179886, 0.88235294818878174, 0.88235294818878174),
(0.12184873968362808, 0.87843137979507446, 0.87843137979507446),
(0.1260504275560379, 0.87450981140136719, 0.87450981140136719),
(0.13025210797786713, 0.87058824300765991, 0.87058824300765991),
(0.13445378839969635, 0.86666667461395264, 0.86666667461395264),
(0.13865546882152557, 0.86274510622024536, 0.86274510622024536),
(0.1428571492433548, 0.85882353782653809, 0.85882353782653809),
(0.14705882966518402, 0.85490196943283081, 0.85490196943283081),
(0.15126051008701324, 0.85098040103912354, 0.85098040103912354),
(0.15546219050884247, 0.84705883264541626, 0.84705883264541626),
(0.15966387093067169, 0.83921569585800171, 0.83921569585800171),
(0.16386555135250092, 0.83529412746429443, 0.83529412746429443),
(0.16806723177433014, 0.83137255907058716, 0.83137255907058716),
(0.17226891219615936, 0.82745099067687988, 0.82745099067687988),
(0.17647059261798859, 0.82352942228317261, 0.82352942228317261),
(0.18067227303981781, 0.81960785388946533, 0.81960785388946533),
(0.18487395346164703, 0.81568628549575806, 0.81568628549575806),
(0.18907563388347626, 0.81176471710205078, 0.81176471710205078),
(0.19327731430530548, 0.80784314870834351, 0.80784314870834351),
(0.1974789947271347, 0.80392158031463623, 0.80392158031463623),
(0.20168067514896393, 0.80000001192092896, 0.80000001192092896),
(0.20588235557079315, 0.79607844352722168, 0.79607844352722168),
(0.21008403599262238, 0.7921568751335144, 0.7921568751335144),
(0.2142857164144516, 0.78823530673980713, 0.78823530673980713),
(0.21848739683628082, 0.78431373834609985, 0.78431373834609985),
(0.22268907725811005, 0.7764706015586853, 0.7764706015586853),
(0.22689075767993927, 0.77254903316497803, 0.77254903316497803),
(0.23109243810176849, 0.76862746477127075, 0.76862746477127075),
(0.23529411852359772, 0.76470589637756348, 0.76470589637756348),
(0.23949579894542694, 0.7607843279838562, 0.7607843279838562),
(0.24369747936725616, 0.75686275959014893, 0.75686275959014893),
(0.24789915978908539, 0.75294119119644165, 0.75294119119644165),
(0.25210085511207581, 0.74901962280273438, 0.74901962280273438),
(0.25630253553390503, 0.7450980544090271, 0.7450980544090271),
(0.26050421595573425, 0.74117648601531982, 0.74117648601531982),
(0.26470589637756348, 0.73725491762161255, 0.73725491762161255),
(0.2689075767993927, 0.73333334922790527, 0.73333334922790527),
(0.27310925722122192, 0.729411780834198, 0.729411780834198),
(0.27731093764305115, 0.72549021244049072, 0.72549021244049072),
(0.28151261806488037, 0.72156864404678345, 0.72156864404678345),
(0.28571429848670959, 0.7137255072593689, 0.7137255072593689),
(0.28991597890853882, 0.70980393886566162, 0.70980393886566162),
(0.29411765933036804, 0.70588237047195435, 0.70588237047195435),
(0.29831933975219727, 0.70196080207824707, 0.70196080207824707),
(0.30252102017402649, 0.69803923368453979, 0.69803923368453979),
(0.30672270059585571, 0.69411766529083252, 0.69411766529083252),
(0.31092438101768494, 0.69019609689712524, 0.69019609689712524),
(0.31512606143951416, 0.68627452850341797, 0.68627452850341797),
(0.31932774186134338, 0.68235296010971069, 0.68235296010971069),
(0.32352942228317261, 0.67843139171600342, 0.67843139171600342),
(0.32773110270500183, 0.67450982332229614, 0.67450982332229614),
(0.33193278312683105, 0.67058825492858887, 0.67058825492858887),
(0.33613446354866028, 0.66666668653488159, 0.66666668653488159),
(0.3403361439704895, 0.66274511814117432, 0.66274511814117432),
(0.34453782439231873, 0.65882354974746704, 0.65882354974746704),
(0.34873950481414795, 0.65098041296005249, 0.65098041296005249),
(0.35294118523597717, 0.64705884456634521, 0.64705884456634521),
(0.3571428656578064, 0.64313727617263794, 0.64313727617263794),
(0.36134454607963562, 0.63921570777893066, 0.63921570777893066),
(0.36554622650146484, 0.63529413938522339, 0.63529413938522339),
(0.36974790692329407, 0.63137257099151611, 0.63137257099151611),
(0.37394958734512329, 0.62745100259780884, 0.62745100259780884),
(0.37815126776695251, 0.62352943420410156, 0.62352943420410156),
(0.38235294818878174, 0.61960786581039429, 0.61960786581039429),
(0.38655462861061096, 0.61568629741668701, 0.61568629741668701),
(0.39075630903244019, 0.61176472902297974, 0.61176472902297974),
(0.39495798945426941, 0.60784316062927246, 0.60784316062927246),
(0.39915966987609863, 0.60392159223556519, 0.60392159223556519),
(0.40336135029792786, 0.60000002384185791, 0.60000002384185791),
(0.40756303071975708, 0.59607845544815063, 0.59607845544815063),
(0.4117647111415863, 0.58823531866073608, 0.58823531866073608),
(0.41596639156341553, 0.58431375026702881, 0.58431375026702881),
(0.42016807198524475, 0.58039218187332153, 0.58039218187332153),
(0.42436975240707397, 0.57647061347961426, 0.57647061347961426),
(0.4285714328289032, 0.57254904508590698, 0.57254904508590698),
(0.43277311325073242, 0.56862747669219971, 0.56862747669219971),
(0.43697479367256165, 0.56470590829849243, 0.56470590829849243),
(0.44117647409439087, 0.56078433990478516, 0.56078433990478516),
(0.44537815451622009, 0.55686277151107788, 0.55686277151107788),
(0.44957983493804932, 0.55294120311737061, 0.55294120311737061),
(0.45378151535987854, 0.54901963472366333, 0.54901963472366333),
(0.45798319578170776, 0.54509806632995605, 0.54509806632995605),
(0.46218487620353699, 0.54117649793624878, 0.54117649793624878),
(0.46638655662536621, 0.5372549295425415, 0.5372549295425415),
(0.47058823704719543, 0.53333336114883423, 0.53333336114883423),
(0.47478991746902466, 0.52549022436141968, 0.52549022436141968),
(0.47899159789085388, 0.5215686559677124, 0.5215686559677124),
(0.48319327831268311, 0.51764708757400513, 0.51764708757400513),
(0.48739495873451233, 0.51372551918029785, 0.51372551918029785),
(0.49159663915634155, 0.50980395078659058, 0.50980395078659058),
(0.49579831957817078, 0.5058823823928833, 0.5058823823928833), (0.5,
0.50196081399917603, 0.50196081399917603), (0.50420171022415161,
0.49803921580314636, 0.49803921580314636), (0.50840336084365845,
0.49411764740943909, 0.49411764740943909), (0.51260507106781006,
0.49019607901573181, 0.49019607901573181), (0.51680672168731689,
0.48627451062202454, 0.48627451062202454), (0.52100843191146851,
0.48235294222831726, 0.48235294222831726), (0.52521008253097534,
0.47843137383460999, 0.47843137383460999), (0.52941179275512695,
0.47450980544090271, 0.47450980544090271), (0.53361344337463379,
0.47058823704719543, 0.47058823704719543), (0.5378151535987854,
0.46274510025978088, 0.46274510025978088), (0.54201680421829224,
0.45882353186607361, 0.45882353186607361), (0.54621851444244385,
0.45490196347236633, 0.45490196347236633), (0.55042016506195068,
0.45098039507865906, 0.45098039507865906), (0.55462187528610229,
0.44705882668495178, 0.44705882668495178), (0.55882352590560913,
0.44313725829124451, 0.44313725829124451), (0.56302523612976074,
0.43921568989753723, 0.43921568989753723), (0.56722688674926758,
0.43529412150382996, 0.43529412150382996), (0.57142859697341919,
0.43137255311012268, 0.43137255311012268), (0.57563024759292603,
0.42745098471641541, 0.42745098471641541), (0.57983195781707764,
0.42352941632270813, 0.42352941632270813), (0.58403360843658447,
0.41960784792900085, 0.41960784792900085), (0.58823531866073608,
0.41568627953529358, 0.41568627953529358), (0.59243696928024292,
0.4117647111415863, 0.4117647111415863), (0.59663867950439453,
0.40784314274787903, 0.40784314274787903), (0.60084033012390137,
0.40000000596046448, 0.40000000596046448), (0.60504204034805298,
0.3960784375667572, 0.3960784375667572), (0.60924369096755981,
0.39215686917304993, 0.39215686917304993), (0.61344540119171143,
0.38823530077934265, 0.38823530077934265), (0.61764705181121826,
0.38431373238563538, 0.38431373238563538), (0.62184876203536987,
0.3803921639919281, 0.3803921639919281), (0.62605041265487671,
0.37647059559822083, 0.37647059559822083), (0.63025212287902832,
0.37254902720451355, 0.37254902720451355), (0.63445377349853516,
0.36862745881080627, 0.36862745881080627), (0.63865548372268677,
0.364705890417099, 0.364705890417099), (0.6428571343421936,
0.36078432202339172, 0.36078432202339172), (0.64705884456634521,
0.35686275362968445, 0.35686275362968445), (0.65126049518585205,
0.35294118523597717, 0.35294118523597717), (0.65546220541000366,
0.3490196168422699, 0.3490196168422699), (0.6596638560295105,
0.34509804844856262, 0.34509804844856262), (0.66386556625366211,
0.33725491166114807, 0.33725491166114807), (0.66806721687316895,
0.3333333432674408, 0.3333333432674408), (0.67226892709732056,
0.32941177487373352, 0.32941177487373352), (0.67647057771682739,
0.32549020648002625, 0.32549020648002625), (0.680672287940979,
0.32156863808631897, 0.32156863808631897), (0.68487393856048584,
0.31764706969261169, 0.31764706969261169), (0.68907564878463745,
0.31372550129890442, 0.31372550129890442), (0.69327729940414429,
0.30980393290519714, 0.30980393290519714), (0.6974790096282959,
0.30588236451148987, 0.30588236451148987), (0.70168066024780273,
0.30196079611778259, 0.30196079611778259), (0.70588237047195435,
0.29803922772407532, 0.29803922772407532), (0.71008402109146118,
0.29411765933036804, 0.29411765933036804), (0.71428573131561279,
0.29019609093666077, 0.29019609093666077), (0.71848738193511963,
0.28627452254295349, 0.28627452254295349), (0.72268909215927124,
0.28235295414924622, 0.28235295414924622), (0.72689074277877808,
0.27450981736183167, 0.27450981736183167), (0.73109245300292969,
0.27058824896812439, 0.27058824896812439), (0.73529410362243652,
0.26666668057441711, 0.26666668057441711), (0.73949581384658813,
0.26274511218070984, 0.26274511218070984), (0.74369746446609497,
0.25882354378700256, 0.25882354378700256), (0.74789917469024658,
0.25490197539329529, 0.25490197539329529), (0.75210082530975342,
0.25098040699958801, 0.25098040699958801), (0.75630253553390503,
0.24705882370471954, 0.24705882370471954), (0.76050418615341187,
0.24313725531101227, 0.24313725531101227), (0.76470589637756348,
0.23921568691730499, 0.23921568691730499), (0.76890754699707031,
0.23529411852359772, 0.23529411852359772), (0.77310925722122192,
0.23137255012989044, 0.23137255012989044), (0.77731090784072876,
0.22745098173618317, 0.22745098173618317), (0.78151261806488037,
0.22352941334247589, 0.22352941334247589), (0.78571426868438721,
0.21960784494876862, 0.21960784494876862), (0.78991597890853882,
0.21176470816135406, 0.21176470816135406), (0.79411762952804565,
0.20784313976764679, 0.20784313976764679), (0.79831933975219727,
0.20392157137393951, 0.20392157137393951), (0.8025209903717041,
0.20000000298023224, 0.20000000298023224), (0.80672270059585571,
0.19607843458652496, 0.19607843458652496), (0.81092435121536255,
0.19215686619281769, 0.19215686619281769), (0.81512606143951416,
0.18823529779911041, 0.18823529779911041), (0.819327712059021,
0.18431372940540314, 0.18431372940540314), (0.82352942228317261,
0.18039216101169586, 0.18039216101169586), (0.82773107290267944,
0.17647059261798859, 0.17647059261798859), (0.83193278312683105,
0.17254902422428131, 0.17254902422428131), (0.83613443374633789,
0.16862745583057404, 0.16862745583057404), (0.8403361439704895,
0.16470588743686676, 0.16470588743686676), (0.84453779458999634,
0.16078431904315948, 0.16078431904315948), (0.84873950481414795,
0.15686275064945221, 0.15686275064945221), (0.85294115543365479,
0.14901961386203766, 0.14901961386203766), (0.8571428656578064,
0.14509804546833038, 0.14509804546833038), (0.86134451627731323,
0.14117647707462311, 0.14117647707462311), (0.86554622650146484,
0.13725490868091583, 0.13725490868091583), (0.86974787712097168,
0.13333334028720856, 0.13333334028720856), (0.87394958734512329,
0.12941177189350128, 0.12941177189350128), (0.87815123796463013,
0.12549020349979401, 0.12549020349979401), (0.88235294818878174,
0.12156862765550613, 0.12156862765550613), (0.88655459880828857,
0.11764705926179886, 0.11764705926179886), (0.89075630903244019,
0.11372549086809158, 0.11372549086809158), (0.89495795965194702,
0.10980392247438431, 0.10980392247438431), (0.89915966987609863,
0.10588235408067703, 0.10588235408067703), (0.90336132049560547,
0.10196078568696976, 0.10196078568696976), (0.90756303071975708,
0.098039217293262482, 0.098039217293262482), (0.91176468133926392,
0.094117648899555206, 0.094117648899555206), (0.91596639156341553,
0.086274512112140656, 0.086274512112140656), (0.92016804218292236,
0.08235294371843338, 0.08235294371843338), (0.92436975240707397,
0.078431375324726105, 0.078431375324726105), (0.92857140302658081,
0.074509806931018829, 0.074509806931018829), (0.93277311325073242,
0.070588238537311554, 0.070588238537311554), (0.93697476387023926,
0.066666670143604279, 0.066666670143604279), (0.94117647409439087,
0.062745101749897003, 0.062745101749897003), (0.94537812471389771,
0.058823529630899429, 0.058823529630899429), (0.94957983493804932,
0.054901961237192154, 0.054901961237192154), (0.95378148555755615,
0.050980392843484879, 0.050980392843484879), (0.95798319578170776,
0.047058824449777603, 0.047058824449777603), (0.9621848464012146,
0.043137256056070328, 0.043137256056070328), (0.96638655662536621,
0.039215687662363052, 0.039215687662363052), (0.97058820724487305,
0.035294119268655777, 0.035294119268655777), (0.97478991746902466,
0.031372550874948502, 0.031372550874948502), (0.97899156808853149,
0.023529412224888802, 0.023529412224888802), (0.98319327831268311,
0.019607843831181526, 0.019607843831181526), (0.98739492893218994,
0.015686275437474251, 0.015686275437474251), (0.99159663915634155,
0.011764706112444401, 0.011764706112444401), (0.99579828977584839,
0.0078431377187371254, 0.0078431377187371254), (1.0,
0.0039215688593685627, 0.0039215688593685627)], 'red': [(0.0, 1.0, 1.0),
(0.0042016808874905109, 0.99607843160629272, 0.99607843160629272),
(0.0084033617749810219, 0.99215686321258545, 0.99215686321258545),
(0.012605042196810246, 0.98823529481887817, 0.98823529481887817),
(0.016806723549962044, 0.9843137264251709, 0.9843137264251709),
(0.021008403971791267, 0.98039215803146362, 0.98039215803146362),
(0.025210084393620491, 0.97647058963775635, 0.97647058963775635),
(0.029411764815449715, 0.97254902124404907, 0.97254902124404907),
(0.033613447099924088, 0.96470588445663452, 0.96470588445663452),
(0.037815127521753311, 0.96078431606292725, 0.96078431606292725),
(0.042016807943582535, 0.95686274766921997, 0.95686274766921997),
(0.046218488365411758, 0.9529411792755127, 0.9529411792755127),
(0.050420168787240982, 0.94901961088180542, 0.94901961088180542),
(0.054621849209070206, 0.94509804248809814, 0.94509804248809814),
(0.058823529630899429, 0.94117647409439087, 0.94117647409439087),
(0.063025213778018951, 0.93725490570068359, 0.93725490570068359),
(0.067226894199848175, 0.93333333730697632, 0.93333333730697632),
(0.071428574621677399, 0.92941176891326904, 0.92941176891326904),
(0.075630255043506622, 0.92549020051956177, 0.92549020051956177),
(0.079831935465335846, 0.92156863212585449, 0.92156863212585449),
(0.08403361588716507, 0.91764706373214722, 0.91764706373214722),
(0.088235296308994293, 0.91372549533843994, 0.91372549533843994),
(0.092436976730823517, 0.90980392694473267, 0.90980392694473267),
(0.09663865715265274, 0.90196079015731812, 0.90196079015731812),
(0.10084033757448196, 0.89803922176361084, 0.89803922176361084),
(0.10504201799631119, 0.89411765336990356, 0.89411765336990356),
(0.10924369841814041, 0.89019608497619629, 0.89019608497619629),
(0.11344537883996964, 0.88627451658248901, 0.88627451658248901),
(0.11764705926179886, 0.88235294818878174, 0.88235294818878174),
(0.12184873968362808, 0.87843137979507446, 0.87843137979507446),
(0.1260504275560379, 0.87450981140136719, 0.87450981140136719),
(0.13025210797786713, 0.87058824300765991, 0.87058824300765991),
(0.13445378839969635, 0.86666667461395264, 0.86666667461395264),
(0.13865546882152557, 0.86274510622024536, 0.86274510622024536),
(0.1428571492433548, 0.85882353782653809, 0.85882353782653809),
(0.14705882966518402, 0.85490196943283081, 0.85490196943283081),
(0.15126051008701324, 0.85098040103912354, 0.85098040103912354),
(0.15546219050884247, 0.84705883264541626, 0.84705883264541626),
(0.15966387093067169, 0.83921569585800171, 0.83921569585800171),
(0.16386555135250092, 0.83529412746429443, 0.83529412746429443),
(0.16806723177433014, 0.83137255907058716, 0.83137255907058716),
(0.17226891219615936, 0.82745099067687988, 0.82745099067687988),
(0.17647059261798859, 0.82352942228317261, 0.82352942228317261),
(0.18067227303981781, 0.81960785388946533, 0.81960785388946533),
(0.18487395346164703, 0.81568628549575806, 0.81568628549575806),
(0.18907563388347626, 0.81176471710205078, 0.81176471710205078),
(0.19327731430530548, 0.80784314870834351, 0.80784314870834351),
(0.1974789947271347, 0.80392158031463623, 0.80392158031463623),
(0.20168067514896393, 0.80000001192092896, 0.80000001192092896),
(0.20588235557079315, 0.79607844352722168, 0.79607844352722168),
(0.21008403599262238, 0.7921568751335144, 0.7921568751335144),
(0.2142857164144516, 0.78823530673980713, 0.78823530673980713),
(0.21848739683628082, 0.78431373834609985, 0.78431373834609985),
(0.22268907725811005, 0.7764706015586853, 0.7764706015586853),
(0.22689075767993927, 0.77254903316497803, 0.77254903316497803),
(0.23109243810176849, 0.76862746477127075, 0.76862746477127075),
(0.23529411852359772, 0.76470589637756348, 0.76470589637756348),
(0.23949579894542694, 0.7607843279838562, 0.7607843279838562),
(0.24369747936725616, 0.75686275959014893, 0.75686275959014893),
(0.24789915978908539, 0.75294119119644165, 0.75294119119644165),
(0.25210085511207581, 0.74901962280273438, 0.74901962280273438),
(0.25630253553390503, 0.7450980544090271, 0.7450980544090271),
(0.26050421595573425, 0.74117648601531982, 0.74117648601531982),
(0.26470589637756348, 0.73725491762161255, 0.73725491762161255),
(0.2689075767993927, 0.73333334922790527, 0.73333334922790527),
(0.27310925722122192, 0.729411780834198, 0.729411780834198),
(0.27731093764305115, 0.72549021244049072, 0.72549021244049072),
(0.28151261806488037, 0.72156864404678345, 0.72156864404678345),
(0.28571429848670959, 0.7137255072593689, 0.7137255072593689),
(0.28991597890853882, 0.70980393886566162, 0.70980393886566162),
(0.29411765933036804, 0.70588237047195435, 0.70588237047195435),
(0.29831933975219727, 0.70196080207824707, 0.70196080207824707),
(0.30252102017402649, 0.69803923368453979, 0.69803923368453979),
(0.30672270059585571, 0.69411766529083252, 0.69411766529083252),
(0.31092438101768494, 0.69019609689712524, 0.69019609689712524),
(0.31512606143951416, 0.68627452850341797, 0.68627452850341797),
(0.31932774186134338, 0.68235296010971069, 0.68235296010971069),
(0.32352942228317261, 0.67843139171600342, 0.67843139171600342),
(0.32773110270500183, 0.67450982332229614, 0.67450982332229614),
(0.33193278312683105, 0.67058825492858887, 0.67058825492858887),
(0.33613446354866028, 0.66666668653488159, 0.66666668653488159),
(0.3403361439704895, 0.66274511814117432, 0.66274511814117432),
(0.34453782439231873, 0.65882354974746704, 0.65882354974746704),
(0.34873950481414795, 0.65098041296005249, 0.65098041296005249),
(0.35294118523597717, 0.64705884456634521, 0.64705884456634521),
(0.3571428656578064, 0.64313727617263794, 0.64313727617263794),
(0.36134454607963562, 0.63921570777893066, 0.63921570777893066),
(0.36554622650146484, 0.63529413938522339, 0.63529413938522339),
(0.36974790692329407, 0.63137257099151611, 0.63137257099151611),
(0.37394958734512329, 0.62745100259780884, 0.62745100259780884),
(0.37815126776695251, 0.62352943420410156, 0.62352943420410156),
(0.38235294818878174, 0.61960786581039429, 0.61960786581039429),
(0.38655462861061096, 0.61568629741668701, 0.61568629741668701),
(0.39075630903244019, 0.61176472902297974, 0.61176472902297974),
(0.39495798945426941, 0.60784316062927246, 0.60784316062927246),
(0.39915966987609863, 0.60392159223556519, 0.60392159223556519),
(0.40336135029792786, 0.60000002384185791, 0.60000002384185791),
(0.40756303071975708, 0.59607845544815063, 0.59607845544815063),
(0.4117647111415863, 0.58823531866073608, 0.58823531866073608),
(0.41596639156341553, 0.58431375026702881, 0.58431375026702881),
(0.42016807198524475, 0.58039218187332153, 0.58039218187332153),
(0.42436975240707397, 0.57647061347961426, 0.57647061347961426),
(0.4285714328289032, 0.57254904508590698, 0.57254904508590698),
(0.43277311325073242, 0.56862747669219971, 0.56862747669219971),
(0.43697479367256165, 0.56470590829849243, 0.56470590829849243),
(0.44117647409439087, 0.56078433990478516, 0.56078433990478516),
(0.44537815451622009, 0.55686277151107788, 0.55686277151107788),
(0.44957983493804932, 0.55294120311737061, 0.55294120311737061),
(0.45378151535987854, 0.54901963472366333, 0.54901963472366333),
(0.45798319578170776, 0.54509806632995605, 0.54509806632995605),
(0.46218487620353699, 0.54117649793624878, 0.54117649793624878),
(0.46638655662536621, 0.5372549295425415, 0.5372549295425415),
(0.47058823704719543, 0.53333336114883423, 0.53333336114883423),
(0.47478991746902466, 0.52549022436141968, 0.52549022436141968),
(0.47899159789085388, 0.5215686559677124, 0.5215686559677124),
(0.48319327831268311, 0.51764708757400513, 0.51764708757400513),
(0.48739495873451233, 0.51372551918029785, 0.51372551918029785),
(0.49159663915634155, 0.50980395078659058, 0.50980395078659058),
(0.49579831957817078, 0.5058823823928833, 0.5058823823928833), (0.5,
0.50196081399917603, 0.50196081399917603), (0.50420171022415161,
0.49803921580314636, 0.49803921580314636), (0.50840336084365845,
0.49411764740943909, 0.49411764740943909), (0.51260507106781006,
0.49019607901573181, 0.49019607901573181), (0.51680672168731689,
0.48627451062202454, 0.48627451062202454), (0.52100843191146851,
0.48235294222831726, 0.48235294222831726), (0.52521008253097534,
0.47843137383460999, 0.47843137383460999), (0.52941179275512695,
0.47450980544090271, 0.47450980544090271), (0.53361344337463379,
0.47058823704719543, 0.47058823704719543), (0.5378151535987854,
0.46274510025978088, 0.46274510025978088), (0.54201680421829224,
0.45882353186607361, 0.45882353186607361), (0.54621851444244385,
0.45490196347236633, 0.45490196347236633), (0.55042016506195068,
0.45098039507865906, 0.45098039507865906), (0.55462187528610229,
0.44705882668495178, 0.44705882668495178), (0.55882352590560913,
0.44313725829124451, 0.44313725829124451), (0.56302523612976074,
0.43921568989753723, 0.43921568989753723), (0.56722688674926758,
0.43529412150382996, 0.43529412150382996), (0.57142859697341919,
0.43137255311012268, 0.43137255311012268), (0.57563024759292603,
0.42745098471641541, 0.42745098471641541), (0.57983195781707764,
0.42352941632270813, 0.42352941632270813), (0.58403360843658447,
0.41960784792900085, 0.41960784792900085), (0.58823531866073608,
0.41568627953529358, 0.41568627953529358), (0.59243696928024292,
0.4117647111415863, 0.4117647111415863), (0.59663867950439453,
0.40784314274787903, 0.40784314274787903), (0.60084033012390137,
0.40000000596046448, 0.40000000596046448), (0.60504204034805298,
0.3960784375667572, 0.3960784375667572), (0.60924369096755981,
0.39215686917304993, 0.39215686917304993), (0.61344540119171143,
0.38823530077934265, 0.38823530077934265), (0.61764705181121826,
0.38431373238563538, 0.38431373238563538), (0.62184876203536987,
0.3803921639919281, 0.3803921639919281), (0.62605041265487671,
0.37647059559822083, 0.37647059559822083), (0.63025212287902832,
0.37254902720451355, 0.37254902720451355), (0.63445377349853516,
0.36862745881080627, 0.36862745881080627), (0.63865548372268677,
0.364705890417099, 0.364705890417099), (0.6428571343421936,
0.36078432202339172, 0.36078432202339172), (0.64705884456634521,
0.35686275362968445, 0.35686275362968445), (0.65126049518585205,
0.35294118523597717, 0.35294118523597717), (0.65546220541000366,
0.3490196168422699, 0.3490196168422699), (0.6596638560295105,
0.34509804844856262, 0.34509804844856262), (0.66386556625366211,
0.33725491166114807, 0.33725491166114807), (0.66806721687316895,
0.3333333432674408, 0.3333333432674408), (0.67226892709732056,
0.32941177487373352, 0.32941177487373352), (0.67647057771682739,
0.32549020648002625, 0.32549020648002625), (0.680672287940979,
0.32156863808631897, 0.32156863808631897), (0.68487393856048584,
0.31764706969261169, 0.31764706969261169), (0.68907564878463745,
0.31372550129890442, 0.31372550129890442), (0.69327729940414429,
0.30980393290519714, 0.30980393290519714), (0.6974790096282959,
0.30588236451148987, 0.30588236451148987), (0.70168066024780273,
0.30196079611778259, 0.30196079611778259), (0.70588237047195435,
0.29803922772407532, 0.29803922772407532), (0.71008402109146118,
0.29411765933036804, 0.29411765933036804), (0.71428573131561279,
0.29019609093666077, 0.29019609093666077), (0.71848738193511963,
0.28627452254295349, 0.28627452254295349), (0.72268909215927124,
0.28235295414924622, 0.28235295414924622), (0.72689074277877808,
0.27450981736183167, 0.27450981736183167), (0.73109245300292969,
0.27058824896812439, 0.27058824896812439), (0.73529410362243652,
0.26666668057441711, 0.26666668057441711), (0.73949581384658813,
0.26274511218070984, 0.26274511218070984), (0.74369746446609497,
0.25882354378700256, 0.25882354378700256), (0.74789917469024658,
0.25490197539329529, 0.25490197539329529), (0.75210082530975342,
0.25098040699958801, 0.25098040699958801), (0.75630253553390503,
0.24705882370471954, 0.24705882370471954), (0.76050418615341187,
0.24313725531101227, 0.24313725531101227), (0.76470589637756348,
0.23921568691730499, 0.23921568691730499), (0.76890754699707031,
0.23529411852359772, 0.23529411852359772), (0.77310925722122192,
0.23137255012989044, 0.23137255012989044), (0.77731090784072876,
0.22745098173618317, 0.22745098173618317), (0.78151261806488037,
0.22352941334247589, 0.22352941334247589), (0.78571426868438721,
0.21960784494876862, 0.21960784494876862), (0.78991597890853882,
0.21176470816135406, 0.21176470816135406), (0.79411762952804565,
0.20784313976764679, 0.20784313976764679), (0.79831933975219727,
0.20392157137393951, 0.20392157137393951), (0.8025209903717041,
0.20000000298023224, 0.20000000298023224), (0.80672270059585571,
0.19607843458652496, 0.19607843458652496), (0.81092435121536255,
0.19215686619281769, 0.19215686619281769), (0.81512606143951416,
0.18823529779911041, 0.18823529779911041), (0.819327712059021,
0.18431372940540314, 0.18431372940540314), (0.82352942228317261,
0.18039216101169586, 0.18039216101169586), (0.82773107290267944,
0.17647059261798859, 0.17647059261798859), (0.83193278312683105,
0.17254902422428131, 0.17254902422428131), (0.83613443374633789,
0.16862745583057404, 0.16862745583057404), (0.8403361439704895,
0.16470588743686676, 0.16470588743686676), (0.84453779458999634,
0.16078431904315948, 0.16078431904315948), (0.84873950481414795,
0.15686275064945221, 0.15686275064945221), (0.85294115543365479,
0.14901961386203766, 0.14901961386203766), (0.8571428656578064,
0.14509804546833038, 0.14509804546833038), (0.86134451627731323,
0.14117647707462311, 0.14117647707462311), (0.86554622650146484,
0.13725490868091583, 0.13725490868091583), (0.86974787712097168,
0.13333334028720856, 0.13333334028720856), (0.87394958734512329,
0.12941177189350128, 0.12941177189350128), (0.87815123796463013,
0.12549020349979401, 0.12549020349979401), (0.88235294818878174,
0.12156862765550613, 0.12156862765550613), (0.88655459880828857,
0.11764705926179886, 0.11764705926179886), (0.89075630903244019,
0.11372549086809158, 0.11372549086809158), (0.89495795965194702,
0.10980392247438431, 0.10980392247438431), (0.89915966987609863,
0.10588235408067703, 0.10588235408067703), (0.90336132049560547,
0.10196078568696976, 0.10196078568696976), (0.90756303071975708,
0.098039217293262482, 0.098039217293262482), (0.91176468133926392,
0.094117648899555206, 0.094117648899555206), (0.91596639156341553,
0.086274512112140656, 0.086274512112140656), (0.92016804218292236,
0.08235294371843338, 0.08235294371843338), (0.92436975240707397,
0.078431375324726105, 0.078431375324726105), (0.92857140302658081,
0.074509806931018829, 0.074509806931018829), (0.93277311325073242,
0.070588238537311554, 0.070588238537311554), (0.93697476387023926,
0.066666670143604279, 0.066666670143604279), (0.94117647409439087,
0.062745101749897003, 0.062745101749897003), (0.94537812471389771,
0.058823529630899429, 0.058823529630899429), (0.94957983493804932,
0.054901961237192154, 0.054901961237192154), (0.95378148555755615,
0.050980392843484879, 0.050980392843484879), (0.95798319578170776,
0.047058824449777603, 0.047058824449777603), (0.9621848464012146,
0.043137256056070328, 0.043137256056070328), (0.96638655662536621,
0.039215687662363052, 0.039215687662363052), (0.97058820724487305,
0.035294119268655777, 0.035294119268655777), (0.97478991746902466,
0.031372550874948502, 0.031372550874948502), (0.97899156808853149,
0.023529412224888802, 0.023529412224888802), (0.98319327831268311,
0.019607843831181526, 0.019607843831181526), (0.98739492893218994,
0.015686275437474251, 0.015686275437474251), (0.99159663915634155,
0.011764706112444401, 0.011764706112444401), (0.99579828977584839,
0.0078431377187371254, 0.0078431377187371254), (1.0,
0.0039215688593685627, 0.0039215688593685627)]}
Accent = colors.LinearSegmentedColormap('Accent', _Accent_data, LUTSIZE)
Blues = colors.LinearSegmentedColormap('Blues', _Blues_data, LUTSIZE)
BrBG = colors.LinearSegmentedColormap('BrBG', _BrBG_data, LUTSIZE)
BuGn = colors.LinearSegmentedColormap('BuGn', _BuGn_data, LUTSIZE)
BuPu = colors.LinearSegmentedColormap('BuPu', _BuPu_data, LUTSIZE)
Dark2 = colors.LinearSegmentedColormap('Dark2', _Dark2_data, LUTSIZE)
GnBu = colors.LinearSegmentedColormap('GnBu', _GnBu_data, LUTSIZE)
Greens = colors.LinearSegmentedColormap('Greens', _Greens_data, LUTSIZE)
Greys = colors.LinearSegmentedColormap('Greys', _Greys_data, LUTSIZE)
Oranges = colors.LinearSegmentedColormap('Oranges', _Oranges_data, LUTSIZE)
OrRd = colors.LinearSegmentedColormap('OrRd', _OrRd_data, LUTSIZE)
Paired = colors.LinearSegmentedColormap('Paired', _Paired_data, LUTSIZE)
Pastel1 = colors.LinearSegmentedColormap('Pastel1', _Pastel1_data, LUTSIZE)
Pastel2 = colors.LinearSegmentedColormap('Pastel2', _Pastel2_data, LUTSIZE)
PiYG = colors.LinearSegmentedColormap('PiYG', _PiYG_data, LUTSIZE)
PRGn = colors.LinearSegmentedColormap('PRGn', _PRGn_data, LUTSIZE)
PuBu = colors.LinearSegmentedColormap('PuBu', _PuBu_data, LUTSIZE)
PuBuGn = colors.LinearSegmentedColormap('PuBuGn', _PuBuGn_data, LUTSIZE)
PuOr = colors.LinearSegmentedColormap('PuOr', _PuOr_data, LUTSIZE)
PuRd = colors.LinearSegmentedColormap('PuRd', _PuRd_data, LUTSIZE)
Purples = colors.LinearSegmentedColormap('Purples', _Purples_data, LUTSIZE)
RdBu = colors.LinearSegmentedColormap('RdBu', _RdBu_data, LUTSIZE)
RdGy = colors.LinearSegmentedColormap('RdGy', _RdGy_data, LUTSIZE)
RdPu = colors.LinearSegmentedColormap('RdPu', _RdPu_data, LUTSIZE)
RdYlBu = colors.LinearSegmentedColormap('RdYlBu', _RdYlBu_data, LUTSIZE)
RdYlGn = colors.LinearSegmentedColormap('RdYlGn', _RdYlGn_data, LUTSIZE)
Reds = colors.LinearSegmentedColormap('Reds', _Reds_data, LUTSIZE)
Set1 = colors.LinearSegmentedColormap('Set1', _Set1_data, LUTSIZE)
Set2 = colors.LinearSegmentedColormap('Set2', _Set2_data, LUTSIZE)
Set3 = colors.LinearSegmentedColormap('Set3', _Set3_data, LUTSIZE)
Spectral = colors.LinearSegmentedColormap('Spectral', _Spectral_data, LUTSIZE)
YlGn = colors.LinearSegmentedColormap('YlGn', _YlGn_data, LUTSIZE)
YlGnBu = colors.LinearSegmentedColormap('YlGnBu', _YlGnBu_data, LUTSIZE)
YlOrBr = colors.LinearSegmentedColormap('YlOrBr', _YlOrBr_data, LUTSIZE)
YlOrRd = colors.LinearSegmentedColormap('YlOrRd', _YlOrRd_data, LUTSIZE)
gist_earth = colors.LinearSegmentedColormap('gist_earth', _gist_earth_data, LUTSIZE)
gist_gray = colors.LinearSegmentedColormap('gist_gray', _gist_gray_data, LUTSIZE)
gist_heat = colors.LinearSegmentedColormap('gist_heat', _gist_heat_data, LUTSIZE)
gist_ncar = colors.LinearSegmentedColormap('gist_ncar', _gist_ncar_data, LUTSIZE)
gist_rainbow = colors.LinearSegmentedColormap('gist_rainbow', _gist_rainbow_data, LUTSIZE)
gist_stern = colors.LinearSegmentedColormap('gist_stern', _gist_stern_data, LUTSIZE)
gist_yarg = colors.LinearSegmentedColormap('gist_yarg', _gist_yarg_data, LUTSIZE)
datad['Accent']=_Accent_data
datad['Blues']=_Blues_data
datad['BrBG']=_BrBG_data
datad['BuGn']=_BuGn_data
datad['BuPu']=_BuPu_data
datad['Dark2']=_Dark2_data
datad['GnBu']=_GnBu_data
datad['Greens']=_Greens_data
datad['Greys']=_Greys_data
datad['Oranges']=_Oranges_data
datad['OrRd']=_OrRd_data
datad['Paired']=_Paired_data
datad['Pastel1']=_Pastel1_data
datad['Pastel2']=_Pastel2_data
datad['PiYG']=_PiYG_data
datad['PRGn']=_PRGn_data
datad['PuBu']=_PuBu_data
datad['PuBuGn']=_PuBuGn_data
datad['PuOr']=_PuOr_data
datad['PuRd']=_PuRd_data
datad['Purples']=_Purples_data
datad['RdBu']=_RdBu_data
datad['RdGy']=_RdGy_data
datad['RdPu']=_RdPu_data
datad['RdYlBu']=_RdYlBu_data
datad['RdYlGn']=_RdYlGn_data
datad['Reds']=_Reds_data
datad['Set1']=_Set1_data
datad['Set2']=_Set2_data
datad['Set3']=_Set3_data
datad['Spectral']=_Spectral_data
datad['YlGn']=_YlGn_data
datad['YlGnBu']=_YlGnBu_data
datad['YlOrBr']=_YlOrBr_data
datad['YlOrRd']=_YlOrRd_data
datad['gist_earth']=_gist_earth_data
datad['gist_gray']=_gist_gray_data
datad['gist_heat']=_gist_heat_data
datad['gist_ncar']=_gist_ncar_data
datad['gist_rainbow']=_gist_rainbow_data
datad['gist_stern']=_gist_stern_data
datad['gist_yarg']=_gist_yarg_data
# reverse all the colormaps.
# reversed colormaps have '_r' appended to the name.
def revcmap(data):
data_r = {}
for key, val in data.iteritems():
valnew = [(1.-a, b, c) for a, b, c in reversed(val)]
data_r[key] = valnew
return data_r
cmapnames = datad.keys()
for cmapname in cmapnames:
cmapname_r = cmapname+'_r'
cmapdat_r = revcmap(datad[cmapname])
datad[cmapname_r] = cmapdat_r
locals()[cmapname_r] = colors.LinearSegmentedColormap(cmapname_r, cmapdat_r, LUTSIZE)
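# Editorial note (added): after the loop above, every colormap registered in
# this module is also available under a '_r' suffix (e.g. `Blues_r`). Since
# `revcmap` mirrors the anchor positions, the reversed map satisfies
# cmap_r(x) == cmap(1 - x) at the stored anchor points.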
| gpl-3.0 |
pavelchristof/gomoku-ai | tensorflow/contrib/timeseries/examples/known_anomaly.py | 53 | 6786 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Example of using an exogenous feature to ignore a known anomaly."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
from os import path
import numpy as np
import tensorflow as tf
try:
import matplotlib # pylint: disable=g-import-not-at-top
matplotlib.use("TkAgg") # Need Tk for interactive plots.
from matplotlib import pyplot # pylint: disable=g-import-not-at-top
HAS_MATPLOTLIB = True
except ImportError:
# Plotting requires matplotlib, but the unit test running this code may
# execute in an environment without it (i.e. matplotlib is not a build
# dependency). We'd still like to test the TensorFlow-dependent parts of this
# example, namely train_and_predict.
HAS_MATPLOTLIB = False
_MODULE_PATH = path.dirname(__file__)
_DATA_FILE = path.join(_MODULE_PATH, "data/changepoints.csv")
def train_and_evaluate_exogenous(csv_file_name=_DATA_FILE, train_steps=300):
"""Training, evaluating, and predicting on a series with changepoints."""
# Indicate the format of our exogenous feature, in this case a string
# representing a boolean value.
string_feature = tf.contrib.layers.sparse_column_with_keys(
column_name="is_changepoint", keys=["no", "yes"])
# Specify the way this feature is presented to the model, here using a one-hot
# encoding.
one_hot_feature = tf.contrib.layers.one_hot_column(
sparse_id_column=string_feature)
estimator = tf.contrib.timeseries.StructuralEnsembleRegressor(
periodicities=12,
# Extract a smooth period by constraining the number of latent values
# being cycled between.
cycle_num_latent_values=3,
num_features=1,
exogenous_feature_columns=[one_hot_feature],
# Make exogenous updates sparse by setting an update condition. This in
# effect allows missing exogenous features: if the condition evaluates to
# False, no update is performed. Otherwise we sometimes end up with
# "leaky" updates which add unnecessary uncertainty to the model even when
# there is no changepoint.
exogenous_update_condition=
lambda times, features: tf.equal(features["is_changepoint"], "yes"))
reader = tf.contrib.timeseries.CSVReader(
csv_file_name,
# Indicate the format of our CSV file. First we have two standard columns,
# one for times and one for values. The third column is a custom exogenous
# feature indicating whether each timestep is a changepoint. The
# changepoint feature name must match the string_feature column name
# above.
column_names=(tf.contrib.timeseries.TrainEvalFeatures.TIMES,
tf.contrib.timeseries.TrainEvalFeatures.VALUES,
"is_changepoint"),
# Indicate dtypes for our features.
column_dtypes=(tf.int64, tf.float32, tf.string),
# This CSV has a header line; here we just ignore it.
skip_header_lines=1)
train_input_fn = tf.contrib.timeseries.RandomWindowInputFn(
# Use truncated backpropagation with a window size of 64, batching
# together 4 of these windows (random offsets) per training step. Training
# with exogenous features often requires somewhat larger windows.
reader, batch_size=4, window_size=64)
estimator.train(input_fn=train_input_fn, steps=train_steps)
evaluation_input_fn = tf.contrib.timeseries.WholeDatasetInputFn(reader)
evaluation = estimator.evaluate(input_fn=evaluation_input_fn, steps=1)
# Create an input_fn for prediction, with a simulated changepoint. Since all
# of the anomalies in the training data are explained by the exogenous
# feature, we should get relatively confident predictions before the indicated
# changepoint (since we are telling the model that no changepoint exists at
# those times) and relatively uncertain predictions after.
(predictions,) = tuple(estimator.predict(
input_fn=tf.contrib.timeseries.predict_continuation_input_fn(
evaluation, steps=100,
exogenous_features={
"is_changepoint": [["no"] * 49 + ["yes"] + ["no"] * 50]})))
times = evaluation["times"][0]
observed = evaluation["observed"][0, :, 0]
mean = np.squeeze(np.concatenate(
[evaluation["mean"][0], predictions["mean"]], axis=0))
variance = np.squeeze(np.concatenate(
[evaluation["covariance"][0], predictions["covariance"]], axis=0))
all_times = np.concatenate([times, predictions["times"]], axis=0)
upper_limit = mean + np.sqrt(variance)
lower_limit = mean - np.sqrt(variance)
# Indicate the locations of the changepoints for plotting vertical lines.
anomaly_locations = []
with open(csv_file_name, "r") as csv_file:
csv_reader = csv.DictReader(csv_file)
for row in csv_reader:
if row["is_changepoint"] == "yes":
anomaly_locations.append(int(row["time"]))
anomaly_locations.append(predictions["times"][49])
return (times, observed, all_times, mean, upper_limit, lower_limit,
anomaly_locations)
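# Editorial note (added): the tuple returned above is consumed by `make_plot`
# below; `upper_limit` and `lower_limit` are the predicted mean plus/minus one
# standard deviation (the square root of the predicted variance), so the shaded
# band in the resulting figure is a one-sigma envelope around the forecast.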
def make_plot(name, training_times, observed, all_times, mean,
upper_limit, lower_limit, anomaly_locations):
"""Plot the time series and anomalies in a new figure."""
pyplot.figure()
pyplot.plot(training_times, observed, "b", label="training series")
pyplot.plot(all_times, mean, "r", label="forecast")
pyplot.axvline(anomaly_locations[0], linestyle="dotted", label="changepoints")
for anomaly_location in anomaly_locations[1:]:
pyplot.axvline(anomaly_location, linestyle="dotted")
pyplot.fill_between(all_times, lower_limit, upper_limit, color="grey",
alpha="0.2")
pyplot.axvline(training_times[-1], color="k", linestyle="--")
pyplot.xlabel("time")
pyplot.ylabel("observations")
pyplot.legend(loc=0)
pyplot.title(name)
def main(unused_argv):
if not HAS_MATPLOTLIB:
raise ImportError(
"Please install matplotlib to generate a plot from this example.")
make_plot("Ignoring a known anomaly", *train_and_evaluate_exogenous())
pyplot.show()
if __name__ == "__main__":
tf.app.run(main=main)
| apache-2.0 |
shenzebang/scikit-learn | examples/calibration/plot_compare_calibration.py | 241 | 5008 | """
========================================
Comparison of Calibration of Classifiers
========================================
Well calibrated classifiers are probabilistic classifiers for which the output
of the predict_proba method can be directly interpreted as a confidence level.
For instance a well calibrated (binary) classifier should classify the samples
such that among the samples to which it gave a predict_proba value close to
0.8, approx. 80% actually belong to the positive class.
LogisticRegression returns well calibrated predictions as it directly
optimizes log-loss. In contrast, the other methods return biased probabilities,
with different biases per method:
* GaussianNaiveBayes tends to push probabilities to 0 or 1 (note the counts in
the histograms). This is mainly because it makes the assumption that features
are conditionally independent given the class, which is not the case in this
dataset which contains 2 redundant features.
* RandomForestClassifier shows the opposite behavior: the histograms show
peaks at approx. 0.2 and 0.9 probability, while probabilities close to 0 or 1
are very rare. An explanation for this is given by Niculescu-Mizil and Caruana
[1]: "Methods such as bagging and random forests that average predictions from
a base set of models can have difficulty making predictions near 0 and 1
because variance in the underlying base models will bias predictions that
should be near zero or one away from these values. Because predictions are
restricted to the interval [0,1], errors caused by variance tend to be one-
sided near zero and one. For example, if a model should predict p = 0 for a
case, the only way bagging can achieve this is if all bagged trees predict
zero. If we add noise to the trees that bagging is averaging over, this noise
will cause some trees to predict values larger than 0 for this case, thus
moving the average prediction of the bagged ensemble away from 0. We observe
this effect most strongly with random forests because the base-level trees
trained with random forests have relatively high variance due to feature
subsetting." As a result, the calibration curve shows a characteristic sigmoid
shape, indicating that the classifier could trust its "intuition" more and
typically return probabilities closer to 0 or 1.
* Support Vector Classification (SVC) shows an even more sigmoid curve than
the RandomForestClassifier, which is typical for maximum-margin methods
(compare Niculescu-Mizil and Caruana [1]), which focus on hard samples
that are close to the decision boundary (the support vectors).
.. topic:: References:
.. [1] Predicting Good Probabilities with Supervised Learning,
A. Niculescu-Mizil & R. Caruana, ICML 2005
"""
print(__doc__)
# Author: Jan Hendrik Metzen <[email protected]>
# License: BSD Style.
import numpy as np
np.random.seed(0)
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import LinearSVC
from sklearn.calibration import calibration_curve
X, y = datasets.make_classification(n_samples=100000, n_features=20,
n_informative=2, n_redundant=2)
train_samples = 100 # Samples used for training the models
X_train = X[:train_samples]
X_test = X[train_samples:]
y_train = y[:train_samples]
y_test = y[train_samples:]
# Create classifiers
lr = LogisticRegression()
gnb = GaussianNB()
svc = LinearSVC(C=1.0)
rfc = RandomForestClassifier(n_estimators=100)
###############################################################################
# Plot calibration plots
plt.figure(figsize=(10, 10))
ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)
ax2 = plt.subplot2grid((3, 1), (2, 0))
ax1.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated")
for clf, name in [(lr, 'Logistic'),
(gnb, 'Naive Bayes'),
(svc, 'Support Vector Classification'),
(rfc, 'Random Forest')]:
clf.fit(X_train, y_train)
if hasattr(clf, "predict_proba"):
prob_pos = clf.predict_proba(X_test)[:, 1]
else: # use decision function
prob_pos = clf.decision_function(X_test)
prob_pos = \
(prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())
fraction_of_positives, mean_predicted_value = \
calibration_curve(y_test, prob_pos, n_bins=10)
ax1.plot(mean_predicted_value, fraction_of_positives, "s-",
label="%s" % (name, ))
ax2.hist(prob_pos, range=(0, 1), bins=10, label=name,
histtype="step", lw=2)
ax1.set_ylabel("Fraction of positives")
ax1.set_ylim([-0.05, 1.05])
ax1.legend(loc="lower right")
ax1.set_title('Calibration plots (reliability curve)')
ax2.set_xlabel("Mean predicted value")
ax2.set_ylabel("Count")
ax2.legend(loc="upper center", ncol=2)
plt.tight_layout()
plt.show()
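# Editorial sketch (added, not part of the original example): the sigmoid-shaped
# biases discussed in the module docstring can typically be corrected by wrapping
# a classifier in CalibratedClassifierCV (Platt scaling shown here); the names
# below reuse objects defined earlier in this script.
from sklearn.calibration import CalibratedClassifierCV
from sklearn.metrics import brier_score_loss
sigmoid_svc = CalibratedClassifierCV(svc, method='sigmoid', cv=3)
sigmoid_svc.fit(X_train, y_train)
calibrated_prob_pos = sigmoid_svc.predict_proba(X_test)[:, 1]
print("Brier score of sigmoid-calibrated SVC: %.3f"
      % brier_score_loss(y_test, calibrated_prob_pos))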
| bsd-3-clause |
bioasp/caspo | caspo/core/logicalnetwork.py | 1 | 28826 | # Copyright (c) 2014-2016, Santiago Videla
#
# This file is part of caspo.
#
# caspo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# caspo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with caspo. If not, see <http://www.gnu.org/licenses/>.
# -*- coding: utf-8 -*-
from collections import defaultdict
import itertools as it
import networkx as nx
import pandas as pd
import numpy as np
from joblib import Parallel, delayed
from sklearn.metrics import mean_squared_error
import clingo
from .clause import Clause
from .mapping import Mapping
from .graph import Graph
from .hypergraph import HyperGraph
def __parallel_predictions__(network, clampings, readouts, stimuli=None, inhibitors=None):
return network.predictions(clampings, readouts, stimuli, inhibitors).values
def __parallel_mse__(network, clampings, readouts, observations, pos):
return mean_squared_error(observations, (network.predictions(clampings, readouts).values)[pos])
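# Editorial note (added): these helpers are deliberately module-level functions
# rather than methods, so that joblib can pickle them when dispatching
# Parallel(...)(delayed(...)) calls to worker processes; bound methods and
# closures do not serialize as reliably.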
class LogicalNetworkList(object):
"""
List of :class:`caspo.core.logicalnetwork.LogicalNetwork` object instances
Parameters
----------
hg : :class:`caspo.core.hypergraph.HyperGraph`
Underlying hypergraph of all logical networks.
matrix : Optional[`numpy.ndarray`_]
2-D binary array representation of all logical networks.
If None, an empty array is initialised
networks : Optional[`numpy.ndarray`_]
For each network in the list, it gives the number of networks having the same behavior.
If None, an array of ones is initialised with the same length as the number of networks in the list.
.. _numpy.ndarray: http://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html#numpy.ndarray
"""
def __init__(self, hg, matrix=None, networks=None):
self.hg = hg
if matrix is None:
self.__matrix = np.array([])
else:
self.__matrix = matrix
if isinstance(networks, np.ndarray):
self.__networks = networks
else:
self.__networks = np.array(networks, dtype=int) if networks else np.ones(len(self.__matrix), dtype=int)
@classmethod
def from_csv(cls, filename):
"""
Creates a list of logical networks from a CSV file.
Columns that cannot be parsed as a :class:`caspo.core.mapping.Mapping` are ignored
except for a column named `networks` which (if present) is interpreted as the number
of logical networks having the same input-output behavior.
Parameters
----------
filename : str
Absolute path to CSV file
Returns
-------
caspo.core.logicalnetwork.LogicalNetworkList
Created object instance
"""
df = pd.read_csv(filename)
edges = set()
mappings = []
cols = []
for m in df.columns:
try:
ct = Mapping.from_str(m)
mappings.append(ct)
cols.append(m)
for source, sign in ct.clause:
edges.add((source, ct.target, sign))
except ValueError:
                # the current column isn't a mapping; skip it
pass
graph = Graph.from_tuples(edges)
hypergraph = HyperGraph.from_graph(graph)
hypergraph.mappings = mappings
if 'networks' in df.columns:
nnet = df['networks'].values.astype(int)
else:
nnet = None
return cls(hypergraph, matrix=df[cols].values, networks=nnet)
@classmethod
def from_hypergraph(cls, hypergraph, networks=None):
"""
Creates a list of logical networks from a given hypergraph and an
optional list of :class:`caspo.core.logicalnetwork.LogicalNetwork` object instances
Parameters
----------
        hypergraph : :class:`caspo.core.hypergraph.HyperGraph`
Underlying hypergraph for this logical network list
networks : Optional[list]
List of :class:`caspo.core.logicalnetwork.LogicalNetwork` object instances
Returns
-------
caspo.core.logicalnetwork.LogicalNetworkList
Created object instance
"""
matrix = None
nnet = None
if networks:
matrix = np.array([networks[0].to_array(hypergraph.mappings)])
nnet = [networks[0].networks]
for network in networks[1:]:
matrix = np.append(matrix, [network.to_array(hypergraph.mappings)], axis=0)
nnet.append(network.networks)
return cls(hypergraph, matrix, nnet)
def add_network(self, pos, network):
"""
Adds a network to the logical network at the given position
"""
self.__networks[pos] += network.networks
@property
def mappings(self):
"""
:class:`caspo.core.mapping.MappingList`: the list of mappings present in at least one logical network in this list
"""
return self.hg.mappings[np.unique(np.where(self.__matrix == 1)[1])]
def reset(self):
"""
Drop all networks in the list
"""
self.__matrix = np.array([])
self.__networks = np.array([])
def split(self, indices):
"""
Splits logical networks according to given indices
Parameters
----------
indices : list
1-D array of sorted integers, the entries indicate where the array is split
Returns
-------
list
List of :class:`caspo.core.logicalnetwork.LogicalNetworkList` object instances
.. seealso:: `numpy.split <http://docs.scipy.org/doc/numpy/reference/generated/numpy.split.html#numpy-split>`_
"""
return [LogicalNetworkList(self.hg, part) for part in np.split(self.__matrix, indices)]
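    # Editorial note (added): following numpy.split semantics, a call such as
    # networks.split([2, 5]) (indices are illustrative) returns three
    # LogicalNetworkList objects holding networks 0-1, 2-4 and 5 onwards,
    # respectively.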
def concat(self, other):
"""
Returns the concatenation with another :class:`caspo.core.logicalnetwork.LogicalNetworkList` object instance.
It is assumed (not checked) that both have the same underlying hypergraph.
Parameters
----------
other : :class:`caspo.core.logicalnetwork.LogicalNetworkList`
The list to concatenate
Returns
-------
caspo.core.logicalnetwork.LogicalNetworkList
If other is empty returns self, if self is empty returns other, otherwise a new
:class:`caspo.core.LogicalNetworkList` is created by concatenating self and other.
"""
if len(other) == 0:
return self
elif len(self) == 0:
return other
else:
return LogicalNetworkList(self.hg, np.append(self.__matrix, other.__matrix, axis=0), np.concatenate([self.__networks, other.__networks]))
def append(self, network):
"""
Append a :class:`caspo.core.logicalnetwork.LogicalNetwork` to the list
Parameters
----------
network : :class:`caspo.core.logicalnetwork.LogicalNetwork`
The network to append
"""
arr = network.to_array(self.hg.mappings)
if len(self.__matrix):
self.__matrix = np.append(self.__matrix, [arr], axis=0)
self.__networks = np.append(self.__networks, network.networks)
else:
self.__matrix = np.array([arr])
self.__networks = np.array([network.networks])
def __len__(self):
"""
Returns the number of logical networks
Returns
-------
int
Number of logical networks
"""
return len(self.__matrix)
def __iter__(self):
"""
Iterates over all logical networks in the list
Yields
------
caspo.core.logicalnetwork.LogicalNetwork
The next logical network in the list
"""
for i, arr in enumerate(self.__matrix):
yield LogicalNetwork(((clause, target) for clause, target in self.hg.mappings[np.where(arr == 1)[0]]), networks=self.__networks[i])
def __getitem__(self, index):
"""
Returns logical network(s) at the given index
Parameters
----------
index : object
It can be an int or an iterable of int
Returns
-------
object
Either a :class:`caspo.core.logicalnetwork.LogicalNetwork` or a :class:`caspo.core.logicalnetwork.LogicalNetworkList` object
"""
matrix, networks = self.__matrix[index, :], self.__networks[index]
if hasattr(index, '__iter__'):
return LogicalNetworkList(self.hg, matrix, networks)
else:
return LogicalNetwork(((clause, target) for clause, target in self.hg.mappings[np.where(matrix == 1)[0]]), networks=networks)
def to_funset(self):
"""
Converts the list of logical networks to a set of `clingo.Function`_ instances
Returns
-------
set
Representation of all networks as a set of `clingo.Function`_ instances
.. _clingo.Function: https://potassco.github.io/clingo/python-api/current/clingo.html#-Function
"""
fs = set((clingo.Function("variable", [var]) for var in self.hg.nodes))
formulas = set()
for network in self:
formulas = formulas.union(f for v, f in network.formulas_iter())
formulas = pd.Series(list(formulas))
for i, network in enumerate(self):
for v, f in network.formulas_iter():
fs.add(clingo.Function("formula", [i, v, int(formulas[formulas == f].index[0])]))
for formula_idx, formula in formulas.items():
for clause in formula:
clause_idx = self.hg.clauses_idx[clause]
fs.add(clingo.Function("dnf", [formula_idx, clause_idx]))
for variable, sign in clause:
fs.add(clingo.Function("clause", [clause_idx, variable, sign]))
return fs
def to_dataframe(self, networks=False, dataset=None, size=False, n_jobs=-1):
"""
Converts the list of logical networks to a `pandas.DataFrame`_ object instance
Parameters
----------
networks : boolean
If True, a column with number of networks having the same behavior is included in the DataFrame
dataset: Optional[:class:`caspo.core.dataset.Dataset`]
If not None, a column with the MSE with respect to the given dataset is included in the DataFrame
size: boolean
If True, a column with the size of each logical network is included in the DataFrame
n_jobs : int
Number of jobs to run in parallel. Default to -1 (all cores available)
Returns
-------
`pandas.DataFrame`_
DataFrame representation of the list of logical networks.
.. _pandas.DataFrame: http://pandas.pydata.org/pandas-docs/stable/dsintro.html#dataframe
"""
length = len(self)
df = pd.DataFrame(self.__matrix, columns=[str(m) for m in self.hg.mappings])
if networks:
df = pd.concat([df, pd.DataFrame({'networks': self.__networks})], axis=1)
if dataset is not None:
clampings = dataset.clampings
readouts = dataset.readouts.columns
observations = dataset.readouts.values
pos = ~np.isnan(observations)
mse = Parallel(n_jobs=n_jobs)(delayed(__parallel_mse__)(n, clampings, readouts, observations[pos], pos) for n in self)
df = pd.concat([df, pd.DataFrame({'mse': mse})], axis=1)
if size:
df = pd.concat([df, pd.DataFrame({'size': np.fromiter((n.size for n in self), int, length)})], axis=1)
return df
def to_csv(self, filename, networks=False, dataset=None, size=False, n_jobs=-1):
"""
Writes the list of logical networks to a CSV file
Parameters
----------
filename : str
Absolute path where to write the CSV file
networks : boolean
If True, a column with number of networks having the same behavior is included in the file
dataset: Optional[:class:`caspo.core.dataset.Dataset`]
If not None, a column with the MSE with respect to the given dataset is included
size: boolean
If True, a column with the size of each logical network is included
n_jobs : int
Number of jobs to run in parallel. Default to -1 (all cores available)
"""
self.to_dataframe(networks, dataset, size, n_jobs).to_csv(filename, index=False)
def frequencies_iter(self):
"""
Iterates over all non-zero frequencies of logical conjunction mappings in this list
Yields
------
tuple[caspo.core.mapping.Mapping, float]
The next pair (mapping,frequency)
"""
f = self.__matrix.mean(axis=0)
for i, m in self.mappings.items():
yield m, f[i]
def frequency(self, mapping):
"""
Returns frequency of a given :class:`caspo.core.mapping.Mapping`
Parameters
----------
mapping : :class:`caspo.core.mapping.Mapping`
A logical conjuntion mapping
Returns
-------
float
Frequency of the given mapping over all logical networks
Raises
------
ValueError
If the given mapping is not found in the mappings of the underlying hypergraph of this list
"""
return self.__matrix[:, self.hg.mappings[mapping]].mean()
def combinatorics(self):
"""
Returns mutually exclusive/inclusive mappings
Returns
-------
(dict,dict)
A tuple of 2 dictionaries.
For each mapping key, the first dict has as value the set of mutually exclusive mappings while
the second dict has as value the set of mutually inclusive mappings.
"""
f = self.__matrix.mean(axis=0)
candidates = np.where((f < 1) & (f > 0))[0]
exclusive, inclusive = defaultdict(set), defaultdict(set)
for i, j in it.combinations(candidates, 2):
xor = np.logical_xor(self.__matrix[:, i], self.__matrix[:, j])
if xor.all():
exclusive[self.hg.mappings[i]].add(self.hg.mappings[j])
exclusive[self.hg.mappings[j]].add(self.hg.mappings[i])
if (~xor).all():
inclusive[self.hg.mappings[i]].add(self.hg.mappings[j])
inclusive[self.hg.mappings[j]].add(self.hg.mappings[i])
return exclusive, inclusive
def predictions(self, setup, n_jobs=-1):
"""
Returns a `pandas.DataFrame`_ with the weighted average predictions and variance of all readouts for each possible
clampings in the given experimental setup.
For each logical network the weight corresponds to the number of networks having the same behavior.
Parameters
----------
setup : :class:`caspo.core.setup.Setup`
Experimental setup
n_jobs : int
Number of jobs to run in parallel. Default to -1 (all cores available)
Returns
-------
`pandas.DataFrame`_
DataFrame with the weighted average predictions and variance of all readouts for each possible clamping
.. _pandas.DataFrame: http://pandas.pydata.org/pandas-docs/stable/dsintro.html#dataframe
.. seealso:: `Wikipedia: Weighted sample variance <https://en.wikipedia.org/wiki/Weighted_arithmetic_mean#Weighted_sample_variance>`_
"""
stimuli, inhibitors, readouts = setup.stimuli, setup.inhibitors, setup.readouts
nc = len(setup.cues())
predictions = np.zeros((len(self), 2**nc, len(setup)))
predictions[:, :, :] = Parallel(n_jobs=n_jobs)(delayed(__parallel_predictions__)(n, list(setup.clampings_iter(setup.cues())), readouts, stimuli, inhibitors) for n in self)
avg = np.average(predictions[:, :, nc:], axis=0, weights=self.__networks)
var = np.average((predictions[:, :, nc:]-avg)**2, axis=0, weights=self.__networks)
rcues = ["TR:%s" % c for c in setup.cues(True)]
cols = np.concatenate([rcues, ["AVG:%s" % r for r in readouts], ["VAR:%s" % r for r in readouts]])
#use the first network predictions to extract all clampings
df = pd.DataFrame(np.concatenate([predictions[0, :, :nc], avg, var], axis=1), columns=cols)
df[rcues] = df[rcues].astype(int)
return df
def weighted_mse(self, dataset, n_jobs=-1):
"""
Returns the weighted MSE over all logical networks with respect to the given :class:`caspo.core.dataset.Dataset` object instance.
For each logical network the weight corresponds to the number of networks having the same behavior.
Parameters
----------
dataset: :class:`caspo.core.dataset.Dataset`
Dataset to compute MSE
n_jobs : int
Number of jobs to run in parallel. Default to -1 (all cores available)
Returns
-------
float
Weighted MSE
"""
predictions = np.zeros((len(self), len(dataset.clampings), len(dataset.setup.readouts)))
predictions[:, :, :] = Parallel(n_jobs=n_jobs)(delayed(__parallel_predictions__)(n, dataset.clampings, dataset.setup.readouts) for n in self)
for i, _ in enumerate(self):
predictions[i, :, :] *= self.__networks[i]
readouts = dataset.readouts.values
pos = ~np.isnan(readouts)
return mean_squared_error(readouts[pos], (np.sum(predictions, axis=0) / np.sum(self.__networks))[pos])
def __plot__(self):
"""
Returns a `networkx.MultiDiGraph`_ ready for plotting. Edges weights correspond to mappings frequencies.
Returns
-------
`networkx.MultiDiGraph`_
Network object instance ready for plotting
.. _networkx.MultiDiGraph: https://networkx.readthedocs.io/en/stable/reference/classes.multidigraph.html#networkx.MultiDiGraph
"""
graph = nx.MultiDiGraph()
n_gates = 1
for mapping in self.hg.mappings[np.unique(np.where(self.__matrix == 1)[1])]:
graph.add_node(mapping.target)
if len(mapping.clause) > 1:
gate = 'gate-%s' % n_gates
n_gates += 1
graph.add_node(gate, gate=True)
graph.add_edge(gate, mapping.target, sign=1, weight=self.frequency(mapping))
for var, sign in mapping.clause:
graph.add_node(var)
graph.add_edge(var, gate, sign=sign, weight=self.frequency(mapping))
else:
for var, sign in mapping.clause:
graph.add_node(var)
graph.add_edge(var, mapping.target, sign=sign, weight=self.frequency(mapping))
return graph
class LogicalNetwork(nx.DiGraph):
"""
Logical network class extends `networkx.DiGraph`_ with nodes being,
either :class:`caspo.core.clause.Clause` object instances or species names (str).
Attributes
----------
networks : int
Number of networks having the same behavior (including this network as the representative network)
.. _networkx.DiGraph: https://networkx.readthedocs.io/en/stable/reference/classes.digraph.html#networkx.DiGraph
"""
@classmethod
def from_hypertuples(cls, hg, tuples):
"""
Creates a logical network from an iterable of integer tuples matching mappings in the given
:class:`caspo.core.hypergraph.HyperGraph`
Parameters
----------
hg : :class:`caspo.core.hypergraph.HyperGraph`
Underlying hypergraph
tuples : (int,int)
tuples matching mappings in the given hypergraph
Returns
-------
caspo.core.logicalnetwork.LogicalNetwork
Created object instance
"""
return cls([(hg.clauses[j], hg.variable(i)) for i, j in tuples], networks=1)
@property
def networks(self):
return self.graph.get('networks', 1)
def to_graph(self):
"""
Converts the logical network to its underlying interaction graph
Returns
-------
caspo.core.graph.Graph
The underlying interaction graph
"""
edges = set()
for clause, target in self.edges():
for source, signature in clause:
edges.add((source, target, signature))
return Graph.from_tuples(edges)
@property
def size(self):
"""
int: The size (complexity) of this logical network as the sum of clauses' length
"""
return sum([len(c) for c, _ in self.edges()])
def step(self, state, clamping):
"""
Performs a simulation step from the given state and with respect to the given clamping
Parameters
----------
state : dict
The key-value mapping describing the current state of the logical network
clamping : caspo.core.clamping.Clamping
A clamping over variables in the logical network
Returns
-------
dict
The key-value mapping describing the next state of the logical network
"""
ns = state.copy()
for var in state:
if clamping.has_variable(var):
ns[var] = int(clamping.bool(var))
else:
or_value = 0
for clause, _ in self.in_edges(var):
or_value = or_value or clause.bool(state)
if or_value:
break
ns[var] = int(or_value)
return ns
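    # Illustrative trace: with A clamped to 1 and a single positive clause {A} -> B, starting
    # from {'A': 0, 'B': 0} one call to step() gives {'A': 1, 'B': 0} (B reads the *previous*
    # value of A) and a second call gives {'A': 1, 'B': 1}; unclamped variables with no
    # incoming clause are reset to 0.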
def fixpoint(self, clamping, steps=0):
"""
Computes the fixpoint with respect to a given :class:`caspo.core.clamping.Clamping`
Parameters
----------
clamping : :class:`caspo.core.clamping.Clamping`
The clamping with respect to the fixpoint is computed
steps : int
If greater than zero, a maximum number of steps is performed. Otherwise
it continues until reaching a fixpoint. Note that if no fixpoint exists,
e.g. a network with a negative feedback-loop, this will never end unless
you provide a maximum number of steps.
Returns
-------
dict
The key-value mapping describing the state of the logical network
"""
current = dict.fromkeys(self.variables(), 0)
updated = self.step(current, clamping)
steps -= 1
while current != updated and steps != 0:
current = updated
            updated = self.step(current, clamping)
            steps -= 1
return current
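    # Usage sketch (names are illustrative): state = net.fixpoint(clamping, steps=50) repeats
    # step() until the state stops changing or the 50-update budget is exhausted; leave steps
    # at 0 only when the network is known to stabilise (e.g. no negative feedback loop).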
def predictions(self, clampings, readouts, stimuli=None, inhibitors=None, nclampings=-1):
"""
Computes network predictions for the given iterable of clampings
Parameters
----------
clampings : iterable
Iterable over clampings
readouts : list[str]
List of readouts names
stimuli : Optional[list[str]]
List of stimuli names
inhibitors : Optional[list[str]]
List of inhibitors names
nclampings : Optional[int]
If greater than zero, it must be the number of clampings in the iterable. Otherwise,
clampings must implement the special method :func:`__len__`
Returns
-------
`pandas.DataFrame`_
DataFrame with network predictions for each clamping. If stimuli and inhibitors are given,
columns are included describing each clamping. Otherwise, columns correspond to readouts only.
.. _pandas.DataFrame: http://pandas.pydata.org/pandas-docs/stable/dsintro.html#dataframe
"""
stimuli, inhibitors = stimuli or [], inhibitors or []
cues = stimuli + inhibitors
nc = len(cues)
ns = len(stimuli)
predictions = np.zeros((nclampings if nclampings > 0 else len(clampings), nc+len(readouts)), dtype=np.int8)
for i, clamping in enumerate(clampings):
if nc > 0:
arr = clamping.to_array(cues)
arr[np.where(arr[:ns] == -1)[0]] = 0
arr[ns + np.where(arr[ns:] == -1)[0]] = 1
predictions[i, :nc] = arr
fixpoint = self.fixpoint(clamping)
for j, readout in enumerate(readouts):
predictions[i, nc+j] = fixpoint.get(readout, 0)
return pd.DataFrame(predictions, columns=np.concatenate([stimuli, [i+'i' for i in inhibitors], readouts]))
def mse(self, dataset):
"""
Returns the Mean Squared Error with respect to the given :class:`caspo.core.dataset.Dataset` object
Parameters
----------
dataset : :class:`caspo.core.dataset.Dataset`
Dataset to compute MSE
Returns
-------
float
Computed mean squared error
"""
clampings = dataset.clampings
readouts = dataset.readouts.columns
observations = dataset.readouts.values
pos = ~np.isnan(observations)
        return mean_squared_error(observations[pos], self.predictions(clampings, readouts).values[pos])
def variables(self):
"""
Returns variables in the logical network
Returns
-------
set[str]
Unique variables names
"""
variables = set()
for v in self.nodes():
if isinstance(v, Clause):
for l in v:
variables.add(l.variable)
else:
variables.add(v)
return variables
def formulas_iter(self):
"""
Iterates over all variable-clauses in the logical network
Yields
------
tuple[str,frozenset[caspo.core.clause.Clause]]
The next tuple of the form (variable, set of clauses) in the logical network.
"""
for var in (v for v in self.variables() if self.has_node(v)):
yield var, frozenset(self.predecessors(var))
def to_array(self, mappings):
"""
Converts the logical network to a binary array with respect to the given mappings from a
:class:`caspo.core.hypergraph.HyperGraph` object instance.
Parameters
----------
mappings : :class:`caspo.core.mapping.MappingList`
Mappings to create the binary array
Returns
-------
`numpy.ndarray`_
Binary array with respect to the given mappings describing the logical network.
Position `i` in the array will be 1 if the network has the mapping at position `i`
in the given list of mappings.
.. _numpy.ndarray: http://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html#numpy.ndarray
"""
arr = np.zeros(len(mappings), np.int8)
for i, (clause, target) in enumerate(mappings):
if self.has_edge(clause, target):
arr[i] = 1
return arr
def __plot__(self):
"""
Returns a `networkx.MultiDiGraph`_ ready for plotting.
Returns
-------
`networkx.MultiDiGraph`_
Network object instance ready for plotting
.. _networkx.MultiDiGraph: https://networkx.readthedocs.io/en/stable/reference/classes.multidigraph.html#networkx.MultiDiGraph
"""
graph = nx.MultiDiGraph()
n_gates = 1
for target, formula in self.formulas_iter():
graph.add_node(target)
for clause in formula:
if len(clause) > 1:
gate = 'gate-%s' % n_gates
n_gates += 1
graph.add_node(gate, gate=True)
graph.add_edge(gate, target, sign=1)
for var, sign in clause:
graph.add_node(var)
graph.add_edge(var, gate, sign=sign)
else:
for var, sign in clause:
graph.add_node(var)
graph.add_edge(var, target, sign=sign)
return graph
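# Minimal usage sketch (file name and data are hypothetical, shown for orientation only):
#
#     networks = LogicalNetworkList.from_csv("networks.csv")
#     print(len(networks), "networks,", len(networks.mappings), "distinct mappings")
#     df = networks.to_dataframe(networks=True, size=True)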
| gpl-3.0 |
iismd17/scikit-learn | examples/ensemble/plot_feature_transformation.py | 67 | 4285 | """
===============================================
Feature transformations with ensembles of trees
===============================================
Transform your features into a higher dimensional, sparse space. Then
train a linear model on these features.
First fit an ensemble of trees (totally random trees, a random
forest, or gradient boosted trees) on the training set. Then each leaf
of each tree in the ensemble is assigned a fixed arbitrary feature
index in a new feature space. These leaf indices are then encoded in a
one-hot fashion.
Each sample goes through the decisions of each tree of the ensemble
and ends up in one leaf per tree. The sample is encoded by setting
feature values for these leaves to 1 and the other feature values to 0.
The resulting transformer has then learned a supervised, sparse,
high-dimensional categorical embedding of the data.
"""
# Author: Tim Head <[email protected]>
#
# License: BSD 3 clause
import numpy as np
np.random.seed(10)
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import (RandomTreesEmbedding, RandomForestClassifier,
GradientBoostingClassifier)
from sklearn.preprocessing import OneHotEncoder
from sklearn.cross_validation import train_test_split
from sklearn.metrics import roc_curve
n_estimator = 10
X, y = make_classification(n_samples=80000)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5)
# It is important to train the ensemble of trees on a different subset
# of the training data than the linear regression model to avoid
# overfitting, in particular if the total number of leaves is
# similar to the number of training samples
X_train, X_train_lr, y_train, y_train_lr = train_test_split(X_train,
y_train,
test_size=0.5)
# Unsupervised transformation based on totally random trees
rt = RandomTreesEmbedding(max_depth=3, n_estimators=n_estimator)
rt_lm = LogisticRegression()
rt.fit(X_train, y_train)
rt_lm.fit(rt.transform(X_train_lr), y_train_lr)
y_pred_rt = rt_lm.predict_proba(rt.transform(X_test))[:, 1]
fpr_rt_lm, tpr_rt_lm, _ = roc_curve(y_test, y_pred_rt)
# Supervised transformation based on random forests
rf = RandomForestClassifier(max_depth=3, n_estimators=n_estimator)
rf_enc = OneHotEncoder()
rf_lm = LogisticRegression()
rf.fit(X_train, y_train)
rf_enc.fit(rf.apply(X_train))
rf_lm.fit(rf_enc.transform(rf.apply(X_train_lr)), y_train_lr)
y_pred_rf_lm = rf_lm.predict_proba(rf_enc.transform(rf.apply(X_test)))[:, 1]
fpr_rf_lm, tpr_rf_lm, _ = roc_curve(y_test, y_pred_rf_lm)
grd = GradientBoostingClassifier(n_estimators=n_estimator)
grd_enc = OneHotEncoder()
grd_lm = LogisticRegression()
grd.fit(X_train, y_train)
grd_enc.fit(grd.apply(X_train)[:, :, 0])
grd_lm.fit(grd_enc.transform(grd.apply(X_train_lr)[:, :, 0]), y_train_lr)
y_pred_grd_lm = grd_lm.predict_proba(
grd_enc.transform(grd.apply(X_test)[:, :, 0]))[:, 1]
fpr_grd_lm, tpr_grd_lm, _ = roc_curve(y_test, y_pred_grd_lm)
# The gradient boosted model by itself
y_pred_grd = grd.predict_proba(X_test)[:, 1]
fpr_grd, tpr_grd, _ = roc_curve(y_test, y_pred_grd)
# The random forest model by itself
y_pred_rf = rf.predict_proba(X_test)[:, 1]
fpr_rf, tpr_rf, _ = roc_curve(y_test, y_pred_rf)
plt.figure(1)
plt.plot([0, 1], [0, 1], 'k--')
plt.plot(fpr_rt_lm, tpr_rt_lm, label='RT + LR')
plt.plot(fpr_rf, tpr_rf, label='RF')
plt.plot(fpr_rf_lm, tpr_rf_lm, label='RF + LR')
plt.plot(fpr_grd, tpr_grd, label='GBT')
plt.plot(fpr_grd_lm, tpr_grd_lm, label='GBT + LR')
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
plt.title('ROC curve')
plt.legend(loc='best')
plt.show()
plt.figure(2)
plt.xlim(0, 0.2)
plt.ylim(0.8, 1)
plt.plot([0, 1], [0, 1], 'k--')
plt.plot(fpr_rt_lm, tpr_rt_lm, label='RT + LR')
plt.plot(fpr_rf, tpr_rf, label='RF')
plt.plot(fpr_rf_lm, tpr_rf_lm, label='RF + LR')
plt.plot(fpr_grd, tpr_grd, label='GBT')
plt.plot(fpr_grd_lm, tpr_grd_lm, label='GBT + LR')
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
plt.title('ROC curve (zoomed in at top left)')
plt.legend(loc='best')
plt.show() | bsd-3-clause |
SeonghoBaek/RealtimeCamera | FDN.py | 1 | 42078 | import tensorflow as tf
import numpy as np
import os
import cv2
from sklearn.utils import shuffle
import util
import layers
import openface
import redis
import socket
import array
import struct
import sys
import dlib
import argparse
import csv
import pickle
import shutil
LAMBDA = 1e-3
GAMMA = 1.0
CENTER_LOSS_ALPHA = 0.5
DISTANCE_MARGIN = 10.0
representation_dim = 128
input_width = 96
input_height = 96
scale_size = 112.0
num_channel = 3
num_patch = 4
batch_size = 16
test_size = 100
num_class_per_group = 69
num_epoch = 300
# Network Parameters
g_fc_layer1_dim = 1024
g_fc_layer2_dim = 512 # Final representation
g_fc_layer3_dim = 128
g_dense_block_layers = 4
g_dense_block_depth = 128
lstm_hidden_size_layer1 = 128
lstm_hidden_size_layer2 = 128
lstm_sequence_length = 96
lstm_representation_dim = 64
dlibDetector = dlib.get_frontal_face_detector()
align = openface.AlignDlib('openface/models/dlib/shape_predictor_68_face_landmarks.dat')
with tf.device('/device:CPU:0'):
ANCHOR = tf.placeholder(tf.float32, [None, 24, 24, 128])
bn_train = tf.placeholder(tf.bool)
keep_prob = tf.placeholder(tf.float32)
def get_center_loss(features, labels):
with tf.variable_scope('center', reuse=tf.AUTO_REUSE):
        len_features = features.get_shape()[1]
        # NOTE: 'centers' needs an explicit shape the first time the variable is created:
        # one center per class, zero-initialised and kept out of the gradient updates.
        centers = tf.get_variable('centers', [num_class_per_group, len_features], dtype=tf.float32,
                                  initializer=tf.constant_initializer(0.0), trainable=False)
labels = tf.reshape(labels, [-1])
centers_batch = tf.gather(centers, labels)
loss = tf.reduce_mean(tf.reduce_sum((features - centers_batch)**2, [1]))
# Center distance loss
shuffle_labels = tf.random.shuffle(labels)
shuffle_centers = tf.gather(centers, shuffle_labels)
distance_loss = DISTANCE_MARGIN / tf.reduce_mean(tf.reduce_sum((centers_batch - shuffle_centers)**2, [1]))
return loss, distance_loss
def update_centers(features, labels, alpha):
with tf.variable_scope('center', reuse=tf.AUTO_REUSE):
centers = tf.get_variable('centers')
labels = tf.reshape(labels, [-1]) # flatten
centers_batch = tf.gather(centers, labels) # Gather center tensor by labels value order
diff = centers_batch - features # L1 distance array between each of center and feature
unique_label, unique_idx, unique_count = tf.unique_with_counts(labels)
appear_times = tf.gather(unique_count, unique_idx)
appear_times = tf.reshape(appear_times, [-1, 1])
diff = diff / tf.cast((1 + appear_times), tf.float32)
diff = alpha * diff
centers = tf.scatter_sub(centers, labels, diff)
return centers
def fc_network(x, pretrained=False, weights=None, biases=None, activation='swish', scope='fc_network', bn_phaze=False, keep_prob=0.5):
with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
if activation == 'swish':
act_func = util.swish
elif activation == 'relu':
act_func = tf.nn.relu
else:
act_func = tf.nn.sigmoid
g_fc_layer1 = layers.fc(x, g_fc_layer1_dim, use_bias=False, scope='g_fc_layer1')
g_fc_layer1 = layers.batch_norm(g_fc_layer1, bn_phaze, scope='g_fc_layer1_bn')
g_fc_layer1 = act_func(g_fc_layer1)
g_fc_layer1 = tf.nn.dropout(g_fc_layer1, keep_prob=keep_prob)
g_fc_layer2 = layers.fc(g_fc_layer1, g_fc_layer2_dim, use_bias=False, scope='g_fc_layer2')
g_fc_layer2 = layers.batch_norm(g_fc_layer2, bn_phaze, scope='g_fc_layer2_bn')
g_fc_layer2 = act_func(g_fc_layer2)
g_fc_layer2 = tf.nn.dropout(g_fc_layer2, keep_prob=keep_prob)
g_fc_layer3 = layers.fc(g_fc_layer2, g_fc_layer3_dim, use_bias=False, scope='g_fc_layer3')
g_fc_layer3 = layers.batch_norm(g_fc_layer3, bn_phaze, scope='g_fc_layer3_bn')
g_fc_layer3 = act_func(g_fc_layer3)
g_fc_layer3 = tf.nn.dropout(g_fc_layer3, keep_prob=keep_prob)
return g_fc_layer3
def lstm_network(input_data, scope='lstm_network', forget_bias=1.0, keep_prob=0.5):
with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
lstm_fw_cell = tf.nn.rnn_cell.LSTMCell(lstm_representation_dim/2)
lstm_fw_cell = tf.nn.rnn_cell.DropoutWrapper(lstm_fw_cell, output_keep_prob=keep_prob)
lstm_bw_cell = tf.nn.rnn_cell.LSTMCell(lstm_representation_dim/2)
lstm_bw_cell = tf.nn.rnn_cell.DropoutWrapper(lstm_bw_cell, output_keep_prob=keep_prob)
_, states = tf.nn.bidirectional_dynamic_rnn(lstm_fw_cell, lstm_bw_cell, input_data, dtype=tf.float32)
#print(states)
#print(states[0])
#print(states[1])
states_concat = tf.concat([states[0].h, states[1].h], 1)
print('LSTM Representation Dimension: ' + str(states_concat.get_shape().as_list()))
return states_concat
def decoder_network(latent, anchor_layer=None, activation='swish', scope='g_decoder_network', bn_phaze=False):
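    """Decoder: maps a latent vector to a 96x96x3 image through a 6x6 reshape followed by four
    stride-2 deconvolution stages (6->12->24->48->96); when anchor_layer is given it is
    concatenated with the 24x24 feature map before the 48x48 upsampling stage."""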
with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
if activation == 'swish':
act_func = util.swish
elif activation == 'relu':
act_func = tf.nn.relu
else:
act_func = tf.nn.sigmoid
#l = tf.cond(bn_phaze, lambda: latent, lambda: make_multi_modal_noise(8))
l = tf.cond(bn_phaze, lambda: latent, lambda: latent)
l = layers.fc(l, 6*6*32, non_linear_fn=act_func)
print('decoder input:', str(latent.get_shape().as_list()))
l = tf.reshape(l, shape=[-1, 6, 6, 32])
print('deconv1:', str(l.get_shape().as_list()))
l = add_residual_block(l, filter_dims=[3, 3, g_dense_block_depth], num_layers=2,
act_func=act_func, bn_phaze=bn_phaze, use_residual=False, scope='block_0')
l = add_residual_block(l, filter_dims=[3, 3, g_dense_block_depth], num_layers=2,
act_func=act_func, bn_phaze=bn_phaze, use_residual=True, scope='block_0_1')
l = add_residual_block(l, filter_dims=[3, 3, g_dense_block_depth], num_layers=2,
act_func=act_func, bn_phaze=bn_phaze, use_residual=True, scope='block_0_2')
l = layers.batch_norm_conv(l, b_train=bn_phaze, scope='bn1')
l = act_func(l)
# 12 x 12
l = layers.deconv(l, b_size=batch_size, scope='g_dec_conv2', filter_dims=[3, 3, g_dense_block_depth],
stride_dims=[2, 2], padding='SAME', non_linear_fn=act_func)
print('deconv2:', str(l.get_shape().as_list()))
l = add_residual_block(l, filter_dims=[3, 3, g_dense_block_depth], num_layers=2,
act_func=act_func, bn_phaze=bn_phaze, use_residual=True,
scope='block_1', use_dilation=True)
l = add_residual_block(l, filter_dims=[3, 3, g_dense_block_depth], num_layers=2,
act_func=act_func, bn_phaze=bn_phaze, use_residual=True,
scope='block_1_1', use_dilation=True)
l = add_residual_block(l, filter_dims=[3, 3, g_dense_block_depth], num_layers=2,
act_func=act_func, bn_phaze=bn_phaze, use_residual=True,
scope='block_1_2', use_dilation=True)
l = layers.batch_norm_conv(l, b_train=bn_phaze, scope='bn2')
l = act_func(l)
# 24 x 24
l = layers.deconv(l, b_size=batch_size, scope='g_dec_conv3', filter_dims=[3, 3, g_dense_block_depth],
stride_dims=[2, 2], padding='SAME', non_linear_fn=act_func)
print('deconv3:', str(l.get_shape().as_list()))
l = add_residual_block(l, filter_dims=[3, 3, g_dense_block_depth], num_layers=2,
act_func=act_func, bn_phaze=bn_phaze, use_residual=True,
scope='block_2', use_dilation=True)
l = add_residual_block(l, filter_dims=[3, 3, g_dense_block_depth], num_layers=2,
act_func=act_func, bn_phaze=bn_phaze, use_residual=True,
scope='block_2_1', use_dilation=True)
l = add_residual_block(l, filter_dims=[3, 3, g_dense_block_depth], num_layers=2,
act_func=act_func, bn_phaze=bn_phaze, use_residual=True,
scope='block_2_2', use_dilation=True)
if anchor_layer is not None:
# anchor_layer = anchor_layer + tf.random_normal(shape=tf.shape(anchor_layer), mean=0.0, stddev=1.0, dtype=tf.float32)
l = tf.concat([l, anchor_layer], axis=3)
else:
l = layers.batch_norm_conv(l, b_train=bn_phaze, scope='bn3')
l = act_func(l)
# 48 x 48
l = layers.deconv(l, b_size=batch_size, scope='g_dec_conv4', filter_dims=[3, 3, g_dense_block_depth],
stride_dims=[2, 2], padding='SAME', non_linear_fn=act_func)
print('deconv4:', str(l.get_shape().as_list()))
l = add_residual_block(l, filter_dims=[3, 3, g_dense_block_depth], num_layers=2,
act_func=act_func, bn_phaze=bn_phaze, use_residual=True,
scope='block_3', use_dilation=True)
l = add_residual_block(l, filter_dims=[3, 3, g_dense_block_depth], num_layers=2,
act_func=act_func, bn_phaze=bn_phaze, use_residual=True,
scope='block_3_1', use_dilation=True)
l = add_residual_block(l, filter_dims=[3, 3, g_dense_block_depth], num_layers=2,
act_func=act_func, bn_phaze=bn_phaze, use_residual=True,
scope='block_3_2', use_dilation=True)
l = layers.batch_norm_conv(l, b_train=bn_phaze, scope='bn4')
l = act_func(l)
l = layers.self_attention(l, g_dense_block_depth, act_func=act_func)
# 96 x 96
l = layers.deconv(l, b_size=batch_size, scope='g_dec_conv5', filter_dims=[3, 3, g_dense_block_depth],
stride_dims=[2, 2], padding='SAME', non_linear_fn=act_func)
l = add_residual_block(l, filter_dims=[3, 3, g_dense_block_depth], num_layers=2,
act_func=act_func, bn_phaze=bn_phaze, use_residual=True,
scope='block_4', use_dilation=True)
l = add_residual_block(l, filter_dims=[3, 3, g_dense_block_depth], num_layers=2,
act_func=act_func, bn_phaze=bn_phaze, use_residual=True,
scope='block_4_1', use_dilation=True)
l = add_residual_block(l, filter_dims=[3, 3, g_dense_block_depth], num_layers=2,
act_func=act_func, bn_phaze=bn_phaze, use_residual=True,
scope='block_4_2', use_dilation=True)
l = layers.add_dense_transition_layer(l, filter_dims=[1, 1, 3], act_func=act_func,
scope='dense_transition_1', bn_phaze=bn_phaze, use_pool=False)
#l = act_func(l)
print('deconv5:', str(l.get_shape().as_list()))
return l
def latent_discriminator(input_data, activation='swish', scope='ldiscriminator', reuse=False, bn_phaze=False):
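    """Discriminator over 128-d latent codes: the code is reshaped to 4x4x8, passed through
    dense residual blocks and global average pooling, and scored with a single linear unit;
    returns (features, logits, tanh(logits))."""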
with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
#if reuse:
# tf.get_variable_scope().reuse_variables()
if activation == 'swish':
act_func = util.swish
elif activation == 'relu':
act_func = tf.nn.relu
elif activation == 'tanh':
act_func = tf.nn.tanh
else:
act_func = tf.nn.sigmoid
l = tf.reshape(input_data, shape=[-1, 4, 4, 8])
l = layers.conv(l, scope='conv1', filter_dims=[3, 3, g_dense_block_depth/2], stride_dims=[1, 1],
non_linear_fn=None, bias=False)
l = add_residual_dense_block(l, filter_dims=[3, 3, g_dense_block_depth/2], num_layers=3,
act_func=act_func, bn_phaze=bn_phaze, scope='block_0')
l = add_residual_dense_block(l, filter_dims=[3, 3, g_dense_block_depth/2], num_layers=3,
act_func=act_func, bn_phaze=bn_phaze, scope='block_1')
l = add_residual_dense_block(l, filter_dims=[3, 3, g_dense_block_depth/2], num_layers=3,
act_func=act_func, bn_phaze=bn_phaze, scope='block_2')
l = layers.global_avg_pool(l, representation_dim)
dc_final_layer = l
dc_output = layers.fc(dc_final_layer, scope='g_enc_z_fc', out_dim=1, non_linear_fn=None)
return dc_final_layer, dc_output, tf.nn.tanh(dc_output)
def discriminator(input_data, activation='swish', scope='discriminator', reuse=False, bn_phaze=False):
with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
#if reuse:
# tf.get_variable_scope().reuse_variables()
if activation == 'swish':
act_func = util.swish
elif activation == 'relu':
act_func = tf.nn.relu
elif activation == 'tanh':
act_func = tf.nn.tanh
else:
act_func = tf.nn.sigmoid
l = layers.conv(input_data, scope='conv1', filter_dims=[3, 3, g_dense_block_depth/2], stride_dims=[1, 1],
non_linear_fn=None, bias=False, sn=True)
l = add_residual_dense_block(l, filter_dims=[3, 3, g_dense_block_depth/2], num_layers=3,
act_func=act_func, bn_phaze=bn_phaze, scope='block_0', sn=True)
l = tf.nn.avg_pool(l, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
l = add_residual_dense_block(l, filter_dims=[3, 3, g_dense_block_depth/2], num_layers=3,
act_func=act_func, bn_phaze=bn_phaze, scope='block_1', sn=True)
l = tf.nn.avg_pool(l, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
l = add_residual_dense_block(l, filter_dims=[3, 3, g_dense_block_depth/2], num_layers=3,
act_func=act_func, bn_phaze=bn_phaze, scope='block_2', sn=True)
#l = tf.nn.avg_pool(l, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
#l = add_residual_dense_block(l, filter_dims=[3, 3, g_dense_block_depth/2], num_layers=3,
# act_func=act_func, bn_phaze=bn_phaze, scope='block_3')
#l = tf.nn.avg_pool(l, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
#l = add_residual_dense_block(l, filter_dims=[3, 3, g_dense_block_depth/2], num_layers=3,
# act_func=act_func, bn_phaze=bn_phaze, scope='block_4')
#l = add_residual_dense_block(l, filter_dims=[3, 3, g_dense_block_depth/2], num_layers=3,
# act_func=act_func, bn_phaze=bn_phaze, scope='block_5')
#l = tf.nn.avg_pool(l, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
#l = add_residual_dense_block(l, filter_dims=[3, 3, g_dense_block_depth], num_layers=3,
# act_func=act_func, bn_phaze=bn_phaze, scope='block_6')
#l = add_residual_dense_block(l, filter_dims=[3, 3, g_dense_block_depth], num_layers=3,
# act_func=act_func, bn_phaze=bn_phaze, scope='block_7')
# dc_final_layer = batch_norm_conv(last_dense_layer, b_train=bn_phaze, scope='last_dense_layer')
l = layers.global_avg_pool(l, representation_dim)
dc_final_layer = l
dc_output = layers.fc(dc_final_layer, scope='g_enc_z_fc', out_dim=1, non_linear_fn=None)
return dc_final_layer, dc_output, tf.sigmoid(dc_output)
def add_residual_block(in_layer, filter_dims, num_layers, act_func=tf.nn.relu,
bn_phaze=False, use_residual=True, scope='residual_block', use_dilation=False, sn=False):
with tf.variable_scope(scope):
l = in_layer
input_dims = in_layer.get_shape().as_list()
num_channel_in = input_dims[-1]
num_channel_out = input_dims[-1]
dilation = [1, 1, 1, 1]
if use_dilation == True:
dilation = [1, 2, 2, 1]
for i in range(num_layers):
l = layers.add_residual_layer(l, filter_dims=filter_dims, act_func=act_func, bn_phaze=bn_phaze,
scope='layer' + str(i), dilation=dilation, sn=sn)
if use_residual is True:
l = tf.add(l, in_layer)
return l
def add_residual_dense_block(in_layer, filter_dims, num_layers, act_func=tf.nn.relu,
bn_phaze=False, scope='residual_dense_block', use_dilation=False, sn=False):
with tf.variable_scope(scope):
l = in_layer
input_dims = in_layer.get_shape().as_list()
num_channel_in = input_dims[-1]
num_channel_out = filter_dims[-1]
dilation = [1, 1, 1, 1]
if use_dilation == True:
dilation = [1, 2, 2, 1]
for i in range(num_layers):
l = layers.add_dense_layer(l, filter_dims=filter_dims, act_func=act_func, bn_phaze=bn_phaze,
scope='layer' + str(i), dilation=dilation, sn=sn)
l = layers.add_dense_transition_layer(l, filter_dims=[1, 1, num_channel_out], act_func=act_func,
scope='dense_transition_1', bn_phaze=bn_phaze, use_pool=False, sn=sn)
l = tf.add(l, in_layer)
return l
def encoder_network(x, activation='relu', scope='encoder_network', reuse=False, bn_phaze=False, keep_prob=0.5):
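    """Encoder: dense/residual blocks with four 2x2 average poolings (96->48->24->12->6),
    returning (last_dense_layer, scale_layer, l_share); l_share is the 24x24 feature map that
    can be reused as the decoder's anchor layer."""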
with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
#if reuse:
# tf.get_variable_scope().reuse_variables()
if activation == 'swish':
act_func = util.swish
elif activation == 'relu':
act_func = tf.nn.relu
else:
act_func = tf.nn.sigmoid
# [96 x 96]
l = layers.conv(x, scope='conv1', filter_dims=[3, 3, g_dense_block_depth], stride_dims=[1, 1],
dilation=[1, 2, 2, 1], non_linear_fn=None, bias=False)
l = add_residual_dense_block(l, filter_dims=[3, 3, g_dense_block_depth], num_layers=2,
act_func=act_func, bn_phaze=bn_phaze, scope='block_0', use_dilation=True)
l = add_residual_dense_block(l, filter_dims=[3, 3, g_dense_block_depth], num_layers=2,
act_func=act_func, bn_phaze=bn_phaze, scope='block_1', use_dilation=True)
l = layers.batch_norm_conv(l, b_train=bn_phaze, scope='bn1')
l = act_func(l)
# [48 x 48]
l = tf.nn.avg_pool(l, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
l = add_residual_dense_block(l, filter_dims=[3, 3, g_dense_block_depth], num_layers=2,
act_func=act_func, bn_phaze=bn_phaze, scope='block_2', use_dilation=True)
l = add_residual_dense_block(l, filter_dims=[3, 3, g_dense_block_depth], num_layers=2,
act_func=act_func, bn_phaze=bn_phaze, scope='block_3', use_dilation=True)
l = layers.batch_norm_conv(l, b_train=bn_phaze, scope='bn2')
l = act_func(l)
# [24 x 24]
l = tf.nn.avg_pool(l, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
l = add_residual_dense_block(l, filter_dims=[3, 3, g_dense_block_depth], num_layers=2,
act_func=act_func, bn_phaze=bn_phaze, scope='block_4')
l = add_residual_dense_block(l, filter_dims=[3, 3, g_dense_block_depth], num_layers=2,
act_func=act_func, bn_phaze=bn_phaze, scope='block_5')
l = layers.batch_norm_conv(l, b_train=bn_phaze, scope='bn3')
l = act_func(l)
l_share = l
# [12 x 12]
l = tf.nn.avg_pool(l, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
l = add_residual_dense_block(l, filter_dims=[3, 3, g_dense_block_depth], num_layers=2,
act_func=act_func, bn_phaze=bn_phaze, scope='block_6')
l = add_residual_dense_block(l, filter_dims=[3, 3, g_dense_block_depth], num_layers=2,
act_func=act_func, bn_phaze=bn_phaze, scope='block_7')
l = layers.batch_norm_conv(l, b_train=bn_phaze, scope='bn4')
l = act_func(l)
# [6 x 6]
l = tf.nn.avg_pool(l, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
l = add_residual_dense_block(l, filter_dims=[3, 3, g_dense_block_depth], num_layers=2,
act_func=act_func, bn_phaze=bn_phaze, scope='block_8')
l = add_residual_dense_block(l, filter_dims=[3, 3, g_dense_block_depth], num_layers=2,
act_func=act_func, bn_phaze=bn_phaze, scope='block_9')
with tf.variable_scope('dense_block_last'):
scale_layer = layers.add_dense_transition_layer(l, filter_dims=[1, 1, representation_dim],
act_func=act_func,
scope='dense_transition_1', bn_phaze=bn_phaze,
use_pool=False)
last_dense_layer = layers.add_dense_transition_layer(l, filter_dims=[1, 1, representation_dim],
act_func=act_func,
scope='dense_transition_2', bn_phaze=bn_phaze,
use_pool=False)
scale_layer = act_func(scale_layer)
last_dense_layer = act_func(last_dense_layer)
return last_dense_layer, scale_layer, l_share
def load_images_patch(filename, b_align=False):
images = []
#lstm_images = []
if b_align == True:
img = get_align_image(filename)
        if len(img) == 0:
return []
else:
jpg_img = cv2.imread(filename)
img = cv2.cvtColor(jpg_img, cv2.COLOR_BGR2RGB)
#grey_img = cv2.cvtColor(jpg_img, cv2.COLOR_BGR2GRAY)
if img is not None:
img = np.array(img)
images.append(img)
#lstm_image = cv2.resize(grey_img, dsize=(lstm_sequence_length, lstm_sequence_length))
#lstm_images.append(lstm_image)
        img = cv2.resize(img, dsize=(int(scale_size), int(scale_size)))
#grey_img = cv2.resize(grey_img, dsize=(scale_size, scale_size))
dy = np.random.random_integers(low=1, high=img.shape[0]-input_height, size=num_patch-1)
dx = np.random.random_integers(low=1, high=img.shape[1]-input_width, size=num_patch-1)
window = zip(dy, dx)
for i in range(len(window)):
croped = img[window[i][0]:window[i][0]+input_height, window[i][1]:window[i][1]+input_width].copy()
#cv2.imwrite(filename + '_crop_' + str(i) + '.jpg', croped)
images.append(croped)
#croped_grey = grey_img[window[i][0]:window[i][0] + input_height,
# window[i][1]:window[i][1] + input_width].copy()
#lstm_images.append(croped_grey)
images = np.array(images)
#lstm_images = np.array(lstm_images)
#return images, lstm_images
return images
def load_images_from_folder(folder, use_augmentation=False):
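    """Loads every image under `folder` as RGB scaled to [0, 1]; with use_augmentation=True the
    image is first resized so its shorter side is at least scale_size and num_patch random
    input_height x input_width crops are returned instead of the full frame."""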
images = []
for filename in os.listdir(folder):
fullname = os.path.join(folder, filename).replace("\\", "/")
jpg_img = cv2.imread(fullname)
img = cv2.cvtColor(jpg_img, cv2.COLOR_BGR2RGB) # To RGB format
#grey_img = cv2.cvtColor(jpg_img, cv2.COLOR_BGR2GRAY)
if img is not None:
img = np.array(img)
#grey_img = np.array(grey_img)
if use_augmentation == True:
w = img.shape[1]
h = img.shape[0]
if h > w:
scale = scale_size / w
else:
scale = scale_size / h
#print('w: ' + str(w) + ', h: ' + str(h) + ', scale: ' + str(scale))
if scale > 1.0:
img = cv2.resize(img, dsize=(0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_CUBIC)
#grey_img = cv2.resize(grey_img, dsize=(0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_CUBIC)
else:
img = cv2.resize(img, dsize=(0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_AREA)
#grey_img = cv2.resize(grey_img, dsize=(0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_AREA)
dy = np.random.random_integers(low=1, high=img.shape[0]-input_height, size=num_patch)
dx = np.random.random_integers(low=1, high=img.shape[1]-input_width, size=num_patch)
window = zip(dy, dx)
for i in range(len(window)):
croped = img[window[i][0]:window[i][0]+input_height, window[i][1]:window[i][1]+input_width].copy()
croped = croped / 255.0
#cv2.imwrite(filename + '_crop_' + str(i) + '.jpg', croped)
images.append(croped)
else:
img = img / 255.0
images.append(img)
return np.array(images)
def get_align_image(img_file_path):
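    """Runs the dlib frontal face detector on the image and returns an openface-aligned RGB face
    crop of size input_width; returns an empty list when no face box is found."""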
bgrImg = cv2.imread(img_file_path)
rgbImg = cv2.cvtColor(bgrImg, cv2.COLOR_BGR2RGB)
#print('Dlib bbox detect')
bbs = dlibDetector(rgbImg, 1)
if len(bbs) == 0:
#print('No bbox')
        return []  # no face found; keep a single return value like the success path
alignedFace = align.align(input_width, rgbImg, bbs[0], landmarkIndices=[8, 36, 45], skipMulti=True)
#alignedFace = np.array(alignedFace).reshape(-1, input_width, input_height, num_channel)
return alignedFace # RGB format
def get_feature_matching_loss(value, target, type='l1', gamma=1.0):
if type == 'rmse':
loss = tf.sqrt(tf.reduce_mean(tf.square(tf.subtract(target, value))))
elif type == 'cross-entropy':
eps = 1e-10
loss = tf.reduce_mean(-1 * target * tf.log(value + eps) - 1 * (1 - target) * tf.log(1 - value + eps))
elif type == 'l1':
loss = tf.reduce_mean(tf.abs(tf.subtract(target, value)))
elif type == 'l2':
#loss = tf.reduce_mean(tf.square(tf.subtract(target, value)))
loss = tf.reduce_mean(tf.sqrt(tf.reduce_sum(tf.square(tf.subtract(target, value)))))
return gamma * loss
def get_discriminator_loss(real, fake, type='wgan', gamma=1.0):
if type == 'wgan':
# wgan loss
d_loss_real = tf.reduce_mean(real)
d_loss_fake = tf.reduce_mean(fake)
# W Distant: f(real) - f(fake). Maximizing W Distant.
return gamma * (d_loss_fake - d_loss_real), d_loss_real, d_loss_fake
elif type == 'ce':
# cross entropy
d_loss_real = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(logits=real, labels=tf.ones_like(real)))
d_loss_fake = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(logits=fake, labels=tf.zeros_like(fake)))
return gamma * (d_loss_fake + d_loss_real), d_loss_real, d_loss_fake
elif type == 'hinge':
d_loss_real = tf.reduce_mean(tf.nn.relu(1.0 - real))
d_loss_fake = tf.reduce_mean(tf.nn.relu(1.0 + fake))
return gamma * (d_loss_fake + d_loss_real), d_loss_real, d_loss_fake
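# For reference, the 'hinge' branch above implements
#   L_D = E[relu(1 - D(real))] + E[relu(1 + D(fake))]
# and the matching generator objective used in train() is L_G = -E[D(fake)] (gan_g_loss).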
def get_residual_loss(value, target, type='l1', gamma=1.0):
if type == 'rmse':
loss = tf.sqrt(tf.reduce_mean(tf.square(tf.subtract(target, value))))
elif type == 'cross-entropy':
eps = 1e-10
loss = tf.reduce_mean(-1 * target * tf.log(value + eps) - 1 * (1 - target) * tf.log(1 - value + eps))
elif type == 'l1':
loss = tf.reduce_mean(tf.abs(tf.subtract(target, value)))
elif type == 'l2':
loss = tf.reduce_mean(tf.square(tf.subtract(target, value)))
loss = gamma * loss
return loss
def make_multi_modal_noise(num_mode=8):
noise = tf.random_normal(shape=[batch_size, 16], mean=0.0, stddev=1.0, dtype=tf.float32)
for i in range(num_mode-1):
n = tf.random_normal(shape=[batch_size, 16], mean=0.0, stddev=1.0, dtype=tf.float32)
noise = tf.concat([noise, n], axis=1)
return noise
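# make_multi_modal_noise(8) yields a [batch_size, 8 * 16] = [batch_size, 128] tensor of
# independent standard-normal draws; it serves as the "real" latent sample fed to the latent
# discriminator so that the encoder output is pushed towards this prior.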
def train(model_path):
trX = []
trY = []
dir_list = os.listdir(imgs_dirname)
dir_list.sort(key=str.lower)
one_hot_length = len(os.listdir(imgs_dirname))
with tf.device('/device:CPU:0'):
for idx, labelname in enumerate(dir_list):
imgs_list = load_images_from_folder(os.path.join(imgs_dirname, labelname))
imgs_list = shuffle(imgs_list)
label = np.zeros(one_hot_length)
label[idx] += 1
print('label:', labelname, label)
for idx2, img in enumerate(imgs_list):
trY.append(label)
'''
if idx2 < len(imgs_list) * 0.2:
# SpecAugment
w = np.random.randint(len(img)/10) # Max 10% width
h = np.random.randint(len(img) - w + 1)
img[h:h + w] = [[0, 0, 0]]
img = np.transpose(img, [1, 0, 2])
w = np.random.randint(len(img)/10) # Max 10% width
h = np.random.randint(len(img) - w + 1)
img[h:h + w] = [[0, 0, 0]]
img = np.transpose(img, [1, 0, 2])
#cv2.imwrite(labelname + str(idx2) + '.jpg', img)
'''
trX.append(img)
trX, trY = shuffle(trX, trY)
trX = np.array(trX)
trY = np.array(trY)
trX = trX.reshape(-1, input_height, input_width, num_channel)
X = tf.placeholder(tf.float32, [None, input_height, input_width, num_channel])
#Y = tf.placeholder(tf.float32, [None, num_class_per_group])
# Network setup
cnn_representation, _, anchor_layer = encoder_network(X, activation='swish', bn_phaze=bn_train, scope='encoder')
print('CNN Output Tensor Dimension: ' + str(cnn_representation.get_shape().as_list()))
cnn_representation = layers.global_avg_pool(cnn_representation, representation_dim, scope='encoder')
print('CNN Representation Dimension: ' + str(cnn_representation.get_shape().as_list()))
latent_fake = cnn_representation
with tf.device('/device:GPU:1'):
#decoder_input = make_multi_modal_noise(representation, num_mode=8)
latent_real = make_multi_modal_noise(num_mode=8)
X_fake = decoder_network(latent=cnn_representation, anchor_layer=None, activation='swish', scope='decoder', bn_phaze=bn_train)
p_feature, p_logit, p_prob = latent_discriminator(latent_real, activation='swish', scope='discriminator', bn_phaze=bn_train)
n_feature, n_logit, n_prob = latent_discriminator(latent_fake, activation='swish', scope='discriminator', bn_phaze=bn_train)
# Trainable variable lists
d_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='discriminator')
encoder_var = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='encoder')
decoder_var = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='decoder')
generator_vars = encoder_var + decoder_var
gan_g_vars = encoder_var
with tf.device('/device:GPU:1'):
residual_loss = get_residual_loss(X, X_fake, type='l1', gamma=1.0)
feature_matching_loss = get_feature_matching_loss(p_feature, n_feature, type='l1', gamma=1.0)
        # Non-saturating generator loss: maximise the discriminator's score on generated latents
        # (the sigmoid cross-entropy variant is kept commented out below for reference).
gan_g_loss = -tf.reduce_mean(n_prob)
#gan_g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=n_logit, labels=tf.ones_like(n_logit)))
#discriminator_loss, loss_real, loss_fake = get_discriminator_loss(p_prob, n_prob, type='wgan', gamma=1.0)
discriminator_loss, loss_real, loss_fake = get_discriminator_loss(p_prob, n_prob, type='hinge', gamma=1.0)
# training operation
d_optimizer = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(discriminator_loss, var_list=d_vars)
g_optimizer = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(residual_loss)
gan_g_optimzier = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(gan_g_loss, var_list=gan_g_vars)
f_optimizer = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(feature_matching_loss, var_list=gan_g_vars)
# Launch the graph in a session
config = tf.ConfigProto(allow_soft_placement=True)
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
sess.run(tf.global_variables_initializer())
try:
saver = tf.train.Saver()
saver.restore(sess, model_path)
print('Model loaded')
except:
print('Start New Training. Wait ...')
num_itr = 0
training_batch = zip(range(0, len(trX), batch_size),
range(batch_size, len(trX) + 1, batch_size))
for i in range(num_epoch):
trX, trY = shuffle(trX, trY)
for start, end in training_batch:
with tf.device('/device:CPU:0'):
style_trX = shuffle(trX[start:end])
#style_trX = trX[start:end]
anchor, latent = sess.run([anchor_layer, cnn_representation], feed_dict={X: style_trX, bn_train: True, keep_prob: 0.5})
_, r, fake = sess.run(
[g_optimizer, residual_loss, X_fake],
feed_dict={X: trX[start:end], ANCHOR: anchor,
bn_train: True,
keep_prob: 0.5})
'''
idx = 0
for sample in fake:
#sample = fake[0]
sample = cv2.cvtColor(sample, cv2.COLOR_RGB2BGR)
sample = cv2.resize(sample, dsize=(0, 0), fx=4.0, fy=4.0, interpolation=cv2.INTER_CUBIC)
sample = cv2.resize(sample, dsize=(0, 0), fx=0.25, fy=0.25, interpolation=cv2.INTER_AREA)
cv2.imwrite('sample' + str(num_itr) + '_' + str(idx) + '.jpg', sample)
idx = idx + 1
'''
sample = fake[0] * 255.0
sample = cv2.cvtColor(sample, cv2.COLOR_RGB2BGR)
#sample = cv2.resize(sample, dsize=(0, 0), fx=4.0, fy=4.0, interpolation=cv2.INTER_CUBIC)
#sample = cv2.resize(sample, dsize=(0, 0), fx=0.25, fy=0.25, interpolation=cv2.INTER_AREA)
cv2.imwrite('training_status/sample' + str(num_itr) + '.jpg', sample)
_, d = sess.run(
[d_optimizer, discriminator_loss],
feed_dict={X: trX[start:end], ANCHOR: anchor,
bn_train: True,
keep_prob: 0.5})
#trX[start:end], trY[start:end] = shuffle(trX[start:end], trY[start:end])
#_, f = sess.run(
# [f_optimizer, feature_matching_loss],
# feed_dict={X: trX[start:end], Y: trY[start:end], ANCHOR: anchor,
# bn_train: True,
# keep_prob: 0.5})
_, g = sess.run(
[gan_g_optimzier, gan_g_loss],
feed_dict={X: trX[start:end],
bn_train: True,
keep_prob: 0.5})
num_itr = num_itr + 1
if num_itr % 10 == 0:
print('epoch #' + str(i) + ', itr #' + str(num_itr))
print(' - residual loss: ' + str(r))
print(' - discriminator loss: ' + str(d))
print(' - generator loss: ' + str(g))
#print(' - feature matching loss: ' + str(f))
try:
saver.save(sess, model_path)
except:
print('Save failed')
def test(model_path, test_image_dir):
trX = []
trY = []
test_output_dir = 'gan'
if os.path.exists(test_output_dir) == False:
os.mkdir(test_output_dir)
with tf.device('/device:CPU:0'):
test_image_dir_list = os.listdir(test_image_dir)
for idx, labelname in enumerate(test_image_dir_list):
if os.path.isdir(os.path.join(test_image_dir, labelname).replace("\\", "/")) is False:
continue
if os.path.exists(os.path.join(test_output_dir, labelname)) is False:
os.mkdir(os.path.join(test_output_dir, labelname))
imgs_list = load_images_from_folder(os.path.join(test_image_dir, labelname))
for idx2, img in enumerate(imgs_list):
trY.append(os.path.join(test_output_dir, labelname))
trX.append(img)
trX = np.array(trX)
trY = np.array(trY)
trX = trX.reshape(-1, input_height, input_width, num_channel)
# Network setup
X = tf.placeholder(tf.float32, [None, input_height, input_width, num_channel])
cnn_representation, _, anchor_layer = encoder_network(X, activation='swish', bn_phaze=bn_train, scope='encoder')
print('CNN Output Tensor Dimension: ' + str(cnn_representation.get_shape().as_list()))
cnn_representation = layers.global_avg_pool(cnn_representation, representation_dim, scope='encoder')
print('CNN Representation Dimension: ' + str(cnn_representation.get_shape().as_list()))
latent_fake = cnn_representation
with tf.device('/device:GPU:1'):
# decoder_input = make_multi_modal_noise(representation, num_mode=8)
latent_real = make_multi_modal_noise(num_mode=8)
X_fake = decoder_network(latent=cnn_representation, anchor_layer=None, activation='swish', scope='decoder',
bn_phaze=bn_train)
p_feature, p_logit, p_prob = latent_discriminator(latent_real, activation='relu', scope='discriminator',
bn_phaze=bn_train)
n_feature, n_logit, n_prob = latent_discriminator(latent_fake, activation='relu', scope='discriminator',
bn_phaze=bn_train)
# Trainable variable lists
d_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='discriminator')
encoder_var = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='encoder')
decoder_var = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='decoder')
generator_vars = encoder_var + decoder_var
gan_g_vars = encoder_var
with tf.device('/device:GPU:1'):
residual_loss = get_residual_loss(X, X_fake, type='l1', gamma=1.0)
feature_matching_loss = get_feature_matching_loss(p_feature, n_feature, type='l2', gamma=1.0)
        # Non-saturating generator loss (cross-entropy variant kept commented out below).
gan_g_loss = -tf.reduce_mean(n_prob)
# gan_g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=n_logit, labels=tf.ones_like(n_logit)))
# discriminator_loss, loss_real, loss_fake = get_discriminator_loss(p_prob, n_prob, type='wgan', gamma=1.0)
discriminator_loss, loss_real, loss_fake = get_discriminator_loss(p_prob, n_prob, type='hinge', gamma=1.0)
# training operation
d_optimizer = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(discriminator_loss, var_list=d_vars)
g_optimizer = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(residual_loss)
gan_g_optimzier = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(gan_g_loss, var_list=gan_g_vars)
f_optimizer = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(feature_matching_loss, var_list=gan_g_vars)
# Launch the graph in a session
with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
sess.run(tf.global_variables_initializer())
try:
saver = tf.train.Saver()
saver.restore(sess, model_path)
print('Model loaded')
except:
print('Model loading failed')
return
i = 0
for img in trX:
#latent, anchor = sess.run([latent_real, anchor_layer], feed_dict={X: [img], bn_train: False, keep_prob: 1.0})
fake = sess.run(
[X_fake],
feed_dict={X: [img],
bn_train: False,
keep_prob: 1.0})
sample = fake[0][0] * 255
#print(sample.shape)
sample = cv2.cvtColor(sample, cv2.COLOR_RGB2BGR)
sample = cv2.resize(sample, dsize=(0, 0), fx=4.0, fy=4.0, interpolation=cv2.INTER_CUBIC)
sample = cv2.resize(sample, dsize=(0, 0), fx=0.25, fy=0.25, interpolation=cv2.INTER_AREA)
cv2.imwrite(trY[i] + '/' + str(i) + '.jpg', sample)
i = i + 1
def findCosineDistance(source_representation, test_representation):
a = np.matmul(np.transpose(source_representation), test_representation)
b = np.sum(np.multiply(source_representation, source_representation))
c = np.sum(np.multiply(test_representation, test_representation))
return 1 - (a / (np.sqrt(b) * np.sqrt(c)))
def findEuclideanDistance(source_representation, test_representation):
euclidean_distance = source_representation - test_representation
euclidean_distance = np.sum(np.multiply(euclidean_distance, euclidean_distance))
euclidean_distance = np.sqrt(euclidean_distance)
return euclidean_distance
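# Usage sketch (the 0.4 threshold is purely illustrative, not a tuned value):
#   d = findCosineDistance(embedding_a, embedding_b)
#   same_identity = d < 0.4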
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--mode', type=str, help='train/test', default='train')
parser.add_argument('--model_path', type=str, help='model check point file path', default='./model/m.ckpt')
parser.add_argument('--train_data', type=str, help='training data directory', default='input')
parser.add_argument('--test_data', type=str, help='test data directory', default='test')
args = parser.parse_args()
if args.mode == 'train':
model_path = args.model_path
imgs_dirname = args.train_data
num_class_per_group = len(os.listdir(imgs_dirname))
train(model_path)
else:
model_path = args.model_path
test_data = args.test_data
batch_size = 1
test(model_path, test_data)
| apache-2.0 |
tody411/InverseToon | inversetoon/batch/plot_isophote.py | 1 | 1055 | # -*- coding: utf-8 -*-
## @package inversetoon.batch.plot_isophote
#
# Isophote plotting functions.
# @author tody
# @date 2015/07/31
import matplotlib.pyplot as plt
from inversetoon.datasets.isophote import loadData
from inversetoon.batch.batch import isophoteDataSetBatch
from inversetoon.plot.isophote import ScenePlotter
from inversetoon.plot.window import showMaximize
def datasetFunc(data_name):
scene = loadData(data_name)
plotter = ScenePlotter(scene, plt)
silhouette_plotter = plotter.silhouettePlotter()
plt.title('Isophote Scene')
plotter.showNormalImage()
#silhouette_plotter.plotCVs()
silhouette_plotter.plotNormalVectors()
silhouette_plotter.plotCurves(color=(0.1, 0.5, 0.1))
for isophote_plotter in plotter.isophotePlotters():
#isophote_plotter.plotCVs()
isophote_plotter.plotCurves(color=(0.1, 0.1, 0.3))
isophote_plotter.plotNormalVectors(color=(0.1, 0.1, 0.3), step=5)
showMaximize()
if __name__ == '__main__':
isophoteDataSetBatch(datasetFunc) | mit |
aabadie/scikit-learn | examples/cluster/plot_birch_vs_minibatchkmeans.py | 333 | 3694 | """
=================================
Compare BIRCH and MiniBatchKMeans
=================================
This example compares the timing of Birch (with and without the global
clustering step) and MiniBatchKMeans on a synthetic dataset having
100,000 samples and 2 features generated using make_blobs.
If ``n_clusters`` is set to None, the data is reduced from 100,000
samples to a set of 158 clusters. This can be viewed as a preprocessing
step before the final (global) clustering step that further reduces these
158 clusters to 100 clusters.
"""
# Authors: Manoj Kumar <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
print(__doc__)
from itertools import cycle
from time import time
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import Birch, MiniBatchKMeans
from sklearn.datasets.samples_generator import make_blobs
# Generate centers for the blobs so that it forms a 10 X 10 grid.
xx = np.linspace(-22, 22, 10)
yy = np.linspace(-22, 22, 10)
xx, yy = np.meshgrid(xx, yy)
n_centres = np.hstack((np.ravel(xx)[:, np.newaxis],
np.ravel(yy)[:, np.newaxis]))
# Generate blobs to do a comparison between MiniBatchKMeans and Birch.
X, y = make_blobs(n_samples=100000, centers=n_centres, random_state=0)
# Use all colors that matplotlib provides by default.
colors_ = cycle(colors.cnames.keys())
fig = plt.figure(figsize=(12, 4))
fig.subplots_adjust(left=0.04, right=0.98, bottom=0.1, top=0.9)
# Compute clustering with Birch with and without the final clustering step
# and plot.
birch_models = [Birch(threshold=1.7, n_clusters=None),
Birch(threshold=1.7, n_clusters=100)]
final_step = ['without global clustering', 'with global clustering']
for ind, (birch_model, info) in enumerate(zip(birch_models, final_step)):
t = time()
birch_model.fit(X)
time_ = time() - t
print("Birch %s as the final step took %0.2f seconds" % (
info, (time() - t)))
# Plot result
labels = birch_model.labels_
centroids = birch_model.subcluster_centers_
n_clusters = np.unique(labels).size
print("n_clusters : %d" % n_clusters)
ax = fig.add_subplot(1, 3, ind + 1)
for this_centroid, k, col in zip(centroids, range(n_clusters), colors_):
mask = labels == k
ax.plot(X[mask, 0], X[mask, 1], 'w',
markerfacecolor=col, marker='.')
if birch_model.n_clusters is None:
ax.plot(this_centroid[0], this_centroid[1], '+', markerfacecolor=col,
markeredgecolor='k', markersize=5)
ax.set_ylim([-25, 25])
ax.set_xlim([-25, 25])
ax.set_autoscaley_on(False)
ax.set_title('Birch %s' % info)
# Compute clustering with MiniBatchKMeans.
mbk = MiniBatchKMeans(init='k-means++', n_clusters=100, batch_size=100,
n_init=10, max_no_improvement=10, verbose=0,
random_state=0)
t0 = time()
mbk.fit(X)
t_mini_batch = time() - t0
print("Time taken to run MiniBatchKMeans %0.2f seconds" % t_mini_batch)
mbk_means_labels_unique = np.unique(mbk.labels_)
ax = fig.add_subplot(1, 3, 3)
for this_centroid, k, col in zip(mbk.cluster_centers_,
range(n_clusters), colors_):
mask = mbk.labels_ == k
ax.plot(X[mask, 0], X[mask, 1], 'w', markerfacecolor=col, marker='.')
ax.plot(this_centroid[0], this_centroid[1], '+', markeredgecolor='k',
markersize=5)
ax.set_xlim([-25, 25])
ax.set_ylim([-25, 25])
ax.set_title("MiniBatchKMeans")
ax.set_autoscaley_on(False)
plt.show()
| bsd-3-clause |
rscalzo/pyBoloSN | SNMCMC/SNMCMC.py | 1 | 18762 | #!/usr/bin/env python
"""
RS 2013/07/09: Monte Carlo Markov Chain base class for SN modeling
"""
# ----------------------------------------------------------------------------
# Dependencies
# ----------------------------------------------------------------------------
import acor
import emcee
import numpy as np
import matplotlib.pyplot as pypl
import cPickle as pickle
from scipy.stats import scoreatpercentile
from scipy.special import gammaincc, gammaln
from ..Utils import VerboseMsg, BadInputError
# ----------------------------------------------------------------------------
# Class definitions
# ----------------------------------------------------------------------------
def prob_contour_plot(x, y, xmin, xmax, nxbins, ymin, ymax, nybins):
"""Draws a contour plot of a probability density"""
# Bin up the histogram
data = np.array([x, y], dtype=np.float).T
Z, edges = np.histogramdd(data, bins=(nxbins, nybins),
range=((xmin, xmax), (ymin, ymax)))
Z = Z.T/Z.sum()
# Levels for contours as in Scalzo+ 2010, 2012
plevels = np.array([0.003, 0.01, 0.05, 0.10, 0.32, 1.0])
Zlevels = 0.0*plevels
colors = ['blue', 'cyan', 'lightgreen', 'yellow', 'red']
# Calculate the proper contour levels so that they enclose the stated
# integrated probability.
Zcl = 0.0
for zi in sorted(Z.ravel()):
Zcl += zi
Zlevels[plevels > Zcl] = zi
if Zcl > plevels[-2]: break
# Plot the filled contours and appropriate contour lines.
Xm = 0.5*(edges[0][1:] + edges[0][:-1])
Ym = 0.5*(edges[1][1:] + edges[1][:-1])
X, Y = np.meshgrid(Xm, Ym)
Zlevels[-1] = 1.0
pypl.contourf(X, Y, Z, Zlevels, colors=colors)
pypl.contour(X, Y, Z, Zlevels, linewidths=2, linestyles='-', colors='k')
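# Illustrative sketch (synthetic data, not from any SN fit): draws the nested
# probability contours for a correlated 2-D Gaussian cloud.  Call it
# explicitly; it is not executed on import.
def _example_prob_contour_plot():
    rng = np.random.RandomState(42)
    x = rng.normal(0.0, 1.0, 5000)
    y = 0.5 * x + rng.normal(0.0, 0.5, 5000)
    pypl.figure()
    prob_contour_plot(x, y, -4.0, 4.0, 40, -4.0, 4.0, 40)
    pypl.show()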
def gammainccln(a, x, tol=1e-50):
"""Asymptotic series expansion of ln(gammaincc(a, x)) for large x"""
return (a-1)*np.log(x) - x + np.log(1 + (a-1)/x) - gammaln(a)
dfac, dfacm1, fsum, N = 1.0, 1.0, 0.0, max(a - 1, 0)
# max 50 terms, probably not worth more
for n in range(0, 50):
fsum += dfac
dfac *= (a - 1 - n)/x
if abs(dfac) < tol or abs(dfac) > abs(dfacm1): break
dfacm1 = dfac
return (a - 1)*np.log(x) - x + np.log(fsum) - gammaln(a)
class SNMCMC(object):
"""Class to encapsulate a basic MCMC parameter search
I'm writing this as a plug-and-play interface to emcee for coming up with
supernova models. Realistically, emcee is pretty easy to use already,
but there are certain things like setting up the initial state, bounds on
parameters, etc., which can be streamlined by doing them in this way.
"""
# Features of standard blob of chain, with upper and lower bounds
_features = \
[
# name, def, blo, hlo, hhi, bhi, res, fmt, label, e.g.:
# 'MWD': ( 1.4, 0.8, 0.8, 2.8, 2.8, 0.05, "{0:8.2f}",
# "Ejected mass (M$_\\odot$)"),
]
# Names of features to be used as main MCMC parameters
_mcmcpars = [ ]
# Names of features for which to report marginalized confidence intervals
_confints = [ ]
# Description of subplots to plot when MCMC is done running
_subplot_layout = (1, 1)
_subplots = [ ]
_contlvls = [0.01, 0.05, 0.16, 0.50, 0.84, 0.95, 0.99]
# Default keywords for __init__, with default values
_init_kwdef = { }
def __init__(self, verbose=True, **init_kwargs):
"""Initialize the object"""
# Unpack kwargs
kw = dict(self._init_kwdef)
kw.update(init_kwargs)
# Features to show in histogram summary
clname = self.__class__.__name__
featureset = set(self._features.keys())
# Check all the damn inputs to make sure they make sense.
mcmcpset = set(self._mcmcpars)
if not mcmcpset <= featureset:
raise BadInputError("user supplied MCMC parameters {0}"
" that are not in {1} feature {2}".format(
mcmcpset - featureset, clname, featureset))
subplfset = [ ]
for subtuple in self._subplots: subplfset += subtuple
subplfset = set(subplfset)
if not subplfset <= featureset:
raise BadInputError("user supplied plot features {0}"
" that are not in {1} feature {2}".format(
subplfset - featureset, clname, featureset))
conflset = set(self._confints)
if not conflset <= featureset:
raise BadInputError("user supplied confidence region features {0}"
" that are not in {1} feature {2}".format(
conflset - featureset, clname, featureset))
nsubplay, nsubplots = np.prod(self._subplot_layout), len(self._subplots)
if nsubplay < nsubplots:
raise BadInputError("user supplied plot layout {0} w/{1} subplots"
" but only {2} subplots specified".format(
self._subplot_layout, nsubplay, nsubplots))
for f, fv in self._features.items():
if not np.all([# fv[0] >= fv[1], fv[0] <= fv[4],
fv[1] <= fv[4], fv[2] <= fv[3],
fv[5] <= 0.1*(fv[3]-fv[2])]):
print "uh oh: ", fv
raise BadInputError("bad value boundaries in {0}._features"
" for feature {1}".format(clname, f))
# ok! finally time to move
self.p0 = np.array([self._features[f][0] for f in self._mcmcpars])
self.plo = np.array([self._features[f][1] for f in self._mcmcpars])
self.phi = np.array([self._features[f][4] for f in self._mcmcpars])
self.psig = np.array([self._features[f][5] for f in self._mcmcpars])/2
# make sure we're not too close to the boundaries
blo, bhi = self.plo + 3*self.psig, self.phi - 3*self.psig
self.p0[self.p0 < blo] = blo[self.p0 < blo]
self.p0[self.p0 > bhi] = bhi[self.p0 > bhi]
# mop-up
if not hasattr(self, 'vmsg'):
self.vmsg = VerboseMsg(prefix=clname, verbose=verbose)
self._default_blob = dict(
[(f, v[0]) for f, v in self._features.items()])
def lnPchisq(self, chisq, ndof):
"""Shortcut to log(gamma function) for chi-square fit probability
If the chi-square is less than 1000, evaluates the probability of
a good fit for N degrees of freedom using scipy.special.gammaincc().
Otherwise, uses an asymptotic series to prevent underflow.
"""
if chisq < 1000.0:
return np.log(gammaincc(0.5*ndof, 0.5*chisq))
else:
return gammainccln(0.5*ndof, 0.5*chisq)
def isgood(self, _lnprob, blob):
"""Should this blob should be counted in the final results?
SNMCMC is meant to be a virtual class, so the user needs to define
this for their problem in derived classes.
"""
return _lnprob > -50
def partest(self, pars):
"""Runs simple parameter test to see if we get the answer we expect"""
lnp, blob = self.lnprob(pars)
print "partest: pars =", pars
print "partest: blob =", blob
print "partest: logp =", lnp
def logl(self, pars, blob=None):
"""Log likelihood *only* for PTSampler
This assumes that all the parameters lie within bounds laid out in
self._features. Implicit bounds caused by physics *necessary* for the
likelihood to make sense, e.g., binding energy, must be included here.
SNMCMC is meant to be a virtual class, so the user needs to define
this for their problem in derived classes.
May also use and modify a blob if one is passed in.
"""
return 0.0
def logp(self, pars, blob=None):
"""Log prior *only* for PTSampler
This assumes that all the parameters lie within bounds laid out in
self._features. Implicit bounds caused by physics assumed beyond
what's needed to calculate the likelihood, e.g., neutronization,
must be included here. SNMCMC is meant to be a virtual class, so
the user needs to define this for their problem in derived classes.
May also use and modify a blob if one is passed in.
"""
return 0.0
def _bounded_logp(self, pars, blob=None):
"""Evaluate the log prior within given bounds
This wraps the virtual method logp(), enforcing boundaries on
parameters. Parameter bounds could be considered part of the prior.
"""
if not np.all([p > plo for p, plo in zip(pars, self.plo)] +
[p < phi for p, phi in zip(pars, self.phi)]):
return -np.inf
else:
return self.logp(pars)
def lnprob(self, pars):
"""Log likelihood + prior for EnsembleSampler
Splitting up lnprob = logl + logp makes more sense for PTSampler,
particularly because it doesn't support blobs. For EnsembleSampler
we put them back together.
"""
# Bayes' theorem: P(theta|y) = P(y|theta)*P(theta)/P(y)
# Set P(y) = 1 since those are the data we in fact observed
# Thus ln P(theta|y) = ln P(y|theta) + ln P(theta)
blob = self.fillblob(pars)
_logp = self.logp(pars, blob)
_logl = self.logl(pars, blob)
return _logl + _logp, blob
def _bounded_lnprob(self, pars):
"""Log likelihood + prior for EnsembleSampler, sampled within bounds"""
blob = self.fillblob(pars)
_logp = self._bounded_logp(pars, blob)
if _logp == -np.inf:
_logl = -np.inf
else:
_logl = self.logl(pars, blob)
return _logl + _logp, blob
def _acorthin(self):
"""Use autocorrelation time of chain(s) to guess optimal thinning"""
try:
if isinstance(self.sampler, emcee.PTSampler):
# We're most interested in the zero-temperature chain here
_acor = self.sampler.acor[0]
else:
# Just use everything
_acor = self.sampler.acor
return min(int(np.round(np.median(_acor))), 10)
except Exception as e:
self.vmsg("{0} while calculating acor: {1}".format(
e.__class__.__name__, e))
return 3
def _guess(self, nwalkers):
"""Makes an initial guess around the default position"""
return emcee.utils.sample_ball(self.p0, self.psig, nwalkers)
def run_mcmc(self, nsamples=100000, sampler="EnsembleSampler"):
"""Uses emcee.EnsembleSampler to sample our model's posterior
Makes some sensible guesses about how to accomplish what we want,
namely, to achieve some guaranteed specified number nsamples of
independent Monte Carlo samples of our problem's posterior.
-- Total number of samples is nwalkers x ndim x niter >= nsamples
-- To be within 10% of goal, check every 10% of iterations = 1 round
-- Use nwalkers = 20 x ndim and burn in for 1 round
-- If acceptance < 0.05 after 5 non-burn-in rounds, abort with error
"""
ndim = len(self.p0)
nwalkers = 20*ndim
if nsamples < 100*nwalkers: nsamples = 100*nwalkers
niter = nsamples / (10*nwalkers)
if sampler == "EnsembleSampler":
pars = self._guess(nwalkers)
self.sampler = emcee.EnsembleSampler(
nwalkers, ndim, self._bounded_lnprob)
elif sampler == "PTSampler":
ntemps = 20
betas = np.array([1.0/2**(0.5*n) for n in range(ntemps)])
pars = np.array([self._guess(nwalkers) for T in range(ntemps)])
self.sampler = emcee.PTSampler(ntemps, nwalkers, ndim,
self.logl, self._bounded_logp, betas=betas)
else:
raise BadInputError("'sampler' parameter to run_mcmc must be in "
"['EnsembleSampler', 'PTSampler']")
# Burn in the chain. The burning-in process means a lot of the early
# samples will be strongly correlated, but a well-burned-in chain
# should be uncorrelated every 2-3 steps, so start with thin = 3.
# Really burn the damn thing in!
pars = self.sampler.run_mcmc(pars, 10*niter, thin=10)[0]
thin = thin0 = 3
self.vmsg("Starting run with thin =", thin)
stopme = False
self.sampler.reset()
while True:
try:
self.sampler.run_mcmc(pars, niter*thin, thin=thin)
except MemoryError:
# If we run out of memory, just use what we've got
stopme = True
if sampler == "EnsembleSampler":
# Just retrieve the samples
nblobs = np.prod(self.sampler.chain[0].shape)/ndim
self.lnproblist = np.array(self.sampler.lnprobability).ravel()
self.bloblist = np.array(self.sampler.blobs).ravel()
faccept = np.median(self.sampler.acceptance_fraction)
elif sampler == "PTSampler":
# Reincarnate zero-temp blobs, which PTSampler doesn't support
nblobs = np.prod(self.sampler.chain[0].shape)/ndim
bpars = self.sampler.chain[0].reshape(nblobs, ndim)
self.lnproblist = np.array(self.sampler.lnprobability[0]).ravel()
self.bloblist = np.array([self.fillblob(p) for p in bpars])
faccept = np.median(self.sampler.acceptance_fraction[0])
else:
pass
self.goodidx = np.array([self.isgood(lnp, b) for lnp, b in
zip(self.lnproblist, self.bloblist)])
nblobs = len(self.goodidx)
ngood = sum(self.goodidx)
if ngood > nsamples or stopme:
self.vmsg("Quitting with {0} good blobs".format(ngood))
break
elif len(self.bloblist) > 0.5*nsamples and faccept < 0.05:
self.vmsg("acceptance fraction = {0}, convergence sucks"
.format(faccept))
self.vmsg("bailing after {0} samples, don't trust results"
.format(nblobs))
break
else:
self.vmsg("Chain has {0} good blobs so far".format(ngood))
self.vmsg("lnprob min, max = {0}, {1}".format(
np.min(self.lnproblist), np.max(self.lnproblist)))
self.vmsg("acceptance fraction =", faccept)
"""
failtot = { }
for b in self.bloblist:
if b['fail'] not in failtot: failtot[b['fail']] = 0
failtot[b['fail']] += 1
self.vmsg("Fail totals:", failtot)
"""
thin = self._acorthin()
if thin != thin0:
self.vmsg("Adjusting thin ~ tau =", thin)
thin0 = thin
def show_results(self, makeplots=True, showplots=True, plotfname=None):
"""Display results of Markov chain sampling.
Shows marginalized confidence intervals on key parameters, as well as
full histograms and contour plots of 2-D joint confidence regions.
"""
# If we're going to save or show plots, we have to show them first
if plotfname or showplots: makeplots = True
# Unpack the good blobs into plot-ready numpy arrays
if sum(self.goodidx) < 5:
self.vmsg("No good blobs, hence no results to show!")
return
goodprobs = self.lnproblist[self.goodidx]
goodblobs = self.bloblist[self.goodidx]
allsamp = dict([(feature, np.array([b[feature] for b in goodblobs]))
for feature in goodblobs[0]])
print self.__class__.__name__, "fit results:"
print " len(bloblist) =", len(self.bloblist)
print " len(goodblobs) =", len(goodblobs)
print " physical frac. =", len(goodblobs)*1.0/len(self.bloblist)
print " accepted frac. =", np.median(self.sampler.acceptance_fraction)
self.lnprobs, self.blobs = goodprobs, goodblobs
for lnp, b in zip(self.lnprobs, self.blobs):
b['lnprob'] = lnp
# max probability
Pfit_max = np.exp(np.max(goodprobs))
print " best fit prob. =", Pfit_max
# feature quantiles
self.pctiles = { }
for f in self._confints:
self.pctiles[f] = np.array(
[scoreatpercentile(allsamp[f], 100.0*q)
for q in self._contlvls], dtype=np.float).ravel()
print "Attr. ", " ".join(["{0:8.2f}".format(p) for p in self._contlvls])
for f in self._confints:
print "{0:8s}".format(f),
print " ".join([self._features[f][6].format(p)
for p in self.pctiles[f]])
# make plots
if not makeplots: return
pypl.figure()
for i in range(len(self._subplots)):
pypl.subplot(self._subplot_layout[0], self._subplot_layout[1], i+1)
if len(self._subplots[i]) == 1:
f = self._subplots[i][0]
c = self._features[f]
data = allsamp[f]
hlo, hhi, res, fmt, label = c[2], c[3], c[5], c[6], c[7]
nbins = int(round((hhi-hlo)/(1.0*res)))
pypl.hist(data, nbins, range=(hlo, hhi), normed=1,
facecolor='green', alpha=0.75)
pypl.xlabel(label)
elif len(self._subplots[i]) == 2:
fx, fy = self._subplots[i]
cx, cy = self._features[fx], self._features[fy]
dx, dy = allsamp[fx], allsamp[fy]
hxlo, hxhi, xres, xfmt, xlabel = [cx[j] for j in (2,3,5,6,7)]
hylo, hyhi, yres, yfmt, ylabel = [cy[j] for j in (2,3,5,6,7)]
nxbins = int(round((hxhi-hxlo)/(1.0*xres)))
nybins = int(round((hyhi-hylo)/(1.0*yres)))
prob_contour_plot(dx, dy, hxlo, hxhi, nxbins, hylo, hyhi, nybins)
pypl.xlabel(xlabel)
pypl.ylabel(ylabel)
else:
raise BadInputError("bad entry #{0} in {1}._subplots".format
(i, self.__class__.__name__))
if showplots:
pypl.show()
if plotfname:
pypl.savefig(plotfname)
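# ----------------------------------------------------------------------------
# Illustrative sketch: a minimal toy subclass showing how SNMCMC is meant to
# be specialized.  The feature name, bounds and Gaussian likelihood below are
# hypothetical (not a physical SN model); note that a real subclass must also
# provide fillblob(), which lnprob() calls but the base class does not define.
# ----------------------------------------------------------------------------
class ToyGaussianMCMC(SNMCMC):
    """Toy model: recover a single mean 'mu' from one made-up measurement."""
    _features = {
        #  name  (default, blo, hlo, hhi, bhi, res,  fmt,         label)
        'mu':    (1.0,     0.0, 0.0, 2.0, 2.0, 0.05, "{0:8.2f}", "Toy mean"),
    }
    _mcmcpars = ['mu']
    _confints = ['mu']
    _subplot_layout = (1, 1)
    _subplots = [('mu',)]
    def fillblob(self, pars):
        blob = dict(self._default_blob)
        blob['mu'] = pars[0]
        return blob
    def logl(self, pars, blob=None):
        # pretend the data gave mu = 1.2 +/- 0.1
        return -0.5 * ((pars[0] - 1.2) / 0.1) ** 2
# Typical driver (kept commented out so importing this module stays cheap):
#   mcmc = ToyGaussianMCMC()
#   mcmc.run_mcmc(nsamples=20000)
#   mcmc.show_results(showplots=False, plotfname='toy_fit.png')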
| mit |
puntofisso/jupyter | jupyterworkflow/data.py | 1 | 1040 | import os
from urllib.request import urlretrieve
import pandas as pd
FREEMONT_URL = 'https://data.seattle.gov/api/views/65db-xm6k/rows.csv?accessType=DOWNLOAD'
def get_fremont_data(filename='Freemont.csv', url=FREEMONT_URL, force_download=False):
""" Download and cache the frreemont data
Parameters
----------
filename : string (optional)
location to save the data
url : string (optional)
web location of the data
force_download : bool (optional)
        if True, force redownload
Returns
-------
data : pandas.DataFrame
        the Freemont bridge data
"""
if force_download or not os.path.exists(filename):
urlretrieve(url, filename)
    data = pd.read_csv(filename, index_col='Date')
try:
data.index = pd.to_datetime(data.index, format='%m/%d/%Y %I:%M:%S %p')
except TypeError:
data.index = pd.to_datetime(data.index)
data.columns = ['West', 'East']
data['Total'] = data['West'] + data['East']
return data
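# Illustrative usage sketch (not executed on import): fetch the cached data
# and look at weekly totals.  The resample frequency here is just an example.
if __name__ == '__main__':
    counts = get_fremont_data()
    print(counts.resample('W').sum().head())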
| mit |
lituan/tools | mega_ancestor_msa.py | 2 | 3654 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
pipeline
construct phylogenetic tree and estimate ancestors using msa
"""
import os
import sys
import subprocess
import numpy as np
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
from multiprocessing import Pool
from collections import OrderedDict
from scipy.stats import ttest_ind
def read_mega(mega_f):
with open(mega_f) as o_f:
lines = o_f.readlines()
lines = [line.rstrip('\r\n') for line in lines]
lines = [line for line in lines if line]
seqs = OrderedDict()
for line in lines:
name,seq = line.split(':')
seq = seq.strip(' ')
if '.' in name:
if not name in seqs.keys():
seqs[name] = seq
else:
seqs[name] = seqs[name]+seq
sequences = []
for name,seq in seqs.iteritems():
if ')' in name:
sequences.append(('ancestor',name,seq))
else:
sequences.append(('current',name,seq))
return sequences
def get_similarity(seqs):
# seqs format, [['ancestor',pro,'ATR...']...]
def get_sim(r1,r2):
return len([1 for i,r1i in enumerate(r1) if r1i == r2[i]])*1.0/len(r1)
similarity = []
for state,pro,seq in seqs:
repeats = [seq[i:i+18] for i in range(0,len(seq),18)]
sims = []
for i in range(8):
for j in range(8):
if j > i:
sims.append(get_sim(repeats[i],repeats[j]))
similarity.append((state,pro,sims))
return similarity
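# Illustrative self-check with synthetic sequences (hypothetical data, not real
# MEGA output): each protein is modelled as 8 tandem repeats of 18 residues,
# which is the layout get_similarity() assumes when it slices every 18 columns.
def _example_get_similarity():
    repeat = 'ACDEFGHIKLMNPQRSTV'            # one 18-residue repeat
    identical = repeat * 8                    # all repeats the same
    divergent = repeat * 4 + ('W' * 18) * 4   # half the repeats replaced
    sims = get_similarity([('ancestor', 'node_1', identical),
                           ('current', 'prot_A.1', divergent)])
    # all 28 repeat-pair similarities of the identical protein equal 1.0
    assert all(s == 1.0 for s in sims[0][2])
    return sims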
def strip_plot(similarity,fname):
    # similarity format [['ancestor',pro,[0.3,0.4...]]...]
p_label = []
for i in range(28):
anc = [si[i] for s,name,si in similarity if s == 'ancestor']
now = [si[i] for s,name,si in similarity if s == 'current']
pvalue = ttest_ind(anc,now,equal_var=False)
if pvalue[-1] < 0.0001:
p_label.append('****')
elif pvalue[-1] < 0.001:
p_label.append('***')
elif pvalue[-1] < 0.01:
p_label.append('**')
elif pvalue[-1] < 0.05:
p_label.append('*')
else:
p_label.append('')
state = [ ]
repeat_pair = []
repeat_pair_similarity = []
keys = [str(i)+'_'+str(j) for i in range(8) for j in range(8) if j > i]
for s,name,sims in similarity:
state += [s] * 28
repeat_pair += keys
repeat_pair_similarity += sims
df = pd.DataFrame({'state':state,'repeat_pair':repeat_pair,'repeat_pair_similarity':repeat_pair_similarity})
sns.set(style='whitegrid', color_codes=True)
f,ax = plt.subplots(figsize=(12,8))
# sns.stripplot(x='repeat_pair',y='repeat_pair_similarity',hue='state',palette='Set2',data=df,jitter=True)
# sns.violinplot(x='repeat_pair',y='repeat_pair_similarity',hue='state',palette='Set2',data=df,split=True)
sns.violinplot(x='repeat_pair',y='repeat_pair_similarity',hue='state',palette='muted',data=df,split=True)
# add pvalue label
anno_y = [max([si[i] for _,_,si in similarity])+0.10 for i in range(28) ]
for i in range(28):
ax.annotate(p_label[i],(i,anno_y[i]))
# remove duplicated legends
handles,labels = ax.get_legend_handles_labels()
plt.legend(handles[:2],labels[:2])
plt.savefig(fname+'_repeats_similarity.png',dpi=300)
def main():
fname = os.path.split(sys.argv[-1])[1].split('.')[0]
seqs = read_mega(sys.argv[-1])
similarity = get_similarity(seqs)
strip_plot(similarity,fname)
if __name__ == "__main__":
main()
| cc0-1.0 |
herow/planning_qgis | python/plugins/processing/algs/qgis/VectorLayerScatterplot.py | 2 | 2907 | # -*- coding: utf-8 -*-
"""
***************************************************************************
EquivalentNumField.py
---------------------
Date : January 2013
Copyright : (C) 2013 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'January 2013'
__copyright__ = '(C) 2013, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import matplotlib.pyplot as plt
import matplotlib.pylab as lab
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.parameters import ParameterVector
from processing.core.parameters import ParameterTableField
from processing.core.outputs import OutputHTML
from processing.tools import vector
from processing.tools import dataobjects
class VectorLayerScatterplot(GeoAlgorithm):
INPUT = 'INPUT'
OUTPUT = 'OUTPUT'
XFIELD = 'XFIELD'
YFIELD = 'YFIELD'
def defineCharacteristics(self):
self.name = 'Vector layer scatterplot'
self.group = 'Graphics'
self.addParameter(ParameterVector(self.INPUT,
self.tr('Input layer'), [ParameterVector.VECTOR_TYPE_ANY]))
self.addParameter(ParameterTableField(self.XFIELD,
self.tr('X attribute'), self.INPUT,
ParameterTableField.DATA_TYPE_NUMBER))
self.addParameter(ParameterTableField(self.YFIELD,
self.tr('Y attribute'), self.INPUT,
ParameterTableField.DATA_TYPE_NUMBER))
self.addOutput(OutputHTML(self.OUTPUT, self.tr('Output')))
def processAlgorithm(self, progress):
layer = dataobjects.getObjectFromUri(
self.getParameterValue(self.INPUT))
xfieldname = self.getParameterValue(self.XFIELD)
yfieldname = self.getParameterValue(self.YFIELD)
output = self.getOutputValue(self.OUTPUT)
values = vector.values(layer, xfieldname, yfieldname)
plt.close()
plt.scatter(values[xfieldname], values[yfieldname])
plt.ylabel(yfieldname)
plt.xlabel(xfieldname)
plotFilename = output + '.png'
lab.savefig(plotFilename)
f = open(output, 'w')
f.write('<img src="' + plotFilename + '"/>')
f.close()
| gpl-2.0 |
CospanDesign/python | opencv/klt_test.py | 1 | 16916 | #! /usr/bin/env python
# Copyright (c) 2017 Dave McCoy ([email protected])
#
# NAME is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# any later version.
#
# NAME is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NAME; If not, see <http://www.gnu.org/licenses/>.
import sys
import os
import argparse
import numpy as np
import cv2
from matplotlib import pyplot as plt
from matplotlib.figure import SubplotParams
from math import *
cap = cv2.VideoCapture("data/videoplayback.mp4")
NAME = os.path.basename(os.path.realpath(__file__))
DESCRIPTION = "\n" \
"\n" \
"usage: %s [options]\n" % NAME
EPILOG = "\n" \
"\n" \
"Examples:\n" \
"\tSomething\n" \
"\n"
DEFAULT_START_FEATURE = 25
PATCH_SIZE = 3
PYRAMID_DEPTH = 5
MATCH_THRESHOLD = 0.3
CIRCLE_SIZE = 10
#ENERGY_THRESHOLD = 0.1
ENERGY_THRESHOLD = 0.0
ROTATION_ANGLE = 0
MAX_CORNERS = 100
PYRAMID_POS = PYRAMID_DEPTH - 1
RED = (0xFF, 0x00, 0x00)
GREEN = (0x00, 0xFF, 0x00)
BLUE = (0x00, 0x00, 0xFF)
# params for ShiTomasi corner detection
feature_params = dict( maxCorners = MAX_CORNERS,
qualityLevel = 0.3,
minDistance = 7,
blockSize = 7 )
# Parameters for lucas kanade optical flow
lk_params = dict( winSize = (15,15),
maxLevel = 2,
criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
subpix_params = dict( zeroZone = (-1,-1),
winSize = (10,10),
criteria = (cv2.TERM_CRITERIA_COUNT | cv2.TERM_CRITERIA_EPS,20,0.03))
INITIAL_WARP = [[cos(radians(ROTATION_ANGLE)), -sin(radians(ROTATION_ANGLE)), 0],
[sin(radians(ROTATION_ANGLE)), cos(radians(ROTATION_ANGLE)), 0]]
def get_patch(img, point, patch_size):
    patch = np.zeros(shape = (patch_size, patch_size), dtype=np.uint8)
for y in range((int(patch_size / 2) * -1), int(patch_size / 2) + 1):
for x in range((int(patch_size / 2) * -1), int(patch_size / 2) + 1):
py = y + int(patch_size / 2)
px = x + int(patch_size / 2)
patch[px][py] = img[point[0] + y][point[1] + x]
return patch
def apply_affine_image_warp(in_patch, in_point, transform=[[0, 0, 0], [0, 0, 0]], energy_threshold=ENERGY_THRESHOLD):
energy_min = 1.0 - energy_threshold
height = in_patch.shape[0]
y_start = - (height / 2)
y_end = (height / 2)
width = in_patch.shape[1]
x_start = - (width / 2)
x_end = (width / 2)
weight_patch = np.zeros(shape = (height, width), dtype=np.float32)
#out_patch = in_patch
out_patch = np.zeros(shape = (height, width), dtype=np.uint8)
#First apply the translation to the point under question
out_point = [int(in_point[0] + transform[0][2]),
int(in_point[1] + transform[1][2])]
#Apply the rotation, shearing and scaling to the in_path
for y in range (y_start, y_end + 1):
for x in range (x_start, x_end + 1):
x_in = x + 1
y_in = y + 1
#Find the location after the rotation and scaling
x_out = transform[0][0] * x + transform[0][1] * y
y_out = transform[1][0] * x + transform[1][1] * y
#Now we have the weight of how much we should put in the output image
# so if you have 50% of 0,0 going into 1,0 then we have 0.5 * value into 1,0
#Go through each of the output coordinates
x_out_pos = x_out + 1
y_out_pos = y_out + 1
for yout in range (0, height):
for xout in range(0, width):
#x_pos_diff = x_out_pos - xout
#y_pos_diff = y_out_pos - yout
x_energy = 1.0 * abs(x_out_pos - xout)
y_energy = 1.0 * abs(y_out_pos - yout)
if (x_energy < energy_min) and (y_energy < energy_min):
x_energy = 1.0 - x_energy
y_energy = 1.0 - y_energy
cell_energy = x_energy * y_energy
out_patch[yout][xout] += int(cell_energy * in_patch[y_in][x_in])
weight_patch[yout][xout] += cell_energy
print ("% 2dx% 2d -> % 2dx% 2d Output pos: % 2dx% 2d: Energy: %f: %d -> %d" %
(x, y,
xout - 1, yout - 1,
xout - 1, yout - 1,
cell_energy,
in_patch[y_in][x_in],
out_patch[yout][xout]))
for yout in range(0, height):
for xout in range(0, width):
print ("Weidth: % 2dx% 2d: %f" % (yout, xout, weight_patch[yout][xout]))
out_patch[yout][xout] = int(out_patch[yout][xout] / weight_patch[yout][xout])
return out_point, out_patch
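# Illustrative usage sketch (synthetic image, not executed on import): with an
# identity transform and the default energy threshold, the warped patch should
# reproduce the input patch and the point stays put.  Note the function above
# prints its per-pixel energy bookkeeping while it runs.
def _example_identity_warp():
    img = np.arange(100, dtype=np.uint8).reshape(10, 10)
    point = [5, 5]
    patch = get_patch(img, point, PATCH_SIZE)
    identity = [[1.0, 0.0, 0.0],
                [0.0, 1.0, 0.0]]
    return apply_affine_image_warp(patch, point, identity)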
def get_gradiants(gray_image, ypos, xpos):
#For all elements in a patch, find the gradiant of x, y and xy
#Set the position of starting of region of interest to be half a patch before (x and y)
ystart = ypos - int(PATCH_SIZE / 2)
xstart = xpos - int(PATCH_SIZE / 2)
    #Sum the 3x3 region of interest starting at (ystart, xstart) to get the
    #gradient of X, the gradient of Y and their product XY
    gx = gray_image[ystart + 0][xstart + 0] + \
         gray_image[ystart + 1][xstart + 0] + \
         gray_image[ystart + 2][xstart + 0] - \
         gray_image[ystart + 0][xstart + 2] - \
         gray_image[ystart + 1][xstart + 2] - \
         gray_image[ystart + 2][xstart + 2]
    gy = gray_image[ystart + 0][xstart + 0] + \
         gray_image[ystart + 0][xstart + 1] + \
         gray_image[ystart + 0][xstart + 2] - \
         gray_image[ystart + 2][xstart + 0] - \
         gray_image[ystart + 2][xstart + 1] - \
         gray_image[ystart + 2][xstart + 2]
gxy = gx * gy
return (gx, gy, gxy)
def find_error_coefficents(in_patch, out_patch):
error = 0
sigma_x = 0
sigma_y = 0
sigma_xy = 0
sigma_xt = 0
sigma_yt = 0
    AFFINE_SIZE = 6  # six parameters in the affine warp
    h_matrix = np.zeros(shape=(AFFINE_SIZE, AFFINE_SIZE), dtype=float)
for y in range(in_patch.shape[0]):
for x in range(in_patch.shape[1]):
sigma_x += in_patch[y][x]
#First Row
h_matrix[0, 0] = Ix_sqrd * (px * px)
h_matrix[0, 1] = Ix_sqrd * (px * py)
h_matrix[0, 2] = Ix_sqrd * (px)
h_matrix[0, 3] = Ixy * (px * px)
h_matrix[0, 4] = Ixy * (px * py)
h_matrix[0, 5] = Ixy * (px)
#Second Row
    h_matrix[1, 0] = Ix_sqrd * (px * py)
    h_matrix[1, 1] = Ix_sqrd * (py * py)
    h_matrix[1, 2] = Ix_sqrd * (py)
    h_matrix[1, 3] = Ixy * (px * py)
    h_matrix[1, 4] = Ixy * (py * py)
    h_matrix[1, 5] = Ixy * (py)
    #Third Row
    h_matrix[2, 0] = Ix_sqrd * (px)
    h_matrix[2, 1] = Ix_sqrd * (py)
    h_matrix[2, 2] = Ix_sqrd
    h_matrix[2, 3] = Ixy * (px)
    h_matrix[2, 4] = Ixy * (py)
    h_matrix[2, 5] = Ixy
    #Fourth Row
    h_matrix[3, 0] = Ixy * (px * px)
    h_matrix[3, 1] = Ixy * (px * py)
    h_matrix[3, 2] = Ixy * (px)
    h_matrix[3, 3] = Iy_sqrd * (px * px)
    h_matrix[3, 4] = Iy_sqrd * (px * py)
    h_matrix[3, 5] = Iy_sqrd * (px)
    #Fifth Row
    h_matrix[4, 0] = Ixy * (px * py)
    h_matrix[4, 1] = Ixy * (py * py)
    h_matrix[4, 2] = Ixy * (py)
    h_matrix[4, 3] = Iy_sqrd * (px * py)
    h_matrix[4, 4] = Iy_sqrd * (py * py)
    h_matrix[4, 5] = Iy_sqrd * (py)
    #Sixth Row
    h_matrix[5, 0] = Ixy * (px)
    h_matrix[5, 1] = Ixy * (py)
    h_matrix[5, 2] = Ixy
    h_matrix[5, 3] = Iy_sqrd * (px)
    h_matrix[5, 4] = Iy_sqrd * (py)
    h_matrix[5, 5] = Iy_sqrd
return error
def klt_track(image,
prev_gray,
gray,
features,
patch_size = PATCH_SIZE,
pyramid_depth = PYRAMID_DEPTH,
match_threshold = MATCH_THRESHOLD,
feature_pos = 0,
angle = INITIAL_WARP,
pyramid_pos = PYRAMID_POS,
energy_threshold=ENERGY_THRESHOLD,
debug = False):
#Create the Pyramids
pyramids = []
ppyramids = []
for i in range(pyramid_depth):
if (i == 0):
pyramids.append(gray)
ppyramids.append(prev_gray)
else:
pyramids.append(cv2.pyrDown(pyramids[i - 1]))
ppyramids.append(cv2.pyrDown(ppyramids[i - 1]))
point = features[feature_pos][0];
pyramid_points = [0] * pyramid_depth
if debug:
        fig = plt.figure(figsize=(10, 15))
for i in range (pyramid_depth):
r = (pyramid_depth - 1) - i
scale = 2 ** r
x = int(point[0] / scale)
y = int(point[1] / scale)
pyramid_points[r] = [x, y]
#print "Pyramid Points: %d: (%d, %d)" % (r, x, y)
if debug:
pos = (i * 2) + 1
a = fig.add_subplot(pyramid_depth, 2, pos)
img = cv2.cvtColor(ppyramids[-1 + (-1 * i)], cv2.COLOR_GRAY2RGB)
cv2.circle(img, (x, y), (CIRCLE_SIZE / scale), RED, -1)
            plt.imshow(img, interpolation='none')
a.set_title("Template: %d" % (r + 1))
pos += 1
a = fig.add_subplot(pyramid_depth, 2, pos)
img = cv2.cvtColor(pyramids[-1 + (-1 * i)], cv2.COLOR_GRAY2RGB)
            plt.imshow(img, interpolation='none')
a.set_title("Image: %d" % (r + 1))
#print "Find patch from template image"
#print "Patch at top of pyramid (As seen on template '%d')" % pyramid_depth
#pyramid_pos = 4
in_point = pyramid_points[pyramid_pos]
in_template_image = ppyramids[pyramid_pos]
in_dut_image = pyramids[pyramid_pos]
in_patch = get_patch(in_template_image, in_point, patch_size)
#Initial Transform is no movement
transform = [[cos(radians(angle)), -sin(radians(angle)), 0],
[sin(radians(angle)), cos(radians(angle)), 0]]
out_point, xfrm_patch = apply_affine_image_warp(in_patch, in_point, transform, energy_threshold)
dut_patch = get_patch(in_dut_image, out_point, patch_size)
patch_width = PATCH_SIZE
patch_height = PATCH_SIZE
patch_scale = 100
#in_patch.resize ((patch_width * patch_scale, patch_height * patch_scale))
#xfrm_patch.resize ((patch_width * patch_scale, patch_height * patch_scale))
#dut_patch.resize ((patch_width * patch_scale, patch_height * patch_scale))
in_patch_scale = np.kron(in_patch, np.ones((patch_scale, patch_scale)))
xfrm_patch_scale = np.kron(xfrm_patch, np.ones((patch_scale, patch_scale)))
dut_patch_scale = np.kron(dut_patch, np.ones((patch_scale, patch_scale)))
img_patches = np.zeros((patch_height * patch_scale, patch_width * patch_scale * 3), dtype=np.uint8)
for y in range(0, patch_scale * patch_height):
for x in range(0, patch_scale * patch_width):
img_patches[y][x + (patch_width * patch_scale * 0)] = in_patch_scale[y][x]
img_patches[y][x + (patch_width * patch_scale * 1)] = xfrm_patch_scale[y][x]
img_patches[y][x + (patch_width * patch_scale * 2)] = dut_patch_scale[y][x]
#img_patches = cv2.cvtColor(img_patches, cv2.CV_8UC1)
im_color = cv2.applyColorMap(img_patches, cv2.COLORMAP_JET)
#cv2.imshow("Main", img_patches)
cv2.imshow("Main", im_color)
#fig = cv2.figure(figsize=(10, 15))
#a = fig.add_subplot(1, 3, 1)
#cv2.imshow(in_patch, cmap="gray", interpolation='none')
#XXX: cv2.imshow("Main Window", in_patch)
#a.set_title("Template Patch")
#a = fig.add_subplot(1, 3, 2)
#cv2.imshow(xfrm_patch, cmap="gray", interpolation='none')
#XXX: cv2.imshow("Main Window", xfrm_patch)
#a.set_title("Template Patch Post Transform")
#a = fig.add_subplot(1, 3, 3)
#cv2.imshow(dut_patch, cmap="gray", interpolation='none')
#XXX: cv2.imshow("Main Window", dut_patch)
#a.set_title("DUT Patch")
print ("Template Feature Point (%d, %d) Template Original Patch:\n%s" %
(in_point[0], in_point[1], str(in_patch)))
print ("Template Transform Point Point (%d, %d) Template Transform Patch:\n%s" %
(out_point[0], out_point[1], str(xfrm_patch)))
print ("DUT Image Point (%d, %d) DUT Patch:\n%s" %
(out_point[0], out_point[1], str(dut_patch)))
#error = find_patch_error(xfrm_patch, dut_patch)
#print ("Error: %d" % error)
def update_klt(features, gray, prev_gray, image, feature_select, angle, pyramid_pos, energy_threshold, debug = False):
print "Feature Count: %d" % len(features)
if debug:
        plt.imshow(image)
        plt.title("All Features Found (Red is focused)")
for point in features:
cv2.circle( image,
( int(point[0][0]),
int(point[0][1])),
CIRCLE_SIZE,GREEN,
-1)
point = features[feature_select]
cv2.circle( image,
(
int(point[0][0]),
int(point[0][1])),
CIRCLE_SIZE,RED,
-1)
klt_track(image,
prev_gray,
gray,
features,
patch_size = PATCH_SIZE,
pyramid_depth = PYRAMID_DEPTH,
match_threshold = MATCH_THRESHOLD,
feature_pos = feature_select,
pyramid_pos = pyramid_pos,
angle = angle,
energy_threshold=ENERGY_THRESHOLD,
debug = False)
def main(argv):
#Parse out the commandline arguments
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=DESCRIPTION,
epilog=EPILOG
)
feature_select = "%s" % DEFAULT_START_FEATURE
angle = "%s" % ROTATION_ANGLE
pyramid_pos = "%s" % PYRAMID_POS
energy_threshold = "%s" % ENERGY_THRESHOLD
parser.add_argument("-f", "--feature",
nargs=1,
default=[feature_select])
parser.add_argument("-r", "--rotation",
nargs=1,
default=[angle])
parser.add_argument("-e", "--energy",
nargs=1,
default=[energy_threshold])
parser.add_argument("-p", "--pyramid",
nargs=1,
default=[pyramid_pos])
parser.add_argument("-d", "--debug",
action="store_true",
help="Enable Debug Messages")
parser.add_argument("--display",
action="store_true",
help="Display View")
args = parser.parse_args()
print "Running Script: %s" % NAME
if args.debug:
print "feature: %s" % str(args.feature[0])
feature_select = int(args.feature[0])
angle = int(args.rotation[0])
pyramid_pos = int(args.pyramid[0])
energy_threshold = float(args.energy[0])
ret, image = cap.read()
gray = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
gimage = cv2.cvtColor(gray, cv2.COLOR_GRAY2RGB)
# search for good points
features = cv2.goodFeaturesToTrack(gray, **feature_params)
# refine the corner locations
cv2.cornerSubPix(gray,features, **subpix_params)
prev_gray = gray
#Track Features until we loose a few of them
ret, image = cap.read()
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
while (True):
update_klt(features, gray, prev_gray, gimage, feature_select, angle, pyramid_pos, energy_threshold, args.debug)
angle += 5
if cv2.waitKey(10) & 0xFF == ord('q'):
break
if __name__ == "__main__":
main(sys.argv)
| mit |
ephes/scikit-learn | sklearn/neighbors/approximate.py | 128 | 22351 | """Approximate nearest neighbor search"""
# Author: Maheshakya Wijewardena <[email protected]>
# Joel Nothman <[email protected]>
import numpy as np
import warnings
from scipy import sparse
from .base import KNeighborsMixin, RadiusNeighborsMixin
from ..base import BaseEstimator
from ..utils.validation import check_array
from ..utils import check_random_state
from ..metrics.pairwise import pairwise_distances
from ..random_projection import GaussianRandomProjection
__all__ = ["LSHForest"]
HASH_DTYPE = '>u4'
MAX_HASH_SIZE = np.dtype(HASH_DTYPE).itemsize * 8
def _find_matching_indices(tree, bin_X, left_mask, right_mask):
"""Finds indices in sorted array of integers.
Most significant h bits in the binary representations of the
integers are matched with the items' most significant h bits.
"""
left_index = np.searchsorted(tree, bin_X & left_mask)
right_index = np.searchsorted(tree, bin_X | right_mask,
side='right')
return left_index, right_index
def _find_longest_prefix_match(tree, bin_X, hash_size,
left_masks, right_masks):
"""Find the longest prefix match in tree for each query in bin_X
Most significant bits are considered as the prefix.
"""
hi = np.empty_like(bin_X, dtype=np.intp)
hi.fill(hash_size)
lo = np.zeros_like(bin_X, dtype=np.intp)
res = np.empty_like(bin_X, dtype=np.intp)
left_idx, right_idx = _find_matching_indices(tree, bin_X,
left_masks[hi],
right_masks[hi])
found = right_idx > left_idx
res[found] = lo[found] = hash_size
r = np.arange(bin_X.shape[0])
kept = r[lo < hi] # indices remaining in bin_X mask
while kept.shape[0]:
mid = (lo.take(kept) + hi.take(kept)) // 2
left_idx, right_idx = _find_matching_indices(tree,
bin_X.take(kept),
left_masks[mid],
right_masks[mid])
found = right_idx > left_idx
mid_found = mid[found]
lo[kept[found]] = mid_found + 1
res[kept[found]] = mid_found
hi[kept[~found]] = mid[~found]
kept = r[lo < hi]
return res
class ProjectionToHashMixin(object):
"""Turn a transformed real-valued array into a hash"""
@staticmethod
def _to_hash(projected):
if projected.shape[1] % 8 != 0:
raise ValueError('Require reduced dimensionality to be a multiple '
'of 8 for hashing')
# XXX: perhaps non-copying operation better
out = np.packbits((projected > 0).astype(int)).view(dtype=HASH_DTYPE)
return out.reshape(projected.shape[0], -1)
def fit_transform(self, X, y=None):
self.fit(X)
return self.transform(X)
def transform(self, X, y=None):
return self._to_hash(super(ProjectionToHashMixin, self).transform(X))
class GaussianRandomProjectionHash(ProjectionToHashMixin,
GaussianRandomProjection):
"""Use GaussianRandomProjection to produce a cosine LSH fingerprint"""
def __init__(self,
n_components=8,
random_state=None):
super(GaussianRandomProjectionHash, self).__init__(
n_components=n_components,
random_state=random_state)
def _array_of_arrays(list_of_arrays):
"""Creates an array of array from list of arrays."""
out = np.empty(len(list_of_arrays), dtype=object)
out[:] = list_of_arrays
return out
class LSHForest(BaseEstimator, KNeighborsMixin, RadiusNeighborsMixin):
"""Performs approximate nearest neighbor search using LSH forest.
LSH Forest: Locality Sensitive Hashing forest [1] is an alternative
method for vanilla approximate nearest neighbor search methods.
LSH forest data structure has been implemented using sorted
arrays and binary search and 32 bit fixed-length hashes.
Random projection is used as the hash family which approximates
cosine distance.
The cosine distance is defined as ``1 - cosine_similarity``: the lowest
value is 0 (identical point) but it is bounded above by 2 for the farthest
points. Its value does not depend on the norm of the vector points but
only on their relative angles.
Read more in the :ref:`User Guide <approximate_nearest_neighbors>`.
Parameters
----------
n_estimators : int (default = 10)
Number of trees in the LSH Forest.
min_hash_match : int (default = 4)
lowest hash length to be searched when candidate selection is
performed for nearest neighbors.
n_candidates : int (default = 10)
Minimum number of candidates evaluated per estimator, assuming enough
items meet the `min_hash_match` constraint.
n_neighbors : int (default = 5)
Number of neighbors to be returned from query function when
it is not provided to the :meth:`kneighbors` method.
    radius : float, optional (default = 1.0)
Radius from the data point to its neighbors. This is the parameter
space to use by default for the :meth`radius_neighbors` queries.
radius_cutoff_ratio : float, optional (default = 0.9)
A value ranges from 0 to 1. Radius neighbors will be searched until
the ratio between total neighbors within the radius and the total
candidates becomes less than this value unless it is terminated by
hash length reaching `min_hash_match`.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
hash_functions_ : list of GaussianRandomProjectionHash objects
Hash function g(p,x) for a tree is an array of 32 randomly generated
        float arrays with the same dimension as the data set. This array is
stored in GaussianRandomProjectionHash object and can be obtained
from ``components_`` attribute.
trees_ : array, shape (n_estimators, n_samples)
Each tree (corresponding to a hash function) contains an array of
sorted hashed values. The array representation may change in future
versions.
original_indices_ : array, shape (n_estimators, n_samples)
Original indices of sorted hashed values in the fitted index.
References
----------
.. [1] M. Bawa, T. Condie and P. Ganesan, "LSH Forest: Self-Tuning
Indexes for Similarity Search", WWW '05 Proceedings of the
14th international conference on World Wide Web, 651-660,
2005.
Examples
--------
>>> from sklearn.neighbors import LSHForest
>>> X_train = [[5, 5, 2], [21, 5, 5], [1, 1, 1], [8, 9, 1], [6, 10, 2]]
>>> X_test = [[9, 1, 6], [3, 1, 10], [7, 10, 3]]
>>> lshf = LSHForest()
>>> lshf.fit(X_train) # doctest: +NORMALIZE_WHITESPACE
LSHForest(min_hash_match=4, n_candidates=50, n_estimators=10,
n_neighbors=5, radius=1.0, radius_cutoff_ratio=0.9,
random_state=None)
>>> distances, indices = lshf.kneighbors(X_test, n_neighbors=2)
>>> distances # doctest: +ELLIPSIS
array([[ 0.069..., 0.149...],
[ 0.229..., 0.481...],
[ 0.004..., 0.014...]])
>>> indices
array([[1, 2],
[2, 0],
[4, 0]])
"""
def __init__(self, n_estimators=10, radius=1.0, n_candidates=50,
n_neighbors=5, min_hash_match=4, radius_cutoff_ratio=.9,
random_state=None):
self.n_estimators = n_estimators
self.radius = radius
self.random_state = random_state
self.n_candidates = n_candidates
self.n_neighbors = n_neighbors
self.min_hash_match = min_hash_match
self.radius_cutoff_ratio = radius_cutoff_ratio
def _compute_distances(self, query, candidates):
"""Computes the cosine distance.
Distance is from the query to points in the candidates array.
Returns argsort of distances in the candidates
array and sorted distances.
"""
if candidates.shape == (0,):
# needed since _fit_X[np.array([])] doesn't work if _fit_X sparse
return np.empty(0, dtype=np.int), np.empty(0, dtype=float)
if sparse.issparse(self._fit_X):
candidate_X = self._fit_X[candidates]
else:
candidate_X = self._fit_X.take(candidates, axis=0, mode='clip')
distances = pairwise_distances(query, candidate_X,
metric='cosine')[0]
distance_positions = np.argsort(distances)
distances = distances.take(distance_positions, mode='clip', axis=0)
return distance_positions, distances
def _generate_masks(self):
"""Creates left and right masks for all hash lengths."""
tri_size = MAX_HASH_SIZE + 1
# Called once on fitting, output is independent of hashes
left_mask = np.tril(np.ones((tri_size, tri_size), dtype=int))[:, 1:]
right_mask = left_mask[::-1, ::-1]
self._left_mask = np.packbits(left_mask).view(dtype=HASH_DTYPE)
self._right_mask = np.packbits(right_mask).view(dtype=HASH_DTYPE)
def _get_candidates(self, query, max_depth, bin_queries, n_neighbors):
"""Performs the Synchronous ascending phase.
Returns an array of candidates, their distance ranks and
distances.
"""
index_size = self._fit_X.shape[0]
# Number of candidates considered including duplicates
# XXX: not sure whether this is being calculated correctly wrt
# duplicates from different iterations through a single tree
n_candidates = 0
candidate_set = set()
min_candidates = self.n_candidates * self.n_estimators
while (max_depth > self.min_hash_match and
(n_candidates < min_candidates or
len(candidate_set) < n_neighbors)):
left_mask = self._left_mask[max_depth]
right_mask = self._right_mask[max_depth]
for i in range(self.n_estimators):
start, stop = _find_matching_indices(self.trees_[i],
bin_queries[i],
left_mask, right_mask)
n_candidates += stop - start
candidate_set.update(
self.original_indices_[i][start:stop].tolist())
max_depth -= 1
candidates = np.fromiter(candidate_set, count=len(candidate_set),
dtype=np.intp)
# For insufficient candidates, candidates are filled.
# Candidates are filled from unselected indices uniformly.
if candidates.shape[0] < n_neighbors:
warnings.warn(
"Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (n_neighbors, self.min_hash_match))
remaining = np.setdiff1d(np.arange(0, index_size), candidates)
to_fill = n_neighbors - candidates.shape[0]
candidates = np.concatenate((candidates, remaining[:to_fill]))
ranks, distances = self._compute_distances(query,
candidates.astype(int))
return (candidates[ranks[:n_neighbors]],
distances[:n_neighbors])
def _get_radius_neighbors(self, query, max_depth, bin_queries, radius):
"""Finds radius neighbors from the candidates obtained.
Their distances from query are smaller than radius.
Returns radius neighbors and distances.
"""
ratio_within_radius = 1
threshold = 1 - self.radius_cutoff_ratio
total_candidates = np.array([], dtype=int)
total_neighbors = np.array([], dtype=int)
total_distances = np.array([], dtype=float)
while (max_depth > self.min_hash_match and
ratio_within_radius > threshold):
left_mask = self._left_mask[max_depth]
right_mask = self._right_mask[max_depth]
candidates = []
for i in range(self.n_estimators):
start, stop = _find_matching_indices(self.trees_[i],
bin_queries[i],
left_mask, right_mask)
candidates.extend(
self.original_indices_[i][start:stop].tolist())
candidates = np.setdiff1d(candidates, total_candidates)
total_candidates = np.append(total_candidates, candidates)
ranks, distances = self._compute_distances(query, candidates)
m = np.searchsorted(distances, radius, side='right')
positions = np.searchsorted(total_distances, distances[:m])
total_neighbors = np.insert(total_neighbors, positions,
candidates[ranks[:m]])
total_distances = np.insert(total_distances, positions,
distances[:m])
ratio_within_radius = (total_neighbors.shape[0] /
float(total_candidates.shape[0]))
max_depth = max_depth - 1
return total_neighbors, total_distances
def fit(self, X, y=None):
"""Fit the LSH forest on the data.
This creates binary hashes of input data points by getting the
dot product of input points and hash_function then
transforming the projection into a binary string array based
on the sign (positive/negative) of the projection.
A sorted array of binary hashes is created.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
self : object
Returns self.
"""
self._fit_X = check_array(X, accept_sparse='csr')
# Creates a g(p,x) for each tree
self.hash_functions_ = []
self.trees_ = []
self.original_indices_ = []
rng = check_random_state(self.random_state)
int_max = np.iinfo(np.int32).max
for i in range(self.n_estimators):
# This is g(p,x) for a particular tree.
# Builds a single tree. Hashing is done on an array of data points.
# `GaussianRandomProjection` is used for hashing.
# `n_components=hash size and n_features=n_dim.
hasher = GaussianRandomProjectionHash(MAX_HASH_SIZE,
rng.randint(0, int_max))
hashes = hasher.fit_transform(self._fit_X)[:, 0]
original_index = np.argsort(hashes)
bin_hashes = hashes[original_index]
self.original_indices_.append(original_index)
self.trees_.append(bin_hashes)
self.hash_functions_.append(hasher)
self._generate_masks()
return self
def _query(self, X):
"""Performs descending phase to find maximum depth."""
# Calculate hashes of shape (n_samples, n_estimators, [hash_size])
bin_queries = np.asarray([hasher.transform(X)[:, 0]
for hasher in self.hash_functions_])
bin_queries = np.rollaxis(bin_queries, 1)
# descend phase
depths = [_find_longest_prefix_match(tree, tree_queries, MAX_HASH_SIZE,
self._left_mask, self._right_mask)
for tree, tree_queries in zip(self.trees_,
np.rollaxis(bin_queries, 1))]
return bin_queries, np.max(depths, axis=0)
def kneighbors(self, X, n_neighbors=None, return_distance=True):
"""Returns n_neighbors of approximate nearest neighbors.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single query.
        n_neighbors : int, optional (default = None)
Number of neighbors required. If not provided, this will
return the number specified at the initialization.
return_distance : boolean, optional (default = False)
Returns the distances of neighbors if set to True.
Returns
-------
dist : array, shape (n_samples, n_neighbors)
Array representing the cosine distances to each point,
only present if return_distance=True.
ind : array, shape (n_samples, n_neighbors)
Indices of the approximate nearest points in the population
matrix.
"""
if not hasattr(self, 'hash_functions_'):
raise ValueError("estimator should be fitted.")
if n_neighbors is None:
n_neighbors = self.n_neighbors
X = check_array(X, accept_sparse='csr')
neighbors, distances = [], []
bin_queries, max_depth = self._query(X)
for i in range(X.shape[0]):
neighs, dists = self._get_candidates(X[i], max_depth[i],
bin_queries[i],
n_neighbors)
neighbors.append(neighs)
distances.append(dists)
if return_distance:
return np.array(distances), np.array(neighbors)
else:
return np.array(neighbors)
def radius_neighbors(self, X, radius=None, return_distance=True):
"""Finds the neighbors within a given radius of a point or points.
Return the indices and distances of some points from the dataset
lying in a ball with size ``radius`` around the points of the query
array. Points lying on the boundary are included in the results.
The result points are *not* necessarily sorted by distance to their
query point.
LSH Forest being an approximate method, some true neighbors from the
indexed dataset might be missing from the results.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single query.
radius : float
Limiting distance of neighbors to return.
(default is the value passed to the constructor).
return_distance : boolean, optional (default = False)
Returns the distances of neighbors if set to True.
Returns
-------
dist : array, shape (n_samples,) of arrays
Each element is an array representing the cosine distances
to some points found within ``radius`` of the respective query.
Only present if ``return_distance=True``.
ind : array, shape (n_samples,) of arrays
Each element is an array of indices for neighbors within ``radius``
of the respective query.
"""
if not hasattr(self, 'hash_functions_'):
raise ValueError("estimator should be fitted.")
if radius is None:
radius = self.radius
X = check_array(X, accept_sparse='csr')
neighbors, distances = [], []
bin_queries, max_depth = self._query(X)
for i in range(X.shape[0]):
neighs, dists = self._get_radius_neighbors(X[i], max_depth[i],
bin_queries[i], radius)
neighbors.append(neighs)
distances.append(dists)
if return_distance:
return _array_of_arrays(distances), _array_of_arrays(neighbors)
else:
return _array_of_arrays(neighbors)
def partial_fit(self, X, y=None):
"""
Inserts new data into the already fitted LSH Forest.
Cost is proportional to new total size, so additions
should be batched.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
New data point to be inserted into the LSH Forest.
"""
X = check_array(X, accept_sparse='csr')
if not hasattr(self, 'hash_functions_'):
return self.fit(X)
if X.shape[1] != self._fit_X.shape[1]:
raise ValueError("Number of features in X and"
" fitted array does not match.")
n_samples = X.shape[0]
n_indexed = self._fit_X.shape[0]
for i in range(self.n_estimators):
bin_X = self.hash_functions_[i].transform(X)[:, 0]
# gets the position to be added in the tree.
positions = self.trees_[i].searchsorted(bin_X)
# adds the hashed value into the tree.
self.trees_[i] = np.insert(self.trees_[i],
positions, bin_X)
# add the entry into the original_indices_.
self.original_indices_[i] = np.insert(self.original_indices_[i],
positions,
np.arange(n_indexed,
n_indexed +
n_samples))
# adds the entry into the input_array.
if sparse.issparse(X) or sparse.issparse(self._fit_X):
self._fit_X = sparse.vstack((self._fit_X, X))
else:
self._fit_X = np.row_stack((self._fit_X, X))
return self
| bsd-3-clause |
donbright/piliko | experiment/bernoulli/pythbern_butterfly.py | 1 | 1366 | from fractions import Fraction as Fract
import sys
# rational paramterization / approximation of bernoulli's lemniscate
# traditional form: ( x^2 + y^2 ) ^2 = 2*( x^2 - y^2 )
# chromogeometry form:
# x = (blueq/redq) / blueq( blueq/redq, greenq/redq )
# y = (greenq/redq) / blueq( blueq/redq, greenq/redq )
# where q = quadrance between 0,0 and integer point m,n
# please see pythbernlem.py for full explanation
def sqr(x): return x*x
def greenq(x,y,x2,y2): return 2*(x2-x)*(y2-y)
def redq(x,y,x2,y2): return sqr(x2-x)-sqr(y2-y)
def blueq(x,y,x2,y2): return sqr(x2-x)+sqr(y2-y)
xs,ys=[],[]
depth = 20
for m in range(-depth,depth):
for n in range(-depth,depth):
if redq(0,0,m,n)==0: continue
if greenq(0,0,m,n)==0: continue
bq,rq,gq = blueq(0,0,m,n),redq(0,0,m,n),greenq(0,0,m,n)
x = Fract( Fract(bq,gq), blueq(0,0,Fract(bq,gq),Fract(bq,rq)) )
y = Fract( Fract(rq,gq), blueq(0,0,Fract(rq,gq),Fract(rq,rq)) )
xs += [x]
ys += [y]
max=max(xs+ys)
for i in range(0,2):
print xs[i],',',ys[i],
print '....'
for i in range(0,len(xs)):
xs[i] = Fract( xs[i], max )
ys[i] = Fract( ys[i], max )
print len(xs), 'points'
import numpy as np
import matplotlib.pylab as plt
fig,ax = plt.subplots(figsize=(8,8))
ax.set_ylim([-1.2,1.2])
ax.set_xlim([-1.2,1.2])
for i in range(0,len(xs)):
xs[i]=xs[i]#+zs[i]/4
ys[i]=ys[i]#+zs[i]/4
ax.scatter(xs,ys)
plt.show()
| bsd-3-clause |
alvarofierroclavero/scikit-learn | sklearn/feature_selection/variance_threshold.py | 238 | 2594 | # Author: Lars Buitinck <[email protected]>
# License: 3-clause BSD
import numpy as np
from ..base import BaseEstimator
from .base import SelectorMixin
from ..utils import check_array
from ..utils.sparsefuncs import mean_variance_axis
from ..utils.validation import check_is_fitted
class VarianceThreshold(BaseEstimator, SelectorMixin):
"""Feature selector that removes all low-variance features.
This feature selection algorithm looks only at the features (X), not the
desired outputs (y), and can thus be used for unsupervised learning.
Read more in the :ref:`User Guide <variance_threshold>`.
Parameters
----------
threshold : float, optional
Features with a training-set variance lower than this threshold will
be removed. The default is to keep all features with non-zero variance,
i.e. remove the features that have the same value in all samples.
Attributes
----------
variances_ : array, shape (n_features,)
Variances of individual features.
Examples
--------
The following dataset has integer features, two of which are the same
in every sample. These are removed with the default setting for threshold::
>>> X = [[0, 2, 0, 3], [0, 1, 4, 3], [0, 1, 1, 3]]
>>> selector = VarianceThreshold()
>>> selector.fit_transform(X)
array([[2, 0],
[1, 4],
[1, 1]])
"""
def __init__(self, threshold=0.):
self.threshold = threshold
def fit(self, X, y=None):
"""Learn empirical variances from X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Sample vectors from which to compute variances.
y : any
Ignored. This parameter exists only for compatibility with
sklearn.pipeline.Pipeline.
Returns
-------
self
"""
X = check_array(X, ('csr', 'csc'), dtype=np.float64)
if hasattr(X, "toarray"): # sparse matrix
_, self.variances_ = mean_variance_axis(X, axis=0)
else:
self.variances_ = np.var(X, axis=0)
if np.all(self.variances_ <= self.threshold):
msg = "No feature in X meets the variance threshold {0:.5f}"
if X.shape[0] == 1:
msg += " (X contains only one sample)"
raise ValueError(msg.format(self.threshold))
return self
def _get_support_mask(self):
check_is_fitted(self, 'variances_')
return self.variances_ > self.threshold
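# Minimal usage sketch (illustrative, not part of the module): with a nonzero
# threshold, features whose training-set variance is at or below it are dropped.
if __name__ == "__main__":
    X = [[0.0, 1.0, 2.0],
         [0.0, 1.1, 0.0],
         [0.0, 0.9, 4.0]]
    selector = VarianceThreshold(threshold=0.05)
    print(selector.fit_transform(X))  # only the third, high-variance column survives
    print(selector.variances_)        # per-feature variances learned during fit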
| bsd-3-clause |
meren/anvio | anvio/parsers/hmmer.py | 3 | 35794 | # -*- coding: utf-8
"""Parser for HMMER's various outputs"""
import anvio
import anvio.utils as utils
import anvio.terminal as terminal
from anvio.errors import ConfigError
from anvio.parsers.base import Parser
import numpy as np
import pandas as pd
__author__ = "Developers of anvi'o (see AUTHORS.txt)"
__copyright__ = "Copyleft 2015-2018, the Meren Lab (http://merenlab.org/)"
__credits__ = []
__license__ = "GPL 3.0"
__version__ = anvio.__version__
__maintainer__ = "A. Murat Eren"
__email__ = "[email protected]"
class HMMERStandardOutput(object):
"""Parse the standard output of HMMER programs (NOTE: currently only works with hmmsearch)
The main meat of this class is to produce the attributes:
(1) self.seq_hits
(2) self.dom_hits
(3) self.ali_info
(1) self.seq_hits is a dataframe that looks like this:
| query acc target query_len evalue score bias \
| 0 3Beta_HSD PF01073.18 1998 282 5.200000e-23 76.2 0.0
| 1 3Beta_HSD PF01073.18 1723 282 1.300000e-07 25.7 0.0
| ... ... ... ... ... ... ... ...
| 3128 Voltage_CLC PF00654.19 320 354 7.200000e-65 214.3 37.1
| 3129 YkuD PF03734.13 30 146 1.700000e-14 49.3 0.2
| best_dom_evalue best_dom_score best_dom_bias expected_doms num_doms
| 0 6.600000e-22 72.6 0.0 2.0 1
| 1 1.700000e-07 25.3 0.0 1.2 1
| ... ... ... ... ... ...
| 3128 7.800000e-64 210.9 29.1 2.0 1
| 3129 3.800000e-14 48.2 0.2 1.7 1
(2) self.dom_hits is a frame that looks like this:
| query acc target domain qual score bias c-evalue \
| 0 3Beta_HSD PF01073.18 1998 1 ! 72.6 0.0 2.900000e-24
| 1 3Beta_HSD PF01073.18 1723 1 ! 25.3 0.0 7.300000e-10
| ... ... ... ... ... ... ... ... ...
| 2896 Voltage_CLC PF00654.19 320 1 ! 210.9 29.1 1.700000e-66
| 2897 YkuD PF03734.13 30 1 ! 48.2 0.2 8.400000e-17
|
| i-evalue hmm_start hmm_stop hmm_bounds ali_start ali_stop \
| 0 6.600000e-22 1 237 [. 4 243
| 1 1.700000e-07 1 95 [. 4 92
| ... ... ... ... ... ... ...
| 2896 7.800000e-64 3 352 .. 61 390
| 2897 3.800000e-14 2 146 .] 327 459
|
| ali_bounds env_start env_stop env_bounds mean_post_prob \
| 0 .. 4 254 .. 0.74
| 1 .. 4 148 .. 0.72
| ... ... ... ... ... ...
| 2896 .. 59 392 .. 0.94
| 2897 .. 326 459 .. 0.78
|
| match_state_align comparison_align sequence_align
| 0 vvtGggGFlGrrivkeLlrl... +v+Gg+G++G++ v +L++ ... LVLGGAGYIGSHAVDQLISK...
| 1 vvtGggGFlGrrivkeLlrl... ++ Gg+GFlG++i k L+++... IIFGGSGFLGQQIAKILVQR...
| ... ... ... ...
| 2896 gllagllvkrvapeaagsGi... g++ +++ r+ + a G ... GVVFTYFYTRF-GKNASRGN...
| 2897 kyivvdlaeqrllvlyengk... +yi++dl++q++ +++ +gk... NYIEIDLKDQKM-YCFIDGK...
If you're confused about the meaning of these columns, please see starting from page 32
of the HMMER guide http://eddylab.org/software/hmmer/Userguide.pdf. There you will be able
to correlate, with relative ease, the column names in these tables to what is described
meticulously in the tutorial. For example, `best_dom_bias` refers to the 'bias (best 1
domain)' column.
(3) ali_info is a nested dictionary that can be used to access on a per-hit basis which residues
in a sequence aligned to which residues in the HMM.
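Specifically, self.ali_info[target][(accession, domain_number)] is a dataframe with the
columns 'seq', 'hmm', 'comparison', 'seq_positions', and 'hmm_positions', where the two
position columns are zero-indexed.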
Parameters
==========
hmmer_std_out : str
Path to output of HMMER.
context : str, None
If provided, operations specific to a context will also be carried out. Choose from
{'interacdome'}
"""
def __init__(self, hmmer_std_out, context=None, run=terminal.Run(), progress=terminal.Progress()):
self.run = run
self.progress = progress
self.hmmer_std_out = hmmer_std_out
self.context = context
self.set_names()
self.ali_info = {}
# This is converted to a dataframe after populating
self.seq_hits = {
self.query_col: [],
self.acc_col: [],
self.target_col: [],
self.query_len_col: [],
'evalue': [],
'score': [],
'bias': [],
'best_dom_evalue': [],
'best_dom_score': [],
'best_dom_bias': [],
'expected_doms': [],
'num_doms': [],
}
self.seq_hits_dtypes = {
self.query_col: str,
self.acc_col: str,
self.target_col: str,
self.query_len_col: int,
'evalue': float,
'score': float,
'bias': float,
'best_dom_evalue': float,
'best_dom_score': float,
'best_dom_bias': float,
'expected_doms': float,
'num_doms': int,
}
# This is converted to a dataframe after populating
self.dom_hits = {
self.query_col: [],
self.acc_col: [],
self.target_col: [],
'domain': [],
'qual': [],
'score': [],
'bias': [],
'c-evalue': [],
'i-evalue': [],
'hmm_start': [],
'hmm_stop': [],
'hmm_bounds': [],
'ali_start': [],
'ali_stop': [],
'ali_bounds': [],
'env_start': [],
'env_stop': [],
'env_bounds': [],
'mean_post_prob': [],
'match_state_align': [],
'comparison_align': [],
'sequence_align': [],
}
self.dom_hits_dtypes = {
self.query_col: str,
self.acc_col: str,
self.target_col: str,
'domain': int,
'qual': str,
'score': float,
'bias': float,
'c-evalue': float,
'i-evalue': float,
'hmm_start': int,
'hmm_stop': int,
'hmm_bounds': str,
'ali_start': int,
'ali_stop': int,
'ali_bounds': str,
'env_start': int,
'env_stop': int,
'env_bounds': str,
'mean_post_prob': float,
'match_state_align': str,
'comparison_align': str,
'sequence_align': str,
}
self.delim_query = '//\n'
self.delim_seq = '>>'
self.delim_domain = '=='
self.load()
def load(self):
self.progress.new('Processing HMMER output')
self.progress.update('Parsing %s' % self.hmmer_std_out)
with open(self.hmmer_std_out) as f:
for i, query in enumerate(utils.get_chunk(f, separator=self.delim_query, read_size=32768)):
if i % 500 == 0:
self.progress.update('%d done' % i)
self.progress.increment(increment_to=i)
self.process_query(query)
self.seq_hits = pd.DataFrame(self.seq_hits).astype(self.seq_hits_dtypes)
self.dom_hits = pd.DataFrame(self.dom_hits).astype(self.dom_hits_dtypes)
self.progress.end()
self.additional_processing()
self.run.info('Loaded HMMER results from', self.hmmer_std_out)
def find_line(self, condition):
for line in self.query_lines[self.line_no:]:
self.line_no += 1
if line.startswith('#'):
continue
if condition(line):
return line
else:
return False
def read_lines_until(self, condition, include_last=False, store=True):
lines = []
return_value = lines if store else True
for line in self.query_lines[self.line_no:]:
self.line_no += 1
if line.startswith('#'):
continue
if condition(line):
if include_last and store:
lines.append(line)
return lines
if store:
lines.append(line)
else:
if store:
return lines
else:
return False
def process_query(self, query):
if self.delim_seq not in query:
# This query had no hits
return
self.query_lines = query.split('\n')
self.line_no = 0
line = self.find_line(lambda line: line.startswith('Query:'))
line_split = line.split()
query_name = line_split[1]
query_len = int(line_split[2][line_split[2].find('=')+1:-1])
line = self.find_line(lambda line: line.startswith('Accession:'))
acc = line.split()[1]
line = self.find_line(lambda line: line.lstrip().startswith('E-value'))
description_index = line.find('Desc')
fields = line[:description_index].split() # ignore last 'Description' field
assert len(fields) == 9, "Please report this on github with your HMMER version"
self.read_lines_until(lambda line: line.lstrip().startswith('-------'), store=False)
seq_score_lines = self.read_lines_until(lambda line: line == '')
num_doms_per_seq = {}
for seq_score_line in seq_score_lines:
seq_scores = seq_score_line[:description_index].split()
self.seq_hits[self.query_col].append(query_name)
self.seq_hits[self.query_len_col].append(query_len)
self.seq_hits[self.acc_col].append(acc)
self.seq_hits['evalue'].append(float(seq_scores[0]))
self.seq_hits['score'].append(float(seq_scores[1]))
self.seq_hits['bias'].append(float(seq_scores[2]))
self.seq_hits['best_dom_evalue'].append(float(seq_scores[3]))
self.seq_hits['best_dom_score'].append(float(seq_scores[4]))
self.seq_hits['best_dom_bias'].append(float(seq_scores[5]))
self.seq_hits['expected_doms'].append(float(seq_scores[6]))
self.seq_hits['num_doms'].append(int(seq_scores[7]))
self.seq_hits[self.target_col].append(seq_scores[8])
num_doms_per_seq[seq_scores[8]] = int(seq_scores[7])
num_seq_hits = len(seq_score_lines)
for _ in range(num_seq_hits):
target_name = self.find_line(lambda line: line.startswith(self.delim_seq)).split()[1]
if num_doms_per_seq[target_name] == 0:
continue
self.line_no += 2
for __ in range(num_doms_per_seq[target_name]):
dom_score_summary = self.find_line(lambda line: True).split()
self.dom_hits[self.query_col].append(query_name)
self.dom_hits[self.acc_col].append(acc)
self.dom_hits[self.target_col].append(target_name)
self.dom_hits['domain'].append(dom_score_summary[0])
self.dom_hits['qual'].append(dom_score_summary[1])
self.dom_hits['score'].append(dom_score_summary[2])
self.dom_hits['bias'].append(dom_score_summary[3])
self.dom_hits['c-evalue'].append(dom_score_summary[4])
self.dom_hits['i-evalue'].append(dom_score_summary[5])
self.dom_hits['hmm_start'].append(dom_score_summary[6])
self.dom_hits['hmm_stop'].append(dom_score_summary[7])
self.dom_hits['hmm_bounds'].append(dom_score_summary[8])
self.dom_hits['ali_start'].append(dom_score_summary[9])
self.dom_hits['ali_stop'].append(dom_score_summary[10])
self.dom_hits['ali_bounds'].append(dom_score_summary[11])
self.dom_hits['env_start'].append(dom_score_summary[12])
self.dom_hits['env_stop'].append(dom_score_summary[13])
self.dom_hits['env_bounds'].append(dom_score_summary[14])
self.dom_hits['mean_post_prob'].append(dom_score_summary[15])
for __ in range(num_doms_per_seq[target_name]):
self.find_line(lambda line: line.lstrip().startswith(self.delim_domain))
if __ == num_doms_per_seq[target_name] - 1:
if _ == num_seq_hits - 1:
# This is the last alignment in the summary_info. Go to end of string
ali_lines = self.read_lines_until(lambda line: False)
else:
# This is the last alignment in the sequence. Go to next sequence delimiter
ali_lines = self.read_lines_until(lambda line: line.lstrip().startswith(self.delim_seq))
self.line_no -= 1
else:
ali_lines = self.read_lines_until(lambda line: line.lstrip().startswith(self.delim_domain))
self.line_no -= 1
consensus = []
match = []
target = []
line_index = 0
while True:
if line_index >= len(ali_lines):
break
line = ali_lines[line_index]
if not line.lstrip().startswith(query_name + ' '):
line_index += 1
continue
cons_seq_fragment = line.split()[2]
frag_len = len(cons_seq_fragment)
ali_index = line.find(cons_seq_fragment)
consensus.append(cons_seq_fragment)
match.append(ali_lines[line_index + 1][ali_index: ali_index + frag_len])
target.append(ali_lines[line_index + 2][ali_index: ali_index + frag_len])
line_index += 2
self.dom_hits['match_state_align'].append(''.join(consensus))
self.dom_hits['comparison_align'].append(''.join(match))
self.dom_hits['sequence_align'].append(''.join(target))
def set_names(self):
"""Set the column names depending on self.context"""
if self.context is None:
self.query_col = 'query'
self.acc_col = 'acc'
self.query_len_col = 'query_len'
self.target_col = 'target'
elif self.context == 'interacdome':
self.query_col = 'pfam_name'
self.acc_col = 'pfam_id'
self.query_len_col = 'pfam_len'
self.target_col = 'corresponding_gene_call'
def additional_processing(self):
"""Further process raw data"""
if self.context is None:
self.get_ali_info()
elif self.context == 'interacdome':
self.seq_hits['corresponding_gene_call'] = self.seq_hits['corresponding_gene_call'].astype(int)
self.dom_hits['corresponding_gene_call'] = self.dom_hits['corresponding_gene_call'].astype(int)
if self.dom_hits.empty:
self.dom_hits['version'] = []
else:
self.dom_hits[['pfam_id', 'version']] = self.dom_hits['pfam_id'].str.split('.', n=1, expand=True)
if self.seq_hits.empty:
self.seq_hits['version'] = []
else:
self.seq_hits[['pfam_id', 'version']] = self.seq_hits['pfam_id'].str.split('.', n=1, expand=True)
# For convenience this is done after pfam_id has been split
self.get_ali_info()
def get_ali_info(self):
"""Creates self.ali_info. See class docstring for description
Notes
=====
- This function is very slow.
- EDIT: This function is not _that_ slow
"""
if self.dom_hits.empty:
return
unique_targets = self.dom_hits[self.target_col].nunique()
self.progress.new('Processing alignment info', progress_total_items=unique_targets)
gap_chars = {'-', '.'}
processed = 0
for target, subset in self.dom_hits.groupby(self.target_col):
if processed % 50 == 0:
self.progress.update('%d/%d done' % (processed, unique_targets))
self.progress.increment(increment_to=processed)
self.ali_info[target] = {}
for acc, subsubset in subset.groupby(self.acc_col):
for i, row in subsubset.iterrows():
seq_positions, seq_chars, hmm_positions, hmm_chars, comparison_chars = [], [], [], [], []
seq_pos, hmm_pos = row['ali_start'], row['hmm_start']
sequence, match_state, comparison = row['sequence_align'], row['match_state_align'], row['comparison_align']
assert len(sequence) == len(match_state)
for i in range(len(sequence)):
seq_char, hmm_char, comparison_char = sequence[i], match_state[i], comparison[i]
if (seq_char not in gap_chars) and (hmm_char not in gap_chars):
# there is alignment (non-gap characters)
seq_positions.append(seq_pos)
seq_chars.append(seq_char)
hmm_positions.append(hmm_pos)
hmm_chars.append(hmm_char.upper())
comparison_chars.append(comparison_char.upper())
seq_pos += 1
hmm_pos += 1
elif (seq_char in gap_chars) and (hmm_char not in gap_chars):
# gap in seq
hmm_pos += 1
elif (seq_char not in gap_chars) and (hmm_char in gap_chars):
# gap in match state
seq_pos += 1
else:
# this happens with 0 probability
pass
# The HMM state and sequence positions are 1-indexed. We subtract by 1 to make them zero-indexed
self.ali_info[target][(acc, row['domain'])] = pd.DataFrame({
'seq': seq_chars,
'hmm': hmm_chars,
'comparison': comparison_chars,
'seq_positions': np.array(seq_positions) - 1,
'hmm_positions': np.array(hmm_positions) - 1,
})
processed += 1
self.progress.end()
class HMMERTableOutput(Parser):
"""Parse --tblout or --domtblout output formats for hmmer programs
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
NOTE FIXME NOTE FIXME NOTE FIXME NOTE FIXME NOTE FIXME NOTE FIXME NOTE FIXME NOTE FIXME NOTE FIXME
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
<rant>
Parsing of HMMER tabular output needs to be redesigned. This code does not actually take output from hmmer
and parse it. It parses the output file of anvio.driver.HMMER.hmmscan_worker which preprocesses the
output format. The responsibility of HMMER output parsing needs to be consolidated in one spot. Biopython, a
dependency of anvi'o, has an HMMER parser. See https://biopython.org/DIST/docs/api/Bio.SearchIO.HmmerIO-module.html.
Perhaps this is a more robust solution. This design is currently hanging on by a thread.
</rant>
Output specifications of HMMER can be found in the user guide. At time of writing this,
http://eddylab.org/software/hmmer/Userguide.pdf hosts the user guide.
Parameters
==========
hmmer_table_txt: ???
Undocumented FIXME
alphabet: str, 'AA'
Which alphabet do the HMMs use? Pick from {'AA', 'DNA', 'RNA'}
context: str, 'GENE'
This tells the class how the output should be parsed. Pick from {'GENE', 'CONTIG',
'DOMAIN'}. Before being preprocessed by anvio.driver.HMMER.hmmscan_worker (see this module's
docstring), the header of the file should look like so, based on which context you use:
GENE:
# |-- full sequence ---| |-- best 1 domain ---| |-- domain number estimation ---|
# target name accession query name accession E-value score bias E-value score bias exp reg clu ov env dom rep inc description
#------------------- ---------- -------------------- ---------- --------- ------ ----- --------- ------ ----- --- --- --- --- --- --- --- --- -----------
DOMAIN:
# --- full sequence --- -------------- this domain ------------- hmm coord ali coord env coord
# target name accession tlen query name accession qlen E-value score bias # of c-Evalue i-Evalue score bias from to from to from to acc description
#------------------- ---------- ----- -------------------- ---------- ----- --------- ------ ----- --- --- --------- --------- ------ ----- ----- ----- ----- ----- ----- ----- ---- -----------
CONTIG:
Undocumented FIXME
`DOMAIN` is untested.
"""
def __init__(self, hmmer_table_txt, alphabet='AA', context='GENE', program='hmmscan', run=terminal.Run()):
self.alphabet = alphabet
self.context = context
self.program = program
self.run = run
files_expected = {'hits': hmmer_table_txt}
if self.context == "GENE":
col_info = self.get_col_info_for_GENE_context()
elif self.context == "CONTIG" and (self.alphabet == "DNA" or self.alphabet == "RNA"):
col_info = self.get_col_info_for_CONTIG_context()
elif self.context == "DOMAIN" and self.alphabet == "AA":
if program != 'hmmsearch':
raise ConfigError("HMMScan :: the 'DOMAIN' context is only available for hmmsearch.")
col_info = self.get_col_info_for_DOMAIN_context()
else:
raise ConfigError("HMMScan driver is confused. Yor context and alphabet pair ('%s' and '%s') "
"does not seem to be implemented in the parser module. If you think this is "
"not a mistake on your part, please get in touch with the anvi'o developers "
"and watch them fix it like actual pros." % (self.context, self.alphabet))
col_names, col_mapping = col_info
files_structure = {
'hits': {
'col_names': col_names,
'col_mapping': col_mapping,
'indexing_field': -1,
'no_header': True,
},
}
Parser.__init__(self, 'HMMScan', [hmmer_table_txt], files_expected, files_structure)
def get_col_info_for_GENE_context(self):
"""Get column names and types for GENE context
See class docstring for details of the fields for AA sequence search, and DNA sequence search.
"""
if self.program == 'hmmscan':
# |-- full sequence ---| |-- best 1 domain ---| |-- domain number estimation ---|
# target name accession query name accession E-value score bias E-value score bias exp reg clu ov env dom rep inc
#------------------- ---------- -------------------- ---------- --------- ------ ----- --------- ------ ----- --- --- --- --- --- --- --- ---
col_names = ['gene_name', 'gene_hmm_id', 'gene_callers_id', 'f', 'e_value', 'bit_score', 'f', 'f', 'dom_bit_score', 'f', 'f', 'f', 'f', 'f', 'f', 'f', 'f', 'f']
col_mapping = [str, str, int, str, float, float, str, str, float, str, str, str, str, str, str, str, str, str]
elif self.program == 'hmmsearch':
# |-- full sequence ---| |-- best 1 domain ---| |-- domain number estimation ---|
# target name accession query name accession E-value score bias E-value score bias exp reg clu ov env dom rep inc
#------------------- ---------- -------------------- ---------- --------- ------ ----- --------- ------ ----- --- --- --- --- --- --- --- ---
col_names = ['gene_callers_id', 'f', 'gene_name', 'gene_hmm_id', 'e_value', 'bit_score', 'f', 'f', 'dom_bit_score', 'f', 'f', 'f', 'f', 'f', 'f', 'f', 'f', 'f']
col_mapping = [int, str, str, str, float, float, str, str, float, str, str, str, str, str, str, str, str, str]
else:
raise ConfigError("The HMMScan Parser class is not sure if you know what you are doing. You told it that you wanted to "
"parse HMM hits from the program %s, but this class doesn't know how to handle those." % (self.program))
return col_names, col_mapping
def get_col_info_for_CONTIG_context(self):
"""Get column names and types for GENE context
See class docstring for details of the fields for AA sequence search, and DNA sequence search.
"""
# 'hmm_target', 'hmm_acc', 'query_id', 'query_acc', 'hmm_from', 'hmm_to', 'alignment_from', 'alignment_to', 'envelope_from', 'envelope_to', 'seq_len', 'strand', 'e_value', 'score', 'bias',]
col_names = ['gene_name', 'gene_hmm_id', 'contig_name', 'f', 'hmm_from', 'hmm_to', 'alignment_from', 'alignment_to', 'envelope_from', 'envelope_to', 'f', 'f', 'e_value', 'f', 'f']
col_mapping = [str, str, str, str, str, str, int, int, int, int, str, str, float, str, str]
return col_names, col_mapping
def get_col_info_for_DOMAIN_context(self):
"""Get column names and types for DOMAIN context
See class docstring for details of the fields
"""
col_info = [
('gene_callers_id', int), # target name
('f', str), # accession
('gene_length', int), # tlen
('hmm_name', str), # query name
('hmm_id', str), # accession
('hmm_length', int), # qlen
('evalue', float), # E-value (full sequence)
('bitscore', float), # score (full sequence)
('bias', float), # bias (full sequence)
('match_num', int), # # (this domain)
('num_matches', int), # of (this domain)
('dom_c_evalue', float), # c-Evalue (this domain)
('dom_i_evalue', float), # i-Evalue (this domain)
('dom_bitscore', str), # score (this domain)
('dom_bias', float), # bias (this domain)
('hmm_start', int), # from (hmm coord)
('hmm_stop', int), # to (hmm coord)
('gene_start', int), # from (ali coord)
('gene_stop', int), # to (ali coord)
('f', str), # from (env coord)
('f', str), # to (env coord)
('mean_post_prob', float), # acc
]
return list(zip(*col_info))
def get_search_results(self, noise_cutoff_dict=None, return_bitscore_dict=False):
"""Goes through the hits provided by `hmmscan` and generates an annotation dictionary with the relevant information about each hit.
This function makes sure only hits with a high enough bit score make it into the annotation dictionary.
Parameters
==========
noise_cutoff_dict : dictionary
dictionary of noise cutoff terms; see setup_ko_dict in kofam.py for an example
return_bitscore_dict : boolean
if True, this function will also return a dictionary of bitscores for each hit
Returns
=======
annotations_dict : dictionary
dictionary of annotations, one annotation per HMM hit
bitscore_dict : dictionary
dictionary of bitscore information, one entry per HMM hit, including full and domain-level bitscore.
only returned if return_bitscore_dict is True, and only applies to GENE context.
"""
annotations_dict = {}
bit_score_info_dict = {}
# this is the stuff we are going to try to fill with this:
# search_table_structure = ['entry_id', 'source', 'alphabet', 'contig', 'gene_callers_id', 'gene_name', 'gene_hmm_id', 'e_value']
entry_id = 0
num_hits_removed = 0 # a counter for the number of hits we don't add to the annotation dictionary
for hit in list(self.dicts['hits'].values()):
entry = None
bit_score_info_dict_entry = None
if self.context == 'GENE':
# Here we only add the hit to the annotations_dict if the appropriate bit score is above the
# threshold set in noise_cutoff_dict (which is indexed by profile name (aka gene_name in the hits dict)
if noise_cutoff_dict and hit['gene_name'] in noise_cutoff_dict.keys():
hmm_entry_name = hit['gene_name']
score_type = noise_cutoff_dict[hmm_entry_name]['score_type']
threshold = noise_cutoff_dict[hmm_entry_name]['threshold']
keep = True
if score_type == 'full':
if hit['bit_score'] < float(threshold):
keep = False
elif score_type == 'domain':
if hit['dom_bit_score'] < float(threshold):
keep = False
else:
self.run.warning("Oh dear. The HMM profile %s has a strange score_type value: %s. The only accepted values "
"for this type are 'full' or 'domain', so anvi'o cannot parse the hits to this profile. All hits "
"will be kept regardless of bit score. You have been warned." % (hit['gene_name'], score_type))
if keep:
entry = {'entry_id': entry_id,
'gene_name': hit['gene_name'],
'gene_hmm_id': hit['gene_hmm_id'],
'gene_callers_id': hit['gene_callers_id'],
'e_value': hit['e_value']}
if return_bitscore_dict:
bit_score_info_dict_entry = {'entry_id': entry_id,
'gene_name': hit['gene_name'],
'gene_hmm_id': hit['gene_hmm_id'],
'gene_callers_id': hit['gene_callers_id'],
'e_value': hit['e_value'],
'bit_score': hit['bit_score'],
'domain_bit_score': hit['dom_bit_score']}
else:
num_hits_removed += 1
elif noise_cutoff_dict and hit['gene_name'] not in noise_cutoff_dict.keys():
# this should never happen, in an ideal world where everything is filled with butterflies and happiness
self.run.warning("Hmm. While parsing your HMM hits, it seems the HMM profile %s was not found in the noise cutoff dictionary. "
"This should probably not ever happen, and you should contact a developer as soon as possible to figure out what "
"is going on. But for now, anvi'o is going to keep all hits to this profile. Consider those hits with a grain of salt, "
"as not all of them may be good." % hit['gene_name'])
entry = {'entry_id': entry_id,
'gene_name': hit['gene_name'],
'gene_hmm_id': hit['gene_hmm_id'],
'gene_callers_id': hit['gene_callers_id'],
'e_value': hit['e_value']}
if return_bitscore_dict:
bit_score_info_dict_entry = {'entry_id': entry_id,
'gene_name': hit['gene_name'],
'gene_hmm_id': hit['gene_hmm_id'],
'gene_callers_id': hit['gene_callers_id'],
'e_value': hit['e_value'],
'bit_score': hit['bit_score'],
'domain_bit_score': hit['dom_bit_score']}
else:
entry = {'entry_id': entry_id,
'gene_name': hit['gene_name'],
'gene_hmm_id': hit['gene_hmm_id'],
'gene_callers_id': hit['gene_callers_id'],
'e_value': hit['e_value']}
if return_bitscore_dict:
bit_score_info_dict_entry = {'entry_id': entry_id,
'gene_name': hit['gene_name'],
'gene_hmm_id': hit['gene_hmm_id'],
'gene_callers_id': hit['gene_callers_id'],
'e_value': hit['e_value'],
'bit_score': hit['bit_score'],
'domain_bit_score': hit['dom_bit_score']}
elif self.context == 'CONTIG' and (self.alphabet == 'DNA' or self.alphabet == 'RNA'):
entry = {'entry_id': entry_id,
'gene_name': hit['gene_name'],
'gene_hmm_id': hit['gene_hmm_id'],
'contig_name': hit['contig_name'],
'start': hit['alignment_from'],
'stop': hit['alignment_to'],
'e_value': hit['e_value']}
else:
raise ConfigError("Anvi'o does not know how to parse %s:%s" % (self.alphabet, self.context))
if entry:
entry_id += 1
annotations_dict[entry_id] = entry
if return_bitscore_dict and bit_score_info_dict_entry:
bit_score_info_dict[entry_id] = bit_score_info_dict_entry
self.run.info("Number of weak hits removed", num_hits_removed)
self.run.info("Number of hits in annotation dict ", len(annotations_dict.keys()))
if return_bitscore_dict:
return annotations_dict, bit_score_info_dict
return annotations_dict
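# Minimal usage sketch (illustrative only; the file path below is hypothetical):
if __name__ == "__main__":
    parsed = HMMERStandardOutput("hmmsearch_output.txt")
    # per-sequence and per-domain hits are pandas dataframes
    print(parsed.seq_hits.head())
    print(parsed.dom_hits.head())
    # residue-level alignments: target -> (accession, domain) -> dataframe
    for target, per_domain in parsed.ali_info.items():
        for (acc, domain), frame in per_domain.items():
            print(target, acc, domain, frame.shape)
        break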
| gpl-3.0 |
kdebrab/pandas | pandas/core/dtypes/generic.py | 5 | 3609 | """ define generic base classes for pandas objects """
# define abstract base classes to enable isinstance type checking on our
# objects
def create_pandas_abc_type(name, attr, comp):
@classmethod
def _check(cls, inst):
return getattr(inst, attr, '_typ') in comp
dct = dict(__instancecheck__=_check, __subclasscheck__=_check)
meta = type("ABCBase", (type, ), dct)
return meta(name, tuple(), dct)
ABCIndex = create_pandas_abc_type("ABCIndex", "_typ", ("index", ))
ABCInt64Index = create_pandas_abc_type("ABCInt64Index", "_typ",
("int64index", ))
ABCUInt64Index = create_pandas_abc_type("ABCUInt64Index", "_typ",
("uint64index", ))
ABCRangeIndex = create_pandas_abc_type("ABCRangeIndex", "_typ",
("rangeindex", ))
ABCFloat64Index = create_pandas_abc_type("ABCFloat64Index", "_typ",
("float64index", ))
ABCMultiIndex = create_pandas_abc_type("ABCMultiIndex", "_typ",
("multiindex", ))
ABCDatetimeIndex = create_pandas_abc_type("ABCDatetimeIndex", "_typ",
("datetimeindex", ))
ABCTimedeltaIndex = create_pandas_abc_type("ABCTimedeltaIndex", "_typ",
("timedeltaindex", ))
ABCPeriodIndex = create_pandas_abc_type("ABCPeriodIndex", "_typ",
("periodindex", ))
ABCCategoricalIndex = create_pandas_abc_type("ABCCategoricalIndex", "_typ",
("categoricalindex", ))
ABCIntervalIndex = create_pandas_abc_type("ABCIntervalIndex", "_typ",
("intervalindex", ))
ABCIndexClass = create_pandas_abc_type("ABCIndexClass", "_typ",
("index", "int64index", "rangeindex",
"float64index", "uint64index",
"multiindex", "datetimeindex",
"timedeltaindex", "periodindex",
"categoricalindex", "intervalindex"))
ABCSeries = create_pandas_abc_type("ABCSeries", "_typ", ("series", ))
ABCDataFrame = create_pandas_abc_type("ABCDataFrame", "_typ", ("dataframe", ))
ABCSparseDataFrame = create_pandas_abc_type("ABCSparseDataFrame", "_subtyp",
("sparse_frame", ))
ABCPanel = create_pandas_abc_type("ABCPanel", "_typ", ("panel",))
ABCSparseSeries = create_pandas_abc_type("ABCSparseSeries", "_subtyp",
('sparse_series',
'sparse_time_series'))
ABCSparseArray = create_pandas_abc_type("ABCSparseArray", "_subtyp",
('sparse_array', 'sparse_series'))
ABCCategorical = create_pandas_abc_type("ABCCategorical", "_typ",
("categorical"))
ABCPeriod = create_pandas_abc_type("ABCPeriod", "_typ", ("period", ))
ABCDateOffset = create_pandas_abc_type("ABCDateOffset", "_typ",
("dateoffset",))
ABCInterval = create_pandas_abc_type("ABCInterval", "_typ", ("interval", ))
ABCExtensionArray = create_pandas_abc_type("ABCExtensionArray", "_typ",
("extension", "categorical",))
class _ABCGeneric(type):
def __instancecheck__(cls, inst):
return hasattr(inst, "_data")
ABCGeneric = _ABCGeneric("ABCGeneric", tuple(), {})
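# Illustrative sketch (not part of the module): these ABCs check the `_typ` attribute
# rather than real inheritance, so any object exposing a matching `_typ` passes the
# isinstance test without importing the concrete pandas class.
if __name__ == "__main__":
    class FakeSeries(object):
        _typ = "series"

    assert isinstance(FakeSeries(), ABCSeries)
    assert not isinstance(FakeSeries(), ABCDataFrame)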
| bsd-3-clause |
alexsavio/scikit-learn | examples/datasets/plot_random_dataset.py | 348 | 2254 | """
==============================================
Plot randomly generated classification dataset
==============================================
Plot several randomly generated 2D classification datasets.
This example illustrates the :func:`datasets.make_classification`
:func:`datasets.make_blobs` and :func:`datasets.make_gaussian_quantiles`
functions.
For ``make_classification``, three binary and two multi-class classification
datasets are generated, with different numbers of informative features and
clusters per class. """
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_gaussian_quantiles
plt.figure(figsize=(8, 8))
plt.subplots_adjust(bottom=.05, top=.9, left=.05, right=.95)
plt.subplot(321)
plt.title("One informative feature, one cluster per class", fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=1,
n_clusters_per_class=1)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(322)
plt.title("Two informative features, one cluster per class", fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=2,
n_clusters_per_class=1)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(323)
plt.title("Two informative features, two clusters per class", fontsize='small')
X2, Y2 = make_classification(n_features=2, n_redundant=0, n_informative=2)
plt.scatter(X2[:, 0], X2[:, 1], marker='o', c=Y2)
plt.subplot(324)
plt.title("Multi-class, two informative features, one cluster",
fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=2,
n_clusters_per_class=1, n_classes=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(325)
plt.title("Three blobs", fontsize='small')
X1, Y1 = make_blobs(n_features=2, centers=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(326)
plt.title("Gaussian divided into three quantiles", fontsize='small')
X1, Y1 = make_gaussian_quantiles(n_features=2, n_classes=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.show()
| bsd-3-clause |
ryfeus/lambda-packs | Sklearn_scipy_numpy/source/scipy/signal/waveforms.py | 64 | 14818 | # Author: Travis Oliphant
# 2003
#
# Feb. 2010: Updated by Warren Weckesser:
# Rewrote much of chirp()
# Added sweep_poly()
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy import asarray, zeros, place, nan, mod, pi, extract, log, sqrt, \
exp, cos, sin, polyval, polyint
__all__ = ['sawtooth', 'square', 'gausspulse', 'chirp', 'sweep_poly']
def sawtooth(t, width=1):
"""
Return a periodic sawtooth or triangle waveform.
The sawtooth waveform has a period ``2*pi``, rises from -1 to 1 on the
interval 0 to ``width*2*pi``, then drops from 1 to -1 on the interval
``width*2*pi`` to ``2*pi``. `width` must be in the interval [0, 1].
Note that this is not band-limited. It produces an infinite number
of harmonics, which are aliased back and forth across the frequency
spectrum.
Parameters
----------
t : array_like
Time.
width : array_like, optional
Width of the rising ramp as a proportion of the total cycle.
Default is 1, producing a rising ramp, while 0 produces a falling
ramp. `width` = 0.5 produces a triangle wave.
If an array, causes wave shape to change over time, and must be the
same length as t.
Returns
-------
y : ndarray
Output array containing the sawtooth waveform.
Examples
--------
A 5 Hz waveform sampled at 500 Hz for 1 second:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> t = np.linspace(0, 1, 500)
>>> plt.plot(t, signal.sawtooth(2 * np.pi * 5 * t))
"""
t, w = asarray(t), asarray(width)
w = asarray(w + (t - t))
t = asarray(t + (w - w))
if t.dtype.char in ['fFdD']:
ytype = t.dtype.char
else:
ytype = 'd'
y = zeros(t.shape, ytype)
# width must be between 0 and 1 inclusive
mask1 = (w > 1) | (w < 0)
place(y, mask1, nan)
# take t modulo 2*pi
tmod = mod(t, 2 * pi)
# on the interval 0 to width*2*pi function is
# tmod / (pi*w) - 1
mask2 = (1 - mask1) & (tmod < w * 2 * pi)
tsub = extract(mask2, tmod)
wsub = extract(mask2, w)
place(y, mask2, tsub / (pi * wsub) - 1)
# on the interval width*2*pi to 2*pi function is
# (pi*(w+1)-tmod) / (pi*(1-w))
mask3 = (1 - mask1) & (1 - mask2)
tsub = extract(mask3, tmod)
wsub = extract(mask3, w)
place(y, mask3, (pi * (wsub + 1) - tsub) / (pi * (1 - wsub)))
return y
def square(t, duty=0.5):
"""
Return a periodic square-wave waveform.
The square wave has a period ``2*pi``, has value +1 from 0 to
``2*pi*duty`` and -1 from ``2*pi*duty`` to ``2*pi``. `duty` must be in
the interval [0,1].
Note that this is not band-limited. It produces an infinite number
of harmonics, which are aliased back and forth across the frequency
spectrum.
Parameters
----------
t : array_like
The input time array.
duty : array_like, optional
Duty cycle. Default is 0.5 (50% duty cycle).
If an array, causes wave shape to change over time, and must be the
same length as t.
Returns
-------
y : ndarray
Output array containing the square waveform.
Examples
--------
A 5 Hz waveform sampled at 500 Hz for 1 second:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> t = np.linspace(0, 1, 500, endpoint=False)
>>> plt.plot(t, signal.square(2 * np.pi * 5 * t))
>>> plt.ylim(-2, 2)
A pulse-width modulated sine wave:
>>> plt.figure()
>>> sig = np.sin(2 * np.pi * t)
>>> pwm = signal.square(2 * np.pi * 30 * t, duty=(sig + 1)/2)
>>> plt.subplot(2, 1, 1)
>>> plt.plot(t, sig)
>>> plt.subplot(2, 1, 2)
>>> plt.plot(t, pwm)
>>> plt.ylim(-1.5, 1.5)
"""
t, w = asarray(t), asarray(duty)
w = asarray(w + (t - t))
t = asarray(t + (w - w))
if t.dtype.char in ['fFdD']:
ytype = t.dtype.char
else:
ytype = 'd'
y = zeros(t.shape, ytype)
# width must be between 0 and 1 inclusive
mask1 = (w > 1) | (w < 0)
place(y, mask1, nan)
# on the interval 0 to duty*2*pi function is 1
tmod = mod(t, 2 * pi)
mask2 = (1 - mask1) & (tmod < w * 2 * pi)
place(y, mask2, 1)
# on the interval duty*2*pi to 2*pi function is -1
mask3 = (1 - mask1) & (1 - mask2)
place(y, mask3, -1)
return y
def gausspulse(t, fc=1000, bw=0.5, bwr=-6, tpr=-60, retquad=False,
retenv=False):
"""
Return a Gaussian modulated sinusoid:
``exp(-a t^2) exp(1j*2*pi*fc*t).``
If `retquad` is True, then return the real and imaginary parts
(in-phase and quadrature).
If `retenv` is True, then return the envelope (unmodulated signal).
Otherwise, return the real part of the modulated sinusoid.
Parameters
----------
t : ndarray or the string 'cutoff'
Input array.
fc : int, optional
Center frequency (e.g. Hz). Default is 1000.
bw : float, optional
Fractional bandwidth in frequency domain of pulse (e.g. Hz).
Default is 0.5.
bwr : float, optional
Reference level at which fractional bandwidth is calculated (dB).
Default is -6.
tpr : float, optional
If `t` is 'cutoff', then the function returns the cutoff
time for when the pulse amplitude falls below `tpr` (in dB).
Default is -60.
retquad : bool, optional
If True, return the quadrature (imaginary) as well as the real part
of the signal. Default is False.
retenv : bool, optional
If True, return the envelope of the signal. Default is False.
Returns
-------
yI : ndarray
Real part of signal. Always returned.
yQ : ndarray
Imaginary part of signal. Only returned if `retquad` is True.
yenv : ndarray
Envelope of signal. Only returned if `retenv` is True.
See Also
--------
scipy.signal.morlet
Examples
--------
Plot real component, imaginary component, and envelope for a 5 Hz pulse,
sampled at 100 Hz for 2 seconds:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> t = np.linspace(-1, 1, 2 * 100, endpoint=False)
>>> i, q, e = signal.gausspulse(t, fc=5, retquad=True, retenv=True)
>>> plt.plot(t, i, t, q, t, e, '--')
"""
if fc < 0:
raise ValueError("Center frequency (fc=%.2f) must be >=0." % fc)
if bw <= 0:
raise ValueError("Fractional bandwidth (bw=%.2f) must be > 0." % bw)
if bwr >= 0:
raise ValueError("Reference level for bandwidth (bwr=%.2f) must "
"be < 0 dB" % bwr)
# exp(-a t^2) <-> sqrt(pi/a) exp(-pi^2/a * f^2) = g(f)
ref = pow(10.0, bwr / 20.0)
# fdel = fc*bw/2: g(fdel) = ref --- solve this for a
#
# pi^2/a * fc^2 * bw^2 /4=-log(ref)
a = -(pi * fc * bw) ** 2 / (4.0 * log(ref))
if t == 'cutoff': # compute cut_off point
# Solve exp(-a tc**2) = tref for tc
# tc = sqrt(-log(tref) / a) where tref = 10^(tpr/20)
if tpr >= 0:
raise ValueError("Reference level for time cutoff must be < 0 dB")
tref = pow(10.0, tpr / 20.0)
return sqrt(-log(tref) / a)
yenv = exp(-a * t * t)
yI = yenv * cos(2 * pi * fc * t)
yQ = yenv * sin(2 * pi * fc * t)
if not retquad and not retenv:
return yI
if not retquad and retenv:
return yI, yenv
if retquad and not retenv:
return yI, yQ
if retquad and retenv:
return yI, yQ, yenv
def chirp(t, f0, t1, f1, method='linear', phi=0, vertex_zero=True):
"""Frequency-swept cosine generator.
In the following, 'Hz' should be interpreted as 'cycles per unit';
there is no requirement here that the unit is one second. The
important distinction is that the units of rotation are cycles, not
radians. Likewise, `t` could be a measurement of space instead of time.
Parameters
----------
t : array_like
Times at which to evaluate the waveform.
f0 : float
Frequency (e.g. Hz) at time t=0.
t1 : float
Time at which `f1` is specified.
f1 : float
Frequency (e.g. Hz) of the waveform at time `t1`.
method : {'linear', 'quadratic', 'logarithmic', 'hyperbolic'}, optional
Kind of frequency sweep. If not given, `linear` is assumed. See
Notes below for more details.
phi : float, optional
Phase offset, in degrees. Default is 0.
vertex_zero : bool, optional
This parameter is only used when `method` is 'quadratic'.
It determines whether the vertex of the parabola that is the graph
of the frequency is at t=0 or t=t1.
Returns
-------
y : ndarray
A numpy array containing the signal evaluated at `t` with the
requested time-varying frequency. More precisely, the function
returns ``cos(phase + (pi/180)*phi)`` where `phase` is the integral
(from 0 to `t`) of ``2*pi*f(t)``. ``f(t)`` is defined below.
See Also
--------
sweep_poly
Notes
-----
There are four options for the `method`. The following formulas give
the instantaneous frequency (in Hz) of the signal generated by
`chirp()`. For convenience, the shorter names shown below may also be
used.
linear, lin, li:
``f(t) = f0 + (f1 - f0) * t / t1``
quadratic, quad, q:
The graph of the frequency f(t) is a parabola through (0, f0) and
(t1, f1). By default, the vertex of the parabola is at (0, f0).
If `vertex_zero` is False, then the vertex is at (t1, f1). The
formula is:
if vertex_zero is True:
``f(t) = f0 + (f1 - f0) * t**2 / t1**2``
else:
``f(t) = f1 - (f1 - f0) * (t1 - t)**2 / t1**2``
To use a more general quadratic function, or an arbitrary
polynomial, use the function `scipy.signal.waveforms.sweep_poly`.
logarithmic, log, lo:
``f(t) = f0 * (f1/f0)**(t/t1)``
f0 and f1 must be nonzero and have the same sign.
This signal is also known as a geometric or exponential chirp.
hyperbolic, hyp:
``f(t) = f0*f1*t1 / ((f0 - f1)*t + f1*t1)``
f0 and f1 must be nonzero.
"""
# 'phase' is computed in _chirp_phase, to make testing easier.
phase = _chirp_phase(t, f0, t1, f1, method, vertex_zero)
# Convert phi to radians.
phi *= pi / 180
return cos(phase + phi)
def _chirp_phase(t, f0, t1, f1, method='linear', vertex_zero=True):
"""
Calculate the phase used by chirp_phase to generate its output.
See `chirp_phase` for a description of the arguments.
"""
t = asarray(t)
f0 = float(f0)
t1 = float(t1)
f1 = float(f1)
if method in ['linear', 'lin', 'li']:
beta = (f1 - f0) / t1
phase = 2 * pi * (f0 * t + 0.5 * beta * t * t)
elif method in ['quadratic', 'quad', 'q']:
beta = (f1 - f0) / (t1 ** 2)
if vertex_zero:
phase = 2 * pi * (f0 * t + beta * t ** 3 / 3)
else:
phase = 2 * pi * (f1 * t + beta * ((t1 - t) ** 3 - t1 ** 3) / 3)
elif method in ['logarithmic', 'log', 'lo']:
if f0 * f1 <= 0.0:
raise ValueError("For a logarithmic chirp, f0 and f1 must be "
"nonzero and have the same sign.")
if f0 == f1:
phase = 2 * pi * f0 * t
else:
beta = t1 / log(f1 / f0)
phase = 2 * pi * beta * f0 * (pow(f1 / f0, t / t1) - 1.0)
elif method in ['hyperbolic', 'hyp']:
if f0 == 0 or f1 == 0:
raise ValueError("For a hyperbolic chirp, f0 and f1 must be "
"nonzero.")
if f0 == f1:
# Degenerate case: constant frequency.
phase = 2 * pi * f0 * t
else:
# Singular point: the instantaneous frequency blows up
# when t == sing.
sing = -f1 * t1 / (f0 - f1)
phase = 2 * pi * (-sing * f0) * log(np.abs(1 - t/sing))
else:
raise ValueError("method must be 'linear', 'quadratic', 'logarithmic',"
" or 'hyperbolic', but a value of %r was given."
% method)
return phase
def sweep_poly(t, poly, phi=0):
"""
Frequency-swept cosine generator, with a time-dependent frequency.
This function generates a sinusoidal function whose instantaneous
frequency varies with time. The frequency at time `t` is given by
the polynomial `poly`.
Parameters
----------
t : ndarray
Times at which to evaluate the waveform.
poly : 1-D array_like or instance of numpy.poly1d
The desired frequency expressed as a polynomial. If `poly` is
a list or ndarray of length n, then the elements of `poly` are
the coefficients of the polynomial, and the instantaneous
frequency is
``f(t) = poly[0]*t**(n-1) + poly[1]*t**(n-2) + ... + poly[n-1]``
If `poly` is an instance of numpy.poly1d, then the
instantaneous frequency is
``f(t) = poly(t)``
phi : float, optional
Phase offset, in degrees, Default: 0.
Returns
-------
sweep_poly : ndarray
A numpy array containing the signal evaluated at `t` with the
requested time-varying frequency. More precisely, the function
returns ``cos(phase + (pi/180)*phi)``, where `phase` is the integral
(from 0 to t) of ``2 * pi * f(t)``; ``f(t)`` is defined above.
See Also
--------
chirp
Notes
-----
.. versionadded:: 0.8.0
If `poly` is a list or ndarray of length `n`, then the elements of
`poly` are the coefficients of the polynomial, and the instantaneous
frequency is:
``f(t) = poly[0]*t**(n-1) + poly[1]*t**(n-2) + ... + poly[n-1]``
If `poly` is an instance of `numpy.poly1d`, then the instantaneous
frequency is:
``f(t) = poly(t)``
Finally, the output `s` is:
``cos(phase + (pi/180)*phi)``
where `phase` is the integral from 0 to `t` of ``2 * pi * f(t)``,
``f(t)`` as defined above.
"""
# 'phase' is computed in _sweep_poly_phase, to make testing easier.
phase = _sweep_poly_phase(t, poly)
# Convert to radians.
phi *= pi / 180
return cos(phase + phi)
def _sweep_poly_phase(t, poly):
"""
Calculate the phase used by sweep_poly to generate its output.
See `sweep_poly` for a description of the arguments.
"""
# polyint handles lists, ndarrays and instances of poly1d automatically.
intpoly = polyint(poly)
phase = 2 * pi * polyval(intpoly, t)
return phase
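# Minimal usage sketch (illustrative; mirrors the docstring examples above and
# assumes matplotlib is available for plotting):
if __name__ == "__main__":
    import matplotlib.pyplot as plt

    t = np.linspace(0, 10, 5001)
    # sweep linearly from 6 Hz down to 1 Hz over 10 seconds
    w = chirp(t, f0=6, t1=10, f1=1, method='linear')
    # the same sweep expressed as an explicit frequency polynomial: f(t) = 6 - 0.5*t
    s = sweep_poly(t, [-0.5, 6.0])
    plt.plot(t, w, label='chirp')
    plt.plot(t, s, '--', label='sweep_poly')
    plt.legend()
    plt.show()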
| mit |