repo_name (stringlengths 7-79) | path (stringlengths 4-179) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 959-798k) | license (stringclasses, 15 values) |
---|---|---|---|---|---|
olafhauk/mne-python | tutorials/time-freq/plot_sensors_time_frequency.py | 5 | 7867 |
"""
.. _tut-sensors-time-freq:
============================================
Frequency and time-frequency sensor analysis
============================================
The objective is to show you how to explore the spectral content
of your data (frequency and time-frequency). Here we'll work on Epochs.
We will use this dataset: :ref:`somato-dataset`. It contains so-called event
related synchronizations (ERS) / desynchronizations (ERD) in the beta band.
""" # noqa: E501
# Authors: Alexandre Gramfort <[email protected]>
# Stefan Appelhoff <[email protected]>
# Richard Höchenberger <[email protected]>
#
# License: BSD (3-clause)
import os.path as op
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.time_frequency import tfr_morlet, psd_multitaper, psd_welch
from mne.datasets import somato
###############################################################################
# Set parameters
data_path = somato.data_path()
subject = '01'
task = 'somato'
raw_fname = op.join(data_path, 'sub-{}'.format(subject), 'meg',
'sub-{}_task-{}_meg.fif'.format(subject, task))
# Setup for reading the raw data
raw = mne.io.read_raw_fif(raw_fname)
events = mne.find_events(raw, stim_channel='STI 014')
# picks MEG gradiometers
picks = mne.pick_types(raw.info, meg='grad', eeg=False, eog=True, stim=False)
# Construct Epochs
event_id, tmin, tmax = 1, -1., 3.
baseline = (None, 0)
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=baseline, reject=dict(grad=4000e-13, eog=350e-6),
preload=True)
epochs.resample(200., npad='auto') # resample to reduce computation time
###############################################################################
# Frequency analysis
# ------------------
#
# We start by exploring the frequency content of our epochs.
###############################################################################
# Let's first check out all channel types by averaging across epochs.
epochs.plot_psd(fmin=2., fmax=40., average=True, spatial_colors=False)
###############################################################################
# Now let's take a look at the spatial distributions of the PSD.
epochs.plot_psd_topomap(ch_type='grad', normalize=True)
###############################################################################
# Alternatively, you can also create PSDs from Epochs objects with functions
# that start with ``psd_`` such as
# :func:`mne.time_frequency.psd_multitaper` and
# :func:`mne.time_frequency.psd_welch`.
f, ax = plt.subplots()
psds, freqs = psd_multitaper(epochs, fmin=2, fmax=40, n_jobs=1)
psds = 10. * np.log10(psds)
psds_mean = psds.mean(0).mean(0)
psds_std = psds.mean(0).std(0)
ax.plot(freqs, psds_mean, color='k')
ax.fill_between(freqs, psds_mean - psds_std, psds_mean + psds_std,
color='k', alpha=.5)
ax.set(title='Multitaper PSD (gradiometers)', xlabel='Frequency (Hz)',
ylabel='Power Spectral Density (dB)')
plt.show()
###############################################################################
# Notably, :func:`mne.time_frequency.psd_welch` supports the keyword argument
# ``average``, which specifies how to estimate the PSD based on the individual
# windowed segments. The default is ``average='mean'``, which simply calculates
# the arithmetic mean across segments. Specifying ``average='median'``, in
# contrast, returns the PSD based on the median of the segments (corrected for
# bias relative to the mean), which is a more robust measure.
# Estimate PSDs based on "mean" and "median" averaging for comparison.
kwargs = dict(fmin=2, fmax=40, n_jobs=1)
psds_welch_mean, freqs_mean = psd_welch(epochs, average='mean', **kwargs)
psds_welch_median, freqs_median = psd_welch(epochs, average='median', **kwargs)
# Convert power to dB scale.
psds_welch_mean = 10 * np.log10(psds_welch_mean)
psds_welch_median = 10 * np.log10(psds_welch_median)
# We will only plot the PSD for a single sensor in the first epoch.
ch_name = 'MEG 0122'
ch_idx = epochs.info['ch_names'].index(ch_name)
epo_idx = 0
_, ax = plt.subplots()
ax.plot(freqs_mean, psds_welch_mean[epo_idx, ch_idx, :], color='k',
ls='-', label='mean of segments')
ax.plot(freqs_median, psds_welch_median[epo_idx, ch_idx, :], color='k',
ls='--', label='median of segments')
ax.set(title='Welch PSD ({}, Epoch {})'.format(ch_name, epo_idx),
xlabel='Frequency (Hz)', ylabel='Power Spectral Density (dB)')
ax.legend(loc='upper right')
plt.show()
###############################################################################
# Lastly, we can also retrieve the unaggregated segments by passing
# ``average=None`` to :func:`mne.time_frequency.psd_welch`. The dimensions of
# the returned array are ``(n_epochs, n_sensors, n_freqs, n_segments)``.
psds_welch_unagg, freqs_unagg = psd_welch(epochs, average=None, **kwargs)
print(psds_welch_unagg.shape)
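###############################################################################
# As an extra sanity check (a small sketch added here, not part of the
# original tutorial), the unaggregated segments can be combined by hand with
# NumPy. Keep in mind that ``psd_welch`` bias-corrects its median estimate
# (see above), so a plain ``np.median`` over the segment axis will differ
# from ``psds_welch_median`` by a constant factor.
psds_manual_mean = psds_welch_unagg.mean(axis=-1)
psds_manual_median = np.median(psds_welch_unagg, axis=-1)
print(psds_manual_mean.shape, psds_manual_median.shape)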
###############################################################################
# .. _inter-trial-coherence:
#
# Time-frequency analysis: power and inter-trial coherence
# --------------------------------------------------------
#
# We now compute time-frequency representations (TFRs) from our Epochs.
# We'll look at power and inter-trial coherence (ITC).
#
# To do this, we'll use the function :func:`mne.time_frequency.tfr_morlet`,
# but you can also use :func:`mne.time_frequency.tfr_multitaper`
# or :func:`mne.time_frequency.tfr_stockwell`.
# define frequencies of interest (log-spaced)
freqs = np.logspace(*np.log10([6, 35]), num=8)
n_cycles = freqs / 2.  # a different number of cycles per frequency
power, itc = tfr_morlet(epochs, freqs=freqs, n_cycles=n_cycles, use_fft=True,
return_itc=True, decim=3, n_jobs=1)
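###############################################################################
# The other TFR estimators mentioned above accept a very similar call. As a
# brief illustration (added here as a sketch; the ``time_bandwidth`` value is
# just an example, not a recommendation from this tutorial), the multitaper
# estimator can be swapped in directly:
power_mt, itc_mt = mne.time_frequency.tfr_multitaper(
    epochs, freqs=freqs, n_cycles=n_cycles, time_bandwidth=4.0,
    return_itc=True, decim=3, n_jobs=1)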
###############################################################################
# Inspect power
# -------------
#
# .. note::
# The generated figures are interactive. In the topo you can click
# on an image to visualize the data for one sensor.
# You can also select a portion in the time-frequency plane to
# obtain a topomap for a certain time-frequency region.
power.plot_topo(baseline=(-0.5, 0), mode='logratio', title='Average power')
power.plot([82], baseline=(-0.5, 0), mode='logratio', title=power.ch_names[82])
fig, axis = plt.subplots(1, 2, figsize=(7, 4))
power.plot_topomap(ch_type='grad', tmin=0.5, tmax=1.5, fmin=8, fmax=12,
baseline=(-0.5, 0), mode='logratio', axes=axis[0],
title='Alpha', show=False)
power.plot_topomap(ch_type='grad', tmin=0.5, tmax=1.5, fmin=13, fmax=25,
baseline=(-0.5, 0), mode='logratio', axes=axis[1],
title='Beta', show=False)
mne.viz.tight_layout()
plt.show()
###############################################################################
# Joint Plot
# ----------
# You can also create a joint plot showing both the aggregated TFR
# across channels and topomaps at specific times and frequencies to obtain
# a quick overview regarding oscillatory effects across time and space.
power.plot_joint(baseline=(-0.5, 0), mode='mean', tmin=-.5, tmax=2,
timefreqs=[(.5, 10), (1.3, 8)])
###############################################################################
# Inspect ITC
# -----------
itc.plot_topo(title='Inter-Trial coherence', vmin=0., vmax=1., cmap='Reds')
###############################################################################
# .. note::
# Baseline correction can be applied to the power estimate itself or
# performed on the fly in the plotting calls. To illustrate the latter, the
# explicit call is shown here commented out:
# ``power.apply_baseline(baseline=(-0.5, 0), mode='logratio')``
#
# Exercise
# --------
#
# - Visualize the inter-trial coherence values as topomaps, as done with
#   power above (one possible solution sketch follows below).
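###############################################################################
# One possible solution sketch for the exercise (added as a hint, not part of
# the original tutorial; the time/frequency window is illustrative): the ITC
# object shares the plotting API of ``power``, so ``plot_topomap`` works the
# same way.
itc.plot_topomap(ch_type='grad', tmin=0.5, tmax=1.5, fmin=8, fmax=12,
                 title='ITC (8-12 Hz)')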
| bsd-3-clause
ivastar/clear | grizli_reduction.py | 1 | 33227 |
#!/home/rsimons/miniconda2/bin/python
import matplotlib
import time
import os
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import fits
import drizzlepac
import grizli
import glob
from grizli import utils
import importlib
from grizli.prep import process_direct_grism_visit
#from hsaquery import query, overlaps
from grizli.pipeline import auto_script
from grizli.multifit import GroupFLT, MultiBeam, get_redshift_fit_defaults
import os, sys, argparse
from grizli.pipeline import photoz
from astropy.table import Table
import eazy
from joblib import Parallel, delayed
from glob import glob
from mastquery import query, overlaps
import gc
plt.ioff()
plt.close('all')
def parse():
'''
Parse command line arguments
'''
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter, description='''CLEAR grizli extractions.''')
parser.add_argument('-field', '--field', default='GS1', help='field to extract')
parser.add_argument('-mag_lim', '--mag_lim', type = int, default=25, help='faint magnitude limit of sources to fit')
parser.add_argument('-mag_max', '--mag_max', type = int, default= 0, help='bright magnitude limit of sources to fit')
parser.add_argument('-zr_min', '--zr_min', type = float, default= 0., help='minimum redshift of the fit range')
parser.add_argument('-zr_max', '--zr_max', type = float, default= 12., help='maximum redshift of the fit range')
parser.add_argument('-do_files', '--do_files', default = True, help = 'bool to load files')
parser.add_argument('-do_model', '--do_model', default = True, help = 'bool to model spectra')
parser.add_argument('-run_parallel', '--run_parallel', action = "store_true", default = False, help = 'run the fits in parallel')
parser.add_argument('-fwop', '--fwop', action = "store_true", default = False, help = 'fit without photometry')
parser.add_argument('-do_retrieve', '--do_retrieve', action = "store_true", default = False, help = 'bool to retrieve files from MAST')
parser.add_argument('-on_jase', '--on_jase', action = "store_true", default = False, help = 'use local (jase) directory paths')
parser.add_argument('-do_prep', '--do_prep', action = "store_true", default = False, help = 'bool to PREP files with Grizli')
parser.add_argument('-do_new_model', '--do_new_model', action = "store_true", default = False, help = 'bool to create new Grizli models')
parser.add_argument('-do_beams', '--do_beams', action = "store_true", default = False, help = 'bool to write beams files')
parser.add_argument('-do_fit', '--do_fit', action = "store_true", default = False, help = 'bool to fit modeled spectra')
parser.add_argument('-use_psf', '--use_psf', action = "store_true", default = False, help = 'use psf extraction in fitting routine')
parser.add_argument('-make_catalog', '--make_catalog', action = "store_true", default = False, help = 'write a catalog of model object IDs and magnitudes')
parser.add_argument('-use_phot', '--use_phot', action = "store_true", default = False, help = 'use photometry in the fitting routine')
parser.add_argument('-fit_min_id', '--fit_min_id', type = int, default = 0, help = 'ID to start on for the fit')
parser.add_argument('-n_jobs', '--n_jobs', type = int, default = -1, help = 'number of threads')
parser.add_argument('-id_choose', '--id_choose', type = int, default = None, help = 'ID to fit')
parser.add_argument('-pso', '--pso', type = int, default = 1, help = 'phot_scale_order')
parser.add_argument('-PATH_TO_RAW' , '--PATH_TO_RAW' , default = '/user/rsimons/grizli_extractions/RAW', help = 'path to RAW directory')
parser.add_argument('-PATH_TO_PREP' , '--PATH_TO_PREP' , default = '/user/rsimons/grizli_extractions/PREP', help = 'path to prep directory')
parser.add_argument('-PATH_TO_SCRIPTS', '--PATH_TO_SCRIPTS', default = '/user/rsimons/git/clear_local', help = 'path to scripts directory')
parser.add_argument('-PATH_TO_CATS' , '--PATH_TO_CATS' , default = '/user/rsimons/grizli_extractions/Catalogs', help = 'path to catalog directory')
parser.add_argument('-PATH_TO_HOME' , '--PATH_TO_HOME' , default = '/user/rsimons/grizli_extractions', help = 'path to home directory sans field')
args = vars(parser.parse_args())
return args
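# Example invocation (a sketch only; the field name, flag combination, and
# paths are illustrative and should be adapted to your own setup):
#   python grizli_reduction.py -field GS1 -mag_lim 25 -do_retrieve -do_prep \
#       -do_new_model -do_beams -do_fit -run_parallel -n_jobs 4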
def readEazyBinary(MAIN_OUTPUT_FILE='photz', OUTPUT_DIRECTORY='./OUTPUT', CACHE_FILE='Same'):
"""
Author: Gabe Brammer
This function has been clipped from eazyPy.py in the threedhst git repository
https://github.com/gbrammer/threedhst/tree/master/threedhst
tempfilt, coeffs, temp_sed, pz = readEazyBinary(MAIN_OUTPUT_FILE='photz', \
OUTPUT_DIRECTORY='./OUTPUT', \
CACHE_FILE = 'Same')
Read Eazy BINARY_OUTPUTS files into structure data.
If the BINARY_OUTPUTS files are not in './OUTPUT', provide either a relative or absolute path
in the OUTPUT_DIRECTORY keyword.
By default assumes that CACHE_FILE is MAIN_OUTPUT_FILE+'.tempfilt'.
Specify the full filename if otherwise.
"""
#root='COSMOS/OUTPUT/cat3.4_default_lines_zp33sspNoU'
root = OUTPUT_DIRECTORY+'/'+MAIN_OUTPUT_FILE
###### .tempfilt
if CACHE_FILE == 'Same':
CACHE_FILE = root+'.tempfilt'
if os.path.exists(CACHE_FILE) is False:
print(('File, %s, not found.' %(CACHE_FILE)))
return -1,-1,-1,-1
f = open(CACHE_FILE,'rb')
s = np.fromfile(file=f,dtype=np.int32, count=4)
NFILT=s[0]
NTEMP=s[1]
NZ=s[2]
NOBJ=s[3]
tempfilt = np.fromfile(file=f,dtype=np.double,count=NFILT*NTEMP*NZ).reshape((NZ,NTEMP,NFILT)).transpose()
lc = np.fromfile(file=f,dtype=np.double,count=NFILT)
zgrid = np.fromfile(file=f,dtype=np.double,count=NZ)
fnu = np.fromfile(file=f,dtype=np.double,count=NFILT*NOBJ).reshape((NOBJ,NFILT)).transpose()
efnu = np.fromfile(file=f,dtype=np.double,count=NFILT*NOBJ).reshape((NOBJ,NFILT)).transpose()
f.close()
tempfilt = {'NFILT':NFILT,'NTEMP':NTEMP,'NZ':NZ,'NOBJ':NOBJ,\
'tempfilt':tempfilt,'lc':lc,'zgrid':zgrid,'fnu':fnu,'efnu':efnu}
###### .coeff
f = open(root+'.coeff','rb')
s = np.fromfile(file=f,dtype=np.int32, count=4)
NFILT=s[0]
NTEMP=s[1]
NZ=s[2]
NOBJ=s[3]
coeffs = np.fromfile(file=f,dtype=np.double,count=NTEMP*NOBJ).reshape((NOBJ,NTEMP)).transpose()
izbest = np.fromfile(file=f,dtype=np.int32,count=NOBJ)
tnorm = np.fromfile(file=f,dtype=np.double,count=NTEMP)
f.close()
coeffs = {'NFILT':NFILT,'NTEMP':NTEMP,'NZ':NZ,'NOBJ':NOBJ,\
'coeffs':coeffs,'izbest':izbest,'tnorm':tnorm}
###### .temp_sed
f = open(root+'.temp_sed','rb')
s = np.fromfile(file=f,dtype=np.int32, count=3)
NTEMP=s[0]
NTEMPL=s[1]
NZ=s[2]
templam = np.fromfile(file=f,dtype=np.double,count=NTEMPL)
temp_seds = np.fromfile(file=f,dtype=np.double,count=NTEMPL*NTEMP).reshape((NTEMP,NTEMPL)).transpose()
da = np.fromfile(file=f,dtype=np.double,count=NZ)
db = np.fromfile(file=f,dtype=np.double,count=NZ)
f.close()
temp_sed = {'NTEMP':NTEMP,'NTEMPL':NTEMPL,'NZ':NZ,\
'templam':templam,'temp_seds':temp_seds,'da':da,'db':db}
###### .pz
if os.path.exists(root+'.pz'):
f = open(root+'.pz','rb')
s = np.fromfile(file=f,dtype=np.int32, count=2)
NZ=s[0]
NOBJ=s[1]
chi2fit = np.fromfile(file=f,dtype=np.double,count=NZ*NOBJ).reshape((NOBJ,NZ)).transpose()
### This will break if APPLY_PRIOR No
s = np.fromfile(file=f,dtype=np.int32, count=1)
if len(s) > 0:
NK = s[0]
kbins = np.fromfile(file=f,dtype=np.double,count=NK)
priorzk = np.fromfile(file=f, dtype=np.double, count=NZ*NK).reshape((NK,NZ)).transpose()
kidx = np.fromfile(file=f,dtype=np.int32,count=NOBJ)
pz = {'NZ':NZ,'NOBJ':NOBJ,'NK':NK, 'chi2fit':chi2fit, 'kbins':kbins, 'priorzk':priorzk,'kidx':kidx}
else:
pz = None
f.close()
else:
pz = None
if False:
f = open(root+'.zbin','rb')
s = np.fromfile(file=f,dtype=np.int32, count=1)
NOBJ=s[0]
z_a = np.fromfile(file=f,dtype=np.double,count=NOBJ)
z_p = np.fromfile(file=f,dtype=np.double,count=NOBJ)
z_m1 = np.fromfile(file=f,dtype=np.double,count=NOBJ)
z_m2 = np.fromfile(file=f,dtype=np.double,count=NOBJ)
z_peak = np.fromfile(file=f,dtype=np.double,count=NOBJ)
f.close()
###### Done.
return tempfilt, coeffs, temp_sed, pz
class Pointing():
""" Generalization of GN1, GS1, ERSPRIME, etc
To change field-dependent catalog, seg map, ref image, and padding
only need to change them here.
"""
def __init__(self, field, ref_filter):
if 'N' in field.upper():
self.pad = 200
#self.radec_catalog = PATH_TO_CATS + '/goodsN_radec.cat'
self.radec_catalog = PATH_TO_CATS + '/gdn_radec_f140_14_24.cat'
self.seg_map = PATH_TO_CATS + '/Goods_N_plus_seg.fits'
self.catalog = PATH_TO_CATS + '/goodsn-F105W-astrodrizzle-v4.4_drz_sub_plus.cat'
#self.catalog = PATH_TO_CATS + '/goodsn-v4.4-withunmatched.cat'
self.ref_image = PATH_TO_CATS + '/goodsn-F105W-astrodrizzle-v4.4_drz_sci.fits'
#self.tempfilt, self.coeffs, self.temp_sed, self.pz = readEazyBinary(MAIN_OUTPUT_FILE='goodsn_3dhst.v4.4', OUTPUT_DIRECTORY=PATH_TO_CATS, CACHE_FILE='Same')
self.params = {}
#self.params['CATALOG_FILE'] = PATH_TO_CATS + '/{0}_3dhst.{1}.cats/Catalog/{0}_3dhst.{1}.cat'.format('goodsn', 'v4.3')
self.params['CATALOG_FILE'] = PATH_TO_CATS + '/{0}_3dhst.{1}.cats/Catalog/{0}_3dhst.{1}.cat'.format('goodsn', 'v4.4')
self.params['Z_STEP'] = 0.002
self.params['Z_MAX'] = 4
self.params['MAIN_OUTPUT_FILE'] = '{0}_3dhst.{1}.eazypy'.format('goodsn', 'v4.4')
self.params['PRIOR_FILTER'] = 205
self.params['MW_EBV'] = {'aegis':0.0066, 'cosmos':0.0148, 'goodss':0.0069,
'uds':0.0195, 'goodsn':0.0103}['goodsn']
self.params['TEMPLATES_FILE'] = 'templates/fsps_full/tweak_fsps_QSF_12_v3.param'
#self.translate_file = PATH_TO_CATS + '/{0}_3dhst.{1}.cats/Eazy/{0}_3dhst.{1}.translate'.format('goodsn', 'v4.3')
self.translate_file = PATH_TO_CATS + '/{0}_{1}.translate'.format('goodsn', 'v4.4')
elif 'S' in field.upper():
self.pad = 200 # grizli default
#self.radec_catalog = '../Catalogs/goodsS_radec.cat'
#self.radec_catalog = PATH_TO_CATS + '/goodsS_radec.cat'
self.radec_catalog = PATH_TO_CATS + '/gds_radec_f140_14_24.cat'
self.seg_map = PATH_TO_CATS + '/Goods_S_plus_seg.fits'
self.catalog = PATH_TO_CATS + '/goodss-F105W-astrodrizzle-v4.3_drz_sub_plus.cat'
#self.catalog = PATH_TO_CATS + '/goodss-v4.4-withunmatched.cat'
self.ref_image = PATH_TO_CATS + '/goodss-F105W-astrodrizzle-v4.3_drz_sci.fits'
#self.tempfilt, self.coeffs, self.temp_sed, self.pz = readEazyBinary(MAIN_OUTPUT_FILE='goodss_3dhst.v4.3', OUTPUT_DIRECTORY=PATH_TO_CATS, CACHE_FILE='Same')
self.params = {}
#self.params['CATALOG_FILE'] = PATH_TO_CATS + '/{0}_3dhst.{1}.cats/Catalog/{0}_3dhst.{1}.cat'.format('goodss', 'v4.3')
self.params['CATALOG_FILE'] = PATH_TO_CATS + '/{0}_3dhst.{1}.cats/Catalog/{0}_3dhst.{1}.cat'.format('goodss', 'v4.4')
self.params['Z_STEP'] = 0.002
self.params['Z_MAX'] = 4
self.params['MAIN_OUTPUT_FILE'] = '{0}_3dhst.{1}.eazypy'.format('goodss', 'v4.4')
self.params['PRIOR_FILTER'] = 205
self.params['MW_EBV'] = {'aegis':0.0066, 'cosmos':0.0148, 'goodss':0.0069,
'uds':0.0195, 'goodsn':0.0103}['goodss']
self.params['TEMPLATES_FILE'] = 'templates/fsps_full/tweak_fsps_QSF_12_v3.param'
#self.translate_file = PATH_TO_CATS + '/{0}_3dhst.{1}.cats/Eazy/{0}_3dhst.{1}.translate'.format('goodss', 'v4.3')
self.translate_file = PATH_TO_CATS + '/{0}_{1}.translate'.format('goodss', 'v4.4')
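# Usage sketch (illustrative only; PATH_TO_CATS must already be defined at
# module level, as it is in the __main__ block below, before a Pointing is
# constructed):
#   p = Pointing(field='GN2', ref_filter='F105W')
#   print(p.ref_image, p.seg_map, p.catalog)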
def grizli_getfiles(run = True):
if not run: return
else: print('Running grizli_getfiles...')
os.chdir(PATH_TO_PREP)
files = glob('%s/*flt.fits'%PATH_TO_RAW)
info = grizli.utils.get_flt_info(files)
visits, filters = grizli.utils.parse_flt_files(info=info, uniquename=True)
return visits, filters
def grizli_prep(visits, field = '', run = True):
if not run: return
else: print('Running grizli_prep...')
print ('\n\n\n\n\n\n\n')
product_names = np.array([visit['product'] for visit in visits])
filter_names = np.array([visit['product'].split('-')[-1] for visit in visits])
basenames = np.array([visit['product'].split('.')[0]+'.0' for visit in visits])
for ref_grism, ref_filter in [('G102', 'F105W'), ('G141', 'F140W')]:
print ('Processing %s + %s visits'%(ref_grism, ref_filter))
for v, visit in enumerate(visits):
product = product_names[v]
basename = basenames[v]
filt1 = filter_names[v]
#print (filt1.lower())
field_in_contest = basename.split('-')[0]
#print (field_in_contest)
#if field_in_contest.upper() == field.upper() or field_in_contest.upper() in overlapping_fields[field]:
if (ref_filter.lower() == filt1.lower()):
#found a direct image, now search for grism counterpart
if len(np.where((basenames == basename) & (filter_names == ref_grism.lower()))[0]) > 0:
grism_index= np.where((basenames == basename) & (filter_names == ref_grism.lower()))[0][0]
#print(grism_index)
p = Pointing(field = field, ref_filter = ref_filter)
radec_catalog = p.radec_catalog
print (field_in_contest, visits[grism_index], radec_catalog)
#radec_catalog = None
status = process_direct_grism_visit(direct = visit,
grism = visits[grism_index],
radec = radec_catalog,
align_mag_limits = [14, 24])
else:
print ('no grism associated with direct image %s'%basename)
return visits, filters
def grizli_model(visits, field = '', ref_filter_1 = 'F105W', ref_grism_1 = 'G102', ref_filter_2 = 'F140W', ref_grism_2 = 'G141', run = True, new_model = False, mag_lim = 25):
if run == False: return
all_grism_files = []
all_direct_files = []
product_names = np.array([visit['product'] for visit in visits])
filter_names = np.array([visit['product'].split('-')[-1] for visit in visits])
basenames = np.array([visit['product'].split('.')[0]+'.0' for visit in visits])
for v, visit in enumerate(visits):
product = product_names[v]
basename = basenames[v]
filt1 = filter_names[v]
if (ref_filter_1.lower() in filt1) or (ref_filter_2.lower() in filt1):
all_direct_files.extend(visit['files'])
grism_index_1 = np.where((basenames == basename) & (filter_names == ref_grism_1.lower()))[0]
grism_index_2 = np.where((basenames == basename) & (filter_names == ref_grism_2.lower()))[0]
if len(grism_index_1) > 0: all_grism_files.extend(visits[grism_index_1[0]]['files'])
if len(grism_index_2) > 0: all_grism_files.extend(visits[grism_index_2[0]]['files'])
p = Pointing(field=field, ref_filter=ref_filter_1)
if not new_model: print('Loading contamination models...')
else: print('Initializing contamination models...')
grp = GroupFLT(
grism_files=all_grism_files,
direct_files=[],
ref_file = p.ref_image,
seg_file = p.seg_map,
catalog = p.catalog,
pad=p.pad,
cpu_count=4)
if new_model:
print('Computing contamination models with flat model...')
grp.compute_full_model(mag_limit=25, cpu_count = 4)
print('Refine continuum/contamination models with poly_order polynomial, subtracting off contamination..')
grp.refine_list(poly_order=2, mag_limits=[16, 24], verbose=False)
#poly_order = 3
print('Saving contamination models')
grp.save_full_data()
return grp
def grizli_beams(grp, id, min_id, mag, field = '', mag_lim = 35, mag_lim_lower = 35,fcontam = 0.2):
if (mag <= mag_lim) & (mag >=mag_lim_lower) & (id > min_id):
#print(id, mag)
beams = grp.get_beams(id, size=80)
# beam extraction can be done separately: save the beams here, then load them later without needing the contamination models
if beams != []:
print("beams: ", beams)
#mb = grizli.multifit.MultiBeam(beams, fcontam=1.0, group_name=field)
mb = grizli.multifit.MultiBeam(beams, fcontam=fcontam, group_name=field)
mb.write_master_fits()
def grizli_fit(id, min_id, mag, field = '', mag_lim = 35, mag_lim_lower = 35, run = True,
id_choose = None, ref_filter = 'F105W', use_pz_prior = True, use_phot = True,
scale_phot = True, templ0 = None, templ1 = None, ep = None, pline = None,
fcontam = 0.2, phot_scale_order = 1, use_psf = False, fit_without_phot = True, zr = [0., 12.]):
if os.path.exists(field + '_' + '%.5i.full.fits'%id): return
if (mag <= mag_lim) & (mag >=mag_lim_lower) & (id > min_id):
if (id_choose is not None) & (id != id_choose): return
#if os.path.isfile(field + '_' + '%.5i.stack.fits'%id): return
if os.path.isfile(field + '_' + '%.5i.beams.fits'%id):
print('Reading in beams.fits file for %.5i'%id)
mb = grizli.multifit.MultiBeam(field + '_' + '%.5i.beams.fits'%id, fcontam=fcontam, group_name=field)
wave = np.linspace(2000,2.5e4,100)
try:
print ('creating poly_templates...')
poly_templates = grizli.utils.polynomial_templates(wave=wave, order=7,line=False)
pfit = mb.template_at_z(z=0, templates=poly_templates, fit_background=True, fitter='lstsq', fwhm=1400, get_uncertainties=2)
except:
print ('exception in poly_templates...')
return
# Fit polynomial model for initial continuum subtraction
if pfit != None:
#try:
try:
print ('drizzle_grisms_and_PAs...')
hdu, fig = mb.drizzle_grisms_and_PAs(size=32, fcontam=fcontam, flambda=False, scale=1,
pixfrac=0.5, kernel='point', make_figure=True, usewcs=False,
zfit=pfit,diff=True)
# Save drizzled ("stacked") 2D trace as PNG and FITS
fig.savefig('{0}_diff_{1:05d}.stack.png'.format(field, id))
hdu.writeto('{0}_diff_{1:05d}.stack.fits'.format(field, id), clobber=True)
except:
pass
if use_pz_prior:
#use redshift prior from z_phot
prior = np.zeros((2, len(p.tempfilt['zgrid'])))
prior[0] = p.tempfilt['zgrid']
prior[1] = p.pz['chi2fit'][:,id]
else:
prior = None
if fit_without_phot: phot = None
else:
print ('reading phot...')
tab = utils.GTable()
tab['ra'], tab['dec'], tab['id'] = [mb.ra], [mb.dec], id
phot, ii, dd = ep.get_phot_dict(tab['ra'][0], tab['dec'][0])
# Gabe suggests use_psf = True for point sources
if False:
try:
out = grizli.fitting.run_all(
id,
t0=templ0,
t1=templ1,
fwhm=1200,
zr=zr, #zr=[0.0, 12.0], #suggests zr = [0, 12.0] if we want to extend redshift fit
dz=[0.004, 0.0005],
fitter='nnls',
group_name=field,# + '_%i'%phot_scale_order,
fit_stacks=False, #suggests fit_stacks = False, fit to FLT files
prior=None,
fcontam=fcontam, #suggests fcontam = 0.2
pline=pline,
mask_sn_limit=np.inf, #suggests mask_sn_limit = np.inf
fit_only_beams=True, #suggests fit_only_beams = True
fit_beams=False, #suggests fit_beams = False
root=field,
fit_trace_shift=False,
bad_pa_threshold = np.inf, #suggests bad_pa_threshold = np.inf
phot=phot,
verbose=True,
scale_photometry=phot_scale_order,
show_beams=True,
use_psf = use_psf) #default: False
except:
print ('----------------\n----------------\n----------------\n----------------\n----------------\n')
print ('EXCEPTION IN FIT', id, mag)
print ('----------------\n----------------\n----------------\n----------------\n----------------\n')
pass
else:
out = grizli.fitting.run_all(
id,
t0=templ0,
t1=templ1,
fwhm=1200,
zr=zr, #zr=[0.0, 12.0], #suggests zr = [0, 12.0] if we want to extend redshift fit
dz=[0.004, 0.0005],
fitter='nnls',
group_name=field,# + '_%i'%phot_scale_order,
fit_stacks=False, #suggests fit_stacks = False, fit to FLT files
prior=None,
fcontam=fcontam, #suggests fcontam = 0.2
pline=pline,
mask_sn_limit=np.inf, #suggests mask_sn_limit = np.inf
fit_only_beams=True, #suggests fit_only_beams = True
fit_beams=False, #suggests fit_beams = False
root=field,
fit_trace_shift=False,
bad_pa_threshold = np.inf, #suggests bad_pa_threshold = np.inf
phot=phot,
verbose=True,
scale_photometry=phot_scale_order,
show_beams=True,
use_psf = use_psf) #default: False
print('Finished', id, mag)
else: return
def retrieve_archival_data(field, retrieve_bool = False):
if retrieve_bool == False: return
os.chdir(HOME_PATH)
parent = query.run_query(box = None, proposal_id = [14227], instruments=['WFC3/IR', 'ACS/WFC'],
filters = ['G102'], target_name = field)
tabs = overlaps.find_overlaps(parent, buffer_arcmin=0.1,
filters=['G102', 'G141'],
instruments=['WFC3/IR','WFC3/UVIS','ACS/WFC'], close=False)
pids = list(np.unique(tabs[0]['proposal_id']))
tabs = overlaps.find_overlaps(parent, buffer_arcmin=0.1, proposal_id = pids,
filters=['G102', 'G141', 'F098M', 'F105W', 'F125W', 'F140W'],
instruments=['WFC3/IR','WFC3/UVIS','ACS/WFC'], close=False)
footprint_fits_file = glob('*footprint.fits')[0]
jtargname = footprint_fits_file.replace('_footprint.fits', '')  # str.strip() would remove characters, not the suffix
#auto_script.fetch_files(field_root=jtargname, HOME_PATH=HOME_PATH, remove_bad=True, reprocess_parallel=False)
print (pids)
if __name__ == '__main__':
global PATH_TO_RAW, PATH_TO_PREP, PATH_TO_SCRIPTS, HOME_PATH, to_fits
#to_fits = np.array([9116, 16736, 18108, 15610, 19451])
args = parse()
#to_fits = np.array([17829])
#id_choose = 23116
field = args['field']
run_parallel = args['run_parallel']
mag_lim = args['mag_lim']
mag_max = args['mag_max']
files_bool = args['do_files']
retrieve_bool = args['do_retrieve']
prep_bool = args['do_prep']
model_bool = args['do_model']
on_jase = args['on_jase']
new_model = args['do_new_model']
fit_bool = args['do_fit']
beams_bool = args['do_beams']
use_psf = args['use_psf']
fit_min_id = args['fit_min_id']
n_jobs = args['n_jobs']
id_choose = args['id_choose']
phot_scale_order = args['pso']
fit_without_phot = args['fwop']
PATH_TO_SCRIPTS = args['PATH_TO_SCRIPTS']
PATH_TO_CATS = args['PATH_TO_CATS']
#PATH_TO_CATS = '/Users/rsimons/Desktop/clear/Catalogs'
PATH_TO_HOME = args['PATH_TO_HOME']
if on_jase:
PATH_TO_HOME = '/Users/rsimons/Desktop/clear/grizli_extractions'
PATH_TO_SCRIPTS = '/Users/rsimons/Dropbox/git/clear_local'
else:
PATH_TO_HOME = '/Users/rsimons/Desktop/clear/grizli_extractions'
PATH_TO_SCRIPTS = '/Users/rsimons/Dropbox/git/clear_local'
HOME_PATH = PATH_TO_HOME + '/' + field
make_catalog = args['make_catalog']
if fit_without_phot: phot_scale_order = -1
if on_jase:
PATH_TO_PREP = glob(HOME_PATH + '/Prep')[0]
else:
PATH_TO_RAW = glob(HOME_PATH + '/*/RAW')[0]
PATH_TO_PREP = glob(HOME_PATH + '/*/Prep')[0]
print('\n\n\n\n###################\nParameters\n\n')
print('field ', field )
print('mag_lim ', mag_lim )
print('mag_max ', mag_max )
print('files_bool ', files_bool )
print('retrieve_bool ', retrieve_bool )
print('prep_bool ', prep_bool )
print('model_bool ', model_bool )
print('new_model ', new_model )
print('beams_bool ', beams_bool )
print('fit_bool ', fit_bool )
print('use_psf ', use_psf )
print('fit_min_id ', fit_min_id )
print('n_jobs ', n_jobs )
print('id_choose ', id_choose )
print('phot_scale_order ', phot_scale_order )
print('fit_without_phot ', fit_without_phot )
print('PATH_TO_SCRIPTS ', PATH_TO_SCRIPTS )
print('PATH_TO_CATS ', PATH_TO_CATS )
print('PATH_TO_HOME ', PATH_TO_HOME )
print('HOME_PATH ', HOME_PATH )
print('\n\n\n\n####################\n\n\n\n')
if not os.path.isdir(HOME_PATH): os.system('mkdir %s'%HOME_PATH)
print ('Changing to %s'%HOME_PATH)
os.chdir(HOME_PATH)
extra = retrieve_archival_data(field = field, retrieve_bool = retrieve_bool)
print ('Changing to %s'%PATH_TO_PREP)
os.chdir(PATH_TO_PREP)
visits, filters = grizli_getfiles(run = files_bool)
if prep_bool:
grizli_prep(visits = visits, field = field, run = prep_bool)
if new_model:
grp = grizli_model(visits, field = field, ref_filter_1 = 'F105W', ref_grism_1 = 'G102', ref_filter_2 = 'F140W', ref_grism_2 = 'G141',
run = model_bool, new_model = new_model, mag_lim = mag_lim)
if beams_bool:
print ('making beams')
grp = grizli_model(visits, field = field, ref_filter_1 = 'F105W', ref_grism_1 = 'G102', ref_filter_2 = 'F140W', ref_grism_2 = 'G141',
run = model_bool, new_model = False, mag_lim = mag_lim)
Parallel(n_jobs = n_jobs, backend = 'threading')(delayed(grizli_beams)(grp, id = id, min_id = fit_min_id, mag = mag, field = field,
mag_lim = mag_lim, mag_lim_lower = mag_max)
for id, mag in zip(np.array(grp.catalog['NUMBER']), np.array(grp.catalog['MAG_AUTO'])))
if make_catalog:
grp = grizli_model(visits, field = field, ref_filter_1 = 'F105W', ref_grism_1 = 'G102', ref_filter_2 = 'F140W', ref_grism_2 = 'G141',
run = model_bool, new_model = False, mag_lim = mag_lim)
to_save = np.array([grp.catalog['NUMBER'], grp.catalog['MAG_AUTO']])
np.save('/user/rsimons/grizli_extractions/Catalogs/model_catalogs/%s_catalog.npy'%field, to_save)
if fit_bool:
print ('Changing to %s'%PATH_TO_PREP)
os.chdir(PATH_TO_PREP)
templ0 = grizli.utils.load_templates(fwhm=1200, line_complexes=True, stars=False,
full_line_list=None, continuum_list=None,
fsps_templates=True)
# Load individual line templates for fitting the line fluxes
templ1 = grizli.utils.load_templates(fwhm=1200, line_complexes=False, stars=False,
full_line_list=None, continuum_list=None,
fsps_templates=True)
#templ0, templ1 = grizli.utils.load_quasar_templates(uv_line_complex = False, broad_fwhm = 2800,
# narrow_fwhm = 1000, fixed_narrow_lines = True)
p = Pointing(field = field, ref_filter = 'F105W')
pline = {'kernel': 'point', 'pixfrac': 0.2, 'pixscale': 0.1, 'size': 16, 'wcs': None}
if not fit_without_phot:
eazy.symlink_eazy_inputs(path=os.path.dirname(eazy.__file__)+'/data')#, path_is_env=False)
ez = eazy.photoz.PhotoZ(param_file=None, translate_file=p.translate_file,
zeropoint_file=None, params=p.params,
load_prior=True, load_products=False)
ep = photoz.EazyPhot(ez, grizli_templates=templ0, zgrid=ez.zgrid)
else:
ep = None
cat_ = np.load('/user/rsimons/grizli_extractions/Catalogs/model_catalogs/%s_catalog.npy'%field)[()]
nums = cat_[0]
mags = cat_[1]
if run_parallel:
Parallel(n_jobs = n_jobs, backend = 'threading')(delayed(grizli_fit)(id = id, min_id = fit_min_id, mag = mag, field = field,
mag_lim = mag_lim, mag_lim_lower = mag_max, run = fit_bool,
id_choose = id_choose, use_pz_prior = False, use_phot = True,
scale_phot = True, templ0 = templ0, templ1 = templ1,
ep = ep, pline = pline, phot_scale_order = phot_scale_order, use_psf = use_psf, fit_without_phot = fit_without_phot,
zr = [args['zr_min'], args['zr_max']])
for id, mag in zip(nums.astype('int'), mags))
for id, mag in zip(nums.astype('int'), mags):
grizli_fit(id = id, min_id = fit_min_id, mag = mag, field = field,
mag_lim = mag_lim, mag_lim_lower = mag_max, run = fit_bool,
id_choose = id_choose, use_pz_prior = False, use_phot = True,
scale_phot = True, templ0 = templ0, templ1 = templ1,
ep = ep, pline = pline, phot_scale_order = phot_scale_order, use_psf = use_psf, fit_without_phot = fit_without_phot,
zr = [args['zr_min'], args['zr_max']])
print ('Changing to %s'%PATH_TO_SCRIPTS)
os.chdir(PATH_TO_SCRIPTS)
| mit
jakob-skinner/Orbit-Fitting | plotter.py | 2 | 4787 |
#import-libraries-and-data---------------------------------------------------------------------------------------#
import os, numpy as np, functions as f
from matplotlib.gridspec import GridSpec
from matplotlib import pyplot as plt, rcParams
#rcParams.update({'figure.autolayout' : True})
# Select the file.
file = 'data/2144+4211/2144+4211.tbl'
# Create the data variable.
data = np.genfromtxt(file, skip_header=1, usecols=(1, 2, 3, 4, 5))
source = np.genfromtxt(file, dtype=str, skip_header=1, usecols=(8))
# Extract the shorthand name.
system = file.replace('.tbl', '')[5:14]
#define-variables------------------------------------------------------------------------------------------------#
JD, RVp, RVs = [datum[0] for datum in data], [datum[1] for datum in data], [datum[3] for datum in data]
p_err, s_err = [datum[2] for datum in data], [datum[4] for datum in data]
JDp, RVp, p_err = f.adjustment(JD, RVp, p_err)
JDs, RVs, s_err = f.adjustment(JD, RVs, s_err)
#define-functions------------------------------------------------------------------------------------------------#
RV, phases = f.RV, f.phases
#now-do-things!--------------------------------------------------------------------------------------------------#
# Check for HET values, create data filter to pick them out.
HET = [1 if x == 'HET' else 0 for x in source]
# This is less efficient than it could be, but oh well.
# Separate APOGEE and HET observations.
APO_JDp = np.asarray([JDp[i] for i in range(len(JDp)) if not HET[i]])
APO_JDs = np.asarray([JDs[i] for i in range(len(JDs)) if not HET[i]])
APO_RVp = np.asarray([RVp[i] for i in range(len(RVp)) if not HET[i]])
APO_RVs = np.asarray([RVs[i] for i in range(len(RVs)) if not HET[i]])
APO_p_err = np.asarray([p_err[i] for i in range(len(p_err)) if not HET[i]])
APO_s_err = np.asarray([s_err[i] for i in range(len(s_err)) if not HET[i]])
HET_JDp = np.asarray([JDp[i] for i in range(len(JDp)) if HET[i]])
HET_JDs = np.asarray([JDs[i] for i in range(len(JDs)) if HET[i]])
HET_RVp = np.asarray([RVp[i] for i in range(len(RVp)) if HET[i]])
HET_RVs = np.asarray([RVs[i] for i in range(len(RVs)) if HET[i]])
HET_p_err = np.asarray([p_err[i] for i in range(len(p_err)) if HET[i]])
HET_s_err = np.asarray([s_err[i] for i in range(len(s_err)) if HET[i]])
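# A more compact alternative to the list comprehensions above (sketch only,
# assuming f.adjustment returns arrays aligned with `source`, which the loops
# above already rely on): build one boolean mask and index with it, e.g.
#   het_mask = np.asarray(source) == 'HET'
#   APO_JDp, HET_JDp = np.asarray(JDp)[~het_mask], np.asarray(JDp)[het_mask]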
mass_ratio = 0.946985628187
parms = [61.1551472716, 0, 0, 2456205.33813, 3.29813071475, -17.2248465232]
#create the curves plot
fig = plt.figure(figsize=(11,10))
gs = GridSpec(2,1, height_ratios = [4,1])
ax1 = fig.add_subplot(gs[0,0])
ax1.tick_params(labelsize=14)
ax2 = fig.add_subplot(gs[1,0])
ax2.tick_params(labelsize=14)
plt.subplots_adjust(wspace=0, hspace=0)
plt.tick_params(direction='in')
plt.setp([a.get_xticklabels() for a in fig.axes[:-1]], visible=False)
#fig.suptitle('Radial Velocity Curve for ' + system, fontsize = 22)
x = np.linspace(0, parms[-2], num=1000)
primary, secondary = RV(x, mass_ratio, parms)
ax1.plot(x/parms[-2], np.ones(len(x))*parms[-1], 'k', lw=1, label='Systemic Velocity')
ax1.plot(x/parms[-2], primary, 'b', lw=1, label='Primary Curve')
ax1.plot(x/parms[-2], secondary, 'r--', lw=1, label='Secondary Curve')
# Phase APOGEE data to result period and plot
ax1.errorbar(phases(parms[-2], APO_JDp), APO_RVp, APO_p_err, np.zeros(len(APO_JDp)), 'ko', label='Primary RV Data')
ax1.errorbar(phases(parms[-2], APO_JDs), APO_RVs, APO_s_err, np.zeros(len(APO_JDs)), 'ks', label='Secondary RV data')
# Plot the APOGEE observed - computed underplot
ax2.plot((0, 1), np.zeros(2), 'k', lw = 1)
ax2.errorbar(phases(parms[-2], APO_JDp), APO_RVp-RV(APO_JDp, mass_ratio, parms)[0], APO_p_err, np.zeros(len(APO_JDp)), 'bo')
ax2.errorbar(phases(parms[-2], APO_JDs), APO_RVs-RV(APO_JDs, mass_ratio, parms)[1], APO_s_err, np.zeros(len(APO_JDs)), 'rs')
# If any HET observations are present, include them.
if len(HET_JDp) + len(HET_JDs) > 0:  # element-wise '+' on the arrays would fail for unequal lengths
# Phase HET data to result period and plot
ax1.errorbar(phases(parms[-2], HET_JDp), HET_RVp, HET_p_err, np.zeros(len(HET_JDp)), 'kv', label='Primary RV Data')
ax1.errorbar(phases(parms[-2], HET_JDs), HET_RVs, HET_s_err, np.zeros(len(HET_JDs)), 'k^', label='Secondary RV data')
# Plot the HET observed - computed underplot
ax2.plot((0, 1), np.zeros(2), 'k', lw = 1)
ax2.errorbar(phases(parms[-2], HET_JDp), HET_RVp-RV(HET_JDp, mass_ratio, parms)[0], HET_p_err, np.zeros(len(HET_JDp)), 'bv')
ax2.errorbar(phases(parms[-2], HET_JDs), HET_RVs-RV(HET_JDs, mass_ratio, parms)[1], HET_s_err, np.zeros(len(HET_JDs)), 'r^')
# Adjust the look of the plot
plt.xlabel('Orbital Phase', fontsize = 20)
ax1.set_ylabel('Radial Velocity $\\frac{km}{s}$', fontsize = 20)
ax2.set_ylabel('O - C $\\frac{km}{s}$', fontsize = 20)
ax1.set_xlim([0,1])
ax2.set_xlim([0,1])
plt.savefig('4211_RV.pdf', bbox_inches='tight')
plt.show()
| gpl-3.0
marcocaccin/scikit-learn | examples/covariance/plot_covariance_estimation.py | 250 | 5070 |
"""
=======================================================================
Shrinkage covariance estimation: LedoitWolf vs OAS and max-likelihood
=======================================================================
When working with covariance estimation, the usual approach is to use
a maximum likelihood estimator, such as the
:class:`sklearn.covariance.EmpiricalCovariance`. It is unbiased, i.e. it
converges to the true (population) covariance when given many
observations. However, it can also be beneficial to regularize it, in
order to reduce its variance; this, in turn, introduces some bias. This
example illustrates the simple regularization used in
:ref:`shrunk_covariance` estimators. In particular, it focuses on how to
set the amount of regularization, i.e. how to choose the bias-variance
trade-off.
Here we compare 3 approaches:
* Setting the parameter by cross-validating the likelihood on three folds
according to a grid of potential shrinkage parameters.
* A closed formula proposed by Ledoit and Wolf to compute
the asymptotically optimal regularization parameter (minimizing an MSE
criterion), yielding the :class:`sklearn.covariance.LedoitWolf`
covariance estimate.
* An improvement of the Ledoit-Wolf shrinkage, the
:class:`sklearn.covariance.OAS`, proposed by Chen et al. Its
convergence is significantly better under the assumption that the data
are Gaussian, in particular for small samples.
To quantify estimation error, we plot the likelihood of unseen data for
different values of the shrinkage parameter. We also show the choices by
cross-validation, or with the LedoitWolf and OAS estimates.
Note that the maximum likelihood estimate corresponds to no shrinkage,
and thus performs poorly. The Ledoit-Wolf estimate performs really well,
as it is close to the optimum and is not computationally costly. In this
example, the OAS estimate is a bit further away. Interestingly, both
approaches outperform cross-validation, which is significantly more
computationally costly.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg
from sklearn.covariance import LedoitWolf, OAS, ShrunkCovariance, \
log_likelihood, empirical_covariance
from sklearn.grid_search import GridSearchCV
###############################################################################
# Generate sample data
n_features, n_samples = 40, 20
np.random.seed(42)
base_X_train = np.random.normal(size=(n_samples, n_features))
base_X_test = np.random.normal(size=(n_samples, n_features))
# Color samples
coloring_matrix = np.random.normal(size=(n_features, n_features))
X_train = np.dot(base_X_train, coloring_matrix)
X_test = np.dot(base_X_test, coloring_matrix)
###############################################################################
# Compute the likelihood on test data
# spanning a range of possible shrinkage coefficient values
shrinkages = np.logspace(-2, 0, 30)
negative_logliks = [-ShrunkCovariance(shrinkage=s).fit(X_train).score(X_test)
for s in shrinkages]
# under the ground-truth model, which we would not have access to in real
# settings
real_cov = np.dot(coloring_matrix.T, coloring_matrix)
emp_cov = empirical_covariance(X_train)
loglik_real = -log_likelihood(emp_cov, linalg.inv(real_cov))
###############################################################################
# Compare different approaches to setting the parameter
# GridSearch for an optimal shrinkage coefficient
tuned_parameters = [{'shrinkage': shrinkages}]
cv = GridSearchCV(ShrunkCovariance(), tuned_parameters)
cv.fit(X_train)
# Ledoit-Wolf optimal shrinkage coefficient estimate
lw = LedoitWolf()
loglik_lw = lw.fit(X_train).score(X_test)
# OAS coefficient estimate
oa = OAS()
loglik_oa = oa.fit(X_train).score(X_test)
###############################################################################
# Plot results
fig = plt.figure()
plt.title("Regularized covariance: likelihood and shrinkage coefficient")
plt.xlabel('Regularization parameter: shrinkage coefficient')
plt.ylabel('Error: negative log-likelihood on test data')
# range shrinkage curve
plt.loglog(shrinkages, negative_logliks, label="Negative log-likelihood")
plt.plot(plt.xlim(), 2 * [loglik_real], '--r',
label="Real covariance likelihood")
# adjust view
lik_max = np.amax(negative_logliks)
lik_min = np.amin(negative_logliks)
ymin = lik_min - 6. * np.log((plt.ylim()[1] - plt.ylim()[0]))
ymax = lik_max + 10. * np.log(lik_max - lik_min)
xmin = shrinkages[0]
xmax = shrinkages[-1]
# LW likelihood
plt.vlines(lw.shrinkage_, ymin, -loglik_lw, color='magenta',
linewidth=3, label='Ledoit-Wolf estimate')
# OAS likelihood
plt.vlines(oa.shrinkage_, ymin, -loglik_oa, color='purple',
linewidth=3, label='OAS estimate')
# best CV estimator likelihood
plt.vlines(cv.best_estimator_.shrinkage, ymin,
-cv.best_estimator_.score(X_test), color='cyan',
linewidth=3, label='Cross-validation best estimate')
plt.ylim(ymin, ymax)
plt.xlim(xmin, xmax)
plt.legend()
plt.show()
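# A quick numeric comparison of the selected shrinkage values (a small
# addition to the example; the exact numbers depend on the random data
# generated above):
print("CV best shrinkage:     %.3f" % cv.best_estimator_.shrinkage)
print("Ledoit-Wolf shrinkage: %.3f" % lw.shrinkage_)
print("OAS shrinkage:         %.3f" % oa.shrinkage_)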
| bsd-3-clause
wdurhamh/statsmodels | statsmodels/discrete/discrete_model.py | 17 | 116208 |
"""
Limited dependent variable and qualitative variables.
Includes binary outcomes, count data, (ordered) ordinal data and limited
dependent variables.
General References
--------------------
A.C. Cameron and P.K. Trivedi. `Regression Analysis of Count Data`.
Cambridge, 1998
G.S. Maddala. `Limited-Dependent and Qualitative Variables in Econometrics`.
Cambridge, 1983.
W. Greene. `Econometric Analysis`. Prentice Hall, 5th. edition. 2003.
"""
from __future__ import division
__all__ = ["Poisson", "Logit", "Probit", "MNLogit", "NegativeBinomial"]
from statsmodels.compat.python import lmap, lzip, range
import numpy as np
from scipy.special import gammaln
from scipy import stats, special, optimize # opt just for nbin
import statsmodels.tools.tools as tools
from statsmodels.tools import data as data_tools
from statsmodels.tools.decorators import (resettable_cache,
cache_readonly)
from statsmodels.regression.linear_model import OLS
from scipy.stats import nbinom
from statsmodels.tools.sm_exceptions import PerfectSeparationError
from statsmodels.tools.numdiff import (approx_fprime, approx_hess,
approx_hess_cs, approx_fprime_cs)
import statsmodels.base.model as base
from statsmodels.base.data import handle_data # for mnlogit
import statsmodels.regression.linear_model as lm
import statsmodels.base.wrapper as wrap
from statsmodels.compat.numpy import np_matrix_rank
from pandas.core.api import get_dummies
from statsmodels.base.l1_slsqp import fit_l1_slsqp
try:
import cvxopt
have_cvxopt = True
except ImportError:
have_cvxopt = False
#TODO: When we eventually get user-settable precision, we need to change
# this
FLOAT_EPS = np.finfo(float).eps
#TODO: add options for the parameter covariance/variance
# ie., OIM, EIM, and BHHH see Green 21.4
_discrete_models_docs = """
"""
_discrete_results_docs = """
%(one_line_description)s
Parameters
----------
model : A DiscreteModel instance
params : array-like
The parameters of a fitted model.
hessian : array-like
The hessian of the fitted model.
scale : float
A scale parameter for the covariance matrix.
Returns
-------
*Attributes*
aic : float
Akaike information criterion. `-2*(llf - p)` where `p` is the number
of regressors including the intercept.
bic : float
Bayesian information criterion. `-2*llf + ln(nobs)*p` where `p` is the
number of regressors including the intercept.
bse : array
The standard errors of the coefficients.
df_resid : float
See model definition.
df_model : float
See model definition.
fitted_values : array
Linear predictor XB.
llf : float
Value of the loglikelihood
llnull : float
Value of the constant-only loglikelihood
llr : float
Likelihood ratio chi-squared statistic; `-2*(llnull - llf)`
llr_pvalue : float
The chi-squared probability of getting a log-likelihood ratio
statistic greater than llr. llr has a chi-squared distribution
with degrees of freedom `df_model`.
prsquared : float
McFadden's pseudo-R-squared. `1 - (llf / llnull)`
%(extra_attr)s"""
_l1_results_attr = """ nnz_params : Integer
The number of nonzero parameters in the model. Train with
trim_params == True or else numerical error will distort this.
trimmed : Boolean array
trimmed[i] == True if the ith parameter was trimmed from the model."""
# helper for MNLogit (will be generally useful later)
def _numpy_to_dummies(endog):
if endog.dtype.kind in ['S', 'O']:
endog_dummies, ynames = tools.categorical(endog, drop=True,
dictnames=True)
elif endog.ndim == 2:
endog_dummies = endog
ynames = range(endog.shape[1])
else:
endog_dummies, ynames = tools.categorical(endog, drop=True,
dictnames=True)
return endog_dummies, ynames
def _pandas_to_dummies(endog):
if endog.ndim == 2:
if endog.shape[1] == 1:
yname = endog.columns[0]
endog_dummies = get_dummies(endog.icol(0))
else: # series
yname = 'y'
endog_dummies = endog
else:
yname = endog.name
endog_dummies = get_dummies(endog)
ynames = endog_dummies.columns.tolist()
return endog_dummies, ynames, yname
#### Private Model Classes ####
class DiscreteModel(base.LikelihoodModel):
"""
Abstract class for discrete choice models.
This class does not do anything itself but lays out the methods and
call signature expected of child classes in addition to those of
statsmodels.model.LikelihoodModel.
"""
def __init__(self, endog, exog, **kwargs):
super(DiscreteModel, self).__init__(endog, exog, **kwargs)
self.raise_on_perfect_prediction = True
def initialize(self):
"""
Initialize is called by
statsmodels.model.LikelihoodModel.__init__
and should contain any preprocessing that needs to be done for a model.
"""
# assumes constant
self.df_model = float(np_matrix_rank(self.exog) - 1)
self.df_resid = (float(self.exog.shape[0] -
np_matrix_rank(self.exog)))
def cdf(self, X):
"""
The cumulative distribution function of the model.
"""
raise NotImplementedError
def pdf(self, X):
"""
The probability density (mass) function of the model.
"""
raise NotImplementedError
def _check_perfect_pred(self, params, *args):
endog = self.endog
fittedvalues = self.cdf(np.dot(self.exog, params[:self.exog.shape[1]]))
if (self.raise_on_perfect_prediction and
np.allclose(fittedvalues - endog, 0)):
msg = "Perfect separation detected, results not available"
raise PerfectSeparationError(msg)
def fit(self, start_params=None, method='newton', maxiter=35,
full_output=1, disp=1, callback=None, **kwargs):
"""
Fit the model using maximum likelihood.
The rest of the docstring is from
statsmodels.base.model.LikelihoodModel.fit
"""
if callback is None:
callback = self._check_perfect_pred
else:
pass # make a function factory to have multiple call-backs
mlefit = super(DiscreteModel, self).fit(start_params=start_params,
method=method, maxiter=maxiter, full_output=full_output,
disp=disp, callback=callback, **kwargs)
return mlefit # up to subclasses to wrap results
fit.__doc__ += base.LikelihoodModel.fit.__doc__
def fit_regularized(self, start_params=None, method='l1',
maxiter='defined_by_method', full_output=1, disp=True,
callback=None, alpha=0, trim_mode='auto',
auto_trim_tol=0.01, size_trim_tol=1e-4, qc_tol=0.03,
qc_verbose=False, **kwargs):
"""
Fit the model using a regularized maximum likelihood.
The regularization method AND the solver used is determined by the
argument method.
Parameters
----------
start_params : array-like, optional
Initial guess of the solution for the loglikelihood maximization.
The default is an array of zeros.
method : 'l1' or 'l1_cvxopt_cp'
See notes for details.
maxiter : Integer or 'defined_by_method'
Maximum number of iterations to perform.
If 'defined_by_method', then use method defaults (see notes).
full_output : bool
Set to True to have all available output in the Results object's
mle_retvals attribute. The output is dependent on the solver.
See LikelihoodModelResults notes section for more information.
disp : bool
Set to True to print convergence messages.
fargs : tuple
Extra arguments passed to the likelihood function, i.e.,
loglike(x,*args)
callback : callable callback(xk)
Called after each iteration, as callback(xk), where xk is the
current parameter vector.
retall : bool
Set to True to return list of solutions at each iteration.
Available in Results object's mle_retvals attribute.
alpha : non-negative scalar or numpy array (same size as parameters)
The weight multiplying the l1 penalty term
trim_mode : 'auto', 'size', or 'off'
If not 'off', trim (set to zero) parameters that would have been
zero if the solver reached the theoretical minimum.
If 'auto', trim params using the Theory above.
If 'size', trim params if they have very small absolute value
size_trim_tol : float or 'auto' (default = 'auto')
For use when trim_mode == 'size'
auto_trim_tol : float
For use when trim_mode == 'auto'.
qc_tol : float
Print warning and don't allow auto trim when (ii) (above) is
violated by this much.
qc_verbose : Boolean
If true, print out a full QC report upon failure
Notes
-----
Extra parameters are not penalized if alpha is given as a scalar.
An example is the shape parameter in NegativeBinomial `nb1` and `nb2`.
Optional arguments for the solvers (available in Results.mle_settings)::
'l1'
acc : float (default 1e-6)
Requested accuracy as used by slsqp
'l1_cvxopt_cp'
abstol : float
absolute accuracy (default: 1e-7).
reltol : float
relative accuracy (default: 1e-6).
feastol : float
tolerance for feasibility conditions (default: 1e-7).
refinement : int
number of iterative refinement steps when solving KKT
equations (default: 1).
Optimization methodology
With :math:`L` the negative log likelihood, we solve the convex but
non-smooth problem
.. math:: \\min_\\beta L(\\beta) + \\sum_k\\alpha_k |\\beta_k|
via the transformation to the smooth, convex, constrained problem
in twice as many variables (adding the "added variables" :math:`u_k`)
.. math:: \\min_{\\beta,u} L(\\beta) + \\sum_k\\alpha_k u_k,
subject to
.. math:: -u_k \\leq \\beta_k \\leq u_k.
With :math:`\\partial_k L` the derivative of :math:`L` in the
:math:`k^{th}` parameter direction, theory dictates that, at the
minimum, exactly one of two conditions holds:
(i) :math:`|\\partial_k L| = \\alpha_k` and :math:`\\beta_k \\neq 0`
(ii) :math:`|\\partial_k L| \\leq \\alpha_k` and :math:`\\beta_k = 0`
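Examples
--------
A minimal usage sketch on a subclass (the data, penalty weight, and model
choice are illustrative only)::

    import numpy as np
    import statsmodels.api as sm

    np.random.seed(0)
    exog = sm.add_constant(np.random.randn(100, 3))
    beta = np.array([0.5, 1.0, 0.0, -1.0])
    endog = (np.dot(exog, beta) + np.random.randn(100) > 0).astype(float)
    res = sm.Logit(endog, exog).fit_regularized(method='l1', alpha=1.0)
    print(res.params)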
"""
### Set attributes based on method
if method in ['l1', 'l1_cvxopt_cp']:
cov_params_func = self.cov_params_func_l1
else:
raise Exception("argument method == %s, which is not handled"
% method)
### Bundle up extra kwargs for the dictionary kwargs. These are
### passed through super(...).fit() as kwargs and unpacked at
### appropriate times
alpha = np.array(alpha)
assert alpha.min() >= 0
try:
kwargs['alpha'] = alpha
except TypeError:
kwargs = dict(alpha=alpha)
kwargs['alpha_rescaled'] = kwargs['alpha'] / float(self.endog.shape[0])
kwargs['trim_mode'] = trim_mode
kwargs['size_trim_tol'] = size_trim_tol
kwargs['auto_trim_tol'] = auto_trim_tol
kwargs['qc_tol'] = qc_tol
kwargs['qc_verbose'] = qc_verbose
### Define default keyword arguments to be passed to super(...).fit()
if maxiter == 'defined_by_method':
if method == 'l1':
maxiter = 1000
elif method == 'l1_cvxopt_cp':
maxiter = 70
## Parameters to pass to super(...).fit()
# For the 'extra' parameters, pass all that are available,
# even if we know (at this point) we will only use one.
extra_fit_funcs = {'l1': fit_l1_slsqp}
if have_cvxopt and method == 'l1_cvxopt_cp':
from statsmodels.base.l1_cvxopt import fit_l1_cvxopt_cp
extra_fit_funcs['l1_cvxopt_cp'] = fit_l1_cvxopt_cp
elif method.lower() == 'l1_cvxopt_cp':
message = ("Attempt to use l1_cvxopt_cp failed since cvxopt "
"could not be imported")
if callback is None:
callback = self._check_perfect_pred
else:
pass # make a function factory to have multiple call-backs
mlefit = super(DiscreteModel, self).fit(start_params=start_params,
method=method, maxiter=maxiter, full_output=full_output,
disp=disp, callback=callback, extra_fit_funcs=extra_fit_funcs,
cov_params_func=cov_params_func, **kwargs)
return mlefit # up to subclasses to wrap results
def cov_params_func_l1(self, likelihood_model, xopt, retvals):
"""
Computes cov_params on a reduced parameter space
corresponding to the nonzero parameters resulting from the
l1 regularized fit.
Returns a full cov_params matrix, with entries corresponding
to zero'd values set to np.nan.
"""
H = likelihood_model.hessian(xopt)
trimmed = retvals['trimmed']
nz_idx = np.nonzero(trimmed == False)[0]
nnz_params = (trimmed == False).sum()
if nnz_params > 0:
H_restricted = H[nz_idx[:, None], nz_idx]
# Covariance estimate for the nonzero params
H_restricted_inv = np.linalg.inv(-H_restricted)
else:
H_restricted_inv = np.zeros(0)
cov_params = np.nan * np.ones(H.shape)
cov_params[nz_idx[:, None], nz_idx] = H_restricted_inv
return cov_params
def predict(self, params, exog=None, linear=False):
"""
Predict response variable of a model given exogenous variables.
"""
raise NotImplementedError
def _derivative_exog(self, params, exog=None, dummy_idx=None,
count_idx=None):
"""
This should implement the derivative of the non-linear function
"""
raise NotImplementedError
class BinaryModel(DiscreteModel):
def __init__(self, endog, exog, **kwargs):
super(BinaryModel, self).__init__(endog, exog, **kwargs)
if (not issubclass(self.__class__, MultinomialModel) and
not np.all((self.endog >= 0) & (self.endog <= 1))):
raise ValueError("endog must be in the unit interval.")
def predict(self, params, exog=None, linear=False):
"""
Predict response variable of a model given exogenous variables.
Parameters
----------
params : array-like
Fitted parameters of the model.
exog : array-like
1d or 2d array of exogenous values. If not supplied, the
whole exog attribute of the model is used.
linear : bool, optional
If True, returns the linear predictor dot(exog,params). Else,
returns the value of the cdf at the linear predictor.
Returns
-------
array
Fitted values at exog.
"""
if exog is None:
exog = self.exog
if not linear:
return self.cdf(np.dot(exog, params))
else:
return np.dot(exog, params)
def fit_regularized(self, start_params=None, method='l1',
maxiter='defined_by_method', full_output=1, disp=1, callback=None,
alpha=0, trim_mode='auto', auto_trim_tol=0.01, size_trim_tol=1e-4,
qc_tol=0.03, **kwargs):
bnryfit = super(BinaryModel, self).fit_regularized(
start_params=start_params, method=method, maxiter=maxiter,
full_output=full_output, disp=disp, callback=callback,
alpha=alpha, trim_mode=trim_mode, auto_trim_tol=auto_trim_tol,
size_trim_tol=size_trim_tol, qc_tol=qc_tol, **kwargs)
if method in ['l1', 'l1_cvxopt_cp']:
discretefit = L1BinaryResults(self, bnryfit)
else:
raise Exception(
"argument method == %s, which is not handled" % method)
return L1BinaryResultsWrapper(discretefit)
fit_regularized.__doc__ = DiscreteModel.fit_regularized.__doc__
def _derivative_predict(self, params, exog=None, transform='dydx'):
"""
For computing marginal effects standard errors.
This is used only in the case of discrete and count regressors to
get the variance-covariance of the marginal effects. It returns
        [d F / d params] where F is the predicted value.
Transform can be 'dydx' or 'eydx'. Checking is done in margeff
computations for appropriate transform.
"""
if exog is None:
exog = self.exog
dF = self.pdf(np.dot(exog, params))[:,None] * exog
if 'ey' in transform:
dF /= self.predict(params, exog)[:,None]
return dF
def _derivative_exog(self, params, exog=None, transform='dydx',
dummy_idx=None, count_idx=None):
"""
For computing marginal effects returns dF(XB) / dX where F(.) is
the predicted probabilities
transform can be 'dydx', 'dyex', 'eydx', or 'eyex'.
Not all of these make sense in the presence of discrete regressors,
but checks are done in the results in get_margeff.
"""
#note, this form should be appropriate for
## group 1 probit, logit, logistic, cloglog, heckprob, xtprobit
if exog is None:
exog = self.exog
margeff = np.dot(self.pdf(np.dot(exog, params))[:,None],
params[None,:])
if 'ex' in transform:
margeff *= exog
if 'ey' in transform:
margeff /= self.predict(params, exog)[:,None]
if count_idx is not None:
from statsmodels.discrete.discrete_margins import (
_get_count_effects)
margeff = _get_count_effects(margeff, exog, count_idx, transform,
self, params)
if dummy_idx is not None:
from statsmodels.discrete.discrete_margins import (
_get_dummy_effects)
margeff = _get_dummy_effects(margeff, exog, dummy_idx, transform,
self, params)
return margeff
class MultinomialModel(BinaryModel):
def _handle_data(self, endog, exog, missing, hasconst, **kwargs):
if data_tools._is_using_ndarray_type(endog, None):
endog_dummies, ynames = _numpy_to_dummies(endog)
yname = 'y'
elif data_tools._is_using_pandas(endog, None):
endog_dummies, ynames, yname = _pandas_to_dummies(endog)
else:
endog = np.asarray(endog)
endog_dummies, ynames = _numpy_to_dummies(endog)
yname = 'y'
if not isinstance(ynames, dict):
ynames = dict(zip(range(endog_dummies.shape[1]), ynames))
self._ynames_map = ynames
data = handle_data(endog_dummies, exog, missing, hasconst, **kwargs)
data.ynames = yname # overwrite this to single endog name
data.orig_endog = endog
self.wendog = data.endog
# repeating from upstream...
for key in kwargs:
try:
setattr(self, key, data.__dict__.pop(key))
except KeyError:
pass
return data
def initialize(self):
"""
Preprocesses the data for MNLogit.
"""
super(MultinomialModel, self).initialize()
# This is also a "whiten" method in other models (eg regression)
self.endog = self.endog.argmax(1) # turn it into an array of col idx
self.J = self.wendog.shape[1]
self.K = self.exog.shape[1]
self.df_model *= (self.J-1) # for each J - 1 equation.
self.df_resid = self.exog.shape[0] - self.df_model - (self.J-1)
def predict(self, params, exog=None, linear=False):
"""
Predict response variable of a model given exogenous variables.
Parameters
----------
params : array-like
2d array of fitted parameters of the model. Should be in the
order returned from the model.
exog : array-like
1d or 2d array of exogenous values. If not supplied, the
whole exog attribute of the model is used. If a 1d array is given
            it is assumed to be 1 row of exogenous variables. If you only have
one regressor and would like to do prediction, you must provide
a 2d array with shape[1] == 1.
linear : bool, optional
If True, returns the linear predictor dot(exog,params). Else,
returns the value of the cdf at the linear predictor.
Notes
-----
Column 0 is the base case, the rest conform to the rows of params
shifted up one for the base case.
"""
        if exog is None: # do here to accommodate user-given exog
exog = self.exog
if exog.ndim == 1:
exog = exog[None]
pred = super(MultinomialModel, self).predict(params, exog, linear)
if linear:
pred = np.column_stack((np.zeros(len(exog)), pred))
return pred
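    # Rough usage sketch (hypothetical data; relies on the MNLogit subclass
    # defined further below in this module):
    #
    #   import numpy as np
    #   import statsmodels.api as sm
    #   X = sm.add_constant(np.random.rand(200, 2))
    #   y = np.random.randint(0, 3, size=200)               # J = 3 categories
    #   res = sm.MNLogit(y, X).fit(disp=0)
    #   probs = res.model.predict(res.params)               # (200, 3), rows sum to 1
    #   xb = res.model.predict(res.params, linear=True)     # column 0 is the base (zeros)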
def fit(self, start_params=None, method='newton', maxiter=35,
full_output=1, disp=1, callback=None, **kwargs):
if start_params is None:
start_params = np.zeros((self.K * (self.J-1)))
else:
start_params = np.asarray(start_params)
callback = lambda x : None # placeholder until check_perfect_pred
# skip calling super to handle results from LikelihoodModel
mnfit = base.LikelihoodModel.fit(self, start_params = start_params,
method=method, maxiter=maxiter, full_output=full_output,
disp=disp, callback=callback, **kwargs)
mnfit.params = mnfit.params.reshape(self.K, -1, order='F')
mnfit = MultinomialResults(self, mnfit)
return MultinomialResultsWrapper(mnfit)
fit.__doc__ = DiscreteModel.fit.__doc__
def fit_regularized(self, start_params=None, method='l1',
maxiter='defined_by_method', full_output=1, disp=1, callback=None,
alpha=0, trim_mode='auto', auto_trim_tol=0.01, size_trim_tol=1e-4,
qc_tol=0.03, **kwargs):
if start_params is None:
start_params = np.zeros((self.K * (self.J-1)))
else:
start_params = np.asarray(start_params)
mnfit = DiscreteModel.fit_regularized(
self, start_params=start_params, method=method, maxiter=maxiter,
full_output=full_output, disp=disp, callback=callback,
alpha=alpha, trim_mode=trim_mode, auto_trim_tol=auto_trim_tol,
size_trim_tol=size_trim_tol, qc_tol=qc_tol, **kwargs)
mnfit.params = mnfit.params.reshape(self.K, -1, order='F')
mnfit = L1MultinomialResults(self, mnfit)
return L1MultinomialResultsWrapper(mnfit)
fit_regularized.__doc__ = DiscreteModel.fit_regularized.__doc__
def _derivative_predict(self, params, exog=None, transform='dydx'):
"""
For computing marginal effects standard errors.
This is used only in the case of discrete and count regressors to
get the variance-covariance of the marginal effects. It returns
[d F / d params] where F is the predicted probabilities for each
choice. dFdparams is of shape nobs x (J*K) x (J-1)*K.
The zero derivatives for the base category are not included.
Transform can be 'dydx' or 'eydx'. Checking is done in margeff
computations for appropriate transform.
"""
if exog is None:
exog = self.exog
        if params.ndim == 1: # will get flattened from approx_fprime
params = params.reshape(self.K, self.J-1, order='F')
eXB = np.exp(np.dot(exog, params))
sum_eXB = (1 + eXB.sum(1))[:,None]
J, K = lmap(int, [self.J, self.K])
repeat_eXB = np.repeat(eXB, J, axis=1)
X = np.tile(exog, J-1)
# this is the derivative wrt the base level
F0 = -repeat_eXB * X / sum_eXB ** 2
# this is the derivative wrt the other levels when
# dF_j / dParams_j (ie., own equation)
#NOTE: this computes too much, any easy way to cut down?
F1 = eXB.T[:,:,None]*X * (sum_eXB - repeat_eXB) / (sum_eXB**2)
F1 = F1.transpose((1,0,2)) # put the nobs index first
# other equation index
other_idx = ~np.kron(np.eye(J-1), np.ones(K)).astype(bool)
F1[:, other_idx] = (-eXB.T[:,:,None]*X*repeat_eXB / \
(sum_eXB**2)).transpose((1,0,2))[:, other_idx]
dFdX = np.concatenate((F0[:, None,:], F1), axis=1)
if 'ey' in transform:
dFdX /= self.predict(params, exog)[:, :, None]
return dFdX
def _derivative_exog(self, params, exog=None, transform='dydx',
dummy_idx=None, count_idx=None):
"""
For computing marginal effects returns dF(XB) / dX where F(.) is
the predicted probabilities
transform can be 'dydx', 'dyex', 'eydx', or 'eyex'.
Not all of these make sense in the presence of discrete regressors,
but checks are done in the results in get_margeff.
For Multinomial models the marginal effects are
P[j] * (params[j] - sum_k P[k]*params[k])
It is returned unshaped, so that each row contains each of the J
equations. This makes it easier to take derivatives of this for
standard errors. If you want average marginal effects you can do
        margeff.reshape(nobs, K, J, order='F').mean(0) and the marginal effects
for choice J are in column J
"""
J = int(self.J) # number of alternative choices
K = int(self.K) # number of variables
#note, this form should be appropriate for
## group 1 probit, logit, logistic, cloglog, heckprob, xtprobit
if exog is None:
exog = self.exog
        if params.ndim == 1: # will get flattened from approx_fprime
params = params.reshape(K, J-1, order='F')
zeroparams = np.c_[np.zeros(K), params] # add base in
cdf = self.cdf(np.dot(exog, params))
margeff = np.array([cdf[:,[j]]* (zeroparams[:,j]-np.array([cdf[:,[i]]*
zeroparams[:,i] for i in range(int(J))]).sum(0))
for j in range(J)])
margeff = np.transpose(margeff, (1,2,0))
# swap the axes to make sure margeff are in order nobs, K, J
if 'ex' in transform:
margeff *= exog
if 'ey' in transform:
margeff /= self.predict(params, exog)[:,None,:]
if count_idx is not None:
from statsmodels.discrete.discrete_margins import (
_get_count_effects)
margeff = _get_count_effects(margeff, exog, count_idx, transform,
self, params)
if dummy_idx is not None:
from statsmodels.discrete.discrete_margins import (
_get_dummy_effects)
margeff = _get_dummy_effects(margeff, exog, dummy_idx, transform,
self, params)
return margeff.reshape(len(exog), -1, order='F')
class CountModel(DiscreteModel):
def __init__(self, endog, exog, offset=None, exposure=None, missing='none',
**kwargs):
super(CountModel, self).__init__(endog, exog, missing=missing,
offset=offset,
exposure=exposure, **kwargs)
if exposure is not None:
self.exposure = np.log(self.exposure)
self._check_inputs(self.offset, self.exposure, self.endog)
if offset is None:
delattr(self, 'offset')
if exposure is None:
delattr(self, 'exposure')
def _check_inputs(self, offset, exposure, endog):
if offset is not None and offset.shape[0] != endog.shape[0]:
raise ValueError("offset is not the same length as endog")
if exposure is not None and exposure.shape[0] != endog.shape[0]:
raise ValueError("exposure is not the same length as endog")
def _get_init_kwds(self):
# this is a temporary fixup because exposure has been transformed
# see #1609
kwds = super(CountModel, self)._get_init_kwds()
if 'exposure' in kwds and kwds['exposure'] is not None:
kwds['exposure'] = np.exp(kwds['exposure'])
return kwds
def predict(self, params, exog=None, exposure=None, offset=None,
linear=False):
"""
Predict response variable of a count model given exogenous variables.
Notes
-----
If exposure is specified, then it will be logged by the method.
The user does not need to log it first.
"""
#TODO: add offset tp
if exog is None:
exog = self.exog
offset = getattr(self, 'offset', 0)
exposure = getattr(self, 'exposure', 0)
else:
if exposure is None:
exposure = 0
else:
exposure = np.log(exposure)
if offset is None:
offset = 0
if not linear:
return np.exp(np.dot(exog, params[:exog.shape[1]]) + exposure + offset) # not cdf
else:
return np.dot(exog, params[:exog.shape[1]]) + exposure + offset
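    # Sketch of how exposure and offset enter the prediction above (numbers are
    # hypothetical): exposure is logged with a unit coefficient, so the mean is
    # mu_i = exposure_i * exp(x_i' beta + offset_i).
    #
    #   import numpy as np
    #   params = np.array([0.1, 0.5])
    #   exog = np.array([[1.0, 2.0]])
    #   exposure = np.array([10.0])
    #   mu = exposure * np.exp(exog @ params)   # equals predict(params, exog, exposure=exposure)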
def _derivative_predict(self, params, exog=None, transform='dydx'):
"""
For computing marginal effects standard errors.
This is used only in the case of discrete and count regressors to
get the variance-covariance of the marginal effects. It returns
        [d F / d params] where F is the predicted value.
Transform can be 'dydx' or 'eydx'. Checking is done in margeff
computations for appropriate transform.
"""
if exog is None:
exog = self.exog
#NOTE: this handles offset and exposure
dF = self.predict(params, exog)[:,None] * exog
if 'ey' in transform:
dF /= self.predict(params, exog)[:,None]
return dF
def _derivative_exog(self, params, exog=None, transform="dydx",
dummy_idx=None, count_idx=None):
"""
For computing marginal effects. These are the marginal effects
d F(XB) / dX
For the Poisson model F(XB) is the predicted counts rather than
the probabilities.
transform can be 'dydx', 'dyex', 'eydx', or 'eyex'.
Not all of these make sense in the presence of discrete regressors,
but checks are done in the results in get_margeff.
"""
# group 3 poisson, nbreg, zip, zinb
if exog is None:
exog = self.exog
margeff = self.predict(params, exog)[:,None] * params[None,:]
if 'ex' in transform:
margeff *= exog
if 'ey' in transform:
margeff /= self.predict(params, exog)[:,None]
if count_idx is not None:
from statsmodels.discrete.discrete_margins import (
_get_count_effects)
margeff = _get_count_effects(margeff, exog, count_idx, transform,
self, params)
if dummy_idx is not None:
from statsmodels.discrete.discrete_margins import (
_get_dummy_effects)
margeff = _get_dummy_effects(margeff, exog, dummy_idx, transform,
self, params)
return margeff
def fit(self, start_params=None, method='newton', maxiter=35,
full_output=1, disp=1, callback=None, **kwargs):
cntfit = super(CountModel, self).fit(start_params=start_params,
method=method, maxiter=maxiter, full_output=full_output,
disp=disp, callback=callback, **kwargs)
discretefit = CountResults(self, cntfit)
return CountResultsWrapper(discretefit)
fit.__doc__ = DiscreteModel.fit.__doc__
def fit_regularized(self, start_params=None, method='l1',
maxiter='defined_by_method', full_output=1, disp=1, callback=None,
alpha=0, trim_mode='auto', auto_trim_tol=0.01, size_trim_tol=1e-4,
qc_tol=0.03, **kwargs):
cntfit = super(CountModel, self).fit_regularized(
start_params=start_params, method=method, maxiter=maxiter,
full_output=full_output, disp=disp, callback=callback,
alpha=alpha, trim_mode=trim_mode, auto_trim_tol=auto_trim_tol,
size_trim_tol=size_trim_tol, qc_tol=qc_tol, **kwargs)
if method in ['l1', 'l1_cvxopt_cp']:
discretefit = L1CountResults(self, cntfit)
else:
raise Exception(
"argument method == %s, which is not handled" % method)
return L1CountResultsWrapper(discretefit)
fit_regularized.__doc__ = DiscreteModel.fit_regularized.__doc__
class OrderedModel(DiscreteModel):
pass
#### Public Model Classes ####
class Poisson(CountModel):
__doc__ = """
Poisson model for count data
%(params)s
%(extra_params)s
Attributes
-----------
endog : array
A reference to the endogenous response variable
exog : array
A reference to the exogenous design.
""" % {'params' : base._model_params_doc,
'extra_params' :
"""offset : array_like
Offset is added to the linear prediction with coefficient equal to 1.
exposure : array_like
Log(exposure) is added to the linear prediction with coefficient
equal to 1.
""" + base._missing_param_doc}
def cdf(self, X):
"""
Poisson model cumulative distribution function
Parameters
-----------
X : array-like
`X` is the linear predictor of the model. See notes.
Returns
-------
The value of the Poisson CDF at each point.
Notes
-----
The CDF is defined as
        .. math:: \\exp\\left(-\\lambda\\right)\\sum_{i=0}^{y}\\frac{\\lambda^{i}}{i!}
where :math:`\\lambda` assumes the loglinear model. I.e.,
.. math:: \\ln\\lambda_{i}=X\\beta
The parameter `X` is :math:`X\\beta` in the above formula.
"""
y = self.endog
return stats.poisson.cdf(y, np.exp(X))
def pdf(self, X):
"""
Poisson model probability mass function
Parameters
-----------
X : array-like
`X` is the linear predictor of the model. See notes.
Returns
-------
pdf : ndarray
The value of the Poisson probability mass function, PMF, for each
point of X.
Notes
--------
The PMF is defined as
.. math:: \\frac{e^{-\\lambda_{i}}\\lambda_{i}^{y_{i}}}{y_{i}!}
where :math:`\\lambda` assumes the loglinear model. I.e.,
.. math:: \\ln\\lambda_{i}=x_{i}\\beta
The parameter `X` is :math:`x_{i}\\beta` in the above formula.
"""
y = self.endog
return np.exp(stats.poisson.logpmf(y, np.exp(X)))
def loglike(self, params):
"""
Loglikelihood of Poisson model
Parameters
----------
params : array-like
The parameters of the model.
Returns
-------
loglike : float
The log-likelihood function of the model evaluated at `params`.
See notes.
Notes
--------
.. math :: \\ln L=\\sum_{i=1}^{n}\\left[-\\lambda_{i}+y_{i}x_{i}^{\\prime}\\beta-\\ln y_{i}!\\right]
"""
offset = getattr(self, "offset", 0)
exposure = getattr(self, "exposure", 0)
XB = np.dot(self.exog, params) + offset + exposure
endog = self.endog
return np.sum(-np.exp(XB) + endog*XB - gammaln(endog+1))
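    # Consistency sketch (kept as a comment): the closed form above should
    # agree with summing scipy's Poisson logpmf at rate lambda = exp(XB).
    #
    #   import numpy as np
    #   from scipy.special import gammaln
    #   from scipy import stats
    #   XB = np.array([0.0, 0.5, 1.0])
    #   y = np.array([1, 0, 3])
    #   direct = np.sum(-np.exp(XB) + y * XB - gammaln(y + 1))
    #   via_scipy = stats.poisson.logpmf(y, np.exp(XB)).sum()
    #   # `direct` and `via_scipy` agree up to floating point error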
def loglikeobs(self, params):
"""
Loglikelihood for observations of Poisson model
Parameters
----------
params : array-like
The parameters of the model.
Returns
-------
loglike : ndarray (nobs,)
The log likelihood for each observation of the model evaluated
at `params`. See Notes
Notes
--------
.. math :: \\ln L_{i}=\\left[-\\lambda_{i}+y_{i}x_{i}^{\\prime}\\beta-\\ln y_{i}!\\right]
for observations :math:`i=1,...,n`
"""
offset = getattr(self, "offset", 0)
exposure = getattr(self, "exposure", 0)
XB = np.dot(self.exog, params) + offset + exposure
endog = self.endog
#np.sum(stats.poisson.logpmf(endog, np.exp(XB)))
return -np.exp(XB) + endog*XB - gammaln(endog+1)
def fit(self, start_params=None, method='newton', maxiter=35,
full_output=1, disp=1, callback=None, **kwargs):
cntfit = super(CountModel, self).fit(start_params=start_params,
method=method, maxiter=maxiter, full_output=full_output,
disp=disp, callback=callback, **kwargs)
if 'cov_type' in kwargs:
cov_kwds = kwargs.get('cov_kwds', {})
kwds = {'cov_type':kwargs['cov_type'], 'cov_kwds':cov_kwds}
else:
kwds = {}
discretefit = PoissonResults(self, cntfit, **kwds)
return PoissonResultsWrapper(discretefit)
fit.__doc__ = DiscreteModel.fit.__doc__
def fit_regularized(self, start_params=None, method='l1',
maxiter='defined_by_method', full_output=1, disp=1, callback=None,
alpha=0, trim_mode='auto', auto_trim_tol=0.01, size_trim_tol=1e-4,
qc_tol=0.03, **kwargs):
cntfit = super(CountModel, self).fit_regularized(
start_params=start_params, method=method, maxiter=maxiter,
full_output=full_output, disp=disp, callback=callback,
alpha=alpha, trim_mode=trim_mode, auto_trim_tol=auto_trim_tol,
size_trim_tol=size_trim_tol, qc_tol=qc_tol, **kwargs)
if method in ['l1', 'l1_cvxopt_cp']:
discretefit = L1PoissonResults(self, cntfit)
else:
raise Exception(
"argument method == %s, which is not handled" % method)
return L1PoissonResultsWrapper(discretefit)
fit_regularized.__doc__ = DiscreteModel.fit_regularized.__doc__
def fit_constrained(self, constraints, start_params=None, **fit_kwds):
"""fit the model subject to linear equality constraints
The constraints are of the form `R params = q`
where R is the constraint_matrix and q is the vector of
constraint_values.
The estimation creates a new model with transformed design matrix,
exog, and converts the results back to the original parameterization.
Parameters
----------
constraints : formula expression or tuple
If it is a tuple, then the constraint needs to be given by two
arrays (constraint_matrix, constraint_value), i.e. (R, q).
Otherwise, the constraints can be given as strings or list of
strings.
see t_test for details
start_params : None or array_like
starting values for the optimization. `start_params` needs to be
given in the original parameter space and are internally
transformed.
**fit_kwds : keyword arguments
fit_kwds are used in the optimization of the transformed model.
Returns
-------
results : Results instance
"""
#constraints = (R, q)
# TODO: temporary trailing underscore to not overwrite the monkey
# patched version
# TODO: decide whether to move the imports
from patsy import DesignInfo
from statsmodels.base._constraints import fit_constrained
# same pattern as in base.LikelihoodModel.t_test
lc = DesignInfo(self.exog_names).linear_constraint(constraints)
R, q = lc.coefs, lc.constants
        # TODO: add start_params option, need access to transformation
# fit_constrained needs to do the transformation
params, cov, res_constr = fit_constrained(self, R, q,
start_params=start_params,
fit_kwds=fit_kwds)
#create dummy results Instance, TODO: wire up properly
res = self.fit(maxiter=0, method='nm', disp=0,
warn_convergence=False) # we get a wrapper back
res.mle_retvals['fcall'] = res_constr.mle_retvals.get('fcall', np.nan)
res.mle_retvals['iterations'] = res_constr.mle_retvals.get(
'iterations', np.nan)
res.mle_retvals['converged'] = res_constr.mle_retvals['converged']
res._results.params = params
res._results.normalized_cov_params = cov
k_constr = len(q)
res._results.df_resid += k_constr
res._results.df_model -= k_constr
res._results.constraints = lc
res._results.k_constr = k_constr
res._results.results_constrained = res_constr
return res
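    # Hedged usage sketch for the constrained fit (hypothetical data and names;
    # constraint strings use the same syntax as `t_test`):
    #
    #   import numpy as np
    #   import statsmodels.api as sm
    #   X = sm.add_constant(np.random.rand(100, 2))      # columns: const, x1, x2
    #   y = np.random.poisson(np.exp(X @ np.array([0.2, 0.5, -0.3])))
    #   res_c = sm.Poisson(y, X).fit_constrained('x1 = x2')
    #   # or pass arrays directly: fit_constrained((R, q)) with R @ params = q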
def score(self, params):
"""
Poisson model score (gradient) vector of the log-likelihood
Parameters
----------
params : array-like
The parameters of the model
Returns
-------
score : ndarray, 1-D
The score vector of the model, i.e. the first derivative of the
loglikelihood function, evaluated at `params`
Notes
-----
.. math:: \\frac{\\partial\\ln L}{\\partial\\beta}=\\sum_{i=1}^{n}\\left(y_{i}-\\lambda_{i}\\right)x_{i}
where the loglinear model is assumed
.. math:: \\ln\\lambda_{i}=x_{i}\\beta
"""
offset = getattr(self, "offset", 0)
exposure = getattr(self, "exposure", 0)
X = self.exog
L = np.exp(np.dot(X,params) + offset + exposure)
return np.dot(self.endog - L, X)
def score_obs(self, params):
"""
Poisson model Jacobian of the log-likelihood for each observation
Parameters
----------
params : array-like
The parameters of the model
Returns
-------
score : ndarray (nobs, k_vars)
The score vector of the model evaluated at `params`
Notes
-----
.. math:: \\frac{\\partial\\ln L_{i}}{\\partial\\beta}=\\left(y_{i}-\\lambda_{i}\\right)x_{i}
for observations :math:`i=1,...,n`
where the loglinear model is assumed
.. math:: \\ln\\lambda_{i}=x_{i}\\beta
"""
offset = getattr(self, "offset", 0)
exposure = getattr(self, "exposure", 0)
X = self.exog
L = np.exp(np.dot(X,params) + offset + exposure)
return (self.endog - L)[:,None] * X
jac = np.deprecate(score_obs, 'jac', 'score_obs', "Use score_obs method."
" jac will be removed in 0.7")
def hessian(self, params):
"""
Poisson model Hessian matrix of the loglikelihood
Parameters
----------
params : array-like
The parameters of the model
Returns
-------
hess : ndarray, (k_vars, k_vars)
The Hessian, second derivative of loglikelihood function,
evaluated at `params`
Notes
-----
.. math:: \\frac{\\partial^{2}\\ln L}{\\partial\\beta\\partial\\beta^{\\prime}}=-\\sum_{i=1}^{n}\\lambda_{i}x_{i}x_{i}^{\\prime}
where the loglinear model is assumed
.. math:: \\ln\\lambda_{i}=x_{i}\\beta
"""
offset = getattr(self, "offset", 0)
exposure = getattr(self, "exposure", 0)
X = self.exog
L = np.exp(np.dot(X,params) + exposure + offset)
return -np.dot(L*X.T, X)
class Logit(BinaryModel):
__doc__ = """
Binary choice logit model
%(params)s
%(extra_params)s
Attributes
-----------
endog : array
A reference to the endogenous response variable
exog : array
A reference to the exogenous design.
""" % {'params' : base._model_params_doc,
'extra_params' : base._missing_param_doc}
def cdf(self, X):
"""
The logistic cumulative distribution function
Parameters
----------
X : array-like
`X` is the linear predictor of the logit model. See notes.
Returns
-------
1/(1 + exp(-X))
Notes
------
In the logit model,
.. math:: \\Lambda\\left(x^{\\prime}\\beta\\right)=\\text{Prob}\\left(Y=1|x\\right)=\\frac{e^{x^{\\prime}\\beta}}{1+e^{x^{\\prime}\\beta}}
"""
X = np.asarray(X)
return 1/(1+np.exp(-X))
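    # Quick numeric sanity check for the logistic cdf (a sketch):
    #
    #   import numpy as np
    #   x = np.array([-np.inf, 0.0, np.inf])
    #   1.0 / (1.0 + np.exp(-x))        # -> array([0. , 0.5, 1. ])
    #
    # For large negative finite x, np.exp(-x) overflows to inf with only a
    # RuntimeWarning and the ratio still evaluates to 0, so no clipping is
    # done here.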
def pdf(self, X):
"""
The logistic probability density function
Parameters
-----------
X : array-like
`X` is the linear predictor of the logit model. See notes.
Returns
-------
pdf : ndarray
            The value of the Logit probability density function, PDF, for each
            point of X. ``np.exp(-X)/(1+np.exp(-X))**2``
Notes
-----
In the logit model,
.. math:: \\lambda\\left(x^{\\prime}\\beta\\right)=\\frac{e^{-x^{\\prime}\\beta}}{\\left(1+e^{-x^{\\prime}\\beta}\\right)^{2}}
"""
X = np.asarray(X)
return np.exp(-X)/(1+np.exp(-X))**2
def loglike(self, params):
"""
Log-likelihood of logit model.
Parameters
-----------
params : array-like
The parameters of the logit model.
Returns
-------
loglike : float
The log-likelihood function of the model evaluated at `params`.
See notes.
Notes
------
.. math:: \\ln L=\\sum_{i}\\ln\\Lambda\\left(q_{i}x_{i}^{\\prime}\\beta\\right)
Where :math:`q=2y-1`. This simplification comes from the fact that the
logistic distribution is symmetric.
"""
q = 2*self.endog - 1
X = self.exog
return np.sum(np.log(self.cdf(q*np.dot(X,params))))
def loglikeobs(self, params):
"""
Log-likelihood of logit model for each observation.
Parameters
-----------
params : array-like
The parameters of the logit model.
Returns
-------
loglike : ndarray (nobs,)
The log likelihood for each observation of the model evaluated
at `params`. See Notes
Notes
------
.. math:: \\ln L=\\sum_{i}\\ln\\Lambda\\left(q_{i}x_{i}^{\\prime}\\beta\\right)
for observations :math:`i=1,...,n`
where :math:`q=2y-1`. This simplification comes from the fact that the
logistic distribution is symmetric.
"""
q = 2*self.endog - 1
X = self.exog
return np.log(self.cdf(q*np.dot(X,params)))
def score(self, params):
"""
Logit model score (gradient) vector of the log-likelihood
Parameters
----------
params: array-like
The parameters of the model
Returns
-------
score : ndarray, 1-D
The score vector of the model, i.e. the first derivative of the
loglikelihood function, evaluated at `params`
Notes
-----
.. math:: \\frac{\\partial\\ln L}{\\partial\\beta}=\\sum_{i=1}^{n}\\left(y_{i}-\\Lambda_{i}\\right)x_{i}
"""
y = self.endog
X = self.exog
L = self.cdf(np.dot(X,params))
return np.dot(y - L,X)
def score_obs(self, params):
"""
Logit model Jacobian of the log-likelihood for each observation
Parameters
----------
params: array-like
The parameters of the model
Returns
-------
jac : ndarray, (nobs, k_vars)
The derivative of the loglikelihood for each observation evaluated
at `params`.
Notes
-----
.. math:: \\frac{\\partial\\ln L_{i}}{\\partial\\beta}=\\left(y_{i}-\\Lambda_{i}\\right)x_{i}
for observations :math:`i=1,...,n`
"""
y = self.endog
X = self.exog
L = self.cdf(np.dot(X, params))
return (y - L)[:,None] * X
jac = np.deprecate(score_obs, 'jac', 'score_obs', "Use score_obs method."
" jac will be removed in 0.7")
def hessian(self, params):
"""
Logit model Hessian matrix of the log-likelihood
Parameters
----------
params : array-like
The parameters of the model
Returns
-------
hess : ndarray, (k_vars, k_vars)
The Hessian, second derivative of loglikelihood function,
evaluated at `params`
Notes
-----
.. math:: \\frac{\\partial^{2}\\ln L}{\\partial\\beta\\partial\\beta^{\\prime}}=-\\sum_{i}\\Lambda_{i}\\left(1-\\Lambda_{i}\\right)x_{i}x_{i}^{\\prime}
"""
X = self.exog
L = self.cdf(np.dot(X,params))
return -np.dot(L*(1-L)*X.T,X)
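    # Sketch: the logit log-likelihood is globally concave, so the Hessian
    # -X'WX with W = diag(L * (1 - L)) is negative semidefinite. A quick check
    # with hypothetical data:
    #
    #   import numpy as np
    #   X = np.random.randn(50, 3)
    #   L = 1 / (1 + np.exp(-X @ np.array([0.1, -0.2, 0.3])))
    #   H = -np.dot(L * (1 - L) * X.T, X)
    #   assert np.all(np.linalg.eigvalsh(H) <= 1e-10)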
def fit(self, start_params=None, method='newton', maxiter=35,
full_output=1, disp=1, callback=None, **kwargs):
bnryfit = super(Logit, self).fit(start_params=start_params,
method=method, maxiter=maxiter, full_output=full_output,
disp=disp, callback=callback, **kwargs)
discretefit = LogitResults(self, bnryfit)
return BinaryResultsWrapper(discretefit)
fit.__doc__ = DiscreteModel.fit.__doc__
class Probit(BinaryModel):
__doc__ = """
Binary choice Probit model
%(params)s
%(extra_params)s
Attributes
-----------
endog : array
A reference to the endogenous response variable
exog : array
A reference to the exogenous design.
""" % {'params' : base._model_params_doc,
'extra_params' : base._missing_param_doc}
def cdf(self, X):
"""
Probit (Normal) cumulative distribution function
Parameters
----------
X : array-like
The linear predictor of the model (XB).
Returns
--------
cdf : ndarray
The cdf evaluated at `X`.
Notes
-----
This function is just an alias for scipy.stats.norm.cdf
"""
return stats.norm._cdf(X)
def pdf(self, X):
"""
Probit (Normal) probability density function
Parameters
----------
X : array-like
The linear predictor of the model (XB).
Returns
--------
pdf : ndarray
The value of the normal density function for each point of X.
Notes
-----
This function is just an alias for scipy.stats.norm.pdf
"""
X = np.asarray(X)
return stats.norm._pdf(X)
def loglike(self, params):
"""
Log-likelihood of probit model (i.e., the normal distribution).
Parameters
----------
params : array-like
The parameters of the model.
Returns
-------
loglike : float
The log-likelihood function of the model evaluated at `params`.
See notes.
Notes
-----
.. math:: \\ln L=\\sum_{i}\\ln\\Phi\\left(q_{i}x_{i}^{\\prime}\\beta\\right)
Where :math:`q=2y-1`. This simplification comes from the fact that the
normal distribution is symmetric.
"""
q = 2*self.endog - 1
X = self.exog
return np.sum(np.log(np.clip(self.cdf(q*np.dot(X,params)),
FLOAT_EPS, 1)))
def loglikeobs(self, params):
"""
Log-likelihood of probit model for each observation
Parameters
----------
params : array-like
The parameters of the model.
Returns
-------
loglike : ndarray (nobs,)
The log likelihood for each observation of the model evaluated
at `params`. See Notes
Notes
-----
.. math:: \\ln L_{i}=\\ln\\Phi\\left(q_{i}x_{i}^{\\prime}\\beta\\right)
for observations :math:`i=1,...,n`
where :math:`q=2y-1`. This simplification comes from the fact that the
normal distribution is symmetric.
"""
q = 2*self.endog - 1
X = self.exog
return np.log(np.clip(self.cdf(q*np.dot(X,params)), FLOAT_EPS, 1))
def score(self, params):
"""
Probit model score (gradient) vector
Parameters
----------
params : array-like
The parameters of the model
Returns
-------
score : ndarray, 1-D
The score vector of the model, i.e. the first derivative of the
loglikelihood function, evaluated at `params`
Notes
-----
.. math:: \\frac{\\partial\\ln L}{\\partial\\beta}=\\sum_{i=1}^{n}\\left[\\frac{q_{i}\\phi\\left(q_{i}x_{i}^{\\prime}\\beta\\right)}{\\Phi\\left(q_{i}x_{i}^{\\prime}\\beta\\right)}\\right]x_{i}
Where :math:`q=2y-1`. This simplification comes from the fact that the
normal distribution is symmetric.
"""
y = self.endog
X = self.exog
XB = np.dot(X,params)
q = 2*y - 1
# clip to get rid of invalid divide complaint
L = q*self.pdf(q*XB)/np.clip(self.cdf(q*XB), FLOAT_EPS, 1 - FLOAT_EPS)
return np.dot(L,X)
def score_obs(self, params):
"""
Probit model Jacobian for each observation
Parameters
----------
params : array-like
The parameters of the model
Returns
-------
jac : ndarray, (nobs, k_vars)
The derivative of the loglikelihood for each observation evaluated
at `params`.
Notes
-----
.. math:: \\frac{\\partial\\ln L_{i}}{\\partial\\beta}=\\left[\\frac{q_{i}\\phi\\left(q_{i}x_{i}^{\\prime}\\beta\\right)}{\\Phi\\left(q_{i}x_{i}^{\\prime}\\beta\\right)}\\right]x_{i}
for observations :math:`i=1,...,n`
Where :math:`q=2y-1`. This simplification comes from the fact that the
normal distribution is symmetric.
"""
y = self.endog
X = self.exog
XB = np.dot(X,params)
q = 2*y - 1
# clip to get rid of invalid divide complaint
L = q*self.pdf(q*XB)/np.clip(self.cdf(q*XB), FLOAT_EPS, 1 - FLOAT_EPS)
return L[:,None] * X
jac = np.deprecate(score_obs, 'jac', 'score_obs', "Use score_obs method."
" jac will be removed in 0.7")
def hessian(self, params):
"""
Probit model Hessian matrix of the log-likelihood
Parameters
----------
params : array-like
The parameters of the model
Returns
-------
hess : ndarray, (k_vars, k_vars)
The Hessian, second derivative of loglikelihood function,
evaluated at `params`
Notes
-----
        .. math:: \\frac{\\partial^{2}\\ln L}{\\partial\\beta\\partial\\beta^{\\prime}}=-\\sum_{i=1}^{n}\\lambda_{i}\\left(\\lambda_{i}+x_{i}^{\\prime}\\beta\\right)x_{i}x_{i}^{\\prime}
where
.. math:: \\lambda_{i}=\\frac{q_{i}\\phi\\left(q_{i}x_{i}^{\\prime}\\beta\\right)}{\\Phi\\left(q_{i}x_{i}^{\\prime}\\beta\\right)}
and :math:`q=2y-1`
"""
X = self.exog
XB = np.dot(X,params)
q = 2*self.endog - 1
L = q*self.pdf(q*XB)/self.cdf(q*XB)
return np.dot(-L*(L+XB)*X.T,X)
def fit(self, start_params=None, method='newton', maxiter=35,
full_output=1, disp=1, callback=None, **kwargs):
bnryfit = super(Probit, self).fit(start_params=start_params,
method=method, maxiter=maxiter, full_output=full_output,
disp=disp, callback=callback, **kwargs)
discretefit = ProbitResults(self, bnryfit)
return BinaryResultsWrapper(discretefit)
fit.__doc__ = DiscreteModel.fit.__doc__
class MNLogit(MultinomialModel):
__doc__ = """
Multinomial logit model
Parameters
----------
endog : array-like
        `endog` is a 1-d vector of the endogenous response. `endog` can
contain strings, ints, or floats. Note that if it contains strings,
every distinct string will be a category. No stripping of whitespace
is done.
exog : array-like
A nobs x k array where `nobs` is the number of observations and `k`
is the number of regressors. An intercept is not included by default
and should be added by the user. See `statsmodels.tools.add_constant`.
%(extra_params)s
Attributes
----------
endog : array
A reference to the endogenous response variable
exog : array
A reference to the exogenous design.
J : float
The number of choices for the endogenous variable. Note that this
is zero-indexed.
K : float
The actual number of parameters for the exogenous design. Includes
the constant if the design has one.
names : dict
A dictionary mapping the column number in `wendog` to the variables
in `endog`.
wendog : array
An n x j array where j is the number of unique categories in `endog`.
Each column of j is a dummy variable indicating the category of
each observation. See `names` for a dictionary mapping each column to
its category.
Notes
-----
See developer notes for further information on `MNLogit` internals.
""" % {'extra_params' : base._missing_param_doc}
def pdf(self, eXB):
"""
NotImplemented
"""
raise NotImplementedError
def cdf(self, X):
"""
Multinomial logit cumulative distribution function.
Parameters
----------
X : array
The linear predictor of the model XB.
Returns
--------
cdf : ndarray
The cdf evaluated at `X`.
Notes
-----
In the multinomial logit model.
.. math:: \\frac{\\exp\\left(\\beta_{j}^{\\prime}x_{i}\\right)}{\\sum_{k=0}^{J}\\exp\\left(\\beta_{k}^{\\prime}x_{i}\\right)}
"""
eXB = np.column_stack((np.ones(len(X)), np.exp(X)))
return eXB/eXB.sum(1)[:,None]
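    # Sketch of the column layout above: a column of ones is prepended for the
    # base category before normalizing, so each row sums to 1 and column 0 is
    # the base-case probability.
    #
    #   import numpy as np
    #   XB = np.array([[0.0, 1.0]])                       # J - 1 = 2 linear predictors
    #   eXB = np.column_stack((np.ones(1), np.exp(XB)))
    #   probs = eXB / eXB.sum(1)[:, None]                 # shape (1, 3), sums to 1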
def loglike(self, params):
"""
Log-likelihood of the multinomial logit model.
Parameters
----------
params : array-like
The parameters of the multinomial logit model.
Returns
-------
loglike : float
The log-likelihood function of the model evaluated at `params`.
See notes.
Notes
------
.. math:: \\ln L=\\sum_{i=1}^{n}\\sum_{j=0}^{J}d_{ij}\\ln\\left(\\frac{\\exp\\left(\\beta_{j}^{\\prime}x_{i}\\right)}{\\sum_{k=0}^{J}\\exp\\left(\\beta_{k}^{\\prime}x_{i}\\right)}\\right)
where :math:`d_{ij}=1` if individual `i` chose alternative `j` and 0
if not.
"""
params = params.reshape(self.K, -1, order='F')
d = self.wendog
logprob = np.log(self.cdf(np.dot(self.exog,params)))
return np.sum(d * logprob)
def loglikeobs(self, params):
"""
Log-likelihood of the multinomial logit model for each observation.
Parameters
----------
params : array-like
The parameters of the multinomial logit model.
Returns
-------
loglike : ndarray (nobs,)
The log likelihood for each observation of the model evaluated
at `params`. See Notes
Notes
------
.. math:: \\ln L_{i}=\\sum_{j=0}^{J}d_{ij}\\ln\\left(\\frac{\\exp\\left(\\beta_{j}^{\\prime}x_{i}\\right)}{\\sum_{k=0}^{J}\\exp\\left(\\beta_{k}^{\\prime}x_{i}\\right)}\\right)
for observations :math:`i=1,...,n`
where :math:`d_{ij}=1` if individual `i` chose alternative `j` and 0
if not.
"""
params = params.reshape(self.K, -1, order='F')
d = self.wendog
logprob = np.log(self.cdf(np.dot(self.exog,params)))
return d * logprob
def score(self, params):
"""
Score matrix for multinomial logit model log-likelihood
Parameters
----------
params : array
The parameters of the multinomial logit model.
Returns
--------
score : ndarray, (K * (J-1),)
The 2-d score vector, i.e. the first derivative of the
loglikelihood function, of the multinomial logit model evaluated at
`params`.
Notes
-----
.. math:: \\frac{\\partial\\ln L}{\\partial\\beta_{j}}=\\sum_{i}\\left(d_{ij}-\\frac{\\exp\\left(\\beta_{j}^{\\prime}x_{i}\\right)}{\\sum_{k=0}^{J}\\exp\\left(\\beta_{k}^{\\prime}x_{i}\\right)}\\right)x_{i}
for :math:`j=1,...,J`
In the multinomial model the score matrix is K x J-1 but is returned
as a flattened array to work with the solvers.
"""
params = params.reshape(self.K, -1, order='F')
firstterm = self.wendog[:,1:] - self.cdf(np.dot(self.exog,
params))[:,1:]
#NOTE: might need to switch terms if params is reshaped
return np.dot(firstterm.T, self.exog).flatten()
def loglike_and_score(self, params):
"""
Returns log likelihood and score, efficiently reusing calculations.
Note that both of these returned quantities will need to be negated
before being minimized by the maximum likelihood fitting machinery.
"""
params = params.reshape(self.K, -1, order='F')
cdf_dot_exog_params = self.cdf(np.dot(self.exog, params))
loglike_value = np.sum(self.wendog * np.log(cdf_dot_exog_params))
firstterm = self.wendog[:, 1:] - cdf_dot_exog_params[:, 1:]
score_array = np.dot(firstterm.T, self.exog).flatten()
return loglike_value, score_array
def score_obs(self, params):
"""
Jacobian matrix for multinomial logit model log-likelihood
Parameters
----------
params : array
The parameters of the multinomial logit model.
Returns
--------
jac : ndarray, (nobs, k_vars*(J-1))
The derivative of the loglikelihood for each observation evaluated
at `params` .
Notes
-----
.. math:: \\frac{\\partial\\ln L_{i}}{\\partial\\beta_{j}}=\\left(d_{ij}-\\frac{\\exp\\left(\\beta_{j}^{\\prime}x_{i}\\right)}{\\sum_{k=0}^{J}\\exp\\left(\\beta_{k}^{\\prime}x_{i}\\right)}\\right)x_{i}
for :math:`j=1,...,J`, for observations :math:`i=1,...,n`
In the multinomial model the score vector is K x (J-1) but is returned
as a flattened array. The Jacobian has the observations in rows and
        the flattened array of derivatives in columns.
"""
params = params.reshape(self.K, -1, order='F')
firstterm = self.wendog[:,1:] - self.cdf(np.dot(self.exog,
params))[:,1:]
#NOTE: might need to switch terms if params is reshaped
return (firstterm[:,:,None] * self.exog[:,None,:]).reshape(self.exog.shape[0], -1)
jac = np.deprecate(score_obs, 'jac', 'score_obs', "Use score_obs method."
" jac will be removed in 0.7")
def hessian(self, params):
"""
Multinomial logit Hessian matrix of the log-likelihood
Parameters
-----------
params : array-like
The parameters of the model
Returns
-------
hess : ndarray, (J*K, J*K)
The Hessian, second derivative of loglikelihood function with
respect to the flattened parameters, evaluated at `params`
Notes
-----
.. math:: \\frac{\\partial^{2}\\ln L}{\\partial\\beta_{j}\\partial\\beta_{l}}=-\\sum_{i=1}^{n}\\frac{\\exp\\left(\\beta_{j}^{\\prime}x_{i}\\right)}{\\sum_{k=0}^{J}\\exp\\left(\\beta_{k}^{\\prime}x_{i}\\right)}\\left[\\boldsymbol{1}\\left(j=l\\right)-\\frac{\\exp\\left(\\beta_{l}^{\\prime}x_{i}\\right)}{\\sum_{k=0}^{J}\\exp\\left(\\beta_{k}^{\\prime}x_{i}\\right)}\\right]x_{i}x_{l}^{\\prime}
where
:math:`\\boldsymbol{1}\\left(j=l\\right)` equals 1 if `j` = `l` and 0
otherwise.
The actual Hessian matrix has J**2 * K x K elements. Our Hessian
is reshaped to be square (J*K, J*K) so that the solvers can use it.
This implementation does not take advantage of the symmetry of
the Hessian and could probably be refactored for speed.
"""
params = params.reshape(self.K, -1, order='F')
X = self.exog
pr = self.cdf(np.dot(X,params))
partials = []
J = self.wendog.shape[1] - 1
K = self.exog.shape[1]
for i in range(J):
for j in range(J): # this loop assumes we drop the first col.
if i == j:
partials.append(\
-np.dot(((pr[:,i+1]*(1-pr[:,j+1]))[:,None]*X).T,X))
else:
partials.append(-np.dot(((pr[:,i+1]*-pr[:,j+1])[:,None]*X).T,X))
H = np.array(partials)
# the developer's notes on multinomial should clear this math up
H = np.transpose(H.reshape(J,J,K,K), (0,2,1,3)).reshape(J*K,J*K)
return H
#TODO: Weibull can be replaced by a survival analysis function
# like Stata's streg (the Cox model as well)
#class Weibull(DiscreteModel):
# """
# Binary choice Weibull model
#
# Notes
# ------
# This is unfinished and untested.
# """
##TODO: add analytic hessian for Weibull
# def initialize(self):
# pass
#
# def cdf(self, X):
# """
# Gumbell (Log Weibull) cumulative distribution function
# """
## return np.exp(-np.exp(-X))
# return stats.gumbel_r.cdf(X)
# # these two are equivalent.
# # Greene table and discussion is incorrect.
#
# def pdf(self, X):
# """
# Gumbell (LogWeibull) probability distribution function
# """
# return stats.gumbel_r.pdf(X)
#
# def loglike(self, params):
# """
# Loglikelihood of Weibull distribution
# """
# X = self.exog
# cdf = self.cdf(np.dot(X,params))
# y = self.endog
# return np.sum(y*np.log(cdf) + (1-y)*np.log(1-cdf))
#
# def score(self, params):
# y = self.endog
# X = self.exog
# F = self.cdf(np.dot(X,params))
# f = self.pdf(np.dot(X,params))
# term = (y*f/F + (1 - y)*-f/(1-F))
# return np.dot(term,X)
#
# def hessian(self, params):
# hess = nd.Jacobian(self.score)
# return hess(params)
#
# def fit(self, start_params=None, method='newton', maxiter=35, tol=1e-08):
## The example had problems with all zero start values, Hessian = 0
# if start_params is None:
# start_params = OLS(self.endog, self.exog).fit().params
# mlefit = super(Weibull, self).fit(start_params=start_params,
# method=method, maxiter=maxiter, tol=tol)
# return mlefit
#
class NegativeBinomial(CountModel):
__doc__ = """
Negative Binomial Model for count data
%(params)s
%(extra_params)s
Attributes
-----------
endog : array
A reference to the endogenous response variable
exog : array
A reference to the exogenous design.
References
----------
    Greene, W. 2008. "Functional forms for the negative binomial model
for count data". Economics Letters. Volume 99, Number 3, pp.585-590.
Hilbe, J.M. 2011. "Negative binomial regression". Cambridge University
Press.
""" % {'params' : base._model_params_doc,
'extra_params' :
"""loglike_method : string
Log-likelihood type. 'nb2','nb1', or 'geometric'.
Fitted value :math:`\\mu`
Heterogeneity parameter :math:`\\alpha`
- nb2: Variance equal to :math:`\\mu + \\alpha\\mu^2` (most common)
- nb1: Variance equal to :math:`\\mu + \\alpha\\mu`
- geometric: Variance equal to :math:`\\mu + \\mu^2`
offset : array_like
Offset is added to the linear prediction with coefficient equal to 1.
exposure : array_like
Log(exposure) is added to the linear prediction with coefficient
equal to 1.
""" + base._missing_param_doc}
def __init__(self, endog, exog, loglike_method='nb2', offset=None,
exposure=None, missing='none', **kwargs):
super(NegativeBinomial, self).__init__(endog, exog, offset=offset,
exposure=exposure,
missing=missing, **kwargs)
self.loglike_method = loglike_method
self._initialize()
if loglike_method in ['nb2', 'nb1']:
self.exog_names.append('alpha')
self.k_extra = 1
else:
self.k_extra = 0
# store keys for extras if we need to recreate model instance
# we need to append keys that don't go to super
self._init_keys.append('loglike_method')
def _initialize(self):
if self.loglike_method == 'nb2':
self.hessian = self._hessian_nb2
self.score = self._score_nbin
self.loglikeobs = self._ll_nb2
self._transparams = True # transform lnalpha -> alpha in fit
elif self.loglike_method == 'nb1':
self.hessian = self._hessian_nb1
self.score = self._score_nb1
self.loglikeobs = self._ll_nb1
self._transparams = True # transform lnalpha -> alpha in fit
elif self.loglike_method == 'geometric':
self.hessian = self._hessian_geom
self.score = self._score_geom
self.loglikeobs = self._ll_geometric
else:
            raise NotImplementedError("Likelihood type must be 'nb1', 'nb2' "
                                      "or 'geometric'")
# Workaround to pickle instance methods
def __getstate__(self):
odict = self.__dict__.copy() # copy the dict since we change it
del odict['hessian']
del odict['score']
del odict['loglikeobs']
return odict
def __setstate__(self, indict):
self.__dict__.update(indict)
self._initialize()
def _ll_nbin(self, params, alpha, Q=0):
endog = self.endog
mu = self.predict(params)
size = 1/alpha * mu**Q
prob = size/(size+mu)
coeff = (gammaln(size+endog) - gammaln(endog+1) -
gammaln(size))
llf = coeff + size*np.log(prob) + endog*np.log(1-prob)
return llf
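    # Parameterization sketch: with Q=0 (NB2) the size parameter is 1/alpha, so
    # alpha=1 reproduces the geometric model used by _ll_geometric below. A
    # hypothetical cross-check against scipy (kept as a comment):
    #
    #   import numpy as np
    #   from scipy import stats
    #   mu, alpha, y = 2.0, 1.0, np.arange(5)
    #   size = 1.0 / alpha                            # Q = 0
    #   prob = size / (size + mu)
    #   ll_nb = stats.nbinom.logpmf(y, size, prob)    # matches _ll_nbin termwise
    #   ll_geom = stats.geom.logpmf(y + 1, prob)      # geometric = NB2 with alpha=1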
def _ll_nb2(self, params):
if self._transparams: # got lnalpha during fit
alpha = np.exp(params[-1])
else:
alpha = params[-1]
return self._ll_nbin(params[:-1], alpha, Q=0)
def _ll_nb1(self, params):
if self._transparams: # got lnalpha during fit
alpha = np.exp(params[-1])
else:
alpha = params[-1]
return self._ll_nbin(params[:-1], alpha, Q=1)
def _ll_geometric(self, params):
        # geometric is NB2 with dispersion alpha = 1, i.e. exp(lnalpha) with lnalpha = 0
return self._ll_nbin(params, 1, 0)
def loglike(self, params):
r"""
Loglikelihood for negative binomial model
Parameters
----------
params : array-like
The parameters of the model. If `loglike_method` is nb1 or
nb2, then the ancillary parameter is expected to be the
last element.
Returns
-------
llf : float
The loglikelihood value at `params`
Notes
-----
Following notation in Greene (2008), with negative binomial
heterogeneity parameter :math:`\alpha`:
.. math::
\lambda_i &= exp(X\beta) \\
\theta &= 1 / \alpha \\
g_i &= \theta \lambda_i^Q \\
w_i &= g_i/(g_i + \lambda_i) \\
r_i &= \theta / (\theta+\lambda_i) \\
ln \mathcal{L}_i &= ln \Gamma(y_i+g_i) - ln \Gamma(1+y_i) + g_iln (r_i) + y_i ln(1-r_i)
        where :math:`Q=0` for NB2 and geometric and :math:`Q=1` for NB1.
For the geometric, :math:`\alpha=0` as well.
"""
llf = np.sum(self.loglikeobs(params))
return llf
def _score_geom(self, params):
exog = self.exog
y = self.endog[:,None]
mu = self.predict(params)[:,None]
dparams = exog * (y-mu)/(mu+1)
return dparams.sum(0)
def _score_nbin(self, params, Q=0):
"""
        Score vector for the negative binomial model (NB2 when Q=0, NB1 when Q=1)
"""
if self._transparams: # lnalpha came in during fit
alpha = np.exp(params[-1])
else:
alpha = params[-1]
params = params[:-1]
exog = self.exog
y = self.endog[:,None]
mu = self.predict(params)[:,None]
a1 = 1/alpha * mu**Q
if Q: # nb1
dparams = exog*mu/alpha*(np.log(1/(alpha + 1)) +
special.digamma(y + mu/alpha) -
special.digamma(mu/alpha))
dalpha = ((alpha*(y - mu*np.log(1/(alpha + 1)) -
mu*(special.digamma(y + mu/alpha) -
special.digamma(mu/alpha) + 1)) -
mu*(np.log(1/(alpha + 1)) +
special.digamma(y + mu/alpha) -
special.digamma(mu/alpha)))/
(alpha**2*(alpha + 1))).sum()
else: # nb2
dparams = exog*a1 * (y-mu)/(mu+a1)
da1 = -alpha**-2
dalpha = (special.digamma(a1+y) - special.digamma(a1) + np.log(a1)
- np.log(a1+mu) - (a1+y)/(a1+mu) + 1).sum()*da1
#multiply above by constant outside sum to reduce rounding error
if self._transparams:
return np.r_[dparams.sum(0), dalpha*alpha]
else:
return np.r_[dparams.sum(0), dalpha]
def _score_nb1(self, params):
return self._score_nbin(params, Q=1)
def _hessian_geom(self, params):
exog = self.exog
y = self.endog[:,None]
mu = self.predict(params)[:,None]
# for dl/dparams dparams
dim = exog.shape[1]
hess_arr = np.empty((dim, dim))
const_arr = mu*(1+y)/(mu+1)**2
for i in range(dim):
for j in range(dim):
if j > i:
continue
hess_arr[i,j] = np.sum(-exog[:,i,None] * exog[:,j,None] *
const_arr, axis=0)
tri_idx = np.triu_indices(dim, k=1)
hess_arr[tri_idx] = hess_arr.T[tri_idx]
return hess_arr
def _hessian_nb1(self, params):
"""
Hessian of NB1 model.
"""
if self._transparams: # lnalpha came in during fit
alpha = np.exp(params[-1])
else:
alpha = params[-1]
params = params[:-1]
exog = self.exog
y = self.endog[:,None]
mu = self.predict(params)[:,None]
a1 = mu/alpha
# for dl/dparams dparams
dim = exog.shape[1]
hess_arr = np.empty((dim+1,dim+1))
#const_arr = a1*mu*(a1+y)/(mu+a1)**2
# not all of dparams
dparams = exog/alpha*(np.log(1/(alpha + 1)) +
special.digamma(y + mu/alpha) -
special.digamma(mu/alpha))
dmudb = exog*mu
xmu_alpha = exog*mu/alpha
trigamma = (special.polygamma(1, mu/alpha + y) -
special.polygamma(1, mu/alpha))
for i in range(dim):
for j in range(dim):
if j > i:
continue
hess_arr[i,j] = np.sum(dparams[:,i,None] * dmudb[:,j,None] +
xmu_alpha[:,i,None] * xmu_alpha[:,j,None] *
trigamma, axis=0)
tri_idx = np.triu_indices(dim, k=1)
hess_arr[tri_idx] = hess_arr.T[tri_idx]
# for dl/dparams dalpha
da1 = -alpha**-2
dldpda = np.sum(-mu/alpha * dparams + exog*mu/alpha *
(-trigamma*mu/alpha**2 - 1/(alpha+1)), axis=0)
hess_arr[-1,:-1] = dldpda
hess_arr[:-1,-1] = dldpda
# for dl/dalpha dalpha
digamma_part = (special.digamma(y + mu/alpha) -
special.digamma(mu/alpha))
log_alpha = np.log(1/(alpha+1))
alpha3 = alpha**3
alpha2 = alpha**2
mu2 = mu**2
dada = ((alpha3*mu*(2*log_alpha + 2*digamma_part + 3) -
2*alpha3*y + alpha2*mu2*trigamma +
4*alpha2*mu*(log_alpha + digamma_part) +
alpha2 * (2*mu - y) +
2*alpha*mu2*trigamma +
2*alpha*mu*(log_alpha + digamma_part) +
mu2*trigamma)/(alpha**4*(alpha2 + 2*alpha + 1)))
hess_arr[-1,-1] = dada.sum()
return hess_arr
def _hessian_nb2(self, params):
"""
Hessian of NB2 model.
"""
if self._transparams: # lnalpha came in during fit
alpha = np.exp(params[-1])
else:
alpha = params[-1]
a1 = 1/alpha
params = params[:-1]
exog = self.exog
y = self.endog[:,None]
mu = self.predict(params)[:,None]
# for dl/dparams dparams
dim = exog.shape[1]
hess_arr = np.empty((dim+1,dim+1))
const_arr = a1*mu*(a1+y)/(mu+a1)**2
for i in range(dim):
for j in range(dim):
if j > i:
continue
hess_arr[i,j] = np.sum(-exog[:,i,None] * exog[:,j,None] *
const_arr, axis=0)
tri_idx = np.triu_indices(dim, k=1)
hess_arr[tri_idx] = hess_arr.T[tri_idx]
# for dl/dparams dalpha
da1 = -alpha**-2
dldpda = np.sum(mu*exog*(y-mu)*da1/(mu+a1)**2 , axis=0)
hess_arr[-1,:-1] = dldpda
hess_arr[:-1,-1] = dldpda
# for dl/dalpha dalpha
#NOTE: polygamma(1,x) is the trigamma function
da2 = 2*alpha**-3
dalpha = da1 * (special.digamma(a1+y) - special.digamma(a1) +
np.log(a1) - np.log(a1+mu) - (a1+y)/(a1+mu) + 1)
dada = (da2 * dalpha/da1 + da1**2 * (special.polygamma(1, a1+y) -
special.polygamma(1, a1) + 1/a1 - 1/(a1 + mu) +
(y - mu)/(mu + a1)**2)).sum()
hess_arr[-1,-1] = dada
return hess_arr
#TODO: replace this with analytic where is it used?
def score_obs(self, params):
sc = approx_fprime_cs(params, self.loglikeobs)
return sc
jac = np.deprecate(score_obs, 'jac', 'score_obs', "Use score_obs method."
" jac will be removed in 0.7")
def fit(self, start_params=None, method='bfgs', maxiter=35,
full_output=1, disp=1, callback=None,
cov_type='nonrobust', cov_kwds=None, use_t=None, **kwargs):
# Note: don't let super handle robust covariance because it has
# transformed params
if self.loglike_method.startswith('nb') and method not in ['newton',
'ncg']:
self._transparams = True # in case same Model instance is refit
elif self.loglike_method.startswith('nb'): # method is newton/ncg
self._transparams = False # because we need to step in alpha space
if start_params is None:
# Use poisson fit as first guess.
#TODO, Warning: this assumes exposure is logged
offset = getattr(self, "offset", 0) + getattr(self, "exposure", 0)
if np.size(offset) == 1 and offset == 0:
offset = None
mod_poi = Poisson(self.endog, self.exog, offset=offset)
start_params = mod_poi.fit(disp=0).params
if self.loglike_method.startswith('nb'):
start_params = np.append(start_params, 0.1)
mlefit = super(NegativeBinomial, self).fit(start_params=start_params,
maxiter=maxiter, method=method, disp=disp,
full_output=full_output, callback=lambda x:x,
**kwargs)
# TODO: Fix NBin _check_perfect_pred
if self.loglike_method.startswith('nb'):
# mlefit is a wrapped counts results
self._transparams = False # don't need to transform anymore now
# change from lnalpha to alpha
if method not in ["newton", "ncg"]:
mlefit._results.params[-1] = np.exp(mlefit._results.params[-1])
nbinfit = NegativeBinomialResults(self, mlefit._results)
result = NegativeBinomialResultsWrapper(nbinfit)
else:
result = mlefit
if cov_kwds is None:
cov_kwds = {} #TODO: make this unnecessary ?
result._get_robustcov_results(cov_type=cov_type,
use_self=True, use_t=use_t, **cov_kwds)
return result
def fit_regularized(self, start_params=None, method='l1',
maxiter='defined_by_method', full_output=1, disp=1, callback=None,
alpha=0, trim_mode='auto', auto_trim_tol=0.01, size_trim_tol=1e-4,
qc_tol=0.03, **kwargs):
if self.loglike_method.startswith('nb') and (np.size(alpha) == 1 and
alpha != 0):
# don't penalize alpha if alpha is scalar
k_params = self.exog.shape[1] + self.k_extra
alpha = alpha * np.ones(k_params)
alpha[-1] = 0
# alpha for regularized poisson to get starting values
alpha_p = alpha[:-1] if (self.k_extra and np.size(alpha) > 1) else alpha
self._transparams = False
if start_params is None:
# Use poisson fit as first guess.
#TODO, Warning: this assumes exposure is logged
offset = getattr(self, "offset", 0) + getattr(self, "exposure", 0)
if np.size(offset) == 1 and offset == 0:
offset = None
mod_poi = Poisson(self.endog, self.exog, offset=offset)
start_params = mod_poi.fit_regularized(
start_params=start_params, method=method, maxiter=maxiter,
full_output=full_output, disp=0, callback=callback,
alpha=alpha_p, trim_mode=trim_mode, auto_trim_tol=auto_trim_tol,
size_trim_tol=size_trim_tol, qc_tol=qc_tol, **kwargs).params
if self.loglike_method.startswith('nb'):
start_params = np.append(start_params, 0.1)
cntfit = super(CountModel, self).fit_regularized(
start_params=start_params, method=method, maxiter=maxiter,
full_output=full_output, disp=disp, callback=callback,
alpha=alpha, trim_mode=trim_mode, auto_trim_tol=auto_trim_tol,
size_trim_tol=size_trim_tol, qc_tol=qc_tol, **kwargs)
if method in ['l1', 'l1_cvxopt_cp']:
discretefit = L1NegativeBinomialResults(self, cntfit)
else:
raise Exception(
"argument method == %s, which is not handled" % method)
return L1NegativeBinomialResultsWrapper(discretefit)
### Results Class ###
class DiscreteResults(base.LikelihoodModelResults):
__doc__ = _discrete_results_docs % {"one_line_description" :
"A results class for the discrete dependent variable models.",
"extra_attr" : ""}
def __init__(self, model, mlefit, cov_type='nonrobust', cov_kwds=None,
use_t=None):
#super(DiscreteResults, self).__init__(model, params,
# np.linalg.inv(-hessian), scale=1.)
self.model = model
self.df_model = model.df_model
self.df_resid = model.df_resid
self._cache = resettable_cache()
self.nobs = model.exog.shape[0]
self.__dict__.update(mlefit.__dict__)
if not hasattr(self, 'cov_type'):
# do this only if super, i.e. mlefit didn't already add cov_type
# robust covariance
if use_t is not None:
self.use_t = use_t
if cov_type == 'nonrobust':
self.cov_type = 'nonrobust'
self.cov_kwds = {'description' : 'Standard Errors assume that the ' +
'covariance matrix of the errors is correctly ' +
'specified.'}
else:
if cov_kwds is None:
cov_kwds = {}
from statsmodels.base.covtype import get_robustcov_results
get_robustcov_results(self, cov_type=cov_type, use_self=True,
**cov_kwds)
def __getstate__(self):
try:
#remove unpicklable callback
self.mle_settings['callback'] = None
except (AttributeError, KeyError):
pass
return self.__dict__
@cache_readonly
def prsquared(self):
return 1 - self.llf/self.llnull
@cache_readonly
def llr(self):
return -2*(self.llnull - self.llf)
@cache_readonly
def llr_pvalue(self):
return stats.chisqprob(self.llr, self.df_model)
@cache_readonly
def llnull(self):
model = self.model
kwds = model._get_init_kwds()
# TODO: what parameters to pass to fit?
mod_null = model.__class__(model.endog, np.ones(self.nobs), **kwds)
# TODO: consider catching and warning on convergence failure?
# in the meantime, try hard to converge. see
# TestPoissonConstrained1a.test_smoke
res_null = mod_null.fit(disp=0, warn_convergence=False,
maxiter=10000)
return res_null.llf
@cache_readonly
def fittedvalues(self):
return np.dot(self.model.exog, self.params[:self.model.exog.shape[1]])
@cache_readonly
def aic(self):
return -2*(self.llf - (self.df_model+1))
@cache_readonly
def bic(self):
return -2*self.llf + np.log(self.nobs)*(self.df_model+1)
def _get_endog_name(self, yname, yname_list):
if yname is None:
yname = self.model.endog_names
if yname_list is None:
yname_list = self.model.endog_names
return yname, yname_list
def get_margeff(self, at='overall', method='dydx', atexog=None,
dummy=False, count=False):
"""Get marginal effects of the fitted model.
Parameters
----------
at : str, optional
Options are:
- 'overall', The average of the marginal effects at each
observation.
- 'mean', The marginal effects at the mean of each regressor.
- 'median', The marginal effects at the median of each regressor.
- 'zero', The marginal effects at zero for each regressor.
- 'all', The marginal effects at each observation. If `at` is all
only margeff will be available from the returned object.
            Note that if `atexog` is specified, then marginal effects for all
            variables not specified by `atexog` are calculated using the `at`
option.
method : str, optional
Options are:
- 'dydx' - dy/dx - No transformation is made and marginal effects
are returned. This is the default.
- 'eyex' - estimate elasticities of variables in `exog` --
d(lny)/d(lnx)
- 'dyex' - estimate semielasticity -- dy/d(lnx)
            - 'eydx' - estimate semielasticity -- d(lny)/dx
            Note that transformations are done after each observation is
calculated. Semi-elasticities for binary variables are computed
using the midpoint method. 'dyex' and 'eyex' do not make sense
for discrete variables.
atexog : array-like, optional
Optionally, you can provide the exogenous variables over which to
            get the marginal effects. This should be a dictionary with the
            zero-indexed column number as the key and the value at which to
            evaluate that regressor as the dictionary value.
Default is None for all independent variables less the constant.
dummy : bool, optional
If False, treats binary variables (if present) as continuous. This
is the default. Else if True, treats binary variables as
changing from 0 to 1. Note that any variable that is either 0 or 1
is treated as binary. Each binary variable is treated separately
for now.
count : bool, optional
If False, treats count variables (if present) as continuous. This
is the default. Else if True, the marginal effect is the
change in probabilities when each observation is increased by one.
Returns
-------
DiscreteMargins : marginal effects instance
Returns an object that holds the marginal effects, standard
errors, confidence intervals, etc. See
`statsmodels.discrete.discrete_margins.DiscreteMargins` for more
information.
Notes
-----
When using after Poisson, returns the expected number of events
per period, assuming that the model is loglinear.
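        Examples
        --------
        A minimal, illustrative sketch (``endog`` and ``exog`` are placeholder
        arrays that are not defined here; the calls are not executed):
        >>> import statsmodels.api as sm
        >>> res = sm.Logit(endog, exog).fit()
        >>> margeff = res.get_margeff(at='overall', method='dydx')
        >>> print(margeff.summary())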
"""
from statsmodels.discrete.discrete_margins import DiscreteMargins
return DiscreteMargins(self, (at, method, atexog, dummy, count))
def summary(self, yname=None, xname=None, title=None, alpha=.05,
yname_list=None):
"""Summarize the Regression Results
Parameters
        ----------
yname : string, optional
Default is `y`
xname : list of strings, optional
            Default is `var_##`, where ## ranges over the number of regressors.
title : string, optional
Title for the top table. If not None, then this replaces the
default title
alpha : float
significance level for the confidence intervals
Returns
-------
smry : Summary instance
this holds the summary tables and text, which can be printed or
converted to various output formats.
See Also
--------
statsmodels.iolib.summary.Summary : class to hold summary
results
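        Examples
        --------
        Illustrative only (``endog`` and ``exog`` are placeholder arrays):
        >>> import statsmodels.api as sm
        >>> res = sm.Poisson(endog, exog).fit()
        >>> print(res.summary())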
"""
top_left = [('Dep. Variable:', None),
('Model:', [self.model.__class__.__name__]),
('Method:', ['MLE']),
('Date:', None),
('Time:', None),
#('No. iterations:', ["%d" % self.mle_retvals['iterations']]),
('converged:', ["%s" % self.mle_retvals['converged']])
]
top_right = [('No. Observations:', None),
('Df Residuals:', None),
('Df Model:', None),
('Pseudo R-squ.:', ["%#6.4g" % self.prsquared]),
('Log-Likelihood:', None),
('LL-Null:', ["%#8.5g" % self.llnull]),
('LLR p-value:', ["%#6.4g" % self.llr_pvalue])
]
if title is None:
title = self.model.__class__.__name__ + ' ' + "Regression Results"
#boiler plate
from statsmodels.iolib.summary import Summary
smry = Summary()
yname, yname_list = self._get_endog_name(yname, yname_list)
# for top of table
smry.add_table_2cols(self, gleft=top_left, gright=top_right, #[],
yname=yname, xname=xname, title=title)
# for parameters, etc
smry.add_table_params(self, yname=yname_list, xname=xname, alpha=alpha,
use_t=self.use_t)
if hasattr(self, 'constraints'):
smry.add_extra_txt(['Model has been estimated subject to linear '
'equality constraints.'])
#diagnostic table not used yet
#smry.add_table_2cols(self, gleft=diagn_left, gright=diagn_right,
# yname=yname, xname=xname,
# title="")
return smry
def summary2(self, yname=None, xname=None, title=None, alpha=.05,
float_format="%.4f"):
"""Experimental function to summarize regression results
Parameters
        ----------
xname : List of strings of length equal to the number of parameters
Names of the independent variables (optional)
yname : string
Name of the dependent variable (optional)
title : string, optional
Title for the top table. If not None, then this replaces the
default title
alpha : float
significance level for the confidence intervals
float_format: string
print format for floats in parameters summary
Returns
-------
smry : Summary instance
this holds the summary tables and text, which can be printed or
converted to various output formats.
See Also
--------
statsmodels.iolib.summary.Summary : class to hold summary
results
"""
# Summary
from statsmodels.iolib import summary2
smry = summary2.Summary()
smry.add_base(results=self, alpha=alpha, float_format=float_format,
xname=xname, yname=yname, title=title)
if hasattr(self, 'constraints'):
smry.add_text('Model has been estimated subject to linear '
'equality constraints.')
return smry
class CountResults(DiscreteResults):
__doc__ = _discrete_results_docs % {
"one_line_description" : "A results class for count data",
"extra_attr" : ""}
@cache_readonly
def resid(self):
"""
Residuals
Notes
-----
The residuals for Count models are defined as
.. math:: y - p
where :math:`p = \\exp(X\\beta)`. Any exposure and offset variables
are also handled.
"""
return self.model.endog - self.predict()
class NegativeBinomialResults(CountResults):
__doc__ = _discrete_results_docs % {
"one_line_description" : "A results class for NegativeBinomial 1 and 2",
"extra_attr" : ""}
@cache_readonly
def lnalpha(self):
return np.log(self.params[-1])
@cache_readonly
def lnalpha_std_err(self):
return self.bse[-1] / self.params[-1]
@cache_readonly
def aic(self):
        # k_extra accounts for the extra dispersion parameter (alpha) that is estimated
k_extra = getattr(self.model, 'k_extra', 0)
return -2*(self.llf - (self.df_model + self.k_constant + k_extra))
@cache_readonly
def bic(self):
        # k_extra accounts for the extra dispersion parameter (alpha) that is estimated
k_extra = getattr(self.model, 'k_extra', 0)
return -2*self.llf + np.log(self.nobs)*(self.df_model +
self.k_constant + k_extra)
class L1CountResults(DiscreteResults):
__doc__ = _discrete_results_docs % {"one_line_description" :
"A results class for count data fit by l1 regularization",
"extra_attr" : _l1_results_attr}
#discretefit = CountResults(self, cntfit)
def __init__(self, model, cntfit):
super(L1CountResults, self).__init__(model, cntfit)
# self.trimmed is a boolean array with T/F telling whether or not that
# entry in params has been set zero'd out.
self.trimmed = cntfit.mle_retvals['trimmed']
self.nnz_params = (self.trimmed == False).sum()
# update degrees of freedom
self.model.df_model = self.nnz_params - 1
self.model.df_resid = float(self.model.endog.shape[0] - self.nnz_params)
# adjust for extra parameter in NegativeBinomial nb1 and nb2
# extra parameter is not included in df_model
k_extra = getattr(self.model, 'k_extra', 0)
self.model.df_model -= k_extra
self.model.df_resid += k_extra
self.df_model = self.model.df_model
self.df_resid = self.model.df_resid
class PoissonResults(CountResults):
def predict_prob(self, n=None, exog=None, exposure=None, offset=None,
transform=True):
"""
Return predicted probability of each count level for each observation
Parameters
----------
n : array-like or int
The counts for which you want the probabilities. If n is None
then the probabilities for each count from 0 to max(y) are
given.
Returns
-------
ndarray
A nobs x n array where len(`n`) columns are indexed by the count
n. If n is None, then column 0 is the probability that each
observation is 0, column 1 is the probability that each
observation is 1, etc.
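        Examples
        --------
        Illustrative only (``res`` stands for a fitted ``Poisson`` results
        instance; the calls are not executed here):
        >>> probs = res.predict_prob(n=[0, 1, 2])
        >>> probs.shape  # (nobs, 3): one column per requested count level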
"""
if n is not None:
counts = np.atleast_2d(n)
else:
counts = np.atleast_2d(np.arange(0, np.max(self.model.endog)+1))
mu = self.predict(exog=exog, exposure=exposure, offset=offset,
transform=transform, linear=False)[:,None]
# uses broadcasting
return stats.poisson.pmf(counts, mu)
class L1PoissonResults(L1CountResults, PoissonResults):
pass
class L1NegativeBinomialResults(L1CountResults, NegativeBinomialResults):
pass
class OrderedResults(DiscreteResults):
__doc__ = _discrete_results_docs % {"one_line_description" : "A results class for ordered discrete data." , "extra_attr" : ""}
pass
class BinaryResults(DiscreteResults):
__doc__ = _discrete_results_docs % {"one_line_description" : "A results class for binary data", "extra_attr" : ""}
def pred_table(self, threshold=.5):
"""
Prediction table
Parameters
----------
threshold : scalar
Number between 0 and 1. Threshold above which a prediction is
considered 1 and below which a prediction is considered 0.
Notes
        -----
pred_table[i,j] refers to the number of times "i" was observed and
the model predicted "j". Correct predictions are along the diagonal.
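        Examples
        --------
        Illustrative only (``res`` stands for a fitted binary results instance,
        e.g. the return value of ``Logit(endog, exog).fit()``):
        >>> table = res.pred_table(threshold=0.5)
        >>> # table is 2 x 2: observed 0/1 in rows, predicted 0/1 in columns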
"""
model = self.model
actual = model.endog
pred = np.array(self.predict() > threshold, dtype=float)
return np.histogram2d(actual, pred, bins=2)[0]
def summary(self, yname=None, xname=None, title=None, alpha=.05,
yname_list=None):
smry = super(BinaryResults, self).summary(yname, xname, title, alpha,
yname_list)
fittedvalues = self.model.cdf(self.fittedvalues)
absprederror = np.abs(self.model.endog - fittedvalues)
predclose_sum = (absprederror < 1e-4).sum()
predclose_frac = predclose_sum / len(fittedvalues)
#add warnings/notes
etext = []
if predclose_sum == len(fittedvalues): #nobs?
            wstr = "Complete Separation: The results show that there is "
            wstr += "complete separation.\n"
wstr += "In this case the Maximum Likelihood Estimator does "
wstr += "not exist and the parameters\n"
wstr += "are not identified."
etext.append(wstr)
elif predclose_frac > 0.1: # TODO: get better diagnosis
wstr = "Possibly complete quasi-separation: A fraction "
wstr += "%4.2f of observations can be\n" % predclose_frac
wstr += "perfectly predicted. This might indicate that there "
wstr += "is complete\nquasi-separation. In this case some "
wstr += "parameters will not be identified."
etext.append(wstr)
if etext:
smry.add_extra_txt(etext)
return smry
summary.__doc__ = DiscreteResults.summary.__doc__
@cache_readonly
def resid_dev(self):
"""
Deviance residuals
Notes
-----
Deviance residuals are defined
        .. math:: d_j = \\pm\\left(2\\left[Y_j\\ln\\left(\\frac{Y_j}{M_jp_j}\\right) + (M_j - Y_j)\\ln\\left(\\frac{M_j-Y_j}{M_j(1-p_j)}\\right)\\right]\\right)^{1/2}
where
:math:`p_j = cdf(X\\beta)` and :math:`M_j` is the total number of
observations sharing the covariate pattern :math:`j`.
For now :math:`M_j` is always set to 1.
"""
#These are the deviance residuals
#model = self.model
endog = self.model.endog
#exog = model.exog
        # M = number of individuals that share a covariate pattern,
        # e.g. M[i] = 2 if two observations share covariate pattern i
M = 1
p = self.predict()
#Y_0 = np.where(exog == 0)
#Y_M = np.where(exog == M)
#NOTE: Common covariate patterns are not yet handled
res = -(1-endog)*np.sqrt(2*M*np.abs(np.log(1-p))) + \
endog*np.sqrt(2*M*np.abs(np.log(p)))
return res
@cache_readonly
def resid_pearson(self):
"""
Pearson residuals
Notes
-----
Pearson residuals are defined to be
.. math:: r_j = \\frac{(y - M_jp_j)}{\\sqrt{M_jp_j(1-p_j)}}
where :math:`p_j=cdf(X\\beta)` and :math:`M_j` is the total number of
observations sharing the covariate pattern :math:`j`.
For now :math:`M_j` is always set to 1.
"""
# Pearson residuals
#model = self.model
endog = self.model.endog
#exog = model.exog
        # M = number of individuals that share a covariate pattern,
        # e.g. M[i] = 2 if two observations share covariate pattern i
# use unique row pattern?
M = 1
p = self.predict()
return (endog - M*p)/np.sqrt(M*p*(1-p))
@cache_readonly
def resid_response(self):
"""
The response residuals
Notes
-----
Response residuals are defined to be
.. math:: y - p
where :math:`p=cdf(X\\beta)`.
"""
return self.model.endog - self.predict()
class LogitResults(BinaryResults):
__doc__ = _discrete_results_docs % {
"one_line_description" : "A results class for Logit Model",
"extra_attr" : ""}
@cache_readonly
def resid_generalized(self):
"""
Generalized residuals
Notes
-----
The generalized residuals for the Logit model are defined
.. math:: y - p
where :math:`p=cdf(X\\beta)`. This is the same as the `resid_response`
for the Logit model.
"""
# Generalized residuals
return self.model.endog - self.predict()
class ProbitResults(BinaryResults):
__doc__ = _discrete_results_docs % {
"one_line_description" : "A results class for Probit Model",
"extra_attr" : ""}
@cache_readonly
def resid_generalized(self):
"""
Generalized residuals
Notes
-----
The generalized residuals for the Probit model are defined
        .. math:: y\\frac{\\phi(X\\beta)}{\\Phi(X\\beta)}-(1-y)\\frac{\\phi(X\\beta)}{1-\\Phi(X\\beta)}
"""
# generalized residuals
model = self.model
endog = model.endog
XB = self.predict(linear=True)
pdf = model.pdf(XB)
cdf = model.cdf(XB)
return endog * pdf/cdf - (1-endog)*pdf/(1-cdf)
class L1BinaryResults(BinaryResults):
__doc__ = _discrete_results_docs % {"one_line_description" :
"Results instance for binary data fit by l1 regularization",
"extra_attr" : _l1_results_attr}
def __init__(self, model, bnryfit):
super(L1BinaryResults, self).__init__(model, bnryfit)
# self.trimmed is a boolean array with T/F telling whether or not that
# entry in params has been set zero'd out.
self.trimmed = bnryfit.mle_retvals['trimmed']
self.nnz_params = (self.trimmed == False).sum()
self.model.df_model = self.nnz_params - 1
self.model.df_resid = float(self.model.endog.shape[0] - self.nnz_params)
self.df_model = self.model.df_model
self.df_resid = self.model.df_resid
class MultinomialResults(DiscreteResults):
__doc__ = _discrete_results_docs % {"one_line_description" :
"A results class for multinomial data", "extra_attr" : ""}
def _maybe_convert_ynames_int(self, ynames):
# see if they're integers
try:
for i in ynames:
if ynames[i] % 1 == 0:
ynames[i] = str(int(ynames[i]))
except TypeError:
pass
return ynames
def _get_endog_name(self, yname, yname_list, all=False):
"""
If all is False, the first variable name is dropped
"""
model = self.model
if yname is None:
yname = model.endog_names
if yname_list is None:
ynames = model._ynames_map
ynames = self._maybe_convert_ynames_int(ynames)
# use range below to ensure sortedness
ynames = [ynames[key] for key in range(int(model.J))]
ynames = ['='.join([yname, name]) for name in ynames]
if not all:
yname_list = ynames[1:] # assumes first variable is dropped
else:
yname_list = ynames
return yname, yname_list
def pred_table(self):
"""
Returns the J x J prediction table.
Notes
-----
pred_table[i,j] refers to the number of times "i" was observed and
the model predicted "j". Correct predictions are along the diagonal.
"""
J = self.model.J
# these are the actual, predicted indices
idx = lzip(self.model.endog, self.predict().argmax(1))
return np.histogram2d(self.model.endog, self.predict().argmax(1),
bins=J)[0]
@cache_readonly
def bse(self):
bse = np.sqrt(np.diag(self.cov_params()))
return bse.reshape(self.params.shape, order='F')
@cache_readonly
def aic(self):
return -2*(self.llf - (self.df_model+self.model.J-1))
@cache_readonly
def bic(self):
return -2*self.llf + np.log(self.nobs)*(self.df_model+self.model.J-1)
def conf_int(self, alpha=.05, cols=None):
confint = super(DiscreteResults, self).conf_int(alpha=alpha,
cols=cols)
return confint.transpose(2,0,1)
def margeff(self):
raise NotImplementedError("Use get_margeff instead")
@cache_readonly
def resid_misclassified(self):
"""
Residuals indicating which observations are misclassified.
Notes
-----
The residuals for the multinomial model are defined as
.. math:: argmax(y_i) \\neq argmax(p_i)
where :math:`argmax(y_i)` is the index of the category for the
endogenous variable and :math:`argmax(p_i)` is the index of the
predicted probabilities for each category. That is, the residual
is a binary indicator that is 0 if the category with the highest
predicted probability is the same as that of the observed variable
and 1 otherwise.
"""
# it's 0 or 1 - 0 for correct prediction and 1 for a missed one
return (self.model.wendog.argmax(1) !=
self.predict().argmax(1)).astype(float)
def summary2(self, alpha=0.05, float_format="%.4f"):
"""Experimental function to summarize regression results
Parameters
        ----------
alpha : float
significance level for the confidence intervals
float_format: string
print format for floats in parameters summary
Returns
-------
smry : Summary instance
this holds the summary tables and text, which can be printed or
converted to various output formats.
See Also
--------
statsmodels.iolib.summary2.Summary : class to hold summary
results
"""
from statsmodels.iolib import summary2
smry = summary2.Summary()
smry.add_dict(summary2.summary_model(self))
# One data frame per value of endog
eqn = self.params.shape[1]
confint = self.conf_int(alpha)
for i in range(eqn):
coefs = summary2.summary_params(self, alpha, self.params[:,i],
self.bse[:,i], self.tvalues[:,i], self.pvalues[:,i],
confint[i])
# Header must show value of endog
level_str = self.model.endog_names + ' = ' + str(i)
coefs[level_str] = coefs.index
coefs = coefs.ix[:,[-1,0,1,2,3,4,5]]
smry.add_df(coefs, index=False, header=True, float_format=float_format)
smry.add_title(results=self)
return smry
class L1MultinomialResults(MultinomialResults):
__doc__ = _discrete_results_docs % {"one_line_description" :
"A results class for multinomial data fit by l1 regularization",
"extra_attr" : _l1_results_attr}
def __init__(self, model, mlefit):
super(L1MultinomialResults, self).__init__(model, mlefit)
# self.trimmed is a boolean array with T/F telling whether or not that
# entry in params has been set zero'd out.
self.trimmed = mlefit.mle_retvals['trimmed']
self.nnz_params = (self.trimmed == False).sum()
#Note: J-1 constants
self.model.df_model = self.nnz_params - (self.model.J - 1)
self.model.df_resid = float(self.model.endog.shape[0] - self.nnz_params)
self.df_model = self.model.df_model
self.df_resid = self.model.df_resid
#### Results Wrappers ####
class OrderedResultsWrapper(lm.RegressionResultsWrapper):
pass
wrap.populate_wrapper(OrderedResultsWrapper, OrderedResults)
class CountResultsWrapper(lm.RegressionResultsWrapper):
pass
wrap.populate_wrapper(CountResultsWrapper, CountResults)
class NegativeBinomialResultsWrapper(lm.RegressionResultsWrapper):
pass
wrap.populate_wrapper(NegativeBinomialResultsWrapper,
NegativeBinomialResults)
class PoissonResultsWrapper(lm.RegressionResultsWrapper):
pass
#_methods = {
# "predict_prob" : "rows",
# }
#_wrap_methods = lm.wrap.union_dicts(
# lm.RegressionResultsWrapper._wrap_methods,
# _methods)
wrap.populate_wrapper(PoissonResultsWrapper, PoissonResults)
class L1CountResultsWrapper(lm.RegressionResultsWrapper):
pass
class L1PoissonResultsWrapper(lm.RegressionResultsWrapper):
pass
#_methods = {
# "predict_prob" : "rows",
# }
#_wrap_methods = lm.wrap.union_dicts(
# lm.RegressionResultsWrapper._wrap_methods,
# _methods)
wrap.populate_wrapper(L1PoissonResultsWrapper, L1PoissonResults)
class L1NegativeBinomialResultsWrapper(lm.RegressionResultsWrapper):
pass
wrap.populate_wrapper(L1NegativeBinomialResultsWrapper,
L1NegativeBinomialResults)
class BinaryResultsWrapper(lm.RegressionResultsWrapper):
_attrs = {"resid_dev" : "rows",
"resid_generalized" : "rows",
"resid_pearson" : "rows",
"resid_response" : "rows"
}
_wrap_attrs = wrap.union_dicts(lm.RegressionResultsWrapper._wrap_attrs,
_attrs)
wrap.populate_wrapper(BinaryResultsWrapper, BinaryResults)
class L1BinaryResultsWrapper(lm.RegressionResultsWrapper):
pass
wrap.populate_wrapper(L1BinaryResultsWrapper, L1BinaryResults)
class MultinomialResultsWrapper(lm.RegressionResultsWrapper):
_attrs = {"resid_misclassified" : "rows"}
_wrap_attrs = wrap.union_dicts(lm.RegressionResultsWrapper._wrap_attrs,
_attrs)
wrap.populate_wrapper(MultinomialResultsWrapper, MultinomialResults)
class L1MultinomialResultsWrapper(lm.RegressionResultsWrapper):
pass
wrap.populate_wrapper(L1MultinomialResultsWrapper, L1MultinomialResults)
if __name__=="__main__":
import numpy as np
import statsmodels.api as sm
# Scratch work for negative binomial models
# dvisits was written using an R package, I can provide the dataset
# on request until the copyright is cleared up
#TODO: request permission to use dvisits
data2 = np.genfromtxt('../datasets/dvisits/dvisits.csv', names=True)
# note that this has missing values for Accident
endog = data2['doctorco']
exog = data2[['sex','age','agesq','income','levyplus','freepoor',
'freerepa','illness','actdays','hscore','chcond1',
'chcond2']].view(float).reshape(len(data2),-1)
exog = sm.add_constant(exog, prepend=True)
poisson_mod = Poisson(endog, exog)
poisson_res = poisson_mod.fit()
# nb2_mod = NegBinTwo(endog, exog)
# nb2_res = nb2_mod.fit()
# solvers hang (with no error and no maxiter warn...)
# haven't derived hessian (though it will be block diagonal) to check
# newton, note that Lawless (1987) has the derivations
    # there appears to be something wrong with the score?
    # according to Lawless, traditionally the likelihood is maximized with
    # respect to beta and a grid search over alpha is used to determine
    # alpha-hat, or the Breslow approach, which is a two-step iterative
    # procedure, is used.
nb2_params = [-2.190,.217,-.216,.609,-.142,.118,-.497,.145,.214,.144,
.038,.099,.190,1.077] # alpha is last
# taken from Cameron and Trivedi
# the below is from Cameron and Trivedi as well
# endog2 = np.array(endog>=1, dtype=float)
# skipped for now, binary poisson results look off?
data = sm.datasets.randhie.load()
nbreg = NegativeBinomial
mod = nbreg(data.endog, data.exog.view((float,9)))
#FROM STATA:
params = np.asarray([-.05654133, -.21214282, .0878311, -.02991813, .22903632,
.06210226, .06799715, .08407035, .18532336])
bse = [0.0062541, 0.0231818, 0.0036942, 0.0034796, 0.0305176, 0.0012397,
0.0198008, 0.0368707, 0.0766506]
lnalpha = .31221786
mod.loglike(np.r_[params,np.exp(lnalpha)])
poiss_res = Poisson(data.endog, data.exog.view((float,9))).fit()
func = lambda x: -mod.loglike(x)
grad = lambda x: -mod.score(x)
from scipy import optimize
# res1 = optimize.fmin_l_bfgs_b(func, np.r_[poiss_res.params,.1],
# approx_grad=True)
res1 = optimize.fmin_bfgs(func, np.r_[poiss_res.params,.1], fprime=grad)
from statsmodels.tools.numdiff import approx_hess_cs
# np.sqrt(np.diag(-np.linalg.inv(approx_hess_cs(np.r_[params,lnalpha], mod.loglike))))
#NOTE: this is the hessian in terms of alpha _not_ lnalpha
hess_arr = mod.hessian(res1)
license: bsd-3-clause
repo_name: billy-inn/scikit-learn | path: examples/decomposition/plot_ica_blind_source_separation.py | copies: 349 | size: 2228
"""
=====================================
Blind source separation using FastICA
=====================================
An example of estimating sources from noisy data.
:ref:`ICA` is used to estimate sources given noisy measurements.
Imagine 3 instruments playing simultaneously and 3 microphones
recording the mixed signals. ICA is used to recover the sources
i.e. what is played by each instrument. Importantly, PCA fails
at recovering our `instruments` since the related signals reflect
non-Gaussian processes.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal
from sklearn.decomposition import FastICA, PCA
###############################################################################
# Generate sample data
np.random.seed(0)
n_samples = 2000
time = np.linspace(0, 8, n_samples)
s1 = np.sin(2 * time) # Signal 1 : sinusoidal signal
s2 = np.sign(np.sin(3 * time)) # Signal 2 : square signal
s3 = signal.sawtooth(2 * np.pi * time) # Signal 3: saw tooth signal
S = np.c_[s1, s2, s3]
S += 0.2 * np.random.normal(size=S.shape) # Add noise
S /= S.std(axis=0) # Standardize data
# Mix data
A = np.array([[1, 1, 1], [0.5, 2, 1.0], [1.5, 1.0, 2.0]]) # Mixing matrix
X = np.dot(S, A.T) # Generate observations
# Compute ICA
ica = FastICA(n_components=3)
S_ = ica.fit_transform(X) # Reconstruct signals
A_ = ica.mixing_ # Get estimated mixing matrix
# We can `prove` that the ICA model applies by reverting the unmixing.
assert np.allclose(X, np.dot(S_, A_.T) + ica.mean_)
# For comparison, compute PCA
pca = PCA(n_components=3)
H = pca.fit_transform(X) # Reconstruct signals based on orthogonal components
###############################################################################
# Plot results
plt.figure()
models = [X, S, S_, H]
names = ['Observations (mixed signal)',
'True Sources',
'ICA recovered signals',
'PCA recovered signals']
colors = ['red', 'steelblue', 'orange']
for ii, (model, name) in enumerate(zip(models, names), 1):
plt.subplot(4, 1, ii)
plt.title(name)
for sig, color in zip(model.T, colors):
plt.plot(sig, color=color)
plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.46)
plt.show()
license: bsd-3-clause
repo_name: Adai0808/scikit-learn | path: examples/model_selection/plot_train_error_vs_test_error.py | copies: 349 | size: 2577
"""
=========================
Train error vs Test error
=========================
Illustration of how the performance of an estimator on unseen data (test data)
is not the same as the performance on training data. As the regularization
increases, the performance on the training set decreases, while the
performance on the test set is optimal within a range of values of the
regularization parameter. The example uses an Elastic-Net regression model,
and the performance is measured using the explained variance, a.k.a. R^2.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
from sklearn import linear_model
###############################################################################
# Generate sample data
n_samples_train, n_samples_test, n_features = 75, 150, 500
np.random.seed(0)
coef = np.random.randn(n_features)
coef[50:] = 0.0  # only the first 50 features are impacting the model
X = np.random.randn(n_samples_train + n_samples_test, n_features)
y = np.dot(X, coef)
# Split train and test data
X_train, X_test = X[:n_samples_train], X[n_samples_train:]
y_train, y_test = y[:n_samples_train], y[n_samples_train:]
###############################################################################
# Compute train and test errors
alphas = np.logspace(-5, 1, 60)
enet = linear_model.ElasticNet(l1_ratio=0.7)
train_errors = list()
test_errors = list()
for alpha in alphas:
enet.set_params(alpha=alpha)
enet.fit(X_train, y_train)
train_errors.append(enet.score(X_train, y_train))
test_errors.append(enet.score(X_test, y_test))
i_alpha_optim = np.argmax(test_errors)
alpha_optim = alphas[i_alpha_optim]
print("Optimal regularization parameter : %s" % alpha_optim)
# Estimate the coef_ on full data with optimal regularization parameter
enet.set_params(alpha=alpha_optim)
coef_ = enet.fit(X, y).coef_
###############################################################################
# Plot results functions
import matplotlib.pyplot as plt
plt.subplot(2, 1, 1)
plt.semilogx(alphas, train_errors, label='Train')
plt.semilogx(alphas, test_errors, label='Test')
plt.vlines(alpha_optim, plt.ylim()[0], np.max(test_errors), color='k',
linewidth=3, label='Optimum on test')
plt.legend(loc='lower left')
plt.ylim([0, 1.2])
plt.xlabel('Regularization parameter')
plt.ylabel('Performance')
# Show estimated coef_ vs true coef
plt.subplot(2, 1, 2)
plt.plot(coef, label='True coef')
plt.plot(coef_, label='Estimated coef')
plt.legend()
plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.26)
plt.show()
license: bsd-3-clause
repo_name: mistercrunch/panoramix | path: superset/utils/csv.py | copies: 2 | size: 3022
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import re
import urllib.request
from typing import Any, Dict, Optional
from urllib.error import URLError
import pandas as pd
negative_number_re = re.compile(r"^-[0-9.]+$")
# This regex will match if the string starts with:
#
# 1. one of -, @, +, |, =, %
# 2. two double quotes immediately followed by one of -, @, +, |, =, %
# 3. one or more spaces immediately followed by one of -, @, +, |, =, %
#
problematic_chars_re = re.compile(r'^(?:"{2}|\s{1,})(?=[\-@+|=%])|^[\-@+|=%]')
def escape_value(value: str) -> str:
"""
Escapes a set of special characters.
http://georgemauer.net/2017/10/07/csv-injection.html
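    Illustrative examples (based on the regexes above; not executed as doctests):
    escape_value("=SUM(A1:A10)")  # -> "'=SUM(A1:A10)"
    escape_value("  +cmd")        # -> "'  +cmd"
    escape_value("-12.5")         # -> "-12.5" (negative numbers are left alone)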
"""
needs_escaping = problematic_chars_re.match(value) is not None
is_negative_number = negative_number_re.match(value) is not None
if needs_escaping and not is_negative_number:
# Escape pipe to be extra safe as this
# can lead to remote code execution
value = value.replace("|", "\\|")
# Precede the line with a single quote. This prevents
# evaluation of commands and some spreadsheet software
# will hide this visually from the user. Many articles
# claim a preceding space will work here too, however,
# when uploading a csv file in Google sheets, a leading
# space was ignored and code was still evaluated.
value = "'" + value
return value
def df_to_escaped_csv(df: pd.DataFrame, **kwargs: Any) -> Any:
escape_values = lambda v: escape_value(v) if isinstance(v, str) else v
# Escape csv headers
df = df.rename(columns=escape_values)
# Escape csv rows
df = df.applymap(escape_values)
return df.to_csv(**kwargs)
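# Illustrative usage of df_to_escaped_csv (a sketch, not executed here):
#
#     import pandas as pd
#     df = pd.DataFrame({"formula": ["=SUM(A1:A10)", "safe"]})
#     csv_text = df_to_escaped_csv(df, index=False)
#     # the first cell is written as '=SUM(A1:A10) so spreadsheet software
#     # treats it as text instead of evaluating it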
def get_chart_csv_data(
chart_url: str, auth_cookies: Optional[Dict[str, str]] = None
) -> Optional[bytes]:
content = None
if auth_cookies:
opener = urllib.request.build_opener()
cookie_str = ";".join([f"{key}={val}" for key, val in auth_cookies.items()])
opener.addheaders.append(("Cookie", cookie_str))
response = opener.open(chart_url)
content = response.read()
if response.getcode() != 200:
raise URLError(response.getcode())
if content:
return content
return None
license: apache-2.0
repo_name: mwrightevent38/MissionPlanner | path: Lib/site-packages/scipy/signal/ltisys.py | copies: 53 | size: 23848
"""
ltisys -- a collection of classes and functions for modeling linear
time invariant systems.
"""
#
# Author: Travis Oliphant 2001
#
# Feb 2010: Warren Weckesser
# Rewrote lsim2 and added impulse2.
#
from filter_design import tf2zpk, zpk2tf, normalize
import numpy
from numpy import product, zeros, array, dot, transpose, ones, \
nan_to_num, zeros_like, linspace
import scipy.interpolate as interpolate  # needed by lsim2 to interpolate the input U
import scipy.integrate as integrate
import scipy.linalg as linalg
from numpy import r_, eye, real, atleast_1d, atleast_2d, poly, \
squeeze, diag, asarray
def tf2ss(num, den):
"""Transfer function to state-space representation.
Parameters
----------
num, den : array_like
Sequences representing the numerator and denominator
polynomials.
Returns
-------
A, B, C, D : ndarray
State space representation of the system.
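    Examples
    --------
    A minimal sketch for the transfer function H(s) = 1 / (s**2 + 2*s + 1)
    (illustrative only; not run as a doctest):
    >>> A, B, C, D = tf2ss([1.0], [1.0, 2.0, 1.0])
    >>> A.shape, B.shape, C.shape, D.shape
    ((2, 2), (2, 1), (1, 2), (1,))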
"""
# Controller canonical state-space representation.
# if M+1 = len(num) and K+1 = len(den) then we must have M <= K
# states are found by asserting that X(s) = U(s) / D(s)
# then Y(s) = N(s) * X(s)
#
# A, B, C, and D follow quite naturally.
#
num, den = normalize(num, den) # Strips zeros, checks arrays
nn = len(num.shape)
if nn == 1:
num = asarray([num], num.dtype)
M = num.shape[1]
K = len(den)
if (M > K):
raise ValueError("Improper transfer function.")
if (M == 0 or K == 0): # Null system
return array([],float), array([], float), array([], float), \
array([], float)
    # pad numerator to have same number of columns as the denominator
num = r_['-1',zeros((num.shape[0],K-M), num.dtype), num]
if num.shape[-1] > 0:
D = num[:,0]
else:
D = array([],float)
if K == 1:
return array([], float), array([], float), array([], float), D
frow = -array([den[1:]])
A = r_[frow, eye(K-2, K-1)]
B = eye(K-1, 1)
C = num[:,1:] - num[:,0] * den[1:]
return A, B, C, D
def _none_to_empty(arg):
if arg is None:
return []
else:
return arg
def abcd_normalize(A=None, B=None, C=None, D=None):
"""Check state-space matrices and ensure they are rank-2.
"""
A, B, C, D = map(_none_to_empty, (A, B, C, D))
A, B, C, D = map(atleast_2d, (A, B, C, D))
if ((len(A.shape) > 2) or (len(B.shape) > 2) or \
(len(C.shape) > 2) or (len(D.shape) > 2)):
raise ValueError("A, B, C, D arrays can be no larger than rank-2.")
MA, NA = A.shape
MB, NB = B.shape
MC, NC = C.shape
MD, ND = D.shape
if (MC == 0) and (NC == 0) and (MD != 0) and (NA != 0):
MC, NC = MD, NA
C = zeros((MC, NC))
    if (MB == 0) and (NB == 0) and (MA != 0) and (ND != 0):
        MB, NB = MA, ND
        B = zeros((MB, NB))
    if (MD == 0) and (ND == 0) and (MC != 0) and (NB != 0):
        MD, ND = MC, NB
        D = zeros((MD, ND))
    if (MA == 0) and (NA == 0) and (MB != 0) and (NC != 0):
        MA, NA = MB, NC
        A = zeros((MA, NA))
if MA != NA:
raise ValueError("A must be square.")
if MA != MB:
raise ValueError("A and B must have the same number of rows.")
if NA != NC:
raise ValueError("A and C must have the same number of columns.")
if MD != MC:
raise ValueError("C and D must have the same number of rows.")
if ND != NB:
raise ValueError("B and D must have the same number of columns.")
return A, B, C, D
def ss2tf(A, B, C, D, input=0):
"""State-space to transfer function.
Parameters
----------
A, B, C, D : ndarray
State-space representation of linear system.
input : int, optional
For multiple-input systems, the input to use.
Returns
-------
num, den : 1D ndarray
Numerator and denominator polynomials (as sequences)
respectively.
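    Examples
    --------
    Round-trip sketch using the matrices produced by ``tf2ss`` (illustrative;
    the recovered coefficients match only up to floating point error):
    >>> A, B, C, D = tf2ss([1.0], [1.0, 2.0, 1.0])
    >>> num, den = ss2tf(A, B, C, D)
    >>> den  # approximately [1., 2., 1.]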
"""
# transfer function is C (sI - A)**(-1) B + D
A, B, C, D = map(asarray, (A, B, C, D))
# Check consistency and
# make them all rank-2 arrays
A, B, C, D = abcd_normalize(A, B, C, D)
nout, nin = D.shape
if input >= nin:
raise ValueError("System does not have the input specified.")
# make MOSI from possibly MOMI system.
if B.shape[-1] != 0:
B = B[:,input]
B.shape = (B.shape[0],1)
if D.shape[-1] != 0:
D = D[:,input]
try:
den = poly(A)
except ValueError:
den = 1
if (product(B.shape,axis=0) == 0) and (product(C.shape,axis=0) == 0):
num = numpy.ravel(D)
if (product(D.shape,axis=0) == 0) and (product(A.shape,axis=0) == 0):
den = []
return num, den
num_states = A.shape[0]
type_test = A[:,0] + B[:,0] + C[0,:] + D
num = numpy.zeros((nout, num_states+1), type_test.dtype)
for k in range(nout):
Ck = atleast_2d(C[k,:])
num[k] = poly(A - dot(B,Ck)) + (D[k]-1)*den
return num, den
def zpk2ss(z, p, k):
"""Zero-pole-gain representation to state-space representation
Parameters
----------
z, p : sequence
Zeros and poles.
k : float
System gain.
Returns
-------
A, B, C, D : ndarray
State-space matrices.
"""
return tf2ss(*zpk2tf(z,p,k))
def ss2zpk(A, B, C, D, input=0):
"""State-space representation to zero-pole-gain representation.
Parameters
----------
A, B, C, D : ndarray
State-space representation of linear system.
input : int, optional
For multiple-input systems, the input to use.
Returns
-------
z, p : sequence
Zeros and poles.
k : float
System gain.
"""
return tf2zpk(*ss2tf(A,B,C,D,input=input))
class lti(object):
"""Linear Time Invariant class which simplifies representation.
"""
def __init__(self,*args,**kwords):
"""Initialize the LTI system using either:
(numerator, denominator)
(zeros, poles, gain)
(A, B, C, D) -- state-space.
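        For example (illustrative only; not executed here), the same
        first-order system 1 / (s + 1) can be built from different
        representations:
        >>> sys_tf = lti([1.0], [1.0, 1.0])                    # (num, den)
        >>> sys_ss = lti([[-1.0]], [[1.0]], [[1.0]], [[0.0]])  # (A, B, C, D)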
"""
N = len(args)
if N == 2: # Numerator denominator transfer function input
self.__dict__['num'], self.__dict__['den'] = normalize(*args)
self.__dict__['zeros'], self.__dict__['poles'], \
self.__dict__['gain'] = tf2zpk(*args)
self.__dict__['A'], self.__dict__['B'], \
self.__dict__['C'], \
self.__dict__['D'] = tf2ss(*args)
self.inputs = 1
if len(self.num.shape) > 1:
self.outputs = self.num.shape[0]
else:
self.outputs = 1
elif N == 3: # Zero-pole-gain form
self.__dict__['zeros'], self.__dict__['poles'], \
self.__dict__['gain'] = args
self.__dict__['num'], self.__dict__['den'] = zpk2tf(*args)
self.__dict__['A'], self.__dict__['B'], \
self.__dict__['C'], \
self.__dict__['D'] = zpk2ss(*args)
self.inputs = 1
if len(self.zeros.shape) > 1:
self.outputs = self.zeros.shape[0]
else:
self.outputs = 1
elif N == 4: # State-space form
self.__dict__['A'], self.__dict__['B'], \
self.__dict__['C'], \
self.__dict__['D'] = abcd_normalize(*args)
self.__dict__['zeros'], self.__dict__['poles'], \
self.__dict__['gain'] = ss2zpk(*args)
self.__dict__['num'], self.__dict__['den'] = ss2tf(*args)
self.inputs = self.B.shape[-1]
self.outputs = self.C.shape[0]
else:
raise ValueError("Needs 2, 3, or 4 arguments.")
def __setattr__(self, attr, val):
if attr in ['num','den']:
self.__dict__[attr] = val
self.__dict__['zeros'], self.__dict__['poles'], \
self.__dict__['gain'] = \
tf2zpk(self.num, self.den)
self.__dict__['A'], self.__dict__['B'], \
self.__dict__['C'], \
self.__dict__['D'] = \
tf2ss(self.num, self.den)
elif attr in ['zeros', 'poles', 'gain']:
self.__dict__[attr] = val
self.__dict__['num'], self.__dict__['den'] = \
zpk2tf(self.zeros,
self.poles, self.gain)
self.__dict__['A'], self.__dict__['B'], \
self.__dict__['C'], \
self.__dict__['D'] = \
zpk2ss(self.zeros,
self.poles, self.gain)
elif attr in ['A', 'B', 'C', 'D']:
self.__dict__[attr] = val
self.__dict__['zeros'], self.__dict__['poles'], \
self.__dict__['gain'] = \
ss2zpk(self.A, self.B,
self.C, self.D)
self.__dict__['num'], self.__dict__['den'] = \
ss2tf(self.A, self.B,
self.C, self.D)
else:
self.__dict__[attr] = val
def impulse(self, X0=None, T=None, N=None):
return impulse(self, X0=X0, T=T, N=N)
def step(self, X0=None, T=None, N=None):
return step(self, X0=X0, T=T, N=N)
def output(self, U, T, X0=None):
return lsim(self, U, T, X0=X0)
def lsim2(system, U=None, T=None, X0=None, **kwargs):
"""
Simulate output of a continuous-time linear system, by using
the ODE solver `scipy.integrate.odeint`.
Parameters
----------
system : an instance of the LTI class or a tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 2: (num, den)
* 3: (zeros, poles, gain)
* 4: (A, B, C, D)
U : array_like (1D or 2D), optional
An input array describing the input at each time T. Linear
interpolation is used between given times. If there are
multiple inputs, then each column of the rank-2 array
represents an input. If U is not given, the input is assumed
to be zero.
T : array_like (1D or 2D), optional
The time steps at which the input is defined and at which the
output is desired. The default is 101 evenly spaced points on
the interval [0,10.0].
X0 : array_like (1D), optional
The initial condition of the state vector. If `X0` is not
given, the initial conditions are assumed to be 0.
kwargs : dict
Additional keyword arguments are passed on to the function
odeint. See the notes below for more details.
Returns
-------
T : 1D ndarray
The time values for the output.
yout : ndarray
The response of the system.
xout : ndarray
The time-evolution of the state-vector.
Notes
-----
This function uses :func:`scipy.integrate.odeint` to solve the
system's differential equations. Additional keyword arguments
given to `lsim2` are passed on to `odeint`. See the documentation
for :func:`scipy.integrate.odeint` for the full list of arguments.
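    Examples
    --------
    A rough sketch of simulating the step response of the first-order system
    1 / (s + 1) (illustrative only; requires `scipy.interpolate` to be
    importable):
    >>> import numpy as np
    >>> t = np.linspace(0, 5, 101)
    >>> u = np.ones_like(t)
    >>> tout, yout, xout = lsim2(([1.0], [1.0, 1.0]), U=u, T=t)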
"""
if isinstance(system, lti):
sys = system
else:
sys = lti(*system)
if X0 is None:
X0 = zeros(sys.B.shape[0],sys.A.dtype)
if T is None:
# XXX T should really be a required argument, but U was
# changed from a required positional argument to a keyword,
# and T is after U in the argument list. So we either: change
# the API and move T in front of U; check here for T being
    # None and raise an exception; or assign a default value to T
# here. This code implements the latter.
T = linspace(0, 10.0, 101)
T = atleast_1d(T)
if len(T.shape) != 1:
raise ValueError("T must be a rank-1 array.")
if U is not None:
U = atleast_1d(U)
if len(U.shape) == 1:
U = U.reshape(-1,1)
sU = U.shape
if sU[0] != len(T):
raise ValueError("U must have the same number of rows "
"as elements in T.")
if sU[1] != sys.inputs:
raise ValueError("The number of inputs in U (%d) is not "
"compatible with the number of system "
"inputs (%d)" % (sU[1], sys.inputs))
# Create a callable that uses linear interpolation to
# calculate the input at any time.
ufunc = interpolate.interp1d(T, U, kind='linear',
axis=0, bounds_error=False)
def fprime(x, t, sys, ufunc):
"""The vector field of the linear system."""
return dot(sys.A,x) + squeeze(dot(sys.B,nan_to_num(ufunc([t]))))
xout = integrate.odeint(fprime, X0, T, args=(sys, ufunc), **kwargs)
yout = dot(sys.C,transpose(xout)) + dot(sys.D,transpose(U))
else:
def fprime(x, t, sys):
"""The vector field of the linear system."""
return dot(sys.A,x)
xout = integrate.odeint(fprime, X0, T, args=(sys,), **kwargs)
yout = dot(sys.C,transpose(xout))
return T, squeeze(transpose(yout)), xout
def lsim(system, U, T, X0=None, interp=1):
"""
Simulate output of a continuous-time linear system.
Parameters
----------
system : an instance of the LTI class or a tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 2: (num, den)
* 3: (zeros, poles, gain)
* 4: (A, B, C, D)
U : array_like
An input array describing the input at each time `T`
(interpolation is assumed between given times). If there are
multiple inputs, then each column of the rank-2 array
represents an input.
T : array_like
The time steps at which the input is defined and at which the
output is desired.
    X0 : array_like, optional
The initial conditions on the state vector (zero by default).
interp : {1, 0}
Whether to use linear (1) or zero-order hold (0) interpolation.
Returns
-------
T : 1D ndarray
Time values for the output.
yout : 1D ndarray
System response.
xout : ndarray
Time-evolution of the state-vector.
"""
# system is an lti system or a sequence
# with 2 (num, den)
# 3 (zeros, poles, gain)
# 4 (A, B, C, D)
# describing the system
# U is an input vector at times T
# if system describes multiple inputs
# then U can be a rank-2 array with the number of columns
# being the number of inputs
if isinstance(system, lti):
sys = system
else:
sys = lti(*system)
U = atleast_1d(U)
T = atleast_1d(T)
if len(U.shape) == 1:
U = U.reshape((U.shape[0],1))
sU = U.shape
if len(T.shape) != 1:
raise ValueError("T must be a rank-1 array.")
if sU[0] != len(T):
raise ValueError("U must have the same number of rows "
"as elements in T.")
if sU[1] != sys.inputs:
raise ValueError("System does not define that many inputs.")
if X0 is None:
X0 = zeros(sys.B.shape[0], sys.A.dtype)
xout = zeros((len(T),sys.B.shape[0]), sys.A.dtype)
xout[0] = X0
A = sys.A
AT, BT = transpose(sys.A), transpose(sys.B)
dt = T[1]-T[0]
lam, v = linalg.eig(A)
vt = transpose(v)
vti = linalg.inv(vt)
GT = dot(dot(vti,diag(numpy.exp(dt*lam))),vt).astype(xout.dtype)
ATm1 = linalg.inv(AT)
ATm2 = dot(ATm1,ATm1)
I = eye(A.shape[0],dtype=A.dtype)
GTmI = GT-I
F1T = dot(dot(BT,GTmI),ATm1)
if interp:
F2T = dot(BT,dot(GTmI,ATm2)/dt - ATm1)
for k in xrange(1,len(T)):
dt1 = T[k] - T[k-1]
if dt1 != dt:
dt = dt1
GT = dot(dot(vti,diag(numpy.exp(dt*lam))),vt).astype(xout.dtype)
GTmI = GT-I
F1T = dot(dot(BT,GTmI),ATm1)
if interp:
F2T = dot(BT,dot(GTmI,ATm2)/dt - ATm1)
xout[k] = dot(xout[k-1],GT) + dot(U[k-1],F1T)
if interp:
xout[k] = xout[k] + dot((U[k]-U[k-1]),F2T)
yout = squeeze(dot(U,transpose(sys.D))) + squeeze(dot(xout,transpose(sys.C)))
return T, squeeze(yout), squeeze(xout)
def _default_response_times(A, n):
"""Compute a reasonable set of time samples for the response time.
This function is used by `impulse`, `impulse2`, `step` and `step2`
to compute the response time when the `T` argument to the function
is None.
Parameters
----------
A : ndarray
The system matrix, which is square.
n : int
The number of time samples to generate.
Returns
-------
t : ndarray
The 1-D array of length `n` of time samples at which the response
is to be computed.
"""
# Create a reasonable time interval. This could use some more work.
# For example, what is expected when the system is unstable?
vals = linalg.eigvals(A)
r = min(abs(real(vals)))
if r == 0.0:
r = 1.0
tc = 1.0 / r
t = linspace(0.0, 7*tc, n)
return t
def impulse(system, X0=None, T=None, N=None):
"""Impulse response of continuous-time system.
Parameters
----------
system : LTI class or tuple
If specified as a tuple, the system is described as
``(num, den)``, ``(zero, pole, gain)``, or ``(A, B, C, D)``.
X0 : array_like, optional
Initial state-vector. Defaults to zero.
T : array_like, optional
Time points. Computed if not given.
N : int, optional
The number of time points to compute (if `T` is not given).
Returns
-------
T : ndarray
A 1-D array of time points.
yout : ndarray
A 1-D array containing the impulse response of the system (except for
singularities at zero).
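    Examples
    --------
    Illustrative sketch (not executed here):
    >>> t, y = impulse(([1.0], [1.0, 3.0, 2.0]))
    >>> # y is the impulse response of 1 / ((s + 1)*(s + 2)),
    >>> # i.e. exp(-t) - exp(-2*t)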
"""
if isinstance(system, lti):
sys = system
else:
sys = lti(*system)
if X0 is None:
B = sys.B
else:
B = sys.B + X0
if N is None:
N = 100
if T is None:
T = _default_response_times(sys.A, N)
h = zeros(T.shape, sys.A.dtype)
s,v = linalg.eig(sys.A)
vi = linalg.inv(v)
C = sys.C
for k in range(len(h)):
es = diag(numpy.exp(s*T[k]))
eA = (dot(dot(v,es),vi)).astype(h.dtype)
h[k] = squeeze(dot(dot(C,eA),B))
return T, h
def impulse2(system, X0=None, T=None, N=None, **kwargs):
"""
Impulse response of a single-input, continuous-time linear system.
Parameters
----------
system : an instance of the LTI class or a tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation:
2 (num, den)
3 (zeros, poles, gain)
4 (A, B, C, D)
T : 1-D array_like, optional
The time steps at which the input is defined and at which the
output is desired. If `T` is not given, the function will
generate a set of time samples automatically.
X0 : 1-D array_like, optional
The initial condition of the state vector. Default: 0 (the
zero vector).
N : int, optional
Number of time points to compute. Default: 100.
kwargs : various types
Additional keyword arguments are passed on to the function
`scipy.signal.lsim2`, which in turn passes them on to
`scipy.integrate.odeint`; see the latter's documentation for
information about these arguments.
Returns
-------
T : ndarray
The time values for the output.
yout : ndarray
The output response of the system.
See Also
--------
impulse, lsim2, integrate.odeint
Notes
-----
The solution is generated by calling `scipy.signal.lsim2`, which uses
the differential equation solver `scipy.integrate.odeint`.
.. versionadded:: 0.8.0
Examples
--------
    Second order system with a repeated root: x''(t) + 2*x'(t) + x(t) = u(t)
>>> system = ([1.0], [1.0, 2.0, 1.0])
>>> t, y = impulse2(system)
>>> import matplotlib.pyplot as plt
>>> plt.plot(t, y)
"""
if isinstance(system, lti):
sys = system
else:
sys = lti(*system)
B = sys.B
if B.shape[-1] != 1:
raise ValueError("impulse2() requires a single-input system.")
B = B.squeeze()
if X0 is None:
X0 = zeros_like(B)
if N is None:
N = 100
if T is None:
T = _default_response_times(sys.A, N)
# Move the impulse in the input to the initial conditions, and then
# solve using lsim2().
U = zeros_like(T)
ic = B + X0
Tr, Yr, Xr = lsim2(sys, U, T, ic, **kwargs)
return Tr, Yr
def step(system, X0=None, T=None, N=None):
"""Step response of continuous-time system.
Parameters
----------
system : an instance of the LTI class or a tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation.
2 (num, den)
3 (zeros, poles, gain)
4 (A, B, C, D)
X0 : array_like, optional
Initial state-vector (default is zero).
T : array_like, optional
Time points (computed if not given).
N : int
Number of time points to compute if `T` is not given.
Returns
-------
T : 1D ndarray
Output time points.
yout : 1D ndarray
Step response of system.
See also
--------
scipy.signal.step2
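    Examples
    --------
    Illustrative sketch for a first-order low-pass system (not executed here):
    >>> t, y = step(([1.0], [1.0, 1.0]))
    >>> # y approaches 1.0, the DC gain of 1 / (s + 1), as t grows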
"""
if isinstance(system, lti):
sys = system
else:
sys = lti(*system)
if N is None:
N = 100
if T is None:
T = _default_response_times(sys.A, N)
U = ones(T.shape, sys.A.dtype)
vals = lsim(sys, U, T, X0=X0)
return vals[0], vals[1]
def step2(system, X0=None, T=None, N=None, **kwargs):
"""Step response of continuous-time system.
This function is functionally the same as `scipy.signal.step`, but
it uses the function `scipy.signal.lsim2` to compute the step
response.
Parameters
----------
system : an instance of the LTI class or a tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation.
2 (num, den)
3 (zeros, poles, gain)
4 (A, B, C, D)
X0 : array_like, optional
Initial state-vector (default is zero).
T : array_like, optional
Time points (computed if not given).
N : int
Number of time points to compute if `T` is not given.
**kwargs :
Additional keyword arguments are passed on the function
`scipy.signal.lsim2`, which in turn passes them on to
:func:`scipy.integrate.odeint`. See the documentation for
:func:`scipy.integrate.odeint` for information about these
arguments.
Returns
-------
T : 1D ndarray
Output time points.
yout : 1D ndarray
Step response of system.
See also
--------
scipy.signal.step
Notes
-----
.. versionadded:: 0.8.0
"""
if isinstance(system, lti):
sys = system
else:
sys = lti(*system)
if N is None:
N = 100
if T is None:
T = _default_response_times(sys.A, N)
U = ones(T.shape, sys.A.dtype)
vals = lsim2(sys, U, T, X0=X0, **kwargs)
return vals[0], vals[1]
license: gpl-3.0
repo_name: xiaoxiamii/scikit-learn | path: sklearn/tests/test_multiclass.py | copies: 136 | size: 23649
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_greater
from sklearn.multiclass import OneVsRestClassifier
from sklearn.multiclass import OneVsOneClassifier
from sklearn.multiclass import OutputCodeClassifier
from sklearn.multiclass import fit_ovr
from sklearn.multiclass import fit_ovo
from sklearn.multiclass import fit_ecoc
from sklearn.multiclass import predict_ovr
from sklearn.multiclass import predict_ovo
from sklearn.multiclass import predict_ecoc
from sklearn.multiclass import predict_proba_ovr
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.preprocessing import LabelBinarizer
from sklearn.svm import LinearSVC, SVC
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import (LinearRegression, Lasso, ElasticNet, Ridge,
Perceptron, LogisticRegression)
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.grid_search import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn import svm
from sklearn import datasets
from sklearn.externals.six.moves import zip
iris = datasets.load_iris()
rng = np.random.RandomState(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
n_classes = 3
def test_ovr_exceptions():
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ovr.predict, [])
with ignore_warnings():
assert_raises(ValueError, predict_ovr, [LinearSVC(), MultinomialNB()],
LabelBinarizer(), [])
# Fail on multioutput data
assert_raises(ValueError, OneVsRestClassifier(MultinomialNB()).fit,
np.array([[1, 0], [0, 1]]),
np.array([[1, 2], [3, 1]]))
assert_raises(ValueError, OneVsRestClassifier(MultinomialNB()).fit,
np.array([[1, 0], [0, 1]]),
np.array([[1.5, 2.4], [3.1, 0.8]]))
def test_ovr_fit_predict():
# A classifier which implements decision_function.
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovr.estimators_), n_classes)
clf = LinearSVC(random_state=0)
pred2 = clf.fit(iris.data, iris.target).predict(iris.data)
assert_equal(np.mean(iris.target == pred), np.mean(iris.target == pred2))
# A classifier which implements predict_proba.
ovr = OneVsRestClassifier(MultinomialNB())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_greater(np.mean(iris.target == pred), 0.65)
def test_ovr_ovo_regressor():
# test that ovr and ovo work on regressors which don't have a decision_function
ovr = OneVsRestClassifier(DecisionTreeRegressor())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovr.estimators_), n_classes)
assert_array_equal(np.unique(pred), [0, 1, 2])
# we are doing something sensible
assert_greater(np.mean(pred == iris.target), .9)
ovr = OneVsOneClassifier(DecisionTreeRegressor())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovr.estimators_), n_classes * (n_classes - 1) / 2)
assert_array_equal(np.unique(pred), [0, 1, 2])
# we are doing something sensible
assert_greater(np.mean(pred == iris.target), .9)
def test_ovr_fit_predict_sparse():
for sparse in [sp.csr_matrix, sp.csc_matrix, sp.coo_matrix, sp.dok_matrix,
sp.lil_matrix]:
base_clf = MultinomialNB(alpha=1)
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=True,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
Y_pred = clf.predict(X_test)
clf_sprs = OneVsRestClassifier(base_clf).fit(X_train, sparse(Y_train))
Y_pred_sprs = clf_sprs.predict(X_test)
assert_true(clf.multilabel_)
assert_true(sp.issparse(Y_pred_sprs))
assert_array_equal(Y_pred_sprs.toarray(), Y_pred)
# Test predict_proba
Y_proba = clf_sprs.predict_proba(X_test)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = Y_proba > .5
assert_array_equal(pred, Y_pred_sprs.toarray())
# Test decision_function
clf_sprs = OneVsRestClassifier(svm.SVC()).fit(X_train, sparse(Y_train))
dec_pred = (clf_sprs.decision_function(X_test) > 0).astype(int)
assert_array_equal(dec_pred, clf_sprs.predict(X_test).toarray())
def test_ovr_always_present():
# Test that ovr works with classes that are always present or absent.
    # Note: this tests the case where _ConstantPredictor is utilised
X = np.ones((10, 2))
X[:5, :] = 0
# Build an indicator matrix where two features are always on.
# As list of lists, it would be: [[int(i >= 5), 2, 3] for i in range(10)]
y = np.zeros((10, 3))
y[5:, 0] = 1
y[:, 1] = 1
y[:, 2] = 1
ovr = OneVsRestClassifier(LogisticRegression())
assert_warns(UserWarning, ovr.fit, X, y)
y_pred = ovr.predict(X)
assert_array_equal(np.array(y_pred), np.array(y))
y_pred = ovr.decision_function(X)
assert_equal(np.unique(y_pred[:, -2:]), 1)
y_pred = ovr.predict_proba(X)
assert_array_equal(y_pred[:, -1], np.ones(X.shape[0]))
# y has a constantly absent label
y = np.zeros((10, 2))
y[5:, 0] = 1 # variable label
ovr = OneVsRestClassifier(LogisticRegression())
assert_warns(UserWarning, ovr.fit, X, y)
y_pred = ovr.predict_proba(X)
assert_array_equal(y_pred[:, -1], np.zeros(X.shape[0]))
def test_ovr_multiclass():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 0, 5], [0, 5, 0], [3, 0, 0], [0, 0, 6], [6, 0, 0]])
y = ["eggs", "spam", "ham", "eggs", "ham"]
Y = np.array([[0, 0, 1],
[0, 1, 0],
[1, 0, 0],
[0, 0, 1],
[1, 0, 0]])
classes = set("ham eggs spam".split())
for base_clf in (MultinomialNB(), LinearSVC(random_state=0),
LinearRegression(), Ridge(),
ElasticNet()):
clf = OneVsRestClassifier(base_clf).fit(X, y)
assert_equal(set(clf.classes_), classes)
y_pred = clf.predict(np.array([[0, 0, 4]]))[0]
assert_equal(set(y_pred), set("eggs"))
# test input as label indicator matrix
clf = OneVsRestClassifier(base_clf).fit(X, Y)
y_pred = clf.predict([[0, 0, 4]])[0]
assert_array_equal(y_pred, [0, 0, 1])
def test_ovr_binary():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 0, 5], [0, 5, 0], [3, 0, 0], [0, 0, 6], [6, 0, 0]])
y = ["eggs", "spam", "spam", "eggs", "spam"]
Y = np.array([[0, 1, 1, 0, 1]]).T
classes = set("eggs spam".split())
def conduct_test(base_clf, test_predict_proba=False):
clf = OneVsRestClassifier(base_clf).fit(X, y)
assert_equal(set(clf.classes_), classes)
y_pred = clf.predict(np.array([[0, 0, 4]]))[0]
assert_equal(set(y_pred), set("eggs"))
if test_predict_proba:
X_test = np.array([[0, 0, 4]])
probabilities = clf.predict_proba(X_test)
assert_equal(2, len(probabilities[0]))
assert_equal(clf.classes_[np.argmax(probabilities, axis=1)],
clf.predict(X_test))
# test input as label indicator matrix
clf = OneVsRestClassifier(base_clf).fit(X, Y)
y_pred = clf.predict([[3, 0, 0]])[0]
assert_equal(y_pred, 1)
for base_clf in (LinearSVC(random_state=0), LinearRegression(),
Ridge(), ElasticNet()):
conduct_test(base_clf)
for base_clf in (MultinomialNB(), SVC(probability=True),
LogisticRegression()):
conduct_test(base_clf, test_predict_proba=True)
def test_ovr_multilabel():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 4, 5], [0, 5, 0], [3, 3, 3], [4, 0, 6], [6, 0, 0]])
y = np.array([[0, 1, 1],
[0, 1, 0],
[1, 1, 1],
[1, 0, 1],
[1, 0, 0]])
for base_clf in (MultinomialNB(), LinearSVC(random_state=0),
LinearRegression(), Ridge(),
ElasticNet(), Lasso(alpha=0.5)):
clf = OneVsRestClassifier(base_clf).fit(X, y)
y_pred = clf.predict([[0, 4, 4]])[0]
assert_array_equal(y_pred, [0, 1, 1])
assert_true(clf.multilabel_)
def test_ovr_fit_predict_svc():
ovr = OneVsRestClassifier(svm.SVC())
ovr.fit(iris.data, iris.target)
assert_equal(len(ovr.estimators_), 3)
assert_greater(ovr.score(iris.data, iris.target), .9)
def test_ovr_multilabel_dataset():
base_clf = MultinomialNB(alpha=1)
for au, prec, recall in zip((True, False), (0.51, 0.66), (0.51, 0.80)):
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=2,
length=50,
allow_unlabeled=au,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test, Y_test = X[80:], Y[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
Y_pred = clf.predict(X_test)
assert_true(clf.multilabel_)
assert_almost_equal(precision_score(Y_test, Y_pred, average="micro"),
prec,
decimal=2)
assert_almost_equal(recall_score(Y_test, Y_pred, average="micro"),
recall,
decimal=2)
def test_ovr_multilabel_predict_proba():
base_clf = MultinomialNB(alpha=1)
for au in (False, True):
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=au,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
# decision function only estimator. Fails in current implementation.
decision_only = OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train)
assert_raises(AttributeError, decision_only.predict_proba, X_test)
# Estimator with predict_proba disabled, depending on parameters.
decision_only = OneVsRestClassifier(svm.SVC(probability=False))
decision_only.fit(X_train, Y_train)
assert_raises(AttributeError, decision_only.predict_proba, X_test)
Y_pred = clf.predict(X_test)
Y_proba = clf.predict_proba(X_test)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = Y_proba > .5
assert_array_equal(pred, Y_pred)
def test_ovr_single_label_predict_proba():
base_clf = MultinomialNB(alpha=1)
X, Y = iris.data, iris.target
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
# decision function only estimator. Fails in current implementation.
decision_only = OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train)
assert_raises(AttributeError, decision_only.predict_proba, X_test)
Y_pred = clf.predict(X_test)
Y_proba = clf.predict_proba(X_test)
assert_almost_equal(Y_proba.sum(axis=1), 1.0)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = np.array([l.argmax() for l in Y_proba])
assert_false((pred - Y_pred).any())
def test_ovr_multilabel_decision_function():
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=True,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(svm.SVC()).fit(X_train, Y_train)
assert_array_equal((clf.decision_function(X_test) > 0).astype(int),
clf.predict(X_test))
def test_ovr_single_label_decision_function():
X, Y = datasets.make_classification(n_samples=100,
n_features=20,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(svm.SVC()).fit(X_train, Y_train)
assert_array_equal(clf.decision_function(X_test).ravel() > 0,
clf.predict(X_test))
def test_ovr_gridsearch():
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ovr, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
def test_ovr_pipeline():
# Test with pipeline of length one
# This test is needed because the multiclass estimators may fail to detect
# the presence of predict_proba or decision_function.
clf = Pipeline([("tree", DecisionTreeClassifier())])
ovr_pipe = OneVsRestClassifier(clf)
ovr_pipe.fit(iris.data, iris.target)
ovr = OneVsRestClassifier(DecisionTreeClassifier())
ovr.fit(iris.data, iris.target)
assert_array_equal(ovr.predict(iris.data), ovr_pipe.predict(iris.data))
def test_ovr_coef_():
for base_classifier in [SVC(kernel='linear', random_state=0), LinearSVC(random_state=0)]:
# SVC has sparse coef with sparse input data
ovr = OneVsRestClassifier(base_classifier)
for X in [iris.data, sp.csr_matrix(iris.data)]:
# test with dense and sparse coef
ovr.fit(X, iris.target)
shape = ovr.coef_.shape
assert_equal(shape[0], n_classes)
assert_equal(shape[1], iris.data.shape[1])
# don't densify sparse coefficients
assert_equal(sp.issparse(ovr.estimators_[0].coef_), sp.issparse(ovr.coef_))
def test_ovr_coef_exceptions():
# Not fitted exception!
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
# lambda is needed because we don't want coef_ to be evaluated right away
assert_raises(ValueError, lambda x: ovr.coef_, None)
# Doesn't have coef_ exception!
ovr = OneVsRestClassifier(DecisionTreeClassifier())
ovr.fit(iris.data, iris.target)
assert_raises(AttributeError, lambda x: ovr.coef_, None)
def test_ovo_exceptions():
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ovo.predict, [])
def test_ovo_fit_on_list():
# Test that OneVsOne fitting works with a list of targets and yields the
# same output as predict from an array
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
prediction_from_array = ovo.fit(iris.data, iris.target).predict(iris.data)
prediction_from_list = ovo.fit(iris.data,
list(iris.target)).predict(iris.data)
assert_array_equal(prediction_from_array, prediction_from_list)
def test_ovo_fit_predict():
# A classifier which implements decision_function.
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
ovo.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovo.estimators_), n_classes * (n_classes - 1) / 2)
# A classifier which implements predict_proba.
ovo = OneVsOneClassifier(MultinomialNB())
ovo.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovo.estimators_), n_classes * (n_classes - 1) / 2)
def test_ovo_decision_function():
n_samples = iris.data.shape[0]
ovo_clf = OneVsOneClassifier(LinearSVC(random_state=0))
ovo_clf.fit(iris.data, iris.target)
decisions = ovo_clf.decision_function(iris.data)
assert_equal(decisions.shape, (n_samples, n_classes))
assert_array_equal(decisions.argmax(axis=1), ovo_clf.predict(iris.data))
# Compute the votes
votes = np.zeros((n_samples, n_classes))
k = 0
for i in range(n_classes):
for j in range(i + 1, n_classes):
pred = ovo_clf.estimators_[k].predict(iris.data)
votes[pred == 0, i] += 1
votes[pred == 1, j] += 1
k += 1
# Extract votes and verify
assert_array_equal(votes, np.round(decisions))
for class_idx in range(n_classes):
# For each sample and each class, there are only 3 possible vote levels
# because there are only 3 distinct class pairs and thus 3 distinct
# binary classifiers.
# Therefore, sorting predictions based on votes would yield
# mostly tied predictions:
assert_true(set(votes[:, class_idx]).issubset(set([0., 1., 2.])))
# The OvO decision function, on the other hand, resolves most of
# these ties because it combines the vote counts with the aggregated
# confidence levels of the binary classifiers. The iris dataset has
# 150 samples with only a couple of duplicates, so nearly all
# decision values should be distinct:
assert_greater(len(np.unique(decisions[:, class_idx])), 146)
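# --- Added illustrative sketch (not part of the original test suite) ---
# One simple way an OvO-style aggregate decision can combine vote counts with
# confidences is to squash the confidences so each stays well below one vote:
# they can then break ties but never overturn the vote ordering. The arrays
# and the squashing constant below are assumptions chosen for illustration,
# not necessarily the library's exact formula.
import numpy as np
_votes = np.array([1., 1., 1.])  # three-way tie between classes 0, 1, 2
_sum_of_confidences = np.array([0.2, -0.4, 0.9])
_scaled = _sum_of_confidences / (3 * (np.abs(_sum_of_confidences) + 1))  # |.| < 1/3
_decisions = _votes + _scaled  # tie broken in favour of class 2
assert _decisions.argmax() == 2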
def test_ovo_gridsearch():
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ovo, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
def test_ovo_ties():
# Test that ties are broken using the decision function,
# not defaulting to the smallest label
X = np.array([[1, 2], [2, 1], [-2, 1], [-2, -1]])
y = np.array([2, 0, 1, 2])
multi_clf = OneVsOneClassifier(Perceptron(shuffle=False))
ovo_prediction = multi_clf.fit(X, y).predict(X)
ovo_decision = multi_clf.decision_function(X)
# Classifiers are in order 0-1, 0-2, 1-2
# Use decision_function to compute the votes and the normalized
# sum_of_confidences, which is used to disambiguate when there is a tie in
# votes.
votes = np.round(ovo_decision)
normalized_confidences = ovo_decision - votes
# For the first point, there is one vote per class
assert_array_equal(votes[0, :], 1)
# For the rest, there is no tie and the prediction is the argmax
assert_array_equal(np.argmax(votes[1:], axis=1), ovo_prediction[1:])
# For the tie, the prediction is the class with the highest score
assert_equal(ovo_prediction[0], normalized_confidences[0].argmax())
def test_ovo_ties2():
# test that ties can be won by labels other than the first two
X = np.array([[1, 2], [2, 1], [-2, 1], [-2, -1]])
y_ref = np.array([2, 0, 1, 2])
# cycle through labels so that each label wins once
for i in range(3):
y = (y_ref + i) % 3
multi_clf = OneVsOneClassifier(Perceptron(shuffle=False))
ovo_prediction = multi_clf.fit(X, y).predict(X)
assert_equal(ovo_prediction[0], i % 3)
def test_ovo_string_y():
# Test that the OvO doesn't mess up the encoding of string labels
X = np.eye(4)
y = np.array(['a', 'b', 'c', 'd'])
ovo = OneVsOneClassifier(LinearSVC())
ovo.fit(X, y)
assert_array_equal(y, ovo.predict(X))
def test_ecoc_exceptions():
ecoc = OutputCodeClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ecoc.predict, [])
def test_ecoc_fit_predict():
# A classifier which implements decision_function.
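# (Added note: code_size is a multiplier on the number of classes, so
# code_size=2 on the 3-class iris data yields 3 * 2 = 6 binary estimators,
# which is what the length assertions below check.)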
ecoc = OutputCodeClassifier(LinearSVC(random_state=0),
code_size=2, random_state=0)
ecoc.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ecoc.estimators_), n_classes * 2)
# A classifier which implements predict_proba.
ecoc = OutputCodeClassifier(MultinomialNB(), code_size=2, random_state=0)
ecoc.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ecoc.estimators_), n_classes * 2)
def test_ecoc_gridsearch():
ecoc = OutputCodeClassifier(LinearSVC(random_state=0),
random_state=0)
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ecoc, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
@ignore_warnings
def test_deprecated():
base_estimator = DecisionTreeClassifier(random_state=0)
X, Y = iris.data, iris.target
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
all_metas = [
(OneVsRestClassifier, fit_ovr, predict_ovr, predict_proba_ovr),
(OneVsOneClassifier, fit_ovo, predict_ovo, None),
(OutputCodeClassifier, fit_ecoc, predict_ecoc, None),
]
for MetaEst, fit_func, predict_func, proba_func in all_metas:
try:
meta_est = MetaEst(base_estimator,
random_state=0).fit(X_train, Y_train)
fitted_return = fit_func(base_estimator, X_train, Y_train,
random_state=0)
except TypeError:
meta_est = MetaEst(base_estimator).fit(X_train, Y_train)
fitted_return = fit_func(base_estimator, X_train, Y_train)
if len(fitted_return) == 2:
estimators_, classes_or_lb = fitted_return
assert_almost_equal(predict_func(estimators_, classes_or_lb,
X_test),
meta_est.predict(X_test))
if proba_func is not None:
assert_almost_equal(proba_func(estimators_, X_test,
is_multilabel=False),
meta_est.predict_proba(X_test))
else:
estimators_, classes_or_lb, codebook = fitted_return
assert_almost_equal(predict_func(estimators_, classes_or_lb,
codebook, X_test),
meta_est.predict(X_test))
|
bsd-3-clause
|
david-hoffman/pyOTF
|
notebooks/Microscope Imaging Models/easy_plot.py
|
1
|
2047
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# easy_plot.py
"""
An easy plotting function.
Copyright (c) 2020, David Hoffman
"""
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.axes_grid1 import ImageGrid
from dphutils import bin_ndarray
from pyotf.utils import easy_fft
# plot function 😬
def easy_plot(
psfs, labels, oversample_factor=1, res=1, gam=0.3, vmin=1e-3, interpolation="bicubic"
):
"""Make a nice plot."""
ncols = len(psfs)
assert ncols == len(labels), "Lengths mismatched"
assert ncols < 10
plot_size = 2.0
fig = plt.figure(None, (plot_size * ncols, plot_size * 4), dpi=150)
grid = ImageGrid(fig, 111, nrows_ncols=(4, ncols), axes_pad=0.1)
fig2, axp = plt.subplots(dpi=150, figsize=(plot_size * ncols, 4))
for (i, p), l, col in zip(enumerate(psfs), labels, grid.axes_column):
p = bin_ndarray(p, bin_size=oversample_factor)
p /= p.max()
col[0].imshow(p.max(1), norm=mpl.colors.PowerNorm(gam), interpolation=interpolation)
col[1].imshow(p.max(0), norm=mpl.colors.PowerNorm(gam), interpolation=interpolation)
col[0].set_title(l)
otf = abs(easy_fft(p))
otf /= otf.max()
otf = np.fmax(otf, vmin)
c = (len(otf) + 1) // 2
col[2].matshow(otf[:, c], norm=mpl.colors.LogNorm(), interpolation=interpolation)
col[3].matshow(otf[c], norm=mpl.colors.LogNorm(), interpolation=interpolation)
pp = p[:, c, c]
axp.plot((np.arange(len(pp)) - (len(pp) + 1) // 2) * res, pp / pp.max(), label=l)
for ax in grid:
ax.xaxis.set_major_locator(plt.NullLocator())
ax.yaxis.set_major_locator(plt.NullLocator())
ylabels = "XZ", "XY"
ylabels += tuple(map(lambda x: r"$k_{{{}}}$".format(x), ylabels))
for ax, l in zip(grid.axes_column[0], ylabels):
ax.set_ylabel(l)
axp.yaxis.set_major_locator(plt.NullLocator())
axp.set_xlabel("Axial Position (µm)")
axp.set_title("On Axis Intensity")
axp.legend()
|
apache-2.0
|
gandalfcode/gandalf
|
examples/example12.py
|
1
|
1105
|
#==============================================================================
# example12.py
# Plot particle quantities in an alternative coordinate system.
#==============================================================================
from gandalf.analysis.facade import *
from matplotlib.colors import LogNorm
# Create simulation object from Boss-Bodenheimer parameters file
sim = newsim("bossbodenheimer.dat")
sim.SetParam("tend",0.02)
setupsim()
# Run simulation and plot x-y positions of SPH particles in the default
# units specified in the `bossbodenheimer.dat' parameters file.
plot("x","y")
addplot("x","y",type="star")
limit("x",-0.007,0.007)
limit("y",-0.007,0.007)
window()
render("x","y","rho",res=256,#norm=LogNorm(),
interpolation='bicubic')
limit("x",-0.007,0.007)
limit("y",-0.007,0.007)
run()
block()
# After pressing return, re-plot last snapshot but in new specified units (au).
window(1)
plot("x","y",xunit="au",yunit="au")
window(2)
render("x","y","rho",res=256,#norm=LogNorm(),
interpolation='bicubic')
limit("x",-0.007,0.007)
limit("y",-0.007,0.007)
block()
|
gpl-2.0
|
DroneBuster/ardupilot
|
Tools/LogAnalyzer/tests/TestOptFlow.py
|
26
|
14969
|
from LogAnalyzer import Test,TestResult
import DataflashLog
from math import sqrt
import numpy as np
import matplotlib.pyplot as plt
class TestFlow(Test):
'''test optical flow sensor scale factor calibration'''
#
# Use the following procedure to log the calibration data. It is assumed that the optical flow sensor has been
# correctly aligned, is focused, and that the test is performed over a textured surface with adequate lighting.
# Note that the strobing effect from non-incandescent artificial lighting can produce poor optical flow measurements.
#
# 1) Set LOG_DISARMED and FLOW_ENABLE to 1 and verify that ATT and OF messages are being logged onboard
# 2) Place on level ground, apply power and wait for EKF to complete attitude alignment
# 3) Keeping the copter level, lift it to shoulder height and rock between +-20 and +-30 degrees
# in roll about an axis that passes through the flow sensor lens assembly. The time taken to rotate from
# maximum left roll to maximum right roll should be about 1 second.
# 4) Repeat 3) about the pitch axis
# 5) Holding the copter level, lower it to the ground and remove power
# 6) Transfer the logfile from the sdcard.
# 7) Open a terminal and cd to the ardupilot/Tools/LogAnalyzer directory
# 8) Run the analysis with 'python LogAnalyzer.py <log file name including full path>'
# 9) Check the OpticalFlow test status printed to the screen. The analysis plots are saved to
# flow_calibration.pdf and the recommended scale factors to flow_calibration.param
def __init__(self):
Test.__init__(self)
self.name = "OpticalFlow"
def run(self, logdata, verbose):
self.result = TestResult()
self.result.status = TestResult.StatusType.GOOD
def FAIL():
self.result.status = TestResult.StatusType.FAIL
def WARN():
if self.result.status != TestResult.StatusType.FAIL:
self.result.status = TestResult.StatusType.WARN
try:
# tuning parameters used by the algorithm
tilt_threshold = 15 # roll and pitch threshold used to start and stop calibration (deg)
quality_threshold = 124 # minimum flow quality required for data to be used by the curve fit (N/A)
min_rate_threshold = 0.0 # if the gyro rate is less than this, the data will not be used by the curve fit (rad/sec)
max_rate_threshold = 2.0 # if the gyro rate is greater than this, the data will not be used by the curve fit (rad/sec)
param_std_threshold = 5.0 # maximum allowable 1-std uncertainty in scaling parameter (scale factor * 1000)
param_abs_threshold = 200 # max/min allowable scale factor parameter. Values of FLOW_FXSCALER and FLOW_FYSCALER outside the range of +-param_abs_threshold indicate a sensor configuration problem.
min_num_points = 100 # minimum number of points required for a curve fit - this is a necessary, but not sufficient, condition - the standard deviation estimate of the fit gradient is also important.
# get the existing scale parameters
flow_fxscaler = logdata.parameters["FLOW_FXSCALER"]
flow_fyscaler = logdata.parameters["FLOW_FYSCALER"]
# load required optical flow data
if "OF" in logdata.channels:
flowX = np.zeros(len(logdata.channels["OF"]["flowX"].listData))
for i in range(len(logdata.channels["OF"]["flowX"].listData)):
(line, flowX[i]) = logdata.channels["OF"]["flowX"].listData[i]
bodyX = np.zeros(len(logdata.channels["OF"]["bodyX"].listData))
for i in range(len(logdata.channels["OF"]["bodyX"].listData)):
(line, bodyX[i]) = logdata.channels["OF"]["bodyX"].listData[i]
flowY = np.zeros(len(logdata.channels["OF"]["flowY"].listData))
for i in range(len(logdata.channels["OF"]["flowY"].listData)):
(line, flowY[i]) = logdata.channels["OF"]["flowY"].listData[i]
bodyY = np.zeros(len(logdata.channels["OF"]["bodyY"].listData))
for i in range(len(logdata.channels["OF"]["bodyY"].listData)):
(line, bodyY[i]) = logdata.channels["OF"]["bodyY"].listData[i]
flow_time_us = np.zeros(len(logdata.channels["OF"]["TimeUS"].listData))
for i in range(len(logdata.channels["OF"]["TimeUS"].listData)):
(line, flow_time_us[i]) = logdata.channels["OF"]["TimeUS"].listData[i]
flow_qual = np.zeros(len(logdata.channels["OF"]["Qual"].listData))
for i in range(len(logdata.channels["OF"]["Qual"].listData)):
(line, flow_qual[i]) = logdata.channels["OF"]["Qual"].listData[i]
else:
FAIL()
self.result.statusMessage = "FAIL: no optical flow data\n"
return
# load required attitude data
if "ATT" in logdata.channels:
Roll = np.zeros(len(logdata.channels["ATT"]["Roll"].listData))
for i in range(len(logdata.channels["ATT"]["Roll"].listData)):
(line, Roll[i]) = logdata.channels["ATT"]["Roll"].listData[i]
Pitch = np.zeros(len(logdata.channels["ATT"]["Pitch"].listData))
for i in range(len(logdata.channels["ATT"]["Pitch"].listData)):
(line, Pitch[i]) = logdata.channels["ATT"]["Pitch"].listData[i]
att_time_us = np.zeros(len(logdata.channels["ATT"]["TimeUS"].listData))
for i in range(len(logdata.channels["ATT"]["TimeUS"].listData)):
(line, att_time_us[i]) = logdata.channels["ATT"]["TimeUS"].listData[i]
else:
FAIL()
self.result.statusMessage = "FAIL: no attitude data\n"
return
# calculate the start time for the roll calibration
startTime = int(0)
startRollIndex = int(0)
for i in range(len(Roll)):
if abs(Roll[i]) > tilt_threshold:
startTime = att_time_us[i]
break
for i in range(len(flow_time_us)):
if flow_time_us[i] > startTime:
startRollIndex = i
break
# calculate the end time for the roll calibration
endTime = int(0)
endRollIndex = int(0)
for i in range(len(Roll)-1,-1,-1):
if abs(Roll[i]) > tilt_threshold:
endTime = att_time_us[i]
break
for i in range(len(flow_time_us)-1,-1,-1):
if flow_time_us[i] < endTime:
endRollIndex = i
break
# check we have enough roll data points
if (endRollIndex - startRollIndex <= min_num_points):
FAIL()
self.result.statusMessage = "FAIL: insufficient roll data points\n"
return
# resample roll test data excluding data before first movement and after last movement
# also exclude data where there is insufficient angular rate
flowX_resampled = []
bodyX_resampled = []
flowX_time_us_resampled = []
for i in range(len(Roll)):
if (i >= startRollIndex) and (i <= endRollIndex) and (abs(bodyX[i]) > min_rate_threshold) and (abs(bodyX[i]) < max_rate_threshold) and (flow_qual[i] > quality_threshold):
flowX_resampled.append(flowX[i])
bodyX_resampled.append(bodyX[i])
flowX_time_us_resampled.append(flow_time_us[i])
# calculate the start time for the pitch calibration
startTime = 0
startPitchIndex = int(0)
for i in range(len(Pitch)):
if abs(Pitch[i]) > tilt_threshold:
startTime = att_time_us[i]
break
for i in range(len(flow_time_us)):
if flow_time_us[i] > startTime:
startPitchIndex = i
break
# calculate the end time for the pitch calibration
endTime = 0
endPitchIndex = int(0)
for i in range(len(Pitch)-1,-1,-1):
if abs(Pitch[i]) > tilt_threshold:
endTime = att_time_us[i]
break
for i in range(len(flow_time_us)-1,-1,-1):
if flow_time_us[i] < endTime:
endPitchIndex = i
break
# check we have enough pitch data points
if (endPitchIndex - startPitchIndex <= min_num_points):
FAIL()
self.result.statusMessage = "FAIL: insufficient pitch data points\n"
return
# resample pitch test data excluding data before first movement and after last movement
# also exclude data where there is insufficient or too much angular rate
flowY_resampled = []
bodyY_resampled = []
flowY_time_us_resampled = []
for i in range(len(Roll)):
if (i >= startPitchIndex) and (i <= endPitchIndex) and (abs(bodyY[i]) > min_rate_threshold) and (abs(bodyY[i]) < max_rate_threshold) and (flow_qual[i] > quality_threshold):
flowY_resampled.append(flowY[i])
bodyY_resampled.append(bodyY[i])
flowY_time_us_resampled.append(flow_time_us[i])
# fit a straight line to the flow vs body rate data and calculate the scale factor parameter required to achieve a slope of 1
coef_flow_x , cov_x = np.polyfit(bodyX_resampled,flowX_resampled,1,rcond=None, full=False, w=None, cov=True)
coef_flow_y , cov_y = np.polyfit(bodyY_resampled,flowY_resampled,1,rcond=None, full=False, w=None, cov=True)
# taking the existing scale factor parameters into account, calculate the parameter values required to achieve a unity slope
flow_fxscaler_new = int(1000 * (((1 + 0.001 * float(flow_fxscaler))/coef_flow_x[0] - 1)))
flow_fyscaler_new = int(1000 * (((1 + 0.001 * float(flow_fyscaler))/coef_flow_y[0] - 1)))
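# (Added worked example with assumed numbers: with an existing scaler of 0 and a
# fitted slope of 0.8, the new value is 1000 * ((1 + 0) / 0.8 - 1) = 250,
# i.e. the flow output is scaled up by 25% to reach a unity slope.)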
# Do a sanity check on the scale factor variance
if sqrt(cov_x[0][0]) > param_std_threshold or sqrt(cov_y[0][0]) > param_std_threshold:
FAIL()
self.result.statusMessage = "FAIL: inaccurate fit - poor quality or insufficient data\nFLOW_FXSCALER 1STD = %u\nFLOW_FYSCALER 1STD = %u\n" % (round(1000*sqrt(cov_x[0][0])),round(1000*sqrt(cov_y[0][0])))
# Do a sanity check on the scale factors
if abs(flow_fxscaler_new) > param_abs_threshold or abs(flow_fyscaler_new) > param_abs_threshold:
FAIL()
self.result.statusMessage = "FAIL: required scale factors are excessive\nFLOW_FXSCALER=%i\nFLOW_FYSCALER=%i\n" % (flow_fxscaler_new,flow_fyscaler_new)
# display recommended scale factors
self.result.statusMessage = "Set FLOW_FXSCALER to %i\nSet FLOW_FYSCALER to %i\n\nCal plots saved to flow_calibration.pdf\nCal parameters saved to flow_calibration.param\n\nFLOW_FXSCALER 1STD = %u\nFLOW_FYSCALER 1STD = %u\n" % (flow_fxscaler_new,flow_fyscaler_new,round(1000*sqrt(cov_x[0][0])),round(1000*sqrt(cov_y[0][0])))
# calculate fit display data
body_rate_display = [-max_rate_threshold,max_rate_threshold]
fit_coef_x = np.poly1d(coef_flow_x)
flowX_display = fit_coef_x(body_rate_display)
fit_coef_y = np.poly1d(coef_flow_y)
flowY_display = fit_coef_y(body_rate_display)
# plot and save calibration test points to PDF
from matplotlib.backends.backend_pdf import PdfPages
output_plot_filename = "flow_calibration.pdf"
pp = PdfPages(output_plot_filename)
plt.figure(1,figsize=(20,13))
plt.subplot(2,1,1)
plt.plot(bodyX_resampled,flowX_resampled,'b', linestyle=' ', marker='o',label="test points")
plt.plot(body_rate_display,flowX_display,'r',linewidth=2.5,label="linear fit")
plt.title('X axis flow rate vs gyro rate')
plt.ylabel('flow rate (rad/s)')
plt.xlabel('gyro rate (rad/sec)')
plt.grid()
plt.legend(loc='upper left')
# draw plots
plt.subplot(2,1,2)
plt.plot(bodyY_resampled,flowY_resampled,'b', linestyle=' ', marker='o',label="test points")
plt.plot(body_rate_display,flowY_display,'r',linewidth=2.5,label="linear fit")
plt.title('Y axis flow rate vs gyro rate')
plt.ylabel('flow rate (rad/s)')
plt.xlabel('gyro rate (rad/sec)')
plt.grid()
plt.legend(loc='upper left')
pp.savefig()
plt.figure(2,figsize=(20,13))
plt.subplot(2,1,1)
plt.plot(flow_time_us,flowX,'b',label="flow rate - all")
plt.plot(flow_time_us,bodyX,'r',label="gyro rate - all")
plt.plot(flowX_time_us_resampled,flowX_resampled,'c', linestyle=' ', marker='o',label="flow rate - used")
plt.plot(flowX_time_us_resampled,bodyX_resampled,'m', linestyle=' ', marker='o',label="gyro rate - used")
plt.title('X axis flow and body rate vs time')
plt.ylabel('rate (rad/s)')
plt.xlabel('time (usec)')
plt.grid()
plt.legend(loc='upper left')
# draw plots
plt.subplot(2,1,2)
plt.plot(flow_time_us,flowY,'b',label="flow rate - all")
plt.plot(flow_time_us,bodyY,'r',label="gyro rate - all")
plt.plot(flowY_time_us_resampled,flowY_resampled,'c', linestyle=' ', marker='o',label="flow rate - used")
plt.plot(flowY_time_us_resampled,bodyY_resampled,'m', linestyle=' ', marker='o',label="gyro rate - used")
plt.title('Y axis flow and body rate vs time')
plt.ylabel('rate (rad/s)')
plt.xlabel('time (usec)')
plt.grid()
plt.legend(loc='upper left')
pp.savefig()
# close the pdf file
pp.close()
# close all figures
plt.close("all")
# write correction parameters to file
test_results_filename = "flow_calibration.param"
file = open(test_results_filename,"w")
file.write("FLOW_FXSCALER"+" "+str(flow_fxscaler_new)+"\n")
file.write("FLOW_FYSCALER"+" "+str(flow_fyscaler_new)+"\n")
file.close()
except KeyError as e:
self.result.status = TestResult.StatusType.FAIL
self.result.statusMessage = str(e) + ' not found'
|
gpl-3.0
|
potash/scikit-learn
|
sklearn/utils/tests/test_extmath.py
|
7
|
24378
|
# Authors: Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Denis Engemann <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from scipy import linalg
from scipy import stats
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import skip_if_32bit
from sklearn.utils.testing import SkipTest
from sklearn.utils.fixes import np_version
from sklearn.utils.extmath import density
from sklearn.utils.extmath import logsumexp
from sklearn.utils.extmath import norm, squared_norm
from sklearn.utils.extmath import randomized_svd
from sklearn.utils.extmath import row_norms
from sklearn.utils.extmath import weighted_mode
from sklearn.utils.extmath import cartesian
from sklearn.utils.extmath import log_logistic
from sklearn.utils.extmath import fast_dot, _fast_dot
from sklearn.utils.extmath import svd_flip
from sklearn.utils.extmath import _incremental_mean_and_var
from sklearn.utils.extmath import _deterministic_vector_sign_flip
from sklearn.utils.extmath import softmax
from sklearn.utils.extmath import stable_cumsum
from sklearn.datasets.samples_generator import make_low_rank_matrix
def test_density():
rng = np.random.RandomState(0)
X = rng.randint(10, size=(10, 5))
X[1, 2] = 0
X[5, 3] = 0
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
X_coo = sparse.coo_matrix(X)
X_lil = sparse.lil_matrix(X)
for X_ in (X_csr, X_csc, X_coo, X_lil):
assert_equal(density(X_), density(X))
def test_uniform_weights():
# with uniform weights, results should be identical to stats.mode
rng = np.random.RandomState(0)
x = rng.randint(10, size=(10, 5))
weights = np.ones(x.shape)
for axis in (None, 0, 1):
mode, score = stats.mode(x, axis)
mode2, score2 = weighted_mode(x, weights, axis)
assert_true(np.all(mode == mode2))
assert_true(np.all(score == score2))
def test_random_weights():
# set this up so that each row should have a weighted mode of 6,
# with a score that is easily reproduced
mode_result = 6
rng = np.random.RandomState(0)
x = rng.randint(mode_result, size=(100, 10))
w = rng.random_sample(x.shape)
x[:, :5] = mode_result
w[:, :5] += 1
mode, score = weighted_mode(x, w, axis=1)
assert_array_equal(mode, mode_result)
assert_array_almost_equal(score.ravel(), w[:, :5].sum(1))
def test_logsumexp():
# Try to add some smallish numbers in logspace
x = np.array([1e-40] * 1000000)
logx = np.log(x)
assert_almost_equal(np.exp(logsumexp(logx)), x.sum())
X = np.vstack([x, x])
logX = np.vstack([logx, logx])
assert_array_almost_equal(np.exp(logsumexp(logX, axis=0)), X.sum(axis=0))
assert_array_almost_equal(np.exp(logsumexp(logX, axis=1)), X.sum(axis=1))
def test_randomized_svd_low_rank():
# Check that extmath.randomized_svd is consistent with linalg.svd
n_samples = 100
n_features = 500
rank = 5
k = 10
# generate a matrix X of approximate effective rank `rank` and no noise
# component (very structured signal):
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.0,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
U, s, V = linalg.svd(X, full_matrices=False)
for normalizer in ['auto', 'LU', 'QR']: # 'none' would not be stable
# compute the singular values of X using the fast approximate method
Ua, sa, Va = \
randomized_svd(X, k, power_iteration_normalizer=normalizer,
random_state=0)
assert_equal(Ua.shape, (n_samples, k))
assert_equal(sa.shape, (k,))
assert_equal(Va.shape, (k, n_features))
# ensure that the singular values of both methods are equal up to the
# real rank of the matrix
assert_almost_equal(s[:k], sa)
# check the singular vectors too (while not checking the sign)
assert_almost_equal(np.dot(U[:, :k], V[:k, :]), np.dot(Ua, Va))
# check the sparse matrix representation
X = sparse.csr_matrix(X)
# compute the singular values of X using the fast approximate method
Ua, sa, Va = \
randomized_svd(X, k, power_iteration_normalizer=normalizer,
random_state=0)
assert_almost_equal(s[:rank], sa[:rank])
def test_norm_squared_norm():
X = np.random.RandomState(42).randn(50, 63)
X *= 100 # check stability
X += 200
assert_almost_equal(np.linalg.norm(X.ravel()), norm(X))
assert_almost_equal(norm(X) ** 2, squared_norm(X), decimal=6)
assert_almost_equal(np.linalg.norm(X), np.sqrt(squared_norm(X)), decimal=6)
def test_row_norms():
X = np.random.RandomState(42).randn(100, 100)
for dtype in (np.float32, np.float64):
if dtype is np.float32:
precision = 4
else:
precision = 5
X = X.astype(dtype)
sq_norm = (X ** 2).sum(axis=1)
assert_array_almost_equal(sq_norm, row_norms(X, squared=True),
precision)
assert_array_almost_equal(np.sqrt(sq_norm), row_norms(X), precision)
Xcsr = sparse.csr_matrix(X, dtype=dtype)
assert_array_almost_equal(sq_norm, row_norms(Xcsr, squared=True),
precision)
assert_array_almost_equal(np.sqrt(sq_norm), row_norms(Xcsr), precision)
def test_randomized_svd_low_rank_with_noise():
# Check that extmath.randomized_svd can handle noisy matrices
n_samples = 100
n_features = 500
rank = 5
k = 10
# generate a matrix X with approximate effective rank `rank` and an
# important noisy component
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.1,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
_, s, _ = linalg.svd(X, full_matrices=False)
for normalizer in ['auto', 'none', 'LU', 'QR']:
# compute the singular values of X using the fast approximate
# method without the iterated power method
_, sa, _ = randomized_svd(X, k, n_iter=0,
power_iteration_normalizer=normalizer,
random_state=0)
# the approximation does not tolerate the noise:
assert_greater(np.abs(s[:k] - sa).max(), 0.01)
# compute the singular values of X using the fast approximate
# method with iterated power method
_, sap, _ = randomized_svd(X, k,
power_iteration_normalizer=normalizer,
random_state=0)
# the iterated power method helps to get rid of the noise:
assert_almost_equal(s[:k], sap, decimal=3)
def test_randomized_svd_infinite_rank():
# Check that extmath.randomized_svd can handle noisy matrices
n_samples = 100
n_features = 500
rank = 5
k = 10
# let us try again without a low-rank component: just regularly but slowly
# decreasing singular values, so the rank of the data matrix is effectively infinite
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=1.0,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
_, s, _ = linalg.svd(X, full_matrices=False)
for normalizer in ['auto', 'none', 'LU', 'QR']:
# compute the singular values of X using the fast approximate method
# without the iterated power method
_, sa, _ = randomized_svd(X, k, n_iter=0,
power_iteration_normalizer=normalizer)
# the approximation does not tolerate the noise:
assert_greater(np.abs(s[:k] - sa).max(), 0.1)
# compute the singular values of X using the fast approximate method
# with iterated power method
_, sap, _ = randomized_svd(X, k, n_iter=5,
power_iteration_normalizer=normalizer)
# the iterated power method is still managing to get most of the
# structure at the requested rank
assert_almost_equal(s[:k], sap, decimal=3)
def test_randomized_svd_transpose_consistency():
# Check that transposing the design matrix has limited impact
n_samples = 100
n_features = 500
rank = 4
k = 10
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.5,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
U1, s1, V1 = randomized_svd(X, k, n_iter=3, transpose=False,
random_state=0)
U2, s2, V2 = randomized_svd(X, k, n_iter=3, transpose=True,
random_state=0)
U3, s3, V3 = randomized_svd(X, k, n_iter=3, transpose='auto',
random_state=0)
U4, s4, V4 = linalg.svd(X, full_matrices=False)
assert_almost_equal(s1, s4[:k], decimal=3)
assert_almost_equal(s2, s4[:k], decimal=3)
assert_almost_equal(s3, s4[:k], decimal=3)
assert_almost_equal(np.dot(U1, V1), np.dot(U4[:, :k], V4[:k, :]),
decimal=2)
assert_almost_equal(np.dot(U2, V2), np.dot(U4[:, :k], V4[:k, :]),
decimal=2)
# in this case 'auto' is equivalent to transpose
assert_almost_equal(s2, s3)
def test_randomized_svd_power_iteration_normalizer():
# randomized_svd with power_iteration_normalizer='none' diverges for
# a large number of power iterations on this dataset
rng = np.random.RandomState(42)
X = make_low_rank_matrix(100, 500, effective_rank=50, random_state=rng)
X += 3 * rng.randint(0, 2, size=X.shape)
n_components = 50
# Check that it diverges with many (non-normalized) power iterations
U, s, V = randomized_svd(X, n_components, n_iter=2,
power_iteration_normalizer='none')
A = X - U.dot(np.diag(s).dot(V))
error_2 = linalg.norm(A, ord='fro')
U, s, V = randomized_svd(X, n_components, n_iter=20,
power_iteration_normalizer='none')
A = X - U.dot(np.diag(s).dot(V))
error_20 = linalg.norm(A, ord='fro')
assert_greater(np.abs(error_2 - error_20), 100)
for normalizer in ['LU', 'QR', 'auto']:
U, s, V = randomized_svd(X, n_components, n_iter=2,
power_iteration_normalizer=normalizer,
random_state=0)
A = X - U.dot(np.diag(s).dot(V))
error_2 = linalg.norm(A, ord='fro')
for i in [5, 10, 50]:
U, s, V = randomized_svd(X, n_components, n_iter=i,
power_iteration_normalizer=normalizer,
random_state=0)
A = X - U.dot(np.diag(s).dot(V))
error = linalg.norm(A, ord='fro')
assert_greater(15, np.abs(error_2 - error))
def test_svd_flip():
# Check that svd_flip works in both situations, and reconstructs input.
rs = np.random.RandomState(1999)
n_samples = 20
n_features = 10
X = rs.randn(n_samples, n_features)
# Check matrix reconstruction
U, S, V = linalg.svd(X, full_matrices=False)
U1, V1 = svd_flip(U, V, u_based_decision=False)
assert_almost_equal(np.dot(U1 * S, V1), X, decimal=6)
# Check transposed matrix reconstruction
XT = X.T
U, S, V = linalg.svd(XT, full_matrices=False)
U2, V2 = svd_flip(U, V, u_based_decision=True)
assert_almost_equal(np.dot(U2 * S, V2), XT, decimal=6)
# Check that different flip methods are equivalent under reconstruction
U_flip1, V_flip1 = svd_flip(U, V, u_based_decision=True)
assert_almost_equal(np.dot(U_flip1 * S, V_flip1), XT, decimal=6)
U_flip2, V_flip2 = svd_flip(U, V, u_based_decision=False)
assert_almost_equal(np.dot(U_flip2 * S, V_flip2), XT, decimal=6)
def test_randomized_svd_sign_flip():
a = np.array([[2.0, 0.0], [0.0, 1.0]])
u1, s1, v1 = randomized_svd(a, 2, flip_sign=True, random_state=41)
for seed in range(10):
u2, s2, v2 = randomized_svd(a, 2, flip_sign=True, random_state=seed)
assert_almost_equal(u1, u2)
assert_almost_equal(v1, v2)
assert_almost_equal(np.dot(u2 * s2, v2), a)
assert_almost_equal(np.dot(u2.T, u2), np.eye(2))
assert_almost_equal(np.dot(v2.T, v2), np.eye(2))
def test_randomized_svd_sign_flip_with_transpose():
# Check if the randomized_svd sign flipping is always done based on u
# irrespective of transpose.
# See https://github.com/scikit-learn/scikit-learn/issues/5608
# for more details.
def max_loading_is_positive(u, v):
"""
returns bool tuple indicating if the values maximising np.abs
are positive across all rows for u and across all columns for v.
"""
u_based = (np.abs(u).max(axis=0) == u.max(axis=0)).all()
v_based = (np.abs(v).max(axis=1) == v.max(axis=1)).all()
return u_based, v_based
mat = np.arange(10 * 8).reshape(10, -1)
# Without transpose
u_flipped, _, v_flipped = randomized_svd(mat, 3, flip_sign=True)
u_based, v_based = max_loading_is_positive(u_flipped, v_flipped)
assert_true(u_based)
assert_false(v_based)
# With transpose
u_flipped_with_transpose, _, v_flipped_with_transpose = randomized_svd(
mat, 3, flip_sign=True, transpose=True)
u_based, v_based = max_loading_is_positive(
u_flipped_with_transpose, v_flipped_with_transpose)
assert_true(u_based)
assert_false(v_based)
def test_cartesian():
# Check if cartesian product delivers the right results
axes = (np.array([1, 2, 3]), np.array([4, 5]), np.array([6, 7]))
true_out = np.array([[1, 4, 6],
[1, 4, 7],
[1, 5, 6],
[1, 5, 7],
[2, 4, 6],
[2, 4, 7],
[2, 5, 6],
[2, 5, 7],
[3, 4, 6],
[3, 4, 7],
[3, 5, 6],
[3, 5, 7]])
out = cartesian(axes)
assert_array_equal(true_out, out)
# check single axis
x = np.arange(3)
assert_array_equal(x[:, np.newaxis], cartesian((x,)))
def test_logistic_sigmoid():
# Check correctness and robustness of logistic sigmoid implementation
def naive_log_logistic(x):
return np.log(1 / (1 + np.exp(-x)))
x = np.linspace(-2, 2, 50)
assert_array_almost_equal(log_logistic(x), naive_log_logistic(x))
extreme_x = np.array([-100., 100.])
assert_array_almost_equal(log_logistic(extreme_x), [-100, 0])
def test_fast_dot():
# Check fast dot blas wrapper function
if fast_dot is np.dot:
return
rng = np.random.RandomState(42)
A = rng.random_sample([2, 10])
B = rng.random_sample([2, 10])
try:
linalg.get_blas_funcs(['gemm'])[0]
has_blas = True
except (AttributeError, ValueError):
has_blas = False
if has_blas:
# Test _fast_dot for invalid input.
# Maltyped data.
for dt1, dt2 in [['f8', 'f4'], ['i4', 'i4']]:
assert_raises(ValueError, _fast_dot, A.astype(dt1),
B.astype(dt2).T)
# Malformed data.
# ndim == 0
E = np.empty(0)
assert_raises(ValueError, _fast_dot, E, E)
# ndim == 1
assert_raises(ValueError, _fast_dot, A, A[0])
# ndim > 2
assert_raises(ValueError, _fast_dot, A.T, np.array([A, A]))
# min(shape) == 1
assert_raises(ValueError, _fast_dot, A, A[0, :][None, :])
# test for matrix mismatch error
assert_raises(ValueError, _fast_dot, A, A)
# Test cov-like use case + dtypes.
for dtype in ['f8', 'f4']:
A = A.astype(dtype)
B = B.astype(dtype)
# col < row
C = np.dot(A.T, A)
C_ = fast_dot(A.T, A)
assert_almost_equal(C, C_, decimal=5)
C = np.dot(A.T, B)
C_ = fast_dot(A.T, B)
assert_almost_equal(C, C_, decimal=5)
C = np.dot(A, B.T)
C_ = fast_dot(A, B.T)
assert_almost_equal(C, C_, decimal=5)
# Test square matrix * rectangular use case.
A = rng.random_sample([2, 2])
for dtype in ['f8', 'f4']:
A = A.astype(dtype)
B = B.astype(dtype)
C = np.dot(A, B)
C_ = fast_dot(A, B)
assert_almost_equal(C, C_, decimal=5)
C = np.dot(A.T, B)
C_ = fast_dot(A.T, B)
assert_almost_equal(C, C_, decimal=5)
if has_blas:
for x in [np.array([[d] * 10] * 2) for d in [np.inf, np.nan]]:
assert_raises(ValueError, _fast_dot, x, x.T)
def test_incremental_variance_update_formulas():
# Test Youngs and Cramer incremental variance formulas.
# Doggie data from http://www.mathsisfun.com/data/standard-deviation.html
A = np.array([[600, 470, 170, 430, 300],
[600, 470, 170, 430, 300],
[600, 470, 170, 430, 300],
[600, 470, 170, 430, 300]]).T
idx = 2
X1 = A[:idx, :]
X2 = A[idx:, :]
old_means = X1.mean(axis=0)
old_variances = X1.var(axis=0)
old_sample_count = X1.shape[0]
final_means, final_variances, final_count = \
_incremental_mean_and_var(X2, old_means, old_variances,
old_sample_count)
assert_almost_equal(final_means, A.mean(axis=0), 6)
assert_almost_equal(final_variances, A.var(axis=0), 6)
assert_almost_equal(final_count, A.shape[0])
@skip_if_32bit
def test_incremental_variance_numerical_stability():
# Test Youngs and Cramer incremental variance formulas.
def np_var(A):
return A.var(axis=0)
# Naive one pass variance computation - not numerically stable
# https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
def one_pass_var(X):
n = X.shape[0]
exp_x2 = (X ** 2).sum(axis=0) / n
expx_2 = (X.sum(axis=0) / n) ** 2
return exp_x2 - expx_2
# Two-pass algorithm, stable.
# We use it as a benchmark. It is not an online algorithm
# https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Two-pass_algorithm
def two_pass_var(X):
mean = X.mean(axis=0)
Y = X.copy()
return np.mean((Y - mean)**2, axis=0)
# Naive online implementation
# https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Online_algorithm
# This works only for chunks of size 1
def naive_mean_variance_update(x, last_mean, last_variance,
last_sample_count):
updated_sample_count = (last_sample_count + 1)
samples_ratio = last_sample_count / float(updated_sample_count)
updated_mean = x / updated_sample_count + last_mean * samples_ratio
updated_variance = last_variance * samples_ratio + \
(x - last_mean) * (x - updated_mean) / updated_sample_count
return updated_mean, updated_variance, updated_sample_count
# We want to show a case when one_pass_var has error > 1e-3 while
# _batch_mean_variance_update has less.
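# (Added note: with samples of magnitude ~1e8, the intermediate sums in the
# one-pass formula are of order 1e15-1e16, where the float64 spacing is roughly
# 1, so round-off of order 1e3 can accumulate over the 10**4 samples and
# survive the final subtraction; the two-pass formula avoids this.)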
tol = 200
n_features = 2
n_samples = 10000
x1 = np.array(1e8, dtype=np.float64)
x2 = np.log(1e-5, dtype=np.float64)
A0 = x1 * np.ones((n_samples // 2, n_features), dtype=np.float64)
A1 = x2 * np.ones((n_samples // 2, n_features), dtype=np.float64)
A = np.vstack((A0, A1))
# Older versions of numpy have different precision
# In some old versions, np.var is not stable
if np.abs(np_var(A) - two_pass_var(A)).max() < 1e-6:
stable_var = np_var
else:
stable_var = two_pass_var
# Naive one pass var: >tol (=1063)
assert_greater(np.abs(stable_var(A) - one_pass_var(A)).max(), tol)
# Starting point for online algorithms: after A0
# Naive implementation: >tol (436)
mean, var, n = A0[0, :], np.zeros(n_features), n_samples // 2
for i in range(A1.shape[0]):
mean, var, n = \
naive_mean_variance_update(A1[i, :], mean, var, n)
assert_equal(n, A.shape[0])
# the mean is also slightly unstable
assert_greater(np.abs(A.mean(axis=0) - mean).max(), 1e-6)
assert_greater(np.abs(stable_var(A) - var).max(), tol)
# Robust implementation: <tol (177)
mean, var, n = A0[0, :], np.zeros(n_features), n_samples // 2
for i in range(A1.shape[0]):
mean, var, n = \
_incremental_mean_and_var(A1[i, :].reshape((1, A1.shape[1])),
mean, var, n)
assert_equal(n, A.shape[0])
assert_array_almost_equal(A.mean(axis=0), mean)
assert_greater(tol, np.abs(stable_var(A) - var).max())
def test_incremental_variance_ddof():
# Test that degrees of freedom parameter for calculations are correct.
rng = np.random.RandomState(1999)
X = rng.randn(50, 10)
n_samples, n_features = X.shape
for batch_size in [11, 20, 37]:
steps = np.arange(0, X.shape[0], batch_size)
if steps[-1] != X.shape[0]:
steps = np.hstack([steps, n_samples])
for i, j in zip(steps[:-1], steps[1:]):
batch = X[i:j, :]
if i == 0:
incremental_means = batch.mean(axis=0)
incremental_variances = batch.var(axis=0)
# Assign this twice so that the test logic is consistent
incremental_count = batch.shape[0]
sample_count = batch.shape[0]
else:
result = _incremental_mean_and_var(
batch, incremental_means, incremental_variances,
sample_count)
(incremental_means, incremental_variances,
incremental_count) = result
sample_count += batch.shape[0]
calculated_means = np.mean(X[:j], axis=0)
calculated_variances = np.var(X[:j], axis=0)
assert_almost_equal(incremental_means, calculated_means, 6)
assert_almost_equal(incremental_variances,
calculated_variances, 6)
assert_equal(incremental_count, sample_count)
def test_vector_sign_flip():
# Testing that sign flip is working & largest value has positive sign
data = np.random.RandomState(36).randn(5, 5)
max_abs_rows = np.argmax(np.abs(data), axis=1)
data_flipped = _deterministic_vector_sign_flip(data)
max_rows = np.argmax(data_flipped, axis=1)
assert_array_equal(max_abs_rows, max_rows)
signs = np.sign(data[range(data.shape[0]), max_abs_rows])
assert_array_equal(data, data_flipped * signs[:, np.newaxis])
def test_softmax():
rng = np.random.RandomState(0)
X = rng.randn(3, 5)
exp_X = np.exp(X)
sum_exp_X = np.sum(exp_X, axis=1).reshape((-1, 1))
assert_array_almost_equal(softmax(X), exp_X / sum_exp_X)
def test_stable_cumsum():
if np_version < (1, 9):
raise SkipTest("Sum is as unstable as cumsum for numpy < 1.9")
assert_array_equal(stable_cumsum([1, 2, 3]), np.cumsum([1, 2, 3]))
r = np.random.RandomState(0).rand(100000)
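# (Added note: rtol=0 and atol=0 make the internal closeness check on the last
# cumulative-sum element exact, so any floating-point error in the cumulative
# sum triggers the RuntimeError that this assertion expects.)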
assert_raise_message(RuntimeError,
'cumsum was found to be unstable: its last element '
'does not correspond to sum',
stable_cumsum, r, rtol=0, atol=0)
|
bsd-3-clause
|
qiuwch/unrealcv
|
client/examples/interactive-control.py
|
2
|
1540
|
# A toy example to use python to control the game.
import sys
sys.path.append('..')
from unrealcv import client
import matplotlib.pyplot as plt
import numpy as np
help_message = '''
A demo showing how to control a game using python
a, d: rotate camera to left and right.
q, e: move camera up and down.
'''
plt.rcParams['keymap.save'] = ''
def onpress(event):
print(event.key)
if event.key == 'a':
rot[1] += 1
if event.key == 'd':
rot[1] -= 1
if event.key == 'q':
loc[2] += 1
if event.key == 'e':
loc[2] -= 1
cmd = 'vset /camera/0/rotation %s' % ' '.join([str(v) for v in rot])
client.request(cmd)
cmd = 'vset /camera/0/location %s' % ' '.join([str(v) for v in loc])
client.request(cmd)
loc = None
rot = None
def main():
client.connect()
if not client.isconnected():
print('UnrealCV server is not running. Run the game from http://unrealcv.github.io first.')
return
else:
print(help_message)
init_loc = [float(v) for v in client.request('vget /camera/0/location').split(' ')]
init_rot = [float(v) for v in client.request('vget /camera/0/rotation').split(' ')]
global rot, loc
loc = init_loc; rot = init_rot
image = np.zeros((300, 300))
fig, ax = plt.subplots()
fig.canvas.mpl_connect('key_press_event', onpress)
ax.imshow(image)
plt.title('Keep this window in focus, used to receive key press event')
plt.axis('off')
plt.show() # Add event handler
if __name__ == '__main__':
main()
|
mit
|
7kbird/chrome
|
ppapi/native_client/tests/breakpad_crash_test/crash_dump_tester.py
|
154
|
8545
|
#!/usr/bin/python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import subprocess
import sys
import tempfile
import time
script_dir = os.path.dirname(__file__)
sys.path.append(os.path.join(script_dir,
'../../tools/browser_tester'))
import browser_tester
import browsertester.browserlauncher
# This script extends browser_tester to check for the presence of
# Breakpad crash dumps.
# This reads a file of lines containing 'key:value' pairs.
# The file contains entries like the following:
# plat:Win32
# prod:Chromium
# ptype:nacl-loader
# rept:crash svc
def ReadDumpTxtFile(filename):
dump_info = {}
fh = open(filename, 'r')
for line in fh:
if ':' in line:
key, value = line.rstrip().split(':', 1)
dump_info[key] = value
fh.close()
return dump_info
def StartCrashService(browser_path, dumps_dir, windows_pipe_name,
cleanup_funcs, crash_service_exe,
skip_if_missing=False):
# Find crash_service.exe relative to chrome.exe. This is a bit icky.
browser_dir = os.path.dirname(browser_path)
crash_service_path = os.path.join(browser_dir, crash_service_exe)
if skip_if_missing and not os.path.exists(crash_service_path):
return
proc = subprocess.Popen([crash_service_path,
'--v=1', # Verbose output for debugging failures
'--dumps-dir=%s' % dumps_dir,
'--pipe-name=%s' % windows_pipe_name])
def Cleanup():
# Note that if the process has already exited, this will raise
# an 'Access is denied' WindowsError exception, but
# crash_service.exe is not supposed to do this and such
# behaviour should make the test fail.
proc.terminate()
status = proc.wait()
sys.stdout.write('crash_dump_tester: %s exited with status %s\n'
% (crash_service_exe, status))
cleanup_funcs.append(Cleanup)
def ListPathsInDir(dir_path):
if os.path.exists(dir_path):
return [os.path.join(dir_path, name)
for name in os.listdir(dir_path)]
else:
return []
def GetDumpFiles(dumps_dirs):
all_files = [filename
for dumps_dir in dumps_dirs
for filename in ListPathsInDir(dumps_dir)]
sys.stdout.write('crash_dump_tester: Found %i files\n' % len(all_files))
for dump_file in all_files:
sys.stdout.write(' %s (size %i)\n'
% (dump_file, os.stat(dump_file).st_size))
return [dump_file for dump_file in all_files
if dump_file.endswith('.dmp')]
def Main(cleanup_funcs):
parser = browser_tester.BuildArgParser()
parser.add_option('--expected_crash_dumps', dest='expected_crash_dumps',
type=int, default=0,
help='The number of crash dumps that we should expect')
parser.add_option('--expected_process_type_for_crash',
dest='expected_process_type_for_crash',
type=str, default='nacl-loader',
help='The type of Chromium process that we expect the '
'crash dump to be for')
# Ideally we would just query the OS here to find out whether we are
# running x86-32 or x86-64 Windows, but Python's win32api module
# does not contain a wrapper for GetNativeSystemInfo(), which is
# what NaCl uses to check this, or for IsWow64Process(), which is
# what Chromium uses. Instead, we just rely on the build system to
# tell us.
parser.add_option('--win64', dest='win64', action='store_true',
help='Pass this if we are running tests for x86-64 Windows')
options, args = parser.parse_args()
temp_dir = tempfile.mkdtemp(prefix='nacl_crash_dump_tester_')
def CleanUpTempDir():
browsertester.browserlauncher.RemoveDirectory(temp_dir)
cleanup_funcs.append(CleanUpTempDir)
# To get a guaranteed unique pipe name, use the base name of the
# directory we just created.
windows_pipe_name = r'\\.\pipe\%s_crash_service' % os.path.basename(temp_dir)
# This environment variable enables Breakpad crash dumping in
# non-official builds of Chromium.
os.environ['CHROME_HEADLESS'] = '1'
if sys.platform == 'win32':
dumps_dir = temp_dir
# Override the default (global) Windows pipe name that Chromium will
# use for out-of-process crash reporting.
os.environ['CHROME_BREAKPAD_PIPE_NAME'] = windows_pipe_name
# Launch the x86-32 crash service so that we can handle crashes in
# the browser process.
StartCrashService(options.browser_path, dumps_dir, windows_pipe_name,
cleanup_funcs, 'crash_service.exe')
if options.win64:
# Launch the x86-64 crash service so that we can handle crashes
# in the NaCl loader process (nacl64.exe).
# Skip if missing, since in win64 builds crash_service.exe is 64-bit
# and crash_service64.exe does not exist.
StartCrashService(options.browser_path, dumps_dir, windows_pipe_name,
cleanup_funcs, 'crash_service64.exe',
skip_if_missing=True)
# We add a delay because there is probably a race condition:
# crash_service.exe might not have finished doing
# CreateNamedPipe() before NaCl does a crash dump and tries to
# connect to that pipe.
# TODO(mseaborn): We could change crash_service.exe to report when
# it has successfully created the named pipe.
time.sleep(1)
elif sys.platform == 'darwin':
dumps_dir = temp_dir
os.environ['BREAKPAD_DUMP_LOCATION'] = dumps_dir
elif sys.platform.startswith('linux'):
# The "--user-data-dir" option is not effective for the Breakpad
# setup in Linux Chromium, because Breakpad is initialized before
# "--user-data-dir" is read. So we set HOME to redirect the crash
# dumps to a temporary directory.
home_dir = temp_dir
os.environ['HOME'] = home_dir
options.enable_crash_reporter = True
result = browser_tester.Run(options.url, options)
# Find crash dump results.
if sys.platform.startswith('linux'):
# Look in "~/.config/*/Crash Reports". This will find crash
# reports under ~/.config/chromium or ~/.config/google-chrome, or
# under other subdirectories in case the branding is changed.
dumps_dirs = [os.path.join(path, 'Crash Reports')
for path in ListPathsInDir(os.path.join(home_dir, '.config'))]
else:
dumps_dirs = [dumps_dir]
dmp_files = GetDumpFiles(dumps_dirs)
failed = False
msg = ('crash_dump_tester: ERROR: Got %i crash dumps but expected %i\n' %
(len(dmp_files), options.expected_crash_dumps))
if len(dmp_files) != options.expected_crash_dumps:
sys.stdout.write(msg)
failed = True
for dump_file in dmp_files:
# Sanity check: Make sure dumping did not fail after opening the file.
msg = 'crash_dump_tester: ERROR: Dump file is empty\n'
if os.stat(dump_file).st_size == 0:
sys.stdout.write(msg)
failed = True
# On Windows, the crash dumps should come in pairs of a .dmp and
# .txt file.
if sys.platform == 'win32':
second_file = dump_file[:-4] + '.txt'
msg = ('crash_dump_tester: ERROR: File %r is missing a corresponding '
'%r file\n' % (dump_file, second_file))
if not os.path.exists(second_file):
sys.stdout.write(msg)
failed = True
continue
# Check that the crash dump comes from the NaCl process.
dump_info = ReadDumpTxtFile(second_file)
if 'ptype' in dump_info:
msg = ('crash_dump_tester: ERROR: Unexpected ptype value: %r != %r\n'
% (dump_info['ptype'], options.expected_process_type_for_crash))
if dump_info['ptype'] != options.expected_process_type_for_crash:
sys.stdout.write(msg)
failed = True
else:
sys.stdout.write('crash_dump_tester: ERROR: Missing ptype field\n')
failed = True
# TODO(mseaborn): Ideally we would also check that a backtrace
# containing an expected function name can be extracted from the
# crash dump.
if failed:
sys.stdout.write('crash_dump_tester: FAILED\n')
result = 1
else:
sys.stdout.write('crash_dump_tester: PASSED\n')
return result
def MainWrapper():
cleanup_funcs = []
try:
return Main(cleanup_funcs)
finally:
for func in cleanup_funcs:
func()
if __name__ == '__main__':
sys.exit(MainWrapper())
|
bsd-3-clause
|
rahlk/RAAT
|
src/Planners/threshold/naive.py
|
2
|
8270
|
"""
XTREE
"""
from __future__ import print_function, division
# import pandas as pd, numpy as np
# from pdb import set_trace
import sys
sys.path.append('..')
from tools.oracle import *
from sklearn.linear_model import LogisticRegression
from sklearn.feature_selection import f_classif, f_regression
from random import uniform
from xtree import xtree
from tools.sk import rdivDemo
from texttable import *
from CD import method1
# from tools.sk import *
# from tools.misc import *
# import tools.pyC45 as pyC45
# from tools.Discretize import discretize
# from timeit import time
# from numpy.random import normal as randn
# from tools.tune.dEvol import tuner
def VARL(coef,inter,p0=0.05):
"""
:param coef: Slope of (Y=aX+b)
:param inter: Intercept (Y=aX+b)
:param p0: Confidence Interval. Default p=0.05 (95%)
:return: VARL threshold
VARL = (log(p0 / (1 - p0)) - intercept) / slope
"""
return (np.log(p0/(1-p0))-inter)/coef
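# --- Added usage sketch (assumed toy data, not part of the original planner) ---
# Fit a univariate logistic model on toy values of a single metric and recover
# the metric value at which the predicted defect probability reaches p0. All
# names suffixed with _toy are illustrative assumptions.
from sklearn.linear_model import LogisticRegression as _ToyLR
import numpy as _np_toy
_X_toy = _np_toy.array([[1.], [2.], [3.], [10.], [12.], [15.]]) # one metric
_y_toy = _np_toy.array([0, 0, 0, 1, 1, 1]) # defect labels
_lr_toy = _ToyLR().fit(_X_toy, _y_toy)
_thresh_toy = VARL(_lr_toy.coef_[0][0], _lr_toy.intercept_[0], p0=0.05)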
def apply(changes, row):
all = []
for idx, thres in enumerate(changes):
newRow = row
if thres>0:
if newRow[idx]>thres:
newRow[idx] = uniform(0, thres)
all.append(newRow)
return all
def apply2(changes, row):
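# (Added note: caps every metric whose value exceeds its positive threshold to
# a random value below that threshold, mutating and returning the same row.)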
newRow = row
for idx, thres in enumerate(changes):
if thres>0:
if newRow[idx]>thres:
newRow[idx] = uniform(0, thres)
return newRow
def shatnawi10(train, test, rftrain=None, tunings=None, verbose=False):
"Threshold based planning"
""" Compute Thresholds
"""
data_DF=csv2DF(train, toBin=True)
metrics=[str[1:] for str in data_DF[data_DF.columns[:-1]]]
ubr = LogisticRegression() # Init LogisticRegressor
X = data_DF[data_DF.columns[:-1]].values # Independent Features (CK-Metrics)
y = data_DF[data_DF.columns[-1]].values # Dependent Feature (Bugs)
ubr.fit(X,y) # Fit Logit curve
inter = ubr.intercept_[0] # Intercepts
coef = ubr.coef_[0] # Slopes
pVal = f_classif(X,y)[1] # P-Values
changes = len(metrics)*[-1]
if verbose:
"Pretty Print Thresholds"
table = Texttable()
table.set_cols_align(["l","l","l"])
table.set_cols_valign(["m","m","m"])
table.set_cols_dtype(['t', 't', 't'])
table_rows=[["Metric", "Threshold", "P-Value"]]
"Find Thresholds using VARL"
for Coeff, P_Val, idx in zip(coef, pVal, range(len(metrics))): #xrange(len(metrics)):
thresh = VARL(Coeff, inter, p0=0.065) # VARL with p0=0.065
if thresh>0 and P_Val<0.05:
if verbose: table_rows.append([metrics[idx], "%0.2f"%thresh, "%0.3f"%P_Val])
changes[idx]=thresh
if verbose:
table.add_rows(table_rows)
print(table.draw(), "\n")
#
# """ Apply Plans Sequentially
# """
# nChange = len(table_rows)-1
# testDF = csv2DF(test, toBin=True)
# buggy = [testDF.iloc[n].values.tolist() for n in xrange(testDF.shape[0]) if testDF.iloc[n][-1]>0]
# before = len(buggy)
# new =[]
# for n in xrange(nChange):
# new.append(["Reduce "+table_rows[n+1][0]])
# for _ in xrange(30):
# modified=[]
# for attr in buggy:
# modified.append(apply(changes, attr)[n])
#
# modified=pd.DataFrame(modified, columns = data_DF.columns)
# before, after = rforest(train, modified, tunings=None, bin = True, regress=False)
# gain = (1 - sum(after)/sum(before))*100
# new[n].append(gain)
#
# return new
""" Apply Plans Sequentially
"""
nChange = len([c for c in changes if c>0])
testDF = csv2DF(test, toBin=True)
buggy = [testDF.iloc[n].values.tolist() for n in xrange(testDF.shape[0]) if testDF.iloc[n][-1]>0]
before = len(buggy)
new =["Shatnawi"]
# for n in xrange(nChange):
for _ in xrange(50):
modified=[]
for attr in buggy:
modified.append(apply2(changes, attr))
modified=pd.DataFrame(modified, columns = data_DF.columns)
before, after = rforest(train, modified, tunings=None, bin = True, regress=False)
gain = (1 - sum(after)/sum(before))*100
new.append(gain)
return new
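# Note: shatnawi10 returns a list shaped for rdivDemo: the label "Shatnawi"
# followed by 50 repeated %-gain estimates, where each gain is the percentage
# reduction in predicted defects after capping metrics at their VARL thresholds.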
def alves10(train, test, rftrain=None, tunings=None, verbose=False):
import numpy as np
import matplotlib.pyplot as plt
data_DF=csv2DF(train, toBin=True)
metrics=[str[1:] for str in data_DF[data_DF.columns[:-1]]]
X = data_DF[data_DF.columns[:-1]].values # Independent Features (CK-Metrics)
try: tot_loc = data_DF.sum()["$loc"]
except: set_trace()
def entWeight(X):
Y =[]
for x in X:
loc = x[10] # LOC is the 10th index position.
Y.append([xx*loc/tot_loc for xx in x])
return Y
X = entWeight(X)
denom = pd.DataFrame(X).sum().values
norm_sum= pd.DataFrame(pd.DataFrame(X).values/denom, columns=metrics)
# set_trace()
y = data_DF[data_DF.columns[-1]].values # Dependent Feature (Bugs)
pVal = f_classif(X,y)[1] # P-Values
if verbose:
"Pretty Print Thresholds"
table = Texttable()
table.set_cols_align(["l","l","l"])
table.set_cols_valign(["m","m","m"])
table.set_cols_dtype(['t', 't', 't'])
table_rows=[["Metric", "Threshold", "P-Value"]]
"Find Thresholds"
cutoff=[]
cumsum = lambda vals: [sum(vals[:i]) for i, __ in enumerate(vals)]
def point(array):
for idx, val in enumerate(array):
if val>0.7: return idx
for idx in xrange(len(data_DF.columns[:-1])):
# Setup Cumulative Dist. Func.
name = metrics[idx]
loc = data_DF["$loc"].values
vals = norm_sum[name].values
sorted_ids = np.argsort(vals)
cumulative = [sum(vals[:i]) for i,__ in enumerate(sorted(vals))]
# set_trace()
if pVal[idx]<0.05:
cutpoint = point(cumulative)
cutoff.append(vals[sorted_ids[cutpoint]]*tot_loc/loc[sorted_ids[cutpoint]]*denom[idx])
if verbose:
try: table_rows.append([metrics[idx]
, "%0.2f"%(vals[sorted_ids[cutpoint]]*tot_loc/loc*denom[idx])
, "%0.3f"%pVal[idx]])
except: set_trace()
else:
cutoff.append(-1)
if verbose:
table.add_rows(table_rows)
print(table.draw(), "\n")
# """ Apply Plans Sequentially
# """
# nChange = len([c for c in cutoff if c>0])
# testDF = csv2DF(test, toBin=True)
# buggy = [testDF.iloc[n].values.tolist() for n in xrange(testDF.shape[0]) if testDF.iloc[n][-1]>0]
# before = len(buggy)
# new =[]
# for n in xrange(nChange):
# new.append(["Reduce "+table_rows[n+1][0]])
# for _ in xrange(10):
# modified=[]
# for attr in buggy:
# try: modified.append(apply(cutoff, attr)[n])
# except: set_trace()
#
# modified=pd.DataFrame(modified, columns = data_DF.columns)
# before, after = rforest(train, modified, tunings=None, bin = True, regress=False)
# gain = (1 - sum(after)/sum(before))*100
# new[n].append(gain)
#
# return new
""" Apply Plans Sequentially
"""
nChange = len([c for c in cutoff if c>0])
testDF = csv2DF(test, toBin=True)
buggy = [testDF.iloc[n].values.tolist() for n in xrange(testDF.shape[0]) if testDF.iloc[n][-1]>0]
before = len(buggy)
new =['Alves']
for n in xrange(nChange):
for _ in xrange(50):
modified=[]
for attr in buggy:
try: modified.append(apply2(cutoff, attr))
except: set_trace()
modified=pd.DataFrame(modified, columns = data_DF.columns)
before, after = rforest(train, modified, tunings=None, bin = True, regress=False)
gain = (1 - sum(after)/sum(before))*100
new.append(gain)
return new
def _testAlves():
for name in ['ant', 'ivy', 'jedit', 'lucene', 'poi']:
print("##", name, '\n')
train, test = explore(dir='../Data/Jureczko/', name=name)
alves10(train, test, verbose=True)
def __testAll():
for name in ['ant', 'ivy', 'jedit', 'lucene', 'poi']:
E = []
print("##", name)
train, test = explore(dir='../Data/Jureczko/', name=name)
# E.append(shatnawi10(train, test, verbose=False))
# E.append(alves10(train, test, verbose=False))
E.append(['CD'])
E[-1].extend([method1(train, test, justDeltas=False) for _ in xrange(10)])
# set_trace()
rdivDemo(E, isLatex=True, globalMinMax=True, high=100, low=0)
print("\n")
if __name__=="__main__":
from logo import logo
__testAll()
# _testAlves()
|
mit
|
xzh86/scikit-learn
|
doc/conf.py
|
210
|
8446
|
# -*- coding: utf-8 -*-
#
# scikit-learn documentation build configuration file, created by
# sphinx-quickstart on Fri Jan 8 09:13:42 2010.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from __future__ import print_function
import sys
import os
from sklearn.externals.six import u
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
sys.path.insert(0, os.path.abspath('sphinxext'))
from github_link import make_linkcode_resolve
# -- General configuration ---------------------------------------------------
# Try to override the matplotlib configuration as early as possible
try:
import gen_rst
except:
pass
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['gen_rst',
'sphinx.ext.autodoc', 'sphinx.ext.autosummary',
'sphinx.ext.pngmath', 'numpy_ext.numpydoc',
'sphinx.ext.linkcode',
]
autosummary_generate = True
autodoc_default_flags = ['members', 'inherited-members']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# generate autosummary even if no references
autosummary_generate = True
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# Generate the plots for the gallery
plot_gallery = True
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u('scikit-learn')
copyright = u('2010 - 2014, scikit-learn developers (BSD License)')
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import sklearn
version = sklearn.__version__
# The full version, including alpha/beta/rc tags.
release = sklearn.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be
# searched for source files.
exclude_trees = ['_build', 'templates', 'includes']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'scikit-learn'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {'oldversion': False, 'collapsiblesidebar': True,
'google_analytics': True, 'surveybanner': False,
'sprintbanner': True}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = 'scikit-learn'
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'logos/scikit-learn-logo-small.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'logos/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['images']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'scikit-learndoc'
# -- Options for LaTeX output ------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [('index', 'user_guide.tex', u('scikit-learn user guide'),
u('scikit-learn developers'), 'manual'), ]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = "logos/scikit-learn-logo.png"
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
latex_preamble = r"""
\usepackage{amsmath}\usepackage{amsfonts}\usepackage{bm}\usepackage{morefloats}
\usepackage{enumitem} \setlistdepth{10}
"""
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
latex_domain_indices = False
trim_doctests_flags = True
def generate_example_rst(app, what, name, obj, options, lines):
# generate empty examples files, so that we don't get
# inclusion errors if there are no examples for a class / module
examples_path = os.path.join(app.srcdir, "modules", "generated",
"%s.examples" % name)
if not os.path.exists(examples_path):
# touch file
open(examples_path, 'w').close()
def setup(app):
# to hide/show the prompt in code examples:
app.add_javascript('js/copybutton.js')
app.connect('autodoc-process-docstring', generate_example_rst)
# The following is used by sphinx.ext.linkcode to provide links to github
linkcode_resolve = make_linkcode_resolve('sklearn',
u'https://github.com/scikit-learn/'
'scikit-learn/blob/{revision}/'
'{package}/{path}#L{lineno}')
|
bsd-3-clause
|
jejjohnson/manifold_learning
|
src/python/se_demo.py
|
1
|
1844
|
import matplotlib.pyplot as plt
from time import time
from sklearn import (manifold, datasets)
from manifold_learning.se import SchroedingerEigenmaps
from data.get_hsi_data import get_data
from mpl_toolkits.mplot3d import Axes3D
Axes3D
# swiss-roll-style test (the data is actually an S-curve from make_s_curve)
# comparing my implementation against scikit-learn's
def swiss_roll_test():
n_points = 750
X, color = datasets.samples_generator.make_s_curve(n_points,
random_state=0)
fig = plt.figure(figsize=(5,10))
ax = fig.add_subplot(311, projection='3d')
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=color, label='Data', cmap=plt.cm.Spectral)
ax.set_title('Original Dataset')
n_neighbors=20
n_components=2
# Laplacian Eigenmaps (scikit-learn)
t0 = time()
ml_model = manifold.SpectralEmbedding(n_neighbors=n_neighbors,
n_components=n_components)
Y = ml_model.fit_transform(X)
t1 = time()
# 2D Projection
ax = fig.add_subplot(312)
ax.scatter(Y[:,0], Y[:,1], c=color, label='scikit', cmap=plt.cm.Spectral)
ax.set_title('Sklearn-LE: {t:.2g}'.format(t=t1-t0))
# Laplacian Eigenmaps (my version)
t0 = time()
ml_model = SchroedingerEigenmaps(n_components=n_components,
n_neighbors=n_neighbors)
Y = ml_model.fit_transform(X)
t1 = time()
# 2D Projection
ax = fig.add_subplot(313)
ax.scatter(Y[:,0], Y[:,1], c=color, label='my algorithm', cmap=plt.cm.Spectral)
ax.set_title('LE: {t:.2g}'.format(t=t1-t0))
# Todo: Schroedinger Eigenmaps (Spatial Spectral Potential)
# Todo: Schroedinger Eigenmaps (Partial Labels Potential)
plt.show()
def hsi_test():
# get indian pines data
img = get_data()
if __name__ == "__main__":
swiss_roll_test()
#hsi_test()
|
mit
|
faner-father/tushare
|
tushare/datayes/subject.py
|
10
|
32740
|
# -*- coding:utf-8 -*-
"""
DataYes (通联数据) subject and news data API
Created on 2015/08/24
@author: Jimmy Liu
@group : waditu
@contact: [email protected]
"""
from pandas.compat import StringIO
import pandas as pd
from tushare.util import vars as vs
from tushare.util.common import Client
from tushare.util import upass as up
class Subject():
def __init__(self, client=None):
if client is None:
self.client = Client(up.get_token())
else:
self.client = client
def SocialDataXQ(self, beginDate='', endDate='', ticker='', field=''):
"""
        Xueqiu (雪球) social statistics. Given one or more tickers and a statistics date range, returns the security's daily Xueqiu post count and post share (%). (Note: data start 2014/1/1, updated daily.)
"""
code, result = self.client.getData(vs.SOCIALDATAXQ%(beginDate, endDate, ticker, field))
return _ret_data(code, result)
def SocialDataXQByTicker(self, ticker='', field=''):
"""
        Xueqiu social data for a single ticker: the security's daily Xueqiu post count and post share (%). (Note: data start 2014/1/1, updated daily.)
"""
code, result = self.client.getData(vs.SOCIALDATAXQBYTICKER%(ticker, field))
return _ret_data(code, result)
def SocialDataXQByDate(self, statisticsDate='', field=''):
"""
        Xueqiu social data for a single statistics date: every security mentioned that day with its post count and post share (%). (Note: data start 2014/1/1, updated daily.)
"""
code, result = self.client.getData(vs.SOCIALDATAXQBYDATE%(statisticsDate, field))
return _ret_data(code, result)
def NewsInfo(self, newsID='', field=''):
"""
        Basic news information. Given a news ID, returns fields such as news ID, title, summary, original source, author, publishing source, publish time and insert time. (Note: from 2014/1/1 news sources are numerous, roughly 40k items per day; 2013 and earlier are much sparser. Updated in real time.)
"""
code, result = self.client.getData(vs.NEWSINFO%(newsID, field))
return _ret_data(code, result)
def NewsInfoByTime(self, newsPublishDate='', beginTime='', endTime='', field=''):
"""
        Basic information for news published within a time window on a given day. Given the publish date and a begin/end time, returns news ID, title, summary, original source, author, publishing source, publish time and insert time. (Note: from 2014/1/1 news sources are numerous, roughly 40k items per day; 2013 and earlier are much sparser. Updated in real time.)
"""
code, result = self.client.getData(vs.NEWSINFOBYTIME%(newsPublishDate, beginTime, endTime, field))
return _ret_data(code, result)
def NewsContent(self, newsID='', field=''):
"""
        Full news content. Given a news ID, returns news ID, title, summary, body text, source link, original source, author, publishing source, publish time and insert time. (Note: from 2014/1/1 news sources are numerous, roughly 40k items per day; 2013 and earlier are much sparser. Updated in real time.)
"""
code, result = self.client.getData(vs.NEWSCONTENT%(newsID, field))
return _ret_data(code, result)
def NewsContentByTime(self, newsPublishDate='', beginTime='', endTime='', field=''):
"""
        Full content for news published within a time window on a given day. Given the publish date and a begin/end time, returns news ID, title, summary, body text, source link, original source, author, publishing source, publish time and insert time. (Note: from 2014/1/1 news sources are numerous, roughly 40k items per day; 2013 and earlier are much sparser. Updated in real time.)
"""
code, result = self.client.getData(vs.NEWSCONTENTBYTIME%(newsPublishDate, beginTime, endTime, field))
return _ret_data(code, result)
def CompanyByNews(self, newsID='', field=''):
"""
        Companies associated with a news item, with per-company sentiment available. Given a news ID, returns company code and full name plus the news title, publish time and insert time; the company code can be fed to getSecID to find the company's securities. (Note: from 2014/1/1 news sources are numerous, roughly 40k items per day; earlier data are much sparser. Updated in real time.)
"""
code, result = self.client.getData(vs.COMPANYBYNEWS%(newsID, field))
return _ret_data(code, result)
def NewsByCompany(self, partyID='', beginDate='', endDate='', field=''):
"""
        News associated with a company, with per-company sentiment available. Given a company code (partyID) and a publish date range, returns news ID, title, publishing source, publish time and insert time. (Note: from 2014/1/1 news sources are numerous, roughly 40k items per day; earlier data are much sparser. Updated in real time.)
"""
code, result = self.client.getData(vs.NEWSBYCOMPANY%(partyID, beginDate, endDate, field))
return _ret_data(code, result)
def TickersByNews(self, newsID='', field=''):
"""
        Securities associated with a news item, with per-security sentiment available. Given a news ID, returns security code, short name and exchange plus the news title, publishing source, publish time and insert time. (Note: from 2014/1/1 news sources are numerous, roughly 40k items per day; earlier data are much sparser. Updated in real time.)
"""
code, result = self.client.getData(vs.TICKERSBYNEWS%(newsID, field))
return _ret_data(code, result)
def NewsByTickers(self, secID='', secShortName='', ticker='', beginDate='', endDate='', exchangeCD='', field=''):
"""
        News associated with securities, with per-security sentiment available. Given a security code or short name, a publish date range and optionally an exchange code, returns news ID, title, publishing source, publish time and insert time. (Note: from 2014/1/1 news sources are numerous, roughly 40k items per day; earlier data are much sparser. Updated in real time.)
"""
code, result = self.client.getData(vs.NEWSBYTICKERS%(secID, secShortName, ticker, beginDate, endDate, exchangeCD, field))
return _ret_data(code, result)
def ThemesContent(self, isMain='', themeID='', themeName='', themeSource='', field=''):
"""
        Basic information for all themes. Given a theme ID or name and a theme source, returns theme ID, name, description, source, whether it is active today, insert time and update time. (Note: theme baseline starts 2011/4/16; active status refreshed daily.)
"""
code, result = self.client.getData(vs.THEMESCONTENT%(isMain, themeID, themeName, themeSource, field))
return _ret_data(code, result)
def TickersByThemes(self, themeID='', themeName='', beginDate='', endDate='', isNew='', field=''):
"""
        Securities associated with a theme. Given a theme ID or name and optionally a date range, returns security code, short name and exchange plus three association scores, association start/end time, description, insert time and update time. (Note: theme-security associations start 2013/12/28 and are complete from December 2014; updated daily with association status refreshed.)
"""
code, result = self.client.getData(vs.TICKERSBYTHEMES%(themeID, themeName, beginDate, endDate, isNew, field))
return _ret_data(code, result)
def ThemesTickersInsert(self, themeID='', themeName='', beginDate='', endDate='', field=''):
"""
        Securities newly associated with a theme during a period. Given a theme ID or name and a date range, returns the newly added associations with security code, short name, exchange, three association scores, association start/end time, description, insert time and update time. (Note: theme-security associations start 2013/12/28 and are complete from December 2014; updated daily.)
"""
code, result = self.client.getData(vs.THEMESTICKERSINSERT%(themeID, themeName, beginDate, endDate, field))
return _ret_data(code, result)
def ThemesTickersDelete(self, themeID='', themeName='', beginDate='', endDate='', field=''):
"""
        Securities whose association with a theme was removed during a period. Given a theme ID or name and a date range, returns the removed associations with security code, short name, exchange, association start/end time, description, insert time and update time. (Note: theme-security associations start 2013/12/28 and are complete from December 2014; updated daily.)
"""
code, result = self.client.getData(vs.THEMESTICKERSDELETE%(themeID, themeName, beginDate, endDate, field))
return _ret_data(code, result)
def ThemesByTickers(self, secID='', secShortName='', ticker='', beginDate='', endDate='', exchangeCD='', field=''):
"""
        Themes associated with a security. Given an exchange code plus a security code or short name, and optionally a date range, returns the associated themes with three association scores, association start/end time, description, insert time and update time. (Note: theme-security associations start 2013/12/28 and are complete from December 2014; updated daily.)
"""
code, result = self.client.getData(vs.THEMESBYTICKERS%(secID, secShortName, ticker, beginDate, endDate, exchangeCD, field))
return _ret_data(code, result)
def ThemesPeriod(self, isLatest='', themeID='', themeName='', field=''):
"""
        Theme active periods. Given a theme ID or name, returns the theme's active time ranges; pass isLatest to get only the most recent active period. (Note: active-period data start 2013/1/1; a theme is active when its news volume reaches the activity threshold; updated daily.)
"""
code, result = self.client.getData(vs.THEMESPERIOD%(isLatest, themeID, themeName, field))
return _ret_data(code, result)
def ActiveThemes(self, date='', field=''):
"""
        Themes active on a given date. (Note: active-period data start 2013/1/1; a theme is active when its news volume reaches the activity threshold; updated daily.)
"""
code, result = self.client.getData(vs.ACTIVETHEMES%(date, field))
return _ret_data(code, result)
def ThemesSimilarity(self, themeID='', themeName='', field=''):
"""
        Themes similar to a given theme. Given a theme ID or name, returns similar theme IDs and names together with text similarity and associated-security similarity. Updated daily.
"""
code, result = self.client.getData(vs.THEMESSIMILARITY%(themeID, themeName, field))
return _ret_data(code, result)
def ThemesHeat(self, themeID='', themeName='', beginDate='', endDate='', field=''):
"""
        Theme heat data. Given a theme ID or name and optionally a date range, returns the theme's daily news count and heat, i.e. its share of all theme news that day (%). (Note: data start 2014/1/1, updated daily.)
"""
code, result = self.client.getData(vs.THEMESHEAT%(themeID, themeName, beginDate, endDate, field))
return _ret_data(code, result)
def SectorThemesByTickers(self, secID='', secShortName='', ticker='', beginDate='', endDate='', exchangeCD='', field=''):
"""
        Themes associated with a security, where the themes come from the Shenwan (申万) industry classification. Same inputs and outputs as getThemesByTickers. (Note: industry-derived associations start 2014/12/26; updated daily with association status refreshed.)
"""
code, result = self.client.getData(vs.SECTORTHEMESBYTICKERS%(secID, secShortName, ticker, beginDate, endDate, exchangeCD, field))
return _ret_data(code, result)
def WebThemesByTickers(self, secID='', secShortName='', ticker='', beginDate='', endDate='', exchangeCD='', field=''):
"""
        Themes associated with a security, where the themes come from the web. Same inputs and outputs as getThemesByTickers. (Note: web-derived associations start 2013/12/28 and are complete from December 2014; updated daily.)
"""
code, result = self.client.getData(vs.WEBTHEMESBYTICKERS%(secID, secShortName, ticker, beginDate, endDate, exchangeCD, field))
return _ret_data(code, result)
def NewsHeatIndex(self, beginDate='', endDate='', exchangeCD='', secID='', secShortName='', ticker='', field=''):
"""
        News heat index for securities. Given one or more tickers and a date range, returns the security's daily heat index, i.e. the share of all news that day associated with the security (%). (Note: meaningful from 2014/1/1; earlier data, back to 2004/10/28, fluctuate because sources are incomplete; counts all regular news matched to the security by the algorithm; updated daily.)
"""
code, result = self.client.getData(vs.NEWSHEATINDEX%(beginDate, endDate, exchangeCD, secID, secShortName, ticker, field))
return _ret_data(code, result)
def NewsSentimentIndex(self, beginDate='', endDate='', exchangeCD='', secID='', secShortName='', ticker='', field=''):
"""
        News sentiment index for securities. Given one or more tickers and a date range, returns the security's daily sentiment index, i.e. the mean sentiment of news associated with the security that day. (Note: meaningful from 2014/1/1; earlier data, back to 2004/10/28, fluctuate because sources are incomplete; counts all regular news matched to the security by the algorithm; updated daily.)
"""
code, result = self.client.getData(vs.NEWSSENTIMENTINDEX%(beginDate, endDate, exchangeCD, secID, secShortName, ticker, field))
return _ret_data(code, result)
def ReportByTicker(self, ticker='', beginDate='', endDate='', field=''):
"""
        Announcement classifications by ticker. Given one or more tickers and optionally a date range, returns announcement ID, name, exchange, the exchange's original category, the classification result, and classification insert/update time. (Note: classification data start 2009/1/5, updated daily.)
"""
code, result = self.client.getData(vs.REPORTBYTICKER%(ticker, beginDate, endDate, field))
return _ret_data(code, result)
def ReportByCategory(self, beginDate='', Category='', endDate='', field=''):
"""
        Announcements by category. Given one or more categories and optionally a date range, returns announcement ID, name, exchange, original category, publish time, assigned category, and classification insert/update time. (Note: classification data start 2009/1/5, updated daily.)
"""
code, result = self.client.getData(vs.REPORTBYCATEGORY%(beginDate, Category, endDate, field))
return _ret_data(code, result)
def ReportContent(self, ticker='', beginDate='', endDate='', field=''):
"""
        Announcement content by ticker. Given one or more tickers and optionally a date range, returns announcement ID, name, exchange, original category, publish time, full content, link and insert time. (Note: announcement data start 2000/1/8, updated daily.)
"""
code, result = self.client.getData(vs.REPORTCONTENT%(ticker, beginDate, endDate, field))
return _ret_data(code, result)
def ActiveThemesInsert(self, beginDate='', endDate='', isLatest='', themeSource='', field=''):
"""
        Themes that became active during a period; the date parameters filter on the active period's start time. Given a date range, isLatest and theme source, returns theme ID, name, active start/end time, the isLatest flag, insert time and update time. (Note: active-period data start 2013/1/1; updated daily.)
"""
code, result = self.client.getData(vs.ACTIVETHEMESINSERT%(beginDate, endDate, isLatest, themeSource, field))
return _ret_data(code, result)
def ActiveThemesDelete(self, beginDate='', endDate='', isLatest='', themeSource='', field=''):
"""
        Themes that stopped being active during a period; the date parameters filter on the active period's end time. Given a date range, isLatest and theme source, returns theme ID, name, active start/end time, the isLatest flag, insert time and update time. (Note: active-period data start 2013/1/1; updated daily; themes that stop being active today only become available after 9:00 the next day.)
"""
code, result = self.client.getData(vs.ACTIVETHEMESDELETE%(beginDate, endDate, isLatest, themeSource, field))
return _ret_data(code, result)
def ThemesCluster(self, isMain='', themeID='', themeName='', field=''):
"""
        Cluster mapping for today's active themes. Given the main theme ID or name of a cluster, returns the themes in the same cluster with theme ID, name, insert time and update time. (Note: fetch today's clustered themes from getThemesContent first; pass isMain=0 to drop each theme's self-mapping; only today's data are available and they are refreshed daily.)
"""
code, result = self.client.getData(vs.THEMESCLUSTER%(isMain, themeID, themeName, field))
return _ret_data(code, result)
def ThemesByNews(self, insertDate='', newsID='', beginTime='', endTime='', field=''):
"""
        Themes associated with news. Given a news ID or an insert-time range for the news-theme association, returns theme ID and name plus the news title, publish time, association insert time and update time. (Note: from 2014/1/1 news sources are numerous, roughly 40k items per day; earlier data are much sparser. Updated in real time; association records start 2015/04/07.)
"""
code, result = self.client.getData(vs.THEMESBYNEWS%(insertDate, newsID, beginTime, endTime, field))
return _ret_data(code, result)
def ThemesByNewsCompanyRel(self, insertDate='', newsID='', beginTime='', endTime='', field=''):
"""
        Themes associated with news, restricted to company-related news. Same inputs, outputs and notes as getThemesByNews.
"""
code, result = self.client.getData(vs.THEMESBYNEWSCOMPANYREL%(insertDate, newsID, beginTime, endTime, field))
return _ret_data(code, result)
def ThemesInsertDB(self, beginDate='', endDate='', themeSource='', field=''):
"""
        Themes newly added to the database during a period. Given a date range and optionally a theme source, returns theme ID, name, description, source, whether it is active today, insert time and update time. (Note: theme baseline starts 2011/4/16; active status refreshed daily.)
"""
code, result = self.client.getData(vs.THEMESINSERTDB%(beginDate, endDate, themeSource, field))
return _ret_data(code, result)
def ThemesByNewsLF(self, insertDate='', newsID='', beginTime='', endTime='', field=''):
"""
        Themes associated with news, a filtered and optimized version of getThemesByNews. Same inputs, outputs and notes.
"""
code, result = self.client.getData(vs.THEMESBYNEWSLF%(insertDate, newsID, beginTime, endTime, field))
return _ret_data(code, result)
def ThemesByNewsMF(self, insertDate='', newsID='', beginTime='', endTime='', field=''):
"""
        Themes associated with news, a further-filtered version of getThemesByNewsLF and the strictest (smallest) of the news-theme APIs. Same inputs, outputs and notes.
"""
code, result = self.client.getData(vs.THEMESBYNEWSMF%(insertDate, newsID, beginTime, endTime, field))
return _ret_data(code, result)
def NewsInfoByInsertTime(self, newsInsertDate='', beginTime='', endTime='', field=''):
"""
        Basic information for news inserted into the database within a time window on a given day. Given the insert date and a begin/end time, returns news ID, title, summary, original source, author, publishing source, publish time and insert time. (Note: from 2014/1/1 news sources are numerous, roughly 40k items per day; earlier data are much sparser. Updated in real time.)
"""
code, result = self.client.getData(vs.NEWSINFOBYINSERTTIME%(newsInsertDate, beginTime, endTime, field))
return _ret_data(code, result)
def NewsContentByInsertTime(self, newsInsertDate='', beginTime='', endTime='', field=''):
"""
        Full content for news inserted into the database within a time window on a given day. Given the insert date and a begin/end time, returns news ID, title, summary, body text, source link, original source, author, publishing source, publish time and insert time. (Note: from 2014/1/1 news sources are numerous, roughly 40k items per day; earlier data are much sparser. Updated in real time.)
"""
code, result = self.client.getData(vs.NEWSCONTENTBYINSERTTIME%(newsInsertDate, beginTime, endTime, field))
return _ret_data(code, result)
def SocialDataGuba(self, beginDate='', endDate='', ticker='', field=''):
"""
        Guba (股吧) social heat statistics for securities. Given one or more tickers and a date range, returns the security's daily Guba post count and post share (%). (Note: data start 2014/1/1, updated daily.)
"""
code, result = self.client.getData(vs.SOCIALDATAGUBA%(beginDate, endDate, ticker, field))
return _ret_data(code, result)
def SocialThemeDataGuba(self, beginDate='', endDate='', themeID='', field=''):
"""
        Guba social heat statistics for themes. Given one or more theme IDs and a date range, returns the theme's daily Guba post count and post share (%). (Note: data start 2014/1/1, updated daily.)
"""
code, result = self.client.getData(vs.SOCIALTHEMEDATAGUBA%(beginDate, endDate, themeID, field))
return _ret_data(code, result)
def ThemesByNewsTime(self, publishBeginTime='', publishEndTime='', field=''):
"""
        Themes associated with news, queried by publish time. Given a publish-time range, returns theme ID and name plus the news title, publish time, association insert time and update time. (Note: from 2014/1/1 news sources are numerous, roughly 40k items per day; earlier data are much sparser. Updated in real time; association records start 2015/04/07.)
"""
code, result = self.client.getData(vs.THEMESBYNEWSTIME%(publishBeginTime, publishEndTime, field))
return _ret_data(code, result)
def ThemesByNewsTimeCompanyRel(self, publishBeginTime='', publishEndTime='', field=''):
"""
        Themes associated with news by publish time, restricted to company-related news. Same inputs, outputs and notes as getThemesByNewsTime.
"""
code, result = self.client.getData(vs.THEMESBYNEWSTIMECOMPANYREL%(publishBeginTime, publishEndTime, field))
return _ret_data(code, result)
def ThemesByNewsTimeLF(self, publishBeginTime='', publishEndTime='', field=''):
"""
        Themes associated with news by publish time, a filtered and optimized version of getThemesByNewsTime. Same inputs, outputs and notes.
"""
code, result = self.client.getData(vs.THEMESBYNEWSTIMELF%(publishBeginTime, publishEndTime, field))
return _ret_data(code, result)
def ThemesByNewsTimeMF(self, publishBeginTime='', publishEndTime='', field=''):
"""
        Themes associated with news by publish time, a further-filtered version of getThemesByNewsTimeLF and the strictest (smallest) of these APIs. Same inputs, outputs and notes.
"""
code, result = self.client.getData(vs.THEMESBYNEWSTIMEMF%(publishBeginTime, publishEndTime, field))
return _ret_data(code, result)
def ReportContentByID(self, reportID='', field=''):
"""
        Announcement content by announcement ID. Returns announcement ID, name, exchange, original category, publish time, full content, link and insert time. (Note: announcement data start 2000/1/8, updated daily.)
"""
code, result = self.client.getData(vs.REPORTCONTENTBYID%(reportID, field))
return _ret_data(code, result)
def ThemesByNews2(self, insertBeginTime='', insertEndTime='', newsID='', field=''):
"""
        Themes associated with news, an upgraded version of getThemesByNews. Given a news ID or an association insert-time range, returns theme ID and name plus the news title, publish time, association insert time and update time. (Note: from 2014/1/1 news sources are numerous, roughly 40k items per day; earlier data are much sparser. Updated in real time; association records start 2015/06/17.)
"""
code, result = self.client.getData(vs.THEMESBYNEWS2%(insertBeginTime, insertEndTime, newsID, field))
return _ret_data(code, result)
def ThemesByNewsTime2(self, publishBeginTime='', publishEndTime='', field=''):
"""
        Themes associated with news by publish time, an upgraded version of getThemesByNewsTime. Given a publish-time range, returns theme ID and name plus the news title, publish time, association insert time and update time. (Note: from 2014/1/1 news sources are numerous, roughly 40k items per day; earlier data are much sparser. Updated in real time; association records start 2015/06/17.)
"""
code, result = self.client.getData(vs.THEMESBYNEWSTIME2%(publishBeginTime, publishEndTime, field))
return _ret_data(code, result)
def _ret_data(code, result):
if code==200:
result = result.decode('utf-8') if vs.PY3 else result
df = pd.read_csv(StringIO(result))
return df
else:
print(result)
return None
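# Illustrative usage sketch (the token and query values below are placeholders,
# not part of the original module; assumes upass.set_token is available):
#
# from tushare.util import upass as up
# up.set_token('YOUR_DATAYES_TOKEN')  # store the DataYes access token
# st = Subject()                      # builds a Client from the stored token
# df = st.SocialDataXQ(beginDate='20150101', endDate='20150131', ticker='000001')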
|
bsd-3-clause
|
eickenberg/scikit-learn
|
examples/decomposition/plot_ica_blind_source_separation.py
|
349
|
2228
|
"""
=====================================
Blind source separation using FastICA
=====================================
An example of estimating sources from noisy data.
:ref:`ICA` is used to estimate sources given noisy measurements.
Imagine 3 instruments playing simultaneously and 3 microphones
recording the mixed signals. ICA is used to recover the sources
i.e., what is played by each instrument. Importantly, PCA fails
at recovering our `instruments` since the related signals reflect
non-Gaussian processes.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal
from sklearn.decomposition import FastICA, PCA
###############################################################################
# Generate sample data
np.random.seed(0)
n_samples = 2000
time = np.linspace(0, 8, n_samples)
s1 = np.sin(2 * time) # Signal 1 : sinusoidal signal
s2 = np.sign(np.sin(3 * time)) # Signal 2 : square signal
s3 = signal.sawtooth(2 * np.pi * time) # Signal 3: saw tooth signal
S = np.c_[s1, s2, s3]
S += 0.2 * np.random.normal(size=S.shape) # Add noise
S /= S.std(axis=0) # Standardize data
# Mix data
A = np.array([[1, 1, 1], [0.5, 2, 1.0], [1.5, 1.0, 2.0]]) # Mixing matrix
X = np.dot(S, A.T) # Generate observations
# Compute ICA
ica = FastICA(n_components=3)
S_ = ica.fit_transform(X) # Reconstruct signals
A_ = ica.mixing_ # Get estimated mixing matrix
# We can `prove` that the ICA model applies by reverting the unmixing.
assert np.allclose(X, np.dot(S_, A_.T) + ica.mean_)
# For comparison, compute PCA
pca = PCA(n_components=3)
H = pca.fit_transform(X) # Reconstruct signals based on orthogonal components
###############################################################################
# Plot results
plt.figure()
models = [X, S, S_, H]
names = ['Observations (mixed signal)',
'True Sources',
'ICA recovered signals',
'PCA recovered signals']
colors = ['red', 'steelblue', 'orange']
for ii, (model, name) in enumerate(zip(models, names), 1):
plt.subplot(4, 1, ii)
plt.title(name)
for sig, color in zip(model.T, colors):
plt.plot(sig, color=color)
plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.46)
plt.show()
|
bsd-3-clause
|
dkdbProjects/server-result-sender
|
defects.py
|
1
|
2531
|
#!/usr/bin/python
# Import the necessary modules and libraries
import threading
import time
import numpy as np
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestClassifier
defects_regr = ()
#np.set_printoptions(precision=3, suppress=True)
def aver_std_array( data, values ):
# initialize new_data array
new_data = ()
# round by
data = np.around(data, decimals=1)
# reshape in 'rows' = 'len(data)/values', columns = 'values'
rows = len(data)/values
data.resize(rows*values, 1)
data = np.array(data).reshape(rows, values)
# this axis need to get line regrassion parameters
for row in data:
new_data = np.append(new_data, [np.average(row), np.std(row)])
# print np.array([np.average(row), np.std(row)]);
return new_data
def label_array( data, values ):
new_data = ()
data = np.array(data).astype(int)
data.resize(len(data)/values, values)
for row in data:
counts = np.bincount(row)
# print np.argmax(counts)
new_data = np.append(new_data, [np.argmax(counts)])
return new_data
defects_time_index = 0
defects_time_prev = 0
def find_actions(data, times):
# TODO: static vars in C-style?
global defects_time_index
global defects_time_prev
delta_time = times[defects_time_index] - defects_time_prev
row = data[defects_time_index]
result = predict_defect(row, delta_time/1000)
defects_time_prev = times[defects_time_index]
print "Time: %f" % defects_time_prev
defects_time_index += 1
if defects_time_index < len(times) :
next_call_time = (times[defects_time_index] - defects_time_prev)/1000.0
print "Next call: %f" % next_call_time
threading.Timer(next_call_time, find_actions, [data, times]).start()
else :
print len(times)
print "time is out %d" % defects_time_index
return result
def predict_defect( data, time):
global defects_regr
data = np.array(data).reshape(1, 2)
predicted_test = defects_regr.predict(data)
acceleration = data.item((0, 0))
print "Predicted %d" % predicted_test[0]
return predicted_test[0]
def init_defects_module(values, trees, data, labels):
# Fit regression model
global defects_regr
defects_regr = RandomForestClassifier(n_estimators=trees)
defects_regr.fit(data[:, [0,1]], labels)
print "init_defects_module: ", defects_regr.feature_importances_
return
def predicted(data):
return defects_regr.predict(data)
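# Illustrative wiring sketch (hypothetical data shapes, not part of the original
# script): fit the forest first, then schedule predictions over timestamped
# [average, std] rows.
#
# import numpy as np
# train = np.random.rand(100, 2)         # one [average, std] pair per window
# labels = np.random.randint(0, 3, 100)  # majority defect label per window
# init_defects_module(values=10, trees=50, data=train, labels=labels)
# rows = np.random.rand(5, 2)
# times = [0, 1000, 2000, 3000, 4000]    # milliseconds
# find_actions(rows, times)              # re-schedules itself via threading.Timer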
|
gpl-3.0
|
jmontoyam/mne-python
|
examples/preprocessing/plot_find_eog_artifacts.py
|
24
|
1228
|
"""
==================
Find EOG artifacts
==================
Locate peaks of EOG to spot blinks and general EOG artifacts.
"""
# Authors: Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
###############################################################################
# Set parameters
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
# Setup for reading the raw data
raw = io.read_raw_fif(raw_fname)
event_id = 998
eog_events = mne.preprocessing.find_eog_events(raw, event_id)
# Read epochs
picks = mne.pick_types(raw.info, meg=False, eeg=False, stim=False, eog=True,
exclude='bads')
tmin, tmax = -0.2, 0.2
epochs = mne.Epochs(raw, eog_events, event_id, tmin, tmax, picks=picks)
data = epochs.get_data()
print("Number of detected EOG artifacts : %d" % len(data))
###############################################################################
# Plot EOG artifacts
plt.plot(1e3 * epochs.times, np.squeeze(data).T)
plt.xlabel('Times (ms)')
plt.ylabel('EOG (muV)')
plt.show()
|
bsd-3-clause
|
smartscheduling/scikit-learn-categorical-tree
|
sklearn/metrics/pairwise.py
|
13
|
41710
|
# -*- coding: utf-8 -*-
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Robert Layton <[email protected]>
# Andreas Mueller <[email protected]>
# Philippe Gervais <[email protected]>
# Lars Buitinck <[email protected]>
# Joel Nothman <[email protected]>
# License: BSD 3 clause
import itertools
import numpy as np
from scipy.spatial import distance
from scipy.sparse import csr_matrix
from scipy.sparse import issparse
from ..utils import check_array
from ..utils import gen_even_slices
from ..utils import gen_batches
from ..utils.fixes import partial
from ..utils.extmath import row_norms, safe_sparse_dot
from ..preprocessing import normalize
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
from ..externals.joblib.parallel import cpu_count
from .pairwise_fast import _chi2_kernel_fast, _sparse_manhattan
# Utility Functions
def _return_float_dtype(X, Y):
"""
1. If dtype of X and Y is float32, then dtype float32 is returned.
2. Else dtype float is returned.
"""
if not issparse(X) and not isinstance(X, np.ndarray):
X = np.asarray(X)
if Y is None:
Y_dtype = X.dtype
elif not issparse(Y) and not isinstance(Y, np.ndarray):
Y = np.asarray(Y)
Y_dtype = Y.dtype
else:
Y_dtype = Y.dtype
if X.dtype == Y_dtype == np.float32:
dtype = np.float32
else:
dtype = np.float
return X, Y, dtype
def check_pairwise_arrays(X, Y):
""" Set X and Y appropriately and checks inputs
If Y is None, it is set as a pointer to X (i.e. not a copy).
If Y is given, this does not happen.
All distance metrics should use this function first to assert that the
given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats. Finally, the function checks that the size
of the second dimension of the two arrays is equal.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
Returns
-------
safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y, dtype = _return_float_dtype(X, Y)
if Y is X or Y is None:
X = Y = check_array(X, accept_sparse='csr', dtype=dtype)
else:
X = check_array(X, accept_sparse='csr', dtype=dtype)
Y = check_array(Y, accept_sparse='csr', dtype=dtype)
if X.shape[1] != Y.shape[1]:
raise ValueError("Incompatible dimension for X and Y matrices: "
"X.shape[1] == %d while Y.shape[1] == %d" % (
X.shape[1], Y.shape[1]))
return X, Y
def check_paired_arrays(X, Y):
""" Set X and Y appropriately and checks inputs for paired distances
All paired distance metrics should use this function first to assert that
the given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats. Finally, the function checks that the size
of the dimensions of the two arrays are equal.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
Returns
-------
safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y = check_pairwise_arrays(X, Y)
if X.shape != Y.shape:
raise ValueError("X and Y should be of same shape. They were "
"respectively %r and %r long." % (X.shape, Y.shape))
return X, Y
# Pairwise distances
def euclidean_distances(X, Y=None, Y_norm_squared=None, squared=False):
"""
Considering the rows of X (and Y=X) as vectors, compute the
distance matrix between each pair of vectors.
For efficiency reasons, the euclidean distance between a pair of row
vector x and y is computed as::
dist(x, y) = sqrt(dot(x, x) - 2 * dot(x, y) + dot(y, y))
This formulation has two advantages over other ways of computing distances.
First, it is computationally efficient when dealing with sparse data.
Second, if x varies but y remains unchanged, then the right-most dot
product `dot(y, y)` can be pre-computed.
However, this is not the most precise way of doing this computation, and
the distance matrix returned by this function may not be exactly
symmetric as required by, e.g., ``scipy.spatial.distance`` functions.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_1, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_2, n_features)
Y_norm_squared : array-like, shape (n_samples_2, ), optional
Pre-computed dot-products of vectors in Y (e.g.,
``(Y**2).sum(axis=1)``)
squared : boolean, optional
Return squared Euclidean distances.
Returns
-------
distances : {array, sparse matrix}, shape (n_samples_1, n_samples_2)
Examples
--------
>>> from sklearn.metrics.pairwise import euclidean_distances
>>> X = [[0, 1], [1, 1]]
>>> # distance between rows of X
>>> euclidean_distances(X, X)
array([[ 0., 1.],
[ 1., 0.]])
>>> # get distance to origin
>>> euclidean_distances(X, [[0, 0]])
array([[ 1. ],
[ 1.41421356]])
See also
--------
paired_distances : distances betweens pairs of elements of X and Y.
"""
# should not need X_norm_squared because if you could precompute that as
# well as Y, then you should just pre-compute the output and not even
# call this function.
X, Y = check_pairwise_arrays(X, Y)
if Y_norm_squared is not None:
YY = check_array(Y_norm_squared)
if YY.shape != (1, Y.shape[0]):
raise ValueError(
"Incompatible dimensions for Y and Y_norm_squared")
else:
YY = row_norms(Y, squared=True)[np.newaxis, :]
if X is Y: # shortcut in the common case euclidean_distances(X, X)
XX = YY.T
else:
XX = row_norms(X, squared=True)[:, np.newaxis]
distances = safe_sparse_dot(X, Y.T, dense_output=True)
distances *= -2
distances += XX
distances += YY
np.maximum(distances, 0, out=distances)
if X is Y:
# Ensure that distances between vectors and themselves are set to 0.0.
# This may not be the case due to floating point rounding errors.
distances.flat[::distances.shape[0] + 1] = 0.0
return distances if squared else np.sqrt(distances, out=distances)
def pairwise_distances_argmin_min(X, Y, axis=1, metric="euclidean",
batch_size=500, metric_kwargs=None):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance). The minimal distances are
also returned.
This is mostly equivalent to calling:
(pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis),
pairwise_distances(X, Y=Y, metric=metric).min(axis=axis))
but uses much less memory, and is faster for large arrays.
Parameters
----------
X, Y : {array-like, sparse matrix}
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
batch_size : integer
To reduce memory consumption over the naive solution, data are
processed in batches, comprising batch_size rows of X and
batch_size rows of Y. The default value is quite conservative, but
can be changed for fine-tuning. The larger the number, the larger the
memory usage.
metric : string or callable, default 'euclidean'
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_kwargs : dict, optional
Keyword arguments to pass to specified metric function.
axis : int, optional, default 1
Axis along which the argmin and distances are to be computed.
Returns
-------
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
distances : numpy.ndarray
distances[i] is the distance between the i-th row in X and the
argmin[i]-th row in Y.
See also
--------
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin
"""
dist_func = None
if metric in PAIRWISE_DISTANCE_FUNCTIONS:
dist_func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif not callable(metric) and not isinstance(metric, str):
raise ValueError("'metric' must be a string or a callable")
X, Y = check_pairwise_arrays(X, Y)
if metric_kwargs is None:
metric_kwargs = {}
if axis == 0:
X, Y = Y, X
# Allocate output arrays
indices = np.empty(X.shape[0], dtype=np.intp)
values = np.empty(X.shape[0])
values.fill(np.infty)
for chunk_x in gen_batches(X.shape[0], batch_size):
X_chunk = X[chunk_x, :]
for chunk_y in gen_batches(Y.shape[0], batch_size):
Y_chunk = Y[chunk_y, :]
if dist_func is not None:
if metric == 'euclidean': # special case, for speed
d_chunk = safe_sparse_dot(X_chunk, Y_chunk.T,
dense_output=True)
d_chunk *= -2
d_chunk += row_norms(X_chunk, squared=True)[:, np.newaxis]
d_chunk += row_norms(Y_chunk, squared=True)[np.newaxis, :]
np.maximum(d_chunk, 0, d_chunk)
else:
d_chunk = dist_func(X_chunk, Y_chunk, **metric_kwargs)
else:
d_chunk = pairwise_distances(X_chunk, Y_chunk,
metric=metric, **metric_kwargs)
# Update indices and minimum values using chunk
min_indices = d_chunk.argmin(axis=1)
min_values = d_chunk[np.arange(chunk_x.stop - chunk_x.start),
min_indices]
flags = values[chunk_x] > min_values
indices[chunk_x][flags] = min_indices[flags] + chunk_y.start
values[chunk_x][flags] = min_values[flags]
if metric == "euclidean" and not metric_kwargs.get("squared", False):
np.sqrt(values, values)
return indices, values
def pairwise_distances_argmin(X, Y, axis=1, metric="euclidean",
batch_size=500, metric_kwargs=None):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance).
This is mostly equivalent to calling:
pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis)
but uses much less memory, and is faster for large arrays.
This function works with dense 2D arrays only.
Parameters
==========
X : array-like
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
Y : array-like
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
batch_size : integer
To reduce memory consumption over the naive solution, data are
processed in batches, comprising batch_size rows of X and
batch_size rows of Y. The default value is quite conservative, but
can be changed for fine-tuning. The larger the number, the larger the
memory usage.
metric : string or callable
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_kwargs : dict
keyword arguments to pass to specified metric function.
axis : int, optional, default 1
Axis along which the argmin and distances are to be computed.
Returns
=======
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
See also
========
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin_min
"""
if metric_kwargs is None:
metric_kwargs = {}
return pairwise_distances_argmin_min(X, Y, axis, metric, batch_size,
metric_kwargs)[0]
def manhattan_distances(X, Y=None, sum_over_features=True,
size_threshold=5e8):
""" Compute the L1 distances between the vectors in X and Y.
With sum_over_features equal to False it returns the componentwise
distances.
Parameters
----------
X : array_like
An array with shape (n_samples_X, n_features).
Y : array_like, optional
An array with shape (n_samples_Y, n_features).
sum_over_features : bool, default=True
If True the function returns the pairwise distance matrix
else it returns the componentwise L1 pairwise-distances.
Not supported for sparse matrix inputs.
size_threshold : int, default=5e8
Unused parameter.
Returns
-------
D : array
If sum_over_features is False shape is
(n_samples_X * n_samples_Y, n_features) and D contains the
componentwise L1 pairwise-distances (ie. absolute difference),
else shape is (n_samples_X, n_samples_Y) and D contains
the pairwise L1 distances.
Examples
--------
>>> from sklearn.metrics.pairwise import manhattan_distances
>>> manhattan_distances(3, 3)#doctest:+ELLIPSIS
array([[ 0.]])
>>> manhattan_distances(3, 2)#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances(2, 3)#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances([[1, 2], [3, 4]],\
[[1, 2], [0, 3]])#doctest:+ELLIPSIS
array([[ 0., 2.],
[ 4., 4.]])
>>> import numpy as np
>>> X = np.ones((1, 2))
>>> y = 2 * np.ones((2, 2))
>>> manhattan_distances(X, y, sum_over_features=False)#doctest:+ELLIPSIS
array([[ 1., 1.],
[ 1., 1.]]...)
"""
X, Y = check_pairwise_arrays(X, Y)
if issparse(X) or issparse(Y):
if not sum_over_features:
raise TypeError("sum_over_features=%r not supported"
" for sparse matrices" % sum_over_features)
X = csr_matrix(X, copy=False)
Y = csr_matrix(Y, copy=False)
D = np.zeros((X.shape[0], Y.shape[0]))
_sparse_manhattan(X.data, X.indices, X.indptr,
Y.data, Y.indices, Y.indptr,
X.shape[1], D)
return D
if sum_over_features:
return distance.cdist(X, Y, 'cityblock')
D = X[:, np.newaxis, :] - Y[np.newaxis, :, :]
D = np.abs(D, D)
return D.reshape((-1, X.shape[1]))
def cosine_distances(X, Y=None):
"""
Compute cosine distance between samples in X and Y.
Cosine distance is defined as 1.0 minus the cosine similarity.
Parameters
----------
X : array_like, sparse matrix
with shape (n_samples_X, n_features).
Y : array_like, sparse matrix (optional)
with shape (n_samples_Y, n_features).
Returns
-------
distance matrix : array
An array with shape (n_samples_X, n_samples_Y).
See also
--------
sklearn.metrics.pairwise.cosine_similarity
scipy.spatial.distance.cosine (dense matrices only)
"""
# 1.0 - cosine_similarity(X, Y) without copy
S = cosine_similarity(X, Y)
S *= -1
S += 1
return S
# Paired distances
def paired_euclidean_distances(X, Y):
"""
Computes the paired euclidean distances between X and Y
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray (n_samples, )
"""
X, Y = check_paired_arrays(X, Y)
return row_norms(X - Y)
def paired_manhattan_distances(X, Y):
"""Compute the L1 distances between the vectors in X and Y.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray (n_samples, )
"""
X, Y = check_paired_arrays(X, Y)
diff = X - Y
if issparse(diff):
diff.data = np.abs(diff.data)
return np.squeeze(np.array(diff.sum(axis=1)))
else:
return np.abs(diff).sum(axis=-1)
def paired_cosine_distances(X, Y):
"""
Computes the paired cosine distances between X and Y
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray, shape (n_samples, )
Notes
------
    The cosine distance is equivalent to half the squared euclidean
    distance if each sample is normalized to unit norm.
"""
X, Y = check_paired_arrays(X, Y)
return .5 * row_norms(normalize(X) - normalize(Y), squared=True)
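# Quick numeric illustration of the note above (hypothetical inputs, not part of
# the scikit-learn source): for X = [[1, 0]] and Y = [[0, 1]], both rows already
# have unit norm, the cosine similarity is 0, so the cosine distance is 1.0, and
# 0.5 * ||x - y||^2 = 0.5 * 2 = 1.0 agrees with it.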
PAIRED_DISTANCES = {
'cosine': paired_cosine_distances,
'euclidean': paired_euclidean_distances,
'l2': paired_euclidean_distances,
'l1': paired_manhattan_distances,
'manhattan': paired_manhattan_distances,
'cityblock': paired_manhattan_distances}
def paired_distances(X, Y, metric="euclidean", **kwds):
"""
Computes the paired distances between X and Y.
Computes the distances between (X[0], Y[0]), (X[1], Y[1]), etc...
Parameters
----------
X : ndarray (n_samples, n_features)
Array 1 for distance computation.
Y : ndarray (n_samples, n_features)
Array 2 for distance computation.
metric : string or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
specified in PAIRED_DISTANCES, including "euclidean",
"manhattan", or "cosine".
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
Returns
-------
distances : ndarray (n_samples, )
Examples
--------
>>> from sklearn.metrics.pairwise import paired_distances
>>> X = [[0, 1], [1, 1]]
>>> Y = [[0, 1], [2, 1]]
>>> paired_distances(X, Y)
array([ 0., 1.])
See also
--------
pairwise_distances : pairwise distances.
"""
if metric in PAIRED_DISTANCES:
func = PAIRED_DISTANCES[metric]
return func(X, Y)
elif callable(metric):
# Check the matrix first (it is usually done by the metric)
X, Y = check_paired_arrays(X, Y)
distances = np.zeros(len(X))
for i in range(len(X)):
distances[i] = metric(X[i], Y[i])
return distances
else:
raise ValueError('Unknown distance %s' % metric)
# Kernels
def linear_kernel(X, Y=None):
"""
Compute the linear kernel between X and Y.
Parameters
----------
X : array of shape (n_samples_1, n_features)
Y : array of shape (n_samples_2, n_features)
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
return safe_sparse_dot(X, Y.T, dense_output=True)
def polynomial_kernel(X, Y=None, degree=3, gamma=None, coef0=1):
"""
Compute the polynomial kernel between X and Y::
K(X, Y) = (gamma <X, Y> + coef0)^degree
Parameters
----------
X : ndarray of shape (n_samples_1, n_features)
Y : ndarray of shape (n_samples_2, n_features)
coef0 : int, default 1
degree : int, default 3
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
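    Examples
    --------
    An illustrative sketch; with ``gamma=1``, ``coef0=0`` and ``degree=2``
    the kernel reduces to the squared dot product:

    >>> from sklearn.metrics.pairwise import polynomial_kernel
    >>> X = [[1., 2.]]
    >>> Y = [[3., 4.]]
    >>> float(polynomial_kernel(X, Y, degree=2, gamma=1, coef0=0)[0, 0])
    121.0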
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
K **= degree
return K
def sigmoid_kernel(X, Y=None, gamma=None, coef0=1):
"""
Compute the sigmoid kernel between X and Y::
K(X, Y) = tanh(gamma <X, Y> + coef0)
Parameters
----------
X : ndarray of shape (n_samples_1, n_features)
Y : ndarray of shape (n_samples_2, n_features)
coef0 : int, default 1
Returns
-------
Gram matrix: array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
np.tanh(K, K) # compute tanh in-place
return K
def rbf_kernel(X, Y=None, gamma=None):
"""
Compute the rbf (gaussian) kernel between X and Y::
K(x, y) = exp(-gamma ||x-y||^2)
for each pair of rows x in X and y in Y.
Parameters
----------
X : array of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
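    Examples
    --------
    An illustrative sketch; two points at squared distance 1 with
    ``gamma=1`` give ``exp(-1)``:

    >>> from sklearn.metrics.pairwise import rbf_kernel
    >>> X = [[0., 0.], [1., 0.]]
    >>> K = rbf_kernel(X, gamma=1.)
    >>> round(float(K[0, 1]), 4)   # exp(-1)
    0.3679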
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = euclidean_distances(X, Y, squared=True)
K *= -gamma
np.exp(K, K) # exponentiate K in-place
return K
def cosine_similarity(X, Y=None):
"""Compute cosine similarity between samples in X and Y.
Cosine similarity, or the cosine kernel, computes similarity as the
normalized dot product of X and Y:
K(X, Y) = <X, Y> / (||X||*||Y||)
On L2-normalized data, this function is equivalent to linear_kernel.
Parameters
----------
X : array_like, sparse matrix
with shape (n_samples_X, n_features).
Y : array_like, sparse matrix (optional)
with shape (n_samples_Y, n_features).
Returns
-------
kernel matrix : array
An array with shape (n_samples_X, n_samples_Y).
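    Examples
    --------
    A small illustrative sketch with one pair of orthogonal rows:

    >>> from sklearn.metrics.pairwise import cosine_similarity
    >>> X = [[1., 0.], [0., 2.]]
    >>> S = cosine_similarity(X)
    >>> round(float(S[0, 0]), 4)   # a row is always fully similar to itself
    1.0
    >>> round(float(S[0, 1]), 4)   # orthogonal rows
    0.0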
"""
# to avoid recursive import
X, Y = check_pairwise_arrays(X, Y)
X_normalized = normalize(X, copy=True)
if X is Y:
Y_normalized = X_normalized
else:
Y_normalized = normalize(Y, copy=True)
K = safe_sparse_dot(X_normalized, Y_normalized.T, dense_output=True)
return K
def additive_chi2_kernel(X, Y=None):
"""Computes the additive chi-squared kernel between observations in X and Y
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = -Sum [(x - y)^2 / (x + y)]
It can be interpreted as a weighted difference per entry.
Notes
-----
As the negative of a distance, this kernel is only conditionally positive
definite.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://eprints.pascal-network.org/archive/00002309/01/Zhang06-IJCV.pdf
See also
--------
chi2_kernel : The exponentiated version of the kernel, which is usually
preferable.
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to this kernel.
"""
if issparse(X) or issparse(Y):
raise ValueError("additive_chi2 does not support sparse matrices.")
X, Y = check_pairwise_arrays(X, Y)
if (X < 0).any():
raise ValueError("X contains negative values.")
if Y is not X and (Y < 0).any():
raise ValueError("Y contains negative values.")
result = np.zeros((X.shape[0], Y.shape[0]), dtype=X.dtype)
_chi2_kernel_fast(X, Y, result)
return result
def chi2_kernel(X, Y=None, gamma=1.):
"""Computes the exponential chi-squared kernel X and Y.
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = exp(-gamma Sum [(x - y)^2 / (x + y)])
It can be interpreted as a weighted difference per entry.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float, default=1.
Scaling parameter of the chi2 kernel.
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://eprints.pascal-network.org/archive/00002309/01/Zhang06-IJCV.pdf
See also
--------
additive_chi2_kernel : The additive version of this kernel
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to the additive version of this kernel.
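    Examples
    --------
    An illustrative sketch on toy histogram rows (``gamma=1``); identical
    rows give ``exp(0) == 1``:

    >>> from sklearn.metrics.pairwise import chi2_kernel
    >>> X = [[2., 1.], [1., 2.]]
    >>> K = chi2_kernel(X, gamma=1.)
    >>> round(float(K[0, 0]), 4), round(float(K[0, 1]), 4)
    (1.0, 0.5134)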
"""
K = additive_chi2_kernel(X, Y)
K *= gamma
return np.exp(K, K)
# Helper functions - distance
PAIRWISE_DISTANCE_FUNCTIONS = {
# If updating this dictionary, update the doc in both distance_metrics()
# and also in pairwise_distances()!
'cityblock': manhattan_distances,
'cosine': cosine_distances,
'euclidean': euclidean_distances,
'l2': euclidean_distances,
'l1': manhattan_distances,
'manhattan': manhattan_distances, }
def distance_metrics():
"""Valid metrics for pairwise_distances.
This function simply returns the valid pairwise distance metrics.
It exists to allow for a description of the mapping for
each of the valid strings.
The valid distance metrics, and the function they map to, are:
============ ====================================
metric Function
============ ====================================
'cityblock' metrics.pairwise.manhattan_distances
'cosine' metrics.pairwise.cosine_distances
'euclidean' metrics.pairwise.euclidean_distances
'l1' metrics.pairwise.manhattan_distances
'l2' metrics.pairwise.euclidean_distances
'manhattan' metrics.pairwise.manhattan_distances
============ ====================================
"""
return PAIRWISE_DISTANCE_FUNCTIONS
def _parallel_pairwise(X, Y, func, n_jobs, **kwds):
"""Break the pairwise matrix in n_jobs even slices
and compute them in parallel"""
if n_jobs < 0:
n_jobs = max(cpu_count() + 1 + n_jobs, 1)
if Y is None:
Y = X
if n_jobs == 1:
# Special case to avoid picklability checks in delayed
return func(X, Y, **kwds)
# TODO: in some cases, backend='threading' may be appropriate
fd = delayed(func)
ret = Parallel(n_jobs=n_jobs, verbose=0)(
fd(X, Y[s], **kwds)
for s in gen_even_slices(Y.shape[0], n_jobs))
return np.hstack(ret)
def _pairwise_callable(X, Y, metric, **kwds):
"""Handle the callable case for pairwise_{distances,kernels}
"""
X, Y = check_pairwise_arrays(X, Y)
if X is Y:
# Only calculate metric for upper triangle
out = np.zeros((X.shape[0], Y.shape[0]), dtype='float')
iterator = itertools.combinations(range(X.shape[0]), 2)
for i, j in iterator:
out[i, j] = metric(X[i], Y[j], **kwds)
# Make symmetric
# NB: out += out.T will produce incorrect results
out = out + out.T
# Calculate diagonal
# NB: nonzero diagonals are allowed for both metrics and kernels
for i in range(X.shape[0]):
x = X[i]
out[i, i] = metric(x, x, **kwds)
else:
# Calculate all cells
out = np.empty((X.shape[0], Y.shape[0]), dtype='float')
iterator = itertools.product(range(X.shape[0]), range(Y.shape[0]))
for i, j in iterator:
out[i, j] = metric(X[i], Y[j], **kwds)
return out
_VALID_METRICS = ['euclidean', 'l2', 'l1', 'manhattan', 'cityblock',
'braycurtis', 'canberra', 'chebyshev', 'correlation',
'cosine', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener',
'sokalsneath', 'sqeuclidean', 'yule', "wminkowski"]
def pairwise_distances(X, Y=None, metric="euclidean", n_jobs=1, **kwds):
""" Compute the distance matrix from a vector array X and optional Y.
This method takes either a vector array or a distance matrix, and returns
a distance matrix. If the input is a vector array, the distances are
computed. If the input is a distances matrix, it is returned instead.
This method provides a safe way to take a distance matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
distance between the arrays from both X and Y.
Valid values for metric are:
- From scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']. These metrics support sparse matrix inputs.
- From scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis',
'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',
'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics. These metrics do not support sparse matrix inputs.
Note that in the case of 'cityblock', 'cosine' and 'euclidean' (which are
valid scipy.spatial.distance metrics), the scikit-learn implementation
will be used, which is faster and has support for sparse matrices (except
for 'cityblock'). For a verbose description of the metrics from
scikit-learn, see the __doc__ of the sklearn.pairwise.distance_metrics
function.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
Y : array [n_samples_b, n_features], optional
An optional second feature array. Only allowed if metric != "precomputed".
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
D : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A distance matrix D such that D_{i, j} is the distance between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then D_{i, j} is the distance between the ith array
from X and the jth array from Y.
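    Examples
    --------
    A brief illustrative sketch using the built-in 'cityblock' metric and a
    user-supplied callable:

    >>> import numpy as np
    >>> from sklearn.metrics.pairwise import pairwise_distances
    >>> X = np.array([[0., 0.], [1., 1.]])
    >>> pairwise_distances(X, metric='cityblock').astype(int)
    array([[0, 2],
           [2, 0]])
    >>> D = pairwise_distances(X, metric=lambda a, b: np.abs(a - b).max())
    >>> D.astype(int)
    array([[0, 1],
           [1, 0]])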
"""
if (metric not in _VALID_METRICS and
not callable(metric) and metric != "precomputed"):
raise ValueError("Unknown metric %s. "
"Valid metrics are %s, or 'precomputed', or a "
"callable" % (metric, _VALID_METRICS))
if metric == "precomputed":
return X
elif metric in PAIRWISE_DISTANCE_FUNCTIONS:
func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif callable(metric):
func = partial(_pairwise_callable, metric=metric, **kwds)
else:
if issparse(X) or issparse(Y):
raise TypeError("scipy distance metrics do not"
" support sparse matrices.")
X, Y = check_pairwise_arrays(X, Y)
if n_jobs == 1 and X is Y:
return distance.squareform(distance.pdist(X, metric=metric,
**kwds))
func = partial(distance.cdist, metric=metric, **kwds)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
# Helper functions - kernels
PAIRWISE_KERNEL_FUNCTIONS = {
    # If updating this dictionary, update the doc in both kernel_metrics()
    # and also in pairwise_kernels()!
'additive_chi2': additive_chi2_kernel,
'chi2': chi2_kernel,
'linear': linear_kernel,
'polynomial': polynomial_kernel,
'poly': polynomial_kernel,
'rbf': rbf_kernel,
'sigmoid': sigmoid_kernel,
'cosine': cosine_similarity, }
def kernel_metrics():
""" Valid metrics for pairwise_kernels
    This function simply returns the valid pairwise kernel metrics.
    It exists, however, to allow for a verbose description of the mapping for
    each of the valid strings.
    The valid kernel metrics, and the functions they map to, are:
=============== ========================================
metric Function
=============== ========================================
'additive_chi2' sklearn.pairwise.additive_chi2_kernel
'chi2' sklearn.pairwise.chi2_kernel
'linear' sklearn.pairwise.linear_kernel
'poly' sklearn.pairwise.polynomial_kernel
'polynomial' sklearn.pairwise.polynomial_kernel
'rbf' sklearn.pairwise.rbf_kernel
'sigmoid' sklearn.pairwise.sigmoid_kernel
'cosine' sklearn.pairwise.cosine_similarity
=============== ========================================
"""
return PAIRWISE_KERNEL_FUNCTIONS
KERNEL_PARAMS = {
"additive_chi2": (),
"chi2": (),
"cosine": (),
"exp_chi2": frozenset(["gamma"]),
"linear": (),
"poly": frozenset(["gamma", "degree", "coef0"]),
"polynomial": frozenset(["gamma", "degree", "coef0"]),
"rbf": frozenset(["gamma"]),
"sigmoid": frozenset(["gamma", "coef0"]),
}
def pairwise_kernels(X, Y=None, metric="linear", filter_params=False,
n_jobs=1, **kwds):
"""Compute the kernel between arrays X and optional array Y.
This method takes either a vector array or a kernel matrix, and returns
a kernel matrix. If the input is a vector array, the kernels are
computed. If the input is a kernel matrix, it is returned instead.
This method provides a safe way to take a kernel matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
kernel between the arrays from both X and Y.
Valid values for metric are::
['rbf', 'sigmoid', 'polynomial', 'poly', 'linear', 'cosine']
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise kernels between samples, or a feature array.
Y : array [n_samples_b, n_features]
A second feature array only if X has shape [n_samples_a, n_features].
metric : string, or callable
The metric to use when calculating kernel between instances in a
feature array. If metric is a string, it must be one of the metrics
in pairwise.PAIRWISE_KERNEL_FUNCTIONS.
If metric is "precomputed", X is assumed to be a kernel matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
        should take two arrays from X as input and return a value indicating
        the kernel between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
    filter_params : boolean
Whether to filter invalid parameters or not.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the kernel function.
Returns
-------
K : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A kernel matrix K such that K_{i, j} is the kernel between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then K_{i, j} is the kernel between the ith array
from X and the jth array from Y.
Notes
-----
If metric is 'precomputed', Y is ignored and X is returned.
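    Examples
    --------
    A brief illustrative sketch using the linear kernel (the Gram matrix of
    the rows):

    >>> from sklearn.metrics.pairwise import pairwise_kernels
    >>> X = [[0., 1.], [1., 1.]]
    >>> pairwise_kernels(X, metric='linear').astype(int)
    array([[1, 1],
           [1, 2]])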
"""
if metric == "precomputed":
return X
elif metric in PAIRWISE_KERNEL_FUNCTIONS:
if filter_params:
kwds = dict((k, kwds[k]) for k in kwds
if k in KERNEL_PARAMS[metric])
func = PAIRWISE_KERNEL_FUNCTIONS[metric]
elif callable(metric):
func = partial(_pairwise_callable, metric=metric, **kwds)
else:
raise ValueError("Unknown kernel %r" % metric)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
|
bsd-3-clause
|
tectronics/agpy
|
doc/conf.py
|
6
|
9638
|
# -*- coding: utf-8 -*-
#
# agpy documentation build configuration file, created by
# sphinx-quickstart on Wed Dec 21 22:31:14 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
import warnings  # used below if matplotlib's plot_directive cannot be imported
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
from astropy.sphinx.conf import *
del html_style # I don't want theirs because I don't have it
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.pngmath',
'sphinx.ext.viewcode',
'sphinx.ext.autosummary',
'sphinx.ext.inheritance_diagram',
'numpydoc',
    'sphinx.ext.ifconfig']
# 'sphinx.ext.intersphinx',
# 'sphinx.ext.doctest',
numpydoc_show_class_members = False
try:
import matplotlib.sphinxext.plot_directive
extensions += [matplotlib.sphinxext.plot_directive.__name__]
except ImportError:
warnings.warn(
"matplotlib's plot_directive could not be imported. " +
"Inline plots will not be included in the output")
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'agpy'
copyright = u'2011, Adam Ginsburg'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
from agpy import __version__ as version
# The short X.Y version.
version = version
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
default_role = 'obj'
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'agogo'
html_style = 'extra.css'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
html_theme_options = dict(
pagewidth = '1000px',
documentwidth = '760px',
sidebarwidth = '200px',
headerbg="#666666",
headercolor1="#000000",
headercolor2="#000000",
headerlinkcolor="#FF9522",
linkcolor="#4a8f43",
textalign='left',
)
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static','_static/extra.css','_static/scipy.css','_static/astropy.css']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'agpydoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'agpy.tex', u'agpy Documentation',
u'Adam Ginsburg', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'agpy', u'agpy Documentation',
[u'Adam Ginsburg'], 1)
]
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'agpy'
epub_author = u'Adam Ginsburg'
epub_publisher = u'Adam Ginsburg'
epub_copyright = u'2011, Adam Ginsburg'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Example configuration for intersphinx: refer to the Python standard library.
# imported from astropy
#intersphinx_mapping = {'python':('http://docs.python.org/', None),
# 'numpy':('http://docs.scipy.org/doc/','http://docs.scipy.org/doc/numpy/objects.inv'),
# 'np':('http://docs.scipy.org/doc/','http://docs.scipy.org/doc/numpy/objects.inv'),
# }
|
mit
|
xiaoxiamii/scikit-learn
|
examples/svm/plot_svm_nonlinear.py
|
268
|
1091
|
"""
==============
Non-linear SVM
==============
Perform binary classification using non-linear SVC
with RBF kernel. The target to predict is a XOR of the
inputs.
The color map illustrates the decision function learned by the SVC.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
xx, yy = np.meshgrid(np.linspace(-3, 3, 500),
np.linspace(-3, 3, 500))
np.random.seed(0)
X = np.random.randn(300, 2)
Y = np.logical_xor(X[:, 0] > 0, X[:, 1] > 0)
# fit the model
clf = svm.NuSVC()
clf.fit(X, Y)
# plot the decision function for each datapoint on the grid
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.imshow(Z, interpolation='nearest',
extent=(xx.min(), xx.max(), yy.min(), yy.max()), aspect='auto',
origin='lower', cmap=plt.cm.PuOr_r)
contours = plt.contour(xx, yy, Z, levels=[0], linewidths=2,
                       linestyles='--')
plt.scatter(X[:, 0], X[:, 1], s=30, c=Y, cmap=plt.cm.Paired)
plt.xticks(())
plt.yticks(())
plt.axis([-3, 3, -3, 3])
plt.show()
|
bsd-3-clause
|
kenshay/ImageScripter
|
ProgramData/SystemFiles/Python/Lib/site-packages/matplotlib/backends/backend_gdk.py
|
10
|
17086
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import math
import os
import sys
import warnings
def fn_name(): return sys._getframe(1).f_code.co_name
import gobject
import gtk; gdk = gtk.gdk
import pango
pygtk_version_required = (2,2,0)
if gtk.pygtk_version < pygtk_version_required:
raise ImportError ("PyGTK %d.%d.%d is installed\n"
"PyGTK %d.%d.%d or later is required"
% (gtk.pygtk_version + pygtk_version_required))
del pygtk_version_required
import numpy as np
import matplotlib
from matplotlib import rcParams
from matplotlib._pylab_helpers import Gcf
from matplotlib.backend_bases import RendererBase, GraphicsContextBase, \
FigureManagerBase, FigureCanvasBase
from matplotlib.cbook import is_string_like, restrict_dict, warn_deprecated
from matplotlib.figure import Figure
from matplotlib.mathtext import MathTextParser
from matplotlib.transforms import Affine2D
from matplotlib.backends._backend_gdk import pixbuf_get_pixels_array
backend_version = "%d.%d.%d" % gtk.pygtk_version
_debug = False
# Image formats that this backend supports - for FileChooser and print_figure()
IMAGE_FORMAT = ['eps', 'jpg', 'png', 'ps', 'svg'] + ['bmp'] # , 'raw', 'rgb']
IMAGE_FORMAT.sort()
IMAGE_FORMAT_DEFAULT = 'png'
class RendererGDK(RendererBase):
fontweights = {
100 : pango.WEIGHT_ULTRALIGHT,
200 : pango.WEIGHT_LIGHT,
300 : pango.WEIGHT_LIGHT,
400 : pango.WEIGHT_NORMAL,
500 : pango.WEIGHT_NORMAL,
600 : pango.WEIGHT_BOLD,
700 : pango.WEIGHT_BOLD,
800 : pango.WEIGHT_HEAVY,
900 : pango.WEIGHT_ULTRABOLD,
'ultralight' : pango.WEIGHT_ULTRALIGHT,
'light' : pango.WEIGHT_LIGHT,
'normal' : pango.WEIGHT_NORMAL,
'medium' : pango.WEIGHT_NORMAL,
'semibold' : pango.WEIGHT_BOLD,
'bold' : pango.WEIGHT_BOLD,
'heavy' : pango.WEIGHT_HEAVY,
'ultrabold' : pango.WEIGHT_ULTRABOLD,
'black' : pango.WEIGHT_ULTRABOLD,
}
# cache for efficiency, these must be at class, not instance level
layoutd = {} # a map from text prop tups to pango layouts
rotated = {} # a map from text prop tups to rotated text pixbufs
def __init__(self, gtkDA, dpi):
# widget gtkDA is used for:
# '<widget>.create_pango_layout(s)'
        #  '<widget>.get_colormap()'  (the cmap line below)
self.gtkDA = gtkDA
self.dpi = dpi
self._cmap = gtkDA.get_colormap()
self.mathtext_parser = MathTextParser("Agg")
def set_pixmap (self, pixmap):
self.gdkDrawable = pixmap
def set_width_height (self, width, height):
"""w,h is the figure w,h not the pixmap w,h
"""
self.width, self.height = width, height
def draw_path(self, gc, path, transform, rgbFace=None):
transform = transform + Affine2D(). \
scale(1.0, -1.0).translate(0, self.height)
polygons = path.to_polygons(transform, self.width, self.height)
for polygon in polygons:
# draw_polygon won't take an arbitrary sequence -- it must be a list
# of tuples
polygon = [(int(np.round(x)), int(np.round(y))) for x, y in polygon]
if rgbFace is not None:
saveColor = gc.gdkGC.foreground
gc.gdkGC.foreground = gc.rgb_to_gdk_color(rgbFace)
self.gdkDrawable.draw_polygon(gc.gdkGC, True, polygon)
gc.gdkGC.foreground = saveColor
if gc.gdkGC.line_width > 0:
self.gdkDrawable.draw_lines(gc.gdkGC, polygon)
def draw_image(self, gc, x, y, im):
bbox = gc.get_clip_rectangle()
if bbox != None:
l,b,w,h = bbox.bounds
#rectangle = (int(l), self.height-int(b+h),
# int(w), int(h))
# set clip rect?
rows, cols = im.shape[:2]
pixbuf = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB,
has_alpha=True, bits_per_sample=8,
width=cols, height=rows)
array = pixbuf_get_pixels_array(pixbuf)
array[:, :, :] = im[::-1]
gc = self.new_gc()
y = self.height-y-rows
try: # new in 2.2
# can use None instead of gc.gdkGC, if don't need clipping
self.gdkDrawable.draw_pixbuf (gc.gdkGC, pixbuf, 0, 0,
int(x), int(y), cols, rows,
gdk.RGB_DITHER_NONE, 0, 0)
except AttributeError:
# deprecated in 2.2
pixbuf.render_to_drawable(self.gdkDrawable, gc.gdkGC, 0, 0,
int(x), int(y), cols, rows,
gdk.RGB_DITHER_NONE, 0, 0)
def draw_text(self, gc, x, y, s, prop, angle, ismath=False, mtext=None):
x, y = int(x), int(y)
if x < 0 or y < 0: # window has shrunk and text is off the edge
return
if angle not in (0,90):
warnings.warn('backend_gdk: unable to draw text at angles ' +
'other than 0 or 90')
elif ismath:
self._draw_mathtext(gc, x, y, s, prop, angle)
elif angle==90:
self._draw_rotated_text(gc, x, y, s, prop, angle)
else:
layout, inkRect, logicalRect = self._get_pango_layout(s, prop)
l, b, w, h = inkRect
if (x + w > self.width or y + h > self.height):
return
self.gdkDrawable.draw_layout(gc.gdkGC, x, y-h-b, layout)
def _draw_mathtext(self, gc, x, y, s, prop, angle):
ox, oy, width, height, descent, font_image, used_characters = \
self.mathtext_parser.parse(s, self.dpi, prop)
if angle==90:
width, height = height, width
x -= width
y -= height
imw = font_image.get_width()
imh = font_image.get_height()
N = imw * imh
# a numpixels by num fonts array
Xall = np.zeros((N,1), np.uint8)
image_str = font_image.as_str()
Xall[:,0] = np.fromstring(image_str, np.uint8)
# get the max alpha at each pixel
Xs = np.amax(Xall,axis=1)
        # convert it to its proper shape
Xs.shape = imh, imw
pixbuf = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB, has_alpha=True,
bits_per_sample=8, width=imw, height=imh)
array = pixbuf_get_pixels_array(pixbuf)
rgb = gc.get_rgb()
array[:,:,0]=int(rgb[0]*255)
array[:,:,1]=int(rgb[1]*255)
array[:,:,2]=int(rgb[2]*255)
array[:,:,3]=Xs
try: # new in 2.2
# can use None instead of gc.gdkGC, if don't need clipping
self.gdkDrawable.draw_pixbuf (gc.gdkGC, pixbuf, 0, 0,
int(x), int(y), imw, imh,
gdk.RGB_DITHER_NONE, 0, 0)
except AttributeError:
# deprecated in 2.2
pixbuf.render_to_drawable(self.gdkDrawable, gc.gdkGC, 0, 0,
int(x), int(y), imw, imh,
gdk.RGB_DITHER_NONE, 0, 0)
def _draw_rotated_text(self, gc, x, y, s, prop, angle):
"""
Draw the text rotated 90 degrees, other angles are not supported
"""
# this function (and its called functions) is a bottleneck
# Pango 1.6 supports rotated text, but pygtk 2.4.0 does not yet have
# wrapper functions
# GTK+ 2.6 pixbufs support rotation
gdrawable = self.gdkDrawable
ggc = gc.gdkGC
layout, inkRect, logicalRect = self._get_pango_layout(s, prop)
l, b, w, h = inkRect
x = int(x-h)
y = int(y-w)
if (x < 0 or y < 0 or # window has shrunk and text is off the edge
x + w > self.width or y + h > self.height):
return
key = (x,y,s,angle,hash(prop))
imageVert = self.rotated.get(key)
if imageVert != None:
gdrawable.draw_image(ggc, imageVert, 0, 0, x, y, h, w)
return
imageBack = gdrawable.get_image(x, y, w, h)
imageVert = gdrawable.get_image(x, y, h, w)
imageFlip = gtk.gdk.Image(type=gdk.IMAGE_FASTEST,
visual=gdrawable.get_visual(),
width=w, height=h)
if imageFlip == None or imageBack == None or imageVert == None:
warnings.warn("Could not renderer vertical text")
return
imageFlip.set_colormap(self._cmap)
for i in range(w):
for j in range(h):
imageFlip.put_pixel(i, j, imageVert.get_pixel(j,w-i-1) )
gdrawable.draw_image(ggc, imageFlip, 0, 0, x, y, w, h)
gdrawable.draw_layout(ggc, x, y-b, layout)
imageIn = gdrawable.get_image(x, y, w, h)
for i in range(w):
for j in range(h):
imageVert.put_pixel(j, i, imageIn.get_pixel(w-i-1,j) )
gdrawable.draw_image(ggc, imageBack, 0, 0, x, y, w, h)
gdrawable.draw_image(ggc, imageVert, 0, 0, x, y, h, w)
self.rotated[key] = imageVert
def _get_pango_layout(self, s, prop):
"""
Create a pango layout instance for Text 's' with properties 'prop'.
Return - pango layout (from cache if already exists)
Note that pango assumes a logical DPI of 96
Ref: pango/fonts.c/pango_font_description_set_size() manual page
"""
# problem? - cache gets bigger and bigger, is never cleared out
# two (not one) layouts are created for every text item s (then they
# are cached) - why?
key = self.dpi, s, hash(prop)
value = self.layoutd.get(key)
if value != None:
return value
size = prop.get_size_in_points() * self.dpi / 96.0
size = np.round(size)
font_str = '%s, %s %i' % (prop.get_name(), prop.get_style(), size,)
font = pango.FontDescription(font_str)
# later - add fontweight to font_str
font.set_weight(self.fontweights[prop.get_weight()])
layout = self.gtkDA.create_pango_layout(s)
layout.set_font_description(font)
inkRect, logicalRect = layout.get_pixel_extents()
self.layoutd[key] = layout, inkRect, logicalRect
return layout, inkRect, logicalRect
def flipy(self):
return True
def get_canvas_width_height(self):
return self.width, self.height
def get_text_width_height_descent(self, s, prop, ismath):
if ismath:
ox, oy, width, height, descent, font_image, used_characters = \
self.mathtext_parser.parse(s, self.dpi, prop)
return width, height, descent
layout, inkRect, logicalRect = self._get_pango_layout(s, prop)
l, b, w, h = inkRect
ll, lb, lw, lh = logicalRect
return w, h + 1, h - lh
def new_gc(self):
return GraphicsContextGDK(renderer=self)
def points_to_pixels(self, points):
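        # 1 point is 1/72 inch, so a length in points corresponds to
        # points / 72 * dpi pixels (e.g. 12 pt at 96 dpi -> 16 px).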
return points/72.0 * self.dpi
class GraphicsContextGDK(GraphicsContextBase):
# a cache shared by all class instances
_cached = {} # map: rgb color -> gdk.Color
_joind = {
'bevel' : gdk.JOIN_BEVEL,
'miter' : gdk.JOIN_MITER,
'round' : gdk.JOIN_ROUND,
}
_capd = {
'butt' : gdk.CAP_BUTT,
'projecting' : gdk.CAP_PROJECTING,
'round' : gdk.CAP_ROUND,
}
def __init__(self, renderer):
GraphicsContextBase.__init__(self)
self.renderer = renderer
self.gdkGC = gtk.gdk.GC(renderer.gdkDrawable)
self._cmap = renderer._cmap
def rgb_to_gdk_color(self, rgb):
"""
rgb - an RGB tuple (three 0.0-1.0 values)
return an allocated gtk.gdk.Color
"""
try:
return self._cached[tuple(rgb)]
except KeyError:
color = self._cached[tuple(rgb)] = \
self._cmap.alloc_color(
int(rgb[0]*65535),int(rgb[1]*65535),int(rgb[2]*65535))
return color
#def set_antialiased(self, b):
# anti-aliasing is not supported by GDK
def set_capstyle(self, cs):
GraphicsContextBase.set_capstyle(self, cs)
self.gdkGC.cap_style = self._capd[self._capstyle]
def set_clip_rectangle(self, rectangle):
GraphicsContextBase.set_clip_rectangle(self, rectangle)
if rectangle is None:
return
l,b,w,h = rectangle.bounds
rectangle = (int(l), self.renderer.height-int(b+h)+1,
int(w), int(h))
#rectangle = (int(l), self.renderer.height-int(b+h),
# int(w+1), int(h+2))
self.gdkGC.set_clip_rectangle(rectangle)
def set_dashes(self, dash_offset, dash_list):
GraphicsContextBase.set_dashes(self, dash_offset, dash_list)
if dash_list == None:
self.gdkGC.line_style = gdk.LINE_SOLID
else:
pixels = self.renderer.points_to_pixels(np.asarray(dash_list))
dl = [max(1, int(np.round(val))) for val in pixels]
self.gdkGC.set_dashes(dash_offset, dl)
self.gdkGC.line_style = gdk.LINE_ON_OFF_DASH
def set_foreground(self, fg, isRGBA=False):
GraphicsContextBase.set_foreground(self, fg, isRGBA)
self.gdkGC.foreground = self.rgb_to_gdk_color(self.get_rgb())
def set_graylevel(self, frac):
GraphicsContextBase.set_graylevel(self, frac)
self.gdkGC.foreground = self.rgb_to_gdk_color(self.get_rgb())
def set_joinstyle(self, js):
GraphicsContextBase.set_joinstyle(self, js)
self.gdkGC.join_style = self._joind[self._joinstyle]
def set_linewidth(self, w):
GraphicsContextBase.set_linewidth(self, w)
if w == 0:
self.gdkGC.line_width = 0
else:
pixels = self.renderer.points_to_pixels(w)
self.gdkGC.line_width = max(1, int(np.round(pixels)))
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
return new_figure_manager_given_figure(num, thisFig)
def new_figure_manager_given_figure(num, figure):
"""
Create a new figure manager instance for the given figure.
"""
canvas = FigureCanvasGDK(figure)
manager = FigureManagerBase(canvas, num)
return manager
class FigureCanvasGDK (FigureCanvasBase):
def __init__(self, figure):
FigureCanvasBase.__init__(self, figure)
if self.__class__ == matplotlib.backends.backend_gdk.FigureCanvasGDK:
warn_deprecated('2.0', message="The GDK backend is "
"deprecated. It is untested, known to be "
"broken and will be removed in Matplotlib 2.2. "
"Use the Agg backend instead. "
"See Matplotlib usage FAQ for"
" more info on backends.",
alternative="Agg")
self._renderer_init()
def _renderer_init(self):
self._renderer = RendererGDK (gtk.DrawingArea(), self.figure.dpi)
def _render_figure(self, pixmap, width, height):
self._renderer.set_pixmap (pixmap)
self._renderer.set_width_height (width, height)
self.figure.draw (self._renderer)
filetypes = FigureCanvasBase.filetypes.copy()
filetypes['jpg'] = 'JPEG'
filetypes['jpeg'] = 'JPEG'
def print_jpeg(self, filename, *args, **kwargs):
return self._print_image(filename, 'jpeg')
print_jpg = print_jpeg
def print_png(self, filename, *args, **kwargs):
return self._print_image(filename, 'png')
def _print_image(self, filename, format, *args, **kwargs):
width, height = self.get_width_height()
pixmap = gtk.gdk.Pixmap (None, width, height, depth=24)
self._render_figure(pixmap, width, height)
# jpg colors don't match the display very well, png colors match
# better
pixbuf = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB, 0, 8,
width, height)
pixbuf.get_from_drawable(pixmap, pixmap.get_colormap(),
0, 0, 0, 0, width, height)
# set the default quality, if we are writing a JPEG.
# http://www.pygtk.org/docs/pygtk/class-gdkpixbuf.html#method-gdkpixbuf--save
options = restrict_dict(kwargs, ['quality'])
if format in ['jpg','jpeg']:
if 'quality' not in options:
options['quality'] = rcParams['savefig.jpeg_quality']
options['quality'] = str(options['quality'])
pixbuf.save(filename, format, options=options)
|
gpl-3.0
|
bcimontreal/bci_workshop
|
python/bci_workshop_tools.py
|
1
|
12476
|
# -*- coding: utf-8 -*-
"""
BCI Workshop Auxiliary Tools
Created on Fri May 08 15:34:59 2015
@author: Cassani
"""
import os
import sys
from tempfile import gettempdir
from subprocess import call
import matplotlib.pyplot as plt
import numpy as np
from sklearn import svm
from scipy.signal import butter, lfilter, lfilter_zi
NOTCH_B, NOTCH_A = butter(4, np.array([55, 65])/(256/2), btype='bandstop')
def plot_multichannel(data, params=None):
"""Create a plot to present multichannel data.
Args:
data (numpy.ndarray): Multichannel Data [n_samples, n_channels]
params (dict): information about the data acquisition device
TODO Receive labels as arguments
"""
fig, ax = plt.subplots()
n_samples = data.shape[0]
n_channels = data.shape[1]
if params is not None:
fs = params['sampling frequency']
names = params['names of channels']
else:
fs = 1
names = [''] * n_channels
time_vec = np.arange(n_samples) / float(fs)
data = np.fliplr(data)
offset = 0
for i_channel in range(n_channels):
data_ac = data[:, i_channel] - np.mean(data[:, i_channel])
offset = offset + 2 * np.max(np.abs(data_ac))
ax.plot(time_vec, data_ac + offset, label=names[i_channel])
ax.set_xlabel('Time [s]')
ax.set_ylabel('Amplitude')
plt.legend()
plt.draw()
def epoch(data, samples_epoch, samples_overlap=0):
"""Extract epochs from a time series.
Given a 2D array of the shape [n_samples, n_channels]
Creates a 3D array of the shape [wlength_samples, n_channels, n_epochs]
Args:
data (numpy.ndarray or list of lists): data [n_samples, n_channels]
samples_epoch (int): window length in samples
samples_overlap (int): Overlap between windows in samples
Returns:
        (numpy.ndarray): epoched data of shape
            [samples_epoch, n_channels, n_epochs]
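    Example (illustrative sketch):
        >>> import numpy as np
        >>> data = np.arange(12).reshape(6, 2)   # 6 samples, 2 channels
        >>> epoch(data, samples_epoch=4, samples_overlap=2).shape
        (4, 2, 2)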
"""
if isinstance(data, list):
data = np.array(data)
n_samples, n_channels = data.shape
samples_shift = samples_epoch - samples_overlap
n_epochs = int(np.floor((n_samples - samples_epoch) / float(samples_shift)) + 1)
# Markers indicate where the epoch starts, and the epoch contains samples_epoch rows
markers = np.asarray(range(0, n_epochs + 1)) * samples_shift
markers = markers.astype(int)
# Divide data in epochs
epochs = np.zeros((samples_epoch, n_channels, n_epochs))
for i in range(0, n_epochs):
epochs[:, :, i] = data[markers[i]:markers[i] + samples_epoch, :]
return epochs
def compute_feature_vector(eegdata, fs):
"""Extract the features from the EEG.
Args:
eegdata (numpy.ndarray): array of dimension [number of samples,
number of channels]
fs (float): sampling frequency of eegdata
Returns:
(numpy.ndarray): feature matrix of shape [number of feature points,
number of different features]
"""
# 1. Compute the PSD
winSampleLength, nbCh = eegdata.shape
# Apply Hamming window
w = np.hamming(winSampleLength)
dataWinCentered = eegdata - np.mean(eegdata, axis=0) # Remove offset
dataWinCenteredHam = (dataWinCentered.T*w).T
NFFT = nextpow2(winSampleLength)
Y = np.fft.fft(dataWinCenteredHam, n=NFFT, axis=0)/winSampleLength
PSD = 2*np.abs(Y[0:int(NFFT/2), :])
f = fs/2*np.linspace(0, 1, int(NFFT/2))
# SPECTRAL FEATURES
# Average of band powers
# Delta <4
ind_delta, = np.where(f < 4)
meanDelta = np.mean(PSD[ind_delta, :], axis=0)
# Theta 4-8
ind_theta, = np.where((f >= 4) & (f <= 8))
meanTheta = np.mean(PSD[ind_theta, :], axis=0)
# Alpha 8-12
ind_alpha, = np.where((f >= 8) & (f <= 12))
meanAlpha = np.mean(PSD[ind_alpha, :], axis=0)
# Beta 12-30
ind_beta, = np.where((f >= 12) & (f < 30))
meanBeta = np.mean(PSD[ind_beta, :], axis=0)
feature_vector = np.concatenate((meanDelta, meanTheta, meanAlpha,
meanBeta), axis=0)
feature_vector = np.log10(feature_vector)
return feature_vector
def nextpow2(i):
"""
Find the next power of 2 for number i
"""
n = 1
while n < i:
n *= 2
return n
def compute_feature_matrix(epochs, fs):
"""
Call compute_feature_vector for each EEG epoch
"""
n_epochs = epochs.shape[2]
for i_epoch in range(n_epochs):
if i_epoch == 0:
feat = compute_feature_vector(epochs[:, :, i_epoch], fs).T
feature_matrix = np.zeros((n_epochs, feat.shape[0])) # Initialize feature_matrix
feature_matrix[i_epoch, :] = compute_feature_vector(
epochs[:, :, i_epoch], fs).T
return feature_matrix
def train_classifier(feature_matrix_0, feature_matrix_1, algorithm='SVM'):
"""Train a binary classifier.
Train a binary classifier. First perform Z-score normalization, then
fit
Args:
feature_matrix_0 (numpy.ndarray): array of shape (n_samples,
n_features) with examples for Class 0
        feature_matrix_1 (numpy.ndarray): array of shape (n_samples,
n_features) with examples for Class 1
        algorithm (str): Type of classifier to use. Currently only SVM is
            supported.
Returns:
(sklearn object): trained classifier (scikit object)
(numpy.ndarray): normalization mean
(numpy.ndarray): normalization standard deviation
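    Example (illustrative sketch with random, well-separated classes):
        >>> import numpy as np
        >>> np.random.seed(0)
        >>> feat0 = np.random.randn(20, 4)
        >>> feat1 = np.random.randn(20, 4) + 5.0
        >>> clf, mu_ft, std_ft, score = train_classifier(feat0, feat1)
        >>> score > 0.9
        True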
"""
# Create vector Y (class labels)
class0 = np.zeros((feature_matrix_0.shape[0], 1))
class1 = np.ones((feature_matrix_1.shape[0], 1))
# Concatenate feature matrices and their respective labels
y = np.concatenate((class0, class1), axis=0)
features_all = np.concatenate((feature_matrix_0, feature_matrix_1),
axis=0)
# Normalize features columnwise
mu_ft = np.mean(features_all, axis=0)
std_ft = np.std(features_all, axis=0)
X = (features_all - mu_ft) / std_ft
# Train SVM using default parameters
clf = svm.SVC()
clf.fit(X, y)
score = clf.score(X, y.ravel())
# Visualize decision boundary
# plot_classifier_training(clf, X, y, features_to_plot=[0, 1])
return clf, mu_ft, std_ft, score
def test_classifier(clf, feature_vector, mu_ft, std_ft):
"""Test the classifier on new data points.
Args:
clf (sklearn object): trained classifier
feature_vector (numpy.ndarray): array of shape (n_samples,
n_features)
mu_ft (numpy.ndarray): normalization mean
std_ft (numpy.ndarray): normalization standard deviation
Returns:
(numpy.ndarray): decision of the classifier on the data points
"""
# Normalize feature_vector
x = (feature_vector - mu_ft) / std_ft
y_hat = clf.predict(x)
return y_hat
def beep(waveform=(79, 45, 32, 50, 99, 113, 126, 127)):
"""Play a beep sound.
Cross-platform sound playing with standard library only, no sound
file required.
From https://gist.github.com/juancarlospaco/c295f6965ed056dd08da
"""
wavefile = os.path.join(gettempdir(), "beep.wav")
if not os.path.isfile(wavefile) or not os.access(wavefile, os.R_OK):
with open(wavefile, "w+") as wave_file:
for sample in range(0, 300, 1):
for wav in range(0, 8, 1):
wave_file.write(chr(waveform[wav]))
if sys.platform.startswith("linux"):
return call("chrt -i 0 aplay '{fyle}'".format(fyle=wavefile),
shell=1)
if sys.platform.startswith("darwin"):
return call("afplay '{fyle}'".format(fyle=wavefile), shell=True)
if sys.platform.startswith("win"): # FIXME: This is Ugly.
return call("start /low /min '{fyle}'".format(fyle=wavefile),
shell=1)
def get_feature_names(ch_names):
"""Generate the name of the features.
Args:
ch_names (list): electrode names
Returns:
(list): feature names
"""
bands = ['delta', 'theta', 'alpha', 'beta']
feat_names = []
for band in bands:
for ch in range(len(ch_names)):
feat_names.append(band + '-' + ch_names[ch])
return feat_names
def update_buffer(data_buffer, new_data, notch=False, filter_state=None):
"""
Concatenates "new_data" into "data_buffer", and returns an array with
the same size as "data_buffer"
"""
if new_data.ndim == 1:
new_data = new_data.reshape(-1, data_buffer.shape[1])
if notch:
if filter_state is None:
filter_state = np.tile(lfilter_zi(NOTCH_B, NOTCH_A),
(data_buffer.shape[1], 1)).T
new_data, filter_state = lfilter(NOTCH_B, NOTCH_A, new_data, axis=0,
zi=filter_state)
new_buffer = np.concatenate((data_buffer, new_data), axis=0)
new_buffer = new_buffer[new_data.shape[0]:, :]
return new_buffer, filter_state
def get_last_data(data_buffer, newest_samples):
"""
Obtains from "buffer_array" the "newest samples" (N rows from the
bottom of the buffer)
"""
new_buffer = data_buffer[(data_buffer.shape[0] - newest_samples):, :]
return new_buffer
class DataPlotter():
"""
Class for creating and updating a line plot.
"""
def __init__(self, nbPoints, chNames, fs=None, title=None):
"""Initialize the figure."""
self.nbPoints = nbPoints
self.chNames = chNames
self.nbCh = len(self.chNames)
self.fs = 1 if fs is None else fs
self.figTitle = '' if title is None else title
data = np.empty((self.nbPoints, 1))*np.nan
self.t = np.arange(data.shape[0])/float(self.fs)
# Create offset parameters for plotting multiple signals
self.yAxisRange = 100
self.chRange = self.yAxisRange/float(self.nbCh)
self.offsets = np.round((np.arange(self.nbCh)+0.5)*(self.chRange))
# Create the figure and axis
plt.ion()
self.fig, self.ax = plt.subplots()
self.ax.set_yticks(self.offsets)
self.ax.set_yticklabels(self.chNames)
# Initialize the figure
self.ax.set_title(self.figTitle)
self.chLinesDict = {}
for i, chName in enumerate(self.chNames):
self.chLinesDict[chName], = self.ax.plot(
self.t, data+self.offsets[i], label=chName)
self.ax.set_xlabel('Time')
self.ax.set_ylim([0, self.yAxisRange])
self.ax.set_xlim([np.min(self.t), np.max(self.t)])
plt.show()
def update_plot(self, data):
""" Update the plot """
data = data - np.mean(data, axis=0)
std_data = np.std(data, axis=0)
std_data[np.where(std_data == 0)] = 1
data = data/std_data*self.chRange/5.0
for i, chName in enumerate(self.chNames):
self.chLinesDict[chName].set_ydata(data[:, i] + self.offsets[i])
self.fig.canvas.draw()
def clear(self):
""" Clear the figure """
blankData = np.empty((self.nbPoints, 1))*np.nan
for i, chName in enumerate(self.chNames):
self.chLinesDict[chName].set_ydata(blankData)
self.fig.canvas.draw()
def close(self):
""" Close the figure """
plt.close(self.fig)
def plot_classifier_training(clf, X, y, features_to_plot=[0, 1]):
"""Visualize the decision boundary of a classifier.
Args:
clf (sklearn object): trained classifier
X (numpy.ndarray): data to visualize the decision boundary for
y (numpy.ndarray): labels for X
Keyword Args:
features_to_plot (list): indices of the two features to use for
plotting
Inspired from: http://scikit-learn.org/stable/auto_examples/tree/plot_iris.html
"""
plot_colors = "bry"
plot_step = 0.02
n_classes = len(np.unique(y))
x_min = np.min(X[:, 1])-1
x_max = np.max(X[:, 1])+1
y_min = np.min(X[:, 0])-1
y_max = np.max(X[:, 0])+1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
fig, ax = plt.subplots()
ax.contourf(xx, yy, Z, cmap=plt.cm.Paired, alpha=0.5)
# Plot the training points
for i, color in zip(range(n_classes), plot_colors):
idx = np.where(y == i)
ax.scatter(X[idx, 0], X[idx, 1], c=color, cmap=plt.cm.Paired)
plt.axis('tight')
|
mit
|
etkirsch/scikit-learn
|
examples/svm/plot_custom_kernel.py
|
171
|
1546
|
"""
======================
SVM with custom kernel
======================
Simple usage of Support Vector Machines to classify a sample. It will
plot the decision surface and the support vectors.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
Y = iris.target
def my_kernel(X, Y):
"""
We create a custom kernel:
(2 0)
k(X, Y) = X ( ) Y.T
(0 1)
"""
M = np.array([[2, 0], [0, 1.0]])
return np.dot(np.dot(X, M), Y.T)
h = .02 # step size in the mesh
# we create an instance of SVM and fit our data.
clf = svm.SVC(kernel=my_kernel)
clf.fit(X, Y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.title('3-Class classification using Support Vector Machine with custom'
' kernel')
plt.axis('tight')
plt.show()
|
bsd-3-clause
|
mmottahedi/nilmtk
|
nilmtk/dataset_converters/eco/convert_eco.py
|
6
|
7138
|
import pandas as pd
import numpy as np
import sys
from os import listdir, getcwd
from os.path import isdir, join, dirname, abspath
from pandas.tools.merge import concat
from nilmtk.utils import get_module_directory, check_directory_exists
from nilmtk.datastore import Key
from nilmtk.measurement import LEVEL_NAMES
from nilm_metadata import convert_yaml_to_hdf5
from inspect import currentframe, getfile, getsourcefile
from sys import getfilesystemencoding
"""
DATASET STRUCTURE:
------------------
On extracting all the dataset values, we should arrive at a similar directory structure as
mentioned.
ECO Dataset will have a folder '<i>_sm_csv' and '<i>_plug_csv' where i is the building no.
<i>_sm_csv has a folder 01
<i>_plug_csv has a folder 01, 02,....<n> where n is the plug numbers.
Each folder has a CSV file as per each day, with each day csv file containing
86400 entries.
"""
plugs_column_name = {1:('power', 'active'),
};
def convert_eco(dataset_loc, hdf_filename, timezone):
"""
Parameters:
-----------
dataset_loc: str
The root directory where the dataset is located.
hdf_filename: str
        The path (directory and file name) of the output HDF5 file
        that the converter will write.
timezone: str
specifies the timezone of the dataset.
"""
# Creating a new HDF File
store = pd.HDFStore(hdf_filename, 'w', complevel=9, complib='blosc')
check_directory_exists(dataset_loc)
directory_list = [i for i in listdir(dataset_loc) if '.txt' not in i]
directory_list.sort()
print directory_list
# Traversing every folder
for folder in directory_list:
if folder[0] == '.' or folder[-3:] == '.h5':
print 'Skipping ', folder
continue
print 'Computing for folder',folder
#Building number and meter_flag
building_no = int(folder[:2])
meter_flag = 'sm' if 'sm_csv' in folder else 'plugs'
dir_list = [i for i in listdir(join(dataset_loc, folder)) if isdir(join(dataset_loc,folder,i))]
dir_list.sort()
print 'Current dir list:',dir_list
for fl in dir_list:
print 'Computing for folder ',fl
fl_dir_list = [i for i in listdir(join(dataset_loc,folder,fl)) if '.csv' in i]
fl_dir_list.sort()
if meter_flag == 'sm':
for fi in fl_dir_list:
df = pd.read_csv(join(dataset_loc,folder,fl,fi), names=[i for i in range(1,17)], dtype=np.float32)
for phase in range(1,4):
key = str(Key(building=building_no, meter=phase))
df_phase = df.ix[:,[1+phase, 5+phase, 8+phase, 13+phase]]
# get reactive power
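                        # Q = P * tan(phi): active power (column 1+phase) times the
                        # tangent of the phase angle (column 13+phase, in degrees)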
power = df_phase.as_matrix([1+phase, 13+phase])
reactive = power[:,0] * np.tan(power[:,1] * np.pi / 180)
df_phase['Q'] = reactive
df_phase.index = pd.DatetimeIndex(start=fi[:-4], freq='s', periods=86400, tz='GMT')
df_phase = df_phase.tz_convert(timezone)
sm_column_name = {1+phase:('power', 'active'),
5+phase:('current', ''),
8+phase:('voltage', ''),
13+phase:('phase_angle', ''),
'Q': ('power', 'reactive'),
};
df_phase.rename(columns=sm_column_name, inplace=True)
tmp_before = np.size(df_phase.power.active)
df_phase = df_phase[df_phase.power.active != -1]
tmp_after = np.size(df_phase.power.active)
if (tmp_before != tmp_after):
print('Removed missing measurements - Size before: ' + str(tmp_before) + ', size after: ' + str(tmp_after))
df_phase.columns.set_names(LEVEL_NAMES, inplace=True)
                        if key not in store:
                            store.put(key, df_phase, format='Table')
                        else:
                            store.append(key, df_phase, format='Table')
                            store.flush()
                        print('Building {}, Meter no. {} => Done for {}'.format(building_no, phase, fi[:-4]))
else:
#Meter number to be used in key
meter_num = int(fl) + 3
key = str(Key(building=building_no, meter=meter_num))
                # Getting dataframe for each csv file separately
for fi in fl_dir_list:
df = pd.read_csv(join(dataset_loc,folder,fl ,fi), names=[1], dtype=np.float64)
df.index = pd.DatetimeIndex(start=fi[:-4], freq='s', periods=86400, tz = 'GMT')
df.rename(columns=plugs_column_name, inplace=True)
df = df.tz_convert(timezone)
df.columns.set_names(LEVEL_NAMES, inplace=True)
tmp_before = np.size(df.power.active)
df = df[df.power.active != -1]
tmp_after = np.size(df.power.active)
if (tmp_before != tmp_after):
print('Removed missing measurements - Size before: ' + str(tmp_before) + ', size after: ' + str(tmp_after))
                    # If table not present in hdf5, create or else append to existing data
                    if key not in store:
                        store.put(key, df, format='Table')
                        print('Building {}, Meter no. {} => Done for {}'.format(building_no, meter_num, fi[:-4]))
                    else:
                        store.append(key, df, format='Table')
                        store.flush()
                        print('Building {}, Meter no. {} => Done for {}'.format(building_no, meter_num, fi[:-4]))
print "Data storage completed."
store.close()
# Adding the metadata to the HDF5file
print "Proceeding to Metadata conversion..."
meta_path = join(_get_module_directory(), 'metadata')
convert_yaml_to_hdf5(meta_path, hdf_filename)
print "Completed Metadata conversion."
def _get_module_directory():
# Taken from http://stackoverflow.com/a/6098238/732596
path_to_this_file = dirname(getfile(currentframe()))
if not isdir(path_to_this_file):
encoding = getfilesystemencoding()
path_to_this_file = dirname(unicode(__file__, encoding))
if not isdir(path_to_this_file):
        path_to_this_file = abspath(getsourcefile(lambda _: None))
if not isdir(path_to_this_file):
path_to_this_file = getcwd()
assert isdir(path_to_this_file), path_to_this_file + ' is not a directory'
return path_to_this_file
|
apache-2.0
|
rafaelvalle/MDI
|
nnet_lasagne.py
|
1
|
10609
|
# code adapted from lasagne tutorial
# http://lasagne.readthedocs.org/en/latest/user/tutorial.html
import time
import os
from itertools import product
import numpy as np
from sklearn.cross_validation import KFold
import theano
from theano import tensor as T
import lasagne
from params import nnet_params_dict, feats_train_folder
def set_trace():
from IPython.core.debugger import Pdb
import sys
Pdb(color_scheme='Linux').set_trace(sys._getframe().f_back)
def build_network(input_var, input_shape, nonlins, depth=2,
widths=(1000, 1000, 10), drops=(0.2, 0.5)):
"""
Parameters
----------
input_var : Theano symbolic variable or None (default: None)
Variable representing a network input.
input_shape : tuple of int or None (batchsize, rows, cols)
input_shape of the input. Any element can be set to None to indicate
that dimension is not fixed at compile time
"""
# GlorotUniform is the default mechanism for initializing weights
for i in range(depth):
if i == 0:
network = lasagne.layers.InputLayer(shape=input_shape,
input_var=input_var)
else:
network = lasagne.layers.DenseLayer(network,
widths[i],
nonlinearity=nonlins[i])
        if drops[i] is not None:
network = lasagne.layers.DropoutLayer(network, p=drops[i])
return network
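# Illustrative call (hypothetical shapes and settings, not taken from this script):
# a two-layer classifier over 784-dimensional inputs could be built with
#   build_network(input_var, (None, 784),
#                 nonlins=(None, lasagne.nonlinearities.softmax),
#                 depth=2, widths=(784, 10), drops=(0.2, None))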
def floatX(X):
return np.asarray(X, dtype=theano.config.floatX)
def zerosX(X):
return np.zeros(X, dtype=theano.config.floatX)
def init_weights(shape):
return theano.shared(floatX(np.random.randn(*shape) * 0.01))
def sgd(cost, params, gamma):
grads = T.grad(cost=cost, wrt=params)
updates = []
for p, g in zip(params, grads):
updates.append([p, p - g * gamma])
return updates
def model(X, w_h, w_o):
h = T.nnet.sigmoid(T.dot(X, w_h))
pyx = T.nnet.softmax(T.dot(h, w_o))
return pyx
def iterate_minibatches(inputs, targets, batchsize, shuffle=False):
assert len(inputs) == len(targets)
if shuffle:
indices = np.arange(len(inputs))
np.random.shuffle(indices)
for start_idx in range(0, len(inputs) - batchsize + 1, batchsize):
if shuffle:
excerpt = indices[start_idx:start_idx + batchsize]
else:
excerpt = slice(start_idx, start_idx + batchsize)
yield inputs[excerpt], targets[excerpt]
def batch_ids(batch_size, x_train, train_idx):
# change to iterator
ids = zip(range(0, len(x_train[train_idx]), batch_size),
range(batch_size, len(x_train[train_idx]), batch_size))
return ids
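# Note: batch_ids produces (start, end) offsets into x_train[train_idx]; for example,
# with batch_size=128 and 300 training rows it produces the pairs (0, 128) and
# (128, 256), dropping the final partial batch.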
verbose = True
# train on every perturbed dataset
filepaths = np.loadtxt("include_data.csv", dtype=object, delimiter=",")
for (include, train_filename, test_filename) in filepaths:
if include == '1':
        print('\nExecuting {}'.format(train_filename))
# Load training and test sets
x_train = np.load(os.path.join(feats_train_folder,
train_filename)).astype(np.float32)
y_train = x_train[:, -1].astype(int)
# y_train = (np.eye(2, dtype=np.float32)[x_train[:,-1].astype(int)])
# remove label column from x_train
x_train = x_train[:, :-1]
# Network topology
n_obs = x_train.shape[0]
n_inputs = x_train.shape[1]
n_outputs = len(np.unique(y_train))
# Cross-validation and Neural Net parameters
n_folds = nnet_params_dict['n_folds']
alphas = nnet_params_dict['alphas']
gammas = nnet_params_dict['gammas']
decay_rate = nnet_params_dict['decay_rate']
batch_sizes = nnet_params_dict['batch_sizes']
max_epoch = nnet_params_dict['max_epoch']
depth = nnet_params_dict['depth']
widths = nnet_params_dict['widths']
nonlins = nnet_params_dict['nonlins']
drops = nnet_params_dict['drops']
# Dictionary to store results
results_dict = {}
params_mat = [x for x in product(alphas, gammas, batch_sizes)]
params_mat = np.array(params_mat, dtype=theano.config.floatX)
params_mat = np.column_stack((params_mat,
zerosX(params_mat.shape[0]),
zerosX(params_mat.shape[0]),
zerosX(params_mat.shape[0])))
        for param_idx in range(params_mat.shape[0]):
# load parameters for neural network model
alpha = params_mat[param_idx, 0]
gamma = params_mat[param_idx, 1]
batch_size = int(params_mat[param_idx, 2])
shape = (batch_size, x_train.shape[1])
# choose n_hidden nodes according to ...
n_hidden = int((n_obs / depth) / (alpha*(n_inputs+n_outputs)))
for i in range(1, depth-1):
widths[i] = n_hidden
model_str = ('\nalpha {} gamma {} batch size {} '
'n_hidden {} depth {}'
'\nnonlins {}'
'\ndrops {}'.format(alpha, gamma, batch_size,
n_hidden, depth, nonlins,
drops))
            print(model_str)
# specify input and target theano data types
input_var = T.fmatrix('input')
target_var = T.ivector('target')
# build neural network model
network = build_network(input_var, shape, nonlins, depth, widths,
drops)
# create loss expression for training
"""
py_x = model(input_var, w_h, w_o)
y_x = T.argmax(py_x, axis=1)
cost = T.mean(T.nnet.categorical_crossentropy(py_x, target_var),
dtype=theano.config.floatX)
"""
prediction = lasagne.layers.get_output(network)
loss = lasagne.objectives.categorical_crossentropy(prediction,
target_var)
loss = loss.mean()
            # create parameter update expressions for training
"""
params = [w_h, w_o]
updates = sgd(cost, params, gamma=gamma)
"""
params = lasagne.layers.get_all_params(network, trainable=True)
updates = lasagne.updates.adadelta(loss, params,
learning_rate=gamma,
rho=decay_rate)
# create loss expression for validation and classification accuracy
            # Deterministic forward pass to disable dropout layers
test_prediction = lasagne.layers.get_output(network,
deterministic=True)
test_loss = lasagne.objectives.categorical_crossentropy(
test_prediction,
target_var)
test_loss = test_loss.mean()
test_acc = T.mean(T.eq(T.argmax(test_prediction, axis=1),
target_var), dtype=theano.config.floatX)
# compile functions for performing training step and returning
# corresponding training loss
train_fn = theano.function(inputs=[input_var, target_var],
outputs=loss,
updates=updates,
allow_input_downcast=True)
# compile a function to compute the validation loss and accuracy
val_fn = theano.function(inputs=[input_var, target_var],
outputs=[test_loss, test_acc],
allow_input_downcast=True)
# create kfold iterator
kf = KFold(x_train.shape[0], n_folds=n_folds)
error_rates = []
val_losses = []
running_time = []
fold = 1
for train_idx, val_idx in kf:
start_time = time.time()
for i in range(max_epoch):
train_err = 0
train_batches = 0
for start, end in batch_ids(batch_size, x_train,
train_idx):
train_err += train_fn(x_train[train_idx][start:end],
y_train[train_idx][start:end])
train_batches += 1
val_err = 0
val_acc = 0
val_batches = 0
                    for start, end in batch_ids(batch_size, x_train,
                                                val_idx):
                        err, acc = val_fn(x_train[val_idx][start:end],
                                          y_train[val_idx][start:end])
val_err += err
val_acc += acc
val_batches += 1
error_rate = (1 - (val_acc / val_batches)) * 100
val_loss = val_err / val_batches
print("Final results:")
print(" val loss:\t\t\t{:.6f}".format(val_loss))
print(" val error rate:\t\t{:.2f} %".format(error_rate))
error_rates.append(error_rate)
val_losses.append(val_loss)
running_time.append(np.around((time.time() -
start_time) / 60., 1))
fold += 1
params_mat[param_idx, 3] = np.mean(error_rates)
params_mat[param_idx, 4] = np.mean(val_losses)
params_mat[param_idx, 5] = np.mean(running_time)
print('alpha {} gamma {} batchsize {} error rate {} '
'validation cost {} '
'running time {}'.format(params_mat[param_idx, 0],
params_mat[param_idx, 1],
params_mat[param_idx, 2],
params_mat[param_idx, 3],
params_mat[param_idx, 4],
params_mat[param_idx, 5]))
# Save params matrix to disk
params_mat.dump(('results/train/{}'
'_results.np').format(train_filename[:-3]))
|
mit
|
maxlikely/scikit-learn
|
sklearn/decomposition/tests/test_factor_analysis.py
|
7
|
1674
|
# Author: Christian Osendorfer <[email protected]>
# Alexandre Gramfort <[email protected]>
# Licence: BSD3
import numpy as np
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_almost_equal
from sklearn.decomposition import FactorAnalysis
def test_factor_analysis():
"""Test FactorAnalysis ability to recover the data covariance structure
"""
rng = np.random.RandomState(0)
n_samples, n_features, n_components = 20, 5, 3
# Some random settings for the generative model
W = rng.randn(n_components, n_features)
# latent variable of dim 3, 20 of it
h = rng.randn(n_samples, n_components)
# using gamma to model different noise variance
# per component
noise = rng.gamma(1, size=n_features) * rng.randn(n_samples, n_features)
# generate observations
# wlog, mean is 0
X = np.dot(h, W) + noise
fa = FactorAnalysis(n_components=n_components)
fa.fit(X)
X_t = fa.transform(X)
assert_true(X_t.shape == (n_samples, n_components))
assert_almost_equal(fa.loglike_[-1], fa.score(X).sum())
    # Make sure that the log likelihood increases at each iteration
assert_true(np.all(np.diff(fa.loglike_) > 0.))
# Sample Covariance
scov = np.cov(X, rowvar=0., bias=1.)
# Model Covariance
mcov = fa.get_covariance()
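    # get_covariance() returns the covariance implied by the fitted model
    # (the loadings plus the diagonal noise variance), which should be close
    # to the sample covariance when the fit is good.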
diff = np.sum(np.abs(scov - mcov)) / W.size
assert_true(diff < 0.1, "Mean absolute difference is %f" % diff)
fa = FactorAnalysis(n_components=n_components,
noise_variance_init=np.ones(n_features))
assert_raises(ValueError, fa.fit, X[:, :2])
|
bsd-3-clause
|
xya/sms-tools
|
lectures/06-Harmonic-model/plots-code/oboe-spectrum.py
|
24
|
1032
|
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming, triang, blackmanharris
import math
import sys, os, functools, time
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import dftModel as DFT
import utilFunctions as UF
(fs, x) = UF.wavread('../../../sounds/oboe-A4.wav')
w = np.blackman(651)
N = 1024
pin = 5000
hM1 = int(math.floor((w.size+1)/2))
hM2 = int(math.floor(w.size/2))
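# hM1 and hM2 are the two half-window sizes around the analysis point; pin is the
# sample index at the centre of the frame, so x1 below spans exactly one window of x.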
x1 = x[pin-hM1:pin+hM2]
mX, pX = DFT.dftAnal(x1, w, N)
plt.figure(1, figsize=(9, 7))
plt.subplot(311)
plt.plot(np.arange(-hM1, hM2)/float(fs), x1, lw=1.5)
plt.axis([-hM1/float(fs), hM2/float(fs), min(x1), max(x1)])
plt.title('x (oboe-A4.wav)')
plt.subplot(3,1,2)
plt.plot(fs*np.arange(mX.size)/float(N), mX, 'r', lw=1.5)
plt.axis([0,fs/3,-90,max(mX)])
plt.title ('mX')
plt.subplot(3,1,3)
plt.plot(fs*np.arange(pX.size)/float(N), pX, 'c', lw=1.5)
plt.axis([0,fs/3,min(pX),18])
plt.title ('pX')
plt.tight_layout()
plt.savefig('oboe-spectrum.png')
plt.show()
|
agpl-3.0
|
apache/incubator-airflow
|
airflow/providers/google/cloud/hooks/bigquery.py
|
3
|
123182
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""
This module contains a BigQuery Hook, as well as a very basic PEP 249
implementation for BigQuery.
"""
import hashlib
import json
import logging
import time
import warnings
from copy import deepcopy
from datetime import datetime, timedelta
from typing import Any, Dict, Iterable, List, Mapping, NoReturn, Optional, Sequence, Tuple, Type, Union
from google.api_core.retry import Retry
from google.cloud.bigquery import (
DEFAULT_RETRY,
Client,
CopyJob,
ExternalConfig,
ExtractJob,
LoadJob,
QueryJob,
SchemaField,
)
from google.cloud.bigquery.dataset import AccessEntry, Dataset, DatasetListItem, DatasetReference
from google.cloud.bigquery.table import EncryptionConfiguration, Row, Table, TableReference
from google.cloud.exceptions import NotFound
from googleapiclient.discovery import Resource, build
from pandas import DataFrame
from pandas_gbq import read_gbq
from pandas_gbq.gbq import (
GbqConnector,
_check_google_client_version as gbq_check_google_client_version,
_test_google_api_imports as gbq_test_google_api_imports,
)
from airflow.exceptions import AirflowException
from airflow.hooks.dbapi import DbApiHook
from airflow.providers.google.common.hooks.base_google import GoogleBaseHook
from airflow.utils.helpers import convert_camel_to_snake
from airflow.utils.log.logging_mixin import LoggingMixin
log = logging.getLogger(__name__)
BigQueryJob = Union[CopyJob, QueryJob, LoadJob, ExtractJob]
# pylint: disable=too-many-public-methods
class BigQueryHook(GoogleBaseHook, DbApiHook):
"""Interact with BigQuery. This hook uses the Google Cloud connection."""
conn_name_attr = 'gcp_conn_id'
default_conn_name = 'google_cloud_default'
conn_type = 'google_cloud_platform'
hook_name = 'Google Cloud'
def __init__(
self,
gcp_conn_id: str = default_conn_name,
delegate_to: Optional[str] = None,
use_legacy_sql: bool = True,
location: Optional[str] = None,
bigquery_conn_id: Optional[str] = None,
api_resource_configs: Optional[Dict] = None,
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
) -> None:
# To preserve backward compatibility
# TODO: remove one day
if bigquery_conn_id:
warnings.warn(
"The bigquery_conn_id parameter has been deprecated. You should pass "
"the gcp_conn_id parameter.",
DeprecationWarning,
stacklevel=2,
)
gcp_conn_id = bigquery_conn_id
super().__init__(
gcp_conn_id=gcp_conn_id,
delegate_to=delegate_to,
impersonation_chain=impersonation_chain,
)
self.use_legacy_sql = use_legacy_sql
self.location = location
self.running_job_id = None # type: Optional[str]
        self.api_resource_configs = api_resource_configs if api_resource_configs else {}  # type: Dict
def get_conn(self) -> "BigQueryConnection":
"""Returns a BigQuery PEP 249 connection object."""
service = self.get_service()
return BigQueryConnection(
service=service,
project_id=self.project_id,
use_legacy_sql=self.use_legacy_sql,
location=self.location,
num_retries=self.num_retries,
hook=self,
)
def get_service(self) -> Resource:
"""Returns a BigQuery service object."""
warnings.warn(
"This method will be deprecated. Please use `BigQueryHook.get_client` method", DeprecationWarning
)
http_authorized = self._authorize()
return build('bigquery', 'v2', http=http_authorized, cache_discovery=False)
def get_client(self, project_id: Optional[str] = None, location: Optional[str] = None) -> Client:
"""
Returns authenticated BigQuery Client.
:param project_id: Project ID for the project which the client acts on behalf of.
:type project_id: str
:param location: Default location for jobs / datasets / tables.
:type location: str
        :return: An authenticated BigQuery ``Client``.
"""
return Client(
client_info=self.client_info,
project=project_id,
location=location,
credentials=self._get_credentials(),
)
@staticmethod
def _resolve_table_reference(
table_resource: Dict[str, Any],
project_id: Optional[str] = None,
dataset_id: Optional[str] = None,
table_id: Optional[str] = None,
) -> Dict[str, Any]:
try:
# Check if tableReference is present and is valid
TableReference.from_api_repr(table_resource["tableReference"])
except KeyError:
# Something is wrong so we try to build the reference
table_resource["tableReference"] = table_resource.get("tableReference", {})
values = [("projectId", project_id), ("tableId", table_id), ("datasetId", dataset_id)]
for key, value in values:
                # Check if a value is already present; if not, use the provided one
resolved_value = table_resource["tableReference"].get(key, value)
if not resolved_value:
# If there's no value in tableReference and provided one is None raise error
raise AirflowException(
f"Table resource is missing proper `tableReference` and `{key}` is None"
)
table_resource["tableReference"][key] = resolved_value
return table_resource
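    # Sketch of the resolution above (hypothetical identifiers): given table_resource={}
    # together with project_id='my-project', dataset_id='my_dataset' and table_id='my_table',
    # the method fills in
    # {'tableReference': {'projectId': 'my-project', 'tableId': 'my_table', 'datasetId': 'my_dataset'}}.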
def insert_rows(
self,
table: Any,
rows: Any,
target_fields: Any = None,
commit_every: Any = 1000,
replace: Any = False,
**kwargs,
) -> None:
"""
Insertion is currently unsupported. Theoretically, you could use
BigQuery's streaming API to insert rows into a table, but this hasn't
been implemented.
"""
raise NotImplementedError()
def get_pandas_df(
self,
sql: str,
parameters: Optional[Union[Iterable, Mapping]] = None,
dialect: Optional[str] = None,
**kwargs,
) -> DataFrame:
"""
Returns a Pandas DataFrame for the results produced by a BigQuery
query. The DbApiHook method must be overridden because Pandas
doesn't support PEP 249 connections, except for SQLite. See:
https://github.com/pydata/pandas/blob/master/pandas/io/sql.py#L447
https://github.com/pydata/pandas/issues/6900
:param sql: The BigQuery SQL to execute.
:type sql: str
:param parameters: The parameters to render the SQL query with (not
used, leave to override superclass method)
:type parameters: mapping or iterable
        :param dialect: Dialect of BigQuery SQL – legacy SQL or standard SQL;
            defaults to `self.use_legacy_sql` if not specified
:type dialect: str in {'legacy', 'standard'}
:param kwargs: (optional) passed into pandas_gbq.read_gbq method
:type kwargs: dict
"""
if dialect is None:
dialect = 'legacy' if self.use_legacy_sql else 'standard'
credentials, project_id = self._get_credentials_and_project_id()
return read_gbq(
sql, project_id=project_id, dialect=dialect, verbose=False, credentials=credentials, **kwargs
)
@GoogleBaseHook.fallback_to_default_project_id
def table_exists(self, dataset_id: str, table_id: str, project_id: str) -> bool:
"""
Checks for the existence of a table in Google BigQuery.
:param project_id: The Google cloud project in which to look for the
table. The connection supplied to the hook must provide access to
the specified project.
:type project_id: str
:param dataset_id: The name of the dataset in which to look for the
table.
:type dataset_id: str
:param table_id: The name of the table to check the existence of.
:type table_id: str
"""
table_reference = TableReference(DatasetReference(project_id, dataset_id), table_id)
try:
self.get_client(project_id=project_id).get_table(table_reference)
return True
except NotFound:
return False
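    # Usage sketch (hypothetical identifiers):
    #   hook = BigQueryHook()
    #   hook.table_exists(dataset_id='my_dataset', table_id='my_table', project_id='my-project')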
@GoogleBaseHook.fallback_to_default_project_id
def table_partition_exists(
self, dataset_id: str, table_id: str, partition_id: str, project_id: str
) -> bool:
"""
Checks for the existence of a partition in a table in Google BigQuery.
:param project_id: The Google cloud project in which to look for the
table. The connection supplied to the hook must provide access to
the specified project.
:type project_id: str
:param dataset_id: The name of the dataset in which to look for the
table.
:type dataset_id: str
:param table_id: The name of the table to check the existence of.
:type table_id: str
:param partition_id: The name of the partition to check the existence of.
:type partition_id: str
"""
table_reference = TableReference(DatasetReference(project_id, dataset_id), table_id)
try:
return partition_id in self.get_client(project_id=project_id).list_partitions(table_reference)
except NotFound:
return False
@GoogleBaseHook.fallback_to_default_project_id
def create_empty_table( # pylint: disable=too-many-arguments
self,
project_id: Optional[str] = None,
dataset_id: Optional[str] = None,
table_id: Optional[str] = None,
table_resource: Optional[Dict[str, Any]] = None,
schema_fields: Optional[List] = None,
time_partitioning: Optional[Dict] = None,
cluster_fields: Optional[List[str]] = None,
labels: Optional[Dict] = None,
view: Optional[Dict] = None,
encryption_configuration: Optional[Dict] = None,
retry: Optional[Retry] = DEFAULT_RETRY,
num_retries: Optional[int] = None,
location: Optional[str] = None,
exists_ok: bool = True,
) -> Table:
"""
Creates a new, empty table in the dataset.
        To create a view, which is defined by a SQL query, pass a dictionary to the 'view' kwarg.
:param project_id: The project to create the table into.
:type project_id: str
:param dataset_id: The dataset to create the table into.
:type dataset_id: str
:param table_id: The Name of the table to be created.
:type table_id: str
:param table_resource: Table resource as described in documentation:
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#Table
            If provided, all other parameters are ignored.
:type table_resource: Dict[str, Any]
:param schema_fields: If set, the schema field list as defined here:
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.schema
:type schema_fields: list
:param labels: a dictionary containing labels for the table, passed to BigQuery
:type labels: dict
:param retry: Optional. How to retry the RPC.
:type retry: google.api_core.retry.Retry
**Example**: ::
schema_fields=[{"name": "emp_name", "type": "STRING", "mode": "REQUIRED"},
{"name": "salary", "type": "INTEGER", "mode": "NULLABLE"}]
:param time_partitioning: configure optional time partitioning fields i.e.
partition by field, type and expiration as per API specifications.
.. seealso::
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#timePartitioning
:type time_partitioning: dict
:param cluster_fields: [Optional] The fields used for clustering.
BigQuery supports clustering for both partitioned and
non-partitioned tables.
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#clustering.fields
:type cluster_fields: list
:param view: [Optional] A dictionary containing definition for the view.
If set, it will create a view instead of a table:
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#ViewDefinition
:type view: dict
**Example**: ::
view = {
"query": "SELECT * FROM `test-project-id.test_dataset_id.test_table_prefix*` LIMIT 1000",
"useLegacySql": False
}
:param encryption_configuration: [Optional] Custom encryption configuration (e.g., Cloud KMS keys).
**Example**: ::
encryption_configuration = {
"kmsKeyName": "projects/testp/locations/us/keyRings/test-kr/cryptoKeys/test-key"
}
:type encryption_configuration: dict
:param num_retries: Maximum number of retries in case of connection problems.
:type num_retries: int
:param exists_ok: If ``True``, ignore "already exists" errors when creating the table.
:type exists_ok: bool
:return: Created table
"""
if num_retries:
warnings.warn("Parameter `num_retries` is deprecated", DeprecationWarning)
_table_resource: Dict[str, Any] = {}
if self.location:
_table_resource['location'] = self.location
if schema_fields:
_table_resource['schema'] = {'fields': schema_fields}
if time_partitioning:
_table_resource['timePartitioning'] = time_partitioning
if cluster_fields:
_table_resource['clustering'] = {'fields': cluster_fields}
if labels:
_table_resource['labels'] = labels
if view:
_table_resource['view'] = view
if encryption_configuration:
_table_resource["encryptionConfiguration"] = encryption_configuration
table_resource = table_resource or _table_resource
table_resource = self._resolve_table_reference(
table_resource=table_resource,
project_id=project_id,
dataset_id=dataset_id,
table_id=table_id,
)
table = Table.from_api_repr(table_resource)
return self.get_client(project_id=project_id, location=location).create_table(
table=table, exists_ok=exists_ok, retry=retry
)
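    # Usage sketch (hypothetical identifiers, schema borrowed from the docstring example):
    #   hook.create_empty_table(
    #       project_id='my-project', dataset_id='my_dataset', table_id='my_table',
    #       schema_fields=[{"name": "emp_name", "type": "STRING", "mode": "REQUIRED"}])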
@GoogleBaseHook.fallback_to_default_project_id
def create_empty_dataset(
self,
dataset_id: Optional[str] = None,
project_id: Optional[str] = None,
location: Optional[str] = None,
dataset_reference: Optional[Dict[str, Any]] = None,
exists_ok: bool = True,
) -> None:
"""
Create a new empty dataset:
https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets/insert
        :param project_id: The name of the project where we want to create
            an empty dataset. Not needed if projectId is provided in dataset_reference.
        :type project_id: str
        :param dataset_id: The id of the dataset. Not needed if datasetId is provided in dataset_reference.
:type dataset_id: str
:param location: (Optional) The geographic location where the dataset should reside.
There is no default value but the dataset will be created in US if nothing is provided.
:type location: str
:param dataset_reference: Dataset reference that could be provided with request body. More info:
https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets#resource
:type dataset_reference: dict
:param exists_ok: If ``True``, ignore "already exists" errors when creating the DATASET.
:type exists_ok: bool
"""
dataset_reference = dataset_reference or {"datasetReference": {}}
for param, value in zip(["datasetId", "projectId"], [dataset_id, project_id]):
specified_param = dataset_reference["datasetReference"].get(param)
if specified_param:
if value:
self.log.info(
"`%s` was provided in both `dataset_reference` and as `%s`. "
"Using value from `dataset_reference`",
param,
convert_camel_to_snake(param),
)
continue # use specified value
if not value:
raise ValueError(
f"Please specify `{param}` either in `dataset_reference` "
f"or by providing `{convert_camel_to_snake(param)}`",
)
# dataset_reference has no param but we can fallback to default value
self.log.info(
"%s was not specified in `dataset_reference`. Will use default value %s.", param, value
)
dataset_reference["datasetReference"][param] = value
location = location or self.location
if location:
dataset_reference["location"] = dataset_reference.get("location", location)
dataset: Dataset = Dataset.from_api_repr(dataset_reference)
self.log.info('Creating dataset: %s in project: %s ', dataset.dataset_id, dataset.project)
self.get_client(location=location).create_dataset(dataset=dataset, exists_ok=exists_ok)
self.log.info('Dataset created successfully.')
@GoogleBaseHook.fallback_to_default_project_id
def get_dataset_tables(
self,
dataset_id: str,
project_id: Optional[str] = None,
max_results: Optional[int] = None,
retry: Retry = DEFAULT_RETRY,
) -> List[Dict[str, Any]]:
"""
Get the list of tables for a given dataset.
For more information, see:
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables/list
:param dataset_id: the dataset ID of the requested dataset.
:type dataset_id: str
:param project_id: (Optional) the project of the requested dataset. If None,
self.project_id will be used.
:type project_id: str
:param max_results: (Optional) the maximum number of tables to return.
:type max_results: int
:param retry: How to retry the RPC.
:type retry: google.api_core.retry.Retry
:return: List of tables associated with the dataset.
"""
self.log.info('Start getting tables list from dataset: %s.%s', project_id, dataset_id)
tables = self.get_client().list_tables(
dataset=DatasetReference(project=project_id, dataset_id=dataset_id),
max_results=max_results,
retry=retry,
)
# Convert to a list (consumes all values)
return [t.reference.to_api_repr() for t in tables]
@GoogleBaseHook.fallback_to_default_project_id
def delete_dataset(
self,
dataset_id: str,
project_id: Optional[str] = None,
delete_contents: bool = False,
retry: Retry = DEFAULT_RETRY,
) -> None:
"""
Delete a dataset of Big query in your project.
:param project_id: The name of the project where we have the dataset.
:type project_id: str
        :param dataset_id: The dataset to be deleted.
:type dataset_id: str
:param delete_contents: If True, delete all the tables in the dataset.
If False and the dataset contains tables, the request will fail.
:type delete_contents: bool
:param retry: How to retry the RPC.
:type retry: google.api_core.retry.Retry
"""
self.log.info('Deleting from project: %s Dataset:%s', project_id, dataset_id)
self.get_client(project_id=project_id).delete_dataset(
dataset=DatasetReference(project=project_id, dataset_id=dataset_id),
delete_contents=delete_contents,
retry=retry,
not_found_ok=True,
)
@GoogleBaseHook.fallback_to_default_project_id
def create_external_table( # pylint: disable=too-many-locals,too-many-arguments
self,
external_project_dataset_table: str,
schema_fields: List,
source_uris: List,
source_format: str = 'CSV',
autodetect: bool = False,
compression: str = 'NONE',
ignore_unknown_values: bool = False,
max_bad_records: int = 0,
skip_leading_rows: int = 0,
field_delimiter: str = ',',
quote_character: Optional[str] = None,
allow_quoted_newlines: bool = False,
allow_jagged_rows: bool = False,
encoding: str = "UTF-8",
src_fmt_configs: Optional[Dict] = None,
labels: Optional[Dict] = None,
encryption_configuration: Optional[Dict] = None,
location: Optional[str] = None,
project_id: Optional[str] = None,
) -> None:
"""
Creates a new external table in the dataset with the data from Google
Cloud Storage. See here:
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#resource
for more details about these parameters.
:param external_project_dataset_table:
The dotted ``(<project>.|<project>:)<dataset>.<table>($<partition>)`` BigQuery
table name to create external table.
If ``<project>`` is not included, project will be the
project defined in the connection json.
:type external_project_dataset_table: str
:param schema_fields: The schema field list as defined here:
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#resource
:type schema_fields: list
:param source_uris: The source Google Cloud
Storage URI (e.g. gs://some-bucket/some-file.txt). A single wild
            card per object name can be used.
:type source_uris: list
:param source_format: File format to export.
:type source_format: str
:param autodetect: Try to detect schema and format options automatically.
Any option specified explicitly will be honored.
:type autodetect: bool
:param compression: [Optional] The compression type of the data source.
Possible values include GZIP and NONE.
The default value is NONE.
This setting is ignored for Google Cloud Bigtable,
Google Cloud Datastore backups and Avro formats.
:type compression: str
:param ignore_unknown_values: [Optional] Indicates if BigQuery should allow
extra values that are not represented in the table schema.
If true, the extra values are ignored. If false, records with extra columns
are treated as bad records, and if there are too many bad records, an
invalid error is returned in the job result.
:type ignore_unknown_values: bool
:param max_bad_records: The maximum number of bad records that BigQuery can
ignore when running the job.
:type max_bad_records: int
:param skip_leading_rows: Number of rows to skip when loading from a CSV.
:type skip_leading_rows: int
:param field_delimiter: The delimiter to use when loading from a CSV.
:type field_delimiter: str
:param quote_character: The value that is used to quote data sections in a CSV
file.
:type quote_character: str
:param allow_quoted_newlines: Whether to allow quoted newlines (true) or not
(false).
:type allow_quoted_newlines: bool
:param allow_jagged_rows: Accept rows that are missing trailing optional columns.
The missing values are treated as nulls. If false, records with missing
trailing columns are treated as bad records, and if there are too many bad
records, an invalid error is returned in the job result. Only applicable when
source_format is CSV.
:type allow_jagged_rows: bool
:param encoding: The character encoding of the data. See:
.. seealso::
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#externalDataConfiguration.csvOptions.encoding
:type encoding: str
:param src_fmt_configs: configure optional fields specific to the source format
:type src_fmt_configs: dict
:param labels: a dictionary containing labels for the table, passed to BigQuery
:type labels: dict
:param encryption_configuration: [Optional] Custom encryption configuration (e.g., Cloud KMS keys).
**Example**: ::
encryption_configuration = {
"kmsKeyName": "projects/testp/locations/us/keyRings/test-kr/cryptoKeys/test-key"
}
:type encryption_configuration: dict
"""
warnings.warn(
"This method is deprecated. Please use `BigQueryHook.create_empty_table` method with"
"pass passing the `table_resource` object. This gives more flexibility than this method.",
DeprecationWarning,
)
location = location or self.location
src_fmt_configs = src_fmt_configs or {}
source_format = source_format.upper()
compression = compression.upper()
external_config_api_repr = {
'autodetect': autodetect,
'sourceFormat': source_format,
'sourceUris': source_uris,
'compression': compression,
'ignoreUnknownValues': ignore_unknown_values,
}
# if following fields are not specified in src_fmt_configs,
# honor the top-level params for backward-compatibility
backward_compatibility_configs = {
'skipLeadingRows': skip_leading_rows,
'fieldDelimiter': field_delimiter,
'quote': quote_character,
'allowQuotedNewlines': allow_quoted_newlines,
'allowJaggedRows': allow_jagged_rows,
'encoding': encoding,
}
src_fmt_to_param_mapping = {'CSV': 'csvOptions', 'GOOGLE_SHEETS': 'googleSheetsOptions'}
src_fmt_to_configs_mapping = {
'csvOptions': [
'allowJaggedRows',
'allowQuotedNewlines',
'fieldDelimiter',
'skipLeadingRows',
'quote',
'encoding',
],
'googleSheetsOptions': ['skipLeadingRows'],
}
if source_format in src_fmt_to_param_mapping.keys():
valid_configs = src_fmt_to_configs_mapping[src_fmt_to_param_mapping[source_format]]
src_fmt_configs = _validate_src_fmt_configs(
source_format, src_fmt_configs, valid_configs, backward_compatibility_configs
)
external_config_api_repr[src_fmt_to_param_mapping[source_format]] = src_fmt_configs
# build external config
external_config = ExternalConfig.from_api_repr(external_config_api_repr)
if schema_fields:
external_config.schema = [SchemaField.from_api_repr(f) for f in schema_fields]
if max_bad_records:
external_config.max_bad_records = max_bad_records
# build table definition
table = Table(table_ref=TableReference.from_string(external_project_dataset_table, project_id))
table.external_data_configuration = external_config
if labels:
table.labels = labels
if encryption_configuration:
table.encryption_configuration = EncryptionConfiguration.from_api_repr(encryption_configuration)
self.log.info('Creating external table: %s', external_project_dataset_table)
self.create_empty_table(
table_resource=table.to_api_repr(), project_id=project_id, location=location, exists_ok=True
)
self.log.info('External table created successfully: %s', external_project_dataset_table)
@GoogleBaseHook.fallback_to_default_project_id
def update_table(
self,
table_resource: Dict[str, Any],
fields: Optional[List[str]] = None,
dataset_id: Optional[str] = None,
table_id: Optional[str] = None,
project_id: Optional[str] = None,
) -> Dict[str, Any]:
"""
Change some fields of a table.
Use ``fields`` to specify which fields to update. At least one field
must be provided. If a field is listed in ``fields`` and is ``None``
in ``table``, the field value will be deleted.
If ``table.etag`` is not ``None``, the update will only succeed if
the table on the server has the same ETag. Thus reading a table with
``get_table``, changing its fields, and then passing it to
``update_table`` will ensure that the changes will only be saved if
no modifications to the table occurred since the read.
:param project_id: The project to create the table into.
:type project_id: str
:param dataset_id: The dataset to create the table into.
:type dataset_id: str
:param table_id: The Name of the table to be created.
:type table_id: str
:param table_resource: Table resource as described in documentation:
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#Table
            The table has to contain ``tableReference``, or ``project_id``, ``dataset_id`` and ``table_id``
have to be provided.
:type table_resource: Dict[str, Any]
:param fields: The fields of ``table`` to change, spelled as the Table
properties (e.g. "friendly_name").
:type fields: List[str]
"""
fields = fields or list(table_resource.keys())
table_resource = self._resolve_table_reference(
table_resource=table_resource, project_id=project_id, dataset_id=dataset_id, table_id=table_id
)
table = Table.from_api_repr(table_resource)
self.log.info('Updating table: %s', table_resource["tableReference"])
table_object = self.get_client(project_id=project_id).update_table(table=table, fields=fields)
self.log.info('Table %s.%s.%s updated successfully', project_id, dataset_id, table_id)
return table_object.to_api_repr()
@GoogleBaseHook.fallback_to_default_project_id
def patch_table( # pylint: disable=too-many-arguments
self,
dataset_id: str,
table_id: str,
project_id: Optional[str] = None,
description: Optional[str] = None,
expiration_time: Optional[int] = None,
external_data_configuration: Optional[Dict] = None,
friendly_name: Optional[str] = None,
labels: Optional[Dict] = None,
schema: Optional[List] = None,
time_partitioning: Optional[Dict] = None,
view: Optional[Dict] = None,
require_partition_filter: Optional[bool] = None,
encryption_configuration: Optional[Dict] = None,
) -> None:
"""
Patch information in an existing table.
It only updates fields that are provided in the request object.
Reference: https://cloud.google.com/bigquery/docs/reference/rest/v2/tables/patch
:param dataset_id: The dataset containing the table to be patched.
:type dataset_id: str
:param table_id: The Name of the table to be patched.
:type table_id: str
:param project_id: The project containing the table to be patched.
:type project_id: str
:param description: [Optional] A user-friendly description of this table.
:type description: str
:param expiration_time: [Optional] The time when this table expires,
in milliseconds since the epoch.
:type expiration_time: int
:param external_data_configuration: [Optional] A dictionary containing
properties of a table stored outside of BigQuery.
:type external_data_configuration: dict
:param friendly_name: [Optional] A descriptive name for this table.
:type friendly_name: str
:param labels: [Optional] A dictionary containing labels associated with this table.
:type labels: dict
:param schema: [Optional] If set, the schema field list as defined here:
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.schema
The supported schema modifications and unsupported schema modification are listed here:
https://cloud.google.com/bigquery/docs/managing-table-schemas
**Example**: ::
schema=[{"name": "emp_name", "type": "STRING", "mode": "REQUIRED"},
{"name": "salary", "type": "INTEGER", "mode": "NULLABLE"}]
:type schema: list
:param time_partitioning: [Optional] A dictionary containing time-based partitioning
definition for the table.
:type time_partitioning: dict
:param view: [Optional] A dictionary containing definition for the view.
If set, it will patch a view instead of a table:
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#ViewDefinition
**Example**: ::
view = {
"query": "SELECT * FROM `test-project-id.test_dataset_id.test_table_prefix*` LIMIT 500",
"useLegacySql": False
}
:type view: dict
        :param require_partition_filter: [Optional] If true, queries over this table require a
            partition filter. If false, queries over the table do not require a partition filter.
:type require_partition_filter: bool
:param encryption_configuration: [Optional] Custom encryption configuration (e.g., Cloud KMS keys).
**Example**: ::
encryption_configuration = {
"kmsKeyName": "projects/testp/locations/us/keyRings/test-kr/cryptoKeys/test-key"
}
:type encryption_configuration: dict
"""
warnings.warn(
"This method is deprecated, please use ``BigQueryHook.update_table`` method.",
DeprecationWarning,
)
table_resource: Dict[str, Any] = {}
if description is not None:
table_resource['description'] = description
if expiration_time is not None:
table_resource['expirationTime'] = expiration_time
if external_data_configuration:
table_resource['externalDataConfiguration'] = external_data_configuration
if friendly_name is not None:
table_resource['friendlyName'] = friendly_name
if labels:
table_resource['labels'] = labels
if schema:
table_resource['schema'] = {'fields': schema}
if time_partitioning:
table_resource['timePartitioning'] = time_partitioning
if view:
table_resource['view'] = view
if require_partition_filter is not None:
table_resource['requirePartitionFilter'] = require_partition_filter
if encryption_configuration:
table_resource["encryptionConfiguration"] = encryption_configuration
self.update_table(
table_resource=table_resource,
fields=list(table_resource.keys()),
project_id=project_id,
dataset_id=dataset_id,
table_id=table_id,
)
@GoogleBaseHook.fallback_to_default_project_id
def insert_all(
self,
project_id: str,
dataset_id: str,
table_id: str,
rows: List,
ignore_unknown_values: bool = False,
skip_invalid_rows: bool = False,
fail_on_error: bool = False,
) -> None:
"""
Method to stream data into BigQuery one record at a time without needing
to run a load job
.. seealso::
For more information, see:
https://cloud.google.com/bigquery/docs/reference/rest/v2/tabledata/insertAll
:param project_id: The name of the project where we have the table
:type project_id: str
:param dataset_id: The name of the dataset where we have the table
:type dataset_id: str
:param table_id: The name of the table
:type table_id: str
:param rows: the rows to insert
:type rows: list
        **Example of rows**:
rows=[{"json": {"a_key": "a_value_0"}}, {"json": {"a_key": "a_value_1"}}]
:param ignore_unknown_values: [Optional] Accept rows that contain values
that do not match the schema. The unknown values are ignored.
The default value is false, which treats unknown values as errors.
:type ignore_unknown_values: bool
:param skip_invalid_rows: [Optional] Insert all valid rows of a request,
even if invalid rows exist. The default value is false, which causes
the entire request to fail if any invalid rows exist.
:type skip_invalid_rows: bool
:param fail_on_error: [Optional] Force the task to fail if any errors occur.
The default value is false, which indicates the task should not fail
even if any insertion errors occur.
:type fail_on_error: bool
"""
self.log.info('Inserting %s row(s) into table %s:%s.%s', len(rows), project_id, dataset_id, table_id)
table_ref = TableReference(dataset_ref=DatasetReference(project_id, dataset_id), table_id=table_id)
bq_client = self.get_client(project_id=project_id)
table = bq_client.get_table(table_ref)
errors = bq_client.insert_rows(
table=table,
rows=rows,
ignore_unknown_values=ignore_unknown_values,
skip_invalid_rows=skip_invalid_rows,
)
if errors:
error_msg = f"{len(errors)} insert error(s) occurred. Details: {errors}"
self.log.error(error_msg)
if fail_on_error:
raise AirflowException(f'BigQuery job failed. Error was: {error_msg}')
else:
self.log.info('All row(s) inserted successfully: %s:%s.%s', project_id, dataset_id, table_id)
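    # Usage sketch (hypothetical identifiers, rows formatted as in the docstring example):
    #   hook.insert_all(project_id='my-project', dataset_id='my_dataset', table_id='my_table',
    #                   rows=[{"json": {"a_key": "a_value_0"}}], fail_on_error=True)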
@GoogleBaseHook.fallback_to_default_project_id
def update_dataset(
self,
fields: Sequence[str],
dataset_resource: Dict[str, Any],
dataset_id: Optional[str] = None,
project_id: Optional[str] = None,
retry: Retry = DEFAULT_RETRY,
) -> Dataset:
"""
Change some fields of a dataset.
Use ``fields`` to specify which fields to update. At least one field
must be provided. If a field is listed in ``fields`` and is ``None`` in
``dataset``, it will be deleted.
If ``dataset.etag`` is not ``None``, the update will only
succeed if the dataset on the server has the same ETag. Thus
reading a dataset with ``get_dataset``, changing its fields,
and then passing it to ``update_dataset`` will ensure that the changes
will only be saved if no modifications to the dataset occurred
since the read.
:param dataset_resource: Dataset resource that will be provided
in request body.
https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets#resource
:type dataset_resource: dict
:param dataset_id: The id of the dataset.
:type dataset_id: str
:param fields: The properties of ``dataset`` to change (e.g. "friendly_name").
:type fields: Sequence[str]
:param project_id: The Google Cloud Project ID
:type project_id: str
:param retry: How to retry the RPC.
:type retry: google.api_core.retry.Retry
"""
dataset_resource["datasetReference"] = dataset_resource.get("datasetReference", {})
for key, value in zip(["datasetId", "projectId"], [dataset_id, project_id]):
spec_value = dataset_resource["datasetReference"].get(key)
if value and not spec_value:
dataset_resource["datasetReference"][key] = value
self.log.info('Start updating dataset')
dataset = self.get_client(project_id=project_id).update_dataset(
dataset=Dataset.from_api_repr(dataset_resource),
fields=fields,
retry=retry,
)
self.log.info("Dataset successfully updated: %s", dataset)
return dataset
def patch_dataset(
self, dataset_id: str, dataset_resource: Dict, project_id: Optional[str] = None
) -> Dict:
"""
Patches information in an existing dataset.
It only replaces fields that are provided in the submitted dataset resource.
More info:
https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets/patch
:param dataset_id: The BigQuery Dataset ID
:type dataset_id: str
:param dataset_resource: Dataset resource that will be provided
in request body.
https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets#resource
:type dataset_resource: dict
:param project_id: The Google Cloud Project ID
:type project_id: str
:rtype: dataset
https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets#resource
"""
warnings.warn("This method is deprecated. Please use ``update_dataset``.", DeprecationWarning)
project_id = project_id or self.project_id
if not dataset_id or not isinstance(dataset_id, str):
raise ValueError(
"dataset_id argument must be provided and has "
"a type 'str'. You provided: {}".format(dataset_id)
)
service = self.get_service()
dataset_project_id = project_id or self.project_id
self.log.info('Start patching dataset: %s:%s', dataset_project_id, dataset_id)
dataset = (
service.datasets() # pylint: disable=no-member
.patch(
datasetId=dataset_id,
projectId=dataset_project_id,
body=dataset_resource,
)
.execute(num_retries=self.num_retries)
)
self.log.info("Dataset successfully patched: %s", dataset)
return dataset
def get_dataset_tables_list(
self,
dataset_id: str,
project_id: Optional[str] = None,
table_prefix: Optional[str] = None,
max_results: Optional[int] = None,
) -> List[Dict[str, Any]]:
"""
        Method returns the list of tables for a given BigQuery dataset. If a table prefix is
        specified, only tables whose names begin with it are returned.
For more information, see:
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables/list
:param dataset_id: The BigQuery Dataset ID
:type dataset_id: str
:param project_id: The Google Cloud Project ID
:type project_id: str
:param table_prefix: Tables must begin by this prefix to be returned (case sensitive)
:type table_prefix: str
:param max_results: The maximum number of results to return in a single response page.
Leverage the page tokens to iterate through the entire collection.
:type max_results: int
:return: List of tables associated with the dataset
"""
warnings.warn("This method is deprecated. Please use ``get_dataset_tables``.", DeprecationWarning)
project_id = project_id or self.project_id
tables = self.get_client().list_tables(
dataset=DatasetReference(project=project_id, dataset_id=dataset_id),
max_results=max_results,
)
if table_prefix:
result = [t.reference.to_api_repr() for t in tables if t.table_id.startswith(table_prefix)]
else:
result = [t.reference.to_api_repr() for t in tables]
self.log.info("%s tables found", len(result))
return result
@GoogleBaseHook.fallback_to_default_project_id
def get_datasets_list(
self,
project_id: Optional[str] = None,
include_all: bool = False,
filter_: Optional[str] = None,
max_results: Optional[int] = None,
page_token: Optional[str] = None,
retry: Retry = DEFAULT_RETRY,
) -> List[DatasetListItem]:
"""
        Method returns the full list of BigQuery datasets in the current project.
For more information, see:
https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets/list
:param project_id: Google Cloud Project for which you try to get all datasets
:type project_id: str
:param include_all: True if results include hidden datasets. Defaults to False.
:param filter_: An expression for filtering the results by label. For syntax, see
https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets/list#filter.
        :type filter_: str
        :param max_results: Maximum number of datasets to return.
        :type max_results: int
:param page_token: Token representing a cursor into the datasets. If not passed,
the API will return the first page of datasets. The token marks the beginning of the
iterator to be returned and the value of the ``page_token`` can be accessed at
``next_page_token`` of the :class:`~google.api_core.page_iterator.HTTPIterator`.
        :type page_token: str
:param retry: How to retry the RPC.
:type retry: google.api_core.retry.Retry
"""
datasets = self.get_client(project_id=project_id).list_datasets(
project=project_id,
include_all=include_all,
filter=filter_,
max_results=max_results,
page_token=page_token,
retry=retry,
)
datasets_list = list(datasets)
self.log.info("Datasets List: %s", len(datasets_list))
return datasets_list
@GoogleBaseHook.fallback_to_default_project_id
def get_dataset(self, dataset_id: str, project_id: Optional[str] = None) -> Dataset:
"""
Fetch the dataset referenced by dataset_id.
:param dataset_id: The BigQuery Dataset ID
:type dataset_id: str
:param project_id: The Google Cloud Project ID
:type project_id: str
:return: dataset_resource
.. seealso::
For more information, see Dataset Resource content:
https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets#resource
"""
dataset = self.get_client(project_id=project_id).get_dataset(
dataset_ref=DatasetReference(project_id, dataset_id)
)
self.log.info("Dataset Resource: %s", dataset)
return dataset
@GoogleBaseHook.fallback_to_default_project_id
def run_grant_dataset_view_access(
self,
source_dataset: str,
view_dataset: str,
view_table: str,
source_project: Optional[str] = None,
view_project: Optional[str] = None,
project_id: Optional[str] = None,
) -> Dict[str, Any]:
"""
Grant authorized view access of a dataset to a view table.
If this view has already been granted access to the dataset, do nothing.
This method is not atomic. Running it may clobber a simultaneous update.
:param source_dataset: the source dataset
:type source_dataset: str
:param view_dataset: the dataset that the view is in
:type view_dataset: str
:param view_table: the table of the view
:type view_table: str
:param project_id: the project of the source dataset. If None,
self.project_id will be used.
:type project_id: str
:param view_project: the project that the view is in. If None,
self.project_id will be used.
:type view_project: str
:return: the datasets resource of the source dataset.
"""
if source_project:
project_id = source_project
warnings.warn(
"Parameter ``source_project`` is deprecated. Use ``project_id``.",
DeprecationWarning,
)
view_project = view_project or project_id
view_access = AccessEntry(
role=None,
entity_type="view",
entity_id={'projectId': view_project, 'datasetId': view_dataset, 'tableId': view_table},
)
dataset = self.get_dataset(project_id=project_id, dataset_id=source_dataset)
# Check to see if the view we want to add already exists.
if view_access not in dataset.access_entries:
self.log.info(
'Granting table %s:%s.%s authorized view access to %s:%s dataset.',
view_project,
view_dataset,
view_table,
project_id,
source_dataset,
)
dataset.access_entries += [view_access]
dataset = self.update_dataset(
fields=["access"], dataset_resource=dataset.to_api_repr(), project_id=project_id
)
else:
self.log.info(
'Table %s:%s.%s already has authorized view access to %s:%s dataset.',
view_project,
view_dataset,
view_table,
project_id,
source_dataset,
)
return dataset.to_api_repr()
@GoogleBaseHook.fallback_to_default_project_id
def run_table_upsert(
self, dataset_id: str, table_resource: Dict[str, Any], project_id: Optional[str] = None
) -> Dict[str, Any]:
"""
        If the table already exists, update the existing table; otherwise, create a new one.
Since BigQuery does not natively allow table upserts, this is not an
atomic operation.
:param dataset_id: the dataset to upsert the table into.
:type dataset_id: str
:param table_resource: a table resource. see
https://cloud.google.com/bigquery/docs/reference/v2/tables#resource
:type table_resource: dict
:param project_id: the project to upsert the table into. If None,
project will be self.project_id.
        :return: the created or updated table resource.
"""
table_id = table_resource['tableReference']['tableId']
table_resource = self._resolve_table_reference(
table_resource=table_resource, project_id=project_id, dataset_id=dataset_id, table_id=table_id
)
tables_list_resp = self.get_dataset_tables(dataset_id=dataset_id, project_id=project_id)
if any(table['tableId'] == table_id for table in tables_list_resp):
self.log.info('Table %s:%s.%s exists, updating.', project_id, dataset_id, table_id)
table = self.update_table(table_resource=table_resource)
else:
self.log.info('Table %s:%s.%s does not exist. creating.', project_id, dataset_id, table_id)
table = self.create_empty_table(
table_resource=table_resource, project_id=project_id
).to_api_repr()
return table
def run_table_delete(self, deletion_dataset_table: str, ignore_if_missing: bool = False) -> None:
"""
Delete an existing table from the dataset;
If the table does not exist, return an error unless ignore_if_missing
is set to True.
:param deletion_dataset_table: A dotted
``(<project>.|<project>:)<dataset>.<table>`` that indicates which table
will be deleted.
:type deletion_dataset_table: str
:param ignore_if_missing: if True, then return success even if the
requested table does not exist.
:type ignore_if_missing: bool
:return:
"""
warnings.warn("This method is deprecated. Please use `delete_table`.", DeprecationWarning)
return self.delete_table(table_id=deletion_dataset_table, not_found_ok=ignore_if_missing)
@GoogleBaseHook.fallback_to_default_project_id
def delete_table(
self,
table_id: str,
not_found_ok: bool = True,
project_id: Optional[str] = None,
) -> None:
"""
Delete an existing table from the dataset. If the table does not exist, return an error
unless not_found_ok is set to True.
:param table_id: A dotted ``(<project>.|<project>:)<dataset>.<table>``
that indicates which table will be deleted.
:type table_id: str
:param not_found_ok: if True, then return success even if the
requested table does not exist.
:type not_found_ok: bool
:param project_id: the project used to perform the request
:type project_id: str
"""
self.get_client(project_id=project_id).delete_table(
table=Table.from_string(table_id),
not_found_ok=not_found_ok,
)
self.log.info('Deleted table %s', table_id)
def get_tabledata(
self,
dataset_id: str,
table_id: str,
max_results: Optional[int] = None,
selected_fields: Optional[str] = None,
page_token: Optional[str] = None,
start_index: Optional[int] = None,
) -> List[Dict]:
"""
        Get the data of a given dataset.table, optionally restricted to selected columns.
see https://cloud.google.com/bigquery/docs/reference/v2/tabledata/list
:param dataset_id: the dataset ID of the requested table.
:param table_id: the table ID of the requested table.
:param max_results: the maximum results to return.
:param selected_fields: List of fields to return (comma-separated). If
unspecified, all fields are returned.
:param page_token: page token, returned from a previous call,
identifying the result set.
:param start_index: zero based index of the starting row to read.
:return: list of rows
"""
warnings.warn("This method is deprecated. Please use `list_rows`.", DeprecationWarning)
rows = self.list_rows(dataset_id, table_id, max_results, selected_fields, page_token, start_index)
return [dict(r) for r in rows]
@GoogleBaseHook.fallback_to_default_project_id
def list_rows(
self,
dataset_id: str,
table_id: str,
max_results: Optional[int] = None,
selected_fields: Optional[Union[List[str], str]] = None,
page_token: Optional[str] = None,
start_index: Optional[int] = None,
project_id: Optional[str] = None,
location: Optional[str] = None,
) -> List[Row]:
"""
List the rows of the table.
See https://cloud.google.com/bigquery/docs/reference/rest/v2/tabledata/list
:param dataset_id: the dataset ID of the requested table.
:param table_id: the table ID of the requested table.
:param max_results: the maximum results to return.
:param selected_fields: List of fields to return (comma-separated). If
unspecified, all fields are returned.
:param page_token: page token, returned from a previous call,
identifying the result set.
:param start_index: zero based index of the starting row to read.
:param project_id: Project ID for the project which the client acts on behalf of.
:param location: Default location for job.
:return: list of rows
"""
location = location or self.location
if isinstance(selected_fields, str):
selected_fields = selected_fields.split(",")
if selected_fields:
selected_fields = [SchemaField(n, "") for n in selected_fields]
else:
selected_fields = None
table = self._resolve_table_reference(
table_resource={},
project_id=project_id,
dataset_id=dataset_id,
table_id=table_id,
)
result = self.get_client(project_id=project_id, location=location).list_rows(
table=Table.from_api_repr(table),
selected_fields=selected_fields,
max_results=max_results,
page_token=page_token,
start_index=start_index,
)
return list(result)
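    # Example (sketch): fetching a few rows with only selected columns. The
    # dataset/table/column names are hypothetical; the returned items are
    # google.cloud.bigquery Row objects and can be converted with dict(row).
    #
    #   rows = hook.list_rows(
    #       dataset_id="my_dataset",
    #       table_id="my_table",
    #       max_results=10,
    #       selected_fields="id,name",
    #   )
    #   records = [dict(r) for r in rows]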
@GoogleBaseHook.fallback_to_default_project_id
def get_schema(self, dataset_id: str, table_id: str, project_id: Optional[str] = None) -> dict:
"""
Get the schema for a given dataset and table.
see https://cloud.google.com/bigquery/docs/reference/v2/tables#resource
:param dataset_id: the dataset ID of the requested table
:param table_id: the table ID of the requested table
:param project_id: the optional project ID of the requested table.
If not provided, the connector's configured project will be used.
:return: a table schema
"""
table_ref = TableReference(dataset_ref=DatasetReference(project_id, dataset_id), table_id=table_id)
table = self.get_client(project_id=project_id).get_table(table_ref)
return {"fields": [s.to_api_repr() for s in table.schema]}
@GoogleBaseHook.fallback_to_default_project_id
def poll_job_complete(
self,
job_id: str,
project_id: Optional[str] = None,
location: Optional[str] = None,
retry: Retry = DEFAULT_RETRY,
) -> bool:
"""
        Check whether the job has completed.
:param job_id: id of the job.
:type job_id: str
:param project_id: Google Cloud Project where the job is running
:type project_id: str
:param location: location the job is running
:type location: str
:param retry: How to retry the RPC.
:type retry: google.api_core.retry.Retry
:rtype: bool
"""
location = location or self.location
job = self.get_client(project_id=project_id, location=location).get_job(job_id=job_id)
return job.done(retry=retry)
def cancel_query(self) -> None:
"""Cancel all started queries that have not yet completed"""
warnings.warn(
"This method is deprecated. Please use `BigQueryHook.cancel_job`.",
DeprecationWarning,
)
if self.running_job_id:
self.cancel_job(job_id=self.running_job_id)
else:
self.log.info('No running BigQuery jobs to cancel.')
@GoogleBaseHook.fallback_to_default_project_id
def cancel_job(
self,
job_id: str,
project_id: Optional[str] = None,
location: Optional[str] = None,
) -> None:
"""
        Cancel a job and wait for the cancellation to complete.
:param job_id: id of the job.
:type job_id: str
:param project_id: Google Cloud Project where the job is running
:type project_id: str
:param location: location the job is running
:type location: str
"""
location = location or self.location
if self.poll_job_complete(job_id=job_id):
self.log.info('No running BigQuery jobs to cancel.')
return
self.log.info('Attempting to cancel job : %s, %s', project_id, job_id)
self.get_client(location=location, project_id=project_id).cancel_job(job_id=job_id)
# Wait for all the calls to cancel to finish
max_polling_attempts = 12
polling_attempts = 0
job_complete = False
while polling_attempts < max_polling_attempts and not job_complete:
polling_attempts += 1
job_complete = self.poll_job_complete(job_id)
if job_complete:
self.log.info('Job successfully canceled: %s, %s', project_id, job_id)
elif polling_attempts == max_polling_attempts:
self.log.info(
"Stopping polling due to timeout. Job with id %s "
"has not completed cancel and may or may not finish.",
job_id,
)
else:
self.log.info('Waiting for canceled job with id %s to finish.', job_id)
time.sleep(5)
@GoogleBaseHook.fallback_to_default_project_id
def get_job(
self,
job_id: Optional[str] = None,
project_id: Optional[str] = None,
location: Optional[str] = None,
) -> Union[CopyJob, QueryJob, LoadJob, ExtractJob]:
"""
Retrieves a BigQuery job. For more information see:
https://cloud.google.com/bigquery/docs/reference/v2/jobs
        :param job_id: The ID of the job to retrieve. The ID must contain only letters
            (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). The maximum
            length is 1,024 characters.
:type job_id: str
:param project_id: Google Cloud Project where the job is running
:type project_id: str
:param location: location the job is running
:type location: str
"""
client = self.get_client(project_id=project_id, location=location)
job = client.get_job(job_id=job_id, project=project_id, location=location)
return job
@staticmethod
def _custom_job_id(configuration: Dict[str, Any]) -> str:
hash_base = json.dumps(configuration, sort_keys=True)
uniqueness_suffix = hashlib.md5(hash_base.encode()).hexdigest()
microseconds_from_epoch = int(
(datetime.now() - datetime.fromtimestamp(0)) / timedelta(microseconds=1)
)
return f"airflow_{microseconds_from_epoch}_{uniqueness_suffix}"
@GoogleBaseHook.fallback_to_default_project_id
def insert_job(
self,
configuration: Dict,
job_id: Optional[str] = None,
project_id: Optional[str] = None,
location: Optional[str] = None,
) -> BigQueryJob:
"""
        Executes a BigQuery job, waits for it to complete, and returns the job object.
See here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs
:param configuration: The configuration parameter maps directly to
BigQuery's configuration field in the job object. See
https://cloud.google.com/bigquery/docs/reference/v2/jobs for
details.
:type configuration: Dict[str, Any]
        :param job_id: The ID of the job. The ID must contain only letters (a-z, A-Z),
            numbers (0-9), underscores (_), or dashes (-). The maximum length is 1,024
            characters. If not provided, a unique ID is generated from a hash of the
            configuration and the current time.
:type job_id: str
:param project_id: Google Cloud Project where the job is running
:type project_id: str
:param location: location the job is running
:type location: str
"""
location = location or self.location
job_id = job_id or self._custom_job_id(configuration)
client = self.get_client(project_id=project_id, location=location)
job_data = {
"configuration": configuration,
"jobReference": {"jobId": job_id, "projectId": project_id, "location": location},
}
# pylint: disable=protected-access
supported_jobs = {
LoadJob._JOB_TYPE: LoadJob,
CopyJob._JOB_TYPE: CopyJob,
ExtractJob._JOB_TYPE: ExtractJob,
QueryJob._JOB_TYPE: QueryJob,
}
# pylint: enable=protected-access
job = None
for job_type, job_object in supported_jobs.items():
if job_type in configuration:
job = job_object
break
if not job:
raise AirflowException(f"Unknown job type. Supported types: {supported_jobs.keys()}")
job = job.from_api_repr(job_data, client)
self.log.info("Inserting job %s", job.job_id)
# Start the job and wait for it to complete and get the result.
job.result()
return job
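    # Example (sketch): submitting a query job through insert_job. The SQL and
    # project id are placeholders; the configuration dict maps directly to the
    # BigQuery Jobs API "configuration" field.
    #
    #   job = hook.insert_job(
    #       configuration={"query": {"query": "SELECT 1", "useLegacySql": False}},
    #       project_id="my-project",
    #   )
    #   print(job.job_id, job.state)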
def run_with_configuration(self, configuration: dict) -> str:
"""
        Executes a BigQuery SQL query. See
        https://cloud.google.com/bigquery/docs/reference/v2/jobs
        for more details about the configuration parameter.
:param configuration: The configuration parameter maps directly to
BigQuery's configuration field in the job object. See
https://cloud.google.com/bigquery/docs/reference/v2/jobs for
details.
"""
warnings.warn("This method is deprecated. Please use `BigQueryHook.insert_job`", DeprecationWarning)
job = self.insert_job(configuration=configuration, project_id=self.project_id)
self.running_job_id = job.job_id
return job.job_id
def run_load( # pylint: disable=too-many-locals,too-many-arguments,invalid-name
self,
destination_project_dataset_table: str,
source_uris: List,
schema_fields: Optional[List] = None,
source_format: str = 'CSV',
create_disposition: str = 'CREATE_IF_NEEDED',
skip_leading_rows: int = 0,
write_disposition: str = 'WRITE_EMPTY',
field_delimiter: str = ',',
max_bad_records: int = 0,
quote_character: Optional[str] = None,
ignore_unknown_values: bool = False,
allow_quoted_newlines: bool = False,
allow_jagged_rows: bool = False,
encoding: str = "UTF-8",
schema_update_options: Optional[Iterable] = None,
src_fmt_configs: Optional[Dict] = None,
time_partitioning: Optional[Dict] = None,
cluster_fields: Optional[List] = None,
autodetect: bool = False,
encryption_configuration: Optional[Dict] = None,
) -> str:
"""
        Executes a BigQuery load command to load data from Google Cloud Storage
        to BigQuery. See
        https://cloud.google.com/bigquery/docs/reference/v2/jobs
        for more details about these parameters.
:param destination_project_dataset_table:
The dotted ``(<project>.|<project>:)<dataset>.<table>($<partition>)`` BigQuery
table to load data into. If ``<project>`` is not included, project will be the
project defined in the connection json. If a partition is specified the
operator will automatically append the data, create a new partition or create
a new DAY partitioned table.
:type destination_project_dataset_table: str
:param schema_fields: The schema field list as defined here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load
Required if autodetect=False; optional if autodetect=True.
:type schema_fields: list
:param autodetect: Attempt to autodetect the schema for CSV and JSON
source files.
:type autodetect: bool
        :param source_uris: The source Google Cloud
            Storage URIs (e.g. gs://some-bucket/some-file.txt). A single wildcard
            per object name can be used.
:type source_uris: list
        :param source_format: File format of the data to load.
:type source_format: str
:param create_disposition: The create disposition if the table doesn't exist.
:type create_disposition: str
:param skip_leading_rows: Number of rows to skip when loading from a CSV.
:type skip_leading_rows: int
:param write_disposition: The write disposition if the table already exists.
:type write_disposition: str
:param field_delimiter: The delimiter to use when loading from a CSV.
:type field_delimiter: str
:param max_bad_records: The maximum number of bad records that BigQuery can
ignore when running the job.
:type max_bad_records: int
:param quote_character: The value that is used to quote data sections in a CSV
file.
:type quote_character: str
:param ignore_unknown_values: [Optional] Indicates if BigQuery should allow
extra values that are not represented in the table schema.
If true, the extra values are ignored. If false, records with extra columns
are treated as bad records, and if there are too many bad records, an
invalid error is returned in the job result.
:type ignore_unknown_values: bool
:param allow_quoted_newlines: Whether to allow quoted newlines (true) or not
(false).
:type allow_quoted_newlines: bool
:param allow_jagged_rows: Accept rows that are missing trailing optional columns.
The missing values are treated as nulls. If false, records with missing
trailing columns are treated as bad records, and if there are too many bad
records, an invalid error is returned in the job result. Only applicable when
source_format is CSV.
:type allow_jagged_rows: bool
:param encoding: The character encoding of the data.
.. seealso::
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#externalDataConfiguration.csvOptions.encoding
:type encoding: str
:param schema_update_options: Allows the schema of the destination
table to be updated as a side effect of the load job.
:type schema_update_options: Union[list, tuple, set]
:param src_fmt_configs: configure optional fields specific to the source format
:type src_fmt_configs: dict
:param time_partitioning: configure optional time partitioning fields i.e.
partition by field, type and expiration as per API specifications.
:type time_partitioning: dict
:param cluster_fields: Request that the result of this load be stored sorted
by one or more columns. BigQuery supports clustering for both partitioned and
non-partitioned tables. The order of columns given determines the sort order.
:type cluster_fields: list[str]
:param encryption_configuration: [Optional] Custom encryption configuration (e.g., Cloud KMS keys).
**Example**: ::
encryption_configuration = {
"kmsKeyName": "projects/testp/locations/us/keyRings/test-kr/cryptoKeys/test-key"
}
:type encryption_configuration: dict
"""
warnings.warn(
"This method is deprecated. Please use `BigQueryHook.insert_job` method.", DeprecationWarning
)
if not self.project_id:
raise ValueError("The project_id should be set")
# To provide backward compatibility
schema_update_options = list(schema_update_options or [])
# bigquery only allows certain source formats
# we check to make sure the passed source format is valid
# if it's not, we raise a ValueError
# Refer to this link for more details:
# https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query.tableDefinitions.(key).sourceFormat # noqa # pylint: disable=line-too-long
if schema_fields is None and not autodetect:
raise ValueError('You must either pass a schema or autodetect=True.')
if src_fmt_configs is None:
src_fmt_configs = {}
source_format = source_format.upper()
allowed_formats = [
"CSV",
"NEWLINE_DELIMITED_JSON",
"AVRO",
"GOOGLE_SHEETS",
"DATASTORE_BACKUP",
"PARQUET",
]
if source_format not in allowed_formats:
raise ValueError(
"{} is not a valid source format. "
"Please use one of the following types: {}".format(source_format, allowed_formats)
)
# bigquery also allows you to define how you want a table's schema to change
# as a side effect of a load
# for more details:
# https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.schemaUpdateOptions
allowed_schema_update_options = ['ALLOW_FIELD_ADDITION', "ALLOW_FIELD_RELAXATION"]
if not set(allowed_schema_update_options).issuperset(set(schema_update_options)):
raise ValueError(
"{} contains invalid schema update options."
"Please only use one or more of the following options: {}".format(
schema_update_options, allowed_schema_update_options
)
)
destination_project, destination_dataset, destination_table = _split_tablename(
table_input=destination_project_dataset_table,
default_project_id=self.project_id,
var_name='destination_project_dataset_table',
)
configuration = {
'load': {
'autodetect': autodetect,
'createDisposition': create_disposition,
'destinationTable': {
'projectId': destination_project,
'datasetId': destination_dataset,
'tableId': destination_table,
},
'sourceFormat': source_format,
'sourceUris': source_uris,
'writeDisposition': write_disposition,
'ignoreUnknownValues': ignore_unknown_values,
}
}
time_partitioning = _cleanse_time_partitioning(destination_project_dataset_table, time_partitioning)
if time_partitioning:
configuration['load'].update({'timePartitioning': time_partitioning})
if cluster_fields:
configuration['load'].update({'clustering': {'fields': cluster_fields}})
if schema_fields:
configuration['load']['schema'] = {'fields': schema_fields}
if schema_update_options:
if write_disposition not in ["WRITE_APPEND", "WRITE_TRUNCATE"]:
raise ValueError(
"schema_update_options is only "
"allowed if write_disposition is "
"'WRITE_APPEND' or 'WRITE_TRUNCATE'."
)
else:
self.log.info("Adding experimental 'schemaUpdateOptions': %s", schema_update_options)
configuration['load']['schemaUpdateOptions'] = schema_update_options
if max_bad_records:
configuration['load']['maxBadRecords'] = max_bad_records
if encryption_configuration:
configuration["load"]["destinationEncryptionConfiguration"] = encryption_configuration
src_fmt_to_configs_mapping = {
'CSV': [
'allowJaggedRows',
'allowQuotedNewlines',
'autodetect',
'fieldDelimiter',
'skipLeadingRows',
'ignoreUnknownValues',
'nullMarker',
'quote',
'encoding',
],
'DATASTORE_BACKUP': ['projectionFields'],
'NEWLINE_DELIMITED_JSON': ['autodetect', 'ignoreUnknownValues'],
'PARQUET': ['autodetect', 'ignoreUnknownValues'],
'AVRO': ['useAvroLogicalTypes'],
}
valid_configs = src_fmt_to_configs_mapping[source_format]
# if following fields are not specified in src_fmt_configs,
# honor the top-level params for backward-compatibility
backward_compatibility_configs = {
'skipLeadingRows': skip_leading_rows,
'fieldDelimiter': field_delimiter,
'ignoreUnknownValues': ignore_unknown_values,
'quote': quote_character,
'allowQuotedNewlines': allow_quoted_newlines,
'encoding': encoding,
}
src_fmt_configs = _validate_src_fmt_configs(
source_format, src_fmt_configs, valid_configs, backward_compatibility_configs
)
configuration['load'].update(src_fmt_configs)
if allow_jagged_rows:
configuration['load']['allowJaggedRows'] = allow_jagged_rows
job = self.insert_job(configuration=configuration, project_id=self.project_id)
self.running_job_id = job.job_id
return job.job_id
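    # Since run_load is deprecated, a roughly equivalent insert_job call would
    # look like the sketch below (bucket, dataset and table names are
    # hypothetical):
    #
    #   hook.insert_job(configuration={
    #       "load": {
    #           "sourceUris": ["gs://my-bucket/data.csv"],
    #           "sourceFormat": "CSV",
    #           "destinationTable": {
    #               "projectId": "my-project",
    #               "datasetId": "my_dataset",
    #               "tableId": "my_table",
    #           },
    #           "writeDisposition": "WRITE_TRUNCATE",
    #           "skipLeadingRows": 1,
    #       }
    #   })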
def run_copy( # pylint: disable=invalid-name
self,
source_project_dataset_tables: Union[List, str],
destination_project_dataset_table: str,
write_disposition: str = 'WRITE_EMPTY',
create_disposition: str = 'CREATE_IF_NEEDED',
labels: Optional[Dict] = None,
encryption_configuration: Optional[Dict] = None,
) -> str:
"""
Executes a BigQuery copy command to copy data from one BigQuery table
        to another. See
        https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.copy
        for more details about these parameters.
:param source_project_dataset_tables: One or more dotted
``(project:|project.)<dataset>.<table>``
BigQuery tables to use as the source data. Use a list if there are
multiple source tables.
If ``<project>`` is not included, project will be the project defined
in the connection json.
:type source_project_dataset_tables: list|string
:param destination_project_dataset_table: The destination BigQuery
table. Format is: ``(project:|project.)<dataset>.<table>``
:type destination_project_dataset_table: str
:param write_disposition: The write disposition if the table already exists.
:type write_disposition: str
:param create_disposition: The create disposition if the table doesn't exist.
:type create_disposition: str
:param labels: a dictionary containing labels for the job/query,
passed to BigQuery
:type labels: dict
:param encryption_configuration: [Optional] Custom encryption configuration (e.g., Cloud KMS keys).
**Example**: ::
encryption_configuration = {
"kmsKeyName": "projects/testp/locations/us/keyRings/test-kr/cryptoKeys/test-key"
}
:type encryption_configuration: dict
"""
warnings.warn(
"This method is deprecated. Please use `BigQueryHook.insert_job` method.", DeprecationWarning
)
if not self.project_id:
raise ValueError("The project_id should be set")
source_project_dataset_tables = (
[source_project_dataset_tables]
if not isinstance(source_project_dataset_tables, list)
else source_project_dataset_tables
)
source_project_dataset_tables_fixup = []
for source_project_dataset_table in source_project_dataset_tables:
source_project, source_dataset, source_table = _split_tablename(
table_input=source_project_dataset_table,
default_project_id=self.project_id,
var_name='source_project_dataset_table',
)
source_project_dataset_tables_fixup.append(
{'projectId': source_project, 'datasetId': source_dataset, 'tableId': source_table}
)
destination_project, destination_dataset, destination_table = _split_tablename(
table_input=destination_project_dataset_table, default_project_id=self.project_id
)
configuration = {
'copy': {
'createDisposition': create_disposition,
'writeDisposition': write_disposition,
'sourceTables': source_project_dataset_tables_fixup,
'destinationTable': {
'projectId': destination_project,
'datasetId': destination_dataset,
'tableId': destination_table,
},
}
}
if labels:
configuration['labels'] = labels
if encryption_configuration:
configuration["copy"]["destinationEncryptionConfiguration"] = encryption_configuration
job = self.insert_job(configuration=configuration, project_id=self.project_id)
self.running_job_id = job.job_id
return job.job_id
def run_extract(
self,
source_project_dataset_table: str,
destination_cloud_storage_uris: str,
compression: str = 'NONE',
export_format: str = 'CSV',
field_delimiter: str = ',',
print_header: bool = True,
labels: Optional[Dict] = None,
) -> str:
"""
Executes a BigQuery extract command to copy data from BigQuery to
        Google Cloud Storage. See
        https://cloud.google.com/bigquery/docs/reference/v2/jobs
        for more details about these parameters.
:param source_project_dataset_table: The dotted ``<dataset>.<table>``
BigQuery table to use as the source data.
:type source_project_dataset_table: str
:param destination_cloud_storage_uris: The destination Google Cloud
Storage URI (e.g. gs://some-bucket/some-file.txt). Follows
convention defined here:
https://cloud.google.com/bigquery/exporting-data-from-bigquery#exportingmultiple
:type destination_cloud_storage_uris: list
:param compression: Type of compression to use.
:type compression: str
:param export_format: File format to export.
:type export_format: str
:param field_delimiter: The delimiter to use when extracting to a CSV.
:type field_delimiter: str
:param print_header: Whether to print a header for a CSV file extract.
:type print_header: bool
:param labels: a dictionary containing labels for the job/query,
passed to BigQuery
:type labels: dict
"""
warnings.warn(
"This method is deprecated. Please use `BigQueryHook.insert_job` method.", DeprecationWarning
)
if not self.project_id:
raise ValueError("The project_id should be set")
source_project, source_dataset, source_table = _split_tablename(
table_input=source_project_dataset_table,
default_project_id=self.project_id,
var_name='source_project_dataset_table',
)
configuration = {
'extract': {
'sourceTable': {
'projectId': source_project,
'datasetId': source_dataset,
'tableId': source_table,
},
'compression': compression,
'destinationUris': destination_cloud_storage_uris,
'destinationFormat': export_format,
}
} # type: Dict[str, Any]
if labels:
configuration['labels'] = labels
if export_format == 'CSV':
# Only set fieldDelimiter and printHeader fields if using CSV.
# Google does not like it if you set these fields for other export
# formats.
configuration['extract']['fieldDelimiter'] = field_delimiter
configuration['extract']['printHeader'] = print_header
job = self.insert_job(configuration=configuration, project_id=self.project_id)
self.running_job_id = job.job_id
return job.job_id
# pylint: disable=too-many-locals,too-many-arguments, too-many-branches
def run_query(
self,
sql: str,
destination_dataset_table: Optional[str] = None,
write_disposition: str = 'WRITE_EMPTY',
allow_large_results: bool = False,
flatten_results: Optional[bool] = None,
udf_config: Optional[List] = None,
use_legacy_sql: Optional[bool] = None,
maximum_billing_tier: Optional[int] = None,
maximum_bytes_billed: Optional[float] = None,
create_disposition: str = 'CREATE_IF_NEEDED',
query_params: Optional[List] = None,
labels: Optional[Dict] = None,
schema_update_options: Optional[Iterable] = None,
priority: str = 'INTERACTIVE',
time_partitioning: Optional[Dict] = None,
api_resource_configs: Optional[Dict] = None,
cluster_fields: Optional[List[str]] = None,
location: Optional[str] = None,
encryption_configuration: Optional[Dict] = None,
) -> str:
"""
Executes a BigQuery SQL query. Optionally persists results in a BigQuery
        table. See
        https://cloud.google.com/bigquery/docs/reference/v2/jobs
        for more details about these parameters.
:param sql: The BigQuery SQL to execute.
:type sql: str
:param destination_dataset_table: The dotted ``<dataset>.<table>``
BigQuery table to save the query results.
:type destination_dataset_table: str
:param write_disposition: What to do if the table already exists in
BigQuery.
:type write_disposition: str
:param allow_large_results: Whether to allow large results.
:type allow_large_results: bool
:param flatten_results: If true and query uses legacy SQL dialect, flattens
all nested and repeated fields in the query results. ``allowLargeResults``
must be true if this is set to false. For standard SQL queries, this
flag is ignored and results are never flattened.
:type flatten_results: bool
:param udf_config: The User Defined Function configuration for the query.
See https://cloud.google.com/bigquery/user-defined-functions for details.
:type udf_config: list
:param use_legacy_sql: Whether to use legacy SQL (true) or standard SQL (false).
If `None`, defaults to `self.use_legacy_sql`.
:type use_legacy_sql: bool
        :param api_resource_configs: a dictionary containing parameters for the
            'configuration' field of the Google BigQuery Jobs API:
            https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs
            for example, {'query': {'useQueryCache': False}}. Use it to provide
            parameters that are not otherwise exposed as arguments of the
            BigQueryHook methods.
:type api_resource_configs: dict
:param maximum_billing_tier: Positive integer that serves as a
multiplier of the basic price.
:type maximum_billing_tier: int
:param maximum_bytes_billed: Limits the bytes billed for this job.
Queries that will have bytes billed beyond this limit will fail
(without incurring a charge). If unspecified, this will be
set to your project default.
:type maximum_bytes_billed: float
:param create_disposition: Specifies whether the job is allowed to
create new tables.
:type create_disposition: str
:param query_params: a list of dictionary containing query parameter types and
values, passed to BigQuery
:type query_params: list
:param labels: a dictionary containing labels for the job/query,
passed to BigQuery
:type labels: dict
:param schema_update_options: Allows the schema of the destination
table to be updated as a side effect of the query job.
:type schema_update_options: Union[list, tuple, set]
:param priority: Specifies a priority for the query.
Possible values include INTERACTIVE and BATCH.
The default value is INTERACTIVE.
:type priority: str
:param time_partitioning: configure optional time partitioning fields i.e.
partition by field, type and expiration as per API specifications.
:type time_partitioning: dict
:param cluster_fields: Request that the result of this query be stored sorted
by one or more columns. BigQuery supports clustering for both partitioned and
non-partitioned tables. The order of columns given determines the sort order.
:type cluster_fields: list[str]
:param location: The geographic location of the job. Required except for
US and EU. See details at
https://cloud.google.com/bigquery/docs/locations#specifying_your_location
:type location: str
:param encryption_configuration: [Optional] Custom encryption configuration (e.g., Cloud KMS keys).
**Example**: ::
encryption_configuration = {
"kmsKeyName": "projects/testp/locations/us/keyRings/test-kr/cryptoKeys/test-key"
}
:type encryption_configuration: dict
"""
warnings.warn(
"This method is deprecated. Please use `BigQueryHook.insert_job` method.", DeprecationWarning
)
if not self.project_id:
raise ValueError("The project_id should be set")
schema_update_options = list(schema_update_options or [])
if time_partitioning is None:
time_partitioning = {}
if location:
self.location = location
if not api_resource_configs:
api_resource_configs = self.api_resource_configs
else:
_validate_value('api_resource_configs', api_resource_configs, dict)
configuration = deepcopy(api_resource_configs)
if 'query' not in configuration:
configuration['query'] = {}
else:
_validate_value("api_resource_configs['query']", configuration['query'], dict)
if sql is None and not configuration['query'].get('query', None):
raise TypeError('`BigQueryBaseCursor.run_query` missing 1 required positional argument: `sql`')
# BigQuery also allows you to define how you want a table's schema to change
# as a side effect of a query job
# for more details:
# https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query.schemaUpdateOptions # noqa # pylint: disable=line-too-long
allowed_schema_update_options = ['ALLOW_FIELD_ADDITION', "ALLOW_FIELD_RELAXATION"]
if not set(allowed_schema_update_options).issuperset(set(schema_update_options)):
raise ValueError(
"{} contains invalid schema update options. "
"Please only use one or more of the following "
"options: {}".format(schema_update_options, allowed_schema_update_options)
)
if schema_update_options:
if write_disposition not in ["WRITE_APPEND", "WRITE_TRUNCATE"]:
raise ValueError(
"schema_update_options is only "
"allowed if write_disposition is "
"'WRITE_APPEND' or 'WRITE_TRUNCATE'."
)
if destination_dataset_table:
destination_project, destination_dataset, destination_table = _split_tablename(
table_input=destination_dataset_table, default_project_id=self.project_id
)
destination_dataset_table = { # type: ignore
'projectId': destination_project,
'datasetId': destination_dataset,
'tableId': destination_table,
}
if cluster_fields:
cluster_fields = {'fields': cluster_fields} # type: ignore
query_param_list = [
(sql, 'query', None, (str,)),
(priority, 'priority', 'INTERACTIVE', (str,)),
(use_legacy_sql, 'useLegacySql', self.use_legacy_sql, bool),
(query_params, 'queryParameters', None, list),
(udf_config, 'userDefinedFunctionResources', None, list),
(maximum_billing_tier, 'maximumBillingTier', None, int),
(maximum_bytes_billed, 'maximumBytesBilled', None, float),
(time_partitioning, 'timePartitioning', {}, dict),
(schema_update_options, 'schemaUpdateOptions', None, list),
(destination_dataset_table, 'destinationTable', None, dict),
(cluster_fields, 'clustering', None, dict),
] # type: List[Tuple]
for param, param_name, param_default, param_type in query_param_list:
if param_name not in configuration['query'] and param in [None, {}, ()]:
if param_name == 'timePartitioning':
param_default = _cleanse_time_partitioning(destination_dataset_table, time_partitioning)
param = param_default
if param in [None, {}, ()]:
continue
_api_resource_configs_duplication_check(param_name, param, configuration['query'])
configuration['query'][param_name] = param
            # Check the type of the provided param as the last step, because the
            # param can come from two sources (api_resource_configs or the method
            # arguments) and we first need to resolve which one applies.
_validate_value(param_name, configuration['query'][param_name], param_type)
if param_name == 'schemaUpdateOptions' and param:
self.log.info("Adding experimental 'schemaUpdateOptions': %s", schema_update_options)
if param_name != 'destinationTable':
continue
for key in ['projectId', 'datasetId', 'tableId']:
if key not in configuration['query']['destinationTable']:
raise ValueError(
"Not correct 'destinationTable' in "
"api_resource_configs. 'destinationTable' "
"must be a dict with {'projectId':'', "
"'datasetId':'', 'tableId':''}"
)
configuration['query'].update(
{
'allowLargeResults': allow_large_results,
'flattenResults': flatten_results,
'writeDisposition': write_disposition,
'createDisposition': create_disposition,
}
)
if (
'useLegacySql' in configuration['query']
and configuration['query']['useLegacySql']
and 'queryParameters' in configuration['query']
):
raise ValueError("Query parameters are not allowed when using legacy SQL")
if labels:
_api_resource_configs_duplication_check('labels', labels, configuration)
configuration['labels'] = labels
if encryption_configuration:
configuration["query"]["destinationEncryptionConfiguration"] = encryption_configuration
job = self.insert_job(configuration=configuration, project_id=self.project_id)
self.running_job_id = job.job_id
return job.job_id
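    # run_query is deprecated; a sketch of the equivalent insert_job call
    # (project, dataset and table names are placeholders):
    #
    #   hook.insert_job(configuration={
    #       "query": {
    #           "query": "SELECT * FROM `my-project.my_dataset.my_table`",
    #           "useLegacySql": False,
    #           "destinationTable": {
    #               "projectId": "my-project",
    #               "datasetId": "my_dataset",
    #               "tableId": "my_results",
    #           },
    #           "writeDisposition": "WRITE_TRUNCATE",
    #       }
    #   })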
class BigQueryPandasConnector(GbqConnector):
"""
This connector behaves identically to GbqConnector (from Pandas), except
that it allows the service to be injected, and disables a call to
self.get_credentials(). This allows Airflow to use BigQuery with Pandas
    without forcing a three-legged OAuth connection. Instead, we can inject
service account credentials into the binding.
"""
def __init__(
self, project_id: str, service: str, reauth: bool = False, verbose: bool = False, dialect="legacy"
) -> None:
super().__init__(project_id)
gbq_check_google_client_version()
gbq_test_google_api_imports()
self.project_id = project_id
self.reauth = reauth
self.service = service
self.verbose = verbose
self.dialect = dialect
class BigQueryConnection:
"""
BigQuery does not have a notion of a persistent connection. Thus, these
objects are small stateless factories for cursors, which do all the real
work.
"""
def __init__(self, *args, **kwargs) -> None:
self._args = args
self._kwargs = kwargs
def close(self) -> None: # noqa: D403
"""BigQueryConnection does not have anything to close"""
def commit(self) -> None: # noqa: D403
"""BigQueryConnection does not support transactions"""
def cursor(self) -> "BigQueryCursor": # noqa: D403
"""Return a new :py:class:`Cursor` object using the connection"""
return BigQueryCursor(*self._args, **self._kwargs)
def rollback(self) -> NoReturn: # noqa: D403
"""BigQueryConnection does not have transactions"""
raise NotImplementedError("BigQueryConnection does not have transactions")
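# PEP 249 style usage sketch. It assumes BigQueryHook.get_conn() returns a
# BigQueryConnection (as defined elsewhere in this module); the query is a
# placeholder.
#
#   conn = hook.get_conn()
#   cursor = conn.cursor()
#   cursor.execute("SELECT 1")
#   row = cursor.fetchone()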
class BigQueryBaseCursor(LoggingMixin):
"""
The BigQuery base cursor contains helper methods to execute queries against
BigQuery. The methods can be used directly by operators, in cases where a
PEP 249 cursor isn't needed.
"""
def __init__(
self,
service: Any,
project_id: str,
hook: BigQueryHook,
use_legacy_sql: bool = True,
api_resource_configs: Optional[Dict] = None,
location: Optional[str] = None,
num_retries: int = 5,
) -> None:
super().__init__()
self.service = service
self.project_id = project_id
self.use_legacy_sql = use_legacy_sql
if api_resource_configs:
_validate_value("api_resource_configs", api_resource_configs, dict)
        self.api_resource_configs = api_resource_configs if api_resource_configs else {}  # type: Dict
self.running_job_id = None # type: Optional[str]
self.location = location
self.num_retries = num_retries
self.hook = hook
def create_empty_table(self, *args, **kwargs) -> None:
"""
This method is deprecated.
Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.create_empty_table`
"""
warnings.warn(
"This method is deprecated. "
"Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.create_empty_table`",
DeprecationWarning,
stacklevel=3,
)
return self.hook.create_empty_table(*args, **kwargs)
def create_empty_dataset(self, *args, **kwargs) -> None:
"""
This method is deprecated.
Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.create_empty_dataset`
"""
warnings.warn(
"This method is deprecated. "
"Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.create_empty_dataset`",
DeprecationWarning,
stacklevel=3,
)
return self.hook.create_empty_dataset(*args, **kwargs)
def get_dataset_tables(self, *args, **kwargs) -> List[Dict[str, Any]]:
"""
This method is deprecated.
Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_dataset_tables`
"""
warnings.warn(
"This method is deprecated. "
"Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_dataset_tables`",
DeprecationWarning,
stacklevel=3,
)
return self.hook.get_dataset_tables(*args, **kwargs)
def delete_dataset(self, *args, **kwargs) -> None:
"""
This method is deprecated.
Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.delete_dataset`
"""
warnings.warn(
"This method is deprecated. "
"Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.delete_dataset`",
DeprecationWarning,
stacklevel=3,
)
return self.hook.delete_dataset(*args, **kwargs)
def create_external_table(self, *args, **kwargs) -> None:
"""
This method is deprecated.
Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.create_external_table`
"""
warnings.warn(
"This method is deprecated. "
"Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.create_external_table`",
DeprecationWarning,
stacklevel=3,
)
return self.hook.create_external_table(*args, **kwargs)
def patch_table(self, *args, **kwargs) -> None:
"""
This method is deprecated.
Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.patch_table`
"""
warnings.warn(
"This method is deprecated. "
"Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.patch_table`",
DeprecationWarning,
stacklevel=3,
)
return self.hook.patch_table(*args, **kwargs)
def insert_all(self, *args, **kwargs) -> None:
"""
This method is deprecated.
Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_all`
"""
warnings.warn(
"This method is deprecated. "
"Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_all`",
DeprecationWarning,
stacklevel=3,
)
return self.hook.insert_all(*args, **kwargs)
def update_dataset(self, *args, **kwargs) -> Dict:
"""
This method is deprecated.
Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.update_dataset`
"""
warnings.warn(
"This method is deprecated. "
"Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.update_dataset`",
DeprecationWarning,
stacklevel=3,
)
return Dataset.to_api_repr(self.hook.update_dataset(*args, **kwargs))
def patch_dataset(self, *args, **kwargs) -> Dict:
"""
This method is deprecated.
Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.patch_dataset`
"""
warnings.warn(
"This method is deprecated. "
"Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.patch_dataset`",
DeprecationWarning,
stacklevel=3,
)
return self.hook.patch_dataset(*args, **kwargs)
def get_dataset_tables_list(self, *args, **kwargs) -> List[Dict[str, Any]]:
"""
This method is deprecated.
Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_dataset_tables_list`
"""
warnings.warn(
"This method is deprecated. "
"Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_dataset_tables_list`",
DeprecationWarning,
stacklevel=3,
)
return self.hook.get_dataset_tables_list(*args, **kwargs)
def get_datasets_list(self, *args, **kwargs) -> list:
"""
This method is deprecated.
Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_datasets_list`
"""
warnings.warn(
"This method is deprecated. "
"Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_datasets_list`",
DeprecationWarning,
stacklevel=3,
)
return self.hook.get_datasets_list(*args, **kwargs)
def get_dataset(self, *args, **kwargs) -> dict:
"""
This method is deprecated.
Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_dataset`
"""
warnings.warn(
"This method is deprecated. "
"Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_dataset`",
DeprecationWarning,
stacklevel=3,
)
return self.hook.get_dataset(*args, **kwargs)
def run_grant_dataset_view_access(self, *args, **kwargs) -> dict:
"""
This method is deprecated.
Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.run_grant_dataset_view_access`
"""
warnings.warn(
"This method is deprecated. "
"Please use `airflow.providers.google.cloud.hooks"
".bigquery.BigQueryHook.run_grant_dataset_view_access`",
DeprecationWarning,
stacklevel=3,
)
return self.hook.run_grant_dataset_view_access(*args, **kwargs)
def run_table_upsert(self, *args, **kwargs) -> dict:
"""
This method is deprecated.
Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.run_table_upsert`
"""
warnings.warn(
"This method is deprecated. "
"Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.run_table_upsert`",
DeprecationWarning,
stacklevel=3,
)
return self.hook.run_table_upsert(*args, **kwargs)
def run_table_delete(self, *args, **kwargs) -> None:
"""
This method is deprecated.
Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.run_table_delete`
"""
warnings.warn(
"This method is deprecated. "
"Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.run_table_delete`",
DeprecationWarning,
stacklevel=3,
)
return self.hook.run_table_delete(*args, **kwargs)
def get_tabledata(self, *args, **kwargs) -> List[dict]:
"""
This method is deprecated.
Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_tabledata`
"""
warnings.warn(
"This method is deprecated. "
"Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_tabledata`",
DeprecationWarning,
stacklevel=3,
)
return self.hook.get_tabledata(*args, **kwargs)
def get_schema(self, *args, **kwargs) -> dict:
"""
This method is deprecated.
Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_schema`
"""
warnings.warn(
"This method is deprecated. "
"Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_schema`",
DeprecationWarning,
stacklevel=3,
)
return self.hook.get_schema(*args, **kwargs)
def poll_job_complete(self, *args, **kwargs) -> bool:
"""
This method is deprecated.
Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.poll_job_complete`
"""
warnings.warn(
"This method is deprecated. "
"Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.poll_job_complete`",
DeprecationWarning,
stacklevel=3,
)
return self.hook.poll_job_complete(*args, **kwargs)
def cancel_query(self, *args, **kwargs) -> None:
"""
This method is deprecated.
Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.cancel_query`
"""
warnings.warn(
"This method is deprecated. "
"Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.cancel_query`",
DeprecationWarning,
stacklevel=3,
)
return self.hook.cancel_query(*args, **kwargs) # type: ignore # noqa
def run_with_configuration(self, *args, **kwargs) -> str:
"""
This method is deprecated.
Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.run_with_configuration`
"""
warnings.warn(
"This method is deprecated. "
"Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.run_with_configuration`",
DeprecationWarning,
stacklevel=3,
)
return self.hook.run_with_configuration(*args, **kwargs)
def run_load(self, *args, **kwargs) -> str:
"""
This method is deprecated.
Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.run_load`
"""
warnings.warn(
"This method is deprecated. "
"Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.run_load`",
DeprecationWarning,
stacklevel=3,
)
return self.hook.run_load(*args, **kwargs)
def run_copy(self, *args, **kwargs) -> str:
"""
This method is deprecated.
Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.run_copy`
"""
warnings.warn(
"This method is deprecated. "
"Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.run_copy`",
DeprecationWarning,
stacklevel=3,
)
return self.hook.run_copy(*args, **kwargs)
def run_extract(self, *args, **kwargs) -> str:
"""
This method is deprecated.
Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.run_extract`
"""
warnings.warn(
"This method is deprecated. "
"Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.run_extract`",
DeprecationWarning,
stacklevel=3,
)
return self.hook.run_extract(*args, **kwargs)
def run_query(self, *args, **kwargs) -> str:
"""
This method is deprecated.
Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.run_query`
"""
warnings.warn(
"This method is deprecated. "
"Please use `airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.run_query`",
DeprecationWarning,
stacklevel=3,
)
return self.hook.run_query(*args, **kwargs)
class BigQueryCursor(BigQueryBaseCursor):
"""
A very basic BigQuery PEP 249 cursor implementation. The PyHive PEP 249
implementation was used as a reference:
https://github.com/dropbox/PyHive/blob/master/pyhive/presto.py
https://github.com/dropbox/PyHive/blob/master/pyhive/common.py
"""
def __init__(
self,
service: Any,
project_id: str,
hook: BigQueryHook,
use_legacy_sql: bool = True,
location: Optional[str] = None,
num_retries: int = 5,
) -> None:
super().__init__(
service=service,
project_id=project_id,
hook=hook,
use_legacy_sql=use_legacy_sql,
location=location,
num_retries=num_retries,
)
self.buffersize = None # type: Optional[int]
self.page_token = None # type: Optional[str]
self.job_id = None # type: Optional[str]
self.buffer = [] # type: list
self.all_pages_loaded = False # type: bool
@property
def description(self) -> None:
"""The schema description method is not currently implemented"""
raise NotImplementedError
def close(self) -> None:
"""By default, do nothing"""
@property
def rowcount(self) -> int:
"""By default, return -1 to indicate that this is not supported"""
return -1
def execute(self, operation: str, parameters: Optional[dict] = None) -> None:
"""
Executes a BigQuery query, and returns the job ID.
:param operation: The query to execute.
:type operation: str
:param parameters: Parameters to substitute into the query.
:type parameters: dict
"""
sql = _bind_parameters(operation, parameters) if parameters else operation
self.flush_results()
self.job_id = self.hook.run_query(sql)
def executemany(self, operation: str, seq_of_parameters: list) -> None:
"""
Execute a BigQuery query multiple times with different parameters.
:param operation: The query to execute.
:type operation: str
:param seq_of_parameters: List of dictionary parameters to substitute into the
query.
:type seq_of_parameters: list
"""
for parameters in seq_of_parameters:
self.execute(operation, parameters)
def flush_results(self) -> None:
"""Flush results related cursor attributes"""
self.page_token = None
self.job_id = None
self.all_pages_loaded = False
self.buffer = []
def fetchone(self) -> Union[List, None]:
"""Fetch the next row of a query result set"""
# pylint: disable=not-callable
return self.next()
def next(self) -> Union[List, None]:
"""
Helper method for fetchone, which returns the next row from a buffer.
If the buffer is empty, attempts to paginate through the result set for
the next page, and load it into the buffer.
"""
if not self.job_id:
return None
if not self.buffer:
if self.all_pages_loaded:
return None
query_results = (
self.service.jobs()
.getQueryResults(
projectId=self.project_id,
jobId=self.job_id,
location=self.location,
pageToken=self.page_token,
)
.execute(num_retries=self.num_retries)
)
if 'rows' in query_results and query_results['rows']:
self.page_token = query_results.get('pageToken')
fields = query_results['schema']['fields']
col_types = [field['type'] for field in fields]
rows = query_results['rows']
for dict_row in rows:
typed_row = [_bq_cast(vs['v'], col_types[idx]) for idx, vs in enumerate(dict_row['f'])]
self.buffer.append(typed_row)
if not self.page_token:
self.all_pages_loaded = True
else:
# Reset all state since we've exhausted the results.
self.flush_results()
return None
return self.buffer.pop(0)
def fetchmany(self, size: Optional[int] = None) -> list:
"""
Fetch the next set of rows of a query result, returning a sequence of sequences
(e.g. a list of tuples). An empty sequence is returned when no more rows are
available. The number of rows to fetch per call is specified by the parameter.
If it is not given, the cursor's arraysize determines the number of rows to be
fetched. The method should try to fetch as many rows as indicated by the size
parameter. If this is not possible due to the specified number of rows not being
available, fewer rows may be returned. An :py:class:`~pyhive.exc.Error`
(or subclass) exception is raised if the previous call to
:py:meth:`execute` did not produce any result set or no call was issued yet.
"""
if size is None:
size = self.arraysize
result = []
for _ in range(size):
one = self.fetchone()
if one is None:
break
result.append(one)
return result
def fetchall(self) -> List[list]:
"""
Fetch all (remaining) rows of a query result, returning them as a sequence of
sequences (e.g. a list of tuples).
"""
result = []
while True:
one = self.fetchone()
if one is None:
break
result.append(one)
return result
def get_arraysize(self) -> int:
"""Specifies the number of rows to fetch at a time with .fetchmany()"""
return self.buffersize or 1
def set_arraysize(self, arraysize: int) -> None:
"""Specifies the number of rows to fetch at a time with .fetchmany()"""
self.buffersize = arraysize
arraysize = property(get_arraysize, set_arraysize)
def setinputsizes(self, sizes: Any) -> None:
"""Does nothing by default"""
def setoutputsize(self, size: Any, column: Any = None) -> None:
"""Does nothing by default"""
def _bind_parameters(operation: str, parameters: dict) -> str:
"""Helper method that binds parameters to a SQL query"""
# inspired by MySQL Python Connector (conversion.py)
    string_parameters = {}  # type: Dict[str, str]
for (name, value) in parameters.items():
if value is None:
string_parameters[name] = 'NULL'
elif isinstance(value, str):
string_parameters[name] = "'" + _escape(value) + "'"
else:
string_parameters[name] = str(value)
return operation % string_parameters
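# Example (sketch) of what _bind_parameters produces; string values are quoted
# and escaped, while other values are stringified as-is (names and values here
# are purely illustrative):
#
#   _bind_parameters("SELECT * FROM t WHERE name = %(name)s AND age > %(age)s",
#                    {"name": "O'Brien", "age": 30})
#   # -> "SELECT * FROM t WHERE name = 'O\'Brien' AND age > 30"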
def _escape(s: str) -> str:
"""Helper method that escapes parameters to a SQL query"""
e = s
e = e.replace('\\', '\\\\')
e = e.replace('\n', '\\n')
e = e.replace('\r', '\\r')
e = e.replace("'", "\\'")
e = e.replace('"', '\\"')
return e
def _bq_cast(string_field: str, bq_type: str) -> Union[None, int, float, bool, str]:
"""
    Helper method that casts a BigQuery field value to the appropriate data type.
This is useful because BigQuery returns all fields as strings.
"""
if string_field is None:
return None
elif bq_type == 'INTEGER':
return int(string_field)
elif bq_type in ('FLOAT', 'TIMESTAMP'):
return float(string_field)
elif bq_type == 'BOOLEAN':
if string_field not in ['true', 'false']:
raise ValueError(f"{string_field} must have value 'true' or 'false'")
return string_field == 'true'
else:
return string_field
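# A few illustrative casts (values chosen for illustration only):
#
#   _bq_cast("3", "INTEGER")      # -> 3
#   _bq_cast("1.5", "FLOAT")      # -> 1.5
#   _bq_cast("true", "BOOLEAN")   # -> True
#   _bq_cast(None, "STRING")      # -> None
#   _bq_cast("abc", "STRING")     # -> "abc"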
def _split_tablename(
table_input: str, default_project_id: str, var_name: Optional[str] = None
) -> Tuple[str, str, str]:
if '.' not in table_input:
raise ValueError(f'Expected table name in the format of <dataset>.<table>. Got: {table_input}')
if not default_project_id:
raise ValueError("INTERNAL: No default project is specified")
def var_print(var_name):
if var_name is None:
return ""
else:
return f"Format exception for {var_name}: "
if table_input.count('.') + table_input.count(':') > 3:
raise Exception(
            '{var}Use either : or . to specify project, '
'got {input}'.format(var=var_print(var_name), input=table_input)
)
cmpt = table_input.rsplit(':', 1)
project_id = None
rest = table_input
if len(cmpt) == 1:
project_id = None
rest = cmpt[0]
elif len(cmpt) == 2 and cmpt[0].count(':') <= 1:
if cmpt[-1].count('.') != 2:
project_id = cmpt[0]
rest = cmpt[1]
else:
raise Exception(
                '{var}Expect format of (<project>:)<dataset>.<table>, '
'got {input}'.format(var=var_print(var_name), input=table_input)
)
cmpt = rest.split('.')
if len(cmpt) == 3:
if project_id:
raise ValueError("{var}Use either : or . to specify project".format(var=var_print(var_name)))
project_id = cmpt[0]
dataset_id = cmpt[1]
table_id = cmpt[2]
elif len(cmpt) == 2:
dataset_id = cmpt[0]
table_id = cmpt[1]
else:
raise Exception(
            '{var}Expect format of (<project>.|<project>:)<dataset>.<table>, '
'got {input}'.format(var=var_print(var_name), input=table_input)
)
if project_id is None:
if var_name is not None:
log.info(
'Project not included in %s: %s; using project "%s"',
var_name,
table_input,
default_project_id,
)
project_id = default_project_id
return project_id, dataset_id, table_id
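# Illustrative splits (project/dataset/table names are made up):
#
#   _split_tablename("my-project:my_dataset.my_table", "fallback-project")
#   # -> ("my-project", "my_dataset", "my_table")
#   _split_tablename("my_dataset.my_table", "fallback-project")
#   # -> ("fallback-project", "my_dataset", "my_table")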
def _cleanse_time_partitioning(
destination_dataset_table: Optional[str], time_partitioning_in: Optional[Dict]
) -> Dict: # if it is a partitioned table ($ is in the table name) add partition load option
if time_partitioning_in is None:
time_partitioning_in = {}
time_partitioning_out = {}
if destination_dataset_table and '$' in destination_dataset_table:
time_partitioning_out['type'] = 'DAY'
time_partitioning_out.update(time_partitioning_in)
return time_partitioning_out
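# Example: a partition decorator in the table name implies DAY partitioning,
# merged with any explicit settings (the values below are illustrative):
#
#   _cleanse_time_partitioning("my_dataset.my_table$20210101", {"expirationMs": 3600000})
#   # -> {"type": "DAY", "expirationMs": 3600000}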
def _validate_value(key: Any, value: Any, expected_type: Type) -> None:
"""Function to check expected type and raise error if type is not correct"""
if not isinstance(value, expected_type):
raise TypeError("{} argument must have a type {} not {}".format(key, expected_type, type(value)))
def _api_resource_configs_duplication_check(
key: Any, value: Any, config_dict: dict, config_dict_name='api_resource_configs'
) -> None:
if key in config_dict and value != config_dict[key]:
raise ValueError(
"Values of {param_name} param are duplicated. "
"{dict_name} contained {param_name} param "
"in `query` config and {param_name} was also provided "
"with arg to run_query() method. Please remove duplicates.".format(
param_name=key, dict_name=config_dict_name
)
)
def _validate_src_fmt_configs(
source_format: str,
src_fmt_configs: dict,
valid_configs: List[str],
backward_compatibility_configs: Optional[Dict] = None,
) -> Dict:
"""
Validates the given src_fmt_configs against a valid configuration for the source format.
Adds the backward compatibility config to the src_fmt_configs.
:param source_format: File format to export.
:type source_format: str
:param src_fmt_configs: Configure optional fields specific to the source format.
:type src_fmt_configs: dict
:param valid_configs: Valid configuration specific to the source format
:type valid_configs: List[str]
:param backward_compatibility_configs: The top-level params for backward-compatibility
:type backward_compatibility_configs: dict
"""
if backward_compatibility_configs is None:
backward_compatibility_configs = {}
for k, v in backward_compatibility_configs.items():
if k not in src_fmt_configs and k in valid_configs:
src_fmt_configs[k] = v
for k, v in src_fmt_configs.items():
if k not in valid_configs:
raise ValueError(f"{k} is not a valid src_fmt_configs for type {source_format}.")
return src_fmt_configs
|
apache-2.0
|
glennq/scikit-learn
|
sklearn/preprocessing/tests/test_data.py
|
15
|
61914
|
# Authors:
#
# Giorgio Patrini
#
# License: BSD 3 clause
import warnings
import numpy as np
import numpy.linalg as la
from scipy import sparse
from distutils.version import LooseVersion
from sklearn.utils import gen_batches
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import clean_warning_registry
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_less
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_allclose
from sklearn.utils.testing import skip_if_32bit
from sklearn.utils.sparsefuncs import mean_variance_axis
from sklearn.preprocessing.data import _transform_selected
from sklearn.preprocessing.data import _handle_zeros_in_scale
from sklearn.preprocessing.data import Binarizer
from sklearn.preprocessing.data import KernelCenterer
from sklearn.preprocessing.data import Normalizer
from sklearn.preprocessing.data import normalize
from sklearn.preprocessing.data import OneHotEncoder
from sklearn.preprocessing.data import StandardScaler
from sklearn.preprocessing.data import scale
from sklearn.preprocessing.data import MinMaxScaler
from sklearn.preprocessing.data import minmax_scale
from sklearn.preprocessing.data import MaxAbsScaler
from sklearn.preprocessing.data import maxabs_scale
from sklearn.preprocessing.data import RobustScaler
from sklearn.preprocessing.data import robust_scale
from sklearn.preprocessing.data import add_dummy_feature
from sklearn.preprocessing.data import PolynomialFeatures
from sklearn.exceptions import DataConversionWarning
from sklearn.pipeline import Pipeline
from sklearn.model_selection import cross_val_predict
from sklearn.svm import SVR
from sklearn import datasets
iris = datasets.load_iris()
# Make some data to be used many times
rng = np.random.RandomState(0)
n_features = 30
n_samples = 1000
offsets = rng.uniform(-1, 1, size=n_features)
scales = rng.uniform(1, 10, size=n_features)
X_2d = rng.randn(n_samples, n_features) * scales + offsets
X_1row = X_2d[0, :].reshape(1, n_features)
X_1col = X_2d[:, 0].reshape(n_samples, 1)
X_list_1row = X_1row.tolist()
X_list_1col = X_1col.tolist()
def toarray(a):
if hasattr(a, "toarray"):
a = a.toarray()
return a
def _check_dim_1axis(a):
if isinstance(a, list):
return np.array(a).shape[0]
return a.shape[0]
def assert_correct_incr(i, batch_start, batch_stop, n, chunk_size,
n_samples_seen):
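# This helper checks the n_samples_seen_ bookkeeping of incremental scalers:
# after i + 1 full chunks it should equal (i + 1) * chunk_size, and after the
# (possibly smaller) final batch it should equal the total number of rows
# processed so far.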
if batch_stop != n:
assert_equal((i + 1) * chunk_size, n_samples_seen)
else:
assert_equal(i * chunk_size + (batch_stop - batch_start),
n_samples_seen)
def test_polynomial_features():
# Test Polynomial Features
X1 = np.arange(6)[:, np.newaxis]
P1 = np.hstack([np.ones_like(X1),
X1, X1 ** 2, X1 ** 3])
deg1 = 3
X2 = np.arange(6).reshape((3, 2))
x1 = X2[:, :1]
x2 = X2[:, 1:]
P2 = np.hstack([x1 ** 0 * x2 ** 0,
x1 ** 1 * x2 ** 0,
x1 ** 0 * x2 ** 1,
x1 ** 2 * x2 ** 0,
x1 ** 1 * x2 ** 1,
x1 ** 0 * x2 ** 2])
deg2 = 2
for (deg, X, P) in [(deg1, X1, P1), (deg2, X2, P2)]:
P_test = PolynomialFeatures(deg, include_bias=True).fit_transform(X)
assert_array_almost_equal(P_test, P)
P_test = PolynomialFeatures(deg, include_bias=False).fit_transform(X)
assert_array_almost_equal(P_test, P[:, 1:])
interact = PolynomialFeatures(2, interaction_only=True, include_bias=True)
X_poly = interact.fit_transform(X)
assert_array_almost_equal(X_poly, P2[:, [0, 1, 2, 4]])
assert_equal(interact.powers_.shape, (interact.n_output_features_,
interact.n_input_features_))
def test_polynomial_feature_names():
X = np.arange(30).reshape(10, 3)
poly = PolynomialFeatures(degree=2, include_bias=True).fit(X)
feature_names = poly.get_feature_names()
assert_array_equal(['1', 'x0', 'x1', 'x2', 'x0^2', 'x0 x1',
'x0 x2', 'x1^2', 'x1 x2', 'x2^2'],
feature_names)
poly = PolynomialFeatures(degree=3, include_bias=False).fit(X)
feature_names = poly.get_feature_names(["a", "b", "c"])
assert_array_equal(['a', 'b', 'c', 'a^2', 'a b', 'a c', 'b^2',
'b c', 'c^2', 'a^3', 'a^2 b', 'a^2 c',
'a b^2', 'a b c', 'a c^2', 'b^3', 'b^2 c',
'b c^2', 'c^3'], feature_names)
# test some unicode
poly = PolynomialFeatures(degree=1, include_bias=True).fit(X)
feature_names = poly.get_feature_names([u"\u0001F40D", u"\u262E", u"\u05D0"])
assert_array_equal([u"1", u"\u0001F40D", u"\u262E", u"\u05D0"],
feature_names)
def test_standard_scaler_1d():
# Test scaling of dataset along single axis
for X in [X_1row, X_1col, X_list_1row, X_list_1col]:
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
if isinstance(X, list):
X = np.array(X) # cast only after scaling done
if _check_dim_1axis(X) == 1:
assert_almost_equal(scaler.mean_, X.ravel())
assert_almost_equal(scaler.scale_, np.ones(n_features))
assert_array_almost_equal(X_scaled.mean(axis=0),
np.zeros_like(n_features))
assert_array_almost_equal(X_scaled.std(axis=0),
np.zeros_like(n_features))
else:
assert_almost_equal(scaler.mean_, X.mean())
assert_almost_equal(scaler.scale_, X.std())
assert_array_almost_equal(X_scaled.mean(axis=0),
np.zeros_like(n_features))
assert_array_almost_equal(X_scaled.mean(axis=0), .0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.)
assert_equal(scaler.n_samples_seen_, X.shape[0])
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_array_almost_equal(X_scaled_back, X)
# Constant feature
X = np.ones(5).reshape(5, 1)
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert_almost_equal(scaler.mean_, 1.)
assert_almost_equal(scaler.scale_, 1.)
assert_array_almost_equal(X_scaled.mean(axis=0), .0)
assert_array_almost_equal(X_scaled.std(axis=0), .0)
assert_equal(scaler.n_samples_seen_, X.shape[0])
def test_scale_1d():
# 1-d inputs
X_list = [1., 3., 5., 0.]
X_arr = np.array(X_list)
for X in [X_list, X_arr]:
X_scaled = scale(X)
assert_array_almost_equal(X_scaled.mean(), 0.0)
assert_array_almost_equal(X_scaled.std(), 1.0)
assert_array_equal(scale(X, with_mean=False, with_std=False), X)
@skip_if_32bit
def test_standard_scaler_numerical_stability():
# Test numerical stability of scaling
# np.log(1e-5) is taken because its floating point representation
# was empirically found to cause numerical problems with np.mean & np.std.
x = np.zeros(8, dtype=np.float64) + np.log(1e-5, dtype=np.float64)
if LooseVersion(np.__version__) >= LooseVersion('1.9'):
# This does not raise a warning as the number of samples is too low
# to trigger the problem in recent numpy
x_scaled = assert_no_warnings(scale, x)
assert_array_almost_equal(scale(x), np.zeros(8))
else:
w = "standard deviation of the data is probably very close to 0"
x_scaled = assert_warns_message(UserWarning, w, scale, x)
assert_array_almost_equal(x_scaled, np.zeros(8))
# with 2 more samples, the std computation runs into numerical issues:
x = np.zeros(10, dtype=np.float64) + np.log(1e-5, dtype=np.float64)
w = "standard deviation of the data is probably very close to 0"
x_scaled = assert_warns_message(UserWarning, w, scale, x)
assert_array_almost_equal(x_scaled, np.zeros(10))
x = np.ones(10, dtype=np.float64) * 1e-100
x_small_scaled = assert_no_warnings(scale, x)
assert_array_almost_equal(x_small_scaled, np.zeros(10))
# Large values can cause (often recoverable) numerical stability issues:
x_big = np.ones(10, dtype=np.float64) * 1e100
w = "Dataset may contain too large values"
x_big_scaled = assert_warns_message(UserWarning, w, scale, x_big)
assert_array_almost_equal(x_big_scaled, np.zeros(10))
assert_array_almost_equal(x_big_scaled, x_small_scaled)
x_big_centered = assert_warns_message(UserWarning, w, scale, x_big,
with_std=False)
assert_array_almost_equal(x_big_centered, np.zeros(10))
assert_array_almost_equal(x_big_centered, x_small_scaled)
def test_scaler_2d_arrays():
# Test scaling of 2d array along first axis
rng = np.random.RandomState(0)
n_features = 5
n_samples = 4
X = rng.randn(n_samples, n_features)
X[:, 0] = 0.0  # first feature is always zero
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_equal(scaler.n_samples_seen_, n_samples)
assert_array_almost_equal(X_scaled.mean(axis=0), n_features * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has been copied
assert_true(X_scaled is not X)
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_scaled = scale(X, axis=1, with_std=False)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=1), n_samples * [0.0])
X_scaled = scale(X, axis=1, with_std=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=1), n_samples * [0.0])
assert_array_almost_equal(X_scaled.std(axis=1), n_samples * [1.0])
# Check that the data hasn't been modified
assert_true(X_scaled is not X)
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), n_features * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is X)
X = rng.randn(4, 5)
X[:, 0] = 1.0 # first feature is a constant, non zero feature
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), n_features * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is not X)
def test_handle_zeros_in_scale():
s1 = np.array([0, 1, 2, 3])
s2 = _handle_zeros_in_scale(s1, copy=True)
assert_false(s1[0] == s2[0])
assert_array_equal(s1, np.array([0, 1, 2, 3]))
assert_array_equal(s2, np.array([1, 1, 2, 3]))
def test_minmax_scaler_partial_fit():
# Test if partial_fit run over many batches of size 1 and 50
# gives the same results as fit
X = X_2d
n = X.shape[0]
for chunk_size in [1, 2, 50, n, n + 42]:
# Test mean at the end of the process
scaler_batch = MinMaxScaler().fit(X)
scaler_incr = MinMaxScaler()
for batch in gen_batches(n_samples, chunk_size):
scaler_incr = scaler_incr.partial_fit(X[batch])
assert_array_almost_equal(scaler_batch.data_min_,
scaler_incr.data_min_)
assert_array_almost_equal(scaler_batch.data_max_,
scaler_incr.data_max_)
assert_equal(scaler_batch.n_samples_seen_, scaler_incr.n_samples_seen_)
assert_array_almost_equal(scaler_batch.data_range_,
scaler_incr.data_range_)
assert_array_almost_equal(scaler_batch.scale_, scaler_incr.scale_)
assert_array_almost_equal(scaler_batch.min_, scaler_incr.min_)
# Test std after 1 step
batch0 = slice(0, chunk_size)
scaler_batch = MinMaxScaler().fit(X[batch0])
scaler_incr = MinMaxScaler().partial_fit(X[batch0])
assert_array_almost_equal(scaler_batch.data_min_,
scaler_incr.data_min_)
assert_array_almost_equal(scaler_batch.data_max_,
scaler_incr.data_max_)
assert_equal(scaler_batch.n_samples_seen_, scaler_incr.n_samples_seen_)
assert_array_almost_equal(scaler_batch.data_range_,
scaler_incr.data_range_)
assert_array_almost_equal(scaler_batch.scale_, scaler_incr.scale_)
assert_array_almost_equal(scaler_batch.min_, scaler_incr.min_)
# Test std until the end of partial fits
scaler_batch = MinMaxScaler().fit(X)
scaler_incr = MinMaxScaler() # Clean estimator
for i, batch in enumerate(gen_batches(n_samples, chunk_size)):
scaler_incr = scaler_incr.partial_fit(X[batch])
assert_correct_incr(i, batch_start=batch.start,
batch_stop=batch.stop, n=n,
chunk_size=chunk_size,
n_samples_seen=scaler_incr.n_samples_seen_)
def test_standard_scaler_partial_fit():
# Test if partial_fit run over many batches of size 1 and 50
# gives the same results as fit
X = X_2d
n = X.shape[0]
for chunk_size in [1, 2, 50, n, n + 42]:
# Test mean at the end of the process
scaler_batch = StandardScaler(with_std=False).fit(X)
scaler_incr = StandardScaler(with_std=False)
for batch in gen_batches(n_samples, chunk_size):
scaler_incr = scaler_incr.partial_fit(X[batch])
assert_array_almost_equal(scaler_batch.mean_, scaler_incr.mean_)
assert_equal(scaler_batch.var_, scaler_incr.var_) # Nones
assert_equal(scaler_batch.n_samples_seen_, scaler_incr.n_samples_seen_)
# Test std after 1 step
batch0 = slice(0, chunk_size)
scaler_incr = StandardScaler().partial_fit(X[batch0])
if chunk_size == 1:
assert_array_almost_equal(np.zeros(n_features, dtype=np.float64),
scaler_incr.var_)
assert_array_almost_equal(np.ones(n_features, dtype=np.float64),
scaler_incr.scale_)
else:
assert_array_almost_equal(np.var(X[batch0], axis=0),
scaler_incr.var_)
assert_array_almost_equal(np.std(X[batch0], axis=0),
scaler_incr.scale_) # no constants
# Test std until the end of partial fits
scaler_batch = StandardScaler().fit(X)
scaler_incr = StandardScaler() # Clean estimator
for i, batch in enumerate(gen_batches(n_samples, chunk_size)):
scaler_incr = scaler_incr.partial_fit(X[batch])
assert_correct_incr(i, batch_start=batch.start,
batch_stop=batch.stop, n=n,
chunk_size=chunk_size,
n_samples_seen=scaler_incr.n_samples_seen_)
assert_array_almost_equal(scaler_batch.var_, scaler_incr.var_)
assert_equal(scaler_batch.n_samples_seen_, scaler_incr.n_samples_seen_)
def test_standard_scaler_partial_fit_numerical_stability():
# Test if the incremental computation introduces significant errors
# for large datasets with values of large magnitude
rng = np.random.RandomState(0)
n_features = 2
n_samples = 100
offsets = rng.uniform(-1e15, 1e15, size=n_features)
scales = rng.uniform(1e3, 1e6, size=n_features)
X = rng.randn(n_samples, n_features) * scales + offsets
scaler_batch = StandardScaler().fit(X)
scaler_incr = StandardScaler()
for chunk in X:
scaler_incr = scaler_incr.partial_fit(chunk.reshape(1, n_features))
# Regardless of absolute values, they must not differ by more than
# 6 significant digits
tol = 10 ** (-6)
assert_allclose(scaler_incr.mean_, scaler_batch.mean_, rtol=tol)
assert_allclose(scaler_incr.var_, scaler_batch.var_, rtol=tol)
assert_allclose(scaler_incr.scale_, scaler_batch.scale_, rtol=tol)
# NOTE Be aware that for much larger offsets std is very unstable (last
# assert) while mean is OK.
# Sparse input
size = (100, 3)
scale = 1e20
X = rng.randint(0, 2, size).astype(np.float64) * scale
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
for X in [X_csr, X_csc]:
# with_mean=False is required with sparse input
scaler = StandardScaler(with_mean=False).fit(X)
scaler_incr = StandardScaler(with_mean=False)
for chunk in X:
# chunk = sparse.csr_matrix(data_chunks)
scaler_incr = scaler_incr.partial_fit(chunk)
# Regardless of magnitude, they must not differ by more than 6 digits
tol = 10 ** (-6)
assert_true(scaler.mean_ is not None)
assert_allclose(scaler_incr.var_, scaler.var_, rtol=tol)
assert_allclose(scaler_incr.scale_, scaler.scale_, rtol=tol)
def test_partial_fit_sparse_input():
# Check that sparsity is not destroyed
X = np.array([[1.], [0.], [0.], [5.]])
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
for X in [X_csr, X_csc]:
X_null = null_transform.partial_fit(X).transform(X)
assert_array_equal(X_null.data, X.data)
X_orig = null_transform.inverse_transform(X_null)
assert_array_equal(X_orig.data, X_null.data)
assert_array_equal(X_orig.data, X.data)
def test_standard_scaler_transform_with_partial_fit():
# Check some postconditions after applying partial_fit and transform
X = X_2d[:100, :]
scaler_incr = StandardScaler()
for i, batch in enumerate(gen_batches(X.shape[0], 1)):
X_sofar = X[:(i + 1), :]
chunks_copy = X_sofar.copy()
scaled_batch = StandardScaler().fit_transform(X_sofar)
scaler_incr = scaler_incr.partial_fit(X[batch])
scaled_incr = scaler_incr.transform(X_sofar)
assert_array_almost_equal(scaled_batch, scaled_incr)
assert_array_almost_equal(X_sofar, chunks_copy) # No change
right_input = scaler_incr.inverse_transform(scaled_incr)
assert_array_almost_equal(X_sofar, right_input)
zero = np.zeros(X.shape[1])
epsilon = np.nextafter(0, 1)
assert_array_less(zero, scaler_incr.var_ + epsilon) # as less or equal
assert_array_less(zero, scaler_incr.scale_ + epsilon)
# (i + 1) because the scaler has already been fitted
assert_equal((i + 1), scaler_incr.n_samples_seen_)
def test_min_max_scaler_iris():
X = iris.data
scaler = MinMaxScaler()
# default params
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), 0)
assert_array_almost_equal(X_trans.max(axis=0), 1)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# not default params: min=1, max=2
scaler = MinMaxScaler(feature_range=(1, 2))
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), 1)
assert_array_almost_equal(X_trans.max(axis=0), 2)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# min=-.5, max=.6
scaler = MinMaxScaler(feature_range=(-.5, .6))
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), -.5)
assert_array_almost_equal(X_trans.max(axis=0), .6)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# raises on invalid range
scaler = MinMaxScaler(feature_range=(2, 1))
assert_raises(ValueError, scaler.fit, X)
def test_min_max_scaler_zero_variance_features():
# Check min max scaler on toy data with zero variance features
X = [[0., 1., +0.5],
[0., 1., -0.1],
[0., 1., +1.1]]
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
# default params
scaler = MinMaxScaler()
X_trans = scaler.fit_transform(X)
X_expected_0_1 = [[0., 0., 0.5],
[0., 0., 0.0],
[0., 0., 1.0]]
assert_array_almost_equal(X_trans, X_expected_0_1)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
X_trans_new = scaler.transform(X_new)
X_expected_0_1_new = [[+0., 1., 0.500],
[-1., 0., 0.083],
[+0., 0., 1.333]]
assert_array_almost_equal(X_trans_new, X_expected_0_1_new, decimal=2)
# not default params
scaler = MinMaxScaler(feature_range=(1, 2))
X_trans = scaler.fit_transform(X)
X_expected_1_2 = [[1., 1., 1.5],
[1., 1., 1.0],
[1., 1., 2.0]]
assert_array_almost_equal(X_trans, X_expected_1_2)
# function interface
X_trans = minmax_scale(X)
assert_array_almost_equal(X_trans, X_expected_0_1)
X_trans = minmax_scale(X, feature_range=(1, 2))
assert_array_almost_equal(X_trans, X_expected_1_2)
def test_minmax_scale_axis1():
X = iris.data
X_trans = minmax_scale(X, axis=1)
assert_array_almost_equal(np.min(X_trans, axis=1), 0)
assert_array_almost_equal(np.max(X_trans, axis=1), 1)
def test_min_max_scaler_1d():
# Test scaling of dataset along single axis
for X in [X_1row, X_1col, X_list_1row, X_list_1col]:
scaler = MinMaxScaler(copy=True)
X_scaled = scaler.fit(X).transform(X)
if isinstance(X, list):
X = np.array(X) # cast only after scaling done
if _check_dim_1axis(X) == 1:
assert_array_almost_equal(X_scaled.min(axis=0),
np.zeros(n_features))
assert_array_almost_equal(X_scaled.max(axis=0),
np.zeros(n_features))
else:
assert_array_almost_equal(X_scaled.min(axis=0), .0)
assert_array_almost_equal(X_scaled.max(axis=0), 1.)
assert_equal(scaler.n_samples_seen_, X.shape[0])
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_array_almost_equal(X_scaled_back, X)
# Constant feature
X = np.ones(5).reshape(5, 1)
scaler = MinMaxScaler()
X_scaled = scaler.fit(X).transform(X)
assert_greater_equal(X_scaled.min(), 0.)
assert_less_equal(X_scaled.max(), 1.)
assert_equal(scaler.n_samples_seen_, X.shape[0])
# Function interface
X_1d = X_1row.ravel()
min_ = X_1d.min()
max_ = X_1d.max()
assert_array_almost_equal((X_1d - min_) / (max_ - min_),
minmax_scale(X_1d, copy=True))
def test_scaler_without_centering():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
X[:, 0] = 0.0  # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
assert_raises(ValueError, StandardScaler().fit, X_csr)
assert_raises(ValueError, StandardScaler().fit, X_csc)
null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
X_null = null_transform.fit_transform(X_csr)
assert_array_equal(X_null.data, X_csr.data)
X_orig = null_transform.inverse_transform(X_null)
assert_array_equal(X_orig.data, X_csr.data)
scaler = StandardScaler(with_mean=False).fit(X)
X_scaled = scaler.transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
scaler_csr = StandardScaler(with_mean=False).fit(X_csr)
X_csr_scaled = scaler_csr.transform(X_csr, copy=True)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
scaler_csc = StandardScaler(with_mean=False).fit(X_csc)
X_csc_scaled = scaler_csc.transform(X_csc, copy=True)
assert_false(np.any(np.isnan(X_csc_scaled.data)))
assert_array_almost_equal(scaler.mean_, scaler_csr.mean_)
assert_array_almost_equal(scaler.var_, scaler_csr.var_)
assert_array_almost_equal(scaler.scale_, scaler_csr.scale_)
assert_array_almost_equal(scaler.mean_, scaler_csc.mean_)
assert_array_almost_equal(scaler.var_, scaler_csc.var_)
assert_array_almost_equal(scaler.scale_, scaler_csc.scale_)
assert_array_almost_equal(
X_scaled.mean(axis=0), [0., -0.01, 2.24, -0.35, -0.78], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(X_csr_scaled, 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
# Check that X has not been modified (copy)
assert_true(X_scaled is not X)
assert_true(X_csr_scaled is not X_csr)
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled)
assert_true(X_csr_scaled_back is not X_csr)
assert_true(X_csr_scaled_back is not X_csr_scaled)
assert_array_almost_equal(X_csr_scaled_back.toarray(), X)
X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc())
assert_true(X_csc_scaled_back is not X_csc)
assert_true(X_csc_scaled_back is not X_csc_scaled)
assert_array_almost_equal(X_csc_scaled_back.toarray(), X)
def test_scaler_int():
# test that scaler converts integer input to floating
# for both sparse and dense matrices
rng = np.random.RandomState(42)
X = rng.randint(20, size=(4, 5))
X[:, 0] = 0  # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
clean_warning_registry()
with warnings.catch_warnings(record=True):
X_null = null_transform.fit_transform(X_csr)
assert_array_equal(X_null.data, X_csr.data)
X_orig = null_transform.inverse_transform(X_null)
assert_array_equal(X_orig.data, X_csr.data)
clean_warning_registry()
with warnings.catch_warnings(record=True):
scaler = StandardScaler(with_mean=False).fit(X)
X_scaled = scaler.transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
clean_warning_registry()
with warnings.catch_warnings(record=True):
scaler_csr = StandardScaler(with_mean=False).fit(X_csr)
X_csr_scaled = scaler_csr.transform(X_csr, copy=True)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
clean_warning_registry()
with warnings.catch_warnings(record=True):
scaler_csc = StandardScaler(with_mean=False).fit(X_csc)
X_csc_scaled = scaler_csc.transform(X_csc, copy=True)
assert_false(np.any(np.isnan(X_csc_scaled.data)))
assert_array_almost_equal(scaler.mean_, scaler_csr.mean_)
assert_array_almost_equal(scaler.var_, scaler_csr.var_)
assert_array_almost_equal(scaler.scale_, scaler_csr.scale_)
assert_array_almost_equal(scaler.mean_, scaler_csc.mean_)
assert_array_almost_equal(scaler.var_, scaler_csc.var_)
assert_array_almost_equal(scaler.scale_, scaler_csc.scale_)
assert_array_almost_equal(
X_scaled.mean(axis=0),
[0., 1.109, 1.856, 21., 1.559], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(
X_csr_scaled.astype(np.float), 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
# Check that X has not been modified (copy)
assert_true(X_scaled is not X)
assert_true(X_csr_scaled is not X_csr)
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled)
assert_true(X_csr_scaled_back is not X_csr)
assert_true(X_csr_scaled_back is not X_csr_scaled)
assert_array_almost_equal(X_csr_scaled_back.toarray(), X)
X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc())
assert_true(X_csc_scaled_back is not X_csc)
assert_true(X_csc_scaled_back is not X_csc_scaled)
assert_array_almost_equal(X_csc_scaled_back.toarray(), X)
def test_scaler_without_copy():
# Check that StandardScaler.fit does not change input
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
X[:, 0] = 0.0  # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
X_copy = X.copy()
StandardScaler(copy=False).fit(X)
assert_array_equal(X, X_copy)
X_csr_copy = X_csr.copy()
StandardScaler(with_mean=False, copy=False).fit(X_csr)
assert_array_equal(X_csr.toarray(), X_csr_copy.toarray())
X_csc_copy = X_csc.copy()
StandardScaler(with_mean=False, copy=False).fit(X_csc)
assert_array_equal(X_csc.toarray(), X_csc_copy.toarray())
def test_scale_sparse_with_mean_raise_exception():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
# check scaling and fit with direct calls on sparse data
assert_raises(ValueError, scale, X_csr, with_mean=True)
assert_raises(ValueError, StandardScaler(with_mean=True).fit, X_csr)
assert_raises(ValueError, scale, X_csc, with_mean=True)
assert_raises(ValueError, StandardScaler(with_mean=True).fit, X_csc)
# check transform and inverse_transform after a fit on a dense array
scaler = StandardScaler(with_mean=True).fit(X)
assert_raises(ValueError, scaler.transform, X_csr)
assert_raises(ValueError, scaler.transform, X_csc)
X_transformed_csr = sparse.csr_matrix(scaler.transform(X))
assert_raises(ValueError, scaler.inverse_transform, X_transformed_csr)
X_transformed_csc = sparse.csc_matrix(scaler.transform(X))
assert_raises(ValueError, scaler.inverse_transform, X_transformed_csc)
def test_scale_input_finiteness_validation():
# Check if non finite inputs raise ValueError
X = [np.nan, 5, 6, 7, 8]
assert_raises_regex(ValueError,
"Input contains NaN, infinity or a value too large",
scale, X)
X = [np.inf, 5, 6, 7, 8]
assert_raises_regex(ValueError,
"Input contains NaN, infinity or a value too large",
scale, X)
def test_robust_scaler_2d_arrays():
# Test robust scaling of 2d array along first axis
rng = np.random.RandomState(0)
X = rng.randn(4, 5)
X[:, 0] = 0.0  # first feature is always zero
scaler = RobustScaler()
X_scaled = scaler.fit(X).transform(X)
assert_array_almost_equal(np.median(X_scaled, axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0)[0], 0)
def test_robust_scaler_transform_one_row_csr():
# Check RobustScaler on transforming csr matrix with one row
rng = np.random.RandomState(0)
X = rng.randn(4, 5)
single_row = np.array([[0.1, 1., 2., 0., -1.]])
scaler = RobustScaler(with_centering=False)
scaler = scaler.fit(X)
row_trans = scaler.transform(sparse.csr_matrix(single_row))
row_expected = single_row / scaler.scale_
assert_array_almost_equal(row_trans.toarray(), row_expected)
row_scaled_back = scaler.inverse_transform(row_trans)
assert_array_almost_equal(single_row, row_scaled_back.toarray())
def test_robust_scaler_iris():
X = iris.data
scaler = RobustScaler()
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(np.median(X_trans, axis=0), 0)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
q = np.percentile(X_trans, q=(25, 75), axis=0)
iqr = q[1] - q[0]
assert_array_almost_equal(iqr, 1)
def test_robust_scaler_iris_quantiles():
X = iris.data
scaler = RobustScaler(quantile_range=(10, 90))
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(np.median(X_trans, axis=0), 0)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
q = np.percentile(X_trans, q=(10, 90), axis=0)
q_range = q[1] - q[0]
assert_array_almost_equal(q_range, 1)
def test_robust_scaler_invalid_range():
for range_ in [
(-1, 90),
(-2, -3),
(10, 101),
(100.5, 101),
(90, 50),
]:
scaler = RobustScaler(quantile_range=range_)
assert_raises_regex(ValueError, 'Invalid quantile range: \(',
scaler.fit, iris.data)
def test_scale_function_without_centering():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
X[:, 0] = 0.0  # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_scaled = scale(X, with_mean=False)
assert_false(np.any(np.isnan(X_scaled)))
X_csr_scaled = scale(X_csr, with_mean=False)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
# test csc has same outcome
X_csc_scaled = scale(X_csr.tocsc(), with_mean=False)
assert_array_almost_equal(X_scaled, X_csc_scaled.toarray())
# raises value error on axis != 0
assert_raises(ValueError, scale, X_csr, with_mean=False, axis=1)
assert_array_almost_equal(X_scaled.mean(axis=0),
[0., -0.01, 2.24, -0.35, -0.78], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is not X)
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(X_csr_scaled, 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
# null scale
X_csr_scaled = scale(X_csr, with_mean=False, with_std=False, copy=True)
assert_array_almost_equal(X_csr.toarray(), X_csr_scaled.toarray())
def test_robust_scale_axis1():
X = iris.data
X_trans = robust_scale(X, axis=1)
assert_array_almost_equal(np.median(X_trans, axis=1), 0)
q = np.percentile(X_trans, q=(25, 75), axis=1)
iqr = q[1] - q[0]
assert_array_almost_equal(iqr, 1)
def test_robust_scaler_zero_variance_features():
# Check RobustScaler on toy data with zero variance features
X = [[0., 1., +0.5],
[0., 1., -0.1],
[0., 1., +1.1]]
scaler = RobustScaler()
X_trans = scaler.fit_transform(X)
# NOTE: for such a small sample size, what we expect in the third column
# depends HEAVILY on the method used to calculate quantiles. The values
# here were calculated to fit the quantiles produced by np.percentile
# using numpy 1.9. Calculating quantiles with
# scipy.stats.mstats.scoreatquantile or scipy.stats.mstats.mquantiles
# would yield very different results!
X_expected = [[0., 0., +0.0],
[0., 0., -1.0],
[0., 0., +1.0]]
assert_array_almost_equal(X_trans, X_expected)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# make sure new data gets transformed correctly
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
X_trans_new = scaler.transform(X_new)
X_expected_new = [[+0., 1., +0.],
[-1., 0., -0.83333],
[+0., 0., +1.66667]]
assert_array_almost_equal(X_trans_new, X_expected_new, decimal=3)
def test_maxabs_scaler_zero_variance_features():
# Check MaxAbsScaler on toy data with zero variance features
X = [[0., 1., +0.5],
[0., 1., -0.3],
[0., 1., +1.5],
[0., 0., +0.0]]
scaler = MaxAbsScaler()
X_trans = scaler.fit_transform(X)
X_expected = [[0., 1., 1.0 / 3.0],
[0., 1., -0.2],
[0., 1., 1.0],
[0., 0., 0.0]]
assert_array_almost_equal(X_trans, X_expected)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# make sure new data gets transformed correctly
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
X_trans_new = scaler.transform(X_new)
X_expected_new = [[+0., 2.0, 1.0 / 3.0],
[-1., 1.0, 0.0],
[+0., 1.0, 1.0]]
assert_array_almost_equal(X_trans_new, X_expected_new, decimal=2)
# function interface
X_trans = maxabs_scale(X)
assert_array_almost_equal(X_trans, X_expected)
# sparse data
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
X_trans_csr = scaler.fit_transform(X_csr)
X_trans_csc = scaler.fit_transform(X_csc)
X_expected = [[0., 1., 1.0 / 3.0],
[0., 1., -0.2],
[0., 1., 1.0],
[0., 0., 0.0]]
assert_array_almost_equal(X_trans_csr.A, X_expected)
assert_array_almost_equal(X_trans_csc.A, X_expected)
X_trans_csr_inv = scaler.inverse_transform(X_trans_csr)
X_trans_csc_inv = scaler.inverse_transform(X_trans_csc)
assert_array_almost_equal(X, X_trans_csr_inv.A)
assert_array_almost_equal(X, X_trans_csc_inv.A)
def test_maxabs_scaler_large_negative_value():
# Check MaxAbsScaler on toy data with a large negative value
X = [[0., 1., +0.5, -1.0],
[0., 1., -0.3, -0.5],
[0., 1., -100.0, 0.0],
[0., 0., +0.0, -2.0]]
scaler = MaxAbsScaler()
X_trans = scaler.fit_transform(X)
X_expected = [[0., 1., 0.005, -0.5],
[0., 1., -0.003, -0.25],
[0., 1., -1.0, 0.0],
[0., 0., 0.0, -1.0]]
assert_array_almost_equal(X_trans, X_expected)
def test_maxabs_scaler_transform_one_row_csr():
# Check MaxAbsScaler on transforming csr matrix with one row
X = sparse.csr_matrix([[0.5, 1., 1.]])
scaler = MaxAbsScaler()
scaler = scaler.fit(X)
X_trans = scaler.transform(X)
X_expected = sparse.csr_matrix([[1., 1., 1.]])
assert_array_almost_equal(X_trans.toarray(), X_expected.toarray())
X_scaled_back = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X.toarray(), X_scaled_back.toarray())
def test_deprecation_minmax_scaler():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
scaler = MinMaxScaler().fit(X)
depr_message = ("Attribute data_range will be removed in "
"0.19. Use ``data_range_`` instead")
assert_warns_message(DeprecationWarning, depr_message, getattr, scaler,
"data_range")
depr_message = ("Attribute data_min will be removed in "
"0.19. Use ``data_min_`` instead")
assert_warns_message(DeprecationWarning, depr_message, getattr, scaler,
"data_min")
def test_warning_scaling_integers():
# Check warning when scaling integer data
X = np.array([[1, 2, 0],
[0, 0, 0]], dtype=np.uint8)
w = "Data with input dtype uint8 was converted to float64"
clean_warning_registry()
assert_warns_message(DataConversionWarning, w, scale, X)
assert_warns_message(DataConversionWarning, w, StandardScaler().fit, X)
assert_warns_message(DataConversionWarning, w, MinMaxScaler().fit, X)
def test_maxabs_scaler_1d():
# Test scaling of dataset along single axis
for X in [X_1row, X_1col, X_list_1row, X_list_1col]:
scaler = MaxAbsScaler(copy=True)
X_scaled = scaler.fit(X).transform(X)
if isinstance(X, list):
X = np.array(X) # cast only after scaling done
if _check_dim_1axis(X) == 1:
assert_array_almost_equal(np.abs(X_scaled.max(axis=0)),
np.ones(n_features))
else:
assert_array_almost_equal(np.abs(X_scaled.max(axis=0)), 1.)
assert_equal(scaler.n_samples_seen_, X.shape[0])
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_array_almost_equal(X_scaled_back, X)
# Constant feature
X = np.ones(5).reshape(5, 1)
scaler = MaxAbsScaler()
X_scaled = scaler.fit(X).transform(X)
assert_array_almost_equal(np.abs(X_scaled.max(axis=0)), 1.)
assert_equal(scaler.n_samples_seen_, X.shape[0])
# function interface
X_1d = X_1row.ravel()
max_abs = np.abs(X_1d).max()
assert_array_almost_equal(X_1d / max_abs, maxabs_scale(X_1d, copy=True))
def test_maxabs_scaler_partial_fit():
# Test if partial_fit run over many batches of size 1 and 50
# gives the same results as fit
X = X_2d[:100, :]
n = X.shape[0]
for chunk_size in [1, 2, 50, n, n + 42]:
# Test mean at the end of the process
scaler_batch = MaxAbsScaler().fit(X)
scaler_incr = MaxAbsScaler()
scaler_incr_csr = MaxAbsScaler()
scaler_incr_csc = MaxAbsScaler()
for batch in gen_batches(n, chunk_size):
scaler_incr = scaler_incr.partial_fit(X[batch])
X_csr = sparse.csr_matrix(X[batch])
scaler_incr_csr = scaler_incr_csr.partial_fit(X_csr)
X_csc = sparse.csc_matrix(X[batch])
scaler_incr_csc = scaler_incr_csc.partial_fit(X_csc)
assert_array_almost_equal(scaler_batch.max_abs_, scaler_incr.max_abs_)
assert_array_almost_equal(scaler_batch.max_abs_,
scaler_incr_csr.max_abs_)
assert_array_almost_equal(scaler_batch.max_abs_,
scaler_incr_csc.max_abs_)
assert_equal(scaler_batch.n_samples_seen_, scaler_incr.n_samples_seen_)
assert_equal(scaler_batch.n_samples_seen_,
scaler_incr_csr.n_samples_seen_)
assert_equal(scaler_batch.n_samples_seen_,
scaler_incr_csc.n_samples_seen_)
assert_array_almost_equal(scaler_batch.scale_, scaler_incr.scale_)
assert_array_almost_equal(scaler_batch.scale_, scaler_incr_csr.scale_)
assert_array_almost_equal(scaler_batch.scale_, scaler_incr_csc.scale_)
assert_array_almost_equal(scaler_batch.transform(X),
scaler_incr.transform(X))
# Test std after 1 step
batch0 = slice(0, chunk_size)
scaler_batch = MaxAbsScaler().fit(X[batch0])
scaler_incr = MaxAbsScaler().partial_fit(X[batch0])
assert_array_almost_equal(scaler_batch.max_abs_, scaler_incr.max_abs_)
assert_equal(scaler_batch.n_samples_seen_, scaler_incr.n_samples_seen_)
assert_array_almost_equal(scaler_batch.scale_, scaler_incr.scale_)
assert_array_almost_equal(scaler_batch.transform(X),
scaler_incr.transform(X))
# Test std until the end of partial fits
scaler_batch = MaxAbsScaler().fit(X)
scaler_incr = MaxAbsScaler() # Clean estimator
for i, batch in enumerate(gen_batches(n, chunk_size)):
scaler_incr = scaler_incr.partial_fit(X[batch])
assert_correct_incr(i, batch_start=batch.start,
batch_stop=batch.stop, n=n,
chunk_size=chunk_size,
n_samples_seen=scaler_incr.n_samples_seen_)
def test_normalizer_l1():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='l1', copy=True)
X_norm = normalizer.transform(X)
assert_true(X_norm is not X)
X_norm1 = toarray(X_norm)
normalizer = Normalizer(norm='l1', copy=False)
X_norm = normalizer.transform(X)
assert_true(X_norm is X)
X_norm2 = toarray(X_norm)
for X_norm in (X_norm1, X_norm2):
row_sums = np.abs(X_norm).sum(axis=1)
for i in range(3):
assert_almost_equal(row_sums[i], 1.0)
assert_almost_equal(row_sums[3], 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
X_norm = Normalizer(norm='l2', copy=False).transform(X)
assert_true(X_norm is not X)
assert_true(isinstance(X_norm, sparse.csr_matrix))
X_norm = toarray(X_norm)
for i in range(3):
assert_almost_equal(row_sums[i], 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalizer_l2():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='l2', copy=True)
X_norm1 = normalizer.transform(X)
assert_true(X_norm1 is not X)
X_norm1 = toarray(X_norm1)
normalizer = Normalizer(norm='l2', copy=False)
X_norm2 = normalizer.transform(X)
assert_true(X_norm2 is X)
X_norm2 = toarray(X_norm2)
for X_norm in (X_norm1, X_norm2):
for i in range(3):
assert_almost_equal(la.norm(X_norm[i]), 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
X_norm = Normalizer(norm='l2', copy=False).transform(X)
assert_true(X_norm is not X)
assert_true(isinstance(X_norm, sparse.csr_matrix))
X_norm = toarray(X_norm)
for i in range(3):
assert_almost_equal(la.norm(X_norm[i]), 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalizer_max():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='max', copy=True)
X_norm1 = normalizer.transform(X)
assert_true(X_norm1 is not X)
X_norm1 = toarray(X_norm1)
normalizer = Normalizer(norm='max', copy=False)
X_norm2 = normalizer.transform(X)
assert_true(X_norm2 is X)
X_norm2 = toarray(X_norm2)
for X_norm in (X_norm1, X_norm2):
row_maxs = X_norm.max(axis=1)
for i in range(3):
assert_almost_equal(row_maxs[i], 1.0)
assert_almost_equal(row_maxs[3], 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
X_norm = Normalizer(norm='l2', copy=False).transform(X)
assert_true(X_norm is not X)
assert_true(isinstance(X_norm, sparse.csr_matrix))
X_norm = toarray(X_norm)
for i in range(3):
assert_almost_equal(row_maxs[i], 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalize():
# Test normalize function
# Only tests functionality not used by the tests for Normalizer.
X = np.random.RandomState(37).randn(3, 2)
assert_array_equal(normalize(X, copy=False),
normalize(X.T, axis=0, copy=False).T)
assert_raises(ValueError, normalize, [[0]], axis=2)
assert_raises(ValueError, normalize, [[0]], norm='l3')
rs = np.random.RandomState(0)
X_dense = rs.randn(10, 5)
X_sparse = sparse.csr_matrix(X_dense)
ones = np.ones((10))
for X in (X_dense, X_sparse):
for dtype in (np.float32, np.float64):
for norm in ('l1', 'l2'):
X = X.astype(dtype)
X_norm = normalize(X, norm=norm)
assert_equal(X_norm.dtype, dtype)
X_norm = toarray(X_norm)
if norm == 'l1':
row_sums = np.abs(X_norm).sum(axis=1)
else:
X_norm_squared = X_norm**2
row_sums = X_norm_squared.sum(axis=1)
assert_array_almost_equal(row_sums, ones)
def test_binarizer():
X_ = np.array([[1, 0, 5], [2, 3, -1]])
for init in (np.array, list, sparse.csr_matrix, sparse.csc_matrix):
X = init(X_.copy())
binarizer = Binarizer(threshold=2.0, copy=True)
X_bin = toarray(binarizer.transform(X))
assert_equal(np.sum(X_bin == 0), 4)
assert_equal(np.sum(X_bin == 1), 2)
X_bin = binarizer.transform(X)
assert_equal(sparse.issparse(X), sparse.issparse(X_bin))
binarizer = Binarizer(copy=True).fit(X)
X_bin = toarray(binarizer.transform(X))
assert_true(X_bin is not X)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(copy=True)
X_bin = binarizer.transform(X)
assert_true(X_bin is not X)
X_bin = toarray(X_bin)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(copy=False)
X_bin = binarizer.transform(X)
if init is not list:
assert_true(X_bin is X)
binarizer = Binarizer(copy=False)
X_float = np.array([[1, 0, 5], [2, 3, -1]], dtype=np.float64)
X_bin = binarizer.transform(X_float)
if init is not list:
assert_true(X_bin is X_float)
X_bin = toarray(X_bin)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(threshold=-0.5, copy=True)
for init in (np.array, list):
X = init(X_.copy())
X_bin = toarray(binarizer.transform(X))
assert_equal(np.sum(X_bin == 0), 1)
assert_equal(np.sum(X_bin == 1), 5)
X_bin = binarizer.transform(X)
# Cannot use threshold < 0 for sparse
assert_raises(ValueError, binarizer.transform, sparse.csc_matrix(X))
def test_center_kernel():
# Test that KernelCenterer is equivalent to StandardScaler
# in feature space
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
scaler = StandardScaler(with_std=False)
scaler.fit(X_fit)
X_fit_centered = scaler.transform(X_fit)
K_fit = np.dot(X_fit, X_fit.T)
# center fit time matrix
centerer = KernelCenterer()
K_fit_centered = np.dot(X_fit_centered, X_fit_centered.T)
K_fit_centered2 = centerer.fit_transform(K_fit)
assert_array_almost_equal(K_fit_centered, K_fit_centered2)
# center predict time matrix
X_pred = rng.random_sample((2, 4))
K_pred = np.dot(X_pred, X_fit.T)
X_pred_centered = scaler.transform(X_pred)
K_pred_centered = np.dot(X_pred_centered, X_fit_centered.T)
K_pred_centered2 = centerer.transform(K_pred)
assert_array_almost_equal(K_pred_centered, K_pred_centered2)
def test_cv_pipeline_precomputed():
# Cross-validate a regression on four coplanar points with the same
# value. Use precomputed kernel to ensure Pipeline with KernelCenterer
# is treated as a _pairwise operation.
X = np.array([[3, 0, 0], [0, 3, 0], [0, 0, 3], [1, 1, 1]])
y_true = np.ones((4,))
K = X.dot(X.T)
kcent = KernelCenterer()
pipeline = Pipeline([("kernel_centerer", kcent), ("svr", SVR())])
# did the pipeline set the _pairwise attribute?
assert_true(pipeline._pairwise)
# test cross-validation, score should be almost perfect
# NB: this test is pretty vacuous -- it's mainly to test integration
# of Pipeline and KernelCenterer
y_pred = cross_val_predict(pipeline, K, y_true, cv=2)
assert_array_almost_equal(y_true, y_pred)
def test_fit_transform():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
for obj in ((StandardScaler(), Normalizer(), Binarizer())):
X_transformed = obj.fit(X).transform(X)
X_transformed2 = obj.fit_transform(X)
assert_array_equal(X_transformed, X_transformed2)
def test_deprecation_standard_scaler():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
scaler = StandardScaler().fit(X)
depr_message = ("Function std_ is deprecated; Attribute ``std_`` will be "
"removed in 0.19. Use ``scale_`` instead")
std_ = assert_warns_message(DeprecationWarning, depr_message, getattr,
scaler, "std_")
assert_array_equal(std_, scaler.scale_)
def test_add_dummy_feature():
X = [[1, 0], [0, 1], [0, 1]]
X = add_dummy_feature(X)
assert_array_equal(X, [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_coo():
X = sparse.coo_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sparse.isspmatrix_coo(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_csc():
X = sparse.csc_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sparse.isspmatrix_csc(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_csr():
X = sparse.csr_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sparse.isspmatrix_csr(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_one_hot_encoder_sparse():
# Test OneHotEncoder's fit and transform.
X = [[3, 2, 1], [0, 1, 1]]
enc = OneHotEncoder()
# discover max values automatically
X_trans = enc.fit_transform(X).toarray()
assert_equal(X_trans.shape, (2, 5))
assert_array_equal(enc.active_features_,
np.where([1, 0, 0, 1, 0, 1, 1, 0, 1])[0])
assert_array_equal(enc.feature_indices_, [0, 4, 7, 9])
# check outcome
assert_array_equal(X_trans,
[[0., 1., 0., 1., 1.],
[1., 0., 1., 0., 1.]])
# max value given as 3
enc = OneHotEncoder(n_values=4)
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 4 * 3))
assert_array_equal(enc.feature_indices_, [0, 4, 8, 12])
# max value given per feature
enc = OneHotEncoder(n_values=[3, 2, 2])
X = [[1, 0, 1], [0, 1, 1]]
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 3 + 2 + 2))
assert_array_equal(enc.n_values_, [3, 2, 2])
# check that testing with larger feature works:
X = np.array([[2, 0, 1], [0, 1, 1]])
enc.transform(X)
# test that an error is raised when out of bounds:
X_too_large = [[0, 2, 1], [0, 1, 1]]
assert_raises(ValueError, enc.transform, X_too_large)
error_msg = "unknown categorical feature present \[2\] during transform."
assert_raises_regex(ValueError, error_msg, enc.transform, X_too_large)
assert_raises(ValueError, OneHotEncoder(n_values=2).fit_transform, X)
# test that error is raised when wrong number of features
assert_raises(ValueError, enc.transform, X[:, :-1])
# test that error is raised when wrong number of features in fit
# with prespecified n_values
assert_raises(ValueError, enc.fit, X[:, :-1])
# test exception on wrong init param
assert_raises(TypeError, OneHotEncoder(n_values=np.int).fit, X)
enc = OneHotEncoder()
# test negative input to fit
assert_raises(ValueError, enc.fit, [[0], [-1]])
# test negative input to transform
enc.fit([[0], [1]])
assert_raises(ValueError, enc.transform, [[0], [-1]])
def test_one_hot_encoder_dense():
# check for sparse=False
X = [[3, 2, 1], [0, 1, 1]]
enc = OneHotEncoder(sparse=False)
# discover max values automatically
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 5))
assert_array_equal(enc.active_features_,
np.where([1, 0, 0, 1, 0, 1, 1, 0, 1])[0])
assert_array_equal(enc.feature_indices_, [0, 4, 7, 9])
# check outcome
assert_array_equal(X_trans,
np.array([[0., 1., 0., 1., 1.],
[1., 0., 1., 0., 1.]]))
def _check_transform_selected(X, X_expected, sel):
for M in (X, sparse.csr_matrix(X)):
Xtr = _transform_selected(M, Binarizer().transform, sel)
assert_array_equal(toarray(Xtr), X_expected)
def test_transform_selected():
X = [[3, 2, 1], [0, 1, 1]]
X_expected = [[1, 2, 1], [0, 1, 1]]
_check_transform_selected(X, X_expected, [0])
_check_transform_selected(X, X_expected, [True, False, False])
X_expected = [[1, 1, 1], [0, 1, 1]]
_check_transform_selected(X, X_expected, [0, 1, 2])
_check_transform_selected(X, X_expected, [True, True, True])
_check_transform_selected(X, X_expected, "all")
_check_transform_selected(X, X, [])
_check_transform_selected(X, X, [False, False, False])
def test_transform_selected_copy_arg():
# transformer that alters X
def _mutating_transformer(X):
X[0, 0] = X[0, 0] + 1
return X
original_X = np.asarray([[1, 2], [3, 4]])
expected_Xtr = [[2, 2], [3, 4]]
X = original_X.copy()
Xtr = _transform_selected(X, _mutating_transformer, copy=True,
selected='all')
assert_array_equal(toarray(X), toarray(original_X))
assert_array_equal(toarray(Xtr), expected_Xtr)
def _run_one_hot(X, X2, cat):
enc = OneHotEncoder(categorical_features=cat)
Xtr = enc.fit_transform(X)
X2tr = enc.transform(X2)
return Xtr, X2tr
def _check_one_hot(X, X2, cat, n_features):
ind = np.where(cat)[0]
# With mask
A, B = _run_one_hot(X, X2, cat)
# With indices
C, D = _run_one_hot(X, X2, ind)
# Check shape
assert_equal(A.shape, (2, n_features))
assert_equal(B.shape, (1, n_features))
assert_equal(C.shape, (2, n_features))
assert_equal(D.shape, (1, n_features))
# Check that mask and indices give the same results
assert_array_equal(toarray(A), toarray(C))
assert_array_equal(toarray(B), toarray(D))
def test_one_hot_encoder_categorical_features():
X = np.array([[3, 2, 1], [0, 1, 1]])
X2 = np.array([[1, 1, 1]])
cat = [True, False, False]
_check_one_hot(X, X2, cat, 4)
# Edge case: all non-categorical
cat = [False, False, False]
_check_one_hot(X, X2, cat, 3)
# Edge case: all categorical
cat = [True, True, True]
_check_one_hot(X, X2, cat, 5)
def test_one_hot_encoder_unknown_transform():
X = np.array([[0, 2, 1], [1, 0, 3], [1, 0, 2]])
y = np.array([[4, 1, 1]])
# Test that one hot encoder raises error for unknown features
# present during transform.
oh = OneHotEncoder(handle_unknown='error')
oh.fit(X)
assert_raises(ValueError, oh.transform, y)
# Test the ignore option, ignores unknown features.
oh = OneHotEncoder(handle_unknown='ignore')
oh.fit(X)
assert_array_equal(
oh.transform(y).toarray(),
np.array([[0., 0., 0., 0., 1., 0., 0.]]))
# Raise error if handle_unknown is neither ignore or error.
oh = OneHotEncoder(handle_unknown='42')
oh.fit(X)
assert_raises(ValueError, oh.transform, y)
def test_fit_cold_start():
X = iris.data
X_2d = X[:, :2]
# Scalers that have a partial_fit method
scalers = [StandardScaler(with_mean=False, with_std=False),
MinMaxScaler(),
MaxAbsScaler()]
for scaler in scalers:
scaler.fit_transform(X)
# with a different shape, this may break the scaler unless the internal
# state is reset
scaler.fit_transform(X_2d)
|
bsd-3-clause
|
ephes/scikit-learn
|
examples/decomposition/plot_sparse_coding.py
|
247
|
3846
|
"""
===========================================
Sparse coding with a precomputed dictionary
===========================================
Transform a signal as a sparse combination of Ricker wavelets. This example
visually compares different sparse coding methods using the
:class:`sklearn.decomposition.SparseCoder` estimator. The Ricker (also known
as Mexican hat or the second derivative of a Gaussian) is not a particularly
good kernel to represent piecewise constant signals like this one. It therefore
shows how much adding atoms of different widths matters, which motivates
learning the dictionary to best fit your type of signals.
The richer dictionary on the right is not larger in size; heavier subsampling
is performed in order to stay on the same order of magnitude.
"""
print(__doc__)
import numpy as np
import matplotlib.pylab as pl
from sklearn.decomposition import SparseCoder
def ricker_function(resolution, center, width):
"""Discrete sub-sampled Ricker (Mexican hat) wavelet"""
x = np.linspace(0, resolution - 1, resolution)
x = ((2 / (np.sqrt(3 * width) * np.pi ** 0.25))
* (1 - ((x - center) ** 2 / width ** 2))
* np.exp((-(x - center) ** 2) / (2 * width ** 2)))
return x
def ricker_matrix(width, resolution, n_components):
"""Dictionary of Ricker (Mexican hat) wavelets"""
centers = np.linspace(0, resolution - 1, n_components)
D = np.empty((n_components, resolution))
for i, center in enumerate(centers):
D[i] = ricker_function(resolution, center, width)
D /= np.sqrt(np.sum(D ** 2, axis=1))[:, np.newaxis]
return D
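# A minimal, hypothetical sketch (never called below) showing a property of the
# dictionaries built by ricker_matrix: after the normalisation step above, every
# atom (row) has unit l2 norm. The sizes used here are arbitrary.
def _check_dictionary_normalisation():
    D = ricker_matrix(width=10, resolution=64, n_components=8)
    assert D.shape == (8, 64)
    assert np.allclose(np.sqrt((D ** 2).sum(axis=1)), 1.0)
    return D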
resolution = 1024
subsampling = 3 # subsampling factor
width = 100
n_components = resolution // subsampling
# Compute a wavelet dictionary
D_fixed = ricker_matrix(width=width, resolution=resolution,
n_components=n_components)
D_multi = np.r_[tuple(ricker_matrix(width=w, resolution=resolution,
n_components=n_components // 5)
for w in (10, 50, 100, 500, 1000))]
# Generate a signal
y = np.linspace(0, resolution - 1, resolution)
first_quarter = y < resolution / 4
y[first_quarter] = 3.
y[np.logical_not(first_quarter)] = -1.
# List the different sparse coding methods in the following format:
# (title, transform_algorithm, transform_alpha, transform_n_nonzero_coefs)
estimators = [('OMP', 'omp', None, 15), ('Lasso', 'lasso_cd', 2, None), ]
pl.figure(figsize=(13, 6))
for subplot, (D, title) in enumerate(zip((D_fixed, D_multi),
('fixed width', 'multiple widths'))):
pl.subplot(1, 2, subplot + 1)
pl.title('Sparse coding against %s dictionary' % title)
pl.plot(y, ls='dotted', label='Original signal')
# Do a wavelet approximation
for title, algo, alpha, n_nonzero in estimators:
coder = SparseCoder(dictionary=D, transform_n_nonzero_coefs=n_nonzero,
transform_alpha=alpha, transform_algorithm=algo)
x = coder.transform(y)
density = len(np.flatnonzero(x))
x = np.ravel(np.dot(x, D))
squared_error = np.sum((y - x) ** 2)
pl.plot(x, label='%s: %s nonzero coefs,\n%.2f error'
% (title, density, squared_error))
# Soft thresholding debiasing
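# Thresholding shrinks the retained coefficients toward zero, so the
# least-squares solve below re-estimates the values of the selected atoms
# (the rows of D indexed by idx) to remove that bias before reconstructing
# the signal.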
coder = SparseCoder(dictionary=D, transform_algorithm='threshold',
transform_alpha=20)
x = coder.transform(y)
_, idx = np.where(x != 0)
x[0, idx], _, _, _ = np.linalg.lstsq(D[idx, :].T, y)
x = np.ravel(np.dot(x, D))
squared_error = np.sum((y - x) ** 2)
pl.plot(x,
label='Thresholding w/ debiasing:\n%d nonzero coefs, %.2f error' %
(len(idx), squared_error))
pl.axis('tight')
pl.legend()
pl.subplots_adjust(.04, .07, .97, .90, .09, .2)
pl.show()
|
bsd-3-clause
|
felipebetancur/numpy
|
numpy/lib/recfunctions.py
|
148
|
35012
|
"""
Collection of utilities to manipulate structured arrays.
Most of these functions were initially implemented by John Hunter for
matplotlib. They have been rewritten and extended for convenience.
"""
from __future__ import division, absolute_import, print_function
import sys
import itertools
import numpy as np
import numpy.ma as ma
from numpy import ndarray, recarray
from numpy.ma import MaskedArray
from numpy.ma.mrecords import MaskedRecords
from numpy.lib._iotools import _is_string_like
from numpy.compat import basestring
if sys.version_info[0] < 3:
from future_builtins import zip
_check_fill_value = np.ma.core._check_fill_value
__all__ = [
'append_fields', 'drop_fields', 'find_duplicates',
'get_fieldstructure', 'join_by', 'merge_arrays',
'rec_append_fields', 'rec_drop_fields', 'rec_join',
'recursive_fill_fields', 'rename_fields', 'stack_arrays',
]
def recursive_fill_fields(input, output):
"""
Fills fields from output with fields from input,
with support for nested structures.
Parameters
----------
input : ndarray
Input array.
output : ndarray
Output array.
Notes
-----
* `output` should be at least the same size as `input`
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> a = np.array([(1, 10.), (2, 20.)], dtype=[('A', int), ('B', float)])
>>> b = np.zeros((3,), dtype=a.dtype)
>>> rfn.recursive_fill_fields(a, b)
array([(1, 10.0), (2, 20.0), (0, 0.0)],
dtype=[('A', '<i4'), ('B', '<f8')])
"""
newdtype = output.dtype
for field in newdtype.names:
try:
current = input[field]
except ValueError:
continue
if current.dtype.names:
recursive_fill_fields(current, output[field])
else:
output[field][:len(current)] = current
return output
def get_names(adtype):
"""
Returns the field names of the input datatype as a tuple.
Parameters
----------
adtype : dtype
Input datatype
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> rfn.get_names(np.empty((1,), dtype=int)) is None
True
>>> rfn.get_names(np.empty((1,), dtype=[('A',int), ('B', float)]))
('A', 'B')
>>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])])
>>> rfn.get_names(adtype)
('a', ('b', ('ba', 'bb')))
"""
listnames = []
names = adtype.names
for name in names:
current = adtype[name]
if current.names:
listnames.append((name, tuple(get_names(current))))
else:
listnames.append(name)
return tuple(listnames) or None
def get_names_flat(adtype):
"""
Returns the field names of the input datatype as a tuple. Nested structures
are flattened beforehand.
Parameters
----------
adtype : dtype
Input datatype
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> rfn.get_names_flat(np.empty((1,), dtype=int)) is None
True
>>> rfn.get_names_flat(np.empty((1,), dtype=[('A',int), ('B', float)]))
('A', 'B')
>>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])])
>>> rfn.get_names_flat(adtype)
('a', 'b', 'ba', 'bb')
"""
listnames = []
names = adtype.names
for name in names:
listnames.append(name)
current = adtype[name]
if current.names:
listnames.extend(get_names_flat(current))
return tuple(listnames) or None
def flatten_descr(ndtype):
"""
Flatten a structured data-type description.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> ndtype = np.dtype([('a', '<i4'), ('b', [('ba', '<f8'), ('bb', '<i4')])])
>>> rfn.flatten_descr(ndtype)
(('a', dtype('int32')), ('ba', dtype('float64')), ('bb', dtype('int32')))
"""
names = ndtype.names
if names is None:
return ndtype.descr
else:
descr = []
for field in names:
(typ, _) = ndtype.fields[field]
if typ.names:
descr.extend(flatten_descr(typ))
else:
descr.append((field, typ))
return tuple(descr)
def zip_descr(seqarrays, flatten=False):
"""
Combine the dtype description of a series of arrays.
Parameters
----------
seqarrays : sequence of arrays
Sequence of arrays
flatten : {boolean}, optional
Whether to collapse nested descriptions.
"""
newdtype = []
if flatten:
for a in seqarrays:
newdtype.extend(flatten_descr(a.dtype))
else:
for a in seqarrays:
current = a.dtype
names = current.names or ()
if len(names) > 1:
newdtype.append(('', current.descr))
else:
newdtype.extend(current.descr)
return np.dtype(newdtype).descr
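# Illustrative example (not from the original module): combining the dtype
# descriptions of two arrays with ``zip_descr``. The exact type strings depend
# on the platform (e.g. '<i8' vs '<i4' for the default integer).
#
# >>> a = np.zeros(3, dtype=[('a', int)])
# >>> b = np.zeros(3, dtype=[('b', float)])
# >>> zip_descr((a, b), flatten=True)
# [('a', '<i8'), ('b', '<f8')]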
def get_fieldstructure(adtype, lastname=None, parents=None,):
"""
Returns a dictionary with fields indexing lists of their parent fields.
This function is used to simplify access to fields nested in other fields.
Parameters
----------
adtype : np.dtype
Input datatype
lastname : optional
Last processed field name (used internally during recursion).
parents : dictionary
Dictionary of parent fields (used internally during recursion).
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> ndtype = np.dtype([('A', int),
... ('B', [('BA', int),
... ('BB', [('BBA', int), ('BBB', int)])])])
>>> rfn.get_fieldstructure(ndtype)
... # XXX: possible regression, order of BBA and BBB is swapped
{'A': [], 'B': [], 'BA': ['B'], 'BB': ['B'], 'BBA': ['B', 'BB'], 'BBB': ['B', 'BB']}
"""
if parents is None:
parents = {}
names = adtype.names
for name in names:
current = adtype[name]
if current.names:
if lastname:
parents[name] = [lastname, ]
else:
parents[name] = []
parents.update(get_fieldstructure(current, name, parents))
else:
lastparent = [_ for _ in (parents.get(lastname, []) or [])]
if lastparent:
lastparent.append(lastname)
elif lastname:
lastparent = [lastname, ]
parents[name] = lastparent or []
return parents or None
def _izip_fields_flat(iterable):
"""
Returns an iterator of concatenated fields from a sequence of arrays,
collapsing any nested structure.
"""
for element in iterable:
if isinstance(element, np.void):
for f in _izip_fields_flat(tuple(element)):
yield f
else:
yield element
def _izip_fields(iterable):
"""
Returns an iterator of concatenated fields from a sequence of arrays.
"""
for element in iterable:
if (hasattr(element, '__iter__') and
not isinstance(element, basestring)):
for f in _izip_fields(element):
yield f
elif isinstance(element, np.void) and len(tuple(element)) == 1:
for f in _izip_fields(element):
yield f
else:
yield element
def izip_records(seqarrays, fill_value=None, flatten=True):
"""
Returns an iterator of concatenated items from a sequence of arrays.
Parameters
----------
seqarrays : sequence of arrays
Sequence of arrays.
fill_value : {None, integer}
Value used to pad shorter iterables.
flatten : {True, False}, optional
Whether to collapse the fields of each record into a flat sequence of
items (True) or to keep the nested field structure (False).
"""
# OK, that's a complete ripoff from Python2.6 itertools.izip_longest
def sentinel(counter=([fill_value] * (len(seqarrays) - 1)).pop):
"Yields the fill_value or raises IndexError"
yield counter()
#
fillers = itertools.repeat(fill_value)
iters = [itertools.chain(it, sentinel(), fillers) for it in seqarrays]
# Should we flatten the items, or just use a nested approach
if flatten:
zipfunc = _izip_fields_flat
else:
zipfunc = _izip_fields
#
try:
for tup in zip(*iters):
yield tuple(zipfunc(tup))
except IndexError:
pass
def _fix_output(output, usemask=True, asrecarray=False):
"""
Private function: return a recarray, a ndarray, a MaskedArray
or a MaskedRecords depending on the input parameters
"""
if not isinstance(output, MaskedArray):
usemask = False
if usemask:
if asrecarray:
output = output.view(MaskedRecords)
else:
output = ma.filled(output)
if asrecarray:
output = output.view(recarray)
return output
def _fix_defaults(output, defaults=None):
"""
Update the fill_value and masked data of `output`
from the default given in a dictionary defaults.
"""
names = output.dtype.names
(data, mask, fill_value) = (output.data, output.mask, output.fill_value)
for (k, v) in (defaults or {}).items():
if k in names:
fill_value[k] = v
data[k][mask[k]] = v
return output
def merge_arrays(seqarrays, fill_value=-1, flatten=False,
usemask=False, asrecarray=False):
"""
Merge arrays field by field.
Parameters
----------
seqarrays : sequence of ndarrays
Sequence of arrays
fill_value : {float}, optional
Filling value used to pad missing data on the shorter arrays.
flatten : {False, True}, optional
Whether to collapse nested fields.
usemask : {False, True}, optional
Whether to return a masked array or not.
asrecarray : {False, True}, optional
Whether to return a recarray (MaskedRecords) or not.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> rfn.merge_arrays((np.array([1, 2]), np.array([10., 20., 30.])))
masked_array(data = [(1, 10.0) (2, 20.0) (--, 30.0)],
mask = [(False, False) (False, False) (True, False)],
fill_value = (999999, 1e+20),
dtype = [('f0', '<i4'), ('f1', '<f8')])
>>> rfn.merge_arrays((np.array([1, 2]), np.array([10., 20., 30.])),
... usemask=False)
array([(1, 10.0), (2, 20.0), (-1, 30.0)],
dtype=[('f0', '<i4'), ('f1', '<f8')])
>>> rfn.merge_arrays((np.array([1, 2]).view([('a', int)]),
... np.array([10., 20., 30.])),
... usemask=False, asrecarray=True)
rec.array([(1, 10.0), (2, 20.0), (-1, 30.0)],
dtype=[('a', '<i4'), ('f1', '<f8')])
Notes
-----
* Without a mask, the missing value will be filled with something
depending on its corresponding type:
-1 for integers
-1.0 for floating point numbers
'-' for characters
'-1' for strings
True for boolean values
* These default fill values were determined empirically.
"""
# Only one item in the input sequence ?
if (len(seqarrays) == 1):
seqarrays = np.asanyarray(seqarrays[0])
# Do we have a single ndarray as input ?
if isinstance(seqarrays, (ndarray, np.void)):
seqdtype = seqarrays.dtype
if (not flatten) or \
(zip_descr((seqarrays,), flatten=True) == seqdtype.descr):
# Minimal processing needed: just make sure everything's a-ok
seqarrays = seqarrays.ravel()
# Make sure we have named fields
if not seqdtype.names:
seqdtype = [('', seqdtype)]
# Find what type of array we must return
if usemask:
if asrecarray:
seqtype = MaskedRecords
else:
seqtype = MaskedArray
elif asrecarray:
seqtype = recarray
else:
seqtype = ndarray
return seqarrays.view(dtype=seqdtype, type=seqtype)
else:
seqarrays = (seqarrays,)
else:
# Make sure we have arrays in the input sequence
seqarrays = [np.asanyarray(_m) for _m in seqarrays]
# Find the sizes of the inputs and their maximum
sizes = tuple(a.size for a in seqarrays)
maxlength = max(sizes)
# Get the dtype of the output (flattening if needed)
newdtype = zip_descr(seqarrays, flatten=flatten)
# Initialize the sequences for data and mask
seqdata = []
seqmask = []
# If we expect some kind of MaskedArray, make a special loop.
if usemask:
for (a, n) in zip(seqarrays, sizes):
nbmissing = (maxlength - n)
# Get the data and mask
data = a.ravel().__array__()
mask = ma.getmaskarray(a).ravel()
# Get the filling value (if needed)
if nbmissing:
fval = _check_fill_value(fill_value, a.dtype)
if isinstance(fval, (ndarray, np.void)):
if len(fval.dtype) == 1:
fval = fval.item()[0]
fmsk = True
else:
fval = np.array(fval, dtype=a.dtype, ndmin=1)
fmsk = np.ones((1,), dtype=mask.dtype)
else:
fval = None
fmsk = True
# Store an iterator padding the input to the expected length
seqdata.append(itertools.chain(data, [fval] * nbmissing))
seqmask.append(itertools.chain(mask, [fmsk] * nbmissing))
# Create an iterator for the data
data = tuple(izip_records(seqdata, flatten=flatten))
output = ma.array(np.fromiter(data, dtype=newdtype, count=maxlength),
mask=list(izip_records(seqmask, flatten=flatten)))
if asrecarray:
output = output.view(MaskedRecords)
else:
# Same as before, without the mask we don't need...
for (a, n) in zip(seqarrays, sizes):
nbmissing = (maxlength - n)
data = a.ravel().__array__()
if nbmissing:
fval = _check_fill_value(fill_value, a.dtype)
if isinstance(fval, (ndarray, np.void)):
if len(fval.dtype) == 1:
fval = fval.item()[0]
else:
fval = np.array(fval, dtype=a.dtype, ndmin=1)
else:
fval = None
seqdata.append(itertools.chain(data, [fval] * nbmissing))
output = np.fromiter(tuple(izip_records(seqdata, flatten=flatten)),
dtype=newdtype, count=maxlength)
if asrecarray:
output = output.view(recarray)
# And we're done...
return output
def drop_fields(base, drop_names, usemask=True, asrecarray=False):
"""
Return a new array with fields in `drop_names` dropped.
Nested fields are supported.
Parameters
----------
base : array
Input array
drop_names : string or sequence
String or sequence of strings corresponding to the names of the
fields to drop.
usemask : {False, True}, optional
Whether to return a masked array or not.
asrecarray : bool, optional
Whether to return a recarray or a mrecarray (`asrecarray=True`) or
a plain ndarray or masked array with flexible dtype. The default
is False.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> a = np.array([(1, (2, 3.0)), (4, (5, 6.0))],
... dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])
>>> rfn.drop_fields(a, 'a')
array([((2.0, 3),), ((5.0, 6),)],
dtype=[('b', [('ba', '<f8'), ('bb', '<i4')])])
>>> rfn.drop_fields(a, 'ba')
array([(1, (3,)), (4, (6,))],
dtype=[('a', '<i4'), ('b', [('bb', '<i4')])])
>>> rfn.drop_fields(a, ['ba', 'bb'])
array([(1,), (4,)],
dtype=[('a', '<i4')])
"""
if _is_string_like(drop_names):
drop_names = [drop_names, ]
else:
drop_names = set(drop_names)
def _drop_descr(ndtype, drop_names):
names = ndtype.names
newdtype = []
for name in names:
current = ndtype[name]
if name in drop_names:
continue
if current.names:
descr = _drop_descr(current, drop_names)
if descr:
newdtype.append((name, descr))
else:
newdtype.append((name, current))
return newdtype
newdtype = _drop_descr(base.dtype, drop_names)
if not newdtype:
return None
output = np.empty(base.shape, dtype=newdtype)
output = recursive_fill_fields(base, output)
return _fix_output(output, usemask=usemask, asrecarray=asrecarray)
def rec_drop_fields(base, drop_names):
"""
Returns a new numpy.recarray with fields in `drop_names` dropped.
"""
return drop_fields(base, drop_names, usemask=False, asrecarray=True)
def rename_fields(base, namemapper):
"""
Rename the fields from a flexible-datatype ndarray or recarray.
Nested fields are supported.
Parameters
----------
base : ndarray
Input array whose fields must be modified.
namemapper : dictionary
Dictionary mapping old field names to their new version.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> a = np.array([(1, (2, [3.0, 30.])), (4, (5, [6.0, 60.]))],
... dtype=[('a', int),('b', [('ba', float), ('bb', (float, 2))])])
>>> rfn.rename_fields(a, {'a':'A', 'bb':'BB'})
array([(1, (2.0, [3.0, 30.0])), (4, (5.0, [6.0, 60.0]))],
dtype=[('A', '<i4'), ('b', [('ba', '<f8'), ('BB', '<f8', 2)])])
"""
def _recursive_rename_fields(ndtype, namemapper):
newdtype = []
for name in ndtype.names:
newname = namemapper.get(name, name)
current = ndtype[name]
if current.names:
newdtype.append(
(newname, _recursive_rename_fields(current, namemapper))
)
else:
newdtype.append((newname, current))
return newdtype
newdtype = _recursive_rename_fields(base.dtype, namemapper)
return base.view(newdtype)
def append_fields(base, names, data, dtypes=None,
fill_value=-1, usemask=True, asrecarray=False):
"""
Add new fields to an existing array.
The names of the fields are given with the `names` arguments,
the corresponding values with the `data` arguments.
If a single field is appended, `names`, `data` and `dtypes` do not have
to be lists but just values.
Parameters
----------
base : array
Input array to extend.
names : string, sequence
String or sequence of strings corresponding to the names
of the new fields.
data : array or sequence of arrays
Array or sequence of arrays storing the fields to add to the base.
dtypes : sequence of datatypes, optional
Datatype or sequence of datatypes.
If None, the datatypes are estimated from the `data`.
fill_value : {float}, optional
Filling value used to pad missing data on the shorter arrays.
usemask : {False, True}, optional
Whether to return a masked array or not.
asrecarray : {False, True}, optional
Whether to return a recarray (MaskedRecords) or not.
"""
# Check the names
if isinstance(names, (tuple, list)):
if len(names) != len(data):
msg = "The number of arrays does not match the number of names"
raise ValueError(msg)
elif isinstance(names, basestring):
names = [names, ]
data = [data, ]
#
if dtypes is None:
data = [np.array(a, copy=False, subok=True) for a in data]
data = [a.view([(name, a.dtype)]) for (name, a) in zip(names, data)]
else:
if not isinstance(dtypes, (tuple, list)):
dtypes = [dtypes, ]
if len(data) != len(dtypes):
if len(dtypes) == 1:
dtypes = dtypes * len(data)
else:
msg = "The dtypes argument must be None, a dtype, or a list."
raise ValueError(msg)
data = [np.array(a, copy=False, subok=True, dtype=d).view([(n, d)])
for (a, n, d) in zip(data, names, dtypes)]
#
base = merge_arrays(base, usemask=usemask, fill_value=fill_value)
if len(data) > 1:
data = merge_arrays(data, flatten=True, usemask=usemask,
fill_value=fill_value)
else:
data = data.pop()
#
output = ma.masked_all(max(len(base), len(data)),
dtype=base.dtype.descr + data.dtype.descr)
output = recursive_fill_fields(base, output)
output = recursive_fill_fields(data, output)
#
return _fix_output(output, usemask=usemask, asrecarray=asrecarray)
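# Illustrative example (not part of the original docstring): appending a single
# field with ``append_fields``. Integer widths in the dtype are platform
# dependent and the printed repr may differ slightly across numpy versions.
#
# >>> from numpy.lib import recfunctions as rfn
# >>> a = np.array([(1, 10.), (2, 20.)], dtype=[('A', int), ('B', float)])
# >>> rfn.append_fields(a, 'C', data=[1.5, 2.5], usemask=False)
# array([(1, 10.0, 1.5), (2, 20.0, 2.5)],
#       dtype=[('A', '<i8'), ('B', '<f8'), ('C', '<f8')])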
def rec_append_fields(base, names, data, dtypes=None):
"""
Add new fields to an existing array.
The names of the fields are given with the `names` arguments,
the corresponding values with the `data` arguments.
If a single field is appended, `names`, `data` and `dtypes` do not have
to be lists but just values.
Parameters
----------
base : array
Input array to extend.
names : string, sequence
String or sequence of strings corresponding to the names
of the new fields.
data : array or sequence of arrays
Array or sequence of arrays storing the fields to add to the base.
dtypes : sequence of datatypes, optional
Datatype or sequence of datatypes.
If None, the datatypes are estimated from the `data`.
See Also
--------
append_fields
Returns
-------
appended_array : np.recarray
"""
return append_fields(base, names, data=data, dtypes=dtypes,
asrecarray=True, usemask=False)
def stack_arrays(arrays, defaults=None, usemask=True, asrecarray=False,
autoconvert=False):
"""
Superposes arrays field by field.
Parameters
----------
arrays : array or sequence
Sequence of input arrays.
defaults : dictionary, optional
Dictionary mapping field names to the corresponding default values.
usemask : {True, False}, optional
Whether to return a MaskedArray (or MaskedRecords if
`asrecarray==True`) or a ndarray.
asrecarray : {False, True}, optional
Whether to return a recarray (or MaskedRecords if `usemask==True`)
or just a flexible-type ndarray.
autoconvert : {False, True}, optional
Whether to automatically cast the type of the field to the maximum.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> x = np.array([1, 2,])
>>> rfn.stack_arrays(x) is x
True
>>> z = np.array([('A', 1), ('B', 2)], dtype=[('A', '|S3'), ('B', float)])
>>> zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)],
... dtype=[('A', '|S3'), ('B', float), ('C', float)])
>>> test = rfn.stack_arrays((z,zz))
>>> test
masked_array(data = [('A', 1.0, --) ('B', 2.0, --) ('a', 10.0, 100.0) ('b', 20.0, 200.0)
('c', 30.0, 300.0)],
mask = [(False, False, True) (False, False, True) (False, False, False)
(False, False, False) (False, False, False)],
fill_value = ('N/A', 1e+20, 1e+20),
dtype = [('A', '|S3'), ('B', '<f8'), ('C', '<f8')])
"""
if isinstance(arrays, ndarray):
return arrays
elif len(arrays) == 1:
return arrays[0]
seqarrays = [np.asanyarray(a).ravel() for a in arrays]
nrecords = [len(a) for a in seqarrays]
ndtype = [a.dtype for a in seqarrays]
fldnames = [d.names for d in ndtype]
#
dtype_l = ndtype[0]
newdescr = dtype_l.descr
names = [_[0] for _ in newdescr]
for dtype_n in ndtype[1:]:
for descr in dtype_n.descr:
name = descr[0] or ''
if name not in names:
newdescr.append(descr)
names.append(name)
else:
nameidx = names.index(name)
current_descr = newdescr[nameidx]
if autoconvert:
if np.dtype(descr[1]) > np.dtype(current_descr[-1]):
current_descr = list(current_descr)
current_descr[-1] = descr[1]
newdescr[nameidx] = tuple(current_descr)
elif descr[1] != current_descr[-1]:
raise TypeError("Incompatible type '%s' <> '%s'" %
(dict(newdescr)[name], descr[1]))
# Only one field: use concatenate
if len(newdescr) == 1:
output = ma.concatenate(seqarrays)
else:
#
output = ma.masked_all((np.sum(nrecords),), newdescr)
offset = np.cumsum(np.r_[0, nrecords])
seen = []
for (a, n, i, j) in zip(seqarrays, fldnames, offset[:-1], offset[1:]):
names = a.dtype.names
if names is None:
output['f%i' % len(seen)][i:j] = a
else:
for name in n:
output[name][i:j] = a[name]
if name not in seen:
seen.append(name)
#
return _fix_output(_fix_defaults(output, defaults),
usemask=usemask, asrecarray=asrecarray)
def find_duplicates(a, key=None, ignoremask=True, return_index=False):
"""
Find the duplicates in a structured array along a given key
Parameters
----------
a : array-like
Input array
key : {string, None}, optional
Name of the fields along which to check the duplicates.
If None, the search is performed by records
ignoremask : {True, False}, optional
Whether masked data should be discarded or considered as duplicates.
return_index : {False, True}, optional
Whether to return the indices of the duplicated values.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> ndtype = [('a', int)]
>>> a = np.ma.array([1, 1, 1, 2, 2, 3, 3],
... mask=[0, 0, 1, 0, 0, 0, 1]).view(ndtype)
>>> rfn.find_duplicates(a, ignoremask=True, return_index=True)
... # XXX: judging by the output, the ignoremask flag has no effect
"""
a = np.asanyarray(a).ravel()
# Get a dictionary of fields
fields = get_fieldstructure(a.dtype)
# Get the sorting data (by selecting the corresponding field)
base = a
if key:
for f in fields[key]:
base = base[f]
base = base[key]
# Get the sorting indices and the sorted data
sortidx = base.argsort()
sortedbase = base[sortidx]
sorteddata = sortedbase.filled()
# Compare the sorting data
flag = (sorteddata[:-1] == sorteddata[1:])
# If masked data must be ignored, set the flag to false where needed
if ignoremask:
sortedmask = sortedbase.recordmask
flag[sortedmask[1:]] = False
flag = np.concatenate(([False], flag))
# We need to take the point on the left as well (else we're missing it)
flag[:-1] = flag[:-1] + flag[1:]
duplicates = a[sortidx][flag]
if return_index:
return (duplicates, sortidx[flag])
else:
return duplicates
def join_by(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2',
defaults=None, usemask=True, asrecarray=False):
"""
Join arrays `r1` and `r2` on key `key`.
The key should be either a string or a sequence of strings corresponding
to the fields used to join the array. An exception is raised if the
`key` field cannot be found in the two input arrays. Neither `r1` nor
`r2` should have any duplicates along `key`: the presence of duplicates
will make the output quite unreliable. Note that duplicates are not
looked for by the algorithm.
Parameters
----------
key : {string, sequence}
A string or a sequence of strings corresponding to the fields used
for comparison.
r1, r2 : arrays
Structured arrays.
jointype : {'inner', 'outer', 'leftouter'}, optional
If 'inner', returns the elements common to both r1 and r2.
If 'outer', returns the common elements as well as the elements of
r1 not in r2 and the elements of r2 not in r1.
If 'leftouter', returns the common elements and the elements of r1
not in r2.
r1postfix : string, optional
String appended to the names of the fields of r1 that are present
in r2 but absent from the key.
r2postfix : string, optional
String appended to the names of the fields of r2 that are present
in r1 but absent from the key.
defaults : {dictionary}, optional
Dictionary mapping field names to the corresponding default values.
usemask : {True, False}, optional
Whether to return a MaskedArray (or MaskedRecords if
`asrecarray==True`) or a ndarray.
asrecarray : {False, True}, optional
Whether to return a recarray (or MaskedRecords if `usemask==True`)
or just a flexible-type ndarray.
Notes
-----
* The output is sorted along the key.
* A temporary array is formed by dropping the fields not in the key for
the two arrays and concatenating the result. This array is then
sorted, and the common entries selected. The output is constructed by
filling the fields with the selected entries. Matching is not
preserved if there are some duplicates...
"""
# Check jointype
if jointype not in ('inner', 'outer', 'leftouter'):
raise ValueError(
"The 'jointype' argument should be in 'inner', "
"'outer' or 'leftouter' (got '%s' instead)" % jointype
)
# If we have a single key, put it in a tuple
if isinstance(key, basestring):
key = (key,)
# Check the keys
for name in key:
if name not in r1.dtype.names:
raise ValueError('r1 does not have key field %s' % name)
if name not in r2.dtype.names:
raise ValueError('r2 does not have key field %s' % name)
# Make sure we work with ravelled arrays
r1 = r1.ravel()
r2 = r2.ravel()
# Fixme: nb2 below is never used. Commenting out for pyflakes.
# (nb1, nb2) = (len(r1), len(r2))
nb1 = len(r1)
(r1names, r2names) = (r1.dtype.names, r2.dtype.names)
# Check the names for collision
if (set.intersection(set(r1names), set(r2names)).difference(key) and
not (r1postfix or r2postfix)):
msg = "r1 and r2 contain common names, r1postfix and r2postfix "
msg += "can't be empty"
raise ValueError(msg)
# Make temporary arrays of just the keys
r1k = drop_fields(r1, [n for n in r1names if n not in key])
r2k = drop_fields(r2, [n for n in r2names if n not in key])
# Concatenate the two arrays for comparison
aux = ma.concatenate((r1k, r2k))
idx_sort = aux.argsort(order=key)
aux = aux[idx_sort]
#
# Get the common keys
flag_in = ma.concatenate(([False], aux[1:] == aux[:-1]))
flag_in[:-1] = flag_in[1:] + flag_in[:-1]
idx_in = idx_sort[flag_in]
idx_1 = idx_in[(idx_in < nb1)]
idx_2 = idx_in[(idx_in >= nb1)] - nb1
(r1cmn, r2cmn) = (len(idx_1), len(idx_2))
if jointype == 'inner':
(r1spc, r2spc) = (0, 0)
elif jointype == 'outer':
idx_out = idx_sort[~flag_in]
idx_1 = np.concatenate((idx_1, idx_out[(idx_out < nb1)]))
idx_2 = np.concatenate((idx_2, idx_out[(idx_out >= nb1)] - nb1))
(r1spc, r2spc) = (len(idx_1) - r1cmn, len(idx_2) - r2cmn)
elif jointype == 'leftouter':
idx_out = idx_sort[~flag_in]
idx_1 = np.concatenate((idx_1, idx_out[(idx_out < nb1)]))
(r1spc, r2spc) = (len(idx_1) - r1cmn, 0)
# Select the entries from each input
(s1, s2) = (r1[idx_1], r2[idx_2])
#
# Build the new description of the output array .......
# Start with the key fields
ndtype = [list(_) for _ in r1k.dtype.descr]
# Add the other fields
ndtype.extend(list(_) for _ in r1.dtype.descr if _[0] not in key)
# Find the new list of names (it may be different from r1names)
names = list(_[0] for _ in ndtype)
for desc in r2.dtype.descr:
desc = list(desc)
name = desc[0]
# Have we seen the current name already ?
if name in names:
nameidx = ndtype.index(desc)
current = ndtype[nameidx]
# The current field is part of the key: take the largest dtype
if name in key:
current[-1] = max(desc[1], current[-1])
# The current field is not part of the key: add the suffixes
else:
current[0] += r1postfix
desc[0] += r2postfix
ndtype.insert(nameidx + 1, desc)
#... we haven't: just add the description to the current list
else:
names.append(desc[0])
ndtype.append(desc)
# Revert the elements to tuples
ndtype = [tuple(_) for _ in ndtype]
# Find the largest nb of common fields :
# r1cmn and r2cmn should be equal, but...
cmn = max(r1cmn, r2cmn)
# Construct an empty array
output = ma.masked_all((cmn + r1spc + r2spc,), dtype=ndtype)
names = output.dtype.names
for f in r1names:
selected = s1[f]
if f not in names or (f in r2names and not r2postfix and f not in key):
f += r1postfix
current = output[f]
current[:r1cmn] = selected[:r1cmn]
if jointype in ('outer', 'leftouter'):
current[cmn:cmn + r1spc] = selected[r1cmn:]
for f in r2names:
selected = s2[f]
if f not in names or (f in r1names and not r1postfix and f not in key):
f += r2postfix
current = output[f]
current[:r2cmn] = selected[:r2cmn]
if (jointype == 'outer') and r2spc:
current[-r2spc:] = selected[r2cmn:]
# Sort and finalize the output
output.sort(order=key)
kwargs = dict(usemask=usemask, asrecarray=asrecarray)
return _fix_output(_fix_defaults(output, defaults), **kwargs)
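# Illustrative example (not part of the original docstring): an inner join on a
# shared key field with ``join_by``. Integer widths in the output dtype are
# platform dependent and the printed repr may differ across numpy versions.
#
# >>> from numpy.lib import recfunctions as rfn
# >>> a = np.array([(1, 10.), (2, 20.), (3, 30.)],
# ...              dtype=[('key', int), ('x', float)])
# >>> b = np.array([(2, 200.), (3, 300.), (4, 400.)],
# ...              dtype=[('key', int), ('y', float)])
# >>> rfn.join_by('key', a, b, jointype='inner', usemask=False)
# array([(2, 20.0, 200.0), (3, 30.0, 300.0)],
#       dtype=[('key', '<i8'), ('x', '<f8'), ('y', '<f8')])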
def rec_join(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2',
defaults=None):
"""
Join arrays `r1` and `r2` on keys.
Alternative to join_by, that always returns a np.recarray.
See Also
--------
join_by : equivalent function
"""
kwargs = dict(jointype=jointype, r1postfix=r1postfix, r2postfix=r2postfix,
defaults=defaults, usemask=False, asrecarray=True)
return join_by(key, r1, r2, **kwargs)
|
bsd-3-clause
|
cbeighley/peregrine
|
peregrine/short_set.py
|
3
|
24942
|
## Copyright (C) 2014 Planet Labs Inc.
# Author: Henry Hallam
#
# Tools for performing point solutions from short sample sets
#
# This source is subject to the license found in the file 'LICENSE' which must
# be be distributed together with this source. All other rights reserved.
#
# THIS CODE AND INFORMATION IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND,
# EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND/OR FITNESS FOR A PARTICULAR PURPOSE.
from datetime import datetime, timedelta
from numpy import dot
from numpy.linalg import norm
from peregrine.ephemeris import calc_sat_pos, obtain_ephemeris
from peregrine.gps_time import datetime_to_tow
from scipy.optimize import fmin, fmin_powell
from warnings import warn
import cPickle
import hashlib
import math
import numpy as np
import os, os.path
import peregrine.acquisition
import peregrine.gps_constants as gps
import peregrine.samples
import peregrine.warm_start
import logging
logger = logging.getLogger(__name__)
dt = lambda sec: timedelta(seconds=sec)
def pseudoranges_from_ranges(ranges, prn_ref):
pseudoranges = {}
for prn, rngs in ranges.iteritems():
pseudoranges[prn] = rngs - ranges[prn_ref]
return pseudoranges
def resolve_ms_integers(obs_pr, pred_pr, prn_ref, disp = True):
# Solve for the pseudorange millisecond ambiguities
obs_pr = pseudoranges_from_ranges(obs_pr, prn_ref)
pred_pr = pseudoranges_from_ranges(pred_pr, prn_ref)
if disp:
print "Resolving millisecond integers:"
for prn, pr in obs_pr.iteritems():
pr_int_est = (pred_pr[prn] - pr) / gps.code_wavelength
pr_int = round(pr_int_est)
if abs(pr_int - pr_int_est) > 0.15:
logger.warn("Pseudorange integer for PRN %2d is %.4f" % (
prn + 1, pr_int_est) + ", which isn't very close to an integer.")
pr += pr_int * gps.code_wavelength
obs_pr[prn] = pr
if disp:
print ("PRN %2d: pred pseudorange = %9.3f km, obs = %9.3f, " + \
"(pred - obs) = %9.3f km") % (
prn + 1,
pred_pr[prn] / 1e3,
pr / 1e3,
(pred_pr[prn] - pr) / 1e3
)
return obs_pr
def nav_bit_hypotheses(n_ms):
import itertools
def fill_remainder(n_ms):
if n_ms <= 20:
return [ [1]*n_ms, [-1]*n_ms]
return [b + f for b in [[1]*20, [-1]*20] for f in fill_remainder(n_ms - 20)]
hs = []
for nav_edge_phase in range(1,min(n_ms,20)):
h = [([1]*nav_edge_phase) + f for f in fill_remainder(n_ms - nav_edge_phase)]
hs += h
return [k for k,v in itertools.groupby(sorted(hs))]
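# Illustrative note (not in the original source): because navigation data bits
# can only flip on 20 ms boundaries, the number of distinct hypotheses for a
# short record stays small. For example, a 3 ms record admits three:
# nav_bit_hypotheses(3) -> [[1, -1, -1], [1, 1, -1], [1, 1, 1]]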
def long_correlation(signal, ca_code, code_phase, doppler, settings, plot=False, coherent = 0, nav_bit_hypoth = None):
from swiftnav.correlate import track_correlate
code_freq_shift = (doppler / gps.l1) * gps.chip_rate
samples_per_chip = settings.samplingFreq / (gps.chip_rate + code_freq_shift)
samples_per_code = samples_per_chip * gps.chips_per_code
numSamplesToSkip = round(code_phase * samples_per_chip)
remCodePhase = (1.0 * numSamplesToSkip / samples_per_chip) - code_phase
remCarrPhase = 0.0
n_ms = int((len(signal) - numSamplesToSkip) / samples_per_code)
i_p = []
q_p = []
i_c = []
q_c = []
costas_i = 0.0
costas_q = 0.0
for loopCnt in range(n_ms):
rawSignal = signal[numSamplesToSkip:]#[:blksize_]
I_E, Q_E, I_P, Q_P, I_L, Q_L, blksize, remCodePhase, remCarrPhase = track_correlate(
rawSignal,
code_freq_shift + gps.chip_rate,
remCodePhase,
doppler + settings.IF,
remCarrPhase, ca_code, settings)
numSamplesToSkip += blksize
#print "@ %d, I_P = %.0f, Q_P = %.0f" % (loopCnt, I_P, Q_P)
i_p.append(I_P)
q_p.append(Q_P)
if coherent == 0:
Q_C = 0
I_C = math.sqrt(I_P**2 + Q_P**2)
elif coherent == 0.5:
phase = math.atan(Q_P / I_P)
mag = math.sqrt(I_P**2 + Q_P**2)
I_C = mag * math.cos(phase)
Q_C = mag * math.sin(phase)
elif coherent == 1:
if nav_bit_hypoth is None:
I_C = I_P
Q_C = Q_P
else:
I_C = I_P * nav_bit_hypoth[loopCnt]
Q_C = Q_P * nav_bit_hypoth[loopCnt]
else:
raise ValueError("'coherent' should be 0, 0.5 or 1")
i_c.append(I_C)
q_c.append(Q_C)
costas_i += I_C
costas_q += Q_C
if plot:
ax = plt.figure(figsize=(5,5)).gca()
ax.plot(i_p, q_p, '.')
ax.plot(i_c, q_c, 'r+')
ax.plot(costas_i/n_ms, costas_q/n_ms, 'ko')
ax.axis('equal')
plt.xlim([-1000, 1000])
plt.ylim([-1000, 1000])
plt.title(str(doppler))
return ((costas_i / n_ms) ** 2 + (costas_q / n_ms) ** 2)
def refine_ob(signal, acq_result, settings, print_results = True, return_sweeps = False):
# TODO: Fit code phase results for better resolution
from peregrine.include.generateCAcode import caCodes
from scipy import optimize as opt
samples_per_chip = settings.samplingFreq / gps.chip_rate
samples_per_code = samples_per_chip * gps.chips_per_code
# Get a vector with the C/A code sampled 1x/chip
ca_code = caCodes[acq_result.prn]
# Add wrapping to either end to be able to do early/late
ca_code = np.concatenate(([ca_code[1022]],ca_code,[ca_code[0]]))
dopp_offset_search = 100 # Hz away from acquisition
code_offsets = np.arange(-1,1, 1.0 / 16 / 2)
pwr_1 = []
for code_offset in code_offsets:
pwr_1.append(long_correlation(signal, ca_code, acq_result.code_phase + code_offset, acq_result.doppler + 0.0, settings,
coherent = 0))
code_offset_best_noncoherent = code_offsets[np.argmax(pwr_1)]
n_ms = int((len(signal) - samples_per_chip * (acq_result.code_phase + code_offset_best_noncoherent)) / samples_per_code)
nbhs = nav_bit_hypotheses(n_ms)
pwr_2 = []
dopp_offset_best_2 = []
for nbh in nbhs:
def score(dopp_offset):
return -long_correlation(signal, ca_code,
acq_result.code_phase + code_offset_best_noncoherent,
acq_result.doppler + dopp_offset,
settings,
coherent = 1,
nav_bit_hypoth = nbh,
plot=False)
xopt, fval, _, _ = opt.fminbound(score, -dopp_offset_search, dopp_offset_search, xtol=0.1, maxfun=500, full_output=True, disp=1)
pwr_2.append(-fval)
dopp_offset_best_2.append(xopt)
nbh_best = np.argmax(pwr_2)
dopp_offset_best = dopp_offset_best_2[nbh_best]
try:
nbp_best = nbhs[nbh_best].index(-1) % 20
except ValueError:
nbp_best = None
if return_sweeps:
dopp_plot_offsets = np.arange(-50,50,2) + dopp_offset_best
pwr_3 = []
for dopp_offset in dopp_plot_offsets:
pwr_3.append(long_correlation(signal, ca_code,
acq_result.code_phase + code_offset_best_noncoherent,
acq_result.doppler + dopp_offset, settings,
coherent = 1, nav_bit_hypoth = nbhs[nbh_best]))
pwr_4 = []
for code_offset in code_offsets:
pwr_4.append(long_correlation(signal, ca_code, acq_result.code_phase + code_offset,
acq_result.doppler + dopp_offset_best,
settings, coherent = 1, nav_bit_hypoth = nbhs[nbh_best]))
code_offset_best = code_offsets[np.argmax(pwr_4)]
if print_results:
print "%2d\t%+6.0f\t%5.1f\t%+6.3f\t\t%d\t" % (
acq_result.prn + 1, acq_result.doppler, acq_result.snr,
code_offset_best_noncoherent, nbh_best),
if nbp_best is None:
print "-",
else:
print nbp_best,
print "\t%+6.1f\t\t%+6.3f" % (dopp_offset_best, code_offset_best)
ob_cp = acq_result.code_phase + code_offset_best
ob_dopp = acq_result.doppler + dopp_offset_best
if return_sweeps:
sweeps = (code_offsets, code_offset_best,
dopp_plot_offsets, dopp_offset_best,
nbh_best,
pwr_1, pwr_2, pwr_3, pwr_4)
return ob_cp, ob_dopp, sweeps
else:
return ob_cp, ob_dopp
def plot_refine_results(acq_result, sweeps):
import matplotlib.pyplot as plt
(code_offsets, code_offset_best,
dopp_plot_offsets, dopp_offset_best,
nbh_best,
pwr_1, pwr_2, pwr_3, pwr_4) = sweeps
fig=plt.figure(figsize=(14,10))
fig.suptitle(str(acq_result))
ax=fig.add_subplot(221)
plt.title("Initial code phase search (noncoherent)")
ax.plot(code_offsets, pwr_1, 'k.')
plt.xlabel('Code phase offset')
plt.ylim([0, 500e3])
ax=fig.add_subplot(222)
ax.plot(pwr_2, 'r.')
ax.plot(nbh_best, np.amax(pwr_2), 'k*')
plt.ylim([0, 500e3])
plt.title("Nav edge search (coherent)")
plt.xlabel('Nav bits hypothesis #')
ax=fig.add_subplot(223)
ax.plot(dopp_plot_offsets, pwr_3, 'b.')
ax.plot(dopp_offset_best, np.amax(pwr_2), 'k*')
plt.ylim([0, 500e3])
plt.title("Carrier freq search (coherent)")
plt.xlabel('Carrier freq offset')
ax=fig.add_subplot(224)
ax.plot(code_offsets, pwr_4, 'g.')
ax.plot(code_offset_best, np.amax(pwr_4), 'k*')
plt.ylim([0, 500e3])
plt.title("Final code phase search (coherent)")
plt.xlabel('Code phase offset')
def refine_obs(signal, acq_results, settings,
print_results = True,
plot = True,
multi = True):
from peregrine.parallel_processing import parmap
mapper = parmap if multi else map
obs_cp = {}
obs_dopp = {}
sweepss = {}
if print_results:
print "PRN\tAcquisition:\tNon-coherent\tNavigation bit:\tCoherent\tCoherent"
print "\tDopp\tSNR\tcode phase\tHyp #\tPhase\tdoppler\t\tcode phase"
res = mapper(lambda a: refine_ob(signal, a, settings,
print_results = print_results, return_sweeps = True),
acq_results)
for i, a in enumerate(acq_results):
ob_cp, ob_dopp, sweeps = res[i]
obs_cp[a.prn] = ob_cp
obs_dopp[a.prn] = ob_dopp
sweepss[a.prn] = sweeps
if plot:
import matplotlib.pyplot as plt
for a in acq_results:
plot_refine_results(a, sweepss[a.prn])
plt.show()
return obs_cp, obs_dopp
def predict_observables(prior_traj, prior_datetime, prns, ephem, window):
from datetime import timedelta
from numpy.linalg import norm
from numpy import dot
"""Given a list of PRNs, a set of ephemerides, a nominal capture time (datetime) and a
time window (seconds), compute the ranges and dopplers for
each satellite at regular time steps across the window."""
timeres = 50 * gps.code_period # Might be important to keep this an integer number of code periods
t0 = prior_datetime - timedelta(seconds=window / 2.0)
ranges = {}
dopplers = {}
for prn in prns:
ranges[prn] = []
dopplers[prn] = []
times = []
for tt in np.arange(0, window, timeres):
t = t0 + timedelta(seconds = tt)
times.append(t)
r, v = prior_traj(t)
for prn in prns:
wk, tow = datetime_to_tow(t)
gps_r, gps_v, clock_err, clock_rate_err = calc_sat_pos(ephem[prn], tow, week = wk)
# TODO: Should we be applying sagnac correction here?
# Compute doppler
los_r = gps_r - r
ratepred = dot(gps_v - v, los_r) / norm(los_r)
shift = (-ratepred / gps.c - clock_rate_err)* gps.l1
# Compute range
rangepred = norm(r - gps_r)
# Apply GPS satellite clock correction
rangepred -= clock_err * gps.c
ranges[prn].append(rangepred)
dopplers[prn].append(shift)
for prn in prns:
ranges[prn] = np.array(ranges[prn])
dopplers[prn] = np.array(dopplers[prn])
return ranges, dopplers, times
def minimize_doppler_error(obs_dopp, times, pred_dopp, plot = False):
norm_dopp_err = np.zeros_like(times)
prns = obs_dopp.keys()
for i, t in enumerate(times):
d_diff = {prn: obs_dopp[prn] - pred_dopp[prn][i] for prn in prns}
mean = np.mean(d_diff.values())
sum_dopp_err_sq = sum([(d_diff[prn] - mean) ** 2 for prn in prns])
norm_dopp_err[i] = math.sqrt(sum_dopp_err_sq)
if plot:
import matplotlib.pyplot as plt
ax = plt.figure(figsize=(8,4)).gca()
plt.title("Time of capture refinement by pseudodoppler vs prior trajectory\n")
plt.ylabel('Pseudodoppler error norm / Hz')
ax.plot(times, norm_dopp_err, 'b+-')
plt.show()
i_min = np.argmin(norm_dopp_err)
return i_min, times[i_min]
def plot_expected_vs_measured(acqed_prns, prn_ref,
obs_pr, obs_dopp,
prior_traj, t_better,
ephem):
import matplotlib.pyplot as plt
# Compute predicted observables around this new estimate of capture time
pred_ranges, pred_dopplers, times = predict_observables(prior_traj, t_better, acqed_prns, ephem, 20)
pred_pr = pseudoranges_from_ranges(pred_ranges, prn_ref)
ax = plt.figure(figsize=(12,6)).gca()
plt.title("Code pseudophase referred to PRN %d.\n" % (prn_ref + 1) +
"Solid lines are predicted pseudophase for each GPS sat.\n" +
"Dotted lines are observed pseudophases")
plt.ylabel('Code phase / m')
colors = "bgrcmyk"
for i, prn in enumerate(acqed_prns):
color = colors[i % len(colors)]
ax.plot([times[0], times[-1]], [obs_pr[prn], obs_pr[prn]], color + ':')
ax.plot(times, pred_pr[prn], color + '-')
ax = plt.figure(figsize=(12,6)).gca()
plt.title("Code pseudophase error (observed - predicted).\n" +
"Ideally these should come to a minimum (< 10 km) at the true time of capture.\n"
"Note, this is still coupled to the prior trajectory.")
plt.ylabel('Code phase error / m')
for i, prn in enumerate(acqed_prns):
color = colors[i % len(colors)]
if i > 0:
ax.plot(times, [obs_pr[prn] - pr for pr in pred_pr[prn]], color + '-')
#plt.ylim([-300,300])
#plt.xlim([datetime.datetime(2014,5,4,0,44,13),datetime.datetime(2014,5,4,0,44,14)])
ax = plt.figure(figsize=(12,6)).gca()
plt.title("norm(code pseudophase error)\n"
"Note, this is still coupled to the prior trajectory.")
plt.ylabel('norm(Code phase error) / km')
norm_err = np.zeros_like(times)
for i, prn in enumerate(acqed_prns):
norm_err += [(obs_pr[prn] - pr) ** 2 for pr in pred_pr[prn]]
norm_err /= len(acqed_prns)
norm_err = [math.sqrt(e)/1e3 for e in norm_err]
ax.plot(times, norm_err)
# i = times.index(t_better)
plt.show()
def sagnac(gps_r, tof):
# Apply Sagnac correction (reference frame rotation during signal time of flight)
wEtau = gps.omegae_dot * tof # Rotation of Earth during time of flight in radians.
gps_r_sagnac = np.empty_like(gps_r)
gps_r_sagnac[0] = gps_r[0] + wEtau * gps_r[1];
gps_r_sagnac[1] = gps_r[1] - wEtau * gps_r[0];
gps_r_sagnac[2] = gps_r[2];
return gps_r_sagnac
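# Illustrative sketch (not in the original source): the correction above is the
# small-angle form of rotating the satellite position about the z-axis by the
# Earth rotation accumulated during the signal time of flight. The equivalent
# exact rotation would look like this (assuming gps.omegae_dot is in rad/s):
def sagnac_exact(gps_r, tof):
    theta = gps.omegae_dot * tof
    c, s = np.cos(theta), np.sin(theta)
    rot_z = np.array([[c, s, 0.0],
                      [-s, c, 0.0],
                      [0.0, 0.0, 1.0]])
    return rot_z.dot(gps_r)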
def pt_step(r_recv, delta_t, ephem, obs_pr, t_recv_ref):
# t_recv = t_recv_ref + delta_t
residuals = []
los = {}
tot = {}
wk, tow_ref = datetime_to_tow(t_recv_ref)
for prn, ob_pr in obs_pr.iteritems():
range_obs = ob_pr + delta_t * gps.c
tof = range_obs / gps.c
tot[prn] = tow_ref - tof
# Compute predicted range
gps_r, gps_v, clock_err, clock_rate_err = calc_sat_pos(ephem[prn], tot[prn], week = wk)
gps_r_sagnac = sagnac(gps_r, tof)
line_of_sight = gps_r_sagnac - r_recv
range_pred = norm(line_of_sight)
# Apply GPS satellite clock correction
range_pred -= clock_err * gps.c
range_residual = range_pred - range_obs
residuals.append(range_residual)
los[prn] = -line_of_sight / norm(line_of_sight)
return residuals, los, tot
def p_solve(r_init, t_recv, obs_pr, ephem):
# TODO: Adapt libswiftnav solver instead. This is way slow.
# Solve for position given pseudoranges, time of reception and initial position guess
def score(params):
r = params[0:3]
delta_t = params[3]
residuals, _, _ = pt_step(r, delta_t, ephem, obs_pr, t_recv)
return norm(residuals)
params_init = np.append(r_init, [gps.nominal_range / gps.c])
params_min = fmin_powell(score, params_init, disp = False)
r_sol = params_min[0:3]
residuals, los, tot = pt_step(r_sol, params_min[3], ephem, obs_pr, t_recv)
return r_sol, residuals, los, tot
def pt_solve(r_init, t_init, obs_pr, ephem):
# Solve for position and time given pseudoranges and initial guess for position and time of reception
# Implemented as an outer loop around p_solve
def score(params):
delta_t_recv = params[0]
t_recv = t_init + timedelta(seconds = delta_t_recv)
r_sol, residuals, los, tot = p_solve(r_init, t_recv, obs_pr, ephem)
return norm(residuals)
params_min = fmin(score, [0], disp = True)
t_sol = t_init + timedelta(seconds = params_min[0])
r_sol, residuals, los, tot = p_solve(r_init, t_sol, obs_pr, ephem)
return r_sol, t_sol, los, tot, residuals
def plot_t_recv_sensitivity(r_init, t_ref, obs_pr, ephem, spread = 0.2, step = 0.025):
import matplotlib.pyplot as plt
times = [t_ref + dt(offset) for offset in np.arange(-spread, spread, step)]
scores = []
t_sols = []
ax = plt.figure(figsize=(12,6)).gca()
plt.ylabel('Residual norm / m')
for t_recv in times:
r_sol, residuals, _, _ = p_solve(r_init, t_recv, obs_pr, ephem)
scores.append(np.linalg.norm(residuals))
ax.plot(times, scores,'+-')
plt.xlabel('t_recv (step = %.0f ms)' % (step / 1E-3))
plt.ylim([0, max(scores)])
plt.title('Sensitivity of solution to reception time')
plt.show()
def vel_solve(r_sol, t_sol, ephem, obs_pseudodopp, los, tot):
prns = los.keys()
pred_prr = {}
for prn in prns:
_, gps_v, _, clock_rate_err = calc_sat_pos(ephem[prn], tot[prn])
pred_prr[prn] = -dot(gps_v, los[prn]) + clock_rate_err * gps.c
los = np.array(los.values())
obs_prr = -(np.array(obs_pseudodopp.values()) / gps.l1) * gps.c
pred_prr = np.array(pred_prr.values())
prr_err = obs_prr - pred_prr
G = np.append(los, (np.array([[1] * len(prns)])).transpose(), 1)
X = prr_err
sol, v_residsq, _, _ = np.linalg.lstsq(G, X)
v_sol = sol[0:3]
f_sol = (sol[3] / gps.c) * gps.l1
print "Velocity residual norm: %.1f m/s" % math.sqrt(v_residsq)
print "Receiver clock frequency error: %+6.1f Hz" % f_sol
return v_sol, f_sol
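# Illustrative note (not in the original source): vel_solve sets up the linear
# model  G * [vx, vy, vz, c*f_err/L1]^T = (observed - predicted) pseudorange
# rate, where each row of G is the receiver-to-satellite unit line-of-sight
# vector augmented with a 1 for the receiver clock drift term, solves it by
# least squares, and rescales the clock term from m/s to an L1 frequency
# error in Hz.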
def postprocess_short_samples(signal, prior_trajectory, t_prior, settings,
plot = True):
"""
Postprocess a short baseband sample record into a navigation solution.
Parameters
----------
signal : array
Baseband sample record to process (as loaded into memory).
prior_trajectory : state vector tuple ([x,y,z], [vx, vy, vz]) or function f(t)
This specifies the prior estimate of the receiver trajectory.
It can either be a single position / velocity state vector, or
a function of time.
It is given in the ECEF frame in meters and meters / second.
t_prior : datetime
Prior estimate of the time the samples were captured (on GPST timescale)
settings : peregrine settings class
e.g. from peregrine.initSettings.initSettings()
plot : bool
Make pretty graphs.
Returns
-------
r_sol, v_sol, t_sol : tuple
ECEF position (m), ECEF velocity (m/s) and GPST time of the navigation
solution, or None if a satisfactory solution could not be formed.
"""
if hasattr(prior_trajectory, '__call__'):
prior_traj_func = True
prior_traj = prior_trajectory
else:
prior_traj_func = False
prior_traj = lambda t: prior_trajectory
sig_len_ms = len(signal) / settings.samplingFreq / 1E-3
print "Signal is %.2f ms long." % sig_len_ms
r_prior, v_prior = prior_traj(t_prior)
ephem = peregrine.ephemeris.obtain_ephemeris(t_prior, settings)
n_codes_integrate = min(15, int(sig_len_ms / 2))
obs_cache_dir = os.path.join(settings.cacheDir, "obs")
obs_cache_file = os.path.join(obs_cache_dir, hashlib.md5(signal).hexdigest())
if settings.useCache and os.path.exists(obs_cache_file):
with open(obs_cache_file, 'rb') as f:
(acqed_prns, obs_cp, obs_dopp) = cPickle.load(f)
print "Loaded cached observations from '%s'." % obs_cache_file
else:
print "Performing acquisition with %d ms integration." % n_codes_integrate
acqed = peregrine.warm_start.warm_start(signal,
t_prior, r_prior, v_prior,
ephem, settings,
n_codes_integrate)
# Rearrange to put sat with smallest range-rate first.
# This makes graphs a bit less hairy.
acqed.sort(key = lambda a: abs(a.doppler))
acqed_prns = [a.prn for a in acqed]
# Improve the observables with fine correlation search
obs_cp, obs_dopp = refine_obs(signal, acqed[:], settings, plot = plot)
if settings.useCache:
if not os.path.exists(obs_cache_dir):
os.makedirs(obs_cache_dir)
with open(obs_cache_file, 'wb') as f:
cPickle.dump((acqed_prns, obs_cp, obs_dopp), f,
protocol=cPickle.HIGHEST_PROTOCOL)
# Check whether we have enough satellites
if len(acqed_prns) < 5:
logger.error(("Acquired %d SVs; need at least 5 for a solution" +
" in short-capture mode.") % len(acqed_prns))
return
# Determine the reference PRN
prn_ref = acqed_prns[0]
print "PRNs in use: " + str([p + 1 for p in acqed_prns])
# Improve the time part of the prior estimate by minimizing doppler residuals
pred_ranges, pred_dopplers, times = predict_observables(prior_traj, t_prior,
acqed_prns, ephem,
30)
i, t_better = minimize_doppler_error(obs_dopp, times, pred_dopplers,
plot = plot)
# Revise the prior state vector based on this new estimate of capture time
r_better, v_better = prior_traj(t_better)
delta_t = t_better.second - t_prior.second + (t_better.microsecond - t_prior.microsecond)*1e-6
delta_r = np.linalg.norm(np.array(r_better) - r_prior)
print "By minimizing doppler residuals, adjusted the prior time and position by %s seconds, %.1f km" % (
delta_t, delta_r/ 1e3)
pred_ranges, pred_dopplers, times = predict_observables(
prior_traj, t_better, acqed_prns, ephem, 1e-9)
pred_pr_t_better = {prn: pred_ranges[prn][0] for prn in acqed_prns}
# Resolve code phase integers to find observed pseudoranges
obs_pr = {prn: (obs_cp[prn] / gps.chip_rate) * gps.c for prn in acqed_prns}
obs_pr = resolve_ms_integers(obs_pr, pred_pr_t_better, prn_ref, disp = True)
if plot:
plot_expected_vs_measured(acqed_prns, prn_ref, obs_pr, obs_dopp,
prior_traj, t_better, ephem)
# Perform PVT navigation solution
r_sol, t_sol, los, tot, residuals = pt_solve(r_better, t_better, obs_pr,
ephem)
resid_norm_norm = norm(residuals) / len(acqed_prns)
if resid_norm_norm > settings.navSanityMaxResid:
logger.error("PVT solution not satisfactorily converged: %.0f > %.0f" % (
resid_norm_norm, settings.navSanityMaxResid))
return
print "Position: " + str(r_sol)
print "t_sol: " + str(t_sol)
print "t_prior: " + str(t_prior)
v_sol, rx_freq_err = vel_solve(r_sol, t_sol, ephem, obs_dopp, los, tot)
print "Velocity: %s (%.1f m/s)" % (v_sol, norm(v_sol))
# How accurate is the time component of the solution?
if plot:
plot_t_recv_sensitivity(r_sol, t_sol, obs_pr, ephem,
spread = 0.1, step = 0.01)
return r_sol, v_sol, t_sol
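# Illustrative usage sketch (not part of the original module). The sample
# loader, file name and prior values below are hypothetical placeholders; the
# settings object comes from peregrine.initSettings as noted in the docstring.
#
# from datetime import datetime
# from peregrine.initSettings import initSettings
#
# settings = initSettings()
# signal = load_short_capture('capture.bin')        # hypothetical loader
# r_prior = [-2706177.0, -4261218.0, 3885597.0]     # ECEF meters (placeholder)
# v_prior = [0.0, 0.0, 0.0]                         # ECEF m/s (placeholder)
# t_prior = datetime(2014, 5, 4, 0, 44, 13)
# sol = postprocess_short_samples(signal, (r_prior, v_prior), t_prior,
#                                 settings, plot=False)
# if sol is not None:
#     r_sol, v_sol, t_sol = sol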
|
gpl-3.0
|
charanpald/sandbox
|
sandbox/ranking/RankBoost.py
|
1
|
5599
|
import os
import subprocess
import tempfile
import numpy
import logging
from sandbox.predictors.AbstractPredictor import AbstractPredictor
from sandbox.util.Parameter import Parameter
from sandbox.util.Evaluator import Evaluator
from sklearn.cross_validation import StratifiedKFold
"""
A wrapper for the RankBoost code written in RankLib. Note that RankLib must
be on the current path.
"""
class RankBoost(AbstractPredictor):
def __init__(self, numProcesses=1):
super(RankBoost, self).__init__()
self.iterations = 100
self.learners = 20
self.bestResponse = 1
self.processes = numProcesses
self.libPath = os.getenv("HOME") + "/Documents/Postdoc/Code/semisup_rankboost/"
def setIterations(self, iterations):
Parameter.checkInt(iterations, 0, float('inf'))
self.iterations = iterations
def setLearners(self, learners):
Parameter.checkInt(learners, 0, float('inf'))
self.learners = learners
def setLibPath(self, libPath):
self.libPath = libPath
def saveExamples(self, X, y, fileName):
file = open(fileName, "w")
for i in range(X.shape[0]):
fStr = str(y[i]) + " "
for j in range(X.shape[1]):
if j!=X.shape[1]-1:
fStr += str(j+1) + ":" + str(X[i, j]) + " "
else:
fStr += str(j+1) + ":" + str(X[i, j])
fStr += "\n"
file.write(fStr)
file.close()
def getOutputStr(self):
"""
Return the command line output from the last command
"""
return self.outputStr
def learnModel(self, X, y):
#Must make sure examples are +1/-1
newY = numpy.array(y==self.bestResponse, numpy.int)*2 - 1
numTempFiles = 2
tempFileNameList = []
for i in range(numTempFiles):
fileObj = tempfile.NamedTemporaryFile(delete=False)
tempFileNameList.append(fileObj.name)
fileObj.close()
trainFileName = tempFileNameList[0]
modelFileName = tempFileNameList[1]
self.saveExamples(X, newY, trainFileName)
callList = [self.libPath + "ssrankboost-learn", "-t", str(self.iterations), "-n", str(self.learners)]
callList.extend([trainFileName, modelFileName])
try:
self.outputStr = subprocess.check_output(callList)
except AttributeError:
subprocess.call(callList)
modelFile = open(modelFileName, "r")
self.model = modelFile.read()
modelFile.close()
os.remove(modelFileName)
os.remove(trainFileName)
def predict(self, X):
numTempFiles = 3
tempFileNameList = []
for i in range(numTempFiles):
fileObj = tempfile.NamedTemporaryFile(delete=False)
tempFileNameList.append(fileObj.name)
fileObj.close()
testFileName = tempFileNameList[0]
scoreFileName = tempFileNameList[1]
modelFileName = tempFileNameList[2]
self.saveExamples(X, numpy.ones(X.shape[0]), testFileName)
modelFile = open(modelFileName, "w")
modelFile.write(self.model)
modelFile.close()
callList = [self.libPath + "ssrankboost-test", testFileName, modelFileName, scoreFileName]
try:
self.outputStr = subprocess.check_output(callList)
except AttributeError:
subprocess.call(callList)
os.remove(testFileName)
os.remove(modelFileName)
#Now read the scores files
scores = numpy.fromfile(scoreFileName, sep=" ")
os.remove(scoreFileName)
return scores
def modelSelect(self, X, y, folds=5):
"""
Do model selection for a dataset and then learn using the best parameters
according to the AUC.
"""
learnerList = numpy.arange(10, 51, 10)
meanAUCs = numpy.zeros(learnerList.shape[0])
stdAUCs = numpy.zeros(learnerList.shape[0])
for i in range(learnerList.shape[0]):
self.setLearners(learnerList[i])
meanAUCs[i], stdAUCs[i] = self.evaluateStratifiedCv(X, y, folds, metricMethod=Evaluator.auc)
self.setLearners(learnerList[numpy.argmax(meanAUCs)])
logging.debug("Best learner found: " + str(self))
self.learnModel(X, y)
def evaluateCvOuter(self, X, y, folds):
"""
Compute the average AUC using k-fold cross validation.
"""
Parameter.checkInt(folds, 2, float('inf'))
idx = StratifiedKFold(y, folds)
metricMethods = [Evaluator.auc2, Evaluator.roc]
trainMetrics, testMetrics = AbstractPredictor.evaluateLearn2(X, y, idx, self.modelSelect, self.predict, metricMethods)
bestTrainAUCs = trainMetrics[0]
bestTrainROCs = trainMetrics[1]
bestTestAUCs = testMetrics[0]
bestTestROCs = testMetrics[1]
bestParams = {}
bestMetaDicts = {}
allMetrics = [bestTrainAUCs, bestTrainROCs, bestTestAUCs, bestTestROCs]
return (bestParams, allMetrics, bestMetaDicts)
def __str__(self):
outputStr = "RankBoost: learners=" + str(self.learners) + " iterations=" + str(self.iterations)
return outputStr
def copy(self):
learner = RankBoost()
learner.learners = self.learners
learner.iterations = self.iterations
return learner
def getMetricMethod(self):
return Evaluator.auc2
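# Illustrative usage sketch (not part of the original module); the data and the
# library path below are placeholders.
#
# import numpy
# learner = RankBoost()
# learner.setLibPath("/path/to/semisup_rankboost/")  # dir with ssrankboost-learn/-test
# learner.setIterations(100)
# learner.setLearners(20)
# X = numpy.random.rand(50, 5)
# y = numpy.array([1, -1] * 25)
# learner.learnModel(X, y)
# scores = learner.predict(X)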
|
gpl-3.0
|
grhawk/ASE
|
tools/ase/test/fio/oi.py
|
2
|
2234
|
import sys
import numpy as np
from ase import Atoms
from ase.io import write, read
a = 5.0
d = 1.9
c = a / 2
atoms = Atoms('AuH',
positions=[(c, c, 0), (c, c, d)],
cell=(a, a, 2 * d),
pbc=(0, 0, 1))
extra = np.array([ 2.3, 4.2 ])
atoms.set_array('extra', extra)
atoms *= (1, 1, 2)
images = [atoms.copy(), atoms.copy()]
r = ['xyz', 'traj', 'cube', 'pdb', 'cfg', 'struct', 'cif', 'gen']
try:
import json
except ImportError:
pass
else:
r += ['json', 'db']
try:
import Scientific
version = Scientific.__version__.split('.')
print 'Found ScientificPython version: ', Scientific.__version__
if map(int, version) < [2, 8]:
print('ScientificPython 2.8 or greater required for numpy support')
raise ImportError
except ImportError:
print('No Scientific python found. Check your PYTHONPATH')
else:
r += ['etsf']
w = r + ['xsf', 'findsym']
try:
import matplotlib
except ImportError:
pass
else:
w += ['png', 'eps']
only_one_image = ['cube', 'png', 'eps', 'cfg', 'struct', 'etsf', 'gen',
'json', 'db']
for format in w:
print format, 'O',
fname1 = 'io-test.1.' + format
fname2 = 'io-test.2.' + format
write(fname1, atoms, format=format)
if format not in only_one_image:
write(fname2, images, format=format)
if format in r:
print 'I'
a1 = read(fname1)
assert np.all(np.abs(a1.get_positions() -
atoms.get_positions()) < 1e-6)
if format in ['traj', 'cube', 'cfg', 'struct', 'gen']:
assert np.all(np.abs(a1.get_cell() - atoms.get_cell()) < 1e-6)
if format in ['cfg']:
assert np.all(np.abs(a1.get_array('extra') -
atoms.get_array('extra')) < 1e-6)
if format not in only_one_image:
a2 = read(fname2)
a3 = read(fname2, index=0)
a4 = read(fname2, index=slice(None))
if format in ['cif'] and sys.platform in ['win32']:
pass # Fails on Windows:
# https://trac.fysik.dtu.dk/projects/ase/ticket/62
else:
assert len(a4) == 2
else:
print
|
gpl-2.0
|
akshayparopkari/kadambari
|
python/network_plot_python.py
|
1
|
8040
|
#!/usr/bin/env python
'''
Abstract: Create network plots based on correlation matrix.
Date: 04/27/2015
Author: Akshay Paropkari
'''
import sys
import argparse
import pandas as pd
import networkx as nx
import matplotlib.pyplot as plt
from collections import defaultdict
def nodes_classify(gramox_inf, nodelist, lvl):
'''
Classify nodes of the network graph based on gram strain of the OTU's.
:type gramox_inf: gramox data file
:param gramox_inf: Master file of all gramox data for all OTU's in the
following tab-separated format:
OTU Gram Status Oxygen Requirement Source
:type nodelist: list
:param nodelist: List of all nodes in the graph. Usually networkx
attribute such as G.nodes() would work.
:type lvl: str
:param lvl: Choose between genus(g) or species(s) phylogenetic level to
use for classifying OTU's. Defaults to species(s) level.
:type return: list
:return: Returns 4 lists g_pos, g_neg, ctk, and unk, which hold the
classified gram-positive, gram-negative, cytokine and unknown/NA
nodes respectively.
'''
g_pos = []
g_neg = []
unk = []
ctk = []
with open(gramox_inf, 'rU') as gramoxf:
if lvl == 'g':
data = {line.strip().split('\t')[0]: line.strip().split('\t')[2]
for line in gramoxf.readlines()[1:]}
elif lvl == 's':
data = {line.strip().split('\t')[1]: line.strip().split('\t')[2]
for line in gramoxf.readlines()[1:]}
for item in nodelist:
if item in data.keys():
if data[item] == '1':
g_pos.append(item)
elif data[item] == '0':
g_neg.append(item)
else:
unk.append(item)
elif item[:2] == 'Hu':
ctk.append(item)
else:
unk.append(item)
return g_pos, g_neg, ctk, unk
def draw_network_graphs(pearson_inf, gramox_inf, lvl, filter_pct=None,
category=None):
'''
This function accepts statistically significant Pearson's correlation
data to create graph nodes and edges and draws them out on a network
graph.
:type pearsoncorr: file path
:param pearsoncorr: File with output of JMP correlation. The format
(first row) for the tab-separated file should be:
Category->Variable->by Variable->Correlation
:type gramox_inf: gramox data file
:param gramox_inf: Master file of all gramox data for all OTU's in the
following tab-separated format:
Genus->Species->Gram Status->Oxygen Requirement->Source
:type lvl: str
:param lvl: Choose between genus(g) or species(s) phylogenetic level to
use for classifying OTU's. Defaults to species(s) level.
:type filter_pct: float
:param filter_pct: Specify the minimum value of correlation strength
to display. By default, all correlations will be
portrayed. Range is (0,1).
:type category: str
:param category: Provide for which category you want to create a
network graph, which should be one of the options
from the first column of pearsoncorr data file.
:type return: network graph/figure
:return: Returns a network graph with OTU or cytokines as nodes and
their Pearson's correlation as edges. Green edges
represent positive correlation and red edges denote negative
correlation. Also, OTU nodes are colored based on gram strain,
dark blue are gram-positive and light blue are gram-negative, '
'yellow are cytokines.'
'''
# Read pearson correlation data into a dataframe.
pdata = pd.read_csv(pearson_inf, sep='\t')
# Creating a multigraph
G = nx.MultiGraph()
# Prep edges for graph
for rows in pdata.iterrows():
row = rows[1]
if category is None:
G.add_edge(row['Variable'], row['by Variable'], weight=row['Correlation'])
else:
if row['Category'] == category:
if filter_pct is None:
G.add_edge(row['Variable'], row['by Variable'], weight=row['Correlation'])
else:
if row['Correlation'] >= filter_pct or row['Correlation'] <= -(filter_pct):
G.add_edge(row['Variable'], row['by Variable'], weight=row['Correlation'])
print 'Length of nodes and edges:', len(G.nodes()), len(G.edges())
# Classify positive or negative correlation edges
pos_corr = [(u, v) for (u, v, d) in G.edges(data=True) if d['weight'] > 0]
neg_corr = [(u, v) for (u, v, d) in G.edges(data=True) if d['weight'] < 0]
# Classify nodes based on gram strains
g_pos, g_neg, ctk, unk = nodes_classify(gramox_inf, G.nodes(), lvl)
print 'Length of gpos, gneg, ctk, unk:', len(g_pos), len(g_neg), len(ctk), len(unk)
# Confirm if all nodes have been classified
try:
assert len(g_neg) + len(g_pos) + len(ctk) + len(unk) == len(G.nodes())
except AssertionError:
return 'Classified nodes do not add up to total number of nodes.'
# Draw network graph
plt.figure(figsize=(20, 20))
pos = nx.spring_layout(G, iterations=200, k=0.2)
nx.draw_networkx_nodes(G, pos, nodelist=g_pos, node_color='#3366ff',
node_size=500) # gpos: dark blue
nx.draw_networkx_nodes(G, pos, nodelist=g_neg, node_color='#99ccff',
node_size=500) # gneg: light blue
nx.draw_networkx_nodes(G, pos, nodelist=ctk, node_color='#FFDB19',
node_size=500) # cytokines: yellow
nx.draw_networkx_nodes(G, pos, nodelist=unk, node_color='#808080',
node_size=500) # unknown: gray
nx.draw_networkx_edges(G, pos, alpha=0.5, edgelist=pos_corr,
edge_color='#008000') # poscorr: dark green
nx.draw_networkx_edges(G, pos, alpha=0.5, edgelist=neg_corr,
edge_color='r') # negcorr: red
nx.draw_networkx_labels(G, pos)
font = {'color': 'k', 'fontweight': 'bold', 'fontsize': 24}
plt.axis('off')
plt.show()
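# Illustrative sketch (not part of the original script): drawing the graph for
# one category while keeping only the stronger correlations, assuming
# hypothetical file names:
#
# draw_network_graphs('jmp_pearson_output.txt', 'gramox_master.txt', lvl='s',
#                     filter_pct=0.5, category='Smokers')
#
# With filter_pct=0.5 only edges whose |Correlation| >= 0.5 are added; node
# and edge colors follow the scheme documented in the docstring above.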
def prog_options():
parser = argparse.ArgumentParser(description='Create network plots based '
'on correlation matrix.')
parser.add_argument('in_corr_mat',
help='Correlation matrix file. The format'
' for the tab-separated file should be: '
'Category->Variable->by Variable->Correlation')
parser.add_argument('in_gramox_fnh',
help='Master file of all gramox data for all OTU\'s '
'in the following tab-separated format: '
'Genus->Species->Gram Status->Oxygen Requirement->Source')
parser.add_argument('phy_lvl',
help='Choose between genus(g) or species(s) '
'phylogenetic level to use for classifying '
'OTU\'s. Defaults to species(s) level')
parser.add_argument('fil_pct', type=float,
help='Specify the minimum value of correlation '
'strength to display. By default, all '
'correlations will be portrayed. Range is (0,1)')
parser.add_argument('cat_name',
help='Program will plot network graph for this '
'category only')
return parser.parse_args()
def main():
args = prog_options()
draw_network_graphs(args.in_corr_mat, args.in_gramox_fnh, args.phy_lvl,
args.fil_pct, args.cat_name)
if __name__ == '__main__':
main()
|
bsd-3-clause
|
selective-inference/selective-inference
|
doc/learning_examples/HIV/stability_selection.py
|
3
|
3772
|
import functools
import numpy as np
from scipy.stats import norm as ndist
from sklearn.linear_model import lasso_path
# load in the X matrix
from selection.tests.instance import HIV_NRTI
X_full = HIV_NRTI(datafile="NRTI_DATA.txt", standardize=False)[0] * 1.
print(X_full.dtype)
from selection.learning.utils import full_model_inference, liu_inference, pivot_plot
from selection.learning.core import split_sampler, keras_fit
boot_design = False
def simulate(s=10, signal=(0.5, 1), sigma=2, alpha=0.1, B=3000, seed=0):
# description of statistical problem
n, p = X_full.shape
if boot_design:
idx = np.random.choice(np.arange(n), n, replace=True)
X = X_full[idx] # bootstrap X to make it really an IID sample, i.e. don't condition on X throughout
X += 0.1 * np.std(X) * np.random.standard_normal(X.shape) # to make non-degenerate
else:
X = X_full.copy()
X = X - np.mean(X, 0)[None, :]
X = X / np.std(X, 0)[None, :]
n, p = X.shape
truth = np.zeros(p)
truth[:s] = np.linspace(signal[0], signal[1], s)
np.random.shuffle(truth)
truth /= np.sqrt(n)
truth *= sigma
y = X.dot(truth) + sigma * np.random.standard_normal(n)
XTX = X.T.dot(X)
XTXi = np.linalg.inv(XTX)
resid = y - X.dot(XTXi.dot(X.T.dot(y)))
dispersion = np.linalg.norm(resid)**2 / (n-p)
S = X.T.dot(y)
covS = dispersion * X.T.dot(X)
print(dispersion, sigma**2)
splitting_sampler = split_sampler(X * y[:, None], covS)
def meta_algorithm(XTX, XTXi, sampler):
min_success = 6
ntries = 10
def _alpha_grid(X, y, center, XTX):
n, p = X.shape
alphas, coefs, _ = lasso_path(X.copy(), y.copy(), Xy=center.copy(), precompute=XTX.copy())
nselected = np.count_nonzero(coefs, axis=0)
alphas = alphas[nselected < 20]
return alphas
alpha_grid = _alpha_grid(X, y, sampler.center, XTX)
success = np.zeros((p, alpha_grid.shape[0]))
for _ in range(ntries):
scale = 1. # corresponds to sub-samples of 50%
noisy_S = sampler(scale=scale)
_, coefs, _ = lasso_path(X, y, Xy = noisy_S, precompute=XTX, alphas=alpha_grid)
success += np.abs(np.sign(coefs))
selected = np.apply_along_axis(lambda row: any(x>min_success for x in row), 1, success)
vars = set(np.nonzero(selected)[0])
return vars
    selection_algorithm = functools.partial(meta_algorithm, XTX, XTXi)
# run selection algorithm
df = full_model_inference(X,
y,
truth,
selection_algorithm,
splitting_sampler,
success_params=(6, 10),
B=B,
fit_probability=keras_fit,
fit_args={'epochs':10, 'sizes':[100]*5, 'dropout':0., 'activation':'relu'})
return df
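# Note added for clarity (not part of the original script): meta_algorithm
# above implements a simple stability-selection rule. Each of the `ntries`
# iterations feeds a noisy draw of the score S = X.T.dot(y) from the splitting
# sampler to lasso_path over a shared alpha grid, and success[j, k] counts how
# often variable j is active at alpha_k. A variable is selected when it is
# active strictly more than min_success (= 6) of the 10 tries at some alpha;
# e.g. a row of counts [0, 3, 7, 9] would be selected because 7 > 6.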
if __name__ == "__main__":
import statsmodels.api as sm
import matplotlib.pyplot as plt
import pandas as pd
U = np.linspace(0, 1, 101)
plt.clf()
init_seed = np.fabs(np.random.standard_normal() * 500)
for i in range(500):
df = simulate(seed=init_seed+i)
csvfile = 'HIV_stability_selection.csv'
outbase = csvfile[:-4]
if df is not None or i > 0:
try:
df = pd.concat([df, pd.read_csv(csvfile)])
except FileNotFoundError:
pass
if df is not None:
df.to_csv(csvfile, index=False)
if len(df['pivot']) > 0:
pivot_ax, lengths_ax = pivot_plot(df, outbase)
|
bsd-3-clause
|
dvav/dgeclust
|
dgeclust/postprocessing.py
|
1
|
4508
|
from __future__ import division
import os
import itertools as it
import multiprocessing as mp
import numpy as np
import pandas as pd
########################################################################################################################
def _compute_pvals(args):
"""Given a number of samples, compute posterior probabilities of non-differential expression between groups"""
samples, (indir, igroup1, igroup2) = args
## read sample and identify differentially expressed features
p = 0
for sample in samples:
fname = os.path.join(indir, str(sample))
z = np.loadtxt(fname, dtype='int', usecols=(igroup1, igroup2)).T
p += z[0] == z[1]
## return
return p
########################################################################################################################
def compare_groups(data, model, group1, group2, t0=5000, tend=10000, dt=1, nthreads=None):
"""For each gene, compute the posterior probability of non-differential expression between group1 and group2"""
indir = model.fnames['z']
## fetch feature names and groups
gene_names = data.counts.index
groups = data.groups.keys() # order is preserved here
igroup1 = [i for i, v in enumerate(groups) if v == group1][0]
igroup2 = [i for i, v in enumerate(groups) if v == group2][0]
## prepare for multiprocessing
nthreads = mp.cpu_count() if nthreads is None or nthreads <= 0 else nthreads
pool = None if nthreads == 1 else mp.Pool(processes=nthreads)
## prepare samples
samples = np.asarray(os.listdir(indir), dtype='int')
idxs = (samples >= t0) & (samples <= tend) & (np.arange(samples.size) % dt == 0)
samples = samples[idxs]
nsamples = samples.size
## compute un-normalized values of posteriors
chunk_size = int(nsamples / nthreads + 1)
chunks = [samples[i:i+chunk_size] for i in range(0, nsamples, chunk_size)]
args = zip(chunks, it.repeat((indir, igroup1, igroup2)))
if pool is None:
p = map(_compute_pvals, args)
else:
p = pool.map(_compute_pvals, args)
## compute posteriors, FDR and FWER
post = np.sum(list(p), 0) / nsamples
ii = post.argsort()
tmp = post[ii].cumsum() / np.arange(1, post.size+1)
fdr = np.zeros(post.shape)
fdr[ii] = tmp
# pro = post / post.sum()
# run = pro[ii].cumsum() / pro.size
# fwer = np.zeros(pro.shape)
# fwer[ii] = run
## return
return pd.DataFrame(np.vstack((post, fdr)).T, columns=('Posteriors', 'FDR'), index=gene_names), nsamples
########################################################################################################################
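# Illustrative note (not part of the original module): compare_groups averages
# the per-sample indicator z[group1] == z[group2] over the retained MCMC
# samples to get a posterior probability of non-differential expression, and
# the FDR column is the running mean of the sorted posteriors. A minimal
# sketch of that step, assuming a toy posterior vector:
#
#   post = np.array([0.9, 0.1, 0.4])
#   ii = post.argsort()                               # [1, 2, 0]
#   fdr = np.zeros(post.shape)
#   fdr[ii] = post[ii].cumsum() / np.arange(1, post.size + 1)
#   # fdr -> [0.4667, 0.1, 0.25]; calling genes with posterior <= 0.4
#   # corresponds to an estimated FDR of 0.25.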
def _compute_similarity_vector(args):
"""Given a sample, calculate gene- or group-wise similarity matrix"""
samples, (indir, inc, compare_genes) = args
## read sample
sim_vec = 0
for sample in samples:
fname = os.path.join(indir, str(sample))
z = np.loadtxt(fname, dtype='int')
z = z if compare_genes is True else z.T
z = z[inc] if inc is not None else z
## calculate un-normalised similarity matrix
nrows, ncols = z.shape
sim = [np.sum(z[i] == z[i+1:], 1) for i in range(nrows-1)]
sim_vec += np.hstack(sim) / ncols
## return
return sim_vec / samples.size
def compute_similarity_vector(model, t0=5000, tend=10000, dt=1, inc=None, compare_genes=False, nthreads=None):
"""Calculate gene- or group-wise similarity matrix"""
indir = model.fnames['z']
## prepare for multiprocessing
nthreads = mp.cpu_count() if nthreads is None or nthreads <= 0 else nthreads
pool = None if nthreads == 1 else mp.Pool(processes=nthreads)
## prepare samples
samples = np.asarray(os.listdir(indir), dtype='int')
idxs = (samples >= t0) & (samples <= tend) & (np.arange(samples.size) % dt == 0)
samples = samples[idxs]
nsamples = samples.size
## compute similarity matrices for each sample
chunk_size = int(nsamples / nthreads + 1)
chunks = [samples[i:i+chunk_size] for i in range(0, nsamples, chunk_size)]
args = zip(chunks, it.repeat((indir, inc, compare_genes)))
if pool is None:
vec = map(_compute_similarity_vector, args)
else:
vec = pool.map(_compute_similarity_vector, args)
## return
return np.mean(vec, 0), nsamples
########################################################################################################################
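# Illustrative note (not part of the original module): compute_similarity_vector
# returns similarities in condensed form (the flattened upper triangle, row by
# row), so it can be expanded to a square matrix with scipy if needed. A
# minimal sketch, assuming `model` has already been fitted:
#
#   from scipy.spatial.distance import squareform
#   vec, nsamples = compute_similarity_vector(model, compare_genes=False)
#   sim = squareform(vec)          # symmetric matrix, zero diagonal
#   np.fill_diagonal(sim, 1.0)     # an item always co-clusters with itself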
|
mit
|
yunfeilu/scikit-learn
|
sklearn/utils/tests/test_class_weight.py
|
90
|
12846
|
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.datasets import make_blobs
from sklearn.utils.class_weight import compute_class_weight
from sklearn.utils.class_weight import compute_sample_weight
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns
def test_compute_class_weight():
# Test (and demo) compute_class_weight.
y = np.asarray([2, 2, 2, 3, 3, 4])
classes = np.unique(y)
cw = assert_warns(DeprecationWarning,
compute_class_weight, "auto", classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_true(cw[0] < cw[1] < cw[2])
cw = compute_class_weight("balanced", classes, y)
# total effect of samples is preserved
class_counts = np.bincount(y)[2:]
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_true(cw[0] < cw[1] < cw[2])
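# Note added for clarity (not part of the original test suite): the "balanced"
# heuristic exercised above weights each class as
#     n_samples / (n_classes * np.bincount(y))
# so for y = [2, 2, 2, 3, 3, 4] the class counts are [3, 2, 1] and the weights
# are 6 / (3 * [3, 2, 1]) = [0.667, 1.0, 2.0], which is why cw[0] < cw[1] <
# cw[2] and np.dot(cw, class_counts) equals the number of samples.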
def test_compute_class_weight_not_present():
# Raise error when y does not contain all class labels
classes = np.arange(4)
y = np.asarray([0, 0, 0, 1, 1, 2])
assert_raises(ValueError, compute_class_weight, "auto", classes, y)
assert_raises(ValueError, compute_class_weight, "balanced", classes, y)
def test_compute_class_weight_dict():
classes = np.arange(3)
class_weights = {0: 1.0, 1: 2.0, 2: 3.0}
y = np.asarray([0, 0, 1, 2])
cw = compute_class_weight(class_weights, classes, y)
# When the user specifies class weights, compute_class_weights should just
# return them.
assert_array_almost_equal(np.asarray([1.0, 2.0, 3.0]), cw)
# When a class weight is specified that isn't in classes, a ValueError
# should get raised
msg = 'Class label 4 not present.'
class_weights = {0: 1.0, 1: 2.0, 2: 3.0, 4: 1.5}
assert_raise_message(ValueError, msg, compute_class_weight, class_weights,
classes, y)
msg = 'Class label -1 not present.'
class_weights = {-1: 5.0, 0: 1.0, 1: 2.0, 2: 3.0}
assert_raise_message(ValueError, msg, compute_class_weight, class_weights,
classes, y)
def test_compute_class_weight_invariance():
    # Test that results with class_weight="balanced" are invariant wrt
# class imbalance if the number of samples is identical.
# The test uses a balanced two class dataset with 100 datapoints.
# It creates three versions, one where class 1 is duplicated
# resulting in 150 points of class 1 and 50 of class 0,
# one where there are 50 points in class 1 and 150 in class 0,
# and one where there are 100 points of each class (this one is balanced
# again).
# With balancing class weights, all three should give the same model.
X, y = make_blobs(centers=2, random_state=0)
# create dataset where class 1 is duplicated twice
X_1 = np.vstack([X] + [X[y == 1]] * 2)
y_1 = np.hstack([y] + [y[y == 1]] * 2)
# create dataset where class 0 is duplicated twice
X_0 = np.vstack([X] + [X[y == 0]] * 2)
y_0 = np.hstack([y] + [y[y == 0]] * 2)
    # duplicate everything
X_ = np.vstack([X] * 2)
y_ = np.hstack([y] * 2)
# results should be identical
logreg1 = LogisticRegression(class_weight="balanced").fit(X_1, y_1)
logreg0 = LogisticRegression(class_weight="balanced").fit(X_0, y_0)
logreg = LogisticRegression(class_weight="balanced").fit(X_, y_)
assert_array_almost_equal(logreg1.coef_, logreg0.coef_)
assert_array_almost_equal(logreg.coef_, logreg0.coef_)
def test_compute_class_weight_auto_negative():
# Test compute_class_weight when labels are negative
# Test with balanced class labels.
classes = np.array([-2, -1, 0])
y = np.asarray([-1, -1, 0, 0, -2, -2])
cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([1., 1., 1.]))
cw = compute_class_weight("balanced", classes, y)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([1., 1., 1.]))
# Test with unbalanced class labels.
y = np.asarray([-1, 0, 0, -2, -2, -2])
cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([0.545, 1.636, 0.818]), decimal=3)
cw = compute_class_weight("balanced", classes, y)
assert_equal(len(cw), len(classes))
class_counts = np.bincount(y + 2)
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_array_almost_equal(cw, [2. / 3, 2., 1.])
def test_compute_class_weight_auto_unordered():
# Test compute_class_weight when classes are unordered
classes = np.array([1, 0, 3])
y = np.asarray([1, 0, 0, 3, 3, 3])
cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([1.636, 0.818, 0.545]), decimal=3)
cw = compute_class_weight("balanced", classes, y)
class_counts = np.bincount(y)[classes]
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_array_almost_equal(cw, [2., 1., 2. / 3])
def test_compute_sample_weight():
# Test (and demo) compute_sample_weight.
# Test with balanced classes
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with user-defined weights
sample_weight = compute_sample_weight({1: 2, 2: 1}, y)
assert_array_almost_equal(sample_weight, [2., 2., 2., 1., 1., 1.])
# Test with column vector of balanced classes
y = np.asarray([[1], [1], [1], [2], [2], [2]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with unbalanced classes
y = np.asarray([1, 1, 1, 2, 2, 2, 3])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
expected_auto = np.asarray([.6, .6, .6, .6, .6, .6, 1.8])
assert_array_almost_equal(sample_weight, expected_auto)
sample_weight = compute_sample_weight("balanced", y)
expected_balanced = np.array([0.7777, 0.7777, 0.7777, 0.7777, 0.7777, 0.7777, 2.3333])
assert_array_almost_equal(sample_weight, expected_balanced, decimal=4)
# Test with `None` weights
sample_weight = compute_sample_weight(None, y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 1.])
# Test with multi-output of balanced classes
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with multi-output with user-defined weights
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = compute_sample_weight([{1: 2, 2: 1}, {0: 1, 1: 2}], y)
assert_array_almost_equal(sample_weight, [2., 2., 2., 2., 2., 2.])
# Test with multi-output of unbalanced classes
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1], [3, -1]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, expected_auto ** 2)
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, expected_balanced ** 2, decimal=3)
def test_compute_sample_weight_with_subsample():
# Test compute_sample_weight with subsamples specified.
# Test with balanced classes and all samples present
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with column vector of balanced classes and all samples present
y = np.asarray([[1], [1], [1], [2], [2], [2]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with a subsample
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y, range(4))
assert_array_almost_equal(sample_weight, [.5, .5, .5, 1.5, 1.5, 1.5])
sample_weight = compute_sample_weight("balanced", y, range(4))
assert_array_almost_equal(sample_weight, [2. / 3, 2. / 3,
2. / 3, 2., 2., 2.])
# Test with a bootstrap subsample
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, [0, 1, 1, 2, 2, 3])
expected_auto = np.asarray([1 / 3., 1 / 3., 1 / 3., 5 / 3., 5 / 3., 5 / 3.])
assert_array_almost_equal(sample_weight, expected_auto)
sample_weight = compute_sample_weight("balanced", y, [0, 1, 1, 2, 2, 3])
expected_balanced = np.asarray([0.6, 0.6, 0.6, 3., 3., 3.])
assert_array_almost_equal(sample_weight, expected_balanced)
# Test with a bootstrap subsample for multi-output
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, [0, 1, 1, 2, 2, 3])
assert_array_almost_equal(sample_weight, expected_auto ** 2)
sample_weight = compute_sample_weight("balanced", y, [0, 1, 1, 2, 2, 3])
assert_array_almost_equal(sample_weight, expected_balanced ** 2)
# Test with a missing class
y = np.asarray([1, 1, 1, 2, 2, 2, 3])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
# Test with a missing class for multi-output
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1], [2, 2]])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
def test_compute_sample_weight_errors():
# Test compute_sample_weight raises errors expected.
# Invalid preset string
y = np.asarray([1, 1, 1, 2, 2, 2])
y_ = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
assert_raises(ValueError, compute_sample_weight, "ni", y)
assert_raises(ValueError, compute_sample_weight, "ni", y, range(4))
assert_raises(ValueError, compute_sample_weight, "ni", y_)
assert_raises(ValueError, compute_sample_weight, "ni", y_, range(4))
# Not "auto" for subsample
assert_raises(ValueError,
compute_sample_weight, {1: 2, 2: 1}, y, range(4))
# Not a list or preset for multi-output
assert_raises(ValueError, compute_sample_weight, {1: 2, 2: 1}, y_)
# Incorrect length list for multi-output
assert_raises(ValueError, compute_sample_weight, [{1: 2, 2: 1}], y_)
|
bsd-3-clause
|
ammarkhann/FinalSeniorCode
|
lib/python2.7/site-packages/pandas/core/frame.py
|
3
|
219601
|
"""
DataFrame
---------
An efficient 2D container for potentially mixed-type time series or other
labeled data series.
Similar to its R counterpart, data.frame, except providing automatic data
alignment and a host of useful data manipulation methods having to do with the
labeling information
"""
from __future__ import division
# pylint: disable=E1101,E1103
# pylint: disable=W0212,W0231,W0703,W0622
import functools
import collections
import itertools
import sys
import types
import warnings
from textwrap import dedent
from numpy import nan as NA
import numpy as np
import numpy.ma as ma
from pandas.core.dtypes.cast import (
maybe_upcast, infer_dtype_from_scalar,
maybe_cast_to_datetime,
maybe_infer_to_datetimelike,
maybe_convert_platform,
maybe_downcast_to_dtype,
invalidate_string_dtypes,
coerce_to_dtypes,
maybe_upcast_putmask,
find_common_type)
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_object_dtype,
is_extension_type,
is_datetimetz,
is_datetime64_any_dtype,
is_datetime64tz_dtype,
is_bool_dtype,
is_integer_dtype,
is_float_dtype,
is_integer,
is_scalar,
is_dtype_equal,
needs_i8_conversion,
_get_dtype_from_object,
_ensure_float,
_ensure_float64,
_ensure_int64,
_ensure_platform_int,
is_list_like,
is_iterator,
is_sequence,
is_named_tuple)
from pandas.core.dtypes.missing import isnull, notnull
from pandas.core.common import (_try_sort,
_default_index,
_values_from_object,
_maybe_box_datetimelike,
_dict_compat)
from pandas.core.generic import NDFrame, _shared_docs
from pandas.core.index import Index, MultiIndex, _ensure_index
from pandas.core.indexing import (maybe_droplevels, convert_to_index_sliceable,
check_bool_indexer)
from pandas.core.internals import (BlockManager,
create_block_manager_from_arrays,
create_block_manager_from_blocks)
from pandas.core.series import Series
from pandas.core.categorical import Categorical
import pandas.core.computation.expressions as expressions
import pandas.core.algorithms as algorithms
from pandas.core.computation.eval import eval as _eval
from pandas.compat import (range, map, zip, lrange, lmap, lzip, StringIO, u,
OrderedDict, raise_with_traceback)
from pandas import compat
from pandas.compat.numpy import function as nv
from pandas.util._decorators import Appender, Substitution
from pandas.util._validators import validate_bool_kwarg
from pandas.core.indexes.period import PeriodIndex
from pandas.core.indexes.datetimes import DatetimeIndex
from pandas.core.indexes.timedeltas import TimedeltaIndex
import pandas.core.base as base
import pandas.core.common as com
import pandas.core.nanops as nanops
import pandas.core.ops as ops
import pandas.io.formats.format as fmt
import pandas.io.formats.console as console
from pandas.io.formats.printing import pprint_thing
import pandas.plotting._core as gfx
from pandas._libs import lib, algos as libalgos
from pandas.core.config import get_option
# ---------------------------------------------------------------------
# Docstring templates
_shared_doc_kwargs = dict(
axes='index, columns', klass='DataFrame',
axes_single_arg="{0 or 'index', 1 or 'columns'}",
optional_by="""
by : str or list of str
Name or list of names which refer to the axis items.""",
versionadded_to_excel='')
_numeric_only_doc = """numeric_only : boolean, default None
Include only float, int, boolean data. If None, will attempt to use
everything, then use only numeric data
"""
_merge_doc = """
Merge DataFrame objects by performing a database-style join operation by
columns or indexes.
If joining columns on columns, the DataFrame indexes *will be
ignored*. Otherwise if joining indexes on indexes or indexes on a column or
columns, the index will be passed on.
Parameters
----------%s
right : DataFrame
how : {'left', 'right', 'outer', 'inner'}, default 'inner'
* left: use only keys from left frame, similar to a SQL left outer join;
preserve key order
* right: use only keys from right frame, similar to a SQL right outer join;
preserve key order
* outer: use union of keys from both frames, similar to a SQL full outer
join; sort keys lexicographically
* inner: use intersection of keys from both frames, similar to a SQL inner
join; preserve the order of the left keys
on : label or list
Field names to join on. Must be found in both DataFrames. If on is
None and not merging on indexes, then it merges on the intersection of
the columns by default.
left_on : label or list, or array-like
Field names to join on in left DataFrame. Can be a vector or list of
vectors of the length of the DataFrame to use a particular vector as
the join key instead of columns
right_on : label or list, or array-like
Field names to join on in right DataFrame or vector/list of vectors per
left_on docs
left_index : boolean, default False
Use the index from the left DataFrame as the join key(s). If it is a
MultiIndex, the number of keys in the other DataFrame (either the index
or a number of columns) must match the number of levels
right_index : boolean, default False
Use the index from the right DataFrame as the join key. Same caveats as
left_index
sort : boolean, default False
Sort the join keys lexicographically in the result DataFrame. If False,
the order of the join keys depends on the join type (how keyword)
suffixes : 2-length sequence (tuple, list, ...)
Suffix to apply to overlapping column names in the left and right
side, respectively
copy : boolean, default True
If False, do not copy data unnecessarily
indicator : boolean or string, default False
If True, adds a column to output DataFrame called "_merge" with
information on the source of each row.
If string, column with information on source of each row will be added to
output DataFrame, and column will be named value of string.
Information column is Categorical-type and takes on a value of "left_only"
for observations whose merge key only appears in 'left' DataFrame,
"right_only" for observations whose merge key only appears in 'right'
DataFrame, and "both" if the observation's merge key is found in both.
.. versionadded:: 0.17.0
Examples
--------
>>> A >>> B
lkey value rkey value
0 foo 1 0 foo 5
1 bar 2 1 bar 6
2 baz 3 2 qux 7
3 foo 4 3 bar 8
>>> A.merge(B, left_on='lkey', right_on='rkey', how='outer')
lkey value_x rkey value_y
0 foo 1 foo 5
1 foo 4 foo 5
2 bar 2 bar 6
3 bar 2 bar 8
4 baz 3 NaN NaN
5 NaN NaN qux 7
Returns
-------
merged : DataFrame
    The output type will be the same as 'left', if it is a subclass
of DataFrame.
See also
--------
merge_ordered
merge_asof
"""
# -----------------------------------------------------------------------
# DataFrame class
class DataFrame(NDFrame):
""" Two-dimensional size-mutable, potentially heterogeneous tabular data
structure with labeled axes (rows and columns). Arithmetic operations
align on both row and column labels. Can be thought of as a dict-like
container for Series objects. The primary pandas data structure
Parameters
----------
data : numpy ndarray (structured or homogeneous), dict, or DataFrame
Dict can contain Series, arrays, constants, or list-like objects
index : Index or array-like
Index to use for resulting frame. Will default to np.arange(n) if
no indexing information part of input data and no index provided
columns : Index or array-like
Column labels to use for resulting frame. Will default to
np.arange(n) if no column labels are provided
dtype : dtype, default None
Data type to force, otherwise infer
copy : boolean, default False
Copy data from inputs. Only affects DataFrame / 2d ndarray input
Examples
--------
>>> d = {'col1': ts1, 'col2': ts2}
>>> df = DataFrame(data=d, index=index)
>>> df2 = DataFrame(np.random.randn(10, 5))
>>> df3 = DataFrame(np.random.randn(10, 5),
... columns=['a', 'b', 'c', 'd', 'e'])
See also
--------
DataFrame.from_records : constructor from tuples, also record arrays
DataFrame.from_dict : from dicts of Series, arrays, or dicts
DataFrame.from_items : from sequence of (key, value) pairs
pandas.read_csv, pandas.read_table, pandas.read_clipboard
"""
@property
def _constructor(self):
return DataFrame
_constructor_sliced = Series
@property
def _constructor_expanddim(self):
from pandas.core.panel import Panel
return Panel
def __init__(self, data=None, index=None, columns=None, dtype=None,
copy=False):
if data is None:
data = {}
if dtype is not None:
dtype = self._validate_dtype(dtype)
if isinstance(data, DataFrame):
data = data._data
if isinstance(data, BlockManager):
mgr = self._init_mgr(data, axes=dict(index=index, columns=columns),
dtype=dtype, copy=copy)
elif isinstance(data, dict):
mgr = self._init_dict(data, index, columns, dtype=dtype)
elif isinstance(data, ma.MaskedArray):
import numpy.ma.mrecords as mrecords
# masked recarray
if isinstance(data, mrecords.MaskedRecords):
mgr = _masked_rec_array_to_mgr(data, index, columns, dtype,
copy)
# a masked array
else:
mask = ma.getmaskarray(data)
if mask.any():
data, fill_value = maybe_upcast(data, copy=True)
data[mask] = fill_value
else:
data = data.copy()
mgr = self._init_ndarray(data, index, columns, dtype=dtype,
copy=copy)
elif isinstance(data, (np.ndarray, Series, Index)):
if data.dtype.names:
data_columns = list(data.dtype.names)
data = dict((k, data[k]) for k in data_columns)
if columns is None:
columns = data_columns
mgr = self._init_dict(data, index, columns, dtype=dtype)
elif getattr(data, 'name', None) is not None:
mgr = self._init_dict({data.name: data}, index, columns,
dtype=dtype)
else:
mgr = self._init_ndarray(data, index, columns, dtype=dtype,
copy=copy)
elif isinstance(data, (list, types.GeneratorType)):
if isinstance(data, types.GeneratorType):
data = list(data)
if len(data) > 0:
if is_list_like(data[0]) and getattr(data[0], 'ndim', 1) == 1:
if is_named_tuple(data[0]) and columns is None:
columns = data[0]._fields
arrays, columns = _to_arrays(data, columns, dtype=dtype)
columns = _ensure_index(columns)
# set the index
if index is None:
if isinstance(data[0], Series):
index = _get_names_from_index(data)
elif isinstance(data[0], Categorical):
index = _default_index(len(data[0]))
else:
index = _default_index(len(data))
mgr = _arrays_to_mgr(arrays, columns, index, columns,
dtype=dtype)
else:
mgr = self._init_ndarray(data, index, columns, dtype=dtype,
copy=copy)
else:
mgr = self._init_dict({}, index, columns, dtype=dtype)
elif isinstance(data, collections.Iterator):
raise TypeError("data argument can't be an iterator")
else:
try:
arr = np.array(data, dtype=dtype, copy=copy)
except (ValueError, TypeError) as e:
exc = TypeError('DataFrame constructor called with '
'incompatible data and dtype: %s' % e)
raise_with_traceback(exc)
if arr.ndim == 0 and index is not None and columns is not None:
if isinstance(data, compat.string_types) and dtype is None:
dtype = np.object_
if dtype is None:
dtype, data = infer_dtype_from_scalar(data)
values = np.empty((len(index), len(columns)), dtype=dtype)
values.fill(data)
mgr = self._init_ndarray(values, index, columns, dtype=dtype,
copy=False)
else:
raise ValueError('DataFrame constructor not properly called!')
NDFrame.__init__(self, mgr, fastpath=True)
def _init_dict(self, data, index, columns, dtype=None):
"""
Segregate Series based on type and coerce into matrices.
Needs to handle a lot of exceptional cases.
"""
if columns is not None:
columns = _ensure_index(columns)
# GH10856
# raise ValueError if only scalars in dict
if index is None:
extract_index(list(data.values()))
# prefilter if columns passed
data = dict((k, v) for k, v in compat.iteritems(data)
if k in columns)
if index is None:
index = extract_index(list(data.values()))
else:
index = _ensure_index(index)
arrays = []
data_names = []
for k in columns:
if k not in data:
# no obvious "empty" int column
if dtype is not None and issubclass(dtype.type,
np.integer):
continue
if dtype is None:
# 1783
v = np.empty(len(index), dtype=object)
elif np.issubdtype(dtype, np.flexible):
v = np.empty(len(index), dtype=object)
else:
v = np.empty(len(index), dtype=dtype)
v.fill(NA)
else:
v = data[k]
data_names.append(k)
arrays.append(v)
else:
keys = list(data.keys())
if not isinstance(data, OrderedDict):
keys = _try_sort(keys)
columns = data_names = Index(keys)
arrays = [data[k] for k in keys]
return _arrays_to_mgr(arrays, data_names, index, columns, dtype=dtype)
def _init_ndarray(self, values, index, columns, dtype=None, copy=False):
# input must be a ndarray, list, Series, index
if isinstance(values, Series):
if columns is None:
if values.name is not None:
columns = [values.name]
if index is None:
index = values.index
else:
values = values.reindex(index)
# zero len case (GH #2234)
if not len(values) and columns is not None and len(columns):
values = np.empty((0, 1), dtype=object)
# helper to create the axes as indexes
def _get_axes(N, K, index=index, columns=columns):
# return axes or defaults
if index is None:
index = _default_index(N)
else:
index = _ensure_index(index)
if columns is None:
columns = _default_index(K)
else:
columns = _ensure_index(columns)
return index, columns
# we could have a categorical type passed or coerced to 'category'
# recast this to an _arrays_to_mgr
if (is_categorical_dtype(getattr(values, 'dtype', None)) or
is_categorical_dtype(dtype)):
if not hasattr(values, 'dtype'):
values = _prep_ndarray(values, copy=copy)
values = values.ravel()
elif copy:
values = values.copy()
index, columns = _get_axes(len(values), 1)
return _arrays_to_mgr([values], columns, index, columns,
dtype=dtype)
elif is_datetimetz(values):
return self._init_dict({0: values}, index, columns, dtype=dtype)
# by definition an array here
# the dtypes will be coerced to a single dtype
values = _prep_ndarray(values, copy=copy)
if dtype is not None:
if values.dtype != dtype:
try:
values = values.astype(dtype)
except Exception as orig:
e = ValueError("failed to cast to '%s' (Exception was: %s)"
% (dtype, orig))
raise_with_traceback(e)
index, columns = _get_axes(*values.shape)
values = values.T
# if we don't have a dtype specified, then try to convert objects
# on the entire block; this is to convert if we have datetimelike's
# embedded in an object type
if dtype is None and is_object_dtype(values):
values = maybe_infer_to_datetimelike(values)
return create_block_manager_from_blocks([values], [columns, index])
@property
def axes(self):
"""
Return a list with the row axis labels and column axis labels as the
only members. They are returned in that order.
"""
return [self.index, self.columns]
@property
def shape(self):
"""
Return a tuple representing the dimensionality of the DataFrame.
"""
return len(self.index), len(self.columns)
def _repr_fits_vertical_(self):
"""
Check length against max_rows.
"""
max_rows = get_option("display.max_rows")
return len(self) <= max_rows
def _repr_fits_horizontal_(self, ignore_width=False):
"""
Check if full repr fits in horizontal boundaries imposed by the display
        options width and max_columns. In case of a non-interactive session,
        no boundaries apply.
        ignore_width is here so ipynb+HTML output can behave the way
users expect. display.max_columns remains in effect.
GH3541, GH3573
"""
width, height = console.get_console_size()
max_columns = get_option("display.max_columns")
nb_columns = len(self.columns)
# exceed max columns
if ((max_columns and nb_columns > max_columns) or
((not ignore_width) and width and nb_columns > (width // 2))):
return False
# used by repr_html under IPython notebook or scripts ignore terminal
# dims
if ignore_width or not com.in_interactive_session():
return True
if (get_option('display.width') is not None or
com.in_ipython_frontend()):
# check at least the column row for excessive width
max_rows = 1
else:
max_rows = get_option("display.max_rows")
# when auto-detecting, so width=None and not in ipython front end
        # check whether repr fits horizontal by actually checking
# the width of the rendered repr
buf = StringIO()
# only care about the stuff we'll actually print out
# and to_string on entire frame may be expensive
d = self
if not (max_rows is None): # unlimited rows
# min of two, where one may be None
d = d.iloc[:min(max_rows, len(d))]
else:
return True
d.to_string(buf=buf)
value = buf.getvalue()
repr_width = max([len(l) for l in value.split('\n')])
return repr_width < width
def _info_repr(self):
"""True if the repr should show the info view."""
info_repr_option = (get_option("display.large_repr") == "info")
return info_repr_option and not (self._repr_fits_horizontal_() and
self._repr_fits_vertical_())
def __unicode__(self):
"""
Return a string representation for a particular DataFrame
Invoked by unicode(df) in py2 only. Yields a Unicode String in both
py2/py3.
"""
buf = StringIO(u(""))
if self._info_repr():
self.info(buf=buf)
return buf.getvalue()
max_rows = get_option("display.max_rows")
max_cols = get_option("display.max_columns")
show_dimensions = get_option("display.show_dimensions")
if get_option("display.expand_frame_repr"):
width, _ = console.get_console_size()
else:
width = None
self.to_string(buf=buf, max_rows=max_rows, max_cols=max_cols,
line_width=width, show_dimensions=show_dimensions)
return buf.getvalue()
def _repr_html_(self):
"""
Return a html representation for a particular DataFrame.
Mainly for IPython notebook.
"""
# qtconsole doesn't report its line width, and also
# behaves badly when outputting an HTML table
# that doesn't fit the window, so disable it.
# XXX: In IPython 3.x and above, the Qt console will not attempt to
# display HTML, so this check can be removed when support for
# IPython 2.x is no longer needed.
if com.in_qtconsole():
# 'HTML output is disabled in QtConsole'
return None
if self._info_repr():
buf = StringIO(u(""))
self.info(buf=buf)
# need to escape the <class>, should be the first line.
            val = buf.getvalue().replace('<', r'&lt;', 1)
            val = val.replace('>', r'&gt;', 1)
return '<pre>' + val + '</pre>'
if get_option("display.notebook_repr_html"):
max_rows = get_option("display.max_rows")
max_cols = get_option("display.max_columns")
show_dimensions = get_option("display.show_dimensions")
return self.to_html(max_rows=max_rows, max_cols=max_cols,
show_dimensions=show_dimensions, notebook=True)
else:
return None
def _repr_latex_(self):
"""
Returns a LaTeX representation for a particular Dataframe.
Mainly for use with nbconvert (jupyter notebook conversion to pdf).
"""
if get_option('display.latex.repr'):
return self.to_latex()
else:
return None
@property
def style(self):
"""
Property returning a Styler object containing methods for
        building a styled HTML representation of the DataFrame.
See Also
--------
pandas.io.formats.style.Styler
"""
from pandas.io.formats.style import Styler
return Styler(self)
def iteritems(self):
"""
Iterator over (column name, Series) pairs.
See also
--------
iterrows : Iterate over DataFrame rows as (index, Series) pairs.
itertuples : Iterate over DataFrame rows as namedtuples of the values.
"""
if self.columns.is_unique and hasattr(self, '_item_cache'):
for k in self.columns:
yield k, self._get_item_cache(k)
else:
for i, k in enumerate(self.columns):
yield k, self._ixs(i, axis=1)
def iterrows(self):
"""
Iterate over DataFrame rows as (index, Series) pairs.
Notes
-----
1. Because ``iterrows`` returns a Series for each row,
it does **not** preserve dtypes across the rows (dtypes are
preserved across columns for DataFrames). For example,
>>> df = pd.DataFrame([[1, 1.5]], columns=['int', 'float'])
>>> row = next(df.iterrows())[1]
>>> row
int 1.0
float 1.5
Name: 0, dtype: float64
>>> print(row['int'].dtype)
float64
>>> print(df['int'].dtype)
int64
To preserve dtypes while iterating over the rows, it is better
to use :meth:`itertuples` which returns namedtuples of the values
and which is generally faster than ``iterrows``.
2. You should **never modify** something you are iterating over.
This is not guaranteed to work in all cases. Depending on the
data types, the iterator returns a copy and not a view, and writing
to it will have no effect.
Returns
-------
it : generator
A generator that iterates over the rows of the frame.
See also
--------
itertuples : Iterate over DataFrame rows as namedtuples of the values.
iteritems : Iterate over (column name, Series) pairs.
"""
columns = self.columns
klass = self._constructor_sliced
for k, v in zip(self.index, self.values):
s = klass(v, index=columns, name=k)
yield k, s
def itertuples(self, index=True, name="Pandas"):
"""
Iterate over DataFrame rows as namedtuples, with index value as first
element of the tuple.
Parameters
----------
index : boolean, default True
If True, return the index as the first element of the tuple.
name : string, default "Pandas"
The name of the returned namedtuples or None to return regular
tuples.
Notes
-----
The column names will be renamed to positional names if they are
invalid Python identifiers, repeated, or start with an underscore.
With a large number of columns (>255), regular tuples are returned.
See also
--------
iterrows : Iterate over DataFrame rows as (index, Series) pairs.
iteritems : Iterate over (column name, Series) pairs.
Examples
--------
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [0.1, 0.2]},
index=['a', 'b'])
>>> df
col1 col2
a 1 0.1
b 2 0.2
>>> for row in df.itertuples():
... print(row)
...
Pandas(Index='a', col1=1, col2=0.10000000000000001)
Pandas(Index='b', col1=2, col2=0.20000000000000001)
"""
arrays = []
fields = []
if index:
arrays.append(self.index)
fields.append("Index")
# use integer indexing because of possible duplicate column names
arrays.extend(self.iloc[:, k] for k in range(len(self.columns)))
# Python 3 supports at most 255 arguments to constructor, and
# things get slow with this many fields in Python 2
if name is not None and len(self.columns) + index < 256:
# `rename` is unsupported in Python 2.6
try:
itertuple = collections.namedtuple(name,
fields + list(self.columns),
rename=True)
return map(itertuple._make, zip(*arrays))
except Exception:
pass
# fallback to regular tuples
return zip(*arrays)
if compat.PY3: # pragma: no cover
items = iteritems
def __len__(self):
"""Returns length of info axis, but here we use the index """
return len(self.index)
def dot(self, other):
"""
Matrix multiplication with DataFrame or Series objects
Parameters
----------
other : DataFrame or Series
Returns
-------
dot_product : DataFrame or Series
"""
if isinstance(other, (Series, DataFrame)):
common = self.columns.union(other.index)
if (len(common) > len(self.columns) or
len(common) > len(other.index)):
raise ValueError('matrices are not aligned')
left = self.reindex(columns=common, copy=False)
right = other.reindex(index=common, copy=False)
lvals = left.values
rvals = right.values
else:
left = self
lvals = self.values
rvals = np.asarray(other)
if lvals.shape[1] != rvals.shape[0]:
raise ValueError('Dot product shape mismatch, %s vs %s' %
(lvals.shape, rvals.shape))
if isinstance(other, DataFrame):
return self._constructor(np.dot(lvals, rvals), index=left.index,
columns=other.columns)
elif isinstance(other, Series):
return Series(np.dot(lvals, rvals), index=left.index)
elif isinstance(rvals, (np.ndarray, Index)):
result = np.dot(lvals, rvals)
if result.ndim == 2:
return self._constructor(result, index=left.index)
else:
return Series(result, index=left.index)
else: # pragma: no cover
raise TypeError('unsupported type: %s' % type(other))
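    # Illustrative note (not part of the original source): `dot` aligns the
    # caller's columns with the index of `other` before multiplying, e.g.
    # (a minimal sketch):
    #
    #   left = DataFrame([[1, 2], [3, 4]], columns=['a', 'b'])
    #   right = Series([10, 1], index=['a', 'b'])
    #   left.dot(right)            # -> Series([12, 34])
    #
    # Misaligned labels raise ValueError('matrices are not aligned').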
# ----------------------------------------------------------------------
# IO methods (to / from other formats)
@classmethod
def from_dict(cls, data, orient='columns', dtype=None):
"""
Construct DataFrame from dict of array-like or dicts
Parameters
----------
data : dict
{field : array-like} or {field : dict}
orient : {'columns', 'index'}, default 'columns'
The "orientation" of the data. If the keys of the passed dict
should be the columns of the resulting DataFrame, pass 'columns'
(default). Otherwise if the keys should be rows, pass 'index'.
dtype : dtype, default None
Data type to force, otherwise infer
Returns
-------
DataFrame
"""
index, columns = None, None
orient = orient.lower()
if orient == 'index':
if len(data) > 0:
# TODO speed up Series case
if isinstance(list(data.values())[0], (Series, dict)):
data = _from_nested_dict(data)
else:
data, index = list(data.values()), list(data.keys())
elif orient != 'columns': # pragma: no cover
raise ValueError('only recognize index or columns for orient')
return cls(data, index=index, columns=columns, dtype=dtype)
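    # Illustrative note (not part of the original source): with the default
    # orient='columns' the dict keys become column labels, while
    # orient='index' turns them into row labels, e.g. (a minimal sketch):
    #
    #   d = {'r1': {'a': 1, 'b': 2}, 'r2': {'a': 3, 'b': 4}}
    #   DataFrame.from_dict(d)                  # columns r1, r2
    #   DataFrame.from_dict(d, orient='index')  # index r1, r2; columns a, b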
def to_dict(self, orient='dict'):
"""Convert DataFrame to dictionary.
Parameters
----------
orient : str {'dict', 'list', 'series', 'split', 'records', 'index'}
Determines the type of the values of the dictionary.
- dict (default) : dict like {column -> {index -> value}}
- list : dict like {column -> [values]}
- series : dict like {column -> Series(values)}
- split : dict like
{index -> [index], columns -> [columns], data -> [values]}
- records : list like
[{column -> value}, ... , {column -> value}]
- index : dict like {index -> {column -> value}}
.. versionadded:: 0.17.0
Abbreviations are allowed. `s` indicates `series` and `sp`
indicates `split`.
Returns
-------
result : dict like {column -> {index -> value}}
"""
if not self.columns.is_unique:
warnings.warn("DataFrame columns are not unique, some "
"columns will be omitted.", UserWarning)
if orient.lower().startswith('d'):
return dict((k, v.to_dict()) for k, v in compat.iteritems(self))
elif orient.lower().startswith('l'):
return dict((k, v.tolist()) for k, v in compat.iteritems(self))
elif orient.lower().startswith('sp'):
return {'index': self.index.tolist(),
'columns': self.columns.tolist(),
'data': lib.map_infer(self.values.ravel(),
_maybe_box_datetimelike)
.reshape(self.values.shape).tolist()}
elif orient.lower().startswith('s'):
return dict((k, _maybe_box_datetimelike(v))
for k, v in compat.iteritems(self))
elif orient.lower().startswith('r'):
return [dict((k, _maybe_box_datetimelike(v))
for k, v in zip(self.columns, row))
for row in self.values]
elif orient.lower().startswith('i'):
return dict((k, v.to_dict()) for k, v in self.iterrows())
else:
raise ValueError("orient '%s' not understood" % orient)
def to_gbq(self, destination_table, project_id, chunksize=10000,
verbose=True, reauth=False, if_exists='fail', private_key=None):
"""Write a DataFrame to a Google BigQuery table.
The main method a user calls to export pandas DataFrame contents to
Google BigQuery table.
Google BigQuery API Client Library v2 for Python is used.
Documentation is available `here
<https://developers.google.com/api-client-library/python/apis/bigquery/v2>`__
Authentication to the Google BigQuery service is via OAuth 2.0.
- If "private_key" is not provided:
By default "application default credentials" are used.
If default application credentials are not found or are restrictive,
user account credentials are used. In this case, you will be asked to
grant permissions for product name 'pandas GBQ'.
- If "private_key" is provided:
Service account credentials will be used to authenticate.
Parameters
----------
dataframe : DataFrame
DataFrame to be written
destination_table : string
Name of table to be written, in the form 'dataset.tablename'
project_id : str
Google BigQuery Account project ID.
chunksize : int (default 10000)
Number of rows to be inserted in each chunk from the dataframe.
verbose : boolean (default True)
Show percentage complete
reauth : boolean (default False)
Force Google BigQuery to reauthenticate the user. This is useful
if multiple accounts are used.
if_exists : {'fail', 'replace', 'append'}, default 'fail'
'fail': If table exists, do nothing.
'replace': If table exists, drop it, recreate it, and insert data.
'append': If table exists, insert data. Create if does not exist.
private_key : str (optional)
Service account private key in JSON format. Can be file path
or string contents. This is useful for remote server
authentication (eg. jupyter iPython notebook on remote host)
"""
from pandas.io import gbq
return gbq.to_gbq(self, destination_table, project_id=project_id,
chunksize=chunksize, verbose=verbose, reauth=reauth,
if_exists=if_exists, private_key=private_key)
@classmethod
def from_records(cls, data, index=None, exclude=None, columns=None,
coerce_float=False, nrows=None):
"""
Convert structured or record ndarray to DataFrame
Parameters
----------
data : ndarray (structured dtype), list of tuples, dict, or DataFrame
index : string, list of fields, array-like
Field of array to use as the index, alternately a specific set of
input labels to use
exclude : sequence, default None
Columns or fields to exclude
columns : sequence, default None
Column names to use. If the passed data do not have names
associated with them, this argument provides names for the
columns. Otherwise this argument indicates the order of the columns
in the result (any names not found in the data will become all-NA
columns)
coerce_float : boolean, default False
Attempt to convert values of non-string, non-numeric objects (like
decimal.Decimal) to floating point, useful for SQL result sets
Returns
-------
df : DataFrame
"""
# Make a copy of the input columns so we can modify it
if columns is not None:
columns = _ensure_index(columns)
if is_iterator(data):
if nrows == 0:
return cls()
try:
first_row = next(data)
except StopIteration:
return cls(index=index, columns=columns)
dtype = None
if hasattr(first_row, 'dtype') and first_row.dtype.names:
dtype = first_row.dtype
values = [first_row]
if nrows is None:
values += data
else:
values.extend(itertools.islice(data, nrows - 1))
if dtype is not None:
data = np.array(values, dtype=dtype)
else:
data = values
if isinstance(data, dict):
if columns is None:
columns = arr_columns = _ensure_index(sorted(data))
arrays = [data[k] for k in columns]
else:
arrays = []
arr_columns = []
for k, v in compat.iteritems(data):
if k in columns:
arr_columns.append(k)
arrays.append(v)
arrays, arr_columns = _reorder_arrays(arrays, arr_columns,
columns)
elif isinstance(data, (np.ndarray, DataFrame)):
arrays, columns = _to_arrays(data, columns)
if columns is not None:
columns = _ensure_index(columns)
arr_columns = columns
else:
arrays, arr_columns = _to_arrays(data, columns,
coerce_float=coerce_float)
arr_columns = _ensure_index(arr_columns)
if columns is not None:
columns = _ensure_index(columns)
else:
columns = arr_columns
if exclude is None:
exclude = set()
else:
exclude = set(exclude)
result_index = None
if index is not None:
if (isinstance(index, compat.string_types) or
not hasattr(index, "__iter__")):
i = columns.get_loc(index)
exclude.add(index)
if len(arrays) > 0:
result_index = Index(arrays[i], name=index)
else:
result_index = Index([], name=index)
else:
try:
to_remove = [arr_columns.get_loc(field) for field in index]
result_index = MultiIndex.from_arrays(
[arrays[i] for i in to_remove], names=index)
exclude.update(index)
except Exception:
result_index = index
if any(exclude):
arr_exclude = [x for x in exclude if x in arr_columns]
to_remove = [arr_columns.get_loc(col) for col in arr_exclude]
arrays = [v for i, v in enumerate(arrays) if i not in to_remove]
arr_columns = arr_columns.drop(arr_exclude)
columns = columns.drop(exclude)
mgr = _arrays_to_mgr(arrays, arr_columns, result_index, columns)
return cls(mgr)
def to_records(self, index=True, convert_datetime64=True):
"""
Convert DataFrame to record array. Index will be put in the
'index' field of the record array if requested
Parameters
----------
index : boolean, default True
Include index in resulting record array, stored in 'index' field
convert_datetime64 : boolean, default True
Whether to convert the index to datetime.datetime if it is a
DatetimeIndex
Returns
-------
y : recarray
"""
if index:
if is_datetime64_any_dtype(self.index) and convert_datetime64:
ix_vals = [self.index.to_pydatetime()]
else:
if isinstance(self.index, MultiIndex):
# array of tuples to numpy cols. copy copy copy
ix_vals = lmap(np.array, zip(*self.index.values))
else:
ix_vals = [self.index.values]
arrays = ix_vals + [self[c].get_values() for c in self.columns]
count = 0
index_names = list(self.index.names)
if isinstance(self.index, MultiIndex):
for i, n in enumerate(index_names):
if n is None:
index_names[i] = 'level_%d' % count
count += 1
elif index_names[0] is None:
index_names = ['index']
names = (lmap(compat.text_type, index_names) +
lmap(compat.text_type, self.columns))
else:
arrays = [self[c].get_values() for c in self.columns]
names = lmap(compat.text_type, self.columns)
formats = [v.dtype for v in arrays]
return np.rec.fromarrays(
arrays,
dtype={'names': names, 'formats': formats}
)
@classmethod
def from_items(cls, items, columns=None, orient='columns'):
"""
Convert (key, value) pairs to DataFrame. The keys will be the axis
index (usually the columns, but depends on the specified
orientation). The values should be arrays or Series.
Parameters
----------
items : sequence of (key, value) pairs
Values should be arrays or Series.
columns : sequence of column labels, optional
Must be passed if orient='index'.
orient : {'columns', 'index'}, default 'columns'
The "orientation" of the data. If the keys of the
input correspond to column labels, pass 'columns'
(default). Otherwise if the keys correspond to the index,
pass 'index'.
Returns
-------
frame : DataFrame
"""
keys, values = lzip(*items)
if orient == 'columns':
if columns is not None:
columns = _ensure_index(columns)
idict = dict(items)
if len(idict) < len(items):
if not columns.equals(_ensure_index(keys)):
raise ValueError('With non-unique item names, passed '
'columns must be identical')
arrays = values
else:
arrays = [idict[k] for k in columns if k in idict]
else:
columns = _ensure_index(keys)
arrays = values
return cls._from_arrays(arrays, columns, None)
elif orient == 'index':
if columns is None:
raise TypeError("Must pass columns with orient='index'")
keys = _ensure_index(keys)
arr = np.array(values, dtype=object).T
data = [lib.maybe_convert_objects(v) for v in arr]
return cls._from_arrays(data, columns, keys)
else: # pragma: no cover
raise ValueError("'orient' must be either 'columns' or 'index'")
@classmethod
def _from_arrays(cls, arrays, columns, index, dtype=None):
mgr = _arrays_to_mgr(arrays, columns, index, columns, dtype=dtype)
return cls(mgr)
@classmethod
def from_csv(cls, path, header=0, sep=',', index_col=0, parse_dates=True,
encoding=None, tupleize_cols=False,
infer_datetime_format=False):
"""
Read CSV file (DISCOURAGED, please use :func:`pandas.read_csv`
instead).
It is preferable to use the more powerful :func:`pandas.read_csv`
for most general purposes, but ``from_csv`` makes for an easy
roundtrip to and from a file (the exact counterpart of
``to_csv``), especially with a DataFrame of time series data.
This method only differs from the preferred :func:`pandas.read_csv`
in some defaults:
- `index_col` is ``0`` instead of ``None`` (take first column as index
by default)
- `parse_dates` is ``True`` instead of ``False`` (try parsing the index
as datetime by default)
So a ``pd.DataFrame.from_csv(path)`` can be replaced by
``pd.read_csv(path, index_col=0, parse_dates=True)``.
Parameters
----------
path : string file path or file handle / StringIO
header : int, default 0
Row to use as header (skip prior rows)
sep : string, default ','
Field delimiter
index_col : int or sequence, default 0
Column to use for index. If a sequence is given, a MultiIndex
is used. Different default from read_table
parse_dates : boolean, default True
Parse dates. Different default from read_table
tupleize_cols : boolean, default False
write multi_index columns as a list of tuples (if True)
            or the new, expanded format (if False)
infer_datetime_format: boolean, default False
If True and `parse_dates` is True for a column, try to infer the
datetime format based on the first datetime string. If the format
can be inferred, there often will be a large parsing speed-up.
See also
--------
pandas.read_csv
Returns
-------
y : DataFrame
"""
from pandas.io.parsers import read_table
return read_table(path, header=header, sep=sep,
parse_dates=parse_dates, index_col=index_col,
encoding=encoding, tupleize_cols=tupleize_cols,
infer_datetime_format=infer_datetime_format)
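# Illustrative sketch (commented out; not part of the pandas source).
# As the docstring notes, ``from_csv`` is just ``read_csv`` with different
# defaults, so the discouraged call can be replaced by the preferred one
# ('data.csv' is a hypothetical path):
#
#   import pandas as pd
#   df = pd.DataFrame.from_csv('data.csv')                       # discouraged
#   df = pd.read_csv('data.csv', index_col=0, parse_dates=True)  # preferred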
def to_sparse(self, fill_value=None, kind='block'):
"""
Convert to SparseDataFrame
Parameters
----------
fill_value : float, default NaN
kind : {'block', 'integer'}
Returns
-------
y : SparseDataFrame
"""
from pandas.core.sparse.frame import SparseDataFrame
return SparseDataFrame(self._series, index=self.index,
columns=self.columns, default_kind=kind,
default_fill_value=fill_value)
def to_panel(self):
"""
Transform long (stacked) format (DataFrame) into wide (3D, Panel)
format.
Currently the index of the DataFrame must be a 2-level MultiIndex. This
may be generalized later
Returns
-------
panel : Panel
"""
# only support this kind for now
if (not isinstance(self.index, MultiIndex) or # pragma: no cover
len(self.index.levels) != 2):
raise NotImplementedError('Only 2-level MultiIndex are supported.')
if not self.index.is_unique:
raise ValueError("Can't convert non-uniquely indexed "
"DataFrame to Panel")
self._consolidate_inplace()
# minor axis must be sorted
if self.index.lexsort_depth < 2:
selfsorted = self.sort_index(level=0)
else:
selfsorted = self
major_axis, minor_axis = selfsorted.index.levels
major_labels, minor_labels = selfsorted.index.labels
shape = len(major_axis), len(minor_axis)
# preserve names, if any
major_axis = major_axis.copy()
major_axis.name = self.index.names[0]
minor_axis = minor_axis.copy()
minor_axis.name = self.index.names[1]
# create new axes
new_axes = [selfsorted.columns, major_axis, minor_axis]
# create new manager
new_mgr = selfsorted._data.reshape_nd(axes=new_axes,
labels=[major_labels,
minor_labels],
shape=shape,
ref_items=selfsorted.columns)
return self._constructor_expanddim(new_mgr)
def to_csv(self, path_or_buf=None, sep=",", na_rep='', float_format=None,
columns=None, header=True, index=True, index_label=None,
mode='w', encoding=None, compression=None, quoting=None,
quotechar='"', line_terminator='\n', chunksize=None,
tupleize_cols=False, date_format=None, doublequote=True,
escapechar=None, decimal='.'):
r"""Write DataFrame to a comma-separated values (csv) file
Parameters
----------
path_or_buf : string or file handle, default None
File path or object, if None is provided the result is returned as
a string.
sep : character, default ','
Field delimiter for the output file.
na_rep : string, default ''
Missing data representation
float_format : string, default None
Format string for floating point numbers
columns : sequence, optional
Columns to write
header : boolean or list of string, default True
Write out column names. If a list of string is given it is assumed
to be aliases for the column names
index : boolean, default True
Write row names (index)
index_label : string or sequence, or False, default None
Column label for index column(s) if desired. If None is given, and
`header` and `index` are True, then the index names are used. A
sequence should be given if the DataFrame uses MultiIndex. If
False do not print fields for index names. Use index_label=False
for easier importing in R
mode : str
Python write mode, default 'w'
encoding : string, optional
A string representing the encoding to use in the output file,
defaults to 'ascii' on Python 2 and 'utf-8' on Python 3.
compression : string, optional
A string representing the compression to use in the output file;
allowed values are 'gzip', 'bz2' and 'xz'. Only used when the first
argument is a filename.
line_terminator : string, default ``'\n'``
The newline character or character sequence to use in the output
file
quoting : optional constant from csv module
defaults to csv.QUOTE_MINIMAL. If you have set a `float_format`
then floats are converted to strings and thus csv.QUOTE_NONNUMERIC
will treat them as non-numeric
quotechar : string (length 1), default '\"'
character used to quote fields
doublequote : boolean, default True
Control quoting of `quotechar` inside a field
escapechar : string (length 1), default None
character used to escape `sep` and `quotechar` when appropriate
chunksize : int or None
rows to write at a time
tupleize_cols : boolean, default False
write MultiIndex columns as a list of tuples (if True) or in the
new, expanded format (if False)
date_format : string, default None
Format string for datetime objects
decimal: string, default '.'
Character recognized as decimal separator. E.g. use ',' for
European data
.. versionadded:: 0.16.0
"""
formatter = fmt.CSVFormatter(self, path_or_buf,
line_terminator=line_terminator, sep=sep,
encoding=encoding,
compression=compression, quoting=quoting,
na_rep=na_rep, float_format=float_format,
cols=columns, header=header, index=index,
index_label=index_label, mode=mode,
chunksize=chunksize, quotechar=quotechar,
tupleize_cols=tupleize_cols,
date_format=date_format,
doublequote=doublequote,
escapechar=escapechar, decimal=decimal)
formatter.save()
if path_or_buf is None:
return formatter.path_or_buf.getvalue()
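# Illustrative sketch (commented out; not part of the pandas source).
# ``to_csv`` writes to a path, or returns the CSV text as a string when
# ``path_or_buf`` is None ('out.csv.gz' is a hypothetical path):
#
#   df.to_csv('out.csv.gz', index=False, compression='gzip',
#             float_format='%.3f')
#   csv_text = df.to_csv()  # no path given -> CSV returned as a string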
@Appender(_shared_docs['to_excel'] % _shared_doc_kwargs)
def to_excel(self, excel_writer, sheet_name='Sheet1', na_rep='',
float_format=None, columns=None, header=True, index=True,
index_label=None, startrow=0, startcol=0, engine=None,
merge_cells=True, encoding=None, inf_rep='inf', verbose=True,
freeze_panes=None):
from pandas.io.formats.excel import ExcelFormatter
formatter = ExcelFormatter(self, na_rep=na_rep, cols=columns,
header=header,
float_format=float_format, index=index,
index_label=index_label,
merge_cells=merge_cells,
inf_rep=inf_rep)
formatter.write(excel_writer, sheet_name=sheet_name, startrow=startrow,
startcol=startcol, freeze_panes=freeze_panes,
engine=engine)
def to_stata(self, fname, convert_dates=None, write_index=True,
encoding="latin-1", byteorder=None, time_stamp=None,
data_label=None, variable_labels=None):
"""
A class for writing Stata binary dta files from array-like objects
Parameters
----------
fname : str or buffer
String path of file-like object
convert_dates : dict
Dictionary mapping columns containing datetime types to stata
internal format to use when writing the dates. Options are 'tc',
'td', 'tm', 'tw', 'th', 'tq', 'ty'. Column can be either an integer
or a name. Datetime columns that do not have a conversion type
specified will be converted to 'tc'. Raises NotImplementedError if
a datetime column has timezone information
write_index : bool
Write the index to Stata dataset.
encoding : str
Default is latin-1. Unicode is not supported
byteorder : str
Can be ">", "<", "little", or "big". default is `sys.byteorder`
time_stamp : datetime
A datetime to use as file creation date. Default is the current
time.
data_label : str
A label for the data set. Must be 80 characters or smaller.
variable_labels : dict
Dictionary containing columns as keys and variable labels as
values. Each label must be 80 characters or smaller.
.. versionadded:: 0.19.0
Raises
------
NotImplementedError
* If datetimes contain timezone information
* Column dtype is not representable in Stata
ValueError
* Columns listed in convert_dates are neither datetime64[ns]
nor datetime.datetime
* Column listed in convert_dates is not in DataFrame
* Categorical label contains more than 32,000 characters
.. versionadded:: 0.19.0
Examples
--------
>>> writer = StataWriter('./data_file.dta', data)
>>> writer.write_file()
Or with dates
>>> writer = StataWriter('./date_data_file.dta', data, {2 : 'tw'})
>>> writer.write_file()
"""
from pandas.io.stata import StataWriter
writer = StataWriter(fname, self, convert_dates=convert_dates,
encoding=encoding, byteorder=byteorder,
time_stamp=time_stamp, data_label=data_label,
write_index=write_index,
variable_labels=variable_labels)
writer.write_file()
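# Illustrative sketch (commented out; not part of the pandas source).
# Writing a frame with a datetime column to Stata, mapping the hypothetical
# column 'when' to Stata's daily date format 'td':
#
#   df.to_stata('data.dta', convert_dates={'when': 'td'},
#               variable_labels={'when': 'Observation date'})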
def to_feather(self, fname):
"""
write out the binary feather-format for DataFrames
.. versionadded:: 0.20.0
Parameters
----------
fname : str
string file path
"""
from pandas.io.feather_format import to_feather
to_feather(self, fname)
@Substitution(header='Write out column names. If a list of string is given, \
it is assumed to be aliases for the column names')
@Appender(fmt.docstring_to_string, indents=1)
def to_string(self, buf=None, columns=None, col_space=None, header=True,
index=True, na_rep='NaN', formatters=None, float_format=None,
sparsify=None, index_names=True, justify=None,
line_width=None, max_rows=None, max_cols=None,
show_dimensions=False):
"""
Render a DataFrame to a console-friendly tabular output.
"""
formatter = fmt.DataFrameFormatter(self, buf=buf, columns=columns,
col_space=col_space, na_rep=na_rep,
formatters=formatters,
float_format=float_format,
sparsify=sparsify, justify=justify,
index_names=index_names,
header=header, index=index,
line_width=line_width,
max_rows=max_rows,
max_cols=max_cols,
show_dimensions=show_dimensions)
formatter.to_string()
if buf is None:
result = formatter.buf.getvalue()
return result
@Substitution(header='whether to print column labels, default True')
@Appender(fmt.docstring_to_string, indents=1)
def to_html(self, buf=None, columns=None, col_space=None, header=True,
index=True, na_rep='NaN', formatters=None, float_format=None,
sparsify=None, index_names=True, justify=None, bold_rows=True,
classes=None, escape=True, max_rows=None, max_cols=None,
show_dimensions=False, notebook=False, decimal='.',
border=None):
"""
Render a DataFrame as an HTML table.
`to_html`-specific options:
bold_rows : boolean, default True
Make the row labels bold in the output
classes : str or list or tuple, default None
CSS class(es) to apply to the resulting html table
escape : boolean, default True
Convert the characters <, >, and & to HTML-safe sequences.
max_rows : int, optional
Maximum number of rows to show before truncating. If None, show
all.
max_cols : int, optional
Maximum number of columns to show before truncating. If None, show
all.
decimal : string, default '.'
Character recognized as decimal separator, e.g. ',' in Europe
.. versionadded:: 0.18.0
border : int
A ``border=border`` attribute is included in the opening
`<table>` tag. Default ``pd.options.html.border``.
.. versionadded:: 0.19.0
"""
formatter = fmt.DataFrameFormatter(self, buf=buf, columns=columns,
col_space=col_space, na_rep=na_rep,
formatters=formatters,
float_format=float_format,
sparsify=sparsify, justify=justify,
index_names=index_names,
header=header, index=index,
bold_rows=bold_rows, escape=escape,
max_rows=max_rows,
max_cols=max_cols,
show_dimensions=show_dimensions,
decimal=decimal)
# TODO: a generic formatter would be in DataFrameFormatter
formatter.to_html(classes=classes, notebook=notebook, border=border)
if buf is None:
return formatter.buf.getvalue()
@Substitution(header='Write out column names. If a list of string is given, \
it is assumed to be aliases for the column names.')
@Appender(fmt.common_docstring + fmt.return_docstring, indents=1)
def to_latex(self, buf=None, columns=None, col_space=None, header=True,
index=True, na_rep='NaN', formatters=None, float_format=None,
sparsify=None, index_names=True, bold_rows=True,
column_format=None, longtable=None, escape=None,
encoding=None, decimal='.', multicolumn=None,
multicolumn_format=None, multirow=None):
r"""
Render a DataFrame to a tabular environment table. You can splice
this into a LaTeX document. Requires \usepackage{booktabs}.
`to_latex`-specific options:
bold_rows : boolean, default True
Make the row labels bold in the output
column_format : str, default None
The columns format as specified in `LaTeX table format
<https://en.wikibooks.org/wiki/LaTeX/Tables>`__ e.g 'rcl' for 3
columns
longtable : boolean, default will be read from the pandas config module
Default: False.
Use a longtable environment instead of tabular. Requires adding
a \usepackage{longtable} to your LaTeX preamble.
escape : boolean, default will be read from the pandas config module
Default: True.
When set to False prevents from escaping latex special
characters in column names.
encoding : str, default None
A string representing the encoding to use in the output file,
defaults to 'ascii' on Python 2 and 'utf-8' on Python 3.
decimal : string, default '.'
Character recognized as decimal separator, e.g. ',' in Europe.
.. versionadded:: 0.18.0
multicolumn : boolean, default True
Use \multicolumn to enhance MultiIndex columns.
The default will be read from the config module.
.. versionadded:: 0.20.0
multicolumn_format : str, default 'l'
The alignment for multicolumns, similar to `column_format`
The default will be read from the config module.
.. versionadded:: 0.20.0
multirow : boolean, default False
Use \multirow to enhance MultiIndex rows.
Requires adding a \usepackage{multirow} to your LaTeX preamble.
Will print centered labels (instead of top-aligned)
across the contained rows, separating groups via clines.
The default will be read from the pandas config module.
.. versionadded:: 0.20.0
"""
# Get defaults from the pandas config
if longtable is None:
longtable = get_option("display.latex.longtable")
if escape is None:
escape = get_option("display.latex.escape")
if multicolumn is None:
multicolumn = get_option("display.latex.multicolumn")
if multicolumn_format is None:
multicolumn_format = get_option("display.latex.multicolumn_format")
if multirow is None:
multirow = get_option("display.latex.multirow")
formatter = fmt.DataFrameFormatter(self, buf=buf, columns=columns,
col_space=col_space, na_rep=na_rep,
header=header, index=index,
formatters=formatters,
float_format=float_format,
bold_rows=bold_rows,
sparsify=sparsify,
index_names=index_names,
escape=escape, decimal=decimal)
formatter.to_latex(column_format=column_format, longtable=longtable,
encoding=encoding, multicolumn=multicolumn,
multicolumn_format=multicolumn_format,
multirow=multirow)
if buf is None:
return formatter.buf.getvalue()
def info(self, verbose=None, buf=None, max_cols=None, memory_usage=None,
null_counts=None):
"""
Concise summary of a DataFrame.
Parameters
----------
verbose : {None, True, False}, optional
Whether to print the full summary.
None follows the `display.max_info_columns` setting.
True or False overrides the `display.max_info_columns` setting.
buf : writable buffer, defaults to sys.stdout
max_cols : int, default None
Determines whether full summary or short summary is printed.
None follows the `display.max_info_columns` setting.
memory_usage : boolean/string, default None
Specifies whether total memory usage of the DataFrame
elements (including index) should be displayed. None follows
the `display.memory_usage` setting. True or False overrides
the `display.memory_usage` setting. A value of 'deep' is equivalent
to True, with deep introspection. Memory usage is shown in
human-readable units (base-2 representation).
null_counts : boolean, default None
Whether to show the non-null counts
- If None, then only show if the frame is smaller than
max_info_rows and max_info_columns.
- If True, always show counts.
- If False, never show counts.
"""
from pandas.io.formats.format import _put_lines
if buf is None: # pragma: no cover
buf = sys.stdout
lines = []
lines.append(str(type(self)))
lines.append(self.index.summary())
if len(self.columns) == 0:
lines.append('Empty %s' % type(self).__name__)
_put_lines(buf, lines)
return
cols = self.columns
# hack
if max_cols is None:
max_cols = get_option('display.max_info_columns',
len(self.columns) + 1)
max_rows = get_option('display.max_info_rows', len(self) + 1)
if null_counts is None:
show_counts = ((len(self.columns) <= max_cols) and
(len(self) < max_rows))
else:
show_counts = null_counts
exceeds_info_cols = len(self.columns) > max_cols
def _verbose_repr():
lines.append('Data columns (total %d columns):' %
len(self.columns))
space = max([len(pprint_thing(k)) for k in self.columns]) + 4
counts = None
tmpl = "%s%s"
if show_counts:
counts = self.count()
if len(cols) != len(counts): # pragma: no cover
raise AssertionError('Columns must equal counts (%d != %d)'
% (len(cols), len(counts)))
tmpl = "%s non-null %s"
dtypes = self.dtypes
for i, col in enumerate(self.columns):
dtype = dtypes.iloc[i]
col = pprint_thing(col)
count = ""
if show_counts:
count = counts.iloc[i]
lines.append(_put_str(col, space) + tmpl % (count, dtype))
def _non_verbose_repr():
lines.append(self.columns.summary(name='Columns'))
def _sizeof_fmt(num, size_qualifier):
# returns size in human readable format
for x in ['bytes', 'KB', 'MB', 'GB', 'TB']:
if num < 1024.0:
return "%3.1f%s %s" % (num, size_qualifier, x)
num /= 1024.0
return "%3.1f%s %s" % (num, size_qualifier, 'PB')
if verbose:
_verbose_repr()
elif verbose is False: # specifically set to False, not necessarily None
_non_verbose_repr()
else:
if exceeds_info_cols:
_non_verbose_repr()
else:
_verbose_repr()
counts = self.get_dtype_counts()
dtypes = ['%s(%d)' % k for k in sorted(compat.iteritems(counts))]
lines.append('dtypes: %s' % ', '.join(dtypes))
if memory_usage is None:
memory_usage = get_option('display.memory_usage')
if memory_usage:
# append memory usage of df to display
size_qualifier = ''
if memory_usage == 'deep':
deep = True
else:
# size_qualifier is just a best effort; not guaranteed to catch
# all cases (e.g., it misses categorical data even with object
# categories)
deep = False
if ('object' in counts or
self.index._is_memory_usage_qualified()):
size_qualifier = '+'
mem_usage = self.memory_usage(index=True, deep=deep).sum()
lines.append("memory usage: %s\n" %
_sizeof_fmt(mem_usage, size_qualifier))
_put_lines(buf, lines)
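# Illustrative sketch (commented out; not part of the pandas source).
# ``info`` prints to sys.stdout by default; ``memory_usage='deep'``
# interrogates object columns for their true memory footprint:
#
#   df.info()                       # summary with estimated memory usage
#   df.info(memory_usage='deep')    # slower, but exact for object dtypes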
def memory_usage(self, index=True, deep=False):
"""Memory usage of DataFrame columns.
Parameters
----------
index : bool
Specifies whether to include memory usage of DataFrame's
index in returned Series. If `index=True` (the default), the first
entry of the returned Series is the memory usage of the index,
labelled `Index`.
deep : bool
Introspect the data deeply, interrogate
`object` dtypes for system-level memory consumption
Returns
-------
sizes : Series
A series with column names as index and memory usage of
columns with units of bytes.
Notes
-----
Memory usage does not include memory consumed by elements that
are not components of the array if deep=False
See Also
--------
numpy.ndarray.nbytes
"""
result = Series([c.memory_usage(index=False, deep=deep)
for col, c in self.iteritems()], index=self.columns)
if index:
result = Series(self.index.memory_usage(deep=deep),
index=['Index']).append(result)
return result
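# Illustrative sketch (commented out; not part of the pandas source).
# ``memory_usage`` returns bytes per column (plus the index by default);
# summing it gives the total footprint of the frame:
#
#   per_column = df.memory_usage(deep=True)   # Series indexed by column
#   total_bytes = per_column.sum()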
def transpose(self, *args, **kwargs):
"""Transpose index and columns"""
nv.validate_transpose(args, dict())
return super(DataFrame, self).transpose(1, 0, **kwargs)
T = property(transpose)
# ----------------------------------------------------------------------
# Picklability
# legacy pickle formats
def _unpickle_frame_compat(self, state): # pragma: no cover
from pandas.core.common import _unpickle_array
if len(state) == 2: # pragma: no cover
series, idx = state
columns = sorted(series)
else:
series, cols, idx = state
columns = _unpickle_array(cols)
index = _unpickle_array(idx)
self._data = self._init_dict(series, index, columns, None)
def _unpickle_matrix_compat(self, state): # pragma: no cover
from pandas.core.common import _unpickle_array
# old unpickling
(vals, idx, cols), object_state = state
index = _unpickle_array(idx)
dm = DataFrame(vals, index=index, columns=_unpickle_array(cols),
copy=False)
if object_state is not None:
ovals, _, ocols = object_state
objects = DataFrame(ovals, index=index,
columns=_unpickle_array(ocols), copy=False)
dm = dm.join(objects)
self._data = dm._data
# ----------------------------------------------------------------------
# Getting and setting elements
def get_value(self, index, col, takeable=False):
"""
Quickly retrieve single value at passed column and index
Parameters
----------
index : row label
col : column label
takeable : interpret the index/col as indexers, default False
Returns
-------
value : scalar value
"""
if takeable:
series = self._iget_item_cache(col)
return _maybe_box_datetimelike(series._values[index])
series = self._get_item_cache(col)
engine = self.index._engine
try:
return engine.get_value(series._values, index)
except TypeError:
# we cannot handle direct indexing
# use positional
col = self.columns.get_loc(col)
index = self.index.get_loc(index)
return self.get_value(index, col, takeable=True)
def set_value(self, index, col, value, takeable=False):
"""
Put single value at passed column and index
Parameters
----------
index : row label
col : column label
value : scalar value
takeable : interpret the index/col as indexers, default False
Returns
-------
frame : DataFrame
If label pair is contained, will be reference to calling DataFrame,
otherwise a new object
"""
try:
if takeable is True:
series = self._iget_item_cache(col)
return series.set_value(index, value, takeable=True)
series = self._get_item_cache(col)
engine = self.index._engine
engine.set_value(series._values, index, value)
return self
except (KeyError, TypeError):
# set using a non-recursive method & reset the cache
self.loc[index, col] = value
self._item_cache.pop(col, None)
return self
def _ixs(self, i, axis=0):
"""
i : int, slice, or sequence of integers
axis : int
"""
# irow
if axis == 0:
"""
Notes
-----
If slice passed, the resulting data will be a view
"""
if isinstance(i, slice):
return self[i]
else:
label = self.index[i]
if isinstance(label, Index):
# a location index by definition
result = self.take(i, axis=axis)
copy = True
else:
new_values = self._data.fast_xs(i)
if is_scalar(new_values):
return new_values
# if we are a copy, mark as such
copy = (isinstance(new_values, np.ndarray) and
new_values.base is None)
result = self._constructor_sliced(new_values,
index=self.columns,
name=self.index[i],
dtype=new_values.dtype)
result._set_is_copy(self, copy=copy)
return result
# icol
else:
"""
Notes
-----
If slice passed, the resulting data will be a view
"""
label = self.columns[i]
if isinstance(i, slice):
# need to return view
lab_slice = slice(label[0], label[-1])
return self.loc[:, lab_slice]
else:
if isinstance(label, Index):
return self.take(i, axis=1, convert=True)
index_len = len(self.index)
# if the values returned are not the same length
# as the index (iow a not found value), iget returns
# a 0-len ndarray. This is effectively catching
# a numpy error (as numpy should really raise)
values = self._data.iget(i)
if index_len and not len(values):
values = np.array([np.nan] * index_len, dtype=object)
result = self._constructor_sliced.from_array(values,
index=self.index,
name=label,
fastpath=True)
# this is a cached value, mark it so
result._set_as_cached(label, self)
return result
def __getitem__(self, key):
key = com._apply_if_callable(key, self)
# shortcut if we are an actual column
is_mi_columns = isinstance(self.columns, MultiIndex)
try:
if key in self.columns and not is_mi_columns:
return self._getitem_column(key)
except:
pass
# see if we can slice the rows
indexer = convert_to_index_sliceable(self, key)
if indexer is not None:
return self._getitem_slice(indexer)
if isinstance(key, (Series, np.ndarray, Index, list)):
# either boolean or fancy integer index
return self._getitem_array(key)
elif isinstance(key, DataFrame):
return self._getitem_frame(key)
elif is_mi_columns:
return self._getitem_multilevel(key)
else:
return self._getitem_column(key)
def _getitem_column(self, key):
""" return the actual column """
# get column
if self.columns.is_unique:
return self._get_item_cache(key)
# duplicate columns & possible reduce dimensionality
result = self._constructor(self._data.get(key))
if result.columns.is_unique:
result = result[key]
return result
def _getitem_slice(self, key):
return self._slice(key, axis=0)
def _getitem_array(self, key):
# also raises Exception if object array with NA values
if com.is_bool_indexer(key):
# warning here just in case -- previously __setitem__ was
# reindexing but __getitem__ was not; it seems more reasonable to
# go with the __setitem__ behavior since that is more consistent
# with all other indexing behavior
if isinstance(key, Series) and not key.index.equals(self.index):
warnings.warn("Boolean Series key will be reindexed to match "
"DataFrame index.", UserWarning, stacklevel=3)
elif len(key) != len(self.index):
raise ValueError('Item wrong length %d instead of %d.' %
(len(key), len(self.index)))
# check_bool_indexer will throw exception if Series key cannot
# be reindexed to match DataFrame rows
key = check_bool_indexer(self.index, key)
indexer = key.nonzero()[0]
return self.take(indexer, axis=0, convert=False)
else:
indexer = self.loc._convert_to_indexer(key, axis=1)
return self.take(indexer, axis=1, convert=True)
def _getitem_multilevel(self, key):
loc = self.columns.get_loc(key)
if isinstance(loc, (slice, Series, np.ndarray, Index)):
new_columns = self.columns[loc]
result_columns = maybe_droplevels(new_columns, key)
if self._is_mixed_type:
result = self.reindex(columns=new_columns)
result.columns = result_columns
else:
new_values = self.values[:, loc]
result = self._constructor(new_values, index=self.index,
columns=result_columns)
result = result.__finalize__(self)
if len(result.columns) == 1:
top = result.columns[0]
if ((type(top) == str and top == '') or
(type(top) == tuple and top[0] == '')):
result = result['']
if isinstance(result, Series):
result = self._constructor_sliced(result,
index=self.index,
name=key)
result._set_is_copy(self)
return result
else:
return self._get_item_cache(key)
def _getitem_frame(self, key):
if key.values.size and not is_bool_dtype(key.values):
raise ValueError('Must pass DataFrame with boolean values only')
return self.where(key)
def query(self, expr, inplace=False, **kwargs):
"""Query the columns of a frame with a boolean expression.
.. versionadded:: 0.13
Parameters
----------
expr : string
The query string to evaluate. You can refer to variables
in the environment by prefixing them with an '@' character like
``@a + b``.
inplace : bool
Whether the query should modify the data in place or return
a modified copy
.. versionadded:: 0.18.0
kwargs : dict
See the documentation for :func:`pandas.eval` for complete details
on the keyword arguments accepted by :meth:`DataFrame.query`.
Returns
-------
q : DataFrame
Notes
-----
The result of the evaluation of this expression is first passed to
:attr:`DataFrame.loc` and if that fails because of a
multidimensional key (e.g., a DataFrame) then the result will be passed
to :meth:`DataFrame.__getitem__`.
This method uses the top-level :func:`pandas.eval` function to
evaluate the passed query.
The :meth:`~pandas.DataFrame.query` method uses a slightly
modified Python syntax by default. For example, the ``&`` and ``|``
(bitwise) operators have the precedence of their boolean cousins,
:keyword:`and` and :keyword:`or`. This *is* syntactically valid Python,
however the semantics are different.
You can change the semantics of the expression by passing the keyword
argument ``parser='python'``. This enforces the same semantics as
evaluation in Python space. Likewise, you can pass ``engine='python'``
to evaluate an expression using Python itself as a backend. This is not
recommended as it is inefficient compared to using ``numexpr`` as the
engine.
The :attr:`DataFrame.index` and
:attr:`DataFrame.columns` attributes of the
:class:`~pandas.DataFrame` instance are placed in the query namespace
by default, which allows you to treat both the index and columns of the
frame as a column in the frame.
The identifier ``index`` is used for the frame index; you can also
use the name of the index to identify it in a query.
For further details and examples see the ``query`` documentation in
:ref:`indexing <indexing.query>`.
See Also
--------
pandas.eval
DataFrame.eval
Examples
--------
>>> from numpy.random import randn
>>> from pandas import DataFrame
>>> df = DataFrame(randn(10, 2), columns=list('ab'))
>>> df.query('a > b')
>>> df[df.a > df.b] # same result as the previous expression
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not isinstance(expr, compat.string_types):
msg = "expr must be a string to be evaluated, {0} given"
raise ValueError(msg.format(type(expr)))
kwargs['level'] = kwargs.pop('level', 0) + 1
kwargs['target'] = None
res = self.eval(expr, **kwargs)
try:
new_data = self.loc[res]
except ValueError:
# when res is multi-dimensional loc raises, but this is sometimes a
# valid query
new_data = self[res]
if inplace:
self._update_inplace(new_data)
else:
return new_data
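# Illustrative sketch (commented out; not part of the pandas source).
# ``query`` evaluates a boolean expression against the columns; '@' pulls
# in local variables ('threshold' is a hypothetical local name):
#
#   threshold = 0.5
#   subset = df.query('a > b')            # same as df[df.a > df.b]
#   subset = df.query('a > @threshold')   # refer to a Python variable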
def eval(self, expr, inplace=None, **kwargs):
"""Evaluate an expression in the context of the calling DataFrame
instance.
Parameters
----------
expr : string
The expression string to evaluate.
inplace : bool
If the expression contains an assignment, whether to return a new
DataFrame or mutate the existing.
WARNING: inplace=None currently falls back to True, but
in a future version, will default to False. Use inplace=True
explicitly rather than relying on the default.
.. versionadded:: 0.18.0
kwargs : dict
See the documentation for :func:`~pandas.eval` for complete details
on the keyword arguments accepted by
:meth:`~pandas.DataFrame.query`.
Returns
-------
ret : ndarray, scalar, or pandas object
See Also
--------
pandas.DataFrame.query
pandas.DataFrame.assign
pandas.eval
Notes
-----
For more details see the API documentation for :func:`~pandas.eval`.
For detailed examples see :ref:`enhancing performance with eval
<enhancingperf.eval>`.
Examples
--------
>>> from numpy.random import randn
>>> from pandas import DataFrame
>>> df = DataFrame(randn(10, 2), columns=list('ab'))
>>> df.eval('a + b')
>>> df.eval('c = a + b')
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
resolvers = kwargs.pop('resolvers', None)
kwargs['level'] = kwargs.pop('level', 0) + 1
if resolvers is None:
index_resolvers = self._get_index_resolvers()
resolvers = dict(self.iteritems()), index_resolvers
if 'target' not in kwargs:
kwargs['target'] = self
kwargs['resolvers'] = kwargs.get('resolvers', ()) + tuple(resolvers)
return _eval(expr, inplace=inplace, **kwargs)
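# Illustrative sketch (commented out; not part of the pandas source).
# ``eval`` can compute an expression or, with an assignment, add a column;
# passing inplace explicitly avoids the fallback behaviour warned about above:
#
#   summed = df.eval('a + b')              # returns a Series
#   df.eval('c = a + b', inplace=True)     # adds column 'c' to df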
def select_dtypes(self, include=None, exclude=None):
"""Return a subset of a DataFrame including/excluding columns based on
their ``dtype``.
Parameters
----------
include, exclude : list-like
A list of dtypes or strings to be included/excluded. You must pass
in a non-empty sequence for at least one of these.
Raises
------
ValueError
* If both of ``include`` and ``exclude`` are empty
* If ``include`` and ``exclude`` have overlapping elements
* If any kind of string dtype is passed in.
TypeError
* If either of ``include`` or ``exclude`` is not a sequence
Returns
-------
subset : DataFrame
The subset of the frame including the dtypes in ``include`` and
excluding the dtypes in ``exclude``.
Notes
-----
* To select all *numeric* types use the numpy dtype ``numpy.number``
* To select strings you must use the ``object`` dtype, but note that
this will return *all* object dtype columns
* See the `numpy dtype hierarchy
<http://docs.scipy.org/doc/numpy/reference/arrays.scalars.html>`__
* To select datetimes, use np.datetime64, 'datetime' or 'datetime64'
* To select timedeltas, use np.timedelta64, 'timedelta' or
'timedelta64'
* To select Pandas categorical dtypes, use 'category'
* To select Pandas datetimetz dtypes, use 'datetimetz' (new in 0.20.0),
or a 'datetime64[ns, tz]' string
Examples
--------
>>> df = pd.DataFrame({'a': np.random.randn(6).astype('f4'),
... 'b': [True, False] * 3,
... 'c': [1.0, 2.0] * 3})
>>> df
a b c
0 0.3962 True 1
1 0.1459 False 2
2 0.2623 True 1
3 0.0764 False 2
4 -0.9703 True 1
5 -1.2094 False 2
>>> df.select_dtypes(include=['float64'])
c
0 1
1 2
2 1
3 2
4 1
5 2
>>> df.select_dtypes(exclude=['floating'])
b
0 True
1 False
2 True
3 False
4 True
5 False
"""
include, exclude = include or (), exclude or ()
if not (is_list_like(include) and is_list_like(exclude)):
raise TypeError('include and exclude must both be non-string'
' sequences')
selection = tuple(map(frozenset, (include, exclude)))
if not any(selection):
raise ValueError('at least one of include or exclude must be '
'nonempty')
# convert the myriad valid dtypes object to a single representation
include, exclude = map(
lambda x: frozenset(map(_get_dtype_from_object, x)), selection)
for dtypes in (include, exclude):
invalidate_string_dtypes(dtypes)
# can't both include AND exclude!
if not include.isdisjoint(exclude):
raise ValueError('include and exclude overlap on %s' %
(include & exclude))
# empty include/exclude -> defaults to True
# three cases (we've already raised if both are empty)
# case 1: empty include, nonempty exclude
# we have True, True, ... True for include, same for exclude
# in the loop below we get the excluded
# and when we call '&' below we get only the excluded
# case 2: nonempty include, empty exclude
# same as case 1, but with include
# case 3: both nonempty
# the "union" of the logic of case 1 and case 2:
# we get the included and excluded, and return their logical and
include_these = Series(not bool(include), index=self.columns)
exclude_these = Series(not bool(exclude), index=self.columns)
def is_dtype_instance_mapper(column, dtype):
return column, functools.partial(issubclass, dtype.type)
for column, f in itertools.starmap(is_dtype_instance_mapper,
self.dtypes.iteritems()):
if include: # checks for the case of empty include or exclude
include_these[column] = any(map(f, include))
if exclude:
exclude_these[column] = not any(map(f, exclude))
dtype_indexer = include_these & exclude_these
return self.loc[com._get_info_slice(self, dtype_indexer)]
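# Illustrative sketch (commented out; not part of the pandas source).
# Selecting columns by dtype, following the rules in the docstring above:
#
#   numeric = df.select_dtypes(include=['number'])
#   strings = df.select_dtypes(include=['object'])
#   no_bools = df.select_dtypes(exclude=['bool'])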
def _box_item_values(self, key, values):
items = self.columns[self.columns.get_loc(key)]
if values.ndim == 2:
return self._constructor(values.T, columns=items, index=self.index)
else:
return self._box_col_values(values, items)
def _box_col_values(self, values, items):
""" provide boxed values for a column """
return self._constructor_sliced.from_array(values, index=self.index,
name=items, fastpath=True)
def __setitem__(self, key, value):
key = com._apply_if_callable(key, self)
# see if we can slice the rows
indexer = convert_to_index_sliceable(self, key)
if indexer is not None:
return self._setitem_slice(indexer, value)
if isinstance(key, (Series, np.ndarray, list, Index)):
self._setitem_array(key, value)
elif isinstance(key, DataFrame):
self._setitem_frame(key, value)
else:
# set column
self._set_item(key, value)
def _setitem_slice(self, key, value):
self._check_setitem_copy()
self.loc._setitem_with_indexer(key, value)
def _setitem_array(self, key, value):
# also raises Exception if object array with NA values
if com.is_bool_indexer(key):
if len(key) != len(self.index):
raise ValueError('Item wrong length %d instead of %d!' %
(len(key), len(self.index)))
key = check_bool_indexer(self.index, key)
indexer = key.nonzero()[0]
self._check_setitem_copy()
self.loc._setitem_with_indexer(indexer, value)
else:
if isinstance(value, DataFrame):
if len(value.columns) != len(key):
raise ValueError('Columns must be same length as key')
for k1, k2 in zip(key, value.columns):
self[k1] = value[k2]
else:
indexer = self.loc._convert_to_indexer(key, axis=1)
self._check_setitem_copy()
self.loc._setitem_with_indexer((slice(None), indexer), value)
def _setitem_frame(self, key, value):
# support boolean setting with DataFrame input, e.g.
# df[df > df2] = 0
if key.values.size and not is_bool_dtype(key.values):
raise TypeError('Must pass DataFrame with boolean values only')
self._check_inplace_setting(value)
self._check_setitem_copy()
self._where(-key, value, inplace=True)
def _ensure_valid_index(self, value):
"""
ensure that if we don't have an index, that we can create one from the
passed value
"""
# GH5632, make sure that we are a Series convertible
if not len(self.index) and is_list_like(value):
try:
value = Series(value)
except:
raise ValueError('Cannot set a frame with no defined index '
'and a value that cannot be converted to a '
'Series')
self._data = self._data.reindex_axis(value.index.copy(), axis=1,
fill_value=np.nan)
def _set_item(self, key, value):
"""
Add series to DataFrame in specified column.
If series is a numpy-array (not a Series/TimeSeries), it must be the
same length as the DataFrame's index or an error will be thrown.
Series/TimeSeries will be conformed to the DataFrame's index to
ensure homogeneity.
"""
self._ensure_valid_index(value)
value = self._sanitize_column(key, value)
NDFrame._set_item(self, key, value)
# check if we are modifying a copy
# try to set first as we want an invalid
# value exception to occur first
if len(self):
self._check_setitem_copy()
def insert(self, loc, column, value, allow_duplicates=False):
"""
Insert column into DataFrame at specified location.
If `allow_duplicates` is False, raises Exception if column
is already contained in the DataFrame.
Parameters
----------
loc : int
Must have 0 <= loc <= len(columns)
column : object
value : scalar, Series, or array-like
"""
self._ensure_valid_index(value)
value = self._sanitize_column(column, value, broadcast=False)
self._data.insert(loc, column, value,
allow_duplicates=allow_duplicates)
def assign(self, **kwargs):
"""
Assign new columns to a DataFrame, returning a new object
(a copy) with all the original columns in addition to the new ones.
.. versionadded:: 0.16.0
Parameters
----------
kwargs : keyword, value pairs
keywords are the column names. If the values are
callable, they are computed on the DataFrame and
assigned to the new columns. The callable must not
change input DataFrame (though pandas doesn't check it).
If the values are not callable, (e.g. a Series, scalar, or array),
they are simply assigned.
Returns
-------
df : DataFrame
A new DataFrame with the new columns in addition to
all the existing columns.
Notes
-----
Since ``kwargs`` is a dictionary, the order of your
arguments may not be preserved. To make things predictable,
the columns are inserted in alphabetical order, at the end of
your DataFrame. Assigning multiple columns within the same
``assign`` is possible, but you cannot reference other columns
created within the same ``assign`` call.
Examples
--------
>>> df = DataFrame({'A': range(1, 11), 'B': np.random.randn(10)})
Where the value is a callable, evaluated on `df`:
>>> df.assign(ln_A = lambda x: np.log(x.A))
A B ln_A
0 1 0.426905 0.000000
1 2 -0.780949 0.693147
2 3 -0.418711 1.098612
3 4 -0.269708 1.386294
4 5 -0.274002 1.609438
5 6 -0.500792 1.791759
6 7 1.649697 1.945910
7 8 -1.495604 2.079442
8 9 0.549296 2.197225
9 10 -0.758542 2.302585
Where the value already exists and is inserted:
>>> newcol = np.log(df['A'])
>>> df.assign(ln_A=newcol)
A B ln_A
0 1 0.426905 0.000000
1 2 -0.780949 0.693147
2 3 -0.418711 1.098612
3 4 -0.269708 1.386294
4 5 -0.274002 1.609438
5 6 -0.500792 1.791759
6 7 1.649697 1.945910
7 8 -1.495604 2.079442
8 9 0.549296 2.197225
9 10 -0.758542 2.302585
"""
data = self.copy()
# do all calculations first...
results = {}
for k, v in kwargs.items():
results[k] = com._apply_if_callable(v, data)
# ... and then assign
for k, v in sorted(results.items()):
data[k] = v
return data
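# Illustrative sketch (commented out; not part of the pandas source).
# ``assign`` returns a copy with new columns; callables are evaluated on
# the frame, so they can reference existing columns:
#
#   import numpy as np
#   df2 = df.assign(ln_A=lambda d: np.log(d['A']),
#                   B_squared=df['B'] ** 2)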
def _sanitize_column(self, key, value, broadcast=True):
"""
Ensures new columns (which go into the BlockManager as new blocks) are
always copied and converted into an array.
Parameters
----------
key : object
value : scalar, Series, or array-like
broadcast : bool, default True
If ``key`` matches multiple duplicate column names in the
DataFrame, this parameter indicates whether ``value`` should be
tiled so that the returned array contains a (duplicated) column for
each occurrence of the key. If False, ``value`` will not be tiled.
Returns
-------
sanitized_column : numpy-array
"""
def reindexer(value):
# reindex if necessary
if value.index.equals(self.index) or not len(self.index):
value = value._values.copy()
else:
# GH 4107
try:
value = value.reindex(self.index)._values
except Exception as e:
# duplicate axis
if not value.index.is_unique:
raise e
# other
raise TypeError('incompatible index of inserted column '
'with frame index')
return value
if isinstance(value, Series):
value = reindexer(value)
elif isinstance(value, DataFrame):
# align right-hand-side columns if self.columns
# is multi-index and self[key] is a sub-frame
if isinstance(self.columns, MultiIndex) and key in self.columns:
loc = self.columns.get_loc(key)
if isinstance(loc, (slice, Series, np.ndarray, Index)):
cols = maybe_droplevels(self.columns[loc], key)
if len(cols) and not cols.equals(value.columns):
value = value.reindex_axis(cols, axis=1)
# now align rows
value = reindexer(value).T
elif isinstance(value, Categorical):
value = value.copy()
elif isinstance(value, Index) or is_sequence(value):
from pandas.core.series import _sanitize_index
# turn me into an ndarray
value = _sanitize_index(value, self.index, copy=False)
if not isinstance(value, (np.ndarray, Index)):
if isinstance(value, list) and len(value) > 0:
value = maybe_convert_platform(value)
else:
value = com._asarray_tuplesafe(value)
elif value.ndim == 2:
value = value.copy().T
elif isinstance(value, Index):
value = value.copy(deep=True)
else:
value = value.copy()
# possibly infer to datetimelike
if is_object_dtype(value.dtype):
value = maybe_infer_to_datetimelike(value)
else:
# upcast the scalar
dtype, value = infer_dtype_from_scalar(value)
value = np.repeat(value, len(self.index)).astype(dtype)
value = maybe_cast_to_datetime(value, dtype)
# return internal types directly
if is_extension_type(value):
return value
# broadcast across multiple columns if necessary
if broadcast and key in self.columns and value.ndim == 1:
if (not self.columns.is_unique or
isinstance(self.columns, MultiIndex)):
existing_piece = self[key]
if isinstance(existing_piece, DataFrame):
value = np.tile(value, (len(existing_piece.columns), 1))
return np.atleast_2d(np.asarray(value))
@property
def _series(self):
result = {}
for idx, item in enumerate(self.columns):
result[item] = Series(self._data.iget(idx), index=self.index,
name=item)
return result
def lookup(self, row_labels, col_labels):
"""Label-based "fancy indexing" function for DataFrame.
Given equal-length arrays of row and column labels, return an
array of the values corresponding to each (row, col) pair.
Parameters
----------
row_labels : sequence
The row labels to use for lookup
col_labels : sequence
The column labels to use for lookup
Notes
-----
Akin to::
result = []
for row, col in zip(row_labels, col_labels):
result.append(df.get_value(row, col))
Returns
-------
values : ndarray
The found values
"""
n = len(row_labels)
if n != len(col_labels):
raise ValueError('Row labels must have same size as column labels')
thresh = 1000
if not self._is_mixed_type or n > thresh:
values = self.values
ridx = self.index.get_indexer(row_labels)
cidx = self.columns.get_indexer(col_labels)
if (ridx == -1).any():
raise KeyError('One or more row labels was not found')
if (cidx == -1).any():
raise KeyError('One or more column labels was not found')
flat_index = ridx * len(self.columns) + cidx
result = values.flat[flat_index]
else:
result = np.empty(n, dtype='O')
for i, (r, c) in enumerate(zip(row_labels, col_labels)):
result[i] = self.get_value(r, c)
if is_object_dtype(result):
result = lib.maybe_convert_objects(result)
return result
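# Illustrative sketch (commented out; not part of the pandas source).
# ``lookup`` pairs row and column labels positionally and returns a flat
# array of the matching values (labels here are hypothetical):
#
#   vals = df.lookup(['row1', 'row2', 'row3'], ['A', 'B', 'A'])
#   # akin to: [df.get_value(r, c) for r, c in zip(row_labels, col_labels)]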
# ----------------------------------------------------------------------
# Reindexing and alignment
def _reindex_axes(self, axes, level, limit, tolerance, method, fill_value,
copy):
frame = self
columns = axes['columns']
if columns is not None:
frame = frame._reindex_columns(columns, method, copy, level,
fill_value, limit, tolerance)
index = axes['index']
if index is not None:
frame = frame._reindex_index(index, method, copy, level,
fill_value, limit, tolerance)
return frame
def _reindex_index(self, new_index, method, copy, level, fill_value=NA,
limit=None, tolerance=None):
new_index, indexer = self.index.reindex(new_index, method=method,
level=level, limit=limit,
tolerance=tolerance)
return self._reindex_with_indexers({0: [new_index, indexer]},
copy=copy, fill_value=fill_value,
allow_dups=False)
def _reindex_columns(self, new_columns, method, copy, level, fill_value=NA,
limit=None, tolerance=None):
new_columns, indexer = self.columns.reindex(new_columns, method=method,
level=level, limit=limit,
tolerance=tolerance)
return self._reindex_with_indexers({1: [new_columns, indexer]},
copy=copy, fill_value=fill_value,
allow_dups=False)
def _reindex_multi(self, axes, copy, fill_value):
""" we are guaranteed non-Nones in the axes! """
new_index, row_indexer = self.index.reindex(axes['index'])
new_columns, col_indexer = self.columns.reindex(axes['columns'])
if row_indexer is not None and col_indexer is not None:
indexer = row_indexer, col_indexer
new_values = algorithms.take_2d_multi(self.values, indexer,
fill_value=fill_value)
return self._constructor(new_values, index=new_index,
columns=new_columns)
else:
return self._reindex_with_indexers({0: [new_index, row_indexer],
1: [new_columns, col_indexer]},
copy=copy,
fill_value=fill_value)
@Appender(_shared_docs['align'] % _shared_doc_kwargs)
def align(self, other, join='outer', axis=None, level=None, copy=True,
fill_value=None, method=None, limit=None, fill_axis=0,
broadcast_axis=None):
return super(DataFrame, self).align(other, join=join, axis=axis,
level=level, copy=copy,
fill_value=fill_value,
method=method, limit=limit,
fill_axis=fill_axis,
broadcast_axis=broadcast_axis)
@Appender(_shared_docs['reindex'] % _shared_doc_kwargs)
def reindex(self, index=None, columns=None, **kwargs):
return super(DataFrame, self).reindex(index=index, columns=columns,
**kwargs)
@Appender(_shared_docs['reindex_axis'] % _shared_doc_kwargs)
def reindex_axis(self, labels, axis=0, method=None, level=None, copy=True,
limit=None, fill_value=np.nan):
return super(DataFrame,
self).reindex_axis(labels=labels, axis=axis,
method=method, level=level, copy=copy,
limit=limit, fill_value=fill_value)
@Appender(_shared_docs['rename'] % _shared_doc_kwargs)
def rename(self, index=None, columns=None, **kwargs):
return super(DataFrame, self).rename(index=index, columns=columns,
**kwargs)
@Appender(_shared_docs['fillna'] % _shared_doc_kwargs)
def fillna(self, value=None, method=None, axis=None, inplace=False,
limit=None, downcast=None, **kwargs):
return super(DataFrame,
self).fillna(value=value, method=method, axis=axis,
inplace=inplace, limit=limit,
downcast=downcast, **kwargs)
@Appender(_shared_docs['shift'] % _shared_doc_kwargs)
def shift(self, periods=1, freq=None, axis=0):
return super(DataFrame, self).shift(periods=periods, freq=freq,
axis=axis)
def set_index(self, keys, drop=True, append=False, inplace=False,
verify_integrity=False):
"""
Set the DataFrame index (row labels) using one or more existing
columns. By default yields a new object.
Parameters
----------
keys : column label or list of column labels / arrays
drop : boolean, default True
Delete columns to be used as the new index
append : boolean, default False
Whether to append columns to existing index
inplace : boolean, default False
Modify the DataFrame in place (do not create a new object)
verify_integrity : boolean, default False
Check the new index for duplicates. Otherwise defer the check until
necessary. Setting to False will improve the performance of this
method
Examples
--------
>>> indexed_df = df.set_index(['A', 'B'])
>>> indexed_df2 = df.set_index(['A', [0, 1, 2, 0, 1, 2]])
>>> indexed_df3 = df.set_index([[0, 1, 2, 0, 1, 2]])
Returns
-------
dataframe : DataFrame
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not isinstance(keys, list):
keys = [keys]
if inplace:
frame = self
else:
frame = self.copy()
arrays = []
names = []
if append:
names = [x for x in self.index.names]
if isinstance(self.index, MultiIndex):
for i in range(self.index.nlevels):
arrays.append(self.index._get_level_values(i))
else:
arrays.append(self.index)
to_remove = []
for col in keys:
if isinstance(col, MultiIndex):
# append all but the last column so we don't have to modify
# the end of this loop
for n in range(col.nlevels - 1):
arrays.append(col._get_level_values(n))
level = col._get_level_values(col.nlevels - 1)
names.extend(col.names)
elif isinstance(col, Series):
level = col._values
names.append(col.name)
elif isinstance(col, Index):
level = col
names.append(col.name)
elif isinstance(col, (list, np.ndarray, Index)):
level = col
names.append(None)
else:
level = frame[col]._values
names.append(col)
if drop:
to_remove.append(col)
arrays.append(level)
index = MultiIndex.from_arrays(arrays, names=names)
if verify_integrity and not index.is_unique:
duplicates = index.get_duplicates()
raise ValueError('Index has duplicate keys: %s' % duplicates)
for c in to_remove:
del frame[c]
# clear up memory usage
index._cleanup()
frame.index = index
if not inplace:
return frame
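# Illustrative sketch (commented out; not part of the pandas source).
# Building a (Multi)Index from existing columns and undoing it again:
#
#   indexed = df.set_index(['A', 'B'])        # 2-level MultiIndex
#   restored = indexed.reset_index()          # columns 'A', 'B' come back
#   df.set_index('A', drop=False, inplace=True)  # keep 'A' as a column too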
def reset_index(self, level=None, drop=False, inplace=False, col_level=0,
col_fill=''):
"""
For DataFrame with multi-level index, return new DataFrame with
labeling information in the columns under the index names, defaulting
to 'level_0', 'level_1', etc. if any are None. For a standard index,
the index name will be used (if set), otherwise a default 'index' or
'level_0' (if 'index' is already taken) will be used.
Parameters
----------
level : int, str, tuple, or list, default None
Only remove the given levels from the index. Removes all levels by
default
drop : boolean, default False
Do not try to insert index into dataframe columns. This resets
the index to the default integer index.
inplace : boolean, default False
Modify the DataFrame in place (do not create a new object)
col_level : int or str, default 0
If the columns have multiple levels, determines which level the
labels are inserted into. By default it is inserted into the first
level.
col_fill : object, default ''
If the columns have multiple levels, determines how the other
levels are named. If None then the index name is repeated.
Returns
-------
reset : DataFrame
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if inplace:
new_obj = self
else:
new_obj = self.copy()
def _maybe_casted_values(index, labels=None):
if isinstance(index, PeriodIndex):
values = index.asobject.values
elif isinstance(index, DatetimeIndex) and index.tz is not None:
values = index
else:
values = index.values
if values.dtype == np.object_:
values = lib.maybe_convert_objects(values)
# if we have the labels, extract the values with a mask
if labels is not None:
mask = labels == -1
# we can have situations where the whole mask is -1,
# meaning there is nothing found in labels, so make all nan's
if mask.all():
values = np.empty(len(mask))
values.fill(np.nan)
else:
values = values.take(labels)
if mask.any():
values, changed = maybe_upcast_putmask(
values, mask, np.nan)
return values
new_index = _default_index(len(new_obj))
if isinstance(self.index, MultiIndex):
if level is not None:
if not isinstance(level, (tuple, list)):
level = [level]
level = [self.index._get_level_number(lev) for lev in level]
if len(level) < len(self.index.levels):
new_index = self.index.droplevel(level)
if not drop:
if isinstance(self.index, MultiIndex):
names = [n if n is not None else ('level_%d' % i)
for (i, n) in enumerate(self.index.names)]
to_insert = lzip(self.index.levels, self.index.labels)
else:
default = 'index' if 'index' not in self else 'level_0'
names = ([default] if self.index.name is None
else [self.index.name])
to_insert = ((self.index, None),)
multi_col = isinstance(self.columns, MultiIndex)
for i, (lev, lab) in reversed(list(enumerate(to_insert))):
name = names[i]
if multi_col:
col_name = (list(name) if isinstance(name, tuple)
else [name])
if col_fill is None:
if len(col_name) not in (1, self.columns.nlevels):
raise ValueError("col_fill=None is incompatible "
"with incomplete column name "
"{}".format(name))
col_fill = col_name[0]
lev_num = self.columns._get_level_number(col_level)
name_lst = [col_fill] * lev_num + col_name
missing = self.columns.nlevels - len(name_lst)
name_lst += [col_fill] * missing
name = tuple(name_lst)
# to ndarray and maybe infer different dtype
level_values = _maybe_casted_values(lev, lab)
if level is None or i in level:
new_obj.insert(0, name, level_values)
new_obj.index = new_index
if not inplace:
return new_obj
# ----------------------------------------------------------------------
# Reindex-based selection methods
def dropna(self, axis=0, how='any', thresh=None, subset=None,
inplace=False):
"""
Return object with labels on given axis omitted where, depending on
``how``, any or all of the data are missing
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, or tuple/list thereof
Pass tuple or list to drop on multiple axes
how : {'any', 'all'}
* any : if any NA values are present, drop that label
* all : if all values are NA, drop that label
thresh : int, default None
int value : require that many non-NA values
subset : array-like
Labels along other axis to consider, e.g. if you are dropping rows
these would be a list of columns to include
inplace : boolean, default False
If True, do operation inplace and return None.
Returns
-------
dropped : DataFrame
Examples
--------
>>> df = pd.DataFrame([[np.nan, 2, np.nan, 0], [3, 4, np.nan, 1],
... [np.nan, np.nan, np.nan, 5]],
... columns=list('ABCD'))
>>> df
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 NaN NaN NaN 5
Drop the columns where all elements are nan:
>>> df.dropna(axis=1, how='all')
A B D
0 NaN 2.0 0
1 3.0 4.0 1
2 NaN NaN 5
Drop the columns where any of the elements is nan
>>> df.dropna(axis=1, how='any')
D
0 0
1 1
2 5
Drop the rows where all of the elements are nan
(there is no row to drop, so df stays the same):
>>> df.dropna(axis=0, how='all')
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 NaN NaN NaN 5
Keep only the rows with at least 2 non-na values:
>>> df.dropna(thresh=2)
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if isinstance(axis, (tuple, list)):
result = self
for ax in axis:
result = result.dropna(how=how, thresh=thresh, subset=subset,
axis=ax)
else:
axis = self._get_axis_number(axis)
agg_axis = 1 - axis
agg_obj = self
if subset is not None:
ax = self._get_axis(agg_axis)
indices = ax.get_indexer_for(subset)
check = indices == -1
if check.any():
raise KeyError(list(np.compress(check, subset)))
agg_obj = self.take(indices, axis=agg_axis)
count = agg_obj.count(axis=agg_axis)
if thresh is not None:
mask = count >= thresh
elif how == 'any':
mask = count == len(agg_obj._get_axis(agg_axis))
elif how == 'all':
mask = count > 0
else:
if how is not None:
raise ValueError('invalid how option: %s' % how)
else:
raise TypeError('must specify how or thresh')
result = self.take(mask.nonzero()[0], axis=axis, convert=False)
if inplace:
self._update_inplace(result)
else:
return result
def drop_duplicates(self, subset=None, keep='first', inplace=False):
"""
Return DataFrame with duplicate rows removed, optionally only
considering certain columns
Parameters
----------
subset : column label or sequence of labels, optional
Only consider certain columns for identifying duplicates, by
default use all of the columns
keep : {'first', 'last', False}, default 'first'
- ``first`` : Drop duplicates except for the first occurrence.
- ``last`` : Drop duplicates except for the last occurrence.
- False : Drop all duplicates.
inplace : boolean, default False
Whether to drop duplicates in place or to return a copy
Returns
-------
deduplicated : DataFrame
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
duplicated = self.duplicated(subset, keep=keep)
if inplace:
inds, = (-duplicated).nonzero()
new_data = self._data.take(inds)
self._update_inplace(new_data)
else:
return self[-duplicated]
def duplicated(self, subset=None, keep='first'):
"""
Return boolean Series denoting duplicate rows, optionally only
considering certain columns
Parameters
----------
subset : column label or sequence of labels, optional
Only consider certain columns for identifying duplicates, by
default use all of the columns
keep : {'first', 'last', False}, default 'first'
- ``first`` : Mark duplicates as ``True`` except for the
first occurrence.
- ``last`` : Mark duplicates as ``True`` except for the
last occurrence.
- False : Mark all duplicates as ``True``.
Returns
-------
duplicated : Series
"""
from pandas.core.sorting import get_group_index
from pandas._libs.hashtable import duplicated_int64, _SIZE_HINT_LIMIT
def f(vals):
labels, shape = algorithms.factorize(
vals, size_hint=min(len(self), _SIZE_HINT_LIMIT))
return labels.astype('i8', copy=False), len(shape)
if subset is None:
subset = self.columns
elif (not np.iterable(subset) or
isinstance(subset, compat.string_types) or
isinstance(subset, tuple) and subset in self.columns):
subset = subset,
vals = (self[col].values for col in subset)
labels, shape = map(list, zip(*map(f, vals)))
ids = get_group_index(labels, shape, sort=False, xnull=False)
return Series(duplicated_int64(ids, keep), index=self.index)
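# Illustrative sketch (commented out; not part of the pandas source).
# ``duplicated`` flags repeated rows; ``drop_duplicates`` removes them.
# ``subset`` restricts the comparison to particular columns:
#
#   mask = df.duplicated(subset=['A'], keep='last')   # boolean Series
#   deduped = df.drop_duplicates(subset=['A'])        # keep first occurrence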
# ----------------------------------------------------------------------
# Sorting
@Appender(_shared_docs['sort_values'] % _shared_doc_kwargs)
def sort_values(self, by, axis=0, ascending=True, inplace=False,
kind='quicksort', na_position='last'):
inplace = validate_bool_kwarg(inplace, 'inplace')
axis = self._get_axis_number(axis)
other_axis = 0 if axis == 1 else 1
if not isinstance(by, list):
by = [by]
if is_sequence(ascending) and len(by) != len(ascending):
raise ValueError('Length of ascending (%d) != length of by (%d)' %
(len(ascending), len(by)))
if len(by) > 1:
from pandas.core.sorting import lexsort_indexer
def trans(v):
if needs_i8_conversion(v):
return v.view('i8')
return v
keys = []
for x in by:
k = self.xs(x, axis=other_axis).values
if k.ndim == 2:
raise ValueError('Cannot sort by duplicate column %s' %
str(x))
keys.append(trans(k))
indexer = lexsort_indexer(keys, orders=ascending,
na_position=na_position)
indexer = _ensure_platform_int(indexer)
else:
from pandas.core.sorting import nargsort
by = by[0]
k = self.xs(by, axis=other_axis).values
if k.ndim == 2:
# try to be helpful
if isinstance(self.columns, MultiIndex):
                    raise ValueError('Cannot sort by column %s in a '
                                     'multi-index; you need to explicitly '
                                     'provide all the levels' % str(by))
raise ValueError('Cannot sort by duplicate column %s' %
str(by))
if isinstance(ascending, (tuple, list)):
ascending = ascending[0]
indexer = nargsort(k, kind=kind, ascending=ascending,
na_position=na_position)
new_data = self._data.take(indexer,
axis=self._get_block_manager_axis(axis),
convert=False, verify=False)
if inplace:
return self._update_inplace(new_data)
else:
return self._constructor(new_data).__finalize__(self)
@Appender(_shared_docs['sort_index'] % _shared_doc_kwargs)
def sort_index(self, axis=0, level=None, ascending=True, inplace=False,
kind='quicksort', na_position='last', sort_remaining=True,
by=None):
# TODO: this can be combined with Series.sort_index impl as
# almost identical
inplace = validate_bool_kwarg(inplace, 'inplace')
# 10726
if by is not None:
            warnings.warn("by argument to sort_index is deprecated, please "
                          "use .sort_values(by=...)", FutureWarning,
                          stacklevel=2)
if level is not None:
raise ValueError("unable to simultaneously sort by and level")
return self.sort_values(by, axis=axis, ascending=ascending,
inplace=inplace)
axis = self._get_axis_number(axis)
labels = self._get_axis(axis)
if level:
new_axis, indexer = labels.sortlevel(level, ascending=ascending,
sort_remaining=sort_remaining)
elif isinstance(labels, MultiIndex):
from pandas.core.sorting import lexsort_indexer
# make sure that the axis is lexsorted to start
# if not we need to reconstruct to get the correct indexer
labels = labels._sort_levels_monotonic()
indexer = lexsort_indexer(labels._get_labels_for_sorting(),
orders=ascending,
na_position=na_position)
else:
from pandas.core.sorting import nargsort
# Check monotonic-ness before sort an index
# GH11080
if ((ascending and labels.is_monotonic_increasing) or
(not ascending and labels.is_monotonic_decreasing)):
if inplace:
return
else:
return self.copy()
indexer = nargsort(labels, kind=kind, ascending=ascending,
na_position=na_position)
baxis = self._get_block_manager_axis(axis)
new_data = self._data.take(indexer,
axis=baxis,
convert=False, verify=False)
# reconstruct axis if needed
new_data.axes[baxis] = new_data.axes[baxis]._sort_levels_monotonic()
if inplace:
return self._update_inplace(new_data)
else:
return self._constructor(new_data).__finalize__(self)
def sortlevel(self, level=0, axis=0, ascending=True, inplace=False,
sort_remaining=True):
"""
DEPRECATED: use :meth:`DataFrame.sort_index`
Sort multilevel index by chosen axis and primary level. Data will be
lexicographically sorted by the chosen level followed by the other
levels (in order)
Parameters
----------
level : int
axis : {0 or 'index', 1 or 'columns'}, default 0
ascending : boolean, default True
inplace : boolean, default False
Sort the DataFrame without creating a new instance
sort_remaining : boolean, default True
Sort by the other levels too.
Returns
-------
sorted : DataFrame
See Also
--------
DataFrame.sort_index(level=...)
"""
warnings.warn("sortlevel is deprecated, use sort_index(level= ...)",
FutureWarning, stacklevel=2)
return self.sort_index(level=level, axis=axis, ascending=ascending,
inplace=inplace, sort_remaining=sort_remaining)
def nlargest(self, n, columns, keep='first'):
"""Get the rows of a DataFrame sorted by the `n` largest
values of `columns`.
.. versionadded:: 0.17.0
Parameters
----------
n : int
Number of items to retrieve
columns : list or str
Column name or names to order by
keep : {'first', 'last', False}, default 'first'
Where there are duplicate values:
- ``first`` : take the first occurrence.
- ``last`` : take the last occurrence.
Returns
-------
DataFrame
Examples
--------
>>> df = DataFrame({'a': [1, 10, 8, 11, -1],
... 'b': list('abdce'),
... 'c': [1.0, 2.0, np.nan, 3.0, 4.0]})
>>> df.nlargest(3, 'a')
            a  b    c
        3  11  c  3.0
        1  10  b  2.0
        2   8  d  NaN
"""
return algorithms.SelectNFrame(self,
n=n,
keep=keep,
columns=columns).nlargest()
def nsmallest(self, n, columns, keep='first'):
"""Get the rows of a DataFrame sorted by the `n` smallest
values of `columns`.
.. versionadded:: 0.17.0
Parameters
----------
n : int
Number of items to retrieve
columns : list or str
Column name or names to order by
keep : {'first', 'last', False}, default 'first'
Where there are duplicate values:
- ``first`` : take the first occurrence.
- ``last`` : take the last occurrence.
Returns
-------
DataFrame
Examples
--------
>>> df = DataFrame({'a': [1, 10, 8, 11, -1],
... 'b': list('abdce'),
... 'c': [1.0, 2.0, np.nan, 3.0, 4.0]})
>>> df.nsmallest(3, 'a')
           a  b    c
        4 -1  e  4.0
        0  1  a  1.0
        2  8  d  NaN
"""
return algorithms.SelectNFrame(self,
n=n,
keep=keep,
columns=columns).nsmallest()
def swaplevel(self, i=-2, j=-1, axis=0):
"""
Swap levels i and j in a MultiIndex on a particular axis
Parameters
----------
i, j : int, string (can be mixed)
Level of index to be swapped. Can pass level name as string.
Returns
-------
swapped : type of caller (new object)
.. versionchanged:: 0.18.1
The indexes ``i`` and ``j`` are now optional, and default to
the two innermost levels of the index.
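        Examples
        --------
        A small illustrative sketch with a made-up two-level index:

        >>> idx = pd.MultiIndex.from_tuples([('a', 1), ('b', 2)],
        ...                                 names=['x', 'y'])
        >>> df = pd.DataFrame({'v': [10, 20]}, index=idx)
        >>> df.swaplevel().index.names
        FrozenList(['y', 'x'])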
"""
result = self.copy()
axis = self._get_axis_number(axis)
if axis == 0:
result.index = result.index.swaplevel(i, j)
else:
result.columns = result.columns.swaplevel(i, j)
return result
def reorder_levels(self, order, axis=0):
"""
Rearrange index levels using input order.
May not drop or duplicate levels
Parameters
----------
order : list of int or list of str
List representing new level order. Reference level by number
(position) or by key (label).
axis : int
Where to reorder levels.
Returns
-------
type of caller (new object)
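        Examples
        --------
        A small illustrative sketch with a made-up three-level index:

        >>> idx = pd.MultiIndex.from_tuples([('a', 1, 'x'), ('b', 2, 'y')],
        ...                                 names=['l1', 'l2', 'l3'])
        >>> df = pd.DataFrame({'v': [10, 20]}, index=idx)
        >>> df.reorder_levels(['l3', 'l1', 'l2']).index.names
        FrozenList(['l3', 'l1', 'l2'])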
"""
axis = self._get_axis_number(axis)
if not isinstance(self._get_axis(axis),
MultiIndex): # pragma: no cover
raise TypeError('Can only reorder levels on a hierarchical axis.')
result = self.copy()
if axis == 0:
result.index = result.index.reorder_levels(order)
else:
result.columns = result.columns.reorder_levels(order)
return result
# ----------------------------------------------------------------------
# Arithmetic / combination related
def _combine_frame(self, other, func, fill_value=None, level=None):
this, other = self.align(other, join='outer', level=level, copy=False)
new_index, new_columns = this.index, this.columns
def _arith_op(left, right):
if fill_value is not None:
left_mask = isnull(left)
right_mask = isnull(right)
left = left.copy()
right = right.copy()
# one but not both
mask = left_mask ^ right_mask
left[left_mask & mask] = fill_value
right[right_mask & mask] = fill_value
return func(left, right)
if this._is_mixed_type or other._is_mixed_type:
# unique
if this.columns.is_unique:
def f(col):
r = _arith_op(this[col].values, other[col].values)
return self._constructor_sliced(r, index=new_index,
dtype=r.dtype)
result = dict([(col, f(col)) for col in this])
# non-unique
else:
def f(i):
r = _arith_op(this.iloc[:, i].values,
other.iloc[:, i].values)
return self._constructor_sliced(r, index=new_index,
dtype=r.dtype)
result = dict([
(i, f(i)) for i, col in enumerate(this.columns)
])
result = self._constructor(result, index=new_index, copy=False)
result.columns = new_columns
return result
else:
result = _arith_op(this.values, other.values)
return self._constructor(result, index=new_index, columns=new_columns,
copy=False)
def _combine_series(self, other, func, fill_value=None, axis=None,
level=None):
if axis is not None:
axis = self._get_axis_name(axis)
if axis == 'index':
return self._combine_match_index(other, func, level=level,
fill_value=fill_value)
else:
return self._combine_match_columns(other, func, level=level,
fill_value=fill_value)
return self._combine_series_infer(other, func, level=level,
fill_value=fill_value)
def _combine_series_infer(self, other, func, level=None, fill_value=None):
if len(other) == 0:
return self * NA
if len(self) == 0:
# Ambiguous case, use _series so works with DataFrame
return self._constructor(data=self._series, index=self.index,
columns=self.columns)
return self._combine_match_columns(other, func, level=level,
fill_value=fill_value)
def _combine_match_index(self, other, func, level=None, fill_value=None):
left, right = self.align(other, join='outer', axis=0, level=level,
copy=False)
if fill_value is not None:
raise NotImplementedError("fill_value %r not supported." %
fill_value)
return self._constructor(func(left.values.T, right.values).T,
index=left.index, columns=self.columns,
copy=False)
def _combine_match_columns(self, other, func, level=None, fill_value=None):
left, right = self.align(other, join='outer', axis=1, level=level,
copy=False)
if fill_value is not None:
raise NotImplementedError("fill_value %r not supported" %
fill_value)
new_data = left._data.eval(func=func, other=right,
axes=[left.columns, self.index])
return self._constructor(new_data)
def _combine_const(self, other, func, raise_on_error=True):
new_data = self._data.eval(func=func, other=other,
raise_on_error=raise_on_error)
return self._constructor(new_data)
def _compare_frame_evaluate(self, other, func, str_rep):
# unique
if self.columns.is_unique:
def _compare(a, b):
return dict([(col, func(a[col], b[col])) for col in a.columns])
new_data = expressions.evaluate(_compare, str_rep, self, other)
return self._constructor(data=new_data, index=self.index,
columns=self.columns, copy=False)
# non-unique
else:
def _compare(a, b):
return dict([(i, func(a.iloc[:, i], b.iloc[:, i]))
for i, col in enumerate(a.columns)])
new_data = expressions.evaluate(_compare, str_rep, self, other)
result = self._constructor(data=new_data, index=self.index,
copy=False)
result.columns = self.columns
return result
def _compare_frame(self, other, func, str_rep):
if not self._indexed_same(other):
raise ValueError('Can only compare identically-labeled '
'DataFrame objects')
return self._compare_frame_evaluate(other, func, str_rep)
def _flex_compare_frame(self, other, func, str_rep, level):
if not self._indexed_same(other):
self, other = self.align(other, 'outer', level=level, copy=False)
return self._compare_frame_evaluate(other, func, str_rep)
def combine(self, other, func, fill_value=None, overwrite=True):
"""
        Combine two DataFrame objects column-wise using `func`, without
        propagating NaN values: if for a (column, index) location one frame is
        missing a value, it will default to the other frame's value (which
        might be NaN as well).
Parameters
----------
other : DataFrame
func : function
fill_value : scalar value
overwrite : boolean, default True
If True then overwrite values for common keys in the calling frame
Returns
-------
result : DataFrame
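        Examples
        --------
        A minimal sketch with made-up data, picking, column by column, the
        Series with the smaller sum:

        >>> df1 = pd.DataFrame({'A': [0, 0], 'B': [4, 4]})
        >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
        >>> df1.combine(df2, lambda s1, s2: s1 if s1.sum() < s2.sum() else s2)
           A  B
        0  0  3
        1  0  3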
"""
other_idxlen = len(other.index) # save for compare
this, other = self.align(other, copy=False)
new_index = this.index
if other.empty and len(new_index) == len(self.index):
return self.copy()
if self.empty and len(other) == other_idxlen:
return other.copy()
# sorts if possible
new_columns = this.columns.union(other.columns)
do_fill = fill_value is not None
result = {}
for col in new_columns:
series = this[col]
otherSeries = other[col]
this_dtype = series.dtype
other_dtype = otherSeries.dtype
this_mask = isnull(series)
other_mask = isnull(otherSeries)
            # don't overwrite columns unnecessarily
# DO propagate if this column is not in the intersection
if not overwrite and other_mask.all():
result[col] = this[col].copy()
continue
if do_fill:
series = series.copy()
otherSeries = otherSeries.copy()
series[this_mask] = fill_value
otherSeries[other_mask] = fill_value
            # if we have different dtypes, possibly promote
new_dtype = this_dtype
if not is_dtype_equal(this_dtype, other_dtype):
new_dtype = find_common_type([this_dtype, other_dtype])
if not is_dtype_equal(this_dtype, new_dtype):
series = series.astype(new_dtype)
if not is_dtype_equal(other_dtype, new_dtype):
otherSeries = otherSeries.astype(new_dtype)
# see if we need to be represented as i8 (datetimelike)
# try to keep us at this dtype
needs_i8_conversion_i = needs_i8_conversion(new_dtype)
if needs_i8_conversion_i:
arr = func(series, otherSeries, True)
else:
arr = func(series, otherSeries)
if do_fill:
arr = _ensure_float(arr)
arr[this_mask & other_mask] = NA
# try to downcast back to the original dtype
if needs_i8_conversion_i:
# ToDo: This conversion should be handled in
# _maybe_cast_to_datetime but the change affects lot...
if is_datetime64tz_dtype(new_dtype):
arr = DatetimeIndex._simple_new(arr, tz=new_dtype.tz)
else:
arr = maybe_cast_to_datetime(arr, new_dtype)
else:
arr = maybe_downcast_to_dtype(arr, this_dtype)
result[col] = arr
# convert_objects just in case
return self._constructor(result, index=new_index,
columns=new_columns)._convert(datetime=True,
copy=False)
def combine_first(self, other):
"""
Combine two DataFrame objects and default to non-null values in frame
calling the method. Result index columns will be the union of the
respective indexes and columns
Parameters
----------
other : DataFrame
Examples
--------
a's values prioritized, use values from b to fill holes:
>>> a.combine_first(b)
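        A minimal concrete sketch with two small made-up frames:

        >>> df1 = pd.DataFrame({'A': [None, 0], 'B': [None, 4]})
        >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
        >>> df1.combine_first(df2)
             A    B
        0  1.0  3.0
        1  0.0  4.0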
Returns
-------
combined : DataFrame
"""
def combiner(x, y, needs_i8_conversion=False):
x_values = x.values if hasattr(x, 'values') else x
y_values = y.values if hasattr(y, 'values') else y
if needs_i8_conversion:
mask = isnull(x)
x_values = x_values.view('i8')
y_values = y_values.view('i8')
else:
mask = isnull(x_values)
return expressions.where(mask, y_values, x_values,
raise_on_error=True)
return self.combine(other, combiner, overwrite=False)
def update(self, other, join='left', overwrite=True, filter_func=None,
raise_conflict=False):
"""
Modify DataFrame in place using non-NA values from passed
DataFrame. Aligns on indices
Parameters
----------
other : DataFrame, or object coercible into a DataFrame
join : {'left'}, default 'left'
overwrite : boolean, default True
If True then overwrite values for common keys in the calling frame
filter_func : callable(1d-array) -> 1d-array<boolean>, default None
Can choose to replace values other than NA. Return True for values
that should be updated
raise_conflict : boolean
If True, will raise an error if the DataFrame and other both
contain data in the same place.
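        Examples
        --------
        A minimal illustrative sketch (made-up data); note the update happens
        in place:

        >>> df = pd.DataFrame({'A': [1, 2, 3], 'B': [400, 500, 600]})
        >>> new = pd.DataFrame({'B': [4, np.nan, 6]})
        >>> df.update(new)
        >>> df
           A      B
        0  1    4.0
        1  2  500.0
        2  3    6.0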
"""
# TODO: Support other joins
if join != 'left': # pragma: no cover
raise NotImplementedError("Only left join is supported")
if not isinstance(other, DataFrame):
other = DataFrame(other)
other = other.reindex_like(self)
for col in self.columns:
this = self[col].values
that = other[col].values
if filter_func is not None:
with np.errstate(all='ignore'):
mask = ~filter_func(this) | isnull(that)
else:
if raise_conflict:
mask_this = notnull(that)
mask_that = notnull(this)
if any(mask_this & mask_that):
raise ValueError("Data overlaps.")
if overwrite:
mask = isnull(that)
                    # don't overwrite columns unnecessarily
if mask.all():
continue
else:
mask = notnull(this)
self[col] = expressions.where(mask, this, that,
raise_on_error=True)
# ----------------------------------------------------------------------
# Misc methods
def first_valid_index(self):
"""
Return label for first non-NA/null value
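        Examples
        --------
        A minimal illustrative sketch (made-up data):

        >>> df = pd.DataFrame({'A': [np.nan, 2, 3]})
        >>> df.first_valid_index()
        1
        >>> df.last_valid_index()
        2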
"""
if len(self) == 0:
return None
return self.index[self.count(1) > 0][0]
def last_valid_index(self):
"""
Return label for last non-NA/null value
"""
if len(self) == 0:
return None
return self.index[self.count(1) > 0][-1]
# ----------------------------------------------------------------------
# Data reshaping
def pivot(self, index=None, columns=None, values=None):
"""
Reshape data (produce a "pivot" table) based on column values. Uses
unique values from index / columns to form axes of the resulting
DataFrame.
Parameters
----------
index : string or object, optional
Column name to use to make new frame's index. If None, uses
existing index.
columns : string or object
Column name to use to make new frame's columns
values : string or object, optional
Column name to use for populating new frame's values. If not
specified, all remaining columns will be used and the result will
have hierarchically indexed columns
Returns
-------
pivoted : DataFrame
See also
--------
DataFrame.pivot_table : generalization of pivot that can handle
duplicate values for one index/column pair
DataFrame.unstack : pivot based on the index values instead of a
column
Notes
-----
For finer-tuned control, see hierarchical indexing documentation along
with the related stack/unstack methods
Examples
--------
>>> df = pd.DataFrame({'foo': ['one','one','one','two','two','two'],
        ...                    'bar': ['A', 'B', 'C', 'A', 'B', 'C'],
        ...                    'baz': [1, 2, 3, 4, 5, 6]})
>>> df
foo bar baz
0 one A 1
1 one B 2
2 one C 3
3 two A 4
4 two B 5
5 two C 6
>>> df.pivot(index='foo', columns='bar', values='baz')
A B C
one 1 2 3
two 4 5 6
>>> df.pivot(index='foo', columns='bar')['baz']
A B C
one 1 2 3
two 4 5 6
"""
from pandas.core.reshape.reshape import pivot
return pivot(self, index=index, columns=columns, values=values)
def stack(self, level=-1, dropna=True):
"""
Pivot a level of the (possibly hierarchical) column labels, returning a
DataFrame (or Series in the case of an object with a single level of
column labels) having a hierarchical index with a new inner-most level
of row labels.
The level involved will automatically get sorted.
Parameters
----------
level : int, string, or list of these, default last level
Level(s) to stack, can pass level name
dropna : boolean, default True
Whether to drop rows in the resulting Frame/Series with no valid
values
Examples
        --------
>>> s
a b
one 1. 2.
two 3. 4.
>>> s.stack()
one a 1
b 2
two a 3
b 4
Returns
-------
stacked : DataFrame or Series
"""
from pandas.core.reshape.reshape import stack, stack_multiple
if isinstance(level, (tuple, list)):
return stack_multiple(self, level, dropna=dropna)
else:
return stack(self, level, dropna=dropna)
def unstack(self, level=-1, fill_value=None):
"""
Pivot a level of the (necessarily hierarchical) index labels, returning
a DataFrame having a new level of column labels whose inner-most level
consists of the pivoted index labels. If the index is not a MultiIndex,
the output will be a Series (the analogue of stack when the columns are
not a MultiIndex).
The level involved will automatically get sorted.
Parameters
----------
level : int, string, or list of these, default -1 (last level)
Level(s) of index to unstack, can pass level name
fill_value : replace NaN with this value if the unstack produces
missing values
            .. versionadded:: 0.18.0
See also
--------
DataFrame.pivot : Pivot a table based on column values.
DataFrame.stack : Pivot a level of the column labels (inverse operation
from `unstack`).
Examples
--------
>>> index = pd.MultiIndex.from_tuples([('one', 'a'), ('one', 'b'),
... ('two', 'a'), ('two', 'b')])
>>> s = pd.Series(np.arange(1.0, 5.0), index=index)
>>> s
one a 1.0
b 2.0
two a 3.0
b 4.0
dtype: float64
>>> s.unstack(level=-1)
a b
one 1.0 2.0
two 3.0 4.0
>>> s.unstack(level=0)
one two
a 1.0 3.0
b 2.0 4.0
>>> df = s.unstack(level=0)
>>> df.unstack()
one a 1.0
b 2.0
two a 3.0
b 4.0
dtype: float64
Returns
-------
unstacked : DataFrame or Series
"""
from pandas.core.reshape.reshape import unstack
return unstack(self, level, fill_value)
_shared_docs['melt'] = ("""
"Unpivots" a DataFrame from wide format to long format, optionally
leaving identifier variables set.
This function is useful to massage a DataFrame into a format where one
or more columns are identifier variables (`id_vars`), while all other
columns, considered measured variables (`value_vars`), are "unpivoted" to
the row axis, leaving just two non-identifier columns, 'variable' and
'value'.
%(versionadded)s
Parameters
----------
frame : DataFrame
id_vars : tuple, list, or ndarray, optional
Column(s) to use as identifier variables.
value_vars : tuple, list, or ndarray, optional
Column(s) to unpivot. If not specified, uses all columns that
are not set as `id_vars`.
var_name : scalar
Name to use for the 'variable' column. If None it uses
``frame.columns.name`` or 'variable'.
value_name : scalar, default 'value'
Name to use for the 'value' column.
col_level : int or string, optional
If columns are a MultiIndex then use this level to melt.
See also
--------
%(other)s
pivot_table
DataFrame.pivot
Examples
--------
>>> import pandas as pd
>>> df = pd.DataFrame({'A': {0: 'a', 1: 'b', 2: 'c'},
... 'B': {0: 1, 1: 3, 2: 5},
... 'C': {0: 2, 1: 4, 2: 6}})
>>> df
A B C
0 a 1 2
1 b 3 4
2 c 5 6
>>> %(caller)sid_vars=['A'], value_vars=['B'])
A variable value
0 a B 1
1 b B 3
2 c B 5
>>> %(caller)sid_vars=['A'], value_vars=['B', 'C'])
A variable value
0 a B 1
1 b B 3
2 c B 5
3 a C 2
4 b C 4
5 c C 6
The names of 'variable' and 'value' columns can be customized:
>>> %(caller)sid_vars=['A'], value_vars=['B'],
... var_name='myVarname', value_name='myValname')
A myVarname myValname
0 a B 1
1 b B 3
2 c B 5
If you have multi-index columns:
>>> df.columns = [list('ABC'), list('DEF')]
>>> df
A B C
D E F
0 a 1 2
1 b 3 4
2 c 5 6
>>> %(caller)scol_level=0, id_vars=['A'], value_vars=['B'])
A variable value
0 a B 1
1 b B 3
2 c B 5
>>> %(caller)sid_vars=[('A', 'D')], value_vars=[('B', 'E')])
(A, D) variable_0 variable_1 value
0 a B E 1
1 b B E 3
2 c B E 5
""")
@Appender(_shared_docs['melt'] %
dict(caller='df.melt(',
versionadded='.. versionadded:: 0.20.0\n',
other='melt'))
def melt(self, id_vars=None, value_vars=None, var_name=None,
value_name='value', col_level=None):
from pandas.core.reshape.reshape import melt
return melt(self, id_vars=id_vars, value_vars=value_vars,
var_name=var_name, value_name=value_name,
col_level=col_level)
# ----------------------------------------------------------------------
# Time series-related
def diff(self, periods=1, axis=0):
"""
1st discrete difference of object
Parameters
----------
periods : int, default 1
Periods to shift for forming difference
axis : {0 or 'index', 1 or 'columns'}, default 0
Take difference over rows (0) or columns (1).
            .. versionadded:: 0.16.1
Returns
-------
diffed : DataFrame
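        Examples
        --------
        A minimal illustrative sketch (made-up data):

        >>> df = pd.DataFrame({'a': [1, 2, 4, 7]})
        >>> df.diff()
             a
        0  NaN
        1  1.0
        2  2.0
        3  3.0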
"""
bm_axis = self._get_block_manager_axis(axis)
new_data = self._data.diff(n=periods, axis=bm_axis)
return self._constructor(new_data)
# ----------------------------------------------------------------------
# Function application
def _gotitem(self, key, ndim, subset=None):
"""
sub-classes to define
return a sliced object
Parameters
----------
key : string / list of selections
ndim : 1,2
requested ndim of result
subset : object, default None
subset to act on
"""
if subset is None:
subset = self
# TODO: _shallow_copy(subset)?
return self[key]
_agg_doc = dedent("""
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'],
... index=pd.date_range('1/1/2000', periods=10))
>>> df.iloc[3:7] = np.nan
Aggregate these functions across all columns
>>> df.agg(['sum', 'min'])
A B C
sum -0.182253 -0.614014 -2.909534
min -1.916563 -1.460076 -1.568297
Different aggregations per column
>>> df.agg({'A' : ['sum', 'min'], 'B' : ['min', 'max']})
A B
max NaN 1.514318
min -1.916563 -1.460076
sum -0.182253 NaN
See also
--------
pandas.DataFrame.apply
pandas.DataFrame.transform
pandas.DataFrame.groupby.aggregate
pandas.DataFrame.resample.aggregate
pandas.DataFrame.rolling.aggregate
""")
@Appender(_agg_doc)
@Appender(_shared_docs['aggregate'] % dict(
versionadded='.. versionadded:: 0.20.0',
**_shared_doc_kwargs))
def aggregate(self, func, axis=0, *args, **kwargs):
axis = self._get_axis_number(axis)
# TODO: flipped axis
result = None
if axis == 0:
try:
result, how = self._aggregate(func, axis=0, *args, **kwargs)
except TypeError:
pass
if result is None:
return self.apply(func, axis=axis, args=args, **kwargs)
return result
agg = aggregate
def apply(self, func, axis=0, broadcast=False, raw=False, reduce=None,
args=(), **kwds):
"""
Applies function along input axis of DataFrame.
Objects passed to functions are Series objects having index
either the DataFrame's index (axis=0) or the columns (axis=1).
Return type depends on whether passed function aggregates, or the
reduce argument if the DataFrame is empty.
Parameters
----------
func : function
Function to apply to each column/row
axis : {0 or 'index', 1 or 'columns'}, default 0
* 0 or 'index': apply function to each column
* 1 or 'columns': apply function to each row
broadcast : boolean, default False
For aggregation functions, return object of same size with values
propagated
raw : boolean, default False
If False, convert each row or column into a Series. If raw=True the
passed function will receive ndarray objects instead. If you are
just applying a NumPy reduction function this will achieve much
better performance
reduce : boolean or None, default None
Try to apply reduction procedures. If the DataFrame is empty,
apply will use reduce to determine whether the result should be a
Series or a DataFrame. If reduce is None (the default), apply's
        return value will be guessed by calling func on an empty Series (note:
while guessing, exceptions raised by func will be ignored). If
reduce is True a Series will always be returned, and if False a
DataFrame will always be returned.
args : tuple
Positional arguments to pass to function in addition to the
array/series
Additional keyword arguments will be passed as keywords to the function
Notes
-----
In the current implementation apply calls func twice on the
first column/row to decide whether it can take a fast or slow
code path. This can lead to unexpected behavior if func has
side-effects, as they will take effect twice for the first
column/row.
Examples
--------
>>> df.apply(numpy.sqrt) # returns DataFrame
>>> df.apply(numpy.sum, axis=0) # equiv to df.sum(0)
>>> df.apply(numpy.sum, axis=1) # equiv to df.sum(1)
See also
--------
DataFrame.applymap: For elementwise operations
DataFrame.aggregate: only perform aggregating type operations
        DataFrame.transform: only perform transforming type operations
Returns
-------
applied : Series or DataFrame
"""
axis = self._get_axis_number(axis)
ignore_failures = kwds.pop('ignore_failures', False)
# dispatch to agg
if axis == 0 and isinstance(func, (list, dict)):
return self.aggregate(func, axis=axis, *args, **kwds)
if len(self.columns) == 0 and len(self.index) == 0:
return self._apply_empty_result(func, axis, reduce, *args, **kwds)
# if we are a string, try to dispatch
if isinstance(func, compat.string_types):
if axis:
kwds['axis'] = axis
return getattr(self, func)(*args, **kwds)
if kwds or args and not isinstance(func, np.ufunc):
def f(x):
return func(x, *args, **kwds)
else:
f = func
if isinstance(f, np.ufunc):
with np.errstate(all='ignore'):
results = f(self.values)
return self._constructor(data=results, index=self.index,
columns=self.columns, copy=False)
else:
if not broadcast:
if not all(self.shape):
return self._apply_empty_result(func, axis, reduce, *args,
**kwds)
if raw and not self._is_mixed_type:
return self._apply_raw(f, axis)
else:
if reduce is None:
reduce = True
return self._apply_standard(
f, axis,
reduce=reduce,
ignore_failures=ignore_failures)
else:
return self._apply_broadcast(f, axis)
def _apply_empty_result(self, func, axis, reduce, *args, **kwds):
if reduce is None:
reduce = False
try:
reduce = not isinstance(func(_EMPTY_SERIES, *args, **kwds),
Series)
except Exception:
pass
if reduce:
return Series(NA, index=self._get_agg_axis(axis))
else:
return self.copy()
def _apply_raw(self, func, axis):
try:
result = lib.reduce(self.values, func, axis=axis)
except Exception:
result = np.apply_along_axis(func, axis, self.values)
# TODO: mixed type case
if result.ndim == 2:
return DataFrame(result, index=self.index, columns=self.columns)
else:
return Series(result, index=self._get_agg_axis(axis))
def _apply_standard(self, func, axis, ignore_failures=False, reduce=True):
# skip if we are mixed datelike and trying reduce across axes
# GH6125
if (reduce and axis == 1 and self._is_mixed_type and
self._is_datelike_mixed_type):
reduce = False
# try to reduce first (by default)
# this only matters if the reduction in values is of different dtype
# e.g. if we want to apply to a SparseFrame, then can't directly reduce
if reduce:
values = self.values
# we cannot reduce using non-numpy dtypes,
# as demonstrated in gh-12244
if not is_extension_type(values):
# Create a dummy Series from an empty array
index = self._get_axis(axis)
empty_arr = np.empty(len(index), dtype=values.dtype)
dummy = Series(empty_arr, index=self._get_axis(axis),
dtype=values.dtype)
try:
labels = self._get_agg_axis(axis)
result = lib.reduce(values, func, axis=axis, dummy=dummy,
labels=labels)
return Series(result, index=labels)
except Exception:
pass
dtype = object if self._is_mixed_type else None
if axis == 0:
series_gen = (self._ixs(i, axis=1)
for i in range(len(self.columns)))
res_index = self.columns
res_columns = self.index
elif axis == 1:
res_index = self.index
res_columns = self.columns
values = self.values
series_gen = (Series.from_array(arr, index=res_columns, name=name,
dtype=dtype)
for i, (arr, name) in enumerate(zip(values,
res_index)))
else: # pragma : no cover
raise AssertionError('Axis must be 0 or 1, got %s' % str(axis))
i = None
keys = []
results = {}
if ignore_failures:
successes = []
for i, v in enumerate(series_gen):
try:
results[i] = func(v)
keys.append(v.name)
successes.append(i)
except Exception:
pass
# so will work with MultiIndex
if len(successes) < len(res_index):
res_index = res_index.take(successes)
else:
try:
for i, v in enumerate(series_gen):
results[i] = func(v)
keys.append(v.name)
except Exception as e:
if hasattr(e, 'args'):
# make sure i is defined
if i is not None:
k = res_index[i]
e.args = e.args + ('occurred at index %s' %
pprint_thing(k), )
raise
if len(results) > 0 and is_sequence(results[0]):
if not isinstance(results[0], Series):
index = res_columns
else:
index = None
result = self._constructor(data=results, index=index)
result.columns = res_index
if axis == 1:
result = result.T
result = result._convert(datetime=True, timedelta=True, copy=False)
else:
result = Series(results)
result.index = res_index
return result
def _apply_broadcast(self, func, axis):
if axis == 0:
target = self
elif axis == 1:
target = self.T
else: # pragma: no cover
raise AssertionError('Axis must be 0 or 1, got %s' % axis)
result_values = np.empty_like(target.values)
columns = target.columns
for i, col in enumerate(columns):
result_values[:, i] = func(target[col])
result = self._constructor(result_values, index=target.index,
columns=target.columns)
if axis == 1:
result = result.T
return result
def applymap(self, func):
"""
Apply a function to a DataFrame that is intended to operate
elementwise, i.e. like doing map(func, series) for each series in the
DataFrame
Parameters
----------
func : function
Python function, returns a single value from a single value
Examples
--------
>>> df = pd.DataFrame(np.random.randn(3, 3))
>>> df
0 1 2
0 -0.029638 1.081563 1.280300
1 0.647747 0.831136 -1.549481
2 0.513416 -0.884417 0.195343
>>> df = df.applymap(lambda x: '%.2f' % x)
>>> df
0 1 2
0 -0.03 1.08 1.28
1 0.65 0.83 -1.55
2 0.51 -0.88 0.20
Returns
-------
applied : DataFrame
See also
--------
DataFrame.apply : For operations on rows/columns
"""
# if we have a dtype == 'M8[ns]', provide boxed values
def infer(x):
if x.empty:
return lib.map_infer(x, func)
return lib.map_infer(x.asobject, func)
return self.apply(infer)
# ----------------------------------------------------------------------
# Merging / joining methods
def append(self, other, ignore_index=False, verify_integrity=False):
"""
Append rows of `other` to the end of this frame, returning a new
object. Columns not in this frame are added as new columns.
Parameters
----------
other : DataFrame or Series/dict-like object, or list of these
The data to append.
ignore_index : boolean, default False
If True, do not use the index labels.
verify_integrity : boolean, default False
If True, raise ValueError on creating index with duplicates.
Returns
-------
appended : DataFrame
Notes
-----
If a list of dict/series is passed and the keys are all contained in
the DataFrame's index, the order of the columns in the resulting
DataFrame will be unchanged.
See also
--------
pandas.concat : General function to concatenate DataFrame, Series
or Panel objects
Examples
--------
>>> df = pd.DataFrame([[1, 2], [3, 4]], columns=list('AB'))
>>> df
A B
0 1 2
1 3 4
>>> df2 = pd.DataFrame([[5, 6], [7, 8]], columns=list('AB'))
>>> df.append(df2)
A B
0 1 2
1 3 4
0 5 6
1 7 8
With `ignore_index` set to True:
>>> df.append(df2, ignore_index=True)
A B
0 1 2
1 3 4
2 5 6
3 7 8
"""
if isinstance(other, (Series, dict)):
if isinstance(other, dict):
other = Series(other)
if other.name is None and not ignore_index:
raise TypeError('Can only append a Series if ignore_index=True'
' or if the Series has a name')
if other.name is None:
index = None
else:
# other must have the same index name as self, otherwise
# index name will be reset
index = Index([other.name], name=self.index.name)
combined_columns = self.columns.tolist() + self.columns.union(
other.index).difference(self.columns).tolist()
other = other.reindex(combined_columns, copy=False)
other = DataFrame(other.values.reshape((1, len(other))),
index=index,
columns=combined_columns)
other = other._convert(datetime=True, timedelta=True)
if not self.columns.equals(combined_columns):
self = self.reindex(columns=combined_columns)
elif isinstance(other, list) and not isinstance(other[0], DataFrame):
other = DataFrame(other)
if (self.columns.get_indexer(other.columns) >= 0).all():
other = other.loc[:, self.columns]
from pandas.core.reshape.concat import concat
if isinstance(other, (list, tuple)):
to_concat = [self] + other
else:
to_concat = [self, other]
return concat(to_concat, ignore_index=ignore_index,
verify_integrity=verify_integrity)
def join(self, other, on=None, how='left', lsuffix='', rsuffix='',
sort=False):
"""
Join columns with other DataFrame either on index or on a key
column. Efficiently Join multiple DataFrame objects by index at once by
passing a list.
Parameters
----------
other : DataFrame, Series with name field set, or list of DataFrame
Index should be similar to one of the columns in this one. If a
Series is passed, its name attribute must be set, and that will be
used as the column name in the resulting joined DataFrame
on : column name, tuple/list of column names, or array-like
Column(s) in the caller to join on the index in other,
otherwise joins index-on-index. If multiples
columns given, the passed DataFrame must have a MultiIndex. Can
pass an array as the join key if not already contained in the
calling DataFrame. Like an Excel VLOOKUP operation
how : {'left', 'right', 'outer', 'inner'}, default: 'left'
How to handle the operation of the two objects.
* left: use calling frame's index (or column if on is specified)
* right: use other frame's index
* outer: form union of calling frame's index (or column if on is
specified) with other frame's index, and sort it
lexicographically
* inner: form intersection of calling frame's index (or column if
on is specified) with other frame's index, preserving the order
of the calling's one
lsuffix : string
Suffix to use from left frame's overlapping columns
rsuffix : string
Suffix to use from right frame's overlapping columns
sort : boolean, default False
Order result DataFrame lexicographically by the join key. If False,
the order of the join key depends on the join type (how keyword)
Notes
-----
on, lsuffix, and rsuffix options are not supported when passing a list
of DataFrame objects
Examples
--------
>>> caller = pd.DataFrame({'key': ['K0', 'K1', 'K2', 'K3', 'K4', 'K5'],
... 'A': ['A0', 'A1', 'A2', 'A3', 'A4', 'A5']})
>>> caller
A key
0 A0 K0
1 A1 K1
2 A2 K2
3 A3 K3
4 A4 K4
5 A5 K5
>>> other = pd.DataFrame({'key': ['K0', 'K1', 'K2'],
... 'B': ['B0', 'B1', 'B2']})
>>> other
B key
0 B0 K0
1 B1 K1
2 B2 K2
Join DataFrames using their indexes.
>>> caller.join(other, lsuffix='_caller', rsuffix='_other')
            A key_caller    B key_other
0 A0 K0 B0 K0
1 A1 K1 B1 K1
2 A2 K2 B2 K2
3 A3 K3 NaN NaN
4 A4 K4 NaN NaN
5 A5 K5 NaN NaN
If we want to join using the key columns, we need to set key to be
the index in both caller and other. The joined DataFrame will have
key as its index.
>>> caller.set_index('key').join(other.set_index('key'))
              A    B
key
K0 A0 B0
K1 A1 B1
K2 A2 B2
K3 A3 NaN
K4 A4 NaN
K5 A5 NaN
Another option to join using the key columns is to use the on
parameter. DataFrame.join always uses other's index but we can use any
column in the caller. This method preserves the original caller's
index in the result.
>>> caller.join(other.set_index('key'), on='key')
            A key    B
0 A0 K0 B0
1 A1 K1 B1
2 A2 K2 B2
3 A3 K3 NaN
4 A4 K4 NaN
5 A5 K5 NaN
See also
--------
DataFrame.merge : For column(s)-on-columns(s) operations
Returns
-------
joined : DataFrame
"""
# For SparseDataFrame's benefit
return self._join_compat(other, on=on, how=how, lsuffix=lsuffix,
rsuffix=rsuffix, sort=sort)
def _join_compat(self, other, on=None, how='left', lsuffix='', rsuffix='',
sort=False):
from pandas.core.reshape.merge import merge
from pandas.core.reshape.concat import concat
if isinstance(other, Series):
if other.name is None:
raise ValueError('Other Series must have a name')
other = DataFrame({other.name: other})
if isinstance(other, DataFrame):
return merge(self, other, left_on=on, how=how,
left_index=on is None, right_index=True,
suffixes=(lsuffix, rsuffix), sort=sort)
else:
if on is not None:
raise ValueError('Joining multiple DataFrames only supported'
' for joining on index')
# join indexes only using concat
if how == 'left':
how = 'outer'
join_axes = [self.index]
else:
join_axes = None
frames = [self] + list(other)
can_concat = all(df.index.is_unique for df in frames)
if can_concat:
return concat(frames, axis=1, join=how, join_axes=join_axes,
verify_integrity=True)
joined = frames[0]
for frame in frames[1:]:
joined = merge(joined, frame, how=how, left_index=True,
right_index=True)
return joined
@Substitution('')
@Appender(_merge_doc, indents=2)
def merge(self, right, how='inner', on=None, left_on=None, right_on=None,
left_index=False, right_index=False, sort=False,
suffixes=('_x', '_y'), copy=True, indicator=False):
from pandas.core.reshape.merge import merge
return merge(self, right, how=how, on=on, left_on=left_on,
right_on=right_on, left_index=left_index,
right_index=right_index, sort=sort, suffixes=suffixes,
copy=copy, indicator=indicator)
def round(self, decimals=0, *args, **kwargs):
"""
Round a DataFrame to a variable number of decimal places.
.. versionadded:: 0.17.0
Parameters
----------
decimals : int, dict, Series
Number of decimal places to round each column to. If an int is
given, round each column to the same number of places.
Otherwise dict and Series round to variable numbers of places.
Column names should be in the keys if `decimals` is a
dict-like, or in the index if `decimals` is a Series. Any
columns not included in `decimals` will be left as is. Elements
of `decimals` which are not columns of the input will be
ignored.
Examples
--------
>>> df = pd.DataFrame(np.random.random([3, 3]),
... columns=['A', 'B', 'C'], index=['first', 'second', 'third'])
>>> df
A B C
first 0.028208 0.992815 0.173891
second 0.038683 0.645646 0.577595
third 0.877076 0.149370 0.491027
>>> df.round(2)
A B C
first 0.03 0.99 0.17
second 0.04 0.65 0.58
third 0.88 0.15 0.49
>>> df.round({'A': 1, 'C': 2})
A B C
first 0.0 0.992815 0.17
second 0.0 0.645646 0.58
third 0.9 0.149370 0.49
>>> decimals = pd.Series([1, 0, 2], index=['A', 'B', 'C'])
>>> df.round(decimals)
                  A    B     C
        first   0.0  1.0  0.17
        second  0.0  1.0  0.58
        third   0.9  0.0  0.49
Returns
-------
DataFrame object
See Also
--------
numpy.around
Series.round
"""
from pandas.core.reshape.concat import concat
def _dict_round(df, decimals):
for col, vals in df.iteritems():
try:
yield _series_round(vals, decimals[col])
except KeyError:
yield vals
def _series_round(s, decimals):
if is_integer_dtype(s) or is_float_dtype(s):
return s.round(decimals)
return s
nv.validate_round(args, kwargs)
if isinstance(decimals, (dict, Series)):
if isinstance(decimals, Series):
if not decimals.index.is_unique:
raise ValueError("Index of decimals must be unique")
new_cols = [col for col in _dict_round(self, decimals)]
elif is_integer(decimals):
# Dispatch to Series.round
new_cols = [_series_round(v, decimals)
for _, v in self.iteritems()]
else:
raise TypeError("decimals must be an integer, a dict-like or a "
"Series")
if len(new_cols) > 0:
return self._constructor(concat(new_cols, axis=1),
index=self.index,
columns=self.columns)
else:
return self
# ----------------------------------------------------------------------
# Statistical methods, etc.
def corr(self, method='pearson', min_periods=1):
"""
Compute pairwise correlation of columns, excluding NA/null values
Parameters
----------
method : {'pearson', 'kendall', 'spearman'}
* pearson : standard correlation coefficient
* kendall : Kendall Tau correlation coefficient
* spearman : Spearman rank correlation
min_periods : int, optional
Minimum number of observations required per pair of columns
to have a valid result. Currently only available for pearson
and spearman correlation
Returns
-------
y : DataFrame
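        Examples
        --------
        A minimal illustrative sketch with made-up, perfectly (anti-)correlated
        columns:

        >>> df = pd.DataFrame({'x': [1, 2, 3], 'y': [2, 4, 6], 'z': [3, 2, 1]})
        >>> df.corr()
             x    y    z
        x  1.0  1.0 -1.0
        y  1.0  1.0 -1.0
        z -1.0 -1.0  1.0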
"""
numeric_df = self._get_numeric_data()
cols = numeric_df.columns
idx = cols.copy()
mat = numeric_df.values
if method == 'pearson':
correl = libalgos.nancorr(_ensure_float64(mat), minp=min_periods)
elif method == 'spearman':
correl = libalgos.nancorr_spearman(_ensure_float64(mat),
minp=min_periods)
else:
if min_periods is None:
min_periods = 1
mat = _ensure_float64(mat).T
corrf = nanops.get_corr_func(method)
K = len(cols)
correl = np.empty((K, K), dtype=float)
mask = np.isfinite(mat)
for i, ac in enumerate(mat):
for j, bc in enumerate(mat):
if i > j:
continue
valid = mask[i] & mask[j]
if valid.sum() < min_periods:
c = NA
elif i == j:
c = 1.
elif not valid.all():
c = corrf(ac[valid], bc[valid])
else:
c = corrf(ac, bc)
correl[i, j] = c
correl[j, i] = c
return self._constructor(correl, index=idx, columns=cols)
def cov(self, min_periods=None):
"""
Compute pairwise covariance of columns, excluding NA/null values
Parameters
----------
min_periods : int, optional
Minimum number of observations required per pair of columns
to have a valid result.
Returns
-------
y : DataFrame
Notes
-----
`y` contains the covariance matrix of the DataFrame's time series.
The covariance is normalized by N-1 (unbiased estimator).
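        Examples
        --------
        A minimal illustrative sketch (made-up data):

        >>> df = pd.DataFrame({'x': [1, 2, 3], 'y': [2, 4, 6]})
        >>> df.cov()
             x    y
        x  1.0  2.0
        y  2.0  4.0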
"""
numeric_df = self._get_numeric_data()
cols = numeric_df.columns
idx = cols.copy()
mat = numeric_df.values
if notnull(mat).all():
if min_periods is not None and min_periods > len(mat):
baseCov = np.empty((mat.shape[1], mat.shape[1]))
baseCov.fill(np.nan)
else:
baseCov = np.cov(mat.T)
baseCov = baseCov.reshape((len(cols), len(cols)))
else:
baseCov = libalgos.nancorr(_ensure_float64(mat), cov=True,
minp=min_periods)
return self._constructor(baseCov, index=idx, columns=cols)
def corrwith(self, other, axis=0, drop=False):
"""
Compute pairwise correlation between rows or columns of two DataFrame
objects.
Parameters
----------
other : DataFrame
axis : {0 or 'index', 1 or 'columns'}, default 0
0 or 'index' to compute column-wise, 1 or 'columns' for row-wise
drop : boolean, default False
Drop missing indices from result, default returns union of all
Returns
-------
correls : Series
"""
axis = self._get_axis_number(axis)
if isinstance(other, Series):
return self.apply(other.corr, axis=axis)
this = self._get_numeric_data()
other = other._get_numeric_data()
left, right = this.align(other, join='inner', copy=False)
# mask missing values
left = left + right * 0
right = right + left * 0
if axis == 1:
left = left.T
right = right.T
# demeaned data
ldem = left - left.mean()
rdem = right - right.mean()
num = (ldem * rdem).sum()
dom = (left.count() - 1) * left.std() * right.std()
correl = num / dom
if not drop:
raxis = 1 if axis == 0 else 0
result_index = this._get_axis(raxis).union(other._get_axis(raxis))
correl = correl.reindex(result_index)
return correl
# ----------------------------------------------------------------------
# ndarray-like stats methods
def count(self, axis=0, level=None, numeric_only=False):
"""
Return Series with number of non-NA/null observations over requested
axis. Works with non-floating point data as well (detects NaN and None)
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
0 or 'index' for row-wise, 1 or 'columns' for column-wise
level : int or level name, default None
If the axis is a MultiIndex (hierarchical), count along a
particular level, collapsing into a DataFrame
numeric_only : boolean, default False
Include only float, int, boolean data
Returns
-------
count : Series (or DataFrame if level specified)
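        Examples
        --------
        A minimal illustrative sketch (made-up data):

        >>> df = pd.DataFrame({'A': [1, np.nan, 3], 'B': ['x', 'y', None]})
        >>> df.count()
        A    2
        B    2
        dtype: int64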
"""
axis = self._get_axis_number(axis)
if level is not None:
return self._count_level(level, axis=axis,
numeric_only=numeric_only)
if numeric_only:
frame = self._get_numeric_data()
else:
frame = self
# GH #423
if len(frame._get_axis(axis)) == 0:
result = Series(0, index=frame._get_agg_axis(axis))
else:
if frame._is_mixed_type:
result = notnull(frame).sum(axis=axis)
else:
counts = notnull(frame.values).sum(axis=axis)
result = Series(counts, index=frame._get_agg_axis(axis))
return result.astype('int64')
def _count_level(self, level, axis=0, numeric_only=False):
if numeric_only:
frame = self._get_numeric_data()
else:
frame = self
count_axis = frame._get_axis(axis)
agg_axis = frame._get_agg_axis(axis)
if not isinstance(count_axis, MultiIndex):
raise TypeError("Can only count levels on hierarchical %s." %
self._get_axis_name(axis))
if frame._is_mixed_type:
# Since we have mixed types, calling notnull(frame.values) might
# upcast everything to object
mask = notnull(frame).values
else:
# But use the speedup when we have homogeneous dtypes
mask = notnull(frame.values)
if axis == 1:
# We're transposing the mask rather than frame to avoid potential
# upcasts to object, which induces a ~20x slowdown
mask = mask.T
if isinstance(level, compat.string_types):
level = count_axis._get_level_number(level)
level_index = count_axis.levels[level]
labels = _ensure_int64(count_axis.labels[level])
counts = lib.count_level_2d(mask, labels, len(level_index), axis=0)
result = DataFrame(counts, index=level_index, columns=agg_axis)
if axis == 1:
# Undo our earlier transpose
return result.T
else:
return result
def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None,
filter_type=None, **kwds):
axis = self._get_axis_number(axis)
def f(x):
return op(x, axis=axis, skipna=skipna, **kwds)
labels = self._get_agg_axis(axis)
# exclude timedelta/datetime unless we are uniform types
if axis == 1 and self._is_mixed_type and self._is_datelike_mixed_type:
numeric_only = True
if numeric_only is None:
try:
values = self.values
result = f(values)
except Exception as e:
# try by-column first
if filter_type is None and axis == 0:
try:
# this can end up with a non-reduction
# but not always. if the types are mixed
# with datelike then need to make sure a series
# we only end up here if we have not specified
# numeric_only and yet we have tried a
# column-by-column reduction, where we have mixed type.
# So let's just do what we can
result = self.apply(f, reduce=False,
ignore_failures=True)
if result.ndim == self.ndim:
result = result.iloc[0]
return result
except:
pass
if filter_type is None or filter_type == 'numeric':
data = self._get_numeric_data()
elif filter_type == 'bool':
data = self._get_bool_data()
else: # pragma: no cover
e = NotImplementedError("Handling exception with filter_"
"type %s not implemented." %
filter_type)
raise_with_traceback(e)
with np.errstate(all='ignore'):
result = f(data.values)
labels = data._get_agg_axis(axis)
else:
if numeric_only:
if filter_type is None or filter_type == 'numeric':
data = self._get_numeric_data()
elif filter_type == 'bool':
data = self._get_bool_data()
else: # pragma: no cover
                    msg = ("Generating numeric_only data with filter_type %s "
                           "not supported." % filter_type)
raise NotImplementedError(msg)
values = data.values
labels = data._get_agg_axis(axis)
else:
values = self.values
result = f(values)
if hasattr(result, 'dtype') and is_object_dtype(result.dtype):
try:
if filter_type is None or filter_type == 'numeric':
result = result.astype(np.float64)
elif filter_type == 'bool' and notnull(result).all():
result = result.astype(np.bool_)
except (ValueError, TypeError):
# try to coerce to the original dtypes item by item if we can
if axis == 0:
result = coerce_to_dtypes(result, self.dtypes)
return Series(result, index=labels)
def nunique(self, axis=0, dropna=True):
"""
Return Series with number of distinct observations over requested
axis.
.. versionadded:: 0.20.0
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
dropna : boolean, default True
Don't include NaN in the counts.
Returns
-------
nunique : Series
Examples
--------
>>> df = pd.DataFrame({'A': [1, 2, 3], 'B': [1, 1, 1]})
>>> df.nunique()
A 3
B 1
>>> df.nunique(axis=1)
0 1
1 2
2 2
"""
return self.apply(Series.nunique, axis=axis, dropna=dropna)
def idxmin(self, axis=0, skipna=True):
"""
Return index of first occurrence of minimum over requested axis.
NA/null values are excluded.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
0 or 'index' for row-wise, 1 or 'columns' for column-wise
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA
Returns
-------
idxmin : Series
Notes
-----
This method is the DataFrame version of ``ndarray.argmin``.
See Also
--------
Series.idxmin
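        Examples
        --------
        A minimal illustrative sketch (made-up data):

        >>> df = pd.DataFrame({'A': [1, 3, 2], 'B': [9, 7, 8]})
        >>> df.idxmin()
        A    0
        B    1
        dtype: int64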
"""
axis = self._get_axis_number(axis)
indices = nanops.nanargmin(self.values, axis=axis, skipna=skipna)
index = self._get_axis(axis)
result = [index[i] if i >= 0 else NA for i in indices]
return Series(result, index=self._get_agg_axis(axis))
def idxmax(self, axis=0, skipna=True):
"""
Return index of first occurrence of maximum over requested axis.
NA/null values are excluded.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
0 or 'index' for row-wise, 1 or 'columns' for column-wise
skipna : boolean, default True
            Exclude NA/null values. If an entire row/column is NA, the result
            will be NA.
Returns
-------
idxmax : Series
Notes
-----
This method is the DataFrame version of ``ndarray.argmax``.
See Also
--------
Series.idxmax
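        Examples
        --------
        A minimal illustrative sketch (made-up data):

        >>> df = pd.DataFrame({'A': [1, 3, 2], 'B': [9, 7, 8]})
        >>> df.idxmax()
        A    1
        B    0
        dtype: int64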
"""
axis = self._get_axis_number(axis)
indices = nanops.nanargmax(self.values, axis=axis, skipna=skipna)
index = self._get_axis(axis)
result = [index[i] if i >= 0 else NA for i in indices]
return Series(result, index=self._get_agg_axis(axis))
def _get_agg_axis(self, axis_num):
""" let's be explict about this """
if axis_num == 0:
return self.columns
elif axis_num == 1:
return self.index
else:
raise ValueError('Axis must be 0 or 1 (got %r)' % axis_num)
def mode(self, axis=0, numeric_only=False):
"""
Gets the mode(s) of each element along the axis selected. Adds a row
for each mode per label, fills in gaps with nan.
Note that there could be multiple values returned for the selected
axis (when more than one item share the maximum frequency), which is
the reason why a dataframe is returned. If you want to impute missing
values with the mode in a dataframe ``df``, you can just do this:
``df.fillna(df.mode().iloc[0])``
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
* 0 or 'index' : get mode of each column
* 1 or 'columns' : get mode of each row
numeric_only : boolean, default False
if True, only apply to numeric columns
Returns
-------
modes : DataFrame (sorted)
Examples
--------
>>> df = pd.DataFrame({'A': [1, 2, 1, 2, 1, 2, 3]})
>>> df.mode()
A
0 1
1 2
"""
data = self if not numeric_only else self._get_numeric_data()
def f(s):
return s.mode()
return data.apply(f, axis=axis)
def quantile(self, q=0.5, axis=0, numeric_only=True,
interpolation='linear'):
"""
Return values at the given quantile over requested axis, a la
numpy.percentile.
Parameters
----------
q : float or array-like, default 0.5 (50% quantile)
0 <= q <= 1, the quantile(s) to compute
axis : {0, 1, 'index', 'columns'} (default 0)
0 or 'index' for row-wise, 1 or 'columns' for column-wise
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
.. versionadded:: 0.18.0
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`:
* linear: `i + (j - i) * fraction`, where `fraction` is the
fractional part of the index surrounded by `i` and `j`.
* lower: `i`.
* higher: `j`.
* nearest: `i` or `j` whichever is nearest.
* midpoint: (`i` + `j`) / 2.
Returns
-------
quantiles : Series or DataFrame
- If ``q`` is an array, a DataFrame will be returned where the
index is ``q``, the columns are the columns of self, and the
values are the quantiles.
- If ``q`` is a float, a Series will be returned where the
index is the columns of self and the values are the quantiles.
Examples
--------
>>> df = DataFrame(np.array([[1, 1], [2, 10], [3, 100], [4, 100]]),
        ...                   columns=['a', 'b'])
>>> df.quantile(.1)
a 1.3
b 3.7
dtype: float64
>>> df.quantile([.1, .5])
a b
0.1 1.3 3.7
0.5 2.5 55.0
"""
self._check_percentile(q)
data = self._get_numeric_data() if numeric_only else self
axis = self._get_axis_number(axis)
is_transposed = axis == 1
if is_transposed:
data = data.T
result = data._data.quantile(qs=q,
axis=1,
interpolation=interpolation,
transposed=is_transposed)
if result.ndim == 2:
result = self._constructor(result)
else:
result = self._constructor_sliced(result, name=q)
if is_transposed:
result = result.T
return result
def to_timestamp(self, freq=None, how='start', axis=0, copy=True):
"""
Cast to DatetimeIndex of timestamps, at *beginning* of period
Parameters
----------
freq : string, default frequency of PeriodIndex
Desired frequency
how : {'s', 'e', 'start', 'end'}
Convention for converting period to timestamp; start of period
vs. end
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to convert (the index by default)
copy : boolean, default True
If false then underlying input data is not copied
Returns
-------
df : DataFrame with DatetimeIndex
"""
new_data = self._data
if copy:
new_data = new_data.copy()
axis = self._get_axis_number(axis)
if axis == 0:
new_data.set_axis(1, self.index.to_timestamp(freq=freq, how=how))
elif axis == 1:
new_data.set_axis(0, self.columns.to_timestamp(freq=freq, how=how))
else: # pragma: no cover
raise AssertionError('Axis must be 0 or 1. Got %s' % str(axis))
return self._constructor(new_data)
def to_period(self, freq=None, axis=0, copy=True):
"""
Convert DataFrame from DatetimeIndex to PeriodIndex with desired
frequency (inferred from index if not passed)
Parameters
----------
freq : string, default
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to convert (the index by default)
copy : boolean, default True
If False then underlying input data is not copied
Returns
-------
        df : DataFrame with PeriodIndex
"""
new_data = self._data
if copy:
new_data = new_data.copy()
axis = self._get_axis_number(axis)
if axis == 0:
new_data.set_axis(1, self.index.to_period(freq=freq))
elif axis == 1:
new_data.set_axis(0, self.columns.to_period(freq=freq))
else: # pragma: no cover
raise AssertionError('Axis must be 0 or 1. Got %s' % str(axis))
return self._constructor(new_data)
def isin(self, values):
"""
Return boolean DataFrame showing whether each element in the
DataFrame is contained in values.
Parameters
----------
values : iterable, Series, DataFrame or dictionary
The result will only be true at a location if all the
            labels match. If `values` is a Series, the match is on its index. If
`values` is a dictionary, the keys must be the column names,
which must match. If `values` is a DataFrame,
then both the index and column labels must match.
Returns
-------
DataFrame of booleans
Examples
--------
When ``values`` is a list:
>>> df = DataFrame({'A': [1, 2, 3], 'B': ['a', 'b', 'f']})
>>> df.isin([1, 3, 12, 'a'])
A B
0 True True
1 False False
2 True False
When ``values`` is a dict:
>>> df = DataFrame({'A': [1, 2, 3], 'B': [1, 4, 7]})
>>> df.isin({'A': [1, 3], 'B': [4, 7, 12]})
A B
0 True False # Note that B didn't match the 1 here.
1 False True
2 True True
When ``values`` is a Series or DataFrame:
>>> df = DataFrame({'A': [1, 2, 3], 'B': ['a', 'b', 'f']})
>>> other = DataFrame({'A': [1, 3, 3, 2], 'B': ['e', 'f', 'f', 'e']})
>>> df.isin(other)
A B
0 True False
1 False False # Column A in `other` has a 3, but not at index 1.
2 True True
"""
if isinstance(values, dict):
from collections import defaultdict
from pandas.core.reshape.concat import concat
values = defaultdict(list, values)
return concat((self.iloc[:, [i]].isin(values[col])
for i, col in enumerate(self.columns)), axis=1)
elif isinstance(values, Series):
if not values.index.is_unique:
raise ValueError("cannot compute isin with "
"a duplicate axis.")
return self.eq(values.reindex_like(self), axis='index')
elif isinstance(values, DataFrame):
if not (values.columns.is_unique and values.index.is_unique):
raise ValueError("cannot compute isin with "
"a duplicate axis.")
return self.eq(values.reindex_like(self))
else:
if not is_list_like(values):
raise TypeError("only list-like or dict-like objects are "
"allowed to be passed to DataFrame.isin(), "
"you passed a "
"{0!r}".format(type(values).__name__))
return DataFrame(
algorithms.isin(self.values.ravel(),
values).reshape(self.shape), self.index,
self.columns)
DataFrame._setup_axes(['index', 'columns'], info_axis=1, stat_axis=0,
axes_are_reversed=True, aliases={'rows': 0})
DataFrame._add_numeric_operations()
DataFrame._add_series_or_dataframe_operations()
_EMPTY_SERIES = Series([])
def _arrays_to_mgr(arrays, arr_names, index, columns, dtype=None):
"""
Segregate Series based on type and coerce into matrices.
Needs to handle a lot of exceptional cases.
"""
# figure out the index, if necessary
if index is None:
index = extract_index(arrays)
else:
index = _ensure_index(index)
# don't force copy because getting jammed in an ndarray anyway
arrays = _homogenize(arrays, index, dtype)
# from BlockManager perspective
axes = [_ensure_index(columns), _ensure_index(index)]
return create_block_manager_from_arrays(arrays, arr_names, axes)
def extract_index(data):
from pandas.core.index import _union_indexes
index = None
if len(data) == 0:
index = Index([])
elif len(data) > 0:
raw_lengths = []
indexes = []
have_raw_arrays = False
have_series = False
have_dicts = False
for v in data:
if isinstance(v, Series):
have_series = True
indexes.append(v.index)
elif isinstance(v, dict):
have_dicts = True
indexes.append(list(v.keys()))
elif is_list_like(v) and getattr(v, 'ndim', 1) == 1:
have_raw_arrays = True
raw_lengths.append(len(v))
if not indexes and not raw_lengths:
raise ValueError('If using all scalar values, you must pass'
' an index')
if have_series or have_dicts:
index = _union_indexes(indexes)
if have_raw_arrays:
lengths = list(set(raw_lengths))
if len(lengths) > 1:
raise ValueError('arrays must all be same length')
if have_dicts:
raise ValueError('Mixing dicts with non-Series may lead to '
'ambiguous ordering.')
if have_series:
if lengths[0] != len(index):
msg = ('array length %d does not match index length %d' %
(lengths[0], len(index)))
raise ValueError(msg)
else:
index = _default_index(lengths[0])
return _ensure_index(index)
def _prep_ndarray(values, copy=True):
if not isinstance(values, (np.ndarray, Series, Index)):
if len(values) == 0:
return np.empty((0, 0), dtype=object)
def convert(v):
return maybe_convert_platform(v)
# we could have a 1-dim or 2-dim list here
# this is equiv of np.asarray, but does object conversion
# and platform dtype preservation
try:
if is_list_like(values[0]) or hasattr(values[0], 'len'):
values = np.array([convert(v) for v in values])
else:
values = convert(values)
except:
values = convert(values)
else:
# drop subclass info, do not copy data
values = np.asarray(values)
if copy:
values = values.copy()
if values.ndim == 1:
values = values.reshape((values.shape[0], 1))
elif values.ndim != 2:
raise ValueError('Must pass 2-d input')
return values
def _to_arrays(data, columns, coerce_float=False, dtype=None):
"""
Return list of arrays, columns
"""
if isinstance(data, DataFrame):
if columns is not None:
arrays = [data._ixs(i, axis=1).values
for i, col in enumerate(data.columns) if col in columns]
else:
columns = data.columns
arrays = [data._ixs(i, axis=1).values for i in range(len(columns))]
return arrays, columns
if not len(data):
if isinstance(data, np.ndarray):
columns = data.dtype.names
if columns is not None:
return [[]] * len(columns), columns
return [], [] # columns if columns is not None else []
if isinstance(data[0], (list, tuple)):
return _list_to_arrays(data, columns, coerce_float=coerce_float,
dtype=dtype)
elif isinstance(data[0], collections.Mapping):
return _list_of_dict_to_arrays(data, columns,
coerce_float=coerce_float, dtype=dtype)
elif isinstance(data[0], Series):
return _list_of_series_to_arrays(data, columns,
coerce_float=coerce_float,
dtype=dtype)
elif isinstance(data[0], Categorical):
if columns is None:
columns = _default_index(len(data))
return data, columns
elif (isinstance(data, (np.ndarray, Series, Index)) and
data.dtype.names is not None):
columns = list(data.dtype.names)
arrays = [data[k] for k in columns]
return arrays, columns
else:
# last ditch effort
data = lmap(tuple, data)
return _list_to_arrays(data, columns, coerce_float=coerce_float,
dtype=dtype)
def _masked_rec_array_to_mgr(data, index, columns, dtype, copy):
""" extract from a masked rec array and create the manager """
# essentially process a record array then fill it
fill_value = data.fill_value
fdata = ma.getdata(data)
if index is None:
index = _get_names_from_index(fdata)
if index is None:
index = _default_index(len(data))
index = _ensure_index(index)
if columns is not None:
columns = _ensure_index(columns)
arrays, arr_columns = _to_arrays(fdata, columns)
# fill if needed
new_arrays = []
for fv, arr, col in zip(fill_value, arrays, arr_columns):
mask = ma.getmaskarray(data[col])
if mask.any():
arr, fv = maybe_upcast(arr, fill_value=fv, copy=True)
arr[mask] = fv
new_arrays.append(arr)
# create the manager
arrays, arr_columns = _reorder_arrays(new_arrays, arr_columns, columns)
if columns is None:
columns = arr_columns
mgr = _arrays_to_mgr(arrays, arr_columns, index, columns)
if copy:
mgr = mgr.copy()
return mgr
def _reorder_arrays(arrays, arr_columns, columns):
# reorder according to the columns
if (columns is not None and len(columns) and arr_columns is not None and
len(arr_columns)):
indexer = _ensure_index(arr_columns).get_indexer(columns)
arr_columns = _ensure_index([arr_columns[i] for i in indexer])
arrays = [arrays[i] for i in indexer]
return arrays, arr_columns
def _list_to_arrays(data, columns, coerce_float=False, dtype=None):
if len(data) > 0 and isinstance(data[0], tuple):
content = list(lib.to_object_array_tuples(data).T)
else:
# list of lists
content = list(lib.to_object_array(data).T)
return _convert_object_array(content, columns, dtype=dtype,
coerce_float=coerce_float)
def _list_of_series_to_arrays(data, columns, coerce_float=False, dtype=None):
from pandas.core.index import _get_combined_index
if columns is None:
columns = _get_combined_index([
s.index for s in data if getattr(s, 'index', None) is not None
])
indexer_cache = {}
aligned_values = []
for s in data:
index = getattr(s, 'index', None)
if index is None:
index = _default_index(len(s))
if id(index) in indexer_cache:
indexer = indexer_cache[id(index)]
else:
indexer = indexer_cache[id(index)] = index.get_indexer(columns)
values = _values_from_object(s)
aligned_values.append(algorithms.take_1d(values, indexer))
values = np.vstack(aligned_values)
if values.dtype == np.object_:
content = list(values.T)
return _convert_object_array(content, columns, dtype=dtype,
coerce_float=coerce_float)
else:
return values.T, columns
def _list_of_dict_to_arrays(data, columns, coerce_float=False, dtype=None):
if columns is None:
gen = (list(x.keys()) for x in data)
sort = not any(isinstance(d, OrderedDict) for d in data)
columns = lib.fast_unique_multiple_list_gen(gen, sort=sort)
# assure that they are of the base dict class and not of derived
# classes
data = [(type(d) is dict) and d or dict(d) for d in data]
content = list(lib.dicts_to_array(data, list(columns)).T)
return _convert_object_array(content, columns, dtype=dtype,
coerce_float=coerce_float)
def _convert_object_array(content, columns, coerce_float=False, dtype=None):
if columns is None:
columns = _default_index(len(content))
else:
if len(columns) != len(content): # pragma: no cover
# caller's responsibility to check for this...
raise AssertionError('%d columns passed, passed data had %s '
'columns' % (len(columns), len(content)))
# provide soft conversion of object dtypes
def convert(arr):
if dtype != object and dtype != np.object:
arr = lib.maybe_convert_objects(arr, try_float=coerce_float)
arr = maybe_cast_to_datetime(arr, dtype)
return arr
arrays = [convert(arr) for arr in content]
return arrays, columns
def _get_names_from_index(data):
has_some_name = any([getattr(s, 'name', None) is not None for s in data])
if not has_some_name:
return _default_index(len(data))
index = lrange(len(data))
count = 0
for i, s in enumerate(data):
n = getattr(s, 'name', None)
if n is not None:
index[i] = n
else:
index[i] = 'Unnamed %d' % count
count += 1
return index
def _homogenize(data, index, dtype=None):
from pandas.core.series import _sanitize_array
oindex = None
homogenized = []
for v in data:
if isinstance(v, Series):
if dtype is not None:
v = v.astype(dtype)
if v.index is not index:
# Forces alignment. No need to copy data since we
# are putting it into an ndarray later
v = v.reindex(index, copy=False)
else:
if isinstance(v, dict):
if oindex is None:
oindex = index.astype('O')
if isinstance(index, (DatetimeIndex, TimedeltaIndex)):
v = _dict_compat(v)
else:
v = dict(v)
v = lib.fast_multiget(v, oindex.values, default=NA)
v = _sanitize_array(v, index, dtype=dtype, copy=False,
raise_cast_failure=False)
homogenized.append(v)
return homogenized
def _from_nested_dict(data):
# TODO: this should be seriously cythonized
new_data = OrderedDict()
for index, s in compat.iteritems(data):
for col, v in compat.iteritems(s):
new_data[col] = new_data.get(col, OrderedDict())
new_data[col][index] = v
return new_data
def _put_str(s, space):
return ('%s' % s)[:space].ljust(space)
# ----------------------------------------------------------------------
# Add plotting methods to DataFrame
DataFrame.plot = base.AccessorProperty(gfx.FramePlotMethods,
gfx.FramePlotMethods)
DataFrame.hist = gfx.hist_frame
@Appender(_shared_docs['boxplot'] % _shared_doc_kwargs)
def boxplot(self, column=None, by=None, ax=None, fontsize=None, rot=0,
grid=True, figsize=None, layout=None, return_type=None, **kwds):
from pandas.plotting._core import boxplot
import matplotlib.pyplot as plt
ax = boxplot(self, column=column, by=by, ax=ax, fontsize=fontsize,
grid=grid, rot=rot, figsize=figsize, layout=layout,
return_type=return_type, **kwds)
plt.draw_if_interactive()
return ax
DataFrame.boxplot = boxplot
ops.add_flex_arithmetic_methods(DataFrame, **ops.frame_flex_funcs)
ops.add_special_arithmetic_methods(DataFrame, **ops.frame_special_funcs)
|
mit
|
prabhamatta/Analyzing-Open-Data
|
notebooks/Day_06_C_Calculating_Diversity_Preview.py
|
3
|
2035
|
# -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <codecell>
%pylab --no-import-all inline
# <codecell>
import numpy as np
import matplotlib.pyplot as plt
from pandas import DataFrame, Series, Index
import pandas as pd
# <codecell>
import census
import us
import settings
# <markdowncell>
# The census documentation has example URLs, but they need your API key to work. In this notebook, we'll use the IPython notebook HTML display mechanism to help out.
# <codecell>
c = census.Census(key=settings.CENSUS_KEY)
# <markdowncell>
# http://www.census.gov/developers/data/sf1.xml
#
# compare to http://www.census.gov/prod/cen2010/briefs/c2010br-02.pdf
#
# I think the P0050001 might be the key category
#
# * P0010001 = P0050001
# * P0050001 = P0050002 + P0050010
#
# P0050002 Not Hispanic or Latino (total) =
#
# * P0050003 Not Hispanic White only
# * P0050004 Not Hispanic Black only
# * P0050006 Not Hispanic Asian only
# * Not Hispanic Other (should also equal P0050002 - (P0050003 + P0050004 + P0050006))
# * P0050005 Not Hispanic: American Indian and Alaska Native alone
# * P0050007 Not Hispanic: Native Hawaiian and Other Pacific Islander alone
# * P0050008 Not Hispanic: Some Other Race alone
# * P0050009 Not Hispanic: Two or More Races
#
# * P0050010 Hispanic or Latino
#
# P0050010 = P0050011 + ... + P0050017
#
# "Whites are coded as blue; African-Americans, green; Asians, red; Hispanics, orange; and all other racial categories are coded as brown."
# <headingcell level=1>
# Planned for next week
# <markdowncell>
# We will be calculating for each of the following geographic entities:
#
# * states
# * counties
# * places
# * metropolitan areas
# * combined statistical areas
#
# these quantities:
#
# * the total number of Hispanics/Latinos vs non-Hispanics/Latinos
# * numbers of people in the categories found in the [Racial Dot Map](http://bit.ly/rdotmap)
# * the diversity index
#
# With any luck, we'll also plot quantities on maps too.
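# <markdowncell>
# As a rough preview (a minimal sketch, not the final notebook code): one common diversity index is the probability that two randomly chosen people fall into different categories, i.e. 1 - sum(p_i**2) over the category proportions p_i. Given a DataFrame of category counts such as the hypothetical `counts` below, it can be computed directly with pandas:
# <codecell>
# Hypothetical counts for two places, using the dot-map buckets.
counts = DataFrame({'White': [500, 100],
                    'Black': [200, 50],
                    'Asian': [100, 25],
                    'Hispanic': [150, 20],
                    'Other': [50, 5]},
                   index=['place A', 'place B'])
def diversity_index(row):
    p = row / row.sum()  # category proportions
    return 1 - (p ** 2).sum()  # chance two random picks differ
counts.apply(diversity_index, axis=1)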
|
apache-2.0
|
ARudiuk/mne-python
|
examples/realtime/plot_compute_rt_average.py
|
8
|
1867
|
"""
========================================================
Compute real-time evoked responses using moving averages
========================================================
This example demonstrates how to use a real-time client together with
RtEpochs to compute evoked responses using moving averages. Here a
MockRtClient simulates the real-time stream from a file, so no server is
needed; when using a real RtClient, the MNE Real-time server
(mne_rt_server), which is part of mne-cpp, has to be running on the same
computer.
"""
# Authors: Martin Luessi <[email protected]>
# Mainak Jas <[email protected]>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import mne
from mne.datasets import sample
from mne.realtime import RtEpochs, MockRtClient
print(__doc__)
# Fiff file to simulate the realtime client
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
raw = mne.io.read_raw_fif(raw_fname, preload=True)
# select gradiometers
picks = mne.pick_types(raw.info, meg='grad', eeg=False, eog=True,
stim=True, exclude=raw.info['bads'])
# select the left-auditory condition
event_id, tmin, tmax = 1, -0.2, 0.5
# create the mock-client object
rt_client = MockRtClient(raw)
# create the real-time epochs object
rt_epochs = RtEpochs(rt_client, event_id, tmin, tmax, picks=picks,
decim=1, reject=dict(grad=4000e-13, eog=150e-6))
# start the acquisition
rt_epochs.start()
# send raw buffers
rt_client.send_data(rt_epochs, picks, tmin=0, tmax=150, buffer_size=1000)
for ii, ev in enumerate(rt_epochs.iter_evoked()):
print("Just got epoch %d" % (ii + 1))
ev.pick_types(meg=True, eog=False) # leave out the eog channel
if ii == 0:
evoked = ev
else:
evoked += ev
plt.clf() # clear canvas
evoked.plot(axes=plt.gca()) # plot on current figure
plt.pause(0.05)
|
bsd-3-clause
|
kenshay/ImageScript
|
ProgramData/SystemFiles/Python/Lib/site-packages/pandas/tests/test_lib.py
|
7
|
9567
|
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import pandas.lib as lib
import pandas.util.testing as tm
class TestMisc(tm.TestCase):
def test_max_len_string_array(self):
arr = a = np.array(['foo', 'b', np.nan], dtype='object')
        self.assertEqual(lib.max_len_string_array(arr), 3)
        # unicode
        arr = a.astype('U').astype(object)
        self.assertEqual(lib.max_len_string_array(arr), 3)
        # bytes for python3
        arr = a.astype('S').astype(object)
        self.assertEqual(lib.max_len_string_array(arr), 3)
# raises
tm.assertRaises(TypeError,
lambda: lib.max_len_string_array(arr.astype('U')))
def test_fast_unique_multiple_list_gen_sort(self):
keys = [['p', 'a'], ['n', 'd'], ['a', 's']]
gen = (key for key in keys)
expected = np.array(['a', 'd', 'n', 'p', 's'])
out = lib.fast_unique_multiple_list_gen(gen, sort=True)
tm.assert_numpy_array_equal(np.array(out), expected)
gen = (key for key in keys)
expected = np.array(['p', 'a', 'n', 'd', 's'])
out = lib.fast_unique_multiple_list_gen(gen, sort=False)
tm.assert_numpy_array_equal(np.array(out), expected)
class TestIndexing(tm.TestCase):
def test_maybe_indices_to_slice_left_edge(self):
target = np.arange(100)
# slice
indices = np.array([], dtype=np.int64)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
self.assertTrue(isinstance(maybe_slice, slice))
self.assert_numpy_array_equal(target[indices], target[maybe_slice])
for end in [1, 2, 5, 20, 99]:
for step in [1, 2, 4]:
indices = np.arange(0, end, step, dtype=np.int64)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
self.assertTrue(isinstance(maybe_slice, slice))
self.assert_numpy_array_equal(target[indices],
target[maybe_slice])
# reverse
indices = indices[::-1]
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
self.assertTrue(isinstance(maybe_slice, slice))
self.assert_numpy_array_equal(target[indices],
target[maybe_slice])
# not slice
for case in [[2, 1, 2, 0], [2, 2, 1, 0], [0, 1, 2, 1], [-2, 0, 2],
[2, 0, -2]]:
indices = np.array(case, dtype=np.int64)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
self.assertFalse(isinstance(maybe_slice, slice))
self.assert_numpy_array_equal(maybe_slice, indices)
self.assert_numpy_array_equal(target[indices], target[maybe_slice])
def test_maybe_indices_to_slice_right_edge(self):
target = np.arange(100)
# slice
for start in [0, 2, 5, 20, 97, 98]:
for step in [1, 2, 4]:
indices = np.arange(start, 99, step, dtype=np.int64)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
self.assertTrue(isinstance(maybe_slice, slice))
self.assert_numpy_array_equal(target[indices],
target[maybe_slice])
# reverse
indices = indices[::-1]
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
self.assertTrue(isinstance(maybe_slice, slice))
self.assert_numpy_array_equal(target[indices],
target[maybe_slice])
# not slice
indices = np.array([97, 98, 99, 100], dtype=np.int64)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
self.assertFalse(isinstance(maybe_slice, slice))
self.assert_numpy_array_equal(maybe_slice, indices)
with self.assertRaises(IndexError):
target[indices]
with self.assertRaises(IndexError):
target[maybe_slice]
indices = np.array([100, 99, 98, 97], dtype=np.int64)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
self.assertFalse(isinstance(maybe_slice, slice))
self.assert_numpy_array_equal(maybe_slice, indices)
with self.assertRaises(IndexError):
target[indices]
with self.assertRaises(IndexError):
target[maybe_slice]
for case in [[99, 97, 99, 96], [99, 99, 98, 97], [98, 98, 97, 96]]:
indices = np.array(case, dtype=np.int64)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
self.assertFalse(isinstance(maybe_slice, slice))
self.assert_numpy_array_equal(maybe_slice, indices)
self.assert_numpy_array_equal(target[indices], target[maybe_slice])
def test_maybe_indices_to_slice_both_edges(self):
target = np.arange(10)
# slice
for step in [1, 2, 4, 5, 8, 9]:
indices = np.arange(0, 9, step, dtype=np.int64)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
self.assertTrue(isinstance(maybe_slice, slice))
self.assert_numpy_array_equal(target[indices], target[maybe_slice])
# reverse
indices = indices[::-1]
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
self.assertTrue(isinstance(maybe_slice, slice))
self.assert_numpy_array_equal(target[indices], target[maybe_slice])
# not slice
for case in [[4, 2, 0, -2], [2, 2, 1, 0], [0, 1, 2, 1]]:
indices = np.array(case, dtype=np.int64)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
self.assertFalse(isinstance(maybe_slice, slice))
self.assert_numpy_array_equal(maybe_slice, indices)
self.assert_numpy_array_equal(target[indices], target[maybe_slice])
def test_maybe_indices_to_slice_middle(self):
target = np.arange(100)
# slice
for start, end in [(2, 10), (5, 25), (65, 97)]:
for step in [1, 2, 4, 20]:
indices = np.arange(start, end, step, dtype=np.int64)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
self.assertTrue(isinstance(maybe_slice, slice))
self.assert_numpy_array_equal(target[indices],
target[maybe_slice])
# reverse
indices = indices[::-1]
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
self.assertTrue(isinstance(maybe_slice, slice))
self.assert_numpy_array_equal(target[indices],
target[maybe_slice])
# not slice
for case in [[14, 12, 10, 12], [12, 12, 11, 10], [10, 11, 12, 11]]:
indices = np.array(case, dtype=np.int64)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
self.assertFalse(isinstance(maybe_slice, slice))
self.assert_numpy_array_equal(maybe_slice, indices)
self.assert_numpy_array_equal(target[indices], target[maybe_slice])
def test_maybe_booleans_to_slice(self):
arr = np.array([0, 0, 1, 1, 1, 0, 1], dtype=np.uint8)
result = lib.maybe_booleans_to_slice(arr)
self.assertTrue(result.dtype == np.bool_)
result = lib.maybe_booleans_to_slice(arr[:0])
self.assertTrue(result == slice(0, 0))
def test_get_reverse_indexer(self):
indexer = np.array([-1, -1, 1, 2, 0, -1, 3, 4], dtype=np.int64)
result = lib.get_reverse_indexer(indexer, 5)
expected = np.array([4, 2, 3, 6, 7], dtype=np.int64)
self.assertTrue(np.array_equal(result, expected))
class TestNullObj(tm.TestCase):
_1d_methods = ['isnullobj', 'isnullobj_old']
_2d_methods = ['isnullobj2d', 'isnullobj2d_old']
def _check_behavior(self, arr, expected):
for method in TestNullObj._1d_methods:
result = getattr(lib, method)(arr)
tm.assert_numpy_array_equal(result, expected)
arr = np.atleast_2d(arr)
expected = np.atleast_2d(expected)
for method in TestNullObj._2d_methods:
result = getattr(lib, method)(arr)
tm.assert_numpy_array_equal(result, expected)
def test_basic(self):
arr = np.array([1, None, 'foo', -5.1, pd.NaT, np.nan])
expected = np.array([False, True, False, False, True, True])
self._check_behavior(arr, expected)
def test_non_obj_dtype(self):
arr = np.array([1, 3, np.nan, 5], dtype=float)
expected = np.array([False, False, True, False])
self._check_behavior(arr, expected)
def test_empty_arr(self):
arr = np.array([])
expected = np.array([], dtype=bool)
self._check_behavior(arr, expected)
def test_empty_str_inp(self):
arr = np.array([""]) # empty but not null
expected = np.array([False])
self._check_behavior(arr, expected)
def test_empty_like(self):
# see gh-13717: no segfaults!
arr = np.empty_like([None])
expected = np.array([True])
self._check_behavior(arr, expected)
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
|
gpl-3.0
|
jcmgray/xarray
|
xarray/tests/test_computation.py
|
1
|
33312
|
import functools
import operator
import pickle
from collections import OrderedDict
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
import pytest
from numpy.testing import assert_array_equal
import xarray as xr
from xarray.core.computation import (
_UFuncSignature, apply_ufunc, broadcast_compat_data, collect_dict_values,
join_dict_keys, ordered_set_intersection, ordered_set_union, result_name,
unified_dim_sizes)
from . import raises_regex, requires_dask, has_dask
def assert_identical(a, b):
if hasattr(a, 'identical'):
msg = 'not identical:\n%r\n%r' % (a, b)
assert a.identical(b), msg
else:
assert_array_equal(a, b)
def test_signature_properties():
sig = _UFuncSignature([['x'], ['x', 'y']], [['z']])
assert sig.input_core_dims == (('x',), ('x', 'y'))
assert sig.output_core_dims == (('z',),)
assert sig.all_input_core_dims == frozenset(['x', 'y'])
assert sig.all_output_core_dims == frozenset(['z'])
assert sig.num_inputs == 2
assert sig.num_outputs == 1
assert str(sig) == '(x),(x,y)->(z)'
assert sig.to_gufunc_string() == '(dim0),(dim0,dim1)->(dim2)'
# dimension names matter
assert _UFuncSignature([['x']]) != _UFuncSignature([['y']])
def test_result_name():
class Named(object):
def __init__(self, name=None):
self.name = name
assert result_name([1, 2]) is None
assert result_name([Named()]) is None
assert result_name([Named('foo'), 2]) == 'foo'
assert result_name([Named('foo'), Named('bar')]) is None
assert result_name([Named('foo'), Named()]) is None
def test_ordered_set_union():
assert list(ordered_set_union([[1, 2]])) == [1, 2]
assert list(ordered_set_union([[1, 2], [2, 1]])) == [1, 2]
assert list(ordered_set_union([[0], [1, 2], [1, 3]])) == [0, 1, 2, 3]
def test_ordered_set_intersection():
assert list(ordered_set_intersection([[1, 2]])) == [1, 2]
assert list(ordered_set_intersection([[1, 2], [2, 1]])) == [1, 2]
assert list(ordered_set_intersection([[1, 2], [1, 3]])) == [1]
assert list(ordered_set_intersection([[1, 2], [2]])) == [2]
def test_join_dict_keys():
dicts = [OrderedDict.fromkeys(keys) for keys in [['x', 'y'], ['y', 'z']]]
assert list(join_dict_keys(dicts, 'left')) == ['x', 'y']
assert list(join_dict_keys(dicts, 'right')) == ['y', 'z']
assert list(join_dict_keys(dicts, 'inner')) == ['y']
assert list(join_dict_keys(dicts, 'outer')) == ['x', 'y', 'z']
with pytest.raises(ValueError):
join_dict_keys(dicts, 'exact')
with pytest.raises(KeyError):
join_dict_keys(dicts, 'foobar')
def test_collect_dict_values():
dicts = [{'x': 1, 'y': 2, 'z': 3}, {'z': 4}, 5]
expected = [[1, 0, 5], [2, 0, 5], [3, 4, 5]]
collected = collect_dict_values(dicts, ['x', 'y', 'z'], fill_value=0)
assert collected == expected
def identity(x):
return x
def test_apply_identity():
array = np.arange(10)
variable = xr.Variable('x', array)
data_array = xr.DataArray(variable, [('x', -array)])
dataset = xr.Dataset({'y': variable}, {'x': -array})
apply_identity = functools.partial(apply_ufunc, identity)
assert_identical(array, apply_identity(array))
assert_identical(variable, apply_identity(variable))
assert_identical(data_array, apply_identity(data_array))
assert_identical(data_array, apply_identity(data_array.groupby('x')))
assert_identical(dataset, apply_identity(dataset))
assert_identical(dataset, apply_identity(dataset.groupby('x')))
def add(a, b):
return apply_ufunc(operator.add, a, b)
def test_apply_two_inputs():
array = np.array([1, 2, 3])
variable = xr.Variable('x', array)
data_array = xr.DataArray(variable, [('x', -array)])
dataset = xr.Dataset({'y': variable}, {'x': -array})
zero_array = np.zeros_like(array)
zero_variable = xr.Variable('x', zero_array)
zero_data_array = xr.DataArray(zero_variable, [('x', -array)])
zero_dataset = xr.Dataset({'y': zero_variable}, {'x': -array})
assert_identical(array, add(array, zero_array))
assert_identical(array, add(zero_array, array))
assert_identical(variable, add(variable, zero_array))
assert_identical(variable, add(variable, zero_variable))
assert_identical(variable, add(zero_array, variable))
assert_identical(variable, add(zero_variable, variable))
assert_identical(data_array, add(data_array, zero_array))
assert_identical(data_array, add(data_array, zero_variable))
assert_identical(data_array, add(data_array, zero_data_array))
assert_identical(data_array, add(zero_array, data_array))
assert_identical(data_array, add(zero_variable, data_array))
assert_identical(data_array, add(zero_data_array, data_array))
assert_identical(dataset, add(dataset, zero_array))
assert_identical(dataset, add(dataset, zero_variable))
assert_identical(dataset, add(dataset, zero_data_array))
assert_identical(dataset, add(dataset, zero_dataset))
assert_identical(dataset, add(zero_array, dataset))
assert_identical(dataset, add(zero_variable, dataset))
assert_identical(dataset, add(zero_data_array, dataset))
assert_identical(dataset, add(zero_dataset, dataset))
assert_identical(data_array, add(data_array.groupby('x'), zero_data_array))
assert_identical(data_array, add(zero_data_array, data_array.groupby('x')))
assert_identical(dataset, add(data_array.groupby('x'), zero_dataset))
assert_identical(dataset, add(zero_dataset, data_array.groupby('x')))
assert_identical(dataset, add(dataset.groupby('x'), zero_data_array))
assert_identical(dataset, add(dataset.groupby('x'), zero_dataset))
assert_identical(dataset, add(zero_data_array, dataset.groupby('x')))
assert_identical(dataset, add(zero_dataset, dataset.groupby('x')))
def test_apply_1d_and_0d():
array = np.array([1, 2, 3])
variable = xr.Variable('x', array)
data_array = xr.DataArray(variable, [('x', -array)])
dataset = xr.Dataset({'y': variable}, {'x': -array})
zero_array = 0
zero_variable = xr.Variable((), zero_array)
zero_data_array = xr.DataArray(zero_variable)
zero_dataset = xr.Dataset({'y': zero_variable})
assert_identical(array, add(array, zero_array))
assert_identical(array, add(zero_array, array))
assert_identical(variable, add(variable, zero_array))
assert_identical(variable, add(variable, zero_variable))
assert_identical(variable, add(zero_array, variable))
assert_identical(variable, add(zero_variable, variable))
assert_identical(data_array, add(data_array, zero_array))
assert_identical(data_array, add(data_array, zero_variable))
assert_identical(data_array, add(data_array, zero_data_array))
assert_identical(data_array, add(zero_array, data_array))
assert_identical(data_array, add(zero_variable, data_array))
assert_identical(data_array, add(zero_data_array, data_array))
assert_identical(dataset, add(dataset, zero_array))
assert_identical(dataset, add(dataset, zero_variable))
assert_identical(dataset, add(dataset, zero_data_array))
assert_identical(dataset, add(dataset, zero_dataset))
assert_identical(dataset, add(zero_array, dataset))
assert_identical(dataset, add(zero_variable, dataset))
assert_identical(dataset, add(zero_data_array, dataset))
assert_identical(dataset, add(zero_dataset, dataset))
assert_identical(data_array, add(data_array.groupby('x'), zero_data_array))
assert_identical(data_array, add(zero_data_array, data_array.groupby('x')))
assert_identical(dataset, add(data_array.groupby('x'), zero_dataset))
assert_identical(dataset, add(zero_dataset, data_array.groupby('x')))
assert_identical(dataset, add(dataset.groupby('x'), zero_data_array))
assert_identical(dataset, add(dataset.groupby('x'), zero_dataset))
assert_identical(dataset, add(zero_data_array, dataset.groupby('x')))
assert_identical(dataset, add(zero_dataset, dataset.groupby('x')))
def test_apply_two_outputs():
array = np.arange(5)
variable = xr.Variable('x', array)
data_array = xr.DataArray(variable, [('x', -array)])
dataset = xr.Dataset({'y': variable}, {'x': -array})
def twice(obj):
def func(x):
return (x, x)
return apply_ufunc(func, obj, output_core_dims=[[], []])
out0, out1 = twice(array)
assert_identical(out0, array)
assert_identical(out1, array)
out0, out1 = twice(variable)
assert_identical(out0, variable)
assert_identical(out1, variable)
out0, out1 = twice(data_array)
assert_identical(out0, data_array)
assert_identical(out1, data_array)
out0, out1 = twice(dataset)
assert_identical(out0, dataset)
assert_identical(out1, dataset)
out0, out1 = twice(data_array.groupby('x'))
assert_identical(out0, data_array)
assert_identical(out1, data_array)
out0, out1 = twice(dataset.groupby('x'))
assert_identical(out0, dataset)
assert_identical(out1, dataset)
def test_apply_input_core_dimension():
def first_element(obj, dim):
def func(x):
return x[..., 0]
return apply_ufunc(func, obj, input_core_dims=[[dim]])
array = np.array([[1, 2], [3, 4]])
variable = xr.Variable(['x', 'y'], array)
data_array = xr.DataArray(variable, {'x': ['a', 'b'], 'y': [-1, -2]})
dataset = xr.Dataset({'data': data_array})
expected_variable_x = xr.Variable(['y'], [1, 2])
expected_data_array_x = xr.DataArray(expected_variable_x, {'y': [-1, -2]})
expected_dataset_x = xr.Dataset({'data': expected_data_array_x})
expected_variable_y = xr.Variable(['x'], [1, 3])
expected_data_array_y = xr.DataArray(expected_variable_y,
{'x': ['a', 'b']})
expected_dataset_y = xr.Dataset({'data': expected_data_array_y})
assert_identical(expected_variable_x, first_element(variable, 'x'))
assert_identical(expected_variable_y, first_element(variable, 'y'))
assert_identical(expected_data_array_x, first_element(data_array, 'x'))
assert_identical(expected_data_array_y, first_element(data_array, 'y'))
assert_identical(expected_dataset_x, first_element(dataset, 'x'))
assert_identical(expected_dataset_y, first_element(dataset, 'y'))
assert_identical(expected_data_array_x,
first_element(data_array.groupby('y'), 'x'))
assert_identical(expected_dataset_x,
first_element(dataset.groupby('y'), 'x'))
def test_apply_output_core_dimension():
def stack_negative(obj):
def func(x):
return np.stack([x, -x], axis=-1)
result = apply_ufunc(func, obj, output_core_dims=[['sign']])
if isinstance(result, (xr.Dataset, xr.DataArray)):
result.coords['sign'] = [1, -1]
return result
array = np.array([[1, 2], [3, 4]])
variable = xr.Variable(['x', 'y'], array)
data_array = xr.DataArray(variable, {'x': ['a', 'b'], 'y': [-1, -2]})
dataset = xr.Dataset({'data': data_array})
stacked_array = np.array([[[1, -1], [2, -2]], [[3, -3], [4, -4]]])
stacked_variable = xr.Variable(['x', 'y', 'sign'], stacked_array)
stacked_coords = {'x': ['a', 'b'], 'y': [-1, -2], 'sign': [1, -1]}
stacked_data_array = xr.DataArray(stacked_variable, stacked_coords)
stacked_dataset = xr.Dataset({'data': stacked_data_array})
assert_identical(stacked_array, stack_negative(array))
assert_identical(stacked_variable, stack_negative(variable))
assert_identical(stacked_data_array, stack_negative(data_array))
assert_identical(stacked_dataset, stack_negative(dataset))
assert_identical(stacked_data_array,
stack_negative(data_array.groupby('x')))
assert_identical(stacked_dataset,
stack_negative(dataset.groupby('x')))
def original_and_stack_negative(obj):
def func(x):
return (x, np.stack([x, -x], axis=-1))
result = apply_ufunc(func, obj, output_core_dims=[[], ['sign']])
if isinstance(result[1], (xr.Dataset, xr.DataArray)):
result[1].coords['sign'] = [1, -1]
return result
out0, out1 = original_and_stack_negative(array)
assert_identical(array, out0)
assert_identical(stacked_array, out1)
out0, out1 = original_and_stack_negative(variable)
assert_identical(variable, out0)
assert_identical(stacked_variable, out1)
out0, out1 = original_and_stack_negative(data_array)
assert_identical(data_array, out0)
assert_identical(stacked_data_array, out1)
out0, out1 = original_and_stack_negative(dataset)
assert_identical(dataset, out0)
assert_identical(stacked_dataset, out1)
out0, out1 = original_and_stack_negative(data_array.groupby('x'))
assert_identical(data_array, out0)
assert_identical(stacked_data_array, out1)
out0, out1 = original_and_stack_negative(dataset.groupby('x'))
assert_identical(dataset, out0)
assert_identical(stacked_dataset, out1)
def test_apply_exclude():
def concatenate(objects, dim='x'):
def func(*x):
return np.concatenate(x, axis=-1)
result = apply_ufunc(func, *objects,
input_core_dims=[[dim]] * len(objects),
output_core_dims=[[dim]],
exclude_dims={dim})
if isinstance(result, (xr.Dataset, xr.DataArray)):
# note: this will fail if dim is not a coordinate on any input
new_coord = np.concatenate([obj.coords[dim] for obj in objects])
result.coords[dim] = new_coord
return result
arrays = [np.array([1]), np.array([2, 3])]
variables = [xr.Variable('x', a) for a in arrays]
data_arrays = [xr.DataArray(v, {'x': c, 'y': ('x', range(len(c)))})
for v, c in zip(variables, [['a'], ['b', 'c']])]
datasets = [xr.Dataset({'data': data_array}) for data_array in data_arrays]
expected_array = np.array([1, 2, 3])
expected_variable = xr.Variable('x', expected_array)
expected_data_array = xr.DataArray(expected_variable, [('x', list('abc'))])
expected_dataset = xr.Dataset({'data': expected_data_array})
assert_identical(expected_array, concatenate(arrays))
assert_identical(expected_variable, concatenate(variables))
assert_identical(expected_data_array, concatenate(data_arrays))
assert_identical(expected_dataset, concatenate(datasets))
# must also be a core dimension
with pytest.raises(ValueError):
apply_ufunc(identity, variables[0], exclude_dims={'x'})
def test_apply_groupby_add():
array = np.arange(5)
variable = xr.Variable('x', array)
coords = {'x': -array, 'y': ('x', [0, 0, 1, 1, 2])}
data_array = xr.DataArray(variable, coords, dims='x')
dataset = xr.Dataset({'z': variable}, coords)
other_variable = xr.Variable('y', [0, 10])
other_data_array = xr.DataArray(other_variable, dims='y')
other_dataset = xr.Dataset({'z': other_variable})
expected_variable = xr.Variable('x', [0, 1, 12, 13, np.nan])
expected_data_array = xr.DataArray(expected_variable, coords, dims='x')
expected_dataset = xr.Dataset({'z': expected_variable}, coords)
assert_identical(expected_data_array,
add(data_array.groupby('y'), other_data_array))
assert_identical(expected_dataset,
add(data_array.groupby('y'), other_dataset))
assert_identical(expected_dataset,
add(dataset.groupby('y'), other_data_array))
assert_identical(expected_dataset,
add(dataset.groupby('y'), other_dataset))
# cannot be performed with xarray.Variable objects that share a dimension
with pytest.raises(ValueError):
add(data_array.groupby('y'), other_variable)
    # groupby objects can only be combined if they are all grouped the same way
with pytest.raises(ValueError):
add(data_array.groupby('y'), data_array[:4].groupby('y'))
with pytest.raises(ValueError):
add(data_array.groupby('y'), data_array[1:].groupby('y'))
with pytest.raises(ValueError):
add(data_array.groupby('y'), other_data_array.groupby('y'))
with pytest.raises(ValueError):
add(data_array.groupby('y'), data_array.groupby('x'))
def test_unified_dim_sizes():
assert unified_dim_sizes([xr.Variable((), 0)]) == OrderedDict()
assert (unified_dim_sizes([xr.Variable('x', [1]),
xr.Variable('x', [1])]) ==
OrderedDict([('x', 1)]))
assert (unified_dim_sizes([xr.Variable('x', [1]),
xr.Variable('y', [1, 2])]) ==
OrderedDict([('x', 1), ('y', 2)]))
assert (unified_dim_sizes([xr.Variable(('x', 'z'), [[1]]),
xr.Variable(('y', 'z'), [[1, 2], [3, 4]])],
exclude_dims={'z'}) ==
OrderedDict([('x', 1), ('y', 2)]))
# duplicate dimensions
with pytest.raises(ValueError):
unified_dim_sizes([xr.Variable(('x', 'x'), [[1]])])
# mismatched lengths
with pytest.raises(ValueError):
unified_dim_sizes(
[xr.Variable('x', [1]), xr.Variable('x', [1, 2])])
def test_broadcast_compat_data_1d():
data = np.arange(5)
var = xr.Variable('x', data)
assert_identical(data, broadcast_compat_data(var, ('x',), ()))
assert_identical(data, broadcast_compat_data(var, (), ('x',)))
assert_identical(data[:], broadcast_compat_data(var, ('w',), ('x',)))
assert_identical(data[:, None],
broadcast_compat_data(var, ('w', 'x', 'y'), ()))
with pytest.raises(ValueError):
broadcast_compat_data(var, ('x',), ('w',))
with pytest.raises(ValueError):
broadcast_compat_data(var, (), ())
def test_broadcast_compat_data_2d():
data = np.arange(12).reshape(3, 4)
var = xr.Variable(['x', 'y'], data)
assert_identical(data, broadcast_compat_data(var, ('x', 'y'), ()))
assert_identical(data, broadcast_compat_data(var, ('x',), ('y',)))
assert_identical(data, broadcast_compat_data(var, (), ('x', 'y')))
assert_identical(data.T, broadcast_compat_data(var, ('y', 'x'), ()))
assert_identical(data.T, broadcast_compat_data(var, ('y',), ('x',)))
assert_identical(data, broadcast_compat_data(var, ('w', 'x'), ('y',)))
assert_identical(data, broadcast_compat_data(var, ('w',), ('x', 'y')))
assert_identical(data.T, broadcast_compat_data(var, ('w',), ('y', 'x')))
assert_identical(data[:, :, None],
broadcast_compat_data(var, ('w', 'x', 'y', 'z'), ()))
assert_identical(data[None, :, :].T,
broadcast_compat_data(var, ('w', 'y', 'x', 'z'), ()))
def test_keep_attrs():
def add(a, b, keep_attrs):
if keep_attrs:
return apply_ufunc(operator.add, a, b, keep_attrs=keep_attrs)
else:
return apply_ufunc(operator.add, a, b)
a = xr.DataArray([0, 1], [('x', [0, 1])])
a.attrs['attr'] = 'da'
a['x'].attrs['attr'] = 'da_coord'
b = xr.DataArray([1, 2], [('x', [0, 1])])
actual = add(a, b, keep_attrs=False)
assert not actual.attrs
actual = add(a, b, keep_attrs=True)
assert_identical(actual.attrs, a.attrs)
assert_identical(actual['x'].attrs, a['x'].attrs)
actual = add(a.variable, b.variable, keep_attrs=False)
assert not actual.attrs
actual = add(a.variable, b.variable, keep_attrs=True)
assert_identical(actual.attrs, a.attrs)
a = xr.Dataset({'x': [0, 1]})
a.attrs['attr'] = 'ds'
a.x.attrs['attr'] = 'da'
b = xr.Dataset({'x': [0, 1]})
actual = add(a, b, keep_attrs=False)
assert not actual.attrs
actual = add(a, b, keep_attrs=True)
assert_identical(actual.attrs, a.attrs)
assert_identical(actual.x.attrs, a.x.attrs)
def test_dataset_join():
ds0 = xr.Dataset({'a': ('x', [1, 2]), 'x': [0, 1]})
ds1 = xr.Dataset({'a': ('x', [99, 3]), 'x': [1, 2]})
# by default, cannot have different labels
with raises_regex(ValueError, 'indexes .* are not equal'):
apply_ufunc(operator.add, ds0, ds1)
with raises_regex(TypeError, 'must supply'):
apply_ufunc(operator.add, ds0, ds1, dataset_join='outer')
def add(a, b, join, dataset_join):
return apply_ufunc(operator.add, a, b, join=join,
dataset_join=dataset_join,
dataset_fill_value=np.nan)
actual = add(ds0, ds1, 'outer', 'inner')
expected = xr.Dataset({'a': ('x', [np.nan, 101, np.nan]),
'x': [0, 1, 2]})
assert_identical(actual, expected)
actual = add(ds0, ds1, 'outer', 'outer')
assert_identical(actual, expected)
with raises_regex(ValueError, 'data variable names'):
apply_ufunc(operator.add, ds0, xr.Dataset({'b': 1}))
ds2 = xr.Dataset({'b': ('x', [99, 3]), 'x': [1, 2]})
actual = add(ds0, ds2, 'outer', 'inner')
expected = xr.Dataset({'x': [0, 1, 2]})
assert_identical(actual, expected)
# we used np.nan as the fill_value in add() above
actual = add(ds0, ds2, 'outer', 'outer')
expected = xr.Dataset({'a': ('x', [np.nan, np.nan, np.nan]),
'b': ('x', [np.nan, np.nan, np.nan]),
'x': [0, 1, 2]})
assert_identical(actual, expected)
@requires_dask
def test_apply_dask():
import dask.array as da
array = da.ones((2,), chunks=2)
variable = xr.Variable('x', array)
coords = xr.DataArray(variable).coords.variables
data_array = xr.DataArray(variable, coords, fastpath=True)
dataset = xr.Dataset({'y': variable})
# encountered dask array, but did not set dask='allowed'
with pytest.raises(ValueError):
apply_ufunc(identity, array)
with pytest.raises(ValueError):
apply_ufunc(identity, variable)
with pytest.raises(ValueError):
apply_ufunc(identity, data_array)
with pytest.raises(ValueError):
apply_ufunc(identity, dataset)
# unknown setting for dask array handling
with pytest.raises(ValueError):
apply_ufunc(identity, array, dask='unknown')
def dask_safe_identity(x):
return apply_ufunc(identity, x, dask='allowed')
assert array is dask_safe_identity(array)
actual = dask_safe_identity(variable)
assert isinstance(actual.data, da.Array)
assert_identical(variable, actual)
actual = dask_safe_identity(data_array)
assert isinstance(actual.data, da.Array)
assert_identical(data_array, actual)
actual = dask_safe_identity(dataset)
assert isinstance(actual['y'].data, da.Array)
assert_identical(dataset, actual)
@requires_dask
def test_apply_dask_parallelized_one_arg():
import dask.array as da
array = da.ones((2, 2), chunks=(1, 1))
data_array = xr.DataArray(array, dims=('x', 'y'))
def parallel_identity(x):
return apply_ufunc(identity, x, dask='parallelized',
output_dtypes=[x.dtype])
actual = parallel_identity(data_array)
assert isinstance(actual.data, da.Array)
assert actual.data.chunks == array.chunks
assert_identical(data_array, actual)
computed = data_array.compute()
actual = parallel_identity(computed)
assert_identical(computed, actual)
@requires_dask
def test_apply_dask_parallelized_two_args():
import dask.array as da
array = da.ones((2, 2), chunks=(1, 1), dtype=np.int64)
data_array = xr.DataArray(array, dims=('x', 'y'))
data_array.name = None
def parallel_add(x, y):
return apply_ufunc(operator.add, x, y,
dask='parallelized',
output_dtypes=[np.int64])
def check(x, y):
actual = parallel_add(x, y)
assert isinstance(actual.data, da.Array)
assert actual.data.chunks == array.chunks
assert_identical(data_array, actual)
    check(data_array, 0)
check(0, data_array)
check(data_array, xr.DataArray(0))
check(data_array, 0 * data_array)
check(data_array, 0 * data_array[0])
check(data_array[:, 0], 0 * data_array[0])
check(data_array, 0 * data_array.compute())
@requires_dask
def test_apply_dask_parallelized_errors():
import dask.array as da
array = da.ones((2, 2), chunks=(1, 1))
data_array = xr.DataArray(array, dims=('x', 'y'))
with pytest.raises(NotImplementedError):
apply_ufunc(identity, data_array, output_core_dims=[['z'], ['z']],
dask='parallelized')
with raises_regex(ValueError, 'dtypes'):
apply_ufunc(identity, data_array, dask='parallelized')
with raises_regex(TypeError, 'list'):
apply_ufunc(identity, data_array, dask='parallelized',
output_dtypes=float)
with raises_regex(ValueError, 'must have the same length'):
apply_ufunc(identity, data_array, dask='parallelized',
output_dtypes=[float, float])
with raises_regex(ValueError, 'output_sizes'):
apply_ufunc(identity, data_array, output_core_dims=[['z']],
output_dtypes=[float], dask='parallelized')
with raises_regex(ValueError, 'at least one input is an xarray object'):
apply_ufunc(identity, array, dask='parallelized')
with raises_regex(ValueError, 'consists of multiple chunks'):
apply_ufunc(identity, data_array, dask='parallelized',
output_dtypes=[float],
input_core_dims=[('y',)],
output_core_dims=[('y',)])
@requires_dask
def test_apply_dask_multiple_inputs():
import dask.array as da
def covariance(x, y):
return ((x - x.mean(axis=-1, keepdims=True)) *
(y - y.mean(axis=-1, keepdims=True))).mean(axis=-1)
rs = np.random.RandomState(42)
array1 = da.from_array(rs.randn(4, 4), chunks=(2, 4))
array2 = da.from_array(rs.randn(4, 4), chunks=(2, 4))
data_array_1 = xr.DataArray(array1, dims=('x', 'z'))
data_array_2 = xr.DataArray(array2, dims=('y', 'z'))
expected = apply_ufunc(
covariance, data_array_1.compute(), data_array_2.compute(),
input_core_dims=[['z'], ['z']])
allowed = apply_ufunc(
covariance, data_array_1, data_array_2, input_core_dims=[['z'], ['z']],
dask='allowed')
assert isinstance(allowed.data, da.Array)
xr.testing.assert_allclose(expected, allowed.compute())
parallelized = apply_ufunc(
covariance, data_array_1, data_array_2, input_core_dims=[['z'], ['z']],
dask='parallelized', output_dtypes=[float])
assert isinstance(parallelized.data, da.Array)
xr.testing.assert_allclose(expected, parallelized.compute())
@requires_dask
def test_apply_dask_new_output_dimension():
import dask.array as da
array = da.ones((2, 2), chunks=(1, 1))
data_array = xr.DataArray(array, dims=('x', 'y'))
def stack_negative(obj):
def func(x):
return np.stack([x, -x], axis=-1)
return apply_ufunc(func, obj, output_core_dims=[['sign']],
dask='parallelized', output_dtypes=[obj.dtype],
output_sizes={'sign': 2})
expected = stack_negative(data_array.compute())
actual = stack_negative(data_array)
assert actual.dims == ('x', 'y', 'sign')
assert actual.shape == (2, 2, 2)
assert isinstance(actual.data, da.Array)
assert_identical(expected, actual)
def pandas_median(x):
return pd.Series(x).median()
def test_vectorize():
if LooseVersion(np.__version__) < LooseVersion('1.12.0'):
pytest.skip('numpy 1.12 or later to support vectorize=True.')
data_array = xr.DataArray([[0, 1, 2], [1, 2, 3]], dims=('x', 'y'))
expected = xr.DataArray([1, 2], dims=['x'])
actual = apply_ufunc(pandas_median, data_array,
input_core_dims=[['y']],
vectorize=True)
assert_identical(expected, actual)
@requires_dask
def test_vectorize_dask():
if LooseVersion(np.__version__) < LooseVersion('1.12.0'):
pytest.skip('numpy 1.12 or later to support vectorize=True.')
data_array = xr.DataArray([[0, 1, 2], [1, 2, 3]], dims=('x', 'y'))
expected = xr.DataArray([1, 2], dims=['x'])
actual = apply_ufunc(pandas_median, data_array.chunk({'x': 1}),
input_core_dims=[['y']],
vectorize=True,
dask='parallelized',
output_dtypes=[float])
assert_identical(expected, actual)
@pytest.mark.parametrize('use_dask', [True, False])
def test_dot(use_dask):
if use_dask:
if not has_dask:
pytest.skip('test for dask.')
import dask
if LooseVersion(dask.__version__) < LooseVersion('0.17.3'):
pytest.skip("needs dask.array.einsum")
a = np.arange(30 * 4).reshape(30, 4)
b = np.arange(30 * 4 * 5).reshape(30, 4, 5)
c = np.arange(5 * 60).reshape(5, 60)
da_a = xr.DataArray(a, dims=['a', 'b'],
coords={'a': np.linspace(0, 1, 30)})
da_b = xr.DataArray(b, dims=['a', 'b', 'c'],
coords={'a': np.linspace(0, 1, 30)})
da_c = xr.DataArray(c, dims=['c', 'e'])
if use_dask:
da_a = da_a.chunk({'a': 3})
da_b = da_b.chunk({'a': 3})
da_c = da_c.chunk({'c': 3})
actual = xr.dot(da_a, da_b, dims=['a', 'b'])
assert actual.dims == ('c', )
assert (actual.data == np.einsum('ij,ijk->k', a, b)).all()
assert isinstance(actual.variable.data, type(da_a.variable.data))
actual = xr.dot(da_a, da_b)
assert actual.dims == ('c', )
assert (actual.data == np.einsum('ij,ijk->k', a, b)).all()
assert isinstance(actual.variable.data, type(da_a.variable.data))
    # if only a single array is passed without a dims argument, just return
    # it as is
actual = xr.dot(da_a)
assert da_a.identical(actual)
# test for variable
actual = xr.dot(da_a.variable, da_b.variable)
assert actual.dims == ('c', )
assert (actual.data == np.einsum('ij,ijk->k', a, b)).all()
assert isinstance(actual.data, type(da_a.variable.data))
if use_dask:
da_a = da_a.chunk({'a': 3})
da_b = da_b.chunk({'a': 3})
actual = xr.dot(da_a, da_b, dims=['b'])
assert actual.dims == ('a', 'c')
assert (actual.data == np.einsum('ij,ijk->ik', a, b)).all()
assert isinstance(actual.variable.data, type(da_a.variable.data))
actual = xr.dot(da_a, da_b, dims=['b'])
assert actual.dims == ('a', 'c')
assert (actual.data == np.einsum('ij,ijk->ik', a, b)).all()
actual = xr.dot(da_a, da_b, dims='b')
assert actual.dims == ('a', 'c')
assert (actual.data == np.einsum('ij,ijk->ik', a, b)).all()
actual = xr.dot(da_a, da_b, dims='a')
assert actual.dims == ('b', 'c')
assert (actual.data == np.einsum('ij,ijk->jk', a, b)).all()
actual = xr.dot(da_a, da_b, dims='c')
assert actual.dims == ('a', 'b')
assert (actual.data == np.einsum('ij,ijk->ij', a, b)).all()
actual = xr.dot(da_a, da_b, da_c, dims=['a', 'b'])
assert actual.dims == ('c', 'e')
assert (actual.data == np.einsum('ij,ijk,kl->kl ', a, b, c)).all()
# should work with tuple
actual = xr.dot(da_a, da_b, dims=('c', ))
assert actual.dims == ('a', 'b')
assert (actual.data == np.einsum('ij,ijk->ij', a, b)).all()
# default dims
actual = xr.dot(da_a, da_b, da_c)
assert actual.dims == ('e', )
assert (actual.data == np.einsum('ij,ijk,kl->l ', a, b, c)).all()
# 1 array summation
actual = xr.dot(da_a, dims='a')
assert actual.dims == ('b', )
assert (actual.data == np.einsum('ij->j ', a)).all()
# empty dim
actual = xr.dot(da_a.sel(a=[]), da_a.sel(a=[]), dims='a')
assert actual.dims == ('b', )
assert (actual.data == np.zeros(actual.shape)).all()
# Invalid cases
if not use_dask or LooseVersion(dask.__version__) > LooseVersion('0.17.4'):
with pytest.raises(TypeError):
xr.dot(da_a, dims='a', invalid=None)
with pytest.raises(TypeError):
xr.dot(da_a.to_dataset(name='da'), dims='a')
with pytest.raises(TypeError):
xr.dot(dims='a')
# einsum parameters
actual = xr.dot(da_a, da_b, dims=['b'], order='C')
assert (actual.data == np.einsum('ij,ijk->ik', a, b)).all()
assert actual.values.flags['C_CONTIGUOUS']
assert not actual.values.flags['F_CONTIGUOUS']
actual = xr.dot(da_a, da_b, dims=['b'], order='F')
assert (actual.data == np.einsum('ij,ijk->ik', a, b)).all()
# dask converts Fortran arrays to C order when merging the final array
if not use_dask:
assert not actual.values.flags['C_CONTIGUOUS']
assert actual.values.flags['F_CONTIGUOUS']
    # einsum takes a constant subscripts string as its first parameter, which
    # makes it hard to pass to xarray.apply_ufunc.
# make sure dot() uses functools.partial(einsum, subscripts), which
# can be pickled, and not a lambda, which can't.
pickle.loads(pickle.dumps(xr.dot(da_a)))
def test_where():
cond = xr.DataArray([True, False], dims='x')
actual = xr.where(cond, 1, 0)
expected = xr.DataArray([1, 0], dims='x')
assert_identical(expected, actual)
|
apache-2.0
|
anirudhjayaraman/scikit-learn
|
sklearn/utils/multiclass.py
|
45
|
12390
|
# Author: Arnaud Joly, Joel Nothman, Hamzeh Alsalhi
#
# License: BSD 3 clause
"""
Multi-class / multi-label utility function
==========================================
"""
from __future__ import division
from collections import Sequence
from itertools import chain
from scipy.sparse import issparse
from scipy.sparse.base import spmatrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
import numpy as np
from ..externals.six import string_types
from .validation import check_array
from ..utils.fixes import bincount
from ..utils.fixes import array_equal
def _unique_multiclass(y):
if hasattr(y, '__array__'):
return np.unique(np.asarray(y))
else:
return set(y)
def _unique_indicator(y):
return np.arange(check_array(y, ['csr', 'csc', 'coo']).shape[1])
_FN_UNIQUE_LABELS = {
'binary': _unique_multiclass,
'multiclass': _unique_multiclass,
'multilabel-indicator': _unique_indicator,
}
def unique_labels(*ys):
"""Extract an ordered array of unique labels
We don't allow:
- mix of multilabel and multiclass (single label) targets
- mix of label indicator matrix and anything else,
          because there are no explicit labels
- mix of label indicator matrices of different sizes
- mix of string and integer labels
At the moment, we also don't allow "multiclass-multioutput" input type.
Parameters
----------
*ys : array-likes,
Returns
-------
out : numpy array of shape [n_unique_labels]
An ordered array of unique labels.
Examples
--------
>>> from sklearn.utils.multiclass import unique_labels
>>> unique_labels([3, 5, 5, 5, 7, 7])
array([3, 5, 7])
>>> unique_labels([1, 2, 3, 4], [2, 2, 3, 4])
array([1, 2, 3, 4])
>>> unique_labels([1, 2, 10], [5, 11])
array([ 1, 2, 5, 10, 11])
"""
if not ys:
raise ValueError('No argument has been passed.')
# Check that we don't mix label format
ys_types = set(type_of_target(x) for x in ys)
if ys_types == set(["binary", "multiclass"]):
ys_types = set(["multiclass"])
if len(ys_types) > 1:
raise ValueError("Mix type of y not allowed, got types %s" % ys_types)
label_type = ys_types.pop()
# Check consistency for the indicator format
if (label_type == "multilabel-indicator" and
len(set(check_array(y, ['csr', 'csc', 'coo']).shape[1]
for y in ys)) > 1):
raise ValueError("Multi-label binary indicator input with "
"different numbers of labels")
# Get the unique set of labels
_unique_labels = _FN_UNIQUE_LABELS.get(label_type, None)
if not _unique_labels:
raise ValueError("Unknown label type: %s" % repr(ys))
ys_labels = set(chain.from_iterable(_unique_labels(y) for y in ys))
# Check that we don't mix string type with number type
if (len(set(isinstance(label, string_types) for label in ys_labels)) > 1):
raise ValueError("Mix of label input types (string and number)")
return np.array(sorted(ys_labels))
def _is_integral_float(y):
return y.dtype.kind == 'f' and np.all(y.astype(int) == y)
def is_multilabel(y):
""" Check if ``y`` is in a multilabel format.
Parameters
----------
y : numpy array of shape [n_samples]
Target values.
Returns
-------
out : bool,
        Return ``True`` if ``y`` is in a multilabel format, else ``False``.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.multiclass import is_multilabel
>>> is_multilabel([0, 1, 0, 1])
False
>>> is_multilabel([[1], [0, 2], []])
False
>>> is_multilabel(np.array([[1, 0], [0, 0]]))
True
>>> is_multilabel(np.array([[1], [0], [0]]))
False
>>> is_multilabel(np.array([[1, 0, 0]]))
True
"""
if hasattr(y, '__array__'):
y = np.asarray(y)
if not (hasattr(y, "shape") and y.ndim == 2 and y.shape[1] > 1):
return False
if issparse(y):
if isinstance(y, (dok_matrix, lil_matrix)):
y = y.tocsr()
return (len(y.data) == 0 or np.unique(y.data).size == 1 and
(y.dtype.kind in 'biu' or # bool, int, uint
_is_integral_float(np.unique(y.data))))
else:
labels = np.unique(y)
return len(labels) < 3 and (y.dtype.kind in 'biu' or # bool, int, uint
_is_integral_float(labels))
def type_of_target(y):
"""Determine the type of data indicated by target `y`
Parameters
----------
y : array-like
Returns
-------
target_type : string
One of:
* 'continuous': `y` is an array-like of floats that are not all
integers, and is 1d or a column vector.
* 'continuous-multioutput': `y` is a 2d array of floats that are
not all integers, and both dimensions are of size > 1.
* 'binary': `y` contains <= 2 discrete values and is 1d or a column
vector.
* 'multiclass': `y` contains more than two discrete values, is not a
sequence of sequences, and is 1d or a column vector.
* 'multiclass-multioutput': `y` is a 2d array that contains more
than two discrete values, is not a sequence of sequences, and both
dimensions are of size > 1.
* 'multilabel-indicator': `y` is a label indicator matrix, an array
of two dimensions with at least two columns, and at most 2 unique
values.
* 'unknown': `y` is array-like but none of the above, such as a 3d
array, sequence of sequences, or an array of non-sequence objects.
Examples
--------
>>> import numpy as np
>>> type_of_target([0.1, 0.6])
'continuous'
>>> type_of_target([1, -1, -1, 1])
'binary'
>>> type_of_target(['a', 'b', 'a'])
'binary'
>>> type_of_target([1.0, 2.0])
'binary'
>>> type_of_target([1, 0, 2])
'multiclass'
>>> type_of_target([1.0, 0.0, 3.0])
'multiclass'
>>> type_of_target(['a', 'b', 'c'])
'multiclass'
>>> type_of_target(np.array([[1, 2], [3, 1]]))
'multiclass-multioutput'
>>> type_of_target([[1, 2]])
'multiclass-multioutput'
>>> type_of_target(np.array([[1.5, 2.0], [3.0, 1.6]]))
'continuous-multioutput'
>>> type_of_target(np.array([[0, 1], [1, 1]]))
'multilabel-indicator'
"""
valid = ((isinstance(y, (Sequence, spmatrix)) or hasattr(y, '__array__'))
and not isinstance(y, string_types))
if not valid:
raise ValueError('Expected array-like (array or non-string sequence), '
'got %r' % y)
if is_multilabel(y):
return 'multilabel-indicator'
try:
y = np.asarray(y)
except ValueError:
# Known to fail in numpy 1.3 for array of arrays
return 'unknown'
# The old sequence of sequences format
try:
if (not hasattr(y[0], '__array__') and isinstance(y[0], Sequence)
and not isinstance(y[0], string_types)):
raise ValueError('You appear to be using a legacy multi-label data'
' representation. Sequence of sequences are no'
' longer supported; use a binary array or sparse'
' matrix instead.')
except IndexError:
pass
# Invalid inputs
if y.ndim > 2 or (y.dtype == object and len(y) and
not isinstance(y.flat[0], string_types)):
return 'unknown' # [[[1, 2]]] or [obj_1] and not ["label_1"]
if y.ndim == 2 and y.shape[1] == 0:
return 'unknown' # [[]]
if y.ndim == 2 and y.shape[1] > 1:
suffix = "-multioutput" # [[1, 2], [1, 2]]
else:
suffix = "" # [1, 2, 3] or [[1], [2], [3]]
# check float and contains non-integer float values
if y.dtype.kind == 'f' and np.any(y != y.astype(int)):
# [.1, .2, 3] or [[.1, .2, 3]] or [[1., .2]] and not [1., 2., 3.]
return 'continuous' + suffix
if (len(np.unique(y)) > 2) or (y.ndim >= 2 and len(y[0]) > 1):
return 'multiclass' + suffix # [1, 2, 3] or [[1., 2., 3]] or [[1, 2]]
else:
return 'binary' # [1, 2] or [["a"], ["b"]]
def _check_partial_fit_first_call(clf, classes=None):
"""Private helper function for factorizing common classes param logic
Estimators that implement the ``partial_fit`` API need to be provided with
the list of possible classes at the first call to partial_fit.
Subsequent calls to partial_fit should check that ``classes`` is still
consistent with a previous value of ``clf.classes_`` when provided.
This function returns True if it detects that this was the first call to
``partial_fit`` on ``clf``. In that case the ``classes_`` attribute is also
set on ``clf``.
"""
if getattr(clf, 'classes_', None) is None and classes is None:
raise ValueError("classes must be passed on the first call "
"to partial_fit.")
elif classes is not None:
if getattr(clf, 'classes_', None) is not None:
if not array_equal(clf.classes_, unique_labels(classes)):
raise ValueError(
"`classes=%r` is not the same as on last call "
"to partial_fit, was: %r" % (classes, clf.classes_))
else:
# This is the first call to partial_fit
clf.classes_ = unique_labels(classes)
return True
# classes is None and clf.classes_ has already previously been set:
# nothing to do
return False
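# Minimal usage sketch (illustrative only, not part of the original module):
# how _check_partial_fit_first_call behaves across successive partial_fit-style
# calls. The _DemoEstimator class below is a stand-in invented for this sketch.
def _demo_check_partial_fit_first_call():
    class _DemoEstimator(object):
        """Bare-bones stand-in; ``classes_`` is set by the helper on first call."""
        pass

    clf = _DemoEstimator()
    # First call: ``classes`` must be given and ``classes_`` gets set.
    assert _check_partial_fit_first_call(clf, classes=[0, 1, 2])
    assert list(clf.classes_) == [0, 1, 2]
    # Later calls with consistent (or omitted) classes are not "first" calls.
    assert not _check_partial_fit_first_call(clf, classes=[2, 1, 0])
    assert not _check_partial_fit_first_call(clf)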
def class_distribution(y, sample_weight=None):
"""Compute class priors from multioutput-multiclass target data
Parameters
----------
y : array like or sparse matrix of size (n_samples, n_outputs)
The labels for each example.
sample_weight : array-like of shape = (n_samples,), optional
Sample weights.
Returns
-------
classes : list of size n_outputs of arrays of size (n_classes,)
List of classes for each column.
n_classes : list of integers of size n_outputs
Number of classes in each column.
class_prior : list of size n_outputs of arrays of size (n_classes,)
Class distribution of each column.
"""
classes = []
n_classes = []
class_prior = []
n_samples, n_outputs = y.shape
if issparse(y):
y = y.tocsc()
y_nnz = np.diff(y.indptr)
for k in range(n_outputs):
col_nonzero = y.indices[y.indptr[k]:y.indptr[k + 1]]
# separate sample weights for zero and non-zero elements
if sample_weight is not None:
nz_samp_weight = np.asarray(sample_weight)[col_nonzero]
zeros_samp_weight_sum = (np.sum(sample_weight) -
np.sum(nz_samp_weight))
else:
nz_samp_weight = None
zeros_samp_weight_sum = y.shape[0] - y_nnz[k]
classes_k, y_k = np.unique(y.data[y.indptr[k]:y.indptr[k + 1]],
return_inverse=True)
class_prior_k = bincount(y_k, weights=nz_samp_weight)
# An explicit zero was found, combine its weight with the weight
# of the implicit zeros
if 0 in classes_k:
class_prior_k[classes_k == 0] += zeros_samp_weight_sum
# If there is an implicit zero and it is not in classes and
# class_prior, make an entry for it
if 0 not in classes_k and y_nnz[k] < y.shape[0]:
classes_k = np.insert(classes_k, 0, 0)
class_prior_k = np.insert(class_prior_k, 0,
zeros_samp_weight_sum)
classes.append(classes_k)
n_classes.append(classes_k.shape[0])
class_prior.append(class_prior_k / class_prior_k.sum())
else:
for k in range(n_outputs):
classes_k, y_k = np.unique(y[:, k], return_inverse=True)
classes.append(classes_k)
n_classes.append(classes_k.shape[0])
class_prior_k = bincount(y_k, weights=sample_weight)
class_prior.append(class_prior_k / class_prior_k.sum())
return (classes, n_classes, class_prior)
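# Minimal usage sketch (illustrative only, not part of the original module):
# what class_distribution returns for a small, made-up dense multioutput
# target. The numbers below are invented purely for demonstration.
def _demo_class_distribution():
    import numpy as np

    y = np.array([[1, 0],
                  [2, 0],
                  [1, 3]])
    classes, n_classes, class_prior = class_distribution(y)
    # classes     -> [array([1, 2]), array([0, 3])]
    # n_classes   -> [2, 2]
    # class_prior -> [array([0.667, 0.333]), array([0.667, 0.333])] (approx.)
    return classes, n_classes, class_prior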
|
bsd-3-clause
|
smorad/ast119
|
hw4.py
|
1
|
3115
|
# ASTR119
# HW4
# Team: Forrest Kerslager, Benjamin Smithers, Steven Morad, Kevin Rodriguez,
# Elliot Ghassemi
from numpy import *
from matplotlib.pyplot import *
import urllib2
import os
from datetime import datetime
import scipy.integrate as integ
import sys
def fetch_file(
url = "http://lasp.colorado.edu/lisird/tss/sorce_ssi.csv?&time>=2010-01-01&time<2011-01-01",
localfile = "data_2010-01-01_2011-01-01"):
# Download with urllib
response = urllib2.urlopen(url)
# Save the file locally
fp = open(localfile, "w")
fp.write(response.read())
# Close handles
fp.close()
response.close()
def validateInput(dt_input):
# Check for valid user input and return true or false
try:
dt = datetime.strptime(dt_input, '%Y-%m-%d')
except ValueError:
return False
return True
def hw4(dt_start = "2010-01-01", dt_end = "2011-01-01"):
# Validate and format user date input
if not validateInput(dt_start) or not validateInput(dt_end):
print "Incorrect format, please enter the date as YYYY-MM-DD"
sys.exit(1)
# If data does not exist, fetch it
localfile = "data_" + dt_start + "_" + dt_end
if localfile not in os.listdir('.'):
print "Warning: file " + localfile + " not found; retrieving from the web"
url = "http://lasp.colorado.edu/lisird/tss/sorce_ssi.csv?&time>=" + dt_start + "&time<" + dt_end
fetch_file(url, localfile)
print "File successfully retrieved"
# Load table into multi dimensional array, [[day, wavelength, flux]...]
table = loadtxt(localfile, skiprows = (1), usecols = (0,1,2), delimiter = ",")
# Extract all unique days
days = unique(table[:,0])
wavelengths = unique(table[:,1])
# Initializing data and constants
flux = []
h = 6.626e-34
c = 3.0e8
# Group flux readings into rows according to unique days
# Each row of table has the format [day, wavelength, intensity]
for day in days:
fluxValues = []
for row in table:
if row[0] == day:
# Convert intensity to photon flux by dividing by the photon energy h*c/lambda (wavelength converted nm -> m)
fluxValues.append((row[2] * row[1] * (10.0**-9)) / (h * c))
# Throw out days with NaN readings
if not isnan(fluxValues).any():
flux.append(fluxValues)
# Empty array for the series of integrals
Qo2 = []
# Compute Qo2 for each day's flux calculations
for readings in flux:
Qo2.append(integ.simps(readings, x=wavelengths))
# Print the mean Qo2, 25th, and 75th percentile of deltaQo2.
deltaQo2 = (Qo2 / mean(Qo2)) - 1.0
perc25 = percentile(deltaQo2, 25)
perc75 = percentile(deltaQo2, 75)
print("Average value of Q(o2): " + str(mean(Qo2)))
print("Delta Q: 25th Percentile = " + str(perc25) + ", 75th percentile = " +
str(perc75))
# Plot Data
figure()
# Draw grey bar
fill_between(range(len(deltaQo2)), perc25, perc75, facecolor='grey')
# Label
dt_title= str(datetime.strptime(dt_start, '%Y-%m-%d')) + ' - ' + str(datetime.strptime(dt_end, '%Y-%m-%d'))
xlabel('Day')
ylabel('Delta Q(O2)')
title(dt_title)
# Plot Data
plot(range(len(deltaQo2)), deltaQo2)
# TODO: Part 8
|
gpl-2.0
|
admcrae/tensorflow
|
tensorflow/examples/tutorials/word2vec/word2vec_basic.py
|
28
|
9485
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Basic word2vec example."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import os
import random
import zipfile
import numpy as np
from six.moves import urllib
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
# Step 1: Download the data.
url = 'http://mattmahoney.net/dc/'
def maybe_download(filename, expected_bytes):
"""Download a file if not present, and make sure it's the right size."""
if not os.path.exists(filename):
filename, _ = urllib.request.urlretrieve(url + filename, filename)
statinfo = os.stat(filename)
if statinfo.st_size == expected_bytes:
print('Found and verified', filename)
else:
print(statinfo.st_size)
raise Exception(
'Failed to verify ' + filename + '. Can you get to it with a browser?')
return filename
filename = maybe_download('text8.zip', 31344016)
# Read the data into a list of strings.
def read_data(filename):
"""Extract the first file enclosed in a zip file as a list of words."""
with zipfile.ZipFile(filename) as f:
data = tf.compat.as_str(f.read(f.namelist()[0])).split()
return data
vocabulary = read_data(filename)
print('Data size', len(vocabulary))
# Step 2: Build the dictionary and replace rare words with UNK token.
vocabulary_size = 50000
def build_dataset(words, n_words):
"""Process raw inputs into a dataset."""
count = [['UNK', -1]]
count.extend(collections.Counter(words).most_common(n_words - 1))
dictionary = dict()
for word, _ in count:
dictionary[word] = len(dictionary)
data = list()
unk_count = 0
for word in words:
if word in dictionary:
index = dictionary[word]
else:
index = 0 # dictionary['UNK']
unk_count += 1
data.append(index)
count[0][1] = unk_count
reversed_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
return data, count, dictionary, reversed_dictionary
data, count, dictionary, reverse_dictionary = build_dataset(vocabulary,
vocabulary_size)
del vocabulary # Hint to reduce memory.
print('Most common words (+UNK)', count[:5])
print('Sample data', data[:10], [reverse_dictionary[i] for i in data[:10]])
data_index = 0
# Step 3: Function to generate a training batch for the skip-gram model.
def generate_batch(batch_size, num_skips, skip_window):
global data_index
assert batch_size % num_skips == 0
assert num_skips <= 2 * skip_window
batch = np.ndarray(shape=(batch_size), dtype=np.int32)
labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
span = 2 * skip_window + 1 # [ skip_window target skip_window ]
buffer = collections.deque(maxlen=span)
for _ in range(span):
buffer.append(data[data_index])
data_index = (data_index + 1) % len(data)
for i in range(batch_size // num_skips):
target = skip_window # target label at the center of the buffer
targets_to_avoid = [skip_window]
for j in range(num_skips):
while target in targets_to_avoid:
target = random.randint(0, span - 1)
targets_to_avoid.append(target)
batch[i * num_skips + j] = buffer[skip_window]
labels[i * num_skips + j, 0] = buffer[target]
buffer.append(data[data_index])
data_index = (data_index + 1) % len(data)
# Backtrack a little bit to avoid skipping words at the end of a batch
data_index = (data_index + len(data) - span) % len(data)
return batch, labels
batch, labels = generate_batch(batch_size=8, num_skips=2, skip_window=1)
for i in range(8):
print(batch[i], reverse_dictionary[batch[i]],
'->', labels[i, 0], reverse_dictionary[labels[i, 0]])
# Step 4: Build and train a skip-gram model.
batch_size = 128
embedding_size = 128 # Dimension of the embedding vector.
skip_window = 1 # How many words to consider left and right.
num_skips = 2 # How many times to reuse an input to generate a label.
# We pick a random validation set to sample nearest neighbors. Here we limit the
# validation samples to the words that have a low numeric ID, which by
# construction are also the most frequent.
valid_size = 16 # Random set of words to evaluate similarity on.
valid_window = 100 # Only pick dev samples in the head of the distribution.
valid_examples = np.random.choice(valid_window, valid_size, replace=False)
num_sampled = 64 # Number of negative examples to sample.
graph = tf.Graph()
with graph.as_default():
# Input data.
train_inputs = tf.placeholder(tf.int32, shape=[batch_size])
train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
# Ops and variables pinned to the CPU because of missing GPU implementation
with tf.device('/cpu:0'):
# Look up embeddings for inputs.
embeddings = tf.Variable(
tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
embed = tf.nn.embedding_lookup(embeddings, train_inputs)
# Construct the variables for the NCE loss
nce_weights = tf.Variable(
tf.truncated_normal([vocabulary_size, embedding_size],
stddev=1.0 / math.sqrt(embedding_size)))
nce_biases = tf.Variable(tf.zeros([vocabulary_size]))
# Compute the average NCE loss for the batch.
# tf.nce_loss automatically draws a new sample of the negative labels each
# time we evaluate the loss.
loss = tf.reduce_mean(
tf.nn.nce_loss(weights=nce_weights,
biases=nce_biases,
labels=train_labels,
inputs=embed,
num_sampled=num_sampled,
num_classes=vocabulary_size))
# Construct the SGD optimizer using a learning rate of 1.0.
optimizer = tf.train.GradientDescentOptimizer(1.0).minimize(loss)
# Compute the cosine similarity between minibatch examples and all embeddings.
norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
normalized_embeddings = embeddings / norm
valid_embeddings = tf.nn.embedding_lookup(
normalized_embeddings, valid_dataset)
similarity = tf.matmul(
valid_embeddings, normalized_embeddings, transpose_b=True)
# Add variable initializer.
init = tf.global_variables_initializer()
# Step 5: Begin training.
num_steps = 100001
with tf.Session(graph=graph) as session:
# We must initialize all variables before we use them.
init.run()
print('Initialized')
average_loss = 0
for step in xrange(num_steps):
batch_inputs, batch_labels = generate_batch(
batch_size, num_skips, skip_window)
feed_dict = {train_inputs: batch_inputs, train_labels: batch_labels}
# We perform one update step by evaluating the optimizer op (including it
# in the list of returned values for session.run()).
_, loss_val = session.run([optimizer, loss], feed_dict=feed_dict)
average_loss += loss_val
if step % 2000 == 0:
if step > 0:
average_loss /= 2000
# The average loss is an estimate of the loss over the last 2000 batches.
print('Average loss at step ', step, ': ', average_loss)
average_loss = 0
# Note that this is expensive (~20% slowdown if computed every 500 steps)
if step % 10000 == 0:
sim = similarity.eval()
for i in xrange(valid_size):
valid_word = reverse_dictionary[valid_examples[i]]
top_k = 8 # number of nearest neighbors
nearest = (-sim[i, :]).argsort()[1:top_k + 1]
log_str = 'Nearest to %s:' % valid_word
for k in xrange(top_k):
close_word = reverse_dictionary[nearest[k]]
log_str = '%s %s,' % (log_str, close_word)
print(log_str)
final_embeddings = normalized_embeddings.eval()
# Step 6: Visualize the embeddings.
def plot_with_labels(low_dim_embs, labels, filename='tsne.png'):
assert low_dim_embs.shape[0] >= len(labels), 'More labels than embeddings'
plt.figure(figsize=(18, 18)) # in inches
for i, label in enumerate(labels):
x, y = low_dim_embs[i, :]
plt.scatter(x, y)
plt.annotate(label,
xy=(x, y),
xytext=(5, 2),
textcoords='offset points',
ha='right',
va='bottom')
plt.savefig(filename)
try:
# pylint: disable=g-import-not-at-top
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000)
plot_only = 500
low_dim_embs = tsne.fit_transform(final_embeddings[:plot_only, :])
labels = [reverse_dictionary[i] for i in xrange(plot_only)]
plot_with_labels(low_dim_embs, labels)
except ImportError:
print('Please install sklearn, matplotlib, and scipy to show embeddings.')
|
apache-2.0
|
MohammedWasim/scikit-learn
|
examples/linear_model/plot_bayesian_ridge.py
|
248
|
2588
|
"""
=========================
Bayesian Ridge Regression
=========================
Computes a Bayesian Ridge Regression on a synthetic dataset.
See :ref:`bayesian_ridge_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the coefficient
weights are slightly shifted toward zeros, which stabilises them.
As the prior on the weights is a Gaussian prior, the histogram of the
estimated weights is Gaussian.
The estimation of the model is done by iteratively maximizing the
marginal log-likelihood of the observations.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.linear_model import BayesianRidge, LinearRegression
###############################################################################
# Generating simulated data with Gaussian weights
np.random.seed(0)
n_samples, n_features = 100, 100
X = np.random.randn(n_samples, n_features) # Create Gaussian data
# Create weights with a precision lambda_ of 4.
lambda_ = 4.
w = np.zeros(n_features)
# Only keep 10 weights of interest
relevant_features = np.random.randint(0, n_features, 10)
for i in relevant_features:
w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_))
# Create noise with a precision alpha of 50.
alpha_ = 50.
noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples)
# Create the target
y = np.dot(X, w) + noise
###############################################################################
# Fit the Bayesian Ridge Regression and an OLS for comparison
clf = BayesianRidge(compute_score=True)
clf.fit(X, y)
ols = LinearRegression()
ols.fit(X, y)
###############################################################################
# Plot true weights, estimated weights and histogram of the weights
plt.figure(figsize=(6, 5))
plt.title("Weights of the model")
plt.plot(clf.coef_, 'b-', label="Bayesian Ridge estimate")
plt.plot(w, 'g-', label="Ground truth")
plt.plot(ols.coef_, 'r--', label="OLS estimate")
plt.xlabel("Features")
plt.ylabel("Values of the weights")
plt.legend(loc="best", prop=dict(size=12))
plt.figure(figsize=(6, 5))
plt.title("Histogram of the weights")
plt.hist(clf.coef_, bins=n_features, log=True)
plt.plot(clf.coef_[relevant_features], 5 * np.ones(len(relevant_features)),
'ro', label="Relevant features")
plt.ylabel("Features")
plt.xlabel("Values of the weights")
plt.legend(loc="lower left")
plt.figure(figsize=(6, 5))
plt.title("Marginal log-likelihood")
plt.plot(clf.scores_)
plt.ylabel("Score")
plt.xlabel("Iterations")
plt.show()
|
bsd-3-clause
|
iarroyof/distributionalSemanticStabilityThesis
|
mkl_test.py
|
2
|
12879
|
from modshogun import *
from numpy import *
from sklearn.metrics import r2_score
from scipy.stats import randint
from scipy import stats
from scipy.stats import randint as sp_randint
from scipy.stats import expon
import sys, os
import Gnuplot, Gnuplot.funcutils
#from pdb import set_trace as st
class mkl_regressor():
def __init__(self, widths = None, kernel_weights = None, svm_c = 0.01, mkl_c = 1.0, svm_norm = 1, mkl_norm = 1, degree = 2,
median_width = None, width_scale = None, min_size=2, max_size = 10, kernel_size = None):
self.svm_c = svm_c
self.mkl_c = mkl_c
self.svm_norm = svm_norm
self.mkl_norm = mkl_norm
self.degree = degree
self.widths = widths
self.kernel_weights = kernel_weights
self.median_width = median_width
self.width_scale = width_scale
self.min_size = min_size
self.max_size = max_size
self.kernel_size = kernel_size
def combine_kernel(self):
self.__kernels = CombinedKernel()
for width in self.widths:
kernel = GaussianKernel()
kernel.set_width(width)
kernel.init(self.__feats_train, self.__feats_train)
self.__kernels.append_kernel(kernel)
del kernel
if self.degree > 0:
kernel = PolyKernel(10, self.degree)
kernel.init(self.__feats_train, self.__feats_train)
self.__kernels.append_kernel(kernel)
del kernel
self.__kernels.init(self.__feats_train, self.__feats_train)
def fit(self, X, y, **params):
for parameter, value in params.items():
setattr(self, parameter, value)
labels_train = RegressionLabels(y.reshape((len(y), )))
self.__feats_train = RealFeatures(X.T)
self.combine_kernel()
binary_svm_solver = SVRLight() # seems to be optional, with LibSVR it does not work.
self.__mkl = MKLRegression(binary_svm_solver)
self.__mkl.set_C(self.svm_c, self.svm_c)
self.__mkl.set_C_mkl(self.mkl_c)
self.__mkl.set_mkl_norm(self.mkl_norm)
self.__mkl.set_mkl_block_norm(self.svm_norm)
self.__mkl.set_kernel(self.__kernels)
self.__mkl.set_labels(labels_train)
try:
self.__mkl.train()
except SystemError as inst:
if "Assertion" in str(inst):
sys.stderr.write("""WARNING: Bad parameter combination: [svm_c %f mkl_c %f mkl_norm %f svm_norm %f, degree %d] \n widths %s \n
MKL error [%s]""" % (self.svm_c, self.mkl_c, self.mkl_norm, self.svm_norm, self.degree, self.widths, str(inst)))
pass
self.kernel_weights = self.__kernels.get_subkernel_weights()
self.kernel_size = len(self.kernel_weights)
self.__loaded = False
def predict(self, X):
self.__feats_test = RealFeatures(X.T)
ft = None
if not self.__loaded:
self.__kernels.init(self.__feats_train, self.__feats_test) # test for test
self.__mkl.set_kernel(self.__kernels)
else:
ft = CombinedFeatures()
for i in xrange(self.__mkl.get_kernel().get_num_subkernels()):
ft.append_feature_obj(self.__feats_test)
return self.__mkl.apply_regression(ft).get_labels()
def set_params(self, **params):
for parameter, value in params.items():
setattr(self, parameter, value)
if self.median_width: # If widths are specified, the specified median has priority, so widths will be automatically overwritten.
self.set_param_weights()
return self
def get_params(self, deep=False):
return {param: getattr(self, param) for param in dir(self) if not param.startswith('__') and not '__' in param and not callable(getattr(self,param))}
def score(self, X_t, y_t):
predicted = self.predict(X_t)
return r2_score(predicted, y_t)
def serialize_model (self, file_name, sl="save"):
from os.path import basename, dirname
from bz2 import BZ2File
import pickle
if sl == "save": mode = "wb"
elif sl == "load": mode = "rb"
else: sys.stderr.write("Bad option. Only 'save' and 'load' are available.")
f = BZ2File(file_name + ".bin", mode)
if not f:
sys.stderr.write("Error serializing kernel matrix.")
exit()
if sl == "save":
pickle.dump(self.__mkl, f, protocol=2)
elif sl == "load":
mkl = self.__mkl = pickle.load(f)
self.__loaded = True
else: sys.stderr.write("Bad option. Only 'save' and 'load' are available.")
def save(self, file_name = None):
""" Python reimplementated function for saving a pretrained MKL machine.
This method saves a trained MKL machine to the file 'file_name'. If not 'file_name' is given, a
dictionary 'mkl_machine' containing parameters of the given trained MKL object is returned.
Here we assumed all subkernels of the passed CombinedKernel are of the same family, so uniquely the
first kernel is used for verifying if the passed 'kernel' is a Gaussian mixture. If it is so, we insert
the 'widths' to the model dictionary 'mkl_machine'. An error is returned otherwise.
"""
self._support = []
self._num_support_vectors = self.__mkl.get_num_support_vectors()
self._bias = self.__mkl.get_bias()
for i in xrange(self._num_support_vectors):
self._support.append((self.__mkl.get_alpha(i), self.__mkl.get_support_vector(i)))
self._kernel_family = self.__kernels.get_first_kernel().get_name()
if file_name:
with open(file_name,'w') as f:
f.write(str(self.get_params())+'\n')
self.serialize_model(file_name, "save")
else:
return self.get_params()
def load(self, file_name):
""" This method receives a 'file.model' file name (if it is not in pwd, full path must be given). The loaded file
must contain at least a dictionary at its top. This dictionary must contain keys from which model
parameters will be read (including weights, C, etc.). For example:
{'bias': value, 'param_1': value,...,'support_vectors': [(idx, value),(idx, value)], param_n: value}
The MKL model is tuned to those parameters stored at the given file. Other file with double extension must
be jointly with the model file: '*file.model.bin' where the kernel matrix is encoded together with the kernel
machine.
"""
# Load machine parameters
with open(file_name, 'r') as pointer:
mkl_machine = eval(pointer.read())
# Set loaded parameters
for parameter, value in mkl_machine.items():
setattr(self, parameter, value)
# Load the machine itself
self.serialize_model(file_name, "load") # Instantiates the loaded MKL.
return self
def set_param_weights(self):
"""Gives a vector of weights which distribution is linear. The 'median' value is used both as location parameter and
for scaling parameter. If not size of the output vector is given, a random size between 'min_size' and 'max_size' is
returned."""
assert self.median_width and self.width_scale and self.kernel_size # Width generation needed parameters
self.minimum_width_scale = 0.01
self.widths = linspace(start = self.median_width*self.minimum_width_scale,
stop = self.median_width*self.width_scale,
num = self.kernel_size)
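# Minimal usage sketch (illustrative only, not part of the original script): a
# train / save / load round trip with mkl_regressor. It assumes Shogun
# (modshogun) is installed; the toy data and the '/tmp/mkl_demo.model' path are
# invented for this sketch.
def _demo_mkl_round_trip():
    X = array([[1.0, 2.0, 3.0], [1.0, 2.0, 9.0],
               [0.0, 2.0, 3.0], [1.0, 2.0, 0.0]])
    y = array([2.0, 0.0, 3.0, 1.0])
    machine = mkl_regressor(widths=[0.5, 5.0, 50.0], svm_c=10.0, mkl_c=10.0)
    machine.fit(X, y)
    machine.save("/tmp/mkl_demo.model")   # writes the params file and a '.bin' kernel file
    restored = mkl_regressor().load("/tmp/mkl_demo.model")
    return restored.predict(X)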
class expon_vector(stats.rv_continuous):
def __init__(self, loc = 1.0, scale = None, min_size=2, max_size = 10, size = None):
self.loc = loc
self.scale = scale
self.min_size = min_size
self.max_size = max_size
self.size = size
def rvs(self):
if not self.size:
self.size = randint.rvs(low = self.min_size, high = self.max_size, size = 1)
if self.scale:
return expon.rvs(loc = self.loc * 0.09, scale = self.scale, size = self.size)
else:
return expon.rvs(loc = self.loc * 0.09, scale = self.loc * 8.0, size = self.size)
def test_predict(data, machine = None, file=None, labels = None, out_file=None):
g = Gnuplot.Gnuplot()
if type(machine) is str:
if "mkl_regerssion" == machine:
machine_ = mkl_regressor()
machine_.load(model_file)
# elif other machine types ...
else:
print "Error machine type"
exit()
# elif other machine types ...
else:
machine_ = machine
preds = machine_.predict(data)
if labels is not None:
r2 = r2_score(preds, labels)
print "R^2: ", r2
pred, real = zip(*sorted(zip(preds, labels), key=lambda tup: tup[1]))
else:
pred = preds; real = range(len(pred))
if out_file:
output = {}
output['learned_model'] = out_file
output['estimated_output'] = preds
output['best_params'] = machine_.get_params()
if labels is not None: output['performance'] = r2
with open(out_file, "a") as f:
f.write(str(output)+'\n')
print "Machine Parameters: ", machine_.get_params()
g.plot(Gnuplot.Data(pred, with_="lines"), Gnuplot.Data(real, with_="linesp") )
if __name__ == "__main__":
from sklearn.grid_search import RandomizedSearchCV as RS
# labels = loadtxt("/almac/ignacio/data/sts_all/pairs-NO_2013/STS.gs.OnWN.txt")
# data = loadtxt("/almac/ignacio/data/sts_all/pairs-NO_2013/vectors_H10/pairs_eng-NO-test-2e6-nonempty_OnWN_d2v_H10_sub_m5w8.mtx")
# labels_t = loadtxt("/almac/ignacio/data/sts_all/pairs-NO_2013/STS.gs.FNWN.txt")
# data_t = loadtxt("/almac/ignacio/data/sts_all/pairs-NO_2013/vectors_H10/pairs_eng-NO-test-2e6-nonempty_FNWN_d2v_H10_sub_m5w8.mtx")
# from sklearn.grid_search import RandomizedSearchCV as RS
labels = array([2.0,0.0,2.0,1.0,3.0,2.0])
labels = labels.reshape((len(labels), 1))
data = array([[1.0,2.0,3.0],[1.0,2.0,9.0],[1.0,2.0,3.0],[1.0,2.0,0.0],[0.0,2.0,3.0],[1.0,2.0,3.0]])
labels_t = array([1.,3.,4])
labels_t = labels_t.reshape((len(labels_t), 1))
data_t = array([[20.0,30.0,40.0],[10.0,20.0,30.0],[10.0,20.0,40.0]])
#name_components = shatter_file_name()
model_file = None# "/almac/ignacio/data/mkl_models/mkl_0.model"
out_file = "mkl_outs/mkl_idx_corpus_source_repr_dims_op_other.out"
e = True #True
p = True
X = True
Y = True
if not model_file:
k = 3
N = 2
median_w = 30
print ">> Shapes: labels %s; Data %s\n\tlabelsT %s; DataT %s" % (labels.shape, data.shape, labels_t.shape, data_t.shape)
params = {'svm_c': expon(scale=100, loc=5),
'mkl_c': expon(scale=100, loc=5),
'degree': sp_randint(0, 24),
#'widths': expon_vector(loc = m, min_size = 2, max_size = 10)
'width_scale': [2.0, 2.5, 3.0, 3.5, 4.0],
'median_width': expon(scale=1, loc=median_w),
'kernel_size': [2, 3, 4, 5, 6, 7, 8, 9, 10] }
param_grid = []
for i in xrange(N):
param_grid.append(params)
i = 0
for params in param_grid:
mkl = mkl_regressor()
rs = RS(mkl, param_distributions = params, n_iter = 20, n_jobs = 24, cv = k, scoring="mean_squared_error")#"r2")
rs.fit(data, labels)
rs.best_estimator_.save('/almac/ignacio/data/mkl_models/mkl_%d.model' % i)
if e: # If user wants to save estimates
test_predict(data = data, machine = rs.best_estimator_, labels = labels, out_file = out_file)
if p: # If user wants to predict and save just after training.
assert X is not None # Provide test data
#preds = rs.best_estimator_.predict(data_t)
if Y: # Get performance
test_predict(data = data_t, machine = rs.best_estimator_, labels = labels_t, out_file = out_file + ".pred")
else: # Only predictions
test_predict(data = data_t, machine = rs.best_estimator_, out_file = out_file + ".pred")
sys.stderr.write("\n:>> Finished!!\n" )
else:
idx = 0
test_predict(data = data_t, machine = "mkl_regerssion", file="/almac/ignacio/data/mkl_models/mkl_%d.asc" % idx,
labels = labels_t, out_file = out_file)
sys.stderr.write("\n:>> Finished!!\n" )
|
gpl-2.0
|
jaidevd/scikit-learn
|
sklearn/metrics/cluster/tests/test_bicluster.py
|
394
|
1770
|
"""Testing for bicluster metrics module"""
import numpy as np
from sklearn.utils.testing import assert_equal, assert_almost_equal
from sklearn.metrics.cluster.bicluster import _jaccard
from sklearn.metrics import consensus_score
def test_jaccard():
a1 = np.array([True, True, False, False])
a2 = np.array([True, True, True, True])
a3 = np.array([False, True, True, False])
a4 = np.array([False, False, True, True])
assert_equal(_jaccard(a1, a1, a1, a1), 1)
assert_equal(_jaccard(a1, a1, a2, a2), 0.25)
assert_equal(_jaccard(a1, a1, a3, a3), 1.0 / 7)
assert_equal(_jaccard(a1, a1, a4, a4), 0)
def test_consensus_score():
a = [[True, True, False, False],
[False, False, True, True]]
b = a[::-1]
assert_equal(consensus_score((a, a), (a, a)), 1)
assert_equal(consensus_score((a, a), (b, b)), 1)
assert_equal(consensus_score((a, b), (a, b)), 1)
assert_equal(consensus_score((a, b), (b, a)), 1)
assert_equal(consensus_score((a, a), (b, a)), 0)
assert_equal(consensus_score((a, a), (a, b)), 0)
assert_equal(consensus_score((b, b), (a, b)), 0)
assert_equal(consensus_score((b, b), (b, a)), 0)
def test_consensus_score_issue2445():
''' Different number of biclusters in A and B'''
a_rows = np.array([[True, True, False, False],
[False, False, True, True],
[False, False, False, True]])
a_cols = np.array([[True, True, False, False],
[False, False, True, True],
[False, False, False, True]])
idx = [0, 2]
s = consensus_score((a_rows, a_cols), (a_rows[idx], a_cols[idx]))
# B contains 2 of the 3 biclusters in A, so score should be 2/3
assert_almost_equal(s, 2.0/3.0)
|
bsd-3-clause
|
boomsbloom/dtm-fmri
|
DTM/for_gensim/lib/python2.7/site-packages/pandas/sparse/series.py
|
7
|
28462
|
"""
Data structures for sparse float data. Life is made simpler by dealing only
with float64 data
"""
# pylint: disable=E1101,E1103,W0231
import numpy as np
import warnings
from pandas.types.missing import isnull, notnull
from pandas.types.common import is_scalar
from pandas.core.common import _values_from_object, _maybe_match_name
from pandas.compat.numpy import function as nv
from pandas.core.index import Index, _ensure_index, InvalidIndexError
from pandas.core.series import Series
from pandas.core.frame import DataFrame
from pandas.core.internals import SingleBlockManager
from pandas.core import generic
import pandas.core.common as com
import pandas.core.ops as ops
import pandas.index as _index
from pandas.util.decorators import Appender
from pandas.sparse.array import (make_sparse, _sparse_array_op, SparseArray,
_make_index)
from pandas._sparse import BlockIndex, IntIndex
import pandas._sparse as splib
from pandas.sparse.scipy_sparse import (_sparse_series_to_coo,
_coo_to_sparse_series)
_shared_doc_kwargs = dict(klass='SparseSeries',
axes_single_arg="{0, 'index'}")
# -----------------------------------------------------------------------------
# Wrapper function for Series arithmetic methods
def _arith_method(op, name, str_rep=None, default_axis=None, fill_zeros=None,
**eval_kwargs):
"""
Wrapper function for Series arithmetic operations, to avoid
code duplication.
str_rep, default_axis, fill_zeros and eval_kwargs are not used, but are
present for compatibility.
"""
def wrapper(self, other):
if isinstance(other, Series):
if not isinstance(other, SparseSeries):
other = other.to_sparse(fill_value=self.fill_value)
return _sparse_series_op(self, other, op, name)
elif isinstance(other, DataFrame):
return NotImplemented
elif is_scalar(other):
with np.errstate(all='ignore'):
new_values = op(self.values, other)
return self._constructor(new_values,
index=self.index,
name=self.name)
else: # pragma: no cover
raise TypeError('operation with %s not supported' % type(other))
wrapper.__name__ = name
if name.startswith("__"):
# strip special method names, e.g. `__add__` needs to be `add` when
# passed to _sparse_series_op
name = name[2:-2]
return wrapper
def _sparse_series_op(left, right, op, name):
left, right = left.align(right, join='outer', copy=False)
new_index = left.index
new_name = _maybe_match_name(left, right)
result = _sparse_array_op(left.values, right.values, op, name,
series=True)
return left._constructor(result, index=new_index, name=new_name)
class SparseSeries(Series):
"""Data structure for labeled, sparse floating point data
Parameters
----------
data : {array-like, Series, SparseSeries, dict}
kind : {'block', 'integer'}
fill_value : float
Code for missing value. The default depends on dtype:
0 for int dtype, False for bool dtype, and NaN for other dtypes.
sparse_index : {BlockIndex, IntIndex}, optional
Only if you have one. Mainly used internally
Notes
-----
SparseSeries objects are immutable via the typical Python means. If you
must change values, convert to dense, make your changes, then convert back
to sparse
"""
_subtyp = 'sparse_series'
def __init__(self, data=None, index=None, sparse_index=None, kind='block',
fill_value=None, name=None, dtype=None, copy=False,
fastpath=False):
# we are called internally, so short-circuit
if fastpath:
# data is an ndarray, index is defined
if not isinstance(data, SingleBlockManager):
data = SingleBlockManager(data, index, fastpath=True)
if copy:
data = data.copy()
else:
if data is None:
data = []
if isinstance(data, Series) and name is None:
name = data.name
if isinstance(data, SparseArray):
if index is not None:
assert (len(index) == len(data))
sparse_index = data.sp_index
if fill_value is None:
fill_value = data.fill_value
data = np.asarray(data)
elif isinstance(data, SparseSeries):
if index is None:
index = data.index.view()
if fill_value is None:
fill_value = data.fill_value
# extract the SingleBlockManager
data = data._data
elif isinstance(data, (Series, dict)):
if index is None:
index = data.index.view()
data = Series(data)
res = make_sparse(data, kind=kind, fill_value=fill_value)
data, sparse_index, fill_value = res
elif isinstance(data, (tuple, list, np.ndarray)):
# array-like
if sparse_index is None:
res = make_sparse(data, kind=kind, fill_value=fill_value)
data, sparse_index, fill_value = res
else:
assert (len(data) == sparse_index.npoints)
elif isinstance(data, SingleBlockManager):
if dtype is not None:
data = data.astype(dtype)
if index is None:
index = data.index.view()
else:
data = data.reindex(index, copy=False)
else:
length = len(index)
if data == fill_value or (isnull(data) and isnull(fill_value)):
if kind == 'block':
sparse_index = BlockIndex(length, [], [])
else:
sparse_index = IntIndex(length, [])
data = np.array([])
else:
if kind == 'block':
locs, lens = ([0], [length]) if length else ([], [])
sparse_index = BlockIndex(length, locs, lens)
else:
sparse_index = IntIndex(length, index)
v = data
data = np.empty(length)
data.fill(v)
if index is None:
index = com._default_index(sparse_index.length)
index = _ensure_index(index)
# create/copy the manager
if isinstance(data, SingleBlockManager):
if copy:
data = data.copy()
else:
# create a sparse array
if not isinstance(data, SparseArray):
data = SparseArray(data, sparse_index=sparse_index,
fill_value=fill_value, dtype=dtype,
copy=copy)
data = SingleBlockManager(data, index)
generic.NDFrame.__init__(self, data)
self.index = index
self.name = name
@property
def values(self):
""" return the array """
return self.block.values
def __array__(self, result=None):
""" the array interface, return my values """
return self.block.values
def get_values(self):
""" same as values """
return self.block.to_dense().view()
@property
def block(self):
return self._data._block
@property
def fill_value(self):
return self.block.fill_value
@fill_value.setter
def fill_value(self, v):
self.block.fill_value = v
@property
def sp_index(self):
return self.block.sp_index
@property
def sp_values(self):
return self.values.sp_values
@property
def npoints(self):
return self.sp_index.npoints
@classmethod
def from_array(cls, arr, index=None, name=None, copy=False,
fill_value=None, fastpath=False):
"""
Simplified alternate constructor
"""
return cls(arr, index=index, name=name, copy=copy,
fill_value=fill_value, fastpath=fastpath)
@property
def _constructor(self):
return SparseSeries
@property
def _constructor_expanddim(self):
from pandas.sparse.api import SparseDataFrame
return SparseDataFrame
@property
def kind(self):
if isinstance(self.sp_index, BlockIndex):
return 'block'
elif isinstance(self.sp_index, IntIndex):
return 'integer'
def as_sparse_array(self, kind=None, fill_value=None, copy=False):
""" return my self as a sparse array, do not copy by default """
if fill_value is None:
fill_value = self.fill_value
if kind is None:
kind = self.kind
return SparseArray(self.values, sparse_index=self.sp_index,
fill_value=fill_value, kind=kind, copy=copy)
def __len__(self):
return len(self.block)
@property
def shape(self):
return self._data.shape
def __unicode__(self):
# currently, unicode is same as repr...fixes infinite loop
series_rep = Series.__unicode__(self)
rep = '%s\n%s' % (series_rep, repr(self.sp_index))
return rep
def __array_wrap__(self, result, context=None):
"""
Gets called prior to a ufunc (and after)
See SparseArray.__array_wrap__ for detail.
"""
if isinstance(context, tuple) and len(context) == 3:
ufunc, args, domain = context
args = [getattr(a, 'fill_value', a) for a in args]
with np.errstate(all='ignore'):
fill_value = ufunc(self.fill_value, *args[1:])
else:
fill_value = self.fill_value
return self._constructor(result, index=self.index,
sparse_index=self.sp_index,
fill_value=fill_value,
copy=False).__finalize__(self)
def __array_finalize__(self, obj):
"""
Gets called after any ufunc or other array operations, necessary
to pass on the index.
"""
self.name = getattr(obj, 'name', None)
self.fill_value = getattr(obj, 'fill_value', None)
def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None,
filter_type=None, **kwds):
""" perform a reduction operation """
return op(self.get_values(), skipna=skipna, **kwds)
def __getstate__(self):
# pickling
return dict(_typ=self._typ, _subtyp=self._subtyp, _data=self._data,
fill_value=self.fill_value, name=self.name)
def _unpickle_series_compat(self, state):
nd_state, own_state = state
# recreate the ndarray
data = np.empty(nd_state[1], dtype=nd_state[2])
np.ndarray.__setstate__(data, nd_state)
index, fill_value, sp_index = own_state[:3]
name = None
if len(own_state) > 3:
name = own_state[3]
# create a sparse array
if not isinstance(data, SparseArray):
data = SparseArray(data, sparse_index=sp_index,
fill_value=fill_value, copy=False)
# recreate
data = SingleBlockManager(data, index, fastpath=True)
generic.NDFrame.__init__(self, data)
self._set_axis(0, index)
self.name = name
def __iter__(self):
""" forward to the array """
return iter(self.values)
def _set_subtyp(self, is_all_dates):
if is_all_dates:
object.__setattr__(self, '_subtyp', 'sparse_time_series')
else:
object.__setattr__(self, '_subtyp', 'sparse_series')
def _ixs(self, i, axis=0):
"""
Return the i-th value or values in the SparseSeries by location
Parameters
----------
i : int, slice, or sequence of integers
Returns
-------
value : scalar (int) or Series (slice, sequence)
"""
label = self.index[i]
if isinstance(label, Index):
return self.take(i, axis=axis, convert=True)
else:
return self._get_val_at(i)
def _get_val_at(self, loc):
""" forward to the array """
return self.block.values._get_val_at(loc)
def __getitem__(self, key):
try:
return self.index.get_value(self, key)
except InvalidIndexError:
pass
except KeyError:
if isinstance(key, (int, np.integer)):
return self._get_val_at(key)
elif key is Ellipsis:
return self
raise Exception('Requested index not in this series!')
except TypeError:
# Could not hash item, must be array-like?
pass
key = _values_from_object(key)
if self.index.nlevels > 1 and isinstance(key, tuple):
# to handle MultiIndex labels
key = self.index.get_loc(key)
return self._constructor(self.values[key],
index=self.index[key]).__finalize__(self)
def _get_values(self, indexer):
try:
return self._constructor(self._data.get_slice(indexer),
fastpath=True).__finalize__(self)
except Exception:
return self[indexer]
def _set_with_engine(self, key, value):
return self.set_value(key, value)
def abs(self):
"""
Return an object with absolute value taken. Only applicable to objects
that are all numeric
Returns
-------
abs: type of caller
"""
return self._constructor(np.abs(self.values),
index=self.index).__finalize__(self)
def get(self, label, default=None):
"""
Returns value occupying requested label, default to specified
missing value if not present. Analogous to dict.get
Parameters
----------
label : object
Label value looking for
default : object, optional
Value to return if label not in index
Returns
-------
y : scalar
"""
if label in self.index:
loc = self.index.get_loc(label)
return self._get_val_at(loc)
else:
return default
def get_value(self, label, takeable=False):
"""
Retrieve single value at passed index label
Parameters
----------
index : label
takeable : interpret the index as indexers, default False
Returns
-------
value : scalar value
"""
loc = label if takeable is True else self.index.get_loc(label)
return self._get_val_at(loc)
def set_value(self, label, value, takeable=False):
"""
Quickly set single value at passed label. If label is not contained, a
new object is created with the label placed at the end of the result
index
Parameters
----------
label : object
Partial indexing with MultiIndex not allowed
value : object
Scalar value
takeable : interpret the index as indexers, default False
Notes
-----
This method *always* returns a new object. It is not particularly
efficient but is provided for API compatibility with Series
Returns
-------
series : SparseSeries
"""
values = self.to_dense()
# if the label doesn't exist, we will create a new object here
# and possibly change the index
new_values = values.set_value(label, value, takeable=takeable)
if new_values is not None:
values = new_values
new_index = values.index
values = SparseArray(values, fill_value=self.fill_value,
kind=self.kind)
self._data = SingleBlockManager(values, new_index)
self._index = new_index
def _set_values(self, key, value):
# this might be inefficient as we have to recreate the sparse array
# rather than setting individual elements, but have to convert
# the passed slice/boolean that's in dense space into a sparse indexer
# not sure how to do that!
if isinstance(key, Series):
key = key.values
values = self.values.to_dense()
values[key] = _index.convert_scalar(values, value)
values = SparseArray(values, fill_value=self.fill_value,
kind=self.kind)
self._data = SingleBlockManager(values, self.index)
def to_dense(self, sparse_only=False):
"""
Convert SparseSeries to (dense) Series
"""
if sparse_only:
int_index = self.sp_index.to_int_index()
index = self.index.take(int_index.indices)
return Series(self.sp_values, index=index, name=self.name)
else:
return Series(self.values.to_dense(), index=self.index,
name=self.name)
@property
def density(self):
r = float(self.sp_index.npoints) / float(self.sp_index.length)
return r
def copy(self, deep=True):
"""
Make a copy of the SparseSeries. Only the actual sparse values need to
be copied
"""
new_data = self._data
if deep:
new_data = self._data.copy()
return self._constructor(new_data, sparse_index=self.sp_index,
fill_value=self.fill_value).__finalize__(self)
def reindex(self, index=None, method=None, copy=True, limit=None,
**kwargs):
"""
Conform SparseSeries to new Index
See Series.reindex docstring for general behavior
Returns
-------
reindexed : SparseSeries
"""
new_index = _ensure_index(index)
if self.index.equals(new_index):
if copy:
return self.copy()
else:
return self
return self._constructor(self._data.reindex(new_index, method=method,
limit=limit, copy=copy),
index=new_index).__finalize__(self)
def sparse_reindex(self, new_index):
"""
Conform sparse values to new SparseIndex
Parameters
----------
new_index : {BlockIndex, IntIndex}
Returns
-------
reindexed : SparseSeries
"""
if not isinstance(new_index, splib.SparseIndex):
raise TypeError('new index must be a SparseIndex')
block = self.block.sparse_reindex(new_index)
new_data = SingleBlockManager(block, self.index)
return self._constructor(new_data, index=self.index,
sparse_index=new_index,
fill_value=self.fill_value).__finalize__(self)
def take(self, indices, axis=0, convert=True, *args, **kwargs):
"""
Sparse-compatible version of ndarray.take
Returns
-------
taken : ndarray
"""
convert = nv.validate_take_with_convert(convert, args, kwargs)
new_values = SparseArray.take(self.values, indices)
new_index = self.index.take(indices)
return self._constructor(new_values,
index=new_index).__finalize__(self)
def cumsum(self, axis=0, *args, **kwargs):
"""
Cumulative sum of values. Preserves locations of NaN values
Returns
-------
cumsum : SparseSeries if `self` has a null `fill_value` and a
generic Series otherwise
"""
nv.validate_cumsum(args, kwargs)
new_array = SparseArray.cumsum(self.values)
if isinstance(new_array, SparseArray):
return self._constructor(
new_array, index=self.index,
sparse_index=new_array.sp_index).__finalize__(self)
# TODO: gh-12855 - return a SparseSeries here
return Series(new_array, index=self.index).__finalize__(self)
@Appender(generic._shared_docs['isnull'])
def isnull(self):
arr = SparseArray(isnull(self.values.sp_values),
sparse_index=self.values.sp_index,
fill_value=isnull(self.fill_value))
return self._constructor(arr, index=self.index).__finalize__(self)
@Appender(generic._shared_docs['isnotnull'])
def isnotnull(self):
arr = SparseArray(notnull(self.values.sp_values),
sparse_index=self.values.sp_index,
fill_value=notnull(self.fill_value))
return self._constructor(arr, index=self.index).__finalize__(self)
def dropna(self, axis=0, inplace=False, **kwargs):
"""
Analogous to Series.dropna. If fill_value=NaN, returns a dense Series
"""
# TODO: make more efficient
axis = self._get_axis_number(axis or 0)
dense_valid = self.to_dense().valid()
if inplace:
raise NotImplementedError("Cannot perform inplace dropna"
" operations on a SparseSeries")
if isnull(self.fill_value):
return dense_valid
else:
dense_valid = dense_valid[dense_valid != self.fill_value]
return dense_valid.to_sparse(fill_value=self.fill_value)
@Appender(generic._shared_docs['shift'] % _shared_doc_kwargs)
def shift(self, periods, freq=None, axis=0):
if periods == 0:
return self.copy()
# no special handling of fill values yet
if not isnull(self.fill_value):
shifted = self.to_dense().shift(periods, freq=freq,
axis=axis)
return shifted.to_sparse(fill_value=self.fill_value,
kind=self.kind)
if freq is not None:
return self._constructor(
self.sp_values, sparse_index=self.sp_index,
index=self.index.shift(periods, freq),
fill_value=self.fill_value).__finalize__(self)
int_index = self.sp_index.to_int_index()
new_indices = int_index.indices + periods
start, end = new_indices.searchsorted([0, int_index.length])
new_indices = new_indices[start:end]
new_sp_index = _make_index(len(self), new_indices, self.sp_index)
arr = self.values._simple_new(self.sp_values[start:end].copy(),
new_sp_index, fill_value=np.nan)
return self._constructor(arr, index=self.index).__finalize__(self)
def combine_first(self, other):
"""
Combine Series values, choosing the calling Series's values
first. Result index will be the union of the two indexes
Parameters
----------
other : Series
Returns
-------
y : Series
"""
if isinstance(other, SparseSeries):
other = other.to_dense()
dense_combined = self.to_dense().combine_first(other)
return dense_combined.to_sparse(fill_value=self.fill_value)
def to_coo(self, row_levels=(0, ), column_levels=(1, ), sort_labels=False):
"""
Create a scipy.sparse.coo_matrix from a SparseSeries with MultiIndex.
Use row_levels and column_levels to determine the row and column
coordinates respectively. row_levels and column_levels are the names
(labels) or numbers of the levels. {row_levels, column_levels} must be
a partition of the MultiIndex level names (or numbers).
.. versionadded:: 0.16.0
Parameters
----------
row_levels : tuple/list
column_levels : tuple/list
sort_labels : bool, default False
Sort the row and column labels before forming the sparse matrix.
Returns
-------
y : scipy.sparse.coo_matrix
rows : list (row labels)
columns : list (column labels)
Examples
--------
>>> from numpy import nan
>>> s = Series([3.0, nan, 1.0, 3.0, nan, nan])
>>> s.index = MultiIndex.from_tuples([(1, 2, 'a', 0),
(1, 2, 'a', 1),
(1, 1, 'b', 0),
(1, 1, 'b', 1),
(2, 1, 'b', 0),
(2, 1, 'b', 1)],
names=['A', 'B', 'C', 'D'])
>>> ss = s.to_sparse()
>>> A, rows, columns = ss.to_coo(row_levels=['A', 'B'],
column_levels=['C', 'D'],
sort_labels=True)
>>> A
<3x4 sparse matrix of type '<class 'numpy.float64'>'
with 3 stored elements in COOrdinate format>
>>> A.todense()
matrix([[ 0., 0., 1., 3.],
[ 3., 0., 0., 0.],
[ 0., 0., 0., 0.]])
>>> rows
[(1, 1), (1, 2), (2, 1)]
>>> columns
[('a', 0), ('a', 1), ('b', 0), ('b', 1)]
"""
A, rows, columns = _sparse_series_to_coo(self, row_levels,
column_levels,
sort_labels=sort_labels)
return A, rows, columns
@classmethod
def from_coo(cls, A, dense_index=False):
"""
Create a SparseSeries from a scipy.sparse.coo_matrix.
.. versionadded:: 0.16.0
Parameters
----------
A : scipy.sparse.coo_matrix
dense_index : bool, default False
If False (default), the SparseSeries index consists of only the
coords of the non-null entries of the original coo_matrix.
If True, the SparseSeries index consists of the full sorted
(row, col) coordinates of the coo_matrix.
Returns
-------
s : SparseSeries
Examples
---------
>>> from scipy import sparse
>>> A = sparse.coo_matrix(([3.0, 1.0, 2.0], ([1, 0, 0], [0, 2, 3])),
shape=(3, 4))
>>> A
<3x4 sparse matrix of type '<class 'numpy.float64'>'
with 3 stored elements in COOrdinate format>
>>> A.todense()
matrix([[ 0., 0., 1., 2.],
[ 3., 0., 0., 0.],
[ 0., 0., 0., 0.]])
>>> ss = SparseSeries.from_coo(A)
>>> ss
0 2 1
3 2
1 0 3
dtype: float64
BlockIndex
Block locations: array([0], dtype=int32)
Block lengths: array([3], dtype=int32)
"""
return _coo_to_sparse_series(A, dense_index=dense_index)
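# Minimal usage sketch (illustrative only, not part of the original module):
# constructing a SparseSeries from a dense list with NaN holes and converting
# it back to a dense Series. The sample values are invented for demonstration.
def _demo_sparse_series_round_trip():
    import numpy as np

    sparse = SparseSeries([1.0, np.nan, np.nan, 2.0], kind='block')
    assert sparse.npoints == 2      # only the two non-NaN values are stored
    assert sparse.density == 0.5    # 2 stored points out of 4 positions
    dense = sparse.to_dense()       # back to an ordinary pandas Series
    assert len(dense) == 4
    return dense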
# overwrite series methods with unaccelerated versions
ops.add_special_arithmetic_methods(SparseSeries, use_numexpr=False,
**ops.series_special_funcs)
ops.add_flex_arithmetic_methods(SparseSeries, use_numexpr=False,
**ops.series_flex_funcs)
# overwrite basic arithmetic to use SparseSeries version
# force methods to overwrite previous definitions.
ops.add_special_arithmetic_methods(SparseSeries, _arith_method,
comp_method=_arith_method,
bool_method=None, use_numexpr=False,
force=True)
# backwards compatibility
class SparseTimeSeries(SparseSeries):
def __init__(self, *args, **kwargs):
# deprecation TimeSeries, #10890
warnings.warn("SparseTimeSeries is deprecated. Please use "
"SparseSeries", FutureWarning, stacklevel=2)
super(SparseTimeSeries, self).__init__(*args, **kwargs)
|
mit
|
Python4AstronomersAndParticlePhysicists/PythonWorkshop-ICE
|
notebooks/ml/solutions/exercise_6.py
|
1
|
2217
|
from sklearn.metrics import precision_recall_curve, roc_curve, roc_auc_score, average_precision_score
from sklearn.model_selection import StratifiedKFold
from sklearn.tree import DecisionTreeClassifier
from sklearn.datasets import make_moons
from matplotlib import patches
import matplotlib.pyplot as plt
import numpy as np
X, y = make_moons(n_samples=5000, noise=0.9)
clf = DecisionTreeClassifier(min_samples_leaf=50)
cv = StratifiedKFold(n_splits=5)
fig, ([ax1, ax2], [ax3, ax4]) = plt.subplots(2, 2, figsize=(12, 12))
roc_auc = []
pr_auc = []
for train, test in cv.split(X, y):
X_train, X_test, y_train, y_test = X[train], X[test], y[train], y[test]
clf.fit(X_train, y_train)
prediction = clf.predict_proba(X_test)[:, 1]
p, r, thresholds_pr = precision_recall_curve(y_test, prediction)
fpr, tpr, thresholds_roc = roc_curve(y_test, prediction)
roc_auc.append(roc_auc_score(y_test, prediction))
pr_auc.append(average_precision_score(y_test, prediction))
ax1.step(thresholds_pr, r[: -1], color='gray', where='post')
ax1.step(thresholds_pr, p[: -1], color='darkgray', where='post')
ax2.step(r, p, color='darkmagenta', where='post')
ax3.step(thresholds_roc, tpr, color='gray', where='post')
ax3.step(thresholds_roc, fpr, color='darkgray', where='post')
ax4.step(fpr, tpr, color='mediumvioletred', where='post')
p1 = patches.Patch(color='gray', label='Recall')
p2 = patches.Patch(color='darkgray', label='Precision')
ax1.legend(handles=[p1, p2])
ax1.set_xlabel('Decision Threshold')
ax1.set_xlim([0, 1])
ax1.set_ylim([0, 1])
ax2.set_xlim([0, 1])
ax2.set_ylim([0, 1])
ax2.set_ylabel('Precision')
ax2.set_xlabel('Recall')
s = 'AUC {:0.3f} +/- {:0.3f}'.format(np.array(pr_auc).mean(), np.array(pr_auc).std())
ax2.text(0.2, 0.2, s)
p1 = patches.Patch(color='gray', label='True Positive Rate')
p2 = patches.Patch(color='darkgray', label='False Positive Rate')
ax3.legend(handles=[p1, p2])
ax3.set_xlabel('Decision Threshold')
ax3.set_xlim([0, 1])
ax3.set_ylim([0, 1])
ax4.set_xlim([0, 1])
ax4.set_ylim([0, 1])
ax4.set_ylabel('True Positive Rate')
ax4.set_xlabel('False Positive Rate')
s = 'AUC {:0.3f} +/- {:0.3f}'.format(np.array(roc_auc).mean(), np.array(roc_auc).std())
ax4.text(0.2, 0.2, s)
None
|
mit
|
MJuddBooth/pandas
|
pandas/tests/io/msgpack/test_pack.py
|
1
|
5318
|
# coding: utf-8
from collections import OrderedDict
import struct
import pytest
from pandas.compat import u
from pandas import compat
from pandas.io.msgpack import Packer, Unpacker, packb, unpackb
class TestPack(object):
def check(self, data, use_list=False):
re = unpackb(packb(data), use_list=use_list)
assert re == data
def testPack(self):
test_data = [
0, 1, 127, 128, 255, 256, 65535, 65536,
-1, -32, -33, -128, -129, -32768, -32769,
1.0,
b"", b"a", b"a" * 31, b"a" * 32,
None, True, False,
(), ((),), ((), None,),
{None: 0},
(1 << 23),
]
for td in test_data:
self.check(td)
def testPackUnicode(self):
test_data = [u(""), u("abcd"), [u("defgh")], u("Русский текст"), ]
for td in test_data:
re = unpackb(
packb(td, encoding='utf-8'), use_list=1, encoding='utf-8')
assert re == td
packer = Packer(encoding='utf-8')
data = packer.pack(td)
re = Unpacker(
compat.BytesIO(data), encoding='utf-8', use_list=1).unpack()
assert re == td
def testPackUTF32(self):
test_data = [
compat.u(""),
compat.u("abcd"),
[compat.u("defgh")],
compat.u("Русский текст"),
]
for td in test_data:
re = unpackb(
packb(td, encoding='utf-32'), use_list=1, encoding='utf-32')
assert re == td
def testPackBytes(self):
test_data = [b"", b"abcd", (b"defgh", ), ]
for td in test_data:
self.check(td)
def testIgnoreUnicodeErrors(self):
re = unpackb(
packb(b'abc\xeddef'), encoding='utf-8', unicode_errors='ignore',
use_list=1)
assert re == "abcdef"
def testStrictUnicodeUnpack(self):
msg = (r"'utf-*8' codec can't decode byte 0xed in position 3:"
" invalid continuation byte")
with pytest.raises(UnicodeDecodeError, match=msg):
unpackb(packb(b'abc\xeddef'), encoding='utf-8', use_list=1)
def testStrictUnicodePack(self):
msg = (r"'ascii' codec can't encode character u*'\\xed' in position 3:"
r" ordinal not in range\(128\)")
with pytest.raises(UnicodeEncodeError, match=msg):
packb(compat.u("abc\xeddef"), encoding='ascii',
unicode_errors='strict')
def testIgnoreErrorsPack(self):
re = unpackb(
packb(
compat.u("abcФФФdef"), encoding='ascii',
unicode_errors='ignore'), encoding='utf-8', use_list=1)
assert re == compat.u("abcdef")
def testNoEncoding(self):
msg = "Can't encode unicode string: no encoding is specified"
with pytest.raises(TypeError, match=msg):
packb(compat.u("abc"), encoding=None)
def testDecodeBinary(self):
re = unpackb(packb("abc"), encoding=None, use_list=1)
assert re == b"abc"
def testPackFloat(self):
assert packb(1.0,
use_single_float=True) == b'\xca' + struct.pack('>f', 1.0)
assert packb(
1.0, use_single_float=False) == b'\xcb' + struct.pack('>d', 1.0)
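# Added note: 0xca and 0xcb are the msgpack type markers for float 32 and
# float 64 respectively, so these asserts pin down the exact wire format
# selected by use_single_float.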
def testArraySize(self, sizes=[0, 5, 50, 1000]):
bio = compat.BytesIO()
packer = Packer()
for size in sizes:
bio.write(packer.pack_array_header(size))
for i in range(size):
bio.write(packer.pack(i))
bio.seek(0)
unpacker = Unpacker(bio, use_list=1)
for size in sizes:
assert unpacker.unpack() == list(range(size))
def test_manualreset(self, sizes=[0, 5, 50, 1000]):
packer = Packer(autoreset=False)
for size in sizes:
packer.pack_array_header(size)
for i in range(size):
packer.pack(i)
bio = compat.BytesIO(packer.bytes())
unpacker = Unpacker(bio, use_list=1)
for size in sizes:
assert unpacker.unpack() == list(range(size))
packer.reset()
assert packer.bytes() == b''
def testMapSize(self, sizes=[0, 5, 50, 1000]):
bio = compat.BytesIO()
packer = Packer()
for size in sizes:
bio.write(packer.pack_map_header(size))
for i in range(size):
bio.write(packer.pack(i)) # key
bio.write(packer.pack(i * 2)) # value
bio.seek(0)
unpacker = Unpacker(bio)
for size in sizes:
assert unpacker.unpack() == {i: i * 2 for i in range(size)}
def test_odict(self):
seq = [(b'one', 1), (b'two', 2), (b'three', 3), (b'four', 4)]
od = OrderedDict(seq)
assert unpackb(packb(od), use_list=1) == dict(seq)
def pair_hook(seq):
return list(seq)
assert unpackb(
packb(od), object_pairs_hook=pair_hook, use_list=1) == seq
def test_pairlist(self):
pairlist = [(b'a', 1), (2, b'b'), (b'foo', b'bar')]
packer = Packer()
packed = packer.pack_map_pairs(pairlist)
unpacked = unpackb(packed, object_pairs_hook=list)
assert pairlist == unpacked
|
bsd-3-clause
|
daniel20162016/my-first
|
read_xml_all/calcul_matrix_compare_je_good_192matrix_try_1.py
|
1
|
6519
|
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 31 15:45:22 2016
@author: wang
"""
#from matplotlib import pylab as plt
#from numpy import fft, fromstring, int16, linspace
#import wave
from read_wav_xml_good_1 import *
from matrix_24_2 import *
from max_matrix_norm import *
import numpy as np
# open a wave file
filename = 'francois_filon_pure_2.wav'
filename_1 ='francois_filon_pure_2.xml'
word ='je'
wave_signal_float,framerate, word_start_point, word_length_point, word_end_point= read_wav_xml_good_1(filename,filename_1,word)
print 'word_start_point=',word_start_point
print 'word_length_point=',word_length_point
print 'word_end_point=',word_end_point
XJ_1 =wave_signal_float
t_step=1920;
t_entre_step=1440;
t_du_1_1 = int(word_start_point[0]);
t_du_1_2 = int(word_end_point[0]);
t_du_2_1 = int(word_start_point[1]);
t_du_2_2 = int(word_end_point[1]);
t_du_3_1 = int(word_start_point[2]);
t_du_3_2 = int(word_end_point[2]);
#t_du_4_1 = int(word_start_point[3]);
#t_du_4_2 = int(word_end_point[3]);
#
#t_du_5_1 = int(word_start_point[4]);
#t_du_5_2 = int(word_end_point[4]);
fs=framerate
#XJ_du_1 = wave_signal_float[(t_du_1_1-1):t_du_1_2];
#length_XJ_du_1 = int(word_length_point[0]+1);
#x1,y1,z1=matrix_24_2(XJ_du_1,fs)
#x1=max_matrix_norm(x1)
#==============================================================================
# this part computes the first matrix
#==============================================================================
XJ_du_1_2 = XJ_1[(t_du_1_1-1):(t_du_1_1+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_1 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_1[i]=x1_1[i]
#==============================================================================
# the remaining columns hold the FFT of the following windows
#==============================================================================
for i in range(1,8):
XJ_du_1_total = XJ_1[(t_du_1_1+t_entre_step*(i)-1):(t_du_1_1+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_1[24*i+j]=x1_all[j]
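# Layout note (added comment): each 192-element vector holds 8 analysis windows
# times 24 normalized coefficients returned by matrix_24_2; window i starts
# t_entre_step*i samples after the word onset and spans t_step samples, so
# consecutive windows overlap by t_step - t_entre_step = 480 samples.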
#==============================================================================
# this part computes the second matrix
#==============================================================================
for k in range (1,2):
t_start=t_du_2_1
XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_2 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_2[i]=x1_1[i]
#==============================================================================
# the remaining columns hold the FFT of the following windows
#==============================================================================
for i in range(1,8):
XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_2[24*i+j]=x1_all[j]
#==============================================================================
# this part computes the third matrix
#==============================================================================
for k in range (1,2):
t_start=t_du_3_1
XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_3 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_3[i]=x1_1[i]
#==============================================================================
# the remaining columns hold the FFT of the following windows
#==============================================================================
for i in range(1,8):
XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_3[24*i+j]=x1_all[j]
#
##==============================================================================
## this part is to calcul the 4 matrix
##==============================================================================
#for k in range (1,2):
# t_start=t_du_4_1
# XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];
# x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
# x1_1=max_matrix_norm(x1_1)
# matrix_all_step_new_4 = np.zeros([192])
# for i in range(0,24):
# matrix_all_step_new_4[i]=x1_1[i]
##==============================================================================
## the other colonne is the all fft
##==============================================================================
# for i in range(1,8):
## print i
# XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];
# x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
# x1_all=max_matrix_norm(x1_all)
# for j in range(0,24):
# matrix_all_step_new_4[24*i+j]=x1_all[j]
##print 'matrix_all_step_4=',matrix_all_step_4
##==============================================================================
## this part is to calcul the 5 matrix
##==============================================================================
#for k in range (1,2):
# t_start=t_du_5_1
# XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];
# x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
# x1_1=max_matrix_norm(x1_1)
# matrix_all_step_new_5 = np.zeros([192])
# for i in range(0,24):
# matrix_all_step_new_5[i]=x1_1[i]
##==============================================================================
## the other colonne is the all fft
##==============================================================================
# for i in range(1,8):
## print i
# XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];
# x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
# x1_all=max_matrix_norm(x1_all)
# for j in range(0,24):
# matrix_all_step_new_5[24*i+j]=x1_all[j]
##print 'matrix_all_step_5=',matrix_all_step_5
#np.savez('je_compare_192_matrix_2.npz',matrix_all_step_new_1,matrix_all_step_new_2,matrix_all_step_new_3,matrix_all_step_new_4,matrix_all_step_new_5)
np.savez('je_compare_192_matrix_3_je.npz',matrix_all_step_new_1,matrix_all_step_new_2,matrix_all_step_new_3)
|
mit
|
GoogleCloudPlatform/ipython-soccer-predictions
|
predict/power.py
|
3
|
7389
|
#!/usr/bin/python2.7
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Ranks soccer teams by computing a power index based
on game outcomes.
"""
import numpy as np
from numpy.linalg import LinAlgError
import pandas as pd
import world_cup
def _build_team_matrix(data, target_col):
""" Given a dataframe of games, builds a sparse power matrix.
We expect the input data to have two back to back rows for
each game. The first row will have information about the home
team, the second row will have information about the away team.
The matrix we compute will have columns representing teams and
rows representing games. For each game, the home team will have
a positive value in that team's column. The away team will have a
negative value in that column. Since home advantage is so
important in soccer, we discount the home team by a certain
margin. Note that we also have to be somewhat careful here,
because for world cup data, we use values of is_home that are
not binary (that is, they range between 0.0 and 1.0).
The final column in the power matrix is a points value,
computed as the difference between the target column for the
home team and the target column for the away team.
"""
teams = {}
nrows = len(data) / 2
for teamid in data['teamid']:
teams[str(teamid)] = pd.Series(np.zeros(nrows))
result = pd.Series(np.empty(nrows))
teams[target_col] = result
current_season = None
current_discount = 2.0
for game in xrange(nrows):
home = data.iloc[game * 2]
away = data.iloc[game * 2 + 1]
if home['seasonid'] != current_season:
# Discount older seasons.
current_season = home['seasonid']
current_discount *= 0.6
print "New season %s" % (current_season,)
home_id = str(home['teamid'])
away_id = str(away['teamid'])
points = home[target_col] - away[target_col]
# Discount home team's performance.
teams[home_id][game] = (1.0 + home['is_home'] * .25) / current_discount
teams[away_id][game] = (-1.0 - away['is_home'] * .25) / current_discount
result[game] = points
return pd.DataFrame(teams)
def _build_power(games, outcomes, coerce_fn, acc=0.0001, alpha=1.0, snap=True):
""" Builds power model over a set of related games (they
should all be from the same competition, for example).
Given a series of games and their outcome, builds a logistic
regression model that computes a relative ranking for the teams.
Returns a dict of team id to power ranking between 0 and 1.
If snap is set, the rankings are bucketed into quartiles. This
is useful because we may only have rough estimates of power
rating and we don't want to get a false specificity.
"""
outcomes = pd.Series([coerce_fn(val) for val in outcomes])
model = world_cup.build_model_logistic(outcomes, games,
acc=acc, alpha=alpha)
# print model.summary()
params = np.exp(model.params)
del params['intercept']
params = params[params != 1.0]
max_param = params.max()
min_param = params.min()
param_range = max_param - min_param
if len(params) == 0 or param_range < 0.0001:
return None
params = params.sub(min_param)
params = params.div(param_range)
qqs = np.percentile(params, [20, 40, 60, 80])
def _snap(val):
""" Snaps a value to a quartile. """
for idx in xrange(len(qqs)):
if (qqs[idx] > val):
return idx * 0.25
return 1.0
if snap:
# Snap power data to rough quartiles.
return params.apply(_snap).to_dict()
else:
return params.to_dict()
def _get_power_map(competition, competition_data, col, coerce_fn):
""" Given the games in a competition and the target column
describing the result, compute a power ranking of the teams.
Since the 'fit' is likely to be fairly loose, we may
have to try several times with different regularization and
alpha parameters before we get it to converge.
Returns a map of team id to power ranking.
"""
acc = 0.000001
alpha = 0.5
while True:
if alpha < 0.1:
print "Skipping power ranking for competition %s column %s" % (
competition, col)
return {}
try:
games = _build_team_matrix(competition_data, col)
outcomes = games[col]
del games[col]
competition_power = _build_power(games, outcomes, coerce_fn, acc,
alpha, snap=False)
if not competition_power:
alpha /= 2
print 'Reducing alpha for %s to %f due to lack of range' % (
competition, alpha)
else:
return competition_power
except LinAlgError, err:
alpha /= 2
print 'Reducing alpha for %s to %f due to error %s' % (
competition, alpha, err)
def add_power(data, power_train_data, cols):
""" Adds a number of power columns to a data frame.
Splits the power_train_data into competitions (since those will
have disjoint power statistics; for example, EPL teams don't play
MLS teams (in regular games), so trying to figure out which team is
stronger based on wins and losses isn't going to be useful.
Each entry in cols should be a column name that will be used to
predict, a function that will evaluate the difference in that
column between the two teams that played a game, and a final
name that will be used to name the resulting power column.
Returns a data frame that is equivalent to 'data' amended with
the power statistics for the primary team in the row.
"""
data = data.copy()
competitions = data['competitionid'].unique()
for (col, coerce_fn, final_name) in cols:
power = {}
for competition in competitions:
competition_data = power_train_data[
power_train_data['competitionid'] == competition]
power.update(
_get_power_map(competition, competition_data, col, coerce_fn))
names = {}
power_col = pd.Series(np.zeros(len(data)), data.index)
for index in xrange(len(data)):
teamid = str(data.iloc[index]['teamid'])
names[data.iloc[index]['team_name']] = power.get(teamid, 0.5)
power_col.iloc[index] = power.get(teamid, 0.5)
print ['%s: %0.03f' % (x[0], x[1])
for x in sorted(names.items(), key=(lambda x: x[1]))]
data['power_%s' % (final_name)] = power_col
return data
|
apache-2.0
|
ucbtrans/sumo-project
|
car_following_model/simulation.py
|
1
|
10824
|
'''
Car-following simulation of a string of vehicles using one of several
longitudinal models (Krauss, IDM, Improved IDM, Gipps, Helly, platoon).
'''
import sys
import numpy as np
import matplotlib.pyplot as plt
from Vehicle import Vehicle
import plot_routines as pr
import pickle
def initialize():
'''
Returns the array of vehicles, the simulation step length in seconds,
and the total simulation time in seconds.
'''
dt = 0.05 # seconds
total_time = 60 # seconds
total_vehicles = 50
l = 5 # meters
v_init = 0
v_max = 20 # m/s
v_max_lead = 20
a = 1.5 # acceleration in m/s^2
b = 2 # deceleration in m/s^2
g_min = 4 # meters
acc_g_min = 3 # meters
platoon_g_min = 3 # meters
stop_location = 300 # meters
tau = 2.05 # seconds
#acc_tau = 1.05 # seconds
acc_tau = 1.1 # seconds
#platoon_tau = 0.75 # seconds
platoon_tau = 0.8 # seconds
acc_penetration = 0
enable_platoons = False
#enable_platoons = True
#model = 'krauss' # Krauss
#model = 'idm' # IDM
model = 'iidm' # Improved IDM
#model = 'gipps' # Gipps
#model = 'helly' # Helly
#model = 'platoon' # Platoon
vehicles = []
is_acc = False
global acc_veh
#acc_veh = [True, False, True, False, False, True, False, False, True, True, False, False, False, False, False, False, False, True, True, True, False, False, True, True, False, True, False, False, False, True, True, False, True, True, False, False, True, True, False, True, True, True, True, True, True, False, True, True, False, False, False, False, True, False, False, False, True, True, False, False]
acc_dist_pickle = 'acc_distribution_{}.pickle'.format(int(100*acc_penetration))
acc_dist_pickle2 = acc_dist_pickle
######acc_dist_pickle = None
if acc_dist_pickle != None:
with open(acc_dist_pickle, 'rb') as f:
acc_veh = pickle.load(f)
f.close()
else:
acc_veh = []
for i in range(0, total_vehicles):
my_g_min = g_min
my_tau = tau
my_model = model
if ((acc_dist_pickle == None) and (np.random.rand() <= acc_penetration)) or \
((acc_dist_pickle != None) and (acc_veh[i])):
if acc_dist_pickle == None:
acc_veh.append(True)
my_g_min = acc_g_min
my_tau = acc_tau
if is_acc:
if enable_platoons:
my_model = 'platoon'
my_g_min = platoon_g_min
my_tau = platoon_tau
else:
is_acc = True
else:
if acc_dist_pickle == None:
acc_veh.append(False)
is_acc = False
if i == 0:
pos = 0
else:
pos -= (l + my_g_min)
if i == 0:
veh = Vehicle(i+1, pos, l=l, v=v_init, a=a, b=b, v_max=v_max_lead, g_min=my_g_min, tau=my_tau, stop_x=stop_location, model=my_model)
else:
veh = Vehicle(i+1, pos, l=l, v=v_init, a=a, b=b, v_max=v_max, g_min=my_g_min, tau=my_tau, stop_x=stop_location, model=my_model)
vehicles.append(veh)
with open(acc_dist_pickle2, 'wb') as f:
pickle.dump(acc_veh, f)
f.close()
return vehicles, dt, total_time
def simulation_step(vehicles, dt):
'''
Advance one simulation step.
vehicles - array of vehicles
dt - length of simulation step in seconds
'''
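# Added note: the loop below walks the platoon from the last follower up to the
# lead vehicle (vehicles[-1] ... vehicles[0]); the lead vehicle is stepped with
# leader=None, every other vehicle is stepped against the vehicle ahead of it.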
sz = len(vehicles)
for i in range(1, sz+1):
if i == sz:
vehicles[-i].step(None, dt=dt)
else:
vehicles[-i].step(vehicles[-i-1], dt)
return
def run_simulation(vehicles, dt, total_time):
'''
Run simulation.
vehicles - array of vehicles
dt - length of simulation step in seconds
total_time - time limit of the simulation
'''
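# Added note: a virtual detector sits at sensor_loc; when vehicle i crosses it,
# its headway theta (seconds) is converted to a flow of 3600/theta vehicles per
# hour, while theta0 = tau + (g_min + l)/v_max gives the steady-state throughput
# implied by the model parameters.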
sz = len(vehicles)
sensor_loc = 0.1
step = 0
i = 0
t_prev = 0
position = []
dx = []
dv = []
flow = []
flow1 = []
speed = []
safe_speed = []
leader_speed = []
accel = []
max_speed = []
time = []
time2 = []
ss_throughput = []
while step*dt < total_time:
step += 1
if i == 0 and i < sz and vehicles[i].get_position() >= sensor_loc and vehicles[i+1].get_speed() > 0:
theta0 = vehicles[i+1].tau + float(vehicles[i+1].g_min+vehicles[i+1].l)/vehicles[i].get_max_speed()
theta = vehicles[i+1].get_headway()
dx.append(vehicles[i+1].get_distance_headway())
dv.append(vehicles[i].get_speed() - vehicles[i+1].get_speed())
position.append(vehicles[i].get_position())
max_speed.append(vehicles[i].get_max_speed())
speed.append(vehicles[i].get_speed())
accel.append(vehicles[i].get_acceleration())
time.append(step*dt)
ss_throughput.append(3600/theta0)
flow.append(3600/theta)
i += 1
if i > 0 and i < sz-1 and vehicles[i].get_position() >= sensor_loc:
theta0 = vehicles[i+1].tau + float(vehicles[i].g_min+vehicles[i+1].l)/vehicles[i+1].get_max_speed()
theta = vehicles[i+1].get_headway()
theta1 = step*dt - t_prev
t_prev = step*dt
position.append(vehicles[i].get_position())
dx.append(vehicles[i+1].get_distance_headway())
dv.append(vehicles[i].get_speed() - vehicles[i+1].get_speed())
max_speed.append(vehicles[i].get_max_speed())
speed.append(vehicles[i].get_speed())
safe_speed.append(vehicles[i].get_safe_speed(vehicles[i-1].get_position(), vehicles[i-1].get_speed()))
leader_speed.append(vehicles[i-1].get_speed())
accel.append(vehicles[i].get_acceleration())
time.append(step*dt)
time2.append(step*dt)
ss_throughput.append(3600/theta0)
flow.append(3600/theta)
flow1.append(3600/theta1)
i += 1
simulation_step(vehicles, dt)
if True:
#fname = 'a_08.pickle'
#fname = 'a_15.pickle'
#fname = 'a_25.pickle'
#fname = 'acc_0.pickle'
#fname = 'acc_50.pickle'
#fname = 'acc_50p.pickle'
#fname = 'acc_100.pickle'
#fname = 'acc_100p.pickle'
#fname = 'gipps.pickle'
#fname = 'iidm.pickle'
#fname = 'helly.pickle'
#fname = 'd_8300.pickle'
fname = 'd_300.pickle'
with open(fname, 'wb') as f:
pickle.dump(time, f)
pickle.dump(time2, f)
pickle.dump(dx, f)
pickle.dump(dv, f)
pickle.dump(speed, f)
pickle.dump(accel, f)
pickle.dump(flow1, f)
f.close()
if False:
plt.figure()
plt.plot(time, position)
plt.plot(time, position, 'o')
plt.xlabel('Time (seconds)')
plt.ylabel('Position (meters)')
#plt.show()
plt.figure()
plt.plot(time, dx)
plt.plot(time, dx, 'o')
plt.xlabel('Time (seconds)')
plt.ylabel('Distance to Leader (meters)')
#plt.show()
plt.figure()
plt.plot(time, max_speed, 'r')
plt.plot(time, speed)
plt.plot(time, speed, 'o')
plt.xlabel('Time (seconds)')
plt.ylabel('Speed (m/s)')
#plt.show()
if False:
plt.figure()
plt.plot(time, max_speed, 'r')
plt.plot(time2, safe_speed)
plt.plot(time2, safe_speed, 'o')
plt.xlabel('Time (seconds)')
plt.ylabel('Safe Speed (m/s)')
#plt.show()
if False:
plt.figure()
plt.plot(time, max_speed, 'r')
plt.plot(time2, leader_speed)
plt.plot(time2, leader_speed, 'o')
plt.xlabel('Time (seconds)')
plt.ylabel('Leader Speed (m/s)')
#plt.show()
plt.figure()
plt.plot(time, accel)
plt.plot(time, accel, 'o')
plt.xlabel('Time (seconds)')
plt.ylabel('Acceleration (m/s^2)')
#plt.show()
plt.figure()
plt.plot(time, dv)
plt.plot(time, dv, 'o')
plt.xlabel('Time (seconds)')
plt.ylabel('Speed Difference (m/s)')
#plt.show()
plt.figure()
plt.plot(time, ss_throughput, 'r')
plt.plot(time2, flow1, 'k')
plt.plot(time2, flow1, 'o')
#plt.plot(time, flow)
#plt.plot(time, flow, 'o')
plt.xlabel('Time (seconds)')
plt.ylabel('Flow (vph)')
plt.show()
if True:
return
i = 0
total = 10
vehicles_to_plot = range(i, i+total)
#vehicles_to_plot = [0, 5, 10, 15, 20, 25]
plt.figure()
plt.plot([vehicles[i].time[0], vehicles[i].time[-1]], [sensor_loc, sensor_loc], 'k')
for j in vehicles_to_plot:
plt.plot(vehicles[j].time, vehicles[j].trajectory)
plt.xlabel('Time (seconds)')
plt.ylabel('Position (meters)')
plt.axis([0, 60, -100, 350])
plt.figure()
plt.plot([vehicles[i].time[0], vehicles[i].time[-1]], [vehicles[i].get_max_speed(), vehicles[i].get_max_speed()], 'r')
for j in vehicles_to_plot:
plt.plot(vehicles[j].time, vehicles[j].speed)
plt.xlabel('Time (seconds)')
plt.ylabel('Speed (m/s)')
plt.figure()
for j in vehicles_to_plot:
plt.plot(vehicles[j].time, vehicles[j].acceleration)
plt.xlabel('Time (seconds)')
plt.ylabel('Acceleration (m/s^2)')
plt.axis([0, 60, -4, 2])
plt.show()
if True:
plt.figure()
for j in vehicles_to_plot:
plt.plot(vehicles[j].time, vehicles[j].distance_headway)
plt.xlabel('Time (seconds)')
plt.ylabel('Distance Headway (meters)')
plt.show()
#if False:
plt.figure()
for j in vehicles_to_plot:
plt.plot(vehicles[j].time, vehicles[j].headway)
plt.xlabel('Time (seconds)')
plt.ylabel('Headway (seconds)')
plt.show()
if False:
pr.contour(vehicles, dtype='v', dflt=0, title='Speed (m/s)')
pr.contour(vehicles, dtype='a', dflt=0, title='Acceleration (m/s^2)')
pr.contour(vehicles, dtype='h', dflt=0, title='Headway (seconds)')
pr.contour(vehicles, dtype='d', dflt=0, title='Distance Headway (meters)')
pr.contour(vehicles, dtype='f', dflt=0, title='Flow (vph)')
print("Count =", len(flow), "Theta_0 =", theta0, "Theta =", theta)
#==============================================================================
# Main function.
#==============================================================================
def main(argv):
print(__doc__)
vehicles, dt, total_time = initialize()
run_simulation(vehicles, dt, total_time)
if __name__ == "__main__":
main(sys.argv)
|
bsd-2-clause
|
ina-foss/ID-Fits
|
notebooks/.ipython/ipython_notebook_config.py
|
1
|
25411
|
# ID-Fits
# Copyright (c) 2015 Institut National de l'Audiovisuel, INA, All rights reserved.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3.0 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library.
# Configuration file for ipython-notebook.
c = get_config()
#------------------------------------------------------------------------------
# NotebookApp configuration
#------------------------------------------------------------------------------
# NotebookApp will inherit config from: BaseIPythonApplication, Application
# The url for MathJax.js.
# c.NotebookApp.mathjax_url = ''
# Supply extra arguments that will be passed to Jinja environment.
# c.NotebookApp.jinja_environment_options = {}
# The IP address the notebook server will listen on.
# c.NotebookApp.ip = 'localhost'
# DEPRECATED use base_url
# c.NotebookApp.base_project_url = '/'
# Create a massive crash report when IPython encounters what may be an internal
# error. The default is to append a short message to the usual traceback
# c.NotebookApp.verbose_crash = False
# The random bytes used to secure cookies. By default this is a new random
# number every time you start the Notebook. Set it to a value in a config file
# to enable logins to persist across server sessions.
#
# Note: Cookie secrets should be kept private, do not share config files with
# cookie_secret stored in plaintext (you can read the value from a file).
# c.NotebookApp.cookie_secret = ''
# The number of additional ports to try if the specified port is not available.
# c.NotebookApp.port_retries = 50
# Whether to open in a browser after starting. The specific browser used is
# platform dependent and determined by the python standard library `webbrowser`
# module, unless it is overridden using the --browser (NotebookApp.browser)
# configuration option.
c.NotebookApp.open_browser = False
# The notebook manager class to use.
# c.NotebookApp.notebook_manager_class = 'IPython.html.services.notebooks.filenbmanager.FileNotebookManager'
# The date format used by logging formatters for %(asctime)s
# c.NotebookApp.log_datefmt = '%Y-%m-%d %H:%M:%S'
# The port the notebook server will listen on.
# c.NotebookApp.port = 8888
# Whether to overwrite existing config files when copying
# c.NotebookApp.overwrite = False
# Set the Access-Control-Allow-Origin header
#
# Use '*' to allow any origin to access your server.
#
# Takes precedence over allow_origin_pat.
# c.NotebookApp.allow_origin = ''
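# Hypothetical example (not part of the generated defaults): allow a single
# trusted origin only.
# c.NotebookApp.allow_origin = 'https://notebooks.example.com'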
# Whether to enable MathJax for typesetting math/TeX
#
# MathJax is the javascript library IPython uses to render math/LaTeX. It is
# very large, so you may want to disable it if you have a slow internet
# connection, or for offline use of the notebook.
#
# When disabled, equations etc. will appear as their untransformed TeX source.
# c.NotebookApp.enable_mathjax = True
# Use a regular expression for the Access-Control-Allow-Origin header
#
# Requests from an origin matching the expression will get replies with:
#
# Access-Control-Allow-Origin: origin
#
# where `origin` is the origin of the request.
#
# Ignored if allow_origin is set.
# c.NotebookApp.allow_origin_pat = ''
# The full path to an SSL/TLS certificate file.
# c.NotebookApp.certfile = u''
# The base URL for the notebook server.
#
# Leading and trailing slashes can be omitted, and will automatically be added.
# c.NotebookApp.base_url = '/'
# The directory to use for notebooks and kernels.
c.NotebookApp.notebook_dir = u'notebooks'
#
# c.NotebookApp.file_to_run = ''
# The IPython profile to use.
# c.NotebookApp.profile = u'default'
# paths for Javascript extensions. By default, this is just
# IPYTHONDIR/nbextensions
# c.NotebookApp.nbextensions_path = []
# The Logging format template
# c.NotebookApp.log_format = '[%(name)s]%(highlevel)s %(message)s'
# The name of the IPython directory. This directory is used for logging
# configuration (through profiles), history storage, etc. The default is usually
# $HOME/.ipython. This option can also be specified through the environment
# variable IPYTHONDIR.
# c.NotebookApp.ipython_dir = u''
# Set the log level by value or name.
# c.NotebookApp.log_level = 30
# Hashed password to use for web authentication.
#
# To generate, type in a python/IPython shell:
#
# from IPython.lib import passwd; passwd()
#
# The string should be of the form type:salt:hashed-password.
# c.NotebookApp.password = u''
# Set the Access-Control-Allow-Credentials: true header
# c.NotebookApp.allow_credentials = False
# Path to an extra config file to load.
#
# If specified, load this config file in addition to any other IPython config.
# c.NotebookApp.extra_config_file = u''
# Extra paths to search for serving static files.
#
# This allows adding javascript/css to be available from the notebook server
# machine, or overriding individual files in the IPython
# c.NotebookApp.extra_static_paths = []
# Whether to trust or not X-Scheme/X-Forwarded-Proto and X-Real-Ip/X-Forwarded-
# For headers sent by the upstream reverse proxy. Necessary if the proxy handles
# SSL
# c.NotebookApp.trust_xheaders = False
# Whether to install the default config files into the profile dir. If a new
# profile is being created, and IPython contains config files for that profile,
# then they will be staged into the new directory. Otherwise, default config
# files will be automatically generated.
# c.NotebookApp.copy_config_files = False
# The full path to a private key file for usage with SSL/TLS.
# c.NotebookApp.keyfile = u''
# Supply overrides for the tornado.web.Application that the IPython notebook
# uses.
# c.NotebookApp.webapp_settings = {}
# Specify what command to use to invoke a web browser when opening the notebook.
# If not specified, the default browser will be determined by the `webbrowser`
# standard library module, which allows setting of the BROWSER environment
# variable to override it.
# c.NotebookApp.browser = u''
#------------------------------------------------------------------------------
# IPKernelApp configuration
#------------------------------------------------------------------------------
# IPython: an enhanced interactive Python shell.
# IPKernelApp will inherit config from: BaseIPythonApplication, Application,
# InteractiveShellApp
# Run the file referenced by the PYTHONSTARTUP environment variable at IPython
# startup.
# c.IPKernelApp.exec_PYTHONSTARTUP = True
# The importstring for the DisplayHook factory
# c.IPKernelApp.displayhook_class = 'IPython.kernel.zmq.displayhook.ZMQDisplayHook'
# Set the IP or interface on which the kernel will listen.
# c.IPKernelApp.ip = u''
# Pre-load matplotlib and numpy for interactive use, selecting a particular
# matplotlib backend and loop integration.
# c.IPKernelApp.pylab = None
# Create a massive crash report when IPython encounters what may be an internal
# error. The default is to append a short message to the usual traceback
# c.IPKernelApp.verbose_crash = False
# The Kernel subclass to be used.
#
# This should allow easy re-use of the IPKernelApp entry point to configure and
# launch kernels other than IPython's own.
# c.IPKernelApp.kernel_class = 'IPython.kernel.zmq.ipkernel.Kernel'
# Run the module as a script.
# c.IPKernelApp.module_to_run = ''
# The date format used by logging formatters for %(asctime)s
# c.IPKernelApp.log_datefmt = '%Y-%m-%d %H:%M:%S'
# set the shell (ROUTER) port [default: random]
# c.IPKernelApp.shell_port = 0
# set the control (ROUTER) port [default: random]
# c.IPKernelApp.control_port = 0
# Whether to overwrite existing config files when copying
# c.IPKernelApp.overwrite = False
# Execute the given command string.
# c.IPKernelApp.code_to_run = ''
# set the stdin (ROUTER) port [default: random]
# c.IPKernelApp.stdin_port = 0
# Set the log level by value or name.
# c.IPKernelApp.log_level = 30
# lines of code to run at IPython startup.
# c.IPKernelApp.exec_lines = []
# Path to an extra config file to load.
#
# If specified, load this config file in addition to any other IPython config.
# c.IPKernelApp.extra_config_file = u''
# The importstring for the OutStream factory
# c.IPKernelApp.outstream_class = 'IPython.kernel.zmq.iostream.OutStream'
# Whether to create profile dir if it doesn't exist
# c.IPKernelApp.auto_create = False
# set the heartbeat port [default: random]
# c.IPKernelApp.hb_port = 0
#
# c.IPKernelApp.transport = 'tcp'
# redirect stdout to the null device
# c.IPKernelApp.no_stdout = False
# Should variables loaded at startup (by startup files, exec_lines, etc.) be
# hidden from tools like %who?
# c.IPKernelApp.hide_initial_ns = True
# dotted module name of an IPython extension to load.
# c.IPKernelApp.extra_extension = ''
# A file to be run
c.IPKernelApp.file_to_run = 'startup.py'
# The IPython profile to use.
# c.IPKernelApp.profile = u'default'
#
# c.IPKernelApp.parent_appname = u''
# kill this process if its parent dies. On Windows, the argument specifies the
# HANDLE of the parent process, otherwise it is simply boolean.
# c.IPKernelApp.parent_handle = 0
# JSON file in which to store connection info [default: kernel-<pid>.json]
#
# This file will contain the IP, ports, and authentication key needed to connect
# clients to this kernel. By default, this file will be created in the security
# dir of the current profile, but can be specified by absolute path.
# c.IPKernelApp.connection_file = ''
# If true, IPython will populate the user namespace with numpy, pylab, etc. and
# an ``import *`` is done from numpy and pylab, when using pylab mode.
#
# When False, pylab mode should not import any names into the user namespace.
# c.IPKernelApp.pylab_import_all = True
# The name of the IPython directory. This directory is used for logging
# configuration (through profiles), history storage, etc. The default is usually
# $HOME/.ipython. This option can also be specified through the environment
# variable IPYTHONDIR.
# c.IPKernelApp.ipython_dir = u''
# Configure matplotlib for interactive use with the default matplotlib backend.
# c.IPKernelApp.matplotlib = None
# ONLY USED ON WINDOWS Interrupt this process when the parent is signaled.
# c.IPKernelApp.interrupt = 0
# Whether to install the default config files into the profile dir. If a new
# profile is being created, and IPython contains config files for that profile,
# then they will be staged into the new directory. Otherwise, default config
# files will be automatically generated.
# c.IPKernelApp.copy_config_files = False
# List of files to run at IPython startup.
# c.IPKernelApp.exec_files = []
# Enable GUI event loop integration with any of ('glut', 'gtk', 'gtk3', 'none',
# 'osx', 'pyglet', 'qt', 'qt4', 'tk', 'wx').
# c.IPKernelApp.gui = None
# A list of dotted module names of IPython extensions to load.
# c.IPKernelApp.extensions = []
# redirect stderr to the null device
# c.IPKernelApp.no_stderr = False
# The Logging format template
# c.IPKernelApp.log_format = '[%(name)s]%(highlevel)s %(message)s'
# set the iopub (PUB) port [default: random]
# c.IPKernelApp.iopub_port = 0
#------------------------------------------------------------------------------
# ZMQInteractiveShell configuration
#------------------------------------------------------------------------------
# A subclass of InteractiveShell for ZMQ.
# ZMQInteractiveShell will inherit config from: InteractiveShell
# Use colors for displaying information about objects. Because this information
# is passed through a pager (like 'less'), and some pagers get confused with
# color codes, this capability can be turned off.
# c.ZMQInteractiveShell.color_info = True
# A list of ast.NodeTransformer subclass instances, which will be applied to
# user input before code is run.
# c.ZMQInteractiveShell.ast_transformers = []
#
# c.ZMQInteractiveShell.history_length = 10000
# Don't call post-execute functions that have failed in the past.
# c.ZMQInteractiveShell.disable_failing_post_execute = False
# Show rewritten input, e.g. for autocall.
# c.ZMQInteractiveShell.show_rewritten_input = True
# Set the color scheme (NoColor, Linux, or LightBG).
# c.ZMQInteractiveShell.colors = 'Linux'
#
# c.ZMQInteractiveShell.separate_in = '\n'
# Deprecated, use PromptManager.in2_template
# c.ZMQInteractiveShell.prompt_in2 = ' .\\D.: '
#
# c.ZMQInteractiveShell.separate_out = ''
# Deprecated, use PromptManager.in_template
# c.ZMQInteractiveShell.prompt_in1 = 'In [\\#]: '
# Enable deep (recursive) reloading by default. IPython can use the deep_reload
# module which reloads changes in modules recursively (it replaces the reload()
# function, so you don't need to change anything to use it). deep_reload()
# forces a full reload of modules whose code may have changed, which the default
# reload() function does not. When deep_reload is off, IPython will use the
# normal reload(), but deep_reload will still be available as dreload().
# c.ZMQInteractiveShell.deep_reload = False
# Make IPython automatically call any callable object even if you didn't type
# explicit parentheses. For example, 'str 43' becomes 'str(43)' automatically.
# The value can be '0' to disable the feature, '1' for 'smart' autocall, where
# it is not applied if there are no more arguments on the line, and '2' for
# 'full' autocall, where all callable objects are automatically called (even if
# no arguments are present).
# c.ZMQInteractiveShell.autocall = 0
#
# c.ZMQInteractiveShell.separate_out2 = ''
# Deprecated, use PromptManager.justify
# c.ZMQInteractiveShell.prompts_pad_left = True
#
# c.ZMQInteractiveShell.readline_parse_and_bind = ['tab: complete', '"\\C-l": clear-screen', 'set show-all-if-ambiguous on', '"\\C-o": tab-insert', '"\\C-r": reverse-search-history', '"\\C-s": forward-search-history', '"\\C-p": history-search-backward', '"\\C-n": history-search-forward', '"\\e[A": history-search-backward', '"\\e[B": history-search-forward', '"\\C-k": kill-line', '"\\C-u": unix-line-discard']
# Enable magic commands to be called without the leading %.
# c.ZMQInteractiveShell.automagic = True
#
# c.ZMQInteractiveShell.debug = False
#
# c.ZMQInteractiveShell.object_info_string_level = 0
#
# c.ZMQInteractiveShell.ipython_dir = ''
#
# c.ZMQInteractiveShell.readline_remove_delims = '-/~'
# Start logging to the default log file.
# c.ZMQInteractiveShell.logstart = False
# The name of the logfile to use.
# c.ZMQInteractiveShell.logfile = ''
#
# c.ZMQInteractiveShell.wildcards_case_sensitive = True
# Save multi-line entries as one entry in readline history
# c.ZMQInteractiveShell.multiline_history = True
# Start logging to the given file in append mode.
# c.ZMQInteractiveShell.logappend = ''
#
# c.ZMQInteractiveShell.xmode = 'Context'
#
# c.ZMQInteractiveShell.quiet = False
# Deprecated, use PromptManager.out_template
# c.ZMQInteractiveShell.prompt_out = 'Out[\\#]: '
# Set the size of the output cache. The default is 1000, you can change it
# permanently in your config file. Setting it to 0 completely disables the
# caching system, and the minimum value accepted is 20 (if you provide a value
# less than 20, it is reset to 0 and a warning is issued). This limit is
# defined because otherwise you'll spend more time re-flushing a too small cache
# than working
# c.ZMQInteractiveShell.cache_size = 1000
# 'all', 'last', 'last_expr' or 'none', specifying which nodes should be run
# interactively (displaying output from expressions).
# c.ZMQInteractiveShell.ast_node_interactivity = 'last_expr'
# Automatically call the pdb debugger after every exception.
# c.ZMQInteractiveShell.pdb = False
#------------------------------------------------------------------------------
# KernelManager configuration
#------------------------------------------------------------------------------
# Manages a single kernel in a subprocess on this host.
#
# This version starts kernels with Popen.
# KernelManager will inherit config from: ConnectionFileMixin
# The Popen Command to launch the kernel. Override this if you have a custom
# kernel. If kernel_cmd is specified in a configuration file, IPython does not
# pass any arguments to the kernel, because it cannot make any assumptions about
# the arguments that the kernel understands. In particular, this means that the
# kernel does not receive the option --debug if it is given on the IPython command
# line.
# c.KernelManager.kernel_cmd = []
# Set the kernel's IP address [default localhost]. If the IP address is
# something other than localhost, then Consoles on other machines will be able
# to connect to the Kernel, so be careful!
# c.KernelManager.ip = u''
#
# c.KernelManager.transport = 'tcp'
# Should we autorestart the kernel if it dies.
# c.KernelManager.autorestart = False
#------------------------------------------------------------------------------
# ProfileDir configuration
#------------------------------------------------------------------------------
# An object to manage the profile directory and its resources.
#
# The profile directory is used by all IPython applications, to manage
# configuration, logging and security.
#
# This object knows how to find, create and manage these directories. This
# should be used by any code that wants to handle profiles.
# Set the profile location directly. This overrides the logic used by the
# `profile` option.
# c.ProfileDir.location = u''
#------------------------------------------------------------------------------
# Session configuration
#------------------------------------------------------------------------------
# Object for handling serialization and sending of messages.
#
# The Session object handles building messages and sending them with ZMQ sockets
# or ZMQStream objects. Objects can communicate with each other over the
# network via Session objects, and only need to work with the dict-based IPython
# message spec. The Session will handle serialization/deserialization, security,
# and metadata.
#
# Sessions support configurable serialization via packer/unpacker traits, and
# signing with HMAC digests via the key/keyfile traits.
#
# Parameters
# ----------
#
# debug : bool
# whether to trigger extra debugging statements
# packer/unpacker : str : 'json', 'pickle' or import_string
# importstrings for methods to serialize message parts. If just
# 'json' or 'pickle', predefined JSON and pickle packers will be used.
# Otherwise, the entire importstring must be used.
#
# The functions must accept at least valid JSON input, and output *bytes*.
#
# For example, to use msgpack:
# packer = 'msgpack.packb', unpacker='msgpack.unpackb'
# pack/unpack : callables
# You can also set the pack/unpack callables for serialization directly.
# session : bytes
# the ID of this Session object. The default is to generate a new UUID.
# username : unicode
# username added to message headers. The default is to ask the OS.
# key : bytes
# The key used to initialize an HMAC signature. If unset, messages
# will not be signed or checked.
# keyfile : filepath
# The file containing a key. If this is set, `key` will be initialized
# to the contents of the file.
# Username for the Session. Default is your system username.
# c.Session.username = u'tlorieul'
# The name of the unpacker for unserializing messages. Only used with custom
# functions for `packer`.
# c.Session.unpacker = 'json'
# Threshold (in bytes) beyond which a buffer should be sent without copying.
# c.Session.copy_threshold = 65536
# The name of the packer for serializing messages. Should be one of 'json',
# 'pickle', or an import name for a custom callable serializer.
# c.Session.packer = 'json'
# The maximum number of digests to remember.
#
# The digest history will be culled when it exceeds this value.
# c.Session.digest_history_size = 65536
# The UUID identifying this session.
# c.Session.session = u''
# The digest scheme used to construct the message signatures. Must have the form
# 'hmac-HASH'.
# c.Session.signature_scheme = 'hmac-sha256'
# execution key, for extra authentication.
# c.Session.key = ''
# Debug output in the Session
# c.Session.debug = False
# The maximum number of items for a container to be introspected for custom
# serialization. Containers larger than this are pickled outright.
# c.Session.item_threshold = 64
# path to file containing execution key.
# c.Session.keyfile = ''
# Threshold (in bytes) beyond which an object's buffer should be extracted to
# avoid pickling.
# c.Session.buffer_threshold = 1024
# Metadata dictionary, which serves as the default top-level metadata dict for
# each message.
# c.Session.metadata = {}
#------------------------------------------------------------------------------
# InlineBackend configuration
#------------------------------------------------------------------------------
# An object to store configuration of the inline backend.
# The figure format to enable (deprecated use `figure_formats` instead)
# c.InlineBackend.figure_format = u''
# A set of figure formats to enable: 'png', 'retina', 'jpeg', 'svg', 'pdf'.
# c.InlineBackend.figure_formats = set(['png'])
# Extra kwargs to be passed to fig.canvas.print_figure.
#
# Logical examples include: bbox_inches, quality (for jpeg figures), etc.
# c.InlineBackend.print_figure_kwargs = {'bbox_inches': 'tight'}
# Close all figures at the end of each cell.
#
# When True, ensures that each cell starts with no active figures, but it also
# means that one must keep track of references in order to edit or redraw
# figures in subsequent cells. This mode is ideal for the notebook, where
# residual plots from other cells might be surprising.
#
# When False, one must call figure() to create new figures. This means that
# gcf() and getfigs() can reference figures created in other cells, and the
# active figure can continue to be edited with pylab/pyplot methods that
# reference the current active figure. This mode facilitates iterative editing
# of figures, and behaves most consistently with other matplotlib backends, but
# figure barriers between cells must be explicit.
# c.InlineBackend.close_figures = True
# Subset of matplotlib rcParams that should be different for the inline backend.
# c.InlineBackend.rc = {'font.size': 10, 'figure.figsize': (6.0, 4.0), 'figure.facecolor': (1, 1, 1, 0), 'savefig.dpi': 72, 'figure.subplot.bottom': 0.125, 'figure.edgecolor': (1, 1, 1, 0)}
#------------------------------------------------------------------------------
# MappingKernelManager configuration
#------------------------------------------------------------------------------
# A KernelManager that handles notebook mapping and HTTP error handling
# MappingKernelManager will inherit config from: MultiKernelManager
#
# c.MappingKernelManager.root_dir = u'/home/tlorieul/Dev/Snoop/src/lib/Python'
# The kernel manager class. This is configurable to allow subclassing of the
# KernelManager for customized behavior.
# c.MappingKernelManager.kernel_manager_class = 'IPython.kernel.ioloop.IOLoopKernelManager'
#------------------------------------------------------------------------------
# NotebookManager configuration
#------------------------------------------------------------------------------
# Glob patterns to hide in file and directory listings.
# c.NotebookManager.hide_globs = [u'__pycache__']
#------------------------------------------------------------------------------
# FileNotebookManager configuration
#------------------------------------------------------------------------------
# FileNotebookManager will inherit config from: NotebookManager
# The directory name in which to keep notebook checkpoints
#
# This is a path relative to the notebook's own directory.
#
# By default, it is .ipynb_checkpoints
# c.FileNotebookManager.checkpoint_dir = '.ipynb_checkpoints'
# Glob patterns to hide in file and directory listings.
# c.FileNotebookManager.hide_globs = [u'__pycache__']
# Automatically create a Python script when saving the notebook.
#
# For easier use of import, %run and %load across notebooks, a <notebook-
# name>.py script will be created next to any <notebook-name>.ipynb on each
# save. This can also be set with the short `--script` flag.
# c.FileNotebookManager.save_script = False
#
# c.FileNotebookManager.notebook_dir = u'/home/tlorieul/Dev/Snoop/src/lib/Python'
#------------------------------------------------------------------------------
# NotebookNotary configuration
#------------------------------------------------------------------------------
# A class for computing and verifying notebook signatures.
# The secret key with which notebooks are signed.
# c.NotebookNotary.secret = ''
# The file where the secret key is stored.
# c.NotebookNotary.secret_file = u''
# The hashing algorithm used to sign notebooks.
# c.NotebookNotary.algorithm = 'sha256'
|
lgpl-3.0
|
mattilyra/scikit-learn
|
sklearn/svm/tests/test_sparse.py
|
35
|
13182
|
from nose.tools import assert_raises, assert_true, assert_false
import numpy as np
from scipy import sparse
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_equal)
from sklearn import datasets, svm, linear_model, base
from sklearn.datasets import make_classification, load_digits, make_blobs
from sklearn.svm.tests import test_svm
from sklearn.exceptions import ConvergenceWarning
from sklearn.utils.extmath import safe_sparse_dot
from sklearn.utils.testing import assert_warns, assert_raise_message
# test sample 1
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
X_sp = sparse.lil_matrix(X)
Y = [1, 1, 1, 2, 2, 2]
T = np.array([[-1, -1], [2, 2], [3, 2]])
true_result = [1, 2, 2]
# test sample 2
X2 = np.array([[0, 0, 0], [1, 1, 1], [2, 0, 0, ],
[0, 0, 2], [3, 3, 3]])
X2_sp = sparse.dok_matrix(X2)
Y2 = [1, 2, 2, 2, 3]
T2 = np.array([[-1, -1, -1], [1, 1, 1], [2, 2, 2]])
true_result2 = [1, 2, 3]
iris = datasets.load_iris()
# permute
rng = np.random.RandomState(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# sparsify
iris.data = sparse.csr_matrix(iris.data)
def check_svm_model_equal(dense_svm, sparse_svm, X_train, y_train, X_test):
dense_svm.fit(X_train.toarray(), y_train)
if sparse.isspmatrix(X_test):
X_test_dense = X_test.toarray()
else:
X_test_dense = X_test
sparse_svm.fit(X_train, y_train)
assert_true(sparse.issparse(sparse_svm.support_vectors_))
assert_true(sparse.issparse(sparse_svm.dual_coef_))
assert_array_almost_equal(dense_svm.support_vectors_,
sparse_svm.support_vectors_.toarray())
assert_array_almost_equal(dense_svm.dual_coef_, sparse_svm.dual_coef_.toarray())
if dense_svm.kernel == "linear":
assert_true(sparse.issparse(sparse_svm.coef_))
assert_array_almost_equal(dense_svm.coef_, sparse_svm.coef_.toarray())
assert_array_almost_equal(dense_svm.support_, sparse_svm.support_)
assert_array_almost_equal(dense_svm.predict(X_test_dense), sparse_svm.predict(X_test))
assert_array_almost_equal(dense_svm.decision_function(X_test_dense),
sparse_svm.decision_function(X_test))
assert_array_almost_equal(dense_svm.decision_function(X_test_dense),
sparse_svm.decision_function(X_test_dense))
if isinstance(dense_svm, svm.OneClassSVM):
msg = "cannot use sparse input in 'OneClassSVM' trained on dense data"
else:
assert_array_almost_equal(dense_svm.predict_proba(X_test_dense),
sparse_svm.predict_proba(X_test), 4)
msg = "cannot use sparse input in 'SVC' trained on dense data"
if sparse.isspmatrix(X_test):
assert_raise_message(ValueError, msg, dense_svm.predict, X_test)
def test_svc():
"""Check that sparse SVC gives the same result as SVC"""
# many class dataset:
X_blobs, y_blobs = make_blobs(n_samples=100, centers=10, random_state=0)
X_blobs = sparse.csr_matrix(X_blobs)
datasets = [[X_sp, Y, T], [X2_sp, Y2, T2],
[X_blobs[:80], y_blobs[:80], X_blobs[80:]],
[iris.data, iris.target, iris.data]]
kernels = ["linear", "poly", "rbf", "sigmoid"]
for dataset in datasets:
for kernel in kernels:
clf = svm.SVC(kernel=kernel, probability=True, random_state=0,
decision_function_shape='ovo')
sp_clf = svm.SVC(kernel=kernel, probability=True, random_state=0,
decision_function_shape='ovo')
check_svm_model_equal(clf, sp_clf, *dataset)
def test_unsorted_indices():
# test that the result with sorted and unsorted indices in csr is the same
# we use a subset of digits as iris, blobs or make_classification didn't
# show the problem
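# Added note: fancy-indexing a CSR matrix with np.arange, as done below, returns
# a copy whose per-row indices are not guaranteed to be sorted; the assert_false
# calls confirm the fixtures really are unsorted before the SVC is refit on them.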
digits = load_digits()
X, y = digits.data[:50], digits.target[:50]
X_test = sparse.csr_matrix(digits.data[50:100])
X_sparse = sparse.csr_matrix(X)
coef_dense = svm.SVC(kernel='linear', probability=True,
random_state=0).fit(X, y).coef_
sparse_svc = svm.SVC(kernel='linear', probability=True,
random_state=0).fit(X_sparse, y)
coef_sorted = sparse_svc.coef_
# make sure dense and sparse SVM give the same result
assert_array_almost_equal(coef_dense, coef_sorted.toarray())
X_sparse_unsorted = X_sparse[np.arange(X.shape[0])]
X_test_unsorted = X_test[np.arange(X_test.shape[0])]
# make sure we scramble the indices
assert_false(X_sparse_unsorted.has_sorted_indices)
assert_false(X_test_unsorted.has_sorted_indices)
unsorted_svc = svm.SVC(kernel='linear', probability=True,
random_state=0).fit(X_sparse_unsorted, y)
coef_unsorted = unsorted_svc.coef_
# make sure unsorted indices give same result
assert_array_almost_equal(coef_unsorted.toarray(), coef_sorted.toarray())
assert_array_almost_equal(sparse_svc.predict_proba(X_test_unsorted),
sparse_svc.predict_proba(X_test))
def test_svc_with_custom_kernel():
kfunc = lambda x, y: safe_sparse_dot(x, y.T)
clf_lin = svm.SVC(kernel='linear').fit(X_sp, Y)
clf_mylin = svm.SVC(kernel=kfunc).fit(X_sp, Y)
assert_array_equal(clf_lin.predict(X_sp), clf_mylin.predict(X_sp))
def test_svc_iris():
# Test the sparse SVC with the iris dataset
for k in ('linear', 'poly', 'rbf'):
sp_clf = svm.SVC(kernel=k).fit(iris.data, iris.target)
clf = svm.SVC(kernel=k).fit(iris.data.toarray(), iris.target)
assert_array_almost_equal(clf.support_vectors_,
sp_clf.support_vectors_.toarray())
assert_array_almost_equal(clf.dual_coef_, sp_clf.dual_coef_.toarray())
assert_array_almost_equal(
clf.predict(iris.data.toarray()), sp_clf.predict(iris.data))
if k == 'linear':
assert_array_almost_equal(clf.coef_, sp_clf.coef_.toarray())
def test_sparse_decision_function():
# Test decision_function
# Sanity check, test that decision_function implemented in python
# returns the same as the one in libsvm
# multi class:
svc = svm.SVC(kernel='linear', C=0.1, decision_function_shape='ovo')
clf = svc.fit(iris.data, iris.target)
dec = safe_sparse_dot(iris.data, clf.coef_.T) + clf.intercept_
assert_array_almost_equal(dec, clf.decision_function(iris.data))
# binary:
clf.fit(X, Y)
dec = np.dot(X, clf.coef_.T) + clf.intercept_
prediction = clf.predict(X)
assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
assert_array_almost_equal(
prediction,
clf.classes_[(clf.decision_function(X) > 0).astype(np.int).ravel()])
expected = np.array([-1., -0.66, -1., 0.66, 1., 1.])
assert_array_almost_equal(clf.decision_function(X), expected, 2)
def test_error():
# Test that it gives proper exception on deficient input
# impossible value of C
assert_raises(ValueError, svm.SVC(C=-1).fit, X, Y)
# impossible value of nu
clf = svm.NuSVC(nu=0.0)
assert_raises(ValueError, clf.fit, X_sp, Y)
Y2 = Y[:-1] # wrong dimensions for labels
assert_raises(ValueError, clf.fit, X_sp, Y2)
clf = svm.SVC()
clf.fit(X_sp, Y)
assert_array_equal(clf.predict(T), true_result)
def test_linearsvc():
# Similar to test_SVC
clf = svm.LinearSVC(random_state=0).fit(X, Y)
sp_clf = svm.LinearSVC(random_state=0).fit(X_sp, Y)
assert_true(sp_clf.fit_intercept)
assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=4)
assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=4)
assert_array_almost_equal(clf.predict(X), sp_clf.predict(X_sp))
clf.fit(X2, Y2)
sp_clf.fit(X2_sp, Y2)
assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=4)
assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=4)
def test_linearsvc_iris():
# Test the sparse LinearSVC with the iris dataset
sp_clf = svm.LinearSVC(random_state=0).fit(iris.data, iris.target)
clf = svm.LinearSVC(random_state=0).fit(iris.data.toarray(), iris.target)
assert_equal(clf.fit_intercept, sp_clf.fit_intercept)
assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=1)
assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=1)
assert_array_almost_equal(
clf.predict(iris.data.toarray()), sp_clf.predict(iris.data))
# check decision_function
pred = np.argmax(sp_clf.decision_function(iris.data), 1)
assert_array_almost_equal(pred, clf.predict(iris.data.toarray()))
# sparsify the coefficients on both models and check that they still
# produce the same results
clf.sparsify()
assert_array_equal(pred, clf.predict(iris.data))
sp_clf.sparsify()
assert_array_equal(pred, sp_clf.predict(iris.data))
def test_weight():
# Test class weights
X_, y_ = make_classification(n_samples=200, n_features=100,
weights=[0.833, 0.167], random_state=0)
X_ = sparse.csr_matrix(X_)
for clf in (linear_model.LogisticRegression(),
svm.LinearSVC(random_state=0),
svm.SVC()):
clf.set_params(class_weight={0: 5})
clf.fit(X_[:180], y_[:180])
y_pred = clf.predict(X_[180:])
assert_true(np.sum(y_pred == y_[180:]) >= 11)
def test_sample_weights():
# Test weights on individual samples
clf = svm.SVC()
clf.fit(X_sp, Y)
assert_array_equal(clf.predict([X[2]]), [1.])
sample_weight = [.1] * 3 + [10] * 3
clf.fit(X_sp, Y, sample_weight=sample_weight)
assert_array_equal(clf.predict([X[2]]), [2.])
def test_sparse_liblinear_intercept_handling():
# Test that sparse liblinear honours intercept_scaling param
test_svm.test_dense_liblinear_intercept_handling(svm.LinearSVC)
def test_sparse_oneclasssvm():
"""Check that sparse OneClassSVM gives the same result as dense OneClassSVM"""
# many class dataset:
X_blobs, _ = make_blobs(n_samples=100, centers=10, random_state=0)
X_blobs = sparse.csr_matrix(X_blobs)
datasets = [[X_sp, None, T], [X2_sp, None, T2],
[X_blobs[:80], None, X_blobs[80:]],
[iris.data, None, iris.data]]
kernels = ["linear", "poly", "rbf", "sigmoid"]
for dataset in datasets:
for kernel in kernels:
clf = svm.OneClassSVM(kernel=kernel, random_state=0)
sp_clf = svm.OneClassSVM(kernel=kernel, random_state=0)
check_svm_model_equal(clf, sp_clf, *dataset)
def test_sparse_realdata():
# Test on a subset from the 20newsgroups dataset.
# This catches some bugs if input is not correctly converted into
# sparse format or weights are not correctly initialized.
data = np.array([0.03771744, 0.1003567, 0.01174647, 0.027069])
indices = np.array([6, 5, 35, 31])
indptr = np.array(
[0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 4, 4, 4])
X = sparse.csr_matrix((data, indices, indptr))
y = np.array(
[1., 0., 2., 2., 1., 1., 1., 2., 2., 0., 1., 2., 2.,
0., 2., 0., 3., 0., 3., 0., 1., 1., 3., 2., 3., 2.,
0., 3., 1., 0., 2., 1., 2., 0., 1., 0., 2., 3., 1.,
3., 0., 1., 0., 0., 2., 0., 1., 2., 2., 2., 3., 2.,
0., 3., 2., 1., 2., 3., 2., 2., 0., 1., 0., 1., 2.,
3., 0., 0., 2., 2., 1., 3., 1., 1., 0., 1., 2., 1.,
1., 3.])
clf = svm.SVC(kernel='linear').fit(X.toarray(), y)
sp_clf = svm.SVC(kernel='linear').fit(sparse.coo_matrix(X), y)
assert_array_equal(clf.support_vectors_, sp_clf.support_vectors_.toarray())
assert_array_equal(clf.dual_coef_, sp_clf.dual_coef_.toarray())
def test_sparse_svc_clone_with_callable_kernel():
# Test that the "dense_fit" is called even though we use sparse input
# meaning that everything works fine.
a = svm.SVC(C=1, kernel=lambda x, y: x * y.T, probability=True,
random_state=0)
b = base.clone(a)
b.fit(X_sp, Y)
pred = b.predict(X_sp)
b.predict_proba(X_sp)
dense_svm = svm.SVC(C=1, kernel=lambda x, y: np.dot(x, y.T),
probability=True, random_state=0)
pred_dense = dense_svm.fit(X, Y).predict(X)
assert_array_equal(pred_dense, pred)
# b.decision_function(X_sp) # XXX : should be supported
def test_timeout():
sp = svm.SVC(C=1, kernel=lambda x, y: x * y.T, probability=True,
random_state=0, max_iter=1)
assert_warns(ConvergenceWarning, sp.fit, X_sp, Y)
def test_consistent_proba():
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_1 = a.fit(X, Y).predict_proba(X)
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_2 = a.fit(X, Y).predict_proba(X)
assert_array_almost_equal(proba_1, proba_2)
bsd-3-clause | caneGuy/spark | python/pyspark/sql/functions.py | 1 | 146700
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A collections of builtin functions
"""
import sys
import functools
import warnings
if sys.version < "3":
from itertools import imap as map
if sys.version >= '3':
basestring = str
from pyspark import since, SparkContext
from pyspark.rdd import ignore_unicode_prefix, PythonEvalType
from pyspark.sql.column import Column, _to_java_column, _to_seq, _create_column_from_literal, \
_create_column_from_name
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.types import StringType, DataType
# Keep UserDefinedFunction import for backwards compatible import; moved in SPARK-22409
from pyspark.sql.udf import UserDefinedFunction, _create_udf
from pyspark.sql.utils import to_str
# Note to developers: all of the PySpark functions here take strings as column names whenever
# possible. Namely, if columns are referred to as arguments, they can always be either a Column
# or a string, even though there may be a few exceptions for legacy or unavoidable reasons.
# If you are fixing other language APIs together, please also note that the Scala side is not
# the case, since it requires making every single overridden definition.
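# Illustrative sketch (an addition for clarity, not part of the original module): assuming an
# active SparkSession ``spark`` and a DataFrame ``df`` with an ``age`` column, the two calls
# below are interchangeable, because the helpers defined next resolve a string argument to the
# column of that name:
#
#     df.select(sqrt("age"))
#     df.select(sqrt(df.age))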
def _create_function(name, doc=""):
"""Create a PySpark function by its name"""
def _(col):
sc = SparkContext._active_spark_context
jc = getattr(sc._jvm.functions, name)(col._jc if isinstance(col, Column) else col)
return Column(jc)
_.__name__ = name
_.__doc__ = doc
return _
def _create_function_over_column(name, doc=""):
    """Similar to `_create_function`, but creates a PySpark function that takes a column
    (given either as a Column or as a string column name). This is mainly so that PySpark
    functions can take strings as column names.
"""
def _(col):
sc = SparkContext._active_spark_context
jc = getattr(sc._jvm.functions, name)(_to_java_column(col))
return Column(jc)
_.__name__ = name
_.__doc__ = doc
return _
def _wrap_deprecated_function(func, message):
""" Wrap the deprecated function to print out deprecation warnings"""
def _(col):
warnings.warn(message, DeprecationWarning)
return func(col)
return functools.wraps(func)(_)
def _create_binary_mathfunction(name, doc=""):
    """ Create a binary math function by name"""
def _(col1, col2):
sc = SparkContext._active_spark_context
# For legacy reasons, the arguments here can be implicitly converted into floats,
# if they are not columns or strings.
if isinstance(col1, Column):
arg1 = col1._jc
elif isinstance(col1, basestring):
arg1 = _create_column_from_name(col1)
else:
arg1 = float(col1)
if isinstance(col2, Column):
arg2 = col2._jc
elif isinstance(col2, basestring):
arg2 = _create_column_from_name(col2)
else:
arg2 = float(col2)
jc = getattr(sc._jvm.functions, name)(arg1, arg2)
return Column(jc)
_.__name__ = name
_.__doc__ = doc
return _
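# Hedged example of the implicit float conversion described above (assuming a DataFrame ``df``
# with numeric columns ``y`` and ``x``): the generated binary math functions accept Columns,
# column-name strings, or plain numbers, e.g.
#
#     df.select(atan2(df.y, df.x), atan2("y", "x"), atan2(df.y, 1.0))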
def _create_window_function(name, doc=''):
""" Create a window function by name """
def _():
sc = SparkContext._active_spark_context
jc = getattr(sc._jvm.functions, name)()
return Column(jc)
_.__name__ = name
_.__doc__ = 'Window function: ' + doc
return _
def _options_to_str(options):
return {key: to_str(value) for (key, value) in options.items()}
_lit_doc = """
Creates a :class:`Column` of literal value.
>>> df.select(lit(5).alias('height')).withColumn('spark_user', lit(True)).take(1)
[Row(height=5, spark_user=True)]
"""
_functions = {
'lit': _lit_doc,
'col': 'Returns a :class:`Column` based on the given column name.',
'column': 'Returns a :class:`Column` based on the given column name.',
'asc': 'Returns a sort expression based on the ascending order of the given column name.',
'desc': 'Returns a sort expression based on the descending order of the given column name.',
}
_functions_over_column = {
'sqrt': 'Computes the square root of the specified float value.',
'abs': 'Computes the absolute value.',
'max': 'Aggregate function: returns the maximum value of the expression in a group.',
'min': 'Aggregate function: returns the minimum value of the expression in a group.',
'count': 'Aggregate function: returns the number of items in a group.',
'sum': 'Aggregate function: returns the sum of all values in the expression.',
'avg': 'Aggregate function: returns the average of the values in a group.',
'mean': 'Aggregate function: returns the average of the values in a group.',
'sumDistinct': 'Aggregate function: returns the sum of distinct values in the expression.',
}
_functions_1_4_over_column = {
# unary math functions
'acos': ':return: inverse cosine of `col`, as if computed by `java.lang.Math.acos()`',
'asin': ':return: inverse sine of `col`, as if computed by `java.lang.Math.asin()`',
'atan': ':return: inverse tangent of `col`, as if computed by `java.lang.Math.atan()`',
'cbrt': 'Computes the cube-root of the given value.',
'ceil': 'Computes the ceiling of the given value.',
'cos': """:param col: angle in radians
:return: cosine of the angle, as if computed by `java.lang.Math.cos()`.""",
'cosh': """:param col: hyperbolic angle
:return: hyperbolic cosine of the angle, as if computed by `java.lang.Math.cosh()`""",
'exp': 'Computes the exponential of the given value.',
'expm1': 'Computes the exponential of the given value minus one.',
'floor': 'Computes the floor of the given value.',
'log': 'Computes the natural logarithm of the given value.',
'log10': 'Computes the logarithm of the given value in Base 10.',
'log1p': 'Computes the natural logarithm of the given value plus one.',
'rint': 'Returns the double value that is closest in value to the argument and' +
' is equal to a mathematical integer.',
'signum': 'Computes the signum of the given value.',
'sin': """:param col: angle in radians
:return: sine of the angle, as if computed by `java.lang.Math.sin()`""",
'sinh': """:param col: hyperbolic angle
:return: hyperbolic sine of the given value,
as if computed by `java.lang.Math.sinh()`""",
'tan': """:param col: angle in radians
:return: tangent of the given value, as if computed by `java.lang.Math.tan()`""",
'tanh': """:param col: hyperbolic angle
:return: hyperbolic tangent of the given value,
as if computed by `java.lang.Math.tanh()`""",
'toDegrees': '.. note:: Deprecated in 2.1, use :func:`degrees` instead.',
'toRadians': '.. note:: Deprecated in 2.1, use :func:`radians` instead.',
'bitwiseNOT': 'Computes bitwise not.',
}
_functions_2_4 = {
'asc_nulls_first': 'Returns a sort expression based on the ascending order of the given' +
                       ' column name, and null values appear before non-null values.',
'asc_nulls_last': 'Returns a sort expression based on the ascending order of the given' +
' column name, and null values appear after non-null values.',
'desc_nulls_first': 'Returns a sort expression based on the descending order of the given' +
' column name, and null values appear before non-null values.',
'desc_nulls_last': 'Returns a sort expression based on the descending order of the given' +
                        ' column name, and null values appear after non-null values.',
}
_collect_list_doc = """
Aggregate function: returns a list of objects with duplicates.
.. note:: The function is non-deterministic because the order of collected results depends
on order of rows which may be non-deterministic after a shuffle.
>>> df2 = spark.createDataFrame([(2,), (5,), (5,)], ('age',))
>>> df2.agg(collect_list('age')).collect()
[Row(collect_list(age)=[2, 5, 5])]
"""
_collect_set_doc = """
Aggregate function: returns a set of objects with duplicate elements eliminated.
.. note:: The function is non-deterministic because the order of collected results depends
on order of rows which may be non-deterministic after a shuffle.
>>> df2 = spark.createDataFrame([(2,), (5,), (5,)], ('age',))
>>> df2.agg(collect_set('age')).collect()
[Row(collect_set(age)=[5, 2])]
"""
_functions_1_6_over_column = {
# unary math functions
'stddev': 'Aggregate function: alias for stddev_samp.',
'stddev_samp': 'Aggregate function: returns the unbiased sample standard deviation of' +
' the expression in a group.',
'stddev_pop': 'Aggregate function: returns population standard deviation of' +
' the expression in a group.',
'variance': 'Aggregate function: alias for var_samp.',
'var_samp': 'Aggregate function: returns the unbiased sample variance of' +
' the values in a group.',
'var_pop': 'Aggregate function: returns the population variance of the values in a group.',
'skewness': 'Aggregate function: returns the skewness of the values in a group.',
'kurtosis': 'Aggregate function: returns the kurtosis of the values in a group.',
'collect_list': _collect_list_doc,
'collect_set': _collect_set_doc
}
_functions_2_1_over_column = {
# unary math functions
'degrees': """
Converts an angle measured in radians to an approximately equivalent angle
measured in degrees.
:param col: angle in radians
:return: angle in degrees, as if computed by `java.lang.Math.toDegrees()`
""",
'radians': """
Converts an angle measured in degrees to an approximately equivalent angle
measured in radians.
:param col: angle in degrees
:return: angle in radians, as if computed by `java.lang.Math.toRadians()`
""",
}
# math functions that take two arguments as input
_binary_mathfunctions = {
'atan2': """
:param col1: coordinate on y-axis
:param col2: coordinate on x-axis
:return: the `theta` component of the point
(`r`, `theta`)
in polar coordinates that corresponds to the point
(`x`, `y`) in Cartesian coordinates,
as if computed by `java.lang.Math.atan2()`
""",
'hypot': 'Computes ``sqrt(a^2 + b^2)`` without intermediate overflow or underflow.',
'pow': 'Returns the value of the first argument raised to the power of the second argument.',
}
_window_functions = {
'row_number':
"""returns a sequential number starting at 1 within a window partition.""",
'dense_rank':
"""returns the rank of rows within a window partition, without any gaps.
The difference between rank and dense_rank is that dense_rank leaves no gaps in ranking
sequence when there are ties. That is, if you were ranking a competition using dense_rank
and had three people tie for second place, you would say that all three were in second
        place and that the next person came in third. Rank would give sequential numbers, so
        the person who came in third place (after the ties) would register as coming in fifth.
This is equivalent to the DENSE_RANK function in SQL.""",
'rank':
"""returns the rank of rows within a window partition.
The difference between rank and dense_rank is that dense_rank leaves no gaps in ranking
sequence when there are ties. That is, if you were ranking a competition using dense_rank
and had three people tie for second place, you would say that all three were in second
        place and that the next person came in third. Rank would give sequential numbers, so
        the person who came in third place (after the ties) would register as coming in fifth.
This is equivalent to the RANK function in SQL.""",
'cume_dist':
"""returns the cumulative distribution of values within a window partition,
i.e. the fraction of rows that are below the current row.""",
'percent_rank':
"""returns the relative rank (i.e. percentile) of rows within a window partition.""",
}
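# Minimal usage sketch for the window functions generated from the mapping above (assumes a
# DataFrame ``df`` with columns ``name`` and ``age``; ``Window`` itself is not imported by
# this module):
#
#     from pyspark.sql.window import Window
#     w = Window.partitionBy("name").orderBy("age")
#     df.select("name", "age", row_number().over(w).alias("rn"))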
# Wraps deprecated functions (keys) with the messages (values).
_functions_deprecated = {
}
for _name, _doc in _functions.items():
globals()[_name] = since(1.3)(_create_function(_name, _doc))
for _name, _doc in _functions_over_column.items():
globals()[_name] = since(1.3)(_create_function_over_column(_name, _doc))
for _name, _doc in _functions_1_4_over_column.items():
globals()[_name] = since(1.4)(_create_function_over_column(_name, _doc))
for _name, _doc in _binary_mathfunctions.items():
globals()[_name] = since(1.4)(_create_binary_mathfunction(_name, _doc))
for _name, _doc in _window_functions.items():
globals()[_name] = since(1.6)(_create_window_function(_name, _doc))
for _name, _doc in _functions_1_6_over_column.items():
globals()[_name] = since(1.6)(_create_function_over_column(_name, _doc))
for _name, _doc in _functions_2_1_over_column.items():
globals()[_name] = since(2.1)(_create_function_over_column(_name, _doc))
for _name, _message in _functions_deprecated.items():
globals()[_name] = _wrap_deprecated_function(globals()[_name], _message)
for _name, _doc in _functions_2_4.items():
globals()[_name] = since(2.4)(_create_function(_name, _doc))
del _name, _doc
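# After the registration loops above run at import time, the generated helpers are ordinary
# module-level functions, so callers can, for example, simply do:
#
#     from pyspark.sql.functions import sqrt, atan2, row_number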
@since(2.1)
def approx_count_distinct(col, rsd=None):
"""Aggregate function: returns a new :class:`Column` for approximate distinct count of
column `col`.
:param rsd: maximum estimation error allowed (default = 0.05). For rsd < 0.01, it is more
efficient to use :func:`countDistinct`
>>> df.agg(approx_count_distinct(df.age).alias('distinct_ages')).collect()
[Row(distinct_ages=2)]
"""
sc = SparkContext._active_spark_context
if rsd is None:
jc = sc._jvm.functions.approx_count_distinct(_to_java_column(col))
else:
jc = sc._jvm.functions.approx_count_distinct(_to_java_column(col), rsd)
return Column(jc)
@since(1.6)
def broadcast(df):
"""Marks a DataFrame as small enough for use in broadcast joins."""
sc = SparkContext._active_spark_context
return DataFrame(sc._jvm.functions.broadcast(df._jdf), df.sql_ctx)
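# Illustrative only (assuming two DataFrames ``df`` and ``df2`` that share a ``name`` column):
#
#     df.join(broadcast(df2), "name").collect()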
@since(1.4)
def coalesce(*cols):
"""Returns the first column that is not null.
>>> cDf = spark.createDataFrame([(None, None), (1, None), (None, 2)], ("a", "b"))
>>> cDf.show()
+----+----+
| a| b|
+----+----+
|null|null|
| 1|null|
|null| 2|
+----+----+
>>> cDf.select(coalesce(cDf["a"], cDf["b"])).show()
+--------------+
|coalesce(a, b)|
+--------------+
| null|
| 1|
| 2|
+--------------+
>>> cDf.select('*', coalesce(cDf["a"], lit(0.0))).show()
+----+----+----------------+
| a| b|coalesce(a, 0.0)|
+----+----+----------------+
|null|null| 0.0|
| 1|null| 1.0|
|null| 2| 0.0|
+----+----+----------------+
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.coalesce(_to_seq(sc, cols, _to_java_column))
return Column(jc)
@since(1.6)
def corr(col1, col2):
"""Returns a new :class:`Column` for the Pearson Correlation Coefficient for ``col1``
and ``col2``.
>>> a = range(20)
>>> b = [2 * x for x in range(20)]
>>> df = spark.createDataFrame(zip(a, b), ["a", "b"])
>>> df.agg(corr("a", "b").alias('c')).collect()
[Row(c=1.0)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.corr(_to_java_column(col1), _to_java_column(col2)))
@since(2.0)
def covar_pop(col1, col2):
"""Returns a new :class:`Column` for the population covariance of ``col1`` and ``col2``.
>>> a = [1] * 10
>>> b = [1] * 10
>>> df = spark.createDataFrame(zip(a, b), ["a", "b"])
>>> df.agg(covar_pop("a", "b").alias('c')).collect()
[Row(c=0.0)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.covar_pop(_to_java_column(col1), _to_java_column(col2)))
@since(2.0)
def covar_samp(col1, col2):
"""Returns a new :class:`Column` for the sample covariance of ``col1`` and ``col2``.
>>> a = [1] * 10
>>> b = [1] * 10
>>> df = spark.createDataFrame(zip(a, b), ["a", "b"])
>>> df.agg(covar_samp("a", "b").alias('c')).collect()
[Row(c=0.0)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.covar_samp(_to_java_column(col1), _to_java_column(col2)))
@since(1.3)
def countDistinct(col, *cols):
"""Returns a new :class:`Column` for distinct count of ``col`` or ``cols``.
>>> df.agg(countDistinct(df.age, df.name).alias('c')).collect()
[Row(c=2)]
>>> df.agg(countDistinct("age", "name").alias('c')).collect()
[Row(c=2)]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.countDistinct(_to_java_column(col), _to_seq(sc, cols, _to_java_column))
return Column(jc)
@since(1.3)
def first(col, ignorenulls=False):
"""Aggregate function: returns the first value in a group.
    The function by default returns the first value it sees. It will return the first non-null
    value it sees when ignoreNulls is set to true. If all values are null, then null is returned.
    .. note:: The function is non-deterministic because its results depend on the order of rows,
        which may be non-deterministic after a shuffle.
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.first(_to_java_column(col), ignorenulls)
return Column(jc)
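# Hedged usage sketch (assuming the doctest DataFrame ``df`` with columns ``name`` and ``age``):
#
#     df.groupBy("name").agg(first("age", ignorenulls=True).alias("first_age"))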
@since(2.0)
def grouping(col):
"""
Aggregate function: indicates whether a specified column in a GROUP BY list is aggregated
or not, returns 1 for aggregated or 0 for not aggregated in the result set.
>>> df.cube("name").agg(grouping("name"), sum("age")).orderBy("name").show()
+-----+--------------+--------+
| name|grouping(name)|sum(age)|
+-----+--------------+--------+
| null| 1| 7|
|Alice| 0| 2|
| Bob| 0| 5|
+-----+--------------+--------+
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.grouping(_to_java_column(col))
return Column(jc)
@since(2.0)
def grouping_id(*cols):
"""
Aggregate function: returns the level of grouping, equals to
(grouping(c1) << (n-1)) + (grouping(c2) << (n-2)) + ... + grouping(cn)
.. note:: The list of columns should match with grouping columns exactly, or empty (means all
the grouping columns).
>>> df.cube("name").agg(grouping_id(), sum("age")).orderBy("name").show()
+-----+-------------+--------+
| name|grouping_id()|sum(age)|
+-----+-------------+--------+
| null| 1| 7|
|Alice| 0| 2|
| Bob| 0| 5|
+-----+-------------+--------+
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.grouping_id(_to_seq(sc, cols, _to_java_column))
return Column(jc)
@since(1.6)
def input_file_name():
"""Creates a string column for the file name of the current Spark task.
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.input_file_name())
@since(1.6)
def isnan(col):
"""An expression that returns true iff the column is NaN.
>>> df = spark.createDataFrame([(1.0, float('nan')), (float('nan'), 2.0)], ("a", "b"))
>>> df.select(isnan("a").alias("r1"), isnan(df.a).alias("r2")).collect()
[Row(r1=False, r2=False), Row(r1=True, r2=True)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.isnan(_to_java_column(col)))
@since(1.6)
def isnull(col):
"""An expression that returns true iff the column is null.
>>> df = spark.createDataFrame([(1, None), (None, 2)], ("a", "b"))
>>> df.select(isnull("a").alias("r1"), isnull(df.a).alias("r2")).collect()
[Row(r1=False, r2=False), Row(r1=True, r2=True)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.isnull(_to_java_column(col)))
@since(1.3)
def last(col, ignorenulls=False):
"""Aggregate function: returns the last value in a group.
    The function by default returns the last value it sees. It will return the last non-null
    value it sees when ignoreNulls is set to true. If all values are null, then null is returned.
    .. note:: The function is non-deterministic because its results depend on the order of rows,
        which may be non-deterministic after a shuffle.
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.last(_to_java_column(col), ignorenulls)
return Column(jc)
@since(1.6)
def monotonically_increasing_id():
"""A column that generates monotonically increasing 64-bit integers.
The generated ID is guaranteed to be monotonically increasing and unique, but not consecutive.
The current implementation puts the partition ID in the upper 31 bits, and the record number
within each partition in the lower 33 bits. The assumption is that the data frame has
less than 1 billion partitions, and each partition has less than 8 billion records.
.. note:: The function is non-deterministic because its result depends on partition IDs.
As an example, consider a :class:`DataFrame` with two partitions, each with 3 records.
This expression would return the following IDs:
0, 1, 2, 8589934592 (1L << 33), 8589934593, 8589934594.
>>> df0 = sc.parallelize(range(2), 2).mapPartitions(lambda x: [(1,), (2,), (3,)]).toDF(['col1'])
>>> df0.select(monotonically_increasing_id().alias('id')).collect()
[Row(id=0), Row(id=1), Row(id=2), Row(id=8589934592), Row(id=8589934593), Row(id=8589934594)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.monotonically_increasing_id())
@since(1.6)
def nanvl(col1, col2):
"""Returns col1 if it is not NaN, or col2 if col1 is NaN.
Both inputs should be floating point columns (:class:`DoubleType` or :class:`FloatType`).
>>> df = spark.createDataFrame([(1.0, float('nan')), (float('nan'), 2.0)], ("a", "b"))
>>> df.select(nanvl("a", "b").alias("r1"), nanvl(df.a, df.b).alias("r2")).collect()
[Row(r1=1.0, r2=1.0), Row(r1=2.0, r2=2.0)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.nanvl(_to_java_column(col1), _to_java_column(col2)))
@ignore_unicode_prefix
@since(1.4)
def rand(seed=None):
"""Generates a random column with independent and identically distributed (i.i.d.) samples
from U[0.0, 1.0].
.. note:: The function is non-deterministic in general case.
>>> df.withColumn('rand', rand(seed=42) * 3).collect()
[Row(age=2, name=u'Alice', rand=2.4052597283576684),
Row(age=5, name=u'Bob', rand=2.3913904055683974)]
"""
sc = SparkContext._active_spark_context
if seed is not None:
jc = sc._jvm.functions.rand(seed)
else:
jc = sc._jvm.functions.rand()
return Column(jc)
@ignore_unicode_prefix
@since(1.4)
def randn(seed=None):
"""Generates a column with independent and identically distributed (i.i.d.) samples from
the standard normal distribution.
.. note:: The function is non-deterministic in general case.
>>> df.withColumn('randn', randn(seed=42)).collect()
[Row(age=2, name=u'Alice', randn=1.1027054481455365),
Row(age=5, name=u'Bob', randn=0.7400395449950132)]
"""
sc = SparkContext._active_spark_context
if seed is not None:
jc = sc._jvm.functions.randn(seed)
else:
jc = sc._jvm.functions.randn()
return Column(jc)
@since(1.5)
def round(col, scale=0):
"""
Round the given value to `scale` decimal places using HALF_UP rounding mode if `scale` >= 0
or at integral part when `scale` < 0.
>>> spark.createDataFrame([(2.5,)], ['a']).select(round('a', 0).alias('r')).collect()
[Row(r=3.0)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.round(_to_java_column(col), scale))
@since(2.0)
def bround(col, scale=0):
"""
Round the given value to `scale` decimal places using HALF_EVEN rounding mode if `scale` >= 0
or at integral part when `scale` < 0.
>>> spark.createDataFrame([(2.5,)], ['a']).select(bround('a', 0).alias('r')).collect()
[Row(r=2.0)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.bround(_to_java_column(col), scale))
@since(1.5)
def shiftLeft(col, numBits):
"""Shift the given value numBits left.
>>> spark.createDataFrame([(21,)], ['a']).select(shiftLeft('a', 1).alias('r')).collect()
[Row(r=42)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.shiftLeft(_to_java_column(col), numBits))
@since(1.5)
def shiftRight(col, numBits):
"""(Signed) shift the given value numBits right.
>>> spark.createDataFrame([(42,)], ['a']).select(shiftRight('a', 1).alias('r')).collect()
[Row(r=21)]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.shiftRight(_to_java_column(col), numBits)
return Column(jc)
@since(1.5)
def shiftRightUnsigned(col, numBits):
"""Unsigned shift the given value numBits right.
>>> df = spark.createDataFrame([(-42,)], ['a'])
>>> df.select(shiftRightUnsigned('a', 1).alias('r')).collect()
[Row(r=9223372036854775787)]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.shiftRightUnsigned(_to_java_column(col), numBits)
return Column(jc)
@since(1.6)
def spark_partition_id():
"""A column for partition ID.
.. note:: This is indeterministic because it depends on data partitioning and task scheduling.
>>> df.repartition(1).select(spark_partition_id().alias("pid")).collect()
[Row(pid=0), Row(pid=0)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.spark_partition_id())
@since(1.5)
def expr(str):
"""Parses the expression string into the column that it represents
>>> df.select(expr("length(name)")).collect()
[Row(length(name)=5), Row(length(name)=3)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.expr(str))
@ignore_unicode_prefix
@since(1.4)
def struct(*cols):
"""Creates a new struct column.
:param cols: list of column names (string) or list of :class:`Column` expressions
>>> df.select(struct('age', 'name').alias("struct")).collect()
[Row(struct=Row(age=2, name=u'Alice')), Row(struct=Row(age=5, name=u'Bob'))]
>>> df.select(struct([df.age, df.name]).alias("struct")).collect()
[Row(struct=Row(age=2, name=u'Alice')), Row(struct=Row(age=5, name=u'Bob'))]
"""
sc = SparkContext._active_spark_context
if len(cols) == 1 and isinstance(cols[0], (list, set)):
cols = cols[0]
jc = sc._jvm.functions.struct(_to_seq(sc, cols, _to_java_column))
return Column(jc)
@since(1.5)
def greatest(*cols):
"""
Returns the greatest value of the list of column names, skipping null values.
This function takes at least 2 parameters. It will return null iff all parameters are null.
>>> df = spark.createDataFrame([(1, 4, 3)], ['a', 'b', 'c'])
>>> df.select(greatest(df.a, df.b, df.c).alias("greatest")).collect()
[Row(greatest=4)]
"""
if len(cols) < 2:
raise ValueError("greatest should take at least two columns")
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.greatest(_to_seq(sc, cols, _to_java_column)))
@since(1.5)
def least(*cols):
"""
Returns the least value of the list of column names, skipping null values.
This function takes at least 2 parameters. It will return null iff all parameters are null.
>>> df = spark.createDataFrame([(1, 4, 3)], ['a', 'b', 'c'])
>>> df.select(least(df.a, df.b, df.c).alias("least")).collect()
[Row(least=1)]
"""
if len(cols) < 2:
raise ValueError("least should take at least two columns")
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.least(_to_seq(sc, cols, _to_java_column)))
@since(1.4)
def when(condition, value):
"""Evaluates a list of conditions and returns one of multiple possible result expressions.
If :func:`Column.otherwise` is not invoked, None is returned for unmatched conditions.
:param condition: a boolean :class:`Column` expression.
:param value: a literal value, or a :class:`Column` expression.
>>> df.select(when(df['age'] == 2, 3).otherwise(4).alias("age")).collect()
[Row(age=3), Row(age=4)]
>>> df.select(when(df.age == 2, df.age + 1).alias("age")).collect()
[Row(age=3), Row(age=None)]
"""
sc = SparkContext._active_spark_context
if not isinstance(condition, Column):
raise TypeError("condition should be a Column")
v = value._jc if isinstance(value, Column) else value
jc = sc._jvm.functions.when(condition._jc, v)
return Column(jc)
@since(1.5)
def log(arg1, arg2=None):
"""Returns the first argument-based logarithm of the second argument.
If there is only one argument, then this takes the natural logarithm of the argument.
>>> df.select(log(10.0, df.age).alias('ten')).rdd.map(lambda l: str(l.ten)[:7]).collect()
['0.30102', '0.69897']
>>> df.select(log(df.age).alias('e')).rdd.map(lambda l: str(l.e)[:7]).collect()
['0.69314', '1.60943']
"""
sc = SparkContext._active_spark_context
if arg2 is None:
jc = sc._jvm.functions.log(_to_java_column(arg1))
else:
jc = sc._jvm.functions.log(arg1, _to_java_column(arg2))
return Column(jc)
@since(1.5)
def log2(col):
"""Returns the base-2 logarithm of the argument.
>>> spark.createDataFrame([(4,)], ['a']).select(log2('a').alias('log2')).collect()
[Row(log2=2.0)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.log2(_to_java_column(col)))
@since(1.5)
@ignore_unicode_prefix
def conv(col, fromBase, toBase):
"""
Convert a number in a string column from one base to another.
>>> df = spark.createDataFrame([("010101",)], ['n'])
>>> df.select(conv(df.n, 2, 16).alias('hex')).collect()
[Row(hex=u'15')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.conv(_to_java_column(col), fromBase, toBase))
@since(1.5)
def factorial(col):
"""
Computes the factorial of the given value.
>>> df = spark.createDataFrame([(5,)], ['n'])
>>> df.select(factorial(df.n).alias('f')).collect()
[Row(f=120)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.factorial(_to_java_column(col)))
# --------------- Window functions ------------------------
@since(1.4)
def lag(col, offset=1, default=None):
"""
    Window function: returns the value that is `offset` rows before the current row, and
    `default` if there are fewer than `offset` rows before the current row. For example,
an `offset` of one will return the previous row at any given point in the window partition.
This is equivalent to the LAG function in SQL.
:param col: name of column or expression
    :param offset: number of rows to extend
:param default: default value
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.lag(_to_java_column(col), offset, default))
@since(1.4)
def lead(col, offset=1, default=None):
"""
    Window function: returns the value that is `offset` rows after the current row, and
    `default` if there are fewer than `offset` rows after the current row. For example,
an `offset` of one will return the next row at any given point in the window partition.
This is equivalent to the LEAD function in SQL.
:param col: name of column or expression
    :param offset: number of rows to extend
:param default: default value
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.lead(_to_java_column(col), offset, default))
@since(1.4)
def ntile(n):
"""
Window function: returns the ntile group id (from 1 to `n` inclusive)
in an ordered window partition. For example, if `n` is 4, the first
quarter of the rows will get value 1, the second quarter will get 2,
the third quarter will get 3, and the last quarter will get 4.
This is equivalent to the NTILE function in SQL.
:param n: an integer
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.ntile(int(n)))
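# Hedged sketch for the window functions above; ``lag``, ``lead`` and ``ntile`` must be applied
# over a window specification (assumes a DataFrame ``df`` with columns ``name`` and ``age``):
#
#     from pyspark.sql.window import Window
#     w = Window.partitionBy("name").orderBy("age")
#     df.select("name", "age",
#               lag("age", 1, 0).over(w).alias("prev_age"),
#               ntile(2).over(w).alias("half"))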
# ---------------------- Date/Timestamp functions ------------------------------
@since(1.5)
def current_date():
"""
Returns the current date as a :class:`DateType` column.
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.current_date())
@since(1.5)
def current_timestamp():
"""
Returns the current timestamp as a :class:`TimestampType` column.
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.current_timestamp())
@ignore_unicode_prefix
@since(1.5)
def date_format(date, format):
"""
Converts a date/timestamp/string to a value of string in the format specified by the date
format given by the second argument.
A pattern could be for instance `dd.MM.yyyy` and could return a string like '18.03.1993'. All
pattern letters of the Java class `java.time.format.DateTimeFormatter` can be used.
    .. note:: Whenever possible, use specialized functions like `year`. These benefit from a
        specialized implementation.
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(date_format('dt', 'MM/dd/yyy').alias('date')).collect()
[Row(date=u'04/08/2015')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.date_format(_to_java_column(date), format))
@since(1.5)
def year(col):
"""
Extract the year of a given date as integer.
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(year('dt').alias('year')).collect()
[Row(year=2015)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.year(_to_java_column(col)))
@since(1.5)
def quarter(col):
"""
Extract the quarter of a given date as integer.
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(quarter('dt').alias('quarter')).collect()
[Row(quarter=2)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.quarter(_to_java_column(col)))
@since(1.5)
def month(col):
"""
Extract the month of a given date as integer.
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(month('dt').alias('month')).collect()
[Row(month=4)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.month(_to_java_column(col)))
@since(2.3)
def dayofweek(col):
"""
Extract the day of the week of a given date as integer.
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(dayofweek('dt').alias('day')).collect()
[Row(day=4)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.dayofweek(_to_java_column(col)))
@since(1.5)
def dayofmonth(col):
"""
Extract the day of the month of a given date as integer.
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(dayofmonth('dt').alias('day')).collect()
[Row(day=8)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.dayofmonth(_to_java_column(col)))
@since(1.5)
def dayofyear(col):
"""
Extract the day of the year of a given date as integer.
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(dayofyear('dt').alias('day')).collect()
[Row(day=98)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.dayofyear(_to_java_column(col)))
@since(1.5)
def hour(col):
"""
Extract the hours of a given date as integer.
>>> df = spark.createDataFrame([('2015-04-08 13:08:15',)], ['ts'])
>>> df.select(hour('ts').alias('hour')).collect()
[Row(hour=13)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.hour(_to_java_column(col)))
@since(1.5)
def minute(col):
"""
Extract the minutes of a given date as integer.
>>> df = spark.createDataFrame([('2015-04-08 13:08:15',)], ['ts'])
>>> df.select(minute('ts').alias('minute')).collect()
[Row(minute=8)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.minute(_to_java_column(col)))
@since(1.5)
def second(col):
"""
Extract the seconds of a given date as integer.
>>> df = spark.createDataFrame([('2015-04-08 13:08:15',)], ['ts'])
>>> df.select(second('ts').alias('second')).collect()
[Row(second=15)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.second(_to_java_column(col)))
@since(1.5)
def weekofyear(col):
"""
Extract the week number of a given date as integer.
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(weekofyear(df.dt).alias('week')).collect()
[Row(week=15)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.weekofyear(_to_java_column(col)))
@since(1.5)
def date_add(start, days):
"""
Returns the date that is `days` days after `start`
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(date_add(df.dt, 1).alias('next_date')).collect()
[Row(next_date=datetime.date(2015, 4, 9))]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.date_add(_to_java_column(start), days))
@since(1.5)
def date_sub(start, days):
"""
Returns the date that is `days` days before `start`
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(date_sub(df.dt, 1).alias('prev_date')).collect()
[Row(prev_date=datetime.date(2015, 4, 7))]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.date_sub(_to_java_column(start), days))
@since(1.5)
def datediff(end, start):
"""
Returns the number of days from `start` to `end`.
>>> df = spark.createDataFrame([('2015-04-08','2015-05-10')], ['d1', 'd2'])
>>> df.select(datediff(df.d2, df.d1).alias('diff')).collect()
[Row(diff=32)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.datediff(_to_java_column(end), _to_java_column(start)))
@since(1.5)
def add_months(start, months):
"""
Returns the date that is `months` months after `start`
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(add_months(df.dt, 1).alias('next_month')).collect()
[Row(next_month=datetime.date(2015, 5, 8))]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.add_months(_to_java_column(start), months))
@since(1.5)
def months_between(date1, date2, roundOff=True):
"""
Returns number of months between dates date1 and date2.
If date1 is later than date2, then the result is positive.
If date1 and date2 are on the same day of month, or both are the last day of month,
returns an integer (time of day will be ignored).
The result is rounded off to 8 digits unless `roundOff` is set to `False`.
>>> df = spark.createDataFrame([('1997-02-28 10:30:00', '1996-10-30')], ['date1', 'date2'])
>>> df.select(months_between(df.date1, df.date2).alias('months')).collect()
[Row(months=3.94959677)]
>>> df.select(months_between(df.date1, df.date2, False).alias('months')).collect()
[Row(months=3.9495967741935485)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.months_between(
_to_java_column(date1), _to_java_column(date2), roundOff))
@since(2.2)
def to_date(col, format=None):
"""Converts a :class:`Column` of :class:`pyspark.sql.types.StringType` or
:class:`pyspark.sql.types.TimestampType` into :class:`pyspark.sql.types.DateType`
using the optionally specified format. Specify formats according to
`DateTimeFormatter <https://docs.oracle.com/javase/8/docs/api/java/time/format/DateTimeFormatter.html>`_. # noqa
By default, it follows casting rules to :class:`pyspark.sql.types.DateType` if the format
is omitted (equivalent to ``col.cast("date")``).
>>> df = spark.createDataFrame([('1997-02-28 10:30:00',)], ['t'])
>>> df.select(to_date(df.t).alias('date')).collect()
[Row(date=datetime.date(1997, 2, 28))]
>>> df = spark.createDataFrame([('1997-02-28 10:30:00',)], ['t'])
>>> df.select(to_date(df.t, 'yyyy-MM-dd HH:mm:ss').alias('date')).collect()
[Row(date=datetime.date(1997, 2, 28))]
"""
sc = SparkContext._active_spark_context
if format is None:
jc = sc._jvm.functions.to_date(_to_java_column(col))
else:
jc = sc._jvm.functions.to_date(_to_java_column(col), format)
return Column(jc)
@since(2.2)
def to_timestamp(col, format=None):
"""Converts a :class:`Column` of :class:`pyspark.sql.types.StringType` or
    :class:`pyspark.sql.types.TimestampType` into :class:`pyspark.sql.types.TimestampType`
using the optionally specified format. Specify formats according to
`DateTimeFormatter <https://docs.oracle.com/javase/8/docs/api/java/time/format/DateTimeFormatter.html>`_. # noqa
By default, it follows casting rules to :class:`pyspark.sql.types.TimestampType` if the format
is omitted (equivalent to ``col.cast("timestamp")``).
>>> df = spark.createDataFrame([('1997-02-28 10:30:00',)], ['t'])
>>> df.select(to_timestamp(df.t).alias('dt')).collect()
[Row(dt=datetime.datetime(1997, 2, 28, 10, 30))]
>>> df = spark.createDataFrame([('1997-02-28 10:30:00',)], ['t'])
>>> df.select(to_timestamp(df.t, 'yyyy-MM-dd HH:mm:ss').alias('dt')).collect()
[Row(dt=datetime.datetime(1997, 2, 28, 10, 30))]
"""
sc = SparkContext._active_spark_context
if format is None:
jc = sc._jvm.functions.to_timestamp(_to_java_column(col))
else:
jc = sc._jvm.functions.to_timestamp(_to_java_column(col), format)
return Column(jc)
@since(1.5)
def trunc(date, format):
"""
Returns date truncated to the unit specified by the format.
:param format: 'year', 'yyyy', 'yy' or 'month', 'mon', 'mm'
>>> df = spark.createDataFrame([('1997-02-28',)], ['d'])
>>> df.select(trunc(df.d, 'year').alias('year')).collect()
[Row(year=datetime.date(1997, 1, 1))]
>>> df.select(trunc(df.d, 'mon').alias('month')).collect()
[Row(month=datetime.date(1997, 2, 1))]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.trunc(_to_java_column(date), format))
@since(2.3)
def date_trunc(format, timestamp):
"""
Returns timestamp truncated to the unit specified by the format.
:param format: 'year', 'yyyy', 'yy', 'month', 'mon', 'mm',
'day', 'dd', 'hour', 'minute', 'second', 'week', 'quarter'
>>> df = spark.createDataFrame([('1997-02-28 05:02:11',)], ['t'])
>>> df.select(date_trunc('year', df.t).alias('year')).collect()
[Row(year=datetime.datetime(1997, 1, 1, 0, 0))]
>>> df.select(date_trunc('mon', df.t).alias('month')).collect()
[Row(month=datetime.datetime(1997, 2, 1, 0, 0))]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.date_trunc(format, _to_java_column(timestamp)))
@since(1.5)
def next_day(date, dayOfWeek):
"""
Returns the first date which is later than the value of the date column.
Day of the week parameter is case insensitive, and accepts:
"Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun".
>>> df = spark.createDataFrame([('2015-07-27',)], ['d'])
>>> df.select(next_day(df.d, 'Sun').alias('date')).collect()
[Row(date=datetime.date(2015, 8, 2))]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.next_day(_to_java_column(date), dayOfWeek))
@since(1.5)
def last_day(date):
"""
Returns the last day of the month which the given date belongs to.
>>> df = spark.createDataFrame([('1997-02-10',)], ['d'])
>>> df.select(last_day(df.d).alias('date')).collect()
[Row(date=datetime.date(1997, 2, 28))]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.last_day(_to_java_column(date)))
@ignore_unicode_prefix
@since(1.5)
def from_unixtime(timestamp, format="uuuu-MM-dd HH:mm:ss"):
"""
Converts the number of seconds from unix epoch (1970-01-01 00:00:00 UTC) to a string
representing the timestamp of that moment in the current system time zone in the given
format.
>>> spark.conf.set("spark.sql.session.timeZone", "America/Los_Angeles")
>>> time_df = spark.createDataFrame([(1428476400,)], ['unix_time'])
>>> time_df.select(from_unixtime('unix_time').alias('ts')).collect()
[Row(ts=u'2015-04-08 00:00:00')]
>>> spark.conf.unset("spark.sql.session.timeZone")
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.from_unixtime(_to_java_column(timestamp), format))
@since(1.5)
def unix_timestamp(timestamp=None, format='uuuu-MM-dd HH:mm:ss'):
"""
    Convert a time string with the given pattern ('uuuu-MM-dd HH:mm:ss', by default)
    to a Unix time stamp (in seconds), using the default timezone and the default
    locale; return null on failure.
    If `timestamp` is None, then it returns the current timestamp.
>>> spark.conf.set("spark.sql.session.timeZone", "America/Los_Angeles")
>>> time_df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> time_df.select(unix_timestamp('dt', 'yyyy-MM-dd').alias('unix_time')).collect()
[Row(unix_time=1428476400)]
>>> spark.conf.unset("spark.sql.session.timeZone")
"""
sc = SparkContext._active_spark_context
if timestamp is None:
return Column(sc._jvm.functions.unix_timestamp())
return Column(sc._jvm.functions.unix_timestamp(_to_java_column(timestamp), format))
@since(1.5)
def from_utc_timestamp(timestamp, tz):
"""
This is a common function for databases supporting TIMESTAMP WITHOUT TIMEZONE. This function
takes a timestamp which is timezone-agnostic, and interprets it as a timestamp in UTC, and
renders that timestamp as a timestamp in the given time zone.
    However, a timestamp in Spark represents the number of microseconds from the Unix epoch, which
    is not timezone-agnostic. So in Spark this function just shifts the timestamp value from the
    UTC timezone to the given timezone.
    This function may return a confusing result if the input is a string with a timezone, e.g.
    '2018-03-13T06:18:23+00:00'. The reason is that Spark first casts the string to a timestamp
    according to the timezone in the string, and finally displays the result by converting the
    timestamp to a string according to the session-local timezone.
:param timestamp: the column that contains timestamps
:param tz: a string that has the ID of timezone, e.g. "GMT", "America/Los_Angeles", etc
.. versionchanged:: 2.4
`tz` can take a :class:`Column` containing timezone ID strings.
>>> df = spark.createDataFrame([('1997-02-28 10:30:00', 'JST')], ['ts', 'tz'])
>>> df.select(from_utc_timestamp(df.ts, "PST").alias('local_time')).collect()
[Row(local_time=datetime.datetime(1997, 2, 28, 2, 30))]
>>> df.select(from_utc_timestamp(df.ts, df.tz).alias('local_time')).collect()
[Row(local_time=datetime.datetime(1997, 2, 28, 19, 30))]
.. note:: Deprecated in 3.0. See SPARK-25496
"""
warnings.warn("Deprecated in 3.0. See SPARK-25496", DeprecationWarning)
sc = SparkContext._active_spark_context
if isinstance(tz, Column):
tz = _to_java_column(tz)
return Column(sc._jvm.functions.from_utc_timestamp(_to_java_column(timestamp), tz))
@since(1.5)
def to_utc_timestamp(timestamp, tz):
"""
This is a common function for databases supporting TIMESTAMP WITHOUT TIMEZONE. This function
takes a timestamp which is timezone-agnostic, and interprets it as a timestamp in the given
timezone, and renders that timestamp as a timestamp in UTC.
    However, a timestamp in Spark represents the number of microseconds from the Unix epoch, which
    is not timezone-agnostic. So in Spark this function just shifts the timestamp value from the
    given timezone to the UTC timezone.
    This function may return a confusing result if the input is a string with a timezone, e.g.
    '2018-03-13T06:18:23+00:00'. The reason is that Spark first casts the string to a timestamp
    according to the timezone in the string, and finally displays the result by converting the
    timestamp to a string according to the session-local timezone.
:param timestamp: the column that contains timestamps
:param tz: a string that has the ID of timezone, e.g. "GMT", "America/Los_Angeles", etc
.. versionchanged:: 2.4
`tz` can take a :class:`Column` containing timezone ID strings.
>>> df = spark.createDataFrame([('1997-02-28 10:30:00', 'JST')], ['ts', 'tz'])
>>> df.select(to_utc_timestamp(df.ts, "PST").alias('utc_time')).collect()
[Row(utc_time=datetime.datetime(1997, 2, 28, 18, 30))]
>>> df.select(to_utc_timestamp(df.ts, df.tz).alias('utc_time')).collect()
[Row(utc_time=datetime.datetime(1997, 2, 28, 1, 30))]
.. note:: Deprecated in 3.0. See SPARK-25496
"""
warnings.warn("Deprecated in 3.0. See SPARK-25496", DeprecationWarning)
sc = SparkContext._active_spark_context
if isinstance(tz, Column):
tz = _to_java_column(tz)
return Column(sc._jvm.functions.to_utc_timestamp(_to_java_column(timestamp), tz))
@since(2.0)
@ignore_unicode_prefix
def window(timeColumn, windowDuration, slideDuration=None, startTime=None):
"""Bucketize rows into one or more time windows given a timestamp specifying column. Window
starts are inclusive but the window ends are exclusive, e.g. 12:05 will be in the window
[12:05,12:10) but not in [12:00,12:05). Windows can support microsecond precision. Windows in
the order of months are not supported.
The time column must be of :class:`pyspark.sql.types.TimestampType`.
Durations are provided as strings, e.g. '1 second', '1 day 12 hours', '2 minutes'. Valid
interval strings are 'week', 'day', 'hour', 'minute', 'second', 'millisecond', 'microsecond'.
If the ``slideDuration`` is not provided, the windows will be tumbling windows.
The startTime is the offset with respect to 1970-01-01 00:00:00 UTC with which to start
window intervals. For example, in order to have hourly tumbling windows that start 15 minutes
past the hour, e.g. 12:15-13:15, 13:15-14:15... provide `startTime` as `15 minutes`.
The output column will be a struct called 'window' by default with the nested columns 'start'
and 'end', where 'start' and 'end' will be of :class:`pyspark.sql.types.TimestampType`.
>>> df = spark.createDataFrame([("2016-03-11 09:00:07", 1)]).toDF("date", "val")
>>> w = df.groupBy(window("date", "5 seconds")).agg(sum("val").alias("sum"))
>>> w.select(w.window.start.cast("string").alias("start"),
... w.window.end.cast("string").alias("end"), "sum").collect()
[Row(start=u'2016-03-11 09:00:05', end=u'2016-03-11 09:00:10', sum=1)]
"""
def check_string_field(field, fieldName):
if not field or type(field) is not str:
raise TypeError("%s should be provided as a string" % fieldName)
sc = SparkContext._active_spark_context
time_col = _to_java_column(timeColumn)
check_string_field(windowDuration, "windowDuration")
if slideDuration and startTime:
check_string_field(slideDuration, "slideDuration")
check_string_field(startTime, "startTime")
res = sc._jvm.functions.window(time_col, windowDuration, slideDuration, startTime)
elif slideDuration:
check_string_field(slideDuration, "slideDuration")
res = sc._jvm.functions.window(time_col, windowDuration, slideDuration)
elif startTime:
check_string_field(startTime, "startTime")
res = sc._jvm.functions.window(time_col, windowDuration, windowDuration, startTime)
else:
res = sc._jvm.functions.window(time_col, windowDuration)
return Column(res)
# ---------------------------- misc functions ----------------------------------
@since(1.5)
@ignore_unicode_prefix
def crc32(col):
"""
Calculates the cyclic redundancy check value (CRC32) of a binary column and
returns the value as a bigint.
>>> spark.createDataFrame([('ABC',)], ['a']).select(crc32('a').alias('crc32')).collect()
[Row(crc32=2743272264)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.crc32(_to_java_column(col)))
@ignore_unicode_prefix
@since(1.5)
def md5(col):
"""Calculates the MD5 digest and returns the value as a 32 character hex string.
>>> spark.createDataFrame([('ABC',)], ['a']).select(md5('a').alias('hash')).collect()
[Row(hash=u'902fbdd2b1df0c4f70b4a5d23525e932')]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.md5(_to_java_column(col))
return Column(jc)
@ignore_unicode_prefix
@since(1.5)
def sha1(col):
"""Returns the hex string result of SHA-1.
>>> spark.createDataFrame([('ABC',)], ['a']).select(sha1('a').alias('hash')).collect()
[Row(hash=u'3c01bdbb26f358bab27f267924aa2c9a03fcfdb8')]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.sha1(_to_java_column(col))
return Column(jc)
@ignore_unicode_prefix
@since(1.5)
def sha2(col, numBits):
"""Returns the hex string result of SHA-2 family of hash functions (SHA-224, SHA-256, SHA-384,
and SHA-512). The numBits indicates the desired bit length of the result, which must have a
value of 224, 256, 384, 512, or 0 (which is equivalent to 256).
>>> digests = df.select(sha2(df.name, 256).alias('s')).collect()
>>> digests[0]
Row(s=u'3bc51062973c458d5a6f2d8d64a023246354ad7e064b1e4e009ec8a0699a3043')
>>> digests[1]
Row(s=u'cd9fb1e148ccd8442e5aa74904cc73bf6fb54d1d54d333bd596aa9bb4bb4e961')
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.sha2(_to_java_column(col), numBits)
return Column(jc)
@since(2.0)
def hash(*cols):
"""Calculates the hash code of given columns, and returns the result as an int column.
>>> spark.createDataFrame([('ABC',)], ['a']).select(hash('a').alias('hash')).collect()
[Row(hash=-757602832)]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.hash(_to_seq(sc, cols, _to_java_column))
return Column(jc)
@since(3.0)
def xxhash64(*cols):
"""Calculates the hash code of given columns using the 64-bit variant of the xxHash algorithm,
and returns the result as a long column.
>>> spark.createDataFrame([('ABC',)], ['a']).select(xxhash64('a').alias('hash')).collect()
[Row(hash=4105715581806190027)]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.xxhash64(_to_seq(sc, cols, _to_java_column))
return Column(jc)
# ---------------------- String/Binary functions ------------------------------
_string_functions = {
'upper': 'Converts a string expression to upper case.',
'lower': 'Converts a string expression to lower case.',
'ascii': 'Computes the numeric value of the first character of the string column.',
'base64': 'Computes the BASE64 encoding of a binary column and returns it as a string column.',
'unbase64': 'Decodes a BASE64 encoded string column and returns it as a binary column.',
'ltrim': 'Trim the spaces from left end for the specified string value.',
'rtrim': 'Trim the spaces from right end for the specified string value.',
'trim': 'Trim the spaces from both ends for the specified string column.',
}
for _name, _doc in _string_functions.items():
globals()[_name] = since(1.5)(_create_function_over_column(_name, _doc))
del _name, _doc
@since(1.5)
@ignore_unicode_prefix
def concat_ws(sep, *cols):
"""
Concatenates multiple input string columns together into a single string column,
using the given separator.
>>> df = spark.createDataFrame([('abcd','123')], ['s', 'd'])
>>> df.select(concat_ws('-', df.s, df.d).alias('s')).collect()
[Row(s=u'abcd-123')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.concat_ws(sep, _to_seq(sc, cols, _to_java_column)))
@since(1.5)
def decode(col, charset):
"""
Computes the first argument into a string from a binary using the provided character set
(one of 'US-ASCII', 'ISO-8859-1', 'UTF-8', 'UTF-16BE', 'UTF-16LE', 'UTF-16').
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.decode(_to_java_column(col), charset))
@since(1.5)
def encode(col, charset):
"""
Computes the first argument into a binary from a string using the provided character set
(one of 'US-ASCII', 'ISO-8859-1', 'UTF-8', 'UTF-16BE', 'UTF-16LE', 'UTF-16').
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.encode(_to_java_column(col), charset))
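# Illustrative round trip for ``encode``/``decode`` (not part of the original doctests; assumes
# the usual doctest SparkSession ``spark``):
#
#     df = spark.createDataFrame([('abcd',)], ['s'])
#     df.select(decode(encode('s', 'UTF-8'), 'UTF-8').alias('s')).collect()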
@ignore_unicode_prefix
@since(1.5)
def format_number(col, d):
"""
Formats the number X to a format like '#,--#,--#.--', rounded to d decimal places
with HALF_EVEN round mode, and returns the result as a string.
:param col: the column name of the numeric value to be formatted
:param d: the N decimal places
>>> spark.createDataFrame([(5,)], ['a']).select(format_number('a', 4).alias('v')).collect()
[Row(v=u'5.0000')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.format_number(_to_java_column(col), d))
@ignore_unicode_prefix
@since(1.5)
def format_string(format, *cols):
"""
Formats the arguments in printf-style and returns the result as a string column.
:param format: string that can contain embedded format tags and is used as the result column's value
:param cols: list of column names (string) or list of :class:`Column` expressions to
be used in formatting
>>> df = spark.createDataFrame([(5, "hello")], ['a', 'b'])
>>> df.select(format_string('%d %s', df.a, df.b).alias('v')).collect()
[Row(v=u'5 hello')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.format_string(format, _to_seq(sc, cols, _to_java_column)))
@since(1.5)
def instr(str, substr):
"""
Locate the position of the first occurrence of substr column in the given string.
Returns null if either of the arguments is null.
.. note:: The position is not zero based, but 1 based index. Returns 0 if substr
could not be found in str.
>>> df = spark.createDataFrame([('abcd',)], ['s',])
>>> df.select(instr(df.s, 'b').alias('s')).collect()
[Row(s=2)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.instr(_to_java_column(str), substr))
@since(1.5)
@ignore_unicode_prefix
def substring(str, pos, len):
"""
Substring starts at `pos` and is of length `len` when str is String type or
returns the slice of byte array that starts at `pos` in byte and is of length `len`
when str is Binary type.
.. note:: The position is not zero based, but 1 based index.
>>> df = spark.createDataFrame([('abcd',)], ['s',])
>>> df.select(substring(df.s, 1, 2).alias('s')).collect()
[Row(s=u'ab')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.substring(_to_java_column(str), pos, len))
@since(1.5)
@ignore_unicode_prefix
def substring_index(str, delim, count):
"""
Returns the substring from string str before count occurrences of the delimiter delim.
If count is positive, everything to the left of the final delimiter (counting from the left) is
returned. If count is negative, everything to the right of the final delimiter (counting from the
right) is returned. substring_index performs a case-sensitive match when searching for delim.
>>> df = spark.createDataFrame([('a.b.c.d',)], ['s'])
>>> df.select(substring_index(df.s, '.', 2).alias('s')).collect()
[Row(s=u'a.b')]
>>> df.select(substring_index(df.s, '.', -3).alias('s')).collect()
[Row(s=u'b.c.d')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.substring_index(_to_java_column(str), delim, count))
@ignore_unicode_prefix
@since(1.5)
def levenshtein(left, right):
"""Computes the Levenshtein distance of the two given strings.
>>> df0 = spark.createDataFrame([('kitten', 'sitting',)], ['l', 'r'])
>>> df0.select(levenshtein('l', 'r').alias('d')).collect()
[Row(d=3)]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.levenshtein(_to_java_column(left), _to_java_column(right))
return Column(jc)
@since(1.5)
def locate(substr, str, pos=1):
"""
Locate the position of the first occurrence of substr in a string column, after position pos.
.. note:: The position is not zero based, but 1 based index. Returns 0 if substr
could not be found in str.
:param substr: a string
:param str: a Column of :class:`pyspark.sql.types.StringType`
:param pos: start position (1 based)
>>> df = spark.createDataFrame([('abcd',)], ['s',])
>>> df.select(locate('b', df.s, 1).alias('s')).collect()
[Row(s=2)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.locate(substr, _to_java_column(str), pos))
@since(1.5)
@ignore_unicode_prefix
def lpad(col, len, pad):
"""
Left-pad the string column to width `len` with `pad`.
>>> df = spark.createDataFrame([('abcd',)], ['s',])
>>> df.select(lpad(df.s, 6, '#').alias('s')).collect()
[Row(s=u'##abcd')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.lpad(_to_java_column(col), len, pad))
@since(1.5)
@ignore_unicode_prefix
def rpad(col, len, pad):
"""
Right-pad the string column to width `len` with `pad`.
>>> df = spark.createDataFrame([('abcd',)], ['s',])
>>> df.select(rpad(df.s, 6, '#').alias('s')).collect()
[Row(s=u'abcd##')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.rpad(_to_java_column(col), len, pad))
@since(1.5)
@ignore_unicode_prefix
def repeat(col, n):
"""
Repeats a string column n times, and returns it as a new string column.
>>> df = spark.createDataFrame([('ab',)], ['s',])
>>> df.select(repeat(df.s, 3).alias('s')).collect()
[Row(s=u'ababab')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.repeat(_to_java_column(col), n))
@since(1.5)
@ignore_unicode_prefix
def split(str, pattern, limit=-1):
"""
Splits str around matches of the given pattern.
:param str: a string expression to split
:param pattern: a string representing a regular expression. The regex string should be
a Java regular expression.
:param limit: an integer which controls the number of times `pattern` is applied.
* ``limit > 0``: The resulting array's length will not be more than `limit`, and the
resulting array's last entry will contain all input beyond the last
matched pattern.
* ``limit <= 0``: `pattern` will be applied as many times as possible, and the resulting
array can be of any size.
.. versionchanged:: 3.0
`split` now takes an optional `limit` field. If not provided, default limit value is -1.
>>> df = spark.createDataFrame([('oneAtwoBthreeC',)], ['s',])
>>> df.select(split(df.s, '[ABC]', 2).alias('s')).collect()
[Row(s=[u'one', u'twoBthreeC'])]
>>> df.select(split(df.s, '[ABC]', -1).alias('s')).collect()
[Row(s=[u'one', u'two', u'three', u''])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.split(_to_java_column(str), pattern, limit))
@ignore_unicode_prefix
@since(1.5)
def regexp_extract(str, pattern, idx):
r"""Extract a specific group matched by a Java regex, from the specified string column.
If the regex did not match, or the specified group did not match, an empty string is returned.
>>> df = spark.createDataFrame([('100-200',)], ['str'])
>>> df.select(regexp_extract('str', r'(\d+)-(\d+)', 1).alias('d')).collect()
[Row(d=u'100')]
>>> df = spark.createDataFrame([('foo',)], ['str'])
>>> df.select(regexp_extract('str', r'(\d+)', 1).alias('d')).collect()
[Row(d=u'')]
>>> df = spark.createDataFrame([('aaaac',)], ['str'])
>>> df.select(regexp_extract('str', '(a+)(b)?(c)', 2).alias('d')).collect()
[Row(d=u'')]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.regexp_extract(_to_java_column(str), pattern, idx)
return Column(jc)
@ignore_unicode_prefix
@since(1.5)
def regexp_replace(str, pattern, replacement):
r"""Replace all substrings of the specified string value that match regexp with rep.
>>> df = spark.createDataFrame([('100-200',)], ['str'])
>>> df.select(regexp_replace('str', r'(\d+)', '--').alias('d')).collect()
[Row(d=u'-----')]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.regexp_replace(_to_java_column(str), pattern, replacement)
return Column(jc)
@ignore_unicode_prefix
@since(1.5)
def initcap(col):
"""Translate the first letter of each word to upper case in the sentence.
>>> spark.createDataFrame([('ab cd',)], ['a']).select(initcap("a").alias('v')).collect()
[Row(v=u'Ab Cd')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.initcap(_to_java_column(col)))
@since(1.5)
@ignore_unicode_prefix
def soundex(col):
"""
Returns the SoundEx encoding for a string.
>>> df = spark.createDataFrame([("Peters",),("Uhrbach",)], ['name'])
>>> df.select(soundex(df.name).alias("soundex")).collect()
[Row(soundex=u'P362'), Row(soundex=u'U612')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.soundex(_to_java_column(col)))
@ignore_unicode_prefix
@since(1.5)
def bin(col):
"""Returns the string representation of the binary value of the given column.
>>> df.select(bin(df.age).alias('c')).collect()
[Row(c=u'10'), Row(c=u'101')]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.bin(_to_java_column(col))
return Column(jc)
@ignore_unicode_prefix
@since(1.5)
def hex(col):
"""Computes hex value of the given column, which could be :class:`pyspark.sql.types.StringType`,
:class:`pyspark.sql.types.BinaryType`, :class:`pyspark.sql.types.IntegerType` or
:class:`pyspark.sql.types.LongType`.
>>> spark.createDataFrame([('ABC', 3)], ['a', 'b']).select(hex('a'), hex('b')).collect()
[Row(hex(a)=u'414243', hex(b)=u'3')]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.hex(_to_java_column(col))
return Column(jc)
@ignore_unicode_prefix
@since(1.5)
def unhex(col):
"""Inverse of hex. Interprets each pair of characters as a hexadecimal number
and converts to the byte representation of number.
>>> spark.createDataFrame([('414243',)], ['a']).select(unhex('a')).collect()
[Row(unhex(a)=bytearray(b'ABC'))]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.unhex(_to_java_column(col)))
@ignore_unicode_prefix
@since(1.5)
def length(col):
"""Computes the character length of string data or number of bytes of binary data.
The length of character data includes the trailing spaces. The length of binary data
includes binary zeros.
>>> spark.createDataFrame([('ABC ',)], ['a']).select(length('a').alias('length')).collect()
[Row(length=4)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.length(_to_java_column(col)))
@ignore_unicode_prefix
@since(1.5)
def translate(srcCol, matching, replace):
"""A function translate any character in the `srcCol` by a character in `matching`.
The characters in `replace` is corresponding to the characters in `matching`.
The translate will happen when any character in the string matching with the character
in the `matching`.
>>> spark.createDataFrame([('translate',)], ['a']).select(translate('a', "rnlt", "123") \\
... .alias('r')).collect()
[Row(r=u'1a2s3ae')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.translate(_to_java_column(srcCol), matching, replace))
# ---------------------- Collection functions ------------------------------
@ignore_unicode_prefix
@since(2.0)
def create_map(*cols):
"""Creates a new map column.
:param cols: list of column names (string) or list of :class:`Column` expressions that are
grouped as key-value pairs, e.g. (key1, value1, key2, value2, ...).
>>> df.select(create_map('name', 'age').alias("map")).collect()
[Row(map={u'Alice': 2}), Row(map={u'Bob': 5})]
>>> df.select(create_map([df.name, df.age]).alias("map")).collect()
[Row(map={u'Alice': 2}), Row(map={u'Bob': 5})]
"""
sc = SparkContext._active_spark_context
if len(cols) == 1 and isinstance(cols[0], (list, set)):
cols = cols[0]
jc = sc._jvm.functions.map(_to_seq(sc, cols, _to_java_column))
return Column(jc)
@since(2.4)
def map_from_arrays(col1, col2):
"""Creates a new map from two arrays.
:param col1: name of column containing a set of keys. All elements should not be null
:param col2: name of column containing a set of values
>>> df = spark.createDataFrame([([2, 5], ['a', 'b'])], ['k', 'v'])
>>> df.select(map_from_arrays(df.k, df.v).alias("map")).show()
+----------------+
| map|
+----------------+
|[2 -> a, 5 -> b]|
+----------------+
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.map_from_arrays(_to_java_column(col1), _to_java_column(col2)))
@since(1.4)
def array(*cols):
"""Creates a new array column.
:param cols: list of column names (string) or list of :class:`Column` expressions that have
the same data type.
>>> df.select(array('age', 'age').alias("arr")).collect()
[Row(arr=[2, 2]), Row(arr=[5, 5])]
>>> df.select(array([df.age, df.age]).alias("arr")).collect()
[Row(arr=[2, 2]), Row(arr=[5, 5])]
"""
sc = SparkContext._active_spark_context
if len(cols) == 1 and isinstance(cols[0], (list, set)):
cols = cols[0]
jc = sc._jvm.functions.array(_to_seq(sc, cols, _to_java_column))
return Column(jc)
@since(1.5)
def array_contains(col, value):
"""
Collection function: returns null if the array is null, true if the array contains the
given value, and false otherwise.
:param col: name of column containing array
:param value: value or column to check for in array
>>> df = spark.createDataFrame([(["a", "b", "c"],), ([],)], ['data'])
>>> df.select(array_contains(df.data, "a")).collect()
[Row(array_contains(data, a)=True), Row(array_contains(data, a)=False)]
>>> df.select(array_contains(df.data, lit("a"))).collect()
[Row(array_contains(data, a)=True), Row(array_contains(data, a)=False)]
"""
sc = SparkContext._active_spark_context
value = value._jc if isinstance(value, Column) else value
return Column(sc._jvm.functions.array_contains(_to_java_column(col), value))
@since(2.4)
def arrays_overlap(a1, a2):
"""
Collection function: returns true if the arrays contain any common non-null element; if not,
returns null if both the arrays are non-empty and any of them contains a null element; returns
false otherwise.
>>> df = spark.createDataFrame([(["a", "b"], ["b", "c"]), (["a"], ["b", "c"])], ['x', 'y'])
>>> df.select(arrays_overlap(df.x, df.y).alias("overlap")).collect()
[Row(overlap=True), Row(overlap=False)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.arrays_overlap(_to_java_column(a1), _to_java_column(a2)))
@since(2.4)
def slice(x, start, length):
"""
Collection function: returns an array containing all the elements in `x` from index `start`
(array indices start at 1, or from the end if `start` is negative) with the specified `length`.
>>> df = spark.createDataFrame([([1, 2, 3],), ([4, 5],)], ['x'])
>>> df.select(slice(df.x, 2, 2).alias("sliced")).collect()
[Row(sliced=[2, 3]), Row(sliced=[5])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.slice(_to_java_column(x), start, length))
@ignore_unicode_prefix
@since(2.4)
def array_join(col, delimiter, null_replacement=None):
"""
Concatenates the elements of `column` using the `delimiter`. Null values are replaced with
`null_replacement` if set, otherwise they are ignored.
>>> df = spark.createDataFrame([(["a", "b", "c"],), (["a", None],)], ['data'])
>>> df.select(array_join(df.data, ",").alias("joined")).collect()
[Row(joined=u'a,b,c'), Row(joined=u'a')]
>>> df.select(array_join(df.data, ",", "NULL").alias("joined")).collect()
[Row(joined=u'a,b,c'), Row(joined=u'a,NULL')]
"""
sc = SparkContext._active_spark_context
if null_replacement is None:
return Column(sc._jvm.functions.array_join(_to_java_column(col), delimiter))
else:
return Column(sc._jvm.functions.array_join(
_to_java_column(col), delimiter, null_replacement))
@since(1.5)
@ignore_unicode_prefix
def concat(*cols):
"""
Concatenates multiple input columns together into a single column.
The function works with strings, binary and compatible array columns.
>>> df = spark.createDataFrame([('abcd','123')], ['s', 'd'])
>>> df.select(concat(df.s, df.d).alias('s')).collect()
[Row(s=u'abcd123')]
>>> df = spark.createDataFrame([([1, 2], [3, 4], [5]), ([1, 2], None, [3])], ['a', 'b', 'c'])
>>> df.select(concat(df.a, df.b, df.c).alias("arr")).collect()
[Row(arr=[1, 2, 3, 4, 5]), Row(arr=None)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.concat(_to_seq(sc, cols, _to_java_column)))
@since(2.4)
def array_position(col, value):
"""
Collection function: Locates the position of the first occurrence of the given value
in the given array. Returns null if either of the arguments is null.
.. note:: The position is not zero based, but 1 based index. Returns 0 if the given
value could not be found in the array.
>>> df = spark.createDataFrame([(["c", "b", "a"],), ([],)], ['data'])
>>> df.select(array_position(df.data, "a")).collect()
[Row(array_position(data, a)=3), Row(array_position(data, a)=0)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.array_position(_to_java_column(col), value))
@ignore_unicode_prefix
@since(2.4)
def element_at(col, extraction):
"""
Collection function: Returns element of array at given index in extraction if col is array.
Returns value for the given key in extraction if col is map.
:param col: name of column containing array or map
:param extraction: index to check for in array or key to check for in map
.. note:: The position is not zero based, but 1 based index.
>>> df = spark.createDataFrame([(["a", "b", "c"],), ([],)], ['data'])
>>> df.select(element_at(df.data, 1)).collect()
[Row(element_at(data, 1)=u'a'), Row(element_at(data, 1)=None)]
>>> df = spark.createDataFrame([({"a": 1.0, "b": 2.0},), ({},)], ['data'])
>>> df.select(element_at(df.data, lit("a"))).collect()
[Row(element_at(data, a)=1.0), Row(element_at(data, a)=None)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.element_at(
_to_java_column(col), lit(extraction)._jc)) # noqa: F821 'lit' is dynamically defined.
@since(2.4)
def array_remove(col, element):
"""
Collection function: Remove all elements equal to `element` from the given array.
:param col: name of column containing array
:param element: element to be removed from the array
>>> df = spark.createDataFrame([([1, 2, 3, 1, 1],), ([],)], ['data'])
>>> df.select(array_remove(df.data, 1)).collect()
[Row(array_remove(data, 1)=[2, 3]), Row(array_remove(data, 1)=[])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.array_remove(_to_java_column(col), element))
@since(2.4)
def array_distinct(col):
"""
Collection function: removes duplicate values from the array.
:param col: name of column or expression
>>> df = spark.createDataFrame([([1, 2, 3, 2],), ([4, 5, 5, 4],)], ['data'])
>>> df.select(array_distinct(df.data)).collect()
[Row(array_distinct(data)=[1, 2, 3]), Row(array_distinct(data)=[4, 5])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.array_distinct(_to_java_column(col)))
@ignore_unicode_prefix
@since(2.4)
def array_intersect(col1, col2):
"""
Collection function: returns an array of the elements in the intersection of col1 and col2,
without duplicates.
:param col1: name of column containing array
:param col2: name of column containing array
>>> from pyspark.sql import Row
>>> df = spark.createDataFrame([Row(c1=["b", "a", "c"], c2=["c", "d", "a", "f"])])
>>> df.select(array_intersect(df.c1, df.c2)).collect()
[Row(array_intersect(c1, c2)=[u'a', u'c'])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.array_intersect(_to_java_column(col1), _to_java_column(col2)))
@ignore_unicode_prefix
@since(2.4)
def array_union(col1, col2):
"""
Collection function: returns an array of the elements in the union of col1 and col2,
without duplicates.
:param col1: name of column containing array
:param col2: name of column containing array
>>> from pyspark.sql import Row
>>> df = spark.createDataFrame([Row(c1=["b", "a", "c"], c2=["c", "d", "a", "f"])])
>>> df.select(array_union(df.c1, df.c2)).collect()
[Row(array_union(c1, c2)=[u'b', u'a', u'c', u'd', u'f'])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.array_union(_to_java_column(col1), _to_java_column(col2)))
@ignore_unicode_prefix
@since(2.4)
def array_except(col1, col2):
"""
Collection function: returns an array of the elements in col1 but not in col2,
without duplicates.
:param col1: name of column containing array
:param col2: name of column containing array
>>> from pyspark.sql import Row
>>> df = spark.createDataFrame([Row(c1=["b", "a", "c"], c2=["c", "d", "a", "f"])])
>>> df.select(array_except(df.c1, df.c2)).collect()
[Row(array_except(c1, c2)=[u'b'])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.array_except(_to_java_column(col1), _to_java_column(col2)))
@since(1.4)
def explode(col):
"""
Returns a new row for each element in the given array or map.
Uses the default column name `col` for elements in the array and
`key` and `value` for elements in the map unless specified otherwise.
>>> from pyspark.sql import Row
>>> eDF = spark.createDataFrame([Row(a=1, intlist=[1,2,3], mapfield={"a": "b"})])
>>> eDF.select(explode(eDF.intlist).alias("anInt")).collect()
[Row(anInt=1), Row(anInt=2), Row(anInt=3)]
>>> eDF.select(explode(eDF.mapfield).alias("key", "value")).show()
+---+-----+
|key|value|
+---+-----+
| a| b|
+---+-----+
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.explode(_to_java_column(col))
return Column(jc)
@since(2.1)
def posexplode(col):
"""
Returns a new row for each element with position in the given array or map.
Uses the default column name `pos` for position, and `col` for elements in the
array and `key` and `value` for elements in the map unless specified otherwise.
>>> from pyspark.sql import Row
>>> eDF = spark.createDataFrame([Row(a=1, intlist=[1,2,3], mapfield={"a": "b"})])
>>> eDF.select(posexplode(eDF.intlist)).collect()
[Row(pos=0, col=1), Row(pos=1, col=2), Row(pos=2, col=3)]
>>> eDF.select(posexplode(eDF.mapfield)).show()
+---+---+-----+
|pos|key|value|
+---+---+-----+
| 0| a| b|
+---+---+-----+
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.posexplode(_to_java_column(col))
return Column(jc)
@since(2.3)
def explode_outer(col):
"""
Returns a new row for each element in the given array or map.
Unlike explode, if the array/map is null or empty then null is produced.
Uses the default column name `col` for elements in the array and
`key` and `value` for elements in the map unless specified otherwise.
>>> df = spark.createDataFrame(
... [(1, ["foo", "bar"], {"x": 1.0}), (2, [], {}), (3, None, None)],
... ("id", "an_array", "a_map")
... )
>>> df.select("id", "an_array", explode_outer("a_map")).show()
+---+----------+----+-----+
| id| an_array| key|value|
+---+----------+----+-----+
| 1|[foo, bar]| x| 1.0|
| 2| []|null| null|
| 3| null|null| null|
+---+----------+----+-----+
>>> df.select("id", "a_map", explode_outer("an_array")).show()
+---+----------+----+
| id| a_map| col|
+---+----------+----+
| 1|[x -> 1.0]| foo|
| 1|[x -> 1.0]| bar|
| 2| []|null|
| 3| null|null|
+---+----------+----+
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.explode_outer(_to_java_column(col))
return Column(jc)
@since(2.3)
def posexplode_outer(col):
"""
Returns a new row for each element with position in the given array or map.
Unlike posexplode, if the array/map is null or empty then the row (null, null) is produced.
Uses the default column name `pos` for position, and `col` for elements in the
array and `key` and `value` for elements in the map unless specified otherwise.
>>> df = spark.createDataFrame(
... [(1, ["foo", "bar"], {"x": 1.0}), (2, [], {}), (3, None, None)],
... ("id", "an_array", "a_map")
... )
>>> df.select("id", "an_array", posexplode_outer("a_map")).show()
+---+----------+----+----+-----+
| id| an_array| pos| key|value|
+---+----------+----+----+-----+
| 1|[foo, bar]| 0| x| 1.0|
| 2| []|null|null| null|
| 3| null|null|null| null|
+---+----------+----+----+-----+
>>> df.select("id", "a_map", posexplode_outer("an_array")).show()
+---+----------+----+----+
| id| a_map| pos| col|
+---+----------+----+----+
| 1|[x -> 1.0]| 0| foo|
| 1|[x -> 1.0]| 1| bar|
| 2| []|null|null|
| 3| null|null|null|
+---+----------+----+----+
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.posexplode_outer(_to_java_column(col))
return Column(jc)
@ignore_unicode_prefix
@since(1.6)
def get_json_object(col, path):
"""
Extracts json object from a json string based on json path specified, and returns json string
of the extracted json object. It will return null if the input json string is invalid.
:param col: string column in json format
:param path: path to the json object to extract
>>> data = [("1", '''{"f1": "value1", "f2": "value2"}'''), ("2", '''{"f1": "value12"}''')]
>>> df = spark.createDataFrame(data, ("key", "jstring"))
>>> df.select(df.key, get_json_object(df.jstring, '$.f1').alias("c0"), \\
... get_json_object(df.jstring, '$.f2').alias("c1") ).collect()
[Row(key=u'1', c0=u'value1', c1=u'value2'), Row(key=u'2', c0=u'value12', c1=None)]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.get_json_object(_to_java_column(col), path)
return Column(jc)
@ignore_unicode_prefix
@since(1.6)
def json_tuple(col, *fields):
"""Creates a new row for a json column according to the given field names.
:param col: string column in json format
:param fields: list of fields to extract
>>> data = [("1", '''{"f1": "value1", "f2": "value2"}'''), ("2", '''{"f1": "value12"}''')]
>>> df = spark.createDataFrame(data, ("key", "jstring"))
>>> df.select(df.key, json_tuple(df.jstring, 'f1', 'f2')).collect()
[Row(key=u'1', c0=u'value1', c1=u'value2'), Row(key=u'2', c0=u'value12', c1=None)]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.json_tuple(_to_java_column(col), _to_seq(sc, fields))
return Column(jc)
@ignore_unicode_prefix
@since(2.1)
def from_json(col, schema, options={}):
"""
Parses a column containing a JSON string into a :class:`MapType` with :class:`StringType`
keys, a :class:`StructType`, or an :class:`ArrayType` with
the specified schema. Returns `null` in the case of an unparseable string.
:param col: string column in json format
:param schema: a StructType or ArrayType of StructType to use when parsing the json column.
:param options: options to control parsing. accepts the same options as the json datasource
.. note:: Since Spark 2.3, the DDL-formatted string or a JSON format string is also
supported for ``schema``.
>>> from pyspark.sql.types import *
>>> data = [(1, '''{"a": 1}''')]
>>> schema = StructType([StructField("a", IntegerType())])
>>> df = spark.createDataFrame(data, ("key", "value"))
>>> df.select(from_json(df.value, schema).alias("json")).collect()
[Row(json=Row(a=1))]
>>> df.select(from_json(df.value, "a INT").alias("json")).collect()
[Row(json=Row(a=1))]
>>> df.select(from_json(df.value, "MAP<STRING,INT>").alias("json")).collect()
[Row(json={u'a': 1})]
>>> data = [(1, '''[{"a": 1}]''')]
>>> schema = ArrayType(StructType([StructField("a", IntegerType())]))
>>> df = spark.createDataFrame(data, ("key", "value"))
>>> df.select(from_json(df.value, schema).alias("json")).collect()
[Row(json=[Row(a=1)])]
>>> schema = schema_of_json(lit('''{"a": 0}'''))
>>> df.select(from_json(df.value, schema).alias("json")).collect()
[Row(json=Row(a=None))]
>>> data = [(1, '''[1, 2, 3]''')]
>>> schema = ArrayType(IntegerType())
>>> df = spark.createDataFrame(data, ("key", "value"))
>>> df.select(from_json(df.value, schema).alias("json")).collect()
[Row(json=[1, 2, 3])]
"""
sc = SparkContext._active_spark_context
if isinstance(schema, DataType):
schema = schema.json()
elif isinstance(schema, Column):
schema = _to_java_column(schema)
jc = sc._jvm.functions.from_json(_to_java_column(col), schema, _options_to_str(options))
return Column(jc)
@ignore_unicode_prefix
@since(2.1)
def to_json(col, options={}):
"""
Converts a column containing a :class:`StructType`, :class:`ArrayType` or a :class:`MapType`
into a JSON string. Throws an exception, in the case of an unsupported type.
:param col: name of column containing a struct, an array or a map.
:param options: options to control converting. accepts the same options as the JSON datasource.
Additionally the function supports the `pretty` option which enables
pretty JSON generation.
>>> from pyspark.sql import Row
>>> from pyspark.sql.types import *
>>> data = [(1, Row(name='Alice', age=2))]
>>> df = spark.createDataFrame(data, ("key", "value"))
>>> df.select(to_json(df.value).alias("json")).collect()
[Row(json=u'{"age":2,"name":"Alice"}')]
>>> data = [(1, [Row(name='Alice', age=2), Row(name='Bob', age=3)])]
>>> df = spark.createDataFrame(data, ("key", "value"))
>>> df.select(to_json(df.value).alias("json")).collect()
[Row(json=u'[{"age":2,"name":"Alice"},{"age":3,"name":"Bob"}]')]
>>> data = [(1, {"name": "Alice"})]
>>> df = spark.createDataFrame(data, ("key", "value"))
>>> df.select(to_json(df.value).alias("json")).collect()
[Row(json=u'{"name":"Alice"}')]
>>> data = [(1, [{"name": "Alice"}, {"name": "Bob"}])]
>>> df = spark.createDataFrame(data, ("key", "value"))
>>> df.select(to_json(df.value).alias("json")).collect()
[Row(json=u'[{"name":"Alice"},{"name":"Bob"}]')]
>>> data = [(1, ["Alice", "Bob"])]
>>> df = spark.createDataFrame(data, ("key", "value"))
>>> df.select(to_json(df.value).alias("json")).collect()
[Row(json=u'["Alice","Bob"]')]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.to_json(_to_java_column(col), _options_to_str(options))
return Column(jc)
@ignore_unicode_prefix
@since(2.4)
def schema_of_json(json, options={}):
"""
Parses a JSON string and infers its schema in DDL format.
:param json: a JSON string or a string literal containing a JSON string.
:param options: options to control parsing. accepts the same options as the JSON datasource
.. versionchanged:: 3.0
It accepts `options` parameter to control schema inferring.
>>> df = spark.range(1)
>>> df.select(schema_of_json(lit('{"a": 0}')).alias("json")).collect()
[Row(json=u'struct<a:bigint>')]
>>> schema = schema_of_json('{a: 1}', {'allowUnquotedFieldNames':'true'})
>>> df.select(schema.alias("json")).collect()
[Row(json=u'struct<a:bigint>')]
"""
if isinstance(json, basestring):
col = _create_column_from_literal(json)
elif isinstance(json, Column):
col = _to_java_column(json)
else:
raise TypeError("schema argument should be a column or string")
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.schema_of_json(col, _options_to_str(options))
return Column(jc)
@ignore_unicode_prefix
@since(3.0)
def schema_of_csv(csv, options={}):
"""
Parses a CSV string and infers its schema in DDL format.
:param csv: a CSV string or a string literal containing a CSV string.
:param options: options to control parsing. accepts the same options as the CSV datasource
>>> df = spark.range(1)
>>> df.select(schema_of_csv(lit('1|a'), {'sep':'|'}).alias("csv")).collect()
[Row(csv=u'struct<_c0:int,_c1:string>')]
>>> df.select(schema_of_csv('1|a', {'sep':'|'}).alias("csv")).collect()
[Row(csv=u'struct<_c0:int,_c1:string>')]
"""
if isinstance(csv, basestring):
col = _create_column_from_literal(csv)
elif isinstance(csv, Column):
col = _to_java_column(csv)
else:
raise TypeError("schema argument should be a column or string")
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.schema_of_csv(col, _options_to_str(options))
return Column(jc)
@ignore_unicode_prefix
@since(3.0)
def to_csv(col, options={}):
"""
Converts a column containing a :class:`StructType` into a CSV string.
Throws an exception, in the case of an unsupported type.
:param col: name of column containing a struct.
:param options: options to control converting. accepts the same options as the CSV datasource.
>>> from pyspark.sql import Row
>>> data = [(1, Row(name='Alice', age=2))]
>>> df = spark.createDataFrame(data, ("key", "value"))
>>> df.select(to_csv(df.value).alias("csv")).collect()
[Row(csv=u'2,Alice')]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.to_csv(_to_java_column(col), _options_to_str(options))
return Column(jc)
@since(1.5)
def size(col):
"""
Collection function: returns the length of the array or map stored in the column.
:param col: name of column or expression
>>> df = spark.createDataFrame([([1, 2, 3],),([1],),([],)], ['data'])
>>> df.select(size(df.data)).collect()
[Row(size(data)=3), Row(size(data)=1), Row(size(data)=0)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.size(_to_java_column(col)))
@since(2.4)
def array_min(col):
"""
Collection function: returns the minimum value of the array.
:param col: name of column or expression
>>> df = spark.createDataFrame([([2, 1, 3],), ([None, 10, -1],)], ['data'])
>>> df.select(array_min(df.data).alias('min')).collect()
[Row(min=1), Row(min=-1)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.array_min(_to_java_column(col)))
@since(2.4)
def array_max(col):
"""
Collection function: returns the maximum value of the array.
:param col: name of column or expression
>>> df = spark.createDataFrame([([2, 1, 3],), ([None, 10, -1],)], ['data'])
>>> df.select(array_max(df.data).alias('max')).collect()
[Row(max=3), Row(max=10)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.array_max(_to_java_column(col)))
@since(1.5)
def sort_array(col, asc=True):
"""
Collection function: sorts the input array in ascending or descending order according
to the natural ordering of the array elements. Null elements will be placed at the beginning
of the returned array in ascending order or at the end of the returned array in descending
order.
:param col: name of column or expression
>>> df = spark.createDataFrame([([2, 1, None, 3],),([1],),([],)], ['data'])
>>> df.select(sort_array(df.data).alias('r')).collect()
[Row(r=[None, 1, 2, 3]), Row(r=[1]), Row(r=[])]
>>> df.select(sort_array(df.data, asc=False).alias('r')).collect()
[Row(r=[3, 2, 1, None]), Row(r=[1]), Row(r=[])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.sort_array(_to_java_column(col), asc))
@since(2.4)
def array_sort(col):
"""
Collection function: sorts the input array in ascending order. The elements of the input array
must be orderable. Null elements will be placed at the end of the returned array.
:param col: name of column or expression
>>> df = spark.createDataFrame([([2, 1, None, 3],),([1],),([],)], ['data'])
>>> df.select(array_sort(df.data).alias('r')).collect()
[Row(r=[1, 2, 3, None]), Row(r=[1]), Row(r=[])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.array_sort(_to_java_column(col)))
@since(2.4)
def shuffle(col):
"""
Collection function: Generates a random permutation of the given array.
.. note:: The function is non-deterministic.
:param col: name of column or expression
>>> df = spark.createDataFrame([([1, 20, 3, 5],), ([1, 20, None, 3],)], ['data'])
>>> df.select(shuffle(df.data).alias('s')).collect() # doctest: +SKIP
[Row(s=[3, 1, 5, 20]), Row(s=[20, None, 3, 1])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.shuffle(_to_java_column(col)))
@since(1.5)
@ignore_unicode_prefix
def reverse(col):
"""
Collection function: returns a reversed string or an array with reverse order of elements.
:param col: name of column or expression
>>> df = spark.createDataFrame([('Spark SQL',)], ['data'])
>>> df.select(reverse(df.data).alias('s')).collect()
[Row(s=u'LQS krapS')]
>>> df = spark.createDataFrame([([2, 1, 3],) ,([1],) ,([],)], ['data'])
>>> df.select(reverse(df.data).alias('r')).collect()
[Row(r=[3, 1, 2]), Row(r=[1]), Row(r=[])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.reverse(_to_java_column(col)))
@since(2.4)
def flatten(col):
"""
Collection function: creates a single array from an array of arrays.
If a structure of nested arrays is deeper than two levels,
only one level of nesting is removed.
:param col: name of column or expression
>>> df = spark.createDataFrame([([[1, 2, 3], [4, 5], [6]],), ([None, [4, 5]],)], ['data'])
>>> df.select(flatten(df.data).alias('r')).collect()
[Row(r=[1, 2, 3, 4, 5, 6]), Row(r=None)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.flatten(_to_java_column(col)))
@since(2.3)
def map_keys(col):
"""
Collection function: Returns an unordered array containing the keys of the map.
:param col: name of column or expression
>>> from pyspark.sql.functions import map_keys
>>> df = spark.sql("SELECT map(1, 'a', 2, 'b') as data")
>>> df.select(map_keys("data").alias("keys")).show()
+------+
| keys|
+------+
|[1, 2]|
+------+
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.map_keys(_to_java_column(col)))
@since(2.3)
def map_values(col):
"""
Collection function: Returns an unordered array containing the values of the map.
:param col: name of column or expression
>>> from pyspark.sql.functions import map_values
>>> df = spark.sql("SELECT map(1, 'a', 2, 'b') as data")
>>> df.select(map_values("data").alias("values")).show()
+------+
|values|
+------+
|[a, b]|
+------+
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.map_values(_to_java_column(col)))
@since(3.0)
def map_entries(col):
"""
Collection function: Returns an unordered array of all entries in the given map.
:param col: name of column or expression
>>> from pyspark.sql.functions import map_entries
>>> df = spark.sql("SELECT map(1, 'a', 2, 'b') as data")
>>> df.select(map_entries("data").alias("entries")).show()
+----------------+
| entries|
+----------------+
|[[1, a], [2, b]]|
+----------------+
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.map_entries(_to_java_column(col)))
@since(2.4)
def map_from_entries(col):
"""
Collection function: Returns a map created from the given array of entries.
:param col: name of column or expression
>>> from pyspark.sql.functions import map_from_entries
>>> df = spark.sql("SELECT array(struct(1, 'a'), struct(2, 'b')) as data")
>>> df.select(map_from_entries("data").alias("map")).show()
+----------------+
| map|
+----------------+
|[1 -> a, 2 -> b]|
+----------------+
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.map_from_entries(_to_java_column(col)))
@ignore_unicode_prefix
@since(2.4)
def array_repeat(col, count):
"""
Collection function: creates an array containing a column repeated count times.
>>> df = spark.createDataFrame([('ab',)], ['data'])
>>> df.select(array_repeat(df.data, 3).alias('r')).collect()
[Row(r=[u'ab', u'ab', u'ab'])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.array_repeat(
_to_java_column(col),
_to_java_column(count) if isinstance(count, Column) else count
))
@since(2.4)
def arrays_zip(*cols):
"""
Collection function: Returns a merged array of structs in which the N-th struct contains all
N-th values of input arrays.
:param cols: columns of arrays to be merged.
>>> from pyspark.sql.functions import arrays_zip
>>> df = spark.createDataFrame([(([1, 2, 3], [2, 3, 4]))], ['vals1', 'vals2'])
>>> df.select(arrays_zip(df.vals1, df.vals2).alias('zipped')).collect()
[Row(zipped=[Row(vals1=1, vals2=2), Row(vals1=2, vals2=3), Row(vals1=3, vals2=4)])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.arrays_zip(_to_seq(sc, cols, _to_java_column)))
@since(2.4)
def map_concat(*cols):
"""Returns the union of all the given maps.
:param cols: list of column names (string) or list of :class:`Column` expressions
>>> from pyspark.sql.functions import map_concat
>>> df = spark.sql("SELECT map(1, 'a', 2, 'b') as map1, map(3, 'c', 1, 'd') as map2")
>>> df.select(map_concat("map1", "map2").alias("map3")).show(truncate=False)
+------------------------+
|map3 |
+------------------------+
|[1 -> d, 2 -> b, 3 -> c]|
+------------------------+
"""
sc = SparkContext._active_spark_context
if len(cols) == 1 and isinstance(cols[0], (list, set)):
cols = cols[0]
jc = sc._jvm.functions.map_concat(_to_seq(sc, cols, _to_java_column))
return Column(jc)
@since(2.4)
def sequence(start, stop, step=None):
"""
Generate a sequence of integers from `start` to `stop`, incrementing by `step`.
If `step` is not set, the sequence is incremented by 1 if `start` is less than or equal
to `stop`, and by -1 otherwise.
>>> df1 = spark.createDataFrame([(-2, 2)], ('C1', 'C2'))
>>> df1.select(sequence('C1', 'C2').alias('r')).collect()
[Row(r=[-2, -1, 0, 1, 2])]
>>> df2 = spark.createDataFrame([(4, -4, -2)], ('C1', 'C2', 'C3'))
>>> df2.select(sequence('C1', 'C2', 'C3').alias('r')).collect()
[Row(r=[4, 2, 0, -2, -4])]
"""
sc = SparkContext._active_spark_context
if step is None:
return Column(sc._jvm.functions.sequence(_to_java_column(start), _to_java_column(stop)))
else:
return Column(sc._jvm.functions.sequence(
_to_java_column(start), _to_java_column(stop), _to_java_column(step)))
@ignore_unicode_prefix
@since(3.0)
def from_csv(col, schema, options={}):
"""
Parses a column containing a CSV string to a row with the specified schema.
Returns `null`, in the case of an unparseable string.
:param col: string column in CSV format
:param schema: a string with schema in DDL format, or a column created by
:func:`schema_of_csv`, to use when parsing the CSV column.
:param options: options to control parsing. accepts the same options as the CSV datasource
>>> data = [("1,2,3",)]
>>> df = spark.createDataFrame(data, ("value",))
>>> df.select(from_csv(df.value, "a INT, b INT, c INT").alias("csv")).collect()
[Row(csv=Row(a=1, b=2, c=3))]
>>> value = data[0][0]
>>> df.select(from_csv(df.value, schema_of_csv(value)).alias("csv")).collect()
[Row(csv=Row(_c0=1, _c1=2, _c2=3))]
>>> data = [(" abc",)]
>>> df = spark.createDataFrame(data, ("value",))
>>> options = {'ignoreLeadingWhiteSpace': True}
>>> df.select(from_csv(df.value, "s string", options).alias("csv")).collect()
[Row(csv=Row(s=u'abc'))]
"""
sc = SparkContext._active_spark_context
if isinstance(schema, basestring):
schema = _create_column_from_literal(schema)
elif isinstance(schema, Column):
schema = _to_java_column(schema)
else:
raise TypeError("schema argument should be a column or string")
jc = sc._jvm.functions.from_csv(_to_java_column(col), schema, _options_to_str(options))
return Column(jc)
# ---------------------------- User Defined Function ----------------------------------
class PandasUDFType(object):
"""Pandas UDF Types. See :meth:`pyspark.sql.functions.pandas_udf`.
"""
SCALAR = PythonEvalType.SQL_SCALAR_PANDAS_UDF
SCALAR_ITER = PythonEvalType.SQL_SCALAR_PANDAS_ITER_UDF
GROUPED_MAP = PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF
COGROUPED_MAP = PythonEvalType.SQL_COGROUPED_MAP_PANDAS_UDF
GROUPED_AGG = PythonEvalType.SQL_GROUPED_AGG_PANDAS_UDF
MAP_ITER = PythonEvalType.SQL_MAP_PANDAS_ITER_UDF
@since(1.3)
def udf(f=None, returnType=StringType()):
"""Creates a user defined function (UDF).
.. note:: The user-defined functions are considered deterministic by default. Due to
optimization, duplicate invocations may be eliminated or the function may even be invoked
more times than it is present in the query. If your function is not deterministic, call
`asNondeterministic` on the user defined function. E.g.:
>>> from pyspark.sql.types import IntegerType
>>> import random
>>> random_udf = udf(lambda: int(random.random() * 100), IntegerType()).asNondeterministic()
.. note:: The user-defined functions do not support conditional expressions or short circuiting
in boolean expressions, and they end up being executed entirely internally. If a function
can fail on special rows, the workaround is to incorporate the condition into the function itself.
.. note:: The user-defined functions do not take keyword arguments on the calling side.
:param f: python function if used as a standalone function
:param returnType: the return type of the user-defined function. The value can be either a
:class:`pyspark.sql.types.DataType` object or a DDL-formatted type string.
>>> from pyspark.sql.types import IntegerType
>>> slen = udf(lambda s: len(s), IntegerType())
>>> @udf
... def to_upper(s):
... if s is not None:
... return s.upper()
...
>>> @udf(returnType=IntegerType())
... def add_one(x):
... if x is not None:
... return x + 1
...
>>> df = spark.createDataFrame([(1, "John Doe", 21)], ("id", "name", "age"))
>>> df.select(slen("name").alias("slen(name)"), to_upper("name"), add_one("age")).show()
+----------+--------------+------------+
|slen(name)|to_upper(name)|add_one(age)|
+----------+--------------+------------+
| 8| JOHN DOE| 22|
+----------+--------------+------------+
"""
# The following table shows most of Python data and SQL type conversions in normal UDFs that
# are not yet visible to the user. Some of behaviors are buggy and might be changed in the near
# future. The table might have to be eventually documented externally.
# Please see SPARK-28131's PR to see the codes in order to generate the table below.
#
# +-----------------------------+--------------+----------+------+---------------+--------------------+-----------------------------+----------+----------------------+---------+--------------------+----------------------------+------------+--------------+------------------+----------------------+ # noqa
# |SQL Type \ Python Value(Type)|None(NoneType)|True(bool)|1(int)| a(str)| 1970-01-01(date)|1970-01-01 00:00:00(datetime)|1.0(float)|array('i', [1])(array)|[1](list)| (1,)(tuple)|bytearray(b'ABC')(bytearray)| 1(Decimal)|{'a': 1}(dict)|Row(kwargs=1)(Row)|Row(namedtuple=1)(Row)| # noqa
# +-----------------------------+--------------+----------+------+---------------+--------------------+-----------------------------+----------+----------------------+---------+--------------------+----------------------------+------------+--------------+------------------+----------------------+ # noqa
# | boolean| None| True| None| None| None| None| None| None| None| None| None| None| None| X| X| # noqa
# | tinyint| None| None| 1| None| None| None| None| None| None| None| None| None| None| X| X| # noqa
# | smallint| None| None| 1| None| None| None| None| None| None| None| None| None| None| X| X| # noqa
# | int| None| None| 1| None| None| None| None| None| None| None| None| None| None| X| X| # noqa
# | bigint| None| None| 1| None| None| None| None| None| None| None| None| None| None| X| X| # noqa
# | string| None| 'true'| '1'| 'a'|'java.util.Gregor...| 'java.util.Gregor...| '1.0'| '[I@66cbb73a'| '[1]'|'[Ljava.lang.Obje...| '[B@5a51eb1a'| '1'| '{a=1}'| X| X| # noqa
# | date| None| X| X| X|datetime.date(197...| datetime.date(197...| X| X| X| X| X| X| X| X| X| # noqa
# | timestamp| None| X| X| X| X| datetime.datetime...| X| X| X| X| X| X| X| X| X| # noqa
# | float| None| None| None| None| None| None| 1.0| None| None| None| None| None| None| X| X| # noqa
# | double| None| None| None| None| None| None| 1.0| None| None| None| None| None| None| X| X| # noqa
# | array<int>| None| None| None| None| None| None| None| [1]| [1]| [1]| [65, 66, 67]| None| None| X| X| # noqa
# | binary| None| None| None|bytearray(b'a')| None| None| None| None| None| None| bytearray(b'ABC')| None| None| X| X| # noqa
# | decimal(10,0)| None| None| None| None| None| None| None| None| None| None| None|Decimal('1')| None| X| X| # noqa
# | map<string,int>| None| None| None| None| None| None| None| None| None| None| None| None| {'a': 1}| X| X| # noqa
# | struct<_1:int>| None| X| X| X| X| X| X| X|Row(_1=1)| Row(_1=1)| X| X| Row(_1=None)| Row(_1=1)| Row(_1=1)| # noqa
# +-----------------------------+--------------+----------+------+---------------+--------------------+-----------------------------+----------+----------------------+---------+--------------------+----------------------------+------------+--------------+------------------+----------------------+ # noqa
#
# Note: DDL formatted string is used for 'SQL Type' for simplicity. This string can be
# used in `returnType`.
# Note: The values inside of the table are generated by `repr`.
# Note: 'X' means it throws an exception during the conversion.
# Note: Python 3.7.3 is used.
# decorator @udf, @udf(), @udf(dataType())
if f is None or isinstance(f, (str, DataType)):
# If DataType has been passed as a positional argument
# for decorator use it as a returnType
return_type = f or returnType
return functools.partial(_create_udf, returnType=return_type,
evalType=PythonEvalType.SQL_BATCHED_UDF)
else:
return _create_udf(f=f, returnType=returnType,
evalType=PythonEvalType.SQL_BATCHED_UDF)
@since(2.3)
def pandas_udf(f=None, returnType=None, functionType=None):
"""
Creates a vectorized user defined function (UDF).
:param f: user-defined function. A python function if used as a standalone function
:param returnType: the return type of the user-defined function. The value can be either a
:class:`pyspark.sql.types.DataType` object or a DDL-formatted type string.
:param functionType: an enum value in :class:`pyspark.sql.functions.PandasUDFType`.
Default: SCALAR.
The function type of the UDF can be one of the following:
1. SCALAR
A scalar UDF defines a transformation: One or more `pandas.Series` -> A `pandas.Series`.
The length of the returned `pandas.Series` must be the same as that of the input `pandas.Series`.
If the return type is :class:`StructType`, the returned value should be a `pandas.DataFrame`.
:class:`MapType`, nested :class:`StructType` are currently not supported as output types.
Scalar UDFs can be used with :meth:`pyspark.sql.DataFrame.withColumn` and
:meth:`pyspark.sql.DataFrame.select`.
>>> from pyspark.sql.functions import pandas_udf, PandasUDFType
>>> from pyspark.sql.types import IntegerType, StringType
>>> slen = pandas_udf(lambda s: s.str.len(), IntegerType()) # doctest: +SKIP
>>> @pandas_udf(StringType()) # doctest: +SKIP
... def to_upper(s):
... return s.str.upper()
...
>>> @pandas_udf("integer", PandasUDFType.SCALAR) # doctest: +SKIP
... def add_one(x):
... return x + 1
...
>>> df = spark.createDataFrame([(1, "John Doe", 21)],
... ("id", "name", "age")) # doctest: +SKIP
>>> df.select(slen("name").alias("slen(name)"), to_upper("name"), add_one("age")) \\
... .show() # doctest: +SKIP
+----------+--------------+------------+
|slen(name)|to_upper(name)|add_one(age)|
+----------+--------------+------------+
| 8| JOHN DOE| 22|
+----------+--------------+------------+
>>> @pandas_udf("first string, last string") # doctest: +SKIP
... def split_expand(n):
... return n.str.split(expand=True)
>>> df.select(split_expand("name")).show() # doctest: +SKIP
+------------------+
|split_expand(name)|
+------------------+
| [John, Doe]|
+------------------+
.. note:: The length of `pandas.Series` within a scalar UDF is not that of the whole input
column, but is the length of an internal batch used for each call to the function.
Therefore, it can be used, for example, to make sure each returned `pandas.Series` has the
same length as its input batch, but it cannot be used as the column length.
2. SCALAR_ITER
A scalar iterator UDF is semantically the same as the scalar Pandas UDF above except that the
wrapped Python function takes an iterator of batches as input instead of a single batch and,
instead of returning a single output batch, it yields output batches or explicitly returns a
generator or an iterator of output batches.
It is useful when the UDF execution requires initializing some state, e.g., loading a machine
learning model file to apply inference to every input batch.
.. note:: It is not guaranteed that one invocation of a scalar iterator UDF will process all
batches from one partition, although it is currently implemented this way.
Your code shall not rely on this behavior because it might change in the future for
further optimization, e.g., one invocation processes multiple partitions.
Scalar iterator UDFs are used with :meth:`pyspark.sql.DataFrame.withColumn` and
:meth:`pyspark.sql.DataFrame.select`.
>>> import pandas as pd # doctest: +SKIP
>>> from pyspark.sql.functions import col, pandas_udf, struct, PandasUDFType
>>> pdf = pd.DataFrame([1, 2, 3], columns=["x"]) # doctest: +SKIP
>>> df = spark.createDataFrame(pdf) # doctest: +SKIP
When the UDF is called with a single column that is not `StructType`, the input to the
underlying function is an iterator of `pd.Series`.
>>> @pandas_udf("long", PandasUDFType.SCALAR_ITER) # doctest: +SKIP
... def plus_one(batch_iter):
... for x in batch_iter:
... yield x + 1
...
>>> df.select(plus_one(col("x"))).show() # doctest: +SKIP
+-----------+
|plus_one(x)|
+-----------+
| 2|
| 3|
| 4|
+-----------+
When the UDF is called with more than one column, the input to the underlying function is an
iterator of tuples of `pd.Series`.
>>> @pandas_udf("long", PandasUDFType.SCALAR_ITER) # doctest: +SKIP
... def multiply_two_cols(batch_iter):
... for a, b in batch_iter:
... yield a * b
...
>>> df.select(multiply_two_cols(col("x"), col("x"))).show() # doctest: +SKIP
+-----------------------+
|multiply_two_cols(x, x)|
+-----------------------+
| 1|
| 4|
| 9|
+-----------------------+
When the UDF is called with a single column that is `StructType`, the input to the underlying
function is an iterator of `pd.DataFrame`.
>>> @pandas_udf("long", PandasUDFType.SCALAR_ITER) # doctest: +SKIP
... def multiply_two_nested_cols(pdf_iter):
... for pdf in pdf_iter:
... yield pdf["a"] * pdf["b"]
...
>>> df.select(
... multiply_two_nested_cols(
... struct(col("x").alias("a"), col("x").alias("b"))
... ).alias("y")
... ).show() # doctest: +SKIP
+---+
| y|
+---+
| 1|
| 4|
| 9|
+---+
In the UDF, you can initialize some state before processing batches; wrap your code with
`try ... finally ...` or use context managers to ensure the release of resources at the end
or in case of early termination.
>>> y_bc = spark.sparkContext.broadcast(1) # doctest: +SKIP
>>> @pandas_udf("long", PandasUDFType.SCALAR_ITER) # doctest: +SKIP
... def plus_y(batch_iter):
... y = y_bc.value # initialize some state
... try:
... for x in batch_iter:
... yield x + y
... finally:
... pass # release resources here, if any
...
>>> df.select(plus_y(col("x"))).show() # doctest: +SKIP
+---------+
|plus_y(x)|
+---------+
| 2|
| 3|
| 4|
+---------+
3. GROUPED_MAP
A grouped map UDF defines a transformation: A `pandas.DataFrame` -> A `pandas.DataFrame`
The returnType should be a :class:`StructType` describing the schema of the returned
`pandas.DataFrame`. The column labels of the returned `pandas.DataFrame` must either match
the field names in the defined returnType schema if specified as strings, or match the
field data types by position if not strings, e.g. integer indices.
The length of the returned `pandas.DataFrame` can be arbitrary.
Grouped map UDFs are used with :meth:`pyspark.sql.GroupedData.apply`.
>>> from pyspark.sql.functions import pandas_udf, PandasUDFType
>>> df = spark.createDataFrame(
... [(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)],
... ("id", "v")) # doctest: +SKIP
>>> @pandas_udf("id long, v double", PandasUDFType.GROUPED_MAP) # doctest: +SKIP
... def normalize(pdf):
... v = pdf.v
... return pdf.assign(v=(v - v.mean()) / v.std())
>>> df.groupby("id").apply(normalize).show() # doctest: +SKIP
+---+-------------------+
| id| v|
+---+-------------------+
| 1|-0.7071067811865475|
| 1| 0.7071067811865475|
| 2|-0.8320502943378437|
| 2|-0.2773500981126146|
| 2| 1.1094003924504583|
+---+-------------------+
Alternatively, the user can define a function that takes two arguments.
In this case, the grouping key(s) will be passed as the first argument and the data will
be passed as the second argument. The grouping key(s) will be passed as a tuple of numpy
data types, e.g., `numpy.int32` and `numpy.float64`. The data will still be passed in
as a `pandas.DataFrame` containing all columns from the original Spark DataFrame.
This is useful when the user does not want to hardcode grouping key(s) in the function.
>>> import pandas as pd # doctest: +SKIP
>>> from pyspark.sql.functions import pandas_udf, PandasUDFType
>>> df = spark.createDataFrame(
... [(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)],
... ("id", "v")) # doctest: +SKIP
>>> @pandas_udf("id long, v double", PandasUDFType.GROUPED_MAP) # doctest: +SKIP
... def mean_udf(key, pdf):
... # key is a tuple of one numpy.int64, which is the value
... # of 'id' for the current group
... return pd.DataFrame([key + (pdf.v.mean(),)])
>>> df.groupby('id').apply(mean_udf).show() # doctest: +SKIP
+---+---+
| id| v|
+---+---+
| 1|1.5|
| 2|6.0|
+---+---+
>>> @pandas_udf(
... "id long, `ceil(v / 2)` long, v double",
... PandasUDFType.GROUPED_MAP) # doctest: +SKIP
... def sum_udf(key, pdf):
... # key is a tuple of two numpy.int64s, which is the values
... # of 'id' and 'ceil(df.v / 2)' for the current group
... return pd.DataFrame([key + (pdf.v.sum(),)])
>>> df.groupby(df.id, ceil(df.v / 2)).apply(sum_udf).show() # doctest: +SKIP
+---+-----------+----+
| id|ceil(v / 2)| v|
+---+-----------+----+
| 2| 5|10.0|
| 1| 1| 3.0|
| 2| 3| 5.0|
| 2| 2| 3.0|
+---+-----------+----+
.. note:: If returning a new `pandas.DataFrame` constructed with a dictionary, it is
recommended to explicitly index the columns by name to ensure the positions are correct,
or alternatively use an `OrderedDict`.
For example, `pd.DataFrame({'id': ids, 'a': data}, columns=['id', 'a'])` or
`pd.DataFrame(OrderedDict([('id', ids), ('a', data)]))`.
.. seealso:: :meth:`pyspark.sql.GroupedData.apply`
4. GROUPED_AGG
A grouped aggregate UDF defines a transformation: One or more `pandas.Series` -> A scalar
The `returnType` should be a primitive data type, e.g., :class:`DoubleType`.
The returned scalar can be either a python primitive type, e.g., `int` or `float`
or a numpy data type, e.g., `numpy.int64` or `numpy.float64`.
:class:`MapType` and :class:`StructType` are currently not supported as output types.
Grouped aggregate UDFs are used with :meth:`pyspark.sql.GroupedData.agg` and
:class:`pyspark.sql.Window`.
This example shows using grouped aggregate UDFs with groupby:
>>> from pyspark.sql.functions import pandas_udf, PandasUDFType
>>> df = spark.createDataFrame(
... [(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)],
... ("id", "v"))
>>> @pandas_udf("double", PandasUDFType.GROUPED_AGG) # doctest: +SKIP
... def mean_udf(v):
... return v.mean()
>>> df.groupby("id").agg(mean_udf(df['v'])).show() # doctest: +SKIP
+---+-----------+
| id|mean_udf(v)|
+---+-----------+
| 1| 1.5|
| 2| 6.0|
+---+-----------+
This example shows using grouped aggregate UDFs as window functions.
>>> from pyspark.sql.functions import pandas_udf, PandasUDFType
>>> from pyspark.sql import Window
>>> df = spark.createDataFrame(
... [(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)],
... ("id", "v"))
>>> @pandas_udf("double", PandasUDFType.GROUPED_AGG) # doctest: +SKIP
... def mean_udf(v):
... return v.mean()
>>> w = (Window.partitionBy('id')
... .orderBy('v')
... .rowsBetween(-1, 0))
>>> df.withColumn('mean_v', mean_udf(df['v']).over(w)).show() # doctest: +SKIP
+---+----+------+
| id| v|mean_v|
+---+----+------+
| 1| 1.0| 1.0|
| 1| 2.0| 1.5|
| 2| 3.0| 3.0|
| 2| 5.0| 4.0|
| 2|10.0| 7.5|
+---+----+------+
.. note:: For performance reasons, the input series to window functions are not copied.
Therefore, mutating the input series is not allowed and will cause incorrect results.
For the same reason, users should also not rely on the index of the input series.
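If the values need transforming before aggregating, a sketch of the safe pattern is to work
on a copy first (the clipping threshold below is purely illustrative):
>>> @pandas_udf("double", PandasUDFType.GROUPED_AGG)  # doctest: +SKIP
... def clipped_mean_udf(v):
...     # Copy first; mutating the input series in place would give incorrect results.
...     v = v.copy()
...     v[v > 5.0] = 5.0
...     return v.mean()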
.. seealso:: :meth:`pyspark.sql.GroupedData.agg` and :class:`pyspark.sql.Window`
5. MAP_ITER
A map iterator Pandas UDF is used to transform data with an iterator of batches.
It can be used with :meth:`pyspark.sql.DataFrame.mapInPandas`.
It can return output of arbitrary length, in contrast to the scalar Pandas UDF.
It maps an iterator of batches in the current :class:`DataFrame` using a Pandas user-defined
function and returns the result as a :class:`DataFrame`.
The user-defined function should take an iterator of `pandas.DataFrame`\\s and return another
iterator of `pandas.DataFrame`\\s. All columns are passed together as an
iterator of `pandas.DataFrame`\\s to the user-defined function, and the returned iterator of
`pandas.DataFrame`\\s is combined into a :class:`DataFrame`.
>>> df = spark.createDataFrame([(1, 21), (2, 30)],
... ("id", "age")) # doctest: +SKIP
>>> @pandas_udf(df.schema, PandasUDFType.MAP_ITER) # doctest: +SKIP
... def filter_func(batch_iter):
... for pdf in batch_iter:
... yield pdf[pdf.id == 1]
>>> df.mapInPandas(filter_func).show() # doctest: +SKIP
+---+---+
| id|age|
+---+---+
| 1| 21|
+---+---+
6. COGROUPED_MAP
A cogrouped map UDF defines a transformation: (`pandas.DataFrame`, `pandas.DataFrame`) ->
`pandas.DataFrame`. The `returnType` should be a :class:`StructType` describing the schema
of the returned `pandas.DataFrame`. The column labels of the returned `pandas.DataFrame`
must either match the field names in the defined `returnType` schema if specified as strings,
or match the field data types by position if not strings, e.g. integer indices. The length
of the returned `pandas.DataFrame` can be arbitrary.
CoGrouped map UDFs are used with :meth:`pyspark.sql.CoGroupedData.apply`.
>>> from pyspark.sql.functions import pandas_udf, PandasUDFType
>>> df1 = spark.createDataFrame(
... [(20000101, 1, 1.0), (20000101, 2, 2.0), (20000102, 1, 3.0), (20000102, 2, 4.0)],
... ("time", "id", "v1"))
>>> df2 = spark.createDataFrame(
... [(20000101, 1, "x"), (20000101, 2, "y")],
... ("time", "id", "v2"))
>>> @pandas_udf("time int, id int, v1 double, v2 string",
... PandasUDFType.COGROUPED_MAP) # doctest: +SKIP
... def asof_join(l, r):
... return pd.merge_asof(l, r, on="time", by="id")
>>> df1.groupby("id").cogroup(df2.groupby("id")).apply(asof_join).show() # doctest: +SKIP
+---------+---+---+---+
| time| id| v1| v2|
+---------+---+---+---+
| 20000101| 1|1.0| x|
| 20000102| 1|3.0| x|
| 20000101| 2|2.0| y|
| 20000102| 2|4.0| y|
+---------+---+---+---+
Alternatively, the user can define a function that takes three arguments. In this case,
the grouping key(s) will be passed as the first argument and the data will be passed as the
second and third arguments. The grouping key(s) will be passed as a tuple of numpy data
types, e.g., `numpy.int32` and `numpy.float64`. The data will still be passed in as two
`pandas.DataFrame`\\s containing all columns from the original Spark DataFrames.
>>> @pandas_udf("time int, id int, v1 double, v2 string",
... PandasUDFType.COGROUPED_MAP) # doctest: +SKIP
... def asof_join(k, l, r):
... if k == (1,):
... return pd.merge_asof(l, r, on="time", by="id")
... else:
... return pd.DataFrame(columns=['time', 'id', 'v1', 'v2'])
>>> df1.groupby("id").cogroup(df2.groupby("id")).apply(asof_join).show() # doctest: +SKIP
+---------+---+---+---+
| time| id| v1| v2|
+---------+---+---+---+
| 20000101| 1|1.0| x|
| 20000102| 1|3.0| x|
+---------+---+---+---+
.. note:: The user-defined functions are considered deterministic by default. Due to
optimization, duplicate invocations may be eliminated or the function may even be invoked
more times than it is present in the query. If your function is not deterministic, call
`asNondeterministic` on the user defined function. E.g.:
>>> @pandas_udf('double', PandasUDFType.SCALAR) # doctest: +SKIP
... def random(v):
... import numpy as np
... import pandas as pd
... return pd.Series(np.random.randn(len(v)))
>>> random = random.asNondeterministic() # doctest: +SKIP
.. note:: The user-defined functions do not support conditional expressions or short-circuiting
in boolean expressions; the functions end up being executed for all rows internally. If a
function can fail on special rows, the workaround is to incorporate the condition into the
function itself.
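For example, a minimal sketch of such a workaround (illustrative only; the column name `v` and
the zero check are assumptions, not part of the API described above):
>>> @pandas_udf('double', PandasUDFType.SCALAR)  # doctest: +SKIP
... def safe_reciprocal(v):
...     import pandas as pd
...     # Handle the special rows inside the UDF instead of relying on
...     # short-circuiting in the surrounding SQL expression.
...     out = pd.Series(0.0, index=v.index)
...     mask = v != 0
...     out[mask] = 1.0 / v[mask]
...     return out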
.. note:: The user-defined functions do not take keyword arguments on the calling side.
.. note:: The data type of the `pandas.Series` returned by the user-defined functions should
match the defined returnType (see :meth:`types.to_arrow_type` and
:meth:`types.from_arrow_type`). When there is a mismatch between them, Spark might perform a
conversion on the returned data. The conversion is not guaranteed to be correct and results
should be checked for accuracy by users.
"""
# The following table shows most of the Pandas data and SQL type conversions in Pandas UDFs that
# are not yet visible to the user. Some of the behaviors are buggy and might be changed in the
# near future. The table might have to be eventually documented externally.
# Please see SPARK-28132's PR for the code used to generate the table below.
#
# +-----------------------------+----------------------+------------------+------------------+------------------+--------------------+--------------------+------------------+------------------+------------------+------------------+--------------+--------------+--------------+-----------------------------------+-----------------------------------------------------+-----------------+--------------------+-----------------------------+--------------+-----------------+------------------+-----------+--------------------------------+ # noqa
# |SQL Type \ Pandas Value(Type)|None(object(NoneType))| True(bool)| 1(int8)| 1(int16)| 1(int32)| 1(int64)| 1(uint8)| 1(uint16)| 1(uint32)| 1(uint64)| 1.0(float16)| 1.0(float32)| 1.0(float64)|1970-01-01 00:00:00(datetime64[ns])|1970-01-01 00:00:00-05:00(datetime64[ns, US/Eastern])|a(object(string))| 1(object(Decimal))|[1 2 3](object(array[int32]))| 1.0(float128)|(1+0j)(complex64)|(1+0j)(complex128)|A(category)|1 days 00:00:00(timedelta64[ns])| # noqa
# +-----------------------------+----------------------+------------------+------------------+------------------+--------------------+--------------------+------------------+------------------+------------------+------------------+--------------+--------------+--------------+-----------------------------------+-----------------------------------------------------+-----------------+--------------------+-----------------------------+--------------+-----------------+------------------+-----------+--------------------------------+ # noqa
# | boolean| None| True| True| True| True| True| True| True| True| True| True| True| True| X| X| X| X| X| X| X| X| X| X| # noqa
# | tinyint| None| 1| 1| 1| 1| 1| 1| 1| 1| 1| 1| 1| 1| X| X| X| 1| X| X| X| X| 0| X| # noqa
# | smallint| None| 1| 1| 1| 1| 1| 1| 1| 1| 1| 1| 1| 1| X| X| X| 1| X| X| X| X| X| X| # noqa
# | int| None| 1| 1| 1| 1| 1| 1| 1| 1| 1| 1| 1| 1| X| X| X| 1| X| X| X| X| X| X| # noqa
# | bigint| None| 1| 1| 1| 1| 1| 1| 1| 1| 1| 1| 1| 1| 0| 18000000000000| X| 1| X| X| X| X| X| X| # noqa
# | float| None| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| X| X| X| X| X| X| X| X| X| X| # noqa
# | double| None| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| X| X| X| X| X| X| X| X| X| X| # noqa
# | date| None| X| X| X|datetime.date(197...| X| X| X| X| X| X| X| X| datetime.date(197...| datetime.date(197...| X|datetime.date(197...| X| X| X| X| X| X| # noqa
# | timestamp| None| X| X| X| X|datetime.datetime...| X| X| X| X| X| X| X| datetime.datetime...| datetime.datetime...| X|datetime.datetime...| X| X| X| X| X| X| # noqa
# | string| None| ''| ''| ''| '\x01'| '\x01'| ''| ''| '\x01'| '\x01'| ''| ''| ''| X| X| 'a'| X| X| ''| X| ''| X| X| # noqa
# | decimal(10,0)| None| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| Decimal('1')| X| X| X| X| X| X| # noqa
# | array<int>| None| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| [1, 2, 3]| X| X| X| X| X| # noqa
# | map<string,int>| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| # noqa
# | struct<_1:int>| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| # noqa
# | binary| None|bytearray(b'\x01')|bytearray(b'\x01')|bytearray(b'\x01')| bytearray(b'\x01')| bytearray(b'\x01')|bytearray(b'\x01')|bytearray(b'\x01')|bytearray(b'\x01')|bytearray(b'\x01')|bytearray(b'')|bytearray(b'')|bytearray(b'')| bytearray(b'')| bytearray(b'')| bytearray(b'a')| X| X|bytearray(b'')| bytearray(b'')| bytearray(b'')| X| bytearray(b'')| # noqa
# +-----------------------------+----------------------+------------------+------------------+------------------+--------------------+--------------------+------------------+------------------+------------------+------------------+--------------+--------------+--------------+-----------------------------------+-----------------------------------------------------+-----------------+--------------------+-----------------------------+--------------+-----------------+------------------+-----------+--------------------------------+ # noqa
#
# Note: DDL formatted string is used for 'SQL Type' for simplicity. This string can be
# used in `returnType`.
# Note: The values inside of the table are generated by `repr`.
# Note: Python 3.7.3, Pandas 0.24.2 and PyArrow 0.13.0 are used.
# Note: Timezone is KST.
# Note: 'X' means it throws an exception during the conversion.
# decorator @pandas_udf(returnType, functionType)
is_decorator = f is None or isinstance(f, (str, DataType))
if is_decorator:
# If DataType has been passed as a positional argument
# for decorator use it as a returnType
return_type = f or returnType
if functionType is not None:
# @pandas_udf(dataType, functionType=functionType)
# @pandas_udf(returnType=dataType, functionType=functionType)
eval_type = functionType
elif returnType is not None and isinstance(returnType, int):
# @pandas_udf(dataType, functionType)
eval_type = returnType
else:
# @pandas_udf(dataType) or @pandas_udf(returnType=dataType)
eval_type = PythonEvalType.SQL_SCALAR_PANDAS_UDF
else:
return_type = returnType
if functionType is not None:
eval_type = functionType
else:
eval_type = PythonEvalType.SQL_SCALAR_PANDAS_UDF
if return_type is None:
raise ValueError("Invalid returnType: returnType can not be None")
if eval_type not in [PythonEvalType.SQL_SCALAR_PANDAS_UDF,
PythonEvalType.SQL_SCALAR_PANDAS_ITER_UDF,
PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF,
PythonEvalType.SQL_GROUPED_AGG_PANDAS_UDF,
PythonEvalType.SQL_MAP_PANDAS_ITER_UDF,
PythonEvalType.SQL_COGROUPED_MAP_PANDAS_UDF]:
raise ValueError("Invalid functionType: "
"functionType must be one the values from PandasUDFType")
if is_decorator:
return functools.partial(_create_udf, returnType=return_type, evalType=eval_type)
else:
return _create_udf(f=f, returnType=return_type, evalType=eval_type)
blacklist = ['map', 'since', 'ignore_unicode_prefix']
__all__ = [k for k, v in globals().items()
if not k.startswith('_') and k[0].islower() and callable(v) and k not in blacklist]
__all__ += ["PandasUDFType"]
__all__.sort()
def _test():
import doctest
from pyspark.sql import Row, SparkSession
import pyspark.sql.functions
globs = pyspark.sql.functions.__dict__.copy()
spark = SparkSession.builder\
.master("local[4]")\
.appName("sql.functions tests")\
.getOrCreate()
sc = spark.sparkContext
globs['sc'] = sc
globs['spark'] = spark
globs['df'] = spark.createDataFrame([Row(name='Alice', age=2), Row(name='Bob', age=5)])
spark.conf.set("spark.sql.legacy.utcTimestampFunc.enabled", "true")
(failure_count, test_count) = doctest.testmod(
pyspark.sql.functions, globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
spark.conf.unset("spark.sql.legacy.utcTimestampFunc.enabled")
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
|
apache-2.0
|
M-R-Houghton/euroscipy_2015
|
bokeh/bokeh/charts/_data_adapter.py
|
43
|
8802
|
"""This is the Bokeh charts interface. It gives you a high level API to build
complex plot is a simple way.
This is the ChartObject class, a minimal prototype class to build more chart
types on top of it. It provides the mechanisms to support the shared chained
methods.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
from six import string_types
from collections import OrderedDict
from ..properties import bokeh_integer_types, Datetime
try:
import numpy as np
except ImportError:
np = None
try:
import pandas as pd
except ImportError:
pd = None
try:
import blaze
except ImportError:
blaze = None
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
DEFAULT_INDEX_ALIASES = list('abcdefghijklmnopqrstuvz1234567890')
DEFAULT_INDEX_ALIASES += list(zip(DEFAULT_INDEX_ALIASES, DEFAULT_INDEX_ALIASES))
class DataAdapter(object):
"""
Adapter object used to normalize Charts inputs to a common interface.
Supported inputs are dict, list, tuple, np.ndarray and pd.DataFrame.
"""
def __init__(self, data, index=None, columns=None, force_alias=True):
self.__values = data
self._values = self.validate_values(data)
self.convert_index_to_int = False
self._columns_map = {}
self.convert_items_to_dict = False
if columns is None and force_alias:
# no column 'labels' defined for data... in this case we use
# default names
keys = getattr(self._values, 'keys', None)
if callable(keys):
columns = list(keys())
elif keys is None:
columns = list(map(str, range(len(data))))
else:
columns = list(keys)
if columns:
self._columns = columns
# define a mapping between the real keys to access data and the aliases
# we have defined using 'columns'
self._columns_map = dict(zip(columns, self.keys()))
if index is not None:
self._index = index
self.convert_items_to_dict = True
elif force_alias:
_index = getattr(self._values, 'index', None)
# check because if it is a callable self._values is not a
# dataframe (probably a list)
if _index is None:
indexes = self.index
if isinstance(indexes[0], int):
self._index = DEFAULT_INDEX_ALIASES[:][:len(self.values()[0])]
self.convert_items_to_dict = True
elif not callable(_index):
self._index = list(_index)
self.convert_items_to_dict = True
else:
self._index = DEFAULT_INDEX_ALIASES[:][:len(self.values()[0])]
self.convert_items_to_dict = True
@staticmethod
def is_number(value):
numbers = (float, ) + bokeh_integer_types
return isinstance(value, numbers)
@staticmethod
def is_datetime(value):
try:
dt = Datetime(value)
dt # shut up pyflakes
return True
except ValueError:
return False
@staticmethod
def validate_values(values):
if np and isinstance(values, np.ndarray):
if len(values.shape) == 1:
return np.array([values])
else:
return values
elif pd and isinstance(values, pd.DataFrame):
return values
elif isinstance(values, (dict, OrderedDict)):
if all(DataAdapter.is_number(x) for x in values.values()):
return values
return values
elif isinstance(values, (list, tuple)):
if all(DataAdapter.is_number(x) for x in values):
return [values]
return values
elif hasattr(values, '__array__'):
values = pd.DataFrame(np.asarray(values))
return values
# TODO: Improve this error message..
raise TypeError("Input type not supported! %s" % values)
def index_converter(self, x):
key = self._columns_map.get(x, x)
if self.convert_index_to_int:
key = int(key)
return key
def keys(self):
# assuming it's a dict or dataframe
keys = getattr(self._values, "keys", None)
if callable(keys):
return list(keys())
elif keys is None:
self.convert_index_to_int = True
indexes = range(len(self._values))
return list(map(str, indexes))
else:
return list(keys)
def __len__(self):
return len(self.values())
def __iter__(self):
for k in self.keys():
yield k
def __getitem__(self, key):
val = self._values[self.index_converter(key)]
# if we have "index aliases" we need to remap the values...
if self.convert_items_to_dict:
val = dict(zip(self._index, val))
return val
def values(self):
return self.normalize_values(self._values)
@staticmethod
def normalize_values(values):
_values = getattr(values, "values", None)
if callable(_values):
return list(_values())
elif _values is None:
return values
else:
# assuming it's a dataframe, in that case it returns transposed
# values compared to its dict equivalent.
return list(_values.T)
def items(self):
return [(key, self[key]) for key in self]
def iterkeys(self):
return iter(self)
def itervalues(self):
for k in self:
yield self[k]
def iteritems(self):
for k in self:
yield (k, self[k])
@property
def columns(self):
try:
return self._columns
except AttributeError:
return list(self.keys())
@property
def index(self):
try:
return self._index
except AttributeError:
index = getattr(self._values, "index", None)
if not callable(index) and index is not None:
# guess it's a pandas dataframe..
return index
# no, it's not. So it's probably a list so let's get the
# values and check
values = self.values()
if isinstance(values, dict):
return list(values.keys())
else:
first_el = self.values()[0]
if isinstance(first_el, dict):
indexes = list(first_el.keys())
else:
indexes = range(0, len(self.values()[0]))
self._index = indexes
return indexes
#-----------------------------------------------------------------------------
# Convenience methods
#-----------------------------------------------------------------------------
@staticmethod
def get_index_and_data(values, index=None):
"""Parse values (that must be one of the DataAdapter supported
input types) and create an separate/create index and data
depending on values type and index.
Args:
values (iterable): container that holds data to be plotted using
on the Chart classes
Returns:
A tuple of (index, values), where: ``index`` is an iterable that
represents the data index and ``values`` is an iterable containing
the values to be plotted.
"""
_values = DataAdapter(values, force_alias=False)
if hasattr(values, 'keys'):
if index is not None:
if isinstance(index, string_types):
xs = _values[index]
else:
xs = index
else:
try:
xs = _values.index
except AttributeError:
xs = values.index
else:
if index is None:
xs = _values.index
elif isinstance(index, string_types):
xs = _values[index]
else:
xs = index
return xs, _values
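# A minimal usage sketch (illustrative only; the dict below is made-up data, and the module
# must be importable as part of the bokeh.charts package, e.g. via
# ``python -m bokeh.charts._data_adapter``):
if __name__ == '__main__':
    _adapter = DataAdapter({'apples': [1, 2, 3], 'pears': [4, 5, 6]})
    print(_adapter.columns)    # column labels, e.g. ['apples', 'pears']
    print(_adapter.values())   # normalized values, e.g. [[1, 2, 3], [4, 5, 6]]
    _index, _values = DataAdapter.get_index_and_data({'apples': [1, 2, 3]})
    print(list(_index))        # default integer index, e.g. [0, 1, 2]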
|
mit
|
soulmachine/scikit-learn
|
examples/ensemble/plot_gradient_boosting_quantile.py
|
392
|
2114
|
"""
=====================================================
Prediction Intervals for Gradient Boosting Regression
=====================================================
This example shows how quantile regression can be used
to create prediction intervals.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import GradientBoostingRegressor
np.random.seed(1)
def f(x):
"""The function to predict."""
return x * np.sin(x)
#----------------------------------------------------------------------
# First the noiseless case
X = np.atleast_2d(np.random.uniform(0, 10.0, size=100)).T
X = X.astype(np.float32)
# Observations
y = f(X).ravel()
dy = 1.5 + 1.0 * np.random.random(y.shape)
noise = np.random.normal(0, dy)
y += noise
y = y.astype(np.float32)
# Mesh the input space for evaluations of the real function, the prediction and
# its MSE
xx = np.atleast_2d(np.linspace(0, 10, 1000)).T
xx = xx.astype(np.float32)
alpha = 0.95
clf = GradientBoostingRegressor(loss='quantile', alpha=alpha,
n_estimators=250, max_depth=3,
learning_rate=.1, min_samples_leaf=9,
min_samples_split=9)
clf.fit(X, y)
# Make the prediction on the meshed x-axis
y_upper = clf.predict(xx)
clf.set_params(alpha=1.0 - alpha)
clf.fit(X, y)
# Make the prediction on the meshed x-axis
y_lower = clf.predict(xx)
clf.set_params(loss='ls')
clf.fit(X, y)
# Make the prediction on the meshed x-axis
y_pred = clf.predict(xx)
# Plot the function, the prediction and the 90% confidence interval based on
# the MSE
fig = plt.figure()
plt.plot(xx, f(xx), 'g:', label=u'$f(x) = x\,\sin(x)$')
plt.plot(X, y, 'b.', markersize=10, label=u'Observations')
plt.plot(xx, y_pred, 'r-', label=u'Prediction')
plt.plot(xx, y_upper, 'k-')
plt.plot(xx, y_lower, 'k-')
plt.fill(np.concatenate([xx, xx[::-1]]),
np.concatenate([y_upper, y_lower[::-1]]),
alpha=.5, fc='b', ec='None', label='90% prediction interval')
plt.xlabel('$x$')
plt.ylabel('$f(x)$')
plt.ylim(-10, 20)
plt.legend(loc='upper left')
plt.show()
|
bsd-3-clause
|
aerdem4/kaggle-quora-dup
|
model.py
|
1
|
9236
|
import re
import pandas as pd
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.model_selection import StratifiedKFold
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.layers import Dense, Input, LSTM, Embedding, Dropout
from keras.layers.core import Lambda
from keras.layers.merge import concatenate, add, multiply
from keras.models import Model
from keras.layers.normalization import BatchNormalization
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.layers.noise import GaussianNoise
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.corpus import stopwords
np.random.seed(0)
WNL = WordNetLemmatizer()
STOP_WORDS = set(stopwords.words('english'))
MAX_SEQUENCE_LENGTH = 30
MIN_WORD_OCCURRENCE = 100
REPLACE_WORD = "memento"
EMBEDDING_DIM = 300
NUM_FOLDS = 10
BATCH_SIZE = 1025
EMBEDDING_FILE = "glove.840B.300d.txt"
def cutter(word):
if len(word) < 4:
return word
return WNL.lemmatize(WNL.lemmatize(word, "n"), "v")
def preprocess(string):
string = string.lower().replace(",000,000", "m").replace(",000", "k").replace("′", "'").replace("’", "'") \
.replace("won't", "will not").replace("cannot", "can not").replace("can't", "can not") \
.replace("n't", " not").replace("what's", "what is").replace("it's", "it is") \
.replace("'ve", " have").replace("i'm", "i am").replace("'re", " are") \
.replace("he's", "he is").replace("she's", "she is").replace("'s", " own") \
.replace("%", " percent ").replace("₹", " rupee ").replace("$", " dollar ") \
.replace("€", " euro ").replace("'ll", " will").replace("=", " equal ").replace("+", " plus ")
string = re.sub('[“”\(\'…\)\!\^\"\.;:,\-\??\{\}\[\]\\/\*@]', ' ', string)
string = re.sub(r"([0-9]+)000000", r"\1m", string)
string = re.sub(r"([0-9]+)000", r"\1k", string)
string = ' '.join([cutter(w) for w in string.split()])
return string
def get_embedding():
embeddings_index = {}
f = open(EMBEDDING_FILE)
for line in f:
values = line.split()
word = values[0]
if len(values) == EMBEDDING_DIM + 1 and word in top_words:
coefs = np.asarray(values[1:], dtype="float32")
embeddings_index[word] = coefs
f.close()
return embeddings_index
def is_numeric(s):
return any(i.isdigit() for i in s)
def prepare(q):
new_q = []
surplus_q = []
numbers_q = []
new_memento = True
for w in q.split()[::-1]:
if w in top_words:
new_q = [w] + new_q
new_memento = True
elif w not in STOP_WORDS:
if new_memento:
new_q = ["memento"] + new_q
new_memento = False
if is_numeric(w):
numbers_q = [w] + numbers_q
else:
surplus_q = [w] + surplus_q
else:
new_memento = True
if len(new_q) == MAX_SEQUENCE_LENGTH:
break
new_q = " ".join(new_q)
return new_q, set(surplus_q), set(numbers_q)
def extract_features(df):
q1s = np.array([""] * len(df), dtype=object)
q2s = np.array([""] * len(df), dtype=object)
features = np.zeros((len(df), 4))
for i, (q1, q2) in enumerate(list(zip(df["question1"], df["question2"]))):
q1s[i], surplus1, numbers1 = prepare(q1)
q2s[i], surplus2, numbers2 = prepare(q2)
features[i, 0] = len(surplus1.intersection(surplus2))
features[i, 1] = len(surplus1.union(surplus2))
features[i, 2] = len(numbers1.intersection(numbers2))
features[i, 3] = len(numbers1.union(numbers2))
return q1s, q2s, features
train = pd.read_csv("data/train.csv")
test = pd.read_csv("data/test.csv")
train["question1"] = train["question1"].fillna("").apply(preprocess)
train["question2"] = train["question2"].fillna("").apply(preprocess)
print("Creating the vocabulary of words occurred more than", MIN_WORD_OCCURRENCE)
all_questions = pd.Series(train["question1"].tolist() + train["question2"].tolist()).unique()
vectorizer = CountVectorizer(lowercase=False, token_pattern="\S+", min_df=MIN_WORD_OCCURRENCE)
vectorizer.fit(all_questions)
top_words = set(vectorizer.vocabulary_.keys())
top_words.add(REPLACE_WORD)
embeddings_index = get_embedding()
print("Words are not found in the embedding:", top_words - embeddings_index.keys())
top_words = embeddings_index.keys()
print("Train questions are being prepared for LSTM...")
q1s_train, q2s_train, train_q_features = extract_features(train)
tokenizer = Tokenizer(filters="")
tokenizer.fit_on_texts(np.append(q1s_train, q2s_train))
word_index = tokenizer.word_index
data_1 = pad_sequences(tokenizer.texts_to_sequences(q1s_train), maxlen=MAX_SEQUENCE_LENGTH)
data_2 = pad_sequences(tokenizer.texts_to_sequences(q2s_train), maxlen=MAX_SEQUENCE_LENGTH)
labels = np.array(train["is_duplicate"])
nb_words = len(word_index) + 1
embedding_matrix = np.zeros((nb_words, EMBEDDING_DIM))
for word, i in word_index.items():
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None:
embedding_matrix[i] = embedding_vector
print("Train features are being merged with NLP and Non-NLP features...")
train_nlp_features = pd.read_csv("data/nlp_features_train.csv")
train_non_nlp_features = pd.read_csv("data/non_nlp_features_train.csv")
features_train = np.hstack((train_q_features, train_nlp_features, train_non_nlp_features))
print("Same steps are being applied for test...")
test["question1"] = test["question1"].fillna("").apply(preprocess)
test["question2"] = test["question2"].fillna("").apply(preprocess)
q1s_test, q2s_test, test_q_features = extract_features(test)
test_data_1 = pad_sequences(tokenizer.texts_to_sequences(q1s_test), maxlen=MAX_SEQUENCE_LENGTH)
test_data_2 = pad_sequences(tokenizer.texts_to_sequences(q2s_test), maxlen=MAX_SEQUENCE_LENGTH)
test_nlp_features = pd.read_csv("data/nlp_features_test.csv")
test_non_nlp_features = pd.read_csv("data/non_nlp_features_test.csv")
features_test = np.hstack((test_q_features, test_nlp_features, test_non_nlp_features))
skf = StratifiedKFold(n_splits=NUM_FOLDS, shuffle=True)
model_count = 0
for idx_train, idx_val in skf.split(train["is_duplicate"], train["is_duplicate"]):
print("MODEL:", model_count)
data_1_train = data_1[idx_train]
data_2_train = data_2[idx_train]
labels_train = labels[idx_train]
f_train = features_train[idx_train]
data_1_val = data_1[idx_val]
data_2_val = data_2[idx_val]
labels_val = labels[idx_val]
f_val = features_train[idx_val]
embedding_layer = Embedding(nb_words,
EMBEDDING_DIM,
weights=[embedding_matrix],
input_length=MAX_SEQUENCE_LENGTH,
trainable=False)
lstm_layer = LSTM(75, recurrent_dropout=0.2)
sequence_1_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype="int32")
embedded_sequences_1 = embedding_layer(sequence_1_input)
x1 = lstm_layer(embedded_sequences_1)
sequence_2_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype="int32")
embedded_sequences_2 = embedding_layer(sequence_2_input)
y1 = lstm_layer(embedded_sequences_2)
features_input = Input(shape=(f_train.shape[1],), dtype="float32")
features_dense = BatchNormalization()(features_input)
features_dense = Dense(200, activation="relu")(features_dense)
features_dense = Dropout(0.2)(features_dense)
addition = add([x1, y1])
minus_y1 = Lambda(lambda x: -x)(y1)
merged = add([x1, minus_y1])
merged = multiply([merged, merged])
merged = concatenate([merged, addition])
merged = Dropout(0.4)(merged)
merged = concatenate([merged, features_dense])
merged = BatchNormalization()(merged)
merged = GaussianNoise(0.1)(merged)
merged = Dense(150, activation="relu")(merged)
merged = Dropout(0.2)(merged)
merged = BatchNormalization()(merged)
out = Dense(1, activation="sigmoid")(merged)
model = Model(inputs=[sequence_1_input, sequence_2_input, features_input], outputs=out)
model.compile(loss="binary_crossentropy",
optimizer="nadam")
early_stopping = EarlyStopping(monitor="val_loss", patience=5)
best_model_path = "best_model" + str(model_count) + ".h5"
model_checkpoint = ModelCheckpoint(best_model_path, save_best_only=True, save_weights_only=True)
hist = model.fit([data_1_train, data_2_train, f_train], labels_train,
validation_data=([data_1_val, data_2_val, f_val], labels_val),
epochs=15, batch_size=BATCH_SIZE, shuffle=True,
callbacks=[early_stopping, model_checkpoint], verbose=1)
model.load_weights(best_model_path)
print(model_count, "validation loss:", min(hist.history["val_loss"]))
preds = model.predict([test_data_1, test_data_2, features_test], batch_size=BATCH_SIZE, verbose=1)
submission = pd.DataFrame({"test_id": test["test_id"], "is_duplicate": preds.ravel()})
submission.to_csv("predictions/preds" + str(model_count) + ".csv", index=False)
model_count += 1
|
mit
|
eco32i/tweed
|
tasm/plotting.py
|
1
|
4775
|
import math
import numpy as np
import pandas as pd
# import brewer2mpl
from pylab import figure, plot
#from scipy.stats.kde import gaussian_kde
import matplotlib.pyplot as plt
from django.http import HttpResponse
from django.core.exceptions import ImproperlyConfigured
from django.db.models.loading import get_model
from django.shortcuts import get_object_or_404
from django.views.generic.list import ListView
from tasm.models import Assembly, Transcript
from tasm.ggstyle import rstyle, rhist
class PlotMixin(object):
'''
A mixin that allows matplotlib plotting. Should be used together
with ListView or DetailView subclasses to get the plotting data
from the database.
'''
format = None
def make_plot(self):
'''
This needs to be implemented in the subclass.
'''
pass
def style_plot(self, axes):
'''
By default does nothing. May be used to style the plot
(xkcd, ggplot2, etc).
'''
pass
def get_response_content_type(self):
'''
Returns plot format to be used in the response.
'''
if self.format is not None:
return 'image/{0}'.format(self.format)
else:
raise ImproperlyConfigured('No format is specified for the plot')
def render_to_plot(self, context, **response_kwargs):
response = HttpResponse(
content_type=self.get_response_content_type()
)
fig = self.make_plot()
fig.savefig(response, format=self.format)
return response
class TranscriptPlotView(ListView, PlotMixin):
'''
A view that outputs various Transcript plots for the given assembly.
The following plots are produced:
- scatter plot of normalized coverage vs normalized length
- histogram of normalized coverage of all and best for locus
transcripts
- histogram of normalized length of all and best for locus
transcripts
'''
data_fields = ['length', 'coverage',]
model = Transcript
format = 'png'
def get_queryset(self):
self.asm = get_object_or_404(Assembly, pk=int(self.kwargs.get('asm_pk', '')))
return self.model._default_manager.for_asm(self.asm)
def get_dataframe(self, best=False):
'''
Builds a pandas dataframe by retrieving the fields specified
in self.data_fields from self.queryset.
'''
opts = self.model._meta
fields = [f for f in opts.get_all_field_names() if f in self.data_fields]
if best:
values_dict = self.model._default_manager.best_for_asm(self.asm).values(*fields)
else:
values_dict = self.model._default_manager.for_asm(self.asm).values(*fields)
df = pd.DataFrame.from_records(values_dict)
return df
def make_plot(self):
def _norm_df(df):
# FIXME: This is silly because it hardcodes field names
max_len = max(df['length'])
max_cov = max(df['coverage'])
df['length'] = df['length'] * 100 / max_len
df['coverage'] = df['coverage'] / max_cov
return df
def _scatter(ax, df1, df2):
ax.plot(df1['length'], df1['coverage'], 'o', color='#dc322f', alpha=0.2)
ax.plot(df2['length'], df2['coverage'], 'o', color='#268bd2', alpha=0.2)
ax.set_xlabel('Normalized length')
ax.set_ylabel('Normalized coverage')
rstyle(ax)
def _hist(ax, df1, df2, col='coverage'):
defaults = {
'facecolor': '#268bd2',
'edgecolor': '#268bd2',
#'normed': True,
'alpha': 0.25,
}
hist, bins, patches = rhist(ax, df1[col], **defaults)
defaults.update({'facecolor': '#dc322f', 'edgecolor': '#dc322f',})
hist, bins, patches = rhist(ax, df2[col], **defaults)
ax.set_xlabel('Normalized {0}'.format(col))
ax.set_ylabel('Frequency')
rstyle(ax)
import matplotlib
matplotlib.use('Agg')
fig = plt.figure(figsize=(6,18))
fig.patch.set_alpha(0)
ax = fig.add_subplot(311)
df_best = _norm_df(self.get_dataframe(best=True))
df_all = _norm_df(self.get_dataframe())
_scatter(ax, df_all, df_best)
ax = fig.add_subplot(312)
_hist(ax, df_best, df_all)
ax = fig.add_subplot(313)
_hist(ax, df_best, df_all, col='length')
return fig
def render_to_response(self, context, **response_kwargs):
return self.render_to_plot(context, **response_kwargs)
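# A minimal wiring sketch (illustrative only; the URL pattern, its name, and the urls module
# are assumptions, not taken from this app -- only the `asm_pk` kwarg is required by
# TranscriptPlotView.get_queryset above):
#
#     from django.conf.urls import url
#     from tasm.plotting import TranscriptPlotView
#
#     urlpatterns = [
#         url(r'^assembly/(?P<asm_pk>\d+)/transcripts/plot/$',
#             TranscriptPlotView.as_view(), name='transcript-plot'),
#     ]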
|
bsd-3-clause
|
gfyoung/pandas
|
pandas/tests/frame/methods/test_to_dict.py
|
2
|
10227
|
from collections import OrderedDict, defaultdict
from datetime import datetime
import numpy as np
import pytest
import pytz
from pandas import DataFrame, Series, Timestamp
import pandas._testing as tm
class TestDataFrameToDict:
def test_to_dict_timestamp(self):
# GH#11247
# split/records producing np.datetime64 rather than Timestamps
# on datetime64[ns] dtypes only
tsmp = Timestamp("20130101")
test_data = DataFrame({"A": [tsmp, tsmp], "B": [tsmp, tsmp]})
test_data_mixed = DataFrame({"A": [tsmp, tsmp], "B": [1, 2]})
expected_records = [{"A": tsmp, "B": tsmp}, {"A": tsmp, "B": tsmp}]
expected_records_mixed = [{"A": tsmp, "B": 1}, {"A": tsmp, "B": 2}]
assert test_data.to_dict(orient="records") == expected_records
assert test_data_mixed.to_dict(orient="records") == expected_records_mixed
expected_series = {
"A": Series([tsmp, tsmp], name="A"),
"B": Series([tsmp, tsmp], name="B"),
}
expected_series_mixed = {
"A": Series([tsmp, tsmp], name="A"),
"B": Series([1, 2], name="B"),
}
tm.assert_dict_equal(test_data.to_dict(orient="series"), expected_series)
tm.assert_dict_equal(
test_data_mixed.to_dict(orient="series"), expected_series_mixed
)
expected_split = {
"index": [0, 1],
"data": [[tsmp, tsmp], [tsmp, tsmp]],
"columns": ["A", "B"],
}
expected_split_mixed = {
"index": [0, 1],
"data": [[tsmp, 1], [tsmp, 2]],
"columns": ["A", "B"],
}
tm.assert_dict_equal(test_data.to_dict(orient="split"), expected_split)
tm.assert_dict_equal(
test_data_mixed.to_dict(orient="split"), expected_split_mixed
)
def test_to_dict_index_not_unique_with_index_orient(self):
# GH#22801
# Data loss when indexes are not unique. Raise ValueError.
df = DataFrame({"a": [1, 2], "b": [0.5, 0.75]}, index=["A", "A"])
msg = "DataFrame index must be unique for orient='index'"
with pytest.raises(ValueError, match=msg):
df.to_dict(orient="index")
def test_to_dict_invalid_orient(self):
df = DataFrame({"A": [0, 1]})
msg = "orient 'xinvalid' not understood"
with pytest.raises(ValueError, match=msg):
df.to_dict(orient="xinvalid")
@pytest.mark.parametrize("orient", ["d", "l", "r", "sp", "s", "i"])
def test_to_dict_short_orient_warns(self, orient):
# GH#32515
df = DataFrame({"A": [0, 1]})
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
df.to_dict(orient=orient)
@pytest.mark.parametrize("mapping", [dict, defaultdict(list), OrderedDict])
def test_to_dict(self, mapping):
# orient= should only take the listed options
# see GH#32515
test_data = {"A": {"1": 1, "2": 2}, "B": {"1": "1", "2": "2", "3": "3"}}
# GH#16122
recons_data = DataFrame(test_data).to_dict(into=mapping)
for k, v in test_data.items():
for k2, v2 in v.items():
assert v2 == recons_data[k][k2]
recons_data = DataFrame(test_data).to_dict("list", mapping)
for k, v in test_data.items():
for k2, v2 in v.items():
assert v2 == recons_data[k][int(k2) - 1]
recons_data = DataFrame(test_data).to_dict("series", mapping)
for k, v in test_data.items():
for k2, v2 in v.items():
assert v2 == recons_data[k][k2]
recons_data = DataFrame(test_data).to_dict("split", mapping)
expected_split = {
"columns": ["A", "B"],
"index": ["1", "2", "3"],
"data": [[1.0, "1"], [2.0, "2"], [np.nan, "3"]],
}
tm.assert_dict_equal(recons_data, expected_split)
recons_data = DataFrame(test_data).to_dict("records", mapping)
expected_records = [
{"A": 1.0, "B": "1"},
{"A": 2.0, "B": "2"},
{"A": np.nan, "B": "3"},
]
assert isinstance(recons_data, list)
assert len(recons_data) == 3
for left, right in zip(recons_data, expected_records):
tm.assert_dict_equal(left, right)
# GH#10844
recons_data = DataFrame(test_data).to_dict("index")
for k, v in test_data.items():
for k2, v2 in v.items():
assert v2 == recons_data[k2][k]
df = DataFrame(test_data)
df["duped"] = df[df.columns[0]]
recons_data = df.to_dict("index")
comp_data = test_data.copy()
comp_data["duped"] = comp_data[df.columns[0]]
for k, v in comp_data.items():
for k2, v2 in v.items():
assert v2 == recons_data[k2][k]
@pytest.mark.parametrize("mapping", [list, defaultdict, []])
def test_to_dict_errors(self, mapping):
# GH#16122
df = DataFrame(np.random.randn(3, 3))
msg = "|".join(
[
"unsupported type: <class 'list'>",
r"to_dict\(\) only accepts initialized defaultdicts",
]
)
with pytest.raises(TypeError, match=msg):
df.to_dict(into=mapping)
def test_to_dict_not_unique_warning(self):
# GH#16927: When converting to a dict, if a column has a non-unique name
# it will be dropped, throwing a warning.
df = DataFrame([[1, 2, 3]], columns=["a", "a", "b"])
with tm.assert_produces_warning(UserWarning):
df.to_dict()
# orient - orient argument to to_dict function
# item_getter - function for extracting value from
# the resulting dict using column name and index
@pytest.mark.parametrize(
"orient,item_getter",
[
("dict", lambda d, col, idx: d[col][idx]),
("records", lambda d, col, idx: d[idx][col]),
("list", lambda d, col, idx: d[col][idx]),
("split", lambda d, col, idx: d["data"][idx][d["columns"].index(col)]),
("index", lambda d, col, idx: d[idx][col]),
],
)
def test_to_dict_box_scalars(self, orient, item_getter):
# GH#14216, GH#23753
# make sure that we are boxing properly
df = DataFrame({"a": [1, 2], "b": [0.1, 0.2]})
result = df.to_dict(orient=orient)
assert isinstance(item_getter(result, "a", 0), int)
assert isinstance(item_getter(result, "b", 0), float)
def test_to_dict_tz(self):
# GH#18372 When converting to dict with orient='records' columns of
# datetime that are tz-aware were not converted to required arrays
data = [
(datetime(2017, 11, 18, 21, 53, 0, 219225, tzinfo=pytz.utc),),
(datetime(2017, 11, 18, 22, 6, 30, 61810, tzinfo=pytz.utc),),
]
df = DataFrame(list(data), columns=["d"])
result = df.to_dict(orient="records")
expected = [
{"d": Timestamp("2017-11-18 21:53:00.219225+0000", tz=pytz.utc)},
{"d": Timestamp("2017-11-18 22:06:30.061810+0000", tz=pytz.utc)},
]
tm.assert_dict_equal(result[0], expected[0])
tm.assert_dict_equal(result[1], expected[1])
@pytest.mark.parametrize(
"into, expected",
[
(
dict,
{
0: {"int_col": 1, "float_col": 1.0},
1: {"int_col": 2, "float_col": 2.0},
2: {"int_col": 3, "float_col": 3.0},
},
),
(
OrderedDict,
OrderedDict(
[
(0, {"int_col": 1, "float_col": 1.0}),
(1, {"int_col": 2, "float_col": 2.0}),
(2, {"int_col": 3, "float_col": 3.0}),
]
),
),
(
defaultdict(dict),
defaultdict(
dict,
{
0: {"int_col": 1, "float_col": 1.0},
1: {"int_col": 2, "float_col": 2.0},
2: {"int_col": 3, "float_col": 3.0},
},
),
),
],
)
def test_to_dict_index_dtypes(self, into, expected):
# GH#18580
# When using to_dict(orient='index') on a dataframe with int
# and float columns only the int columns were cast to float
df = DataFrame({"int_col": [1, 2, 3], "float_col": [1.0, 2.0, 3.0]})
result = df.to_dict(orient="index", into=into)
cols = ["int_col", "float_col"]
result = DataFrame.from_dict(result, orient="index")[cols]
expected = DataFrame.from_dict(expected, orient="index")[cols]
tm.assert_frame_equal(result, expected)
def test_to_dict_numeric_names(self):
# GH#24940
df = DataFrame({str(i): [i] for i in range(5)})
result = set(df.to_dict("records")[0].keys())
expected = set(df.columns)
assert result == expected
def test_to_dict_wide(self):
# GH#24939
df = DataFrame({(f"A_{i:d}"): [i] for i in range(256)})
result = df.to_dict("records")[0]
expected = {f"A_{i:d}": i for i in range(256)}
assert result == expected
def test_to_dict_orient_dtype(self):
# GH22620 & GH21256
df = DataFrame(
{
"bool": [True, True, False],
"datetime": [
datetime(2018, 1, 1),
datetime(2019, 2, 2),
datetime(2020, 3, 3),
],
"float": [1.0, 2.0, 3.0],
"int": [1, 2, 3],
"str": ["X", "Y", "Z"],
}
)
expected = {
"int": int,
"float": float,
"str": str,
"datetime": Timestamp,
"bool": bool,
}
for df_dict in df.to_dict("records"):
result = {col: type(df_dict[col]) for col in list(df.columns)}
assert result == expected
|
bsd-3-clause
|
glouppe/scikit-learn
|
examples/plot_digits_pipe.py
|
70
|
1813
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Pipelining: chaining a PCA and a logistic regression
=========================================================
The PCA does an unsupervised dimensionality reduction, while the logistic
regression does the prediction.
We use a GridSearchCV to set the dimensionality of the PCA
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, decomposition, datasets
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
logistic = linear_model.LogisticRegression()
pca = decomposition.PCA()
pipe = Pipeline(steps=[('pca', pca), ('logistic', logistic)])
digits = datasets.load_digits()
X_digits = digits.data
y_digits = digits.target
###############################################################################
# Plot the PCA spectrum
pca.fit(X_digits)
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.axes([.2, .2, .7, .7])
plt.plot(pca.explained_variance_, linewidth=2)
plt.axis('tight')
plt.xlabel('n_components')
plt.ylabel('explained_variance_')
###############################################################################
# Prediction
n_components = [20, 40, 64]
Cs = np.logspace(-4, 4, 3)
#Parameters of pipelines can be set using ‘__’ separated parameter names:
estimator = GridSearchCV(pipe,
dict(pca__n_components=n_components,
logistic__C=Cs))
estimator.fit(X_digits, y_digits)
plt.axvline(estimator.best_estimator_.named_steps['pca'].n_components,
linestyle=':', label='n_components chosen')
plt.legend(prop=dict(size=12))
plt.show()
|
bsd-3-clause
|
chengsoonong/crowdastro
|
crowdastro/experiment/experiment_lr_rf.py
|
1
|
4222
|
"""Compares logistic regression to random forests, trained on Norris and Fan.
Matthew Alger
The Australian National University
2016
"""
import argparse
import collections
import logging
import h5py
import matplotlib
import matplotlib.pyplot as plt
import numpy
import sklearn
from . import runners
from .results import Results
from ..config import config
from ..plot import vertical_scatter_ba
def main(crowdastro_h5_path, training_h5_path, results_h5_path,
overwrite=False, plot=False):
with h5py.File(crowdastro_h5_path, 'r') as crowdastro_h5, \
h5py.File(training_h5_path, 'r') as training_h5:
ir_survey = training_h5.attrs['ir_survey']
ir_survey_ = crowdastro_h5.attrs['ir_survey']
assert ir_survey == ir_survey_
n_splits = crowdastro_h5['/{}/cdfs/test_sets'.format(ir_survey)].shape[0]
n_examples, n_params = training_h5['features'].shape
n_params += 1 # Bias term.
methods = ['LR(Norris)', 'LR(Fan)', 'RF(Norris)', 'RF(Fan)']
model = '{} sklearn.linear_model.LogisticRegression'.format(
sklearn.__version__) # No model for RF.
results = Results(results_h5_path, methods, n_splits, n_examples,
n_params, model)
features = collections.defaultdict(
lambda: training_h5['features'].value)
targets = {
'LR(Norris)':
crowdastro_h5['/{}/cdfs/norris_labels'.format(ir_survey)],
'LR(Fan)':
crowdastro_h5['/{}/cdfs/fan_labels'.format(ir_survey)],
'RF(Norris)':
crowdastro_h5['/{}/cdfs/norris_labels'.format(ir_survey)],
'RF(Fan)':
crowdastro_h5['/{}/cdfs/fan_labels'.format(ir_survey)],
}
for split_id, test_set in enumerate(
crowdastro_h5['/{}/cdfs/test_sets'.format(ir_survey)]):
logging.info('Test {}/{}'.format(split_id + 1, n_splits))
for method_id, method in enumerate(methods):
if method.startswith('LR'):
runner = runners.lr
else:
runner = runners.rf
logging.info('Method {} ({}/{})'.format(method, method_id + 1,
len(methods)))
if ir_survey == 'swire':
features_ = numpy.nan_to_num(features[method])
p2, p98 = numpy.percentile(
features_[:config['surveys']['swire']['n_features']],
[2, 98])
features_[features_ > p98] = p98
features_[features_ < p2] = p2
logging.info('Clamping to range {} -- {}'.format(p2, p98))
else:
features_ = features[method]
runner(results, method, split_id, features_,
targets[method], list(test_set), overwrite=overwrite)
if plot:
matplotlib.rcParams['font.family'] = 'serif'
matplotlib.rcParams['font.serif'] = ['Palatino Linotype']
plt.figure(figsize=(6, 3)) # Shrink it a little for thesis.
vertical_scatter_ba(
results,
crowdastro_h5['/{}/cdfs/norris_labels'.format(ir_survey)].value,
violin=True)
plt.show()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--crowdastro', default='data/crowdastro.h5',
help='HDF5 crowdastro data file')
parser.add_argument('--training', default='data/training.h5',
help='HDF5 training data file')
parser.add_argument('--results', default='data/results_lr_rf.h5',
help='HDF5 results data file')
parser.add_argument('--overwrite', action='store_true',
help='Overwrite existing results')
parser.add_argument('--plot', action='store_true', help='Generate a plot')
args = parser.parse_args()
logging.root.setLevel(logging.INFO)
main(args.crowdastro, args.training, args.results, overwrite=args.overwrite,
plot=args.plot)
|
mit
|
patrickbwarren/SunlightHNC
|
rpm_explorer.py
|
1
|
17506
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# This file includes unicode characters like π = 3.14159
# This file is part of SunlightDPD - a home for open source software
# related to the dissipative particle dynamics (DPD) simulation
# method.
# Main script copyright (c) 2009-2019 Unilever UK Central Resources Ltd
# (Registered in England & Wales, Company No 29140; Registered
# Office: Unilever House, Blackfriars, London, EC4P 4BQ, UK).
# ZoomPan was adapted from
# https://stackoverflow.com/questions/11551049/matplotlib-plot-zooming-with-scroll-wheel
# copyright (c) remains with the original authors.
# SunlightDPD is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# SunlightDPD is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with SunlightDPD. If not, see <http://www.gnu.org/licenses/>.
# By default this is for RPM without solvent
# Run with --solvated for solvent primitive model
# MOUSE AND KEYBOARD CONTROLS
# pan and zoom in/out with mouse wheel work in the main plot window
# control + mouse wheel zooms in/out horizontal axis
# sliders can be adjusted with mouse (move pointer onto slider)
# click to jump to a given position
# scroll with mouse wheel for medium adjustment
# shift + mouse wheel for fine adjustment
# control + mouse wheel for coarse adjustment
# press and release spacebar to request entry of a specific value (in terminal window)
# The following correspond to the Fig 1 insets in Coupette et al., PRL 121, 075501 (2018)
# For this lB = 0.7 nm, sigma = 0.3 nm, [salt] = 1 M, and [solvent] = 10 M and 40 M.
# Note the use of computed values for T* = sigma/lB and rho*sigma^3. Also 1 M = 0.602 molecules per nm^3
# For the salt, rhoz = [Na+] + [Cl-] = 2 [NaCl], hence the factor 2 in --rhoz
# python3 rpm_explorer.py --solvated --tstar=0.3/0.7 --rhoz=2*0.602*0.3^3 --rhos=10*0.602*0.3^3
# python3 rpm_explorer.py --solvated --tstar=0.3/0.7 --rhoz=2*0.602*0.3^3 --rhos=40*0.602*0.3^3
# Add --diam='[0.25/0.3,0.3373/0.3,1]' to reproduce the size-asymmetric model shown in Fig S1.
import argparse
import math as m
import numpy as np
import matplotlib.pyplot as plt
from numpy import pi as π
from oz import wizard as w
from matplotlib.widgets import Slider, Button, RadioButtons
parser = argparse.ArgumentParser(description='Interactive RPM explorer')
parser.add_argument('--ng', action='store', default='16384', help='number of grid points (default 2^14 = 16384)')
parser.add_argument('--deltar', action='store', default=1e-3, type=float, help='grid spacing (default 1e-3)')
parser.add_argument('--alpha', action='store', default=0.2, type=float, help='Picard mixing fraction (default 0.2)')
parser.add_argument('--npic', action='store', default=6, type=int, help='number of Picard steps (default 6)')
parser.add_argument('--nps', action='store', default=6, type=int, help='length of history array (default 6)')
parser.add_argument('--maxsteps', action='store', default=100, type=int, help='number of iterations (default 100)')
parser.add_argument('--diam', action='store', default='[1]', help='hard core diameters (default [1])')
parser.add_argument('--sigma', action='store', default=0.0, type=float, help='inner core diameter (default min diam)')
parser.add_argument('--rhoz', action='store', default='0.1', help='total ion density (default 0.1)')
parser.add_argument('--rhos', action='store', default='0.4', help='added solvent density (default 0.4)')
parser.add_argument('--tstar', action='store', default='1.0', help='reduced temperature (default 1.0)')
parser.add_argument('--solvated', action='store_true', help='for solvated primitive models')
parser.add_argument('--rmax', action='store', default=15.0, type=float, help='maximum radial distance (default 15)')
parser.add_argument('--floor', action='store', default=1e-20, type=float, help='floor for r h(r) (default 1e-20)')
parser.add_argument('--verbose', action='store_true', help='more output')
args = parser.parse_args()
args.swap = False # which combination of h_ij to show
args.choice = ['both', 'both'] # which signs of h(r) to show
w.ncomp = 3 if args.solvated else 2
w.ng = eval(args.ng.replace('^', '**')) # catch 2^10 etc
w.deltar = args.deltar
w.alpha = args.alpha
w.npic = args.npic
w.nps = args.nps
w.maxsteps = args.maxsteps
w.verbose = args.verbose
w.initialise()
# if the user sets tstar to a string (eg 'infinity') this is caught here
try:
tstar_init = eval(args.tstar)
except NameError:
tstar_init = 0
w.lb = 1/tstar_init if tstar_init else 0.0
# Now construct the hard core diameters
diam = eval(args.diam)
if not isinstance(diam, list):
diam = [diam]
if len(diam) == 1: diam.append(diam[0])
w.diam[0, 0] = diam[0]
w.diam[0, 1] = 0.5*(diam[0] + diam[1])
w.diam[1, 1] = diam[1]
if args.solvated:
if len(diam) == 2: diam.append(0.5*(diam[0] + diam[1]))
w.diam[0, 2] = 0.5*(diam[0] + diam[2])
w.diam[1, 2] = 0.5*(diam[1] + diam[2])
w.diam[2, 2] = diam[2]
# Over-write pairwise diameters for non-additivity
if len(diam) == 4: w.diam[0, 1] = diam[3]
if len(diam) == 5: w.diam[0, 2] = w.diam[1, 2] = diam[4]
if len(diam) == 6: w.diam[1, 2] = diam[5]
w.sigma = args.sigma
w.rpm_potential()
rhoz_init = eval(args.rhoz.replace('^', '**')) # total charged species density
rhos_init = eval(args.rhos.replace('^', '**')) # added solvent density
def solve(rhoz, rhos):
"""solve the structure at the given densities"""
w.rho[0] = rhoz/2
w.rho[1] = rhoz/2
if args.solvated:
w.rho[2] = rhos
w.hnc_solve()
if w.return_code: exit()
def selector(individual):
"""return a list according to (hnn, hzz) and (h00, h01) representations"""
if individual:
return [[0.5, 0, 0.5, 'g', '(h00+h11)/2'], [0, 1, 0, 'b', 'h01']]
else:
return [[0.25, 0.5, 0.25, 'k', '(h00+2h01+h11)/4'], [0.25, -0.5, 0.25, 'r', '(h00-2h01+h11)/4']]
def update(val):
"""update state point from sliders, solve, and replot"""
if tstar_slider:
w.lb = 1 / tstar_slider.val
w.sigma = args.sigma
w.rpm_potential()
rhoz = 10**rhoz_slider.val
rhos = 10**rhos_slider.val if rhos_slider else rhos_init
solve(rhoz, rhos)
replot()
def get_ann_txt():
"""get a string for annotating the plot"""
rhoz = w.rho[0] + w.rho[1]
tstar = '%5.3f' % (1/w.lb) if w.lb else '∞'
if args.solvated:
rhos = w.rho[2]
msg = 'T* = %s ρz = %8.4f ρs = %8.4f HNC err = %0.1g' % (tstar, rhoz, rhos, w.error)
else:
msg = 'T* = %s ρz = %8.4f HNC err = %0.1g' % (tstar, rhoz, w.error)
return msg
def replot():
"""replot the lines and re-annotate"""
for i, (w00, w01, w11, color, text) in enumerate(selector(args.swap)):
r = w.r[imin:imax]
rh_pos = r * (w00*w.hr[imin:imax, 0, 0] + w01*w.hr[imin:imax, 0, 1] + w11*w.hr[imin:imax, 1, 1])
rh_neg = - rh_pos
rh_pos[rh_pos < args.floor] = args.floor
rh_neg[rh_neg < args.floor] = args.floor
if line[2*i]: # if line[2*i] is not None then reset the y data.
line[2*i].set_ydata(np.log10(rh_neg))
line[2*i+1].set_ydata(np.log10(rh_pos))
else: # plotting for the first time
line[2*i], = ax.plot(r, np.log10(rh_neg), color+'-')
line[2*i+1], = ax.plot(r, np.log10(rh_pos), color+'--')
label[i] = ax.annotate(text, xy=(0.2+0.4*i, 0.92), color=color, xycoords='axes fraction')
choice = args.choice[i]
line[2*i].set_linestyle('None' if choice == 'none' or choice == '-ve' else 'solid')
line[2*i+1].set_linestyle('None' if choice == 'none' or choice == '+ve' else 'dashed')
ann.set_text(get_ann_txt())
fig.canvas.draw_idle()
# Set up the plot area
fig, ax = plt.subplots()
plt.subplots_adjust(left=0.25, bottom=0.30)
ax.set_xlim([1, args.rmax])
ax.set_ylim([-12, 1])
ax.set_xlabel('r / σ')
ax.set_ylabel('log10[- r h(r)]')
# report on diameters
txt = 'diams : %0.2f %0.2f' % (w.diam[0, 0], w.diam[1, 1])
if args.solvated:
txt = txt + ' %0.2f' % w.diam[2, 2]
txt = txt + ' excess : %0.2f' % (w.diam[0, 1] - 0.5*(w.diam[0, 0] + w.diam[1, 1]))
if args.solvated:
txt = txt + ' %0.2f' % (w.diam[0, 2] - 0.5*(w.diam[0, 0] + w.diam[2, 2]))
txt = txt + ' %0.2f' % (w.diam[1, 2] - 0.5*(w.diam[1, 1] + w.diam[2, 2]))
ax.annotate(txt, xy=(0.02, 1.08), xycoords='axes fraction')
# solve and make initial plot
imin = int(1.0 / w.deltar)
imax = int(args.rmax / w.deltar)
line = [None, None, None, None] # will contain the data for the lines
label = [None, None] # will contain the labels for the lines
ann = ax.annotate(get_ann_txt(), xy=(0.02, 1.02), xycoords='axes fraction')
solve(rhoz_init, rhos_init)
replot()
# Set up sliders for lB, rho_z, and rho_s if required
back_color = 'powderblue'
if tstar_init > 0:
ax_tstar = plt.axes([0.25, 0.05, 0.65, 0.03], facecolor=back_color)
tstar_slider = Slider(ax_tstar, 'T*', 0.0, max(2.0, tstar_init), valinit=tstar_init, valstep=0.01, valfmt='%5.3f')
tstar_slider.on_changed(update)
else:
tstar_slider = None
ax_rhoz = plt.axes([0.25, 0.10 if tstar_slider else 0.05, 0.65, 0.03], facecolor=back_color)
rhoz_slider = Slider(ax_rhoz, 'ρ_z', -3, 0, valinit=m.log10(rhoz_init), valfmt='%5.3f')
rhoz_slider.on_changed(update)
if args.solvated:
ax_rhos = plt.axes([0.25, 0.15 if tstar_slider else 0.10, 0.65, 0.03], facecolor=back_color)
rhos_slider = Slider(ax_rhos, 'ρ_s', -3, 0, valinit=m.log10(rhos_init), valfmt='%5.3f')
rhos_slider.on_changed(update)
else:
rhos_slider = None
# Set up buttons
def radio1(val):
"""Select between showing both, +ve, -ve or none for first h(r)"""
args.choice[0] = val
replot()
def radio2(val):
"""Select between showing both, +ve, -ve or none for second h(r)"""
args.choice[1] = val
replot()
radio = [radio1, radio2]
ax_choice = [None, None]
ax_choice[0] = plt.axes([0.05, 0.42, 0.1, 0.15])
ax_choice[1] = plt.axes([0.05, 0.25, 0.1, 0.15])
choice = [None, None]
for i in [0, 1]:
choice[i] = RadioButtons(ax_choice[i], ('none', '+ve', '-ve', 'both'), active=3)
choice[i].on_clicked(radio[i])
for i, (w00, w01, w11, color, text) in enumerate(selector(args.swap)):
[ label.set_color(color) for label in choice[i].labels ]
def swap(event):
"""swap between (hnn, hzz) and (h00, h01) representations"""
args.swap = not args.swap
for i, (w00, w01, w11, color, text) in enumerate(selector(args.swap)):
[ line[2*i+j].set_color(color) for j in [0, 1] ]
label[i].set_color(color)
label[i].set_text(text)
[ label.set_color(color) for label in choice[i].labels ]
replot()
ax_swap = plt.axes([0.05, 0.20, 0.1, 0.03])
swap_button = Button(ax_swap, 'swap', color=back_color, hovercolor='0.975')
swap_button.on_clicked(swap)
def reset(event):
"""reset all slider positions and plot area"""
if tstar_slider:
tstar_slider.reset()
rhoz_slider.reset()
if rhos_slider:
rhos_slider.reset()
ax.set_xlim([1, args.rmax])
ax.set_ylim([-12, 1])
ax.figure.canvas.draw()
ax_reset = plt.axes([0.05, 0.15, 0.1, 0.03])
reset_button = Button(ax_reset, 'reset', color=back_color, hovercolor='0.975')
reset_button.on_clicked(reset)
def dump(event):
"""write state point (T*, rho_z, kappa, [rho_s]) to std out"""
rhoz = w.rho[0] + w.rho[1]
kappa = m.sqrt(4*π*w.lb*rhoz)
rhos = w.rho[2] if args.solvated else 0
tstar = '%g' % (1/w.lb) if w.lb else '∞'
print('%s\t%g\t%g\t%g' % (tstar, rhos, rhoz, kappa))
ax_dump = plt.axes([0.05, 0.10, 0.1, 0.03])
dump_button = Button(ax_dump, 'dump', color=back_color, hovercolor='0.975')
dump_button.on_clicked(dump)
def quit(event):
exit(0)
ax_quit = plt.axes([0.05, 0.05, 0.1, 0.03])
quit_button = Button(ax_quit, 'quit', color=back_color, hovercolor='0.975')
quit_button.on_clicked(quit)
# ZoomPan was adapted from
# https://stackoverflow.com/questions/11551049/matplotlib-plot-zooming-with-scroll-wheel
# copyright (c) remains with the original authors.
class ZoomPan:
def __init__(self, ax):
self.ax = ax
self.press = False
self.cur_xlim = None
self.cur_ylim = None
self.xpress = None
self.ypress = None
self.control_down = False
def factory(self, base_scale=1.1):
def zoom(event):
if event.inaxes != self.ax: return
cur_xlim = self.ax.get_xlim()
cur_ylim = self.ax.get_ylim()
xdata, ydata = event.xdata, event.ydata
if event.button == 'down':
scale_factor = 1 / base_scale
elif event.button == 'up':
scale_factor = base_scale
new_width = (cur_xlim[1] - cur_xlim[0]) * scale_factor
if self.control_down:
new_height = (cur_ylim[1] - cur_ylim[0])
else:
new_height = (cur_ylim[1] - cur_ylim[0]) * scale_factor
relx = (cur_xlim[1] - xdata)/(cur_xlim[1] - cur_xlim[0])
rely = (cur_ylim[1] - ydata)/(cur_ylim[1] - cur_ylim[0])
self.ax.set_xlim([xdata - new_width * (1-relx), xdata + new_width * (relx)])
self.ax.set_ylim([ydata - new_height * (1-rely), ydata + new_height * (rely)])
self.ax.figure.canvas.draw()
def button_down(event):
if event.inaxes != self.ax: return
self.cur_xlim = self.ax.get_xlim()
self.cur_ylim = self.ax.get_ylim()
self.press = True
self.xpress, self.ypress = event.xdata, event.ydata
def button_up(event):
self.press = False
self.ax.figure.canvas.draw()
def mouse_move(event):
if self.press is False: return
if event.inaxes != self.ax: return
dx = event.xdata - self.xpress
dy = event.ydata - self.ypress
self.cur_xlim -= dx
self.cur_ylim -= dy
self.ax.set_xlim(self.cur_xlim)
self.ax.set_ylim(self.cur_ylim)
self.ax.figure.canvas.draw()
def key_down(event):
if event.key == 'control':
self.control_down = True
def key_up(event):
if event.key == 'control':
self.control_down = False
fig = self.ax.get_figure()
fig.canvas.mpl_connect('button_press_event', button_down)
fig.canvas.mpl_connect('button_release_event', button_up)
fig.canvas.mpl_connect('motion_notify_event', mouse_move)
fig.canvas.mpl_connect('key_press_event', key_down)
fig.canvas.mpl_connect('key_release_event', key_up)
fig.canvas.mpl_connect('scroll_event', zoom)
return zoom, button_down, button_up, mouse_move
zp = ZoomPan(ax).factory()
class SliderScroll:
def __init__(self, ax):
self.ax = ax
self.shift_down = False
self.control_down = False
def factory(self, sliders, superfine_scale=0.001, fine_scale=0.01, coarse_scale=0.1):
def scroll(event):
if event.inaxes in sliders:
slider = sliders[event.inaxes]
saltus = superfine_scale if self.shift_down else coarse_scale if self.control_down else fine_scale
val = slider.val
if event.button == 'down':
val = val - saltus
elif event.button == 'up':
val = val + saltus
slider.set_val(val)
update(val)
def key_down(event):
if event.key == 'shift':
self.shift_down = True
elif event.key == 'control':
self.control_down = True
def key_up(event):
if event.key == 'shift':
self.shift_down = False
elif event.key == 'control':
self.control_down = False
elif event.key == ' ':
if event.inaxes in sliders:
slider = sliders[event.inaxes]
s = input('enter a value for %s\n' % slider_name[slider])
try:
val = float(s)
print('%s set to %g' % (slider_name[slider], val))
slider.set_val(m.log10(val) if log_slider[slider] else val)
update(val)
except ValueError:
print('invalid number ', s)
fig = self.ax.get_figure()
fig.canvas.mpl_connect('scroll_event', scroll)
fig.canvas.mpl_connect('key_press_event', key_down)
fig.canvas.mpl_connect('key_release_event', key_up)
return scroll, key_up, key_down
sliders = {ax_rhoz: rhoz_slider}
slider_name = {rhoz_slider: 'rho_z'}
log_slider = {rhoz_slider: True}
if tstar_slider:
sliders[ax_tstar] = tstar_slider
slider_name[tstar_slider] = 'T*'
log_slider[tstar_slider] = False
if rhos_slider:
sliders[ax_rhos] = rhos_slider
slider_name[rhos_slider] = 'rho_s'
log_slider[rhos_slider] = True
ss = SliderScroll(ax).factory(sliders)
plt.show()
|
gpl-2.0
|
caseyching/incubator-airflow
|
airflow/hooks/hive_hooks.py
|
13
|
25357
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
from builtins import zip
from past.builtins import basestring
import collections
import unicodecsv as csv
import itertools
import logging
import re
import subprocess
import time
from tempfile import NamedTemporaryFile
import hive_metastore
from airflow.exceptions import AirflowException
from airflow.hooks.base_hook import BaseHook
from airflow.utils.helpers import as_flattened_list
from airflow.utils.file import TemporaryDirectory
from airflow import configuration
import airflow.security.utils as utils
HIVE_QUEUE_PRIORITIES = ['VERY_HIGH', 'HIGH', 'NORMAL', 'LOW', 'VERY_LOW']
class HiveCliHook(BaseHook):
"""Simple wrapper around the hive CLI.
It also supports ``beeline``,
a lighter CLI that runs over JDBC and is replacing the heavier
traditional CLI. To enable ``beeline``, set the use_beeline param in the
extra field of your connection as in ``{ "use_beeline": true }``
Note that you can also set default hive CLI parameters using the
``hive_cli_params`` to be used in your connection as in
``{"hive_cli_params": "-hiveconf mapred.job.tracker=some.jobtracker:444"}``
Parameters passed here can be overridden by run_cli's hive_conf param.
The extra connection parameter ``auth`` gets passed into the ``jdbc``
connection string as-is.
:param mapred_queue: queue used by the Hadoop Scheduler (Capacity or Fair)
:type mapred_queue: string
:param mapred_queue_priority: priority within the job queue.
Possible settings include: VERY_HIGH, HIGH, NORMAL, LOW, VERY_LOW
:type mapred_queue_priority: string
:param mapred_job_name: This name will appear in the jobtracker.
This can make monitoring easier.
:type mapred_job_name: string
"""
def __init__(
self,
hive_cli_conn_id="hive_cli_default",
run_as=None,
mapred_queue=None,
mapred_queue_priority=None,
mapred_job_name=None):
conn = self.get_connection(hive_cli_conn_id)
self.hive_cli_params = conn.extra_dejson.get('hive_cli_params', '')
self.use_beeline = conn.extra_dejson.get('use_beeline', False)
self.auth = conn.extra_dejson.get('auth', 'noSasl')
self.conn = conn
self.run_as = run_as
if mapred_queue_priority:
mapred_queue_priority = mapred_queue_priority.upper()
if mapred_queue_priority not in HIVE_QUEUE_PRIORITIES:
raise AirflowException(
"Invalid Mapred Queue Priority. Valid values are: "
"{}".format(', '.join(HIVE_QUEUE_PRIORITIES)))
self.mapred_queue = mapred_queue
self.mapred_queue_priority = mapred_queue_priority
self.mapred_job_name = mapred_job_name
def _prepare_cli_cmd(self):
"""
This function creates the command list from available information
"""
conn = self.conn
hive_bin = 'hive'
cmd_extra = []
if self.use_beeline:
hive_bin = 'beeline'
jdbc_url = "jdbc:hive2://{conn.host}:{conn.port}/{conn.schema}"
if configuration.get('core', 'security') == 'kerberos':
template = conn.extra_dejson.get(
'principal', "hive/[email protected]")
if "_HOST" in template:
template = utils.replace_hostname_pattern(
utils.get_components(template))
proxy_user = "" # noqa
if conn.extra_dejson.get('proxy_user') == "login" and conn.login:
proxy_user = "hive.server2.proxy.user={0}".format(conn.login)
elif conn.extra_dejson.get('proxy_user') == "owner" and self.run_as:
proxy_user = "hive.server2.proxy.user={0}".format(self.run_as)
jdbc_url += ";principal={template};{proxy_user}"
elif self.auth:
jdbc_url += ";auth=" + self.auth
jdbc_url = jdbc_url.format(**locals())
cmd_extra += ['-u', jdbc_url]
if conn.login:
cmd_extra += ['-n', conn.login]
if conn.password:
cmd_extra += ['-p', conn.password]
hive_params_list = self.hive_cli_params.split()
return [hive_bin] + cmd_extra + hive_params_list
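# For illustration, with ``use_beeline`` enabled and a hypothetical connection
# (host, port, schema and login below are made up), the returned command list
# looks roughly like:
#   ['beeline', '-u', 'jdbc:hive2://hive-host:10000/default;auth=noSasl', '-n', 'hive_user']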
def _prepare_hiveconf(self, d):
"""
This function prepares a list of hiveconf params
from a dictionary of key value pairs.
:param d:
:type d: dict
>>> hh = HiveCliHook()
>>> hive_conf = {"hive.exec.dynamic.partition": "true",
... "hive.exec.dynamic.partition.mode": "nonstrict"}
>>> hh._prepare_hiveconf(hive_conf)
["-hiveconf", "hive.exec.dynamic.partition=true",\
"-hiveconf", "hive.exec.dynamic.partition.mode=nonstrict"]
"""
if not d:
return []
return as_flattened_list(
zip(
["-hiveconf"] * len(d),
["{}={}".format(k, v) for k, v in d.items()]
)
)
def run_cli(self, hql, schema=None, verbose=True, hive_conf=None):
"""
Run an hql statement using the hive cli. If hive_conf is specified
it should be a dict and the entries will be set as key/value pairs
in HiveConf
:param hive_conf: if specified these key value pairs will be passed
to hive as ``-hiveconf "key"="value"``. Note that they will be
passed after the ``hive_cli_params`` and thus will override
whatever values are specified in the database.
:type hive_conf: dict
>>> hh = HiveCliHook()
>>> result = hh.run_cli("USE airflow;")
>>> ("OK" in result)
True
"""
conn = self.conn
schema = schema or conn.schema
if schema:
hql = "USE {schema};\n{hql}".format(**locals())
with TemporaryDirectory(prefix='airflow_hiveop_') as tmp_dir:
with NamedTemporaryFile(dir=tmp_dir) as f:
f.write(hql.encode('UTF-8'))
f.flush()
hive_cmd = self._prepare_cli_cmd()
hive_conf_params = self._prepare_hiveconf(hive_conf)
if self.mapred_queue:
hive_conf_params.extend(
['-hiveconf',
'mapreduce.job.queuename={}'
.format(self.mapred_queue)])
if self.mapred_queue_priority:
hive_conf_params.extend(
['-hiveconf',
'mapreduce.job.priority={}'
.format(self.mapred_queue_priority)])
if self.mapred_job_name:
hive_conf_params.extend(
['-hiveconf',
'mapred.job.name={}'
.format(self.mapred_job_name)])
hive_cmd.extend(hive_conf_params)
hive_cmd.extend(['-f', f.name])
if verbose:
logging.info(" ".join(hive_cmd))
sp = subprocess.Popen(
hive_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
cwd=tmp_dir)
self.sp = sp
stdout = ''
while True:
line = sp.stdout.readline()
if not line:
break
stdout += line.decode('UTF-8')
if verbose:
logging.info(line.decode('UTF-8').strip())
sp.wait()
if sp.returncode:
raise AirflowException(stdout)
return stdout
def test_hql(self, hql):
"""
Test an hql statement using the hive cli and EXPLAIN
"""
create, insert, other = [], [], []
for query in hql.split(';'): # naive
query_original = query
query = query.lower().strip()
if query.startswith('create table'):
create.append(query_original)
elif query.startswith(('set ',
'add jar ',
'create temporary function')):
other.append(query_original)
elif query.startswith('insert'):
insert.append(query_original)
other = ';'.join(other)
for query_set in [create, insert]:
for query in query_set:
query_preview = ' '.join(query.split())[:50]
logging.info("Testing HQL [{0} (...)]".format(query_preview))
if query_set == insert:
query = other + '; explain ' + query
else:
query = 'explain ' + query
try:
self.run_cli(query, verbose=False)
except AirflowException as e:
message = e.args[0].split('\n')[-2]
logging.info(message)
error_loc = re.search(r'(\d+):(\d+)', message)
if error_loc and error_loc.group(1).isdigit():
l = int(error_loc.group(1))
begin = max(l-2, 0)
end = min(l+3, len(query.split('\n')))
context = '\n'.join(query.split('\n')[begin:end])
logging.info("Context :\n {0}".format(context))
else:
logging.info("SUCCESS")
def load_file(
self,
filepath,
table,
delimiter=",",
field_dict=None,
create=True,
overwrite=True,
partition=None,
recreate=False):
"""
Loads a local file into Hive
Note that the table generated in Hive uses ``STORED AS textfile``
which isn't the most efficient serialization format. If a
large amount of data is loaded and/or if the table gets
queried considerably, you may want to use this operator only to
stage the data into a temporary table before loading it into its
final destination using a ``HiveOperator``.
:param table: target Hive table, use dot notation to target a
specific database
:type table: str
:param create: whether to create the table if it doesn't exist
:type create: bool
:param recreate: whether to drop and recreate the table at every
execution
:type recreate: bool
:param partition: target partition as a dict of partition columns
and values
:type partition: dict
:param delimiter: field delimiter in the file
:type delimiter: str
"""
hql = ''
if recreate:
hql += "DROP TABLE IF EXISTS {table};\n"
if create or recreate:
fields = ",\n ".join(
[k + ' ' + v for k, v in field_dict.items()])
hql += "CREATE TABLE IF NOT EXISTS {table} (\n{fields})\n"
if partition:
pfields = ",\n ".join(
[p + " STRING" for p in partition])
hql += "PARTITIONED BY ({pfields})\n"
hql += "ROW FORMAT DELIMITED\n"
hql += "FIELDS TERMINATED BY '{delimiter}'\n"
hql += "STORED AS textfile;"
hql = hql.format(**locals())
logging.info(hql)
self.run_cli(hql)
hql = "LOAD DATA LOCAL INPATH '{filepath}' "
if overwrite:
hql += "OVERWRITE "
hql += "INTO TABLE {table} "
if partition:
pvals = ", ".join(
["{0}='{1}'".format(k, v) for k, v in partition.items()])
hql += "PARTITION ({pvals});"
hql = hql.format(**locals())
logging.info(hql)
self.run_cli(hql)
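# Illustrative sketch of load_file usage (the file path, table name and column
# types below are hypothetical):
#   hh = HiveCliHook()
#   hh.load_file(filepath='/tmp/names.csv',
#                table='mydb.names_staging',
#                field_dict=collections.OrderedDict([('name', 'STRING'), ('num', 'INT')]),
#                delimiter=',',
#                partition={'ds': '2015-01-01'},
#                recreate=True)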
def kill(self):
if hasattr(self, 'sp'):
if self.sp.poll() is None:
print("Killing the Hive job")
self.sp.terminate()
time.sleep(60)
self.sp.kill()
class HiveMetastoreHook(BaseHook):
""" Wrapper to interact with the Hive Metastore"""
def __init__(self, metastore_conn_id='metastore_default'):
self.metastore_conn = self.get_connection(metastore_conn_id)
self.metastore = self.get_metastore_client()
def __getstate__(self):
# This is for pickling to work despite the thrift hive client not
# being picklable
d = dict(self.__dict__)
del d['metastore']
return d
def __setstate__(self, d):
self.__dict__.update(d)
self.__dict__['metastore'] = self.get_metastore_client()
def get_metastore_client(self):
"""
Returns a Hive thrift client.
"""
from thrift.transport import TSocket, TTransport
from thrift.protocol import TBinaryProtocol
from hive_service import ThriftHive
ms = self.metastore_conn
auth_mechanism = ms.extra_dejson.get('authMechanism', 'NOSASL')
if configuration.get('core', 'security') == 'kerberos':
auth_mechanism = ms.extra_dejson.get('authMechanism', 'GSSAPI')
kerberos_service_name = ms.extra_dejson.get('kerberos_service_name', 'hive')
socket = TSocket.TSocket(ms.host, ms.port)
if configuration.get('core', 'security') == 'kerberos' and auth_mechanism == 'GSSAPI':
try:
import saslwrapper as sasl
except ImportError:
import sasl
def sasl_factory():
sasl_client = sasl.Client()
sasl_client.setAttr("host", ms.host)
sasl_client.setAttr("service", kerberos_service_name)
sasl_client.init()
return sasl_client
from thrift_sasl import TSaslClientTransport
transport = TSaslClientTransport(sasl_factory, "GSSAPI", socket)
else:
transport = TTransport.TBufferedTransport(socket)
protocol = TBinaryProtocol.TBinaryProtocol(transport)
return ThriftHive.Client(protocol)
def get_conn(self):
return self.metastore
def check_for_partition(self, schema, table, partition):
"""
Checks whether a partition exists
:param schema: Name of hive schema (database) @table belongs to
:type schema: string
:param table: Name of hive table @partition belongs to
:type table: string
:param partition: Expression that matches the partitions to check for
(eg `a = 'b' AND c = 'd'`)
:type partition: string
:rtype: boolean
>>> hh = HiveMetastoreHook()
>>> t = 'static_babynames_partitioned'
>>> hh.check_for_partition('airflow', t, "ds='2015-01-01'")
True
"""
self.metastore._oprot.trans.open()
partitions = self.metastore.get_partitions_by_filter(
schema, table, partition, 1)
self.metastore._oprot.trans.close()
if partitions:
return True
else:
return False
def check_for_named_partition(self, schema, table, partition_name):
"""
Checks whether a partition with a given name exists
:param schema: Name of hive schema (database) @table belongs to
:type schema: string
:param table: Name of hive table @partition belongs to
:type table: string
:param partition_name: Name of the partitions to check for (eg `a=b/c=d`)
:type partition_name: string
:rtype: boolean
>>> hh = HiveMetastoreHook()
>>> t = 'static_babynames_partitioned'
>>> hh.check_for_named_partition('airflow', t, "ds=2015-01-01")
True
>>> hh.check_for_named_partition('airflow', t, "ds=xxx")
False
"""
self.metastore._oprot.trans.open()
try:
self.metastore.get_partition_by_name(
schema, table, partition_name)
return True
except hive_metastore.ttypes.NoSuchObjectException:
return False
finally:
self.metastore._oprot.trans.close()
def get_table(self, table_name, db='default'):
"""Get a metastore table object
>>> hh = HiveMetastoreHook()
>>> t = hh.get_table(db='airflow', table_name='static_babynames')
>>> t.tableName
'static_babynames'
>>> [col.name for col in t.sd.cols]
['state', 'year', 'name', 'gender', 'num']
"""
self.metastore._oprot.trans.open()
if db == 'default' and '.' in table_name:
db, table_name = table_name.split('.')[:2]
table = self.metastore.get_table(dbname=db, tbl_name=table_name)
self.metastore._oprot.trans.close()
return table
def get_tables(self, db, pattern='*'):
"""
Get metastore table objects matching the pattern
"""
self.metastore._oprot.trans.open()
tables = self.metastore.get_tables(db_name=db, pattern=pattern)
objs = self.metastore.get_table_objects_by_name(db, tables)
self.metastore._oprot.trans.close()
return objs
def get_databases(self, pattern='*'):
"""
Get metastore databases matching the pattern
"""
self.metastore._oprot.trans.open()
dbs = self.metastore.get_databases(pattern)
self.metastore._oprot.trans.close()
return dbs
def get_partitions(
self, schema, table_name, filter=None):
"""
Returns a list of all partitions in a table. Works only
for tables with fewer than 32767 partitions (the Java short max value).
For subpartitioned tables, the number can easily exceed this.
>>> hh = HiveMetastoreHook()
>>> t = 'static_babynames_partitioned'
>>> parts = hh.get_partitions(schema='airflow', table_name=t)
>>> len(parts)
1
>>> parts
[{'ds': '2015-01-01'}]
"""
self.metastore._oprot.trans.open()
table = self.metastore.get_table(dbname=schema, tbl_name=table_name)
if len(table.partitionKeys) == 0:
raise AirflowException("The table isn't partitioned")
else:
if filter:
parts = self.metastore.get_partitions_by_filter(
db_name=schema, tbl_name=table_name,
filter=filter, max_parts=32767)
else:
parts = self.metastore.get_partitions(
db_name=schema, tbl_name=table_name, max_parts=32767)
self.metastore._oprot.trans.close()
pnames = [p.name for p in table.partitionKeys]
return [dict(zip(pnames, p.values)) for p in parts]
def max_partition(self, schema, table_name, field=None, filter=None):
"""
Returns the maximum value for all partitions in a table. Works only
for tables that have a single partition key. For subpartitioned
tables, we recommend using signal tables.
>>> hh = HiveMetastoreHook()
>>> t = 'static_babynames_partitioned'
>>> hh.max_partition(schema='airflow', table_name=t)
'2015-01-01'
"""
parts = self.get_partitions(schema, table_name, filter)
if not parts:
return None
elif len(parts[0]) == 1:
field = list(parts[0].keys())[0]
elif not field:
raise AirflowException(
"Please specify the field you want the max "
"value for")
return max([p[field] for p in parts])
def table_exists(self, table_name, db='default'):
"""
Check if table exists
>>> hh = HiveMetastoreHook()
>>> hh.table_exists(db='airflow', table_name='static_babynames')
True
>>> hh.table_exists(db='airflow', table_name='does_not_exist')
False
"""
try:
t = self.get_table(table_name, db)
return True
except Exception as e:
return False
class HiveServer2Hook(BaseHook):
"""
Wrapper around the impyla library
Note that the default authMechanism is PLAIN; to override it you
can specify it in the ``extra`` of your connection in the UI,
e.g. ``{"authMechanism": "GSSAPI"}``
"""
def __init__(self, hiveserver2_conn_id='hiveserver2_default'):
self.hiveserver2_conn_id = hiveserver2_conn_id
def get_conn(self):
db = self.get_connection(self.hiveserver2_conn_id)
auth_mechanism = db.extra_dejson.get('authMechanism', 'PLAIN')
kerberos_service_name = None
if configuration.get('core', 'security') == 'kerberos':
auth_mechanism = db.extra_dejson.get('authMechanism', 'GSSAPI')
kerberos_service_name = db.extra_dejson.get('kerberos_service_name', 'hive')
# impyla uses GSSAPI instead of KERBEROS as an auth_mechanism identifier
if auth_mechanism == 'KERBEROS':
logging.warning("Detected deprecated 'KERBEROS' for authMechanism for %s. Please use 'GSSAPI' instead",
self.hiveserver2_conn_id)
auth_mechanism = 'GSSAPI'
from impala.dbapi import connect
return connect(
host=db.host,
port=db.port,
auth_mechanism=auth_mechanism,
kerberos_service_name=kerberos_service_name,
user=db.login,
database=db.schema or 'default')
def get_results(self, hql, schema='default', arraysize=1000):
from impala.error import ProgrammingError
with self.get_conn() as conn:
if isinstance(hql, basestring):
hql = [hql]
results = {
'data': [],
'header': [],
}
cur = conn.cursor()
for statement in hql:
cur.execute(statement)
records = []
try:
# impala Lib raises when no results are returned
# we're silencing here as some statements in the list
# may be `SET` or DDL
records = cur.fetchall()
except ProgrammingError:
logging.debug("get_results returned no records")
if records:
results = {
'data': records,
'header': cur.description,
}
return results
def to_csv(
self,
hql,
csv_filepath,
schema='default',
delimiter=',',
lineterminator='\r\n',
output_header=True,
fetch_size=1000):
schema = schema or 'default'
with self.get_conn() as conn:
with conn.cursor() as cur:
logging.info("Running query: " + hql)
cur.execute(hql)
schema = cur.description
with open(csv_filepath, 'wb') as f:
writer = csv.writer(f,
delimiter=delimiter,
lineterminator=lineterminator,
encoding='utf-8')
if output_header:
writer.writerow([c[0] for c in cur.description])
i = 0
while True:
rows = [row for row in cur.fetchmany(fetch_size) if row]
if not rows:
break
writer.writerows(rows)
i += len(rows)
logging.info("Written {0} rows so far.".format(i))
logging.info("Done. Loaded a total of {0} rows.".format(i))
def get_records(self, hql, schema='default'):
"""
Get a set of records from a Hive query.
>>> hh = HiveServer2Hook()
>>> sql = "SELECT * FROM airflow.static_babynames LIMIT 100"
>>> len(hh.get_records(sql))
100
"""
return self.get_results(hql, schema=schema)['data']
def get_pandas_df(self, hql, schema='default'):
"""
Get a pandas dataframe from a Hive query
>>> hh = HiveServer2Hook()
>>> sql = "SELECT * FROM airflow.static_babynames LIMIT 100"
>>> df = hh.get_pandas_df(sql)
>>> len(df.index)
100
"""
import pandas as pd
res = self.get_results(hql, schema=schema)
df = pd.DataFrame(res['data'])
df.columns = [c[0] for c in res['header']]
return df
|
apache-2.0
|
mzwiessele/topslam
|
topslam/examples.py
|
1
|
5925
|
#===============================================================================
# Copyright (c) 2016, Max Zwiessele
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of topslam nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#===============================================================================
from topslam.plotting import plot_comparison
from topslam.pseudo_time.tree_correction import ManifoldCorrectionTree
def example_deng(optimize=True, plot=True):
import pandas as pd, os
import GPy, numpy as np
from topslam.filtering import filter_RNASeq
# Reproducibility, BGPLVM has local optima
np.random.seed(42)
# This is how we loaded the data:
ulabels = ['Zygote',
'2-cell embryo',
'Early 2-cell blastomere', 'Mid 2-cell blastomere', 'Late 2-cell blastomere',
'4-cell blastomere', '8-cell blastomere', '16-cell blastomere',
'Early blastocyst cell', 'Mid blastocyst cell', 'Late blastocyst cell',
'fibroblast',
'adult liver',
]
folder_path = os.path.expanduser('~/tmp/Deng')
csv_file = os.path.join(folder_path, 'filtered_expression_values.csv')
if os.path.exists(csv_file):
print('Loading previous filtered data: {}'.format(csv_file))
Y_bgplvm = pd.read_csv(csv_file, index_col=[0,1,2], header=0)
else:
print('Loading data:')
data = GPy.util.datasets.singlecell_rna_seq_deng()
if not os.path.exists(folder_path):
os.mkdir(folder_path)
Ydata = data['Y'].copy()
Ydata.columns = Ydata.columns.to_series().apply(str.upper)
Ydata = Ydata.reset_index().set_index('index', append=True)
Ydata['labels'] = data['labels'].values
Ydata = Ydata.set_index('labels', append=True)
Ydata = Ydata.reorder_levels([0,2,1])
Ydata = Ydata.reset_index([0,2]).loc[ulabels].set_index(['level_0', 'index'], append=True)
Y = Ydata.copy()
Y.columns = [c.split('.')[0] for c in Y.columns]
Y_bgplvm = filter_RNASeq(Y)
print('\nSaving data to tmp file: {}'.format(csv_file))
Y_bgplvm.to_csv(csv_file)
labels = Y_bgplvm.index.get_level_values(0).values
Ymean = Y_bgplvm.values.mean()
Ystd = Y_bgplvm.values.std()
Y_m = Y_bgplvm.values
Y_m -= Ymean
Y_m /= Ystd
# get the labels right for split experiments
# get the labels right for 8 and split
new_8_labels = []
for _l in Y_bgplvm.loc['8-cell blastomere'].index.get_level_values(1):
_l = _l.split('-')[0]
if not('split' in _l):
new_8_labels.append('8')
elif not('pooled' in _l):
new_8_labels.append('8 split')
else:
new_8_labels.append('8 split')
labels[labels=='8-cell blastomere'] = new_8_labels
# get the labels right for 16 and split
new_16_labels = []
for _l in Y_bgplvm.loc['16-cell blastomere'].index.get_level_values(1):
_l = _l.split('-')[0]
if not('split' in _l):
new_16_labels.append('16')
elif not('pooled' in _l):
new_16_labels.append('16 split')
else:
new_16_labels.append('16 split')
labels[labels=='16-cell blastomere'] = new_16_labels
ulabels = []
for lab in labels:
if lab not in ulabels:
ulabels.append(lab)
short_labels = labels.copy()
_ulabels_convert = np.array([
'Z',# Z',
'E',# Em',
'2',# Bm E',
'2',# Bm M',
'2',# Bm L',
'4',
'8',
'8 s',
'16',
'16 s',
'Bz',# E',
'Bz',# M',
'Bz',# L'
'F',
'L'
])
short_ulabels = []
for lab, nlab in zip(ulabels, _ulabels_convert):
short_labels[short_labels==lab] = nlab
if nlab not in short_ulabels:
short_ulabels.append(nlab)
from topslam.optimization import run_methods, methods, create_model, optimize_model
X_init, dims = run_methods(Y_m, methods)
m = create_model(Y_m, X_init, num_inducing=25)
m.Ymean = Ymean
m.Ystd = Ystd
m.data_labels = short_labels
m.data_ulabels = short_ulabels
m.data = Y_bgplvm
m.X_init = X_init
m.dims = dims
if optimize:
optimize_model(m)
if plot:
mc = ManifoldCorrectionTree(m)
plot_comparison(mc, X_init, dims, m.data_labels, m.data_ulabels, 0)
return m
|
bsd-3-clause
|
neale/CS-program
|
434-MachineLearning/final_project/linearClassifier/sklearn/cluster/setup.py
|
5
|
1654
|
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import os
from os.path import join
import numpy
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
cblas_libs, blas_info = get_blas_info()
libraries = []
if os.name == 'posix':
cblas_libs.append('m')
libraries.append('m')
config = Configuration('cluster', parent_package, top_path)
config.add_extension('_dbscan_inner',
sources=['_dbscan_inner.cpp'],
include_dirs=[numpy.get_include()],
language="c++")
config.add_extension('_hierarchical',
sources=['_hierarchical.cpp'],
language="c++",
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_extension('_k_means_elkan',
sources=['_k_means_elkan.c'],
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_extension(
'_k_means',
libraries=cblas_libs,
sources=['_k_means.c'],
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=blas_info.pop('extra_compile_args', []),
**blas_info
)
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
|
unlicense
|
dhimmel/seaborn
|
examples/structured_heatmap.py
|
24
|
1304
|
"""
Discovering structure in heatmap data
=====================================
_thumb: .4, .2
"""
import pandas as pd
import seaborn as sns
sns.set(font="monospace")
# Load the brain networks example dataset
df = sns.load_dataset("brain_networks", header=[0, 1, 2], index_col=0)
# Select a subset of the networks
used_networks = [1, 5, 6, 7, 8, 11, 12, 13, 16, 17]
used_columns = (df.columns.get_level_values("network")
.astype(int)
.isin(used_networks))
df = df.loc[:, used_columns]
# Create a custom palette to identify the networks
network_pal = sns.cubehelix_palette(len(used_networks),
light=.9, dark=.1, reverse=True,
start=1, rot=-2)
network_lut = dict(zip(map(str, used_networks), network_pal))
# Convert the palette to vectors that will be drawn on the side of the matrix
networks = df.columns.get_level_values("network")
network_colors = pd.Series(networks).map(network_lut)
# Create a custom colormap for the heatmap values
cmap = sns.diverging_palette(h_neg=210, h_pos=350, s=90, l=30, as_cmap=True)
# Draw the full plot
sns.clustermap(df.corr(), row_colors=network_colors, linewidths=.5,
col_colors=network_colors, figsize=(13, 13), cmap=cmap)
|
bsd-3-clause
|
mirams/PyHillFit
|
python/PyHillFit.py
|
1
|
42057
|
import matplotlib
"""I have found that these two lines are needed on *some* computers to prevent matplotlib figure windows from opening.
In general, I save the figures but do not actually open the matplotlib figure windows.
Try uncommenting this line if annoying unwanted figure windows open."""
matplotlib.use('Agg')
import doseresponse as dr
import numpy as np
import numpy.random as npr
import itertools as it
import time
import sys
import os
import argparse
import scipy.stats as st
try:
import cma
except:
sys.exit("couldn't find module cma")
from distutils.version import LooseVersion
latest_tested_version = "2.6.0"
installed_version = cma.__version__.split()[0]
if LooseVersion(installed_version) < LooseVersion(latest_tested_version):
print "Version {} of cma installed. Latest tested version is {}.".format(installed_version, latest_tested_version)
sys.exit("Please upgrade cma to use PyHillFit: https://pypi.org/project/cma/")
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.patches as mpatches
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--iterations", type=int, help="number of MCMC iterations",default=500000)
parser.add_argument("-t", "--thinning", type=int, help="how often to thin the MCMC, i.e. save every t-th iteration",default=5)
parser.add_argument("-b", "--burn-in-fraction", type=int, help="given N saved MCMC iterations, discard the first N/b as burn-in",default=4)
parser.add_argument("-a", "--all", action='store_true', help='run hierarchical MCMC on all drugs and channels', default=False)
parser.add_argument('-ppp', '--plot-parameter-paths', action='store_true', help='plot the path taken by each parameter through the (thinned) MCMC',default=False)
parser.add_argument("-c", "--num-cores", type=int, help="number of cores to parallelise drug/channel combinations",default=1)
parser.add_argument("-Ne", "--num-expts", type=int, help="how many experiments to fit to", default=0)
parser.add_argument("--num-APs", type=int, help="how many (alpha,mu) samples to take for AP simulations", default=500)
parser.add_argument("--hierarchical", action='store_true', help="run hierarchical MCMC algorithm",default=False)
parser.add_argument("-bfo", "--best-fit-only", action='store_true', help="only do CMA-ES best fit, then quit",default=False)
requiredNamed = parser.add_argument_group('required arguments')
requiredNamed.add_argument("--data-file", type=str, help="csv file from which to read in data, in same format as provided crumb_data.csv", required=True)
requiredNamed.add_argument("-m", "--model", type=int, help="For non-hierarchical (put anything for hierarchical):1. fix Hill=1; 2. vary Hill", required=True)
if len(sys.argv)==1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
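# Example invocation (illustrative; the csv file name is taken from the help text above):
#   python PyHillFit.py --data-file crumb_data.csv --model 2 --hierarchical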
dr.define_model(args.model)
temperature = 1
num_params = dr.num_params
# load data from specified data file
dr.setup(args.data_file)
# list drug and channel options, select from command line
# can select more than one of either
drugs_to_run, channels_to_run = dr.list_drug_channel_options(args.all)
"""if dr.dir_name == "PyHillFit_input_file":
print "Removing hERG from list of channels to run"
try:
channels_to_run.remove("hERG")
except:
pass"""
# log-likelihood (same as log-target for uniform priors) for single-level MCMC
def log_likelihood_single_vary_hill(measurements,doses,theta):
hill, pIC50, sigma = theta
IC50 = dr.pic50_to_ic50(pIC50)
return -len(measurements) * np.log(sigma) - np.sum((measurements-dr.dose_response_model(doses,hill,IC50))**2)/(2.*sigma**2)
# as usual in our MCMC, omitted the -n/2*log(2pi) term from the log-likelihood, as this is always cancelled out
# log-likelihood (same as log-target for uniform priors) for single-level MCMC
def log_likelihood_single_fix_hill(measurements,doses,theta):
# using hill = 1, but not bothering to assign it
pIC50, sigma = theta
IC50 = dr.pic50_to_ic50(pIC50)
return -len(measurements) * np.log(sigma) - np.sum((measurements-dr.dose_response_model(doses,1,IC50))**2)/(2.*sigma**2)
# as usual in our MCMC, omitted the -n/2*log(2pi) term from the log-likelihood, as this is always cancelled out
# for finding starting point for MCMC, so if we later decide pIC50 can go down to -2, it doesn't matter, it will just take a few
# iterations to decide if it wants to go in that direction
def sum_of_square_diffs(_params,doses,responses):
pIC50, hill = _params
IC50 = dr.pic50_to_ic50(pIC50)
test_responses = dr.dose_response_model(doses,hill,IC50)
return np.sum((test_responses-responses)**2)
# analytic solution for sigma to maximise likelihood from Normal distribution
def initial_sigma(n,sum_of_squares):
return np.sqrt((1.*sum_of_squares)/n)
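# (For a Normal likelihood with n residuals, setting d(log L)/d(sigma) = 0 gives
# sigma^2 = sum_of_squares/n, hence the square root above.)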
# for all parts of the log target distribution:
# -inf is ok, NaN is not!
# np.log(negative) = nan, so we need to catch negatives first and set the target to -inf
# this is ok because it's equivalent to the parameter being in a region of 0 likelihood
# therefore I've put loads of warning messages, and it will abort if it doesn't catch any problems
# if CMA-ES finds a good (legal) starting point for the MCMC, it shouldn't really get any NaNs
def log_data_likelihood(hill_is,pic50_is,sigma,experiments):
Ne = len(experiments)
answer = 0.
for i in range(Ne):
ic50 = dr.pic50_to_ic50(pic50_is[i])
concs = experiments[i][:,0]
num_expt_pts = len(concs)
data = experiments[i][:,1]
model_responses = dr.dose_response_model(concs,hill_is[i],ic50)
exp_bit = np.sum((data-model_responses)**2)/(2*sigma**2)
# assuming noise Normal is truncated at 0 and 100
truncated_scale = np.sum(np.log(st.norm.cdf(100,model_responses,sigma)-st.norm.cdf(0,model_responses,sigma)))
answer -= (num_expt_pts*np.log(sigma) + exp_bit + truncated_scale)
if np.isnan(answer):
print "NaN from log_data_likelihood!"
print "hill_is =", hill_is
print "pic50_is =", pic50_is
print "sigma =", sigma
sys.exit()
return answer
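# For reference, the log-logistic pdf is f(x; alpha, beta) = (beta/alpha)*(x/alpha)**(beta-1) / (1 + (x/alpha)**beta)**2;
# the function below returns its logarithm.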
def log_hill_i_log_logistic_likelihood(x,alpha,beta):
answer = np.log(beta) - beta*np.log(alpha) + (beta-1.)*np.log(x) - 2*np.log(1+(x/alpha)**beta)
if np.any(np.isnan(answer)):
print "NaN from log_hill_i_log_logistic_likelihood!"
print "x =", x
print "alpha =", alpha
print "beta =", beta
sys.exit()
return answer
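# Similarly, the logistic pdf is f(x; mu, s) = exp(-(x-mu)/s) / (s*(1 + exp(-(x-mu)/s))**2);
# the function below returns its logarithm.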
def log_pic50_i_logistic_likelihood(x,mu,s):
temp_bit = (x-mu)/s
answer = -temp_bit - np.log(s) - 2*np.log(1+np.exp(-temp_bit))
if np.any(np.isnan(answer)):
print "NaN from log_pic50_i_logistic_likelihood!"
print "x =", x
print "mu =", mu
print "s =", s
sys.exit()
else:
return answer
def log_beta_prior(x,alpha,beta,a,b):
if (x<a) or (x>b):
return -np.inf
else:
answer = (alpha-1)*np.log(x-a) + (beta-1)*np.log(b-x)
if np.isnan(answer):
print "NaN from log_beta_prior!"
print "x =", x
print "alpha =", alpha
print "beta =", beta
print "a =", a
print "b =", b
sys.exit()
else:
return answer
def log_target_distribution(experiments,theta,shapes,scales,locs):
dim = len(theta)
Ne = len(experiments)
if np.any(theta[:4] <= locs[:4]):
return -np.inf
alpha,beta,mu,s = theta[:4]
pic50_is = theta[4:-1:2]
hill_is = theta[5:-1:2]
sigma = theta[-1]
if np.any(hill_is<0) or np.any(pic50_is<pic50_prior[0]) or (sigma<=locs[-1]): # these are just checking if in support of prior, roughly
return -np.inf
total = log_data_likelihood(hill_is,pic50_is,sigma,experiments)
total += np.sum(log_hill_i_log_logistic_likelihood(hill_is,alpha,beta))
total += np.sum(log_pic50_i_logistic_likelihood(pic50_is,mu,s))
total += np.sum(dr.log_gamma_prior(theta[[0,1,2,3,-1]],shapes,scales,locs))
if np.isnan(total):
print "NaN from log_target_distribution!"
print "theta =", theta
sys.exit()
else:
return total
def log_logistic_mode(alpha,beta): # from Wikipedia
return alpha * ((beta-1.)/(beta+1.))**(1./beta)
def log_logistic_variance(alpha,beta): # from Wikipedia
return alpha**2 * (2*np.pi/(beta*np.sin(2*np.pi/beta)) - (np.pi/(beta*np.sin(np.pi/beta)))**2)
def logistic_mode(mu,s): # from Wikipedia
return mu
def logistic_variance(mu,s): # from Wikipedia
return np.pi**2 * s**2 / 3.
# hierarchical MCMC
def run_hierarchical(drug_channel):
global pic50_prior
pic50_prior = [-2.] # bad way to deal with sum_of_square_diffs in hierarchical case
global pic50_hill_priors_lowers
pic50_hill_priors_lowers = np.array([-2., 0.])
drug, channel = drug_channel
print "\n\n{} + {}\n\n".format(drug,channel)
# for reproducible results, otherwise choose a different seed
seed = 1
num_expts, experiment_numbers, experiments = dr.load_crumb_data(drug,channel)
if (0 < (args.num_expts) < num_expts):
num_expts = args.num_expts
experiment_numbers = [x for x in experiment_numbers[:num_expts]]
experiments = [x for x in experiments[:num_expts]]
elif (args.num_expts==0):
print "Fitting to all datasets\n"
else:
print "You've asked to fit to an impossible number of experiments for {} + {}\n".format(drug,channel)
print "Therefore proceeding with all experiments in the input data file\n"
# set up where to save chains and figures to
# also renames anything with a '/' in its name and changes it to a '_'
drug, channel, output_dir, chain_dir, figs_dir, chain_file = dr.hierarchical_output_dirs_and_chain_file(drug,channel,num_expts)
best_fits = []
for expt in experiment_numbers:
start = time.time()
x0 = np.array([2.5, 1.]) # (pIC50,Hill) not fitting sigma by CMA-ES
sigma0 = 0.1
opts = cma.CMAOptions()
opts['seed'] = expt
es = cma.CMAEvolutionStrategy(x0, sigma0, opts)
while not es.stop():
X = es.ask()
es.tell(X, [sum_of_square_diffs(x**2+pic50_hill_priors_lowers,experiments[expt][:,0],experiments[expt][:,1]) for x in X])
res = es.result
best_fits.append(np.concatenate((res[0]**2+pic50_hill_priors_lowers, [initial_sigma(len(experiments[expt][:,0]),res[1])])))
best_fits = np.array(best_fits)
fig = plt.figure(figsize=(5.5,4.5))
ax = fig.add_subplot(111)
ax.set_xscale('log')
xmin = 1000
xmax = -1000
for expt in experiments:
a = np.min(expt[:,0])
b = np.max(expt[:,0])
if a < xmin:
xmin = a
if b > xmax:
xmax = b
xmin = int(np.log10(xmin))-1
xmax = int(np.log10(xmax))+3
num_x_pts = 101
x = np.logspace(xmin,xmax,num_x_pts)
# from http://colorbrewer2.org
colors = ['#a6cee3','#1f78b4','#b2df8a','#33a02c','#fb9a99','#e31a1c','#fdbf6f','#ff7f00','#cab2d6','#6a3d9a','#ffff99','#b15928']
skip_best_fits_plot = False
if (num_expts>len(colors)):
skip_best_fits_plot = True
print "Not enough colours to print all experiments' best fits, so skipping that"
if (not skip_best_fits_plot):
for expt in experiment_numbers:
print "best_fits:", best_fits
print "best_fits[{}]:".format(expt), best_fits[expt]
ax.plot(x, dr.dose_response_model(x, best_fits[expt,1], dr.pic50_to_ic50(best_fits[expt,0])),color=colors[expt],lw=2)
ax.scatter(experiments[expt][:,0],experiments[expt][:,1],label='Expt {}'.format(expt+1),color=colors[expt],s=100)
ax.set_ylim(0,100)
ax.set_xlim(min(x),max(x))
ax.set_xlabel(r'{} concentration ($\mu$M)'.format(drug))
ax.set_ylabel('% {} block'.format(channel))
ax.legend(loc=2)
ax.grid()
ax.set_title('Hills = {}\nIC50s = {}'.format([round(best_fits[expt,1],1) for expt in experiment_numbers],[round(dr.pic50_to_ic50(best_fits[expt,0]),1) for expt in experiment_numbers]))
fig.tight_layout()
fig.savefig(figs_dir+'{}_{}_cma-es_best_fits.png'.format(drug,channel))
fig.savefig(figs_dir+'{}_{}_cma-es_best_fits.pdf'.format(drug,channel))
plt.close()
locs = np.array([0.,2.,-4,0.01,dr.sigma_loc]) # lower bounds for alpha,beta,mu,s,sigma
sigma_cur = np.mean(best_fits[:,-1])
if (sigma_cur <= locs[3]):
sigma_cur = locs[3]+0.1
print "sigma_cur =", sigma_cur
# find initial alpha and beta values by fitting log-logistic distribution to best fits
# there is an inbuilt fit function, but I found it to be unreliable for some reason
x0 = np.array([0.5,0.5])
sigma0 = 0.1
opts = cma.CMAOptions()
opts['seed'] = 1
es = cma.CMAEvolutionStrategy(x0, sigma0, opts)
while not es.stop():
X = es.ask()
es.tell(X, [-np.product(st.fisk.pdf(best_fits[:,1],c=x[1],scale=x[0],loc=0)) for x in X])
res = es.result
alpha_cur, beta_cur = np.copy(res[0])
if alpha_cur <= locs[0]:
alpha_cur = locs[0]+0.1
if beta_cur <= locs[1]:
beta_cur = locs[1]+0.1
# here I have used the fit function, for some reason this one worked more consistently
# but again, the starting point for MCMC is not too important
# a bad starting position can increase the time you have to run MCMC for to get a "converged" output
# at worst, it can get stuck in a local optimum, but we haven't found this to be a problem yet
mu_cur, s_cur = st.logistic.fit(best_fits[:,0])
if mu_cur <= locs[2]:
mu_cur = locs[2]+0.1
if s_cur <= locs[3]:
s_cur = locs[3]+0.1
first_iteration = np.concatenate(([alpha_cur,beta_cur,mu_cur,s_cur],best_fits[:,:-1].flatten(),[sigma_cur]))
print "first mcmc iteration:\n", first_iteration
# these are the numbers taken straight from Elkins (see paper for reference)
elkins_hill_alphas = np.array([1.188, 1.744, 1.530, 0.930, 0.605, 1.325, 1.179, 0.979, 1.790, 1.708, 1.586, 1.469, 1.429, 1.127, 1.011, 1.318, 1.063])
elkins_hill_betas = 1./np.array([0.0835, 0.1983, 0.2089, 0.1529, 0.1206, 0.2386, 0.2213, 0.2263, 0.1784, 0.1544, 0.2486, 0.2031, 0.2025, 0.1510, 0.1837, 0.1677, 0.0862])
elkins_pic50_mus = np.array([5.235,5.765,6.060,5.315,5.571,7.378,7.248,5.249,6.408,5.625,7.321,6.852,6.169,6.217,5.927,7.414,4.860])
elkins_pic50_sigmas = np.array([0.0760,0.1388,0.1459,0.2044,0.1597,0.2216,0.1856,0.1560,0.1034,0.1033,0.1914,0.1498,0.1464,0.1053,0.1342,0.1808,0.0860])
elkins = [elkins_hill_alphas,elkins_hill_betas,elkins_pic50_mus,elkins_pic50_sigmas]
# building Gamma prior distributions for alpha,beta,mu,s(,sigma, but sigma not from elkins)
# wide enough to cover Elkins values and allow room for extra variation
alpha_mode = np.mean(elkins_hill_alphas)
beta_mode = np.mean(elkins_hill_betas)
mu_mode = np.mean(elkins_pic50_mus)
s_mode = np.mean(elkins_pic50_sigmas)
sigma_mode = dr.sigma_mode
modes = np.array([alpha_mode, beta_mode-2., mu_mode, s_mode, sigma_mode])
print "modes:", modes
# designed for priors to have modes at means of elkins data, but width is more important
shapes = np.array([5.,2.5,7.5,2.5,dr.sigma_shape]) # must all be greater than 1
scales = (modes-locs)/(shapes-1.)
labels = [r'$\alpha$',r'$\beta$',r'$\mu$',r'$s$',r'$\sigma$']
file_labels = ['alpha','beta','mu','s','sigma']
# ranges to plot priors
mins = [0,0,-5,0,0]
maxs = [8,22,20,2,25]
prior_xs = []
priors = []
total_axes = (6,4)
fig = plt.figure(figsize=(6,7))
for i in range(len(labels)-1):
if i==0:
axloc = (0,0)
elif i==1:
axloc = (0,2)
elif i==2:
axloc = (2,0)
elif i==3:
axloc = (2,2)
ax = plt.subplot2grid(total_axes, axloc,colspan=2,rowspan=2)
x_prior = np.linspace(mins[i],maxs[i],501)
prior = st.gamma.pdf(x_prior,a=shapes[i],scale=scales[i],loc=locs[i])
prior_xs.append(x_prior)
priors.append(prior)
ax.plot(x_prior,prior,label='Gamma prior',lw=2)
ax.set_xlabel(labels[i])
ax.set_ylabel('Probability density')
ax.set_xlim(mins[i],maxs[i])
ax.grid()
priormax = np.max(prior)
hist, bin_edges = np.histogram(elkins[i], bins=10)
histmax = np.max(hist)
w = bin_edges[1]-bin_edges[0]
bin_centers = (bin_edges[:-1] + bin_edges[1:]) / 2.
# scaled histogram just to fit plot better, but this scaling doesn't matter
ax.bar(bin_edges[:-1], priormax/histmax*hist, width=w,color='gray',edgecolor='grey')
i = len(labels)-1
ax = plt.subplot2grid(total_axes, (4,1),colspan=2,rowspan=2)
x_prior = np.linspace(mins[i],maxs[i],501)
prior = st.gamma.pdf(x_prior,a=shapes[i],scale=scales[i],loc=locs[i])
ax.plot(x_prior,prior,label='Gamma prior',lw=2)
prior_xs.append(x_prior)
priors.append(prior)
ax.set_xlabel(labels[i])
ax.set_ylabel('Probability density')
ax.set_xlim(mins[i],maxs[i])
ax.grid()
fig.tight_layout()
fig.savefig(figs_dir+'all_prior_distributions.png')
fig.savefig(figs_dir+'all_prior_distributions.pdf')
plt.close()
#sys.exit # uncomment this if you just want to plot the priors and then quit
# create/wipe MCMC output file
with open(chain_file,'w') as outfile:
outfile.write("# Hill ~ log-logistic(alpha,beta), pIC50 ~ logistic(mu,s)\n")
outfile.write("# alpha, beta, mu, s, hill_1, pic50_1, hill_2, pic50_2, ..., hill_Ne, pic50_Ne, sigma\n") # this is the order of parameters stored in the chain
# have to choose initial covariance matrix for proposal distribution
# we set it to a diagonal with entries scaled to the initial parameter values
first_cov = np.diag(0.01*np.abs(first_iteration))
mean_estimate = np.copy(first_iteration)
dim = len(first_iteration)
# we do not start adaptation straight away
# just to give the algorithm a chance to look around
# many of these pre-adaptation proposals will probably be rejected if the initial step size is too large
when_to_adapt = 100*dim
theta_cur = np.copy(first_iteration)
cov_cur = np.copy(first_cov)
print "theta_cur =", theta_cur
log_target_cur = log_target_distribution(experiments,theta_cur,shapes,scales,locs)
print "initial log_target_cur =", log_target_cur
# effectively step size, scales covariance matrix
loga = 0.
# what fraction of proposed samples are being accepted into the chain
acceptance = 0.
# what fraction of samples we WANT accepted into the chain
# loga updates itself to try to make this dream come true
target_acceptance = 0.25
# perform thinning to reduce autocorrelation (make saved iterations more closely represent independent samples from target distribution)
# also saves file space, win win
thinning = args.thinning
try:
total_iterations = args.iterations
except:
total_iterations = 200000
# after what fraction of total_iterations to print a little status message
status_when = 10000
saved_iterations = total_iterations/thinning+1
pre_thin_burn = total_iterations/4
# we discard the first quarter of iterations, as this generally covers the burn-in
burn = saved_iterations/4
# pre-allocate the space for MCMC iterations
# not a problem when we don't need to do LOADS of iterations
# but might become more of a hassle if we wanted to run it for ages along with loads of parameters
chain = np.zeros((saved_iterations,dim+1))
chain[0,:] = np.copy(np.concatenate((first_iteration,[log_target_cur])))
# MCMC!
start = time.time()
t = 1
while t <= total_iterations:
theta_star = npr.multivariate_normal(theta_cur, np.exp(loga)*cov_cur)
log_target_star = log_target_distribution(experiments,theta_star,shapes,scales,locs)
accept_prob = npr.rand()
if (np.log(accept_prob) < log_target_star - log_target_cur):
theta_cur = theta_star
log_target_cur = log_target_star
accepted = 1
else:
accepted = 0
acceptance = ((t-1.)*acceptance + accepted)/t
if (t>when_to_adapt):
s = t - when_to_adapt
gamma_s = 1/(s+1)**0.6
temp_covariance_bit = np.array([theta_cur-mean_estimate])
cov_cur = (1-gamma_s) * cov_cur + gamma_s * np.dot(np.transpose(temp_covariance_bit),temp_covariance_bit)
mean_estimate = (1-gamma_s) * mean_estimate + gamma_s * theta_cur
loga += gamma_s*(accepted-target_acceptance)
if t%thinning==0:
chain[t/thinning,:] = np.concatenate((np.copy(theta_cur),[log_target_cur]))
if (t%status_when==0):
print "{} / {}".format(t/status_when,total_iterations/status_when)
time_taken_so_far = time.time()-start
estimated_time_left = time_taken_so_far/t*(total_iterations-t)
print "Time taken: {} s = {} min".format(np.round(time_taken_so_far,1),np.round(time_taken_so_far/60,2))
print "acceptance = {}".format(np.round(acceptance,5))
print "Estimated time remaining: {} s = {} min".format(np.round(estimated_time_left,1),np.round(estimated_time_left/60,2))
t += 1
print "**********"
print "final_iteration =", chain[-1,:]
with open(chain_file,'a') as outfile:
np.savetxt(outfile,chain)
# save (alpha,mu) samples to be used as (Hill,pIC50) values in AP simulations
# these are direct 'top-level' samples, not samples from the posterior predictive distributions
indices = npr.randint(burn,saved_iterations,args.num_APs)
samples_file = dr.alpha_mu_downsampling(drug,channel)
AP_samples = chain[indices,:]
print "saving (alpha,mu) samples to", samples_file
with open(samples_file,'w') as outfile:
outfile.write('# {} (alpha,mu) samples from hierarchical MCMC for {} + {}\n'.format(args.num_APs,drug,channel))
np.savetxt(outfile,AP_samples[:,[0,2]])
# this can be a quick visual check to see if the chain is mixing well
# it will plot one big tall figure with all parameter paths plotted
if args.plot_parameter_paths:
fig = plt.figure(figsize=(10,4*dim))
ax0 = fig.add_subplot(dim,1,1)
ax0.plot(chain[:,0])
ax0.set_ylabel(r'$\alpha$')
plt.setp(ax0.get_xticklabels(), visible=False)
for i in range(1,dim):
ax = fig.add_subplot(dim,1,i+1,sharex=ax0)
ax.plot(chain[:t,i])
if i < dim-1:
plt.setp(ax.get_xticklabels(), visible=False)
if i==1:
y_label = r'$\beta$'
elif i==2:
y_label = r'$\mu$'
elif i==3:
y_label = r'$s$'
elif (i%2==0)and(i<dim-1):
y_label = r'$pIC50_{'+str(i/2-1)+'}$'
elif (i<dim-1):
y_label = r'$Hill_{'+str(i/2-1)+'}$'
else:
y_label = r'$\sigma$'
ax.set_xlabel('Iteration (thinned)')
ax.set_ylabel(y_label)
fig.tight_layout()
fig.savefig(figs_dir+'{}_{}_parameter_paths.png'.format(drug,channel))
plt.close()
# plot all marginal posteriors separately, after discarding burn-in
# also a good visual check to see if it looks like they have converged
marginals_dir = figs_dir+'marginals/png/'
if not os.path.exists(marginals_dir):
os.makedirs(marginals_dir)
for i in range(dim):
fig = plt.figure(figsize=(5,4))
ax = fig.add_subplot(111)
ax.hist(chain[burn:,i],bins=50,normed=True,color='blue',edgecolor='blue')
ax.set_ylabel('Marginal probability density')
if i==0:
x_label = r'$\alpha$'
filename = 'alpha'
elif i==1:
x_label = r'$\beta$'
filename = 'beta'
elif i==2:
x_label = r'$\mu$'
filename = 'mu'
elif i==3:
x_label = r'$s$'
filename = 's'
elif (i%2==0)and(i<dim-1):
x_label = r'$pIC50_{'+str(i/2-1)+'}$'
filename = 'pic50_{}'.format(i/2-1)
elif (i<dim-1):
x_label = r'$Hill_{'+str(i/2-1)+'}$'
filename = 'hill_{}'.format(i/2-1)
else:
x_label = r'$\sigma$'
filename = 'sigma'
ax.set_xlabel(x_label)
fig.tight_layout()
fig.savefig(marginals_dir+'{}_{}_{}_marginal.png'.format(drug,channel,filename))
#fig.savefig(marginals_dir+'{}_{}_{}_marginal.pdf'.format(drug,channel,filename))
plt.close()
total_axes = (6,4)
fig = plt.figure(figsize=(6,7))
for i in range(5): # have to do sigma separately
if i==0:
axloc = (0,0)
elif i==1:
axloc = (0,2)
elif i==2:
axloc = (2,0)
elif i==3:
axloc = (2,2)
elif i==4:
axloc = (4,0)
ax = plt.subplot2grid(total_axes, axloc,colspan=2,rowspan=2)
ax.set_xlabel(labels[i])
ax.set_ylabel('Probability density')
ax.grid()
if (i<4):
min_sample = np.min(chain[burn:,i])
max_sample = np.max(chain[burn:,i])
ax.hist(chain[burn:,i],bins=50,normed=True,color='blue',edgecolor='blue')
elif (i==4):
min_sample = np.min(chain[burn:,-2])
max_sample = np.max(chain[burn:,-2])
ax.hist(chain[burn:,-2],bins=50,normed=True,color='blue',edgecolor='blue') # -1 would be log-target
ax.set_xlim(min_sample,max_sample)
pts_in_this_range = np.where((prior_xs[i] >= min_sample) & (prior_xs[i] <= max_sample))
x_in_this_range = prior_xs[i][pts_in_this_range]
prior_in_this_range = priors[i][pts_in_this_range]
line = ax.plot(x_in_this_range,prior_in_this_range,lw=2,color='red',label='Prior distributions')
if (i==0 or i==3):
plt.xticks(rotation=90)
leg_ax = plt.subplot2grid(total_axes, (4,2),colspan=2,rowspan=2)
leg_ax.axis('off')
hist = mpatches.Patch(color='blue', label='Normalised histograms')
leg_ax.legend(handles=line+[hist],loc="center",fontsize=12,bbox_to_anchor=[0.38,0.7])
fig.tight_layout()
fig.savefig(figs_dir+'all_prior_distributions_and_marginals.png')
fig.savefig(figs_dir+'all_prior_distributions_and_marginals.pdf')
plt.close()
print "Marginal plots saved in", marginals_dir
print "\n\n{} + {} complete!\n\n".format(drug,channel)
# single-level MCMC
def run_single_level(drug_channel):
drug, channel = drug_channel
print "\n\n{} + {}\n\n".format(drug,channel)
seed = 100
try:
num_expts, experiment_numbers, experiments = dr.load_crumb_data(drug,channel)
except:
print "Problem loading data, guessing there are no entries for {} + {} --- skipping".format(drug, channel)
return None
drug,channel,chain_file,images_dir = dr.nonhierarchical_chain_file_and_figs_dir(args.model, drug, channel, temperature)
concs = np.array([])
responses = np.array([])
for i in xrange(num_expts):
concs = np.concatenate((concs,experiments[i][:,0]))
responses = np.concatenate((responses,experiments[i][:,1]))
if np.any(np.isnan(responses)):
print "Skipping {} because of empty responses / missing data".format(drug_channel)
return None
#print experiments
#print concs
#print responses
where_r_0 = responses==0
where_r_100 = responses==100
where_r_other = (0<responses) & (responses<100)
#print "where_r_0:", where_r_0
#print "where_r_100:", where_r_100
#print "where_r_other:", where_r_other
pi_bit = dr.compute_pi_bit_of_log_likelihood(where_r_other)
# plot priors
for i in xrange(num_params):
fig = plt.figure(figsize=(4,3))
ax = fig.add_subplot(111)
ax.grid()
ax.plot(dr.prior_xs[i], dr.prior_pdfs[i], color='blue', lw=2)
ax.set_xlabel(dr.labels[i])
ax.set_ylabel("Prior pdf")
fig.tight_layout()
fig.savefig(images_dir+dr.file_labels[i]+"_prior_pdf.pdf")
plt.close()
start = time.time()
sigma0 = 0.1
opts = cma.CMAOptions()
opts['seed'] = seed
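# CMA-ES is used here only to find a good starting point for the MCMC: it minimises the
# sum of squared residuals between the dose-response model and the data, with parameters
# searched as x**2 plus their lower bound so the optimiser cannot violate that bound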
if args.model==1:
#x0 = np.array([2.5, 3.])
x0 = np.array([2.5, 1.])
es = cma.CMAEvolutionStrategy(x0, sigma0, opts)
while not es.stop():
X = es.ask()
#es.tell(X, [-dr.log_target(responses, where_r_0, where_r_100, where_r_other, concs, x**2 + [dr.pic50_exp_lower,dr.sigma_uniform_lower], temperature, pi_bit) for x in X])
es.tell(X, [sum_of_square_diffs([x[0]**2+dr.pic50_exp_lower, 1.],concs,responses) for x in X])
es.disp()
res = es.result
#pic50_cur, sigma_cur = res[0]**2 + [dr.pic50_exp_lower, dr.sigma_uniform_lower]
pic50_cur = res[0][0]**2 + dr.pic50_exp_lower
hill_cur = 1
elif args.model==2:
#x0 = np.array([2.5, 1., 3.])
x0 = np.array([2.5, 1.])
es = cma.CMAEvolutionStrategy(x0, sigma0, opts)
while not es.stop():
X = es.ask()
#es.tell(X, [-dr.log_target(responses, where_r_0, where_r_100, where_r_other, concs, x**2 + [dr.pic50_exp_lower, dr.hill_uniform_lower, dr.sigma_uniform_lower], temperature, pi_bit) for x in X])
es.tell(X, [sum_of_square_diffs(x**2+[dr.pic50_exp_lower, dr.hill_uniform_lower],concs,responses) for x in X])
es.disp()
res = es.result
#pic50_cur, hill_cur, sigma_cur = res[0]**2 + [dr.pic50_exp_lower, dr.hill_uniform_lower, dr.sigma_uniform_lower]
pic50_cur, hill_cur = res[0]**2 + [dr.pic50_exp_lower, dr.hill_uniform_lower]
sigma_cur = initial_sigma(len(responses),res[1])
#print "sigma_cur:", sigma_cur
if args.model==1:
theta_cur = np.array([pic50_cur,sigma_cur])
elif args.model==2:
theta_cur = np.array([pic50_cur,hill_cur,sigma_cur])
#print "theta_cur:", theta_cur
best_params_file = images_dir+"{}_{}_best_fit_params.txt".format(drug, channel)
with open(best_params_file, "w") as outfile:
outfile.write("# CMA-ES best fit params\n")
if args.model==1:
outfile.write("# pIC50, sigma, (Hill=1, not included)\n")
elif args.model==2:
outfile.write("# pIC50, Hill, sigma\n")
np.savetxt(outfile, [theta_cur])
proposal_scale = 0.05
mean_estimate = np.copy(theta_cur)
cov_estimate = proposal_scale*np.diag(np.copy(np.abs(theta_cur)))
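# initial random-walk proposal covariance: diagonal, with each entry set to 5% of the
# corresponding parameter's magnitude; it is adapted once the chain has run for a while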
cmaes_ll = dr.log_target(responses, where_r_0, where_r_100, where_r_other, concs, theta_cur, temperature, pi_bit)
#print "cmaes_ll:", cmaes_ll
best_fit_fig = plt.figure(figsize=(5,4))
best_fit_ax = best_fit_fig.add_subplot(111)
best_fit_ax.set_xscale('log')
best_fit_ax.grid()
if np.min(concs) == 0:
plot_lower_lim = int(np.log10(np.min(concs[np.nonzero(concs)])))-2
else:
plot_lower_lim = int(np.log10(np.min(concs)))-2
plot_upper_lim = int(np.log10(np.max(concs)))+2
best_fit_ax.set_xlim(10**plot_lower_lim,10**plot_upper_lim)
best_fit_ax.set_ylim(0,100)
num_x_pts = 1001
x_range = np.logspace(plot_lower_lim,plot_upper_lim,num_x_pts)
best_fit_curve = dr.dose_response_model(x_range,hill_cur,dr.pic50_to_ic50(pic50_cur))
best_fit_ax.plot(x_range,best_fit_curve,label='Best fit',lw=2)
best_fit_ax.set_ylabel('% {} block'.format(channel))
best_fit_ax.set_xlabel(r'{} concentration ($\mu$M)'.format(drug))
best_fit_ax.set_title(r'$pIC50 = {}, Hill = {}; SS = {}$'.format(np.round(pic50_cur,2),np.round(hill_cur,2),round(res[1],2)))
best_fit_ax.plot(concs,responses,"o",color='orange',ms=10,label='Data',zorder=10)
best_fit_ax.legend(loc=2)
best_fit_fig.tight_layout()
best_fit_fig.savefig(images_dir+'{}_{}_model_{}_CMA-ES_best_fit.png'.format(drug,channel,args.model))
best_fit_fig.savefig(images_dir+'{}_{}_model_{}_CMA-ES_best_fit.pdf'.format(drug,channel,args.model))
plt.close()
if args.best_fit_only:
print "\nStopping {}+{} after doing and plotting best fit\n".format(drug, channel)
return None
# let the MCMC look around for a bit before starting to adapt the proposal covariance matrix
# (here 1000 iterations per parameter; cf. the analogous rule in the hierarchical case)
when_to_adapt = 1000*num_params
log_target_cur = dr.log_target(responses, where_r_0, where_r_100, where_r_other, concs, theta_cur, temperature, pi_bit)
#print "initial log_target_cur =", log_target_cur
# effectively step size, scales covariance matrix
loga = 0.
# what fraction of proposed samples are being accepted into the chain
acceptance = 0.
# what fraction of samples we WANT accepted into the chain
# loga updates itself to try to make this dream come true
target_acceptance = 0.25
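# (0.234 is the asymptotically optimal acceptance rate for random-walk Metropolis on
# high-dimensional targets, Roberts, Gelman & Gilks 1997; 0.25 is a common approximation)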
total_iterations = args.iterations
thinning = args.thinning
assert(total_iterations%thinning==0)
# how often to print a little status message
status_when = total_iterations / 20
saved_iterations = total_iterations/thinning+1
# also want to store log-target value at each iteration
chain = np.zeros((saved_iterations,num_params+1))
chain[0,:] = np.concatenate((np.copy(theta_cur),[log_target_cur]))
#print chain[0]
#print "concs:", concs
#print "responses:", responses
# for reproducible results, otherwise select a new random seed
seed = 25
npr.seed(seed)
# MCMC!
t = 1
start = time.time()
while t <= total_iterations:
theta_star = npr.multivariate_normal(theta_cur,np.exp(loga)*cov_estimate)
accepted = 0
log_target_star = dr.log_target(responses, where_r_0, where_r_100, where_r_other, concs, theta_star, temperature, pi_bit)
accept_prob = npr.rand()
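# Metropolis accept/reject in log space: accept theta_star if log(u) < log pi(theta_star) - log pi(theta_cur),
# i.e. with probability min(1, pi(theta_star)/pi(theta_cur)); the Gaussian random walk is a
# symmetric proposal, so no Hastings correction is needed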
if (np.log(accept_prob) < log_target_star - log_target_cur):
theta_cur = theta_star
log_target_cur = log_target_star
accepted = 1
acceptance = ((t-1.)*acceptance + accepted)/t
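# adaptive Metropolis: after the warm-up period, recursively update the running mean and
# covariance of the chain with a decaying gain gamma_s = 1/(s+1)**0.6, and nudge the log
# step size loga up or down (Robbins-Monro style) so the empirical acceptance rate
# drifts towards target_acceptance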
if (t>when_to_adapt):
s = t - when_to_adapt
gamma_s = 1/(s+1)**0.6
temp_covariance_bit = np.array([theta_cur-mean_estimate])
cov_estimate = (1-gamma_s) * cov_estimate + gamma_s * np.dot(np.transpose(temp_covariance_bit),temp_covariance_bit)
mean_estimate = (1-gamma_s) * mean_estimate + gamma_s * theta_cur
loga += gamma_s*(accepted-target_acceptance)
if (t%thinning==0):
chain[t/thinning,:] = np.concatenate((np.copy(theta_cur),[log_target_cur]))
if (t%status_when==0):
#print "{} / {}".format(t/status_when,total_iterations/status_when)
time_taken_so_far = time.time()-start
estimated_time_left = time_taken_so_far/t*(total_iterations-t)
#print "Time taken: {} s = {} min".format(np.round(time_taken_so_far,1),np.round(time_taken_so_far/60,2))
#print "acceptance = {}".format(np.round(acceptance,5))
#print "Estimated time remaining: {} s = {} min".format(np.round(estimated_time_left,1),np.round(estimated_time_left/60,2))
t += 1
#print "\nTime taken to do {} MCMC iterations: {} s\n".format(total_iterations, time.time()-start)
#print "Final iteration:", chain[-1,:], "\n"
burn_fraction = args.burn_in_fraction
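# discard the first 1/burn_in_fraction of the saved samples as burn-in
# (integer division, so e.g. burn_in_fraction=4 drops the first quarter of the chain)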
burn = saved_iterations/burn_fraction
chain = chain[burn:,:] # remove burn-in before saving
with open(chain_file,'w') as outfile:
outfile.write('# Nonhierarchical MCMC output for {} + {}: (Hill,pIC50,sigma,log-target)\n'.format(drug,channel))
np.savetxt(outfile,chain)
best_ll_index = np.argmax(chain[:,num_params])
best_ll_row = chain[best_ll_index,:]
#print "Best log-likelihood:", "\n", best_ll_row
figs = []
axs = []
# plot all marginal posterior distributions
for i in range(num_params):
figs.append(plt.figure())
axs.append([])
axs[i].append(figs[i].add_subplot(211))
axs[i][0].hist(chain[:,i], bins=40, normed=True, color='blue', edgecolor='blue')
axs[i][0].legend()
axs[i][0].set_title("MCMC marginal distributions")
axs[i][0].set_ylabel("Normalised frequency")
axs[i][0].grid()
plt.setp(axs[i][0].get_xticklabels(), visible=False)
axs[i].append(figs[i].add_subplot(212,sharex=axs[i][0]))
axs[i][1].plot(chain[:,i],range(burn,saved_iterations))
axs[i][1].invert_yaxis()
axs[i][1].set_xlabel(dr.labels[i])
axs[i][1].set_ylabel('Saved MCMC iteration')
axs[i][1].grid()
figs[i].tight_layout()
figs[i].savefig(images_dir+'{}_{}_model_{}_{}_marginal.png'.format(drug,channel,args.model,dr.file_labels[i]))
plt.close()
# plot log-target path
fig2 = plt.figure()
ax3 = fig2.add_subplot(111)
ax3.plot(range(burn, saved_iterations), chain[:,-1])
ax3.set_xlabel('MCMC iteration')
ax3.set_ylabel('log-target')
ax3.grid()
fig2.tight_layout()
fig2.savefig(images_dir+'log_target.png')
plt.close()
# plot scatterplot matrix of posterior(s)
colormin, colormax = 1e9,0
norm = matplotlib.colors.Normalize(vmin=5,vmax=10)
hidden_labels = []
count = 0
# two passes over the scatterplot matrix: the first pass finds the global minimum and maximum
# 2-D histogram counts, the second re-plots everything with a shared colour normalisation
# so the pairwise plots can be compared directly
while count < 2:
axes = {}
matrix_fig = plt.figure(figsize=(3*num_params,3*num_params))
for i in range(num_params):
for j in range(i+1):
ij = str(i)+str(j)
subplot_position = num_params*i+j+1
if i==j:
axes[ij] = matrix_fig.add_subplot(num_params,num_params,subplot_position)
axes[ij].hist(chain[:,i],bins=50,normed=True,color='blue', edgecolor='blue')
elif j==0: # this column shares x-axis with top-left
axes[ij] = matrix_fig.add_subplot(num_params,num_params,subplot_position,sharex=axes["00"])
counts, xedges, yedges, Image = axes[ij].hist2d(chain[:,j],chain[:,i],cmap='hot_r',bins=50,norm=norm)
maxcounts = np.amax(counts)
if maxcounts > colormax:
colormax = maxcounts
mincounts = np.amin(counts)
if mincounts < colormin:
colormin = mincounts
else:
axes[ij] = matrix_fig.add_subplot(num_params,num_params,subplot_position,sharex=axes[str(j)+str(j)],sharey=axes[str(i)+"0"])
counts, xedges, yedges, Image = axes[ij].hist2d(chain[:,j],chain[:,i],cmap='hot_r',bins=50,norm=norm)
maxcounts = np.amax(counts)
if maxcounts > colormax:
colormax = maxcounts
mincounts = np.amin(counts)
if mincounts < colormin:
colormin = mincounts
axes[ij].xaxis.grid()
if (i!=j):
axes[ij].yaxis.grid()
if i!=num_params-1:
hidden_labels.append(axes[ij].get_xticklabels())
if j!=0:
hidden_labels.append(axes[ij].get_yticklabels())
if i==j==0:
hidden_labels.append(axes[ij].get_yticklabels())
if i==num_params-1:
axes[str(i)+str(j)].set_xlabel(dr.labels[j], fontsize=18)
if j==0 and i>0:
axes[str(i)+str(j)].set_ylabel(dr.labels[i], fontsize=18)
plt.xticks(rotation=30)
norm = matplotlib.colors.Normalize(vmin=colormin,vmax=colormax)
count += 1
plt.setp(hidden_labels, visible=False)
matrix_fig.tight_layout()
matrix_fig.savefig(images_dir+"{}_{}_model_{}_scatterplot_matrix.png".format(drug,channel,args.model))
matrix_fig.savefig(images_dir+"{}_{}_model_{}_scatterplot_matrix.pdf".format(drug,channel,args.model))
plt.close()
print "\n\n{} + {} complete!\n\n".format(drug,channel)
return None
if args.hierarchical:
run = run_hierarchical
elif (not args.hierarchical): # assume single-level MCMC if hierarchical not specified
run = run_single_level
drugs_channels = it.product(drugs_to_run,channels_to_run)
if (args.num_cores<=1) or (len(drugs_to_run)==1):
for drug_channel in drugs_channels:
run(drug_channel)
#try:
# run(drug_channel)
#except KeyboardInterrupt:
# sys.exit("\nAborting everything\n")
#except:
# print "Failed to run", drug_channel
# the try/except versions are useful when running many MCMCs unattended (overnight, say):
# if one or more drug/channel combinations crash, the others still run to completion.
# For more control while debugging, keep the bare run(drug_channel) call so exceptions propagate.
#try:
# run(drug_channel)
#except Exception,e:
# print e
# print "Failed to run {} + {}!".format(drug_channel[0],drug_channel[1])
# run multiple MCMCs in parallel
elif (args.num_cores>1):
import multiprocessing as mp
num_cores = min(args.num_cores, mp.cpu_count()-1)
pool = mp.Pool(processes=num_cores)
pool.map_async(run,drugs_channels).get(99999)
pool.close()
pool.join()
|
bsd-3-clause
|
dhhagan/py-openaq
|
docs/examples/pollution_outlook_delhi.py
|
3
|
1182
|
"""
Compare All Pollutants at Anand Vihar in Delhi
==============================================
_thumb: .4, .4
"""
import matplotlib.pyplot as plt
import seaborn as sns
import openaq
sns.set(style="white", palette='muted', font_scale=1.35, color_codes=True)
api = openaq.OpenAQ()
# grab the data
df = api.measurements(city='Delhi', location='Anand Vihar', limit=10000, df=True)
# clean up the data by removing values below 0
df = df.query("value >= 0.0")
# Map the gas species from ugm3 to ppb (gas-phase species only)
df['corrected'] = df.apply(lambda x: openaq.utils.mass_to_mix(x['value'], x['parameter'], unit='ppb'), axis=1)
# Build a custom plot function to make nice datetime plots
def dateplot(y, **kwargs):
ax = plt.gca()
data = kwargs.pop("data")
rs = kwargs.pop("rs", '12h')
data.resample(rs).mean().plot(y=y, ax=ax, grid=False, **kwargs)
# Set up a FacetGrid
g = sns.FacetGrid(df, col='parameter', col_wrap=3, size=4, hue='parameter', sharey=False)
# Map the dataframe to the grid
g.map_dataframe(dateplot, "corrected", rs='12h')
# Set the titles
g.set_titles("{col_name}", fontsize=16)
# Set the axis labels
g.set_axis_labels("", "value")
|
mit
|
hlin117/statsmodels
|
statsmodels/sandbox/examples/example_gam.py
|
33
|
2343
|
'''original example for checking how far GAM works
Note: uncomment plt.show() to display graphs
'''
example = 2 # 1,2 or 3
import numpy as np
import numpy.random as R
import matplotlib.pyplot as plt
from statsmodels.sandbox.gam import AdditiveModel
from statsmodels.sandbox.gam import Model as GAM #?
from statsmodels.genmod.families import family
from statsmodels.genmod.generalized_linear_model import GLM
standardize = lambda x: (x - x.mean()) / x.std()
demean = lambda x: (x - x.mean())
nobs = 150
x1 = R.standard_normal(nobs)
x1.sort()
x2 = R.standard_normal(nobs)
x2.sort()
y = R.standard_normal((nobs,))
f1 = lambda x1: (x1 + x1**2 - 3 - 1 * x1**3 + 0.1 * np.exp(-x1/4.))
f2 = lambda x2: (x2 + x2**2 - 0.1 * np.exp(x2/4.))
z = standardize(f1(x1)) + standardize(f2(x2))
z = standardize(z) * 2 # 0.1
y += z
d = np.array([x1,x2]).T
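# two-column design matrix of the sorted covariates; the response y is standard-normal
# noise plus the standardised additive signal z built from f1(x1) and f2(x2)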
if example == 1:
print("normal")
m = AdditiveModel(d)
m.fit(y)
x = np.linspace(-2,2,50)
print(m)
y_pred = m.results.predict(d)
plt.figure()
plt.plot(y, '.')
plt.plot(z, 'b-', label='true')
plt.plot(y_pred, 'r-', label='AdditiveModel')
plt.legend()
plt.title('gam.AdditiveModel')
import scipy.stats, time
if example == 2:
print("binomial")
f = family.Binomial()
b = np.asarray([scipy.stats.bernoulli.rvs(p) for p in f.link.inverse(y)])
b.shape = y.shape
m = GAM(b, d, family=f)
toc = time.time()
m.fit(b)
tic = time.time()
print(tic-toc)
if example == 3:
print("Poisson")
f = family.Poisson()
y = y/y.max() * 3
yp = f.link.inverse(y)
p = np.asarray([scipy.stats.poisson.rvs(p) for p in f.link.inverse(y)], float)
p.shape = y.shape
m = GAM(p, d, family=f)
toc = time.time()
m.fit(p)
tic = time.time()
print(tic-toc)
plt.figure()
plt.plot(x1, standardize(m.smoothers[0](x1)), 'r')
plt.plot(x1, standardize(f1(x1)), linewidth=2)
plt.figure()
plt.plot(x2, standardize(m.smoothers[1](x2)), 'r')
plt.plot(x2, standardize(f2(x2)), linewidth=2)
plt.show()
## pylab.figure(num=1)
## pylab.plot(x1, standardize(m.smoothers[0](x1)), 'b')
## pylab.plot(x1, standardize(f1(x1)), linewidth=2)
## pylab.figure(num=2)
## pylab.plot(x2, standardize(m.smoothers[1](x2)), 'b')
## pylab.plot(x2, standardize(f2(x2)), linewidth=2)
## pylab.show()
|
bsd-3-clause
|
bhargav/scikit-learn
|
examples/mixture/plot_gmm.py
|
36
|
2875
|
"""
=================================
Gaussian Mixture Model Ellipsoids
=================================
Plot the confidence ellipsoids of a mixture of two Gaussians with EM
and variational Dirichlet process.
Both models have access to five components with which to fit the
data. Note that the EM model will necessarily use all five components
while the DP model will effectively only use as many as are needed for
a good fit. This is a property of the Dirichlet Process prior. Here we
can see that the EM model splits some components arbitrarily, because it
is trying to fit too many components, while the Dirichlet Process model
adapts its number of states automatically.
This example doesn't show it, as we're in a low-dimensional space, but
another advantage of the Dirichlet process model is that it can fit
full covariance matrices effectively even when there are fewer examples
per cluster than there are dimensions in the data, due to
regularization properties of the inference algorithm.
"""
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
# Number of samples per component
n_samples = 500
# Generate random sample, two components
np.random.seed(0)
C = np.array([[0., -0.1], [1.7, .4]])
X = np.r_[np.dot(np.random.randn(n_samples, 2), C),
.7 * np.random.randn(n_samples, 2) + np.array([-6, 3])]
# Fit a mixture of Gaussians with EM using five components
gmm = mixture.GMM(n_components=5, covariance_type='full')
gmm.fit(X)
# Fit a Dirichlet process mixture of Gaussians using five components
dpgmm = mixture.DPGMM(n_components=5, covariance_type='full')
dpgmm.fit(X)
color_iter = itertools.cycle(['navy', 'c', 'cornflowerblue', 'gold',
'darkorange'])
for i, (clf, title) in enumerate([(gmm, 'GMM'),
(dpgmm, 'Dirichlet Process GMM')]):
splot = plt.subplot(2, 1, 1 + i)
Y_ = clf.predict(X)
for i, (mean, covar, color) in enumerate(zip(
clf.means_, clf._get_covars(), color_iter)):
v, w = linalg.eigh(covar)
u = w[0] / linalg.norm(w[0])
# as the DP will not use every component it has access to
# unless it needs it, we shouldn't plot the redundant
# components.
if not np.any(Y_ == i):
continue
plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan(u[1] / u[0])
angle = 180 * angle / np.pi # convert to degrees
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.5)
splot.add_artist(ell)
plt.xlim(-10, 10)
plt.ylim(-3, 6)
plt.xticks(())
plt.yticks(())
plt.title(title)
plt.show()
|
bsd-3-clause
|
krez13/scikit-learn
|
sklearn/metrics/cluster/__init__.py
|
312
|
1322
|
"""
The :mod:`sklearn.metrics.cluster` submodule contains evaluation metrics for
cluster analysis results. There are two forms of evaluation:
- supervised, which uses ground truth class values for each sample.
- unsupervised, which does not use ground truth and measures the 'quality' of the model itself.
"""
from .supervised import adjusted_mutual_info_score
from .supervised import normalized_mutual_info_score
from .supervised import adjusted_rand_score
from .supervised import completeness_score
from .supervised import contingency_matrix
from .supervised import expected_mutual_information
from .supervised import homogeneity_completeness_v_measure
from .supervised import homogeneity_score
from .supervised import mutual_info_score
from .supervised import v_measure_score
from .supervised import entropy
from .unsupervised import silhouette_samples
from .unsupervised import silhouette_score
from .bicluster import consensus_score
__all__ = ["adjusted_mutual_info_score", "normalized_mutual_info_score",
"adjusted_rand_score", "completeness_score", "contingency_matrix",
"expected_mutual_information", "homogeneity_completeness_v_measure",
"homogeneity_score", "mutual_info_score", "v_measure_score",
"entropy", "silhouette_samples", "silhouette_score",
"consensus_score"]
|
bsd-3-clause
|
JeanKossaifi/scikit-learn
|
sklearn/datasets/tests/test_lfw.py
|
230
|
7880
|
"""This test for the LFW require medium-size data dowloading and processing
If the data has not been already downloaded by running the examples,
the tests won't run (skipped).
If the tests are run, the first execution will be long (typically a bit
more than a couple of minutes) but as the dataset loader is leveraging
joblib, successive runs will be fast (less than 200ms).
"""
import random
import os
import shutil
import tempfile
import numpy as np
from sklearn.externals import six
try:
try:
from scipy.misc import imsave
except ImportError:
from scipy.misc.pilutil import imsave
except ImportError:
imsave = None
from sklearn.datasets import load_lfw_pairs
from sklearn.datasets import load_lfw_people
from sklearn.datasets import fetch_lfw_pairs
from sklearn.datasets import fetch_lfw_people
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import raises
SCIKIT_LEARN_DATA = tempfile.mkdtemp(prefix="scikit_learn_lfw_test_")
SCIKIT_LEARN_EMPTY_DATA = tempfile.mkdtemp(prefix="scikit_learn_empty_test_")
LFW_HOME = os.path.join(SCIKIT_LEARN_DATA, 'lfw_home')
FAKE_NAMES = [
'Abdelatif_Smith',
'Abhati_Kepler',
'Camara_Alvaro',
'Chen_Dupont',
'John_Lee',
'Lin_Bauman',
'Onur_Lopez',
]
def setup_module():
"""Test fixture run once and common to all tests of this module"""
if imsave is None:
raise SkipTest("PIL not installed.")
if not os.path.exists(LFW_HOME):
os.makedirs(LFW_HOME)
random_state = random.Random(42)
np_rng = np.random.RandomState(42)
# generate some random jpeg files for each person
counts = {}
for name in FAKE_NAMES:
folder_name = os.path.join(LFW_HOME, 'lfw_funneled', name)
if not os.path.exists(folder_name):
os.makedirs(folder_name)
n_faces = np_rng.randint(1, 5)
counts[name] = n_faces
for i in range(n_faces):
file_path = os.path.join(folder_name, name + '_%04d.jpg' % i)
uniface = np_rng.randint(0, 255, size=(250, 250, 3))
try:
imsave(file_path, uniface)
except ImportError:
raise SkipTest("PIL not installed")
# add some random file pollution to test robustness
with open(os.path.join(LFW_HOME, 'lfw_funneled', '.test.swp'), 'wb') as f:
f.write(six.b('Text file to be ignored by the dataset loader.'))
# generate some pairing metadata files using the same format as LFW
with open(os.path.join(LFW_HOME, 'pairsDevTrain.txt'), 'wb') as f:
f.write(six.b("10\n"))
more_than_two = [name for name, count in six.iteritems(counts)
if count >= 2]
for i in range(5):
name = random_state.choice(more_than_two)
first, second = random_state.sample(range(counts[name]), 2)
f.write(six.b('%s\t%d\t%d\n' % (name, first, second)))
for i in range(5):
first_name, second_name = random_state.sample(FAKE_NAMES, 2)
first_index = random_state.choice(np.arange(counts[first_name]))
second_index = random_state.choice(np.arange(counts[second_name]))
f.write(six.b('%s\t%d\t%s\t%d\n' % (first_name, first_index,
second_name, second_index)))
with open(os.path.join(LFW_HOME, 'pairsDevTest.txt'), 'wb') as f:
f.write(six.b("Fake place holder that won't be tested"))
with open(os.path.join(LFW_HOME, 'pairs.txt'), 'wb') as f:
f.write(six.b("Fake place holder that won't be tested"))
def teardown_module():
"""Test fixture (clean up) run once after all tests of this module"""
if os.path.isdir(SCIKIT_LEARN_DATA):
shutil.rmtree(SCIKIT_LEARN_DATA)
if os.path.isdir(SCIKIT_LEARN_EMPTY_DATA):
shutil.rmtree(SCIKIT_LEARN_EMPTY_DATA)
@raises(IOError)
def test_load_empty_lfw_people():
fetch_lfw_people(data_home=SCIKIT_LEARN_EMPTY_DATA, download_if_missing=False)
def test_load_lfw_people_deprecation():
msg = ("Function 'load_lfw_people' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_people(download_if_missing=False) instead.")
assert_warns_message(DeprecationWarning, msg, load_lfw_people,
data_home=SCIKIT_LEARN_DATA)
def test_load_fake_lfw_people():
lfw_people = fetch_lfw_people(data_home=SCIKIT_LEARN_DATA,
min_faces_per_person=3, download_if_missing=False)
# The data is cropped around the center as a rectangular bounding box
# around the face. Colors are converted to gray levels:
assert_equal(lfw_people.images.shape, (10, 62, 47))
assert_equal(lfw_people.data.shape, (10, 2914))
# the target is array of person integer ids
assert_array_equal(lfw_people.target, [2, 0, 1, 0, 2, 0, 2, 1, 1, 2])
# names of the persons can be found using the target_names array
expected_classes = ['Abdelatif Smith', 'Abhati Kepler', 'Onur Lopez']
assert_array_equal(lfw_people.target_names, expected_classes)
# It is possible to ask for the original data without any cropping or color
# conversion and with no limit on the number of pictures per person
lfw_people = fetch_lfw_people(data_home=SCIKIT_LEARN_DATA,
resize=None, slice_=None, color=True, download_if_missing=False)
assert_equal(lfw_people.images.shape, (17, 250, 250, 3))
# the ids and class names are the same as previously
assert_array_equal(lfw_people.target,
[0, 0, 1, 6, 5, 6, 3, 6, 0, 3, 6, 1, 2, 4, 5, 1, 2])
assert_array_equal(lfw_people.target_names,
['Abdelatif Smith', 'Abhati Kepler', 'Camara Alvaro',
'Chen Dupont', 'John Lee', 'Lin Bauman', 'Onur Lopez'])
@raises(ValueError)
def test_load_fake_lfw_people_too_restrictive():
fetch_lfw_people(data_home=SCIKIT_LEARN_DATA, min_faces_per_person=100, download_if_missing=False)
@raises(IOError)
def test_load_empty_lfw_pairs():
fetch_lfw_pairs(data_home=SCIKIT_LEARN_EMPTY_DATA, download_if_missing=False)
def test_load_lfw_pairs_deprecation():
msg = ("Function 'load_lfw_pairs' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_pairs(download_if_missing=False) instead.")
assert_warns_message(DeprecationWarning, msg, load_lfw_pairs,
data_home=SCIKIT_LEARN_DATA)
def test_load_fake_lfw_pairs():
lfw_pairs_train = fetch_lfw_pairs(data_home=SCIKIT_LEARN_DATA, download_if_missing=False)
# The data is cropped around the center as a rectangular bounding box
# around the face. Colors are converted to gray levels:
assert_equal(lfw_pairs_train.pairs.shape, (10, 2, 62, 47))
# the target is whether the person is the same or not
assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
# names of the persons can be found using the target_names array
expected_classes = ['Different persons', 'Same person']
assert_array_equal(lfw_pairs_train.target_names, expected_classes)
# It is possible to ask for the original data without any cropping or color
# conversion
lfw_pairs_train = fetch_lfw_pairs(data_home=SCIKIT_LEARN_DATA,
resize=None, slice_=None, color=True, download_if_missing=False)
assert_equal(lfw_pairs_train.pairs.shape, (10, 2, 250, 250, 3))
# the ids and class names are the same as previously
assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
assert_array_equal(lfw_pairs_train.target_names, expected_classes)
|
bsd-3-clause
|
quheng/scikit-learn
|
sklearn/metrics/cluster/__init__.py
|
312
|
1322
|
"""
The :mod:`sklearn.metrics.cluster` submodule contains evaluation metrics for
cluster analysis results. There are two forms of evaluation:
- supervised, which uses ground truth class values for each sample.
- unsupervised, which does not use ground truth and measures the 'quality' of the model itself.
"""
from .supervised import adjusted_mutual_info_score
from .supervised import normalized_mutual_info_score
from .supervised import adjusted_rand_score
from .supervised import completeness_score
from .supervised import contingency_matrix
from .supervised import expected_mutual_information
from .supervised import homogeneity_completeness_v_measure
from .supervised import homogeneity_score
from .supervised import mutual_info_score
from .supervised import v_measure_score
from .supervised import entropy
from .unsupervised import silhouette_samples
from .unsupervised import silhouette_score
from .bicluster import consensus_score
__all__ = ["adjusted_mutual_info_score", "normalized_mutual_info_score",
"adjusted_rand_score", "completeness_score", "contingency_matrix",
"expected_mutual_information", "homogeneity_completeness_v_measure",
"homogeneity_score", "mutual_info_score", "v_measure_score",
"entropy", "silhouette_samples", "silhouette_score",
"consensus_score"]
|
bsd-3-clause
|
jaidevd/scikit-learn
|
examples/ensemble/plot_forest_importances.py
|
168
|
1793
|
"""
=========================================
Feature importances with forests of trees
=========================================
This example shows the use of forests of trees to evaluate the importance of
features on an artificial classification task. The red bars are the feature
importances of the forest, along with their inter-tree variability.
As expected, the plot suggests that 3 features are informative, while the
remaining are not.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.ensemble import ExtraTreesClassifier
# Build a classification task using 3 informative features
X, y = make_classification(n_samples=1000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
n_classes=2,
random_state=0,
shuffle=False)
# Build a forest and compute the feature importances
forest = ExtraTreesClassifier(n_estimators=250,
random_state=0)
forest.fit(X, y)
importances = forest.feature_importances_
std = np.std([tree.feature_importances_ for tree in forest.estimators_],
axis=0)
indices = np.argsort(importances)[::-1]
# Print the feature ranking
print("Feature ranking:")
for f in range(X.shape[1]):
print("%d. feature %d (%f)" % (f + 1, indices[f], importances[indices[f]]))
# Plot the feature importances of the forest
plt.figure()
plt.title("Feature importances")
plt.bar(range(X.shape[1]), importances[indices],
color="r", yerr=std[indices], align="center")
plt.xticks(range(X.shape[1]), indices)
plt.xlim([-1, X.shape[1]])
plt.show()
|
bsd-3-clause
|
Nu3001/external_chromium_org
|
chrome/test/nacl_test_injection/buildbot_chrome_nacl_stage.py
|
24
|
10036
|
#!/usr/bin/python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Do all the steps required to build and test against nacl."""
import optparse
import os.path
import re
import shutil
import subprocess
import sys
import find_chrome
# Copied from buildbot/buildbot_lib.py
def TryToCleanContents(path, file_name_filter=lambda fn: True):
"""
Remove the contents of a directory without touching the directory itself.
Ignores all failures.
"""
if os.path.exists(path):
for fn in os.listdir(path):
TryToCleanPath(os.path.join(path, fn), file_name_filter)
# Copied from buildbot/buildbot_lib.py
def TryToCleanPath(path, file_name_filter=lambda fn: True):
"""
Removes a file or directory.
Ignores all failures.
"""
if os.path.exists(path):
if file_name_filter(path):
print 'Trying to remove %s' % path
if os.path.isdir(path):
shutil.rmtree(path, ignore_errors=True)
else:
try:
os.remove(path)
except Exception:
pass
else:
print 'Skipping %s' % path
# TODO(ncbray): this is somewhat unsafe. We should fix the underlying problem.
def CleanTempDir():
# Only delete files and directories like:
# a) C:\temp\83C4.tmp
# b) /tmp/.org.chromium.Chromium.EQrEzl
file_name_re = re.compile(
r'[\\/]([0-9a-fA-F]+\.tmp|\.org\.chrom\w+\.Chrom\w+\..+)$')
file_name_filter = lambda fn: file_name_re.search(fn) is not None
path = os.environ.get('TMP', os.environ.get('TEMP', '/tmp'))
if len(path) >= 4 and os.path.isdir(path):
print
print "Cleaning out the temp directory."
print
TryToCleanContents(path, file_name_filter)
else:
print
print "Cannot find temp directory, not cleaning it."
print
def RunCommand(cmd, cwd, env):
sys.stdout.write('\nRunning %s\n\n' % ' '.join(cmd))
sys.stdout.flush()
retcode = subprocess.call(cmd, cwd=cwd, env=env)
if retcode != 0:
sys.stdout.write('\nFailed: %s\n\n' % ' '.join(cmd))
sys.exit(retcode)
def RunTests(name, cmd, nacl_dir, env):
sys.stdout.write('\n\nBuilding files needed for %s testing...\n\n' % name)
RunCommand(cmd + ['do_not_run_tests=1', '-j8'], nacl_dir, env)
sys.stdout.write('\n\nRunning %s tests...\n\n' % name)
RunCommand(cmd, nacl_dir, env)
def BuildAndTest(options):
# Refuse to run under cygwin.
if sys.platform == 'cygwin':
raise Exception('I do not work under cygwin, sorry.')
# By default, use the version of Python that is being used to run this script.
python = sys.executable
if sys.platform == 'darwin':
# Mac 10.5 bots tend to use a particularly old version of Python, look for
# a newer version.
macpython27 = '/Library/Frameworks/Python.framework/Versions/2.7/bin/python'
if os.path.exists(macpython27):
python = macpython27
script_dir = os.path.dirname(os.path.abspath(__file__))
src_dir = os.path.dirname(os.path.dirname(os.path.dirname(script_dir)))
nacl_dir = os.path.join(src_dir, 'native_client')
# Decide platform specifics.
env = dict(os.environ)
if sys.platform in ['win32', 'cygwin']:
if options.bits == 64:
bits = 64
elif options.bits == 32:
bits = 32
elif '64' in os.environ.get('PROCESSOR_ARCHITECTURE', '') or \
'64' in os.environ.get('PROCESSOR_ARCHITEW6432', ''):
bits = 64
else:
bits = 32
msvs_path = ';'.join([
r'c:\Program Files\Microsoft Visual Studio 9.0\VC',
r'c:\Program Files (x86)\Microsoft Visual Studio 9.0\VC',
r'c:\Program Files\Microsoft Visual Studio 9.0\Common7\Tools',
r'c:\Program Files (x86)\Microsoft Visual Studio 9.0\Common7\Tools',
r'c:\Program Files\Microsoft Visual Studio 8\VC',
r'c:\Program Files (x86)\Microsoft Visual Studio 8\VC',
r'c:\Program Files\Microsoft Visual Studio 8\Common7\Tools',
r'c:\Program Files (x86)\Microsoft Visual Studio 8\Common7\Tools',
])
env['PATH'] += ';' + msvs_path
scons = [python, 'scons.py']
elif sys.platform == 'darwin':
bits = 32
scons = [python, 'scons.py']
else:
p = subprocess.Popen(
'uname -m | '
'sed -e "s/i.86/ia32/;s/x86_64/x64/;s/amd64/x64/;s/arm.*/arm/"',
shell=True, stdout=subprocess.PIPE)
(p_stdout, _) = p.communicate()
assert p.returncode == 0
if options.bits == 64:
bits = 64
elif options.bits == 32:
bits = 32
elif p_stdout.find('64') >= 0:
bits = 64
else:
bits = 32
# xvfb-run has a 2-second overhead per invocation, so it is cheaper to wrap
# the entire build step rather than each test (browser_headless=1).
scons = ['xvfb-run', '--auto-servernum', python, 'scons.py']
if options.browser_path:
chrome_filename = options.browser_path
else:
chrome_filename = find_chrome.FindChrome(src_dir, [options.mode])
if chrome_filename is None:
raise Exception('Cannot find a chrome binary - specify one with '
'--browser_path?')
if options.jobs > 1:
scons.append('-j%d' % options.jobs)
scons.append('disable_tests=%s' % options.disable_tests)
if options.buildbot is not None:
scons.append('buildbot=%s' % (options.buildbot,))
# Clean the output of the previous build.
# Incremental builds can get wedged in weird ways, so we're trading speed
# for reliability.
shutil.rmtree(os.path.join(nacl_dir, 'scons-out'), True)
# check that the HOST (not target) is 64bit
# this is emulating what msvs_env.bat is doing
if '64' in os.environ.get('PROCESSOR_ARCHITECTURE', '') or \
'64' in os.environ.get('PROCESSOR_ARCHITEW6432', ''):
# 64bit HOST
env['VS90COMNTOOLS'] = ('c:\\Program Files (x86)\\'
'Microsoft Visual Studio 9.0\\Common7\\Tools\\')
env['VS80COMNTOOLS'] = ('c:\\Program Files (x86)\\'
'Microsoft Visual Studio 8.0\\Common7\\Tools\\')
else:
# 32bit HOST
env['VS90COMNTOOLS'] = ('c:\\Program Files\\Microsoft Visual Studio 9.0\\'
'Common7\\Tools\\')
env['VS80COMNTOOLS'] = ('c:\\Program Files\\Microsoft Visual Studio 8.0\\'
'Common7\\Tools\\')
# Run nacl/chrome integration tests.
# Note that we have to add nacl_irt_test to --mode in order to get
# inbrowser_test_runner to run.
# TODO(mseaborn): Change it so that inbrowser_test_runner is not a
# special case.
cmd = scons + ['--verbose', '-k', 'platform=x86-%d' % bits,
'--mode=opt-host,nacl,nacl_irt_test',
'chrome_browser_path=%s' % chrome_filename,
]
if not options.integration_bot and not options.morenacl_bot:
cmd.append('disable_flaky_tests=1')
cmd.append('chrome_browser_tests')
# Download the toolchain(s).
RunCommand([python,
os.path.join(nacl_dir, 'build', 'download_toolchains.py'),
'--no-arm-trusted', '--no-pnacl', 'TOOL_REVISIONS'],
nacl_dir, os.environ)
CleanTempDir()
if options.enable_newlib:
RunTests('nacl-newlib', cmd, nacl_dir, env)
if options.enable_glibc:
RunTests('nacl-glibc', cmd + ['--nacl_glibc'], nacl_dir, env)
def MakeCommandLineParser():
parser = optparse.OptionParser()
parser.add_option('-m', '--mode', dest='mode', default='Debug',
help='Debug/Release mode')
parser.add_option('-j', dest='jobs', default=1, type='int',
help='Number of parallel jobs')
parser.add_option('--enable_newlib', dest='enable_newlib', default=-1,
type='int', help='Run newlib tests?')
parser.add_option('--enable_glibc', dest='enable_glibc', default=-1,
type='int', help='Run glibc tests?')
# Deprecated, but passed to us by a script in the Chrome repo.
# Replaced by --enable_glibc=0
parser.add_option('--disable_glibc', dest='disable_glibc',
action='store_true', default=False,
help='Do not test using glibc.')
parser.add_option('--disable_tests', dest='disable_tests',
type='string', default='',
help='Comma-separated list of tests to omit')
builder_name = os.environ.get('BUILDBOT_BUILDERNAME', '')
is_integration_bot = 'nacl-chrome' in builder_name
parser.add_option('--integration_bot', dest='integration_bot',
type='int', default=int(is_integration_bot),
help='Is this an integration bot?')
is_morenacl_bot = (
'More NaCl' in builder_name or
'naclmore' in builder_name)
parser.add_option('--morenacl_bot', dest='morenacl_bot',
type='int', default=int(is_morenacl_bot),
help='Is this a morenacl bot?')
# Not used on the bots, but handy for running the script manually.
parser.add_option('--bits', dest='bits', action='store',
type='int', default=None,
help='32/64')
parser.add_option('--browser_path', dest='browser_path', action='store',
type='string', default=None,
help='Path to the chrome browser.')
parser.add_option('--buildbot', dest='buildbot', action='store',
type='string', default=None,
help='Value passed to scons as buildbot= option.')
return parser
def Main():
parser = MakeCommandLineParser()
options, args = parser.parse_args()
if options.integration_bot and options.morenacl_bot:
parser.error('ERROR: cannot be both an integration bot and a morenacl bot')
# Set defaults for enabling newlib.
if options.enable_newlib == -1:
options.enable_newlib = 1
# Set defaults for enabling glibc.
if options.enable_glibc == -1:
if options.integration_bot or options.morenacl_bot:
options.enable_glibc = 1
else:
options.enable_glibc = 0
if args:
parser.error('ERROR: invalid argument')
BuildAndTest(options)
if __name__ == '__main__':
Main()
|
bsd-3-clause
|
yanlend/scikit-learn
|
sklearn/linear_model/passive_aggressive.py
|
97
|
10879
|
# Authors: Rob Zinkov, Mathieu Blondel
# License: BSD 3 clause
from .stochastic_gradient import BaseSGDClassifier
from .stochastic_gradient import BaseSGDRegressor
from .stochastic_gradient import DEFAULT_EPSILON
class PassiveAggressiveClassifier(BaseSGDClassifier):
"""Passive Aggressive Classifier
Read more in the :ref:`User Guide <passive_aggressive>`.
Parameters
----------
C : float
Maximum step size (regularization). Defaults to 1.0.
fit_intercept : bool, default=True
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered.
n_iter : int, optional
The number of passes over the training data (aka epochs).
Defaults to 5.
shuffle : bool, default=True
Whether or not the training data should be shuffled after each epoch.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level
n_jobs : integer, optional
The number of CPUs to use to do the OVA (One Versus All, for
multi-class problems) computation. -1 means 'all CPUs'. Defaults
to 1.
loss : string, optional
The loss function to be used:
hinge: equivalent to PA-I in the reference paper.
squared_hinge: equivalent to PA-II in the reference paper.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
class_weight : dict, {class_label: weight} or "balanced" or None, optional
Preset for the class_weight fit parameter.
Weights associated with classes. If not given, all classes
are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
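For example, with ``y = [0, 0, 0, 1]`` this gives ``4 / (2 * np.bincount(y))``,
i.e. weights of 0.67 for class 0 and 2.0 for class 1.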
Attributes
----------
coef_ : array, shape = [1, n_features] if n_classes == 2 else [n_classes,\
n_features]
Weights assigned to the features.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
See also
--------
SGDClassifier
Perceptron
References
----------
Online Passive-Aggressive Algorithms
<http://jmlr.csail.mit.edu/papers/volume7/crammer06a/crammer06a.pdf>
K. Crammer, O. Dekel, J. Keshet, S. Shalev-Shwartz, Y. Singer - JMLR (2006)
"""
def __init__(self, C=1.0, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, loss="hinge", n_jobs=1, random_state=None,
warm_start=False, class_weight=None):
BaseSGDClassifier.__init__(self,
penalty=None,
fit_intercept=fit_intercept,
n_iter=n_iter,
shuffle=shuffle,
verbose=verbose,
random_state=random_state,
eta0=1.0,
warm_start=warm_start,
class_weight=class_weight,
n_jobs=n_jobs)
self.C = C
self.loss = loss
def partial_fit(self, X, y, classes=None):
"""Fit linear model with Passive Aggressive algorithm.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Subset of the training data
y : numpy array of shape [n_samples]
Subset of the target values
classes : array, shape = [n_classes]
Classes across all calls to partial_fit.
Can be obtained via `np.unique(y_all)`, where y_all is the
target vector of the entire dataset.
This argument is required for the first call to partial_fit
and can be omitted in the subsequent calls.
Note that y doesn't need to contain all labels in `classes`.
Returns
-------
self : returns an instance of self.
"""
if self.class_weight == 'balanced':
raise ValueError("class_weight 'balanced' is not supported for "
"partial_fit. For 'balanced' weights, use "
"`sklearn.utils.compute_class_weight` with "
"`class_weight='balanced'`. In place of y you "
"can use a large enough subset of the full "
"training set target to properly estimate the "
"class frequency distributions. Pass the "
"resulting weights as the class_weight "
"parameter.")
lr = "pa1" if self.loss == "hinge" else "pa2"
return self._partial_fit(X, y, alpha=1.0, C=self.C,
loss="hinge", learning_rate=lr, n_iter=1,
classes=classes, sample_weight=None,
coef_init=None, intercept_init=None)
def fit(self, X, y, coef_init=None, intercept_init=None):
"""Fit linear model with Passive Aggressive algorithm.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : numpy array of shape [n_samples]
Target values
coef_init : array, shape = [n_classes,n_features]
The initial coefficients to warm-start the optimization.
intercept_init : array, shape = [n_classes]
The initial intercept to warm-start the optimization.
Returns
-------
self : returns an instance of self.
"""
lr = "pa1" if self.loss == "hinge" else "pa2"
return self._fit(X, y, alpha=1.0, C=self.C,
loss="hinge", learning_rate=lr,
coef_init=coef_init, intercept_init=intercept_init)
class PassiveAggressiveRegressor(BaseSGDRegressor):
"""Passive Aggressive Regressor
Read more in the :ref:`User Guide <passive_aggressive>`.
Parameters
----------
C : float
Maximum step size (regularization). Defaults to 1.0.
epsilon : float
If the difference between the current prediction and the correct label
is below this threshold, the model is not updated.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs).
Defaults to 5.
shuffle : bool, default=True
Whether or not the training data should be shuffled after each epoch.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level
loss : string, optional
The loss function to be used:
epsilon_insensitive: equivalent to PA-I in the reference paper.
squared_epsilon_insensitive: equivalent to PA-II in the reference
paper.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
Attributes
----------
coef_ : array, shape = [1, n_features] if n_classes == 2 else [n_classes,\
n_features]
Weights assigned to the features.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
See also
--------
SGDRegressor
References
----------
Online Passive-Aggressive Algorithms
<http://jmlr.csail.mit.edu/papers/volume7/crammer06a/crammer06a.pdf>
K. Crammer, O. Dekel, J. Keshet, S. Shalev-Shwartz, Y. Singer - JMLR (2006)
"""
def __init__(self, C=1.0, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, loss="epsilon_insensitive",
epsilon=DEFAULT_EPSILON, random_state=None, warm_start=False):
BaseSGDRegressor.__init__(self,
penalty=None,
l1_ratio=0,
epsilon=epsilon,
eta0=1.0,
fit_intercept=fit_intercept,
n_iter=n_iter,
shuffle=shuffle,
verbose=verbose,
random_state=random_state,
warm_start=warm_start)
self.C = C
self.loss = loss
def partial_fit(self, X, y):
"""Fit linear model with Passive Aggressive algorithm.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Subset of training data
y : numpy array of shape [n_samples]
Subset of target values
Returns
-------
self : returns an instance of self.
"""
lr = "pa1" if self.loss == "epsilon_insensitive" else "pa2"
return self._partial_fit(X, y, alpha=1.0, C=self.C,
loss="epsilon_insensitive",
learning_rate=lr, n_iter=1,
sample_weight=None,
coef_init=None, intercept_init=None)
def fit(self, X, y, coef_init=None, intercept_init=None):
"""Fit linear model with Passive Aggressive algorithm.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : numpy array of shape [n_samples]
Target values
coef_init : array, shape = [n_features]
The initial coefficients to warm-start the optimization.
intercept_init : array, shape = [1]
The initial intercept to warm-start the optimization.
Returns
-------
self : returns an instance of self.
"""
lr = "pa1" if self.loss == "epsilon_insensitive" else "pa2"
return self._fit(X, y, alpha=1.0, C=self.C,
loss="epsilon_insensitive",
learning_rate=lr,
coef_init=coef_init,
intercept_init=intercept_init)
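# ----------------------------------------------------------------------
# A minimal usage sketch (illustrative only): fit the hinge-loss (PA-I) classifier
# defined above on a small synthetic problem and report its training accuracy.
# The data, parameter values and variable names below are arbitrary choices.
if __name__ == "__main__":
    import numpy as np

    rng = np.random.RandomState(0)
    X = rng.randn(200, 4)
    # labels determined by a linear rule on the first two features
    y = (X[:, 0] + 0.5 * X[:, 1] > 0).astype(int)

    clf = PassiveAggressiveClassifier(C=1.0, loss="hinge", random_state=0)
    clf.fit(X, y)
    print("PA-I training accuracy: {:.3f}".format(clf.score(X, y)))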
|
bsd-3-clause
|
mounicmadiraju/dataasservices
|
data-science-analysis/gender-classification/gender-classification.py
|
2
|
1594
|
from sklearn import tree
from sklearn.svm import SVC
from sklearn.linear_model import Perceptron
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score
import numpy as np
# Data and labels
X = [[181, 80, 44], [177, 70, 43], [160, 60, 38], [154, 54, 37], [166, 65, 40], [190, 90, 47], [175, 64, 39],
[177, 70, 40], [159, 55, 37], [171, 75, 42], [181, 85, 43]]
Y = ['male', 'male', 'female', 'female', 'male', 'male', 'female', 'female', 'female', 'male', 'male']
# Classifiers
# using the default values for all the hyperparameters
clf_tree = tree.DecisionTreeClassifier()
clf_svm = SVC()
clf_perceptron = Perceptron()
clf_KNN = KNeighborsClassifier()
# Training the models
clf_tree.fit(X, Y)
clf_svm.fit(X, Y)
clf_perceptron.fit(X, Y)
clf_KNN.fit(X, Y)
# Testing using the same data
pred_tree = clf_tree.predict(X)
acc_tree = accuracy_score(Y, pred_tree) * 100
print('Accuracy for DecisionTree: {}'.format(acc_tree))
pred_svm = clf_svm.predict(X)
acc_svm = accuracy_score(Y, pred_svm) * 100
print('Accuracy for SVM: {}'.format(acc_svm))
pred_per = clf_perceptron.predict(X)
acc_per = accuracy_score(Y, pred_per) * 100
print('Accuracy for perceptron: {}'.format(acc_per))
pred_KNN = clf_KNN.predict(X)
acc_KNN = accuracy_score(Y, pred_KNN) * 100
print('Accuracy for KNN: {}'.format(acc_KNN))
# The best classifier from svm, per, KNN
index = np.argmax([acc_svm, acc_per, acc_KNN])
classifiers = {0: 'SVM', 1: 'Perceptron', 2: 'KNN'}
print('Best gender classifier is {}'.format(classifiers[index]))
|
apache-2.0
|
lancezlin/ml_template_py
|
lib/python2.7/site-packages/sklearn/utils/tests/test_utils.py
|
47
|
9089
|
import warnings
import numpy as np
import scipy.sparse as sp
from scipy.linalg import pinv2
from itertools import chain
from sklearn.utils.testing import (assert_equal, assert_raises, assert_true,
assert_almost_equal, assert_array_equal,
SkipTest, assert_raises_regex,
assert_greater_equal)
from sklearn.utils import check_random_state
from sklearn.utils import deprecated
from sklearn.utils import resample
from sklearn.utils import safe_mask
from sklearn.utils import column_or_1d
from sklearn.utils import safe_indexing
from sklearn.utils import shuffle
from sklearn.utils import gen_even_slices
from sklearn.utils.extmath import pinvh
from sklearn.utils.arpack import eigsh
from sklearn.utils.mocking import MockDataFrame
from sklearn.utils.graph import graph_laplacian
def test_make_rng():
# Check the check_random_state utility function behavior
assert_true(check_random_state(None) is np.random.mtrand._rand)
assert_true(check_random_state(np.random) is np.random.mtrand._rand)
rng_42 = np.random.RandomState(42)
assert_true(check_random_state(42).randint(100) == rng_42.randint(100))
rng_42 = np.random.RandomState(42)
assert_true(check_random_state(rng_42) is rng_42)
rng_42 = np.random.RandomState(42)
assert_true(check_random_state(43).randint(100) != rng_42.randint(100))
assert_raises(ValueError, check_random_state, "some invalid seed")
def test_deprecated():
# Test whether the deprecated decorator issues appropriate warnings
# Copied almost verbatim from http://docs.python.org/library/warnings.html
# First a function...
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
@deprecated()
def ham():
return "spam"
spam = ham()
assert_equal(spam, "spam") # function must remain usable
assert_equal(len(w), 1)
assert_true(issubclass(w[0].category, DeprecationWarning))
assert_true("deprecated" in str(w[0].message).lower())
# ... then a class.
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
@deprecated("don't use this")
class Ham(object):
SPAM = 1
ham = Ham()
assert_true(hasattr(ham, "SPAM"))
assert_equal(len(w), 1)
assert_true(issubclass(w[0].category, DeprecationWarning))
assert_true("deprecated" in str(w[0].message).lower())
def test_resample():
# Border case not worth mentioning in doctests
assert_true(resample() is None)
# Check that invalid arguments yield ValueError
assert_raises(ValueError, resample, [0], [0, 1])
assert_raises(ValueError, resample, [0, 1], [0, 1],
replace=False, n_samples=3)
assert_raises(ValueError, resample, [0, 1], [0, 1], meaning_of_life=42)
# Issue:6581, n_samples can be more when replace is True (default).
assert_equal(len(resample([1, 2], n_samples=5)), 5)
def test_safe_mask():
random_state = check_random_state(0)
X = random_state.rand(5, 4)
X_csr = sp.csr_matrix(X)
mask = [False, False, True, True, True]
mask = safe_mask(X, mask)
assert_equal(X[mask].shape[0], 3)
mask = safe_mask(X_csr, mask)
assert_equal(X_csr[mask].shape[0], 3)
def test_pinvh_simple_real():
a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 10]], dtype=np.float64)
a = np.dot(a, a.T)
a_pinv = pinvh(a)
assert_almost_equal(np.dot(a, a_pinv), np.eye(3))
def test_pinvh_nonpositive():
a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float64)
a = np.dot(a, a.T)
u, s, vt = np.linalg.svd(a)
s[0] *= -1
a = np.dot(u * s, vt) # a is now symmetric non-positive and singular
a_pinv = pinv2(a)
a_pinvh = pinvh(a)
assert_almost_equal(a_pinv, a_pinvh)
def test_pinvh_simple_complex():
a = (np.array([[1, 2, 3], [4, 5, 6], [7, 8, 10]])
+ 1j * np.array([[10, 8, 7], [6, 5, 4], [3, 2, 1]]))
a = np.dot(a, a.conj().T)
a_pinv = pinvh(a)
assert_almost_equal(np.dot(a, a_pinv), np.eye(3))
def test_arpack_eigsh_initialization():
# Non-regression test that shows null-space computation is better with
# initialization of eigsh from [-1,1] instead of [0,1]
random_state = check_random_state(42)
A = random_state.rand(50, 50)
A = np.dot(A.T, A) # create s.p.d. matrix
A = graph_laplacian(A) + 1e-7 * np.identity(A.shape[0])
k = 5
# Test if eigsh is working correctly
# New initialization [-1,1] (as in original ARPACK)
# Was [0,1] before, with which this test could fail
v0 = random_state.uniform(-1,1, A.shape[0])
w, _ = eigsh(A, k=k, sigma=0.0, v0=v0)
# Eigenvalues of s.p.d. matrix should be nonnegative, w[0] is smallest
assert_greater_equal(w[0], 0)
def test_column_or_1d():
EXAMPLES = [
("binary", ["spam", "egg", "spam"]),
("binary", [0, 1, 0, 1]),
("continuous", np.arange(10) / 20.),
("multiclass", [1, 2, 3]),
("multiclass", [0, 1, 2, 2, 0]),
("multiclass", [[1], [2], [3]]),
("multilabel-indicator", [[0, 1, 0], [0, 0, 1]]),
("multiclass-multioutput", [[1, 2, 3]]),
("multiclass-multioutput", [[1, 1], [2, 2], [3, 1]]),
("multiclass-multioutput", [[5, 1], [4, 2], [3, 1]]),
("multiclass-multioutput", [[1, 2, 3]]),
("continuous-multioutput", np.arange(30).reshape((-1, 3))),
]
for y_type, y in EXAMPLES:
if y_type in ["binary", 'multiclass', "continuous"]:
assert_array_equal(column_or_1d(y), np.ravel(y))
else:
assert_raises(ValueError, column_or_1d, y)
def test_safe_indexing():
X = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
inds = np.array([1, 2])
X_inds = safe_indexing(X, inds)
X_arrays = safe_indexing(np.array(X), inds)
assert_array_equal(np.array(X_inds), X_arrays)
assert_array_equal(np.array(X_inds), np.array(X)[inds])
def test_safe_indexing_pandas():
try:
import pandas as pd
except ImportError:
raise SkipTest("Pandas not found")
X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
X_df = pd.DataFrame(X)
inds = np.array([1, 2])
X_df_indexed = safe_indexing(X_df, inds)
X_indexed = safe_indexing(X_df, inds)
assert_array_equal(np.array(X_df_indexed), X_indexed)
# fun with read-only data in dataframes
# this happens in joblib memmapping
X.setflags(write=False)
X_df_readonly = pd.DataFrame(X)
with warnings.catch_warnings(record=True):
X_df_ro_indexed = safe_indexing(X_df_readonly, inds)
assert_array_equal(np.array(X_df_ro_indexed), X_indexed)
def test_safe_indexing_mock_pandas():
X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
X_df = MockDataFrame(X)
inds = np.array([1, 2])
X_df_indexed = safe_indexing(X_df, inds)
X_indexed = safe_indexing(X_df, inds)
assert_array_equal(np.array(X_df_indexed), X_indexed)
def test_shuffle_on_ndim_equals_three():
def to_tuple(A): # to make the inner arrays hashable
return tuple(tuple(tuple(C) for C in B) for B in A)
A = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]) # A.shape = (2,2,2)
S = set(to_tuple(A))
shuffle(A) # shouldn't raise a ValueError for dim = 3
assert_equal(set(to_tuple(A)), S)
def test_shuffle_dont_convert_to_array():
# Check that shuffle does not try to convert to numpy arrays with float
# dtypes and can let any indexable data structure pass through.
a = ['a', 'b', 'c']
b = np.array(['a', 'b', 'c'], dtype=object)
c = [1, 2, 3]
d = MockDataFrame(np.array([['a', 0],
['b', 1],
['c', 2]],
dtype=object))
e = sp.csc_matrix(np.arange(6).reshape(3, 2))
a_s, b_s, c_s, d_s, e_s = shuffle(a, b, c, d, e, random_state=0)
assert_equal(a_s, ['c', 'b', 'a'])
assert_equal(type(a_s), list)
assert_array_equal(b_s, ['c', 'b', 'a'])
assert_equal(b_s.dtype, object)
assert_equal(c_s, [3, 2, 1])
assert_equal(type(c_s), list)
assert_array_equal(d_s, np.array([['c', 2],
['b', 1],
['a', 0]],
dtype=object))
assert_equal(type(d_s), MockDataFrame)
assert_array_equal(e_s.toarray(), np.array([[4, 5],
[2, 3],
[0, 1]]))
def test_gen_even_slices():
# check that gen_even_slices contains all samples
some_range = range(10)
joined_range = list(chain(*[some_range[slice] for slice in gen_even_slices(10, 3)]))
assert_array_equal(some_range, joined_range)
# check that passing negative n_chunks raises an error
slices = gen_even_slices(10, -1)
assert_raises_regex(ValueError, "gen_even_slices got n_packs=-1, must be"
" >=1", next, slices)
|
mit
|
gully/PyKE
|
pyke/kepflatten.py
|
2
|
18551
|
from .utils import PyKEArgumentHelpFormatter
from . import kepio, kepmsg, kepkey, kepfit, kepstat, kepfunc
import re
import numpy as np
import matplotlib.pyplot as plt
from copy import copy
from astropy.io import fits as pyfits
from tqdm import tqdm
__all__ = ['kepflatten']
def kepflatten(infile, outfile=None, datacol='PDCSAP_FLUX',
errcol='PDCSAP_FLUX_ERR', nsig=3., stepsize=0.5, winsize=5.0,
npoly=3, niter=1, ranges='0,0', plot=False, overwrite=False,
verbose=False, logfile='kepflatten.log'):
"""
kepflatten -- Remove low frequency variability from time-series, preserve
transits and flares
kepflatten detrends data for low-frequency photometric structure by
dividing by the mean of best-fit sliding polynomials over a sequential
    series of small time ranges across the data. For example, with
    ``stepsize=1.0`` and ``winsize=3.0`` a typical timestamp is fit three
    times, and the adopted fit to that timestamp is the mean of the three
    values. Outliers are iteratively clipped from the fit, so structure in
    e.g. short-lived transits or flares is better preserved compared to e.g.
    bandpass filtering methods (``kepfilter``). Optionally, input data, best
fits, fit outliers and output data are rendered to a plot window. In many
respects kepflatten performs the opposite task to kepoutlier which removes
statistical outliers while preserving low-frequency structure in light
curves.
Parameters
----------
infile : str
The name of a MAST standard format FITS file containing a Kepler light
curve within the first data extension.
outfile : str
        The name of the output FITS file. outfile will be a direct copy of
        infile but with NaN timestamps removed and two new columns in the 1st
        extension - DETSAP_FLUX (a version of the data flattened, i.e.
        detrended for low-frequency variations) and DETSAP_FLUX_ERR (the
        associated 1-:math:`\sigma` error).
datacol : str
The column name containing data stored within extension 1 of infile.
Typically this name is SAP_FLUX (Simple Aperture Photometry fluxes),
PDCSAP_FLUX (Pre-search Data Conditioning fluxes) or CBVSAP_FLUX
(SAP_FLUX corrected for systematic artifacts by the PyKE tool
kepcotrend).
errcol : str
The column name containing photometric 1-:math:`\sigma` errors
stored within extension 1 of infile. Typically this name is
SAP_FLUX_ERR (Simple Aperture Photometry fluxes), PDCSAP_FLUX_ERR
(Pre-search Data Conditioning fluxes). The error column coupled to
CBVSAP_FLUX data is SAP_FLUX_ERR. kepflatten normalizes datacol and
errcol consistently using a series of best fit polynomials.
nsig : float
The sigma clipping threshold in units of standard deviation. Data
        deviating from the best-fit function by more than the threshold will be
        ignored during subsequent fit iterations.
stepsize : float
The data within datacol is unlikely to be well represented by a single
        polynomial function. stepsize splits the data up into a series of time
        blocks, each of which is fit independently by a separate function. The
        user can provide an informed choice of stepsize after inspecting the
        data with the kepdraw tool. Units are days.
winsize : float
The size of the window to be fit during each step. Units are days.
        winsize must be greater than or equal to stepsize. winsize >> stepsize is
recommended.
npoly : integer
        The order of each piecewise polynomial function.
niter : integer
If outliers outside of nsig are found in a particular data section,
that data will be removed temporarily and the time series fit again.
This will be iterated niter times before freezing upon the best current
fit.
ranges : str
The user can choose specific time ranges of data on which to work. This
could, for example, avoid removing known stellar flares from a dataset.
Time ranges are supplied as comma-separated pairs of Barycentric Julian
Dates (BJDs). Multiple ranges are separated by a semi-colon. An example
containing two time ranges is:
``2455012.48517,2455014.50072;2455022.63487,2455025.08231``.
If the user wants to correct the entire time series then providing
``ranges='0,0'`` will tell the task to operate on the whole time
series.
plot : bool
Plot the data, fit, outliers and result?
overwrite : bool
Overwrite the output file?
verbose : bool
Print informative messages and warnings to the shell and logfile?
logfile : str
Name of the logfile containing error and warning messages.
Examples
--------
.. code-block:: bash
$ kepflatten kplr012557548-2011177032512_llc.fits
--nsig 3 --stepsize 1.0 --winsize 3.0 --npoly 3 --niter 10 --plot
--overwrite --verbose
.. image:: ../_static/images/api/kepflatten.png
:align: center
"""
if outfile is None:
outfile = infile.split('.')[0] + "-{}.fits".format(__all__[0])
# log the call
hashline = '--------------------------------------------------------------'
kepmsg.log(logfile, hashline, verbose)
call = ('KEPFLATTEN -- '
+ ' infile={}'.format(infile)
+ ' outfile={}'.format(outfile)
+ ' datacol={}'.format(datacol)
+ ' errcol={}'.format(errcol)
+ ' nsig={}'.format(nsig)
+ ' stepsize={}'.format(stepsize)
+ ' winsize={}'.format(winsize)
+ ' npoly={}'.format(npoly)
+ ' niter={}'.format(niter)
+ ' ranges={}'.format(ranges)
+ ' plot={}'.format(plot)
+ ' overwrite={}'.format(overwrite)
+ ' verbose={}'.format(verbose)
+ ' logfile={}'.format(logfile))
kepmsg.log(logfile, call+'\n', verbose)
# start time
kepmsg.clock('KEPFLATTEN started at', logfile, verbose)
    # test winsize >= stepsize
    if winsize < stepsize:
        errmsg = ('ERROR -- KEPFLATTEN: winsize must be greater than or equal'
                  ' to stepsize')
        kepmsg.err(logfile, errmsg, verbose)
# overwrite output file
if overwrite:
kepio.overwrite(outfile, logfile, verbose)
if kepio.fileexists(outfile):
errmsg = ('ERROR -- KEPFLATTEN: {} exists. Use overwrite=True'
.format(outfile))
kepmsg.err(logfile, errmsg, verbose)
# open input file
instr = pyfits.open(infile, 'readonly')
tstart, tstop, bjdref, cadence = kepio.timekeys(instr, infile, logfile,
verbose)
try:
work = instr[0].header['FILEVER']
cadenom = 1.0
except:
cadenom = cadence
# fudge non-compliant FITS keywords with no values
instr = kepkey.emptykeys(instr, infile, logfile, verbose)
# read table structure
table = kepio.readfitstab(infile, instr[1], logfile, verbose)
# filter input data table
try:
datac = table.field(datacol)
except:
errmsg = ('ERROR -- KEPFLATTEN: cannot find or read data column {}'
.format(datacol))
        kepmsg.err(logfile, errmsg, verbose)
try:
err = table.field(errcol)
except:
errmsg = ('WARNING -- KEPFLATTEN: cannot find or read error column {}'
.format(errcol))
kepmsg.warn(logfile, errmsg, verbose)
errcol = 'None'
if errcol.lower() == 'none' or errcol == 'PSF_FLUX_ERR':
err = datac * cadence
err = np.sqrt(np.abs(err)) / cadence
work1 = np.array([table.field('time'), datac, err])
else:
work1 = np.array([table.field('time'), datac, err])
work1 = np.rot90(work1, 3)
work1 = work1[~np.isnan(work1).any(1)]
# read table columns
intime = work1[:, 2] + bjdref
indata = work1[:, 1]
inerr = work1[:, 0]
if len(intime) == 0:
message = 'ERROR -- KEPFLATTEN: one of the input arrays is all NaN'
kepmsg.err(logfile, message, verbose)
# time ranges for region to be corrected
t1, t2 = kepio.timeranges(ranges, logfile, verbose)
cadencelis = kepstat.filterOnRange(intime, t1, t2)
# find limits of each time step
tstep1, tstep2 = [], []
work = intime[0]
while work <= intime[-1]:
tstep1.append(work)
tstep2.append(np.array([work + winsize, intime[-1]],
dtype='float64').min())
work += stepsize
# find cadence limits of each time step
cstep1, cstep2 = [], []
for n in range(len(tstep1)):
for i in range(len(intime)-1):
if intime[i] <= tstep1[n] and intime[i+1] > tstep1[n]:
for j in range(i, len(intime)-1):
if intime[j] < tstep2[n] and intime[j+1] >= tstep2[n]:
cstep1.append(i)
cstep2.append(j + 1)
# comment keyword in output file
kepkey.history(call, instr[0], outfile, logfile, verbose)
# clean up x-axis unit
intime0 = tstart // 100 * 100.0
ptime = intime - intime0
xlab = 'BJD $-$ {}'.format(intime0)
# clean up y-axis units
pout = copy(indata)
nrm = len(str(int(pout.max()))) - 1
pout = pout / 10 ** nrm
ylab = '10$^{}$'.format(nrm) + 'e$^-$ s$^{-1}$'
# data limits
xmin = ptime.min()
xmax = ptime.max()
ymin = pout.min()
ymax = pout.max()
xr = xmax - xmin
yr = ymax - ymin
ptime = np.insert(ptime, [0], [ptime[0]])
ptime = np.append(ptime, [ptime[-1]])
pout = np.insert(pout, [0], [0.0])
pout = np.append(pout, 0.0)
if plot:
plt.figure()
plt.clf()
# plot data
ax = plt.axes([0.06, 0.54, 0.93, 0.43])
# force tick labels to be absolute rather than relative
plt.gca().xaxis.set_major_formatter(plt.ScalarFormatter(useOffset=False))
plt.gca().yaxis.set_major_formatter(plt.ScalarFormatter(useOffset=False))
# rotate y labels by 90 deg
labels = ax.get_yticklabels()
plt.setp(plt.gca(), xticklabels=[])
plt.plot(ptime[1:-1], pout[1:-1], color='#363636', linestyle='-',
linewidth=1.0)
plt.fill(ptime, pout, color='#a8a7a7', linewidth=0.0, alpha=0.2)
plt.ylabel(ylab, {'color' : 'k'})
plt.grid()
# loop over each time step, fit data, determine rms
fitarray = np.zeros((len(indata), len(cstep1)), dtype='float32')
sigarray = np.zeros((len(indata), len(cstep1)), dtype='float32')
fitarray[:, :] = np.nan
sigarray[:, :] = np.nan
masterfit = indata * 0.0
mastersigma = np.zeros(len(masterfit))
functype = getattr(kepfunc, 'poly' + str(npoly))
for i in tqdm(range(len(cstep1))):
timeSeries = intime[cstep1[i]:cstep2[i]+1] - intime[cstep1[i]]
dataSeries = indata[cstep1[i]:cstep2[i]+1]
fitTimeSeries = np.array([], dtype='float32')
fitDataSeries = np.array([], dtype='float32')
pinit = [dataSeries.mean()]
if npoly > 0:
for j in range(npoly):
pinit.append(0.0)
pinit = np.array(pinit, dtype='float32')
try:
if len(fitarray[cstep1[i]:cstep2[i]+1,i]) > len(pinit):
coeffs, errors, covar, iiter, sigma, chi2, dof, fit, plotx, ploty = \
kepfit.lsqclip(functype, pinit, timeSeries, dataSeries,
None, nsig, nsig, niter, logfile, verbose)
fitarray[cstep1[i]:cstep2[i]+1, i] = 0.0
sigarray[cstep1[i]:cstep2[i]+1, i] = sigma
for j in range(len(coeffs)):
fitarray[cstep1[i]:cstep2[i]+1, i] += coeffs[j] * timeSeries ** j
except:
message = ('WARNING -- KEPFLATTEN: could not fit range '
+ str(intime[cstep1[i]]) + '-' + str(intime[cstep2[i]]))
kepmsg.warn(logfile, message, verbose)
# find mean fit for each timestamp
for i in range(len(indata)):
masterfit[i] = np.nanmean(fitarray[i, :])
mastersigma[i] = np.nanmean(sigarray[i, :])
masterfit[-1] = masterfit[-4] #fudge
masterfit[-2] = masterfit[-4] #fudge
masterfit[-3] = masterfit[-4] #fudge
if plot:
plt.plot(intime - intime0, masterfit / 10 ** nrm, 'b')
# reject outliers
rejtime, rejdata = [], []
naxis2 = 0
for i in range(len(masterfit)):
if (abs(indata[i] - masterfit[i]) > nsig * mastersigma[i]
and i in cadencelis):
rejtime.append(intime[i])
rejdata.append(indata[i])
rejtime = np.array(rejtime, dtype='float64')
rejdata = np.array(rejdata, dtype='float32')
if plot:
plt.plot(rejtime - intime0, rejdata / 10 ** nrm, 'ro', markersize=2)
# new data for output file
outdata = indata / masterfit
outerr = inerr / masterfit
pout = copy(outdata)
ylab = 'Normalized Flux'
# plot ranges
if plot:
plt.xlim(xmin-xr*0.01, xmax+xr*0.01)
if ymin >= 0.0:
plt.ylim(ymin-yr*0.01, ymax+yr*0.01)
else:
plt.ylim(1.0e-10, ymax+yr*0.01)
# plot residual data
ax = plt.axes([0.06,0.09,0.93,0.43])
plt.gca().xaxis.set_major_formatter(plt.ScalarFormatter(useOffset=False))
plt.gca().yaxis.set_major_formatter(plt.ScalarFormatter(useOffset=False))
# rotate y labels by 90 deg
labels = ax.get_yticklabels()
ymin = pout.min()
ymax = pout.max()
yr = ymax - ymin
pout = np.insert(pout, [0], [0.0])
pout = np.append(pout, 0.0)
plt.plot(ptime[1:-1], pout[1:-1], color='#363636', linestyle='-',
linewidth=1.0)
plt.fill(ptime, pout, color='#a8a7a7', linewidth=0.0, alpha=0.2)
plt.xlabel(xlab, {'color' : 'k'})
plt.ylabel(ylab, {'color' : 'k'})
plt.grid()
# plot ranges
plt.xlim(xmin - xr * 0.01, xmax + xr * 0.01)
if ymin >= 0.0:
plt.ylim(ymin - yr * 0.01, ymax + yr * 0.01)
else:
plt.ylim(1.0e-10, ymax + yr * 0.01)
# render plot
plt.savefig(re.sub('.fits', '.png', outfile))
plt.show()
# add NaNs back into data
n = 0
work1 = np.array([], dtype='float32')
work2 = np.array([], dtype='float32')
instr = pyfits.open(infile, 'readonly')
table = kepio.readfitstab(infile, instr[1], logfile, verbose)
tn = table.field('time')
dn = table.field(datacol)
for i in range(len(table.field(0))):
if np.isfinite(tn[i]) and np.isfinite(dn[i]) and np.isfinite(err[i]):
try:
work1 = np.append(work1, outdata[n])
work2 = np.append(work2, outerr[n])
n += 1
except:
pass
else:
work1 = np.append(work1, np.nan)
work2 = np.append(work2, np.nan)
# history keyword in output file
kepkey.history(call, instr[0], outfile, logfile, verbose)
# write output file
try:
print("Writing output file {}...".format(outfile))
col1 = pyfits.Column(name='DETSAP_FLUX',format='E13.7',array=work1)
col2 = pyfits.Column(name='DETSAP_FLUX_ERR',format='E13.7',array=work2)
cols = instr[1].data.columns + col1 + col2
instr[1] = pyfits.BinTableHDU.from_columns(cols, header=instr[1].header)
instr.writeto(outfile)
except ValueError:
try:
instr[1].data.field('DETSAP_FLUX')[:] = work1
instr[1].data.field('DETSAP_FLUX_ERR')[:] = work2
instr.writeto(outfile)
except:
message = ('ERROR -- KEPFLATTEN: cannot add DETSAP_FLUX data to '
'FITS file')
kepmsg.err(logfile, message, verbose)
# close input file
instr.close()
## end time
kepmsg.clock('KEPFLATTEN completed at', logfile, verbose)
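# The sketch below is not part of the original PyKE module; it is a minimal,
# numpy-only illustration of the idea described in the kepflatten docstring:
# fit a low-order polynomial in overlapping windows of width `winsize`,
# stepped by `stepsize`, and divide each sample by the mean of the fits that
# cover it. It omits the sigma clipping, error propagation and FITS
# bookkeeping of the real task; names and defaults are illustrative only.
def _sliding_polyfit_sketch(time, flux, stepsize=1.0, winsize=3.0, npoly=3):
    time = np.asarray(time, dtype=float)
    flux = np.asarray(flux, dtype=float)
    starts = np.arange(time[0], time[-1], stepsize)
    fit_stack = np.full((len(starts), len(flux)), np.nan)
    for k, t0 in enumerate(starts):
        mask = (time >= t0) & (time <= t0 + winsize)
        if mask.sum() > npoly + 1:
            coeffs = np.polyfit(time[mask] - t0, flux[mask], npoly)
            fit_stack[k, mask] = np.polyval(coeffs, time[mask] - t0)
    # mean of all window fits covering each sample; samples covered by no
    # window remain NaN
    trend = np.nanmean(fit_stack, axis=0)
    return flux / trend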
def kepflatten_main():
import argparse
parser = argparse.ArgumentParser(
        description=('Remove low frequency variability from time-series, '
                     'preserve transits and flares'),
formatter_class=PyKEArgumentHelpFormatter)
parser.add_argument('infile', help='Name of input file', type=str)
parser.add_argument('--outfile',
help=('Name of FITS file to output.'
' If None, outfile is infile-kepflatten.'),
default=None)
parser.add_argument('--datacol', default='PDCSAP_FLUX',
help='Name of data column to plot', type=str)
parser.add_argument('--errcol', default='PDCSAP_FLUX_ERR',
help='Name of data error column to plot', type=str)
parser.add_argument('--nsig', default=3.,
help='Sigma clipping threshold for outliers',
type=float)
parser.add_argument('--stepsize', default=0.5,
help='Stepsize on which to fit data [days]',
type=float)
parser.add_argument('--winsize', default=5.0,
help=('Window size of data to fit after each step'
' (>= stepsize) [days]'),
type=float)
parser.add_argument('--npoly', default=3,
help='Polynomial order for each fit', type=int)
parser.add_argument('--niter', default=1,
help='Maximum number of clipping iterations', type=int)
parser.add_argument('--ranges', default='0,0',
help='Time ranges of regions to filter',
type=str)
parser.add_argument('--plot', action='store_true', help='Plot result?')
parser.add_argument('--overwrite', action='store_true',
help='Overwrite output file?')
parser.add_argument('--verbose', action='store_true',
help='Write to a log file?')
parser.add_argument('--logfile', '-l', help='Name of ascii log file',
default='kepflatten.log', dest='logfile', type=str)
args = parser.parse_args()
kepflatten(args.infile, args.outfile, args.datacol, args.errcol, args.nsig,
args.stepsize, args.winsize, args.npoly, args.niter,
args.ranges, args.plot, args.overwrite, args.verbose,
args.logfile)
|
mit
|
vrv/tensorflow
|
tensorflow/contrib/learn/python/learn/tests/dataframe/feeding_queue_runner_test.py
|
62
|
5053
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests `FeedingQueueRunner` using arrays and `DataFrames`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.learn.python.learn.dataframe.queues import feeding_functions as ff
from tensorflow.python.client import session
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner_impl
# pylint: disable=g-import-not-at-top
try:
import pandas as pd
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
def get_rows(array, row_indices):
rows = [array[i] for i in row_indices]
return np.vstack(rows)
class FeedingQueueRunnerTestCase(test.TestCase):
"""Tests for `FeedingQueueRunner`."""
def testArrayFeeding(self):
with ops.Graph().as_default():
array = np.arange(32).reshape([16, 2])
q = ff.enqueue_data(array, capacity=100)
batch_size = 3
dq_op = q.dequeue_many(batch_size)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for i in range(100):
indices = [
j % array.shape[0]
for j in range(batch_size * i, batch_size * (i + 1))
]
expected_dq = get_rows(array, indices)
dq = sess.run(dq_op)
np.testing.assert_array_equal(indices, dq[0])
np.testing.assert_array_equal(expected_dq, dq[1])
coord.request_stop()
coord.join(threads)
def testArrayFeedingMultiThread(self):
with ops.Graph().as_default():
array = np.arange(256).reshape([128, 2])
q = ff.enqueue_data(array, capacity=128, num_threads=8, shuffle=True)
batch_size = 3
dq_op = q.dequeue_many(batch_size)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for _ in range(100):
dq = sess.run(dq_op)
indices = dq[0]
expected_dq = get_rows(array, indices)
np.testing.assert_array_equal(expected_dq, dq[1])
coord.request_stop()
coord.join(threads)
def testPandasFeeding(self):
if not HAS_PANDAS:
return
with ops.Graph().as_default():
array1 = np.arange(32)
array2 = np.arange(32, 64)
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(64, 96))
q = ff.enqueue_data(df, capacity=100)
batch_size = 5
      dq_op = q.dequeue_many(batch_size)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for i in range(100):
indices = [
j % array1.shape[0]
for j in range(batch_size * i, batch_size * (i + 1))
]
expected_df_indices = df.index[indices]
expected_rows = df.iloc[indices]
dq = sess.run(dq_op)
np.testing.assert_array_equal(expected_df_indices, dq[0])
for col_num, col in enumerate(df.columns):
np.testing.assert_array_equal(expected_rows[col].values,
dq[col_num + 1])
coord.request_stop()
coord.join(threads)
def testPandasFeedingMultiThread(self):
if not HAS_PANDAS:
return
with ops.Graph().as_default():
array1 = np.arange(128, 256)
array2 = 2 * array1
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(128))
q = ff.enqueue_data(df, capacity=128, num_threads=8, shuffle=True)
batch_size = 5
dq_op = q.dequeue_many(batch_size)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for _ in range(100):
dq = sess.run(dq_op)
indices = dq[0]
expected_rows = df.iloc[indices]
for col_num, col in enumerate(df.columns):
np.testing.assert_array_equal(expected_rows[col].values,
dq[col_num + 1])
coord.request_stop()
coord.join(threads)
if __name__ == "__main__":
test.main()
|
apache-2.0
|
erikgrinaker/BOUT-dev
|
tools/tokamak_grids/pyGridGen/surface.py
|
7
|
1180
|
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import matplotlib.pyplot as plt
import numpy as np
def SURFACE(Z, fig, xtitle=None, ytitle=None, title=None, var=None, sub=None):
    if sub is None:
ax = fig.gca(projection='3d')
else:
ax = fig.add_subplot(sub[0],sub[1],sub[2], projection='3d')
nx=np.shape(Z)[0]
ny=np.shape(Z)[1]
zmin=np.min(Z)
zmax=np.max(Z)
X = np.arange(nx)
Y = np.arange(ny)
X, Y = np.meshgrid(X, Y)
surf = ax.plot_surface(X.T, Y.T, Z, rstride=1, cstride=1, cmap=cm.coolwarm,
linewidth=0, antialiased=False)
ax.set_zlim(zmin-1., zmax+1.)
ax.zaxis.set_major_locator(LinearLocator(10))
ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
ax.set_zticklabels([])
ax.set_xlabel(xtitle)
ax.set_ylabel(ytitle)
    if title is not None: ax.set_zlabel(title)
cbar=fig.colorbar(surf, shrink=1., aspect=15, orientation='horizontal', format='%.1e')
cbar.ax.set_xlabel(var)
cbar.ax.tick_params(labelsize=10)
plt.draw()
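# Hedged usage sketch (not part of the original module): render a simple test
# surface with the SURFACE helper above. The axis titles and the variable
# label are illustrative choices only.
if __name__ == "__main__":
    zz = (np.sin(np.linspace(0.0, np.pi, 40))[:, None]
          * np.cos(np.linspace(0.0, 2.0 * np.pi, 30))[None, :])
    fig = plt.figure()
    SURFACE(zz, fig, xtitle='x index', ytitle='y index', title='z', var='amplitude')
    plt.show()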
|
gpl-3.0
|
vigilv/scikit-learn
|
sklearn/metrics/tests/test_regression.py
|
272
|
6066
|
from __future__ import division, print_function
import numpy as np
from itertools import product
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.metrics import explained_variance_score
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.metrics import median_absolute_error
from sklearn.metrics import r2_score
from sklearn.metrics.regression import _check_reg_targets
def test_regression_metrics(n_samples=50):
y_true = np.arange(n_samples)
y_pred = y_true + 1
assert_almost_equal(mean_squared_error(y_true, y_pred), 1.)
assert_almost_equal(mean_absolute_error(y_true, y_pred), 1.)
assert_almost_equal(median_absolute_error(y_true, y_pred), 1.)
assert_almost_equal(r2_score(y_true, y_pred), 0.995, 2)
assert_almost_equal(explained_variance_score(y_true, y_pred), 1.)
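# Worked check (added for clarity, not in the original test) of the r2_score
# assertion above: with y_true = 0..49 and y_pred = y_true + 1, the residual
# sum of squares is 50 * 1**2 = 50 and the total sum of squares is
# 50 * var(y_true) = 50 * (50**2 - 1) / 12 = 10412.5, hence
# R^2 = 1 - 50 / 10412.5 = 0.9952, matching assert_almost_equal(..., 0.995, 2).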
def test_multioutput_regression():
y_true = np.array([[1, 0, 0, 1], [0, 1, 1, 1], [1, 1, 0, 1]])
y_pred = np.array([[0, 0, 0, 1], [1, 0, 1, 1], [0, 0, 0, 1]])
error = mean_squared_error(y_true, y_pred)
assert_almost_equal(error, (1. / 3 + 2. / 3 + 2. / 3) / 4.)
# mean_absolute_error and mean_squared_error are equal because
# it is a binary problem.
error = mean_absolute_error(y_true, y_pred)
assert_almost_equal(error, (1. / 3 + 2. / 3 + 2. / 3) / 4.)
error = r2_score(y_true, y_pred, multioutput='variance_weighted')
assert_almost_equal(error, 1. - 5. / 2)
error = r2_score(y_true, y_pred, multioutput='uniform_average')
assert_almost_equal(error, -.875)
def test_regression_metrics_at_limits():
assert_almost_equal(mean_squared_error([0.], [0.]), 0.00, 2)
assert_almost_equal(mean_absolute_error([0.], [0.]), 0.00, 2)
assert_almost_equal(median_absolute_error([0.], [0.]), 0.00, 2)
assert_almost_equal(explained_variance_score([0.], [0.]), 1.00, 2)
assert_almost_equal(r2_score([0., 1], [0., 1]), 1.00, 2)
def test__check_reg_targets():
# All of length 3
EXAMPLES = [
("continuous", [1, 2, 3], 1),
("continuous", [[1], [2], [3]], 1),
("continuous-multioutput", [[1, 1], [2, 2], [3, 1]], 2),
("continuous-multioutput", [[5, 1], [4, 2], [3, 1]], 2),
("continuous-multioutput", [[1, 3, 4], [2, 2, 2], [3, 1, 1]], 3),
]
for (type1, y1, n_out1), (type2, y2, n_out2) in product(EXAMPLES,
repeat=2):
if type1 == type2 and n_out1 == n_out2:
y_type, y_check1, y_check2, multioutput = _check_reg_targets(
y1, y2, None)
assert_equal(type1, y_type)
if type1 == 'continuous':
assert_array_equal(y_check1, np.reshape(y1, (-1, 1)))
assert_array_equal(y_check2, np.reshape(y2, (-1, 1)))
else:
assert_array_equal(y_check1, y1)
assert_array_equal(y_check2, y2)
else:
assert_raises(ValueError, _check_reg_targets, y1, y2, None)
def test_regression_multioutput_array():
y_true = [[1, 2], [2.5, -1], [4.5, 3], [5, 7]]
y_pred = [[1, 1], [2, -1], [5, 4], [5, 6.5]]
mse = mean_squared_error(y_true, y_pred, multioutput='raw_values')
mae = mean_absolute_error(y_true, y_pred, multioutput='raw_values')
r = r2_score(y_true, y_pred, multioutput='raw_values')
evs = explained_variance_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(mse, [0.125, 0.5625], decimal=2)
assert_array_almost_equal(mae, [0.25, 0.625], decimal=2)
assert_array_almost_equal(r, [0.95, 0.93], decimal=2)
assert_array_almost_equal(evs, [0.95, 0.93], decimal=2)
# mean_absolute_error and mean_squared_error are equal because
# it is a binary problem.
y_true = [[0, 0]]*4
y_pred = [[1, 1]]*4
mse = mean_squared_error(y_true, y_pred, multioutput='raw_values')
mae = mean_absolute_error(y_true, y_pred, multioutput='raw_values')
r = r2_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(mse, [1., 1.], decimal=2)
assert_array_almost_equal(mae, [1., 1.], decimal=2)
assert_array_almost_equal(r, [0., 0.], decimal=2)
r = r2_score([[0, -1], [0, 1]], [[2, 2], [1, 1]], multioutput='raw_values')
assert_array_almost_equal(r, [0, -3.5], decimal=2)
assert_equal(np.mean(r), r2_score([[0, -1], [0, 1]], [[2, 2], [1, 1]],
multioutput='uniform_average'))
evs = explained_variance_score([[0, -1], [0, 1]], [[2, 2], [1, 1]],
multioutput='raw_values')
assert_array_almost_equal(evs, [0, -1.25], decimal=2)
# Checking for the condition in which both numerator and denominator is
# zero.
y_true = [[1, 3], [-1, 2]]
y_pred = [[1, 4], [-1, 1]]
r2 = r2_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(r2, [1., -3.], decimal=2)
assert_equal(np.mean(r2), r2_score(y_true, y_pred,
multioutput='uniform_average'))
evs = explained_variance_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(evs, [1., -3.], decimal=2)
assert_equal(np.mean(evs), explained_variance_score(y_true, y_pred))
def test_regression_custom_weights():
y_true = [[1, 2], [2.5, -1], [4.5, 3], [5, 7]]
y_pred = [[1, 1], [2, -1], [5, 4], [5, 6.5]]
msew = mean_squared_error(y_true, y_pred, multioutput=[0.4, 0.6])
maew = mean_absolute_error(y_true, y_pred, multioutput=[0.4, 0.6])
rw = r2_score(y_true, y_pred, multioutput=[0.4, 0.6])
evsw = explained_variance_score(y_true, y_pred, multioutput=[0.4, 0.6])
assert_almost_equal(msew, 0.39, decimal=2)
assert_almost_equal(maew, 0.475, decimal=3)
assert_almost_equal(rw, 0.94, decimal=2)
assert_almost_equal(evsw, 0.94, decimal=2)
|
bsd-3-clause
|
xuanyuanking/spark
|
python/pyspark/pandas/missing/indexes.py
|
16
|
9920
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from distutils.version import LooseVersion
import pandas as pd
from pyspark.pandas.missing import unsupported_function, unsupported_property, common
def _unsupported_function(method_name, deprecated=False, reason="", cls="Index"):
return unsupported_function(
class_name="pd.{}".format(cls),
method_name=method_name,
deprecated=deprecated,
reason=reason,
)
def _unsupported_property(property_name, deprecated=False, reason="", cls="Index"):
return unsupported_property(
class_name="pd.{}".format(cls),
property_name=property_name,
deprecated=deprecated,
reason=reason,
)
class MissingPandasLikeIndex(object):
# Properties
nbytes = _unsupported_property("nbytes")
# Functions
argsort = _unsupported_function("argsort")
asof_locs = _unsupported_function("asof_locs")
format = _unsupported_function("format")
get_indexer = _unsupported_function("get_indexer")
get_indexer_for = _unsupported_function("get_indexer_for")
get_indexer_non_unique = _unsupported_function("get_indexer_non_unique")
get_loc = _unsupported_function("get_loc")
get_slice_bound = _unsupported_function("get_slice_bound")
get_value = _unsupported_function("get_value")
groupby = _unsupported_function("groupby")
is_ = _unsupported_function("is_")
is_lexsorted_for_tuple = _unsupported_function("is_lexsorted_for_tuple")
join = _unsupported_function("join")
map = _unsupported_function("map")
putmask = _unsupported_function("putmask")
ravel = _unsupported_function("ravel")
reindex = _unsupported_function("reindex")
searchsorted = _unsupported_function("searchsorted")
slice_indexer = _unsupported_function("slice_indexer")
slice_locs = _unsupported_function("slice_locs")
sortlevel = _unsupported_function("sortlevel")
to_flat_index = _unsupported_function("to_flat_index")
to_native_types = _unsupported_function("to_native_types")
where = _unsupported_function("where")
# Deprecated functions
is_mixed = _unsupported_function("is_mixed")
get_values = _unsupported_function("get_values", deprecated=True)
set_value = _unsupported_function("set_value")
# Properties we won't support.
array = common.array(_unsupported_property)
duplicated = common.duplicated(_unsupported_property)
# Functions we won't support.
memory_usage = common.memory_usage(_unsupported_function)
__iter__ = common.__iter__(_unsupported_function)
if LooseVersion(pd.__version__) < LooseVersion("1.0"):
# Deprecated properties
strides = _unsupported_property("strides", deprecated=True)
data = _unsupported_property("data", deprecated=True)
itemsize = _unsupported_property("itemsize", deprecated=True)
base = _unsupported_property("base", deprecated=True)
flags = _unsupported_property("flags", deprecated=True)
# Deprecated functions
get_duplicates = _unsupported_function("get_duplicates", deprecated=True)
summary = _unsupported_function("summary", deprecated=True)
contains = _unsupported_function("contains", deprecated=True)
class MissingPandasLikeDatetimeIndex(MissingPandasLikeIndex):
# Properties
nanosecond = _unsupported_property("nanosecond", cls="DatetimeIndex")
date = _unsupported_property("date", cls="DatetimeIndex")
time = _unsupported_property("time", cls="DatetimeIndex")
timetz = _unsupported_property("timetz", cls="DatetimeIndex")
tz = _unsupported_property("tz", cls="DatetimeIndex")
freq = _unsupported_property("freq", cls="DatetimeIndex")
freqstr = _unsupported_property("freqstr", cls="DatetimeIndex")
inferred_freq = _unsupported_property("inferred_freq", cls="DatetimeIndex")
# Functions
snap = _unsupported_function("snap", cls="DatetimeIndex")
tz_convert = _unsupported_function("tz_convert", cls="DatetimeIndex")
tz_localize = _unsupported_function("tz_localize", cls="DatetimeIndex")
to_period = _unsupported_function("to_period", cls="DatetimeIndex")
to_perioddelta = _unsupported_function("to_perioddelta", cls="DatetimeIndex")
to_pydatetime = _unsupported_function("to_pydatetime", cls="DatetimeIndex")
mean = _unsupported_function("mean", cls="DatetimeIndex")
std = _unsupported_function("std", cls="DatetimeIndex")
class MissingPandasLikeCategoricalIndex(MissingPandasLikeIndex):
# Functions
rename_categories = _unsupported_function("rename_categories", cls="CategoricalIndex")
reorder_categories = _unsupported_function("reorder_categories", cls="CategoricalIndex")
add_categories = _unsupported_function("add_categories", cls="CategoricalIndex")
remove_categories = _unsupported_function("remove_categories", cls="CategoricalIndex")
remove_unused_categories = _unsupported_function(
"remove_unused_categories", cls="CategoricalIndex"
)
set_categories = _unsupported_function("set_categories", cls="CategoricalIndex")
as_ordered = _unsupported_function("as_ordered", cls="CategoricalIndex")
as_unordered = _unsupported_function("as_unordered", cls="CategoricalIndex")
map = _unsupported_function("map", cls="CategoricalIndex")
class MissingPandasLikeMultiIndex(object):
# Deprecated properties
strides = _unsupported_property("strides", deprecated=True)
data = _unsupported_property("data", deprecated=True)
itemsize = _unsupported_property("itemsize", deprecated=True)
# Functions
argsort = _unsupported_function("argsort")
asof_locs = _unsupported_function("asof_locs")
equal_levels = _unsupported_function("equal_levels")
factorize = _unsupported_function("factorize")
format = _unsupported_function("format")
get_indexer = _unsupported_function("get_indexer")
get_indexer_for = _unsupported_function("get_indexer_for")
get_indexer_non_unique = _unsupported_function("get_indexer_non_unique")
get_loc = _unsupported_function("get_loc")
get_loc_level = _unsupported_function("get_loc_level")
get_locs = _unsupported_function("get_locs")
get_slice_bound = _unsupported_function("get_slice_bound")
get_value = _unsupported_function("get_value")
groupby = _unsupported_function("groupby")
is_ = _unsupported_function("is_")
is_lexsorted = _unsupported_function("is_lexsorted")
is_lexsorted_for_tuple = _unsupported_function("is_lexsorted_for_tuple")
join = _unsupported_function("join")
map = _unsupported_function("map")
putmask = _unsupported_function("putmask")
ravel = _unsupported_function("ravel")
reindex = _unsupported_function("reindex")
remove_unused_levels = _unsupported_function("remove_unused_levels")
reorder_levels = _unsupported_function("reorder_levels")
searchsorted = _unsupported_function("searchsorted")
set_codes = _unsupported_function("set_codes")
set_levels = _unsupported_function("set_levels")
slice_indexer = _unsupported_function("slice_indexer")
slice_locs = _unsupported_function("slice_locs")
sortlevel = _unsupported_function("sortlevel")
to_flat_index = _unsupported_function("to_flat_index")
to_native_types = _unsupported_function("to_native_types")
truncate = _unsupported_function("truncate")
where = _unsupported_function("where")
# Deprecated functions
is_mixed = _unsupported_function("is_mixed")
get_duplicates = _unsupported_function("get_duplicates", deprecated=True)
get_values = _unsupported_function("get_values", deprecated=True)
set_value = _unsupported_function("set_value", deprecated=True)
    # Properties and functions we won't support.
array = common.array(_unsupported_property)
duplicated = common.duplicated(_unsupported_property)
codes = _unsupported_property(
"codes",
reason="'codes' requires to collect all data into the driver which is against the "
"design principle of pandas-on-Spark. Alternatively, you could call 'to_pandas()' and"
" use 'codes' property in pandas.",
)
levels = _unsupported_property(
"levels",
reason="'levels' requires to collect all data into the driver which is against the "
"design principle of pandas-on-Spark. Alternatively, you could call 'to_pandas()' and"
" use 'levels' property in pandas.",
)
__iter__ = common.__iter__(_unsupported_function)
    # Functions we won't support.
memory_usage = common.memory_usage(_unsupported_function)
if LooseVersion(pd.__version__) < LooseVersion("1.0"):
# Deprecated properties
base = _unsupported_property("base", deprecated=True)
labels = _unsupported_property("labels", deprecated=True)
flags = _unsupported_property("flags", deprecated=True)
# Deprecated functions
set_labels = _unsupported_function("set_labels")
summary = _unsupported_function("summary", deprecated=True)
to_hierarchical = _unsupported_function("to_hierarchical", deprecated=True)
contains = _unsupported_function("contains", deprecated=True)
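# Hedged sketch (not part of the original module): each attribute created by
# the factories above is a stand-in that raises a descriptive "not
# implemented" error when it is used. The exact exception type raised by the
# underlying helper is assumed here, not pinned to a specific Spark version.
def _demo_unsupported_function():
    fn = _unsupported_function("argsort")
    try:
        fn(None)  # the first positional argument plays the role of `self`
    except Exception as exc:
        return "{}: {}".format(type(exc).__name__, exc)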
|
apache-2.0
|
lcpt/xc
|
python_modules/rough_calculations/ng_retaining_wall.py
|
1
|
32976
|
# -*- coding: utf-8 -*-
from __future__ import division
'''Routines for cantilever retaining walls design.'''
__author__= "Luis C. Pérez Tato (LCPT)"
__copyright__= "Copyright 2016, LCPT"
__license__= "GPL"
__version__= "3.0"
__email__= "[email protected]"
import sys
from postprocess.reports import common_formats as fmt
from postprocess.reports import draw_schema_armature_mur as draw_schema
from postprocess import get_reactions
import math
import scipy.interpolate
import matplotlib
#matplotlib.use('PS')
import matplotlib.pyplot as plt
from materials import typical_materials
from materials.sections import section_properties
from materials.sia262 import SIA262_materials
from model.geometry import retaining_wall_geometry
from rough_calculations import ng_rebar_def
from rough_calculations import ng_rc_section
import os
from miscUtils import LogMessages as lmsg
import geom
import xc
from solution import predefined_solutions
def filterRepeatedValues(yList,mList,vList):
sz= len(yList)
mapM={}
mapV= {}
for i in range(0,sz):
y= abs(yList[i])
mapM[y]= mList[i]
mapV[y]= vList[i]
retY= list()
retM= list()
retV= list()
for y in mapM:
retY.append(y)
retY.sort()
for y in retY:
retM.append(mapM[y])
retV.append(mapV[y])
return retY, retM, retV
class InternalForces(object):
    '''Internal force envelopes obtained for a retaining wall.'''
def __init__(self,y,mdMax,vdMax,MdSemelle,VdSemelle):
self.y, self.mdMax, self.vdMax= filterRepeatedValues(y,mdMax,vdMax)
self.interpolate()
self.stemHeight= self.y[-1]
print 'stemHeight= ', self.stemHeight
self.MdSemelle= MdSemelle
self.VdSemelle= VdSemelle
def interpolate(self):
self.mdMaxVoile= scipy.interpolate.interp1d(self.y,self.mdMax)
self.vdMaxVoile= scipy.interpolate.interp1d(self.y,self.vdMax)
def __imul__(self,f):
        # scale the stored envelopes (floats are immutable, so rebuild the lists)
        self.mdMax= [m*f for m in self.mdMax]
        self.vdMax= [v*f for v in self.vdMax]
self.interpolate()
self.MdSemelle*=f
self.VdSemelle*=f
return self
def clone(self):
return InternalForces(self.y, self.mdMax, self.vdMax,self.MdSemelle,self.VdSemelle)
def __mul__(self, f):
retval= self.clone()
retval*= f
return retval
def __rmul__(self,f):
return self*f
def MdEncastrement(self,footingThickness):
'''Bending moment (envelope) at stem base.'''
yEncastrement= self.stemHeight-footingThickness/2.0
return self.Md(yEncastrement)
def VdEncastrement(self,epaisseurEncastrement):
'''Shear force (envelope) at stem base.'''
yV= self.stemHeight-epaisseurEncastrement
return abs(self.vdMaxVoile(yV))
def Vd(self, yCoupe):
'''Shear (envelope) at height yCoupe.'''
return abs(self.vdMaxVoile(yCoupe))
def Md(self, yCoupe):
'''Bending moment (envelope) at height yCoupe.'''
return abs(self.mdMaxVoile(yCoupe))
def getYVoile(self,hCoupe):
return self.stemHeight-hCoupe
def writeGraphic(self,fileName):
'''Draws a graphic of internal forces (envelopes) in
the wall stem.'''
z= []
for yi in self.y:
z.append(self.stemHeight-yi)
m= []
for mi in self.mdMax:
m.append(mi/1e3)
v= []
for vi in self.vdMax:
v.append(vi/1e3)
plt.plot(m,z,'-', v, z,'--')
plt.legend(['Md (kN m/m)', 'Vd (kN/m)'], loc='best')
plt.title("Efforts internes.")
plt.savefig(fileName)
plt.close()
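# Hedged usage sketch (not part of the original module): building an envelope
# object from plain Python lists. The numerical values are illustrative only.
def _internal_forces_example():
    y= [0.0, 1.0, 2.0, 3.0]          # height along the stem (m)
    md= [0.0, 5e3, 20e3, 45e3]       # design bending moments (N m/m)
    vd= [0.0, 4e3, 8e3, 12e3]        # design shear forces (N/m)
    return InternalForces(y, md, vd, MdSemelle= 30e3, VdSemelle= 10e3)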
class RetainingWallReinforcement(dict):
''' Simplified reinforcement for a cantilever retaining wall.'''
def __init__(self,concreteCover=40e-3, steel= SIA262_materials.B500B):
'''Constructor '''
super(RetainingWallReinforcement, self).__init__()
self.concreteCover= concreteCover
#Materials.
self.steel= steel
#Default reinforcement
AdefA= ng_rebar_def.RebarFamily(self.steel,8e-3,0.15,concreteCover)
Adef= AdefA
for i in range(1,15):
self[i]= Adef
# #Armature de peau semelle
# R= self.footingThickness-2*self.concreteCover-8e-3
# n= math.ceil(R/0.15)+1
# ecart= R/(n-1)
# self[10]= FamNBars(self.steel,n,8e-3,ecart,concreteCover)
# #Armature couronnement.
# R= self.stemTopWidth-2*self.concreteCover-8e-3
# n= math.ceil(R/0.15)+1
# ecart= R/(n-1)
# self[13]= FamNBars(self.steel,n,8e-3,ecart,concreteCover)
def setArmature(self,index,armature):
'''Assigns armature.'''
self[index]= armature
def getArmature(self,index):
'''Return armature at index.'''
return self[index]
class WallStabilityResults(object):
def __init__(self,wall,combinations,foundationSoilModel,gammaR= 1):
self.Foverturning= 1e15
self.FoverturningComb= ''
self.Fsliding= 1e15
self.FslidingComb= ''
self.Fbearing= 1e15
self.FbearingComb= ''
for comb in combinations:
reactions= wall.resultComb(comb)
R= reactions.getResultant()
Foverturning= wall.getOverturningSafetyFactor(R,gammaR)
if(Foverturning<self.Foverturning):
self.Foverturning= Foverturning
self.FoverturningComb= comb
Fsliding= wall.getSlidingSafetyFactor(R,gammaR,foundationSoilModel)
if(Fsliding<self.Fsliding):
self.Fsliding= Fsliding
self.FslidingComb= comb
Fbearing= wall.getBearingPressureSafetyFactor(R,foundationSoilModel,1.0)
if(Fbearing<self.Fbearing):
self.Fbearing= Fbearing
self.FbearingComb= comb
def writeOutput(self,outputFile,name):
'''Write results in LaTeX format.'''
outputFile.write("\\begin{center}\n")
outputFile.write("\\begin{tabular}[H]{|l|c|c|c|}\n")
outputFile.write("\\hline\n")
outputFile.write("\\multicolumn{4}{|c|}{\\textsc{Verification stabilité mur: "+name+"}}\\\\\n")
outputFile.write("\\hline\n")
outputFile.write("Vérification: & $F_{disp}$ & $F_{req}$ & Combinaison\\\\\n")
outputFile.write("\\hline\n")
outputFile.write("Renversement: & " + fmt.Factor.format(self.Foverturning) +" & 1.00 & "+self.FoverturningComb+'\\\\\n')
outputFile.write("Glissement: & " + fmt.Factor.format(self.Fsliding) +" & 1.00 & "+self.FslidingComb+'\\\\\n')
outputFile.write("Poinçonnement: & " + fmt.Factor.format(self.Fbearing) +" & 1.00 & "+self.FbearingComb+'\\\\\n')
outputFile.write("\\hline\n")
outputFile.write("\\multicolumn{4}{|l|}{$F_{disp}$: sécurité disponible.}\\\\\n")
outputFile.write("\\multicolumn{4}{|l|}{$F_{req}$: sécurité requise.}\\\\\n")
outputFile.write("\\hline\n")
outputFile.write("\\end{tabular}\n")
outputFile.write("\\end{center}\n")
class WallULSResults(object):
def __init__(self,internalForces):
self.internalForces= internalForces
class WallSLSResults(WallULSResults):
def __init__(self,internalForces,rotation, rotationComb):
super(WallSLSResults,self).__init__(internalForces)
self.rotation= rotation
self.rotationComb= rotationComb
def writeOutput(self,outputFile,name):
'''Write results in LaTeX format.'''
outputFile.write("\\begin{center}\n")
outputFile.write("\\begin{tabular}[H]{|l|c|c|c|}\n")
outputFile.write("\\hline\n")
outputFile.write("\\multicolumn{3}{|c|}{\\textsc{Verification rotation mur: "+name+"}}\\\\\n")
outputFile.write("\\hline\n")
outputFile.write("$\\beta_{disp} (\\permil)$ & $\\beta_{req}(\\permil)$ & Combinaison\\\\\n")
outputFile.write("\\hline\n")
outputFile.write(fmt.Factor.format(self.rotation*1000) +" & 2.00 & "+self.rotationComb+'\\\\\n')
outputFile.write("\\hline\n")
outputFile.write("\\multicolumn{3}{|l|}{$\\beta_{disp}$: rotation maximale calculée du mur.}\\\\\n")
outputFile.write("\\multicolumn{3}{|l|}{$\\beta_{req}$: rotation maximale autorisée du mur.}\\\\\n")
outputFile.write("\\hline\n")
outputFile.write("\\end{tabular}\n")
outputFile.write("\\end{center}\n")
class RetainingWall(retaining_wall_geometry.CantileverRetainingWallGeometry):
'''Cantilever retaining wall.'''
b= 1.0
def __init__(self,name= 'prb',concreteCover=40e-3,stemBottomWidth=0.25,stemTopWidth=0.25,footingThickness= 0.25):
'''Constructor '''
super(RetainingWall,self).__init__(name,stemBottomWidth,stemTopWidth,footingThickness)
#Materials.
self.concrete= SIA262_materials.c25_30
self.reinforcement= RetainingWallReinforcement(concreteCover)
def getBasicAnchorageLength(self,index):
'''Returns basic anchorage length for the reinforcement at "index".'''
return self.reinforcement.getArmature(index).getBasicAnchorageLength(self.concrete)
def getSection1(self):
'''Returns RC section for armature in position 1.'''
return ng_rc_section.RCSection(self.reinforcement[1],self.concrete,self.b,self.stemBottomWidth)
def getSection2(self,y):
'''Returns RC section for armature in position 2.'''
c= self.getDepth(y)
return ng_rc_section.RCSection(self.reinforcement[2],self.concrete,self.b,c)
def getSection3(self):
'''Returns RC section for armature in position 3.'''
return ng_rc_section.RCSection(self.reinforcement[3],self.concrete,self.b,self.footingThickness)
def getSection4(self):
'''Returns RC section for armature in position 4.'''
return ng_rc_section.RCSection(self.reinforcement[4],self.concrete,self.b,self.stemBottomWidth)
def getSection6(self):
'''Returns RC section for armature in position 6.'''
return ng_rc_section.RCSection(self.reinforcement[6],self.concrete,self.b,self.stemTopWidth)
def getSection7(self):
'''Returns RC section for armature in position 7.'''
return ng_rc_section.RCSection(self.reinforcement[7],self.concrete,self.b,self.footingThickness)
def getSection8(self):
'''Returns RC section for armature in position 8.'''
return ng_rc_section.RCSection(self.reinforcement[8],self.concrete,self.b,self.footingThickness)
def getSection11(self):
'''Returns RC section for armature in position 11.'''
return ng_rc_section.RCSection(self.reinforcement[11],self.concrete,self.b,(self.stemTopWidth+self.stemBottomWidth)/2.0)
def setULSInternalForcesEnvelope(self,wallInternalForces):
        '''Assigns the ultimate limit state internal forces envelope for the stem.'''
if(hasattr(self,'stemHeight')):
if(self.getWFStemHeigth()!=wallInternalForces.stemHeight):
                lmsg.warning('stem height (' + str(self.stemHeight) + ' m) different from length of internal forces envelope law ('+ str(wallInternalForces.stemHeight)+ ' m).')
else:
self.stemHeight= wallInternalForces.stemHeight-self.footingThickness/2.0
self.internalForcesULS= wallInternalForces
def setSLSInternalForcesEnvelope(self,wallInternalForces):
        '''Assigns the serviceability limit state internal forces envelope for the stem.'''
if(hasattr(self,'stemHeight')):
if(self.getWFStemHeigth()!=wallInternalForces.stemHeight):
                lmsg.warning('stem height (' + str(self.stemHeight) + ' m) different from length of internal forces envelope law ('+ str(wallInternalForces.stemHeight)+ ' m).')
else:
self.stemHeight= wallInternalForces.stemHeight
self.internalForcesSLS= wallInternalForces
def writeDef(self,pth,outputFile):
'''Write wall definition in LaTeX format.'''
pathFiguraEPS= pth+self.name+".eps"
pathFiguraPDF= pth+self.name+".pdf"
self.internalForcesULS.writeGraphic(pathFiguraEPS)
os.system("convert "+pathFiguraEPS+" "+pathFiguraPDF)
outputFile.write("\\begin{table}\n")
outputFile.write("\\begin{center}\n")
outputFile.write("\\begin{tabular}[H]{|l|}\n")
outputFile.write("\\hline\n")
outputFile.write("\\multicolumn{1}{|c|}{\\textsc{"+self.name+"}}\\\\\n")
outputFile.write("\\hline\n")
outputFile.write("\\begin{tabular}{c|l}\n")
outputFile.write("\\begin{minipage}{85mm}\n")
outputFile.write("\\vspace{2mm}\n")
outputFile.write("\\begin{center}\n")
outputFile.write("\\includegraphics[width=80mm]{"+self.name+"}\n")
outputFile.write("\\end{center}\n")
outputFile.write("\\vspace{1pt}\n")
outputFile.write("\\end{minipage} & \n")
self.writeGeometry(outputFile)
outputFile.write("\\end{tabular} \\\\\n")
outputFile.write("\\hline\n")
outputFile.write("\\begin{tabular}{llll}\n")
outputFile.write("\\multicolumn{3}{c}{\\textsc{Matériels}}\\\\\n")
outputFile.write(" Béton: " + self.concrete.materialName +" & ")
outputFile.write(" Acier: " + self.reinforcement.steel.materialName +" & ")
outputFile.write(" ConcreteCover: "+ fmt.Diam.format(self.reinforcement.concreteCover*1e3)+ " mm\\\\\n")
outputFile.write("\\end{tabular} \\\\\n")
outputFile.write("\\hline\n")
outputFile.write("\\end{tabular}\n")
outputFile.write("\\caption{Matériels et dimensions mur "+ self.name +"} \\label{tb_def_"+self.name+"}\n")
outputFile.write("\\end{center}\n")
outputFile.write("\\end{table}\n")
def writeResult(self,pth):
'''Write reinforcement verification results in LaTeX format.'''
outputFile= open(pth+self.name+".tex","w")
self.writeDef(pth,outputFile)
self.stability_results.writeOutput(outputFile,self.name)
self.sls_results.writeOutput(outputFile,self.name)
outputFile.write("\\bottomcaption{Calcul armatures mur "+ self.name +"} \\label{tb_"+self.name+"}\n")
outputFile.write("\\tablefirsthead{\\hline\n\\multicolumn{1}{|c|}{\\textsc{Armatures mur "+self.name+"}}\\\\\\hline\n}\n")
outputFile.write("\\tablehead{\\hline\n\\multicolumn{1}{|c|}{\\textsc{"+self.name+" (suite)}}\\\\\\hline\n}\n")
outputFile.write("\\tabletail{\\hline \\multicolumn{1}{|r|}{../..}\\\\\\hline}\n")
outputFile.write("\\tablelasttail{\\hline}\n")
outputFile.write("\\begin{center}\n")
outputFile.write("\\begin{supertabular}[H]{|l|}\n")
outputFile.write("\\hline\n")
#Coupe 1. Béton armé. Encastrement.
C1= self.getSection1()
VdEncastrement= self.internalForcesULS.VdEncastrement(self.stemBottomWidth)
MdEncastrement= self.internalForcesULS.MdEncastrement(self.footingThickness)
outputFile.write("\\textbf{Armature 1 (armature extérieure en attente) :} \\\\\n")
NdEncastrement= 0.0 #we neglect axial force
C1.writeResultFlexion(outputFile,NdEncastrement, MdEncastrement,VdEncastrement)
C1.writeResultStress(outputFile,self.internalForcesSLS.MdEncastrement(self.footingThickness))
#Coupe 2. Béton armé. Voile
yCoupe2= self.internalForcesULS.getYVoile(self.getBasicAnchorageLength(1))
C2= self.getSection2(yCoupe2)
Nd2= 0.0 #we neglect axial force
Vd2= self.internalForcesULS.Vd(yCoupe2)
Md2= self.internalForcesULS.Md(yCoupe2)
outputFile.write("\\textbf{Armature 2 (armature extériéure voile):}\\\\\n")
C2.writeResultFlexion(outputFile,Nd2,Md2,Vd2)
C2.writeResultStress(outputFile,self.internalForcesSLS.Md(yCoupe2))
#Coupe 3. Béton armé. Semelle
C3= self.getSection3()
Nd3= 0.0 #we neglect axial force
Vd3= self.internalForcesULS.VdSemelle
Md3= self.internalForcesULS.MdSemelle
outputFile.write("\\textbf{Armature 3 (armature supérieure semelle):}\\\\\n")
C3.writeResultFlexion(outputFile,Nd3,Md3,Vd3)
C3.writeResultStress(outputFile,self.internalForcesSLS.MdSemelle)
C4= self.getSection4()
C5= C4
C6= self.getSection6()
C7= self.getSection7()
C8= self.getSection8()
C9= C8
C11= self.getSection11()
C12= C11
#Coupe 4. armature intérieure en attente. Encastrement voile
outputFile.write("\\textbf{Armature 4 (armature intérieure en attente):}\\\\\n")
C4.writeResultCompression(outputFile,0.0,C12.tensionRebars.getAs())
#Coupe 5. armature intérieure en voile.
outputFile.write("\\textbf{Armature 5 (armature intérieure en voile):}\\\\\n")
C5.writeResultCompression(outputFile,0.0,C12.tensionRebars.getAs())
#Coupe 6. armature couronnement.
outputFile.write("\\textbf{Armature 6 (armature couronnement):}\\\\\n")
C6.writeResultFlexion(outputFile,0.0,0.0,0.0)
#Coupe 7. armature inférieure semelle.
outputFile.write("\\textbf{Armature 7 (armature trsv. inférieure semelle):}\\\\\n")
C7.writeResultCompression(outputFile,0.0,C8.tensionRebars.getAs())
#Coupe 8. armature long. inférieure semelle.
outputFile.write("\\textbf{Armature 8 (armature long. inférieure semelle):}\\\\\n")
C8.writeResultTraction(outputFile,0.0)
#Coupe 9. armature long. supérieure semelle.
outputFile.write("\\textbf{Armature 9 (armature long. supérieure semelle):}\\\\\n")
C9.writeResultTraction(outputFile,0.0)
#Armature 10. armature de peau semelle.
outputFile.write("\\textbf{Armature 10 (armature de peau semelle):}\\\\\n")
outputFile.write(" --\\\\\n")
#writeRebars(outputFile,self.concrete,self.reinforcement[10],1e-5)
#Coupe 11. armature long. extérieure voile.
outputFile.write("\\textbf{Armature 11 (armature long. extérieure voile):}\\\\\n")
C11.writeResultTraction(outputFile,0.0)
#Coupe 12. armature long. intérieure voile.
outputFile.write("\\textbf{Armature 12 (armature long. intérieure voile):}\\\\\n")
C12.writeResultTraction(outputFile,0.0)
#Armature 13. armature long. couronnement.
outputFile.write("\\textbf{Armature 13 (armature long. couronnement):}\\\\\n")
outputFile.write(" --\\\\\n")
#writeRebars(outputFile,self.concrete,self.reinforcement[13],1e-5)
outputFile.write("\\hline\n")
outputFile.write("\\end{supertabular}\n")
outputFile.write("\\end{center}\n")
outputFile.close()
def drawSchema(self,pth):
'''Retaining wall scheme drawing in LaTeX format.'''
outputFile= open(pth+'schema_'+self.name+".tex","w")
outputFile.write("\\begin{figure}\n")
outputFile.write("\\begin{center}\n")
outputFile.write(draw_schema.hdr)
for l in draw_schema.lines:
outputFile.write(l)
defStrings= {}
defStrings[1]= self.getSection1().tensionRebars.getDefStrings()
yCoupe2= self.internalForcesULS.getYVoile(self.getBasicAnchorageLength(1))
defStrings[2]= self.getSection2(yCoupe2).tensionRebars.getDefStrings()
defStrings[3]= self.getSection3().tensionRebars.getDefStrings()
defStrings[4]= self.getSection4().tensionRebars.getDefStrings()
defStrings[5]= self.getSection4().tensionRebars.getDefStrings() #C5==C4
defStrings[6]= self.getSection6().tensionRebars.getDefStrings()
defStrings[7]= self.getSection7().tensionRebars.getDefStrings()
defStrings[8]= self.getSection8().tensionRebars.getDefStrings()
defStrings[9]= self.getSection8().tensionRebars.getDefStrings() #C9==C8
#defStrings[10]= self.getSection10().tensionRebars.getDefStrings()
defStrings[11]= self.getSection11().tensionRebars.getDefStrings()
defStrings[12]= self.getSection11().tensionRebars.getDefStrings() #C12==C11
rebarAnno= draw_schema.getRebarAnnotationLines(defStrings)
for l in rebarAnno:
outputFile.write(l)
outputFile.write(draw_schema.tail)
outputFile.write("\\end{center}\n")
outputFile.write("\\caption{Schéma armatures mur "+ self.name +"} \\label{fg_"+self.name+"}\n")
outputFile.write("\\end{figure}\n")
def createFEProblem(self, title):
self.feProblem= xc.FEProblem()
        self.feProblem.title= title
return self.feProblem
def genMesh(self,nodes,springMaterials):
self.defineWireframeModel(nodes)
nodes.newSeedNode()
preprocessor= self.modelSpace.preprocessor
trfs= preprocessor.getTransfCooHandler
transformationName= self.name+'LinearTrf'
self.trf= trfs.newLinearCrdTransf2d(transformationName)
wallMatData= typical_materials.MaterialData(name=self.name+'Concrete',E=self.concrete.getEcm(),nu=0.2,rho=2500)
foundationSection= section_properties.RectangularSection(self.name+"FoundationSection",self.b,self.footingThickness)
foundationMaterial= foundationSection.defElasticShearSection2d(preprocessor,wallMatData) #Foundation elements material.
elementSize= 0.2
seedElemHandler= preprocessor.getElementHandler.seedElemHandler
seedElemHandler.defaultMaterial= foundationSection.sectionName
seedElemHandler.defaultTransformation= transformationName
seedElem= seedElemHandler.newElement("ElasticBeam2d",xc.ID([0,0]))
self.wallSet= preprocessor.getSets.defSet("wallSet")
self.heelSet= preprocessor.getSets.defSet("heelSet")
self.toeSet= preprocessor.getSets.defSet("toeSet")
self.foundationSet= preprocessor.getSets.defSet("foundationSet")
for lineName in ['heel','toe']:
l= self.wireframeModelLines[lineName]
l.setElemSize(elementSize)
l.genMesh(xc.meshDir.I)
for e in l.getElements():
self.foundationSet.getElements.append(e)
self.wallSet.getElements.append(e)
if(lineName=='heel'):
self.heelSet.getElements.append(e)
else:
self.toeSet.getElements.append(e)
self.foundationSet.fillDownwards()
stemSection= section_properties.RectangularSection(self.name+"StemSection",self.b,(self.stemTopWidth+self.stemBottomWidth)/2.0)
stemMaterial= stemSection.defElasticShearSection2d(preprocessor,wallMatData) #Stem elements material.
self.stemSet= preprocessor.getSets.defSet("stemSet")
for lineName in ['stem']:
l= self.wireframeModelLines[lineName]
l.setElemSize(elementSize)
seedElemHandler.defaultMaterial= stemSection.sectionName
l.genMesh(xc.meshDir.I)
for e in l.getElements():
y= -e.getPosCentroid(True).y
h= self.getDepth(y)
stemSection.h= h
e.sectionProperties= stemSection.getCrossSectionProperties2D(wallMatData)
self.stemSet.getElements.append(e)
self.wallSet.getElements.append(e)
# Springs on nodes.
self.foundationSet.computeTributaryLengths(False)
self.fixedNodes= []
elasticBearingNodes= self.foundationSet.getNodes
kX= springMaterials[0] #Horizontal
kSx= kX.E
kY= springMaterials[1] #Vertical
kSy= kY.E
lngTot= 0.0
for n in elasticBearingNodes:
lT= n.getTributaryLength()
lngTot+= lT
#print "tag= ", n.tag, " lT= ", lT
#print "before k= ", kY.E
kX.E= kSx*lT
kY.E= kSy*lT
fixedNode, newElem= self.modelSpace.setBearing(n.tag,["kX","kY"])
self.fixedNodes.append(fixedNode)
self.stemSet.fillDownwards()
self.wallSet.fillDownwards()
def createSelfWeightLoads(self,rho= 2500, grav= 9.81):
'''Create the loads of the concrete weight.'''
for e in self.wallSet.getElements:
selfWeightLoad= grav*rho*e.sectionProperties.A
e.vector2dUniformLoadGlobal(xc.Vector([0.0, -selfWeightLoad]))
def createDeadLoad(self,heelFillDepth,toeFillDepth,rho= 2000, grav= 9.81):
'''Create the loads of earth self weigth.'''
for e in self.heelSet.getElements:
heelFillLoad= grav*rho*heelFillDepth
e.vector2dUniformLoadGlobal(xc.Vector([0.0, -heelFillLoad]))
for e in self.toeSet.getElements:
toeFillLoad= grav*rho*toeFillDepth
e.vector2dUniformLoadGlobal(xc.Vector([0.0, -toeFillLoad]))
def createEarthPressureLoadOnStem(self,pressureModel,vDir= xc.Vector([-1.0,0.0]),Delta= 0.0):
'''Create the loads of the earth pressure over the stem.
:param pressureModel: (obj) earth pressure model.
:param vDir: (xc.Vector) direction for the pressures.
'''
pressureModel.appendLoadToCurrentLoadPattern(self.stemSet,vDir,iCoo= 1,delta= Delta)
def createEarthPressureLoadOnHeelEnd(self,pressureModel):
'''Create the loads of the earth pressure over the vertical face
at the end of the heel.
:param pressureModel: (obj) earth pressure model.
'''
n= self.wireframeModelPoints['heelEnd'].getNode()
z= n.getInitialPos2d.y
force= pressureModel.getPressure(z)*self.footingThickness
loadVector= force*xc.Vector([-1.0,0.0,0.0])
n.newLoad(loadVector)
def createEarthPressureLoadOnToeEnd(self,pressureModel):
'''Create the loads of the earth pressure over the vertical face
at the end of the toe.
:param pressureModel: (obj) earth pressure model.
'''
n= self.wireframeModelPoints['toeEnd'].getNode()
z= n.getInitialPos2d.y
force= pressureModel.getPressure(z)*self.footingThickness
loadVector= force*xc.Vector([1.0,0.0,0.0])
n.newLoad(loadVector)
def createBackFillPressures(self,pressureModel,Delta= 0.0):
'''Create backfill earth pressures over the wall.
:param pressureModel: (obj) earth pressure model for the backfill.
'''
self.createEarthPressureLoadOnStem(pressureModel,Delta= Delta)
self.createEarthPressureLoadOnHeelEnd(pressureModel)
def createFrontFillPressures(self,pressureModel,Delta= 0.0):
'''Create front fill earth pressures over the wall.
:param pressureModel: (obj) earth pressure model for the backfill.
'''
self.createEarthPressureLoadOnStem(pressureModel,xc.Vector([1.0,0.0]),Delta= Delta)
self.createEarthPressureLoadOnToeEnd(pressureModel)
def createVerticalLoadOnHeel(self,loadOnBackFill):
'''Create the loads over the heel due to a load acting on the backfill.
:param loadOnBackFill: (obj) load acting on the backfill.
'''
loadOnBackFill.appendVerticalLoadToCurrentLoadPattern(self.heelSet,xc.Vector([0.0,-1.0]),0,1)
def createPressuresFromLoadOnBackFill(self, loadOnBackFill,Delta= 0.0):
'''Create the pressures on the stem and on the heel due to
a load acting on the backfill.
:param loadOnBackFill: (obj) load acting on the backfill.
'''
self.createEarthPressureLoadOnStem(loadOnBackFill,Delta= Delta) #Pressures on stem.
self.createEarthPressureLoadOnHeelEnd(loadOnBackFill) #Force on heel end.
self.createVerticalLoadOnHeel(loadOnBackFill) #Vertical load on heel.
def createLoadOnTopOfStem(self,loadVector):
'''Create a load acting on the node at the top of the stem.
:param loadVector: (vector) vector defining the load.
'''
n= self.wireframeModelPoints['stemTop'].getNode()
n.newLoad(loadVector)
def getMononobeOkabeDryOverpressure(self,backFillModel,kv,kh,delta_ad= 0,beta= 0, Kas= None, g= 9.81):
''' Return overpressure due to seismic action according to Mononobe-Okabe
:param backFillModel: back fill terrain model
:param kv: seismic coefficient of vertical acceleration.
:param kh: seismic coefficient of horizontal acceleration.
:param delta_ad: angle of friction soil - structure.
:param beta: slope inclination of backfill.
'''
H= self.getTotalHeight()
psi= math.radians(90) #back face inclination of the structure (< PI/2)
return backFillModel.getMononobeOkabeDryOverpressure(H, kv, kh, psi, delta_ad, beta, Kas,g)/H
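# Note (interpretation assumed from the code above): the backfill model is
# expected to return the total Mononobe-Okabe seismic thrust for the full
# height H; dividing by H expresses it as an equivalent uniform overpressure
# per unit height.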
def getReactions(self):
'''Return the reactions on the foundation.'''
return get_reactions.Reactions(self.modelSpace.preprocessor,self.fixedNodes)
def getEccentricity(self,R):
'''Return the eccentricity of the loads acting on the retaining wall.
:param R: (SlidingVectorsSystem3d) resultant of the loads acting on the retaining wall.
'''
foundationPlane= self.getFoundationPlane()
zml= R.zeroMomentLine(1e-5).getXY2DProjection() #Resultant line of action.
p= foundationPlane.getIntersectionWithLine(zml)[0] # Intersection with
# foundation plane.
foundationCenterPos2D= self.getFoundationCenterPosition()
return p.x-foundationCenterPos2D.x #eccentricity
def getFoundationRotation(self):
'''Returns the rotation of the foundation.'''
n0= self.wireframeModelPoints['toeEnd'].getNode()
n1= self.wireframeModelPoints['heelEnd'].getNode()
b= self.getFootingWidth()
delta= n1.getDisp[1]-n0.getDisp[1]
return math.atan(delta/b)
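# Worked example (assumed values): if the heel settles 6 mm more than the toe
# on a 3 m wide footing, delta = -0.006 m and the rotation is
# atan(-0.006/3) ~= -2.0e-3 rad (about -0.11 degrees).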
def getOverturningSafetyFactor(self,R,gammaR):
'''Return the factor of safety against overturning.
:param R: (SlidingVectorsSystem3d) resultant of the loads acting on the retaining wall.
:param gammaR: (float) partial resistance reduction factor.
'''
e= self.getEccentricity(R) #eccentricity
b= self.getFootingWidth()
bReduced= 2*(b/2.0+e)
return b/(3*(-e)*gammaR)
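# Worked example (assumed values): with b = 3.0 m, an eccentricity
# e = -0.25 m (resultant shifted towards the toe) and gammaR = 1.0, the
# routine returns 3.0/(3*0.25*1.0) = 4.0.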
def getSlidingSafetyFactor(self,R,gammaR,foundationSoilModel):
'''Return the factor of safety against sliding.
:param R: (SlidingVectorsSystem3d) resultant of the loads acting on the retaining wall.
:param gammaR: partial resistance reduction factor.
:param foundationSoilModel: (FrictionalCohesionalSoil) soil model.
'''
foundationPlane= self.getFoundationPlane()
alphaAngle= math.atan(foundationPlane.getSlope())
F= R.getResultant()
F2D= geom.Vector2d(F.x,F.y)
Ftang= foundationPlane.getVector2dProj(F2D)
Fnormal= F2D-Ftang
#Sliding strength
e= self.getEccentricity(R) #eccentricity
b= self.getFootingWidth()
bReduced= 2*(b/2.0+e)
Rd= Fnormal.getModulo()*math.tan(foundationSoilModel.getDesignPhi())+foundationSoilModel.getDesignC()*bReduced/math.cos(alphaAngle)
return Rd/Ftang.getModulo()/gammaR
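# Worked example (assumed values): with Fnormal = 500 kN/m, a design friction
# angle of 30 degrees, zero design cohesion, a horizontal foundation plane,
# Ftang = 120 kN/m and gammaR = 1.1, Rd ~= 500*tan(30 deg) ~= 289 kN/m and the
# factor of safety is 289/120/1.1 ~= 2.2.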
def getBearingPressureSafetyFactor(self,R,foundationSoilModel,toeFillDepth,q= 0.0):
''' Return the factor of safety against bearing capacity of the soil.
:param R: (SlidingVectorsSystem3d) resultant of the loads acting on the retaining wall.
:param foundationSoilModel: (FrictionalCohesionalSoil) soil model.
:param toeFillDepth: (float) depth of the soil filling over the toe.
:param q: (float) uniform load over the filling.
'''
D= self.getFoundationDepth(toeFillDepth)
Beff= self.b
e= self.getEccentricity(R) #eccentricity
b= self.getFootingWidth()
bReduced= 2*(b/2.0+e)
F= R.getResultant()
qu= foundationSoilModel.qu(q,D,self.b,bReduced,F.y,0.0,F.x)
sigma= F.y/bReduced
return qu/sigma
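# Worked example (assumed values): with qu = 450 kPa, a vertical resultant
# F.y = 600 kN/m and bReduced = 2.4 m, sigma = 600/2.4 = 250 kPa and the
# factor of safety is 450/250 = 1.8.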
def getStemYCoordinates(self):
y= list()
for e in self.stemSet.getElements:
n1= e.getNodes[0]
y.append(n1.getInitialPos2d.y)
n2= e.getNodes[1]
y.append(n2.getInitialPos2d.y)
return y
def getStemInternalForces(self):
md= list()
vd= list()
for e in self.stemSet.getElements:
md.append(e.getMz1)
vd.append(e.getVy1)
md.append(e.getMz2)
vd.append(e.getVy2)
return md, vd
def getHeelInternalForces(self):
md= 1.0e15
vd= 1.0e15
for e in self.heelSet.getElements:
md= min(md,e.getMz1)
vd= min(vd,e.getVy1)
return md, vd
def resultComb(self,nmbComb):
'''Solution and result retrieval routine.'''
preprocessor= self.feProblem.getPreprocessor
preprocessor.resetLoadCase()
preprocessor.getLoadHandler.getLoadCombinations.addToDomain(nmbComb)
#Solution
solution= predefined_solutions.SolutionProcedure()
analysis= solution.simpleStaticLinear(self.feProblem)
result= analysis.analyze(1)
reactions= self.getReactions()
preprocessor.getLoadHandler.getLoadCombinations.removeFromDomain(nmbComb)
return reactions
def performStabilityAnalysis(self,combinations,foundationSoilModel):
self.stability_results= WallStabilityResults(self,combinations,foundationSoilModel)
return self.stability_results
def getEnvelopeInternalForces(self,envelopeMd, envelopeVd, envelopeMdHeel, envelopeVdHeel):
md, vd= self.getStemInternalForces()
tmpMd= [max(l1, l2) for l1, l2 in zip(envelopeMd, md)]
envelopeMd= tmpMd
tmpVd= [max(l1, l2) for l1, l2 in zip(envelopeVd, vd)]
envelopeVd= tmpVd
mdHeel, vdHeel= self.getHeelInternalForces()
envelopeMdHeel= min(mdHeel,envelopeMdHeel)
envelopeVdHeel= min(vdHeel,envelopeVdHeel)
return envelopeMd, envelopeVd, envelopeMdHeel, envelopeVdHeel
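# The stem envelopes keep the largest value found at each position across the
# combinations analysed so far, while the heel envelopes keep the most
# negative (hogging) value; performSLSAnalysis/performULSAnalysis later store
# the heel results as absolute magnitudes.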
def performSLSAnalysis(self,combinations):
rotation= 1e15
rotationComb= ''
y= self.getStemYCoordinates()
envelopeMd= [0]*len(y)
envelopeVd= [0]*len(y)
envelopeMdHeel= 1.0e15
envelopeVdHeel= 1.0e15
for comb in combinations:
reactions= self.resultComb(comb)
envelopeMd, envelopeVd, envelopeMdHeel, envelopeVdHeel= self.getEnvelopeInternalForces(envelopeMd, envelopeVd, envelopeMdHeel, envelopeVdHeel)
rot= self.getFoundationRotation()
if(rot<rotation):
rotation= rot
rotationComb= comb
internalForces= InternalForces(y,envelopeMd, envelopeVd, abs(envelopeMdHeel), abs(envelopeVdHeel))
self.sls_results= WallSLSResults(internalForces,rotation, rotationComb)
return self.sls_results
def performULSAnalysis(self,combinations):
y= self.getStemYCoordinates()
envelopeMd= [0]*len(y)
envelopeVd= [0]*len(y)
envelopeMdHeel= 1.0e15
envelopeVdHeel= 1.0e15
for comb in combinations:
reactions= self.resultComb(comb)
envelopeMd, envelopeVd, envelopeMdHeel, envelopeVdHeel= self.getEnvelopeInternalForces(envelopeMd, envelopeVd, envelopeMdHeel, envelopeVdHeel)
internalForces= InternalForces(y,envelopeMd, envelopeVd, abs(envelopeMdHeel), abs(envelopeVdHeel))
self.uls_results= WallULSResults(internalForces)
return self.uls_results
|
gpl-3.0
|
kagayakidan/scikit-learn
|
sklearn/linear_model/tests/test_sgd.py
|
68
|
43439
|
import pickle
import unittest
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises_regexp
from sklearn import linear_model, datasets, metrics
from sklearn.base import clone
from sklearn.linear_model import SGDClassifier, SGDRegressor
from sklearn.preprocessing import LabelEncoder, scale, MinMaxScaler
class SparseSGDClassifier(SGDClassifier):
def fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).fit(X, y, *args, **kw)
def partial_fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).partial_fit(X, y, *args, **kw)
def decision_function(self, X):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).decision_function(X)
def predict_proba(self, X):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).predict_proba(X)
class SparseSGDRegressor(SGDRegressor):
def fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.fit(self, X, y, *args, **kw)
def partial_fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.partial_fit(self, X, y, *args, **kw)
def decision_function(self, X, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.decision_function(self, X, *args, **kw)
# Test Data
# test sample 1
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y = [1, 1, 1, 2, 2, 2]
T = np.array([[-1, -1], [2, 2], [3, 2]])
true_result = [1, 2, 2]
# test sample 2; string class labels
X2 = np.array([[-1, 1], [-0.75, 0.5], [-1.5, 1.5],
[1, 1], [0.75, 0.5], [1.5, 1.5],
[-1, -1], [0, -0.5], [1, -1]])
Y2 = ["one"] * 3 + ["two"] * 3 + ["three"] * 3
T2 = np.array([[-1.5, 0.5], [1, 2], [0, -2]])
true_result2 = ["one", "two", "three"]
# test sample 3
X3 = np.array([[1, 1, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0], [0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 1, 1], [0, 0, 0, 0, 1, 1],
[0, 0, 0, 1, 0, 0], [0, 0, 0, 1, 0, 0]])
Y3 = np.array([1, 1, 1, 1, 2, 2, 2, 2])
# test sample 4 - two more or less redundant feature groups
X4 = np.array([[1, 0.9, 0.8, 0, 0, 0], [1, .84, .98, 0, 0, 0],
[1, .96, .88, 0, 0, 0], [1, .91, .99, 0, 0, 0],
[0, 0, 0, .89, .91, 1], [0, 0, 0, .79, .84, 1],
[0, 0, 0, .91, .95, 1], [0, 0, 0, .93, 1, 1]])
Y4 = np.array([1, 1, 1, 1, 2, 2, 2, 2])
iris = datasets.load_iris()
# test sample 5 - test sample 1 as binary classification problem
X5 = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y5 = [1, 1, 1, 2, 2, 2]
true_result5 = [0, 1, 1]
# Classification Test Case
class CommonTest(object):
def factory(self, **kwargs):
if "random_state" not in kwargs:
kwargs["random_state"] = 42
return self.factory_class(**kwargs)
# a simple implementation of ASGD to use for testing
# uses squared loss to find the gradient
def asgd(self, X, y, eta, alpha, weight_init=None, intercept_init=0.0):
if weight_init is None:
weights = np.zeros(X.shape[1])
else:
weights = weight_init
average_weights = np.zeros(X.shape[1])
intercept = intercept_init
average_intercept = 0.0
decay = 1.0
# sparse data has a fixed decay of .01
if (isinstance(self, SparseSGDClassifierTestCase) or
isinstance(self, SparseSGDRegressorTestCase)):
decay = .01
for i, entry in enumerate(X):
p = np.dot(entry, weights)
p += intercept
gradient = p - y[i]
weights *= 1.0 - (eta * alpha)
weights += -(eta * gradient * entry)
intercept += -(eta * gradient) * decay
average_weights *= i
average_weights += weights
average_weights /= i + 1.0
average_intercept *= i
average_intercept += intercept
average_intercept /= i + 1.0
return average_weights, average_intercept
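# The updates above implement a running mean over the weight and intercept
# iterates, i.e. avg_i = (i*avg_{i-1} + x_i)/(i + 1); the averaged quantities
# are what the estimators under test (fitted with average=True) are expected
# to reproduce.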
def _test_warm_start(self, X, Y, lr):
# Test that explicit warm restart...
clf = self.factory(alpha=0.01, eta0=0.01, n_iter=5, shuffle=False,
learning_rate=lr)
clf.fit(X, Y)
clf2 = self.factory(alpha=0.001, eta0=0.01, n_iter=5, shuffle=False,
learning_rate=lr)
clf2.fit(X, Y,
coef_init=clf.coef_.copy(),
intercept_init=clf.intercept_.copy())
# ... and implicit warm restart are equivalent.
clf3 = self.factory(alpha=0.01, eta0=0.01, n_iter=5, shuffle=False,
warm_start=True, learning_rate=lr)
clf3.fit(X, Y)
assert_equal(clf3.t_, clf.t_)
assert_array_almost_equal(clf3.coef_, clf.coef_)
clf3.set_params(alpha=0.001)
clf3.fit(X, Y)
assert_equal(clf3.t_, clf2.t_)
assert_array_almost_equal(clf3.coef_, clf2.coef_)
def test_warm_start_constant(self):
self._test_warm_start(X, Y, "constant")
def test_warm_start_invscaling(self):
self._test_warm_start(X, Y, "invscaling")
def test_warm_start_optimal(self):
self._test_warm_start(X, Y, "optimal")
def test_input_format(self):
# Input format tests.
clf = self.factory(alpha=0.01, n_iter=5,
shuffle=False)
clf.fit(X, Y)
Y_ = np.array(Y)[:, np.newaxis]
Y_ = np.c_[Y_, Y_]
assert_raises(ValueError, clf.fit, X, Y_)
def test_clone(self):
# Test whether clone works ok.
clf = self.factory(alpha=0.01, n_iter=5, penalty='l1')
clf = clone(clf)
clf.set_params(penalty='l2')
clf.fit(X, Y)
clf2 = self.factory(alpha=0.01, n_iter=5, penalty='l2')
clf2.fit(X, Y)
assert_array_equal(clf.coef_, clf2.coef_)
def test_plain_has_no_average_attr(self):
clf = self.factory(average=True, eta0=.01)
clf.fit(X, Y)
assert_true(hasattr(clf, 'average_coef_'))
assert_true(hasattr(clf, 'average_intercept_'))
assert_true(hasattr(clf, 'standard_intercept_'))
assert_true(hasattr(clf, 'standard_coef_'))
clf = self.factory()
clf.fit(X, Y)
assert_false(hasattr(clf, 'average_coef_'))
assert_false(hasattr(clf, 'average_intercept_'))
assert_false(hasattr(clf, 'standard_intercept_'))
assert_false(hasattr(clf, 'standard_coef_'))
def test_late_onset_averaging_not_reached(self):
clf1 = self.factory(average=600)
clf2 = self.factory()
for _ in range(100):
if isinstance(clf1, SGDClassifier):
clf1.partial_fit(X, Y, classes=np.unique(Y))
clf2.partial_fit(X, Y, classes=np.unique(Y))
else:
clf1.partial_fit(X, Y)
clf2.partial_fit(X, Y)
assert_array_almost_equal(clf1.coef_, clf2.coef_, decimal=16)
assert_almost_equal(clf1.intercept_, clf2.intercept_, decimal=16)
def test_late_onset_averaging_reached(self):
eta0 = .001
alpha = .0001
Y_encode = np.array(Y)
Y_encode[Y_encode == 1] = -1.0
Y_encode[Y_encode == 2] = 1.0
clf1 = self.factory(average=7, learning_rate="constant",
loss='squared_loss', eta0=eta0,
alpha=alpha, n_iter=2, shuffle=False)
clf2 = self.factory(average=0, learning_rate="constant",
loss='squared_loss', eta0=eta0,
alpha=alpha, n_iter=1, shuffle=False)
clf1.fit(X, Y_encode)
clf2.fit(X, Y_encode)
average_weights, average_intercept = \
self.asgd(X, Y_encode, eta0, alpha,
weight_init=clf2.coef_.ravel(),
intercept_init=clf2.intercept_)
assert_array_almost_equal(clf1.coef_.ravel(),
average_weights.ravel(),
decimal=16)
assert_almost_equal(clf1.intercept_, average_intercept, decimal=16)
class DenseSGDClassifierTestCase(unittest.TestCase, CommonTest):
"""Test suite for the dense representation variant of SGD"""
factory_class = SGDClassifier
def test_sgd(self):
# Check that SGD gives any results :-)
for loss in ("hinge", "squared_hinge", "log", "modified_huber"):
clf = self.factory(penalty='l2', alpha=0.01, fit_intercept=True,
loss=loss, n_iter=10, shuffle=True)
clf.fit(X, Y)
# assert_almost_equal(clf.coef_[0], clf.coef_[1], decimal=7)
assert_array_equal(clf.predict(T), true_result)
@raises(ValueError)
def test_sgd_bad_l1_ratio(self):
# Check whether expected ValueError on bad l1_ratio
self.factory(l1_ratio=1.1)
@raises(ValueError)
def test_sgd_bad_learning_rate_schedule(self):
# Check whether expected ValueError on bad learning_rate
self.factory(learning_rate="<unknown>")
@raises(ValueError)
def test_sgd_bad_eta0(self):
# Check whether expected ValueError on bad eta0
self.factory(eta0=0, learning_rate="constant")
@raises(ValueError)
def test_sgd_bad_alpha(self):
# Check whether expected ValueError on bad alpha
self.factory(alpha=-.1)
@raises(ValueError)
def test_sgd_bad_penalty(self):
# Check whether expected ValueError on bad penalty
self.factory(penalty='foobar', l1_ratio=0.85)
@raises(ValueError)
def test_sgd_bad_loss(self):
# Check whether expected ValueError on bad loss
self.factory(loss="foobar")
@raises(ValueError)
def test_sgd_n_iter_param(self):
# Test parameter validity check
self.factory(n_iter=-10000)
@raises(ValueError)
def test_sgd_shuffle_param(self):
# Test parameter validity check
self.factory(shuffle="false")
@raises(TypeError)
def test_argument_coef(self):
# Checks coef_init not allowed as model argument (only fit)
# Provided coef_ does not match dataset.
self.factory(coef_init=np.zeros((3,))).fit(X, Y)
@raises(ValueError)
def test_provide_coef(self):
# Checks coef_init shape for the warm starts
# Provided coef_ does not match dataset.
self.factory().fit(X, Y, coef_init=np.zeros((3,)))
@raises(ValueError)
def test_set_intercept(self):
# Checks intercept_ shape for the warm starts
# Provided intercept_ does not match dataset.
self.factory().fit(X, Y, intercept_init=np.zeros((3,)))
def test_set_intercept_binary(self):
# Checks intercept_ shape for the warm starts in binary case
self.factory().fit(X5, Y5, intercept_init=0)
def test_average_binary_computed_correctly(self):
# Checks the SGDClassifier correctly computes the average weights
eta = .1
alpha = 2.
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
# simple linear function without noise
y = np.dot(X, w)
y = np.sign(y)
clf.fit(X, y)
average_weights, average_intercept = self.asgd(X, y, eta, alpha)
average_weights = average_weights.reshape(1, -1)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=14)
assert_almost_equal(clf.intercept_, average_intercept, decimal=14)
def test_set_intercept_to_intercept(self):
# Checks intercept_ shape consistency for the warm starts
# Inconsistent intercept_ shape.
clf = self.factory().fit(X5, Y5)
self.factory().fit(X5, Y5, intercept_init=clf.intercept_)
clf = self.factory().fit(X, Y)
self.factory().fit(X, Y, intercept_init=clf.intercept_)
@raises(ValueError)
def test_sgd_at_least_two_labels(self):
# Target must have at least two labels
self.factory(alpha=0.01, n_iter=20).fit(X2, np.ones(9))
def test_partial_fit_weight_class_balanced(self):
# partial_fit with class_weight='balanced' not supported
assert_raises_regexp(ValueError,
"class_weight 'balanced' is not supported for "
"partial_fit. In order to use 'balanced' weights, "
"use compute_class_weight\('balanced', classes, y\). "
"In place of y you can us a large enough sample "
"of the full training set target to properly "
"estimate the class frequency distributions. "
"Pass the resulting weights as the class_weight "
"parameter.",
self.factory(class_weight='balanced').partial_fit,
X, Y, classes=np.unique(Y))
def test_sgd_multiclass(self):
# Multi-class test case
clf = self.factory(alpha=0.01, n_iter=20).fit(X2, Y2)
assert_equal(clf.coef_.shape, (3, 2))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([[0, 0]]).shape, (1, 3))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_sgd_multiclass_average(self):
eta = .001
alpha = .01
# Multi-class average test case
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
np_Y2 = np.array(Y2)
clf.fit(X2, np_Y2)
classes = np.unique(np_Y2)
for i, cl in enumerate(classes):
y_i = np.ones(np_Y2.shape[0])
y_i[np_Y2 != cl] = -1
average_coef, average_intercept = self.asgd(X2, y_i, eta, alpha)
assert_array_almost_equal(average_coef, clf.coef_[i], decimal=16)
assert_almost_equal(average_intercept,
clf.intercept_[i],
decimal=16)
def test_sgd_multiclass_with_init_coef(self):
# Multi-class test case
clf = self.factory(alpha=0.01, n_iter=20)
clf.fit(X2, Y2, coef_init=np.zeros((3, 2)),
intercept_init=np.zeros(3))
assert_equal(clf.coef_.shape, (3, 2))
assert_equal(clf.intercept_.shape, (3,))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_sgd_multiclass_njobs(self):
# Multi-class test case with multi-core support
clf = self.factory(alpha=0.01, n_iter=20, n_jobs=2).fit(X2, Y2)
assert_equal(clf.coef_.shape, (3, 2))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([[0, 0]]).shape, (1, 3))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_set_coef_multiclass(self):
# Checks coef_init and intercept_init shape for multi-class
# problems
# Provided coef_ does not match dataset
clf = self.factory()
assert_raises(ValueError, clf.fit, X2, Y2, coef_init=np.zeros((2, 2)))
# Provided coef_ does match dataset
clf = self.factory().fit(X2, Y2, coef_init=np.zeros((3, 2)))
# Provided intercept_ does not match dataset
clf = self.factory()
assert_raises(ValueError, clf.fit, X2, Y2,
intercept_init=np.zeros((1,)))
# Provided intercept_ does match dataset.
clf = self.factory().fit(X2, Y2, intercept_init=np.zeros((3,)))
def test_sgd_proba(self):
# Check SGD.predict_proba
# Hinge loss does not allow for conditional prob estimate.
# We cannot use the factory here, because it defines predict_proba
# anyway.
clf = SGDClassifier(loss="hinge", alpha=0.01, n_iter=10).fit(X, Y)
assert_false(hasattr(clf, "predict_proba"))
assert_false(hasattr(clf, "predict_log_proba"))
# log and modified_huber losses can output probability estimates
# binary case
for loss in ["log", "modified_huber"]:
clf = self.factory(loss="modified_huber", alpha=0.01, n_iter=10)
clf.fit(X, Y)
p = clf.predict_proba([[3, 2]])
assert_true(p[0, 1] > 0.5)
p = clf.predict_proba([[-1, -1]])
assert_true(p[0, 1] < 0.5)
p = clf.predict_log_proba([[3, 2]])
assert_true(p[0, 1] > p[0, 0])
p = clf.predict_log_proba([[-1, -1]])
assert_true(p[0, 1] < p[0, 0])
# log loss multiclass probability estimates
clf = self.factory(loss="log", alpha=0.01, n_iter=10).fit(X2, Y2)
d = clf.decision_function([[.1, -.1], [.3, .2]])
p = clf.predict_proba([[.1, -.1], [.3, .2]])
assert_array_equal(np.argmax(p, axis=1), np.argmax(d, axis=1))
assert_almost_equal(p[0].sum(), 1)
assert_true(np.all(p[0] >= 0))
p = clf.predict_proba([[-1, -1]])
d = clf.decision_function([[-1, -1]])
assert_array_equal(np.argsort(p[0]), np.argsort(d[0]))
l = clf.predict_log_proba([[3, 2]])
p = clf.predict_proba([[3, 2]])
assert_array_almost_equal(np.log(p), l)
l = clf.predict_log_proba([[-1, -1]])
p = clf.predict_proba([[-1, -1]])
assert_array_almost_equal(np.log(p), l)
# Modified Huber multiclass probability estimates; requires a separate
# test because the hard zero/one probabilities may destroy the
# ordering present in decision_function output.
clf = self.factory(loss="modified_huber", alpha=0.01, n_iter=10)
clf.fit(X2, Y2)
d = clf.decision_function([[3, 2]])
p = clf.predict_proba([[3, 2]])
if not isinstance(self, SparseSGDClassifierTestCase):
assert_equal(np.argmax(d, axis=1), np.argmax(p, axis=1))
else: # XXX the sparse test gets a different X2 (?)
assert_equal(np.argmin(d, axis=1), np.argmin(p, axis=1))
# the following sample produces decision_function values < -1,
# which would cause naive normalization to fail (see comment
# in SGDClassifier.predict_proba)
x = X.mean(axis=0)
d = clf.decision_function([x])
if np.all(d < -1): # XXX not true in sparse test case (why?)
p = clf.predict_proba([x])
assert_array_almost_equal(p[0], [1 / 3.] * 3)
def test_sgd_l1(self):
# Test L1 regularization
n = len(X4)
rng = np.random.RandomState(13)
idx = np.arange(n)
rng.shuffle(idx)
X = X4[idx, :]
Y = Y4[idx]
clf = self.factory(penalty='l1', alpha=.2, fit_intercept=False,
n_iter=2000, shuffle=False)
clf.fit(X, Y)
assert_array_equal(clf.coef_[0, 1:-1], np.zeros((4,)))
pred = clf.predict(X)
assert_array_equal(pred, Y)
# test sparsify with dense inputs
clf.sparsify()
assert_true(sp.issparse(clf.coef_))
pred = clf.predict(X)
assert_array_equal(pred, Y)
# pickle and unpickle with sparse coef_
clf = pickle.loads(pickle.dumps(clf))
assert_true(sp.issparse(clf.coef_))
pred = clf.predict(X)
assert_array_equal(pred, Y)
def test_class_weights(self):
# Test class weights.
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False,
class_weight=None)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# we give a small weight to class 1
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False,
class_weight={1: 0.001})
clf.fit(X, y)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
def test_equal_class_weight(self):
# Test if equal class weights approx. equals no class weights.
X = [[1, 0], [1, 0], [0, 1], [0, 1]]
y = [0, 0, 1, 1]
clf = self.factory(alpha=0.1, n_iter=1000, class_weight=None)
clf.fit(X, y)
X = [[1, 0], [0, 1]]
y = [0, 1]
clf_weighted = self.factory(alpha=0.1, n_iter=1000,
class_weight={0: 0.5, 1: 0.5})
clf_weighted.fit(X, y)
# should be similar up to some epsilon due to learning rate schedule
assert_almost_equal(clf.coef_, clf_weighted.coef_, decimal=2)
@raises(ValueError)
def test_wrong_class_weight_label(self):
# ValueError due to not existing class label.
clf = self.factory(alpha=0.1, n_iter=1000, class_weight={0: 0.5})
clf.fit(X, Y)
@raises(ValueError)
def test_wrong_class_weight_format(self):
# ValueError due to wrong class_weight argument type.
clf = self.factory(alpha=0.1, n_iter=1000, class_weight=[0.5])
clf.fit(X, Y)
def test_weights_multiplied(self):
# Tests that class_weight and sample_weight are multiplicative
class_weights = {1: .6, 2: .3}
sample_weights = np.random.random(Y4.shape[0])
multiplied_together = np.copy(sample_weights)
multiplied_together[Y4 == 1] *= class_weights[1]
multiplied_together[Y4 == 2] *= class_weights[2]
clf1 = self.factory(alpha=0.1, n_iter=20, class_weight=class_weights)
clf2 = self.factory(alpha=0.1, n_iter=20)
clf1.fit(X4, Y4, sample_weight=sample_weights)
clf2.fit(X4, Y4, sample_weight=multiplied_together)
assert_almost_equal(clf1.coef_, clf2.coef_)
def test_balanced_weight(self):
# Test class weights for imbalanced data
# compute reference metrics on iris dataset that is quite balanced by
# default
X, y = iris.data, iris.target
X = scale(X)
idx = np.arange(X.shape[0])
rng = np.random.RandomState(6)
rng.shuffle(idx)
X = X[idx]
y = y[idx]
clf = self.factory(alpha=0.0001, n_iter=1000,
class_weight=None, shuffle=False).fit(X, y)
assert_almost_equal(metrics.f1_score(y, clf.predict(X), average='weighted'), 0.96,
decimal=1)
# make the same prediction using balanced class_weight
clf_balanced = self.factory(alpha=0.0001, n_iter=1000,
class_weight="balanced",
shuffle=False).fit(X, y)
assert_almost_equal(metrics.f1_score(y, clf_balanced.predict(X), average='weighted'), 0.96,
decimal=1)
# Make sure that in the balanced case it does not change anything
# to use "balanced"
assert_array_almost_equal(clf.coef_, clf_balanced.coef_, 6)
# build a heavily imbalanced dataset out of the iris data
X_0 = X[y == 0, :]
y_0 = y[y == 0]
X_imbalanced = np.vstack([X] + [X_0] * 10)
y_imbalanced = np.concatenate([y] + [y_0] * 10)
# fit a model on the imbalanced data without class weight info
clf = self.factory(n_iter=1000, class_weight=None, shuffle=False)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_less(metrics.f1_score(y, y_pred, average='weighted'), 0.96)
# fit a model with balanced class_weight enabled
clf = self.factory(n_iter=1000, class_weight="balanced", shuffle=False)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_greater(metrics.f1_score(y, y_pred, average='weighted'), 0.96)
# fit a third model, again with balanced class_weight, to confirm the result
clf = self.factory(n_iter=1000, class_weight="balanced", shuffle=False)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_greater(metrics.f1_score(y, y_pred, average='weighted'), 0.96)
def test_sample_weights(self):
# Test weights on individual samples
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# we give small weights to the class 1 samples
clf.fit(X, y, sample_weight=[0.001] * 3 + [1] * 2)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
@raises(ValueError)
def test_wrong_sample_weights(self):
# Test if ValueError is raised if sample_weight has wrong shape
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False)
# provided sample_weight too long
clf.fit(X, Y, sample_weight=np.arange(7))
@raises(ValueError)
def test_partial_fit_exception(self):
clf = self.factory(alpha=0.01)
# classes was not specified
clf.partial_fit(X3, Y3)
def test_partial_fit_binary(self):
third = X.shape[0] // 3
clf = self.factory(alpha=0.01)
classes = np.unique(Y)
clf.partial_fit(X[:third], Y[:third], classes=classes)
assert_equal(clf.coef_.shape, (1, X.shape[1]))
assert_equal(clf.intercept_.shape, (1,))
assert_equal(clf.decision_function([[0, 0]]).shape, (1, ))
id1 = id(clf.coef_.data)
clf.partial_fit(X[third:], Y[third:])
id2 = id(clf.coef_.data)
# check that coef_ hasn't been re-allocated
assert_equal(id1, id2)
y_pred = clf.predict(T)
assert_array_equal(y_pred, true_result)
def test_partial_fit_multiclass(self):
third = X2.shape[0] // 3
clf = self.factory(alpha=0.01)
classes = np.unique(Y2)
clf.partial_fit(X2[:third], Y2[:third], classes=classes)
assert_equal(clf.coef_.shape, (3, X2.shape[1]))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([[0, 0]]).shape, (1, 3))
id1 = id(clf.coef_.data)
clf.partial_fit(X2[third:], Y2[third:])
id2 = id(clf.coef_.data)
# check that coef_ hasn't been re-allocated
assert_equal(id1, id2)
def test_fit_then_partial_fit(self):
# Partial_fit should work after initial fit in the multiclass case.
# Non-regression test for #2496; fit would previously produce a
# Fortran-ordered coef_ that subsequent partial_fit couldn't handle.
clf = self.factory()
clf.fit(X2, Y2)
clf.partial_fit(X2, Y2) # no exception here
def _test_partial_fit_equal_fit(self, lr):
for X_, Y_, T_ in ((X, Y, T), (X2, Y2, T2)):
clf = self.factory(alpha=0.01, eta0=0.01, n_iter=2,
learning_rate=lr, shuffle=False)
clf.fit(X_, Y_)
y_pred = clf.decision_function(T_)
t = clf.t_
classes = np.unique(Y_)
clf = self.factory(alpha=0.01, eta0=0.01, learning_rate=lr,
shuffle=False)
for i in range(2):
clf.partial_fit(X_, Y_, classes=classes)
y_pred2 = clf.decision_function(T_)
assert_equal(clf.t_, t)
assert_array_almost_equal(y_pred, y_pred2, decimal=2)
def test_partial_fit_equal_fit_constant(self):
self._test_partial_fit_equal_fit("constant")
def test_partial_fit_equal_fit_optimal(self):
self._test_partial_fit_equal_fit("optimal")
def test_partial_fit_equal_fit_invscaling(self):
self._test_partial_fit_equal_fit("invscaling")
def test_regression_losses(self):
clf = self.factory(alpha=0.01, learning_rate="constant",
eta0=0.1, loss="epsilon_insensitive")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, learning_rate="constant",
eta0=0.1, loss="squared_epsilon_insensitive")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, loss="huber")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, learning_rate="constant", eta0=0.01,
loss="squared_loss")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
def test_warm_start_multiclass(self):
self._test_warm_start(X2, Y2, "optimal")
def test_multiple_fit(self):
# Test multiple calls of fit w/ different shaped inputs.
clf = self.factory(alpha=0.01, n_iter=5,
shuffle=False)
clf.fit(X, Y)
assert_true(hasattr(clf, "coef_"))
# Non-regression test: try fitting with a different label set.
y = [["ham", "spam"][i] for i in LabelEncoder().fit_transform(Y)]
clf.fit(X[:, :-1], y)
class SparseSGDClassifierTestCase(DenseSGDClassifierTestCase):
"""Run exactly the same tests using the sparse representation variant"""
factory_class = SparseSGDClassifier
###############################################################################
# Regression Test Case
class DenseSGDRegressorTestCase(unittest.TestCase, CommonTest):
"""Test suite for the dense representation variant of SGD"""
factory_class = SGDRegressor
def test_sgd(self):
# Check that SGD gives any results.
clf = self.factory(alpha=0.1, n_iter=2,
fit_intercept=False)
clf.fit([[0, 0], [1, 1], [2, 2]], [0, 1, 2])
assert_equal(clf.coef_[0], clf.coef_[1])
@raises(ValueError)
def test_sgd_bad_penalty(self):
# Check whether expected ValueError on bad penalty
self.factory(penalty='foobar', l1_ratio=0.85)
@raises(ValueError)
def test_sgd_bad_loss(self):
# Check whether expected ValueError on bad loss
self.factory(loss="foobar")
def test_sgd_averaged_computed_correctly(self):
# Tests the average regressor matches the naive implementation
eta = .001
alpha = .01
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
# simple linear function without noise
y = np.dot(X, w)
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
clf.fit(X, y)
average_weights, average_intercept = self.asgd(X, y, eta, alpha)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=16)
assert_almost_equal(clf.intercept_, average_intercept, decimal=16)
def test_sgd_averaged_partial_fit(self):
# Tests whether the partial fit yields the same average as the fit
eta = .001
alpha = .01
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
# simple linear function without noise
y = np.dot(X, w)
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
clf.partial_fit(X[:int(n_samples / 2)][:], y[:int(n_samples / 2)])
clf.partial_fit(X[int(n_samples / 2):][:], y[int(n_samples / 2):])
average_weights, average_intercept = self.asgd(X, y, eta, alpha)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=16)
assert_almost_equal(clf.intercept_[0], average_intercept, decimal=16)
def test_average_sparse(self):
# Checks the average weights on data with 0s
eta = .001
alpha = .01
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
n_samples = Y3.shape[0]
clf.partial_fit(X3[:int(n_samples / 2)][:], Y3[:int(n_samples / 2)])
clf.partial_fit(X3[int(n_samples / 2):][:], Y3[int(n_samples / 2):])
average_weights, average_intercept = self.asgd(X3, Y3, eta, alpha)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=16)
assert_almost_equal(clf.intercept_, average_intercept, decimal=16)
def test_sgd_least_squares_fit(self):
xmin, xmax = -5, 5
n_samples = 100
rng = np.random.RandomState(0)
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss='squared_loss', alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
clf = self.factory(loss='squared_loss', alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.5)
def test_sgd_epsilon_insensitive(self):
xmin, xmax = -5, 5
n_samples = 100
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss='epsilon_insensitive', epsilon=0.01,
alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_true(score > 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() \
+ np.random.randn(n_samples, 1).ravel()
clf = self.factory(loss='epsilon_insensitive', epsilon=0.01,
alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_true(score > 0.5)
def test_sgd_huber_fit(self):
xmin, xmax = -5, 5
n_samples = 100
rng = np.random.RandomState(0)
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss="huber", epsilon=0.1, alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
clf = self.factory(loss="huber", epsilon=0.1, alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.5)
def test_elasticnet_convergence(self):
# Check that the SGD output is consistent with coordinate descent
n_samples, n_features = 1000, 5
rng = np.random.RandomState(0)
X = np.random.randn(n_samples, n_features)
# ground truth linear model that generates y from X and to which the
# models should converge if the regularizer were set to 0.0
ground_truth_coef = rng.randn(n_features)
y = np.dot(X, ground_truth_coef)
# XXX: alpha = 0.1 seems to cause convergence problems
for alpha in [0.01, 0.001]:
for l1_ratio in [0.5, 0.8, 1.0]:
cd = linear_model.ElasticNet(alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=False)
cd.fit(X, y)
sgd = self.factory(penalty='elasticnet', n_iter=50,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=False)
sgd.fit(X, y)
err_msg = ("cd and sgd did not converge to comparable "
"results for alpha=%f and l1_ratio=%f"
% (alpha, l1_ratio))
assert_almost_equal(cd.coef_, sgd.coef_, decimal=2,
err_msg=err_msg)
def test_partial_fit(self):
third = X.shape[0] // 3
clf = self.factory(alpha=0.01)
clf.partial_fit(X[:third], Y[:third])
assert_equal(clf.coef_.shape, (X.shape[1], ))
assert_equal(clf.intercept_.shape, (1,))
assert_equal(clf.decision_function([[0, 0]]).shape, (1, ))
id1 = id(clf.coef_.data)
clf.partial_fit(X[third:], Y[third:])
id2 = id(clf.coef_.data)
# check that coef_ hasn't been re-allocated
assert_equal(id1, id2)
def _test_partial_fit_equal_fit(self, lr):
clf = self.factory(alpha=0.01, n_iter=2, eta0=0.01,
learning_rate=lr, shuffle=False)
clf.fit(X, Y)
y_pred = clf.predict(T)
t = clf.t_
clf = self.factory(alpha=0.01, eta0=0.01,
learning_rate=lr, shuffle=False)
for i in range(2):
clf.partial_fit(X, Y)
y_pred2 = clf.predict(T)
assert_equal(clf.t_, t)
assert_array_almost_equal(y_pred, y_pred2, decimal=2)
def test_partial_fit_equal_fit_constant(self):
self._test_partial_fit_equal_fit("constant")
def test_partial_fit_equal_fit_optimal(self):
self._test_partial_fit_equal_fit("optimal")
def test_partial_fit_equal_fit_invscaling(self):
self._test_partial_fit_equal_fit("invscaling")
def test_loss_function_epsilon(self):
clf = self.factory(epsilon=0.9)
clf.set_params(epsilon=0.1)
assert clf.loss_functions['huber'][1] == 0.1
class SparseSGDRegressorTestCase(DenseSGDRegressorTestCase):
# Run exactly the same tests using the sparse representation variant
factory_class = SparseSGDRegressor
def test_l1_ratio():
# Test if l1 ratio extremes match L1 and L2 penalty settings.
X, y = datasets.make_classification(n_samples=1000,
n_features=100, n_informative=20,
random_state=1234)
# test if elasticnet with l1_ratio near 1 gives same result as pure l1
est_en = SGDClassifier(alpha=0.001, penalty='elasticnet',
l1_ratio=0.9999999999, random_state=42).fit(X, y)
est_l1 = SGDClassifier(alpha=0.001, penalty='l1', random_state=42).fit(X, y)
assert_array_almost_equal(est_en.coef_, est_l1.coef_)
# test if elasticnet with l1_ratio near 0 gives same result as pure l2
est_en = SGDClassifier(alpha=0.001, penalty='elasticnet',
l1_ratio=0.0000000001, random_state=42).fit(X, y)
est_l2 = SGDClassifier(alpha=0.001, penalty='l2', random_state=42).fit(X, y)
assert_array_almost_equal(est_en.coef_, est_l2.coef_)
def test_underflow_or_overlow():
with np.errstate(all='raise'):
# Generate some weird data with hugely unscaled features
rng = np.random.RandomState(0)
n_samples = 100
n_features = 10
X = rng.normal(size=(n_samples, n_features))
X[:, :2] *= 1e300
assert_true(np.isfinite(X).all())
# Use MinMaxScaler to scale the data without introducing a numerical
# instability (computing the standard deviation naively is not possible
# on this data)
X_scaled = MinMaxScaler().fit_transform(X)
assert_true(np.isfinite(X_scaled).all())
# Define a ground truth on the scaled data
ground_truth = rng.normal(size=n_features)
y = (np.dot(X_scaled, ground_truth) > 0.).astype(np.int32)
assert_array_equal(np.unique(y), [0, 1])
model = SGDClassifier(alpha=0.1, loss='squared_hinge', n_iter=500)
# smoke test: model is stable on scaled data
model.fit(X_scaled, y)
assert_true(np.isfinite(model.coef_).all())
# model is numerically unstable on unscaled data
msg_regxp = (r"Floating-point under-/overflow occurred at epoch #.*"
" Scaling input data with StandardScaler or MinMaxScaler"
" might help.")
assert_raises_regexp(ValueError, msg_regxp, model.fit, X, y)
def test_numerical_stability_large_gradient():
# Non regression test case for numerical stability on scaled problems
# where the gradient can still explode with some losses
model = SGDClassifier(loss='squared_hinge', n_iter=10, shuffle=True,
penalty='elasticnet', l1_ratio=0.3, alpha=0.01,
eta0=0.001, random_state=0)
with np.errstate(all='raise'):
model.fit(iris.data, iris.target)
assert_true(np.isfinite(model.coef_).all())
def test_large_regularization():
# Non regression tests for numerical stability issues caused by large
# regularization parameters
for penalty in ['l2', 'l1', 'elasticnet']:
model = SGDClassifier(alpha=1e5, learning_rate='constant', eta0=0.1,
n_iter=5, penalty=penalty, shuffle=False)
with np.errstate(all='raise'):
model.fit(iris.data, iris.target)
assert_array_almost_equal(model.coef_, np.zeros_like(model.coef_))
|
bsd-3-clause
|
lepmik/nest-simulator
|
topology/examples/test_3d_gauss.py
|
13
|
2924
|
# -*- coding: utf-8 -*-
#
# test_3d_gauss.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
'''
NEST Topology Module
EXPERIMENTAL example of 3d layer.
3d layers are currently not supported, use at your own risk!
Hans Ekkehard Plesser, UMB
This example uses the function GetChildren, which is deprecated. A deprecation
warning is therefore issued. For details about deprecated functions, see
documentation.
'''
import nest
import pylab
import random
import nest.topology as topo
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
pylab.ion()
nest.ResetKernel()
# generate list of 1000 (x,y,z) triplets
pos = [[random.uniform(-0.5, 0.5), random.uniform(-0.5, 0.5),
random.uniform(-0.5, 0.5)]
for j in range(1000)]
l1 = topo.CreateLayer(
{'extent': [1.5, 1.5, 1.5], # must specify 3d extent AND center
'center': [0., 0., 0.],
'positions': pos,
'elements': 'iaf_psc_alpha'})
# visualize
# xext, yext = nest.GetStatus(l1, 'topology')[0]['extent']
# xctr, yctr = nest.GetStatus(l1, 'topology')[0]['center']
# l1_children is a work-around until NEST 3.0 is released
l1_children = nest.GetChildren(l1)[0]
# extract position information, transpose to list of x, y and z positions
xpos, ypos, zpos = zip(*topo.GetPosition(l1_children))
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(xpos, ypos, zpos, s=15, facecolor='b', edgecolor='none')
# Gaussian connections in full volume [-0.75,0.75]**3
topo.ConnectLayers(l1, l1,
{'connection_type': 'divergent', 'allow_autapses': False,
'mask': {'volume': {'lower_left': [-0.75, -0.75, -0.75],
'upper_right': [0.75, 0.75, 0.75]}},
'kernel': {'gaussian': {'p_center': 1., 'sigma': 0.25}}})
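# For reference: the 'gaussian' kernel above evaluates (with the topology
# defaults c = 0 and mean = 0) to p(d) = p_center*exp(-d**2/(2*sigma**2)),
# so a target at distance d = 0.25 is connected with probability
# exp(-0.5) ~= 0.61.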
# show connections from center element
# sender shown in red, targets in green
ctr = topo.FindCenterElement(l1)
xtgt, ytgt, ztgt = zip(*topo.GetTargetPositions(ctr, l1)[0])
xctr, yctr, zctr = topo.GetPosition(ctr)[0]
ax.scatter([xctr], [yctr], [zctr], s=40, facecolor='r', edgecolor='none')
ax.scatter(xtgt, ytgt, ztgt, s=40, facecolor='g', edgecolor='g')
tgts = topo.GetTargetNodes(ctr, l1)[0]
d = topo.Distance(ctr, tgts)
plt.figure()
plt.hist(d, 25)
# plt.show()
|
gpl-2.0
|
alekz112/statsmodels
|
statsmodels/examples/tut_ols_rlm_short.py
|
34
|
1649
|
'''Examples: comparing OLS and RLM
robust estimators and outliers
RLM is less influenced by outliers than OLS: its estimated slope stays
closer to the true slope and is not tilted by the outliers the way the
OLS fit is.
Note: uncomment plt.show() to display graphs
'''
from __future__ import print_function
import numpy as np
#from scipy import stats
import statsmodels.api as sm
import matplotlib.pyplot as plt
from statsmodels.sandbox.regression.predstd import wls_prediction_std
#fix a seed for these examples
np.random.seed(98765789)
nsample = 50
x1 = np.linspace(0, 20, nsample)
X = np.c_[x1, np.ones(nsample)]
sig = 0.3 # smaller error variance makes OLS<->RLM contrast bigger
beta = [0.5, 5.]
y_true2 = np.dot(X, beta)
y2 = y_true2 + sig*1. * np.random.normal(size=nsample)
y2[[39,41,43,45,48]] -= 5 # add some outliers (10% of nsample)
# Example: estimate linear function (true is linear)
plt.figure()
plt.plot(x1, y2, 'o', x1, y_true2, 'b-')
res2 = sm.OLS(y2, X).fit()
print("OLS: parameter estimates: slope, constant")
print(res2.params)
print("standard deviation of parameter estimates")
print(res2.bse)
prstd, iv_l, iv_u = wls_prediction_std(res2)
plt.plot(x1, res2.fittedvalues, 'r-')
plt.plot(x1, iv_u, 'r--')
plt.plot(x1, iv_l, 'r--')
#compare with robust estimator
resrlm2 = sm.RLM(y2, X).fit()
print("\nRLM: parameter estimates: slope, constant")
print(resrlm2.params)
print("standard deviation of parameter estimates")
print(resrlm2.bse)
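# Quick numeric check of the claim above (illustrative; the exact numbers
# depend on the random seed): the RLM slope should lie closer to the true
# slope beta[0] = 0.5 than the OLS slope.
print("absolute slope error, OLS vs RLM:", abs(res2.params[0] - beta[0]), abs(resrlm2.params[0] - beta[0]))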
plt.plot(x1, resrlm2.fittedvalues, 'g.-')
plt.title('Data with Outliers; blue: true, red: OLS, green: RLM')
# see also help(sm.RLM.fit) for more options and
# module sm.robust.scale for scale options
plt.show()
|
bsd-3-clause
|
hsuantien/scikit-learn
|
sklearn/manifold/tests/test_isomap.py
|
226
|
3941
|
from itertools import product
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from sklearn import datasets
from sklearn import manifold
from sklearn import neighbors
from sklearn import pipeline
from sklearn import preprocessing
from sklearn.utils.testing import assert_less
eigen_solvers = ['auto', 'dense', 'arpack']
path_methods = ['auto', 'FW', 'D']
def test_isomap_simple_grid():
# Isomap should preserve distances when all neighbors are used
N_per_side = 5
Npts = N_per_side ** 2
n_neighbors = Npts - 1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(N_per_side), repeat=2)))
# distances from each point to all others
G = neighbors.kneighbors_graph(X, n_neighbors,
mode='distance').toarray()
for eigen_solver in eigen_solvers:
for path_method in path_methods:
clf = manifold.Isomap(n_neighbors=n_neighbors, n_components=2,
eigen_solver=eigen_solver,
path_method=path_method)
clf.fit(X)
G_iso = neighbors.kneighbors_graph(clf.embedding_,
n_neighbors,
mode='distance').toarray()
assert_array_almost_equal(G, G_iso)
def test_isomap_reconstruction_error():
# Same setup as in test_isomap_simple_grid, with an added dimension
N_per_side = 5
Npts = N_per_side ** 2
n_neighbors = Npts - 1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(N_per_side), repeat=2)))
# add noise in a third dimension
rng = np.random.RandomState(0)
noise = 0.1 * rng.randn(Npts, 1)
X = np.concatenate((X, noise), 1)
# compute input kernel
G = neighbors.kneighbors_graph(X, n_neighbors,
mode='distance').toarray()
centerer = preprocessing.KernelCenterer()
K = centerer.fit_transform(-0.5 * G ** 2)
for eigen_solver in eigen_solvers:
for path_method in path_methods:
clf = manifold.Isomap(n_neighbors=n_neighbors, n_components=2,
eigen_solver=eigen_solver,
path_method=path_method)
clf.fit(X)
# compute output kernel
G_iso = neighbors.kneighbors_graph(clf.embedding_,
n_neighbors,
mode='distance').toarray()
K_iso = centerer.fit_transform(-0.5 * G_iso ** 2)
# make sure error agrees
reconstruction_error = np.linalg.norm(K - K_iso) / Npts
assert_almost_equal(reconstruction_error,
clf.reconstruction_error())
def test_transform():
n_samples = 200
n_components = 10
noise_scale = 0.01
# Create S-curve dataset
X, y = datasets.samples_generator.make_s_curve(n_samples, random_state=0)
# Compute isomap embedding
iso = manifold.Isomap(n_components, 2)
X_iso = iso.fit_transform(X)
# Re-embed a noisy version of the points
rng = np.random.RandomState(0)
noise = noise_scale * rng.randn(*X.shape)
X_iso2 = iso.transform(X + noise)
# Make sure the rms error on re-embedding is comparable to noise_scale
assert_less(np.sqrt(np.mean((X_iso - X_iso2) ** 2)), 2 * noise_scale)
def test_pipeline():
# check that Isomap works fine as a transformer in a Pipeline
# only checks that no error is raised.
# TODO check that it actually does something useful
X, y = datasets.make_blobs(random_state=0)
clf = pipeline.Pipeline(
[('isomap', manifold.Isomap()),
('clf', neighbors.KNeighborsClassifier())])
clf.fit(X, y)
assert_less(.9, clf.score(X, y))
|
bsd-3-clause
|
slinderman/pyhawkes
|
examples/inference/standard_sgd_demo.py
|
1
|
3600
|
import numpy as np
import matplotlib.pyplot as plt
from pyhawkes.models import DiscreteTimeNetworkHawkesModelSpikeAndSlab, DiscreteTimeStandardHawkesModel
def sample_from_network_hawkes(C, K, T, dt, B):
# Create a true model
p = 0.8 * np.eye(C)
v = 10.0 * np.eye(C) + 20.0 * (1-np.eye(C))
c = (0.0 * (np.arange(K) < 10) + 1.0 * (np.arange(K) >= 10)).astype(np.int)
true_model = DiscreteTimeNetworkHawkesModelSpikeAndSlab(C=C, K=K, dt=dt, B=B, c=c, p=p, v=v)
# Plot the true network
plt.ion()
plot_network(true_model.weight_model.A,
true_model.weight_model.W,
vmax=0.5)
# Sample from the true model
S,R = true_model.generate(T=T)
# Return the spike count matrix
return S, R, true_model
def demo(seed=None):
"""
Suppose we have a very long recording such that computing gradients of
the log likelihood is quite expensive. Here we explore the use of
stochastic gradient descent to fit the standard Hawkes model, which has
a convex log likelihood. We first initialize the parameters using BFGS
on a manageable subset of the data. Then we use SGD to refine the parameters
on the entire dataset.
:return:
"""
raise NotImplementedError("This example needs to be updated.")
if seed is None:
seed = np.random.randint(2**32)
print("Setting seed to ", seed)
np.random.seed(seed)
C = 1 # Number of clusters in the true data
K = 10 # Number of nodes
T = 10000 # Number of time bins to simulate
dt = 1.0 # Time bin size
B = 3 # Number of basis functions
# Sample from the network Hawkes model
S, R, true_model = sample_from_network_hawkes(C, K, T, dt, B)
# Make a model to initialize the parameters
init_len = 256
init_model = DiscreteTimeStandardHawkesModel(K=K, dt=dt, B=B, beta=1.0)
init_model.add_data(S[:init_len, :])
print("Initializing with BFGS on first ", init_len, " time bins.")
init_model.fit_with_bfgs()
# Make another model for inference
test_model = DiscreteTimeStandardHawkesModel(K=K, dt=dt, B=B, beta=1.0)
# Initialize with the BFGS parameters
test_model.weights = init_model.weights
# Add the data in minibatches
test_model.add_data(S, minibatchsize=256)
# Plot the true and inferred firing rate
kplt = 0
plt.figure()
plt.plot(np.arange(256), R[:256,kplt], '-k', lw=2)
plt.ion()
ln = plt.plot(np.arange(256), test_model.compute_rate(ks=kplt)[:256], '-r')[0]
plt.show()
# Gradient descent
N_steps = 10000
lls = []
learning_rate = 0.01 * np.ones(N_steps)
momentum = 0.8 * np.ones(N_steps)
prev_velocity = None
for itr in range(N_steps):
W,ll,prev_velocity = test_model.sgd_step(prev_velocity, learning_rate[itr], momentum[itr])
lls.append(ll)
# Update plot
if itr % 5 == 0:
ln.set_data(np.arange(256), test_model.compute_rate(ks=kplt))
plt.title("Iteration %d" % itr)
plt.pause(0.001)
plt.ioff()
print("W true: ", true_model.weight_model.A * true_model.weight_model.W)
print("lambda0 true: ", true_model.bias_model.lambda0)
print("")
print("W test: ", test_model.W)
print("lambda0 test ", test_model.bias)
plt.figure()
plt.plot(np.arange(N_steps), lls)
plt.xlabel("Iteration")
plt.ylabel("Log likelihood")
plot_network(np.ones((K,K)), test_model.W)
plt.show()
# demo(2203329564)
# demo(1940839255)
# demo(288408413)
# demo(2074381354)
demo()
|
mit
|
schets/scikit-learn
|
sklearn/neighbors/tests/test_nearest_centroid.py
|
13
|
4187
|
"""
Testing for the nearest centroid module.
"""
import numpy as np
from scipy import sparse as sp
from numpy.testing import assert_array_equal
from numpy.testing import assert_equal
from sklearn.neighbors import NearestCentroid
from sklearn import datasets
from sklearn.metrics.pairwise import pairwise_distances
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
X_csr = sp.csr_matrix(X) # Sparse matrix
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
T_csr = sp.csr_matrix(T)
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = np.random.RandomState(1)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_classification_toy():
# Check classification on a toy dataset, including sparse versions.
clf = NearestCentroid()
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# Same test, but with a sparse matrix to fit and test.
clf = NearestCentroid()
clf.fit(X_csr, y)
assert_array_equal(clf.predict(T_csr), true_result)
# Fit with sparse, test with non-sparse
clf = NearestCentroid()
clf.fit(X_csr, y)
assert_array_equal(clf.predict(T), true_result)
# Fit with non-sparse, test with sparse
clf = NearestCentroid()
clf.fit(X, y)
assert_array_equal(clf.predict(T_csr), true_result)
# Fit and predict with non-CSR sparse matrices
clf = NearestCentroid()
clf.fit(X_csr.tocoo(), y)
assert_array_equal(clf.predict(T_csr.tolil()), true_result)
def test_precomputed():
clf = NearestCentroid(metric="precomputed")
clf.fit(X, y)
S = pairwise_distances(T, clf.centroids_)
assert_array_equal(clf.predict(S), true_result)
def test_iris():
# Check consistency on dataset iris.
for metric in ('euclidean', 'cosine'):
clf = NearestCentroid(metric=metric).fit(iris.data, iris.target)
score = np.mean(clf.predict(iris.data) == iris.target)
assert score > 0.9, "Failed with score = " + str(score)
def test_iris_shrinkage():
# Check consistency on dataset iris, when using shrinkage.
for metric in ('euclidean', 'cosine'):
for shrink_threshold in [None, 0.1, 0.5]:
clf = NearestCentroid(metric=metric,
shrink_threshold=shrink_threshold)
clf = clf.fit(iris.data, iris.target)
score = np.mean(clf.predict(iris.data) == iris.target)
assert score > 0.8, "Failed with score = " + str(score)
def test_pickle():
import pickle
# classification
obj = NearestCentroid()
obj.fit(iris.data, iris.target)
score = obj.score(iris.data, iris.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(iris.data, iris.target)
assert_array_equal(score, score2,
"Failed to generate same score"
" after pickling (classification).")
def test_shrinkage_threshold_decoded_y():
clf = NearestCentroid(shrink_threshold=0.01)
y_ind = np.asarray(y)
y_ind[y_ind == -1] = 0
clf.fit(X, y_ind)
centroid_encoded = clf.centroids_
clf.fit(X, y)
assert_array_equal(centroid_encoded, clf.centroids_)
def test_predict_translated_data():
# Test that NearestCentroid gives same results on translated data
rng = np.random.RandomState(0)
X = rng.rand(50, 50)
y = rng.randint(0, 3, 50)
noise = rng.rand(50)
clf = NearestCentroid(shrink_threshold=0.1)
clf.fit(X, y)
y_init = clf.predict(X)
clf = NearestCentroid(shrink_threshold=0.1)
X_noise = X + noise
clf.fit(X_noise, y)
y_translate = clf.predict(X_noise)
assert_array_equal(y_init, y_translate)
def test_manhattan_metric():
# Test the manhattan metric.
clf = NearestCentroid(metric='manhattan')
clf.fit(X, y)
dense_centroid = clf.centroids_
clf.fit(X_csr, y)
assert_array_equal(clf.centroids_, dense_centroid)
assert_array_equal(dense_centroid, [[-1, -1], [1, 1]])
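# With the manhattan metric NearestCentroid uses the feature-wise median as
# the class centroid, which is why the toy classes above reduce to
# [-1, -1] and [1, 1].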
if __name__ == "__main__":
import nose
nose.runmodule()
|
bsd-3-clause
|