repo_name (stringlengths 6-112) | path (stringlengths 4-204) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 714-810k) | license (stringclasses, 15 values)
---|---|---|---|---|---|
JosephKJ/SDD-RFCN-python | lib/objectness/utils.py | 1 | 5070 | import scipy
import os
import cv2
import numpy as np
from map import HeatMap
from sklearn.metrics import jaccard_similarity_score
from timer import Timer
from gc_executor import GC_executor
def generate_objectness_map(heatMapObj, image, hr_method='interpolation', use_gradcam=True):
"""
Generates the objectness confidence score for a given image.
:param heatMapObj: An object of the HeatMap class
:param image: The image to be processed
:param hr_method: optional, whether to do super-resolution (SR) or plain interpolation.
:return: binary_map: contains the objectness info; filtered_image: the map applied to the image.
"""
# 1. Create a Higher Resolution Image
img_gc = None
img = scipy.misc.imresize(image, 8.0, interp='bicubic')
if hr_method == 'super_resolution':
# TODO: Super Resolution
pass
# 2. Get objectness
timer = Timer()
timer.tic()
heat_map = heatMapObj.get_map(img)
# Adding for GC
if use_gradcam:
heat_map_for_gc = heat_map.data * ~heat_map.mask
gc = GC_executor()
heat_map_for_gc = scipy.misc.imresize(heat_map_for_gc, image.shape[0:2], interp='bicubic')
# img_gc, binary_map = gc.grab_cut_with_patch(np.copy(image), np.copy(heat_map_for_gc))
img_gc, binary_map = gc.grab_cut_without_patch(np.copy(image))
negative_binary_map = 1 - binary_map
else:
timer.toc()
# print 'Heatmap generation took {:.3f}s '.format(timer.total_time)
# print timer.total_time
min_pixel_intensity = heat_map.min()
binary_map = np.where(heat_map > min_pixel_intensity, 1, 0)
negative_binary_map = np.where(heat_map > min_pixel_intensity, 0, 1)
# Trim off any extra rows in the map
map_h, map_w = binary_map.shape
img_h, img_w, _ = image.shape
if map_h > img_h:
diff = map_h - img_h
binary_map = np.delete(binary_map, diff, axis=0) # remove 'diff' rows
negative_binary_map = np.delete(negative_binary_map, diff, axis=0) # remove 'diff' rows
if map_w > img_w:
diff = map_w - img_w
binary_map = np.delete(binary_map, diff, axis=1) # remove 'diff' columns
negative_binary_map = np.delete(negative_binary_map, diff, axis=1) # remove 'diff' columns
# Remove the border in the detections
border = 2
temp = np.zeros_like(binary_map)
temp[border:-border, border:-border] = binary_map[border:-border, border:-border]
binary_map = temp
temp = np.ones_like(negative_binary_map)
temp[border:-border, border:-border] = negative_binary_map[border:-border, border:-border]
negative_binary_map = temp
# Adding for GC ends
# Calculate the IoU
iou = findIoU(image, binary_map)
# Calculate objectness score
# It is the percentage of pixels that are not black
h, w = binary_map.shape
obj_score = np.count_nonzero(binary_map) / (w * h * 1.)
# Expand the map to three channels
three_channel_map = np.stack((binary_map, binary_map, binary_map), axis=2)
# Applying map on the image
filtered_image = image * three_channel_map
filtered_image = filtered_image.astype(np.uint8)
return binary_map, negative_binary_map, filtered_image, iou, obj_score, img_gc
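# Hedged usage sketch (illustrative only; mirrors the __main__ block at the bottom of this file):
# hm = HeatMap()
# img = cv2.imread('some_detection_crop.png', cv2.IMREAD_COLOR)
# binary_map, neg_map, filtered, iou, score, img_gc = generate_objectness_map(hm, img)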
def findIoU(image, prediction):
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# Otsu's Thresholding
ret, thresh = cv2.threshold(gray, 0, 1, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
# Noise removal
# kernel = np.ones((2, 2), np.uint8)
# opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel, iterations=2)
gt = thresh.flatten()
mask = prediction.flatten()
iou = jaccard_similarity_score(gt, mask)
return iou
def semantic_segment_image(heatMapObj, image, color='red'):
# Getting the objectness
binary_map, negative_binary_map, filtered_image, iou, obj_score, img_gc = generate_objectness_map(heatMapObj, image)
# Calculating the background
three_channel_map = np.stack((negative_binary_map, negative_binary_map, negative_binary_map), axis=2)
background = (image * three_channel_map).astype(np.uint8)
# Segmentation Foreground
r,g,b = get_rgb_from_color(color)
foreground = np.stack((binary_map*r, binary_map*g, binary_map*b), axis=2).astype(np.uint8)
# Combined Image
full_image = background + foreground
return full_image, iou, obj_score
def get_rgb_from_color(color):
colors = {'red': (255, 83, 26), 'green': (26, 255, 83), 'blue': (26, 140, 255),
'black': (77, 0, 77), 'white': (230, 230, 230), 'violet': (255, 26, 255)}
return colors[color]
if __name__ == '__main__':
print('Inside Main.')
hm = HeatMap()
image_path = os.path.join(
'/home/cs17mtech01001/workspace/SDD-RFCN-python/data/detections/bookstore_video0_9500_pedestrian_2.png')
img = cv2.imread(image_path, cv2.IMREAD_COLOR)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
semantic_segment_image(hm, img, 'red') | mit |
senior7515/librdkafka | tests/performance_plot.py | 6 | 2901 | #!/usr/bin/env python
#
import sys, json
import numpy as np
import matplotlib.pyplot as plt
from collections import defaultdict
def semver2int (semver):
if semver == 'trunk':
semver = '0.10.0.0'
vi = 0
i = 0
for v in reversed(semver.split('.')):
vi += int(v) * (10 ** i)
i += 1
return vi
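# Hedged worked example of the positional weighting (illustrative):
# semver2int('0.9.0.1') == 1*1 + 0*10 + 9*100 + 0*1000 == 901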
def get_perf_data (perfname, stats):
""" Return [labels,x,y,errs] for perfname 'mb_per_sec' as numpy arrays
labels: broker versions
x: list with identical value (to plot on same x point)
y: perfname counter (average)
errs: errors
"""
ver = defaultdict(list)
# Per version:
# * accumulate values
# * calculate average
# * calculate error
# Accumulate values per version
for x in stats:
v = str(x[0])
ver[v].append(x[1][perfname])
print('%s is %s' % (perfname, ver))
labels0 = sorted(ver.keys(), key=semver2int)
y0 = list()
errs0 = list()
# Maintain order by using labels0
for v in labels0:
# Calculate average
avg = sum(ver[v]) / float(len(ver[v]))
y0.append(avg)
# Calculate error
errs0.append(max(ver[v]) - avg)
labels = np.array(labels0)
y1 = np.array(y0)
x1 = np.array(range(0, len(labels)))
errs = np.array(errs0)
return [labels,x1,y1,errs]
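# Hedged example of the expected 'stats' layout (values are illustrative):
# stats = [('0.9.0.1', {'mb_per_sec': 35.2, 'records_per_sec': 120000}),
#          ('0.9.0.1', {'mb_per_sec': 36.0, 'records_per_sec': 123000}),
#          ('trunk',   {'mb_per_sec': 40.1, 'records_per_sec': 140000})]
# get_perf_data('mb_per_sec', stats) averages the two 0.9.0.1 samples and returns
# labels ordered by semver2int.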
def plot (description, name, stats, perfname, outfile=None):
labels,x,y,errs = get_perf_data(perfname, stats)
colors = np.random.rand(len(labels))
plt.title('%s: %s %s' % (description, name, perfname))
plt.xlabel('Kafka version')
plt.ylabel(perfname)
plt.errorbar(x, y, yerr=errs, alpha=0.5)
plt.xticks(x, labels, rotation='vertical')
plt.margins(0.2)
plt.subplots_adjust(bottom=0.2)
if outfile is None:
plt.show()
else:
plt.savefig(outfile, bbox_inches='tight')
return
if __name__ == '__main__':
outfile = sys.argv[1]
reports = []
for rf in sys.argv[2:]:
with open(rf) as f:
reports.append(json.load(f))
stats = defaultdict(list)
# Extract performance test data
for rep in reports:
perfs = rep.get('tests', dict()).get('0038_performance', dict()).get('report', None)
if perfs is None:
continue
for perf in perfs:
for n in ['producer','consumer']:
o = perf.get(n, None)
if o is None:
print('no %s in %s' % (n, perf))
continue
stats[n].append((rep.get('broker_version', 'unknown'), o))
for t in ['producer','consumer']:
for perfname in ['mb_per_sec', 'records_per_sec']:
plot('librdkafka 0038_performance test: %s (%d samples)' % \
(outfile, len(reports)),
t, stats[t], perfname, outfile='%s_%s_%s.png' % (outfile, t, perfname))
| bsd-2-clause |
zpace/stellarmass_pca | cosmicsfr.py | 1 | 1899 | import numpy as np
import matplotlib.pyplot as plt
import os, sys, glob
from importer import *
from astropy.cosmology import WMAP9, z_at_value
from astropy import units as u, constants as c
from astropy.io import fits
from figures_tools import savefig
def masked_z_at_value(fn, val, *args, **kwargs):
try:
z = z_at_value(fn, val, *args, **kwargs)
return z, False
except Exception:
return 0., True
def make_cosmic_sfr(CSP_dir):
SFHs_fnames = glob.glob(os.path.join(CSP_dir, 'SFHs_*.fits'))
nsubpersfh = fits.getval(SFHs_fnames[0], ext=0, keyword='NSUBPER')
SFHs = np.row_stack(
[fits.getdata(fn_, 'allsfhs') / fits.getdata(fn_, 'mformed')[::nsubpersfh, None]
for fn_ in SFHs_fnames])
ts = fits.getdata(SFHs_fnames[0], 'allts')
zs, zmasks = zip(*[masked_z_at_value(WMAP9.age, t_ * u.Gyr) for t_ in ts])
zs = np.ma.array(zs, mask=zmasks)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
SFRDmean = SFHs.mean(axis=0) / WMAP9.scale_factor(zs)**3.
ax.plot(1. / WMAP9.scale_factor(zs), SFRDmean / SFRDmean.max())
ax.set_xlim([1., 1. / WMAP9.scale_factor(10.)])
ax.set_yscale('log')
ax.set_ylim([.009, 1.05])
ax.set_xlabel(r'$\frac{1}{a}$', size='x-small')
ax.set_ylabel(r'$\log{\psi}$', size='x-small')
ax.tick_params(labelsize='x-small', which='both')
ax_ = ax.twiny()
ax_.set_xlim(ax.get_xlim())
zticks = np.linspace(0., 10., 11)
inv_sf_ticks = 1. / WMAP9.scale_factor(zticks)
ax_.set_xticks(inv_sf_ticks, minor=False)
ax_.set_xticklabels(zticks)
ax_.tick_params(labelsize='x-small')
ax_.set_xlabel(r'$z$', size='x-small')
fig.suptitle('``Cosmic" SFR', size='small')
savefig(fig, 'CosmicSFR.png', CSP_dir, close=True)
if __name__ == '__main__':
CSP_dir = '/usr/data/minhas2/zpace/CSPs/CSPs_CKC14_MaNGA_20180320-1/'
make_cosmic_sfr(CSP_dir)
| mit |
arnomoonens/DeepRL | yarll/misc/envs_statistics.py | 1 | 1065 | #!/usr/bin/env python
# -*- coding: utf8 -*-
import os
import argparse
import json
import matplotlib.pyplot as plt
parser = argparse.ArgumentParser()
parser.add_argument("envs_dir", type=str, help="Directory with environment specification files")
def main():
args = parser.parse_args()
_, _, env_spec_files = next(os.walk(args.envs_dir))
values = None
for env_spec_file in env_spec_files:
with open(os.path.join(args.envs_dir, env_spec_file)) as f:
envs = json.load(f)
if values is None:
values = [{} for _ in range(len(envs))]
for i, env in enumerate(envs):
for key, value in env.items():
if key == "name":
continue
values[i].setdefault(key, []).append(value)
keys = values[0].keys()
for key in keys:
fig = plt.figure()
plt.boxplot([env[key] for env in values])
plt.title(key)
fig.canvas.set_window_title(key)
plt.show()
if __name__ == '__main__':
main()
| mit |
chaluemwut/fbserver | venv/lib/python2.7/site-packages/sklearn/datasets/tests/test_mldata.py | 384 | 5221 | """Test functionality of mldata fetching utilities."""
import os
import shutil
import tempfile
import scipy as sp
from sklearn import datasets
from sklearn.datasets import mldata_filename, fetch_mldata
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_not_in
from sklearn.utils.testing import mock_mldata_urlopen
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import with_setup
from sklearn.utils.testing import assert_array_equal
tmpdir = None
def setup_tmpdata():
# create temporary dir
global tmpdir
tmpdir = tempfile.mkdtemp()
os.makedirs(os.path.join(tmpdir, 'mldata'))
def teardown_tmpdata():
# remove temporary dir
if tmpdir is not None:
shutil.rmtree(tmpdir)
def test_mldata_filename():
cases = [('datasets-UCI iris', 'datasets-uci-iris'),
('news20.binary', 'news20binary'),
('book-crossing-ratings-1.0', 'book-crossing-ratings-10'),
('Nile Water Level', 'nile-water-level'),
('MNIST (original)', 'mnist-original')]
for name, desired in cases:
assert_equal(mldata_filename(name), desired)
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_download():
"""Test that fetch_mldata is able to download and cache a data set."""
_urlopen_ref = datasets.mldata.urlopen
datasets.mldata.urlopen = mock_mldata_urlopen({
'mock': {
'label': sp.ones((150,)),
'data': sp.ones((150, 4)),
},
})
try:
mock = fetch_mldata('mock', data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data"]:
assert_in(n, mock)
assert_equal(mock.target.shape, (150,))
assert_equal(mock.data.shape, (150, 4))
assert_raises(datasets.mldata.HTTPError,
fetch_mldata, 'not_existing_name')
finally:
datasets.mldata.urlopen = _urlopen_ref
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_fetch_one_column():
_urlopen_ref = datasets.mldata.urlopen
try:
dataname = 'onecol'
# create fake data set in cache
x = sp.arange(6).reshape(2, 3)
datasets.mldata.urlopen = mock_mldata_urlopen({dataname: {'x': x}})
dset = fetch_mldata(dataname, data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "data"]:
assert_in(n, dset)
assert_not_in("target", dset)
assert_equal(dset.data.shape, (2, 3))
assert_array_equal(dset.data, x)
# transposing the data array
dset = fetch_mldata(dataname, transpose_data=False, data_home=tmpdir)
assert_equal(dset.data.shape, (3, 2))
finally:
datasets.mldata.urlopen = _urlopen_ref
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_fetch_multiple_column():
_urlopen_ref = datasets.mldata.urlopen
try:
# create fake data set in cache
x = sp.arange(6).reshape(2, 3)
y = sp.array([1, -1])
z = sp.arange(12).reshape(4, 3)
# by default
dataname = 'threecol-default'
datasets.mldata.urlopen = mock_mldata_urlopen({
dataname: (
{
'label': y,
'data': x,
'z': z,
},
['z', 'data', 'label'],
),
})
dset = fetch_mldata(dataname, data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "z"]:
assert_in(n, dset)
assert_not_in("x", dset)
assert_not_in("y", dset)
assert_array_equal(dset.data, x)
assert_array_equal(dset.target, y)
assert_array_equal(dset.z, z.T)
# by order
dataname = 'threecol-order'
datasets.mldata.urlopen = mock_mldata_urlopen({
dataname: ({'y': y, 'x': x, 'z': z},
['y', 'x', 'z']), })
dset = fetch_mldata(dataname, data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "z"]:
assert_in(n, dset)
assert_not_in("x", dset)
assert_not_in("y", dset)
assert_array_equal(dset.data, x)
assert_array_equal(dset.target, y)
assert_array_equal(dset.z, z.T)
# by number
dataname = 'threecol-number'
datasets.mldata.urlopen = mock_mldata_urlopen({
dataname: ({'y': y, 'x': x, 'z': z},
['z', 'x', 'y']),
})
dset = fetch_mldata(dataname, target_name=2, data_name=0,
data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "x"]:
assert_in(n, dset)
assert_not_in("y", dset)
assert_not_in("z", dset)
assert_array_equal(dset.data, z)
assert_array_equal(dset.target, y)
# by name
dset = fetch_mldata(dataname, target_name='y', data_name='z',
data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "x"]:
assert_in(n, dset)
assert_not_in("y", dset)
assert_not_in("z", dset)
finally:
datasets.mldata.urlopen = _urlopen_ref
| apache-2.0 |
pytrainer/pytrainer | pytrainer/gui/drawArea.py | 1 | 20427 | # -*- coding: utf-8 -*-
#Copyright (C) Fiz Vazquez [email protected]
#This program is free software; you can redistribute it and/or
#modify it under the terms of the GNU General Public License
#as published by the Free Software Foundation; either version 2
#of the License, or (at your option) any later version.
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#You should have received a copy of the GNU General Public License
#along with this program; if not, write to the Free Software
#Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
import matplotlib
matplotlib.use('GTK3Agg')
from matplotlib.figure import Figure
from matplotlib.backends.backend_gtk3agg import FigureCanvasGTK3Agg as FigureCanvasGTK
from matplotlib.backends.backend_gtk3 import NavigationToolbar2GTK3 as NavigationToolbar
import matplotlib.pyplot as plt
import pylab
import logging
class DrawArea:
def __init__(self, vbox = None, window = None):
logging.debug('>>')
#self.figure = Figure(figsize=(6,4), dpi=72)
#self.axis = self.figure.add_subplot(111)
self.vbox = vbox
self.window = window
#self.canvas = FigureCanvasGTK(self.figure) # a gtk.DrawingArea
#self.drawDefault()
self.NEARLY_ZERO = 0.0000000000000000000001
logging.debug('<<')
def stadistics(self,type,xvalues,yvalues,xlabel,ylabel,title,color=None,zones=None):
logging.debug('>>')
if len(xvalues[0]) < 1:
#self.drawDefault()
return False
#logging.debug('xvalues: '+str(xvalues))
#logging.debug('yvalues: '+str(yvalues))
#logging.debug("Type: "+type+" | title: "+str(title)+" | col: "+str(color)+" | xlabel: "+str(xlabel)+" | ylabel: "+str(ylabel))
if type == "bars":
self.drawBars(xvalues,yvalues,xlabel,ylabel,title,color)
elif type == "plot":
self.drawPlot(xvalues,yvalues,xlabel,ylabel,title,color,zones)
elif type == "pie" or type == "histogram":
self.drawZones(type,xvalues,yvalues,xlabel,ylabel,title,color,zones)
logging.debug('<<')
def drawBars(self,xvalues,yvalues,xlabel,ylabel,title,color):
logging.debug('>>')
logging.debug("Type: bars | title: %s"" | col: %s | xlabel: %s | ylabel: %s",
title, color, xlabel, ylabel)
self.removeVboxChildren()
#figure = Figure(figsize=(6,4), dpi=72)
figure = plt.figure()
logging.debug("Figure: %s", figure)
numCols=len(xvalues[0])
xmod = 0.4
self.showGraph=False
axis = figure.add_subplot(111)
logging.debug("Axis: %s", axis)
if len(xvalues) == 1: #One axis
barWidth = 0.8
barOffset = 0.1
logging.debug("One axis, barWidth %f, barOffset %f", barWidth, barOffset)
elif len(xvalues) == 2: #Twin axes
barWidth = 0.4
barOffset = 0.1
logging.debug("Twin axes, barWidth %f, barOffset %f", barWidth, barOffset)
else: #Error
logging.debug("Error: invalid number of axes" )
return
axis.set_xlabel(xlabel[0])
axis.set_ylabel(ylabel[0])
logging.debug("Labels set x: %s, y: %s", xlabel[0], ylabel[0])
xvals = [x+barOffset for x in range(0, numCols)]
yvals = [0] * numCols
for i in range(0, numCols):
yval = yvalues[0][i]
if float(yval) > 0.0:
self.showGraph=True
else:
yval = self.NEARLY_ZERO
yvals[i] = yval
if self.showGraph:
logging.debug("Drawing bars")
axis.bar(xvals, yvals, barWidth, color=color[0], align='edge')
else: #Only zero results
logging.debug("No results to draw")
pass
axis.grid(True)
axis.set_title("%s" %(title[0]))
logging.debug("Setting title to: %s", title[0])
for tl in axis.get_yticklabels():
logging.debug("Setting ticklabel color %s", color[0])
tl.set_color('%s' %color[0])
if len(xvalues) == 2: #Display twin axis
ax2 = axis.twinx()
logging.debug("Axis 2: Twin axis: %s", ax2)
xvals = [x+barOffset+barWidth for x in range(0, numCols)]
for i in range(0, numCols):
yval = yvalues[1][i]
if float(yval) > 0.0:
self.showGraph=True
else:
yval = self.NEARLY_ZERO
yvals[i] = yval
if self.showGraph:
logging.debug("Axis 2: Drawing bars")
ax2.bar(xvals, yvals, barWidth, color=color[1], align='edge')
logging.debug("Axis 2: Label set y: %s", ylabel[1])
ax2.set_ylabel(ylabel[1])
else: #Only zero results
logging.debug("Axis 2: No results to draw")
pass
for tl in ax2.get_yticklabels():
tl.set_color('%s' %color[1])
logging.debug("Axis 2: Setting ticklabel color %s", color[1])
_title = "%s vs %s" %(title[0],title[1])
logging.debug("Axis 2: Setting title to: %s", _title)
axis.set_title(_title)
logging.debug("Setting x ticks")
tickLocations = [x+0.5 for x in range(0, numCols)]
axis.set_xticks(tickLocations)
axis.set_xticklabels(xvalues[0])
logging.debug("Setting x limits")
axis.set_xlim(0, numCols)
canvas = FigureCanvasGTK(figure) # a gtk.DrawingArea
logging.debug("Got canvas: %s", canvas)
canvas.show()
logging.debug("Adding canvas to vbox")
self.vbox.pack_start(canvas, True, True, 0)
#toolbar = NavigationToolbar(canvas, self.window)
#self.vbox.pack_start(toolbar, False, False)
for child in self.vbox.get_children():
logging.debug('Child available: %s', child)
logging.debug('<<')
def getColor(self, x):
colors=["b","g","r","c","m","y","k", "w"]
if x >= len(colors):
x = x % len(colors)
return colors[x]
def fmtTableText(self, x, valuesAreTime):
if x <= 0.0001:
return ' '
elif valuesAreTime:
hour = int(x)
minutes = int((x-hour)*60)
hourLabel = _("h")
minLabel = _("min")
if hour > 0:
return "%d%s %02d%s" % (hour, hourLabel, minutes, minLabel)
else:
return "%02d%s" % (minutes, minLabel)
else:
return '%1.1f' % x
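# Hedged examples (assuming the gettext labels resolve to 'h' and 'min'):
# fmtTableText(1.5, True)  -> '1h 30min'
# fmtTableText(0.25, True) -> '15min'
# fmtTableText(2.5, False) -> '2.5'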
def drawStackedBars(self,xvalues,yvalues,ylabel,title, valuesAreTime=False, colors={}):
'''function to draw stacked bars
xvalues needs to be a list of lists of strings, e.g. [0]["Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"]
yvalues needs to be a list of dicts e.g. [0]{'Kayak': {'Tue': 10.08, 'Fri': 17.579999999999998, 'Thu': 15.66, 'Sat': 30.619999999999997}, {'Run': {'Mon': 9.65, 'Sun': 15.59}}
'''
#TODO tidy
logging.debug('>>')
logging.debug("Title: %s", title)
logging.debug("X values received: %s", xvalues)
logging.debug("Y values received: %s", yvalues)
self.removeVboxChildren()
#Check how many axes to draw
if len(xvalues) == 1: #One axis
barWidth = 0.8
barOffset = 0.1
elif len(xvalues) == 2: #Twin axes
barWidth = 0.4
barOffset = 0.1
else: #Error
return
keys = list(yvalues[0].keys()) # days of the week
numRows = len(keys)
numCols = len(xvalues[0])
if numRows == 0:
return
width = .8
#figure = plt.figure(figsize=(6,4), dpi=72)
figure = plt.figure()
logging.debug("Figure: %s", figure)
axis = plt.subplot(111)
ybottoms = [0] * numCols
yheights = [0] * numCols
inds = range(0, numCols)
xvals = [x+barOffset for x in range(0, numCols)]
cellText = []
self.showGraph=False
for k in colors:
if colors[k]==None: colors[k]=''
#Display first axis
xticks = []
for key in keys:
logging.debug("Day of the week: %s", key)
for ind in inds:
ybottoms[ind] += yheights[ind]
yheights[ind] = 0 #Zero heights
color = "#"+colors.get(key, '')
if len(color)<2:
color = self.getColor(keys.index(key))
for xvalue in xvalues[0]:
index = xvalues[0].index(xvalue)
if xvalue in yvalues[0][key]:
height = yvalues[0][key][xvalue]
if float(height) > 0.0:
self.showGraph=True
else:
height = self.NEARLY_ZERO
yheights[index] = height
cellText.append([self.fmtTableText(x, valuesAreTime[0]) for x in yheights])
if self.showGraph:
axis.bar(xvals, yheights, bottom=ybottoms, width=barWidth, color=color, align='edge', label=key)
else: #Only zero results
pass
axis.set_xticklabels('' * len(xvalues[0]))
axis.set_ylabel(ylabel[0])
if len(xvalues) == 1:
plt.title(title[0])
axis.legend(loc=0)
axis.set_xlim(0,numCols)
logging.debug("X values first axis: %s", xvals)
logging.debug("Y values first axis: %s", yheights)
#Display twin axis
if len(xvalues) == 2:
self.showGraph=False
ax2 = axis.twinx()
keys = list(yvalues[1].keys())
ybottoms = [0] * numCols
yheights = [self.NEARLY_ZERO] * numCols
for key in keys:
for ind in inds:
ybottoms[ind] += yheights[ind]
yheights[ind] = 0.0 #Zero heights
color = "#"+colors.get(key, '')
if len(color)<2:
color = self.getColor(keys.index(key))
for xvalue in xvalues[0]:
index = xvalues[0].index(xvalue)
if xvalue in yvalues[1][key]:
height = yvalues[1][key][xvalue]
if float(height) > 0.0:
self.showGraph=True
else:
height = self.NEARLY_ZERO
yheights[index] = height
textToAdd = self.fmtTableText(height, valuesAreTime[1])
if textToAdd != ' ':
row = keys.index(key)
col = index
cellText[row][col] += " | %s" % (self.fmtTableText(height, valuesAreTime[1]))
#print "Would add %s to %s %s" % (self.fmtTableText(height, valuesAreTime[1]), index, keys.index(key))
if self.showGraph:
xvals = [x+barOffset+barWidth for x in range(0, numCols)]
#print "ax2", xvals, yheights, ybottoms
ax2.bar(xvals, yheights, bottom=ybottoms, width=barWidth, color=color, align='edge', label=key)
else: #Only zero results
ax2.bar(xvals, [0]*numCols, bottom=[0]*numCols, width=barWidth, color=color, align='edge', label=key)
pass
ax2.set_xticklabels('' * len(xvalues[1]))
ax2.set_xlim(0,numCols)
ax2.set_ylabel(ylabel[1])
ax2.legend(loc=0)
plt.title("%s vs %s" %(title[0],title[1]))
## try to do some table stuff
colLabels = xvalues[0]
rowLabels = keys
axis.table(cellText=cellText, cellLoc='center', rowLabels=rowLabels, colLabels=colLabels, loc='bottom')
plt.subplots_adjust(left=0.15,bottom=0.08+(0.03*numRows))
axis.grid(True)
canvas = FigureCanvasGTK(figure) # a gtk.DrawingArea
canvas.show()
self.vbox.pack_start(canvas, True, True, 0)
#toolbar = NavigationToolbar(canvas, self.window)
#self.vbox.pack_start(toolbar, False, False)
for child in self.vbox.get_children():
logging.debug('Child available: %s', child)
logging.debug('<<')
def drawPlot(self,xvalues,yvalues,xlabel,ylabel,title,color,zones=None,xzones=None, ylimits=None, y1_linewidth=None):
logging.debug('>>')
logging.debug("Type: plot | title: %s | col: %s | xlabel: %s | ylabel: %s",
title, color, xlabel, ylabel)
logging.debug('xlabel: %s | ylabel: %s | title: %s', xlabel, ylabel, title)
self.removeVboxChildren()
figure = plt.Figure()
logging.debug("Figure: %s", figure)
#figure.clf()
i = 0
for value in xvalues:
if i<1:
logging.debug("i: %d, value: (%s) %s %s", i, value, xvalues, yvalues)
axis = figure.add_subplot(111)
logging.debug("Axis: %s", axis)
line = axis.plot(xvalues[i],yvalues[i], color=color[i])
logging.debug("Axis plotted, Line: %s", line)
if y1_linewidth is not None:
line[0].set_linewidth(y1_linewidth)
linewidth = line[0].get_linewidth()
axis.grid(True)
logging.debug("Axis grid on" )
for tl in axis.get_yticklabels():
tl.set_color('%s' %color[i])
logging.debug("Ticklabels color set" )
#Draw zones on graph, eg for each lap
if xzones is not None:
logging.debug("Setting xzones" )
for xzone in xzones:
if xzones.index(xzone) % 2:
zonecolor='b'
else:
zonecolor='g'
axis.axvspan(xzone[0], xzone[1], alpha=0.25, facecolor=zonecolor)
maxX = max(xvalues[i])
if i>=1:
ax2 = axis.twinx()
logging.debug("Axis2: Axis: %s", ax2)
ax2.plot(xvalues[i], yvalues[i], color=color[i])
logging.debug("Axis2: plotted" )
for tl in ax2.get_yticklabels():
tl.set_color('%s' %color[i])
logging.debug("Axis2: Ticklabels color set" )
maxXt = max(xvalues[i])
if maxXt > maxX:
maxX = maxXt
axis.set_xlabel(xlabel[i])
logging.debug("X label set" )
i+=1
axis.set_xlim(0, maxX)
if (len(xvalues)>1):
axis.set_title("%s vs %s" %(ylabel[0],ylabel[1]))
else:
axis.set_title("%s" %(ylabel[0]))
ylim_min, ylim_max = axis.get_ylim()
if ylimits is not None:
logging.debug("Using ylimits: %s", ylimits)
if ylimits[0] is not None:
ylim_min = ylimits[0]
if ylimits[1] is not None:
ylim_max = ylimits[1]
axis.set_ylim(ylim_min, ylim_max)
canvas = FigureCanvasGTK(figure) # a gtk.DrawingArea
logging.debug("Canvas: %s", canvas)
canvas.show()
self.vbox.pack_start(canvas, True, True, 0)
toolbar = NavigationToolbar(canvas, self.window)
self.vbox.pack_start(toolbar, False, False, 0)
for child in self.vbox.get_children():
logging.debug('Child available: %s', child)
logging.debug('<<')
return {'y1_min': ylim_min, 'y1_max': ylim_max, 'y1_linewidth': linewidth}
def drawZones(self,shape,xvalues,yvalues,xlabel,ylabel,title,color,zones=None):
logging.debug('>>')
logging.debug("Type: pie | title: %s | col: %s | xlabel: %s | ylabel: %s",
title, color, xlabel, ylabel)
self.removeVboxChildren()
figure = Figure()
logging.debug("Figure: %s", figure)
axis = figure.add_subplot(111)
labels = [_("rest")]
colors = ["#ffffff"]
for zone in reversed(zones):
labels.append(zone[3])
colors.append(zone[2])
zone_sum = [0]*6
for value in yvalues[0]:
# bisection, it's faster
if not value:
zone_sum[0] += 1
elif value <= zones[2][1]:
if value <= zones[4][1]:
if value <= zones[4][0]:
zone_sum[0] += 1
else:
zone_sum[1] += 1
else:
if value <= zones[3][1]:
zone_sum[2] += 1
else:
zone_sum[3] += 1
else:
if value <= zones[1][1]:
zone_sum[4] += 1
else:
zone_sum[5] += 1
if shape == "pie":
self._piePlot(axis, zone_sum, colors, labels)
elif shape == "histogram":
self._barPlot(axis, zone_sum, colors, labels)
canvas = FigureCanvasGTK(figure) # a gtk.DrawingArea
canvas.show()
for child in self.vbox.get_children():
logging.debug('Child available: %s', child)
self.vbox.pack_start(canvas, True, True, 0)
logging.debug('<<')
def _barPlot(self, axis, zone_sum, colors, labels):
invtotal = 100.0/sum(zone_sum)
fracs = [i*invtotal for i in zone_sum]
xticks = list(range(len(fracs)))
axis.bar(xticks, fracs, color=colors, align="center")
axis.set_xticks(xticks)
axis.set_xticklabels(labels)
axis.set_ylabel(_("Time in zone [%]"))
def _piePlot(self, axis, zone_sum, colors, labels):
labels_trunc = [l for z,l in zip(zone_sum, labels) if z > 0]
colors_trunc = [c for z,c in zip(zone_sum, colors) if z > 0]
zone_trunc = [z for z in zone_sum if z > 0]
explode = [0]*len(zone_trunc)
axis.pie(zone_trunc, explode=explode, labels=labels_trunc, colors=colors_trunc, autopct='%1.1f%%', shadow=True)
def drawDefault(self):
logging.debug('>>')
self.axis=self.figure.add_subplot(111)
self.axis.set_xlabel('Yepper')
self.axis.set_ylabel('Flabber')
self.axis.set_title('An Empty Graph')
self.axis.grid(True)
self.canvas.destroy()
self.canvas = FigureCanvasGTK(self.figure) # a gtk.DrawingArea
self.canvas.show()
self.vbox.pack_start(self.canvas, True, True, 0)
logging.debug('<<')
def fill_over(self, ax, x, y, val, color, over=True):
"""
Plot filled x,y for all y over val
if over = False, fill all areas < val
"""
logging.debug('>>')
ybase = pylab.asarray(y) - val
crossings = pylab.nonzero(pylab.less(ybase[:-1] * ybase[1:], 0))[0]
if ybase[0]>=0:
fillon = over
else:
fillon = not over
indLast = 0
for ind in crossings:
if fillon:
thisX = x[indLast:ind+1]
thisY = y[indLast:ind+1]
thisY[0] = val
thisY[-1] = val
ax.fill(thisX, thisY, color)
fillon = not fillon
indLast = ind
logging.debug('<<')
def removeVboxChildren(self):
''' function to delete vbox children so that multiple graphs do not appear
there must be a better way to do this - pyplot?
'''
logging.debug('>>')
#Tidy up draw areas
vboxChildren = self.vbox.get_children()
logging.debug('Vbox has %d children %s', len(vboxChildren), vboxChildren)
# ToDo: check why vertical container is shared
for child in vboxChildren:
#Remove all FigureCanvasGTK and NavigationToolbar2GTKAgg to stop double ups of graphs
if isinstance(child, FigureCanvasGTK) or isinstance(child, NavigationToolbar):
logging.debug('Removing child: %s', child)
self.vbox.remove(child)
logging.debug('<<')
| gpl-2.0 |
bridwell/orca_sandbox | orca_sandbox/orca2.py | 1 | 12848 | """
Another take at trying to enhance orca to allow:
- Re-usable functions via an argmap.
- Dynamic evaluation of caches.
- Attaching data frames AND columns.
- Better handling of broadcasts and relationships across tables.
"""
import inspect
import pandas as pd
########################
# PRIVATE MODULE-LEVEL
########################
def _init_globals():
"""
Initializes module-level variables.
"""
global _injectables, _clear_events, _attachments
# store all injectables in a single dictionary, therefore names must be unique
_injectables = {}
# stores the injectables along with the events that can clear their cache
_clear_events = {
'clear_all': set(),
'run': set(),
'iteration': set(),
'step': set()
}
# stores relationships between injectables; keys are the name of the destination
# injectable, values are sets of the linked injectable (i.e. columns)
_attachments = {}
# initialize the globals upon importing
# is there a better way to do this?
_init_globals()
def _clear_caches(event_name):
"""
Clears out the caches for the provided event.
"""
if event_name not in _clear_events:
raise ValueError('Event {} does not exist'.format(event_name))
for inj_name in _clear_events[event_name]:
_injectables[inj_name].clear()
def _get_func_args(func):
"""
Returns a function's argument names and default values. These are used by other
functions to establish dependencies and collect inputs.
Parameters:
-----------
func: callable
The function/callable to inspect.
Returns:
--------
arg_names: list of str
List of argument names.
default_kwargs:
Dictionary of default values. Keyed by the argument name.
"""
# get function arguments
spec = inspect.getargspec(func)
args = spec.args
# get defaults
defaults = spec.defaults
# get keyword args for the function's default values
default_kwargs = {}
if defaults is not None:
kw_start_idx = len(args) - len(defaults)
default_kwargs = dict(zip([key for key in args[kw_start_idx:]], list(defaults)))
return args, default_kwargs
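# Hedged worked example (hypothetical function, for illustration only):
# def _example(a, b, c=3):
#     pass
# _get_func_args(_example) -> (['a', 'b', 'c'], {'c': 3})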
def _collect_inputs(func, requester, arg_map={}):
"""
Collects the inputs from the environment that are needed to execute the provided function.
** Still not sure what to do with defaults, I guess use these as expressions? **
Parameters:
-----------
func: callable:
The function callable to execute.
requester: str
Name of the injectable doing the collection.
arg_map: dict, optional, default {}
Dictionary that maps between the argument names of the function (keys) and the
corresponding injectables (values). This allows for re-using the same function
with different injected inputs.
For example:
def my_func(a):
...
arg_map = {'a': 'my_injectable'}
Would collect the injectable named 'my_injectable' to be used for the 'a' argument.
If no arg_map is provided then the injectable named 'a' would be collected for
the 'a' argument.
Returns:
--------
- Named keyword arg dictionary containing needed to execute the function.
- Bool indicating if the provided function can use a previously cached value or if it needs
to be re-evaluated, this is based on if the any of the required inputs have been
re-evaluated since they were last collected.
- True indicates that a cached value may be re-used.
- False indicates that the function needs to be re-evaluated.
"""
kwargs = {}
cache = True
# get function signature
arg_names, defaults = _get_func_args(func)
# if no args are needed by the function then we're done
if len(arg_names) == 0:
return kwargs, cache
# loop the through the function args and find the matching input
for a in arg_names:
if a == 'self':
# call coming from a class, ignore
continue
# fetch the injectable
name = a
if a in arg_map:
name = arg_map[a]
if name not in _injectables:
# argument not found
raise ValueError("Injectable {} not found".format(name))
inj = _injectables[name]
# do the collection
if not isinstance(inj, _AbstractWrapper):
raise ValueError("Injectable {} not based on AbstractWrapper".format(name))
kwargs[a], curr_cache = inj.collect(requester)
if not curr_cache:
cache = False
return kwargs, cache
def _create_injectable(name, wrapped, autocall=True, cache_scope=None, arg_map={}):
"""
Factory method to create an instance of an injectable. Note: this isn't actually
adding anything to the environment. See the add_injectable method for that.
"""
if not callable(wrapped):
return _ValueWrapper(name, wrapped)
if not autocall:
return _CallbackWrapper(name, wrapped)
if cache_scope is None:
return _FuncWrapper(name, wrapped, arg_map)
else:
use_collect_status = False
if cache_scope == 'inputs':
use_collect_status = True
return _CachedFuncWrapper(name, wrapped, use_collect_status, arg_map)
def _attach(name, attach_to):
"""
Links an injectable with the other injectables it is attached to. Used to
bind columns or tables with other tables.
"""
if attach_to is None:
return
def attach(target_name):
if target_name in _attachments:
a = _attachments[target_name]
else:
a = set()
_attachments[target_name] = a
a.add(name)
if isinstance(attach_to, list):
for target_name in attach_to:
attach(target_name)
else:
attach(attach_to)
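# Hedged worked example (hypothetical names): _attach('age', 'households') records the
# link as _attachments == {'households': {'age'}}; passing a list attaches to several
# targets, e.g. _attach('age', ['households', 'persons']).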
########################
# WRAPPER CLASSES
########################
class _AbstractWrapper(object):
"""
Abstract class for wrappers. TODO: enfore this with ABC.
"""
def clear(self):
"""
Clears and cached information.
"""
pass
def collect(self, requester):
"""
Evaluates and returns the injectable for the given requester.
Should return a tuple in the form result, cache_status
- Result: the result of the collection
- cache_status: bool True if a cached value was returned, False
if a new value is returned since the last call from the requesting
injectable.
"""
pass
class _ValueWrapper(_AbstractWrapper):
"""
Wraps a value.
"""
def __init__(self, name, value):
self.name = name
self._data = value
self.clear()
def clear(self):
"""
Clears out cached dependents. Not sure if I really need this?
"""
self._cached = set()
def collect(self, requester):
"""
Returns the wrapped value, and notifies the requesting injectable if it has
been provided before.
"""
if requester in self._cached:
return self._data, True
else:
self._cached.add(requester)
return self._data, False
class _CallbackWrapper(_AbstractWrapper):
"""
Wraps a callback function that can be injected into another function.
"""
def __init__(self, name, wrapped):
self.name = name
self._wrapped = wrapped
def clear(self):
pass
def collect(self, requester):
return self._wrapped, False
class _FuncWrapper(_AbstractWrapper):
"""
Wraps a function that does NOT support caching.
"""
def __init__(self, name, wrapped, arg_map={}):
self.name = name
self._wrapped = wrapped
self._arg_map = arg_map
def clear(self):
"""
Just implemented to support the Abstract. Not needed.
"""
pass
def collect(self, requester):
"""
Do the evaluation.
"""
collected, _ = _collect_inputs(self._wrapped, self.name, arg_map=self._arg_map)
results = self._wrapped(**collected)
return results, False
class _CachedFuncWrapper(_AbstractWrapper):
"""
Wraps a function that supports caching.
"""
def __init__(self, name, wrapped, use_collect_status=False, arg_map={}):
self.name = name
self._wrapped = wrapped
self._arg_map = arg_map
self.use_collect_status = use_collect_status
self.clear()
def clear(self):
"""
Clears out cached data.
"""
self._cached = set()
self._data = None
def collect(self, requester):
# collect the inputs
# remember:
# cache_status of True indicate the input has not change since the last collection
# cache_status of False indicates one or more of the inputs has changed
collected, cache_status = _collect_inputs(self._wrapped, self.name, arg_map=self._arg_map)
# determine if we need to invalidate the cache
if self.use_collect_status and cache_status is False:
self.clear()
# evaluate the function if necessary
if self._data is None:
self._data = self._wrapped(**collected)
# return the result and the cache status for the given requster
if requester in self._cached:
return self._data, True
else:
self._cached.add(requester)
return self._data, False
class _ColumnWrapper(_AbstractWrapper):
"""
Wraps a pandas.Series or a callable that returns one.
"""
def __init__(self, name, wrapped, cache_scope=None, attach_to=None, arg_map={}):
# create the injectable
self.name = name
self._injectable = _create_injectable(name, wrapped, True, cache_scope, arg_map)
# add attachments
_attach(name, attach_to)
def clear(self):
self._injectable.clear()
def collect(self, requester):
result, cache_status = self._injectable.collect(requester)
assert isinstance(result, pd.Series)
result.name = self.name
return result, cache_status
class _TableWrapper(_AbstractWrapper):
"""
Wraps a pandas.DataFame or a callable that returns one.
"""
def __init__(self, name, wrapped, cache_scope=None, attach_to=None, columns=None, arg_map={}):
# create the injectable
self.name = name
self._injectable = _create_injectable(name, wrapped, True, cache_scope, arg_map)
self._local_columns = columns
# add attachments
_attach(name, attach_to)
def clear(self):
# maintain a cache for each column?
self._cache = {} # this will be a dictionary of sets
def collect(self, requester):
"""
For collection/evaluation, return the wrapper.
TODO: return some type of view instead?
"""
return self, False
########################
# PUBLIC MODULE-LEVEL
########################
def clear_all():
"""
Re-initializes everything.
"""
_init_globals()
def clear_cache():
"""
Clears all caches.
"""
_clear_caches('clear_all')
def add_injectable(name, wrapped, autocall=True, cache_scope=None, arg_map={}):
"""
Creates and adds an injectable to the environment.
"""
inj = _create_injectable(name, wrapped, autocall, cache_scope, arg_map)
_injectables[name] = inj
# set up clear events
_clear_events['clear_all'].add(name)
if cache_scope in _clear_events.keys():
_clear_events[cache_scope].add(name)
"""
def add_column(name, wrapped, attach_to=None, clear_on=None, arg_map={}):
_injectables[name] = ColumnWrapper(name, wrapped, clear_on, attach_to, arg_map)
_notify_changed(name)
"""
"""
def add_table(name, wrapped, attach_to=None, clear_on=None, columns=None, arg_map={}):
_injectables[name] = TableWrapper(
name, wrapped, clear_on, attach_to, columns, arg_map)
_notify_changed(name)
"""
########################
# DECORATORS
########################
def get_name(name, func):
if name:
return name
else:
return func.__name__
def injectable(name=None, autocall=True, cache_scope=None):
"""
Decorates functions that will register
a generic injectable.
"""
def decorator(func):
add_injectable(get_name(name, func), func, autocall, cache_scope)
return func
return decorator
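# Hedged usage sketch (illustrative; the injectable names below are hypothetical):
# @injectable(cache_scope='run')
# def household_count(households):
#     return len(households)
# # 'households' must itself be registered, e.g. add_injectable('households', some_dataframe)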
def callback(name=None):
"""
Decorates functions that will return a callback function.
"""
def decorator(func):
add_injectable(get_name(name, func), func, autocall=False)
return func
return decorator
| mit |
lfairchild/PmagPy | programs/revtest.py | 2 | 2187 | #!/usr/bin/env python
from __future__ import print_function
from builtins import input
import sys
import numpy
import matplotlib
if matplotlib.get_backend() != "TKAgg":
matplotlib.use("TKAgg")
import pmagpy.pmagplotlib as pmagplotlib
import pmagpy.pmag as pmag
def main():
"""
NAME
revtest.py
DESCRIPTION
calculates bootstrap statistics to test for antipodality
INPUT FORMAT
takes dec/inc as first two columns in space delimited file
SYNTAX
revtest.py [-h] [command line options]
OPTION
-h prints help message and quits
-f FILE, sets input filename on command line
-fmt [svg,png,jpg], sets format for image output
-sav saves the figures silently and quits
"""
fmt,plot='svg',0
if '-h' in sys.argv: # check if help is needed
print(main.__doc__)
sys.exit() # graceful quit
if '-f' in sys.argv:
ind=sys.argv.index('-f')
file=sys.argv[ind+1]
data=numpy.loadtxt(file).transpose()
D=numpy.array([data[0],data[1]]).transpose()
else:
print('-f is a required switch')
print(main.__doc__)
print(sys.exit())
if '-fmt' in sys.argv:
ind=sys.argv.index('-fmt')
fmt=sys.argv[ind+1]
if '-sav' in sys.argv:plot=1
# set up plots
d=""
CDF={'X':1,'Y':2,'Z':3}
pmagplotlib.plot_init(CDF['X'],5,5)
pmagplotlib.plot_init(CDF['Y'],5,5)
pmagplotlib.plot_init(CDF['Z'],5,5)
#
# flip reverse mode
#
D1,D2=pmag.flip(D)
counter,NumSims=0,500
#
# get bootstrapped means for each data set
#
print('doing first mode, be patient')
BDI1=pmag.di_boot(D1)
print('doing second mode, be patient')
BDI2=pmag.di_boot(D2)
pmagplotlib.plot_com(CDF,BDI1,BDI2,[""])
files={}
for key in list(CDF.keys()):
files[key]='REV'+'_'+key+'.'+fmt
if plot==0:
pmagplotlib.draw_figs(CDF)
ans= input("s[a]ve plots, [q]uit: ")
if ans=='a':
pmagplotlib.save_plots(CDF,files)
print('good bye')
sys.exit()
else:
pmagplotlib.save_plots(CDF,files)
if __name__ == "__main__":
main()
| bsd-3-clause |
mesnardo/PetIBM | examples/ibpm/cylinder2dRe550/scripts/plotDragCoefficient.py | 3 | 2004 | """
Plots the instantaneous drag coefficient between 0 and 3 time-units of flow
simulation and compares with numerical results from
Koumoutsakos and Leonard (1995).
_References:_
* Koumoutsakos, P., & Leonard, A. (1995).
High-resolution simulations of the flow around an impulsively started
cylinder using vortex methods.
Journal of Fluid Mechanics, 296, 1-38.
"""
import os
import pathlib
import numpy
import collections
from matplotlib import pyplot
simu_dir = pathlib.Path(__file__).absolute().parents[1]
root_dir = os.environ.get('PETIBM_EXAMPLES')
if not root_dir:
root_dir = simu_dir.parents[1]
data = collections.OrderedDict({})
# Reads forces from file.
label = 'PetIBM'
filepath = simu_dir / 'forces-0.txt'
with open(filepath, 'r') as infile:
t, fx = numpy.loadtxt(infile, dtype=numpy.float64,
unpack=True, usecols=(0, 1))
data[label] = {'t': t, 'cd': 2 * fx}
data[label]['kwargs'] = {}
# Reads drag coefficient of Koumoutsakos and Leonard (1995) for Re=550.
label = 'Koumoutsakos and Leonard (1995)'
filename = 'koumoutsakos_leonard_1995_cylinder_dragCoefficientRe550.dat'
filepath = root_dir / 'data' / filename
with open(filepath, 'r') as infile:
t, cd = numpy.loadtxt(infile, dtype=numpy.float64, unpack=True)
data[label] = {'t': 0.5 * t, 'cd': cd}
data[label]['kwargs'] = {'linewidth': 0, 'marker': 'o',
'markerfacecolor': 'none', 'markeredgecolor': 'black'}
pyplot.rc('font', family='serif', size=16)
# Plots the instantaneous drag coefficients.
fig, ax = pyplot.subplots(figsize=(8.0, 6.0))
ax.grid()
ax.set_xlabel('Non-dimensional time')
ax.set_ylabel('Drag coefficient')
for label, subdata in data.items():
ax.plot(subdata['t'], subdata['cd'], label=label, **subdata['kwargs'])
ax.axis((0.0, 3.0, 0.0, 2.0))
ax.legend()
pyplot.show()
# Save figure.
fig_dir = simu_dir / 'figures'
fig_dir.mkdir(parents=True, exist_ok=True)
filepath = fig_dir / 'dragCoefficient.png'
fig.savefig(str(filepath), dpi=300)
| bsd-3-clause |
luo66/scikit-learn | sklearn/ensemble/gradient_boosting.py | 50 | 67625 | """Gradient Boosted Regression Trees
This module contains methods for fitting gradient boosted regression trees for
both classification and regression.
The module structure is the following:
- The ``BaseGradientBoosting`` base class implements a common ``fit`` method
for all the estimators in the module. Regression and classification
only differ in the concrete ``LossFunction`` used.
- ``GradientBoostingClassifier`` implements gradient boosting for
classification problems.
- ``GradientBoostingRegressor`` implements gradient boosting for
regression problems.
"""
# Authors: Peter Prettenhofer, Scott White, Gilles Louppe, Emanuele Olivetti,
# Arnaud Joly, Jacob Schreiber
# License: BSD 3 clause
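# Hedged usage sketch (illustrative only; follows the standard scikit-learn estimator API,
# data names are placeholders):
#
#     from sklearn.ensemble import GradientBoostingRegressor
#     est = GradientBoostingRegressor(n_estimators=100, learning_rate=0.1,
#                                     max_depth=3, loss='ls')
#     est.fit(X_train, y_train)
#     mse = ((est.predict(X_test) - y_test) ** 2).mean()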
from __future__ import print_function
from __future__ import division
from abc import ABCMeta, abstractmethod
from time import time
import numbers
import numpy as np
from scipy import stats
from .base import BaseEnsemble
from ..base import BaseEstimator
from ..base import ClassifierMixin
from ..base import RegressorMixin
from ..utils import check_random_state, check_array, check_X_y, column_or_1d
from ..utils import check_consistent_length, deprecated
from ..utils.extmath import logsumexp
from ..utils.fixes import expit, bincount
from ..utils.stats import _weighted_percentile
from ..utils.validation import check_is_fitted, NotFittedError
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..tree.tree import DecisionTreeRegressor
from ..tree._tree import DTYPE, TREE_LEAF
from ..tree._splitter import PresortBestSplitter
from ..tree._criterion import FriedmanMSE
from ._gradient_boosting import predict_stages
from ._gradient_boosting import predict_stage
from ._gradient_boosting import _random_sample_mask
class QuantileEstimator(BaseEstimator):
"""An estimator predicting the alpha-quantile of the training targets."""
def __init__(self, alpha=0.9):
if not 0 < alpha < 1.0:
raise ValueError("`alpha` must be in (0, 1.0) but was %r" % alpha)
self.alpha = alpha
def fit(self, X, y, sample_weight=None):
if sample_weight is None:
self.quantile = stats.scoreatpercentile(y, self.alpha * 100.0)
else:
self.quantile = _weighted_percentile(y, sample_weight, self.alpha * 100.0)
def predict(self, X):
check_is_fitted(self, 'quantile')
y = np.empty((X.shape[0], 1), dtype=np.float64)
y.fill(self.quantile)
return y
class MeanEstimator(BaseEstimator):
"""An estimator predicting the mean of the training targets."""
def fit(self, X, y, sample_weight=None):
if sample_weight is None:
self.mean = np.mean(y)
else:
self.mean = np.average(y, weights=sample_weight)
def predict(self, X):
check_is_fitted(self, 'mean')
y = np.empty((X.shape[0], 1), dtype=np.float64)
y.fill(self.mean)
return y
class LogOddsEstimator(BaseEstimator):
"""An estimator predicting the log odds ratio."""
scale = 1.0
def fit(self, X, y, sample_weight=None):
# pre-cond: pos, neg are encoded as 1, 0
if sample_weight is None:
pos = np.sum(y)
neg = y.shape[0] - pos
else:
pos = np.sum(sample_weight * y)
neg = np.sum(sample_weight * (1 - y))
if neg == 0 or pos == 0:
raise ValueError('y contains non binary labels.')
self.prior = self.scale * np.log(pos / neg)
def predict(self, X):
check_is_fitted(self, 'prior')
y = np.empty((X.shape[0], 1), dtype=np.float64)
y.fill(self.prior)
return y
class ScaledLogOddsEstimator(LogOddsEstimator):
"""Log odds ratio scaled by 0.5 -- for exponential loss. """
scale = 0.5
class PriorProbabilityEstimator(BaseEstimator):
"""An estimator predicting the probability of each
class in the training data.
"""
def fit(self, X, y, sample_weight=None):
if sample_weight is None:
sample_weight = np.ones_like(y, dtype=np.float64)
class_counts = bincount(y, weights=sample_weight)
self.priors = class_counts / class_counts.sum()
def predict(self, X):
check_is_fitted(self, 'priors')
y = np.empty((X.shape[0], self.priors.shape[0]), dtype=np.float64)
y[:] = self.priors
return y
class ZeroEstimator(BaseEstimator):
"""An estimator that simply predicts zero. """
def fit(self, X, y, sample_weight=None):
if np.issubdtype(y.dtype, int):
# classification
self.n_classes = np.unique(y).shape[0]
if self.n_classes == 2:
self.n_classes = 1
else:
# regression
self.n_classes = 1
def predict(self, X):
check_is_fitted(self, 'n_classes')
y = np.empty((X.shape[0], self.n_classes), dtype=np.float64)
y.fill(0.0)
return y
class LossFunction(six.with_metaclass(ABCMeta, object)):
"""Abstract base class for various loss functions.
Attributes
----------
K : int
The number of regression trees to be induced;
1 for regression and binary classification;
``n_classes`` for multi-class classification.
"""
is_multi_class = False
def __init__(self, n_classes):
self.K = n_classes
def init_estimator(self):
"""Default ``init`` estimator for loss function. """
raise NotImplementedError()
@abstractmethod
def __call__(self, y, pred, sample_weight=None):
"""Compute the loss of prediction ``pred`` and ``y``. """
@abstractmethod
def negative_gradient(self, y, y_pred, **kargs):
"""Compute the negative gradient.
Parameters
---------
y : np.ndarray, shape=(n,)
The target labels.
y_pred : np.ndarray, shape=(n,):
The predictions.
"""
def update_terminal_regions(self, tree, X, y, residual, y_pred,
sample_weight, sample_mask,
learning_rate=1.0, k=0):
"""Update the terminal regions (=leaves) of the given tree and
updates the current predictions of the model. Traverses tree
and invokes template method `_update_terminal_region`.
Parameters
----------
tree : tree.Tree
The tree object.
X : ndarray, shape=(n, m)
The data array.
y : ndarray, shape=(n,)
The target labels.
residual : ndarray, shape=(n,)
The residuals (usually the negative gradient).
y_pred : ndarray, shape=(n,)
The predictions.
sample_weight : ndarray, shape=(n,)
The weight of each sample.
sample_mask : ndarray, shape=(n,)
The sample mask to be used.
learning_rate : float, default=0.1
learning rate shrinks the contribution of each tree by
``learning_rate``.
k : int, default 0
The index of the estimator being updated.
"""
# compute leaf for each sample in ``X``.
terminal_regions = tree.apply(X)
# mask all which are not in sample mask.
masked_terminal_regions = terminal_regions.copy()
masked_terminal_regions[~sample_mask] = -1
# update each leaf (= perform line search)
for leaf in np.where(tree.children_left == TREE_LEAF)[0]:
self._update_terminal_region(tree, masked_terminal_regions,
leaf, X, y, residual,
y_pred[:, k], sample_weight)
# update predictions (both in-bag and out-of-bag)
y_pred[:, k] += (learning_rate
* tree.value[:, 0, 0].take(terminal_regions, axis=0))
@abstractmethod
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""Template method for updating terminal regions (=leaves). """
class RegressionLossFunction(six.with_metaclass(ABCMeta, LossFunction)):
"""Base class for regression loss functions. """
def __init__(self, n_classes):
if n_classes != 1:
raise ValueError("``n_classes`` must be 1 for regression but "
"was %r" % n_classes)
super(RegressionLossFunction, self).__init__(n_classes)
class LeastSquaresError(RegressionLossFunction):
"""Loss function for least squares (LS) estimation.
Terminal regions do not need to be updated for least squares. """
def init_estimator(self):
return MeanEstimator()
def __call__(self, y, pred, sample_weight=None):
if sample_weight is None:
return np.mean((y - pred.ravel()) ** 2.0)
else:
return (1.0 / sample_weight.sum() *
np.sum(sample_weight * ((y - pred.ravel()) ** 2.0)))
def negative_gradient(self, y, pred, **kargs):
return y - pred.ravel()
def update_terminal_regions(self, tree, X, y, residual, y_pred,
sample_weight, sample_mask,
learning_rate=1.0, k=0):
"""Least squares does not need to update terminal regions.
But it has to update the predictions.
"""
# update predictions
y_pred[:, k] += learning_rate * tree.predict(X).ravel()
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
pass
class LeastAbsoluteError(RegressionLossFunction):
"""Loss function for least absolute deviation (LAD) regression. """
def init_estimator(self):
return QuantileEstimator(alpha=0.5)
def __call__(self, y, pred, sample_weight=None):
if sample_weight is None:
return np.abs(y - pred.ravel()).mean()
else:
return (1.0 / sample_weight.sum() *
np.sum(sample_weight * np.abs(y - pred.ravel())))
def negative_gradient(self, y, pred, **kargs):
"""1.0 if y - pred > 0.0 else -1.0"""
pred = pred.ravel()
return 2.0 * (y - pred > 0.0) - 1.0
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""LAD updates terminal regions to median estimates. """
terminal_region = np.where(terminal_regions == leaf)[0]
sample_weight = sample_weight.take(terminal_region, axis=0)
diff = y.take(terminal_region, axis=0) - pred.take(terminal_region, axis=0)
tree.value[leaf, 0, 0] = _weighted_percentile(diff, sample_weight, percentile=50)
class HuberLossFunction(RegressionLossFunction):
"""Huber loss function for robust regression.
M-Regression proposed in Friedman 2001.
References
----------
J. Friedman, Greedy Function Approximation: A Gradient Boosting
Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
"""
def __init__(self, n_classes, alpha=0.9):
super(HuberLossFunction, self).__init__(n_classes)
self.alpha = alpha
self.gamma = None
def init_estimator(self):
return QuantileEstimator(alpha=0.5)
def __call__(self, y, pred, sample_weight=None):
pred = pred.ravel()
diff = y - pred
gamma = self.gamma
if gamma is None:
if sample_weight is None:
gamma = stats.scoreatpercentile(np.abs(diff), self.alpha * 100)
else:
gamma = _weighted_percentile(np.abs(diff), sample_weight, self.alpha * 100)
gamma_mask = np.abs(diff) <= gamma
if sample_weight is None:
sq_loss = np.sum(0.5 * diff[gamma_mask] ** 2.0)
lin_loss = np.sum(gamma * (np.abs(diff[~gamma_mask]) - gamma / 2.0))
loss = (sq_loss + lin_loss) / y.shape[0]
else:
sq_loss = np.sum(0.5 * sample_weight[gamma_mask] * diff[gamma_mask] ** 2.0)
lin_loss = np.sum(gamma * sample_weight[~gamma_mask] *
(np.abs(diff[~gamma_mask]) - gamma / 2.0))
loss = (sq_loss + lin_loss) / sample_weight.sum()
return loss
def negative_gradient(self, y, pred, sample_weight=None, **kargs):
pred = pred.ravel()
diff = y - pred
if sample_weight is None:
gamma = stats.scoreatpercentile(np.abs(diff), self.alpha * 100)
else:
gamma = _weighted_percentile(np.abs(diff), sample_weight, self.alpha * 100)
gamma_mask = np.abs(diff) <= gamma
residual = np.zeros((y.shape[0],), dtype=np.float64)
residual[gamma_mask] = diff[gamma_mask]
residual[~gamma_mask] = gamma * np.sign(diff[~gamma_mask])
self.gamma = gamma
return residual
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
terminal_region = np.where(terminal_regions == leaf)[0]
sample_weight = sample_weight.take(terminal_region, axis=0)
gamma = self.gamma
diff = (y.take(terminal_region, axis=0)
- pred.take(terminal_region, axis=0))
median = _weighted_percentile(diff, sample_weight, percentile=50)
diff_minus_median = diff - median
tree.value[leaf, 0] = median + np.mean(
np.sign(diff_minus_median) *
np.minimum(np.abs(diff_minus_median), gamma))
class QuantileLossFunction(RegressionLossFunction):
"""Loss function for quantile regression.
Quantile regression allows to estimate the percentiles
of the conditional distribution of the target.
"""
def __init__(self, n_classes, alpha=0.9):
super(QuantileLossFunction, self).__init__(n_classes)
assert 0 < alpha < 1.0
self.alpha = alpha
self.percentile = alpha * 100.0
def init_estimator(self):
return QuantileEstimator(self.alpha)
def __call__(self, y, pred, sample_weight=None):
pred = pred.ravel()
diff = y - pred
alpha = self.alpha
mask = y > pred
if sample_weight is None:
loss = (alpha * diff[mask].sum() +
(1.0 - alpha) * diff[~mask].sum()) / y.shape[0]
else:
loss = ((alpha * np.sum(sample_weight[mask] * diff[mask]) +
(1.0 - alpha) * np.sum(sample_weight[~mask] * diff[~mask])) /
sample_weight.sum())
return loss
def negative_gradient(self, y, pred, **kargs):
alpha = self.alpha
pred = pred.ravel()
mask = y > pred
return (alpha * mask) - ((1.0 - alpha) * ~mask)
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
terminal_region = np.where(terminal_regions == leaf)[0]
diff = (y.take(terminal_region, axis=0)
- pred.take(terminal_region, axis=0))
sample_weight = sample_weight.take(terminal_region, axis=0)
val = _weighted_percentile(diff, sample_weight, self.percentile)
tree.value[leaf, 0] = val
class ClassificationLossFunction(six.with_metaclass(ABCMeta, LossFunction)):
"""Base class for classification loss functions. """
def _score_to_proba(self, score):
"""Template method to convert scores to probabilities.
        Loss functions that do not support probabilities raise a TypeError here.
"""
raise TypeError('%s does not support predict_proba' % type(self).__name__)
@abstractmethod
def _score_to_decision(self, score):
"""Template method to convert scores to decisions.
Returns int arrays.
"""
class BinomialDeviance(ClassificationLossFunction):
"""Binomial deviance loss function for binary classification.
Binary classification is a special case; here, we only need to
fit one tree instead of ``n_classes`` trees.
"""
def __init__(self, n_classes):
if n_classes != 2:
raise ValueError("{0:s} requires 2 classes.".format(
self.__class__.__name__))
# we only need to fit one tree for binary clf.
super(BinomialDeviance, self).__init__(1)
def init_estimator(self):
return LogOddsEstimator()
def __call__(self, y, pred, sample_weight=None):
"""Compute the deviance (= 2 * negative log-likelihood). """
# logaddexp(0, v) == log(1.0 + exp(v))
pred = pred.ravel()
if sample_weight is None:
return -2.0 * np.mean((y * pred) - np.logaddexp(0.0, pred))
else:
return (-2.0 / sample_weight.sum() *
np.sum(sample_weight * ((y * pred) - np.logaddexp(0.0, pred))))
def negative_gradient(self, y, pred, **kargs):
"""Compute the residual (= negative gradient). """
return y - expit(pred.ravel())
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""Make a single Newton-Raphson step.
our node estimate is given by:
sum(w * (y - prob)) / sum(w * prob * (1 - prob))
        we take advantage of the fact that: y - prob = residual
"""
terminal_region = np.where(terminal_regions == leaf)[0]
residual = residual.take(terminal_region, axis=0)
y = y.take(terminal_region, axis=0)
sample_weight = sample_weight.take(terminal_region, axis=0)
numerator = np.sum(sample_weight * residual)
denominator = np.sum(sample_weight * (y - residual) * (1 - y + residual))
if denominator == 0.0:
tree.value[leaf, 0, 0] = 0.0
else:
tree.value[leaf, 0, 0] = numerator / denominator
def _score_to_proba(self, score):
proba = np.ones((score.shape[0], 2), dtype=np.float64)
proba[:, 1] = expit(score.ravel())
proba[:, 0] -= proba[:, 1]
return proba
def _score_to_decision(self, score):
proba = self._score_to_proba(score)
return np.argmax(proba, axis=1)
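# Editor's note (hedged): BinomialDeviance operates on raw scores (log-odds).
# A score f maps to P(y=1) = expit(f) = 1 / (1 + exp(-f)), so f = 0.0 gives
# 0.5 and f = 2.0 gives roughly 0.88; the decision rule in _score_to_decision
# is therefore equivalent to thresholding the score at 0.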
class MultinomialDeviance(ClassificationLossFunction):
"""Multinomial deviance loss function for multi-class classification.
For multi-class classification we need to fit ``n_classes`` trees at
each stage.
"""
is_multi_class = True
def __init__(self, n_classes):
if n_classes < 3:
raise ValueError("{0:s} requires more than 2 classes.".format(
self.__class__.__name__))
super(MultinomialDeviance, self).__init__(n_classes)
def init_estimator(self):
return PriorProbabilityEstimator()
def __call__(self, y, pred, sample_weight=None):
# create one-hot label encoding
Y = np.zeros((y.shape[0], self.K), dtype=np.float64)
for k in range(self.K):
Y[:, k] = y == k
if sample_weight is None:
return np.sum(-1 * (Y * pred).sum(axis=1) +
logsumexp(pred, axis=1))
else:
return np.sum(-1 * sample_weight * (Y * pred).sum(axis=1) +
logsumexp(pred, axis=1))
def negative_gradient(self, y, pred, k=0, **kwargs):
"""Compute negative gradient for the ``k``-th class. """
return y - np.nan_to_num(np.exp(pred[:, k] -
logsumexp(pred, axis=1)))
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""Make a single Newton-Raphson step. """
terminal_region = np.where(terminal_regions == leaf)[0]
residual = residual.take(terminal_region, axis=0)
y = y.take(terminal_region, axis=0)
sample_weight = sample_weight.take(terminal_region, axis=0)
numerator = np.sum(sample_weight * residual)
numerator *= (self.K - 1) / self.K
denominator = np.sum(sample_weight * (y - residual) *
(1.0 - y + residual))
if denominator == 0.0:
tree.value[leaf, 0, 0] = 0.0
else:
tree.value[leaf, 0, 0] = numerator / denominator
def _score_to_proba(self, score):
return np.nan_to_num(
np.exp(score - (logsumexp(score, axis=1)[:, np.newaxis])))
def _score_to_decision(self, score):
proba = self._score_to_proba(score)
return np.argmax(proba, axis=1)
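# Editor's note (hedged): _score_to_proba above is a numerically stable
# softmax; subtracting logsumexp(score, axis=1) before exponentiating keeps
# the class probabilities normalised without overflow, and negative_gradient
# reduces to y_k - p_k for the k-th class.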
class ExponentialLoss(ClassificationLossFunction):
"""Exponential loss function for binary classification.
Same loss as AdaBoost.
References
----------
Greg Ridgeway, Generalized Boosted Models: A guide to the gbm package, 2007
"""
def __init__(self, n_classes):
if n_classes != 2:
raise ValueError("{0:s} requires 2 classes.".format(
self.__class__.__name__))
# we only need to fit one tree for binary clf.
super(ExponentialLoss, self).__init__(1)
def init_estimator(self):
return ScaledLogOddsEstimator()
def __call__(self, y, pred, sample_weight=None):
pred = pred.ravel()
if sample_weight is None:
return np.mean(np.exp(-(2. * y - 1.) * pred))
else:
return (1.0 / sample_weight.sum() *
np.sum(sample_weight * np.exp(-(2 * y - 1) * pred)))
def negative_gradient(self, y, pred, **kargs):
y_ = -(2. * y - 1.)
return y_ * np.exp(y_ * pred.ravel())
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
terminal_region = np.where(terminal_regions == leaf)[0]
pred = pred.take(terminal_region, axis=0)
y = y.take(terminal_region, axis=0)
sample_weight = sample_weight.take(terminal_region, axis=0)
y_ = 2. * y - 1.
numerator = np.sum(y_ * sample_weight * np.exp(-y_ * pred))
denominator = np.sum(sample_weight * np.exp(-y_ * pred))
if denominator == 0.0:
tree.value[leaf, 0, 0] = 0.0
else:
tree.value[leaf, 0, 0] = numerator / denominator
def _score_to_proba(self, score):
proba = np.ones((score.shape[0], 2), dtype=np.float64)
proba[:, 1] = expit(2.0 * score.ravel())
proba[:, 0] -= proba[:, 1]
return proba
def _score_to_decision(self, score):
return (score.ravel() >= 0.0).astype(np.int)
LOSS_FUNCTIONS = {'ls': LeastSquaresError,
'lad': LeastAbsoluteError,
'huber': HuberLossFunction,
'quantile': QuantileLossFunction,
'deviance': None, # for both, multinomial and binomial
'exponential': ExponentialLoss,
}
INIT_ESTIMATORS = {'zero': ZeroEstimator}
class VerboseReporter(object):
"""Reports verbose output to stdout.
If ``verbose==1`` output is printed once in a while (when iteration mod
    verbose_mod is zero); if larger than 1 then output is printed for
each update.
"""
def __init__(self, verbose):
self.verbose = verbose
def init(self, est, begin_at_stage=0):
# header fields and line format str
header_fields = ['Iter', 'Train Loss']
verbose_fmt = ['{iter:>10d}', '{train_score:>16.4f}']
# do oob?
if est.subsample < 1:
header_fields.append('OOB Improve')
verbose_fmt.append('{oob_impr:>16.4f}')
header_fields.append('Remaining Time')
verbose_fmt.append('{remaining_time:>16s}')
# print the header line
print(('%10s ' + '%16s ' *
(len(header_fields) - 1)) % tuple(header_fields))
self.verbose_fmt = ' '.join(verbose_fmt)
# plot verbose info each time i % verbose_mod == 0
self.verbose_mod = 1
self.start_time = time()
self.begin_at_stage = begin_at_stage
def update(self, j, est):
"""Update reporter with new iteration. """
do_oob = est.subsample < 1
# we need to take into account if we fit additional estimators.
i = j - self.begin_at_stage # iteration relative to the start iter
if (i + 1) % self.verbose_mod == 0:
oob_impr = est.oob_improvement_[j] if do_oob else 0
remaining_time = ((est.n_estimators - (j + 1)) *
(time() - self.start_time) / float(i + 1))
if remaining_time > 60:
remaining_time = '{0:.2f}m'.format(remaining_time / 60.0)
else:
remaining_time = '{0:.2f}s'.format(remaining_time)
print(self.verbose_fmt.format(iter=j + 1,
train_score=est.train_score_[j],
oob_impr=oob_impr,
remaining_time=remaining_time))
if self.verbose == 1 and ((i + 1) // (self.verbose_mod * 10) > 0):
# adjust verbose frequency (powers of 10)
self.verbose_mod *= 10
class BaseGradientBoosting(six.with_metaclass(ABCMeta, BaseEnsemble,
_LearntSelectorMixin)):
"""Abstract base class for Gradient Boosting. """
@abstractmethod
def __init__(self, loss, learning_rate, n_estimators, min_samples_split,
min_samples_leaf, min_weight_fraction_leaf,
max_depth, init, subsample, max_features,
random_state, alpha=0.9, verbose=0, max_leaf_nodes=None,
warm_start=False):
self.n_estimators = n_estimators
self.learning_rate = learning_rate
self.loss = loss
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.subsample = subsample
self.max_features = max_features
self.max_depth = max_depth
self.init = init
self.random_state = random_state
self.alpha = alpha
self.verbose = verbose
self.max_leaf_nodes = max_leaf_nodes
self.warm_start = warm_start
self.estimators_ = np.empty((0, 0), dtype=np.object)
def _fit_stage(self, i, X, y, y_pred, sample_weight, sample_mask,
criterion, splitter, random_state):
"""Fit another stage of ``n_classes_`` trees to the boosting model. """
assert sample_mask.dtype == np.bool
loss = self.loss_
original_y = y
for k in range(loss.K):
if loss.is_multi_class:
y = np.array(original_y == k, dtype=np.float64)
residual = loss.negative_gradient(y, y_pred, k=k,
sample_weight=sample_weight)
# induce regression tree on residuals
tree = DecisionTreeRegressor(
criterion=criterion,
splitter=splitter,
max_depth=self.max_depth,
min_samples_split=self.min_samples_split,
min_samples_leaf=self.min_samples_leaf,
min_weight_fraction_leaf=self.min_weight_fraction_leaf,
max_features=self.max_features,
max_leaf_nodes=self.max_leaf_nodes,
random_state=random_state)
if self.subsample < 1.0:
# no inplace multiplication!
sample_weight = sample_weight * sample_mask.astype(np.float64)
tree.fit(X, residual, sample_weight=sample_weight,
check_input=False)
# update tree leaves
loss.update_terminal_regions(tree.tree_, X, y, residual, y_pred,
sample_weight, sample_mask,
self.learning_rate, k=k)
# add tree to ensemble
self.estimators_[i, k] = tree
return y_pred
def _check_params(self):
"""Check validity of parameters and raise ValueError if not valid. """
if self.n_estimators <= 0:
raise ValueError("n_estimators must be greater than 0 but "
"was %r" % self.n_estimators)
if self.learning_rate <= 0.0:
raise ValueError("learning_rate must be greater than 0 but "
"was %r" % self.learning_rate)
if (self.loss not in self._SUPPORTED_LOSS
or self.loss not in LOSS_FUNCTIONS):
raise ValueError("Loss '{0:s}' not supported. ".format(self.loss))
if self.loss == 'deviance':
loss_class = (MultinomialDeviance
if len(self.classes_) > 2
else BinomialDeviance)
else:
loss_class = LOSS_FUNCTIONS[self.loss]
if self.loss in ('huber', 'quantile'):
self.loss_ = loss_class(self.n_classes_, self.alpha)
else:
self.loss_ = loss_class(self.n_classes_)
if not (0.0 < self.subsample <= 1.0):
raise ValueError("subsample must be in (0,1] but "
"was %r" % self.subsample)
if self.init is not None:
if isinstance(self.init, six.string_types):
if self.init not in INIT_ESTIMATORS:
raise ValueError('init="%s" is not supported' % self.init)
else:
if (not hasattr(self.init, 'fit')
or not hasattr(self.init, 'predict')):
raise ValueError("init=%r must be valid BaseEstimator "
"and support both fit and "
"predict" % self.init)
if not (0.0 < self.alpha < 1.0):
raise ValueError("alpha must be in (0.0, 1.0) but "
"was %r" % self.alpha)
if isinstance(self.max_features, six.string_types):
if self.max_features == "auto":
# if is_classification
if self.n_classes_ > 1:
max_features = max(1, int(np.sqrt(self.n_features)))
else:
# is regression
max_features = self.n_features
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features)))
else:
raise ValueError("Invalid value for max_features: %r. "
"Allowed string values are 'auto', 'sqrt' "
"or 'log2'." % self.max_features)
elif self.max_features is None:
max_features = self.n_features
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
if 0. < self.max_features <= 1.:
max_features = max(int(self.max_features * self.n_features), 1)
else:
raise ValueError("max_features must be in (0, n_features]")
self.max_features_ = max_features
def _init_state(self):
"""Initialize model state and allocate model state data structures. """
if self.init is None:
self.init_ = self.loss_.init_estimator()
elif isinstance(self.init, six.string_types):
self.init_ = INIT_ESTIMATORS[self.init]()
else:
self.init_ = self.init
self.estimators_ = np.empty((self.n_estimators, self.loss_.K),
dtype=np.object)
self.train_score_ = np.zeros((self.n_estimators,), dtype=np.float64)
# do oob?
if self.subsample < 1.0:
self.oob_improvement_ = np.zeros((self.n_estimators),
dtype=np.float64)
def _clear_state(self):
"""Clear the state of the gradient boosting model. """
if hasattr(self, 'estimators_'):
self.estimators_ = np.empty((0, 0), dtype=np.object)
if hasattr(self, 'train_score_'):
del self.train_score_
if hasattr(self, 'oob_improvement_'):
del self.oob_improvement_
if hasattr(self, 'init_'):
del self.init_
def _resize_state(self):
"""Add additional ``n_estimators`` entries to all attributes. """
# self.n_estimators is the number of additional est to fit
total_n_estimators = self.n_estimators
if total_n_estimators < self.estimators_.shape[0]:
raise ValueError('resize with smaller n_estimators %d < %d' %
                             (total_n_estimators, self.estimators_.shape[0]))
self.estimators_.resize((total_n_estimators, self.loss_.K))
self.train_score_.resize(total_n_estimators)
if (self.subsample < 1 or hasattr(self, 'oob_improvement_')):
# if do oob resize arrays or create new if not available
if hasattr(self, 'oob_improvement_'):
self.oob_improvement_.resize(total_n_estimators)
else:
self.oob_improvement_ = np.zeros((total_n_estimators,),
dtype=np.float64)
def _is_initialized(self):
return len(getattr(self, 'estimators_', [])) > 0
def _check_initialized(self):
"""Check that the estimator is initialized, raising an error if not."""
if self.estimators_ is None or len(self.estimators_) == 0:
            raise NotFittedError("Estimator not fitted, call `fit`"
                                 " before making predictions.")
def fit(self, X, y, sample_weight=None, monitor=None):
"""Fit the gradient boosting model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape = [n_samples]
Target values (integers in classification, real numbers in
regression)
For classification, labels must correspond to classes.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
monitor : callable, optional
The monitor is called after each iteration with the current
iteration, a reference to the estimator and the local variables of
``_fit_stages`` as keyword arguments ``callable(i, self,
locals())``. If the callable returns ``True`` the fitting procedure
is stopped. The monitor can be used for various things such as
            computing held-out estimates, early stopping, model introspection,
            and snapshotting.
Returns
-------
self : object
Returns self.
"""
# if not warmstart - clear the estimator state
if not self.warm_start:
self._clear_state()
# Check input
X, y = check_X_y(X, y, dtype=DTYPE)
n_samples, self.n_features = X.shape
if sample_weight is None:
sample_weight = np.ones(n_samples, dtype=np.float32)
else:
sample_weight = column_or_1d(sample_weight, warn=True)
check_consistent_length(X, y, sample_weight)
y = self._validate_y(y)
random_state = check_random_state(self.random_state)
self._check_params()
if not self._is_initialized():
# init state
self._init_state()
# fit initial model - FIXME make sample_weight optional
self.init_.fit(X, y, sample_weight)
# init predictions
y_pred = self.init_.predict(X)
begin_at_stage = 0
else:
# add more estimators to fitted model
# invariant: warm_start = True
if self.n_estimators < self.estimators_.shape[0]:
raise ValueError('n_estimators=%d must be larger or equal to '
'estimators_.shape[0]=%d when '
'warm_start==True'
% (self.n_estimators,
self.estimators_.shape[0]))
begin_at_stage = self.estimators_.shape[0]
y_pred = self._decision_function(X)
self._resize_state()
# fit the boosting stages
n_stages = self._fit_stages(X, y, y_pred, sample_weight, random_state,
begin_at_stage, monitor)
# change shape of arrays after fit (early-stopping or additional ests)
if n_stages != self.estimators_.shape[0]:
self.estimators_ = self.estimators_[:n_stages]
self.train_score_ = self.train_score_[:n_stages]
if hasattr(self, 'oob_improvement_'):
self.oob_improvement_ = self.oob_improvement_[:n_stages]
return self
def _fit_stages(self, X, y, y_pred, sample_weight, random_state,
begin_at_stage=0, monitor=None):
"""Iteratively fits the stages.
For each stage it computes the progress (OOB, train score)
and delegates to ``_fit_stage``.
Returns the number of stages fit; might differ from ``n_estimators``
due to early stopping.
"""
n_samples = X.shape[0]
do_oob = self.subsample < 1.0
sample_mask = np.ones((n_samples, ), dtype=np.bool)
n_inbag = max(1, int(self.subsample * n_samples))
loss_ = self.loss_
# Set min_weight_leaf from min_weight_fraction_leaf
if self.min_weight_fraction_leaf != 0. and sample_weight is not None:
min_weight_leaf = (self.min_weight_fraction_leaf *
np.sum(sample_weight))
else:
min_weight_leaf = 0.
# init criterion and splitter
criterion = FriedmanMSE(1)
splitter = PresortBestSplitter(criterion,
self.max_features_,
self.min_samples_leaf,
min_weight_leaf,
random_state)
if self.verbose:
verbose_reporter = VerboseReporter(self.verbose)
verbose_reporter.init(self, begin_at_stage)
# perform boosting iterations
i = begin_at_stage
for i in range(begin_at_stage, self.n_estimators):
# subsampling
if do_oob:
sample_mask = _random_sample_mask(n_samples, n_inbag,
random_state)
# OOB score before adding this stage
old_oob_score = loss_(y[~sample_mask],
y_pred[~sample_mask],
sample_weight[~sample_mask])
# fit next stage of trees
y_pred = self._fit_stage(i, X, y, y_pred, sample_weight,
sample_mask, criterion, splitter,
random_state)
# track deviance (= loss)
if do_oob:
self.train_score_[i] = loss_(y[sample_mask],
y_pred[sample_mask],
sample_weight[sample_mask])
self.oob_improvement_[i] = (
old_oob_score - loss_(y[~sample_mask],
y_pred[~sample_mask],
sample_weight[~sample_mask]))
else:
# no need to fancy index w/ no subsampling
self.train_score_[i] = loss_(y, y_pred, sample_weight)
if self.verbose > 0:
verbose_reporter.update(i, self)
if monitor is not None:
early_stopping = monitor(i, self, locals())
if early_stopping:
break
return i + 1
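    # Editor's note (hedged): each pass through _fit_stage above performs the
    # additive-model update F_m(x) = F_{m-1}(x) + learning_rate * h_m(x),
    # where h_m is the regression tree fit to the negative gradient and the
    # per-leaf values of h_m are adjusted by loss_.update_terminal_regions.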
def _make_estimator(self, append=True):
# we don't need _make_estimator
raise NotImplementedError()
def _init_decision_function(self, X):
"""Check input and compute prediction of ``init``. """
self._check_initialized()
if X.shape[1] != self.n_features:
raise ValueError("X.shape[1] should be {0:d}, not {1:d}.".format(
self.n_features, X.shape[1]))
score = self.init_.predict(X).astype(np.float64)
return score
def _decision_function(self, X):
# for use in inner loop, not raveling the output in single-class case,
# not doing input validation.
score = self._init_decision_function(X)
predict_stages(self.estimators_, X, self.learning_rate, score)
return score
@deprecated(" and will be removed in 0.19")
def decision_function(self, X):
"""Compute the decision function of ``X``.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : array, shape = [n_samples, n_classes] or [n_samples]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification produce an array of shape
[n_samples].
"""
X = check_array(X, dtype=DTYPE, order="C")
score = self._decision_function(X)
if score.shape[1] == 1:
return score.ravel()
return score
def _staged_decision_function(self, X):
"""Compute decision function of ``X`` for each iteration.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification are special cases with
``k == 1``, otherwise ``k==n_classes``.
"""
X = check_array(X, dtype=DTYPE, order="C")
score = self._init_decision_function(X)
for i in range(self.estimators_.shape[0]):
predict_stage(self.estimators_, i, X, self.learning_rate, score)
yield score.copy()
@deprecated(" and will be removed in 0.19")
def staged_decision_function(self, X):
"""Compute decision function of ``X`` for each iteration.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification are special cases with
``k == 1``, otherwise ``k==n_classes``.
"""
for dec in self._staged_decision_function(X):
# no yield from in Python2.X
yield dec
@property
def feature_importances_(self):
"""Return the feature importances (the higher, the more important the
feature).
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
self._check_initialized()
total_sum = np.zeros((self.n_features, ), dtype=np.float64)
for stage in self.estimators_:
stage_sum = sum(tree.feature_importances_
for tree in stage) / len(stage)
total_sum += stage_sum
importances = total_sum / len(self.estimators_)
return importances
def _validate_y(self, y):
self.n_classes_ = 1
if y.dtype.kind == 'O':
y = y.astype(np.float64)
# Default implementation
return y
def apply(self, X):
"""Apply trees in the ensemble to X, return leaf indices.
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
X_leaves : array_like, shape = [n_samples, n_estimators, n_classes]
For each datapoint x in X and for each tree in the ensemble,
            return the index of the leaf that x ends up in for each estimator.
In the case of binary classification n_classes is 1.
"""
self._check_initialized()
X = self.estimators_[0, 0]._validate_X_predict(X, check_input=True)
# n_classes will be equal to 1 in the binary classification or the
# regression case.
n_estimators, n_classes = self.estimators_.shape
leaves = np.zeros((X.shape[0], n_estimators, n_classes))
for i in range(n_estimators):
for j in range(n_classes):
estimator = self.estimators_[i, j]
leaves[:, i, j] = estimator.apply(X, check_input=False)
return leaves
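# Editor's usage sketch (hedged): the leaf indices returned by ``apply`` are
# often one-hot encoded and fed to a linear model ("GBT + LR"). For example:
#
#     leaves = est.apply(X_train)              # shape (n_samples, n_estimators, K)
#     flat = leaves.reshape(leaves.shape[0], -1)
#     # OneHotEncoder().fit_transform(flat) -> sparse features for a linear model
#
# ``est``, ``X_train`` and the encoder choice are placeholders, not names
# defined in this module.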
class GradientBoostingClassifier(BaseGradientBoosting, ClassifierMixin):
"""Gradient Boosting for classification.
GB builds an additive model in a
forward stage-wise fashion; it allows for the optimization of
arbitrary differentiable loss functions. In each stage ``n_classes_``
regression trees are fit on the negative gradient of the
binomial or multinomial deviance loss function. Binary classification
is a special case where only a single regression tree is induced.
Read more in the :ref:`User Guide <gradient_boosting>`.
Parameters
----------
loss : {'deviance', 'exponential'}, optional (default='deviance')
loss function to be optimized. 'deviance' refers to
deviance (= logistic regression) for classification
with probabilistic outputs. For loss 'exponential' gradient
boosting recovers the AdaBoost algorithm.
learning_rate : float, optional (default=0.1)
learning rate shrinks the contribution of each tree by `learning_rate`.
There is a trade-off between learning_rate and n_estimators.
n_estimators : int (default=100)
The number of boosting stages to perform. Gradient boosting
is fairly robust to over-fitting so a large number usually
results in better performance.
max_depth : integer, optional (default=3)
maximum depth of the individual regression estimators. The maximum
depth limits the number of nodes in the tree. Tune this parameter
for best performance; the best value depends on the interaction
of the input variables.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
subsample : float, optional (default=1.0)
The fraction of samples to be used for fitting the individual base
learners. If smaller than 1.0 this results in Stochastic Gradient
Boosting. `subsample` interacts with the parameter `n_estimators`.
Choosing `subsample < 1.0` leads to a reduction of variance
and an increase in bias.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Choosing `max_features < n_features` leads to a reduction of variance
and an increase in bias.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
init : BaseEstimator, None, optional (default=None)
An estimator object that is used to compute the initial
predictions. ``init`` has to provide ``fit`` and ``predict``.
If None it uses ``loss.init_estimator``.
verbose : int, default: 0
Enable verbose output. If 1 then it prints progress and performance
once in a while (the more trees the lower the frequency). If greater
than 1 then it prints progress and performance for every tree.
warm_start : bool, default: False
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just erase the
previous solution.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
feature_importances_ : array, shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_improvement_ : array, shape = [n_estimators]
The improvement in loss (= deviance) on the out-of-bag samples
relative to the previous iteration.
``oob_improvement_[0]`` is the improvement in
loss of the first stage over the ``init`` estimator.
train_score_ : array, shape = [n_estimators]
The i-th score ``train_score_[i]`` is the deviance (= loss) of the
model at iteration ``i`` on the in-bag sample.
If ``subsample == 1`` this is the deviance on the training data.
loss_ : LossFunction
The concrete ``LossFunction`` object.
init : BaseEstimator
The estimator that provides the initial predictions.
Set via the ``init`` argument or ``loss.init_estimator``.
estimators_ : ndarray of DecisionTreeRegressor, shape = [n_estimators, ``loss_.K``]
The collection of fitted sub-estimators. ``loss_.K`` is 1 for binary
classification, otherwise n_classes.
See also
--------
sklearn.tree.DecisionTreeClassifier, RandomForestClassifier
AdaBoostClassifier
References
----------
J. Friedman, Greedy Function Approximation: A Gradient Boosting
Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
J. Friedman, Stochastic Gradient Boosting, 1999
T. Hastie, R. Tibshirani and J. Friedman.
Elements of Statistical Learning Ed. 2, Springer, 2009.
"""
_SUPPORTED_LOSS = ('deviance', 'exponential')
def __init__(self, loss='deviance', learning_rate=0.1, n_estimators=100,
subsample=1.0, min_samples_split=2,
min_samples_leaf=1, min_weight_fraction_leaf=0.,
max_depth=3, init=None, random_state=None,
max_features=None, verbose=0,
max_leaf_nodes=None, warm_start=False):
super(GradientBoostingClassifier, self).__init__(
loss=loss, learning_rate=learning_rate, n_estimators=n_estimators,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_depth=max_depth, init=init, subsample=subsample,
max_features=max_features,
random_state=random_state, verbose=verbose,
max_leaf_nodes=max_leaf_nodes, warm_start=warm_start)
def _validate_y(self, y):
self.classes_, y = np.unique(y, return_inverse=True)
self.n_classes_ = len(self.classes_)
return y
def decision_function(self, X):
"""Compute the decision function of ``X``.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : array, shape = [n_samples, n_classes] or [n_samples]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification produce an array of shape
[n_samples].
"""
X = check_array(X, dtype=DTYPE, order="C")
score = self._decision_function(X)
if score.shape[1] == 1:
return score.ravel()
return score
def staged_decision_function(self, X):
"""Compute decision function of ``X`` for each iteration.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification are special cases with
``k == 1``, otherwise ``k==n_classes``.
"""
for dec in self._staged_decision_function(X):
# no yield from in Python2.X
yield dec
def predict(self, X):
"""Predict class for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
        y : array of shape = [n_samples]
The predicted values.
"""
score = self.decision_function(X)
decisions = self.loss_._score_to_decision(score)
return self.classes_.take(decisions, axis=0)
def staged_predict(self, X):
"""Predict class at each stage for X.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : generator of array of shape = [n_samples]
The predicted value of the input samples.
"""
for score in self._staged_decision_function(X):
decisions = self.loss_._score_to_decision(score)
yield self.classes_.take(decisions, axis=0)
def predict_proba(self, X):
"""Predict class probabilities for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Raises
------
AttributeError
If the ``loss`` does not support probabilities.
Returns
-------
        p : array of shape = [n_samples, n_classes]
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
score = self.decision_function(X)
try:
return self.loss_._score_to_proba(score)
except NotFittedError:
raise
except AttributeError:
raise AttributeError('loss=%r does not support predict_proba' %
self.loss)
def predict_log_proba(self, X):
"""Predict class log-probabilities for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Raises
------
AttributeError
If the ``loss`` does not support probabilities.
Returns
-------
        p : array of shape = [n_samples, n_classes]
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
return np.log(proba)
def staged_predict_proba(self, X):
"""Predict class probabilities at each stage for X.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
        y : generator of array of shape = [n_samples, n_classes]
            The predicted class probabilities of the input samples.
"""
try:
for score in self._staged_decision_function(X):
yield self.loss_._score_to_proba(score)
except NotFittedError:
raise
except AttributeError:
raise AttributeError('loss=%r does not support predict_proba' %
self.loss)
class GradientBoostingRegressor(BaseGradientBoosting, RegressorMixin):
"""Gradient Boosting for regression.
GB builds an additive model in a forward stage-wise fashion;
it allows for the optimization of arbitrary differentiable loss functions.
In each stage a regression tree is fit on the negative gradient of the
given loss function.
Read more in the :ref:`User Guide <gradient_boosting>`.
Parameters
----------
loss : {'ls', 'lad', 'huber', 'quantile'}, optional (default='ls')
loss function to be optimized. 'ls' refers to least squares
regression. 'lad' (least absolute deviation) is a highly robust
loss function solely based on order information of the input
variables. 'huber' is a combination of the two. 'quantile'
allows quantile regression (use `alpha` to specify the quantile).
learning_rate : float, optional (default=0.1)
learning rate shrinks the contribution of each tree by `learning_rate`.
There is a trade-off between learning_rate and n_estimators.
n_estimators : int (default=100)
The number of boosting stages to perform. Gradient boosting
is fairly robust to over-fitting so a large number usually
results in better performance.
max_depth : integer, optional (default=3)
maximum depth of the individual regression estimators. The maximum
depth limits the number of nodes in the tree. Tune this parameter
for best performance; the best value depends on the interaction
of the input variables.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
subsample : float, optional (default=1.0)
The fraction of samples to be used for fitting the individual base
learners. If smaller than 1.0 this results in Stochastic Gradient
Boosting. `subsample` interacts with the parameter `n_estimators`.
Choosing `subsample < 1.0` leads to a reduction of variance
and an increase in bias.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Choosing `max_features < n_features` leads to a reduction of variance
and an increase in bias.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
alpha : float (default=0.9)
The alpha-quantile of the huber loss function and the quantile
loss function. Only if ``loss='huber'`` or ``loss='quantile'``.
init : BaseEstimator, None, optional (default=None)
An estimator object that is used to compute the initial
predictions. ``init`` has to provide ``fit`` and ``predict``.
If None it uses ``loss.init_estimator``.
verbose : int, default: 0
Enable verbose output. If 1 then it prints progress and performance
once in a while (the more trees the lower the frequency). If greater
than 1 then it prints progress and performance for every tree.
warm_start : bool, default: False
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just erase the
previous solution.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
feature_importances_ : array, shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_improvement_ : array, shape = [n_estimators]
The improvement in loss (= deviance) on the out-of-bag samples
relative to the previous iteration.
``oob_improvement_[0]`` is the improvement in
loss of the first stage over the ``init`` estimator.
train_score_ : array, shape = [n_estimators]
The i-th score ``train_score_[i]`` is the deviance (= loss) of the
model at iteration ``i`` on the in-bag sample.
If ``subsample == 1`` this is the deviance on the training data.
loss_ : LossFunction
The concrete ``LossFunction`` object.
    init : BaseEstimator
The estimator that provides the initial predictions.
Set via the ``init`` argument or ``loss.init_estimator``.
estimators_ : ndarray of DecisionTreeRegressor, shape = [n_estimators, 1]
The collection of fitted sub-estimators.
See also
--------
DecisionTreeRegressor, RandomForestRegressor
References
----------
J. Friedman, Greedy Function Approximation: A Gradient Boosting
Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
J. Friedman, Stochastic Gradient Boosting, 1999
T. Hastie, R. Tibshirani and J. Friedman.
Elements of Statistical Learning Ed. 2, Springer, 2009.
"""
_SUPPORTED_LOSS = ('ls', 'lad', 'huber', 'quantile')
def __init__(self, loss='ls', learning_rate=0.1, n_estimators=100,
subsample=1.0, min_samples_split=2,
min_samples_leaf=1, min_weight_fraction_leaf=0.,
max_depth=3, init=None, random_state=None,
max_features=None, alpha=0.9, verbose=0, max_leaf_nodes=None,
warm_start=False):
super(GradientBoostingRegressor, self).__init__(
loss=loss, learning_rate=learning_rate, n_estimators=n_estimators,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_depth=max_depth, init=init, subsample=subsample,
max_features=max_features,
random_state=random_state, alpha=alpha, verbose=verbose,
max_leaf_nodes=max_leaf_nodes, warm_start=warm_start)
def predict(self, X):
"""Predict regression target for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : array of shape = [n_samples]
The predicted values.
"""
X = check_array(X, dtype=DTYPE, order="C")
return self._decision_function(X).ravel()
def staged_predict(self, X):
"""Predict regression target at each stage for X.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : generator of array of shape = [n_samples]
The predicted value of the input samples.
"""
for y in self._staged_decision_function(X):
yield y.ravel()
def apply(self, X):
"""Apply trees in the ensemble to X, return leaf indices.
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
X_leaves : array_like, shape = [n_samples, n_estimators]
For each datapoint x in X and for each tree in the ensemble,
            return the index of the leaf that x ends up in for each estimator.
"""
leaves = super(GradientBoostingRegressor, self).apply(X)
leaves = leaves.reshape(X.shape[0], self.estimators_.shape[0])
return leaves
| bsd-3-clause |
treycausey/scikit-learn | examples/exercises/plot_cv_digits.py | 1 | 1183 | """
=============================================
Cross-validation on Digits Dataset Exercise
=============================================
A tutorial exercise using cross-validation with an SVM on the Digits dataset.
This exercise is used in the :ref:`cv_generators_tut` part of the
:ref:`model_selection_tut` section of the :ref:`stat_learn_tut_index`.
"""
print(__doc__)
import numpy as np
from sklearn import cross_validation, datasets, svm
digits = datasets.load_digits()
X = digits.data
y = digits.target
svc = svm.SVC(kernel='linear')
C_s = np.logspace(-10, 0, 10)
scores = list()
scores_std = list()
for C in C_s:
svc.C = C
this_scores = cross_validation.cross_val_score(svc, X, y, n_jobs=1)
scores.append(np.mean(this_scores))
scores_std.append(np.std(this_scores))
# Do the plotting
import pylab as pl
pl.figure(1, figsize=(4, 3))
pl.clf()
pl.semilogx(C_s, scores)
pl.semilogx(C_s, np.array(scores) + np.array(scores_std), 'b--')
pl.semilogx(C_s, np.array(scores) - np.array(scores_std), 'b--')
locs, labels = pl.yticks()
pl.yticks(locs, list(map(lambda x: "%g" % x, locs)))
pl.ylabel('CV score')
pl.xlabel('Parameter C')
pl.ylim(0, 1.1)
pl.show()
| bsd-3-clause |
reuk/wayverb | demo/evaluation/room_materials/spectrograms.py | 2 | 2200 | #!/usr/local/bin/python
import numpy as np
import matplotlib
render = True
if render:
matplotlib.use('pgf')
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
from string import split
import scipy.signal as signal
import pysndfile
import math
import os
import re
import json
def get_specgram_data(fname):
sndfile = pysndfile.PySndfile(fname, 'r')
if sndfile.channels() != 1:
raise RuntimeError('please only load mono files')
Fs = sndfile.samplerate()
signal = sndfile.read_frames()
pxx, freq, time = mlab.specgram(signal, NFFT=4096, Fs=Fs)
return pxx, freq, time
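# Editor's sketch (hedged): with the NFFT=4096 window used above, adjacent
# frequency rows of pxx are spaced Fs / NFFT apart (roughly 10.8 Hz at a
# 44.1 kHz sample rate). The helper below is illustrative and is not called
# anywhere in this script.
def bin_frequency(bin_index, Fs, NFFT=4096):
    return bin_index * float(Fs) / NFFT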
def main():
files = [
("0.02", "0.02.wav"),
("0.04", "0.04.wav"),
("0.08", "0.08.wav"),
]
specgrams = [(label, get_specgram_data(fname)) for label, fname in files]
fig, axes = plt.subplots(nrows=len(specgrams), sharex=True)
cmap = plt.get_cmap('viridis')
Z = map(lambda (label, (pxx, freq, time)): 10 * np.log10(pxx), specgrams)
maxes = map(lambda z: np.nanmax(z), Z)
print maxes
vmin = -200
vmax = max(maxes)
for (label, (pxx, freq, time)), ax, z in zip(specgrams, axes, Z):
im = ax.pcolormesh(time, freq, z, cmap=cmap, vmin=vmin, vmax=vmax, rasterized=True)
ax.set_ylim(20, 20000)
ax.set_yscale('log')
ax.set_title('Absorption Coefficient: ' + label)
ax.set_xlabel('time / s')
ax.set_ylabel('frequency / Hz')
cb = fig.colorbar(im, ax=axes.ravel().tolist(), use_gridspec=True)
cb.set_label('dB')
plt.suptitle('Spectrograms of Impulse Responses from Rooms with Different Absorption Coefficients')
plt.tight_layout()
plt.subplots_adjust(top=0.9)
plt.subplots_adjust(right=0.75)
plt.show()
if render:
plt.savefig('room_material_spectrograms.svg', bbox_inches='tight', dpi=96, format='svg')
if __name__ == '__main__':
pgf_with_rc_fonts = {
'font.family': 'serif',
'font.serif': [],
'font.sans-serif': ['Helvetica Neue'],
'font.monospace': ['Input Mono Condensed'],
'legend.fontsize': 12,
}
matplotlib.rcParams.update(pgf_with_rc_fonts)
main()
| gpl-2.0 |
spbguru/repo1 | examples/audiostream/audiostream_tp.py | 3 | 9960 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
See README.md for details.
"""
"""
numpy - the language of pyaudio (& everything else)
pyaudio - access to the mic via the soundcard
pyplot - to plot the sound frequencies
bitmaparray - encodes an array of indices into an SDR
TP10X2 - the C++ optimized temporal pooler (TP)
"""
import numpy
import pyaudio
import matplotlib.pyplot as plt
from nupic.encoders.bitmaparray import BitmapArrayEncoder
from nupic.research.TP10X2 import TP10X2 as TP
class Visualizations:
def calcAnomaly(self, actual, predicted):
"""
Calculates the anomaly of two SDRs
Uses the equation presented on the wiki:
https://github.com/numenta/nupic/wiki/Anomaly-Score-Memo
To put this in terms of the temporal pooler:
A is the actual input array at a given timestep
P is the predicted array that was produced from the previous timestep(s)
[A - (A && P)] / [A]
Rephrasing as questions:
What bits are on in A that are not on in P?
How does that compare to total on bits in A?
    Outputs 0 if there's no difference between P and A.
Outputs 1 if P and A are totally distinct.
Not a perfect metric - it doesn't credit proximity
Next step: combine with a metric for a spatial pooler
"""
combined = numpy.logical_and(actual, predicted)
delta = numpy.logical_xor(actual,combined)
delta_score = sum(delta)
actual_score = float(sum(actual))
return delta_score / actual_score
def compareArray(self, actual, predicted):
"""
Produce an array that compares the actual & predicted
'A' - actual
'P' - predicted
    'E' - expected (both actual & predicted)
' ' - neither an input nor predicted
"""
compare = []
for i in range(actual.size):
if actual[i] and predicted[i]:
compare.append('E')
elif actual[i]:
compare.append('A')
elif predicted[i]:
compare.append('P')
else:
compare.append(' ')
return compare
def hashtagAnomaly(self, anomaly):
"""
Basic printout method to visualize the anomaly score (scale: 1 - 50 #'s)
"""
hashcount = '#'
for i in range(int(anomaly / 0.02)):
hashcount += '#'
for j in range(int((1 - anomaly) / 0.02)):
hashcount += '.'
return hashcount
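# Editor's worked example (hedged): for actual = [1, 1, 0, 1] and
# predicted = [1, 0, 0, 1], two of the three active bits in actual are also
# predicted, so calcAnomaly returns (3 - 2) / 3.0, roughly 0.33; identical
# SDRs give 0.0 and fully disjoint ones give 1.0.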
class AudioStream:
def __init__(self):
"""
Instantiate temporal pooler, encoder, audio sampler, filter, & freq plot
"""
self.vis = Visualizations()
"""
The number of columns in the input and therefore the TP
2**9 = 512
Trial and error pulled that out
numCols should be tested during benchmarking
"""
self.numCols = 2**9
sparsity = 0.10
self.numInput = int(self.numCols * sparsity)
"""
Create a bit map encoder
From the encoder's __init__ method:
1st arg: the total bits in input
2nd arg: the number of bits used to encode each input bit
"""
self.e = BitmapArrayEncoder(self.numCols, 1)
"""
Sampling details
rate: The sampling rate in Hz of my soundcard
buffersize: The size of the array to which we will save audio segments (2^12 = 4096 is very good)
secToRecord: The length of each sampling
buffersToRecord: how many multiples of buffers are we recording?
"""
rate=44100
secToRecord=.1
self.buffersize=2**12
self.buffersToRecord=int(rate*secToRecord/self.buffersize)
if not self.buffersToRecord:
self.buffersToRecord=1
"""
Filters in Hertz
highHertz: lower limit of the bandpass filter, in Hertz
lowHertz: upper limit of the bandpass filter, in Hertz
max lowHertz = (buffersize / 2 - 1) * rate / buffersize
"""
highHertz = 500
lowHertz = 10000
"""
Convert filters from Hertz to bins
highpass: convert the highHertz into a bin for the FFT
    lowpass: convert the lowHertz into a bin for the FFT
NOTES:
highpass is at least the 1st bin since most mics only pick up >=20Hz
lowpass is no higher than buffersize/2 - 1 (highest array index)
passband needs to be wider than size of numInput - not checking for that
"""
self.highpass = max(int(highHertz * self.buffersize / rate),1)
self.lowpass = min(int(lowHertz * self.buffersize / rate), self.buffersize/2 - 1)
"""
The call to create the temporal pooler region
"""
self.tp = TP(numberOfCols=self.numCols, cellsPerColumn=4,
initialPerm=0.5, connectedPerm=0.5,
minThreshold=10, newSynapseCount=10,
permanenceInc=0.1, permanenceDec=0.07,
activationThreshold=8,
globalDecay=0.02, burnIn=2,
checkSynapseConsistency=False,
pamLength=100)
"""
Creating the audio stream from our mic
"""
p = pyaudio.PyAudio()
self.inStream = p.open(format=pyaudio.paInt32,channels=1,rate=rate,input=True,frames_per_buffer=self.buffersize)
"""
Setting up the array that will handle the timeseries of audio data from our input
"""
self.audio = numpy.empty((self.buffersToRecord*self.buffersize),dtype="uint32")
"""
Print out the inputs
"""
print "Number of columns:\t" + str(self.numCols)
print "Max size of input:\t" + str(self.numInput)
print "Sampling rate (Hz):\t" + str(rate)
print "Passband filter (Hz):\t" + str(highHertz) + " - " + str(lowHertz)
print "Passband filter (bin):\t" + str(self.highpass) + " - " + str(self.lowpass)
print "Bin difference:\t\t" + str(self.lowpass - self.highpass)
print "Buffersize:\t\t" + str(self.buffersize)
"""
Setup the plot
Use the bandpass filter frequency range as the x-axis
Rescale the y-axis
"""
plt.ion()
bin = range(self.highpass,self.lowpass)
xs = numpy.arange(len(bin))*rate/self.buffersize + highHertz
self.freqPlot = plt.plot(xs,xs)[0]
plt.ylim(0, 10**12)
while True:
self.processAudio()
def processAudio (self):
"""
Sample audio, encode, send it to the TP
Pulls the audio from the mic
Conditions that audio as an SDR
Computes a prediction via the TP
Update the visualizations
"""
"""
Cycle through the multiples of the buffers we're sampling
Sample audio to store for each frame in buffersize
Mic voltage-level timeseries is saved as 32-bit binary
Convert that 32-bit binary into integers, and save to array for the FFT
"""
for i in range(self.buffersToRecord):
try:
audioString = self.inStream.read(self.buffersize)
except IOError:
print "Overflow error from 'audiostring = inStream.read(buffersize)'. Try decreasing buffersize."
quit()
self.audio[i*self.buffersize:(i + 1)*self.buffersize] = numpy.fromstring(audioString,dtype = "uint32")
"""
Get int array of strength for each bin of frequencies via fast fourier transform
Get the indices of the strongest frequencies (the top 'numInput')
Scale the indices so that the frequencies fit to within numCols
Pick out the unique indices (we've reduced the mapping, so we likely have multiples)
Encode those indices into an SDR via the BitmapArrayEncoder
Cast the SDR as a float for the TP
"""
ys = self.fft(self.audio, self.highpass, self.lowpass)
fs = numpy.sort(ys.argsort()[-self.numInput:])
rfs = fs.astype(numpy.float32) / (self.lowpass - self.highpass) * self.numCols
ufs = numpy.unique(rfs)
actualInt = self.e.encode(ufs)
actual = actualInt.astype(numpy.float32)
"""
Pass the SDR to the TP
Collect the prediction SDR from the TP
Pass the prediction & actual SDRS to the anomaly calculator & array comparer
Update the frequency plot
"""
self.tp.compute(actual, enableLearn = True, computeInfOutput = True)
predictedInt = self.tp.getPredictedState().max(axis=1)
compare = self.vis.compareArray(actualInt, predictedInt)
anomaly = self.vis.calcAnomaly(actualInt, predictedInt)
print "." . join(compare)
print self.vis.hashtagAnomaly(anomaly)
self.freqPlot.set_ydata(ys)
plt.show(block = False)
plt.draw()
def fft(self, audio, highpass, lowpass):
"""
Fast fourier transform conditioning
Output:
'output' contains the strength of each frequency in the audio signal
      frequencies are marked by their position in 'output':
        frequency = index * rate / buffersize
output.size = buffersize/2
Method:
Use numpy's FFT (numpy.fft.fft)
Find the magnitude of the complex numbers returned (abs value)
Split the FFT array in half, because we have mirror frequencies
(they're the complex conjugates)
Use just the first half to apply the bandpass filter
Great info here: http://stackoverflow.com/questions/4364823/how-to-get-frequency-from-fft-result
"""
left,right = numpy.split(numpy.abs(numpy.fft.fft(audio)),2)
output = left[highpass:lowpass]
return output
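# Editor's sketch (hedged): the inverse of the Hertz-to-bin conversion used in
# __init__; an absolute FFT bin index maps back to bin * rate / buffersize
# Hertz. The default arguments mirror the values chosen above and are
# assumptions, not configuration read from elsewhere.
def binToHertz(binIndex, rate=44100, buffersize=2**12):
  return binIndex * rate / float(buffersize)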
audiostream = AudioStream()
| gpl-3.0 |
clembou/PCWG | pcwg/configuration/power_curve_configuration.py | 1 | 2635 | # -*- coding: utf-8 -*-
"""
Created on Thu Aug 11 05:39:02 2016
@author: Stuart
"""
import base_configuration
import pandas as pd
from ..core.status import Status
class PowerCurveConfiguration(base_configuration.XmlBase):
def __init__(self, path = None):
if path != None:
self.isNew = False
doc = self.readDoc(path)
self.path = path
powerCurveNode = self.getNode(doc, 'PowerCurve')
self.name = self.getNodeValue(powerCurveNode, 'Name')
self.powerCurveDensity = self.getNodeFloat(powerCurveNode, 'PowerCurveDensity')
self.powerCurveTurbulence = self.getNodeFloat(powerCurveNode, 'PowerCurveTurbulence')
powerCurveDictionary = {}
for node in self.getNodes(powerCurveNode, 'PowerCurveLevel'):
speed = self.getNodeFloat(node, 'PowerCurveLevelWindSpeed')
power = self.getNodeFloat(node, 'PowerCurveLevelPower')
powerCurveDictionary[speed] = power
self.setPowerCurve(powerCurveDictionary)
else:
self.isNew = True
self.name = ""
self.powerCurveDensity = 1.225 #0.0
self.powerCurveTurbulence = 0.0
self.setPowerCurve()
def setPowerCurve(self, powerCurveDictionary = {}):
self.powerCurveDictionary = powerCurveDictionary
speeds, powers = [], []
for speed in self.powerCurveDictionary:
speeds.append(speed)
powers.append(self.powerCurveDictionary[speed])
if len(speeds) == 0:
self.powerCurveLevels = pd.Series()
else:
self.powerCurveLevels = pd.DataFrame(powers, index = speeds, columns = ['Specified Power'])
self.powerCurveLevels['Specified Turbulence'] = self.powerCurveTurbulence
def save(self):
Status.add("saving power curve")
doc = self.createDocument()
root = self.addRootNode(doc, "PowerCurve", "http://www.pcwg.org")
self.addTextNode(doc, root, "Name", self.name)
self.addFloatNode(doc, root, "PowerCurveDensity", self.powerCurveDensity)
self.addFloatNode(doc, root, "PowerCurveTurbulence", self.powerCurveTurbulence)
for speed in sorted(self.powerCurveDictionary):
power = self.powerCurveDictionary[speed]
levelNode = self.addNode(doc, root, "PowerCurveLevel")
self.addFloatNode(doc, levelNode, "PowerCurveLevelWindSpeed", speed)
self.addFloatNode(doc, levelNode, "PowerCurveLevelPower", power)
self.saveDocument(doc, self.path)
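# Editor's usage sketch (hedged): building a power curve in memory. The wind
# speeds (keys) and power values below are illustrative numbers with assumed
# units of m/s and kW; nothing in this module fixes the units.
def _example_power_curve():
    config = PowerCurveConfiguration()
    config.name = "Example turbine"
    config.setPowerCurve({4.0: 100.0, 8.0: 900.0, 12.0: 1800.0, 14.0: 2000.0})
    return config.powerCurveLevels  # DataFrame indexed by wind speed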
| mit |
xzh86/scikit-learn | sklearn/metrics/__init__.py | 214 | 3440 | """
The :mod:`sklearn.metrics` module includes score functions, performance metrics
and pairwise metrics and distance computations.
"""
from .ranking import auc
from .ranking import average_precision_score
from .ranking import coverage_error
from .ranking import label_ranking_average_precision_score
from .ranking import label_ranking_loss
from .ranking import precision_recall_curve
from .ranking import roc_auc_score
from .ranking import roc_curve
from .classification import accuracy_score
from .classification import classification_report
from .classification import cohen_kappa_score
from .classification import confusion_matrix
from .classification import f1_score
from .classification import fbeta_score
from .classification import hamming_loss
from .classification import hinge_loss
from .classification import jaccard_similarity_score
from .classification import log_loss
from .classification import matthews_corrcoef
from .classification import precision_recall_fscore_support
from .classification import precision_score
from .classification import recall_score
from .classification import zero_one_loss
from .classification import brier_score_loss
from . import cluster
from .cluster import adjusted_mutual_info_score
from .cluster import adjusted_rand_score
from .cluster import completeness_score
from .cluster import consensus_score
from .cluster import homogeneity_completeness_v_measure
from .cluster import homogeneity_score
from .cluster import mutual_info_score
from .cluster import normalized_mutual_info_score
from .cluster import silhouette_samples
from .cluster import silhouette_score
from .cluster import v_measure_score
from .pairwise import euclidean_distances
from .pairwise import pairwise_distances
from .pairwise import pairwise_distances_argmin
from .pairwise import pairwise_distances_argmin_min
from .pairwise import pairwise_kernels
from .regression import explained_variance_score
from .regression import mean_absolute_error
from .regression import mean_squared_error
from .regression import median_absolute_error
from .regression import r2_score
from .scorer import make_scorer
from .scorer import SCORERS
from .scorer import get_scorer
__all__ = [
'accuracy_score',
'adjusted_mutual_info_score',
'adjusted_rand_score',
'auc',
'average_precision_score',
'classification_report',
'cluster',
'completeness_score',
'confusion_matrix',
'consensus_score',
'coverage_error',
'euclidean_distances',
'explained_variance_score',
'f1_score',
'fbeta_score',
'get_scorer',
'hamming_loss',
'hinge_loss',
'homogeneity_completeness_v_measure',
'homogeneity_score',
'jaccard_similarity_score',
'label_ranking_average_precision_score',
'label_ranking_loss',
'log_loss',
'make_scorer',
'matthews_corrcoef',
'mean_absolute_error',
'mean_squared_error',
'median_absolute_error',
'mutual_info_score',
'normalized_mutual_info_score',
'pairwise_distances',
'pairwise_distances_argmin',
    'pairwise_distances_argmin_min',
'pairwise_kernels',
'precision_recall_curve',
'precision_recall_fscore_support',
'precision_score',
'r2_score',
'recall_score',
'roc_auc_score',
'roc_curve',
'SCORERS',
'silhouette_samples',
'silhouette_score',
'v_measure_score',
'zero_one_loss',
'brier_score_loss',
]
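# A minimal usage sketch (illustrative, not part of the original module):
# callers typically import the names exported above directly, e.g.
#
#   from sklearn.metrics import accuracy_score, confusion_matrix
#   accuracy_score([0, 1, 1, 0], [0, 1, 0, 0])      # -> 0.75
#   confusion_matrix([0, 1, 1, 0], [0, 1, 0, 0])    # 2x2 count matrix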
| bsd-3-clause |
maheshakya/scikit-learn | examples/decomposition/plot_ica_blind_source_separation.py | 349 | 2228 | """
=====================================
Blind source separation using FastICA
=====================================
An example of estimating sources from noisy data.
:ref:`ICA` is used to estimate sources given noisy measurements.
Imagine 3 instruments playing simultaneously and 3 microphones
recording the mixed signals. ICA is used to recover the sources
i.e. what is played by each instrument. Importantly, PCA fails
at recovering our `instruments` since the related signals reflect
non-Gaussian processes.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal
from sklearn.decomposition import FastICA, PCA
###############################################################################
# Generate sample data
np.random.seed(0)
n_samples = 2000
time = np.linspace(0, 8, n_samples)
s1 = np.sin(2 * time) # Signal 1 : sinusoidal signal
s2 = np.sign(np.sin(3 * time)) # Signal 2 : square signal
s3 = signal.sawtooth(2 * np.pi * time) # Signal 3: saw tooth signal
S = np.c_[s1, s2, s3]
S += 0.2 * np.random.normal(size=S.shape) # Add noise
S /= S.std(axis=0) # Standardize data
# Mix data
A = np.array([[1, 1, 1], [0.5, 2, 1.0], [1.5, 1.0, 2.0]]) # Mixing matrix
X = np.dot(S, A.T) # Generate observations
# Compute ICA
ica = FastICA(n_components=3)
S_ = ica.fit_transform(X) # Reconstruct signals
A_ = ica.mixing_ # Get estimated mixing matrix
# We can `prove` that the ICA model applies by reverting the unmixing.
assert np.allclose(X, np.dot(S_, A_.T) + ica.mean_)
# For comparison, compute PCA
pca = PCA(n_components=3)
H = pca.fit_transform(X) # Reconstruct signals based on orthogonal components
###############################################################################
# Plot results
plt.figure()
models = [X, S, S_, H]
names = ['Observations (mixed signal)',
'True Sources',
'ICA recovered signals',
'PCA recovered signals']
colors = ['red', 'steelblue', 'orange']
for ii, (model, name) in enumerate(zip(models, names), 1):
plt.subplot(4, 1, ii)
plt.title(name)
for sig, color in zip(model.T, colors):
plt.plot(sig, color=color)
plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.46)
plt.show()
| bsd-3-clause |
ashhher3/ibis | ibis/config.py | 16 | 20779 | # This file has been adapted from pandas/core/config.py. pandas 3-clause BSD
# license. See LICENSES/pandas
#
# Further modifications:
#
# Copyright 2014 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from collections import namedtuple
from contextlib import contextmanager
import pprint
import warnings
import sys
from six import StringIO
PY3 = (sys.version_info[0] >= 3)
if PY3:
def u(s):
return s
else:
def u(s):
return unicode(s, "unicode_escape")
DeprecatedOption = namedtuple('DeprecatedOption', 'key msg rkey removal_ver')
RegisteredOption = namedtuple(
'RegisteredOption', 'key defval doc validator cb')
_deprecated_options = {} # holds deprecated option metadata
_registered_options = {} # holds registered option metadata
_global_config = {} # holds the current values for registered options
_reserved_keys = ['all'] # keys which have a special meaning
class OptionError(AttributeError, KeyError):
"""Exception for ibis.options, backwards compatible with KeyError
checks"""
#
# User API
def _get_single_key(pat, silent):
keys = _select_options(pat)
if len(keys) == 0:
if not silent:
_warn_if_deprecated(pat)
        raise OptionError('No such key(s): %r' % pat)
if len(keys) > 1:
raise OptionError('Pattern matched multiple keys')
key = keys[0]
if not silent:
_warn_if_deprecated(key)
key = _translate_key(key)
return key
def _get_option(pat, silent=False):
key = _get_single_key(pat, silent)
# walk the nested dict
root, k = _get_root(key)
return root[k]
def _set_option(*args, **kwargs):
    # must have at least 1 arg; deal with constraints later
nargs = len(args)
if not nargs or nargs % 2 != 0:
raise ValueError("Must provide an even number of non-keyword "
"arguments")
# default to false
silent = kwargs.get('silent', False)
for k, v in zip(args[::2], args[1::2]):
key = _get_single_key(k, silent)
o = _get_registered_option(key)
if o and o.validator:
o.validator(v)
# walk the nested dict
root, k = _get_root(key)
root[k] = v
if o.cb:
o.cb(key)
def _describe_option(pat='', _print_desc=True):
keys = _select_options(pat)
if len(keys) == 0:
        raise OptionError('No such key(s)')
s = u('')
for k in keys: # filter by pat
s += _build_option_description(k)
if _print_desc:
print(s)
else:
return s
def _reset_option(pat, silent=False):
keys = _select_options(pat)
if len(keys) == 0:
        raise OptionError('No such key(s)')
if len(keys) > 1 and len(pat) < 4 and pat != 'all':
raise ValueError('You must specify at least 4 characters when '
'resetting multiple keys, use the special keyword '
'"all" to reset all the options to their default '
'value')
for k in keys:
_set_option(k, _registered_options[k].defval, silent=silent)
def get_default_val(pat):
key = _get_single_key(pat, silent=True)
return _get_registered_option(key).defval
class DictWrapper(object):
""" provide attribute-style access to a nested dict
"""
def __init__(self, d, prefix=""):
object.__setattr__(self, "d", d)
object.__setattr__(self, "prefix", prefix)
def __repr__(self):
buf = StringIO()
pprint.pprint(self.d, stream=buf)
return buf.getvalue()
def __setattr__(self, key, val):
prefix = object.__getattribute__(self, "prefix")
if prefix:
prefix += "."
prefix += key
# you can't set new keys
        # and you can't overwrite subtrees
if key in self.d and not isinstance(self.d[key], dict):
_set_option(prefix, val)
else:
raise OptionError("You can only set the value of existing options")
def __getattr__(self, key):
prefix = object.__getattribute__(self, "prefix")
if prefix:
prefix += "."
prefix += key
v = object.__getattribute__(self, "d")[key]
if isinstance(v, dict):
return DictWrapper(v, prefix)
else:
return _get_option(prefix)
def __dir__(self):
return list(self.d.keys())
# For user convenience, we'd like to have the available options described
# in the docstring. For dev convenience we'd like to generate the docstrings
# dynamically instead of maintaining them by hand. To this end, we use the
# class below, which wraps functions inside a callable and converts
# __doc__ into a property function. The docstrings below are templates
# using the py2.6+ advanced formatting syntax to plug in a concise list
# of options, and option descriptions.
class CallableDynamicDoc(object):
def __init__(self, func, doc_tmpl):
self.__doc_tmpl__ = doc_tmpl
self.__func__ = func
def __call__(self, *args, **kwds):
return self.__func__(*args, **kwds)
@property
def __doc__(self):
opts_desc = _describe_option('all', _print_desc=False)
opts_list = pp_options_list(list(_registered_options.keys()))
return self.__doc_tmpl__.format(opts_desc=opts_desc,
opts_list=opts_list)
_get_option_tmpl = """
get_option(pat)
Retrieves the value of the specified option.
Available options:
{opts_list}
Parameters
----------
pat : str
Regexp which should match a single option.
Note: partial matches are supported for convenience, but unless you use the
full option name (e.g. x.y.z.option_name), your code may break in future
versions if new options with similar names are introduced.
Returns
-------
result : the value of the option
Raises
------
OptionError : if no such option exists
Notes
-----
The available options with their descriptions:
{opts_desc}
"""
_set_option_tmpl = """
set_option(pat, value)
Sets the value of the specified option.
Available options:
{opts_list}
Parameters
----------
pat : str
Regexp which should match a single option.
Note: partial matches are supported for convenience, but unless you use the
full option name (e.g. x.y.z.option_name), your code may break in future
versions if new options with similar names are introduced.
value :
new value of option.
Returns
-------
None
Raises
------
OptionError if no such option exists
Notes
-----
The available options with their descriptions:
{opts_desc}
"""
_describe_option_tmpl = """
describe_option(pat, _print_desc=True)
Prints the description for one or more registered options.
Call with no arguments to get a listing of all registered options.
Available options:
{opts_list}
Parameters
----------
pat : str
Regexp pattern. All matching keys will have their description displayed.
_print_desc : bool, default True
If True (default) the description(s) will be printed to stdout.
Otherwise, the description(s) will be returned as a unicode string
(for testing).
Returns
-------
None by default, the description(s) as a unicode string if _print_desc
is False
Notes
-----
The available options with their descriptions:
{opts_desc}
"""
_reset_option_tmpl = """
reset_option(pat)
Reset one or more options to their default value.
Pass "all" as argument to reset all options.
Available options:
{opts_list}
Parameters
----------
pat : str/regex
If specified only options matching `prefix*` will be reset.
Note: partial matches are supported for convenience, but unless you
use the full option name (e.g. x.y.z.option_name), your code may break
in future versions if new options with similar names are introduced.
Returns
-------
None
Notes
-----
The available options with their descriptions:
{opts_desc}
"""
# bind the functions with their docstrings into a Callable
# and use that as the functions exposed in pd.api
get_option = CallableDynamicDoc(_get_option, _get_option_tmpl)
set_option = CallableDynamicDoc(_set_option, _set_option_tmpl)
reset_option = CallableDynamicDoc(_reset_option, _reset_option_tmpl)
describe_option = CallableDynamicDoc(_describe_option, _describe_option_tmpl)
options = DictWrapper(_global_config)
#
# Functions for use by ibis developers, in addition to the user API
class option_context(object):
"""
Context manager to temporarily set options in the `with` statement context.
You need to invoke as ``option_context(pat, val, [(pat, val), ...])``.
Examples
--------
>>> with option_context('display.max_rows', 10, 'display.max_columns', 5):
...
"""
def __init__(self, *args):
if not (len(args) % 2 == 0 and len(args) >= 2):
raise ValueError(
                'Need to invoke as '
                'option_context(pat, val, [(pat, val), ...]).'
)
self.ops = list(zip(args[::2], args[1::2]))
def __enter__(self):
undo = []
for pat, val in self.ops:
undo.append((pat, _get_option(pat, silent=True)))
self.undo = undo
for pat, val in self.ops:
_set_option(pat, val, silent=True)
def __exit__(self, *args):
if self.undo:
for pat, val in self.undo:
_set_option(pat, val, silent=True)
def register_option(key, defval, doc='', validator=None, cb=None):
"""Register an option in the package-wide ibis config object
Parameters
----------
key - a fully-qualified key, e.g. "x.y.option - z".
defval - the default value of the option
doc - a string description of the option
validator - a function of a single argument, should raise `ValueError` if
called with a value which is not a legal value for the option.
cb - a function of a single argument "key", which is called
immediately after an option value is set/reset. key is
the full name of the option.
Returns
-------
Nothing.
Raises
------
ValueError if `validator` is specified and `defval` is not a valid value.
"""
import tokenize
import keyword
key = key.lower()
if key in _registered_options:
raise OptionError("Option '%s' has already been registered" % key)
if key in _reserved_keys:
raise OptionError("Option '%s' is a reserved key" % key)
# the default value should be legal
if validator:
validator(defval)
# walk the nested dict, creating dicts as needed along the path
path = key.split('.')
for k in path:
if not bool(re.match('^' + tokenize.Name + '$', k)):
raise ValueError("%s is not a valid identifier" % k)
if keyword.iskeyword(k):
raise ValueError("%s is a python keyword" % k)
cursor = _global_config
for i, p in enumerate(path[:-1]):
if not isinstance(cursor, dict):
raise OptionError("Path prefix to option '%s' is already an option"
% '.'.join(path[:i]))
if p not in cursor:
cursor[p] = {}
cursor = cursor[p]
if not isinstance(cursor, dict):
raise OptionError("Path prefix to option '%s' is already an option"
% '.'.join(path[:-1]))
cursor[path[-1]] = defval # initialize
# save the option metadata
_registered_options[key] = RegisteredOption(key=key, defval=defval,
doc=doc, validator=validator,
cb=cb)
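# A minimal usage sketch (illustrative only; the option name and values are
# made up). `is_int` is one of the validator factories defined at the bottom
# of this module.
#
#   register_option('display.max_rows', 100, doc='rows shown in repr',
#                   validator=is_int)
#   get_option('display.max_rows')       # -> 100
#   set_option('display.max_rows', 25)
#   reset_option('display.max_rows')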
def deprecate_option(key, msg=None, rkey=None, removal_ver=None):
"""
Mark option `key` as deprecated, if code attempts to access this option,
a warning will be produced, using `msg` if given, or a default message
if not.
if `rkey` is given, any access to the key will be re-routed to `rkey`.
    Neither the existence of `key` nor that of `rkey` is checked. If they
    do not exist, any subsequent access will fail as usual, after the
deprecation warning is given.
Parameters
----------
key - the name of the option to be deprecated. must be a fully-qualified
option name (e.g "x.y.z.rkey").
msg - (Optional) a warning message to output when the key is referenced.
if no message is given a default message will be emitted.
rkey - (Optional) the name of an option to reroute access to.
If specified, any referenced `key` will be re-routed to `rkey`
including set/get/reset.
rkey must be a fully-qualified option name (e.g "x.y.z.rkey").
used by the default message if no `msg` is specified.
removal_ver - (Optional) specifies the version in which this option will
be removed. used by the default message if no `msg`
is specified.
Returns
-------
Nothing
Raises
------
OptionError - if key has already been deprecated.
"""
key = key.lower()
if key in _deprecated_options:
raise OptionError("Option '%s' has already been defined as deprecated."
% key)
_deprecated_options[key] = DeprecatedOption(key, msg, rkey, removal_ver)
#
# functions internal to the module
def _select_options(pat):
"""returns a list of keys matching `pat`
if pat=="all", returns all registered options
"""
# short-circuit for exact key
if pat in _registered_options:
return [pat]
# else look through all of them
keys = sorted(_registered_options.keys())
if pat == 'all': # reserved key
return keys
return [k for k in keys if re.search(pat, k, re.I)]
def _get_root(key):
path = key.split('.')
cursor = _global_config
for p in path[:-1]:
cursor = cursor[p]
return cursor, path[-1]
def _is_deprecated(key):
""" Returns True if the given option has been deprecated """
key = key.lower()
return key in _deprecated_options
def _get_deprecated_option(key):
"""
Retrieves the metadata for a deprecated option, if `key` is deprecated.
Returns
-------
DeprecatedOption (namedtuple) if key is deprecated, None otherwise
"""
try:
d = _deprecated_options[key]
except KeyError:
return None
else:
return d
def _get_registered_option(key):
"""
Retrieves the option metadata if `key` is a registered option.
Returns
-------
RegisteredOption (namedtuple) if key is deprecated, None otherwise
"""
return _registered_options.get(key)
def _translate_key(key):
"""
    If key is deprecated and a replacement key is defined, returns the
    replacement key; otherwise returns `key` as-is
"""
d = _get_deprecated_option(key)
if d:
return d.rkey or key
else:
return key
def _warn_if_deprecated(key):
"""
Checks if `key` is a deprecated option and if so, prints a warning.
Returns
-------
bool - True if `key` is deprecated, False otherwise.
"""
d = _get_deprecated_option(key)
if d:
if d.msg:
print(d.msg)
warnings.warn(d.msg, DeprecationWarning)
else:
msg = "'%s' is deprecated" % key
if d.removal_ver:
msg += ' and will be removed in %s' % d.removal_ver
if d.rkey:
msg += ", please use '%s' instead." % d.rkey
else:
msg += ', please refrain from using it.'
warnings.warn(msg, DeprecationWarning)
return True
return False
def _build_option_description(k):
""" Builds a formatted description of a registered option and prints it """
o = _get_registered_option(k)
d = _get_deprecated_option(k)
s = u('%s ') % k
if o.doc:
s += '\n'.join(o.doc.strip().split('\n'))
else:
s += 'No description available.'
if o:
s += u('\n [default: %s] [currently: %s]') % (o.defval,
_get_option(k, True))
if d:
s += u('\n (Deprecated')
s += (u(', use `%s` instead.') % d.rkey if d.rkey else '')
s += u(')')
s += '\n\n'
return s
def pp_options_list(keys, width=80, _print=False):
""" Builds a concise listing of available options, grouped by prefix """
from textwrap import wrap
from itertools import groupby
def pp(name, ks):
pfx = ('- ' + name + '.[' if name else '')
ls = wrap(', '.join(ks), width, initial_indent=pfx,
subsequent_indent=' ', break_long_words=False)
if ls and ls[-1] and name:
ls[-1] = ls[-1] + ']'
return ls
ls = []
singles = [x for x in sorted(keys) if x.find('.') < 0]
if singles:
ls += pp('', singles)
keys = [x for x in keys if x.find('.') >= 0]
for k, g in groupby(sorted(keys), lambda x: x[:x.rfind('.')]):
ks = [x[len(k) + 1:] for x in list(g)]
ls += pp(k, ks)
s = '\n'.join(ls)
if _print:
print(s)
else:
return s
#
# helpers
@contextmanager
def config_prefix(prefix):
"""contextmanager for multiple invocations of API with a common prefix
supported API functions: (register / get / set )__option
    Warning: This is not thread-safe, and won't work properly if you import
the API functions into your module using the "from x import y" construct.
Example:
import ibis.config as cf
with cf.config_prefix("display.font"):
cf.register_option("color", "red")
cf.register_option("size", " 5 pt")
cf.set_option(size, " 6 pt")
cf.get_option(size)
...
etc'
will register options "display.font.color", "display.font.size", set the
value of "display.font.size"... and so on.
"""
# Note: reset_option relies on set_option, and on key directly
# it does not fit in to this monkey-patching scheme
global register_option, get_option, set_option, reset_option
def wrap(func):
def inner(key, *args, **kwds):
pkey = '%s.%s' % (prefix, key)
return func(pkey, *args, **kwds)
return inner
_register_option = register_option
_get_option = get_option
_set_option = set_option
set_option = wrap(set_option)
get_option = wrap(get_option)
register_option = wrap(register_option)
yield None
set_option = _set_option
get_option = _get_option
register_option = _register_option
# These factories and methods are handy for use as the validator
# arg in register_option
def is_type_factory(_type):
"""
Parameters
----------
`_type` - a type to be compared against (e.g. type(x) == `_type`)
Returns
-------
    validator - a function of a single argument x, which raises
        ValueError if type(x) is not equal to `_type`
"""
def inner(x):
if type(x) != _type:
raise ValueError("Value must have type '%s'" % str(_type))
return inner
def is_instance_factory(_type):
"""
Parameters
----------
`_type` - the type to be checked against
Returns
-------
    validator - a function of a single argument x, which raises
        ValueError if x is not an instance of `_type`
"""
if isinstance(_type, (tuple, list)):
_type = tuple(_type)
type_repr = "|".join(map(str, _type))
else:
type_repr = "'%s'" % _type
def inner(x):
if not isinstance(x, _type):
raise ValueError("Value must be an instance of %s" % type_repr)
return inner
def is_one_of_factory(legal_values):
def inner(x):
if x not in legal_values:
pp_values = map(str, legal_values)
raise ValueError("Value must be one of %s"
% str("|".join(pp_values)))
return inner
# common type validators, for convenience
# usage: register_option(... , validator = is_int)
is_int = is_type_factory(int)
is_bool = is_type_factory(bool)
is_float = is_type_factory(float)
is_str = is_type_factory(str)
# is_unicode = is_type_factory(compat.text_type)
is_text = is_instance_factory((str, bytes))
| apache-2.0 |
dilawar/ncbs-hippo | db/find_faculty.py | 2 | 1774 | """find_faculty.py:
Search intranet for faculty.
"""
__author__ = "Me"
__copyright__ = "Copyright 2016, Me"
__credits__ = ["NCBS Bangalore"]
__license__ = "GNU GPL"
__version__ = "1.0.0"
__maintainer__ = "Me"
__email__ = ""
__status__ = "Development"
import sys
import os
import matplotlib.pyplot as plt
import numpy as np
import urllib2
from bs4 import BeautifulSoup
import html2text
import re
def queryIntranet( query, page = 0 ):
url = "https://intranet.ncbs.res.in/people-search?%s&page=%s" %( query , page )
req = urllib2.Request( url )
response = urllib2.urlopen( req )
text = response.read( )
return BeautifulSoup( text, 'html.parser' )
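# Illustrative only (not part of the original script): a rough Python 3
# equivalent of queryIntranet, assuming the third-party `requests` package
# and the same intranet URL scheme.
#
#   import requests
#   def query_intranet_py3(query, page=0):
#       url = "https://intranet.ncbs.res.in/people-search?%s&page=%s" % (query, page)
#       return BeautifulSoup(requests.get(url).text, 'html.parser')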
def data( text ):
m = re.search( '\[(.+)\]', text )
if m:
return m.group(1)
else:
return ''
def main( ):
with open( "faculty.txt", "w" ) as f:
f.write( "fname,mname,lname,login,email\n")
for page in [ 0, 1, 2 ]:
soup = queryIntranet( "field_personal_group_tid=111", page )
for t in soup.find_all( 'tr' ):
t = str(t)
if "Faculty" in t:
fline = html2text.html2text( t )
faculty = filter(None, fline.split( '\n' ) )
name, email = filter(None, map( data, faculty ))[0:2]
name = name.split()
fname, mname, lname = name[0], " ".join(name[1:-2]), name[-1]
print email
with open( "faculty.txt", "a" ) as f:
f.write( "%s,%s,%s,%s,%s\n" % (fname, mname, lname
, email.split('@')[0], email ) )
print( "Wrote faculty names to faculty.txt" )
if __name__ == '__main__':
main()
| mit |
Arsey/keras-transfer-learning-for-oxford102 | server.py | 1 | 3793 | #!/usr/bin/env python
import socket
from threading import Thread
import numpy as np
import os
import argparse
import config
import util
from sklearn.externals import joblib
import traceback
from keras.applications.imagenet_utils import preprocess_input
import time
util.set_img_format()
parser = argparse.ArgumentParser()
parser.add_argument('--model', type=str, required=True, help='Base model architecture',
choices=[config.MODEL_RESNET50,
config.MODEL_RESNET152,
config.MODEL_INCEPTION_V3,
config.MODEL_VGG16])
args = parser.parse_args()
config.model = args.model
model_module = util.get_model_class_instance()
model = model_module.load()
print('Model loaded')
print('Warming up the model')
start = time.clock()
if util.get_keras_backend_name() != 'tensorflow':
input_shape = (1, 3,) + model_module.img_size
else:
input_shape = (1, ) + model_module.img_size + (3, )
dummy_img = np.ones(input_shape)
dummy_img = preprocess_input(dummy_img)
model.predict(dummy_img)
end = time.clock()
print('Warming up took {} s'.format(end - start))
print('Trying to load a Novelty Detector')
try:
af = util.get_activation_function(model, model_module.noveltyDetectionLayerName)
print('Activation function is loaded')
novelty_detection_clf = joblib.load(config.get_novelty_detection_model_path())
print('Novelty Detection classifier is loaded')
except Exception as e:
print('Error on loading Novelty Detection classifier', e)
FILE_DOES_NOT_EXIST = '-1'
UNKNOWN_ERROR = '-2'
def handle(clientsocket):
while 1:
buf = clientsocket.recv(config.buffer_size)
if buf == 'exit'.encode():
return # client terminated connection
response = ''
if os.path.isfile(buf):
try:
img = [model_module.load_img(buf)]
out = model.predict(np.array(img))
prediction = np.argmax(out)
top10 = out[0].argsort()[-10:][::-1]
class_indices = dict(zip(config.classes, range(len(config.classes))))
keys = list(class_indices.keys())
values = list(class_indices.values())
answer = keys[values.index(prediction)]
try:
acts = util.get_activations(af, img)
predicted_relativity = novelty_detection_clf.predict(acts)[0]
nd_class = novelty_detection_clf.__classes[predicted_relativity]
except Exception as e:
print(e.message)
nd_class = 'related'
top10_json = "["
for i, t in enumerate(top10):
top10_json += '{"probability":"%s", "class":"%s"}%s' % (
out[0][t], keys[values.index(t)], '' if i == 9 else ',')
top10_json += "]"
response = '{"probability":"%s","class":"%s","relativity":"%s","top10":%s}' % (
out[0][prediction], answer, nd_class, top10_json)
print(response)
except Exception as e:
print('Error', e)
traceback.print_stack()
response = UNKNOWN_ERROR
else:
response = FILE_DOES_NOT_EXIST
clientsocket.sendall(response.encode())
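# Illustrative only (not part of the original script): a minimal client sketch
# for this server. It assumes the same config.server_address / config.buffer_size
# values used above; the argument must be an absolute image path readable by
# the server process.
def example_client(image_path):
    client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    client.connect(config.server_address)
    client.sendall(image_path.encode())                # send the path to classify
    reply = client.recv(config.buffer_size).decode()   # JSON-formatted prediction
    client.sendall('exit'.encode())                    # tell the handler thread to stop
    client.close()
    return reply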
serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
serversocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
serversocket.bind(config.server_address)
serversocket.listen(10000)
print('Ready for requests')
while 1:
# accept connections from outside
(clientsocket, address) = serversocket.accept()
ct = Thread(target=handle, args=(clientsocket,))
ct.run()
| mit |
pythonvietnam/scikit-learn | sklearn/svm/tests/test_svm.py | 70 | 31674 | """
Testing for Support Vector Machine module (sklearn.svm)
TODO: remove hard coded numerical results when possible
"""
import numpy as np
import itertools
from numpy.testing import assert_array_equal, assert_array_almost_equal
from numpy.testing import assert_almost_equal
from scipy import sparse
from nose.tools import assert_raises, assert_true, assert_equal, assert_false
from sklearn.base import ChangedBehaviorWarning
from sklearn import svm, linear_model, datasets, metrics, base
from sklearn.cross_validation import train_test_split
from sklearn.datasets import make_classification, make_blobs
from sklearn.metrics import f1_score
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.utils import check_random_state
from sklearn.utils import ConvergenceWarning
from sklearn.utils.validation import NotFittedError
from sklearn.utils.testing import assert_greater, assert_in, assert_less
from sklearn.utils.testing import assert_raises_regexp, assert_warns
from sklearn.utils.testing import assert_warns_message, assert_raise_message
from sklearn.utils.testing import ignore_warnings
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
Y = [1, 1, 1, 2, 2, 2]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [1, 2, 2]
# also load the iris dataset
iris = datasets.load_iris()
rng = check_random_state(42)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_libsvm_parameters():
# Test parameters on classes that make use of libsvm.
clf = svm.SVC(kernel='linear').fit(X, Y)
assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
assert_array_equal(clf.support_, [1, 3])
assert_array_equal(clf.support_vectors_, (X[1], X[3]))
assert_array_equal(clf.intercept_, [0.])
assert_array_equal(clf.predict(X), Y)
def test_libsvm_iris():
# Check consistency on dataset iris.
# shuffle the dataset so that labels are not ordered
for k in ('linear', 'rbf'):
clf = svm.SVC(kernel=k).fit(iris.data, iris.target)
assert_greater(np.mean(clf.predict(iris.data) == iris.target), 0.9)
assert_array_equal(clf.classes_, np.sort(clf.classes_))
# check also the low-level API
model = svm.libsvm.fit(iris.data, iris.target.astype(np.float64))
pred = svm.libsvm.predict(iris.data, *model)
assert_greater(np.mean(pred == iris.target), .95)
model = svm.libsvm.fit(iris.data, iris.target.astype(np.float64),
kernel='linear')
pred = svm.libsvm.predict(iris.data, *model, kernel='linear')
assert_greater(np.mean(pred == iris.target), .95)
pred = svm.libsvm.cross_validation(iris.data,
iris.target.astype(np.float64), 5,
kernel='linear',
random_seed=0)
assert_greater(np.mean(pred == iris.target), .95)
# If random_seed >= 0, the libsvm rng is seeded (by calling `srand`), hence
    # we should get deterministic results (assuming that there is no other
# thread calling this wrapper calling `srand` concurrently).
pred2 = svm.libsvm.cross_validation(iris.data,
iris.target.astype(np.float64), 5,
kernel='linear',
random_seed=0)
assert_array_equal(pred, pred2)
@ignore_warnings
def test_single_sample_1d():
# Test whether SVCs work on a single sample given as a 1-d array
clf = svm.SVC().fit(X, Y)
clf.predict(X[0])
clf = svm.LinearSVC(random_state=0).fit(X, Y)
clf.predict(X[0])
def test_precomputed():
# SVC with a precomputed kernel.
# We test it with a toy dataset and with iris.
clf = svm.SVC(kernel='precomputed')
# Gram matrix for train data (square matrix)
# (we use just a linear kernel)
K = np.dot(X, np.array(X).T)
clf.fit(K, Y)
# Gram matrix for test data (rectangular matrix)
KT = np.dot(T, np.array(X).T)
pred = clf.predict(KT)
assert_raises(ValueError, clf.predict, KT.T)
assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
assert_array_equal(clf.support_, [1, 3])
assert_array_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.support_, [1, 3])
assert_array_equal(pred, true_result)
# Gram matrix for test data but compute KT[i,j]
# for support vectors j only.
KT = np.zeros_like(KT)
for i in range(len(T)):
for j in clf.support_:
KT[i, j] = np.dot(T[i], X[j])
pred = clf.predict(KT)
assert_array_equal(pred, true_result)
# same as before, but using a callable function instead of the kernel
# matrix. kernel is just a linear kernel
kfunc = lambda x, y: np.dot(x, y.T)
clf = svm.SVC(kernel=kfunc)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
assert_array_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.support_, [1, 3])
assert_array_equal(pred, true_result)
# test a precomputed kernel with the iris dataset
# and check parameters against a linear SVC
clf = svm.SVC(kernel='precomputed')
clf2 = svm.SVC(kernel='linear')
K = np.dot(iris.data, iris.data.T)
clf.fit(K, iris.target)
clf2.fit(iris.data, iris.target)
pred = clf.predict(K)
assert_array_almost_equal(clf.support_, clf2.support_)
assert_array_almost_equal(clf.dual_coef_, clf2.dual_coef_)
assert_array_almost_equal(clf.intercept_, clf2.intercept_)
assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
# Gram matrix for test data but compute KT[i,j]
# for support vectors j only.
K = np.zeros_like(K)
for i in range(len(iris.data)):
for j in clf.support_:
K[i, j] = np.dot(iris.data[i], iris.data[j])
pred = clf.predict(K)
assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
clf = svm.SVC(kernel=kfunc)
clf.fit(iris.data, iris.target)
assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
def test_svr():
# Test Support Vector Regression
diabetes = datasets.load_diabetes()
for clf in (svm.NuSVR(kernel='linear', nu=.4, C=1.0),
svm.NuSVR(kernel='linear', nu=.4, C=10.),
svm.SVR(kernel='linear', C=10.),
svm.LinearSVR(C=10.),
svm.LinearSVR(C=10.),
):
clf.fit(diabetes.data, diabetes.target)
assert_greater(clf.score(diabetes.data, diabetes.target), 0.02)
# non-regression test; previously, BaseLibSVM would check that
# len(np.unique(y)) < 2, which must only be done for SVC
svm.SVR().fit(diabetes.data, np.ones(len(diabetes.data)))
svm.LinearSVR().fit(diabetes.data, np.ones(len(diabetes.data)))
def test_linearsvr():
# check that SVR(kernel='linear') and LinearSVC() give
# comparable results
diabetes = datasets.load_diabetes()
lsvr = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target)
score1 = lsvr.score(diabetes.data, diabetes.target)
svr = svm.SVR(kernel='linear', C=1e3).fit(diabetes.data, diabetes.target)
score2 = svr.score(diabetes.data, diabetes.target)
assert np.linalg.norm(lsvr.coef_ - svr.coef_) / np.linalg.norm(svr.coef_) < .1
assert np.abs(score1 - score2) < 0.1
def test_svr_errors():
X = [[0.0], [1.0]]
y = [0.0, 0.5]
# Bad kernel
clf = svm.SVR(kernel=lambda x, y: np.array([[1.0]]))
clf.fit(X, y)
assert_raises(ValueError, clf.predict, X)
def test_oneclass():
# Test OneClassSVM
clf = svm.OneClassSVM()
clf.fit(X)
pred = clf.predict(T)
assert_array_almost_equal(pred, [-1, -1, -1])
assert_array_almost_equal(clf.intercept_, [-1.008], decimal=3)
assert_array_almost_equal(clf.dual_coef_,
[[0.632, 0.233, 0.633, 0.234, 0.632, 0.633]],
decimal=3)
assert_raises(ValueError, lambda: clf.coef_)
def test_oneclass_decision_function():
# Test OneClassSVM decision function
clf = svm.OneClassSVM()
rnd = check_random_state(2)
# Generate train data
X = 0.3 * rnd.randn(100, 2)
X_train = np.r_[X + 2, X - 2]
# Generate some regular novel observations
X = 0.3 * rnd.randn(20, 2)
X_test = np.r_[X + 2, X - 2]
# Generate some abnormal novel observations
X_outliers = rnd.uniform(low=-4, high=4, size=(20, 2))
# fit the model
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1)
clf.fit(X_train)
# predict things
y_pred_test = clf.predict(X_test)
assert_greater(np.mean(y_pred_test == 1), .9)
y_pred_outliers = clf.predict(X_outliers)
assert_greater(np.mean(y_pred_outliers == -1), .9)
dec_func_test = clf.decision_function(X_test)
assert_array_equal((dec_func_test > 0).ravel(), y_pred_test == 1)
dec_func_outliers = clf.decision_function(X_outliers)
assert_array_equal((dec_func_outliers > 0).ravel(), y_pred_outliers == 1)
def test_tweak_params():
# Make sure some tweaking of parameters works.
# We change clf.dual_coef_ at run time and expect .predict() to change
# accordingly. Notice that this is not trivial since it involves a lot
# of C/Python copying in the libsvm bindings.
# The success of this test ensures that the mapping between libsvm and
# the python classifier is complete.
clf = svm.SVC(kernel='linear', C=1.0)
clf.fit(X, Y)
assert_array_equal(clf.dual_coef_, [[-.25, .25]])
assert_array_equal(clf.predict([[-.1, -.1]]), [1])
clf._dual_coef_ = np.array([[.0, 1.]])
assert_array_equal(clf.predict([[-.1, -.1]]), [2])
def test_probability():
# Predict probabilities using SVC
# This uses cross validation, so we use a slightly bigger testing set.
for clf in (svm.SVC(probability=True, random_state=0, C=1.0),
svm.NuSVC(probability=True, random_state=0)):
clf.fit(iris.data, iris.target)
prob_predict = clf.predict_proba(iris.data)
assert_array_almost_equal(
np.sum(prob_predict, 1), np.ones(iris.data.shape[0]))
assert_true(np.mean(np.argmax(prob_predict, 1)
== clf.predict(iris.data)) > 0.9)
assert_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)), 8)
def test_decision_function():
# Test decision_function
# Sanity check, test that decision_function implemented in python
# returns the same as the one in libsvm
# multi class:
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovo').fit(iris.data, iris.target)
dec = np.dot(iris.data, clf.coef_.T) + clf.intercept_
assert_array_almost_equal(dec, clf.decision_function(iris.data))
# binary:
clf.fit(X, Y)
dec = np.dot(X, clf.coef_.T) + clf.intercept_
prediction = clf.predict(X)
assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
assert_array_almost_equal(
prediction,
clf.classes_[(clf.decision_function(X) > 0).astype(np.int)])
expected = np.array([-1., -0.66, -1., 0.66, 1., 1.])
assert_array_almost_equal(clf.decision_function(X), expected, 2)
# kernel binary:
clf = svm.SVC(kernel='rbf', gamma=1, decision_function_shape='ovo')
clf.fit(X, Y)
rbfs = rbf_kernel(X, clf.support_vectors_, gamma=clf.gamma)
dec = np.dot(rbfs, clf.dual_coef_.T) + clf.intercept_
assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
def test_decision_function_shape():
# check that decision_function_shape='ovr' gives
# correct shape and is consistent with predict
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovr').fit(iris.data, iris.target)
dec = clf.decision_function(iris.data)
assert_equal(dec.shape, (len(iris.data), 3))
assert_array_equal(clf.predict(iris.data), np.argmax(dec, axis=1))
# with five classes:
X, y = make_blobs(n_samples=80, centers=5, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovr').fit(X_train, y_train)
dec = clf.decision_function(X_test)
assert_equal(dec.shape, (len(X_test), 5))
assert_array_equal(clf.predict(X_test), np.argmax(dec, axis=1))
    # check shape with decision_function_shape='ovo'
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovo').fit(X_train, y_train)
dec = clf.decision_function(X_train)
assert_equal(dec.shape, (len(X_train), 10))
# check deprecation warning
clf.decision_function_shape = None
msg = "change the shape of the decision function"
dec = assert_warns_message(ChangedBehaviorWarning, msg,
clf.decision_function, X_train)
assert_equal(dec.shape, (len(X_train), 10))
def test_svr_decision_function():
# Test SVR's decision_function
# Sanity check, test that decision_function implemented in python
# returns the same as the one in libsvm
X = iris.data
y = iris.target
# linear kernel
reg = svm.SVR(kernel='linear', C=0.1).fit(X, y)
dec = np.dot(X, reg.coef_.T) + reg.intercept_
assert_array_almost_equal(dec.ravel(), reg.decision_function(X).ravel())
# rbf kernel
reg = svm.SVR(kernel='rbf', gamma=1).fit(X, y)
rbfs = rbf_kernel(X, reg.support_vectors_, gamma=reg.gamma)
dec = np.dot(rbfs, reg.dual_coef_.T) + reg.intercept_
assert_array_almost_equal(dec.ravel(), reg.decision_function(X).ravel())
def test_weight():
# Test class weights
clf = svm.SVC(class_weight={1: 0.1})
# we give a small weights to class 1
clf.fit(X, Y)
# so all predicted values belong to class 2
assert_array_almost_equal(clf.predict(X), [2] * 6)
X_, y_ = make_classification(n_samples=200, n_features=10,
weights=[0.833, 0.167], random_state=2)
for clf in (linear_model.LogisticRegression(),
svm.LinearSVC(random_state=0), svm.SVC()):
clf.set_params(class_weight={0: .1, 1: 10})
clf.fit(X_[:100], y_[:100])
y_pred = clf.predict(X_[100:])
assert_true(f1_score(y_[100:], y_pred) > .3)
def test_sample_weights():
# Test weights on individual samples
# TODO: check on NuSVR, OneClass, etc.
clf = svm.SVC()
clf.fit(X, Y)
assert_array_equal(clf.predict([X[2]]), [1.])
sample_weight = [.1] * 3 + [10] * 3
clf.fit(X, Y, sample_weight=sample_weight)
assert_array_equal(clf.predict([X[2]]), [2.])
# test that rescaling all samples is the same as changing C
clf = svm.SVC()
clf.fit(X, Y)
dual_coef_no_weight = clf.dual_coef_
clf.set_params(C=100)
clf.fit(X, Y, sample_weight=np.repeat(0.01, len(X)))
assert_array_almost_equal(dual_coef_no_weight, clf.dual_coef_)
def test_auto_weight():
# Test class weights for imbalanced data
from sklearn.linear_model import LogisticRegression
# We take as dataset the two-dimensional projection of iris so
# that it is not separable and remove half of predictors from
# class 1.
# We add one to the targets as a non-regression test: class_weight="balanced"
    # used to work only when the labels were a range [0..K).
from sklearn.utils import compute_class_weight
X, y = iris.data[:, :2], iris.target + 1
unbalanced = np.delete(np.arange(y.size), np.where(y > 2)[0][::2])
classes = np.unique(y[unbalanced])
class_weights = compute_class_weight('balanced', classes, y[unbalanced])
assert_true(np.argmax(class_weights) == 2)
for clf in (svm.SVC(kernel='linear'), svm.LinearSVC(random_state=0),
LogisticRegression()):
# check that score is better when class='balanced' is set.
y_pred = clf.fit(X[unbalanced], y[unbalanced]).predict(X)
clf.set_params(class_weight='balanced')
y_pred_balanced = clf.fit(X[unbalanced], y[unbalanced],).predict(X)
assert_true(metrics.f1_score(y, y_pred, average='weighted')
<= metrics.f1_score(y, y_pred_balanced,
average='weighted'))
def test_bad_input():
# Test that it gives proper exception on deficient input
# impossible value of C
assert_raises(ValueError, svm.SVC(C=-1).fit, X, Y)
# impossible value of nu
clf = svm.NuSVC(nu=0.0)
assert_raises(ValueError, clf.fit, X, Y)
Y2 = Y[:-1] # wrong dimensions for labels
assert_raises(ValueError, clf.fit, X, Y2)
# Test with arrays that are non-contiguous.
for clf in (svm.SVC(), svm.LinearSVC(random_state=0)):
Xf = np.asfortranarray(X)
assert_false(Xf.flags['C_CONTIGUOUS'])
yf = np.ascontiguousarray(np.tile(Y, (2, 1)).T)
yf = yf[:, -1]
assert_false(yf.flags['F_CONTIGUOUS'])
assert_false(yf.flags['C_CONTIGUOUS'])
clf.fit(Xf, yf)
assert_array_equal(clf.predict(T), true_result)
    # error for precomputed kernels
clf = svm.SVC(kernel='precomputed')
assert_raises(ValueError, clf.fit, X, Y)
# sample_weight bad dimensions
clf = svm.SVC()
assert_raises(ValueError, clf.fit, X, Y, sample_weight=range(len(X) - 1))
# predict with sparse input when trained with dense
clf = svm.SVC().fit(X, Y)
assert_raises(ValueError, clf.predict, sparse.lil_matrix(X))
Xt = np.array(X).T
clf.fit(np.dot(X, Xt), Y)
assert_raises(ValueError, clf.predict, X)
clf = svm.SVC()
clf.fit(X, Y)
assert_raises(ValueError, clf.predict, Xt)
def test_sparse_precomputed():
clf = svm.SVC(kernel='precomputed')
sparse_gram = sparse.csr_matrix([[1, 0], [0, 1]])
try:
clf.fit(sparse_gram, [0, 1])
assert not "reached"
except TypeError as e:
assert_in("Sparse precomputed", str(e))
def test_linearsvc_parameters():
# Test possible parameter combinations in LinearSVC
# Generate list of possible parameter combinations
losses = ['hinge', 'squared_hinge', 'logistic_regression', 'foo']
penalties, duals = ['l1', 'l2', 'bar'], [True, False]
X, y = make_classification(n_samples=5, n_features=5)
for loss, penalty, dual in itertools.product(losses, penalties, duals):
clf = svm.LinearSVC(penalty=penalty, loss=loss, dual=dual)
if ((loss, penalty) == ('hinge', 'l1') or
(loss, penalty, dual) == ('hinge', 'l2', False) or
(penalty, dual) == ('l1', True) or
loss == 'foo' or penalty == 'bar'):
assert_raises_regexp(ValueError,
"Unsupported set of arguments.*penalty='%s.*"
"loss='%s.*dual=%s"
% (penalty, loss, dual),
clf.fit, X, y)
else:
clf.fit(X, y)
# Incorrect loss value - test if explicit error message is raised
assert_raises_regexp(ValueError, ".*loss='l3' is not supported.*",
svm.LinearSVC(loss="l3").fit, X, y)
# FIXME remove in 1.0
def test_linearsvx_loss_penalty_deprecations():
X, y = [[0.0], [1.0]], [0, 1]
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the %s will be removed in %s")
# LinearSVC
# loss l1/L1 --> hinge
assert_warns_message(DeprecationWarning,
msg % ("l1", "hinge", "loss='l1'", "1.0"),
svm.LinearSVC(loss="l1").fit, X, y)
# loss l2/L2 --> squared_hinge
assert_warns_message(DeprecationWarning,
msg % ("L2", "squared_hinge", "loss='L2'", "1.0"),
svm.LinearSVC(loss="L2").fit, X, y)
# LinearSVR
# loss l1/L1 --> epsilon_insensitive
assert_warns_message(DeprecationWarning,
msg % ("L1", "epsilon_insensitive", "loss='L1'",
"1.0"),
svm.LinearSVR(loss="L1").fit, X, y)
# loss l2/L2 --> squared_epsilon_insensitive
assert_warns_message(DeprecationWarning,
msg % ("l2", "squared_epsilon_insensitive",
"loss='l2'", "1.0"),
svm.LinearSVR(loss="l2").fit, X, y)
# FIXME remove in 0.18
def test_linear_svx_uppercase_loss_penalty():
# Check if Upper case notation is supported by _fit_liblinear
# which is called by fit
X, y = [[0.0], [1.0]], [0, 1]
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the uppercase notation will be removed in %s")
# loss SQUARED_hinge --> squared_hinge
assert_warns_message(DeprecationWarning,
msg % ("SQUARED_hinge", "squared_hinge", "0.18"),
svm.LinearSVC(loss="SQUARED_hinge").fit, X, y)
# penalty L2 --> l2
assert_warns_message(DeprecationWarning,
msg.replace("loss", "penalty")
% ("L2", "l2", "0.18"),
svm.LinearSVC(penalty="L2").fit, X, y)
# loss EPSILON_INSENSITIVE --> epsilon_insensitive
assert_warns_message(DeprecationWarning,
msg % ("EPSILON_INSENSITIVE", "epsilon_insensitive",
"0.18"),
svm.LinearSVR(loss="EPSILON_INSENSITIVE").fit, X, y)
def test_linearsvc():
# Test basic routines using LinearSVC
clf = svm.LinearSVC(random_state=0).fit(X, Y)
# by default should have intercept
assert_true(clf.fit_intercept)
assert_array_equal(clf.predict(T), true_result)
assert_array_almost_equal(clf.intercept_, [0], decimal=3)
# the same with l1 penalty
clf = svm.LinearSVC(penalty='l1', loss='squared_hinge', dual=False, random_state=0).fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# l2 penalty with dual formulation
clf = svm.LinearSVC(penalty='l2', dual=True, random_state=0).fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# l2 penalty, l1 loss
clf = svm.LinearSVC(penalty='l2', loss='hinge', dual=True, random_state=0)
clf.fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# test also decision function
dec = clf.decision_function(T)
res = (dec > 0).astype(np.int) + 1
assert_array_equal(res, true_result)
def test_linearsvc_crammer_singer():
# Test LinearSVC with crammer_singer multi-class svm
ovr_clf = svm.LinearSVC(random_state=0).fit(iris.data, iris.target)
cs_clf = svm.LinearSVC(multi_class='crammer_singer', random_state=0)
cs_clf.fit(iris.data, iris.target)
# similar prediction for ovr and crammer-singer:
assert_true((ovr_clf.predict(iris.data) ==
cs_clf.predict(iris.data)).mean() > .9)
# classifiers shouldn't be the same
assert_true((ovr_clf.coef_ != cs_clf.coef_).all())
# test decision function
assert_array_equal(cs_clf.predict(iris.data),
np.argmax(cs_clf.decision_function(iris.data), axis=1))
dec_func = np.dot(iris.data, cs_clf.coef_.T) + cs_clf.intercept_
assert_array_almost_equal(dec_func, cs_clf.decision_function(iris.data))
def test_crammer_singer_binary():
# Test Crammer-Singer formulation in the binary case
X, y = make_classification(n_classes=2, random_state=0)
for fit_intercept in (True, False):
acc = svm.LinearSVC(fit_intercept=fit_intercept,
multi_class="crammer_singer",
random_state=0).fit(X, y).score(X, y)
assert_greater(acc, 0.9)
def test_linearsvc_iris():
# Test that LinearSVC gives plausible predictions on the iris dataset
# Also, test symbolic class names (classes_).
target = iris.target_names[iris.target]
clf = svm.LinearSVC(random_state=0).fit(iris.data, target)
assert_equal(set(clf.classes_), set(iris.target_names))
assert_greater(np.mean(clf.predict(iris.data) == target), 0.8)
dec = clf.decision_function(iris.data)
pred = iris.target_names[np.argmax(dec, 1)]
assert_array_equal(pred, clf.predict(iris.data))
def test_dense_liblinear_intercept_handling(classifier=svm.LinearSVC):
# Test that dense liblinear honours intercept_scaling param
X = [[2, 1],
[3, 1],
[1, 3],
[2, 3]]
y = [0, 0, 1, 1]
clf = classifier(fit_intercept=True, penalty='l1', loss='squared_hinge',
dual=False, C=4, tol=1e-7, random_state=0)
assert_true(clf.intercept_scaling == 1, clf.intercept_scaling)
assert_true(clf.fit_intercept)
# when intercept_scaling is low the intercept value is highly "penalized"
# by regularization
clf.intercept_scaling = 1
clf.fit(X, y)
assert_almost_equal(clf.intercept_, 0, decimal=5)
# when intercept_scaling is sufficiently high, the intercept value
# is not affected by regularization
clf.intercept_scaling = 100
clf.fit(X, y)
intercept1 = clf.intercept_
assert_less(intercept1, -1)
# when intercept_scaling is sufficiently high, the intercept value
# doesn't depend on intercept_scaling value
clf.intercept_scaling = 1000
clf.fit(X, y)
intercept2 = clf.intercept_
assert_array_almost_equal(intercept1, intercept2, decimal=2)
def test_liblinear_set_coef():
# multi-class case
clf = svm.LinearSVC().fit(iris.data, iris.target)
values = clf.decision_function(iris.data)
clf.coef_ = clf.coef_.copy()
clf.intercept_ = clf.intercept_.copy()
values2 = clf.decision_function(iris.data)
assert_array_almost_equal(values, values2)
# binary-class case
X = [[2, 1],
[3, 1],
[1, 3],
[2, 3]]
y = [0, 0, 1, 1]
clf = svm.LinearSVC().fit(X, y)
values = clf.decision_function(X)
clf.coef_ = clf.coef_.copy()
clf.intercept_ = clf.intercept_.copy()
values2 = clf.decision_function(X)
assert_array_equal(values, values2)
def test_immutable_coef_property():
# Check that primal coef modification are not silently ignored
svms = [
svm.SVC(kernel='linear').fit(iris.data, iris.target),
svm.NuSVC(kernel='linear').fit(iris.data, iris.target),
svm.SVR(kernel='linear').fit(iris.data, iris.target),
svm.NuSVR(kernel='linear').fit(iris.data, iris.target),
svm.OneClassSVM(kernel='linear').fit(iris.data),
]
for clf in svms:
assert_raises(AttributeError, clf.__setattr__, 'coef_', np.arange(3))
assert_raises((RuntimeError, ValueError),
clf.coef_.__setitem__, (0, 0), 0)
def test_linearsvc_verbose():
# stdout: redirect
import os
stdout = os.dup(1) # save original stdout
os.dup2(os.pipe()[1], 1) # replace it
# actual call
clf = svm.LinearSVC(verbose=1)
clf.fit(X, Y)
# stdout: restore
os.dup2(stdout, 1) # restore original stdout
def test_svc_clone_with_callable_kernel():
# create SVM with callable linear kernel, check that results are the same
# as with built-in linear kernel
svm_callable = svm.SVC(kernel=lambda x, y: np.dot(x, y.T),
probability=True, random_state=0,
decision_function_shape='ovr')
# clone for checking clonability with lambda functions..
svm_cloned = base.clone(svm_callable)
svm_cloned.fit(iris.data, iris.target)
svm_builtin = svm.SVC(kernel='linear', probability=True, random_state=0,
decision_function_shape='ovr')
svm_builtin.fit(iris.data, iris.target)
assert_array_almost_equal(svm_cloned.dual_coef_,
svm_builtin.dual_coef_)
assert_array_almost_equal(svm_cloned.intercept_,
svm_builtin.intercept_)
assert_array_equal(svm_cloned.predict(iris.data),
svm_builtin.predict(iris.data))
assert_array_almost_equal(svm_cloned.predict_proba(iris.data),
svm_builtin.predict_proba(iris.data),
decimal=4)
assert_array_almost_equal(svm_cloned.decision_function(iris.data),
svm_builtin.decision_function(iris.data))
def test_svc_bad_kernel():
svc = svm.SVC(kernel=lambda x, y: x)
assert_raises(ValueError, svc.fit, X, Y)
def test_timeout():
a = svm.SVC(kernel=lambda x, y: np.dot(x, y.T), probability=True,
random_state=0, max_iter=1)
assert_warns(ConvergenceWarning, a.fit, X, Y)
def test_unfitted():
X = "foo!" # input validation not required when SVM not fitted
clf = svm.SVC()
assert_raises_regexp(Exception, r".*\bSVC\b.*\bnot\b.*\bfitted\b",
clf.predict, X)
clf = svm.NuSVR()
assert_raises_regexp(Exception, r".*\bNuSVR\b.*\bnot\b.*\bfitted\b",
clf.predict, X)
# ignore convergence warnings from max_iter=1
@ignore_warnings
def test_consistent_proba():
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_1 = a.fit(X, Y).predict_proba(X)
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_2 = a.fit(X, Y).predict_proba(X)
assert_array_almost_equal(proba_1, proba_2)
def test_linear_svc_convergence_warnings():
# Test that warnings are raised if model does not converge
lsvc = svm.LinearSVC(max_iter=2, verbose=1)
assert_warns(ConvergenceWarning, lsvc.fit, X, Y)
assert_equal(lsvc.n_iter_, 2)
def test_svr_coef_sign():
# Test that SVR(kernel="linear") has coef_ with the right sign.
# Non-regression test for #2933.
X = np.random.RandomState(21).randn(10, 3)
y = np.random.RandomState(12).randn(10)
for svr in [svm.SVR(kernel='linear'), svm.NuSVR(kernel='linear'),
svm.LinearSVR()]:
svr.fit(X, y)
assert_array_almost_equal(svr.predict(X),
np.dot(X, svr.coef_.ravel()) + svr.intercept_)
def test_linear_svc_intercept_scaling():
# Test that the right error message is thrown when intercept_scaling <= 0
for i in [-1, 0]:
lsvc = svm.LinearSVC(intercept_scaling=i)
msg = ('Intercept scaling is %r but needs to be greater than 0.'
' To disable fitting an intercept,'
' set fit_intercept=False.' % lsvc.intercept_scaling)
assert_raise_message(ValueError, msg, lsvc.fit, X, Y)
def test_lsvc_intercept_scaling_zero():
# Test that intercept_scaling is ignored when fit_intercept is False
lsvc = svm.LinearSVC(fit_intercept=False)
lsvc.fit(X, Y)
assert_equal(lsvc.intercept_, 0.)
def test_hasattr_predict_proba():
# Method must be (un)available before or after fit, switched by
# `probability` param
G = svm.SVC(probability=True)
assert_true(hasattr(G, 'predict_proba'))
G.fit(iris.data, iris.target)
assert_true(hasattr(G, 'predict_proba'))
G = svm.SVC(probability=False)
assert_false(hasattr(G, 'predict_proba'))
G.fit(iris.data, iris.target)
assert_false(hasattr(G, 'predict_proba'))
# Switching to `probability=True` after fitting should make
# predict_proba available, but calling it must not work:
G.probability = True
assert_true(hasattr(G, 'predict_proba'))
msg = "predict_proba is not available when fitted with probability=False"
assert_raise_message(NotFittedError, msg, G.predict_proba, iris.data)
| bsd-3-clause |
Srisai85/sklearn_pycon2015 | notebooks/fig_code/ML_flow_chart.py | 61 | 4970 | """
Tutorial Diagrams
-----------------
This script plots the flow-charts used in the scikit-learn tutorials.
"""
import numpy as np
import pylab as pl
from matplotlib.patches import Circle, Rectangle, Polygon, Arrow, FancyArrow
def create_base(box_bg = '#CCCCCC',
arrow1 = '#88CCFF',
arrow2 = '#88FF88',
supervised=True):
fig = pl.figure(figsize=(9, 6), facecolor='w')
ax = pl.axes((0, 0, 1, 1),
xticks=[], yticks=[], frameon=False)
ax.set_xlim(0, 9)
ax.set_ylim(0, 6)
patches = [Rectangle((0.3, 3.6), 1.5, 1.8, zorder=1, fc=box_bg),
Rectangle((0.5, 3.8), 1.5, 1.8, zorder=2, fc=box_bg),
Rectangle((0.7, 4.0), 1.5, 1.8, zorder=3, fc=box_bg),
Rectangle((2.9, 3.6), 0.2, 1.8, fc=box_bg),
Rectangle((3.1, 3.8), 0.2, 1.8, fc=box_bg),
Rectangle((3.3, 4.0), 0.2, 1.8, fc=box_bg),
Rectangle((0.3, 0.2), 1.5, 1.8, fc=box_bg),
Rectangle((2.9, 0.2), 0.2, 1.8, fc=box_bg),
Circle((5.5, 3.5), 1.0, fc=box_bg),
Polygon([[5.5, 1.7],
[6.1, 1.1],
[5.5, 0.5],
[4.9, 1.1]], fc=box_bg),
FancyArrow(2.3, 4.6, 0.35, 0, fc=arrow1,
width=0.25, head_width=0.5, head_length=0.2),
FancyArrow(3.75, 4.2, 0.5, -0.2, fc=arrow1,
width=0.25, head_width=0.5, head_length=0.2),
FancyArrow(5.5, 2.4, 0, -0.4, fc=arrow1,
width=0.25, head_width=0.5, head_length=0.2),
FancyArrow(2.0, 1.1, 0.5, 0, fc=arrow2,
width=0.25, head_width=0.5, head_length=0.2),
FancyArrow(3.3, 1.1, 1.3, 0, fc=arrow2,
width=0.25, head_width=0.5, head_length=0.2),
FancyArrow(6.2, 1.1, 0.8, 0, fc=arrow2,
width=0.25, head_width=0.5, head_length=0.2)]
if supervised:
patches += [Rectangle((0.3, 2.4), 1.5, 0.5, zorder=1, fc=box_bg),
Rectangle((0.5, 2.6), 1.5, 0.5, zorder=2, fc=box_bg),
Rectangle((0.7, 2.8), 1.5, 0.5, zorder=3, fc=box_bg),
FancyArrow(2.3, 2.9, 2.0, 0, fc=arrow1,
width=0.25, head_width=0.5, head_length=0.2),
Rectangle((7.3, 0.85), 1.5, 0.5, fc=box_bg)]
else:
patches += [Rectangle((7.3, 0.2), 1.5, 1.8, fc=box_bg)]
for p in patches:
ax.add_patch(p)
pl.text(1.45, 4.9, "Training\nText,\nDocuments,\nImages,\netc.",
ha='center', va='center', fontsize=14)
pl.text(3.6, 4.9, "Feature\nVectors",
ha='left', va='center', fontsize=14)
pl.text(5.5, 3.5, "Machine\nLearning\nAlgorithm",
ha='center', va='center', fontsize=14)
pl.text(1.05, 1.1, "New Text,\nDocument,\nImage,\netc.",
ha='center', va='center', fontsize=14)
pl.text(3.3, 1.7, "Feature\nVector",
ha='left', va='center', fontsize=14)
pl.text(5.5, 1.1, "Predictive\nModel",
ha='center', va='center', fontsize=12)
if supervised:
pl.text(1.45, 3.05, "Labels",
ha='center', va='center', fontsize=14)
pl.text(8.05, 1.1, "Expected\nLabel",
ha='center', va='center', fontsize=14)
pl.text(8.8, 5.8, "Supervised Learning Model",
ha='right', va='top', fontsize=18)
else:
pl.text(8.05, 1.1,
"Likelihood\nor Cluster ID\nor Better\nRepresentation",
ha='center', va='center', fontsize=12)
pl.text(8.8, 5.8, "Unsupervised Learning Model",
ha='right', va='top', fontsize=18)
def plot_supervised_chart(annotate=False):
create_base(supervised=True)
if annotate:
fontdict = dict(color='r', weight='bold', size=14)
pl.text(1.9, 4.55, 'X = vec.fit_transform(input)',
fontdict=fontdict,
rotation=20, ha='left', va='bottom')
pl.text(3.7, 3.2, 'clf.fit(X, y)',
fontdict=fontdict,
rotation=20, ha='left', va='bottom')
pl.text(1.7, 1.5, 'X_new = vec.transform(input)',
fontdict=fontdict,
rotation=20, ha='left', va='bottom')
pl.text(6.1, 1.5, 'y_new = clf.predict(X_new)',
fontdict=fontdict,
rotation=20, ha='left', va='bottom')
def plot_unsupervised_chart():
create_base(supervised=False)
if __name__ == '__main__':
plot_supervised_chart(False)
plot_supervised_chart(True)
plot_unsupervised_chart()
pl.show()
| bsd-3-clause |
amolkahat/pandas | pandas/tests/series/test_apply.py | 1 | 24677 | # coding=utf-8
# pylint: disable-msg=E1101,W0612
import pytest
from collections import Counter, defaultdict, OrderedDict
from itertools import chain
import numpy as np
import pandas as pd
from pandas import (Index, Series, DataFrame, isna)
from pandas.compat import lrange
from pandas import compat
from pandas.util.testing import (assert_series_equal,
assert_frame_equal)
import pandas.util.testing as tm
from pandas.conftest import _get_cython_table_params
class TestSeriesApply():
def test_apply(self, datetime_series):
with np.errstate(all='ignore'):
tm.assert_series_equal(datetime_series.apply(np.sqrt),
np.sqrt(datetime_series))
# element-wise apply
import math
tm.assert_series_equal(datetime_series.apply(math.exp),
np.exp(datetime_series))
# empty series
s = Series(dtype=object, name='foo', index=pd.Index([], name='bar'))
rs = s.apply(lambda x: x)
tm.assert_series_equal(s, rs)
# check all metadata (GH 9322)
assert s is not rs
assert s.index is rs.index
assert s.dtype == rs.dtype
assert s.name == rs.name
# index but no data
s = Series(index=[1, 2, 3])
rs = s.apply(lambda x: x)
tm.assert_series_equal(s, rs)
def test_apply_same_length_inference_bug(self):
s = Series([1, 2])
f = lambda x: (x, x + 1)
result = s.apply(f)
expected = s.map(f)
assert_series_equal(result, expected)
s = Series([1, 2, 3])
result = s.apply(f)
expected = s.map(f)
assert_series_equal(result, expected)
def test_apply_dont_convert_dtype(self):
s = Series(np.random.randn(10))
f = lambda x: x if x > 0 else np.nan
result = s.apply(f, convert_dtype=False)
assert result.dtype == object
def test_with_string_args(self, datetime_series):
for arg in ['sum', 'mean', 'min', 'max', 'std']:
result = datetime_series.apply(arg)
expected = getattr(datetime_series, arg)()
assert result == expected
def test_apply_args(self):
s = Series(['foo,bar'])
result = s.apply(str.split, args=(',', ))
assert result[0] == ['foo', 'bar']
assert isinstance(result[0], list)
def test_series_map_box_timestamps(self):
# GH#2689, GH#2627
ser = Series(pd.date_range('1/1/2000', periods=10))
def func(x):
return (x.hour, x.day, x.month)
# it works!
ser.map(func)
ser.apply(func)
def test_apply_box(self):
# ufunc will not be boxed. Same test cases as the test_map_box
vals = [pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02')]
s = pd.Series(vals)
assert s.dtype == 'datetime64[ns]'
# boxed value must be Timestamp instance
res = s.apply(lambda x: '{0}_{1}_{2}'.format(x.__class__.__name__,
x.day, x.tz))
exp = pd.Series(['Timestamp_1_None', 'Timestamp_2_None'])
tm.assert_series_equal(res, exp)
vals = [pd.Timestamp('2011-01-01', tz='US/Eastern'),
pd.Timestamp('2011-01-02', tz='US/Eastern')]
s = pd.Series(vals)
assert s.dtype == 'datetime64[ns, US/Eastern]'
res = s.apply(lambda x: '{0}_{1}_{2}'.format(x.__class__.__name__,
x.day, x.tz))
exp = pd.Series(['Timestamp_1_US/Eastern', 'Timestamp_2_US/Eastern'])
tm.assert_series_equal(res, exp)
# timedelta
vals = [pd.Timedelta('1 days'), pd.Timedelta('2 days')]
s = pd.Series(vals)
assert s.dtype == 'timedelta64[ns]'
res = s.apply(lambda x: '{0}_{1}'.format(x.__class__.__name__, x.days))
exp = pd.Series(['Timedelta_1', 'Timedelta_2'])
tm.assert_series_equal(res, exp)
# period
vals = [pd.Period('2011-01-01', freq='M'),
pd.Period('2011-01-02', freq='M')]
s = pd.Series(vals)
assert s.dtype == 'Period[M]'
res = s.apply(lambda x: '{0}_{1}'.format(x.__class__.__name__,
x.freqstr))
exp = pd.Series(['Period_M', 'Period_M'])
tm.assert_series_equal(res, exp)
def test_apply_datetimetz(self):
values = pd.date_range('2011-01-01', '2011-01-02',
freq='H').tz_localize('Asia/Tokyo')
s = pd.Series(values, name='XX')
result = s.apply(lambda x: x + pd.offsets.Day())
exp_values = pd.date_range('2011-01-02', '2011-01-03',
freq='H').tz_localize('Asia/Tokyo')
exp = pd.Series(exp_values, name='XX')
tm.assert_series_equal(result, exp)
# change dtype
# GH 14506 : Returned dtype changed from int32 to int64
result = s.apply(lambda x: x.hour)
exp = pd.Series(list(range(24)) + [0], name='XX', dtype=np.int64)
tm.assert_series_equal(result, exp)
# not vectorized
def f(x):
if not isinstance(x, pd.Timestamp):
raise ValueError
return str(x.tz)
result = s.map(f)
exp = pd.Series(['Asia/Tokyo'] * 25, name='XX')
tm.assert_series_equal(result, exp)
def test_apply_dict_depr(self):
tsdf = pd.DataFrame(np.random.randn(10, 3),
columns=['A', 'B', 'C'],
index=pd.date_range('1/1/2000', periods=10))
with tm.assert_produces_warning(FutureWarning):
tsdf.A.agg({'foo': ['sum', 'mean']})
class TestSeriesAggregate():
def test_transform(self, string_series):
# transforming functions
with np.errstate(all='ignore'):
f_sqrt = np.sqrt(string_series)
f_abs = np.abs(string_series)
# ufunc
result = string_series.transform(np.sqrt)
expected = f_sqrt.copy()
assert_series_equal(result, expected)
result = string_series.apply(np.sqrt)
assert_series_equal(result, expected)
# list-like
result = string_series.transform([np.sqrt])
expected = f_sqrt.to_frame().copy()
expected.columns = ['sqrt']
assert_frame_equal(result, expected)
result = string_series.transform([np.sqrt])
assert_frame_equal(result, expected)
result = string_series.transform(['sqrt'])
assert_frame_equal(result, expected)
# multiple items in list
# these are in the order as if we are applying both functions per
# series and then concatting
expected = pd.concat([f_sqrt, f_abs], axis=1)
expected.columns = ['sqrt', 'absolute']
result = string_series.apply([np.sqrt, np.abs])
assert_frame_equal(result, expected)
result = string_series.transform(['sqrt', 'abs'])
expected.columns = ['sqrt', 'abs']
assert_frame_equal(result, expected)
# dict, provide renaming
expected = pd.concat([f_sqrt, f_abs], axis=1)
expected.columns = ['foo', 'bar']
expected = expected.unstack().rename('series')
result = string_series.apply({'foo': np.sqrt, 'bar': np.abs})
assert_series_equal(result.reindex_like(expected), expected)
def test_transform_and_agg_error(self, string_series):
# we are trying to transform with an aggregator
def f():
string_series.transform(['min', 'max'])
pytest.raises(ValueError, f)
def f():
with np.errstate(all='ignore'):
string_series.agg(['sqrt', 'max'])
pytest.raises(ValueError, f)
def f():
with np.errstate(all='ignore'):
string_series.transform(['sqrt', 'max'])
pytest.raises(ValueError, f)
def f():
with np.errstate(all='ignore'):
string_series.agg({'foo': np.sqrt, 'bar': 'sum'})
pytest.raises(ValueError, f)
def test_demo(self):
# demonstration tests
s = Series(range(6), dtype='int64', name='series')
result = s.agg(['min', 'max'])
expected = Series([0, 5], index=['min', 'max'], name='series')
tm.assert_series_equal(result, expected)
result = s.agg({'foo': 'min'})
expected = Series([0], index=['foo'], name='series')
tm.assert_series_equal(result, expected)
# nested renaming
with tm.assert_produces_warning(FutureWarning):
result = s.agg({'foo': ['min', 'max']})
expected = DataFrame(
{'foo': [0, 5]},
index=['min', 'max']).unstack().rename('series')
tm.assert_series_equal(result, expected)
def test_multiple_aggregators_with_dict_api(self):
s = Series(range(6), dtype='int64', name='series')
# nested renaming
with tm.assert_produces_warning(FutureWarning):
result = s.agg({'foo': ['min', 'max'], 'bar': ['sum', 'mean']})
expected = DataFrame(
{'foo': [5.0, np.nan, 0.0, np.nan],
'bar': [np.nan, 2.5, np.nan, 15.0]},
columns=['foo', 'bar'],
index=['max', 'mean',
'min', 'sum']).unstack().rename('series')
tm.assert_series_equal(result.reindex_like(expected), expected)
def test_agg_apply_evaluate_lambdas_the_same(self, string_series):
# test that we are evaluating row-by-row first
# before vectorized evaluation
result = string_series.apply(lambda x: str(x))
expected = string_series.agg(lambda x: str(x))
tm.assert_series_equal(result, expected)
result = string_series.apply(str)
expected = string_series.agg(str)
tm.assert_series_equal(result, expected)
def test_with_nested_series(self, datetime_series):
# GH 2316
# .agg with a reducer and a transform, what to do
result = datetime_series.apply(lambda x: Series(
[x, x ** 2], index=['x', 'x^2']))
expected = DataFrame({'x': datetime_series,
'x^2': datetime_series ** 2})
tm.assert_frame_equal(result, expected)
result = datetime_series.agg(lambda x: Series(
[x, x ** 2], index=['x', 'x^2']))
tm.assert_frame_equal(result, expected)
def test_replicate_describe(self, string_series):
# this also tests a result set that is all scalars
expected = string_series.describe()
result = string_series.apply(OrderedDict(
[('count', 'count'),
('mean', 'mean'),
('std', 'std'),
('min', 'min'),
('25%', lambda x: x.quantile(0.25)),
('50%', 'median'),
('75%', lambda x: x.quantile(0.75)),
('max', 'max')]))
assert_series_equal(result, expected)
def test_reduce(self, string_series):
# reductions with named functions
result = string_series.agg(['sum', 'mean'])
expected = Series([string_series.sum(),
string_series.mean()],
['sum', 'mean'],
name=string_series.name)
assert_series_equal(result, expected)
def test_non_callable_aggregates(self):
# test agg using non-callable series attributes
s = Series([1, 2, None])
# Calling agg w/ just a string arg same as calling s.arg
result = s.agg('size')
expected = s.size
assert result == expected
# test when mixed w/ callable reducers
result = s.agg(['size', 'count', 'mean'])
expected = Series(OrderedDict([('size', 3.0),
('count', 2.0),
('mean', 1.5)]))
assert_series_equal(result[expected.index], expected)
@pytest.mark.parametrize("series, func, expected", chain(
_get_cython_table_params(Series(), [
('sum', 0),
('max', np.nan),
('min', np.nan),
('all', True),
('any', False),
('mean', np.nan),
('prod', 1),
('std', np.nan),
('var', np.nan),
('median', np.nan),
]),
_get_cython_table_params(Series([np.nan, 1, 2, 3]), [
('sum', 6),
('max', 3),
('min', 1),
('all', True),
('any', True),
('mean', 2),
('prod', 6),
('std', 1),
('var', 1),
('median', 2),
]),
_get_cython_table_params(Series('a b c'.split()), [
('sum', 'abc'),
('max', 'c'),
('min', 'a'),
('all', 'c'), # see GH12863
('any', 'a'),
]),
))
def test_agg_cython_table(self, series, func, expected):
# GH21224
# test reducing functions in
# pandas.core.base.SelectionMixin._cython_table
result = series.agg(func)
if tm.is_number(expected):
assert np.isclose(result, expected, equal_nan=True)
else:
assert result == expected
@pytest.mark.parametrize("series, func, expected", chain(
_get_cython_table_params(Series(), [
('cumprod', Series([], Index([]))),
('cumsum', Series([], Index([]))),
]),
_get_cython_table_params(Series([np.nan, 1, 2, 3]), [
('cumprod', Series([np.nan, 1, 2, 6])),
('cumsum', Series([np.nan, 1, 3, 6])),
]),
_get_cython_table_params(Series('a b c'.split()), [
('cumsum', Series(['a', 'ab', 'abc'])),
]),
))
def test_agg_cython_table_transform(self, series, func, expected):
# GH21224
# test transforming functions in
# pandas.core.base.SelectionMixin._cython_table (cumprod, cumsum)
result = series.agg(func)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("series, func, expected", chain(
_get_cython_table_params(Series('a b c'.split()), [
('mean', TypeError), # mean raises TypeError
('prod', TypeError),
('std', TypeError),
('var', TypeError),
('median', TypeError),
('cumprod', TypeError),
])
))
def test_agg_cython_table_raises(self, series, func, expected):
# GH21224
with pytest.raises(expected):
# e.g. Series('a b'.split()).cumprod() will raise
series.agg(func)
class TestSeriesMap():
def test_map(self, datetime_series):
index, data = tm.getMixedTypeDict()
source = Series(data['B'], index=data['C'])
target = Series(data['C'][:4], index=data['D'][:4])
merged = target.map(source)
for k, v in compat.iteritems(merged):
assert v == source[target[k]]
# input could be a dict
merged = target.map(source.to_dict())
for k, v in compat.iteritems(merged):
assert v == source[target[k]]
# function
result = datetime_series.map(lambda x: x * 2)
tm.assert_series_equal(result, datetime_series * 2)
# GH 10324
a = Series([1, 2, 3, 4])
b = Series(["even", "odd", "even", "odd"], dtype="category")
c = Series(["even", "odd", "even", "odd"])
exp = Series(["odd", "even", "odd", np.nan], dtype="category")
tm.assert_series_equal(a.map(b), exp)
exp = Series(["odd", "even", "odd", np.nan])
tm.assert_series_equal(a.map(c), exp)
a = Series(['a', 'b', 'c', 'd'])
b = Series([1, 2, 3, 4],
index=pd.CategoricalIndex(['b', 'c', 'd', 'e']))
c = Series([1, 2, 3, 4], index=Index(['b', 'c', 'd', 'e']))
exp = Series([np.nan, 1, 2, 3])
tm.assert_series_equal(a.map(b), exp)
exp = Series([np.nan, 1, 2, 3])
tm.assert_series_equal(a.map(c), exp)
a = Series(['a', 'b', 'c', 'd'])
b = Series(['B', 'C', 'D', 'E'], dtype='category',
index=pd.CategoricalIndex(['b', 'c', 'd', 'e']))
c = Series(['B', 'C', 'D', 'E'], index=Index(['b', 'c', 'd', 'e']))
exp = Series(pd.Categorical([np.nan, 'B', 'C', 'D'],
categories=['B', 'C', 'D', 'E']))
tm.assert_series_equal(a.map(b), exp)
exp = Series([np.nan, 'B', 'C', 'D'])
tm.assert_series_equal(a.map(c), exp)
@pytest.mark.parametrize("index", tm.all_index_generator(10))
def test_map_empty(self, index):
s = Series(index)
result = s.map({})
expected = pd.Series(np.nan, index=s.index)
tm.assert_series_equal(result, expected)
def test_map_compat(self):
# related GH 8024
s = Series([True, True, False], index=[1, 2, 3])
result = s.map({True: 'foo', False: 'bar'})
expected = Series(['foo', 'foo', 'bar'], index=[1, 2, 3])
assert_series_equal(result, expected)
def test_map_int(self):
left = Series({'a': 1., 'b': 2., 'c': 3., 'd': 4})
right = Series({1: 11, 2: 22, 3: 33})
assert left.dtype == np.float_
assert issubclass(right.dtype.type, np.integer)
merged = left.map(right)
assert merged.dtype == np.float_
assert isna(merged['d'])
assert not isna(merged['c'])
def test_map_type_inference(self):
s = Series(lrange(3))
s2 = s.map(lambda x: np.where(x == 0, 0, 1))
assert issubclass(s2.dtype.type, np.integer)
def test_map_decimal(self, string_series):
from decimal import Decimal
result = string_series.map(lambda x: Decimal(str(x)))
assert result.dtype == np.object_
assert isinstance(result[0], Decimal)
def test_map_na_exclusion(self):
s = Series([1.5, np.nan, 3, np.nan, 5])
result = s.map(lambda x: x * 2, na_action='ignore')
exp = s * 2
assert_series_equal(result, exp)
def test_map_dict_with_tuple_keys(self):
"""
Due to new MultiIndex-ing behaviour in v0.14.0,
dicts with tuple keys passed to map were being
converted to a multi-index, preventing tuple values
from being mapped properly.
"""
# GH 18496
df = pd.DataFrame({'a': [(1, ), (2, ), (3, 4), (5, 6)]})
label_mappings = {(1, ): 'A', (2, ): 'B', (3, 4): 'A', (5, 6): 'B'}
df['labels'] = df['a'].map(label_mappings)
df['expected_labels'] = pd.Series(['A', 'B', 'A', 'B'], index=df.index)
# All labels should be filled now
tm.assert_series_equal(df['labels'], df['expected_labels'],
check_names=False)
def test_map_counter(self):
s = Series(['a', 'b', 'c'], index=[1, 2, 3])
counter = Counter()
counter['b'] = 5
counter['c'] += 1
result = s.map(counter)
expected = Series([0, 5, 1], index=[1, 2, 3])
assert_series_equal(result, expected)
def test_map_defaultdict(self):
s = Series([1, 2, 3], index=['a', 'b', 'c'])
default_dict = defaultdict(lambda: 'blank')
default_dict[1] = 'stuff'
result = s.map(default_dict)
expected = Series(['stuff', 'blank', 'blank'], index=['a', 'b', 'c'])
assert_series_equal(result, expected)
def test_map_dict_subclass_with_missing(self):
"""
Test Series.map with a dictionary subclass that defines __missing__,
i.e. sets a default value (GH #15999).
"""
class DictWithMissing(dict):
def __missing__(self, key):
return 'missing'
s = Series([1, 2, 3])
dictionary = DictWithMissing({3: 'three'})
result = s.map(dictionary)
expected = Series(['missing', 'missing', 'three'])
assert_series_equal(result, expected)
def test_map_dict_subclass_without_missing(self):
class DictWithoutMissing(dict):
pass
s = Series([1, 2, 3])
dictionary = DictWithoutMissing({3: 'three'})
result = s.map(dictionary)
expected = Series([np.nan, np.nan, 'three'])
assert_series_equal(result, expected)
def test_map_box(self):
vals = [pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02')]
s = pd.Series(vals)
assert s.dtype == 'datetime64[ns]'
# boxed value must be Timestamp instance
res = s.map(lambda x: '{0}_{1}_{2}'.format(x.__class__.__name__,
x.day, x.tz))
exp = pd.Series(['Timestamp_1_None', 'Timestamp_2_None'])
tm.assert_series_equal(res, exp)
vals = [pd.Timestamp('2011-01-01', tz='US/Eastern'),
pd.Timestamp('2011-01-02', tz='US/Eastern')]
s = pd.Series(vals)
assert s.dtype == 'datetime64[ns, US/Eastern]'
res = s.map(lambda x: '{0}_{1}_{2}'.format(x.__class__.__name__,
x.day, x.tz))
exp = pd.Series(['Timestamp_1_US/Eastern', 'Timestamp_2_US/Eastern'])
tm.assert_series_equal(res, exp)
# timedelta
vals = [pd.Timedelta('1 days'), pd.Timedelta('2 days')]
s = pd.Series(vals)
assert s.dtype == 'timedelta64[ns]'
res = s.map(lambda x: '{0}_{1}'.format(x.__class__.__name__, x.days))
exp = pd.Series(['Timedelta_1', 'Timedelta_2'])
tm.assert_series_equal(res, exp)
# period
vals = [pd.Period('2011-01-01', freq='M'),
pd.Period('2011-01-02', freq='M')]
s = pd.Series(vals)
assert s.dtype == 'Period[M]'
res = s.map(lambda x: '{0}_{1}'.format(x.__class__.__name__,
x.freqstr))
exp = pd.Series(['Period_M', 'Period_M'])
tm.assert_series_equal(res, exp)
def test_map_categorical(self):
values = pd.Categorical(list('ABBABCD'), categories=list('DCBA'),
ordered=True)
s = pd.Series(values, name='XX', index=list('abcdefg'))
result = s.map(lambda x: x.lower())
exp_values = pd.Categorical(list('abbabcd'), categories=list('dcba'),
ordered=True)
exp = pd.Series(exp_values, name='XX', index=list('abcdefg'))
tm.assert_series_equal(result, exp)
tm.assert_categorical_equal(result.values, exp_values)
result = s.map(lambda x: 'A')
exp = pd.Series(['A'] * 7, name='XX', index=list('abcdefg'))
tm.assert_series_equal(result, exp)
assert result.dtype == np.object
with pytest.raises(NotImplementedError):
s.map(lambda x: x, na_action='ignore')
def test_map_datetimetz(self):
values = pd.date_range('2011-01-01', '2011-01-02',
freq='H').tz_localize('Asia/Tokyo')
s = pd.Series(values, name='XX')
# keep tz
result = s.map(lambda x: x + pd.offsets.Day())
exp_values = pd.date_range('2011-01-02', '2011-01-03',
freq='H').tz_localize('Asia/Tokyo')
exp = pd.Series(exp_values, name='XX')
tm.assert_series_equal(result, exp)
# change dtype
# GH 14506 : Returned dtype changed from int32 to int64
result = s.map(lambda x: x.hour)
exp = pd.Series(list(range(24)) + [0], name='XX', dtype=np.int64)
tm.assert_series_equal(result, exp)
with pytest.raises(NotImplementedError):
s.map(lambda x: x, na_action='ignore')
# not vectorized
def f(x):
if not isinstance(x, pd.Timestamp):
raise ValueError
return str(x.tz)
result = s.map(f)
exp = pd.Series(['Asia/Tokyo'] * 25, name='XX')
tm.assert_series_equal(result, exp)
@pytest.mark.parametrize("vals,mapping,exp", [
(list('abc'), {np.nan: 'not NaN'}, [np.nan] * 3 + ['not NaN']),
(list('abc'), {'a': 'a letter'}, ['a letter'] + [np.nan] * 3),
(list(range(3)), {0: 42}, [42] + [np.nan] * 3)])
def test_map_missing_mixed(self, vals, mapping, exp):
# GH20495
s = pd.Series(vals + [np.nan])
result = s.map(mapping)
tm.assert_series_equal(result, pd.Series(exp))
| bsd-3-clause |
dupontke/unique_angle_calc | unique_angle_calc_plotting.py | 1 | 2374 | #!/Library/Frameworks/Python.framework/Versions/2.7/bin/python
# USAGE:
# PREAMBLE:
import numpy as np
import sys
import os
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
import matplotlib as mpl
from plotting_functions import *
from sel_list import *
zeros = np.zeros
dat = sys.argv[1]
system = sys.argv[2]
nSel = len(sel)
bin_size = 0.01
k = 0.001987 # Kcal K^-1 mol^-1
T = 300. # K
kT = k*T
boltz = 2*kT
four_pi = 4*np.pi
# ----------------------------------------
# MAIN PROGRAM:
# ----------------------------------------
# Load in data_file into a numpy array
datalist = np.loadtxt(dat)
nSteps = len(datalist[:,0])
print 'Number of selections: %d, number of steps: %d' %(nSel,nSteps)
time = np.zeros(nSteps)
for i in range(nSteps):
time[i] = i*0.002 # units of time in ns; each frame is separated by 0.002 ns
rows = datalist.shape[0]
columns = datalist.shape[1]
if columns != nSel:
sys.exit()
print "Number of data columns:", columns
print "Number of data rows:", rows
for i in range(nSel):
selection = sel[i][3]
scat_hist(time[:],datalist[:,i],'k','Time (ns)','Angle','%02d.%s.%s' %(i,selection,system),'angle',yunits='$\deg$')
hist1d(datalist[:,i],'Angle','%02d.%s.%s' %(i,selection,system),'angle',norm=True,xunits='$\deg$')
# Loop through each column and create a histogram and the probability density
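	# The density below is normalized as prob[k] = count_k / (bin_size * rows),
	# so sum(prob) * bin_size is approximately 1 over the sampled range.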
out1 = open('%02d.%s.%s.unique_angle_calc.prob_density_hist.dat' %(i,selection,system),'w')
# determine domain of data
max_val = np.amax(datalist[:,i])
min_val = np.amin(datalist[:,i])
num_bins = int((max_val-min_val)/bin_size)+1
	# allocate probability and bin-center (half_bin) arrays
prob = zeros((num_bins),dtype=np.float)
half_bin = zeros((num_bins),dtype=np.float)
# create histogram of data
for j in range(rows):
current_bin = int((datalist[j,i] - min_val)/bin_size)
prob[current_bin] += 1.
# finish probability density
prob /= bin_size*rows
# obtain x_axis values for scatter plot
for j in range(num_bins):
half_bin[j] = min_val+(j*bin_size)+(bin_size/2)
# write out to prob_density_hist.dat file
for j in range(num_bins):
out1.write('%10.6f \n' %(prob[j]))
out1.close()
# scatter plot for probability density
plot_1d(half_bin,prob,'k','Angle','Probability Density','%02d.%s.%s' %(i,selection,system),'angle_prob_density',xunits='$\deg$')
| gpl-3.0 |
rodrigo-e/Graphic-Tool-Research | km.py | 1 | 17727 | from __future__ import division
import numpy as np
from collections import defaultdict
import json
import itertools
from sklearn import cluster, preprocessing, manifold
from datetime import datetime
import sys
class KeplerMapper(object):
# With this class you can build topological networks from (high-dimensional) data.
#
# 1) Fit a projection/lens/function to a dataset and transform it.
# For instance "mean_of_row(x) for x in X"
# 2) Map this projection with overlapping intervals/hypercubes.
# Cluster the points inside the interval
# (Note: we cluster on the inverse image/original data to lessen projection loss).
# If two clusters/nodes have the same members (due to the overlap), then:
# connect these with an edge.
# 3) Visualize the network using HTML and D3.js.
#
# functions
# ---------
# fit_transform: Create a projection (lens) from a dataset
# map: Apply Mapper algorithm on this projection and build a simplicial complex
# visualize: Turns the complex dictionary into a HTML/D3.js visualization
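    #
    # Minimal usage sketch (illustrative; `data` stands for any 2-D numpy array):
    #
    #   mapper = KeplerMapper(verbose=1)
    #   lens = mapper.fit_transform(data, projection="sum")
    #   graph = mapper.map(lens, inverse_X=data, nr_cubes=10, overlap_perc=0.1)
    #   mapper.visualize(graph, path_html="mapper_output.html", title="My Data")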
def __init__(self, verbose=2):
self.verbose = verbose
self.chunk_dist = []
self.overlap_dist = []
self.d = []
self.nr_cubes = 0
self.overlap_perc = 0
self.clusterer = False
def fit_transform(self, X, projection="sum", scaler=preprocessing.MinMaxScaler()):
# Creates the projection/lens from X.
#
# Input: X. Input features as a numpy array.
# Output: projected_X. original data transformed to a projection (lens).
#
# parameters
# ----------
# projection: Projection parameter is either a string,
# a scikit class with fit_transform, like manifold.TSNE(),
# or a list of dimension indices.
# scaler: if None, do no scaling, else apply scaling to the projection
# Default: Min-Max scaling
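        #
        # Illustrative values (not exhaustive):
        #   projection="mean"           -> per-row mean lens
        #   projection=manifold.TSNE()  -> any scikit-learn class with fit_transform
        #   projection=[0, 1]           -> keep only columns 0 and 1 of X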
self.scaler = scaler
self.projection = str(projection)
# Detect if projection is a class (for scikit-learn)
if str(type(projection))[1:6] == "class": #TODO: de-ugly-fy
reducer = projection
if self.verbose > 0:
try:
projection.set_params(**{"verbose":self.verbose})
except:
pass
print("\n..Projecting data using: \n\t%s\n"%str(projection))
X = reducer.fit_transform(X)
# Detect if projection is a string (for standard functions)
if isinstance(projection, str):
if self.verbose > 0:
print("\n..Projecting data using: %s"%(projection))
# Stats lenses
if projection == "sum": # sum of row
X = np.sum(X, axis=1).reshape((X.shape[0],1))
if projection == "mean": # mean of row
X = np.mean(X, axis=1).reshape((X.shape[0],1))
if projection == "median": # mean of row
X = np.median(X, axis=1).reshape((X.shape[0],1))
if projection == "max": # max of row
X = np.max(X, axis=1).reshape((X.shape[0],1))
if projection == "min": # min of row
X = np.min(X, axis=1).reshape((X.shape[0],1))
if projection == "std": # std of row
X = np.std(X, axis=1).reshape((X.shape[0],1))
if projection == "dist_mean": # Distance of x to mean of X
X_mean = np.mean(X, axis=0)
X = np.sum(np.sqrt((X - X_mean)**2), axis=1).reshape((X.shape[0],1))
# Detect if projection is a list (with dimension indices)
if isinstance(projection, list):
if self.verbose > 0:
print("\n..Projecting data using: %s"%(str(projection)))
X = X[:,np.array(projection)]
# Scaling
if scaler is not None:
if self.verbose > 0:
print("\n..Scaling with: %s\n"%str(scaler))
X = scaler.fit_transform(X)
return X
def map(self, projected_X, inverse_X=None, clusterer=cluster.DBSCAN(eps=0.5,min_samples=3), nr_cubes=10, overlap_perc=0.1):
# This maps the data to a simplicial complex. Returns a dictionary with nodes and links.
#
# Input: projected_X. A Numpy array with the projection/lens.
# Output: complex. A dictionary with "nodes", "links" and "meta information"
#
# parameters
# ----------
# projected_X projected_X. A Numpy array with the projection/lens. Required.
# inverse_X Numpy array or None. If None then the projection itself is used for clustering.
# clusterer Scikit-learn API compatible clustering algorithm. Default: DBSCAN
# nr_cubes Int. The number of intervals/hypercubes to create.
# overlap_perc Float. The percentage of overlap "between" the intervals/hypercubes.
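        #
        # Illustrative call (variable names are placeholders):
        #   graph = mapper.map(lens, inverse_X=X, nr_cubes=10, overlap_perc=0.1)
        # The returned dictionary has the keys "nodes", "links" and "meta".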
start = datetime.now()
# Helper function
def cube_coordinates_all(nr_cubes, nr_dimensions):
# Helper function to get origin coordinates for our intervals/hypercubes
# Useful for looping no matter the number of cubes or dimensions
# Example: if there are 4 cubes per dimension and 3 dimensions
# return the bottom left (origin) coordinates of 64 hypercubes,
# as a sorted list of Numpy arrays
# TODO: elegance-ify...
l = []
for x in range(nr_cubes):
l += [x] * nr_dimensions
return [np.array(list(f)) for f in sorted(set(itertools.permutations(l,nr_dimensions)))]
nodes = defaultdict(list)
links = defaultdict(list)
complex = {}
self.nr_cubes = nr_cubes
self.clusterer = clusterer
self.overlap_perc = overlap_perc
if self.verbose > 0:
print("Mapping on data shaped %s using dimensions\n"%(str(projected_X.shape)))
# If inverse image is not provided, we use the projection as the inverse image (suffer projection loss)
if inverse_X is None:
inverse_X = projected_X
# We chop up the min-max column ranges into 'nr_cubes' parts
self.chunk_dist = (np.max(projected_X, axis=0) - np.min(projected_X, axis=0))/nr_cubes
# We calculate the overlapping windows distance
self.overlap_dist = self.overlap_perc * self.chunk_dist
# We find our starting point
self.d = np.min(projected_X, axis=0)
# Use a dimension index array on the projected X
# (For now this uses the entire dimensionality, but we keep for experimentation)
di = np.array([x for x in range(projected_X.shape[1])])
# Prefix'ing the data with ID's
ids = np.array([x for x in range(projected_X.shape[0])])
projected_X = np.c_[ids,projected_X]
inverse_X = np.c_[ids,inverse_X]
# Subdivide the projected data X in intervals/hypercubes with overlap
if self.verbose > 0:
total_cubes = len(cube_coordinates_all(nr_cubes,projected_X.shape[1]))
print("Creating %s hypercubes."%total_cubes)
for i, coor in enumerate(cube_coordinates_all(nr_cubes,di.shape[0])):
# Slice the hypercube
hypercube = projected_X[ np.invert(np.any((projected_X[:,di+1] >= self.d[di] + (coor * self.chunk_dist[di])) &
(projected_X[:,di+1] < self.d[di] + (coor * self.chunk_dist[di]) + self.chunk_dist[di] + self.overlap_dist[di]) == False, axis=1 )) ]
if self.verbose > 1:
print("There are %s points in cube_%s / %s with starting range %s"%
(hypercube.shape[0],i,total_cubes,self.d[di] + (coor * self.chunk_dist[di])))
# If at least one sample inside the hypercube
if hypercube.shape[0] > 0:
# Cluster the data point(s) in the cube, skipping the id-column
# Note that we apply clustering on the inverse image (original data samples) that fall inside the cube.
inverse_x = inverse_X[[int(nn) for nn in hypercube[:,0]]]
clusterer.fit(inverse_x[:,1:])
if self.verbose > 1:
print("Found %s clusters in cube_%s\n"%(np.unique(clusterer.labels_[clusterer.labels_ > -1]).shape[0],i))
#Now for every (sample id in cube, predicted cluster label)
for a in np.c_[hypercube[:,0],clusterer.labels_]:
if a[1] != -1: #if not predicted as noise
cluster_id = str(coor[0])+"_"+str(i)+"_"+str(a[1])+"_"+str(coor)+"_"+str(self.d[di] + (coor * self.chunk_dist[di])) # TODO: de-rudimentary-ify
nodes[cluster_id].append( int(a[0]) ) # Append the member id's as integers
else:
if self.verbose > 1:
print("Cube_%s is empty.\n"%(i))
# Create links when clusters from different hypercubes have members with the same sample id.
candidates = itertools.combinations(nodes.keys(),2)
for candidate in candidates:
# if there are non-unique members in the union
if len(nodes[candidate[0]]+nodes[candidate[1]]) != len(set(nodes[candidate[0]]+nodes[candidate[1]])):
links[candidate[0]].append( candidate[1] )
# Reporting
if self.verbose > 0:
nr_links = 0
for k in links:
nr_links += len(links[k])
print("\ncreated %s edges and %s nodes in %s."%(nr_links,len(nodes),str(datetime.now()-start)))
complex["nodes"] = nodes
complex["links"] = links
complex["meta"] = self.projection
return complex
def visualize(self, complex, color_function="", path_html="mapper_visualization_output.html", title="My Data",
graph_link_distance=30, graph_gravity=0.1, graph_charge=-120, custom_tooltips=None, width_html=0,
height_html=0, show_tooltips=True, show_title=True, show_meta=True):
# Turns the dictionary 'complex' in a html file with d3.js
#
# Input: complex. Dictionary (output from calling .map())
# Output: a HTML page saved as a file in 'path_html'.
#
# parameters
# ----------
# color_function string. Not fully implemented. Default: "" (distance to origin)
# path_html file path as string. Where to save the HTML page.
# title string. HTML page document title and first heading.
# graph_link_distance int. Edge length.
# graph_gravity float. "Gravity" to center of layout.
# graph_charge int. charge between nodes.
# custom_tooltips None or Numpy Array. You could use "y"-label array for this.
# width_html int. Width of canvas. Default: 0 (full width)
# height_html int. Height of canvas. Default: 0 (full height)
# show_tooltips bool. default:True
# show_title bool. default:True
# show_meta bool. default:True
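        #
        # Illustrative call (assumes `graph` came from .map(); `y` is an optional
        # per-sample label array used for tooltips):
        #   mapper.visualize(graph, path_html="out.html", title="My Data", custom_tooltips=y)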
# Format JSON for D3 graph
json_s = {}
json_s["nodes"] = []
json_s["links"] = []
k2e = {} # a key to incremental int dict, used for id's when linking
for e, k in enumerate(complex["nodes"]):
# Tooltip and node color formatting, TODO: de-mess-ify
if custom_tooltips is not None:
tooltip_s = "<h2>Cluster %s</h2>"%k + " ".join([str(f) for f in custom_tooltips[complex["nodes"][k]]])
if color_function == "average_signal_cluster":
tooltip_i = int(((sum([f for f in custom_tooltips[complex["nodes"][k]]]) / len(custom_tooltips[complex["nodes"][k]])) * 30) )
json_s["nodes"].append({"name": str(k), "tooltip": tooltip_s, "group": 2 * int(np.log(len(complex["nodes"][k]))), "color": str(tooltip_i)})
else:
json_s["nodes"].append({"name": str(k), "tooltip": tooltip_s, "group": 2 * int(np.log(len(complex["nodes"][k]))), "color": str(k.split("_")[0])})
else:
tooltip_s = "<h2>Cluster %s</h2>Contains %s members."%(k,len(complex["nodes"][k]))
json_s["nodes"].append({"name": str(k), "tooltip": tooltip_s, "group": 2 * int(np.log(len(complex["nodes"][k]))), "color": str(k.split("_")[0])})
k2e[k] = e
for k in complex["links"]:
for link in complex["links"][k]:
json_s["links"].append({"source": k2e[k], "target":k2e[link],"value":1})
# Width and height of graph in HTML output
if width_html == 0:
width_css = "100%"
width_js = 'document.getElementById("holder").offsetWidth-20'
else:
width_css = "%spx" % width_html
width_js = "%s" % width_html
if height_html == 0:
height_css = "100%"
height_js = 'document.getElementById("holder").offsetHeight-20'
else:
height_css = "%spx" % height_html
height_js = "%s" % height_html
# Whether to show certain UI elements or not
if show_tooltips == False:
tooltips_display = "display: none;"
else:
tooltips_display = ""
if show_meta == False:
meta_display = "display: none;"
else:
meta_display = ""
if show_title == False:
title_display = "display: none;"
else:
title_display = ""
with open(path_html,"wb") as outfile:
html = """<!DOCTYPE html>
<meta charset="utf-8">
<meta name="generator" content="KeplerMapper">
<title>%s | KeplerMapper</title>
<link href='https://fonts.googleapis.com/css?family=Roboto:700,300' rel='stylesheet' type='text/css'>
<style>
* {margin: 0; padding: 0;}
html { height: 100%%;}
body {background: #111; height: 100%%; font: 100 16px Roboto, Sans-serif;}
.link { stroke: #999; stroke-opacity: .333; }
.divs div { border-radius: 50%%; background: red; position: absolute; }
.divs { position: absolute; top: 0; left: 0; }
#holder { position: relative; width: %s; height: %s; background: #111; display: block;}
h1 { %s padding: 20px; color: #fafafa; text-shadow: 0px 1px #000,0px -1px #000; position: absolute; font: 300 30px Roboto, Sans-serif;}
h2 { text-shadow: 0px 1px #000,0px -1px #000; font: 700 16px Roboto, Sans-serif;}
.meta { position: absolute; opacity: 0.9; width: 220px; top: 80px; left: 20px; display: block; %s background: #000; line-height: 25px; color: #fafafa; border: 20px solid #000; font: 100 16px Roboto, Sans-serif;}
div.tooltip { position: absolute; width: 380px; display: block; %s padding: 20px; background: #000; border: 0px; border-radius: 3px; pointer-events: none; z-index: 999; color: #FAFAFA;}
}
</style>
<body>
<div id="holder">
<h1>%s</h1>
<p class="meta">
<b>Lens</b><br>%s<br><br>
<b>Cubes per dimension</b><br>%s<br><br>
<b>Overlap percentage</b><br>%s%%<br><br>
<b>Color Function</b><br>%s( %s )<br><br>
<b>Clusterer</b><br>%s<br><br>
<b>Scaler</b><br>%s
</p>
</div>
<script src="https://cdnjs.cloudflare.com/ajax/libs/d3/3.5.5/d3.min.js"></script>
<script>
var width = %s,
height = %s;
var color = d3.scale.ordinal()
.domain(["0","1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13","14","15","16","17","18","19","20","21","22","23","24","25","26","27","28","29","30"])
.range(["#FF0000","#FF1400","#FF2800","#FF3c00","#FF5000","#FF6400","#FF7800","#FF8c00","#FFa000","#FFb400","#FFc800","#FFdc00","#FFf000","#fdff00","#b0ff00","#65ff00","#17ff00","#00ff36","#00ff83","#00ffd0","#00e4ff","#00c4ff","#00a4ff","#00a4ff","#0084ff","#0064ff","#0044ff","#0022ff","#0002ff","#0100ff","#0300ff","#0500ff"]);
var force = d3.layout.force()
.charge(%s)
.linkDistance(%s)
.gravity(%s)
.size([width, height]);
var svg = d3.select("#holder").append("svg")
.attr("width", width)
.attr("height", height);
var div = d3.select("#holder").append("div")
.attr("class", "tooltip")
.style("opacity", 0.0);
var divs = d3.select('#holder').append('div')
.attr('class', 'divs')
.attr('style', function(d) { return 'overflow: hidden; width: ' + width + 'px; height: ' + height + 'px;'; });
graph = %s;
force
.nodes(graph.nodes)
.links(graph.links)
.start();
var link = svg.selectAll(".link")
.data(graph.links)
.enter().append("line")
.attr("class", "link")
.style("stroke-width", function(d) { return Math.sqrt(d.value); });
var node = divs.selectAll('div')
.data(graph.nodes)
.enter().append('div')
.on("mouseover", function(d) {
div.transition()
.duration(200)
.style("opacity", .9);
div .html(d.tooltip + "<br/>")
.style("left", (d3.event.pageX + 100) + "px")
.style("top", (d3.event.pageY - 28) + "px");
})
.on("mouseout", function(d) {
div.transition()
.duration(500)
.style("opacity", 0);
})
.call(force.drag);
node.append("title")
.text(function(d) { return d.name; });
force.on("tick", function() {
link.attr("x1", function(d) { return d.source.x; })
.attr("y1", function(d) { return d.source.y; })
.attr("x2", function(d) { return d.target.x; })
.attr("y2", function(d) { return d.target.y; });
node.attr("cx", function(d) { return d.x; })
.attr("cy", function(d) { return d.y; })
.attr('style', function(d) { return 'width: ' + (d.group * 2) + 'px; height: ' + (d.group * 2) + 'px; ' + 'left: '+(d.x-(d.group))+'px; ' + 'top: '+(d.y-(d.group))+'px; background: '+color(d.color)+'; box-shadow: 0px 0px 3px #111; box-shadow: 0px 0px 33px '+color(d.color)+', inset 0px 0px 5px rgba(0, 0, 0, 0.2);'})
;
});
</script>"""%(title,width_css, height_css, title_display, meta_display, tooltips_display, title,complex["meta"],self.nr_cubes,self.overlap_perc*100,color_function,complex["meta"],str(self.clusterer),str(self.scaler),width_js,height_js,graph_charge,graph_link_distance,graph_gravity,json.dumps(json_s))
outfile.write(html.encode("utf-8"))
if self.verbose > 0:
print("\nWrote d3.js graph to '%s'"%path_html) | mit |
Fusion-Data-Platform/fdf | fdf/factory.py | 2 | 39038 | # -*- coding: utf-8 -*-
"""
Root module for the FDF package.
**Classes**
* Machine - root class for the FDF package
* Shot - shot container class
* Logbook - logbook connection class
* Container - diagnostic container class
* Node - mdsplus signal node class
"""
"""
Created on Thu Jun 18 10:38:40 2015
@author: ktritz
"""
import xml.etree.ElementTree as ET
import sys, os, importlib
import fdf_globals
from fdf_signal import Signal
import numpy as np
import datetime as dt
#import modules # I think this import is unused - DRS 10/17/15
from collections import MutableMapping, Mapping
import MDSplus as mds
import types
import inspect
import pymssql
import matplotlib.pyplot as plt
FDF_DIR = fdf_globals.FDF_DIR
MDS_SERVERS = fdf_globals.MDS_SERVERS
EVENT_SERVERS = fdf_globals.EVENT_SERVERS
LOGBOOK_CREDENTIALS = fdf_globals.LOGBOOK_CREDENTIALS
FdfError = fdf_globals.FdfError
machineAlias = fdf_globals.machineAlias
class Machine(MutableMapping):
"""
Factory root class that contains shot objects and MDS access methods.
Note that fdf.factory.Machine is exposed in fdf.__init__, so fdf.Machine
is valid.
**Usage**::
>>> import fdf
>>> nstx = fdf.Machine('nstx')
>>> nstx.s140000.logbook()
>>> nstx.addshots(xp=1048)
>>> nstx.s140000.mpts.plot()
>>> nstx.listshot()
Machine class contains a model shot object: nstx.s0
Shot data can be accessed directly through the Machine class::
>>> nstx.s141398
>>> nstx.s141399
Alternatively, a list of shot #'s may be provided during initialization::
>>> nstx = Machine(name='nstx', shotlist=[141398, 141399])
Or added later using the method addshot()::
>>> nstx.addshot([141398, 141399])
"""
# Maintain a dictionary of cached MDS server connections to speed up
# access for multiple shots and trees. This is a static class variable
# to avoid proliferation of MDS server connections
_connections = []
_parent = None
_modules = None
def __init__(self, name='nstx', shotlist=[], xp=[], date=[]):
self._shots = {} # shot dictionary with shot number (int) keys
self._classlist = {} # unused as of 10/14/2015, DRS
self._name = machineAlias(name)
self._logbook = Logbook(name=self._name, root=self)
self.s0 = Shot(0, root=self, parent=self)
self._eventConnection = mds.Connection(EVENT_SERVERS[self._name])
        if len(self._connections) == 0:
print('Precaching MDS server connections...')
for _ in range(2):
try:
connection = mds.Connection(MDS_SERVERS[self._name])
connection.tree = None
self._connections.append(connection)
except:
msg = 'MDSplus connection to {} failed'.format(
MDS_SERVERS[self._name])
raise FdfError(msg)
print('Finished.')
if shotlist or xp or date:
self.addshot(shotlist=shotlist, xp=xp, date=date)
def __getattr__(self, name):
# used for attribute referencing: s = nstx.s140000
try:
shot = int(name.split('s')[1])
if (shot not in self._shots):
self._shots[shot] = Shot(shot, root=self, parent=self)
return self._shots[shot]
except:
raise AttributeError("'{}' object has no attribute '{}'".format(
type(self), name))
def __repr__(self):
return '<machine {}>'.format(self._name.upper())
def __iter__(self):
return iter(self._shots.values())
def __contains__(self, value):
return value in self._shots
def __len__(self):
return len(self._shots.keys())
def __delitem__(self, item):
self._shots.__delitem__(item)
def __getitem__(self, item):
# used for dictionary referencing: s = nstx[140000]
# note that getitem fails to catch missing key,
# but getattr does catch missing key
if item == 0:
return self.s0
return self._shots[item]
def __setitem__(self, item, value):
pass
def __dir__(self):
d = ['s0']
d.extend(['s{}'.format(shot) for shot in self._shots])
return d
def _get_connection(self, shot, tree):
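        # Reuse a cached connection that already has (tree, shot) open; otherwise
        # recycle the least-recently-used connection and open the requested tree.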
for connection in self._connections:
if connection.tree == (tree, shot):
self._connections.remove(connection)
self._connections.insert(0, connection)
return connection
connection = self._connections.pop()
try:
connection.closeAllTrees()
except:
pass
try:
connection.openTree(tree, shot)
connection.tree = (tree, shot)
except:
connection.tree = (None, None)
finally:
self._connections.insert(0, connection)
return connection
def _get_mdsdata(self, signal):
# shot = base_container(signal)._parent.shot
shot = signal.shot
        if shot == 0:
print('No MDS data exists for model tree')
return None
connection = self._get_connection(shot, signal._mdstree)
try:
data = connection.get(signal._mdsnode)
except:
msg = "MDSplus connection error for tree '{}' and node '{}'".format(
signal._mdstree, signal._mdsnode)
raise FdfError(msg)
try:
if signal._raw_of is not None:
data = data.raw_of()
except:
pass
try:
if signal._dim_of is not None:
data = data.dim_of()
except:
pass
data = data.value_of().value
try:
if signal._transpose is not None:
data = data.transpose(signal._transpose)
except:
pass
try:
data = signal._postprocess(data)
except:
pass
return data
def _get_modules(self):
if self._modules is None:
module_dir = os.path.join(FDF_DIR, 'modules')
self._modules = [module for module in os.listdir(module_dir)
if os.path.isdir(os.path.join(module_dir, module)) and
                             module[0] != '_']
return self._modules
def addshot(self, shotlist=[], date=[], xp=[], verbose=False):
"""
Load shots into the Machine class
**Usage**
>>> nstx.addshot([140000 140001])
>>> nstx.addshot(xp=1032)
>>> nstx.addshot(date=20100817, verbose=True)
Note: You can reference shots even if the shots have not been loaded.
"""
if not iterable(shotlist):
shotlist = [shotlist]
if not iterable(xp):
xp = [xp]
if not iterable(date):
date = [date]
shots = []
if shotlist:
shots.extend([shotlist])
if xp:
shots.extend(self._logbook.get_shotlist(xp=xp,
verbose=verbose))
if date:
shots.extend(self._logbook.get_shotlist(date=date,
verbose=verbose))
for shot in np.unique(shots):
if shot not in self._shots:
self._shots[shot] = Shot(shot, root=self, parent=self)
def addxp(self, xp=[], verbose=False):
"""
        Add all shots for one or more XPs
**Usage**
>>> nstx.addxp(1032)
>>> nstx.addxp(xp=1013)
>>> nstx.addxp([1042, 1016])
"""
self.addshot(xp=xp, verbose=verbose)
def adddate(self, date=[], verbose=False):
"""
Add all shots for one or more dates (format YYYYMMDD)
**Usage**
>>> nstx.adddate(date=20100817)
"""
self.addshot(date=date, verbose=verbose)
def list_shots(self):
for shotnum in self._shots:
shotObj = self._shots[shotnum]
print('{} in XP {} on {}'.format(
shotObj.shot, shotObj.xp, shotObj.date))
def get_shotlist(self, date=[], xp=[], verbose=False):
"""
Get a list of shots
**Usage**
>>> shots = nstx.get_shotlist(xp=1013)
"""
return self._logbook.get_shotlist(date=date, xp=xp, verbose=verbose)
def filter_shots(self, date=[], xp=[]):
"""
Get a Machine-like object with an immutable shotlist for XP(s)
or date(s)
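        **Usage**
        >>> xp1013 = nstx.filter_shots(xp=1013)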
"""
self.addshot(xp=xp, date=date)
return ImmutableMachine(xp=xp, date=date, parent=self)
def setevent(self, event, shot_number=None, data=None):
event_data = bytearray()
if shot_number is not None:
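            # Encode the shot number as four little-endian base-256 bytes.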
shot_data = shot_number // 256**np.arange(4) % 256
event_data.extend(shot_data.astype(np.ubyte))
if data is not None:
event_data.extend(str(data))
mdsdata = mds.mdsdata.makeData(np.array(event_data))
event_string = 'setevent("{}", {})'.format(event, mdsdata)
status = self._eventConnection.get(event_string)
return status
def wfevent(self, event, timeout=0):
event_string = 'kind(_data=wfevent("{}",*,{})) == 0BU ? "timeout"' \
': _data'.format(event, timeout)
data = self._eventConnection.get(event_string).value
if type(data) is str:
raise FdfError('Timeout after {}s in wfevent'.format(timeout))
if not data.size:
return None
if data.size > 3:
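            # The first four bytes encode the shot number (little-endian base 256);
            # the remainder is the event payload, returned as a string.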
shot_data = data[0:4]
shot_number = np.sum(shot_data * 256**np.arange(4))
data = data[4:]
return shot_number, ''.join(map(chr, data))
return data
def logbook(self):
"""
Print logbook entries for all shots
"""
for shotnum in self._shots:
shotObj = self._shots[shotnum]
shotObj.logbook()
class ImmutableMachine(Mapping):
def __init__(self, xp=[], date=[], parent=None):
self._shots = {}
self._parent = parent
shotlist = self._parent.get_shotlist(xp=xp, date=date)
for shot in shotlist:
self._shots[shot] = getattr(self._parent, 's{}'.format(shot))
def __getattr__(self, name):
try:
shot = int(name.split('s')[1])
return self._shots[shot]
except:
raise AttributeError("'{}' object has no attribute '{}'".format(
type(self), name))
def __repr__(self):
return '<immutable machine {}>'.format(self._name.upper())
def __iter__(self):
return iter(self._shots.values())
def __contains__(self, value):
return value in self._shots
def __len__(self):
return len(self._shots.keys())
def __getitem__(self, item):
pass
def __dir__(self):
return ['s{}'.format(shot) for shot in self._shots]
def logbook(self):
for shotnum in self._shots:
shotObj = self._shots[shotnum]
shotObj.logbook()
def list_shots(self):
for shotnum in self._shots:
shotObj = self._shots[shotnum]
print('{} in XP {} on {}'.format(
shotObj.shot, shotObj.xp, shotObj.date))
class Shot(MutableMapping):
def __init__(self, shot, root=None, parent=None):
self.shot = shot
self._shotobj = self
self._root = root
self._parent = parent
try:
self._logbook = self._root._logbook
except:
txt = 'No logbook connection for shot {}'.format(self.shot)
raise FdfError(txt)
self._logbook_entries = []
self._modules = {module: None for module in root._get_modules()}
self.xp = self._get_xp()
self.date = self._get_date()
self._efits = []
def __getattr__(self, attribute):
# first see if the attribute is in the Machine object
try:
attr = getattr(self._parent, attribute)
if inspect.ismethod(attr):
return types.MethodType(attr.im_func, self)
else:
return attr
except:
pass # failed, so check other locations
if attribute in self._modules:
if self._modules[attribute] is None:
self._modules[attribute] = Factory(attribute, root=self._root,
shot=self.shot, parent=self)
return self._modules[attribute]
raise AttributeError("Shot object has no attribute '{}'".format(attribute))
def __repr__(self):
return '<Shot {}>'.format(self.shot)
def __iter__(self):
# return iter(self._modules.values())
return iter(self._modules)
def __contains__(self, value):
return value in self._modules
def __len__(self):
return len(self._modules.keys())
def __delitem__(self, item):
pass
def __getitem__(self, item):
return self._modules[item]
def __setitem__(self, item, value):
pass
def __dir__(self):
return self._modules.keys()
def _get_xp(self):
# query logbook for XP, return XP (list if needed)
if self._logbook and not self._logbook_entries:
self._logbook_entries = self._logbook.get_entries(shot=self.shot)
xplist = []
for entry in self._logbook_entries:
xplist.append(entry['xp'])
return np.unique(xplist)
def _get_date(self):
# query logbook for rundate, return rundate
if self._logbook and not self._logbook_entries:
self._logbook_entries = self._logbook.get_entries(shot=self.shot)
date = 0
if self._logbook_entries:
date = self._logbook_entries[0]['rundate']
return date
def logbook(self):
# print a list of logbook entries
print('Logbook entries for {}'.format(self.shot))
if not self._logbook_entries:
self._logbook_entries = self._logbook.get_entries(shot=self.shot)
for entry in self._logbook_entries:
print('************************************')
print(('{shot} on {rundate} in XP {xp}\n'
'{username} in topic {topic}\n\n'
'{text}').format(**entry))
print('************************************')
def plot(self, overwrite=False, label=None, multi=False):
if not overwrite and not multi:
plt.figure()
plt.subplot(1, 1, 1)
if self.shape != self.time.shape:
            msg = 'Dimension mismatch: {}\n shape data {} shape time {}'.format(
self._name, self.shape, self.time.shape)
raise FdfError(msg)
if self.size==0 or self.time.size==0:
msg = 'Empty data and/or time axis: {}\n shape data {} shape time {}'.format(
self._name, self.shape, self.time.shape)
raise FdfError(msg)
plt.plot(self.time[:], self[:], label=label)
title = self._title if self._title else self._name
if not overwrite or multi:
plt.suptitle('Shot #{}'.format(self.shot), x=0.5, y=1.00,
fontsize=12, horizontalalignment='center')
plt.ylabel('{} ({})'.format(self._name.upper(), self.units))
plt.title('{} {}'.format(self._container.upper(), title),
fontsize=12)
plt.xlabel('{} ({})'.format(self.time._name.capitalize(),
self.time.units))
plt.legend()
plt.show()
def check_efit(self):
if len(self._efits):
return self._efits
trees = ['efit{}'.format(str(index).zfill(2)) for index in range(1, 7)]
trees.extend(['lrdfit{}'.format(str(index).zfill(2))
for index in range(1, 13)])
tree_exists = []
for tree in trees:
data = None
connection = self._get_connection(self.shot, tree)
try:
data = connection.get('\{}::userid'.format(tree)).value
except:
pass
            if data and data != '*':
tree_exists.append(tree)
self._efits = tree_exists
return self._efits
class Logbook(object):
def __init__(self, name='nstx', root=None):
self._name = name.lower()
self._root = root
self._credentials = {}
self._table = ''
self._shotlist_query_prefix = ''
self._shot_query_prefix = ''
self._logbook_connection = None
self._make_logbook_connection()
# dict of cached logbook entries
# kw is shot, value is list of logbook entries
self.logbook = {}
def _make_logbook_connection(self):
self._credentials = LOGBOOK_CREDENTIALS[self._name]
self._table = self._credentials['table']
self._shotlist_query_prefix = (
'SELECT DISTINCT rundate, shot, xp, voided '
'FROM {} WHERE voided IS null').format(self._table)
self._shot_query_prefix = (
'SELECT dbkey, username, rundate, shot, xp, topic, text, entered, voided '
'FROM {} WHERE voided IS null').format(self._table)
try:
self._logbook_connection = pymssql.connect(
server=self._credentials['server'],
user=self._credentials['username'],
password=self._credentials['password'],
database=self._credentials['database'],
port=self._credentials['port'],
as_dict=True)
except:
print('Attempting logbook server connection as drsmith')
try:
self._logbook_connection = pymssql.connect(
server=self._credentials['server'],
user='drsmith',
password=self._credentials['password'],
database=self._credentials['database'],
port=self._credentials['port'],
as_dict=True)
except:
txt = '{} logbook connection failed. '.format(self._name.upper())
txt = txt + 'Server credentials:'
for key in self._credentials:
txt = txt + ' {0}:{1}'.format(key, self._credentials[key])
raise FdfError(txt)
def _get_cursor(self):
try:
cursor = self._logbook_connection.cursor()
cursor.execute('SET ROWCOUNT 500')
except:
raise FdfError('Cursor error')
return cursor
def _shot_query(self, shot=[]):
cursor = self._get_cursor()
if shot and not iterable(shot):
shot = [shot]
for sh in shot:
if sh not in self.logbook:
query = ('{0} and shot={1} '
'ORDER BY shot ASC, entered ASC'
).format(self._shot_query_prefix, sh)
cursor.execute(query)
rows = cursor.fetchall() # list of logbook entries
for row in rows:
rundate = repr(row['rundate'])
year = rundate[0:4]
month = rundate[4:6]
day = rundate[6:8]
row['rundate'] = dt.date(int(year), int(month), int(day))
self.logbook[sh] = rows
def get_shotlist(self, date=[], xp=[], verbose=False):
# return list of shots for date and/or XP
cursor = self._get_cursor()
rows = []
shotlist = [] # start with empty shotlist
date_list = date
if not iterable(date_list): # if it's just a single date
date_list = [date_list] # put it into a list
for date in date_list:
query = ('{0} and rundate={1} ORDER BY shot ASC'.
format(self._shotlist_query_prefix, date))
cursor.execute(query)
rows.extend(cursor.fetchall())
xp_list = xp
if not iterable(xp_list): # if it's just a single xp
xp_list = [xp_list] # put it into a list
for xp in xp_list:
query = ('{0} and xp={1} ORDER BY shot ASC'.
format(self._shotlist_query_prefix, xp))
cursor.execute(query)
rows.extend(cursor.fetchall())
for row in rows:
rundate = repr(row['rundate'])
year = rundate[0:4]
month = rundate[4:6]
day = rundate[6:8]
row['rundate'] = dt.date(int(year), int(month), int(day))
if verbose:
print('date {}'.format(rows[0]['rundate']))
for row in rows:
print(' {shot} in XP {xp}'.format(**row))
# add shots to shotlist
shotlist.extend([row['shot'] for row in rows
if row['shot'] is not None])
cursor.close()
return np.unique(shotlist)
def get_entries(self, shot=[], date=[], xp=[]):
        # return list of logbook entries (dictionaries) for shot(s)
if shot and not iterable(shot):
shot = [shot]
if xp or date:
shot.extend(self.get_shotlist(date=date, xp=xp))
if shot:
self._shot_query(shot=shot)
entries = []
for sh in np.unique(shot):
if sh in self.logbook:
entries.extend(self.logbook[sh])
return entries
_tree_dict = {}
def Factory(module_branch, root=None, shot=None, parent=None):
global _tree_dict
"""
Factory method
"""
try:
module_branch = module_branch.lower()
module_list = module_branch.split('.')
module = module_list[-1]
branch_str = ''.join([word.capitalize() for word in module_list])
if module_branch not in _tree_dict:
module_path = os.path.join(FDF_DIR, 'modules', *module_list)
parse_tree = ET.parse(os.path.join(module_path,
''.join([module, '.xml'])))
module_tree = parse_tree.getroot()
_tree_dict[module_branch] = module_tree
ContainerClassName = ''.join(['Container', branch_str])
if ContainerClassName not in Container._classes:
ContainerClass = type(ContainerClassName, (Container,), {})
init_class(ContainerClass, _tree_dict[module_branch], root=root,
container=module, classparent=parent.__class__)
Container._classes[ContainerClassName] = ContainerClass
else:
ContainerClass = Container._classes[ContainerClassName]
return ContainerClass(_tree_dict[module_branch], shot=shot,
parent=parent, top=True)
except None:
print("{} not found in modules directory".format(module))
raise
class Container(object):
"""
Container class
"""
_instances = {}
_classes = {}
def __init__(self, module_tree, top=False, **kwargs):
cls = self.__class__
self._signals = {}
self._containers = {}
self._subcontainers = {}
self._title = module_tree.get('title')
self._desc = module_tree.get('desc')
for read_only in ['parent']:
setattr(self, '_'+read_only, kwargs.get(read_only, None))
try:
self.shot = kwargs['shot']
self._mdstree = kwargs['mdstree']
except:
pass
if self.shot is not None:
try:
cls._instances[cls][self.shot].append(self)
except:
cls._instances[cls][self.shot] = [self]
if top:
self._get_subcontainers()
for node in module_tree.findall('node'):
NodeClassName = ''.join(['Node', cls._name.capitalize()])
if NodeClassName not in cls._classes:
NodeClass = type(NodeClassName, (Node, cls), {})
cls._classes[NodeClassName] = NodeClass
else:
NodeClass = cls._classes[NodeClassName]
NodeClass._mdstree = parse_mdstree(self, node)
setattr(self, node.get('name'), NodeClass(node, parent=self))
for element in module_tree.findall('axis'):
signal_list = parse_signal(self, element)
branch_str = self._get_branchstr()
for signal_dict in signal_list:
SignalClassName = ''.join(['Axis', branch_str])
if SignalClassName not in cls._classes:
SignalClass = type(SignalClassName, (Signal, cls), {})
parse_method(SignalClass, element)
cls._classes[SignalClassName] = SignalClass
else:
SignalClass = cls._classes[SignalClassName]
SignalObj = SignalClass(**signal_dict)
refs = parse_refs(self, element, SignalObj._transpose)
if not refs:
refs = SignalObj.axes
for axis, ref in zip(SignalObj.axes, refs):
setattr(SignalObj, axis, getattr(self, '_'+ref))
setattr(self, ''.join(['_', signal_dict['_name']]), SignalObj)
for branch in module_tree.findall('container'):
name = branch.get('name')
branch_str = self._get_branchstr()
ContainerClassName = ''.join(['Container', branch_str,
name.capitalize()])
if ContainerClassName not in cls._classes:
ContainerClass = type(ContainerClassName, (cls, Container), {})
init_class(ContainerClass, branch, classparent=cls)
cls._classes[ContainerClassName] = ContainerClass
else:
ContainerClass = cls._classes[ContainerClassName]
ContainerObj = ContainerClass(branch, parent=self)
setattr(self, name, ContainerObj)
self._containers[name] = ContainerObj
for element in module_tree.findall('signal'):
signal_list = parse_signal(self, element)
branch_str = self._get_branchstr()
for signal_dict in signal_list:
# name = element.get('name').format('').capitalize()
SignalClassName = ''.join(['Signal', branch_str])
if SignalClassName not in cls._classes:
SignalClass = type(SignalClassName, (Signal, cls), {})
parse_method(SignalClass, element)
cls._classes[SignalClassName] = SignalClass
else:
SignalClass = cls._classes[SignalClassName]
SignalObj = SignalClass(**signal_dict)
refs = parse_refs(self, element, SignalObj._transpose)
if not refs:
refs = SignalObj.axes
for axis, ref in zip(SignalObj.axes, refs):
setattr(SignalObj, axis, getattr(self, '_'+ref))
setattr(self, signal_dict['_name'], SignalObj)
self._signals[signal_dict['_name']] = SignalObj
if top and hasattr(self, '_preprocess'):
self._preprocess()
def __getattr__(self, attribute):
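# (added descriptive comment) Attribute resolution order: first any lazily
# built sub-container for this branch, then delegation to the parent
# container; the parent's signals and public container attributes are
# deliberately refused so that each level only exposes its own data.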
try:
if self._subcontainers[attribute] is None:
branch_path = '.'.join([self._get_branch(), attribute])
self._subcontainers[attribute] = \
Factory(branch_path, root=self._root,
shot=self.shot, parent=self)
return self._subcontainers[attribute]
except KeyError:
pass
if not hasattr(self, '_parent') or self._parent is None:
raise AttributeError("Attribute '{}' not found".format(attribute))
if hasattr(self._parent, '_signals') and \
attribute in self._parent._signals:
raise AttributeError("Attribute '{}' not found".format(attribute))
attr = getattr(self._parent, attribute)
if Container in attr.__class__.mro() and attribute[0] != '_':
raise AttributeError("Attribute '{}' not found".format(attribute))
if inspect.ismethod(attr):
return types.MethodType(attr.im_func, self)
else:
return attr
def _get_subcontainers(self):
if len(self._subcontainers) == 0:
container_dir = self._get_path()
if not os.path.isdir(container_dir):
return
files = os.listdir(container_dir)
self._subcontainers = {container: None for container in
files if os.path.isdir(
os.path.join(container_dir, container)) and
container[0] != '_'}
@classmethod
def _get_path(cls):
branch = cls._get_branch().split('.')
path = os.path.join(FDF_DIR, 'modules')
for step in branch:
newpath = os.path.join(path, step)
if not os.path.isdir(newpath):
break
path = newpath
return path
def __dir__(self):
# print('in dir')
items = self.__dict__.keys()
items.extend(self.__class__.__dict__.keys())
if Signal not in self.__class__.mro():
items.extend(self._subcontainers.keys())
return [item for item in set(items).difference(self._base_items)
if item[0] != '_']
def __iter__(self):
if not len(self._signals):
items = self._containers.values()
# items.extend(self._subcontainers.values())
else:
items = self._signals.values()
return iter(items)
@classmethod
def _get_branch(cls):
branch = cls._name
parent = cls._classparent
while parent is not Shot and parent.__class__ is not Shot:
branch = '.'.join([parent._name, branch])
parent = parent._classparent
return branch
@classmethod
def _get_branchstr(cls):
branch = cls._get_branch()
return ''.join([sub.capitalize() for sub in branch.split('.')])
def init_class(cls, module_tree, **kwargs):
cls._name = module_tree.get('name')
if cls not in cls._instances:
cls._instances[cls] = {}
for read_only in ['root', 'container', 'classparent']:
try:
setattr(cls, '_'+read_only, kwargs[read_only])
# print(cls._name, read_only, kwargs.get(read_only, 'Not there'))
except KeyError:
pass
for item in ['mdstree', 'mdspath', 'units']:
getitem = module_tree.get(item)
if getitem is not None:
setattr(cls, '_'+item, getitem)
cls._base_items = set(cls.__dict__.keys())
parse_method(cls, module_tree)
def parse_method(obj, module_tree):
objpath = obj._get_path()
sys.path.insert(0, objpath)
for method in module_tree.findall('method'):
method_name = method.text
if method_name is None:
method_name = method.get('name')
module = method.get('module')
if module is None:
module = method_name
method_in_module = method.get('method_in_module')
if method_in_module is None:
method_in_module = method_name
module_object = importlib.import_module(module)
method_from_object = module_object.__getattribute__(method_in_module)
setattr(obj, method_name, method_from_object)
sys.path.pop(0)
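# Illustrative example (hypothetical XML, not taken from the shipped module
# files):
#
#     <method name="myfft" module="fft_utils" method_in_module="compute_fft"/>
#
# would make parse_method import fft_utils from the container's module
# directory and attach fft_utils.compute_fft to the class as obj.myfft.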
def base_container(container):
parent_container = container
while type(parent_container._parent) is not Shot:
parent_container = parent_container._parent
return parent_container
def parse_signal(obj, element):
units = parse_units(obj, element)
axes, transpose = parse_axes(obj, element)
number_range = element.get('range')
if number_range is None:
name = element.get('name')
title = element.get('title')
desc = element.get('desc')
mdspath, dim_of = parse_mdspath(obj, element)
mdstree = parse_mdstree(obj, element)
error = parse_error(obj, element)
signal_dict = [{'_name': name, 'units': units, 'axes': axes,
'_mdsnode': mdspath, '_mdstree': mdstree,
'_dim_of': dim_of, '_error': error, '_parent': obj,
'_transpose': transpose, '_title': title,
'_desc': desc}]
else:
number_list = number_range.split(',')
len_number_list = len(number_list)
if len_number_list == 1:
start = 0
end = int(number_list[0])
else:
start = int(number_list[0])
end = int(number_list[1])+1
signal_dict = []
if len_number_list == 3:
# 3rd item, if present, controls zero padding (cf. BES and magnetics)
digits = int(number_list[2])
else:
digits = int(np.ceil(np.log10(end-1)))
for index in range(start, end):
name = element.get('name').format(str(index).zfill(digits))
title = None
if element.get('title'):
title = element.get('title').format(str(index).zfill(digits))
desc = None
if element.get('desc'):
desc = element.get('desc').format(str(index).zfill(digits))
mdspath, dim_of = parse_mdspath(obj, element)
mdspath = mdspath.format(str(index).zfill(digits))
mdstree = parse_mdstree(obj, element)
error = parse_error(obj, element)
signal_dict.append({'_name': name, 'units': units, 'axes': axes,
'_mdsnode': mdspath, '_mdstree': mdstree,
'_dim_of': dim_of, '_error': error,
'_parent': obj, '_transpose': transpose,
'_title': title, '_desc': desc})
return signal_dict
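# Illustrative example (hypothetical element; attribute values are assumptions):
#
#     <signal name="ch{}" range="1,4" mdsnode="CH{}" axes="time"/>
#
# yields one dict per channel, ch1..ch4, with the zero-padded index formatted
# into both the name and the MDS node; a third range item, if present, fixes
# the padding width instead of inferring it from the end value.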
def parse_axes(obj, element):
axes = []
transpose = None
time_ind = 0
try:
axes = [axis.strip() for axis in element.get('axes').split(',')]
if 'time' in axes:
time_ind = axes.index('time')
if time_ind != 0:
transpose = list(range(len(axes)))
transpose.pop(time_ind)
transpose.insert(0, time_ind)
axes.pop(time_ind)
axes.insert(0, 'time')
except AttributeError:
pass
return axes, transpose
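# Example (illustrative): axes="radius, time" returns axes=['time', 'radius']
# and transpose=[1, 0], so the signal data can later be transposed to put the
# time axis first.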
def parse_refs(obj, element, transpose=None):
refs = None
try:
refs = [ref.strip() for ref in element.get('axes_refs').split(',')]
if transpose is not None:
refs = [refs[index] for index in transpose]
except AttributeError:
pass
return refs
def parse_units(obj, element):
units = element.get('units')
if units is None:
try:
units = obj.units
except AttributeError:
pass
return units
def parse_error(obj, element):
error = element.get('error')
if error is not None:
mdspath = element.get('mdspath')
if mdspath is None:
try:
mdspath = obj._mdspath
error = '.'.join([mdspath, error])
except AttributeError:
pass
else:
error = '.'.join([mdspath, error])
return error
_path_dict = {}
def parse_mdspath(obj, element):
global _path_dict
key = (type(obj), element)
try:
return _path_dict[key]
except KeyError:
mdspath = element.get('mdspath')
try:
dim_of = int(element.get('dim_of'))
except (TypeError, ValueError):
dim_of = None
if mdspath is None:
try:
mdspath = obj._mdspath
except AttributeError:
pass
if mdspath is not None:
mdspath = '.'.join([mdspath, element.get('mdsnode')])
else:
mdspath = element.get('mdsnode')
_path_dict[key] = (mdspath, dim_of)
return mdspath, dim_of
def parse_mdstree(obj, element):
mdstree = element.get('mdstree')
if mdstree is None and hasattr(obj, '_mdstree'):
mdstree = obj._mdstree
return mdstree
def iterable(obj):
try:
iter(obj)
if type(obj) is str:
return False
return True
except TypeError:
return False
class Node(object):
"""
Node class
"""
def __init__(self, element, parent=None):
self._parent = parent
self._name = element.get('name')
self._mdsnode = parse_mdspath(self, element)
self._data = None
self._title = element.get('title')
self._desc = element.get('desc')
self.units = element.get('units')
def __repr__(self):
if self._data is None:
self._data = self._get_mdsdata()
return str(self._data)
def __getattr__(self, attribute):
if attribute == '_parent':
raise AttributeError("'{}' object has no attribute '{}'".format(
type(self), attribute))
if self._parent is None:
raise AttributeError("'{}' object has no attribute '{}'".format(
type(self), attribute))
attr = getattr(self._parent, attribute)
if inspect.ismethod(attr):
return types.MethodType(attr.im_func, self)
else:
return attr
if __name__ == '__main__':
nstx = Machine()
s = nstx.s141000
s.bes.ch01.myfft()
# s.bes.ch01.fft2()
| mit |
RPGOne/scikit-learn | sklearn/utils/random.py | 46 | 10523 | # Author: Hamzeh Alsalhi <[email protected]>
#
# License: BSD 3 clause
from __future__ import division
import numpy as np
import scipy.sparse as sp
import operator
import array
from sklearn.utils import check_random_state
from sklearn.utils.fixes import astype
from ._random import sample_without_replacement
__all__ = ['sample_without_replacement', 'choice']
# This is a backport of np.random.choice from numpy 1.7
# The function can be removed when we bump the requirements to >=1.7
def choice(a, size=None, replace=True, p=None, random_state=None):
"""
choice(a, size=None, replace=True, p=None)
Generates a random sample from a given 1-D array
.. versionadded:: 1.7.0
Parameters
-----------
a : 1-D array-like or int
If an ndarray, a random sample is generated from its elements.
If an int, the random sample is generated as if a was np.arange(n)
size : int or tuple of ints, optional
Output shape. Default is None, in which case a single value is
returned.
replace : boolean, optional
Whether the sample is with or without replacement.
p : 1-D array-like, optional
The probabilities associated with each entry in a.
If not given the sample assumes a uniform distribution over all
entries in a.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
--------
samples : 1-D ndarray, shape (size,)
The generated random samples
Raises
-------
ValueError
If a is an int and less than zero, if a or p are not 1-dimensional,
if a is an array-like of size 0, if p is not a vector of
probabilities, if a and p have different lengths, or if
replace=False and the sample size is greater than the population
size
See Also
---------
randint, shuffle, permutation
Examples
---------
Generate a uniform random sample from np.arange(5) of size 3:
>>> np.random.choice(5, 3) # doctest: +SKIP
array([0, 3, 4])
>>> #This is equivalent to np.random.randint(0,5,3)
Generate a non-uniform random sample from np.arange(5) of size 3:
>>> np.random.choice(5, 3, p=[0.1, 0, 0.3, 0.6, 0]) # doctest: +SKIP
array([3, 3, 0])
Generate a uniform random sample from np.arange(5) of size 3 without
replacement:
>>> np.random.choice(5, 3, replace=False) # doctest: +SKIP
array([3,1,0])
>>> #This is equivalent to np.random.shuffle(np.arange(5))[:3]
Generate a non-uniform random sample from np.arange(5) of size
3 without replacement:
>>> np.random.choice(5, 3, replace=False, p=[0.1, 0, 0.3, 0.6, 0])
... # doctest: +SKIP
array([2, 3, 0])
Any of the above can be repeated with an arbitrary array-like
instead of just integers. For instance:
>>> aa_milne_arr = ['pooh', 'rabbit', 'piglet', 'Christopher']
>>> np.random.choice(aa_milne_arr, 5, p=[0.5, 0.1, 0.1, 0.3])
... # doctest: +SKIP
array(['pooh', 'pooh', 'pooh', 'Christopher', 'piglet'],
dtype='|S11')
"""
random_state = check_random_state(random_state)
# Format and Verify input
a = np.array(a, copy=False)
if a.ndim == 0:
try:
# __index__ must return an integer by python rules.
pop_size = operator.index(a.item())
except TypeError:
raise ValueError("a must be 1-dimensional or an integer")
if pop_size <= 0:
raise ValueError("a must be greater than 0")
elif a.ndim != 1:
raise ValueError("a must be 1-dimensional")
else:
pop_size = a.shape[0]
if pop_size == 0:
raise ValueError("a must be non-empty")
if p is not None:
p = np.array(p, dtype=np.double, ndmin=1, copy=False)
if p.ndim != 1:
raise ValueError("p must be 1-dimensional")
if p.size != pop_size:
raise ValueError("a and p must have same size")
if np.any(p < 0):
raise ValueError("probabilities are not non-negative")
if not np.allclose(p.sum(), 1):
raise ValueError("probabilities do not sum to 1")
shape = size
if shape is not None:
size = np.prod(shape, dtype=np.intp)
else:
size = 1
# Actual sampling
if replace:
if p is not None:
cdf = p.cumsum()
cdf /= cdf[-1]
uniform_samples = random_state.random_sample(shape)
idx = cdf.searchsorted(uniform_samples, side='right')
# searchsorted returns a scalar
idx = np.array(idx, copy=False)
else:
idx = random_state.randint(0, pop_size, size=shape)
else:
if size > pop_size:
raise ValueError("Cannot take a larger sample than "
"population when 'replace=False'")
if p is not None:
if np.sum(p > 0) < size:
raise ValueError("Fewer non-zero entries in p than size")
n_uniq = 0
p = p.copy()
found = np.zeros(shape, dtype=np.int)
flat_found = found.ravel()
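# (added comment) Weighted sampling without replacement: repeatedly zero
# the probabilities of indices already drawn, rebuild the CDF, and draw the
# remaining slots until `size` unique indices have been collected.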
while n_uniq < size:
x = random_state.rand(size - n_uniq)
if n_uniq > 0:
p[flat_found[0:n_uniq]] = 0
cdf = np.cumsum(p)
cdf /= cdf[-1]
new = cdf.searchsorted(x, side='right')
_, unique_indices = np.unique(new, return_index=True)
unique_indices.sort()
new = new.take(unique_indices)
flat_found[n_uniq:n_uniq + new.size] = new
n_uniq += new.size
idx = found
else:
idx = random_state.permutation(pop_size)[:size]
if shape is not None:
idx.shape = shape
if shape is None and isinstance(idx, np.ndarray):
# In most cases a scalar will have been made an array
idx = idx.item(0)
# Use samples as indices for a if a is array-like
if a.ndim == 0:
return idx
if shape is not None and idx.ndim == 0:
# If size == () then the user requested a 0-d array as opposed to
# a scalar object when size is None. However a[idx] is always a
# scalar and not an array. So this makes sure the result is an
# array, taking into account that np.array(item) may not work
# for object arrays.
res = np.empty((), dtype=a.dtype)
res[()] = a[idx]
return res
return a[idx]
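# Usage sketch (illustrative): the backport mirrors np.random.choice but takes
# an explicit random_state for reproducibility, e.g.
#
#     from sklearn.utils.random import choice
#     choice(5, size=3, replace=False, random_state=0)
#
# draws three distinct integers from range(5) with a fixed seed.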
def random_choice_csc(n_samples, classes, class_probability=None,
random_state=None):
"""Generate a sparse random matrix given column class distributions
Parameters
----------
n_samples : int,
Number of samples to draw in each column.
classes : list of size n_outputs of arrays of size (n_classes,)
List of classes for each column.
class_probability : list of size n_outputs of arrays of size (n_classes,)
Optional (default=None). Class distribution of each column. If None the
uniform distribution is assumed.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
random_matrix : sparse csc matrix of size (n_samples, n_outputs)
"""
data = array.array('i')
indices = array.array('i')
indptr = array.array('i', [0])
for j in range(len(classes)):
classes[j] = np.asarray(classes[j])
if classes[j].dtype.kind != 'i':
raise ValueError("class dtype %s is not supported" %
classes[j].dtype)
classes[j] = astype(classes[j], np.int64, copy=False)
# use uniform distribution if no class_probability is given
if class_probability is None:
class_prob_j = np.empty(shape=classes[j].shape[0])
class_prob_j.fill(1 / classes[j].shape[0])
else:
class_prob_j = np.asarray(class_probability[j])
if not np.isclose(np.sum(class_prob_j), 1.0):
raise ValueError("Probability array at index {0} does not sum to "
"one".format(j))
if class_prob_j.shape[0] != classes[j].shape[0]:
raise ValueError("classes[{0}] (length {1}) and "
"class_probability[{0}] (length {2}) have "
"different length.".format(j,
classes[j].shape[0],
class_prob_j.shape[0]))
# If 0 is not present in the classes insert it with a probability 0.0
if 0 not in classes[j]:
classes[j] = np.insert(classes[j], 0, 0)
class_prob_j = np.insert(class_prob_j, 0, 0.0)
# If there are nonzero classes choose randomly using class_probability
rng = check_random_state(random_state)
if classes[j].shape[0] > 1:
p_nonzero = 1 - class_prob_j[classes[j] == 0]
nnz = int(n_samples * p_nonzero)
ind_sample = sample_without_replacement(n_population=n_samples,
n_samples=nnz,
random_state=random_state)
indices.extend(ind_sample)
# Normalize probabilities for the nonzero elements
classes_j_nonzero = classes[j] != 0
class_probability_nz = class_prob_j[classes_j_nonzero]
class_probability_nz_norm = (class_probability_nz /
np.sum(class_probability_nz))
classes_ind = np.searchsorted(class_probability_nz_norm.cumsum(),
rng.rand(nnz))
data.extend(classes[j][classes_j_nonzero][classes_ind])
indptr.append(len(indices))
return sp.csc_matrix((data, indices, indptr),
(n_samples, len(classes)),
dtype=int)
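# Usage sketch (illustrative; the arrays below are made-up example inputs):
#
#     import numpy as np
#     from sklearn.utils.random import random_choice_csc
#     classes = [np.array([0, 1]), np.array([0, 2])]
#     probs = [np.array([0.5, 0.5]), np.array([0.1, 0.9])]
#     Y = random_choice_csc(10, classes, class_probability=probs,
#                           random_state=0)
#
# Only the nonzero draws are stored, which is why the classes must be integers
# and class 0 is implicitly inserted with probability 0 when it is absent.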
| bsd-3-clause |
joshloyal/scikit-learn | sklearn/metrics/tests/test_pairwise.py | 42 | 27323 | import numpy as np
from numpy import linalg
from scipy.sparse import dok_matrix, csr_matrix, issparse
from scipy.spatial.distance import cosine, cityblock, minkowski, wminkowski
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import ignore_warnings
from sklearn.externals.six import iteritems
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.metrics.pairwise import manhattan_distances
from sklearn.metrics.pairwise import linear_kernel
from sklearn.metrics.pairwise import chi2_kernel, additive_chi2_kernel
from sklearn.metrics.pairwise import polynomial_kernel
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.metrics.pairwise import laplacian_kernel
from sklearn.metrics.pairwise import sigmoid_kernel
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.metrics.pairwise import cosine_distances
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import pairwise_distances_argmin_min
from sklearn.metrics.pairwise import pairwise_distances_argmin
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.metrics.pairwise import PAIRWISE_KERNEL_FUNCTIONS
from sklearn.metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS
from sklearn.metrics.pairwise import PAIRWISE_BOOLEAN_FUNCTIONS
from sklearn.metrics.pairwise import PAIRED_DISTANCES
from sklearn.metrics.pairwise import check_pairwise_arrays
from sklearn.metrics.pairwise import check_paired_arrays
from sklearn.metrics.pairwise import paired_distances
from sklearn.metrics.pairwise import paired_euclidean_distances
from sklearn.metrics.pairwise import paired_manhattan_distances
from sklearn.preprocessing import normalize
from sklearn.exceptions import DataConversionWarning
def test_pairwise_distances():
# Test the pairwise_distances helper function.
rng = np.random.RandomState(0)
# Euclidean distance should be equivalent to calling the function.
X = rng.random_sample((5, 4))
S = pairwise_distances(X, metric="euclidean")
S2 = euclidean_distances(X)
assert_array_almost_equal(S, S2)
# Euclidean distance, with Y != X.
Y = rng.random_sample((2, 4))
S = pairwise_distances(X, Y, metric="euclidean")
S2 = euclidean_distances(X, Y)
assert_array_almost_equal(S, S2)
# Test with tuples as X and Y
X_tuples = tuple([tuple([v for v in row]) for row in X])
Y_tuples = tuple([tuple([v for v in row]) for row in Y])
S2 = pairwise_distances(X_tuples, Y_tuples, metric="euclidean")
assert_array_almost_equal(S, S2)
# "cityblock" uses scikit-learn metric, cityblock (function) is
# scipy.spatial.
S = pairwise_distances(X, metric="cityblock")
S2 = pairwise_distances(X, metric=cityblock)
assert_equal(S.shape[0], S.shape[1])
assert_equal(S.shape[0], X.shape[0])
assert_array_almost_equal(S, S2)
# The manhattan metric should be equivalent to cityblock.
S = pairwise_distances(X, Y, metric="manhattan")
S2 = pairwise_distances(X, Y, metric=cityblock)
assert_equal(S.shape[0], X.shape[0])
assert_equal(S.shape[1], Y.shape[0])
assert_array_almost_equal(S, S2)
# Low-level function for manhattan can divide in blocks to avoid
# using too much memory during the broadcasting
S3 = manhattan_distances(X, Y, size_threshold=10)
assert_array_almost_equal(S, S3)
# Test cosine as a string metric versus cosine callable
# The string "cosine" uses sklearn.metric,
# while the function cosine is scipy.spatial
S = pairwise_distances(X, Y, metric="cosine")
S2 = pairwise_distances(X, Y, metric=cosine)
assert_equal(S.shape[0], X.shape[0])
assert_equal(S.shape[1], Y.shape[0])
assert_array_almost_equal(S, S2)
# Test with sparse X and Y,
# currently only supported for Euclidean, L1 and cosine.
X_sparse = csr_matrix(X)
Y_sparse = csr_matrix(Y)
S = pairwise_distances(X_sparse, Y_sparse, metric="euclidean")
S2 = euclidean_distances(X_sparse, Y_sparse)
assert_array_almost_equal(S, S2)
S = pairwise_distances(X_sparse, Y_sparse, metric="cosine")
S2 = cosine_distances(X_sparse, Y_sparse)
assert_array_almost_equal(S, S2)
S = pairwise_distances(X_sparse, Y_sparse.tocsc(), metric="manhattan")
S2 = manhattan_distances(X_sparse.tobsr(), Y_sparse.tocoo())
assert_array_almost_equal(S, S2)
S2 = manhattan_distances(X, Y)
assert_array_almost_equal(S, S2)
# Test with scipy.spatial.distance metric, with a kwd
kwds = {"p": 2.0}
S = pairwise_distances(X, Y, metric="minkowski", **kwds)
S2 = pairwise_distances(X, Y, metric=minkowski, **kwds)
assert_array_almost_equal(S, S2)
# same with Y = None
kwds = {"p": 2.0}
S = pairwise_distances(X, metric="minkowski", **kwds)
S2 = pairwise_distances(X, metric=minkowski, **kwds)
assert_array_almost_equal(S, S2)
# Test that scipy distance metrics throw an error if sparse matrix given
assert_raises(TypeError, pairwise_distances, X_sparse, metric="minkowski")
assert_raises(TypeError, pairwise_distances, X, Y_sparse,
metric="minkowski")
# Test that a value error is raised if the metric is unknown
assert_raises(ValueError, pairwise_distances, X, Y, metric="blah")
# ignore conversion to boolean in pairwise_distances
@ignore_warnings(category=DataConversionWarning)
def test_pairwise_boolean_distance():
# test that we convert to boolean arrays for boolean distances
rng = np.random.RandomState(0)
X = rng.randn(5, 4)
Y = X.copy()
Y[0, 0] = 1 - Y[0, 0]
for metric in PAIRWISE_BOOLEAN_FUNCTIONS:
for Z in [Y, None]:
res = pairwise_distances(X, Z, metric=metric)
res[np.isnan(res)] = 0
assert_true(np.sum(res != 0) == 0)
def test_pairwise_precomputed():
for func in [pairwise_distances, pairwise_kernels]:
# Test correct shape
assert_raises_regexp(ValueError, '.* shape .*',
func, np.zeros((5, 3)), metric='precomputed')
# with two args
assert_raises_regexp(ValueError, '.* shape .*',
func, np.zeros((5, 3)), np.zeros((4, 4)),
metric='precomputed')
# even if shape[1] agrees (although this second arg is spurious)
assert_raises_regexp(ValueError, '.* shape .*',
func, np.zeros((5, 3)), np.zeros((4, 3)),
metric='precomputed')
# Test not copied (if appropriate dtype)
S = np.zeros((5, 5))
S2 = func(S, metric="precomputed")
assert_true(S is S2)
# with two args
S = np.zeros((5, 3))
S2 = func(S, np.zeros((3, 3)), metric="precomputed")
assert_true(S is S2)
# Test always returns float dtype
S = func(np.array([[1]], dtype='int'), metric='precomputed')
assert_equal('f', S.dtype.kind)
# Test converts list to array-like
S = func([[1.]], metric='precomputed')
assert_true(isinstance(S, np.ndarray))
def check_pairwise_parallel(func, metric, kwds):
rng = np.random.RandomState(0)
for make_data in (np.array, csr_matrix):
X = make_data(rng.random_sample((5, 4)))
Y = make_data(rng.random_sample((3, 4)))
try:
S = func(X, metric=metric, n_jobs=1, **kwds)
except (TypeError, ValueError) as exc:
# Not all metrics support sparse input
# ValueError may be triggered by bad callable
if make_data is csr_matrix:
assert_raises(type(exc), func, X, metric=metric,
n_jobs=2, **kwds)
continue
else:
raise
S2 = func(X, metric=metric, n_jobs=2, **kwds)
assert_array_almost_equal(S, S2)
S = func(X, Y, metric=metric, n_jobs=1, **kwds)
S2 = func(X, Y, metric=metric, n_jobs=2, **kwds)
assert_array_almost_equal(S, S2)
def test_pairwise_parallel():
wminkowski_kwds = {'w': np.arange(1, 5).astype('double'), 'p': 1}
metrics = [(pairwise_distances, 'euclidean', {}),
(pairwise_distances, wminkowski, wminkowski_kwds),
(pairwise_distances, 'wminkowski', wminkowski_kwds),
(pairwise_kernels, 'polynomial', {'degree': 1}),
(pairwise_kernels, callable_rbf_kernel, {'gamma': .1}),
]
for func, metric, kwds in metrics:
yield check_pairwise_parallel, func, metric, kwds
def test_pairwise_callable_nonstrict_metric():
# pairwise_distances should allow a callable metric where metric(x, x) != 0
# Knowing that the callable is a strict metric would allow the diagonal to
# be left uncalculated and set to 0.
assert_equal(pairwise_distances([[1.]], metric=lambda x, y: 5)[0, 0], 5)
def callable_rbf_kernel(x, y, **kwds):
# Callable version of pairwise.rbf_kernel.
K = rbf_kernel(np.atleast_2d(x), np.atleast_2d(y), **kwds)
return K
def test_pairwise_kernels(): # Test the pairwise_kernels helper function.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((2, 4))
# Test with all metrics that should be in PAIRWISE_KERNEL_FUNCTIONS.
test_metrics = ["rbf", "laplacian", "sigmoid", "polynomial", "linear",
"chi2", "additive_chi2"]
for metric in test_metrics:
function = PAIRWISE_KERNEL_FUNCTIONS[metric]
# Test with Y=None
K1 = pairwise_kernels(X, metric=metric)
K2 = function(X)
assert_array_almost_equal(K1, K2)
# Test with Y=Y
K1 = pairwise_kernels(X, Y=Y, metric=metric)
K2 = function(X, Y=Y)
assert_array_almost_equal(K1, K2)
# Test with tuples as X and Y
X_tuples = tuple([tuple([v for v in row]) for row in X])
Y_tuples = tuple([tuple([v for v in row]) for row in Y])
K2 = pairwise_kernels(X_tuples, Y_tuples, metric=metric)
assert_array_almost_equal(K1, K2)
# Test with sparse X and Y
X_sparse = csr_matrix(X)
Y_sparse = csr_matrix(Y)
if metric in ["chi2", "additive_chi2"]:
# these don't support sparse matrices yet
assert_raises(ValueError, pairwise_kernels,
X_sparse, Y=Y_sparse, metric=metric)
continue
K1 = pairwise_kernels(X_sparse, Y=Y_sparse, metric=metric)
assert_array_almost_equal(K1, K2)
# Test with a callable function, with given keywords.
metric = callable_rbf_kernel
kwds = {'gamma': 0.1}
K1 = pairwise_kernels(X, Y=Y, metric=metric, **kwds)
K2 = rbf_kernel(X, Y=Y, **kwds)
assert_array_almost_equal(K1, K2)
# callable function, X=Y
K1 = pairwise_kernels(X, Y=X, metric=metric, **kwds)
K2 = rbf_kernel(X, Y=X, **kwds)
assert_array_almost_equal(K1, K2)
def test_pairwise_kernels_filter_param():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((2, 4))
K = rbf_kernel(X, Y, gamma=0.1)
params = {"gamma": 0.1, "blabla": ":)"}
K2 = pairwise_kernels(X, Y, metric="rbf", filter_params=True, **params)
assert_array_almost_equal(K, K2)
assert_raises(TypeError, pairwise_kernels, X, Y, "rbf", **params)
def test_paired_distances():
# Test the paired_distances helper function.
rng = np.random.RandomState(0)
# Euclidean distance should be equivalent to calling the function.
X = rng.random_sample((5, 4))
# Euclidean distance, with Y != X.
Y = rng.random_sample((5, 4))
for metric, func in iteritems(PAIRED_DISTANCES):
S = paired_distances(X, Y, metric=metric)
S2 = func(X, Y)
assert_array_almost_equal(S, S2)
S3 = func(csr_matrix(X), csr_matrix(Y))
assert_array_almost_equal(S, S3)
if metric in PAIRWISE_DISTANCE_FUNCTIONS:
# Check the pairwise_distances implementation
# gives the same value
distances = PAIRWISE_DISTANCE_FUNCTIONS[metric](X, Y)
distances = np.diag(distances)
assert_array_almost_equal(distances, S)
# Check the callable implementation
S = paired_distances(X, Y, metric='manhattan')
S2 = paired_distances(X, Y, metric=lambda x, y: np.abs(x - y).sum(axis=0))
assert_array_almost_equal(S, S2)
# Test that a ValueError is raised when the lengths of X and Y differ
Y = rng.random_sample((3, 4))
assert_raises(ValueError, paired_distances, X, Y)
def test_pairwise_distances_argmin_min():
# Check pairwise minimum distances computation for any metric
X = [[0], [1]]
Y = [[-1], [2]]
Xsp = dok_matrix(X)
Ysp = csr_matrix(Y, dtype=np.float32)
# euclidean metric
D, E = pairwise_distances_argmin_min(X, Y, metric="euclidean")
D2 = pairwise_distances_argmin(X, Y, metric="euclidean")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(D2, [0, 1])
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# sparse matrix case
Dsp, Esp = pairwise_distances_argmin_min(Xsp, Ysp, metric="euclidean")
assert_array_equal(Dsp, D)
assert_array_equal(Esp, E)
# We don't want np.matrix here
assert_equal(type(Dsp), np.ndarray)
assert_equal(type(Esp), np.ndarray)
# Non-euclidean scikit-learn metric
D, E = pairwise_distances_argmin_min(X, Y, metric="manhattan")
D2 = pairwise_distances_argmin(X, Y, metric="manhattan")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(D2, [0, 1])
assert_array_almost_equal(E, [1., 1.])
D, E = pairwise_distances_argmin_min(Xsp, Ysp, metric="manhattan")
D2 = pairwise_distances_argmin(Xsp, Ysp, metric="manhattan")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Non-euclidean Scipy distance (callable)
D, E = pairwise_distances_argmin_min(X, Y, metric=minkowski,
metric_kwargs={"p": 2})
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Non-euclidean Scipy distance (string)
D, E = pairwise_distances_argmin_min(X, Y, metric="minkowski",
metric_kwargs={"p": 2})
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Compare with naive implementation
rng = np.random.RandomState(0)
X = rng.randn(97, 149)
Y = rng.randn(111, 149)
dist = pairwise_distances(X, Y, metric="manhattan")
dist_orig_ind = dist.argmin(axis=0)
dist_orig_val = dist[dist_orig_ind, range(len(dist_orig_ind))]
dist_chunked_ind, dist_chunked_val = pairwise_distances_argmin_min(
X, Y, axis=0, metric="manhattan", batch_size=50)
np.testing.assert_almost_equal(dist_orig_ind, dist_chunked_ind, decimal=7)
np.testing.assert_almost_equal(dist_orig_val, dist_chunked_val, decimal=7)
def test_euclidean_distances():
# Check the pairwise Euclidean distances computation
X = [[0]]
Y = [[1], [2]]
D = euclidean_distances(X, Y)
assert_array_almost_equal(D, [[1., 2.]])
X = csr_matrix(X)
Y = csr_matrix(Y)
D = euclidean_distances(X, Y)
assert_array_almost_equal(D, [[1., 2.]])
rng = np.random.RandomState(0)
X = rng.random_sample((10, 4))
Y = rng.random_sample((20, 4))
X_norm_sq = (X ** 2).sum(axis=1).reshape(1, -1)
Y_norm_sq = (Y ** 2).sum(axis=1).reshape(1, -1)
# check that we still get the right answers with {X,Y}_norm_squared
D1 = euclidean_distances(X, Y)
D2 = euclidean_distances(X, Y, X_norm_squared=X_norm_sq)
D3 = euclidean_distances(X, Y, Y_norm_squared=Y_norm_sq)
D4 = euclidean_distances(X, Y, X_norm_squared=X_norm_sq,
Y_norm_squared=Y_norm_sq)
assert_array_almost_equal(D2, D1)
assert_array_almost_equal(D3, D1)
assert_array_almost_equal(D4, D1)
# check we get the wrong answer with wrong {X,Y}_norm_squared
X_norm_sq *= 0.5
Y_norm_sq *= 0.5
wrong_D = euclidean_distances(X, Y,
X_norm_squared=np.zeros_like(X_norm_sq),
Y_norm_squared=np.zeros_like(Y_norm_sq))
assert_greater(np.max(np.abs(wrong_D - D1)), .01)
def test_cosine_distances():
# Check the pairwise Cosine distances computation
rng = np.random.RandomState(1337)
x = np.abs(rng.rand(910))
XA = np.vstack([x, x])
D = cosine_distances(XA)
assert_array_almost_equal(D, [[0., 0.], [0., 0.]])
# check that all elements are in [0, 2]
assert_true(np.all(D >= 0.))
assert_true(np.all(D <= 2.))
# check that diagonal elements are equal to 0
assert_array_almost_equal(D[np.diag_indices_from(D)], [0., 0.])
XB = np.vstack([x, -x])
D2 = cosine_distances(XB)
# check that all elements are in [0, 2]
assert_true(np.all(D2 >= 0.))
assert_true(np.all(D2 <= 2.))
# check that diagonal elements are equal to 0 and non diagonal to 2
assert_array_almost_equal(D2, [[0., 2.], [2., 0.]])
# check large random matrix
X = np.abs(rng.rand(1000, 5000))
D = cosine_distances(X)
# check that diagonal elements are equal to 0
assert_array_almost_equal(D[np.diag_indices_from(D)], [0.] * D.shape[0])
assert_true(np.all(D >= 0.))
assert_true(np.all(D <= 2.))
# Paired distances
def test_paired_euclidean_distances():
# Check the paired Euclidean distances computation
X = [[0], [0]]
Y = [[1], [2]]
D = paired_euclidean_distances(X, Y)
assert_array_almost_equal(D, [1., 2.])
def test_paired_manhattan_distances():
# Check the paired manhattan distances computation
X = [[0], [0]]
Y = [[1], [2]]
D = paired_manhattan_distances(X, Y)
assert_array_almost_equal(D, [1., 2.])
def test_chi_square_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((10, 4))
K_add = additive_chi2_kernel(X, Y)
gamma = 0.1
K = chi2_kernel(X, Y, gamma=gamma)
assert_equal(K.dtype, np.float)
for i, x in enumerate(X):
for j, y in enumerate(Y):
chi2 = -np.sum((x - y) ** 2 / (x + y))
chi2_exp = np.exp(gamma * chi2)
assert_almost_equal(K_add[i, j], chi2)
assert_almost_equal(K[i, j], chi2_exp)
# check diagonal is ones for data with itself
K = chi2_kernel(Y)
assert_array_equal(np.diag(K), 1)
# check off-diagonal is < 1 but > 0:
assert_true(np.all(K > 0))
assert_true(np.all(K - np.diag(np.diag(K)) < 1))
# check that float32 is preserved
X = rng.random_sample((5, 4)).astype(np.float32)
Y = rng.random_sample((10, 4)).astype(np.float32)
K = chi2_kernel(X, Y)
assert_equal(K.dtype, np.float32)
# check integer type gets converted,
# check that zeros are handled
X = rng.random_sample((10, 4)).astype(np.int32)
K = chi2_kernel(X, X)
assert_true(np.isfinite(K).all())
assert_equal(K.dtype, np.float)
# check that kernel of similar things is greater than dissimilar ones
X = [[.3, .7], [1., 0]]
Y = [[0, 1], [.9, .1]]
K = chi2_kernel(X, Y)
assert_greater(K[0, 0], K[0, 1])
assert_greater(K[1, 1], K[1, 0])
# test negative input
assert_raises(ValueError, chi2_kernel, [[0, -1]])
assert_raises(ValueError, chi2_kernel, [[0, -1]], [[-1, -1]])
assert_raises(ValueError, chi2_kernel, [[0, 1]], [[-1, -1]])
# different n_features in X and Y
assert_raises(ValueError, chi2_kernel, [[0, 1]], [[.2, .2, .6]])
# sparse matrices
assert_raises(ValueError, chi2_kernel, csr_matrix(X), csr_matrix(Y))
assert_raises(ValueError, additive_chi2_kernel,
csr_matrix(X), csr_matrix(Y))
def test_kernel_symmetry():
# Valid kernels should be symmetric
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
for kernel in (linear_kernel, polynomial_kernel, rbf_kernel,
laplacian_kernel, sigmoid_kernel, cosine_similarity):
K = kernel(X, X)
assert_array_almost_equal(K, K.T, 15)
def test_kernel_sparse():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
X_sparse = csr_matrix(X)
for kernel in (linear_kernel, polynomial_kernel, rbf_kernel,
laplacian_kernel, sigmoid_kernel, cosine_similarity):
K = kernel(X, X)
K2 = kernel(X_sparse, X_sparse)
assert_array_almost_equal(K, K2)
def test_linear_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = linear_kernel(X, X)
# the diagonal elements of a linear kernel are their squared norm
assert_array_almost_equal(K.flat[::6], [linalg.norm(x) ** 2 for x in X])
def test_rbf_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = rbf_kernel(X, X)
# the diagonal elements of a rbf kernel are 1
assert_array_almost_equal(K.flat[::6], np.ones(5))
def test_laplacian_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = laplacian_kernel(X, X)
# the diagonal elements of a laplacian kernel are 1
assert_array_almost_equal(np.diag(K), np.ones(5))
# off-diagonal elements are < 1 but > 0:
assert_true(np.all(K > 0))
assert_true(np.all(K - np.diag(np.diag(K)) < 1))
def test_cosine_similarity_sparse_output():
# Test if cosine_similarity correctly produces sparse output.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((3, 4))
Xcsr = csr_matrix(X)
Ycsr = csr_matrix(Y)
K1 = cosine_similarity(Xcsr, Ycsr, dense_output=False)
assert_true(issparse(K1))
K2 = pairwise_kernels(Xcsr, Y=Ycsr, metric="cosine")
assert_array_almost_equal(K1.todense(), K2)
def test_cosine_similarity():
# Test the cosine_similarity.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((3, 4))
Xcsr = csr_matrix(X)
Ycsr = csr_matrix(Y)
for X_, Y_ in ((X, None), (X, Y),
(Xcsr, None), (Xcsr, Ycsr)):
# Test that the cosine kernel is equal to a linear kernel when the data
# has been previously normalized by its L2-norm.
K1 = pairwise_kernels(X_, Y=Y_, metric="cosine")
X_ = normalize(X_)
if Y_ is not None:
Y_ = normalize(Y_)
K2 = pairwise_kernels(X_, Y=Y_, metric="linear")
assert_array_almost_equal(K1, K2)
def test_check_dense_matrices():
# Ensure that pairwise array check works for dense matrices.
# Check that if XB is None, XB is returned as reference to XA
XA = np.resize(np.arange(40), (5, 8))
XA_checked, XB_checked = check_pairwise_arrays(XA, None)
assert_true(XA_checked is XB_checked)
assert_array_equal(XA, XA_checked)
def test_check_XB_returned():
# Ensure that if XA and XB are given correctly, they return as equal.
# Check that if XB is not None, it is returned equal.
# Note that the second dimension of XB is the same as XA.
XA = np.resize(np.arange(40), (5, 8))
XB = np.resize(np.arange(32), (4, 8))
XA_checked, XB_checked = check_pairwise_arrays(XA, XB)
assert_array_equal(XA, XA_checked)
assert_array_equal(XB, XB_checked)
XB = np.resize(np.arange(40), (5, 8))
XA_checked, XB_checked = check_paired_arrays(XA, XB)
assert_array_equal(XA, XA_checked)
assert_array_equal(XB, XB_checked)
def test_check_different_dimensions():
# Ensure an error is raised if the dimensions are different.
XA = np.resize(np.arange(45), (5, 9))
XB = np.resize(np.arange(32), (4, 8))
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
XB = np.resize(np.arange(4 * 9), (4, 9))
assert_raises(ValueError, check_paired_arrays, XA, XB)
def test_check_invalid_dimensions():
# Ensure an error is raised on 1D input arrays.
# The modified tests are not 1D. In the old test, the array was internally
# converted to 2D anyway.
XA = np.arange(45).reshape(9, 5)
XB = np.arange(32).reshape(4, 8)
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
XA = np.arange(45).reshape(9, 5)
XB = np.arange(32).reshape(4, 8)
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
def test_check_sparse_arrays():
# Ensures that checks return valid sparse matrices.
rng = np.random.RandomState(0)
XA = rng.random_sample((5, 4))
XA_sparse = csr_matrix(XA)
XB = rng.random_sample((5, 4))
XB_sparse = csr_matrix(XB)
XA_checked, XB_checked = check_pairwise_arrays(XA_sparse, XB_sparse)
# compare their difference because testing csr matrices for
# equality with '==' does not work as expected.
assert_true(issparse(XA_checked))
assert_equal(abs(XA_sparse - XA_checked).sum(), 0)
assert_true(issparse(XB_checked))
assert_equal(abs(XB_sparse - XB_checked).sum(), 0)
XA_checked, XA_2_checked = check_pairwise_arrays(XA_sparse, XA_sparse)
assert_true(issparse(XA_checked))
assert_equal(abs(XA_sparse - XA_checked).sum(), 0)
assert_true(issparse(XA_2_checked))
assert_equal(abs(XA_2_checked - XA_checked).sum(), 0)
def tuplify(X):
# Turns a numpy matrix (any n-dimensional array) into tuples.
s = X.shape
if len(s) > 1:
# Tuplify each sub-array in the input.
return tuple(tuplify(row) for row in X)
else:
# Single dimension input, just return tuple of contents.
return tuple(r for r in X)
def test_check_tuple_input():
# Ensures that checks return valid tuples.
rng = np.random.RandomState(0)
XA = rng.random_sample((5, 4))
XA_tuples = tuplify(XA)
XB = rng.random_sample((5, 4))
XB_tuples = tuplify(XB)
XA_checked, XB_checked = check_pairwise_arrays(XA_tuples, XB_tuples)
assert_array_equal(XA_tuples, XA_checked)
assert_array_equal(XB_tuples, XB_checked)
def test_check_preserve_type():
# Ensures that type float32 is preserved.
XA = np.resize(np.arange(40), (5, 8)).astype(np.float32)
XB = np.resize(np.arange(40), (5, 8)).astype(np.float32)
XA_checked, XB_checked = check_pairwise_arrays(XA, None)
assert_equal(XA_checked.dtype, np.float32)
# both float32
XA_checked, XB_checked = check_pairwise_arrays(XA, XB)
assert_equal(XA_checked.dtype, np.float32)
assert_equal(XB_checked.dtype, np.float32)
# mismatched A
XA_checked, XB_checked = check_pairwise_arrays(XA.astype(np.float),
XB)
assert_equal(XA_checked.dtype, np.float)
assert_equal(XB_checked.dtype, np.float)
# mismatched B
XA_checked, XB_checked = check_pairwise_arrays(XA,
XB.astype(np.float))
assert_equal(XA_checked.dtype, np.float)
assert_equal(XB_checked.dtype, np.float)
| bsd-3-clause |
cle1109/scot | scot/tests/test_plotting.py | 4 | 4683 | # Released under The MIT License (MIT)
# http://opensource.org/licenses/MIT
# Copyright (c) 2013 SCoT Development Team
import unittest
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.image import AxesImage
from matplotlib.figure import Figure
from scot.eegtopo.topoplot import Topoplot
from scot import plotting as sp
from scot.varbase import VARBase
class TestFunctionality(unittest.TestCase):
def setUp(self):
self.locs = [[0, 0, 1], [1, 0, 0], [0, 1, 0], [-1, 0, 0], [0, -1, 0]]
self.vals = [[1, 0, 0, 0, 0], [0, 0, 0, 0, 0], [1, 1, 1, 1, 1] ]
self.topo = Topoplot()
self.topo.set_locations(self.locs)
self.maps = sp.prepare_topoplots(self.topo, self.vals)
def tearDown(self):
plt.close('all')
def test_topoplots(self):
locs, vals, topo, maps = self.locs, self.vals, self.topo, self.maps
self.assertEqual(len(maps), len(vals)) # one topo map per row of vals
self.assertTrue(np.allclose(maps[0], maps[0].T)) # first map: should be rotationally identical (blob in the middle)
self.assertTrue(np.alltrue(maps[1] == 0)) # second map: should be all zeros
self.assertTrue(np.alltrue(maps[2] == 1)) # third map: should be all ones
#--------------------------------------------------------------------
a1 = sp.plot_topo(plt.gca(), topo, maps[0])
a2 = sp.plot_topo(plt.gca(), topo, maps[0], crange=[-1, 1], offset=(1, 1))
self.assertIsInstance(a1, AxesImage)
self.assertIsInstance(a2, AxesImage)
#--------------------------------------------------------------------
f1 = sp.plot_sources(topo, maps, maps)
f2 = sp.plot_sources(topo, maps, maps, 90, f1)
self.assertIs(f1, f2)
self.assertIsInstance(f1, Figure)
#--------------------------------------------------------------------
f1 = sp.plot_connectivity_topos(topo=topo, topomaps=maps, layout='diagonal')
f2 = sp.plot_connectivity_topos(topo=topo, topomaps=maps, layout='somethingelse')
self.assertEqual(len(f1.axes), len(vals))
self.assertEqual(len(f2.axes), len(vals)*2)
def test_connectivity_spectrum(self):
a = np.array([[[0, 0], [0, 1], [0, 2]],
[[1, 0], [1, 1], [1, 2]],
[[2, 0], [2, 1], [2, 2]]])
f = sp.plot_connectivity_spectrum(a, diagonal=0)
self.assertIsInstance(f, Figure)
self.assertEqual(len(f.axes), 9)
f = sp.plot_connectivity_spectrum(a, diagonal=1)
self.assertEqual(len(f.axes), 3)
f = sp.plot_connectivity_spectrum(a, diagonal=-1)
self.assertEqual(len(f.axes), 6)
def test_connectivity_significance(self):
a = np.array([[[0, 0], [0, 1], [0, 2]],
[[1, 0], [1, 1], [1, 2]],
[[2, 0], [2, 1], [2, 2]]])
f = sp.plot_connectivity_significance(a, diagonal=0)
self.assertIsInstance(f, Figure)
self.assertEqual(len(f.axes), 9)
f = sp.plot_connectivity_significance(a, diagonal=1)
self.assertEqual(len(f.axes), 3)
f = sp.plot_connectivity_significance(a, diagonal=-1)
self.assertEqual(len(f.axes), 6)
def test_connectivity_timespectrum(self):
a = np.array([[[[0, 0], [0, 1], [0, 2]],
[[1, 0], [1, 1], [1, 2]],
[[2, 0], [2, 1], [2, 2]]]]).repeat(4, 0).transpose([1,2,3,0])
f = sp.plot_connectivity_timespectrum(a, diagonal=0)
self.assertIsInstance(f, Figure)
self.assertEqual(len(f.axes), 9)
f = sp.plot_connectivity_timespectrum(a, diagonal=1)
self.assertEqual(len(f.axes), 3)
f = sp.plot_connectivity_timespectrum(a, diagonal=-1)
self.assertEqual(len(f.axes), 6)
def test_circular(self):
w = [[1, 1, 1],
[1, 1, 1],
[1, 1, 1]]
c = [[[1, 1, 1], [1, 1, 1], [1, 1, 1]],
[[1, 1, 1], [1, 1, 1], [1, 1, 1]],
[[1, 1, 1], [1, 1, 1], [1, 1, 1]]]
sp.plot_circular(1, [1, 1, 1], topo=self.topo, topomaps=self.maps)
sp.plot_circular(w, [1, 1, 1], topo=self.topo, topomaps=self.maps)
sp.plot_circular(1, c, topo=self.topo, topomaps=self.maps)
sp.plot_circular(w, c, topo=self.topo, topomaps=self.maps)
sp.plot_circular(w, c, mask=False, topo=self.topo, topomaps=self.maps)
def test_whiteness(self):
np.random.seed(91)
var = VARBase(0)
var.residuals = np.random.randn(10, 5, 100)
pr = sp.plot_whiteness(var, 20, repeats=100)
self.assertGreater(pr, 0.05)
| mit |
timothylwarren/py_utilities | fly_plot_basics.py | 1 | 18649 |
#!/usr/bin/python
import pdb
import pylab
import numpy as np
from matplotlib.lines import Line2D
from py_utilities import tw_plot_library3 as twplt
from py_utilities import fp_library2 as fpl
from py_utilities import tw_calc_library as calc
from py_utilities import tw_filehandling as fh
#from matplotlib.patches import Ellipse
import matplotlib.cm as cm
AXISPAD=2
#pylab.ion()
def make_example_figure(crdt, axmotor,axpol_hist,**kwargs):
try:
displacement_ax=kwargs['displacement_ax']
displacement_flag=True
except:
displacement_flag=False
try:
text_ax=kwargs['text_ax']
text_flag=True
except:
text_flag=False
plot_motor(crdt,axmotor,plot_vector=False,plot_split=1,plot_start_angle=0,xlim=[0,5.5],plot_mean=crdt['mnrad_360'])
plot_mot_hist(crdt,axpol_hist)
if displacement_flag:
plot_displacement(displacement_ax, crdt,add_net_displacement=True)
if text_flag:
add_text_name(text_ax,crdt)
def line_vec_strength(indt,ax):
#if 'type' in kwargs:
# inds=np.intersect1d(np.where(all_time_list>self.crtimeinds[0])[0],np.where(all_time_list<self.crtimeinds[1])[0])
#else:
all_time_list,all_vec_list=pad_vector_lists(indt)
inds=np.arange(0,len(all_time_list))
plt_veclst=all_vec_list[inds]
plt_timelst=all_time_list[inds]
ax.plot(plt_timelst,plt_veclst,linewidth=0.5)
fpl.adjust_spines(ax,['left', 'bottom'])
ax.set_ylim([0,1])
ax.set_xlim([0,15])
def mk_str(intlist):
return [str(i) for i in intlist]
def add_text_name(ax,crdt):
fname=crdt['fname']
cutname=fname.split('/cloop')
#tst=['', 'home', 'timothy', 'data', '20170505', 'fly1-rnach']
tst=cutname[0].split('/')
#printstr='20170505/fly1-rnach'
printstr=tst[-2]+'/'+tst[-1]
ax.text(0,0,printstr,fontsize=5)
ax.axis('off')
def plot_traj_by_vec_strength(ax,mn_drxn,vec_strength,**kwargs):
ax.plot(mn_drxn,vec_strength)
ax.get_yaxis().set_ticks([])
ax.title.set_visible(False)
ax.get_xaxis().set_ticklabels([])
ax.spines['polar'].set_color('none')
def plot_displacement(ax, indt,**kwargs):
try:
add_net_flag=kwargs['add_net_displacement']
except:
add_net_flag=False
plot_positions={}
for key in ['x','y']:
plot_positions[key]=[]
for key in ['x','y']:
plot_positions[key].append(indt['displacement_traj']['raw'][key])
polar_positions=calc.linear_to_polar(plot_positions)
col='b'
indvls=np.arange(len(polar_positions['theta']))
for ind in indvls:
ax.plot(polar_positions['theta'][ind]+np.pi/2,polar_positions['len'][ind],color=col)
ax.plot(polar_positions['theta'][ind][-1]+np.pi/2,polar_positions['len'][ind][-1],'o',color='k')
ax.get_xaxis().set_ticks([0,np.pi/2.,np.pi,3.*(np.pi/2.)])
ax.get_xaxis().set_ticklabels(['0','90','180','270'],fontsize=8)
ax.get_yaxis().set_ticks([])
ax.get_yaxis().set_ticklabels([],fontsize=8)
#pdb.set_trace()
ax.set_ylim([0,10000])
if add_net_flag:
ax.plot([indt['mnrad_360']+np.pi/2,indt['mnrad_360']+np.pi/2],[0,polar_positions['len'][ind][-1]],'k--')
def pad_vector_lists(indt):
#vectimevls=self.crdt['vec_time_lst']-self.crdt['time_in_min'][0]
time_space=indt['vec_time_lst'][1]-indt['vec_time_lst'][0]
init_time_vls=np.arange(indt['time_in_min'][0],np.min(indt['vec_time_lst']),time_space)
end_time_vls=np.arange(np.max(indt['vec_time_lst']),indt['time_in_min'][-1],time_space)
timelst=np.concatenate([init_time_vls,np.array(indt['vec_time_lst']),end_time_vls])-indt['time_in_min'][0]
#pdb.set_trace()
veclst=np.concatenate([np.zeros(len(init_time_vls)),np.array(indt['len_vector_lst']),np.zeros(len(end_time_vls))])
return timelst,veclst
def make_raw_plot(crdt,axmotor, axhist):
#COLNUM=-1
TIME_GAP=5
for cr_fltnum in crdt.keys():
if crdt[cr_fltnum]:
mnvl_in_rad=crdt[cr_fltnum]['mnrad_360']
halt_flag=False
offset_time=0
if cr_fltnum==1:
offset_time=crdt[cr_fltnum-1]['time_in_min'][-1]
elif cr_fltnum>1:
offset_time=crdt[cr_fltnum-1]['time_in_min'][-1]-TIME_GAP
plot_motor(crdt[cr_fltnum],axmotor,plot_vector=False,plot_split=1,plot_start_angle=0,subtract_zero_time=True,offset_time=offset_time,plot_vert_line_at_end=True, halt_flag=halt_flag)
axmotor.set_xlim([0,15.5])
#if COLNUM:
# axmotor[crkey][flyindnum][COLNUM].axis('off')
# axhist[crkey][flyindnum][COLNUM].axis('off')
try:
crax=axhist[cr_fltnum]
except:
pdb.set_trace()
crax.step(crdt[cr_fltnum]['normhst'],crdt[cr_fltnum]['xrad'][0:-1]+crdt[cr_fltnum]['rad_per_bin']/2,'k',linewidth=1)
#self.col_num[crkey]=self.col_num[crkey]+1
fpl.adjust_spines(crax,[])
crax.set_ylim([-calc.deg_to_rad(0.),calc.deg_to_rad(360.0)])
crax.plot(0.21,mnvl_in_rad,'r<')
crax.set_xlim([0,0.24])
def plot_motor(indt,ax,withhold_bottom_axis=False,one_line_label=False,xlabelpad=-3,**kwargs):
VERTVL=370
if 'zoom_times' in kwargs:
type='zoom'
mot_inds=self.calc_zoom_inds(kwargs['zoom_times'])
else:
mot_inds=np.arange(0,len(indt['time_in_min']))
type='nozoom'
plot_split_flag=0
try:
subtract_zero_time_flag=kwargs['subtract_zero_time']
except:
subtract_zero_time_flag=True
try:
plot_vert_line_at_end=kwargs['plot_vert_line_at_end']
except:
plot_vert_line_at_end=False
try:
mnvl=kwargs['plot_mean']
except:
mnvl=[]
try:
center_on_zero_flag=kwargs['center_on_zero_flag']
except:
center_on_zero_flag=False
try:
flag_360=kwargs['flag_360']
except:
flag_360=0
try:
offset_to_subtract=kwargs['offset_to_subtract']
except:
offset_to_subtract=0
try:
halt_flag=kwargs['halt_flag']
except:
halt_flag=False
try:
offset_time=kwargs['offset_time']
except:
offset_time=0
if 'plot_vertical' in kwargs:
plot_vert_flag=1
else:
plot_vert_flag=0
if 'xticks' in kwargs:
xtickflag=1
else:
xtickflag=0
if 'plot_split' in kwargs:
if kwargs['plot_split']:
plot_split_flag=1
if 'boundary_times' in kwargs:
time_flag=1
else:
time_flag=0
if 'xlim' in kwargs:
xlim=kwargs['xlim']
else:
xlim=[0,20]
if 'plot_vector' in kwargs:
if kwargs['plot_vector']:
if 'vector_threshold' in kwargs:
vec_threshold=kwargs['vector_threshold']
else:
vec_threshold=self.vec_threshold
try:
if len(self.crdt['vec_time_lst'])>1:
if kwargs['plot_vector']:
all_time_list,all_vec_list=self.pad_vector_lists()
if 'type' in kwargs:
inds=np.intersect1d(np.where(all_time_list>self.crtimeinds[0])[0],np.where(all_time_list<self.crtimeinds[1])[0])
else:
inds=np.arange(0,len(all_time_list))
inds_thresh=np.where(all_vec_list[inds]>vec_threshold)
#2 columns with x values to plot.
if len(inds[inds_thresh]):
[startinds,stopinds]=calc.find_consecutive_values(inds[inds_thresh])
twplt.plot_horizontal_lines(ax,all_time_list[startinds],all_time_list[stopinds],VERTVL)
except:
pdb.set_trace()
tst=1
if 'plot_left_axis' in kwargs:
plot_left_axis=kwargs['plot_left_axis']
else:
plot_left_axis=True
mot_tmp=indt['mot_deg'][mot_inds]
if subtract_zero_time_flag:
time=indt['time_in_min'][mot_inds]-indt['time_in_min'][0]
else:
time=indt['time_in_min'][mot_inds]
if offset_time:
time=time+offset_time
if halt_flag:
pdb.set_trace()
mot_rad=calc.deg_to_rad(mot_tmp)-offset_to_subtract
mot_tmp=calc.rad_to_deg(calc.standardize_angle(mot_rad,2*np.pi,force_positive=1))
if center_on_zero_flag:
mot=calc.center_deg_on_zero(mot_tmp)
else:
mot=mot_tmp
sub_plot_motor(ax,time,mot, **kwargs)
if plot_vert_line_at_end:
ax.plot([time[-1],time[-1]],[0,360],'b',linewidth=0.5)
if mnvl:
deg_mn=calc.rad_to_deg(mnvl)
#ax.plot([0,15],[deg_mn, deg_mn],linestyle='--', dashes=(2, 1),color='r')
#ax.plot([0,15],[deg_mn+180, deg_mn+180],linestyle='--', dashes=(2, 1),color='r')
#ax.plot([0, 1], [0, 1], linestyle='--', dashes=(5, 1)) #length of 5, space of 1
if 'plot_start_angle' in kwargs:
if kwargs['plot_start_angle']:
#ax.plot(time[0]+0.1,calc.standardize_angle(self.params['adjusted_start_angle']),'c>')
ax.plot(time[0]-0.5,calc.standardize_angle(calc.rad_to_deg(self.params['calculated_start_angle']),180.0),'c>',markersize=7)
if 'marker_time' in kwargs:
#need to find the value of the motor at that time_duration
#first need to find the closest index
crind=np.argmin(np.abs(time-kwargs['marker_time']))
ax.plot(time[crind],mot[crind],'co')
if plot_left_axis:
if not withhold_bottom_axis:
fpl.adjust_spines(ax,['left','bottom'])
else:
fpl.adjust_spines(ax,['left'])
ax.get_xaxis().set_ticklabels([],fontsize=6)
else:
if withhold_bottom_axis:
fpl.adjust_spines(ax,['left'])
ax.get_xaxis().set_ticklabels([],fontsize=6)
else:
fpl.adjust_spines(ax,['bottom'])
#elif (plot_left_axis) and (self.last_row_flag==0):
# ax.axis('off')
#fpl.adjust_spines(ax,['left'])
if plot_left_axis:
if center_on_zero_flag:
ax.get_yaxis().set_ticks([-180,0,180])
ax.get_yaxis().set_ticklabels(['-180','0','180'],fontsize=6)
ax.set_ylim([-180,180])
else:
ax.get_yaxis().set_ticks([0,90,180,270,360])
ax.get_yaxis().set_ticklabels(['0','90','180','270','360'],fontsize=6)
ax.set_ylim([0,360])
if one_line_label:
ylab='polarizer ($^\circ$)'
else:
ylab='polarizer\n($^\circ$)'
ax.set_ylabel(ylab, fontsize=6)
else:
if center_on_zero_flag:
ax.set_ylim([-180,180])
else:
ax.set_ylim([0,360])
if xtickflag:
xticks=kwargs['xticks']
xticklabels=kwargs['xticklabels']
else:
xticks=[0,15]
xticklabels=mk_str(xticks)
try:
time_offset=kwargs['time_offset']
except:
time_offset=0
if time_offset:
xticklabels=mk_str(np.array(xticks)+time_offset)
if plot_vert_flag:
ax.plot([kwargs['plot_vertical'],kwargs['plot_vertical']],[-20,380],'r')
if not withhold_bottom_axis:
try:
if self.last_row_flag:
#ax.set_ylabel('polarizer heading', fontsize=9)
ax.get_xaxis().set_ticks(xticks)
ax.get_xaxis().set_ticklabels(xticklabels,fontsize=6)
ax.set_xlabel('time (min.)', fontsize=6)
except:
ax.get_xaxis().set_ticks(xticks)
ax.get_xaxis().set_ticklabels(xticklabels,fontsize=6)
ax.set_xlabel('time (min.)', fontsize=6)
#pdb.set_trace()
ax.set_xlim(xlim)
#ax.set_aspect(0.005)
ax.xaxis.labelpad = xlabelpad
ax.yaxis.labelpad= 1
##
# sub_plot_motor plots angular position values while removing the artefactual
# vertical lines that would otherwise appear where the data wraps around
# 0/360 degrees (an illustrative usage sketch follows the function).
# Inputs:
#   ax   - handle to the axis
#   time - list of time values
#   mot  - list of angles in degrees between 0 and 360
def sub_plot_motor(ax,time,mot,linewidth=0.5,**kwargs):
try:
max_allowed_difference=kwargs['max_allowed_difference']
except:
max_allowed_difference=50
try:
plot_flag=kwargs['plot_flag']
except:
plot_flag=True
try:
col=kwargs['color']
except:
col='k'
absolute_diff_vls=abs(np.diff(mot))
#these are indices to split the incoming array because the difference between neighboring
#values exceeds threshold
breakinds=np.where(absolute_diff_vls>max_allowed_difference)[0]
#breakinds+1 is to get correct index
#this outputs an array of arrays, which will be plotted
mot_split_array=np.array_split(mot,breakinds+1)
time_split_array=np.array_split(time,breakinds+1)
#loops through the arrays to plot each value
if plot_flag:
for crind,crmot_splitinds in enumerate(mot_split_array):
if np.size(crmot_splitinds):
if len(crmot_splitinds>3):
ax.plot(time_split_array[crind],crmot_splitinds,color=col,linewidth=linewidth)
else:
pdb.set_trace()
return time_split_array, mot_split_array
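# Usage sketch (illustrative, synthetic data): a heading trace that wraps from
# ~360 deg back to 0 deg is split at each wrap so no artefactual vertical line
# is drawn:
#
#     import numpy as np
#     import matplotlib.pyplot as plt
#     t = np.linspace(0, 1, 200)
#     heading = (720.0 * t) % 360          # wraps around twice
#     fig, ax = plt.subplots()
#     sub_plot_motor(ax, t, heading, max_allowed_difference=50)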
def plot_mot_hist(indt,crax,**kwargs):
crax.step(indt['normhst'],indt['xrad'][0:-1]+indt['rad_per_bin']/2,'k',linewidth=1)
fpl.adjust_spines(crax,[])
crax.set_ylim([-calc.deg_to_rad(20.),calc.deg_to_rad(380.0)])
crax.plot(0.22,indt['mnrad_360'],'r<')
crax.set_xlim([0,0.24])
def make_heat_map(ax,heatdt,**kwargs):
POWER_VALUE=5
plt_type=kwargs['plt_type']
try:
transect_ax=kwargs['transect_ax']
plot_transect_flag=True
try:
ax_schematic=kwargs['ax_schematic']
except:
ax_schematic=[]
except:
plot_transect_flag=False
try:
sub_flag=kwargs['sub_heat_map_flag']
except:
sub_flag=False
try:
paired_flagvl=kwargs['paired_flag']
except:
paired_flagvl=False
# try:
# aligned_flag=kwargs['aligned']
# except:
# aligned_flag=False
#try:
# colorbar_ax=kwargs['colorbar_ax']
# fig_flag=kwargs['fig_flag']
# plot_colorbar_flag=True
#except:
# plot_colorbar_flag=False
try:
renorm_flag=kwargs['renorm']
except:
renorm_flag=False
if sub_flag:
cr_heatmap_data=heatdt['sub_heat_map'][plt_type]
if renorm_flag:
cr_heatmap_data['norm_heat_map_vls']=cr_heatmap_data['norm_heat_map_vls']/sum(sum(cr_heatmap_data['norm_heat_map_vls']))
twplt.polar_heat_map(cr_heatmap_data,ax=ax,shift_vertical_flag=True,sub_flag=sub_flag,**kwargs)
elif paired_flagvl:
try:
heatmap_list=heatdt['full_heat_map'][plt_type]
except:
pdb.set_trace()
if not 'redges' in heatmap_list.keys():
heatmap_list['redges']=heatmap_list['r'][:,0]
heatmap_list['thetaedges']=heatmap_list['theta'][0,:]
cr_heatmap_data=heatmap_list
twplt.polar_heat_map(heatmap_list,ax=ax,shift_vertical_flag=True,sub_flag=sub_flag,sep_max_flag=True,**kwargs)
else:
cr_heatmap_data=heatdt['full_heat_map'][plt_type]
base_bnds=np.array([-np.pi/9, np.pi/9])
#bnd_sectors=[base_bnds, base_bnds+np.pi/2, base_bnds+2*np.pi/2, base_bnds+3*np.pi/2]
#arc_colvls=['r', 'k' ,'c' ,'b','m','g']
arc_bnd_sectors=[base_bnds, base_bnds+np.pi/2, base_bnds+2*np.pi/2, base_bnds+3*np.pi/2]
arc_colors=['lime','k','darkgreen',(0.5,0.5,0.5)]
trans_bnd_sectors=[base_bnds, base_bnds+np.pi/2, base_bnds+2*np.pi/2, base_bnds+3*np.pi/2]
transect_colors=['k','lime',(0.5,0.5,0.5),'darkgreen']
if renorm_flag:
cr_heatmap_data['norm_heat_map_vls']=cr_heatmap_data['norm_heat_map_vls']/sum(sum(cr_heatmap_data['norm_heat_map_vls']))
#if plot_colorbar_flag:
twplt.polar_heat_map(cr_heatmap_data,ax=ax,shift_vertical_flag=True,sub_flag=sub_flag,**kwargs)
#else:
# twplt.polar_heat_map(ax,cr_heatmap_data,shift_vertical_flag=True,aligned=aligned_flag,sub_flag=sub_flag)
if plot_transect_flag:
base_bnds=np.array([-np.pi/9, np.pi/9])
bnd_sectors=[base_bnds, base_bnds+np.pi/2, base_bnds+2*np.pi/2, base_bnds+3*np.pi/2]
if transect_ax:
twplt.plot_transects(transect_ax,cr_heatmap_data,ax_schematic=ax_schematic,bnds=bnd_sectors,**kwargs)
def arbitrary_transect_from_heat_map(ax,heatdt,color='k',plot_mean=False,vecminvls=[0.9],withhold_plot=False,**kwargs):
histvl={}
# num_inds_to_use=len(np.where(heatdt['thetaedges']>0.9)[0])
for ind,crvecminvl in enumerate(vecminvls):
if crvecminvl==0.9:
startvl=2
elif crvecminvl==0.8:
startvl=4
sub_array=heatdt['norm_heat_map_vls'][:,-startvl:]
sumvls=np.sum(sub_array,axis=1)
norm_sumvls=sumvls/np.sum(sumvls)
histvl[crvecminvl]=norm_sumvls
if not withhold_plot:
crmean=calc.weighted_mean(norm_sumvls,heatdt['redges'],mn_type='norm')
ax.step(heatdt['redges'][:-1],norm_sumvls,color=color,linewidth=0.5)
if plot_mean:
ax.plot(crmean,kwargs['mnht'],'v',color=color,markersize=2,clip_on=False)
return histvl
def plot_wings(indt,ax,**kwargs):
colvls=['r','c']
if 'xlim' in kwargs:
xlim=kwargs['xlim']
else:
xlim=[0,25]
if 'type' in kwargs:
inds=self.cr_zoominds
type='zoom'
else:
inds=np.arange(0,len(indt['time_in_min']))
type='nozoom'
plot_time=indt['time_in_min'][inds]-indt['time_in_min'][0]
lftwng=indt['lftwng'][inds]
rtwng=indt['rtwng'][inds]
ax.plot(plot_time,np.array(lftwng),colvls[0],linewidth=0.3)
ax.plot(plot_time,np.array(rtwng),colvls[1],linewidth=0.3)
ax.get_yaxis().set_ticks([25,80])
fpl.adjust_spines(ax,['left','bottom'])
ax.get_yaxis().set_ticklabels(['25','80'],fontsize=6)
ax.set_ylabel('wing angle', fontsize=6)
ax.set_ylim([25,80])
ax.text(0,70,'left wing',fontsize=4,color=colvls[0])
ax.text(5,70,'right wing',fontsize=4,color=colvls[1])
if type=='zoom':
ax.set_xlim([plot_time[0],plot_time[-1]])
else:
ax.set_xlim(xlim)
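# --------------------------------------------------------------------------
# Editor's addition: a minimal, hedged usage sketch for plot_wings.  The
# synthetic record below is an assumption for illustration only; real records
# come from the flight data files used elsewhere in this module, and the call
# relies on the module's fpl helper for spine adjustment.
def _demo_plot_wings():
    import numpy as np
    import matplotlib.pyplot as plt
    t = np.linspace(0, 25, 1000)
    indt = {'time_in_min': t,
            'lftwng': 50 + 5 * np.sin(t),   # placeholder left wing angles
            'rtwng': 50 + 5 * np.cos(t)}    # placeholder right wing angles
    fig, ax = plt.subplots()
    plot_wings(indt, ax)
    plt.show()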
| gpl-3.0 |
aetilley/scikit-learn | examples/cluster/plot_birch_vs_minibatchkmeans.py | 333 | 3694 | """
=================================
Compare BIRCH and MiniBatchKMeans
=================================
This example compares the timing of Birch (with and without the global
clustering step) and MiniBatchKMeans on a synthetic dataset having
100,000 samples and 2 features generated using make_blobs.
If ``n_clusters`` is set to None, the data is reduced from 100,000
samples to a set of 158 clusters. This can be viewed as a preprocessing
step before the final (global) clustering step that further reduces these
158 clusters to 100 clusters.
"""
# Authors: Manoj Kumar <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
print(__doc__)
from itertools import cycle
from time import time
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import Birch, MiniBatchKMeans
from sklearn.datasets.samples_generator import make_blobs
# Generate centers for the blobs so that it forms a 10 X 10 grid.
xx = np.linspace(-22, 22, 10)
yy = np.linspace(-22, 22, 10)
xx, yy = np.meshgrid(xx, yy)
n_centres = np.hstack((np.ravel(xx)[:, np.newaxis],
np.ravel(yy)[:, np.newaxis]))
# Generate blobs to do a comparison between MiniBatchKMeans and Birch.
X, y = make_blobs(n_samples=100000, centers=n_centres, random_state=0)
# Use all colors that matplotlib provides by default.
colors_ = cycle(colors.cnames.keys())
fig = plt.figure(figsize=(12, 4))
fig.subplots_adjust(left=0.04, right=0.98, bottom=0.1, top=0.9)
# Compute clustering with Birch with and without the final clustering step
# and plot.
birch_models = [Birch(threshold=1.7, n_clusters=None),
Birch(threshold=1.7, n_clusters=100)]
final_step = ['without global clustering', 'with global clustering']
for ind, (birch_model, info) in enumerate(zip(birch_models, final_step)):
t = time()
birch_model.fit(X)
time_ = time() - t
print("Birch %s as the final step took %0.2f seconds" % (
info, (time() - t)))
# Plot result
labels = birch_model.labels_
centroids = birch_model.subcluster_centers_
n_clusters = np.unique(labels).size
print("n_clusters : %d" % n_clusters)
ax = fig.add_subplot(1, 3, ind + 1)
for this_centroid, k, col in zip(centroids, range(n_clusters), colors_):
mask = labels == k
ax.plot(X[mask, 0], X[mask, 1], 'w',
markerfacecolor=col, marker='.')
if birch_model.n_clusters is None:
ax.plot(this_centroid[0], this_centroid[1], '+', markerfacecolor=col,
markeredgecolor='k', markersize=5)
ax.set_ylim([-25, 25])
ax.set_xlim([-25, 25])
ax.set_autoscaley_on(False)
ax.set_title('Birch %s' % info)
# Compute clustering with MiniBatchKMeans.
mbk = MiniBatchKMeans(init='k-means++', n_clusters=100, batch_size=100,
n_init=10, max_no_improvement=10, verbose=0,
random_state=0)
t0 = time()
mbk.fit(X)
t_mini_batch = time() - t0
print("Time taken to run MiniBatchKMeans %0.2f seconds" % t_mini_batch)
mbk_means_labels_unique = np.unique(mbk.labels_)
ax = fig.add_subplot(1, 3, 3)
for this_centroid, k, col in zip(mbk.cluster_centers_,
range(n_clusters), colors_):
mask = mbk.labels_ == k
ax.plot(X[mask, 0], X[mask, 1], 'w', markerfacecolor=col, marker='.')
ax.plot(this_centroid[0], this_centroid[1], '+', markeredgecolor='k',
markersize=5)
ax.set_xlim([-25, 25])
ax.set_ylim([-25, 25])
ax.set_title("MiniBatchKMeans")
ax.set_autoscaley_on(False)
plt.show()
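# Editor's addition (hedged): inspect the CF-subcluster reduction described in
# the module docstring.  birch_models[0] was fitted above with n_clusters=None,
# so its subcluster centers are the reduced representation of the 100,000 samples;
# the exact count depends on the threshold and the generated blobs.
print("Subclusters found without global clustering: %d"
      % birch_models[0].subcluster_centers_.shape[0])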
| bsd-3-clause |
JPFrancoia/scikit-learn | sklearn/ensemble/tests/test_bagging.py | 43 | 28175 | """
Testing for the bagging ensemble module (sklearn.ensemble.bagging).
"""
# Author: Gilles Louppe
# License: BSD 3 clause
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_warns_message
from sklearn.dummy import DummyClassifier, DummyRegressor
from sklearn.model_selection import GridSearchCV, ParameterGrid
from sklearn.ensemble import BaggingClassifier, BaggingRegressor
from sklearn.linear_model import Perceptron, LogisticRegression
from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.svm import SVC, SVR
from sklearn.pipeline import make_pipeline
from sklearn.feature_selection import SelectKBest
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_boston, load_iris, make_hastie_10_2
from sklearn.utils import check_random_state
from scipy.sparse import csc_matrix, csr_matrix
rng = check_random_state(0)
# also load the iris dataset
# and randomly permute it
iris = load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
def test_classification():
# Check classification for various parameter settings.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
grid = ParameterGrid({"max_samples": [0.5, 1.0],
"max_features": [1, 2, 4],
"bootstrap": [True, False],
"bootstrap_features": [True, False]})
for base_estimator in [None,
DummyClassifier(),
Perceptron(),
DecisionTreeClassifier(),
KNeighborsClassifier(),
SVC()]:
for params in grid:
BaggingClassifier(base_estimator=base_estimator,
random_state=rng,
**params).fit(X_train, y_train).predict(X_test)
def test_sparse_classification():
# Check classification for various parameter settings on sparse input.
class CustomSVC(SVC):
"""SVC variant that records the nature of the training set"""
def fit(self, X, y):
super(CustomSVC, self).fit(X, y)
self.data_type_ = type(X)
return self
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
parameter_sets = [
{"max_samples": 0.5,
"max_features": 2,
"bootstrap": True,
"bootstrap_features": True},
{"max_samples": 1.0,
"max_features": 4,
"bootstrap": True,
"bootstrap_features": True},
{"max_features": 2,
"bootstrap": False,
"bootstrap_features": True},
{"max_samples": 0.5,
"bootstrap": True,
"bootstrap_features": False},
]
for sparse_format in [csc_matrix, csr_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
for params in parameter_sets:
for f in ['predict', 'predict_proba', 'predict_log_proba', 'decision_function']:
# Trained on sparse format
sparse_classifier = BaggingClassifier(
base_estimator=CustomSVC(decision_function_shape='ovr'),
random_state=1,
**params
).fit(X_train_sparse, y_train)
sparse_results = getattr(sparse_classifier, f)(X_test_sparse)
# Trained on dense format
dense_classifier = BaggingClassifier(
base_estimator=CustomSVC(decision_function_shape='ovr'),
random_state=1,
**params
).fit(X_train, y_train)
dense_results = getattr(dense_classifier, f)(X_test)
assert_array_equal(sparse_results, dense_results)
sparse_type = type(X_train_sparse)
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert all([t == sparse_type for t in types])
def test_regression():
# Check regression for various parameter settings.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data[:50],
boston.target[:50],
random_state=rng)
grid = ParameterGrid({"max_samples": [0.5, 1.0],
"max_features": [0.5, 1.0],
"bootstrap": [True, False],
"bootstrap_features": [True, False]})
for base_estimator in [None,
DummyRegressor(),
DecisionTreeRegressor(),
KNeighborsRegressor(),
SVR()]:
for params in grid:
BaggingRegressor(base_estimator=base_estimator,
random_state=rng,
**params).fit(X_train, y_train).predict(X_test)
def test_sparse_regression():
# Check regression for various parameter settings on sparse input.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data[:50],
boston.target[:50],
random_state=rng)
class CustomSVR(SVR):
"""SVC variant that records the nature of the training set"""
def fit(self, X, y):
super(CustomSVR, self).fit(X, y)
self.data_type_ = type(X)
return self
parameter_sets = [
{"max_samples": 0.5,
"max_features": 2,
"bootstrap": True,
"bootstrap_features": True},
{"max_samples": 1.0,
"max_features": 4,
"bootstrap": True,
"bootstrap_features": True},
{"max_features": 2,
"bootstrap": False,
"bootstrap_features": True},
{"max_samples": 0.5,
"bootstrap": True,
"bootstrap_features": False},
]
for sparse_format in [csc_matrix, csr_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
for params in parameter_sets:
# Trained on sparse format
sparse_classifier = BaggingRegressor(
base_estimator=CustomSVR(),
random_state=1,
**params
).fit(X_train_sparse, y_train)
sparse_results = sparse_classifier.predict(X_test_sparse)
# Trained on dense format
dense_results = BaggingRegressor(
base_estimator=CustomSVR(),
random_state=1,
**params
).fit(X_train, y_train).predict(X_test)
sparse_type = type(X_train_sparse)
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert_array_equal(sparse_results, dense_results)
assert all([t == sparse_type for t in types])
assert_array_equal(sparse_results, dense_results)
def test_bootstrap_samples():
# Test that bootstrapping samples generate non-perfect base estimators.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
base_estimator = DecisionTreeRegressor().fit(X_train, y_train)
# without bootstrap, all trees are perfect on the training set
ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
max_samples=1.0,
bootstrap=False,
random_state=rng).fit(X_train, y_train)
assert_equal(base_estimator.score(X_train, y_train),
ensemble.score(X_train, y_train))
# with bootstrap, trees are no longer perfect on the training set
ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
max_samples=1.0,
bootstrap=True,
random_state=rng).fit(X_train, y_train)
assert_greater(base_estimator.score(X_train, y_train),
ensemble.score(X_train, y_train))
def test_bootstrap_features():
# Test that bootstrapping features may generate duplicate features.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
max_features=1.0,
bootstrap_features=False,
random_state=rng).fit(X_train, y_train)
for features in ensemble.estimators_features_:
assert_equal(boston.data.shape[1], np.unique(features).shape[0])
ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
max_features=1.0,
bootstrap_features=True,
random_state=rng).fit(X_train, y_train)
for features in ensemble.estimators_features_:
assert_greater(boston.data.shape[1], np.unique(features).shape[0])
def test_probability():
# Predict probabilities.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
with np.errstate(divide="ignore", invalid="ignore"):
# Normal case
ensemble = BaggingClassifier(base_estimator=DecisionTreeClassifier(),
random_state=rng).fit(X_train, y_train)
assert_array_almost_equal(np.sum(ensemble.predict_proba(X_test),
axis=1),
np.ones(len(X_test)))
assert_array_almost_equal(ensemble.predict_proba(X_test),
np.exp(ensemble.predict_log_proba(X_test)))
# Degenerate case, where some classes are missing
ensemble = BaggingClassifier(base_estimator=LogisticRegression(),
random_state=rng,
max_samples=5).fit(X_train, y_train)
assert_array_almost_equal(np.sum(ensemble.predict_proba(X_test),
axis=1),
np.ones(len(X_test)))
assert_array_almost_equal(ensemble.predict_proba(X_test),
np.exp(ensemble.predict_log_proba(X_test)))
def test_oob_score_classification():
# Check that oob prediction is a good estimation of the generalization
# error.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
for base_estimator in [DecisionTreeClassifier(), SVC()]:
clf = BaggingClassifier(base_estimator=base_estimator,
n_estimators=100,
bootstrap=True,
oob_score=True,
random_state=rng).fit(X_train, y_train)
test_score = clf.score(X_test, y_test)
assert_less(abs(test_score - clf.oob_score_), 0.1)
# Test with few estimators
assert_warns(UserWarning,
BaggingClassifier(base_estimator=base_estimator,
n_estimators=1,
bootstrap=True,
oob_score=True,
random_state=rng).fit,
X_train,
y_train)
def test_oob_score_regression():
# Check that oob prediction is a good estimation of the generalization
# error.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
clf = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
n_estimators=50,
bootstrap=True,
oob_score=True,
random_state=rng).fit(X_train, y_train)
test_score = clf.score(X_test, y_test)
assert_less(abs(test_score - clf.oob_score_), 0.1)
# Test with few estimators
assert_warns(UserWarning,
BaggingRegressor(base_estimator=DecisionTreeRegressor(),
n_estimators=1,
bootstrap=True,
oob_score=True,
random_state=rng).fit,
X_train,
y_train)
def test_single_estimator():
# Check singleton ensembles.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
clf1 = BaggingRegressor(base_estimator=KNeighborsRegressor(),
n_estimators=1,
bootstrap=False,
bootstrap_features=False,
random_state=rng).fit(X_train, y_train)
clf2 = KNeighborsRegressor().fit(X_train, y_train)
assert_array_equal(clf1.predict(X_test), clf2.predict(X_test))
def test_error():
# Test that it gives proper exception on deficient input.
X, y = iris.data, iris.target
base = DecisionTreeClassifier()
# Test max_samples
assert_raises(ValueError,
BaggingClassifier(base, max_samples=-1).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_samples=0.0).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_samples=2.0).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_samples=1000).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_samples="foobar").fit, X, y)
# Test max_features
assert_raises(ValueError,
BaggingClassifier(base, max_features=-1).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_features=0.0).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_features=2.0).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_features=5).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_features="foobar").fit, X, y)
# Test support of decision_function
assert_false(hasattr(BaggingClassifier(base).fit(X, y), 'decision_function'))
def test_parallel_classification():
# Check parallel classification.
rng = check_random_state(0)
# Classification
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
ensemble = BaggingClassifier(DecisionTreeClassifier(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
# predict_proba
ensemble.set_params(n_jobs=1)
y1 = ensemble.predict_proba(X_test)
ensemble.set_params(n_jobs=2)
y2 = ensemble.predict_proba(X_test)
assert_array_almost_equal(y1, y2)
ensemble = BaggingClassifier(DecisionTreeClassifier(),
n_jobs=1,
random_state=0).fit(X_train, y_train)
y3 = ensemble.predict_proba(X_test)
assert_array_almost_equal(y1, y3)
# decision_function
ensemble = BaggingClassifier(SVC(decision_function_shape='ovr'),
n_jobs=3,
random_state=0).fit(X_train, y_train)
ensemble.set_params(n_jobs=1)
decisions1 = ensemble.decision_function(X_test)
ensemble.set_params(n_jobs=2)
decisions2 = ensemble.decision_function(X_test)
assert_array_almost_equal(decisions1, decisions2)
ensemble = BaggingClassifier(SVC(decision_function_shape='ovr'),
n_jobs=1,
random_state=0).fit(X_train, y_train)
decisions3 = ensemble.decision_function(X_test)
assert_array_almost_equal(decisions1, decisions3)
def test_parallel_regression():
# Check parallel regression.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
ensemble = BaggingRegressor(DecisionTreeRegressor(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
ensemble.set_params(n_jobs=1)
y1 = ensemble.predict(X_test)
ensemble.set_params(n_jobs=2)
y2 = ensemble.predict(X_test)
assert_array_almost_equal(y1, y2)
ensemble = BaggingRegressor(DecisionTreeRegressor(),
n_jobs=1,
random_state=0).fit(X_train, y_train)
y3 = ensemble.predict(X_test)
assert_array_almost_equal(y1, y3)
def test_gridsearch():
# Check that bagging ensembles can be grid-searched.
# Transform iris into a binary classification task
X, y = iris.data, iris.target
y[y == 2] = 1
# Grid search with scoring based on decision_function
parameters = {'n_estimators': (1, 2),
'base_estimator__C': (1, 2)}
GridSearchCV(BaggingClassifier(SVC()),
parameters,
scoring="roc_auc").fit(X, y)
def test_base_estimator():
# Check base_estimator and its default values.
rng = check_random_state(0)
# Classification
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
ensemble = BaggingClassifier(None,
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, DecisionTreeClassifier))
ensemble = BaggingClassifier(DecisionTreeClassifier(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, DecisionTreeClassifier))
ensemble = BaggingClassifier(Perceptron(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, Perceptron))
# Regression
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
ensemble = BaggingRegressor(None,
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, DecisionTreeRegressor))
ensemble = BaggingRegressor(DecisionTreeRegressor(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, DecisionTreeRegressor))
ensemble = BaggingRegressor(SVR(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, SVR))
def test_bagging_with_pipeline():
estimator = BaggingClassifier(make_pipeline(SelectKBest(k=1),
DecisionTreeClassifier()),
max_features=2)
estimator.fit(iris.data, iris.target)
assert_true(isinstance(estimator[0].steps[-1][1].random_state,
int))
class DummyZeroEstimator(BaseEstimator):
def fit(self, X, y):
self.classes_ = np.unique(y)
return self
def predict(self, X):
return self.classes_[np.zeros(X.shape[0], dtype=int)]
def test_bagging_sample_weight_unsupported_but_passed():
estimator = BaggingClassifier(DummyZeroEstimator())
rng = check_random_state(0)
estimator.fit(iris.data, iris.target).predict(iris.data)
assert_raises(ValueError, estimator.fit, iris.data, iris.target,
sample_weight=rng.randint(10, size=(iris.data.shape[0])))
def test_warm_start(random_state=42):
# Test if fitting incrementally with warm start gives a forest of the
# right size and the same results as a normal fit.
X, y = make_hastie_10_2(n_samples=20, random_state=1)
clf_ws = None
for n_estimators in [5, 10]:
if clf_ws is None:
clf_ws = BaggingClassifier(n_estimators=n_estimators,
random_state=random_state,
warm_start=True)
else:
clf_ws.set_params(n_estimators=n_estimators)
clf_ws.fit(X, y)
assert_equal(len(clf_ws), n_estimators)
clf_no_ws = BaggingClassifier(n_estimators=10, random_state=random_state,
warm_start=False)
clf_no_ws.fit(X, y)
assert_equal(set([tree.random_state for tree in clf_ws]),
set([tree.random_state for tree in clf_no_ws]))
def test_warm_start_smaller_n_estimators():
# Test that a warm-started second fit with smaller n_estimators raises an error.
X, y = make_hastie_10_2(n_samples=20, random_state=1)
clf = BaggingClassifier(n_estimators=5, warm_start=True)
clf.fit(X, y)
clf.set_params(n_estimators=4)
assert_raises(ValueError, clf.fit, X, y)
def test_warm_start_equal_n_estimators():
# Test that nothing happens when fitting without increasing n_estimators
X, y = make_hastie_10_2(n_samples=20, random_state=1)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=43)
clf = BaggingClassifier(n_estimators=5, warm_start=True, random_state=83)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
# modify X to nonsense values, this should not change anything
X_train += 1.
assert_warns_message(UserWarning,
"Warm-start fitting without increasing n_estimators does not",
clf.fit, X_train, y_train)
assert_array_equal(y_pred, clf.predict(X_test))
def test_warm_start_equivalence():
# warm started classifier with 5+5 estimators should be equivalent to
# one classifier with 10 estimators
X, y = make_hastie_10_2(n_samples=20, random_state=1)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=43)
clf_ws = BaggingClassifier(n_estimators=5, warm_start=True,
random_state=3141)
clf_ws.fit(X_train, y_train)
clf_ws.set_params(n_estimators=10)
clf_ws.fit(X_train, y_train)
y1 = clf_ws.predict(X_test)
clf = BaggingClassifier(n_estimators=10, warm_start=False,
random_state=3141)
clf.fit(X_train, y_train)
y2 = clf.predict(X_test)
assert_array_almost_equal(y1, y2)
def test_warm_start_with_oob_score_fails():
# Check using oob_score and warm_start simultaneously fails
X, y = make_hastie_10_2(n_samples=20, random_state=1)
clf = BaggingClassifier(n_estimators=5, warm_start=True, oob_score=True)
assert_raises(ValueError, clf.fit, X, y)
def test_oob_score_removed_on_warm_start():
X, y = make_hastie_10_2(n_samples=2000, random_state=1)
clf = BaggingClassifier(n_estimators=50, oob_score=True)
clf.fit(X, y)
clf.set_params(warm_start=True, oob_score=False, n_estimators=100)
clf.fit(X, y)
assert_raises(AttributeError, getattr, clf, "oob_score_")
def test_oob_score_consistency():
# Make sure OOB scores are identical when random_state, estimator, and
# training data are fixed and fitting is done twice
X, y = make_hastie_10_2(n_samples=200, random_state=1)
bagging = BaggingClassifier(KNeighborsClassifier(), max_samples=0.5,
max_features=0.5, oob_score=True,
random_state=1)
assert_equal(bagging.fit(X, y).oob_score_, bagging.fit(X, y).oob_score_)
def test_estimators_samples():
# Check that format of estimators_samples_ is correct and that results
# generated at fit time can be identically reproduced at a later time
# using data saved in object attributes.
X, y = make_hastie_10_2(n_samples=200, random_state=1)
bagging = BaggingClassifier(LogisticRegression(), max_samples=0.5,
max_features=0.5, random_state=1,
bootstrap=False)
bagging.fit(X, y)
# Get relevant attributes
estimators_samples = bagging.estimators_samples_
estimators_features = bagging.estimators_features_
estimators = bagging.estimators_
# Test for correct formatting
assert_equal(len(estimators_samples), len(estimators))
assert_equal(len(estimators_samples[0]), len(X))
assert_equal(estimators_samples[0].dtype.kind, 'b')
# Re-fit single estimator to test for consistent sampling
estimator_index = 0
estimator_samples = estimators_samples[estimator_index]
estimator_features = estimators_features[estimator_index]
estimator = estimators[estimator_index]
X_train = (X[estimator_samples])[:, estimator_features]
y_train = y[estimator_samples]
orig_coefs = estimator.coef_
estimator.fit(X_train, y_train)
new_coefs = estimator.coef_
assert_array_almost_equal(orig_coefs, new_coefs)
def test_max_samples_consistency():
# Make sure validated max_samples and original max_samples are identical
# when valid integer max_samples supplied by user
max_samples = 100
X, y = make_hastie_10_2(n_samples=2*max_samples, random_state=1)
bagging = BaggingClassifier(KNeighborsClassifier(),
max_samples=max_samples,
max_features=0.5, random_state=1)
bagging.fit(X, y)
assert_equal(bagging._max_samples, max_samples)
| bsd-3-clause |
hexiang6666/SASSI_converter | distorted_element_test/distorted_5_node/plot.py | 3 | 2504 | import numpy as np
import matplotlib.pyplot as plt
import h5py
def h52stressStrain(h5in_filename):
h5in=h5py.File(h5in_filename,"r")
outputs_all=h5in['/Model/Elements/Gauss_Outputs'][()]
stress = 1e-6*outputs_all[16 , 1:-1]
strain = outputs_all[4 , 1:-1]
return [stress, strain]
[stress_load, strain_load] = h52stressStrain("vm_2shearing.h5.feioutput")
[stress_unload, strain_unload] = h52stressStrain("vm_3unloading.h5.feioutput")
[stress_reload, strain_reload] = h52stressStrain("vm_4reloading.h5.feioutput")
stress = np.concatenate((stress_load,stress_unload,stress_reload))
strain = np.concatenate((strain_load,strain_unload,strain_reload))
def h52Dis(h5in_filename,initial_num):
h5in = h5py.File(h5in_filename, "r")
output_dis =h5in['/Model/Nodes/Generalized_Displacements'][()]
dis = output_dis[6,:] ## 6 corresponds to node ID 3
Num_time_step = len(dis)
time_step = list(range(initial_num, initial_num+Num_time_step))
return [dis, time_step]
[dis_confine, time_step_confine] = h52Dis("vm_1Confine.h5.feioutput",0)
[dis_load, time_step_load] = h52Dis("vm_2shearing.h5.feioutput", time_step_confine[-1]+1)
[dis_unload, time_step_unload] = h52Dis("vm_3unloading.h5.feioutput", time_step_load[-1]+1)
[dis_reload, time_step_reload] = h52Dis("vm_4reloading.h5.feioutput", time_step_unload[-1]+1)
dis = np.concatenate((dis_confine, dis_load, dis_unload, dis_reload))
time_step = np.concatenate((time_step_confine,time_step_load,time_step_unload,time_step_reload))
# print dis, time_step
if len(dis) == len(time_step):
plt.plot(time_step, dis, 'k', linewidth=3)
plt.xlabel('Time step', fontname='Arial', fontsize=42, labelpad=15) #fontweight='bold')
plt.ylabel('$Displacement$ [m]', fontname='Arial', fontsize=42) #fontweight='bold')
plt.show()
#============================== Written by Yuan to plot the stress strain relationship =============================================
# plt.plot(strain, stress, 'k', linewidth= 3)
# plt.xlabel('$\epsilon$', fontname='Arial', fontsize=42, labelpad=15) #fontweight='bold')
# plt.ylabel('$\sigma$ [MPa]', fontname='Arial', fontsize=42) #fontweight='bold')
# # plt.title('Material Behavior: Stress-Strain')
# plt.grid(False)
# plt.axis('on')
# plt.tick_params(axis='both', which='major', labelsize= 36)
# # plt.box()
# plt.savefig('standard_stress_strain.pdf')
# plt.show()
#=================================================================================================================================
| gpl-3.0 |
karstenw/nodebox-pyobjc | examples/Extended Application/matplotlib/examples/text_labels_and_annotations/fonts_demo.py | 1 | 2908 | """
==========
Fonts Demo
==========
Show how to set custom font properties.
For interactive users, you can also use kwargs to the text command,
which requires less typing. See examples/fonts_demo_kw.py
"""
from matplotlib.font_manager import FontProperties
import matplotlib.pyplot as plt
plt.subplot(111, facecolor='w')
font0 = FontProperties()
alignment = {'horizontalalignment': 'center', 'verticalalignment': 'baseline'}
# Show family options
families = ['serif', 'sans-serif', 'cursive', 'fantasy', 'monospace']
font1 = font0.copy()
font1.set_size('large')
t = plt.text(-0.8, 0.9, 'family', fontproperties=font1,
**alignment)
yp = [0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2]
for k, family in enumerate(families):
font = font0.copy()
font.set_family(family)
t = plt.text(-0.8, yp[k], family, fontproperties=font,
**alignment)
# Show style options
styles = ['normal', 'italic', 'oblique']
t = plt.text(-0.4, 0.9, 'style', fontproperties=font1,
**alignment)
for k, style in enumerate(styles):
font = font0.copy()
font.set_family('sans-serif')
font.set_style(style)
t = plt.text(-0.4, yp[k], style, fontproperties=font,
**alignment)
# Show variant options
variants = ['normal', 'small-caps']
t = plt.text(0.0, 0.9, 'variant', fontproperties=font1,
**alignment)
for k, variant in enumerate(variants):
font = font0.copy()
font.set_family('serif')
font.set_variant(variant)
t = plt.text(0.0, yp[k], variant, fontproperties=font,
**alignment)
# Show weight options
weights = ['light', 'normal', 'medium', 'semibold', 'bold', 'heavy', 'black']
t = plt.text(0.4, 0.9, 'weight', fontproperties=font1,
**alignment)
for k, weight in enumerate(weights):
font = font0.copy()
font.set_weight(weight)
t = plt.text(0.4, yp[k], weight, fontproperties=font,
**alignment)
# Show size options
sizes = ['xx-small', 'x-small', 'small', 'medium', 'large',
'x-large', 'xx-large']
t = plt.text(0.8, 0.9, 'size', fontproperties=font1,
**alignment)
for k, size in enumerate(sizes):
font = font0.copy()
font.set_size(size)
t = plt.text(0.8, yp[k], size, fontproperties=font,
**alignment)
# Show bold italic
font = font0.copy()
font.set_style('italic')
font.set_weight('bold')
font.set_size('x-small')
t = plt.text(-0.4, 0.1, 'bold italic', fontproperties=font,
**alignment)
font = font0.copy()
font.set_style('italic')
font.set_weight('bold')
font.set_size('medium')
t = plt.text(-0.4, 0.2, 'bold italic', fontproperties=font,
**alignment)
font = font0.copy()
font.set_style('italic')
font.set_weight('bold')
font.set_size('x-large')
t = plt.text(-0.4, 0.3, 'bold italic', fontproperties=font,
**alignment)
plt.axis([-1, 1, 0, 1])
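# Editor's addition (hedged): the kwargs form mentioned in the docstring.
# The same font properties can be passed straight to plt.text without building
# a FontProperties object (cf. fonts_demo_kw.py); the position is chosen to
# avoid the labels drawn above.
plt.text(0.0, 0.1, 'bold italic (kwargs)', family='sans-serif', style='italic',
         weight='bold', size='small', **alignment)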
plt.show()
| mit |
larsmans/scikit-learn | sklearn/datasets/twenty_newsgroups.py | 26 | 13430 | """Caching loader for the 20 newsgroups text classification dataset
The description of the dataset is available on the official website at:
http://people.csail.mit.edu/jrennie/20Newsgroups/
Quoting the introduction:
The 20 Newsgroups data set is a collection of approximately 20,000
newsgroup documents, partitioned (nearly) evenly across 20 different
newsgroups. To the best of my knowledge, it was originally collected
by Ken Lang, probably for his Newsweeder: Learning to filter netnews
paper, though he does not explicitly mention this collection. The 20
newsgroups collection has become a popular data set for experiments
in text applications of machine learning techniques, such as text
classification and text clustering.
This dataset loader will download the recommended "by date" variant of the
dataset, which features a point-in-time split between the train and
test sets. The compressed archive is about 14 MB; once
uncompressed, the train set is 52 MB and the test set is 34 MB.
The data is downloaded, extracted and cached in the '~/scikit_learn_data'
folder.
The `fetch_20newsgroups` function will not vectorize the data into numpy
arrays but the dataset lists the filenames of the posts and their categories
as target labels.
The `fetch_20newsgroups_vectorized` function will in addition do a simple tf-idf
vectorization step.
"""
# Copyright (c) 2011 Olivier Grisel <[email protected]>
# License: BSD 3 clause
import os
import logging
import tarfile
import pickle
import shutil
import re
import codecs
import numpy as np
import scipy.sparse as sp
from .base import get_data_home
from .base import Bunch
from .base import load_files
from ..utils import check_random_state
from ..feature_extraction.text import CountVectorizer
from ..preprocessing import normalize
from ..externals import joblib, six
if six.PY3:
from urllib.request import urlopen
else:
from urllib2 import urlopen
logger = logging.getLogger(__name__)
URL = ("http://people.csail.mit.edu/jrennie/"
"20Newsgroups/20news-bydate.tar.gz")
ARCHIVE_NAME = "20news-bydate.tar.gz"
CACHE_NAME = "20news-bydate.pkz"
TRAIN_FOLDER = "20news-bydate-train"
TEST_FOLDER = "20news-bydate-test"
def download_20newsgroups(target_dir, cache_path):
"""Download the 20 newsgroups data and stored it as a zipped pickle."""
archive_path = os.path.join(target_dir, ARCHIVE_NAME)
train_path = os.path.join(target_dir, TRAIN_FOLDER)
test_path = os.path.join(target_dir, TEST_FOLDER)
if not os.path.exists(target_dir):
os.makedirs(target_dir)
if os.path.exists(archive_path):
# Download is not complete as the .tar.gz file is removed after
# download.
logger.warn("Download was incomplete, downloading again.")
os.remove(archive_path)
logger.warn("Downloading dataset from %s (14 MB)", URL)
opener = urlopen(URL)
open(archive_path, 'wb').write(opener.read())
logger.info("Decompressing %s", archive_path)
tarfile.open(archive_path, "r:gz").extractall(path=target_dir)
os.remove(archive_path)
# Store a zipped pickle
cache = dict(train=load_files(train_path, encoding='latin1'),
test=load_files(test_path, encoding='latin1'))
compressed_content = codecs.encode(pickle.dumps(cache), 'zlib_codec')
open(cache_path, 'wb').write(compressed_content)
shutil.rmtree(target_dir)
return cache
def strip_newsgroup_header(text):
"""
Given text in "news" format, strip the headers, by removing everything
before the first blank line.
"""
_before, _blankline, after = text.partition('\n\n')
return after
_QUOTE_RE = re.compile(r'(writes in|writes:|wrote:|says:|said:'
r'|^In article|^Quoted from|^\||^>)')
def strip_newsgroup_quoting(text):
"""
Given text in "news" format, strip lines beginning with the quote
characters > or |, plus lines that often introduce a quoted section
(for example, because they contain the string 'writes:'.)
"""
good_lines = [line for line in text.split('\n')
if not _QUOTE_RE.search(line)]
return '\n'.join(good_lines)
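# Editor's note (hedged illustration): the strip_* helpers are plain text
# filters and can be exercised directly, e.g.::
#
#     >>> strip_newsgroup_quoting("line one\n> quoted reply\nline two")
#     'line one\nline two'
#
# (The quoted line is dropped because it matches _QUOTE_RE above.)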
def strip_newsgroup_footer(text):
"""
Given text in "news" format, attempt to remove a signature block.
As a rough heuristic, we assume that signatures are set apart by either
a blank line or a line made of hyphens, and that it is the last such line
in the file (disregarding blank lines at the end).
"""
lines = text.strip().split('\n')
for line_num in range(len(lines) - 1, -1, -1):
line = lines[line_num]
if line.strip().strip('-') == '':
break
if line_num > 0:
return '\n'.join(lines[:line_num])
else:
return text
def fetch_20newsgroups(data_home=None, subset='train', categories=None,
shuffle=True, random_state=42,
remove=(),
download_if_missing=True):
"""Load the filenames and data from the 20 newsgroups dataset.
Parameters
----------
subset: 'train' or 'test', 'all', optional
Select the dataset to load: 'train' for the training set, 'test'
for the test set, 'all' for both, with shuffled ordering.
data_home: optional, default: None
Specify a download and cache folder for the datasets. If None,
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
categories: None or collection of string or unicode
If None (default), load all the categories.
If not None, list of category names to load (other categories
ignored).
shuffle: bool, optional
Whether or not to shuffle the data: might be important for models that
make the assumption that the samples are independent and identically
distributed (i.i.d.), such as stochastic gradient descent.
random_state: numpy random number generator or seed integer
Used to shuffle the dataset.
download_if_missing: optional, True by default
If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
remove: tuple
May contain any subset of ('headers', 'footers', 'quotes'). Each of
these are kinds of text that will be detected and removed from the
newsgroup posts, preventing classifiers from overfitting on
metadata.
'headers' removes newsgroup headers, 'footers' removes blocks at the
ends of posts that look like signatures, and 'quotes' removes lines
that appear to be quoting another post.
'headers' follows an exact standard; the other filters are not always
correct.
"""
data_home = get_data_home(data_home=data_home)
cache_path = os.path.join(data_home, CACHE_NAME)
twenty_home = os.path.join(data_home, "20news_home")
cache = None
if os.path.exists(cache_path):
try:
with open(cache_path, 'rb') as f:
compressed_content = f.read()
uncompressed_content = codecs.decode(
compressed_content, 'zlib_codec')
cache = pickle.loads(uncompressed_content)
except Exception as e:
print(80 * '_')
print('Cache loading failed')
print(80 * '_')
print(e)
if cache is None:
if download_if_missing:
cache = download_20newsgroups(target_dir=twenty_home,
cache_path=cache_path)
else:
raise IOError('20Newsgroups dataset not found')
if subset in ('train', 'test'):
data = cache[subset]
elif subset == 'all':
data_lst = list()
target = list()
filenames = list()
for subset in ('train', 'test'):
data = cache[subset]
data_lst.extend(data.data)
target.extend(data.target)
filenames.extend(data.filenames)
data.data = data_lst
data.target = np.array(target)
data.filenames = np.array(filenames)
data.description = 'the 20 newsgroups by date dataset'
else:
raise ValueError(
"subset can only be 'train', 'test' or 'all', got '%s'" % subset)
if 'headers' in remove:
data.data = [strip_newsgroup_header(text) for text in data.data]
if 'footers' in remove:
data.data = [strip_newsgroup_footer(text) for text in data.data]
if 'quotes' in remove:
data.data = [strip_newsgroup_quoting(text) for text in data.data]
if categories is not None:
labels = [(data.target_names.index(cat), cat) for cat in categories]
# Sort the categories to have the ordering of the labels
labels.sort()
labels, categories = zip(*labels)
mask = np.in1d(data.target, labels)
data.filenames = data.filenames[mask]
data.target = data.target[mask]
# searchsorted to have continuous labels
data.target = np.searchsorted(labels, data.target)
data.target_names = list(categories)
# Use an object array to shuffle: avoids memory copy
data_lst = np.array(data.data, dtype=object)
data_lst = data_lst[mask]
data.data = data_lst.tolist()
if shuffle:
random_state = check_random_state(random_state)
indices = np.arange(data.target.shape[0])
random_state.shuffle(indices)
data.filenames = data.filenames[indices]
data.target = data.target[indices]
# Use an object array to shuffle: avoids memory copy
data_lst = np.array(data.data, dtype=object)
data_lst = data_lst[indices]
data.data = data_lst.tolist()
return data
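# Editor's note (hedged usage sketch, not executed at import time)::
#
#     from sklearn.datasets import fetch_20newsgroups
#     train = fetch_20newsgroups(subset='train',
#                                categories=['sci.space', 'rec.autos'],
#                                remove=('headers', 'footers', 'quotes'))
#     print(len(train.data), train.target_names)
#
# 'sci.space' and 'rec.autos' are two of the standard 20 newsgroup names; any
# other valid newsgroup names work the same way.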
def fetch_20newsgroups_vectorized(subset="train", remove=(), data_home=None):
"""Load the 20 newsgroups dataset and transform it into tf-idf vectors.
This is a convenience function; the tf-idf transformation is done using the
default settings for `sklearn.feature_extraction.text.Vectorizer`. For more
advanced usage (stopword filtering, n-gram extraction, etc.), combine
fetch_20newsgroups with a custom `Vectorizer` or `CountVectorizer`.
Parameters
----------
subset: 'train' or 'test', 'all', optional
Select the dataset to load: 'train' for the training set, 'test'
for the test set, 'all' for both, with shuffled ordering.
data_home: optional, default: None
Specify a download and cache folder for the datasets. If None,
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
remove: tuple
May contain any subset of ('headers', 'footers', 'quotes'). Each of
these are kinds of text that will be detected and removed from the
newsgroup posts, preventing classifiers from overfitting on
metadata.
'headers' removes newsgroup headers, 'footers' removes blocks at the
ends of posts that look like signatures, and 'quotes' removes lines
that appear to be quoting another post.
Returns
-------
bunch : Bunch object
bunch.data: sparse matrix, shape [n_samples, n_features]
bunch.target: array, shape [n_samples]
bunch.target_names: list, length [n_classes]
"""
data_home = get_data_home(data_home=data_home)
filebase = '20newsgroup_vectorized'
if remove:
filebase += 'remove-' + ('-'.join(remove))
target_file = os.path.join(data_home, filebase + ".pk")
# we shuffle but use a fixed seed for the memoization
data_train = fetch_20newsgroups(data_home=data_home,
subset='train',
categories=None,
shuffle=True,
random_state=12,
remove=remove)
data_test = fetch_20newsgroups(data_home=data_home,
subset='test',
categories=None,
shuffle=True,
random_state=12,
remove=remove)
if os.path.exists(target_file):
X_train, X_test = joblib.load(target_file)
else:
vectorizer = CountVectorizer(dtype=np.int16)
X_train = vectorizer.fit_transform(data_train.data).tocsr()
X_test = vectorizer.transform(data_test.data).tocsr()
joblib.dump((X_train, X_test), target_file, compress=9)
# the data is stored as int16 for compactness
# but normalize needs floats
X_train = X_train.astype(np.float64)
X_test = X_test.astype(np.float64)
normalize(X_train, copy=False)
normalize(X_test, copy=False)
target_names = data_train.target_names
if subset == "train":
data = X_train
target = data_train.target
elif subset == "test":
data = X_test
target = data_test.target
elif subset == "all":
data = sp.vstack((X_train, X_test)).tocsr()
target = np.concatenate((data_train.target, data_test.target))
else:
raise ValueError("%r is not a valid subset: should be one of "
"['train', 'test', 'all']" % subset)
return Bunch(data=data, target=target, target_names=target_names)
| bsd-3-clause |
melphi/algobox | python/algobox/src/algobox/analysis/main.py | 1 | 3337 | from datetime import datetime
from algobox.analysis.plot import OverviewPlotBuilder
from algobox.client.generated.api.rest import ApiException
class OverviewWrapper(object):
"""Contains overview data and plot."""
def __init__(self, plot, prices, opening_5min_bar, previous_day_bar):
"""
Arguments:
plot (matplotlib.figure.Figure): The plot object
prices (numpy.ndarray): The prices
opening_5min_bar (algobox.price.PriceBar)
previous_day_bar (algobox.price.PriceBar)
"""
self.plot = plot
self.prices = prices
self.opening_5min_bar = opening_5min_bar
self.previous_day_bar = previous_day_bar
class QuickAnalysis(object):
def __init__(self, algobox_client):
"""
Args:
algobox_client (algobox.client.AlgoboxClient): The algobox client
"""
self._prices_client = algobox_client.prices_client
self._instruments_client = algobox_client.instruments_client
self._indicators_client = algobox_client.indicators_client
def _get_ohlc_if_exists(self, instrument_id, from_timestamp, to_timestamp):
"""
Args:
instrument_id (str)
from_timestamp (long)
to_timestamp (long)
Returns:
algobox.price.PriceBar: The price bar or None
"""
try:
return self._indicators_client.get_ohlc(
instrument_id=instrument_id,
from_timestamp=from_timestamp,
to_timestamp=to_timestamp)
except ApiException:
return None
def day_overview(self, *, instrument_id, date):
"""Returns an overview (plot and data) of the day.
Args:
instrument_id (str): The instrument id.
date (datetime): Any date of the day.
Returns:
algobox.analysis.OverviewWrapper: The overview object. Returns
None if no data was found.
"""
assert instrument_id
assert type(date) == datetime
timestamp_utc = int(date.timestamp() * 1000.0)
market_hours = self._instruments_client.get_market_hours(
instrument_id=instrument_id, timestamp=timestamp_utc)
if market_hours is None or market_hours.market_open is None:
return None
prices = self._prices_client.get_price_ticks_ndarray(
instrument_id, market_hours.market_open, market_hours.market_close)
if prices is None or prices.size <= 0:
return None
opening_range_bar = self._get_ohlc_if_exists(
instrument_id, market_hours.orb5min_open,
market_hours.orb5min_close)
previous_day_bar = self._get_ohlc_if_exists(
instrument_id, market_hours.previous_market_open,
market_hours.previous_market_close)
plot = OverviewPlotBuilder().with_prices(prices) \
.with_titles([instrument_id, date.strftime('%a %d-%m-%y')])\
.with_opening_range_bar(opening_range_bar) \
.with_previous_day_bar(previous_day_bar) \
.build()
return OverviewWrapper(prices=prices, plot=plot,
opening_5min_bar=opening_range_bar,
previous_day_bar=previous_day_bar)
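# Editor's note (hedged usage sketch, not executed at import time).  The client
# construction and the instrument id below are assumptions for illustration only::
#
#     from datetime import datetime
#     from algobox.client import AlgoboxClient   # assumed import path
#
#     analysis = QuickAnalysis(AlgoboxClient())
#     overview = analysis.day_overview(instrument_id='EURUSD',
#                                      date=datetime.utcnow())
#     if overview is not None:
#         overview.plot.savefig('overview.png')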
| apache-2.0 |
miloharper/neural-network-animation | matplotlib/tests/test_tightlayout.py | 9 | 4316 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import warnings
import numpy as np
from matplotlib.testing.decorators import image_comparison, knownfailureif
import matplotlib.pyplot as plt
from nose.tools import assert_raises
from numpy.testing import assert_array_equal
def example_plot(ax, fontsize=12):
ax.plot([1, 2])
ax.locator_params(nbins=3)
ax.set_xlabel('x-label', fontsize=fontsize)
ax.set_ylabel('y-label', fontsize=fontsize)
ax.set_title('Title', fontsize=fontsize)
@image_comparison(baseline_images=['tight_layout1'])
def test_tight_layout1():
'Test tight_layout for a single subplot'
fig = plt.figure()
ax = fig.add_subplot(111)
example_plot(ax, fontsize=24)
plt.tight_layout()
@image_comparison(baseline_images=['tight_layout2'])
def test_tight_layout2():
'Test tight_layout for multiple subplots'
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(nrows=2, ncols=2)
example_plot(ax1)
example_plot(ax2)
example_plot(ax3)
example_plot(ax4)
plt.tight_layout()
@image_comparison(baseline_images=['tight_layout3'])
def test_tight_layout3():
'Test tight_layout for multiple subplots'
fig = plt.figure()
ax1 = plt.subplot(221)
ax2 = plt.subplot(223)
ax3 = plt.subplot(122)
example_plot(ax1)
example_plot(ax2)
example_plot(ax3)
plt.tight_layout()
@image_comparison(baseline_images=['tight_layout4'],
freetype_version=('2.4.5', '2.4.9'))
def test_tight_layout4():
'Test tight_layout for subplot2grid'
fig = plt.figure()
ax1 = plt.subplot2grid((3, 3), (0, 0))
ax2 = plt.subplot2grid((3, 3), (0, 1), colspan=2)
ax3 = plt.subplot2grid((3, 3), (1, 0), colspan=2, rowspan=2)
ax4 = plt.subplot2grid((3, 3), (1, 2), rowspan=2)
example_plot(ax1)
example_plot(ax2)
example_plot(ax3)
example_plot(ax4)
plt.tight_layout()
@image_comparison(baseline_images=['tight_layout5'])
def test_tight_layout5():
'Test tight_layout for image'
fig = plt.figure()
ax = plt.subplot(111)
arr = np.arange(100).reshape((10, 10))
ax.imshow(arr, interpolation="none")
plt.tight_layout()
@image_comparison(baseline_images=['tight_layout6'])
def test_tight_layout6():
'Test tight_layout for gridspec'
# This raises warnings since tight layout cannot
# do this fully automatically. But the test is
# correct since the layout is manually edited
with warnings.catch_warnings():
warnings.simplefilter("ignore", UserWarning)
fig = plt.figure()
import matplotlib.gridspec as gridspec
gs1 = gridspec.GridSpec(2, 1)
ax1 = fig.add_subplot(gs1[0])
ax2 = fig.add_subplot(gs1[1])
example_plot(ax1)
example_plot(ax2)
gs1.tight_layout(fig, rect=[0, 0, 0.5, 1])
gs2 = gridspec.GridSpec(3, 1)
for ss in gs2:
ax = fig.add_subplot(ss)
example_plot(ax)
ax.set_title("")
ax.set_xlabel("")
ax.set_xlabel("x-label", fontsize=12)
gs2.tight_layout(fig, rect=[0.5, 0, 1, 1], h_pad=0.45)
top = min(gs1.top, gs2.top)
bottom = max(gs1.bottom, gs2.bottom)
gs1.tight_layout(fig, rect=[None, 0 + (bottom-gs1.bottom),
0.5, 1 - (gs1.top-top)])
gs2.tight_layout(fig, rect=[0.5, 0 + (bottom-gs2.bottom),
None, 1 - (gs2.top-top)],
h_pad=0.45)
@image_comparison(baseline_images=['tight_layout7'])
def test_tight_layout7():
# tight layout with left and right titles
fig = plt.figure()
fontsize = 24
ax = fig.add_subplot(111)
ax.plot([1, 2])
ax.locator_params(nbins=3)
ax.set_xlabel('x-label', fontsize=fontsize)
ax.set_ylabel('y-label', fontsize=fontsize)
ax.set_title('Left Title', loc='left', fontsize=fontsize)
ax.set_title('Right Title', loc='right', fontsize=fontsize)
plt.tight_layout()
@image_comparison(baseline_images=['tight_layout8'])
def test_tight_layout8():
'Test automatic use of tight_layout'
fig = plt.figure()
fig.set_tight_layout({'pad': .1})
ax = fig.add_subplot(111)
example_plot(ax, fontsize=24)
| mit |
kaushik94/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers | Chapter2_MorePyMC/separation_plot.py | 6 | 1471 | # separation plot
# Author: Cameron Davidson-Pilon,2013
# see http://mdwardlab.com/sites/default/files/GreenhillWardSacks.pdf
import matplotlib.pyplot as plt
import numpy as np
def separation_plot( p, y, **kwargs ):
"""
This function creates a separation plot for logistic and probit classification.
See http://mdwardlab.com/sites/default/files/GreenhillWardSacks.pdf
p: The proportions/probabilities; can be an n x M matrix representing M models.
y: the 0-1 response variables.
"""
assert p.shape[0] == y.shape[0], "p.shape[0] != y.shape[0]"
n = p.shape[0]
try:
M = p.shape[1]
except:
p = p.reshape( n, 1 )
M = p.shape[1]
#colors = np.array( ["#fdf2db", "#e44a32"] )
colors_bmh = np.array( ["#eeeeee", "#348ABD"] )
fig = plt.figure( )#figsize = (8, 1.3*M) )
for i in range(M):
ax = fig.add_subplot(M, 1, i+1)
ix = np.argsort( p[:,i] )
#plot the different bars
bars = ax.bar( np.arange(n), np.ones(n), width=1.,
color = colors_bmh[ y[ix].astype(int) ],
edgecolor = 'none')
ax.plot( np.arange(n), p[ix,i], "k",
linewidth = 1.,drawstyle="steps-post" )
#create expected value bar.
ax.vlines( [(1-p[ix,i]).sum()], [0], [1] )
#ax.grid(False)
#ax.axis('off')
plt.xlim( 0, n-1)
plt.tight_layout()
return
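# Editor's addition: a minimal, hedged demo.  The synthetic probabilities and
# outcomes below are placeholders; run this file directly to see the plot.
if __name__ == "__main__":
    np.random.seed(0)
    p_demo = np.random.uniform(0, 1, size=200)
    y_demo = np.random.binomial(1, p_demo)
    separation_plot(p_demo, y_demo)
    plt.show()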
| mit |
jeromecc/docker-fhio-yakkety | noVNC/utils/json2graph.py | 46 | 6674 | #!/usr/bin/env python
'''
Use matplotlib to generate performance charts
Copyright 2011 Joel Martin
Licensed under MPL-2.0 (see docs/LICENSE.MPL-2.0)
'''
# a bar plot with errorbars
import sys, json, pprint
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
def usage():
print "%s json_file level1 level2 level3 [legend_height]\n\n" % sys.argv[0]
print "Description:\n"
print "level1, level2, and level3 are one each of the following:\n";
print " select=ITEM - select only ITEM at this level";
print " bar - each item on this level becomes a graph bar";
print " group - items on this level become groups of bars";
print "\n";
print "json_file is a file containing json data in the following format:\n"
print ' {';
print ' "conf": {';
print ' "order_l1": [';
print ' "level1_label1",';
print ' "level1_label2",';
print ' ...';
print ' ],';
print ' "order_l2": [';
print ' "level2_label1",';
print ' "level2_label2",';
print ' ...';
print ' ],';
print ' "order_l3": [';
print ' "level3_label1",';
print ' "level3_label2",';
print ' ...';
print ' ]';
print ' },';
print ' "stats": {';
print ' "level1_label1": {';
print ' "level2_label1": {';
print ' "level3_label1": [val1, val2, val3],';
print ' "level3_label2": [val1, val2, val3],';
print ' ...';
print ' },';
print ' "level2_label2": {';
print ' ...';
print ' },';
print ' },';
print ' "level1_label2": {';
print ' ...';
print ' },';
print ' ...';
print ' },';
print ' }';
sys.exit(2)
def error(msg):
print msg
sys.exit(1)
#colors = ['#ff0000', '#0863e9', '#00f200', '#ffa100',
# '#800000', '#805100', '#013075', '#007900']
colors = ['#ff0000', '#00ff00', '#0000ff',
'#dddd00', '#dd00dd', '#00dddd',
'#dd6622', '#dd2266', '#66dd22',
'#8844dd', '#44dd88', '#4488dd']
if len(sys.argv) < 5:
usage()
filename = sys.argv[1]
L1 = sys.argv[2]
L2 = sys.argv[3]
L3 = sys.argv[4]
if len(sys.argv) > 5:
legendHeight = float(sys.argv[5])
else:
legendHeight = 0.75
# Load the JSON data from the file
data = json.loads(file(filename).read())
conf = data['conf']
stats = data['stats']
# Sanity check data hierarchy
if len(conf['order_l1']) != len(stats.keys()):
error("conf.order_l1 does not match stats level 1")
for l1 in stats.keys():
if len(conf['order_l2']) != len(stats[l1].keys()):
error("conf.order_l2 does not match stats level 2 for %s" % l1)
if conf['order_l1'].count(l1) < 1:
error("%s not found in conf.order_l1" % l1)
for l2 in stats[l1].keys():
if len(conf['order_l3']) != len(stats[l1][l2].keys()):
error("conf.order_l3 does not match stats level 3")
if conf['order_l2'].count(l2) < 1:
error("%s not found in conf.order_l2" % l2)
for l3 in stats[l1][l2].keys():
if conf['order_l3'].count(l3) < 1:
error("%s not found in conf.order_l3" % l3)
#
# Generate the data based on the level specifications
#
bar_labels = None
group_labels = None
bar_vals = []
bar_sdvs = []
if L3.startswith("select="):
select_label = l3 = L3.split("=")[1]
bar_labels = conf['order_l1']
group_labels = conf['order_l2']
bar_vals = [[0]*len(group_labels) for i in bar_labels]
bar_sdvs = [[0]*len(group_labels) for i in bar_labels]
for b in range(len(bar_labels)):
l1 = bar_labels[b]
for g in range(len(group_labels)):
l2 = group_labels[g]
bar_vals[b][g] = np.mean(stats[l1][l2][l3])
bar_sdvs[b][g] = np.std(stats[l1][l2][l3])
elif L2.startswith("select="):
select_label = l2 = L2.split("=")[1]
bar_labels = conf['order_l1']
group_labels = conf['order_l3']
bar_vals = [[0]*len(group_labels) for i in bar_labels]
bar_sdvs = [[0]*len(group_labels) for i in bar_labels]
for b in range(len(bar_labels)):
l1 = bar_labels[b]
for g in range(len(group_labels)):
l3 = group_labels[g]
bar_vals[b][g] = np.mean(stats[l1][l2][l3])
bar_sdvs[b][g] = np.std(stats[l1][l2][l3])
elif L1.startswith("select="):
select_label = l1 = L1.split("=")[1]
bar_labels = conf['order_l2']
group_labels = conf['order_l3']
bar_vals = [[0]*len(group_labels) for i in bar_labels]
bar_sdvs = [[0]*len(group_labels) for i in bar_labels]
for b in range(len(bar_labels)):
l2 = bar_labels[b]
for g in range(len(group_labels)):
l3 = group_labels[g]
bar_vals[b][g] = np.mean(stats[l1][l2][l3])
bar_sdvs[b][g] = np.std(stats[l1][l2][l3])
else:
usage()
# If group is before bar then flip (zip) the data
if [L1, L2, L3].index("group") < [L1, L2, L3].index("bar"):
bar_labels, group_labels = group_labels, bar_labels
bar_vals = zip(*bar_vals)
bar_sdvs = zip(*bar_sdvs)
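    # For example (assuming 2 bars x 3 groups): bar_vals == [[1, 2, 3], [4, 5, 6]]
    # becomes [(1, 4), (2, 5), (3, 6)] after zip(*bar_vals) (the transpose), so the
    # roles of bars and groups are swapped before plotting.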
print "bar_vals:", bar_vals
#
# Now render the bar graph
#
ind = np.arange(len(group_labels)) # the x locations for the groups
width = 0.8 * (1.0/len(bar_labels)) # the width of the bars
fig = plt.figure(figsize=(10,6), dpi=80)
plot = fig.add_subplot(1, 1, 1)
rects = []
for i in range(len(bar_vals)):
rects.append(plot.bar(ind+width*i, bar_vals[i], width, color=colors[i],
yerr=bar_sdvs[i], align='center'))
# add labels, title and axis tick labels
plot.set_ylabel('Milliseconds (less is better)')
plot.set_title("Javascript array test: %s" % select_label)
plot.set_xticks(ind+width)
plot.set_xticklabels( group_labels )
fontP = FontProperties()
fontP.set_size('small')
plot.legend( [r[0] for r in rects], bar_labels, prop=fontP,
loc = 'center right', bbox_to_anchor = (1.0, legendHeight))
def autolabel(rects):
# attach some text labels
for rect in rects:
height = rect.get_height()
if np.isnan(height):
height = 0.0
plot.text(rect.get_x()+rect.get_width()/2., height+20, '%d'%int(height),
ha='center', va='bottom', size='7')
for rect in rects:
autolabel(rect)
# Adjust axis sizes
axis = list(plot.axis())
axis[0] = -width # Make sure left side has enough for bar
#axis[1] = axis[1] * 1.20 # Add 20% to the right to make sure it fits
axis[2] = 0 # Make y-axis start at 0
axis[3] = axis[3] * 1.10 # Add 10% to the top
plot.axis(axis)
plt.show()
| apache-2.0 |
sh1ng/imba | lgbm_submition.py | 1 | 14927 | import gc
import pandas as pd
import numpy as np
import os
import arboretum
import lightgbm as lgb
import json
import sklearn.metrics
from sklearn.metrics import f1_score, roc_auc_score
from sklearn.model_selection import train_test_split
from scipy.sparse import dok_matrix, coo_matrix
from sklearn.utils.multiclass import type_of_target
def fscore(true_value_matrix, prediction, order_index, product_index, rows, cols, threshold=[0.5]):
prediction_value_matrix = coo_matrix((prediction, (order_index, product_index)), shape=(rows, cols), dtype=np.float32)
# prediction_value_matrix.eliminate_zeros()
return list(map(lambda x: f1_score(true_value_matrix, prediction_value_matrix > x, average='samples'), threshold))
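# Illustrative call (the names below are placeholders, not defined in this module):
# given per-(order, product) probabilities `pred` aligned with integer ids
# `order_idx`/`product_idx` and a sparse 0/1 ground-truth matrix `truth` of shape
# (n_orders, n_products),
#     fscore(truth, pred, order_idx, product_idx, n_orders, n_products,
#            threshold=[0.2, 0.5])
# returns the mean per-order F1 score at each of the two thresholds.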
if __name__ == '__main__':
path = "data"
aisles = pd.read_csv(os.path.join(path, "aisles.csv"), dtype={'aisle_id': np.uint8, 'aisle': 'category'})
departments = pd.read_csv(os.path.join(path, "departments.csv"),
dtype={'department_id': np.uint8, 'department': 'category'})
order_prior = pd.read_csv(os.path.join(path, "order_products__prior.csv"), dtype={'order_id': np.uint32,
'product_id': np.uint16,
'add_to_cart_order': np.uint8,
'reordered': bool})
order_train = pd.read_csv(os.path.join(path, "order_products__train.csv"), dtype={'order_id': np.uint32,
'product_id': np.uint16,
'add_to_cart_order': np.uint8,
'reordered': bool})
orders = pd.read_csv(os.path.join(path, "orders.csv"), dtype={'order_id': np.uint32,
'user_id': np.uint32,
'eval_set': 'category',
'order_number': np.uint8,
'order_dow': np.uint8,
'order_hour_of_day': np.uint8
})
products = pd.read_csv(os.path.join(path, "products.csv"), dtype={'product_id': np.uint16,
'aisle_id': np.uint8,
'department_id': np.uint8})
product_embeddings = pd.read_pickle('data/product_embeddings.pkl')
embedings = list(range(32))
product_embeddings = product_embeddings[embedings + ['product_id']]
order_train = pd.read_pickle(os.path.join(path, 'chunk_0.pkl'))
order_test = order_train.loc[order_train.eval_set == "test", ['order_id', 'product_id']]
order_train = order_train.loc[order_train.eval_set == "train", ['order_id', 'product_id', 'reordered']]
product_periods = pd.read_pickle(os.path.join(path, 'product_periods_stat.pkl')).fillna(9999)
print(order_train.columns)
###########################
prob = pd.merge(order_prior, orders, on='order_id')
print(prob.columns)
prob = prob.groupby(['product_id', 'user_id'])\
.agg({'reordered':'sum', 'user_id': 'size'})
print(prob.columns)
prob.rename(columns={'sum': 'reordered',
'user_id': 'total'}, inplace=True)
prob.reordered = (prob.reordered > 0).astype(np.float32)
prob.total = (prob.total > 0).astype(np.float32)
prob['reorder_prob'] = prob.reordered / prob.total
prob = prob.groupby('product_id').agg({'reorder_prob': 'mean'}).rename(columns={'mean': 'reorder_prob'})\
.reset_index()
prod_stat = order_prior.groupby('product_id').agg({'reordered': ['sum', 'size'],
'add_to_cart_order':'mean'})
prod_stat.columns = prod_stat.columns.levels[1]
prod_stat.rename(columns={'sum':'prod_reorders',
'size':'prod_orders',
'mean': 'prod_add_to_card_mean'}, inplace=True)
prod_stat.reset_index(inplace=True)
prod_stat['reorder_ration'] = prod_stat['prod_reorders'] / prod_stat['prod_orders']
prod_stat = pd.merge(prod_stat, prob, on='product_id')
# prod_stat.drop(['prod_reorders'], axis=1, inplace=True)
user_stat = orders.loc[orders.eval_set == 'prior', :].groupby('user_id').agg({'order_number': 'max',
'days_since_prior_order': ['sum',
'mean',
'median']})
user_stat.columns = user_stat.columns.droplevel(0)
user_stat.rename(columns={'max': 'user_orders',
'sum': 'user_order_starts_at',
'mean': 'user_mean_days_since_prior',
'median': 'user_median_days_since_prior'}, inplace=True)
user_stat.reset_index(inplace=True)
orders_products = pd.merge(orders, order_prior, on="order_id")
user_order_stat = orders_products.groupby('user_id').agg({'user_id': 'size',
'reordered': 'sum',
"product_id": lambda x: x.nunique()})
user_order_stat.rename(columns={'user_id': 'user_total_products',
'product_id': 'user_distinct_products',
'reordered': 'user_reorder_ratio'}, inplace=True)
user_order_stat.reset_index(inplace=True)
user_order_stat.user_reorder_ratio = user_order_stat.user_reorder_ratio / user_order_stat.user_total_products
user_stat = pd.merge(user_stat, user_order_stat, on='user_id')
user_stat['user_average_basket'] = user_stat.user_total_products / user_stat.user_orders
########################### products
prod_usr = orders_products.groupby(['product_id']).agg({'user_id': lambda x: x.nunique()})
prod_usr.rename(columns={'user_id':'prod_users_unq'}, inplace=True)
prod_usr.reset_index(inplace=True)
prod_usr_reordered = orders_products.loc[orders_products.reordered, :].groupby(['product_id']).agg({'user_id': lambda x: x.nunique()})
prod_usr_reordered.rename(columns={'user_id': 'prod_users_unq_reordered'}, inplace=True)
prod_usr_reordered.reset_index(inplace=True)
order_stat = orders_products.groupby('order_id').agg({'order_id': 'size'}) \
.rename(columns={'order_id': 'order_size'}).reset_index()
orders_products = pd.merge(orders_products, order_stat, on='order_id')
orders_products['add_to_cart_order_inverted'] = orders_products.order_size - orders_products.add_to_cart_order
orders_products['add_to_cart_order_relative'] = orders_products.add_to_cart_order / orders_products.order_size
data = orders_products.groupby(['user_id', 'product_id']).agg({'user_id': 'size',
'order_number': ['min', 'max'],
'add_to_cart_order': ['mean', 'median'],
'days_since_prior_order': ['mean', 'median'],
'order_dow': ['mean', 'median'],
'order_hour_of_day': ['mean', 'median'],
'add_to_cart_order_inverted': ['mean', 'median'],
'add_to_cart_order_relative': ['mean', 'median'],
'reordered': ['sum']})
data.columns = data.columns.droplevel(0)
data.columns = ['up_orders', 'up_first_order', 'up_last_order', 'up_mean_cart_position', 'up_median_cart_position',
'days_since_prior_order_mean', 'days_since_prior_order_median', 'order_dow_mean',
'order_dow_median',
'order_hour_of_day_mean', 'order_hour_of_day_median',
'add_to_cart_order_inverted_mean', 'add_to_cart_order_inverted_median',
'add_to_cart_order_relative_mean', 'add_to_cart_order_relative_median',
'reordered_sum'
]
data['user_product_reordered_ratio'] = (data.reordered_sum + 1.0) / data.up_orders
# data['first_order'] = data['up_orders'] > 0
# data['second_order'] = data['up_orders'] > 1
#
# data.groupby('product_id')['']
data.reset_index(inplace=True)
data = pd.merge(data, prod_stat, on='product_id')
data = pd.merge(data, user_stat, on='user_id')
data['up_order_rate'] = data.up_orders / data.user_orders
data['up_orders_since_last_order'] = data.user_orders - data.up_last_order
data['up_order_rate_since_first_order'] = data.user_orders / (data.user_orders - data.up_first_order + 1)
############################
user_dep_stat = pd.read_pickle('data/user_department_products.pkl')
user_aisle_stat = pd.read_pickle('data/user_aisle_products.pkl')
############### train
print(order_train.shape)
order_train = pd.merge(order_train, products, on='product_id')
print(order_train.shape)
order_train = pd.merge(order_train, orders, on='order_id')
print(order_train.shape)
order_train = pd.merge(order_train, user_dep_stat, on=['user_id', 'department_id'])
print(order_train.shape)
order_train = pd.merge(order_train, user_aisle_stat, on=['user_id', 'aisle_id'])
print(order_train.shape)
order_train = pd.merge(order_train, prod_usr, on='product_id')
print(order_train.shape)
order_train = pd.merge(order_train, prod_usr_reordered, on='product_id', how='left')
order_train.prod_users_unq_reordered.fillna(0, inplace=True)
print(order_train.shape)
order_train = pd.merge(order_train, data, on=['product_id', 'user_id'])
print(order_train.shape)
order_train['aisle_reordered_ratio'] = order_train.aisle_reordered / order_train.user_orders
order_train['dep_reordered_ratio'] = order_train.dep_reordered / order_train.user_orders
order_train = pd.merge(order_train, product_periods, on=['user_id', 'product_id'])
##############
order_test = pd.merge(order_test, products, on='product_id')
order_test = pd.merge(order_test, orders, on='order_id')
order_test = pd.merge(order_test, user_dep_stat, on=['user_id', 'department_id'])
order_test = pd.merge(order_test, user_aisle_stat, on=['user_id', 'aisle_id'])
order_test = pd.merge(order_test, prod_usr, on='product_id')
order_test = pd.merge(order_test, prod_usr_reordered, on='product_id', how='left')
    order_test.prod_users_unq_reordered.fillna(0, inplace=True)  # fill NaNs from the left join above
order_test = pd.merge(order_test, data, on=['product_id', 'user_id'])
order_test['aisle_reordered_ratio'] = order_test.aisle_reordered / order_test.user_orders
order_test['dep_reordered_ratio'] = order_test.dep_reordered / order_test.user_orders
order_test = pd.merge(order_test, product_periods, on=['user_id', 'product_id'])
order_train = pd.merge(order_train, product_embeddings, on=['product_id'])
order_test = pd.merge(order_test, product_embeddings, on=['product_id'])
print('data is joined')
features = [
# 'reordered_dow_ration', 'reordered_dow', 'reordered_dow_size',
# 'reordered_prev', 'add_to_cart_order_prev', 'order_dow_prev', 'order_hour_of_day_prev',
'user_product_reordered_ratio', 'reordered_sum',
'add_to_cart_order_inverted_mean', 'add_to_cart_order_relative_mean',
'reorder_prob',
'last', 'prev1', 'prev2', 'median', 'mean',
'dep_reordered_ratio', 'aisle_reordered_ratio',
'aisle_products',
'aisle_reordered',
'dep_products',
'dep_reordered',
'prod_users_unq', 'prod_users_unq_reordered',
'order_number', 'prod_add_to_card_mean',
'days_since_prior_order',
'order_dow', 'order_hour_of_day',
'reorder_ration',
'user_orders', 'user_order_starts_at', 'user_mean_days_since_prior',
# 'user_median_days_since_prior',
'user_average_basket', 'user_distinct_products', 'user_reorder_ratio', 'user_total_products',
'prod_orders', 'prod_reorders',
'up_order_rate', 'up_orders_since_last_order', 'up_order_rate_since_first_order',
'up_orders', 'up_first_order', 'up_last_order', 'up_mean_cart_position',
# 'up_median_cart_position',
'days_since_prior_order_mean',
# 'days_since_prior_order_median',
'order_dow_mean',
# 'order_dow_median',
'order_hour_of_day_mean',
# 'order_hour_of_day_median'
]
features.extend(embedings)
categories = ['product_id', 'aisle_id', 'department_id']
cat_features = ','.join(map(lambda x: str(x + len(features)), range(len(categories))))
features.extend(categories)
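    # cat_features was built before the extend above, so it lists the positional
    # indices the three id columns now occupy at the end of `features`; for example,
    # if len(features) had been 100 at that point, cat_features == "100,101,102".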
print('not included', set(order_train.columns.tolist()) - set(features))
data = order_train[features]
labels = order_train[['reordered']].values.astype(np.float32).flatten()
data_val = order_test[features]
assert data.shape[0] == 8474661
lgb_train = lgb.Dataset(data, labels, categorical_feature=cat_features)
# specify your configurations as a dict
params = {
'task': 'train',
'boosting_type': 'gbdt',
'objective': 'binary',
'metric': {'binary_logloss', 'auc'},
'num_leaves': 256,
'min_sum_hessian_in_leaf': 20,
'max_depth': 12,
'learning_rate': 0.05,
'feature_fraction': 0.6,
# 'bagging_fraction': 0.9,
# 'bagging_freq': 3,
'verbose': 1
}
print('Start training...')
# train
gbm = lgb.train(params,
lgb_train,
num_boost_round=380)
prediction = gbm.predict(data_val)
# prediction = model.predict(data_val)
orders = order_test.order_id.values
products = order_test.product_id.values
result = pd.DataFrame({'product_id': products, 'order_id': orders, 'prediction': prediction})
result.to_pickle('data/prediction_lgbm.pkl')
| agpl-3.0 |
inkenbrandt/EPAEN | Box_and_Whisker/BoxAndWhisker.py | 2 | 5739 | # -*- coding: utf-8 -*-
"""
Created on Tue Jun 24 09:44:39 2014
Box and Whisker
http://matplotlib.org/examples/pylab_examples/boxplot_demo2.html
http://matplotlib.org/examples/pylab_examples/boxplot_demo.html
http://stackoverflow.com/questions/16592222/matplotlib-group-boxplots
@author: paulinkenbrandt
"""
from pylab import * # the pylab module combines Pyplot (MATLAB type of plotting) with Numpy into a single namespace
import arcpy
import os
import numpy as np
# if observations are missing, label them as 0
path = os.getcwd()
input = arcpy.GetParameterAsText(0)
arr = arcpy.da.TableToNumPyArray(input, ( arcpy.GetParameterAsText(1),arcpy.GetParameterAsText(6),arcpy.GetParameterAsText(7),arcpy.GetParameterAsText(8)), null_value=0)
nosamp = len(arr[arcpy.GetParameterAsText(7)]) # Determine number of samples in file
# Pull the geology grouping field and the three parameter columns from the table
Geology = arr[arcpy.GetParameterAsText(1)]
#Geo1 = arcpy.GetParameterAsText(1)
#Geo2 = arcpy.GetParameterAsText(2)
#Geo3 = arcpy.GetParameterAsText(3)
Geo1 = arcpy.GetParameterAsText(2)
Geo2 = arcpy.GetParameterAsText(3)
Geo3 = arcpy.GetParameterAsText(4)
Geo4 = arcpy.GetParameterAsText(5)
p1 = arr[arcpy.GetParameterAsText(6)]
p2 = arr[arcpy.GetParameterAsText(7)]
p3 = arr[arcpy.GetParameterAsText(8)]
# function for setting the colors of the box plots pairs
def setBoxColors(bp):
setp(bp['boxes'][0], color='blue')
setp(bp['caps'][0], color='blue')
setp(bp['caps'][1], color='blue')
setp(bp['whiskers'][0], color='blue')
setp(bp['whiskers'][1], color='blue')
setp(bp['fliers'][0], color='blue')
setp(bp['fliers'][1], color='blue')
setp(bp['medians'][0], color='blue')
setp(bp['boxes'][1], color='red')
setp(bp['caps'][2], color='red')
setp(bp['caps'][3], color='red')
setp(bp['whiskers'][2], color='red')
setp(bp['whiskers'][3], color='red')
setp(bp['fliers'][2], color='red')
setp(bp['fliers'][3], color='red')
setp(bp['medians'][1], color='red')
setp(bp['boxes'][2], color='green')
setp(bp['caps'][4], color='green')
setp(bp['caps'][5], color='green')
setp(bp['whiskers'][4], color='green')
setp(bp['whiskers'][5], color='green')
setp(bp['fliers'][4], color='green')
setp(bp['fliers'][5], color='green')
setp(bp['medians'][2], color='green')
setp(bp['boxes'][3], color='magenta')
setp(bp['caps'][6], color='magenta')
setp(bp['caps'][7], color='magenta')
setp(bp['whiskers'][6], color='magenta')
setp(bp['whiskers'][7], color='magenta')
setp(bp['fliers'][6], color='magenta')
setp(bp['fliers'][7], color='magenta')
setp(bp['medians'][3], color='magenta')
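# setBoxColors indexes bp['boxes'][0..3] and bp['medians'][0..3] as well as
# bp['caps'], bp['whiskers'] and bp['fliers'][0..7] (two artists per box, as in
# older matplotlib behaviour), so it assumes each boxplot() call below is given
# exactly four datasets; other inputs would raise IndexError.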
# Split each parameter's values into per-geology lists
# (A* = parameter 1, B* = parameter 2, C* = parameter 3).
def group_by_geology(values):
    return [[values[i] for i in range(nosamp) if Geology[i] == g]
            for g in (Geo1, Geo2, Geo3, Geo4)]

A1, A2, A3, A4 = group_by_geology(p1)
B1, B2, B3, B4 = group_by_geology(p2)
C1, C2, C3, C4 = group_by_geology(p3)
A = [A1,A2,A3,A4]
B = [B1,B2,B3,B4]
C = [C1,C2,C3,C4]
fig = figure()
ax = axes()
hold(True)
# first group of boxplots (parameter 1)
bp = boxplot(A, positions = [1, 2, 3, 4], widths = 0.6)
setBoxColors(bp)
# second group of boxplots (parameter 2)
bp = boxplot(B, positions = [6, 7, 8, 9], widths = 0.6)
setBoxColors(bp)
# third group of boxplots (parameter 3)
bp = boxplot(C, positions = [11, 12, 13, 14], widths = 0.6)
setBoxColors(bp)
# set axes limits and labels
xlim(0,15)
#ylim(0,9)
ax.set_xticklabels([arcpy.GetParameterAsText(6), arcpy.GetParameterAsText(7), arcpy.GetParameterAsText(8)])
tspc = np.arange(2.5,14,5)
ax.set_xticks(tspc)
ax.set_yscale('log')
ylabel('Concentration (mg/l)')
# draw temporary red and blue lines and use them to create a legend
hB, = plot([1,1],'b-')
hR, = plot([1,1],'r-')
hG, = plot([1,1],'g-')
hO, = plot([1,1],'m-')
legend((hB, hR, hG, hO),(Geo1+' n = '+str(len(A1)), Geo2 + ' n = ' + str(len(A2)), Geo3 + ' n = ' + str(len(A3)), Geo4 + ' n = '+str(len(A4))),loc='upper center', bbox_to_anchor=(0.5, 1.4))
hB.set_visible(False)
hR.set_visible(False)
hG.set_visible(False)
hO.set_visible(False)
# Shrink the current axis height to make room for the legend above the plot
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width, box.height*0.78])
#text(1,max(A1)+100,'n= '+str(len(A1)), rotation=0, fontsize=8)
#text(2,max(A2)10000,'n= '+str(len(A2)), rotation=90, fontsize=8)
#text(3,max(A3)10000,'n= '+str(len(A3)), rotation=90, fontsize=8)
#text(4,max(A4)10000,'n= '+str(len(A4)), rotation=90, fontsize=8)
#text(6,max(B1)+100,'n= '+str(len(B1)), rotation=0, fontsize=8)
#text(7,max(B2)+4,'n= '+str(len(B2)), rotation=90, fontsize=8)
#text(8,max(B3)+4,'n= '+str(len(B3)), rotation=90, fontsize=8)
#text(9,max(B4)+4,'n= '+str(len(B4)), rotation=90, fontsize=8)
#text(11,max(C1)+100,'n= '+str(len(C1)), rotation=0, fontsize=8)
#text(12,max(C2)+4,'n= '+str(len(C2)), rotation=90, fontsize=8)
#text(13,max(C3)+4,'n= '+str(len(C3)), rotation=90, fontsize=8)
#text(14,max(C4)+4,'n= '+str(len(C4)), rotation=90, fontsize=8)
savefig(arcpy.GetParameterAsText(9))
show()
| gpl-2.0 |
jjx02230808/project0223 | sklearn/decomposition/tests/test_sparse_pca.py | 160 | 6028 | # Author: Vlad Niculae
# License: BSD 3 clause
import sys
import numpy as np
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import if_safe_multiprocessing_with_blas
from sklearn.decomposition import SparsePCA, MiniBatchSparsePCA
from sklearn.utils import check_random_state
def generate_toy_data(n_components, n_samples, image_size, random_state=None):
n_features = image_size[0] * image_size[1]
rng = check_random_state(random_state)
U = rng.randn(n_samples, n_components)
V = rng.randn(n_components, n_features)
centers = [(3, 3), (6, 7), (8, 1)]
sz = [1, 2, 1]
for k in range(n_components):
img = np.zeros(image_size)
xmin, xmax = centers[k][0] - sz[k], centers[k][0] + sz[k]
ymin, ymax = centers[k][1] - sz[k], centers[k][1] + sz[k]
img[xmin:xmax][:, ymin:ymax] = 1.0
V[k, :] = img.ravel()
# Y is defined by : Y = UV + noise
Y = np.dot(U, V)
Y += 0.1 * rng.randn(Y.shape[0], Y.shape[1]) # Add noise
return Y, U, V
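# Shapes in this toy setup: U is (n_samples, n_components), V is
# (n_components, n_features) with one small square patch of ones per component,
# and Y = U.dot(V) + 0.1 * noise has shape (n_samples, image_size[0] * image_size[1]).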
# SparsePCA can be a bit slow. To avoid having test times go up, we
# test different aspects of the code in the same test
def test_correct_shapes():
rng = np.random.RandomState(0)
X = rng.randn(12, 10)
spca = SparsePCA(n_components=8, random_state=rng)
U = spca.fit_transform(X)
assert_equal(spca.components_.shape, (8, 10))
assert_equal(U.shape, (12, 8))
# test overcomplete decomposition
spca = SparsePCA(n_components=13, random_state=rng)
U = spca.fit_transform(X)
assert_equal(spca.components_.shape, (13, 10))
assert_equal(U.shape, (12, 13))
def test_fit_transform():
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = SparsePCA(n_components=3, method='lars', alpha=alpha,
random_state=0)
spca_lars.fit(Y)
# Test that CD gives similar results
spca_lasso = SparsePCA(n_components=3, method='cd', random_state=0,
alpha=alpha)
spca_lasso.fit(Y)
assert_array_almost_equal(spca_lasso.components_, spca_lars.components_)
@if_safe_multiprocessing_with_blas
def test_fit_transform_parallel():
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = SparsePCA(n_components=3, method='lars', alpha=alpha,
random_state=0)
spca_lars.fit(Y)
U1 = spca_lars.transform(Y)
# Test multiple CPUs
spca = SparsePCA(n_components=3, n_jobs=2, method='lars', alpha=alpha,
random_state=0).fit(Y)
U2 = spca.transform(Y)
assert_true(not np.all(spca_lars.components_ == 0))
assert_array_almost_equal(U1, U2)
def test_transform_nan():
# Test that SparsePCA won't return NaN when there is 0 feature in all
# samples.
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
Y[:, 0] = 0
estimator = SparsePCA(n_components=8)
assert_false(np.any(np.isnan(estimator.fit_transform(Y))))
def test_fit_transform_tall():
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 65, (8, 8), random_state=rng) # tall array
spca_lars = SparsePCA(n_components=3, method='lars',
random_state=rng)
U1 = spca_lars.fit_transform(Y)
spca_lasso = SparsePCA(n_components=3, method='cd', random_state=rng)
U2 = spca_lasso.fit(Y).transform(Y)
assert_array_almost_equal(U1, U2)
def test_initialization():
rng = np.random.RandomState(0)
U_init = rng.randn(5, 3)
V_init = rng.randn(3, 4)
model = SparsePCA(n_components=3, U_init=U_init, V_init=V_init, max_iter=0,
random_state=rng)
model.fit(rng.randn(5, 4))
assert_array_equal(model.components_, V_init)
def test_mini_batch_correct_shapes():
rng = np.random.RandomState(0)
X = rng.randn(12, 10)
pca = MiniBatchSparsePCA(n_components=8, random_state=rng)
U = pca.fit_transform(X)
assert_equal(pca.components_.shape, (8, 10))
assert_equal(U.shape, (12, 8))
# test overcomplete decomposition
pca = MiniBatchSparsePCA(n_components=13, random_state=rng)
U = pca.fit_transform(X)
assert_equal(pca.components_.shape, (13, 10))
assert_equal(U.shape, (12, 13))
def test_mini_batch_fit_transform():
raise SkipTest("skipping mini_batch_fit_transform.")
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = MiniBatchSparsePCA(n_components=3, random_state=0,
alpha=alpha).fit(Y)
U1 = spca_lars.transform(Y)
# Test multiple CPUs
if sys.platform == 'win32': # fake parallelism for win32
import sklearn.externals.joblib.parallel as joblib_par
_mp = joblib_par.multiprocessing
joblib_par.multiprocessing = None
try:
U2 = MiniBatchSparsePCA(n_components=3, n_jobs=2, alpha=alpha,
random_state=0).fit(Y).transform(Y)
finally:
joblib_par.multiprocessing = _mp
else: # we can efficiently use parallelism
U2 = MiniBatchSparsePCA(n_components=3, n_jobs=2, alpha=alpha,
random_state=0).fit(Y).transform(Y)
assert_true(not np.all(spca_lars.components_ == 0))
assert_array_almost_equal(U1, U2)
# Test that CD gives similar results
spca_lasso = MiniBatchSparsePCA(n_components=3, method='cd', alpha=alpha,
random_state=0).fit(Y)
assert_array_almost_equal(spca_lasso.components_, spca_lars.components_)
| bsd-3-clause |
alan-unravel/bokeh | bokeh/compat/bokeh_renderer.py | 6 | 16979 | "Supporting objects and functions to convert Matplotlib objects into Bokeh."
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
import itertools
import warnings
import matplotlib as mpl
import numpy as np
import pandas as pd
from six import string_types
from ..models import (ColumnDataSource, FactorRange, DataRange1d, DatetimeAxis, GlyphRenderer,
Grid, GridPlot, LinearAxis, Plot, CategoricalAxis, Legend)
from ..models.glyphs import (Asterisk, Circle, Cross, Diamond, InvertedTriangle,
Line, MultiLine, Patches, Square, Text, Triangle, X)
from ..plotting import DEFAULT_TOOLS
from ..plotting_helpers import _process_tools_arg
from .mplexporter.renderers import Renderer
from .mpl_helpers import convert_dashes, get_props_cycled, is_ax_end, xkcd_line
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
class BokehRenderer(Renderer):
def __init__(self, pd_obj, xkcd):
"Initial setup."
self.fig = None
self.pd_obj = pd_obj
self.xkcd = xkcd
self.zorder = {}
self.handles = {}
def open_figure(self, fig, props):
"Get the main plot properties and create the plot."
self.width = int(props['figwidth'] * props['dpi'])
self.height = int(props['figheight'] * props['dpi'])
self.plot = Plot(x_range=DataRange1d(),
y_range=DataRange1d(),
plot_width=self.width,
plot_height=self.height)
def close_figure(self, fig):
"Complete the plot: add tools."
# Add tools
tool_objs = _process_tools_arg(self.plot, DEFAULT_TOOLS)
self.plot.add_tools(*tool_objs)
# Simple or Grid plot setup
if len(fig.axes) <= 1:
self.fig = self.plot
self.plot.renderers.sort(key=lambda x: self.zorder.get(x._id, 0))
else:
# This list comprehension splits the plot.renderers list at the "marker"
# points returning small sublists corresponding with each subplot.
subrends = [list(x[1]) for x in itertools.groupby(
self.plot.renderers, lambda x: is_ax_end(x)) if not x[0]]
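            # For example, a renderers list [r1, r2, END, r3, r4, END], where END is
            # the dummy "ax_end" glyph appended in close_axes, splits into
            # [[r1, r2], [r3, r4]], one sublist per subplot.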
plots = []
for i, axes in enumerate(fig.axes):
# create a new plot for each subplot
_plot = Plot(x_range=self.plot.x_range,
y_range=self.plot.y_range,
plot_width=self.width,
plot_height=self.height)
_plot.title = ""
# and add new tools
_tool_objs = _process_tools_arg(_plot, DEFAULT_TOOLS)
_plot.add_tools(*_tool_objs)
# clean the plot ref from axis and grids
_plot_rends = subrends[i]
for r in _plot_rends:
if not isinstance(r, GlyphRenderer):
r.plot = None
# add all the renderers into the new subplot
for r in _plot_rends:
if isinstance(r, GlyphRenderer):
_plot.renderers.append(r)
elif isinstance(r, Grid):
_plot.add_layout(r)
else:
if r in self.plot.below:
_plot.add_layout(r, 'below')
elif r in self.plot.above:
_plot.add_layout(r, 'above')
elif r in self.plot.left:
_plot.add_layout(r, 'left')
elif r in self.plot.right:
_plot.add_layout(r, 'right')
_plot.renderers.sort(key=lambda x: self.zorder.get(x._id, 0))
plots.append(_plot)
(a, b, c) = fig.axes[0].get_geometry()
p = np.array(plots)
n = np.resize(p, (a, b))
grid = GridPlot(children=n.tolist())
self.fig = grid
def open_axes(self, ax, props):
"Get axes data and create the axes and grids"
# Get axes, title and grid into class attributes.
self.ax = ax
self.plot.title = ax.get_title()
# to avoid title conversion by draw_text later
self.grid = ax.get_xgridlines()[0]
# Add axis
for props in props['axes']:
if props['position'] == "bottom" : location, dim, thing = "below", 0, ax.xaxis
elif props['position'] == "top" : location, dim, thing = "above", 0, ax.xaxis
else: location, dim, thing = props['position'], 1, ax.yaxis
baxis = self.make_axis(thing, location, props)
self.make_grid(baxis, dim)
def close_axes(self, ax):
"Complete the axes adding axes-dependent plot props"
background_fill = ax.get_axis_bgcolor()
if background_fill == 'w':
background_fill = 'white'
self.plot.background_fill = background_fill
if self.xkcd:
self.plot.title_text_font = "Comic Sans MS, Textile, cursive"
self.plot.title_text_font_style = "bold"
self.plot.title_text_color = "black"
# Add a "marker" Glyph to help the plot.renderers splitting in the GridPlot build
dummy_source = ColumnDataSource(data=dict(name="ax_end"))
self.plot.renderers.append(GlyphRenderer(data_source=dummy_source, glyph=X()))
def open_legend(self, legend, props):
lgnd = Legend(orientation="top_right")
try:
for label, obj in zip(props['labels'], props['handles']):
lgnd.legends.append((label, [self.handles[id(obj)]]))
self.plot.add_layout(lgnd)
except KeyError:
pass
def close_legend(self, legend):
pass
def draw_line(self, data, coordinates, style, label, mplobj=None):
"Given a mpl line2d instance create a Bokeh Line glyph."
_x = data[:, 0]
if self.pd_obj is True:
try:
x = [pd.Period(ordinal=int(i), freq=self.ax.xaxis.freq).to_timestamp() for i in _x]
except AttributeError: # we probably can make this one more intelligent later
x = _x
else:
x = _x
y = data[:, 1]
if self.xkcd:
x, y = xkcd_line(x, y)
line = Line()
source = ColumnDataSource()
line.x = source.add(x)
line.y = source.add(y)
line.line_color = style['color']
line.line_width = style['linewidth']
line.line_alpha = style['alpha']
        line.line_dash = [] if style['dasharray'] == "none" else [int(i) for i in style['dasharray'].split(",")]  # e.g. "6,2" -> [6, 2]
# line.line_join = line2d.get_solid_joinstyle() # not in mplexporter
# line.line_cap = cap_style_map[line2d.get_solid_capstyle()] # not in mplexporter
if self.xkcd:
line.line_width = 3
r = self.plot.add_glyph(source, line)
self.zorder[r._id] = style['zorder']
self.handles[id(mplobj)] = r
def draw_markers(self, data, coordinates, style, label, mplobj=None):
"Given a mpl line2d instance create a Bokeh Marker glyph."
x = data[:, 0]
y = data[:, 1]
marker_map = {
"o": Circle,
"s": Square,
"+": Cross,
"^": Triangle,
"v": InvertedTriangle,
"x": X,
"d": Diamond,
"D": Diamond,
"*": Asterisk,
}
# Not all matplotlib markers are currently handled; fall back to Circle if we encounter an
# unhandled marker. See http://matplotlib.org/api/markers_api.html for a list of markers.
try:
marker = marker_map[style['marker']]()
except KeyError:
warnings.warn("Unable to handle marker: %s; defaulting to Circle" % style['marker'])
marker = Circle()
source = ColumnDataSource()
marker.x = source.add(x)
marker.y = source.add(y)
marker.line_color = style['edgecolor']
marker.fill_color = style['facecolor']
marker.line_width = style['edgewidth']
marker.size = style['markersize']
marker.fill_alpha = marker.line_alpha = style['alpha']
r = self.plot.add_glyph(source, marker)
self.zorder[r._id] = style['zorder']
self.handles[id(mplobj)] = r
def draw_path(self, data, coordinates, pathcodes, style,
offset=None, offset_coordinates="data", mplobj=None):
pass
def draw_text(self, text, position, coordinates, style,
text_type=None, mplobj=None):
"Given a mpl text instance create a Bokeh Text glyph."
# mpl give you the title and axes names as a text object (with specific locations)
# inside the plot itself. That does not make sense inside Bokeh, so we
# just skip the title and axes names from the conversion and covert any other text.
if text_type in ['xlabel', 'ylabel', 'title']:
return
if coordinates != 'data':
return
x, y = position
text = Text(x=x, y=y, text=[text])
alignment_map = {"center": "middle", "top": "top", "bottom": "bottom", "baseline": "bottom"}
        # baseline is not implemented in Bokeh, so default to bottom.
text.text_alpha = style['alpha']
text.text_font_size = "%dpx" % style['fontsize']
text.text_color = style['color']
text.text_align = style['halign']
text.text_baseline = alignment_map[style['valign']]
text.angle = style['rotation']
## Using get_fontname() works, but it's oftentimes not available in the browser,
## so it's better to just use the font family here.
#text.text_font = mplText.get_fontname()) not in mplexporter
#text.text_font = mplText.get_fontfamily()[0] # not in mplexporter
#text.text_font_style = fontstyle_map[mplText.get_fontstyle()] # not in mplexporter
## we don't really have the full range of font weights, but at least handle bold
#if mplText.get_weight() in ("bold", "heavy"):
#text.text_font_style = bold
source = ColumnDataSource()
r = self.plot.add_glyph(source, text)
self.zorder[r._id] = style['zorder']
self.handles[id(mplobj)] = r
def draw_image(self, imdata, extent, coordinates, style, mplobj=None):
pass
def make_axis(self, ax, location, props):
"Given a mpl axes instance, returns a Bokeh LinearAxis object."
# TODO:
# * handle log scaling
# * map `labelpad` to `major_label_standoff`
# * deal with minor ticks once BokehJS supports them
# * handle custom tick locations once that is added to bokehJS
tf = props['tickformat']
if tf and any(isinstance(x, string_types) for x in tf):
laxis = CategoricalAxis(axis_label=ax.get_label_text())
rng = FactorRange(factors=[str(x) for x in tf], offset=-1.0)
if location in ["above", "below"]:
self.plot.x_range = rng
else:
self.plot.y_range = rng
else:
if props['scale'] == "linear":
laxis = LinearAxis(axis_label=ax.get_label_text())
elif props['scale'] == "date":
laxis = DatetimeAxis(axis_label=ax.get_label_text())
self.plot.add_layout(laxis, location)
# First get the label properties by getting an mpl.Text object
label = ax.get_label()
self.text_props(label, laxis, prefix="axis_label_")
# To get the tick label format, we look at the first of the tick labels
# and assume the rest are formatted similarly.
ticktext = ax.get_ticklabels()[0]
self.text_props(ticktext, laxis, prefix="major_label_")
#newaxis.bounds = axis.get_data_interval() # I think this is the right func...
if self.xkcd:
laxis.axis_line_width = 3
laxis.axis_label_text_font = "Comic Sans MS, Textile, cursive"
laxis.axis_label_text_font_style = "bold"
laxis.axis_label_text_color = "black"
laxis.major_label_text_font = "Comic Sans MS, Textile, cursive"
laxis.major_label_text_font_style = "bold"
laxis.major_label_text_color = "black"
return laxis
def make_grid(self, baxis, dimension):
"Given a mpl axes instance, returns a Bokeh Grid object."
lgrid = Grid(dimension=dimension,
ticker=baxis.ticker,
grid_line_color=self.grid.get_color(),
grid_line_width=self.grid.get_linewidth())
self.plot.add_layout(lgrid)
def make_line_collection(self, col):
"Given a mpl collection instance create a Bokeh MultiLine glyph."
xydata = col.get_segments()
t_xydata = [np.transpose(seg) for seg in xydata]
xs = [t_xydata[x][0] for x in range(len(t_xydata))]
ys = [t_xydata[x][1] for x in range(len(t_xydata))]
if self.xkcd:
xkcd_xs = [xkcd_line(xs[i], ys[i])[0] for i in range(len(xs))]
xkcd_ys = [xkcd_line(xs[i], ys[i])[1] for i in range(len(ys))]
xs = xkcd_xs
ys = xkcd_ys
multiline = MultiLine()
source = ColumnDataSource()
multiline.xs = source.add(xs)
multiline.ys = source.add(ys)
self.multiline_props(source, multiline, col)
r = self.plot.add_glyph(source, multiline)
self.zorder[r._id] = col.zorder
self.handles[id(col)] = r
def make_poly_collection(self, col):
"Given a mpl collection instance create a Bokeh Patches glyph."
xs = []
ys = []
for path in col.get_paths():
for sub_poly in path.to_polygons():
xx, yy = sub_poly.transpose()
xs.append(xx)
ys.append(yy)
patches = Patches()
source = ColumnDataSource()
patches.xs = source.add(xs)
patches.ys = source.add(ys)
self.patches_props(source, patches, col)
r = self.plot.add_glyph(source, patches)
self.zorder[r._id] = col.zorder
self.handles[id(col)] = r
def multiline_props(self, source, multiline, col):
"Takes a mpl collection object to extract and set up some Bokeh multiline properties."
colors = get_props_cycled(col, col.get_colors(), fx=lambda x: mpl.colors.rgb2hex(x))
widths = get_props_cycled(col, col.get_linewidth())
multiline.line_color = source.add(colors)
multiline.line_width = source.add(widths)
multiline.line_alpha = col.get_alpha()
offset = col.get_linestyle()[0][0]
if not col.get_linestyle()[0][1]:
on_off = []
else:
on_off = map(int,col.get_linestyle()[0][1])
multiline.line_dash_offset = convert_dashes(offset)
multiline.line_dash = list(convert_dashes(tuple(on_off)))
def patches_props(self, source, patches, col):
"Takes a mpl collection object to extract and set up some Bokeh patches properties."
face_colors = get_props_cycled(col, col.get_facecolors(), fx=lambda x: mpl.colors.rgb2hex(x))
patches.fill_color = source.add(face_colors)
edge_colors = get_props_cycled(col, col.get_edgecolors(), fx=lambda x: mpl.colors.rgb2hex(x))
patches.line_color = source.add(edge_colors)
widths = get_props_cycled(col, col.get_linewidth())
patches.line_width = source.add(widths)
patches.line_alpha = col.get_alpha()
patches.fill_alpha = col.get_alpha()
offset = col.get_linestyle()[0][0]
if not col.get_linestyle()[0][1]:
on_off = []
else:
on_off = map(int,col.get_linestyle()[0][1])
patches.line_dash_offset = convert_dashes(offset)
patches.line_dash = list(convert_dashes(tuple(on_off)))
def text_props(self, text, obj, prefix=""):
fp = text.get_font_properties()
setattr(obj, prefix+"text_font", fp.get_family()[0])
setattr(obj, prefix+"text_font_size", "%fpt" % fp.get_size_in_points())
setattr(obj, prefix+"text_font_style", fp.get_style())
| bsd-3-clause |
exa-analytics/exatomic | exatomic/mpl.py | 2 | 23126 | # -*- coding: utf-8 -*-
# Copyright (c) 2015-2020, Exa Analytics Development Team
# Distributed under the terms of the Apache License 2.0
"""
Custom Axes
###################
"""
#
#import seaborn as sns
#
#from exa.mpl import _plot_contour, _plot_surface
#from exatomic import Energy
#
#
#def _get_minimum(mindf):
# absmin = mindf[mindf[2] == mindf[2].min()]
# idxs = mindf[(mindf[0] > 0) & (mindf[1] > 0)].index.values
# id0, id1 = idxs[:2]
# cnt = 1
# try:
# while np.isclose(id0 + 1, id1):
# id0, id1 = idxs[cnt:cnt + 2]
# cnt += 1
# slc = slice(idxs[0], id0 + 1)
# amin = mindf.ix[idxs[0]:id0 + 1]
# except:
# if absmin.index[0] in idxs:
# slc = list(idxs) + [idxs[-1] + 1]
# amin = mindf.ix[idxs]
# else:
# slc = list(idxs) + list(absmin.index.values)
# return mindf.ix[slc]
#
#
#def plot_j2_surface(data, key='j2', method='wireframe', nxlabel=6,
# nylabel=6, nzlabel=6, minimum=False, figsize=(8,6),
# alpha=0.5, cmap=None, title=None):
# cmap = sns.mpl.pyplot.cm.get_cmap('coolwarm') if cmap is None else cmap
# figargs = {'figsize': figsize}
# axargs = {'alpha': alpha, 'cmap': cmap}
# fig = _plot_surface(data['alpha'], data['gamma'], data['j2'],
# nxlabel, nylabel, nzlabel, method, figargs, axargs)
# ax = fig.gca()
# if 'min' in data and minimum:
# mindf = _get_minimum(data['min'])
# ax.plot(mindf[0], mindf[1], mindf[2], color='k', zorder=2)
# ax.set_ylabel(r'$\gamma$')
# ax.set_xlabel(r'$\\\alpha$')
# ax.set_zlabel(r'J$^{2}$')
# if title is not None:
# ax.set_title(title)
# return fig
#
#
#def plot_j2_contour(data, vmin=None, vmax=None, key='j2', figsize=(8,6),
# nxlabel=6, nylabel=6, method='pcolor', cmap=None, title=None,
# minline=False, minpoint=False, legend=False, colorbar=False):
# vmin = data[key].min() if vmin is None else vmin
# vmax = data[key].max() if vmax is None else vmax
# cmap = sns.mpl.pyplot.cm.get_cmap('coolwarm') if cmap is None else cmap
# figargs = {'figsize': figsize}
# axargs = {'vmin': vmin, 'vmax': vmax, 'cmap': cmap,
# 'zorder': 1, 'rasterized': True}
# fig, cbar = _plot_contour(data['alpha'], data['gamma'], data[key],
# nxlabel, nylabel, method, colorbar, figargs, axargs)
# ax = fig.gca()
# if (minline or minpoint) and 'min' in data:
# mindf = _get_minimum(data['min'])
# if minline:
# ax.plot(mindf[0], mindf[1], label='Min.(J$^{2}$)', color='k', zorder=2)
# if minpoint:
# jmin = mindf[2].argmin()
# labl = '({:.4f},{:.4f})'.format(mindf[0][jmin], mindf[1][jmin])
# ax.scatter([mindf[0][jmin]], [mindf[1][jmin]], label=labl,
# marker='*', color='y', s=200, zorder=3)
# if legend:
# hdls, lbls = ax.get_legend_handles_labels()
# leg = ax.legend(hdls, lbls)
# leg.get_frame().set_alpha(0.5)
# ax.set_ylabel(r'$\gamma$')
# ax.set_xlabel(r'$\\\alpha$')
# if title is not None:
# ax.set_title(title)
# return fig
#
#def photoelectron_spectrum(*unis, filters=None, broaden=0.06, color=None,
# stepe=1, units='eV', fontsize=20, peaklabels=True,
# xlim=None, extra=None, figsize=(10,10)):
# """
# Plot what is essentially a density of states for any number of universes,
# attempting to associate symmetry labels in order of peak positions.
#
# Args
# unis (exatomic.container.Universe): any number of universes with orbitals
# filters (dict,list): dict or list of dicts for each universe
# accepted kwargs: 'shift', uni.orbital column names
# special kwargs: 'shift' shifts energies,
# ['energy', 'eV', units] must be in the form of [min, max]
# Note: values can be strings defining relationships like
# {'occupation': '> 0'}
# units (str): the units in which to display the spectrum
# broaden (float): how broad to convolute each orbital energy (FWHM gaussian)
# color (list): commonly sns.color_palette or ['r', 'g', 'b', ...]
# stepe (int,float): how far to separate symmetry labels on plot (modify for
# units other than 'eV')
# fontsize (int): font size of text on plot (symmetry labels are fontsize - 2)
# peaklabels (bool): if True and symmetry in uni.orbital, put labels on plots
# xlim (tuple): (xmin, xmax)
# extra (dict): Custom plot of additional data on the same figure object
# accepted kwargs: ['x', 'y', 'color', 'label']
# figsize (tuple): matplotlib.figure.Figure figuresize keyword arg
#
# Returns
# fig (matplotlib.figure.Figure): the plot
# """
# pass
## unis = [unis] if not isinstance(unis, list) else unis
## if window is None:
## window = []
## for i, uni in enumerate(unis):
## uni.orbital[units] = uni.orbital['energy'] * Energy['Ha', units]
## window.append([uni.orbital.get_orbital(orb=-15)[units],
## uni.orbital.get_orbital()[units]])
## else:
## if not isinstance(window, list): window = window * len(unis)
## if shift or not isinstance(shift, list):
##def photoelectron_spectrum(ax, unis, window=[-10, 0], broaden=0.6,
## shift=0, label='', color=None, stepe=1, units='eV',
## loc='upper left', fontsize=26, peaks=True,
## xlim=None, ylim=None):
## color = ['r', 'g', 'b', 'c', 'y', 'm', 'k'] if color is None else color
## arrowprops = {'arrowstyle': '->', 'connectionstyle': 'arc3'}
## arrowargs = {'xycoords': 'data', 'textcoords': 'data',
## 'arrowprops': arrowprops, 'fontsize': fontsize}
## unis = [unis] if not isinstance(unis, list) else unis
## xmin, xmax = [], []
## if (len(window) != len(unis) or len(unis) == 2): window = window * len(unis)
## if not isinstance(shift, list): shift = [shift] * len(unis)
## if not isinstance(label, list): label = [label] * len(unis)
## for i, uni in enumerate(unis):
## height = len(unis) - 1 - i
## lo, hi = window[i]
## pes = uni.orbital.convolve(ewin=[lo,hi], broaden=broaden, units=units)[::-1]
## pes[units] = -pes[units]
## pes['shifted'] = pes[units] + shift[i]
## heightened = pes['signal'] + height
## lab = uni.name if uni.name and not label[i] else label[i]
## ax.axhline(height, color='k', linewidth=1.2)
## ax.plot(pes['shifted'], heightened, label=lab, color=color[i % len(color)])
## o = uni.orbital
## o = o[(o[units] > lo) & (o[units] < hi) & (o['occupation'] > 0)].drop_duplicates(
## units).copy().drop_duplicates('vector').sort_values(
## by=units, ascending=False).reset_index()
## o[units] = -o[units]
## leno = len(o)
## switch = leno // 2
## nonzero = pes[pes['signal'] > 0.1]['shifted']
## small = nonzero.min()
## esmall = small - stepe * switch
## elarge = nonzero.max()
## xmin.append(esmall)
## dh = 1 / (switch + 3)
## hlo = height + dh
## hhi = height + (switch + switch % 2) * dh
## for t in range(-20, 20):
## ax.plot([t] * 2, [height, height - 0.05], color='k', linewidth=1)
## if peaks:
## for c, (sym, en) in enumerate(zip(o['symmetry'], o[units] + shift[i])):
## ax.plot([en] * 2, [height, height + 0.05], color='k', linewidth=1)
## astr = r'$' + sym[0].lower() + '_{' + sym[1:].lower() + '}$'
## e = esmall if c < switch else elarge
## h = hlo if c < switch else hhi
## ax.annotate(astr, xy=(en, height + 0.05), xytext=(e, h), **arrowargs)
## if c < switch:
## esmall += stepe
## hlo += dh
## else:
## elarge += stepe * 1.5
## hhi -= dh
## xmax.append(elarge)
## xax = 'E* (' + units + ')' if any((i for i in shift)) else 'E (' + units + ')'
## xlim = (min(xmin), max(xmax)) if xlim is None else xlim
## ylim = (0, len(unis)) if ylim is None else ylim
## ax.set_xlim(xlim)
## ax.set_ylim(ylim)
## ax.set_xlabel(xax)
## ax.legend(loc=loc)
## return ax
#def new_pes(*unis, filters=None, broaden=0.06, color=None, stepe=0.5, units='eV',
# fontsize=20, peaklabels=True, xlim=None, extra=None,
# figsize=(10,10), title=None):
# """
# Things
# """
# def plot_details(ax, dos, xmin, xmax, peaklabels):
# switch = len(o) // 2
# nonzero = dos[dos['signal'] > 0.1]['shifted']
# small = nonzero.min()
# esmall = small - stepe * switch
# elarge = nonzero.max()
# xmin.append(esmall - 0.5)
# xmax.append(elarge + 0.5)
# dh = 1 / (switch + 3)
# hlo = dh
# hhi = (switch + switch % 2) * dh
# for c, (sym, en) in enumerate(zip(o['symmetry'], o['shifted'])):
# ax.plot([en] * 2, [0, 0.05], color='k', linewidth=1)
# if peaklabels:
# if '$' in sym: astr = sym
# else: astr = r'$\textrm{' + sym[0].lower() + '}_{\\large \\textrm{' + sym[1:].lower() + '}}$'
# e = esmall if c < switch else elarge
# h = hlo if c < switch else hhi
# ax.text(e, h, astr, fontsize=fontsize - 4)
# if c < switch:
# esmall += stepe
# hlo += dh
# else:
# elarge += stepe * 1.5
# hhi -= dh
# xmax[-1] = elarge
# return ax, xmin, xmax
#
# def plot_extra(ax, extra):
# for i, stargs in enumerate(zip(extra['x'], extra['y'])):
# kwargs = {'color': extra['color']}
# if isinstance(extra['label'], list):
# kwargs['color'] = extra['color'][i]
# kwargs['label'] = extra['label'][i]
# else:
# if not i: kwargs['label'] = extra['label']
# ax.plot(*stargs, **kwargs)
# ax.legend(frameon=False)
# return ax
#
# nuni = len(unis)
# if filters is None:
# print("filters allows for customization of the plot")
# filters = [{'eV': [-10, 0]}] * nuni
# elif isinstance(filters, dict):
# filters = [filters] * nuni
# elif len(filters) == 1 and isinstance(filters, list):
# filters = filters * nuni
# elif len(filters) != nuni:
# raise Exception("Provide a list of filter dicts same as number of unis.")
# nax = nuni + 1 if extra is not None else nuni
# figargs = {'figsize': figsize}
# fig = _gen_figure(nxplot=nax, nyplot=1, joinx=True, figargs=figargs)
# axs = fig.get_axes()
# color = sns.color_palette('cubehelix', nuni) if color is None else color
# xmin, xmax = [], []
# hdls, lbls = [], []
# for i, (uni, ax, fil) in enumerate(zip(unis, axs, filters)):
# if 'energy' in fil: lo, hi = fil['energy']
# elif units in fil: lo, hi = fil[units]
# else: raise Exception('filters must include an energetic keyword')
# shift = fil['shift'] if 'shift' in fil else 0
# lframe = uni.orbital['group'].astype(int).max()
# dos = uni.orbital.convolve(ewin=[lo,hi], broaden=broaden,
# units=units, frame=lframe)
# dos['shifted'] = dos[units] + shift
# lab = uni.name if uni.name is not None \
# else fil['label'] if 'label' in fil else ''
# dos[dos['signal'] > 0.01].plot(ax=ax, x='shifted', y='signal',
# label=lab, color=color[i % len(color)])
# li = uni.orbital['group'].astype(int).max()
# o = uni.orbital[uni.orbital['group'] == li]
# o = o[(o[units] > lo) & (o[units] < hi) & (o['occupation'] > 0)]
# o = o.drop_duplicates(units).copy().drop_duplicates(
# units).sort_values(by=units).reset_index()
# o['shifted'] = o[units] + shift
# ax, xmin, xmax = plot_details(ax, dos, xmin, xmax, peaklabels)
# if extra:
# axs[-1] = plot_extra(axs[-1], extra)
# xlim = (min(xmin), max(xmax)) if xlim is None else xlim
# if title is not None:
# axs[0].set_title(title)
# for i in range(nax):
# if not (i == nax - 1):
# sns.despine(bottom=True, trim=True)
# axs[i].set_xticks([])
# axs[i].set_xlabel('')
# axs[i].legend(frameon=False)
# axs[i].set_xlim(xlim)
# axs[i].set_yticks([])
# axs[i].set_yticklabels([])
# shifted = any(('shift' in fil for fil in filters))
# xax = 'E* (' + units + ')' if shifted else 'E (' + units + ')'
# axs[-1].set_xlabel(xax)
# nx = 2 if abs(xlim[1] - xlim[0]) > 8 else 1
# axs[-1].set_xticks(np.arange(xlim[0], xlim[1] + 1, nx, dtype=int))
# return fig
#
## Example filter for the following mo_diagram function
## applied to orbital table
##
##mofilters[key] = [{'eV': [-7, 5],
## 'occupation': 2,
## 'symmetry': 'EU'}.copy() for i in range(5)]
##mofilters[key][0]['shift'] = 24.7
##mofilters[key][0]['eV'] = [-30, -10]
##mofilters[key][0]['symmetry'] = '$\pi_{u}$'
##mofilters[key][-1]['eV'] = [0, 10]
##mofilters[key][-1]['shift'] = -11.5
#
#def new_mo_diagram(*unis, filters=None, units='eV', width=0.0625,
# pad_degen=0.125, pad_occ=0.03125, scale_occ=1,
# fontsize=22, figsize=(10,8), labelpos='right',
# ylim=None):
# """
# Args
# unis(exatomic.container.Universe): uni or list of unis
# filters(dict): dict or list of dicts for each uni
# accepted kwargs: 'shift', uni.orbital column names
# special kwargs: 'shift' shifts energies,
# ['energy', 'eV', units] must be of the form [min, max]
# Note: values can be strings defining relationships like
# {'occupation': '> 0'}
# units (str): the units in which to display the MO diagram
# width (float): the width of the line of each energy level
# pad_degen (float): the spacing between degenerate energy levels
# pad_occ (float): the spacing between arrows of occupied levels
# scale_occ (float): scales the size of the occupied arrows
# fontsize (int): font size for text on the MO diagram
# figsize (tuple): matplotlib's figure figsize kwarg
# labelpos (str): ['right', 'bottom'] placement of symmetry labels
#
# Returns
# fig (matplotlib.figure.Figure): the plot
# """
# def filter_orbs(o, fil):
# shift = fil['shift'] if 'shift' in fil else 0
# for key, val in fil.items():
# if key == 'shift': continue
# if isinstance(val, str) and \
# any((i in ['<', '>'] for i in val)):
# o = eval('o[o["' + key + '"] ' + val + ']')
# continue
# val = [val] if not isinstance(val,
# (list,tuple)) else val
# if key in [units, 'energy']:
# if len(val) != 2:
# raise Exception('energy (units) '
# 'filter arguments must be [min, max]')
# o = o[(o[key] > val[0]) & (o[key] < val[1])].copy()
# elif key == 'index':
# o = o.ix[val].copy()
# else:
# o = o[o[key].isin(val)].copy()
# return o, shift
#
# def cull_data(o, shift):
# data = OrderedDict()
# # Deduplicate manually to count degeneracy
# for en, sym, occ in zip(o[units], o['symmetry'], o['occupation']):
# en += shift
# if '$' in sym: pass
# else: sym = '${}_{{{}}}$'.format(sym[0].lower(),
# sym[1:].lower())
# data.setdefault(en, {'degen': 0, 'sym': sym, 'occ': occ})
# data[en]['degen'] += 1
# return data
#
# def offset(degen, pad_degen=pad_degen):
# start = 0.5 - pad_degen * (degen - 1)
# return [start + i * 2 * pad_degen for i in range(degen)]
#
# def occoffset(occ, pad_occ=pad_occ):
# if not occ: return []
# if occ <= 1: return [0]
# if occ <= 2: return [-pad_occ, pad_occ]
#
# def plot_axis(ax, data):
# for nrg, vals in data.items():
# # Format the occupation//symmetry
# occ = np.round(vals['occ']).astype(int)
# # Iterate over degeneracy
# offs = offset(vals['degen'])
# for x in offs:
# ax.plot([x - lw, x + lw], [nrg, nrg],
# color='k', lw=1.2)
# # Iterate over occupation
# for s, ocof in enumerate(occoffset(occ)):
# # Down arrow if beta spin else up arrow
# pt = -2 * lw * scale_occ if s == 1 else 2 * lw * scale_occ
# st = nrg + lw * scale_occ if s == 1 else nrg - lw * scale_occ
# ax.arrow(ocof + x, st, 0, pt, **arrows)
# # Assign symmetry label
# sym = vals['sym']
# if labelpos == 'right':
# ax.text(x + 2 * lw, nrg - lw, sym, fontsize=fontsize - 2)
# elif labelpos == 'bottom':
# ax.text(0.5 - 2 * lw, nrg - 4 * lw, sym, fontsize=fontsize - 2)
# return ax
#
# if filters is None:
# print('filters allows for customization of the plot.')
# filters = {'eV': [-5,5]}
# nunis = len(unis)
# filters = [filters] * nunis if isinstance(filters, dict) else filters
# # Make our figure and axes
# figargs = {'figsize': figsize}
# fig = _gen_figure(nxplot=nunis, nyplot=1, joinx=True, sharex=True, figargs=figargs)
# axs = fig.get_axes()
# # Some initialization
# ymin = np.empty(nunis, dtype=np.float64)
# ymax = np.empty(nunis, dtype=np.float64)
# ysc = exatomic.Energy['eV', units]
# lw = width
# arrows = {'fc': "k", 'ec': "k",
# 'head_width': 0.01,
# 'head_length': 0.05 * ysc}
# for i, (ax, uni, fil) in enumerate(zip(axs, unis, filters)):
# if uni.name: ax.set_title(uni.name)
# o = uni.orbital
# o[units] = o['energy'] * exatomic.Energy['Ha', units]
# o, shift = filter_orbs(o, fil)
# print('Filtered {} eigenvalues from '
# '{}'.format(o.shape[0], uni.name))
# ymin[i] = o[units].min() + shift
# ymax[i] = o[units].max() + shift
# data = cull_data(o, shift)
# ax = plot_axis(ax, data)
# # Go back through axes to set limits
# for i, ax in enumerate(axs):
# ax.set_xlim((0,1))
# ax.xaxis.set_ticklabels([])
# ylims = (min(ymin[~np.isnan(ymin)]) - 1, max(ymax[~np.isnan(ymax)]) + 1) \
# if ylim is None else ylim
# ax.set_ylim(ylims)
# if not i:
# ax.set_ylabel('E ({})'.format(units), fontsize=fontsize)
# diff = ylims[1] - ylims[0]
# headlen = 0.05 * diff
# ax.arrow(0.05, ylims[0], 0, diff - headlen, fc="k", ec="k",
# head_width=0.05, head_length= headlen)
# sns.despine(left=True, bottom=True, right=True)
# return fig
#
| apache-2.0 |
JunkieStyle/corgi | corgi/plots.py | 1 | 10233 | import pandas as pd
import seaborn as sns
from scipy.stats import ks_2samp
import numpy as np
import matplotlib.pylab as plt
def quantile_func(q_value):
return lambda x: np.percentile(x, q=q_value, interpolation='nearest')
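# For example, quantile_func(25) returns a callable usable inside a pandas
# aggregation, e.g. series.resample('1M').agg([np.mean, quantile_func(25)]) yields
# each month's 25th percentile (nearest-value interpolation), which is how it is
# used in numplot below.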
def _plot_sample(temp, ax, order, current_palette, isTrain=True):
for i, (f, c) in enumerate(zip(order.index, current_palette)):
temp[f].plot(ax=ax, color='grey', grid=False, label='_nolegend_')
label = f if isTrain else '_nolegend_'
alpha = 0.5
if i == 0:
ax.fill_between(temp.index, [100] * temp.shape[0], temp.iloc[:, 0], color=c, alpha=alpha, label=label)
else:
ax.fill_between(temp.index, temp.iloc[:, i - 1], temp.iloc[:, i], color=c, alpha=alpha, label=label)
return ax
def numplot(feature, train, test, target, date, frequency='1M', clips=(0, 1.0), grid=False, figsize=(15, 6), rank=None):
# figure subplots
fig = plt.figure(feature, figsize=figsize)
ax = plt.subplot2grid((8, 7), (3, 0), rowspan=5, colspan=5)
ax2 = plt.subplot2grid((8, 7), (3, 5), rowspan=5, sharey=ax)
ax3 = plt.subplot2grid((8, 7), (3, 6), rowspan=5, sharey=ax)
ax4 = plt.subplot2grid((8, 7), (0, 0), rowspan=1, colspan=5)
ax5 = plt.subplot2grid((8, 7), (1, 0), rowspan=2, colspan=5)
ax6 = plt.subplot2grid((8, 7), (0, 5), rowspan=2, colspan=2)
# train test outliers
train_feature = pd.Series(train[feature].values, index=train[date].values)
test_feature = pd.Series(test[feature].values, index=test[date].values)
data_feature = train_feature.append(test_feature)
data_clips = data_feature.quantile(clips).values
data_feature = data_feature.clip(data_clips[0], data_clips[1])
train_feature = train_feature.clip(data_clips[0], data_clips[1])
test_feature = test_feature.clip(data_clips[0], data_clips[1])
# max, min dates
train_max_date = train_feature.index.max()
test_min_date = test_feature.index.min()
# ax2: KDE of all Train/Test
sns.kdeplot(train_feature[train_feature.notnull()], shade=True, label=u'train', vertical=True, ax=ax2)
sns.kdeplot(test_feature[test_feature.notnull()], shade=True, label=u'test', vertical=True, ax=ax2)
ax2.set_title('KS: %g' % ks_2samp(train_feature[train_feature.notnull()], test_feature[test_feature.notnull()])[0])
ax2.grid()
ax2.set_xticks([])
# ax3: KDE of Train with respect to target
# Only for boolean target
target_mask = pd.Series(train[target].values, index=train[date].values).astype(bool)
sns.kdeplot(train_feature[~target_mask & train_feature.notnull()],
shade=True, label=u'0', vertical=True, ax=ax3, color='red', alpha=0.3)
sns.kdeplot(train_feature[target_mask & train_feature.notnull()],
shade=True, label=u'1', vertical=True, ax=ax3, color='purple', alpha=0.3)
ax3.set_title('KS: %g' % ks_2samp(train_feature[target_mask], train_feature[~target_mask])[0])
ax3.grid()
ax3.set_xticks([])
# ax: Main plot
ax_feature = data_feature[data_feature.notnull()]
ax_feature = ax_feature.resample(frequency)
ax_feature = ax_feature.agg([np.mean, np.std, min, max,
quantile_func(5), quantile_func(25), quantile_func(75), quantile_func(95)])
ax_feature.columns = ['mean', 'std', 'min', 'max', 'q5', 'q25', 'q75', 'q95']
ax_feature['mean'].plot(ax=ax, markersize=5, marker='o', color='black', label='mean')
ax_feature['min'].plot(ax=ax, color='grey', linestyle='--')
ax_feature['max'].plot(ax=ax, color='grey', linestyle='--')
ax_feature_train = ax_feature[(ax_feature.index <= train_max_date) |
((ax_feature.index < test_min_date) & (ax_feature.index > train_max_date))]
ax_feature_test = ax_feature[ax_feature.index >= test_min_date]
ax.fill_between(ax_feature_train.index, ax_feature_train['min'], ax_feature_train['max'], alpha=0.05, color='b')
ax.fill_between(ax_feature_train.index, ax_feature_train['q5'], ax_feature_train['q95'], alpha=0.05, color='b')
ax.fill_between(ax_feature_train.index, ax_feature_train['q25'], ax_feature_train['q75'], alpha=0.05, color='b')
ax.fill_between(ax_feature_test.index, ax_feature_test['min'], ax_feature_test['max'], alpha=0.05, color='g')
ax.fill_between(ax_feature_test.index, ax_feature_test['q5'], ax_feature_test['q95'], alpha=0.05, color='g')
ax.fill_between(ax_feature_test.index, ax_feature_test['q25'], ax_feature_test['q75'], alpha=0.05, color='g')
ax.fill_betweenx([(ax_feature_train['min'].iloc[-1] + ax_feature_test['min'].iloc[-1]) / 2.,
(ax_feature_train['max'].iloc[-1] + ax_feature_test['max'].iloc[-1]) / 2.],
[ax_feature_train.index.max(), ax_feature_train.index.max()],
[ax_feature_test.index.min(), ax_feature_test.index.min()], color='grey', alpha=0.2)
ax.grid()
ax.set_xticks(ax_feature.index)
ax.set_ylabel(feature)
if grid:
ax.grid()
# ax4: Total Count plot
temp = pd.Series(data_feature.isnull(), index=data_feature.index).resample(frequency).count()
temp = temp / temp.sum()
temp.plot(ax=ax4, use_index=False, color='grey', marker='o', markersize=5, label='Count %')
ax4.fill_between(range(0, temp.shape[0]), [0] * temp.shape[0], temp.values, alpha=0.5, color='grey')
ax4.grid()
ax4.set_yticks([])
ax4.set_xticks([])
ax4.legend()
ax4.set_title(feature)
# ax5: NaN Ratio plot
if data_feature.isnull().sum() > 0:
temp = pd.Series(data_feature.isnull(), index=data_feature.index).resample(frequency).mean()
temp.plot(ax=ax5, use_index=False, color='grey', marker='o', markersize=5, label='NaN %')
ax5.fill_between(range(0, temp.shape[0]), [0] * temp.shape[0], temp.values, alpha=0.5, color='grey')
temp = temp / temp.max()
temp.plot(ax=ax5, use_index=False, color='grey', label='NaN rel %', linestyle='--')
ax5.legend()
ax5.set_xticks([])
ax5.set_ylim([0, 1])
ax5.set_yticks([])
# ax6: Rank Info
ax6.set_xticks([])
ax6.set_yticks([])
if rank:
ax6.text(0.2, 0.4, 'Rank: {0}'.format(rank), fontsize=18)
return fig
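# Hedged usage sketch for numplot (illustrative only; the DataFrames and the
# 'price', 'target' and 'date' column names are assumptions, not module state):
#   fig = numplot('price', train_df, test_df, target='target', date='date',
#                 frequency='1M', clips=(0.01, 0.99))
#   fig.savefig('price_report.png')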
def catplot(feature, train, test, target, date, frequency='1M', clip=0.01, grid=False, figsize=(15, 6), rank=None):
# figure subplots
fig = plt.figure(feature, figsize=figsize)
ax = plt.subplot2grid((6, 7), (1, 0), rowspan=5, colspan=5)
ax2 = plt.subplot2grid((6, 7), (1, 6), rowspan=5, colspan=1)
ax3 = plt.subplot2grid((6, 7), (0, 0), rowspan=1, colspan=5)
ax4 = plt.subplot2grid((6, 7), (0, 6), rowspan=1, colspan=1)
# Train Test outliers and NaNs
train_feature = pd.Series(train[feature].values, index=train[date].values).fillna('NaN').astype(str)
test_feature = pd.Series(test[feature].values, index=test[date].values).fillna('NaN').astype(str)
data_feature = train_feature.append(test_feature).fillna('NaN').astype(str)
vc = data_feature.value_counts() / data_feature.shape[0]
to_join_other = vc[vc < clip].index
data_feature.loc[data_feature.isin(to_join_other)] = 'OTHER'
train_feature.loc[train_feature.isin(to_join_other)] = 'OTHER'
test_feature.loc[test_feature.isin(to_join_other)] = 'OTHER'
# Min/max dates for Train and Test
train_max_date = train_feature.index.max()
test_min_date = test_feature.index.min()
# ax: Main plot
ax_feature = data_feature.groupby((pd.TimeGrouper(frequency), data_feature)).count()
ax_feature = ax_feature.to_frame().unstack()
ax_feature.columns = ax_feature.columns.droplevel()
ax_feature = ax_feature.T
ax_feature = (ax_feature / ax_feature.sum(axis=0)).fillna(0)
temp = ax_feature.copy().T
order = temp.max(axis=0)
order = order.sort_values(ascending=False)
current_palette = sns.color_palette('pastel') + sns.color_palette('muted')
ax_feature = ax_feature.loc[order.index]
ax_feature = ax_feature.cumsum(axis=0).T
ax_feature = -ax_feature
ax_feature += pd.DataFrame(np.ones(ax_feature.shape), index=ax_feature.index, columns=ax_feature.columns)
ax_feature *= 100
ax_feature_train = ax_feature[(ax_feature.index <= train_max_date) |
((ax_feature.index < test_min_date) & (ax_feature.index > train_max_date))]
ax_feature_test = ax_feature[ax_feature.index >= test_min_date]
ax = _plot_sample(ax_feature_train, ax, order, current_palette, isTrain=True)
ax = _plot_sample(ax_feature_test, ax, order, current_palette, isTrain=False)
ax.fill_betweenx([0, 100],
[ax_feature_train.index.max(), ax_feature_train.index.max()],
[ax_feature_test.index.min(), ax_feature_test.index.min()], color='grey', alpha=0.2)
ax.legend(bbox_to_anchor=(1.15, 1.01))
ax.set_ylim([0, 100])
ax.set_ylabel('Part of sample, %')
if grid:
ax.grid()
# ax3: Total Count plot
temp = pd.Series(data_feature.isnull(), index=data_feature.index).resample(frequency).count()
temp = temp / temp.sum()
temp.plot(ax=ax3, use_index=False, color='grey', marker='o', markersize=5, label='Count %')
ax3.fill_between(range(0, temp.shape[0]), [0] * temp.shape[0], temp.values, alpha=0.5, color='grey')
ax3.grid()
ax3.set_yticks([])
ax3.set_xticks([])
ax3.legend()
ax3.set_title(feature)
# ax2: Mean target value per category
# works only with boolean target
g = train.groupby(train_feature.values)[target].mean()
g = g.reset_index()
g.columns = ['val', target]
g.val = g.val.astype(str)
sns.barplot(x=target, y='val', data=g, palette=current_palette, order=order.index.astype(str), alpha=0.5, ax=ax2)
ax2.grid(False)
ax2.set_xlabel('Mean of TARGET')
ax2.set_ylabel('')
base = train[target].mean()
ax2.plot([base, base], ax2.get_ylim(), linestyle='--', color='grey')
ax2.set_yticks([])
ax2.set_xticks([round(i, 2) for i in ax2.get_xlim() + (base,)])
# ax4
ax4.set_xticks([])
ax4.set_yticks([])
if rank is None:
rank = 'NA'
ax4.text(0.05, 0.4, 'Rank: {0}'.format(rank), fontsize=16)
return fig
| mit |
joetidwell/daftHM | examples/exoplanets.py | 7 | 1616 | """
The Fergus model of exoplanet detection
=======================================
Besides being generally awesome, this example also demonstrates how you can
color the nodes and add arbitrary labels to the figure.
"""
from matplotlib import rc
rc("font", family="serif", size=12)
rc("text", usetex=True)
import daft
# Colors.
p_color = {"ec": "#46a546"}
s_color = {"ec": "#f89406"}
pgm = daft.PGM([3.6, 3.5], origin=[0.7, 0])
n = daft.Node("phi", r"$\phi$", 1, 3, plot_params=s_color)
n.va = "baseline"
pgm.add_node(n)
pgm.add_node(daft.Node("speckle_coeff", r"$z_i$", 2, 3, plot_params=s_color))
pgm.add_node(daft.Node("speckle_img", r"$x_i$", 2, 2, plot_params=s_color))
pgm.add_node(daft.Node("spec", r"$s$", 4, 3, plot_params=p_color))
pgm.add_node(daft.Node("shape", r"$g$", 4, 2, plot_params=p_color))
pgm.add_node(daft.Node("planet_pos", r"$\mu_i$", 3, 3, plot_params=p_color))
pgm.add_node(daft.Node("planet_img", r"$p_i$", 3, 2, plot_params=p_color))
pgm.add_node(daft.Node("pixels", r"$y_i ^j$", 2.5, 1, observed=True))
# Edges.
pgm.add_edge("phi", "speckle_coeff")
pgm.add_edge("speckle_coeff", "speckle_img")
pgm.add_edge("speckle_img", "pixels")
pgm.add_edge("spec", "planet_img")
pgm.add_edge("shape", "planet_img")
pgm.add_edge("planet_pos", "planet_img")
pgm.add_edge("planet_img", "pixels")
# And a plate.
pgm.add_plate(daft.Plate([1.5, 0.2, 2, 3.2], label=r"exposure $i$",
shift=-0.1))
pgm.add_plate(daft.Plate([2, 0.5, 1, 1], label=r"pixel $j$",
shift=-0.1))
# Render and save.
pgm.render()
pgm.figure.savefig("exoplanets.pdf")
pgm.figure.savefig("exoplanets.png", dpi=150)
| mit |
billy-inn/scikit-learn | examples/cluster/plot_lena_ward_segmentation.py | 271 | 1998 | """
===============================================================
A demo of structured Ward hierarchical clustering on Lena image
===============================================================
Compute the segmentation of a 2D image with Ward hierarchical
clustering. The clustering is spatially constrained in order
for each segmented region to be in one piece.
"""
# Author : Vincent Michel, 2010
# Alexandre Gramfort, 2011
# License: BSD 3 clause
print(__doc__)
import time as time
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn.feature_extraction.image import grid_to_graph
from sklearn.cluster import AgglomerativeClustering
###############################################################################
# Generate data
lena = sp.misc.lena()
# Downsample the image by a factor of 4
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
X = np.reshape(lena, (-1, 1))
###############################################################################
# Define the structure A of the data. Pixels connected to their neighbors.
connectivity = grid_to_graph(*lena.shape)
###############################################################################
# Compute clustering
print("Compute structured hierarchical clustering...")
st = time.time()
n_clusters = 15 # number of regions
ward = AgglomerativeClustering(n_clusters=n_clusters,
linkage='ward', connectivity=connectivity).fit(X)
label = np.reshape(ward.labels_, lena.shape)
print("Elapsed time: ", time.time() - st)
print("Number of pixels: ", label.size)
print("Number of clusters: ", np.unique(label).size)
###############################################################################
# Plot the results on an image
plt.figure(figsize=(5, 5))
plt.imshow(lena, cmap=plt.cm.gray)
for l in range(n_clusters):
    plt.contour(label == l,
                colors=[plt.cm.spectral(l / float(n_clusters)), ])
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
alvarofierroclavero/scikit-learn | examples/cluster/plot_kmeans_digits.py | 230 | 4524 | """
===========================================================
A demo of K-Means clustering on the handwritten digits data
===========================================================
In this example we compare the various initialization strategies for
K-means in terms of runtime and quality of the results.
As the ground truth is known here, we also apply different cluster
quality metrics to judge the goodness of fit of the cluster labels to the
ground truth.
Cluster quality metrics evaluated (see :ref:`clustering_evaluation` for
definitions and discussions of the metrics):
=========== ========================================================
Shorthand full name
=========== ========================================================
homo homogeneity score
compl completeness score
v-meas V measure
ARI adjusted Rand index
AMI adjusted mutual information
silhouette silhouette coefficient
=========== ========================================================
"""
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn import metrics
from sklearn.cluster import KMeans
from sklearn.datasets import load_digits
from sklearn.decomposition import PCA
from sklearn.preprocessing import scale
np.random.seed(42)
digits = load_digits()
data = scale(digits.data)
n_samples, n_features = data.shape
n_digits = len(np.unique(digits.target))
labels = digits.target
sample_size = 300
print("n_digits: %d, \t n_samples %d, \t n_features %d"
% (n_digits, n_samples, n_features))
print(79 * '_')
print('% 9s' % 'init'
' time inertia homo compl v-meas ARI AMI silhouette')
def bench_k_means(estimator, name, data):
t0 = time()
estimator.fit(data)
print('% 9s %.2fs %i %.3f %.3f %.3f %.3f %.3f %.3f'
% (name, (time() - t0), estimator.inertia_,
metrics.homogeneity_score(labels, estimator.labels_),
metrics.completeness_score(labels, estimator.labels_),
metrics.v_measure_score(labels, estimator.labels_),
metrics.adjusted_rand_score(labels, estimator.labels_),
metrics.adjusted_mutual_info_score(labels, estimator.labels_),
metrics.silhouette_score(data, estimator.labels_,
metric='euclidean',
sample_size=sample_size)))
bench_k_means(KMeans(init='k-means++', n_clusters=n_digits, n_init=10),
name="k-means++", data=data)
bench_k_means(KMeans(init='random', n_clusters=n_digits, n_init=10),
name="random", data=data)
# in this case the seeding of the centers is deterministic, hence we run the
# kmeans algorithm only once with n_init=1
pca = PCA(n_components=n_digits).fit(data)
bench_k_means(KMeans(init=pca.components_, n_clusters=n_digits, n_init=1),
name="PCA-based",
data=data)
print(79 * '_')
###############################################################################
# Visualize the results on PCA-reduced data
reduced_data = PCA(n_components=2).fit_transform(data)
kmeans = KMeans(init='k-means++', n_clusters=n_digits, n_init=10)
kmeans.fit(reduced_data)
# Step size of the mesh. Decrease to increase the quality of the VQ.
h = .02     # point in the mesh [x_min, x_max]x[y_min, y_max].
# Plot the decision boundary. For that, we will assign a color to each point in the mesh.
x_min, x_max = reduced_data[:, 0].min() - 1, reduced_data[:, 0].max() + 1
y_min, y_max = reduced_data[:, 1].min() - 1, reduced_data[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# Obtain labels for each point in mesh. Use last trained model.
Z = kmeans.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1)
plt.clf()
plt.imshow(Z, interpolation='nearest',
extent=(xx.min(), xx.max(), yy.min(), yy.max()),
cmap=plt.cm.Paired,
aspect='auto', origin='lower')
plt.plot(reduced_data[:, 0], reduced_data[:, 1], 'k.', markersize=2)
# Plot the centroids as a white X
centroids = kmeans.cluster_centers_
plt.scatter(centroids[:, 0], centroids[:, 1],
marker='x', s=169, linewidths=3,
color='w', zorder=10)
plt.title('K-means clustering on the digits dataset (PCA-reduced data)\n'
'Centroids are marked with white cross')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
ashivni/Metamaterials | python/analysis.py | 1 | 1372 | import numpy
import matplotlib as mpl
import hierarchicalMaterials as hm
import scipy.optimize
def stress_conc(nx=100,ny=70,l0=1,levels=0):
fig = mpl.pyplot.figure()
fig.subplots_adjust(bottom=0.2,left=0.2)
ax = fig.add_subplot(111)
ax.set_xscale('log')
ax.set_yscale('log')
"""
#ax.axis('off')
ax.set_xticklabels([])
ax.set_yticklabels([])
for tic in ax.xaxis.get_major_ticks():
tic.tick1On = tic.tick2On = False
for tic in ax.yaxis.get_major_ticks():
tic.tick1On = tic.tick2On = False
"""
ax.set_aspect('equal')
notch_len = []
stress_conc = []
notch_len_frac = numpy.linspace(1.0/8,1.0/2,5)
for nlf in notch_len_frac:
print 'Solving ', nlf
hg = hm.hierarchical_grid(nx,ny,levels,notch=True,l0=l0,notch_len=nlf)
hg._solve()
notch_len.append(hg._level_notch_len[0])
stress_conc.append(hg._level_eqns[0]['curr'].max())
notch_len = numpy.array(notch_len)
stress_conc = numpy.array(stress_conc)
p = scipy.optimize.leastsq(stress_conc_fit_diff,numpy.zeros(2), args = (notch_len, stress_conc))
C = p[0]
line, = ax.plot(notch_len,stress_conc,'ko')
line, = ax.plot(notch_len,stress_conc_fit_func(C,notch_len),'k-')
mpl.pyplot.draw()
return numpy.array(notch_len), numpy.array(stress_conc)
def stress_conc_fit_func(C, nl):
return C[0]*(nl**0.5) + C[1]
def stress_conc_fit_diff(C,nl,sc):
return sc - stress_conc_fit_func(C,nl)
| gpl-3.0 |
Leberwurscht/Python-Guitar-Transcription-Aid | Analyze.py | 1 | 1678 | #!/usr/bin/env python
import gtk, numpy, scipy.ndimage
import matplotlib
import matplotlib.backends.backend_gtkcairo as mpl_backend
def get_power(data):
# apply window
window = numpy.hanning(len(data))
data *= window
# fft
power = numpy.abs(numpy.fft.rfft(data))**2.
return power
def smooth(array, window=3):
smoothed = numpy.convolve(array, numpy.hanning(window), "same")
return smoothed
def find_peaks(frq,power,max_window=3,min_window=3,height=0.0001):
max_filtered = scipy.ndimage.maximum_filter1d(power,size=max_window)
min_filtered = scipy.ndimage.minimum_filter1d(power,size=min_window)
maxima = numpy.logical_and(max_filtered==power, max_filtered-min_filtered>height)
maxima_indices = numpy.nonzero(maxima)[0]
return maxima_indices
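# Rough sketch of chaining the helpers above (kept as comments on purpose; the
# 'samples' array and its sampling 'rate' are assumed to come from the caller):
#   power = get_power(samples.astype(float))
#   power = smooth(power, window=5)
#   frq = numpy.fft.rfftfreq(len(samples), d=1.0 / rate)
#   peak_frequencies = frq[find_peaks(frq, power, height=0.001)]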
class Analyze(gtk.Window):
def __init__(self):
gtk.Window.__init__(self)
fig = matplotlib.figure.Figure(figsize=(5,4))
self.ax = fig.add_subplot(111)
vbox = gtk.VBox()
self.add(vbox)
self.figure = mpl_backend.FigureCanvasGTK(fig)
self.figure.set_size_request(500,400)
self.navbar = mpl_backend.NavigationToolbar2Cairo(self.figure, self)
vbox.pack_start(self.figure)
vbox.pack_start(self.navbar, False, False)
def simple_plot(self, x, y, **kwargs):
self.ax.plot(x, y, **kwargs)
def add_line(self, pos, **kwargs):
self.ax.axvline(pos, **kwargs)
def plot_spectrum(self, frq, power):
self.simple_plot(frq, power, color="g")
# self.ax.plot(frq, 10*numpy.log10(power), color="r")
for semitone in xrange(-29,50):
f = 440. * ( 2.**(1./12.) )**semitone
self.ax.axvline(f, color="r")
for maximum in find_peaks(frq, power, 3, 3, 10):
self.ax.axvline(frq[maximum], color="k")
| gpl-3.0 |
AlexanderFabisch/scikit-learn | examples/model_selection/plot_underfitting_overfitting.py | 53 | 2668 | """
============================
Underfitting vs. Overfitting
============================
This example demonstrates the problems of underfitting and overfitting and
how we can use linear regression with polynomial features to approximate
nonlinear functions. The plot shows the function that we want to approximate,
which is a part of the cosine function. In addition, the samples from the
real function and the approximations of different models are displayed. The
models have polynomial features of different degrees. We can see that a
linear function (polynomial with degree 1) is not sufficient to fit the
training samples. This is called **underfitting**. A polynomial of degree 4
approximates the true function almost perfectly. However, for higher degrees
the model will **overfit** the training data, i.e. it learns the noise of the
training data.
We evaluate quantitatively **overfitting** / **underfitting** by using
cross-validation. We calculate the mean squared error (MSE) on the validation
set, the higher, the less likely the model generalizes correctly from the
training data.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import cross_val_score
np.random.seed(0)
n_samples = 30
degrees = [1, 4, 15]
true_fun = lambda X: np.cos(1.5 * np.pi * X)
X = np.sort(np.random.rand(n_samples))
y = true_fun(X) + np.random.randn(n_samples) * 0.1
plt.figure(figsize=(14, 5))
for i in range(len(degrees)):
ax = plt.subplot(1, len(degrees), i + 1)
plt.setp(ax, xticks=(), yticks=())
polynomial_features = PolynomialFeatures(degree=degrees[i],
include_bias=False)
linear_regression = LinearRegression()
pipeline = Pipeline([("polynomial_features", polynomial_features),
("linear_regression", linear_regression)])
pipeline.fit(X[:, np.newaxis], y)
    # Evaluate the models using cross-validation
scores = cross_val_score(pipeline, X[:, np.newaxis], y,
scoring="mean_squared_error", cv=10)
X_test = np.linspace(0, 1, 100)
plt.plot(X_test, pipeline.predict(X_test[:, np.newaxis]), label="Model")
plt.plot(X_test, true_fun(X_test), label="True function")
plt.scatter(X, y, label="Samples")
plt.xlabel("x")
plt.ylabel("y")
plt.xlim((0, 1))
plt.ylim((-2, 2))
plt.legend(loc="best")
plt.title("Degree {}\nMSE = {:.2e}(+/- {:.2e})".format(
degrees[i], -scores.mean(), scores.std()))
plt.show()
| bsd-3-clause |
mne-tools/mne-tools.github.io | 0.12/_downloads/plot_eog_artifact_histogram.py | 22 | 1474 | """
========================
Show EOG artifact timing
========================
Compute the distribution of timing for EOG artifacts.
"""
# Authors: Eric Larson <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
###############################################################################
# Set parameters
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
# Setup for reading the raw data
raw = io.read_raw_fif(raw_fname, preload=True)
events = mne.find_events(raw, 'STI 014')
eog_event_id = 512
eog_events = mne.preprocessing.find_eog_events(raw, eog_event_id)
raw.add_events(eog_events, 'STI 014')
# Read epochs
picks = mne.pick_types(raw.info, meg=False, eeg=False, stim=True, eog=False)
tmin, tmax = -0.2, 0.5
event_ids = {'AudL': 1, 'AudR': 2, 'VisL': 3, 'VisR': 4}
epochs = mne.Epochs(raw, events, event_ids, tmin, tmax, picks=picks)
# Get the stim channel data
pick_ch = mne.pick_channels(epochs.ch_names, ['STI 014'])[0]
data = epochs.get_data()[:, pick_ch, :].astype(int)
data = np.sum((data.astype(int) & 512) == 512, axis=0)
###############################################################################
# Plot EOG artifact distribution
plt.stem(1e3 * epochs.times, data)
plt.xlabel('Times (ms)')
plt.ylabel('Blink counts (from %s trials)' % len(epochs))
plt.show()
| bsd-3-clause |
saiwing-yeung/scikit-learn | sklearn/decomposition/tests/test_online_lda.py | 50 | 13330 | import numpy as np
from scipy.linalg import block_diag
from scipy.sparse import csr_matrix
from scipy.special import psi
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.decomposition._online_lda import (_dirichlet_expectation_1d,
_dirichlet_expectation_2d)
from sklearn.utils.testing import assert_allclose
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import if_safe_multiprocessing_with_blas
from sklearn.exceptions import NotFittedError
from sklearn.externals.six.moves import xrange
def _build_sparse_mtx():
# Create 3 topics and each topic has 3 distinct words.
# (Each word only belongs to a single topic.)
n_topics = 3
block = n_topics * np.ones((3, 3))
blocks = [block] * n_topics
X = block_diag(*blocks)
X = csr_matrix(X)
return (n_topics, X)
def test_lda_default_prior_params():
# default prior parameter should be `1 / topics`
# and verbose params should not affect result
n_topics, X = _build_sparse_mtx()
prior = 1. / n_topics
lda_1 = LatentDirichletAllocation(n_topics=n_topics, doc_topic_prior=prior,
topic_word_prior=prior, random_state=0)
lda_2 = LatentDirichletAllocation(n_topics=n_topics, random_state=0)
topic_distr_1 = lda_1.fit_transform(X)
topic_distr_2 = lda_2.fit_transform(X)
assert_almost_equal(topic_distr_1, topic_distr_2)
def test_lda_fit_batch():
# Test LDA batch learning_offset (`fit` method with 'batch' learning)
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, evaluate_every=1,
learning_method='batch', random_state=rng)
lda.fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for component in lda.components_:
# Find top 3 words in each LDA component
top_idx = set(component.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_fit_online():
# Test LDA online learning (`fit` method with 'online' learning)
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=10.,
evaluate_every=1, learning_method='online',
random_state=rng)
lda.fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for component in lda.components_:
# Find top 3 words in each LDA component
top_idx = set(component.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_partial_fit():
# Test LDA online learning (`partial_fit` method)
# (same as test_lda_batch)
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=10.,
total_samples=100, random_state=rng)
for i in xrange(3):
lda.partial_fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for c in lda.components_:
top_idx = set(c.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_dense_input():
# Test LDA with dense input.
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, learning_method='batch',
random_state=rng)
lda.fit(X.toarray())
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for component in lda.components_:
# Find top 3 words in each LDA component
top_idx = set(component.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_transform():
# Test LDA transform.
# Transform result cannot be negative and should be normalized
rng = np.random.RandomState(0)
X = rng.randint(5, size=(20, 10))
n_topics = 3
lda = LatentDirichletAllocation(n_topics=n_topics, random_state=rng)
X_trans = lda.fit_transform(X)
assert_true((X_trans > 0.0).any())
assert_array_almost_equal(np.sum(X_trans, axis=1), np.ones(X_trans.shape[0]))
def test_lda_fit_transform():
# Test LDA fit_transform & transform
# fit_transform and transform result should be the same
for method in ('online', 'batch'):
rng = np.random.RandomState(0)
X = rng.randint(10, size=(50, 20))
lda = LatentDirichletAllocation(n_topics=5, learning_method=method,
random_state=rng)
X_fit = lda.fit_transform(X)
X_trans = lda.transform(X)
assert_array_almost_equal(X_fit, X_trans, 4)
def test_lda_partial_fit_dim_mismatch():
# test `n_features` mismatch in `partial_fit`
rng = np.random.RandomState(0)
n_topics = rng.randint(3, 6)
n_col = rng.randint(6, 10)
X_1 = np.random.randint(4, size=(10, n_col))
X_2 = np.random.randint(4, size=(10, n_col + 1))
lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=5.,
total_samples=20, random_state=rng)
lda.partial_fit(X_1)
assert_raises_regexp(ValueError, r"^The provided data has",
lda.partial_fit, X_2)
def test_invalid_params():
# test `_check_params` method
X = np.ones((5, 10))
invalid_models = (
('n_topics', LatentDirichletAllocation(n_topics=0)),
('learning_method',
LatentDirichletAllocation(learning_method='unknown')),
('total_samples', LatentDirichletAllocation(total_samples=0)),
('learning_offset', LatentDirichletAllocation(learning_offset=-1)),
)
for param, model in invalid_models:
regex = r"^Invalid %r parameter" % param
assert_raises_regexp(ValueError, regex, model.fit, X)
def test_lda_negative_input():
# test pass dense matrix with sparse negative input.
X = -np.ones((5, 10))
lda = LatentDirichletAllocation()
regex = r"^Negative values in data passed"
assert_raises_regexp(ValueError, regex, lda.fit, X)
def test_lda_no_component_error():
# test `transform` and `perplexity` before `fit`
rng = np.random.RandomState(0)
X = rng.randint(4, size=(20, 10))
lda = LatentDirichletAllocation()
regex = r"^no 'components_' attribute"
assert_raises_regexp(NotFittedError, regex, lda.transform, X)
assert_raises_regexp(NotFittedError, regex, lda.perplexity, X)
def test_lda_transform_mismatch():
# test `n_features` mismatch in partial_fit and transform
rng = np.random.RandomState(0)
X = rng.randint(4, size=(20, 10))
X_2 = rng.randint(4, size=(10, 8))
n_topics = rng.randint(3, 6)
lda = LatentDirichletAllocation(n_topics=n_topics, random_state=rng)
lda.partial_fit(X)
assert_raises_regexp(ValueError, r"^The provided data has",
lda.partial_fit, X_2)
@if_safe_multiprocessing_with_blas
def test_lda_multi_jobs():
n_topics, X = _build_sparse_mtx()
# Test LDA batch training with multi CPU
for method in ('online', 'batch'):
rng = np.random.RandomState(0)
lda = LatentDirichletAllocation(n_topics=n_topics, n_jobs=2,
learning_method=method,
evaluate_every=1,
random_state=rng)
lda.fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for c in lda.components_:
top_idx = set(c.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
@if_safe_multiprocessing_with_blas
def test_lda_partial_fit_multi_jobs():
# Test LDA online training with multi CPU
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, n_jobs=2,
learning_offset=5., total_samples=30,
random_state=rng)
for i in range(2):
lda.partial_fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for c in lda.components_:
top_idx = set(c.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_preplexity_mismatch():
# test dimension mismatch in `perplexity` method
rng = np.random.RandomState(0)
n_topics = rng.randint(3, 6)
n_samples = rng.randint(6, 10)
X = np.random.randint(4, size=(n_samples, 10))
lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=5.,
total_samples=20, random_state=rng)
lda.fit(X)
# invalid samples
invalid_n_samples = rng.randint(4, size=(n_samples + 1, n_topics))
assert_raises_regexp(ValueError, r'Number of samples', lda.perplexity, X,
invalid_n_samples)
# invalid topic number
invalid_n_topics = rng.randint(4, size=(n_samples, n_topics + 1))
assert_raises_regexp(ValueError, r'Number of topics', lda.perplexity, X,
invalid_n_topics)
def test_lda_perplexity():
# Test LDA perplexity for batch training
# perplexity should be lower after each iteration
n_topics, X = _build_sparse_mtx()
for method in ('online', 'batch'):
lda_1 = LatentDirichletAllocation(n_topics=n_topics, max_iter=1,
learning_method=method,
total_samples=100, random_state=0)
lda_2 = LatentDirichletAllocation(n_topics=n_topics, max_iter=10,
learning_method=method,
total_samples=100, random_state=0)
distr_1 = lda_1.fit_transform(X)
perp_1 = lda_1.perplexity(X, distr_1, sub_sampling=False)
distr_2 = lda_2.fit_transform(X)
perp_2 = lda_2.perplexity(X, distr_2, sub_sampling=False)
assert_greater_equal(perp_1, perp_2)
perp_1_subsampling = lda_1.perplexity(X, distr_1, sub_sampling=True)
perp_2_subsampling = lda_2.perplexity(X, distr_2, sub_sampling=True)
assert_greater_equal(perp_1_subsampling, perp_2_subsampling)
def test_lda_score():
# Test LDA score for batch training
# score should be higher after each iteration
n_topics, X = _build_sparse_mtx()
for method in ('online', 'batch'):
lda_1 = LatentDirichletAllocation(n_topics=n_topics, max_iter=1,
learning_method=method,
total_samples=100, random_state=0)
lda_2 = LatentDirichletAllocation(n_topics=n_topics, max_iter=10,
learning_method=method,
total_samples=100, random_state=0)
lda_1.fit_transform(X)
score_1 = lda_1.score(X)
lda_2.fit_transform(X)
score_2 = lda_2.score(X)
assert_greater_equal(score_2, score_1)
def test_perplexity_input_format():
# Test LDA perplexity for sparse and dense input
# score should be the same for both dense and sparse input
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, max_iter=1,
learning_method='batch',
total_samples=100, random_state=0)
distr = lda.fit_transform(X)
perp_1 = lda.perplexity(X)
perp_2 = lda.perplexity(X, distr)
perp_3 = lda.perplexity(X.toarray(), distr)
assert_almost_equal(perp_1, perp_2)
assert_almost_equal(perp_1, perp_3)
def test_lda_score_perplexity():
# Test the relationship between LDA score and perplexity
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, max_iter=10,
random_state=0)
distr = lda.fit_transform(X)
perplexity_1 = lda.perplexity(X, distr, sub_sampling=False)
score = lda.score(X)
perplexity_2 = np.exp(-1. * (score / np.sum(X.data)))
assert_almost_equal(perplexity_1, perplexity_2)
def test_lda_empty_docs():
"""Test LDA on empty document (all-zero rows)."""
Z = np.zeros((5, 4))
for X in [Z, csr_matrix(Z)]:
lda = LatentDirichletAllocation(max_iter=750).fit(X)
assert_almost_equal(lda.components_.sum(axis=0),
np.ones(lda.components_.shape[1]))
def test_dirichlet_expectation():
"""Test Cython version of Dirichlet expectation calculation."""
x = np.logspace(-100, 10, 10000)
expectation = np.empty_like(x)
_dirichlet_expectation_1d(x, 0, expectation)
assert_allclose(expectation, np.exp(psi(x) - psi(np.sum(x))),
atol=1e-19)
x = x.reshape(100, 100)
assert_allclose(_dirichlet_expectation_2d(x),
psi(x) - psi(np.sum(x, axis=1)[:, np.newaxis]),
rtol=1e-11, atol=3e-9)
| bsd-3-clause |
ThorbenJensen/wifi-locator | src/model_database.py | 1 | 2257 | """Module that provides all connectivity to SQLite database."""
import datetime
import sqlite3
import pandas as pd
database_path = '../data/wifi.sqlite'
def get_signal_matrix(x, signals):
"""Get signals in the format of a feature matrix, to predict location."""
df = pd.DataFrame(data=None, columns=x.columns, index=[0])
df.ix[:, :] = 0
for bssid in signals.bssid:
if bssid not in df.columns:
print('Warning:')
print('A BSSID is not in historic data. ' +
'Consider logging more locations.\n')
continue
df.loc[0, bssid] = signals[signals.bssid == bssid].signal.values[0]
# if dataframe all zeros, throw exception and inform user
if (df.values == 0).all():
        raise Exception('None of the current wifi hotspots are in the historic data. ' +
                        'Please log more locations.')
return df
def write_signals_to_db(signals, db=database_path):
"""Write wifi signals to database."""
con = sqlite3.connect(db)
signals.to_sql(name='windows', con=con, if_exists='append', index=False)
con.close()
def log_signals(signals, location, db=database_path):
"""Log current wifi signals."""
signals['timestamp'] = datetime.datetime.utcnow()
signals['location'] = location
write_signals_to_db(signals, db)
def read_log_from_db(db=database_path, drop_na=False):
"""Read signal log as dataframe."""
con = sqlite3.connect(db)
df = pd.read_sql('SELECT * FROM windows', con=con)
con.close()
if drop_na:
df = df.dropna(axis='index', how='any') # only rows with locations
return df
def get_feature_matrix():
"""
Create feature matrix from signal log, sorted by timestamp.
    Returns only entries from observations that have a location label.
"""
df = read_log_from_db(drop_na=True)
df = df.pivot(index='timestamp', columns='bssid', values='signal')
df = df.sort_index()
# NaN to 0
df = df.fillna(0)
return df
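# Example (sketch only): combining the helpers in this module to train a
# classifier. scikit-learn and KNeighborsClassifier are assumptions made for
# the illustration, not dependencies of this module.
#   X = get_feature_matrix()
#   y = get_labels().loc[X.index, 'location']
#   from sklearn.neighbors import KNeighborsClassifier
#   clf = KNeighborsClassifier(n_neighbors=3).fit(X, y)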
def get_labels():
"""Return location labels for timestamps."""
df = read_log_from_db(drop_na=True)
df = df[['timestamp', 'location']]
df = df.drop_duplicates()
df = df.set_index('timestamp')
df = df.sort_index()
return df
| apache-2.0 |
rxa254/MoodCube | synapse/sinks/plotMoods.py | 1 | 1960 | #!/usr/bin/env python
from __future__ import division
import sys
import zmq
import pickle
import signal
import numpy as np
import matplotlib as mpl
#mpl.use('qt4agg')
import matplotlib.pyplot as plt
from matplotlib import animation
import logging
from .. import const
from .. import opc
from . import proc
sim = False
def plotJelly(sources, samples=1):
seconds = int(samples)
context = zmq.Context()
socket = context.socket(zmq.SUB)
socket.connect(const.MUX_SOURCE)
socket.setsockopt(zmq.SUBSCRIBE, '')
process_data = proc.ProcessData(sources, samples)
def recv_data():
source, msg = socket.recv_multipart()
return pickle.loads(msg)
if sim:
print('---***---')
z0 = process_data(recv_data())
print z0
mpl.rcParams['toolbar'] = 'None'
fig = plt.figure(figsize=(12,1.5))
z = 255 * np.random.random((8,64,3))
for k in range(8):
z[k,:,:] = z0[(k*64)+np.arange(64),:]
im = plt.imshow(z/255, interpolation='nearest')
plt.xticks([])
plt.yticks([])
fig.tight_layout(pad=0, h_pad=0)
def updatefig(z0):
packet = recv_data()
z0 = process_data(packet)
# convert from 512x3 to 8x64x3
z = np.zeros((8,64,3))
for k in range(8):
z[k,:,:] = z0[(k*64)+np.arange(64),:] # 8 x 64 x 3
im.set_array(z/255)
anim = animation.FuncAnimation(
fig, updatefig,
interval = 100,
blit = False, # seems to crash if True
)
plt.show()
else:
jelly = opc.Client(const.OPC_ADDR)
while True:
packet = recv_data()
z = process_data(packet)
jelly.put_pixels(z)
##########
def main():
signal.signal(signal.SIGINT, signal.SIG_DFL)
plotJelly()
if __name__ == '__main__':
main()
| bsd-3-clause |
Weihonghao/ECM | Vpy34/lib/python3.5/site-packages/pandas/core/tools/numeric.py | 7 | 5643 | import numpy as np
import pandas as pd
from pandas.core.dtypes.common import (
is_scalar,
is_numeric_dtype,
is_decimal,
is_datetime_or_timedelta_dtype,
is_number,
_ensure_object)
from pandas.core.dtypes.generic import ABCSeries, ABCIndexClass
from pandas.core.dtypes.cast import maybe_downcast_to_dtype
from pandas._libs import lib
def to_numeric(arg, errors='raise', downcast=None):
"""
Convert argument to a numeric type.
Parameters
----------
arg : list, tuple, 1-d array, or Series
errors : {'ignore', 'raise', 'coerce'}, default 'raise'
- If 'raise', then invalid parsing will raise an exception
- If 'coerce', then invalid parsing will be set as NaN
- If 'ignore', then invalid parsing will return the input
    downcast : {'integer', 'signed', 'unsigned', 'float'}, default None
If not None, and if the data has been successfully cast to a
numerical dtype (or if the data was numeric to begin with),
downcast that resulting data to the smallest numerical dtype
possible according to the following rules:
- 'integer' or 'signed': smallest signed int dtype (min.: np.int8)
- 'unsigned': smallest unsigned int dtype (min.: np.uint8)
- 'float': smallest float dtype (min.: np.float32)
As this behaviour is separate from the core conversion to
numeric values, any errors raised during the downcasting
will be surfaced regardless of the value of the 'errors' input.
In addition, downcasting will only occur if the size
of the resulting data's dtype is strictly larger than
the dtype it is to be cast to, so if none of the dtypes
checked satisfy that specification, no downcasting will be
performed on the data.
.. versionadded:: 0.19.0
Returns
-------
ret : numeric if parsing succeeded.
Return type depends on input. Series if Series, otherwise ndarray
Examples
--------
Take separate series and convert to numeric, coercing when told to
>>> import pandas as pd
>>> s = pd.Series(['1.0', '2', -3])
>>> pd.to_numeric(s)
0 1.0
1 2.0
2 -3.0
dtype: float64
>>> pd.to_numeric(s, downcast='float')
0 1.0
1 2.0
2 -3.0
dtype: float32
>>> pd.to_numeric(s, downcast='signed')
0 1
1 2
2 -3
dtype: int8
>>> s = pd.Series(['apple', '1.0', '2', -3])
>>> pd.to_numeric(s, errors='ignore')
0 apple
1 1.0
2 2
3 -3
dtype: object
>>> pd.to_numeric(s, errors='coerce')
0 NaN
1 1.0
2 2.0
3 -3.0
dtype: float64
"""
if downcast not in (None, 'integer', 'signed', 'unsigned', 'float'):
raise ValueError('invalid downcasting method provided')
is_series = False
is_index = False
is_scalars = False
if isinstance(arg, ABCSeries):
is_series = True
values = arg.values
elif isinstance(arg, ABCIndexClass):
is_index = True
values = arg.asi8
if values is None:
values = arg.values
elif isinstance(arg, (list, tuple)):
values = np.array(arg, dtype='O')
elif is_scalar(arg):
if is_decimal(arg):
return float(arg)
if is_number(arg):
return arg
is_scalars = True
values = np.array([arg], dtype='O')
elif getattr(arg, 'ndim', 1) > 1:
raise TypeError('arg must be a list, tuple, 1-d array, or Series')
else:
values = arg
try:
if is_numeric_dtype(values):
pass
elif is_datetime_or_timedelta_dtype(values):
values = values.astype(np.int64)
else:
values = _ensure_object(values)
coerce_numeric = False if errors in ('ignore', 'raise') else True
values = lib.maybe_convert_numeric(values, set(),
coerce_numeric=coerce_numeric)
except Exception:
if errors == 'raise':
raise
# attempt downcast only if the data has been successfully converted
# to a numerical dtype and if a downcast method has been specified
if downcast is not None and is_numeric_dtype(values):
typecodes = None
if downcast in ('integer', 'signed'):
typecodes = np.typecodes['Integer']
elif downcast == 'unsigned' and np.min(values) >= 0:
typecodes = np.typecodes['UnsignedInteger']
elif downcast == 'float':
typecodes = np.typecodes['Float']
# pandas support goes only to np.float32,
# as float dtypes smaller than that are
# extremely rare and not well supported
float_32_char = np.dtype(np.float32).char
float_32_ind = typecodes.index(float_32_char)
typecodes = typecodes[float_32_ind:]
if typecodes is not None:
# from smallest to largest
for dtype in typecodes:
if np.dtype(dtype).itemsize <= values.dtype.itemsize:
values = maybe_downcast_to_dtype(values, dtype)
# successful conversion
if values.dtype == dtype:
break
if is_series:
return pd.Series(values, index=arg.index, name=arg.name)
elif is_index:
# because we want to coerce to numeric if possible,
# do not use _shallow_copy_with_infer
return pd.Index(values, name=arg.name)
elif is_scalars:
return values[0]
else:
return values
| agpl-3.0 |
ScreamingUdder/mantid | Framework/PythonInterface/mantid/plots/plotfunctions.py | 1 | 24963 | # This file is part of the mantid package
#
# Copyright (C) 2017 mantidproject
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
import numpy
import mantid.kernel
import mantid.api
from mantid.plots.helperfunctions import *
import matplotlib.colors
import matplotlib.dates as mdates
# ================================================
# Private 2D Helper functions
# ================================================
def _setLabels1D(axes, workspace):
'''
helper function to automatically set axes labels for 1D plots
'''
labels = get_axes_labels(workspace)
axes.set_xlabel(labels[1])
axes.set_ylabel(labels[0])
def _setLabels2D(axes, workspace):
'''
helper function to automatically set axes labels for 2D plots
'''
labels = get_axes_labels(workspace)
axes.set_xlabel(labels[1])
axes.set_ylabel(labels[2])
# ========================================================
# Plot functions
# ========================================================
def plot(axes, workspace, *args, **kwargs):
'''
Unpack mantid workspace and render it with matplotlib. ``args`` and
``kwargs`` are passed to :py:meth:`matplotlib.axes.Axes.plot` after special
keyword arguments are removed. This will automatically label the
line according to the spectrum number unless specified otherwise.
:param axes: :class:`matplotlib.axes.Axes` object that will do the plotting
:param workspace: :class:`mantid.api.MatrixWorkspace` or :class:`mantid.api.IMDHistoWorkspace`
to extract the data from
:param specNum: spectrum number to plot if MatrixWorkspace
:param wkspIndex: workspace index to plot if MatrixWorkspace
:param distribution: ``None`` (default) asks the workspace. ``False`` means
divide by bin width. ``True`` means do not divide by bin width.
    Applies only when the workspace is a MatrixWorkspace histogram.
:param normalization: ``None`` (default) ask the workspace. Applies to MDHisto workspaces. It can override
the value from displayNormalizationHisto. It checks only if
the normalization is mantid.api.MDNormalization.NumEventsNormalization
:param LogName: if specified, it will plot the corresponding sample log. The x-axis
of the plot is the time difference between the log time and the first
value of the `proton_charge` log (if available) or the sample log's
first time.
:param StartFromLog: False by default. If True the time difference will be from the sample log's
first time, even if `proton_charge` log is available.
:param FullTime: False by default. If true, the full date and time will be plotted on the axis
instead of the time difference
:param ExperimentInfo: for MD Workspaces with multiple :class:`mantid.api.ExperimentInfo` is the
ExperimentInfo object from which to extract the log. It's 0 by default
    For matrix workspaces with more than one spectrum, either ``specNum`` or ``wkspIndex``
    needs to be specified. Giving both will generate a :class:`RuntimeError`. There is no similar
    keyword for MDHistoWorkspaces; these types of workspaces must have exactly one non-integrated
    dimension.
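    A minimal usage sketch (not from the original docs; ``ws`` is assumed to be an
    existing workspace, e.g. one created with :func:`mantid.simpleapi.CreateSampleWorkspace`)::
        import matplotlib.pyplot as plt
        from mantid.plots import plotfunctions
        fig, ax = plt.subplots()
        plotfunctions.plot(ax, ws, specNum=1)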
'''
if 'LogName' in kwargs:
(x, y, FullTime, LogName, units, kwargs) = get_sample_log(workspace, **kwargs)
axes.set_ylabel('{0} ({1})'.format(LogName, units))
axes.set_xlabel('Time (s)')
if FullTime:
axes.xaxis_date()
axes.xaxis.set_major_formatter(mdates.DateFormatter('%H:%M:%S\n%b-%d'))
axes.set_xlabel('Time')
kwargs['linestyle']='steps-post'
return axes.plot(x, y, *args, **kwargs)
if isinstance(workspace, mantid.dataobjects.MDHistoWorkspace):
(normalization, kwargs) = get_normalization(workspace, **kwargs)
(x, y, dy) = get_md_data1d(workspace, normalization)
else:
(wkspIndex, distribution, kwargs) = get_wksp_index_dist_and_label(workspace, **kwargs)
(x, y, dy, dx) = get_spectrum(workspace, wkspIndex, distribution, withDy=False, withDx=False)
_setLabels1D(axes, workspace)
return axes.plot(x, y, *args, **kwargs)
def errorbar(axes, workspace, *args, **kwargs):
'''
Unpack mantid workspace and render it with matplotlib. ``args`` and
``kwargs`` are passed to :py:meth:`matplotlib.axes.Axes.errorbar` after special
keyword arguments are removed. This will automatically label the
line according to the spectrum number unless specified otherwise.
:param axes: :class:`matplotlib.axes.Axes` object that will do the plotting
:param workspace: :class:`mantid.api.MatrixWorkspace` or :class:`mantid.api.IMDHistoWorkspace`
to extract the data from
:param specNum: spectrum number to plot if MatrixWorkspace
:param wkspIndex: workspace index to plot if MatrixWorkspace
:param distribution: ``None`` (default) asks the workspace. ``False`` means
divide by bin width. ``True`` means do not divide by bin width.
    Applies only when the workspace is a MatrixWorkspace histogram.
:param normalization: ``None`` (default) ask the workspace. Applies to MDHisto workspaces. It can override
the value from displayNormalizationHisto. It checks only if
the normalization is mantid.api.MDNormalization.NumEventsNormalization
    For matrix workspaces with more than one spectrum, either ``specNum`` or ``wkspIndex``
    needs to be specified. Giving both will generate a :class:`RuntimeError`. There is no similar
    keyword for MDHistoWorkspaces; these types of workspaces must have exactly one non-integrated
    dimension.
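    A minimal usage sketch (as with :func:`plot`, ``ws`` names an assumed,
    pre-existing MatrixWorkspace)::
        fig, ax = plt.subplots()
        errorbar(ax, ws, wkspIndex=0, fmt='o', capsize=2)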
'''
if isinstance(workspace, mantid.dataobjects.MDHistoWorkspace):
(normalization, kwargs) = get_normalization(workspace, **kwargs)
(x, y, dy) = get_md_data1d(workspace, normalization)
dx = None
else:
(wkspIndex, distribution, kwargs) = get_wksp_index_dist_and_label(workspace, **kwargs)
(x, y, dy, dx) = get_spectrum(workspace, wkspIndex, distribution, withDy=True, withDx=True)
_setLabels1D(axes, workspace)
return axes.errorbar(x, y, dy, dx, *args, **kwargs)
def scatter(axes, workspace, *args, **kwargs):
'''
Unpack mantid workspace and render it with matplotlib. ``args`` and
``kwargs`` are passed to :py:meth:`matplotlib.axes.Axes.scatter` after special
keyword arguments are removed. This will automatically label the
line according to the spectrum number unless specified otherwise.
:param axes: :class:`matplotlib.axes.Axes` object that will do the plotting
:param workspace: :class:`mantid.api.MatrixWorkspace` or :class:`mantid.api.IMDHistoWorkspace`
to extract the data from
:param specNum: spectrum number to plot if MatrixWorkspace
:param wkspIndex: workspace index to plot if MatrixWorkspace
:param distribution: ``None`` (default) asks the workspace. ``False`` means
divide by bin width. ``True`` means do not divide by bin width.
    Applies only when the workspace is a MatrixWorkspace histogram.
:param normalization: ``None`` (default) ask the workspace. Applies to MDHisto workspaces. It can override
the value from displayNormalizationHisto. It checks only if
the normalization is mantid.api.MDNormalization.NumEventsNormalization
    For matrix workspaces with more than one spectrum, either ``specNum`` or ``wkspIndex``
    needs to be specified. Giving both will generate a :class:`RuntimeError`. There is no similar
    keyword for MDHistoWorkspaces; these types of workspaces must have exactly one non-integrated
    dimension.
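    A minimal usage sketch (``ws`` again stands for an assumed, pre-existing workspace)::
        fig, ax = plt.subplots()
        scatter(ax, ws, specNum=2, marker='.', s=4)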
'''
if isinstance(workspace, mantid.dataobjects.MDHistoWorkspace):
(normalization, kwargs) = get_normalization(workspace, **kwargs)
(x, y, _) = get_md_data1d(workspace, normalization)
else:
(wkspIndex, distribution, kwargs) = get_wksp_index_dist_and_label(workspace, **kwargs)
(x, y, _, _) = get_spectrum(workspace, wkspIndex, distribution)
_setLabels1D(axes, workspace)
return axes.scatter(x, y, *args, **kwargs)
def contour(axes, workspace, *args, **kwargs):
'''
Essentially the same as :meth:`matplotlib.axes.Axes.contour`
    but calculates the contour levels. Currently this only works with
workspaces that have a constant number of bins between spectra.
:param axes: :class:`matplotlib.axes.Axes` object that will do the plotting
:param workspace: :class:`mantid.api.MatrixWorkspace` or :class:`mantid.api.IMDHistoWorkspace`
to extract the data from
:param distribution: ``None`` (default) asks the workspace. ``False`` means
divide by bin width. ``True`` means do not divide by bin width.
    Applies only when the matrix workspace is a histogram.
:param normalization: ``None`` (default) ask the workspace. Applies to MDHisto workspaces. It can override
the value from displayNormalizationHisto. It checks only if
the normalization is mantid.api.MDNormalization.NumEventsNormalization
'''
if isinstance(workspace, mantid.dataobjects.MDHistoWorkspace):
(normalization, kwargs) = get_normalization(workspace, **kwargs)
x, y, z = get_md_data2d_bin_centers(workspace, normalization)
else:
(distribution, kwargs) = get_distribution(workspace, **kwargs)
(x, y, z) = get_matrix_2d_data(workspace, distribution, histogram2D=False)
_setLabels2D(axes, workspace)
return axes.contour(x, y, z, *args, **kwargs)
def contourf(axes, workspace, *args, **kwargs):
'''
Essentially the same as :meth:`matplotlib.axes.Axes.contourf`
but calculates the countour levels. Currently this only works with
workspaces that have a constant number of bins between spectra.
:param axes: :class:`matplotlib.axes.Axes` object that will do the plotting
:param workspace: :class:`mantid.api.MatrixWorkspace` or :class:`mantid.api.IMDHistoWorkspace`
to extract the data from
:param distribution: ``None`` (default) asks the workspace. ``False`` means
divide by bin width. ``True`` means do not divide by bin width.
    Applies only when the matrix workspace is a histogram.
:param normalization: ``None`` (default) ask the workspace. Applies to MDHisto workspaces. It can override
the value from displayNormalizationHisto. It checks only if
the normalization is mantid.api.MDNormalization.NumEventsNormalization
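    A minimal usage sketch (``ws2d`` stands for an assumed workspace with two
    plottable dimensions)::
        fig, ax = plt.subplots()
        cf = contourf(ax, ws2d)
        fig.colorbar(cf, ax=ax)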
'''
if isinstance(workspace, mantid.dataobjects.MDHistoWorkspace):
(normalization, kwargs) = get_normalization(workspace, **kwargs)
x, y, z = get_md_data2d_bin_centers(workspace, normalization)
else:
(distribution, kwargs) = get_distribution(workspace, **kwargs)
(x, y, z) = get_matrix_2d_data(workspace, distribution, histogram2D=False)
_setLabels2D(axes, workspace)
return axes.contourf(x, y, z, *args, **kwargs)
def _pcolorpieces(axes, workspace, distribution, *args, **kwargs):
'''
Helper function for pcolor, pcolorfast, and pcolormesh that will
    plot a 2D representation of each spectrum. The polycollections or meshes
will be normalized to the same intensity limits.
:param axes: :class:`matplotlib.axes.Axes` object that will do the plotting
:param workspace: :class:`mantid.api.MatrixWorkspace` to extract the data from
:param distribution: ``None`` (default) asks the workspace. ``False`` means
divide by bin width. ``True`` means do not divide by bin width.
    Applies only when the matrix workspace is a histogram.
:param pcolortype: this keyword allows the plotting to be one of pcolormesh or
pcolorfast if there is "mesh" or "fast" in the value of the keyword, or
pcolor by default
    Note: the return value is the pcolor, pcolormesh, or pcolorfast artist of the last spectrum
'''
(x, y, z) = get_uneven_data(workspace, distribution)
pcolortype = kwargs.pop('pcolortype', '')
mini = numpy.min([numpy.min(i) for i in z])
maxi = numpy.max([numpy.max(i) for i in z])
if 'vmin' in kwargs:
mini = kwargs['vmin']
if 'vmax' in kwargs:
maxi = kwargs['vmax']
if 'norm' not in kwargs:
kwargs['norm'] = matplotlib.colors.Normalize(vmin=mini, vmax=maxi)
else:
if kwargs['norm'].vmin is None:
kwargs['norm'].vmin = mini
if kwargs['norm'].vmax is None:
kwargs['norm'].vmax = maxi
for xi, yi, zi in zip(x, y, z):
XX, YY = numpy.meshgrid(xi, yi, indexing='ij')
if 'mesh' in pcolortype.lower():
cm = axes.pcolormesh(XX, YY, zi.reshape(-1, 1), **kwargs)
elif 'fast' in pcolortype.lower():
cm = axes.pcolorfast(XX, YY, zi.reshape(-1, 1), **kwargs)
else:
cm = axes.pcolor(XX, YY, zi.reshape(-1, 1), **kwargs)
return cm
def pcolor(axes, workspace, *args, **kwargs):
'''
Essentially the same as :meth:`matplotlib.axes.Axes.pcolor`
:param axes: :class:`matplotlib.axes.Axes` object that will do the plotting
:param workspace: :class:`mantid.api.MatrixWorkspace` or :class:`mantid.api.IMDHistoWorkspace`
to extract the data from
:param distribution: ``None`` (default) asks the workspace. ``False`` means
divide by bin width. ``True`` means do not divide by bin width.
    Applies only when the matrix workspace is a histogram.
:param normalization: ``None`` (default) ask the workspace. Applies to MDHisto workspaces. It can override
the value from displayNormalizationHisto. It checks only if
the normalization is mantid.api.MDNormalization.NumEventsNormalization
:param axisaligned: ``False`` (default). If ``True``, or if the workspace has a variable
number of bins, the polygons will be aligned with the axes
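    A minimal usage sketch (``ws2d`` is an assumed 2D workspace; ``axisaligned``
    is only needed for unevenly binned data)::
        fig, ax = plt.subplots()
        pc = pcolor(ax, ws2d, axisaligned=True)
        fig.colorbar(pc, ax=ax)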
'''
_setLabels2D(axes, workspace)
if isinstance(workspace, mantid.dataobjects.MDHistoWorkspace):
(normalization, kwargs) = get_normalization(workspace, **kwargs)
x, y, z = get_md_data2d_bin_bounds(workspace, normalization)
else:
(aligned, kwargs) = get_data_uneven_flag(workspace, **kwargs)
(distribution, kwargs) = get_distribution(workspace, **kwargs)
if aligned:
kwargs['pcolortype'] = ''
return _pcolorpieces(axes, workspace, distribution, *args, **kwargs)
else:
(x, y, z) = get_matrix_2d_data(workspace, distribution, histogram2D=True)
return axes.pcolor(x, y, z, *args, **kwargs)
def pcolorfast(axes, workspace, *args, **kwargs):
'''
Essentially the same as :meth:`matplotlib.axes.Axes.pcolorfast`
:param axes: :class:`matplotlib.axes.Axes` object that will do the plotting
:param workspace: :class:`mantid.api.MatrixWorkspace` or :class:`mantid.api.IMDHistoWorkspace`
to extract the data from
:param distribution: ``None`` (default) asks the workspace. ``False`` means
divide by bin width. ``True`` means do not divide by bin width.
        Applies only when the matrix workspace is a histogram.
    :param normalization: ``None`` (default) asks the workspace. Applies to MDHisto workspaces. It can override
the value from displayNormalizationHisto. It checks only if
the normalization is mantid.api.MDNormalization.NumEventsNormalization
:param axisaligned: ``False`` (default). If ``True``, or if the workspace has a variable
number of bins, the polygons will be aligned with the axes
'''
_setLabels2D(axes, workspace)
if isinstance(workspace, mantid.dataobjects.MDHistoWorkspace):
(normalization, kwargs) = get_normalization(workspace, **kwargs)
x, y, z = get_md_data2d_bin_bounds(workspace, normalization)
else:
(aligned, kwargs) = get_data_uneven_flag(workspace, **kwargs)
(distribution, kwargs) = get_distribution(workspace, **kwargs)
if aligned:
kwargs['pcolortype'] = 'fast'
return _pcolorpieces(axes, workspace, distribution, *args, **kwargs)
else:
(x, y, z) = get_matrix_2d_data(workspace, distribution, histogram2D=True)
return axes.pcolorfast(x, y, z, *args, **kwargs)
def pcolormesh(axes, workspace, *args, **kwargs):
'''
Essentially the same as :meth:`matplotlib.axes.Axes.pcolormesh`.
:param axes: :class:`matplotlib.axes.Axes` object that will do the plotting
:param workspace: :class:`mantid.api.MatrixWorkspace` or :class:`mantid.api.IMDHistoWorkspace`
to extract the data from
:param distribution: ``None`` (default) asks the workspace. ``False`` means
divide by bin width. ``True`` means do not divide by bin width.
        Applies only when the matrix workspace is a histogram.
    :param normalization: ``None`` (default) asks the workspace. Applies to MDHisto workspaces. It can override
the value from displayNormalizationHisto. It checks only if
the normalization is mantid.api.MDNormalization.NumEventsNormalization
:param axisaligned: ``False`` (default). If ``True``, or if the workspace has a variable
number of bins, the polygons will be aligned with the axes
'''
_setLabels2D(axes, workspace)
if isinstance(workspace, mantid.dataobjects.MDHistoWorkspace):
(normalization, kwargs) = get_normalization(workspace, **kwargs)
x, y, z = get_md_data2d_bin_bounds(workspace, normalization)
else:
(aligned, kwargs) = get_data_uneven_flag(workspace, **kwargs)
(distribution, kwargs) = get_distribution(workspace, **kwargs)
if aligned:
kwargs['pcolortype'] = 'mesh'
return _pcolorpieces(axes, workspace, distribution, *args, **kwargs)
else:
(x, y, z) = get_matrix_2d_data(workspace, distribution, histogram2D=True)
return axes.pcolormesh(x, y, z, *args, **kwargs)
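# Illustrative use of the wrappers above (a sketch, not part of the original module;
# it assumes a workspace `ws` has already been created with the mantid API):
#
#     import matplotlib.pyplot as plt
#     fig, ax = plt.subplots()
#     mesh = pcolormesh(ax, ws)
#     fig.colorbar(mesh, ax=ax)
#     plt.show()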
def tripcolor(axes, workspace, *args, **kwargs):
'''
To be used with non-uniform grids. Currently this only works with workspaces
that have a constant number of bins between spectra or with
MDHistoWorkspaces.
:param axes: :class:`matplotlib.axes.Axes` object that will do the plotting
:param workspace: :class:`mantid.api.MatrixWorkspace` or :class:`mantid.api.IMDHistoWorkspace`
to extract the data from
:param distribution: ``None`` (default) asks the workspace. ``False`` means
divide by bin width. ``True`` means do not divide by bin width.
        Applies only when the matrix workspace is a histogram.
    :param normalization: ``None`` (default) asks the workspace. Applies to MDHisto workspaces. It can override
the value from displayNormalizationHisto. It checks only if
the normalization is mantid.api.MDNormalization.NumEventsNormalization
See :meth:`matplotlib.axes.Axes.tripcolor` for more information.
'''
if isinstance(workspace, mantid.dataobjects.MDHistoWorkspace):
(normalization, kwargs) = get_normalization(workspace, **kwargs)
x_temp, y_temp, z = get_md_data2d_bin_centers(workspace, normalization)
x, y = numpy.meshgrid(x_temp, y_temp)
else:
(distribution, kwargs) = get_distribution(workspace, **kwargs)
(x, y, z) = get_matrix_2d_data(workspace, distribution, histogram2D=False)
_setLabels2D(axes, workspace)
return axes.tripcolor(x.ravel(), y.ravel(), z.ravel(), *args, **kwargs)
def tricontour(axes, workspace, *args, **kwargs):
'''
Essentially the same as :meth:`mantid.plots.contour`, but works
for non-uniform grids. Currently this only works with workspaces
that have a constant number of bins between spectra or with
MDHistoWorkspaces.
:param axes: :class:`matplotlib.axes.Axes` object that will do the plotting
:param workspace: :class:`mantid.api.MatrixWorkspace` or :class:`mantid.api.IMDHistoWorkspace`
to extract the data from
:param distribution: ``None`` (default) asks the workspace. ``False`` means
divide by bin width. ``True`` means do not divide by bin width.
        Applies only when the matrix workspace is a histogram.
    :param normalization: ``None`` (default) asks the workspace. Applies to MDHisto workspaces. It can override
the value from displayNormalizationHisto. It checks only if
the normalization is mantid.api.MDNormalization.NumEventsNormalization
See :meth:`matplotlib.axes.Axes.tricontour` for more information.
'''
if isinstance(workspace, mantid.dataobjects.MDHistoWorkspace):
(normalization, kwargs) = get_normalization(workspace, **kwargs)
(x_temp, y_temp, z) = get_md_data2d_bin_centers(workspace, normalization)
(x, y) = numpy.meshgrid(x_temp, y_temp)
else:
(distribution, kwargs) = get_distribution(workspace, **kwargs)
(x, y, z) = get_matrix_2d_data(workspace, distribution, histogram2D=False)
_setLabels2D(axes, workspace)
# tricontour segfaults if many z values are not finite
# https://github.com/matplotlib/matplotlib/issues/10167
x = x.ravel()
y = y.ravel()
z = z.ravel()
condition = numpy.isfinite(z)
x = x[condition]
y = y[condition]
z = z[condition]
return axes.tricontour(x, y, z, *args, **kwargs)
def tricontourf(axes, workspace, *args, **kwargs):
'''
Essentially the same as :meth:`mantid.plots.contourf`, but works
for non-uniform grids. Currently this only works with workspaces
that have a constant number of bins between spectra or with
MDHistoWorkspaces.
:param axes: :class:`matplotlib.axes.Axes` object that will do the plotting
:param workspace: :class:`mantid.api.MatrixWorkspace` or :class:`mantid.api.IMDHistoWorkspace`
to extract the data from
:param distribution: ``None`` (default) asks the workspace. ``False`` means
divide by bin width. ``True`` means do not divide by bin width.
        Applies only when the matrix workspace is a histogram.
    :param normalization: ``None`` (default) asks the workspace. Applies to MDHisto workspaces. It can override
the value from displayNormalizationHisto. It checks only if
the normalization is mantid.api.MDNormalization.NumEventsNormalization
See :meth:`matplotlib.axes.Axes.tricontourf` for more information.
'''
if isinstance(workspace, mantid.dataobjects.MDHistoWorkspace):
(normalization, kwargs) = get_normalization(workspace, **kwargs)
(x_temp, y_temp, z) = get_md_data2d_bin_centers(workspace, normalization)
(x, y) = numpy.meshgrid(x_temp, y_temp)
else:
(distribution, kwargs) = get_distribution(workspace, **kwargs)
(x, y, z) = get_matrix_2d_data(workspace, distribution, histogram2D=False)
_setLabels2D(axes, workspace)
# tricontourf segfaults if many z values are not finite
# https://github.com/matplotlib/matplotlib/issues/10167
x = x.ravel()
y = y.ravel()
z = z.ravel()
condition = numpy.isfinite(z)
x = x[condition]
y = y[condition]
z = z[condition]
return axes.tricontourf(x, y, z, *args, **kwargs)
| gpl-3.0 |
benjaminpope/whisky | geometry/mk_plain.py | 2 | 2914 | #!/usr/bin/env python
''' -------------------------------------------------------
This procedure generates a coordinates file for a hex
pupil made of an arbitrary number of rings.
Additional constraints on the location of spiders make
    it look like your favorite telescope primary mirror
------------------------------------------------------- '''
import numpy as np, matplotlib.pyplot as plt
import time
nr = 50 # rings within the pupil (should be ~> 50)
rmax = 5.093/2. # outer diameter: 5.093 m
rmin = 1.829/2. # central obstruction: 1.829 m
thick = 0 #4*0.257 # adopted spider thickness (meters)
srad = 0.15 # segment "radius"
rad = np.sqrt(3)*srad # radius of the first hex ring in meters
xs = np.array(())
ys = np.array(())
fig = plt.figure(0, figsize=(6,6))
plt.clf()
ax = plt.subplot(111)
circ1 = plt.Circle((0,0), rmax, facecolor='none', linewidth=1)
circ2 = plt.Circle((0,0), rmin, facecolor='none', linewidth=1)
ax.add_patch(circ1)
ax.add_patch(circ2)
#plt.clf()
ax.axis([-rmax,rmax, -rmax,rmax], aspect='equal')
for i in range(1-nr, nr, 1):
for j in xrange(1-nr, nr, 1):
x = srad * (i + 0.5 * j)
y = j * np.sqrt(3)/2.*srad
if (abs(i+j) < nr):
xs = np.append(xs, x)
ys = np.append(ys, y)
# modifications to match the actual telescope pupil (1): diameter constraints
# -----------------------------------------------------------------------
xx, yy = xs.copy(), ys.copy() # temporary copies
xs, ys = np.array(()), np.array(()) # start from scratch again
for i in range(xx.size):
thisrad = np.sqrt(xx[i]**2 + yy[i]**2)
if (1.02 * rmin < thisrad < (0.99 * rmax)):# + 0.1*srad)):
xs = np.append(xs, xx[i])
ys = np.append(ys, yy[i])
# modifications to match the actual telescope pupil (2): spiders
# -----------------------------------------------------------
rm_spiders = False
if rm_spiders:
xx, yy = xs.copy(), ys.copy() # temporary copies
xs, ys = np.array(()), np.array(()) # start from scratch again
for i in range(xx.size):
if (np.abs(xx[i]) > thick/2.) and (np.abs(yy[i]) > thick/2.):
xs = np.append(xs, xx[i])
ys = np.append(ys, yy[i])
# plot segments
# -------------
r0 = srad/np.sqrt(3)
th = 2*np.pi*np.arange(6)/6. + np.pi/6.
rs = np.sqrt(xs**2+ys**2)
# xs *= rmax/rs.max()
# ys *= rmax/rs.max()
for i in range(xs.size):
hx = xs[i] + r0 * np.cos(th)
hy = ys[i] + r0 * np.sin(th)
ax.fill(hx, hy, fc='none', linewidth=1)
ax.plot(xs, ys, 'r.')
np.savetxt("./geometry/plain.txt", np.transpose((xs,ys)),
fmt='%12.9f')
print "--------------------------------------------------"
print "%d pupil sample points were included in the pupil " % xs.size
print "--------------------------------------------------"
plt.show()
| gpl-3.0 |
klauer/bluesky | doc/source/conf.py | 4 | 9634 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# bluesky documentation build configuration file, created by
# sphinx-quickstart on Sun Jun 7 15:25:26 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
import sphinx_rtd_theme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.mathjax',
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinxcontrib.napoleon',
'IPython.sphinxext.ipython_directive',
'IPython.sphinxext.ipython_console_highlighting',
'matplotlib.sphinxext.plot_directive',
]
# Generate the API documentation when building
autosummary_generate = True
numpydoc_show_class_members = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'bluesky'
copyright = '2015, Brookhaven National Lab'
author = 'Brookhaven National Lab'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0.1'
# The full version, including alpha/beta/rc tags.
release = '0.0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'blueskydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'bluesky.tex', 'bluesky Documentation',
'Brookhaven National Lab', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'bluesky', 'bluesky Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'bluesky', 'bluesky Documentation',
author, 'bluesky', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| bsd-3-clause |
arabenjamin/scikit-learn | examples/cluster/plot_cluster_comparison.py | 246 | 4684 | """
=========================================================
Comparing different clustering algorithms on toy datasets
=========================================================
This example aims at showing characteristics of different
clustering algorithms on datasets that are "interesting"
but still in 2D. The last dataset is an example of a 'null'
situation for clustering: the data is homogeneous, and
there is no good clustering.
While these examples give some intuition about the algorithms,
this intuition might not apply to very high dimensional data.
The results could be improved by tweaking the parameters for
each clustering strategy, for instance setting the number of
clusters for the methods that need this parameter
specified. Note that affinity propagation has a tendency to
create many clusters. Thus in this example its two parameters
(damping and per-point preference) were set to mitigate this
behavior.
"""
print(__doc__)
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn import cluster, datasets
from sklearn.neighbors import kneighbors_graph
from sklearn.preprocessing import StandardScaler
np.random.seed(0)
# Generate datasets. We choose the size big enough to see the scalability
# of the algorithms, but not too big to avoid too long running times
n_samples = 1500
noisy_circles = datasets.make_circles(n_samples=n_samples, factor=.5,
noise=.05)
noisy_moons = datasets.make_moons(n_samples=n_samples, noise=.05)
blobs = datasets.make_blobs(n_samples=n_samples, random_state=8)
no_structure = np.random.rand(n_samples, 2), None
colors = np.array([x for x in 'bgrcmykbgrcmykbgrcmykbgrcmyk'])
colors = np.hstack([colors] * 20)
clustering_names = [
'MiniBatchKMeans', 'AffinityPropagation', 'MeanShift',
'SpectralClustering', 'Ward', 'AgglomerativeClustering',
'DBSCAN', 'Birch']
plt.figure(figsize=(len(clustering_names) * 2 + 3, 9.5))
plt.subplots_adjust(left=.02, right=.98, bottom=.001, top=.96, wspace=.05,
hspace=.01)
plot_num = 1
datasets = [noisy_circles, noisy_moons, blobs, no_structure]
for i_dataset, dataset in enumerate(datasets):
X, y = dataset
# normalize dataset for easier parameter selection
X = StandardScaler().fit_transform(X)
# estimate bandwidth for mean shift
bandwidth = cluster.estimate_bandwidth(X, quantile=0.3)
# connectivity matrix for structured Ward
connectivity = kneighbors_graph(X, n_neighbors=10, include_self=False)
# make connectivity symmetric
connectivity = 0.5 * (connectivity + connectivity.T)
# create clustering estimators
ms = cluster.MeanShift(bandwidth=bandwidth, bin_seeding=True)
two_means = cluster.MiniBatchKMeans(n_clusters=2)
ward = cluster.AgglomerativeClustering(n_clusters=2, linkage='ward',
connectivity=connectivity)
spectral = cluster.SpectralClustering(n_clusters=2,
eigen_solver='arpack',
affinity="nearest_neighbors")
dbscan = cluster.DBSCAN(eps=.2)
affinity_propagation = cluster.AffinityPropagation(damping=.9,
preference=-200)
average_linkage = cluster.AgglomerativeClustering(
linkage="average", affinity="cityblock", n_clusters=2,
connectivity=connectivity)
birch = cluster.Birch(n_clusters=2)
clustering_algorithms = [
two_means, affinity_propagation, ms, spectral, ward, average_linkage,
dbscan, birch]
for name, algorithm in zip(clustering_names, clustering_algorithms):
# predict cluster memberships
t0 = time.time()
algorithm.fit(X)
t1 = time.time()
if hasattr(algorithm, 'labels_'):
y_pred = algorithm.labels_.astype(np.int)
else:
y_pred = algorithm.predict(X)
# plot
plt.subplot(4, len(clustering_algorithms), plot_num)
if i_dataset == 0:
plt.title(name, size=18)
plt.scatter(X[:, 0], X[:, 1], color=colors[y_pred].tolist(), s=10)
if hasattr(algorithm, 'cluster_centers_'):
centers = algorithm.cluster_centers_
center_colors = colors[:len(centers)]
plt.scatter(centers[:, 0], centers[:, 1], s=100, c=center_colors)
plt.xlim(-2, 2)
plt.ylim(-2, 2)
plt.xticks(())
plt.yticks(())
plt.text(.99, .01, ('%.2fs' % (t1 - t0)).lstrip('0'),
transform=plt.gca().transAxes, size=15,
horizontalalignment='right')
plot_num += 1
plt.show()
| bsd-3-clause |
xiaoxiamii/scikit-learn | examples/ensemble/plot_forest_importances_faces.py | 403 | 1519 | """
=================================================
Pixel importances with a parallel forest of trees
=================================================
This example shows the use of forests of trees to evaluate the importance
of the pixels in an image classification task (faces). The hotter the pixel,
the more important.
The code below also illustrates how the construction and the computation
of the predictions can be parallelized within multiple jobs.
"""
print(__doc__)
from time import time
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.ensemble import ExtraTreesClassifier
# Number of cores to use to perform parallel fitting of the forest model
n_jobs = 1
# Load the faces dataset
data = fetch_olivetti_faces()
X = data.images.reshape((len(data.images), -1))
y = data.target
mask = y < 5 # Limit to 5 classes
X = X[mask]
y = y[mask]
# Build a forest and compute the pixel importances
print("Fitting ExtraTreesClassifier on faces data with %d cores..." % n_jobs)
t0 = time()
forest = ExtraTreesClassifier(n_estimators=1000,
max_features=128,
n_jobs=n_jobs,
random_state=0)
forest.fit(X, y)
print("done in %0.3fs" % (time() - t0))
importances = forest.feature_importances_
importances = importances.reshape(data.images[0].shape)
# Plot pixel importances
plt.matshow(importances, cmap=plt.cm.hot)
plt.title("Pixel importances with forests of trees")
plt.show()
| bsd-3-clause |
demianw/dipy | dipy/core/optimize.py | 12 | 15237 | """ A unified interface for performing and debugging optimization problems.
Only L-BFGS-B and Powell are supported in this class for versions of
Scipy < 0.12. All optimizers are available for scipy >= 0.12.
"""
import abc
from distutils.version import LooseVersion
import numpy as np
import scipy
import scipy.sparse as sps
import scipy.optimize as opt
from dipy.utils.six import with_metaclass
SCIPY_LESS_0_12 = LooseVersion(scipy.version.short_version) < '0.12'
if not SCIPY_LESS_0_12:
from scipy.optimize import minimize
else:
from scipy.optimize import fmin_l_bfgs_b, fmin_powell
class Optimizer(object):
def __init__(self, fun, x0, args=(), method='L-BFGS-B', jac=None,
hess=None, hessp=None, bounds=None, constraints=(),
tol=None, callback=None, options=None, evolution=False):
""" A class for handling minimization of scalar function of one or more
variables.
Parameters
----------
fun : callable
Objective function.
x0 : ndarray
Initial guess.
args : tuple, optional
Extra arguments passed to the objective function and its
derivatives (Jacobian, Hessian).
method : str, optional
Type of solver. Should be one of
- 'Nelder-Mead'
- 'Powell'
- 'CG'
- 'BFGS'
- 'Newton-CG'
- 'Anneal'
- 'L-BFGS-B'
- 'TNC'
- 'COBYLA'
- 'SLSQP'
- 'dogleg'
- 'trust-ncg'
jac : bool or callable, optional
Jacobian of objective function. Only for CG, BFGS, Newton-CG,
dogleg, trust-ncg.
If `jac` is a Boolean and is True, `fun` is assumed to return the
value of Jacobian along with the objective function. If False, the
Jacobian will be estimated numerically.
`jac` can also be a callable returning the Jacobian of the
objective. In this case, it must accept the same arguments
as `fun`.
hess, hessp : callable, optional
Hessian of objective function or Hessian of objective function
times an arbitrary vector p. Only for Newton-CG,
dogleg, trust-ncg.
Only one of `hessp` or `hess` needs to be given. If `hess` is
provided, then `hessp` will be ignored. If neither `hess` nor
`hessp` is provided, then the hessian product will be approximated
using finite differences on `jac`. `hessp` must compute the Hessian
times an arbitrary vector.
bounds : sequence, optional
Bounds for variables (only for L-BFGS-B, TNC and SLSQP).
``(min, max)`` pairs for each element in ``x``, defining
the bounds on that parameter. Use None for one of ``min`` or
``max`` when there is no bound in that direction.
constraints : dict or sequence of dict, optional
Constraints definition (only for COBYLA and SLSQP).
Each constraint is defined in a dictionary with fields:
type : str
Constraint type: 'eq' for equality, 'ineq' for inequality.
fun : callable
The function defining the constraint.
jac : callable, optional
The Jacobian of `fun` (only for SLSQP).
args : sequence, optional
Extra arguments to be passed to the function and Jacobian.
Equality constraint means that the constraint function result is to
be zero whereas inequality means that it is to be non-negative.
Note that COBYLA only supports inequality constraints.
tol : float, optional
Tolerance for termination. For detailed control, use
solver-specific options.
callback : callable, optional
Called after each iteration, as ``callback(xk)``, where ``xk`` is
the current parameter vector. Only available using Scipy >= 0.12.
options : dict, optional
A dictionary of solver options. All methods accept the following
generic options:
maxiter : int
Maximum number of iterations to perform.
disp : bool
Set to True to print convergence messages.
For method-specific options, see
`show_options('minimize', method)`.
evolution : bool, optional
save history of x for each iteration. Only available using Scipy
>= 0.12.
See also
---------
scipy.optimize.minimize
"""
self.size_of_x = len(x0)
self._evol_kx = None
_eps = np.finfo(float).eps
if SCIPY_LESS_0_12:
if evolution is True:
print('Saving history is available only with Scipy >= 0.12.')
if method == 'L-BFGS-B':
default_options = {'maxcor': 10, 'ftol': 1e-7, 'gtol': 1e-5,
'eps': 1e-8, 'maxiter': 1000}
if jac is None:
approx_grad = True
else:
approx_grad = False
if options is None:
options = default_options
if options is not None:
for key in options:
default_options[key] = options[key]
options = default_options
try:
out = fmin_l_bfgs_b(fun, x0, fprime=jac, args=args,
approx_grad=approx_grad,
bounds=bounds,
m=options['maxcor'],
factr=options['ftol']/_eps,
pgtol=options['gtol'],
epsilon=options['eps'],
maxiter=options['maxiter'])
except TypeError:
msg = 'In Scipy ' + scipy.__version__ + ' `maxiter` '
msg += 'parameter is not available for L-BFGS-B. \n Using '
msg += '`maxfun` instead with value twice of maxiter.'
print(msg)
out = fmin_l_bfgs_b(fun, x0, fprime=jac, args=args,
approx_grad=approx_grad,
bounds=bounds,
m=options['maxcor'],
factr=options['ftol']/_eps,
pgtol=options['gtol'],
epsilon=options['eps'],
maxfun=options['maxiter'] * 2)
res = {'x': out[0], 'fun': out[1], 'nfev': out[2]['funcalls']}
try:
res['nit'] = out[2]['nit']
except KeyError:
res['nit'] = None
elif method == 'Powell':
default_options = {'xtol': 0.0001, 'ftol': 0.0001,
'maxiter': None}
if options is None:
options = default_options
if options is not None:
for key in options:
default_options[key] = options[key]
options = default_options
out = fmin_powell(fun, x0, args,
xtol=options['xtol'],
ftol=options['ftol'],
maxiter=options['maxiter'],
full_output=True,
disp=False,
retall=True)
xopt, fopt, direc, iterations, funcs, warnflag, allvecs = out
res = {'x': xopt, 'fun': fopt,
'nfev': funcs, 'nit': iterations}
else:
msg = 'Only L-BFGS-B and Powell is supported in this class '
msg += 'for versions of Scipy < 0.12.'
raise ValueError(msg)
if not SCIPY_LESS_0_12:
if evolution is True:
self._evol_kx = []
def history_of_x(kx):
self._evol_kx.append(kx)
res = minimize(fun, x0, args, method, jac, hess, hessp, bounds,
constraints, tol, callback=history_of_x,
options=options)
else:
res = minimize(fun, x0, args, method, jac, hess, hessp, bounds,
constraints, tol, callback, options)
self.res = res
@property
def xopt(self):
return self.res['x']
@property
def fopt(self):
return self.res['fun']
@property
def nit(self):
return self.res['nit']
@property
def nfev(self):
return self.res['nfev']
@property
def message(self):
return self.res['message']
def print_summary(self):
print(self.res)
@property
def evolution(self):
if self._evol_kx is not None:
return np.asarray(self._evol_kx)
else:
return None
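# Illustrative usage of the Optimizer wrapper above (a sketch added for clarity,
# not part of the upstream module; it only relies on names defined in this file):
#
#     def parabola(x):
#         return np.sum((x - 3.0) ** 2)
#
#     opt = Optimizer(parabola, x0=np.zeros(2), method='Powell')
#     print(opt.xopt)   # expected to be close to array([ 3.,  3.])
#     opt.print_summary()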
def spdot(A, B):
"""The same as np.dot(A, B), except it works even if A or B or both
are sparse matrices.
Parameters
----------
A, B : arrays of shape (m, n), (n, k)
Returns
-------
The matrix product AB. If both A and B are sparse, the result will be a
sparse matrix. Otherwise, a dense result is returned
See discussion here:
http://mail.scipy.org/pipermail/scipy-user/2010-November/027700.html
"""
if sps.issparse(A) and sps.issparse(B):
return A * B
elif sps.issparse(A) and not sps.issparse(B):
return (A * B).view(type=B.__class__)
elif not sps.issparse(A) and sps.issparse(B):
return (B.T * A.T).T.view(type=A.__class__)
else:
return np.dot(A, B)
def sparse_nnls(y, X,
momentum=1,
step_size=0.01,
non_neg=True,
check_error_iter=10,
max_error_checks=10,
converge_on_sse=0.99):
"""
Solve y=Xh for h, using gradient descent, with X a sparse matrix
Parameters
----------
y : 1-d array of shape (N)
The data. Needs to be dense.
X : ndarray. May be either sparse or dense. Shape (N, M)
The regressors
momentum : float, optional (default: 1).
The persistence of the gradient.
step_size : float, optional (default: 0.01).
The increment of parameter update in each iteration
non_neg : Boolean, optional (default: True)
Whether to enforce non-negativity of the solution.
check_error_iter : int (default:10)
How many rounds to run between error evaluation for
convergence-checking.
max_error_checks : int (default: 10)
Don't check errors more than this number of times if no improvement in
r-squared is seen.
converge_on_sse : float (default: 0.99)
a percentage improvement in SSE that is required each time to say
that things are still going well.
Returns
-------
h_best : The best estimate of the parameters.
"""
num_regressors = X.shape[1]
# Initialize the parameters at the origin:
h = np.zeros(num_regressors)
# If nothing good happens, we'll return that:
h_best = h
gradient = np.zeros(num_regressors)
iteration = 1
ss_residuals_min = np.inf # This will keep track of the best solution
sse_best = np.inf # This will keep track of the best performance so far
count_bad = 0 # Number of times estimation error has gone up.
error_checks = 0 # How many error checks have we done so far
while 1:
if iteration > 1:
# The sum of squared error given the current parameter setting:
sse = np.sum((y - spdot(X, h)) ** 2)
# The gradient is (Kay 2008 supplemental page 27):
gradient = spdot(X.T, spdot(X, h) - y)
gradient += momentum * gradient
# Normalize to unit-length
unit_length_gradient = (gradient /
np.sqrt(np.dot(gradient, gradient)))
# Update the parameters in the direction of the gradient:
h -= step_size * unit_length_gradient
if non_neg:
# Set negative values to 0:
h[h < 0] = 0
# Every once in a while check whether it's converged:
if np.mod(iteration, check_error_iter):
# This calculates the sum of squared residuals at this point:
sse = np.sum((y - spdot(X, h)) ** 2)
# Did we do better this time around?
if sse < ss_residuals_min:
# Update your expectations about the minimum error:
ss_residuals_min = sse
h_best = h # This holds the best params we have so far
# Are we generally (over iterations) converging on
# sufficient improvement in r-squared?
if sse < converge_on_sse * sse_best:
sse_best = sse
count_bad = 0
else:
count_bad += 1
else:
count_bad += 1
if count_bad >= max_error_checks:
return h_best
error_checks += 1
iteration += 1
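# Illustrative usage of sparse_nnls (a rough sketch, not in the original source; it
# only uses numpy/scipy.sparse, already imported above, and names from this module):
#
#     rng = np.random.RandomState(0)
#     X = sps.csr_matrix(np.abs(rng.rand(100, 5)))
#     h_true = np.array([1.0, 0.0, 2.0, 0.0, 0.5])
#     y = spdot(X, h_true)
#     h_est = sparse_nnls(y, X)   # gradient-descent, non-negative estimate of h_true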
class SKLearnLinearSolver(with_metaclass(abc.ABCMeta, object)):
"""
Provide a sklearn-like uniform interface to algorithms that solve problems
of the form: $y = Ax$ for $x$
Sub-classes of SKLearnLinearSolver should provide a 'fit' method that have
the following signature: `SKLearnLinearSolver.fit(X, y)`, which would set
an attribute `SKLearnLinearSolver.coef_`, with the shape (X.shape[1],),
such that an estimate of y can be calculated as:
`y_hat = np.dot(X, SKLearnLinearSolver.coef_.T)`
"""
def __init__(self, *args, **kwargs):
self._args = args
self._kwargs = kwargs
@abc.abstractmethod
def fit(self, X, y):
"""Implement for all derived classes """
def predict(self, X):
"""
Predict using the result of the model
Parameters
----------
X : array-like (n_samples, n_features)
Samples.
Returns
-------
C : array, shape = (n_samples,)
Predicted values.
"""
X = np.asarray(X)
return np.dot(X, self.coef_.T)
class NonNegativeLeastSquares(SKLearnLinearSolver):
"""
A sklearn-like interface to scipy.optimize.nnls
"""
def fit(self, X, y):
"""
Fit the NonNegativeLeastSquares linear model to data
Parameters
----------
"""
coef, rnorm = opt.nnls(X, y)
self.coef_ = coef
return self
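# Illustrative usage of the sklearn-like wrapper above (sketch, not part of the
# upstream file; X is a dense (n_samples, n_features) array, y a (n_samples,) array):
#
#     model = NonNegativeLeastSquares()
#     model.fit(X, y)
#     y_hat = model.predict(X)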
| bsd-3-clause |
fmder/deap | examples/ga/xkcd.py | 12 | 4340 | # This file is part of DEAP.
#
# DEAP is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# DEAP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with DEAP. If not, see <http://www.gnu.org/licenses/>.
"""This example shows a possible answer to a problem that can be found in this
xkcd comics: http://xkcd.com/287/. In the comic, the characters want to get
exactly 15.05$ worth of appetizers, as fast as possible."""
import random
from operator import attrgetter
from collections import Counter
# We delete the reduction function of the Counter because it doesn't copy added
# attributes. Because we create a class that inherit from the Counter, the
# fitness attribute was not copied by the deepcopy.
del Counter.__reduce__
import numpy
from deap import algorithms
from deap import base
from deap import creator
from deap import tools
IND_INIT_SIZE = 3
# Create the item dictionary: the key is the menu item name, and the value is
# a (price, time) 2-tuple. Since the comic didn't specify a time for
# each menu item, random was called to generate one.
ITEMS_NAME = "Mixed Fruit", "French Fries", "Side Salad", "Hot Wings", "Mozzarella Sticks", "Sampler Plate"
ITEMS_PRICE = 2.15, 2.75, 3.35, 3.55, 4.2, 5.8
ITEMS = dict((name, (price, random.uniform(1, 5))) for name, price in zip(ITEMS_NAME, ITEMS_PRICE))
creator.create("Fitness", base.Fitness, weights=(-1.0, -1.0))
creator.create("Individual", Counter, fitness=creator.Fitness)
toolbox = base.Toolbox()
toolbox.register("attr_item", random.choice, ITEMS_NAME)
toolbox.register("individual", tools.initRepeat, creator.Individual, toolbox.attr_item, IND_INIT_SIZE)
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
def evalXKCD(individual, target_price):
"""Evaluates the fitness and return the error on the price and the time
taken by the order if the chef can cook everything in parallel."""
price = 0.0
times = list()
for item, number in individual.items():
price += ITEMS[item][0] * number
times.append(ITEMS[item][1])
return abs(price - target_price), max(times)
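# Worked example (comment added for clarity): the well-known answer from the comic is
# seven orders of "Mixed Fruit", since 7 * 2.15 = 15.05. For such an individual the
# first fitness value (price error) is numerically zero and the second one is simply
# the cooking time drawn for that single item:
#
#     ind = creator.Individual({"Mixed Fruit": 7})
#     price_error, total_time = evalXKCD(ind, target_price=15.05)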
def cxCounter(ind1, ind2, indpb):
"""Swaps the number of perticular items between two individuals"""
for key in ITEMS.keys():
if random.random() < indpb:
ind1[key], ind2[key] = ind2[key], ind1[key]
return ind1, ind2
def mutCounter(individual):
"""Adds or remove an item from an individual"""
if random.random() > 0.5:
individual.update([random.choice(ITEMS_NAME)])
else:
val = random.choice(ITEMS_NAME)
individual.subtract([val])
if individual[val] < 0:
del individual[val]
return individual,
toolbox.register("evaluate", evalXKCD, target_price=15.05)
toolbox.register("mate", cxCounter, indpb=0.5)
toolbox.register("mutate", mutCounter)
toolbox.register("select", tools.selNSGA2)
def main():
NGEN = 40
MU = 100
LAMBDA = 200
CXPB = 0.3
MUTPB = 0.6
pop = toolbox.population(n=MU)
hof = tools.ParetoFront()
price_stats = tools.Statistics(key=lambda ind: ind.fitness.values[0])
time_stats = tools.Statistics(key=lambda ind: ind.fitness.values[1])
stats = tools.MultiStatistics(price=price_stats, time=time_stats)
stats.register("avg", numpy.mean, axis=0)
stats.register("std", numpy.std, axis=0)
stats.register("min", numpy.min, axis=0)
algorithms.eaMuPlusLambda(pop, toolbox, MU, LAMBDA, CXPB, MUTPB, NGEN,
stats, halloffame=hof)
return pop, stats, hof
if __name__ == "__main__":
_, _, hof = main()
from matplotlib import pyplot as plt
error_price = [i.fitness.values[0] for i in hof]
time = [i.fitness.values[1] for i in hof]
plt.plot(error_price, time, 'bo')
plt.xlabel("Price difference")
plt.ylabel("Total time")
plt.show()
| lgpl-3.0 |
gpospelov/BornAgain | Wrap/Python/ba_fitmonitor.py | 1 | 8653 | # ************************************************************************** #
"""
# BornAgain: simulate and fit reflection and scattering
#
# @file Wrap/Python/ba_fitmonitor.py
# @brief Plotter classes for monitoring fit progress.
#
# @homepage http://apps.jcns.fz-juelich.de/BornAgain
# @license GNU General Public License v3 or higher (see COPYING)
# @copyright Forschungszentrum Juelich GmbH 2019
# @authors Scientific Computing Group at MLZ (see CITATION, AUTHORS)
"""
# ************************************************************************** #
import bornagain as ba
import ba_plot
try: # workaround for build servers
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import gridspec, colors
except Exception as e:
print("In ba_fitmonitor.py: {:s}".format(str(e)))
label_fontsize = 16
class Plotter:
"""
Draws fit progress. Base class for simulation-specific classes (PlotterGISAS etc).
"""
def __init__(self,
zmin=None,
zmax=None,
xlabel=None,
ylabel=None,
units=ba.Axes.DEFAULT,
aspect=None):
self._fig = plt.figure(figsize=(10.25, 7.69))
self._fig.canvas.draw()
self._zmin = zmin
self._zmax = zmax
self._xlabel = xlabel
self._ylabel = ylabel
self._units = units
self._aspect = aspect
def reset(self):
self._fig.clf()
def plot(self):
self._fig.tight_layout()
plt.pause(0.03)
class PlotterGISAS(Plotter):
"""
Draws fit progress, for GISAS simulation.
"""
def __init__(self,
zmin=None,
zmax=None,
xlabel=None,
ylabel=None,
units=ba.Axes.DEFAULT,
aspect=None):
Plotter.__init__(self, zmin, zmax, xlabel, ylabel, units, aspect)
@staticmethod
def make_subplot(nplot):
plt.subplot(2, 2, nplot)
plt.subplots_adjust(wspace=0.2, hspace=0.2)
def plot(self, fit_objective):
Plotter.reset(self)
real_data = fit_objective.experimentalData()
sim_data = fit_objective.simulationResult()
diff = fit_objective.absoluteDifference()
self.make_subplot(1)
# same limits for both plots
arr = real_data.array()
zmax = np.amax(arr) if self._zmax is None else self._zmax
zmin = zmax*1e-6 if self._zmin is None else self._zmin
ba.plot_colormap(real_data,
title="Experimental data",
zmin=zmin,
zmax=zmax,
units=self._units,
xlabel=self._xlabel,
ylabel=self._ylabel,
zlabel='',
aspect=self._aspect)
self.make_subplot(2)
ba.plot_colormap(sim_data,
title="Simulated data",
zmin=zmin,
zmax=zmax,
units=self._units,
xlabel=self._xlabel,
ylabel=self._ylabel,
zlabel='',
aspect=self._aspect)
self.make_subplot(3)
ba.plot_colormap(diff,
title="Difference",
zmin=zmin,
zmax=zmax,
units=self._units,
xlabel=self._xlabel,
ylabel=self._ylabel,
zlabel='',
aspect=self._aspect)
self.make_subplot(4)
plt.title('Parameters')
plt.axis('off')
iteration_info = fit_objective.iterationInfo()
plt.text(
0.01, 0.85,
"Iterations " + '{:d}'.format(iteration_info.iterationCount()))
plt.text(0.01, 0.75,
"Chi2 " + '{:8.4f}'.format(iteration_info.chi2()))
index = 0
params = iteration_info.parameterMap()
for key in params:
plt.text(0.01, 0.55 - index*0.1,
'{:30.30s}: {:6.3f}'.format(key, params[key]))
index = index + 1
Plotter.plot(self)
class PlotterSpecular(Plotter):
"""
Draws fit progress, for specular simulation.
"""
def __init__(self, units=ba.Axes.DEFAULT):
Plotter.__init__(self)
self.gs = gridspec.GridSpec(1, 2, width_ratios=[2.5, 1], wspace=0)
self.units = units
def __call__(self, fit_objective):
self.plot(fit_objective)
@staticmethod
def as_si(val, ndp):
"""
Fancy print of scientific-formatted values
:param val: numeric value
:param ndp: number of decimal digits to print
:return: a string corresponding to the _val_
"""
s = '{x:0.{ndp:d}e}'.format(x=val, ndp=ndp)
m, e = s.split('e')
return r'{m:s}\times 10^{{{e:d}}}'.format(m=m, e=int(e))
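    # For instance (comment added for clarity), as_si(1234.5, 1) returns the string
    # '1.2\times 10^{3}', which renders as scientific notation inside math text.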
@staticmethod
def trunc_str(token, length):
"""
Truncates token if it is longer than length.
Example:
trunc_str("123456789", 8) returns "123456.."
trunc_str("123456789", 9) returns "123456789"
:param token: input string
:param length: max non-truncated length
:return:
"""
return (token[:length - 2] + '..') if len(token) > length else token
def plot_table(self, fit_objective):
iteration_info = fit_objective.iterationInfo()
trunc_length = 9 # max string field width in the table
n_digits = 1 # number of decimal digits to print
        # current number of iterations passed
        n_iterations = iteration_info.iterationCount()
        # maximum relative difference
        rel_dif = fit_objective.relativeDifference().array().max()
fitted_parameters = iteration_info.parameterMap()
# creating table content
labels = ("Parameter", "Value")
table_data = [["Iteration", '${:d}$'.format(n_iterations)],
[
"$d_{r, max}$",
'${:s}$'.format(self.as_si(rel_dif, n_digits))
]]
for key, value in fitted_parameters.iteritems():
table_data.append([
'{:s}'.format(self.trunc_str(key, trunc_length)),
'${:s}$'.format(self.as_si(value, n_digits))
])
# creating table
axs = plt.subplot(self.gs[1])
axs.axis('tight')
axs.axis('off')
table = plt.table(cellText=table_data,
colLabels=labels,
cellLoc='center',
loc='bottom left',
bbox=[0.0, 0.0, 1.0, 1.0])
def plot_graph(self, fit_objective):
# retrieving data from fit suite
real_data = fit_objective.experimentalData()
sim_data = fit_objective.simulationResult()
unc_data = fit_objective.uncertaintyData()
# data values
sim_values = sim_data.array(self.units)
real_values = real_data.array(self.units)
unc_values = None if unc_data is None else unc_data.array(self.units)
# default font properties dictionary to use
font = {'family': 'serif', 'weight': 'normal', 'size': label_fontsize}
plt.subplot(self.gs[0])
plt.semilogy(sim_data.axis(), sim_values, 'b', real_data.axis(),
real_values, 'k--')
if unc_values is not None:
plt.semilogy(real_data.axis(),
real_values - unc_values,
'xkcd:grey',
alpha=0.6)
plt.semilogy(real_data.axis(),
real_values + unc_values,
'xkcd:grey',
alpha=0.6)
plt.ylim((0.5*np.min(real_values), 5*np.max(real_values)))
xlabel = ba_plot.get_axes_labels(real_data, self.units)[0]
legend = ['BornAgain', 'Data']
if unc_values is not None:
legend = ['BornAgain', 'Data', r'Data $\pm \sigma$']
plt.legend(legend, loc='upper right', prop=font)
plt.xlabel(xlabel, fontdict=font)
plt.ylabel("Intensity", fontdict=font)
plt.title("Specular data fitting", fontdict=font)
def plot(self, fit_objective):
Plotter.reset(self)
self.plot_graph(fit_objective)
self.plot_table(fit_objective)
Plotter.plot(self)
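# Typical usage (a hedged sketch; the `initPlot` call mirrors the BornAgain fitting
# examples and its exact signature is an assumption, not guaranteed by this file):
#
#     plotter = PlotterGISAS()
#     fit_objective.initPlot(10, plotter)   # re-draw the figure every 10th iteration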
| gpl-3.0 |
fernandezcuesta/t4Monitor | t4mon/collector.py | 2 | 39746 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
::
start()
|
v
get_data_and_logs()
/ \\
| |
v v
get_system_data() get_system_logs()
|
v
get_stats_from_host()
"""
import os
import re
import gzip
import zipfile
import datetime as dt
import tempfile
import threading
from contextlib import contextmanager
import six
import tqdm
import pandas as pd
import sshtunnel
from t4mon import df_tools, gen_plot, arguments, calculations
from paramiko import SFTPClient, SSHException
from six.moves import queue, cPickle, builtins, cStringIO
from t4mon.logger import init_logger
from t4mon.sftpsession import SftpSession, SFTPSessionError
__all__ = ('add_methods_to_pandas_dataframe',
'Collector',
'load_zipfile',
'read_pickle')
# Avoid using locale in Linux+Windows environments, keep these lowercase
MONTHS = ['jan', 'feb', 'mar', 'apr', 'may', 'jun',
'jul', 'aug', 'sep', 'oct', 'nov', 'dec']
# sshtunnel.DAEMON = True # Cleanly stop threads when quitting
@contextmanager
def change_dir(directory, module):
"""
Context manager for restoring the current working directory
"""
module = module or os
current_dir = module.getcwd()
module.chdir(directory)
    try:
        yield
    finally:
        module.chdir(current_dir)
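# Example (comment added for clarity): temporarily work inside another directory and
# return to the original one afterwards:
#
#     with change_dir('/tmp', module=os):
#         ...   # the current working directory is /tmp here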
def get_datetag(date=None):
"""
Return date in '%d%b%Y' format, locale independent
If no date is specified, current date is returned
"""
if not date:
date = dt.date.today()
return '{0:02d}{1}{2}'.format(date.day,
MONTHS[date.month - 1],
date.year)
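# For example (comment added for clarity): get_datetag(dt.date(2015, 5, 20)) returns
# '20may2015', while get_datetag() uses today's date.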
def get_filename(filespec):
"""
Remove OpenVMS drive/folder from a file spec
"""
separator = ']' if ']' in filespec else ':'
return filespec.strip().split(separator)[-1]
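# For example (comment added for clarity), an OpenVMS file specification such as
# 'SYS$SYSDEVICE:[T4DATA]STATS.CSV' (an illustrative path) is reduced to 'STATS.CSV';
# everything up to the last ']' (or ':' when no bracket is present) is dropped.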
def is_running_from_ipython():
try:
from IPython import get_ipython
return get_ipython() is not None
except ImportError:
        return False
class Collector(object):
"""
Data collection class.
- Initialize the SSH tunnels towards the remote clusters over a common
gateway
- Collecting the data (T4-CSV) from remote clusters
- Optionally collect remote command output
- Create a dataframe containing processed data from all systems
- Apply calculations to resulting dataframe, saving the result in new
columns
Additional methods allow storing/loading the class into a gzipped pickle
file.
Modes:
- **threaded**: default mode.
Most operations (data collection, reporting) are executed in parallel
for each system
- **safe**: serial mode, slower.
All operations are executed serially system by system.
Arguments:
alldays (boolean or False):
Define whether or not filter remote files on current date.
If ``True``, remote files will be filtered on a timestamp with the
``DDMMMYYY`` format (i.e. ``20may2015``).
logger (Optional[logging.Logger]):
Logger object passed from an external function. A new logger is
created by calling :func:`t4mon.logger.init_logger` if nothing is
passed.
loglevel (str):
logger level, if no ``logger`` passed
nologs (boolean or False)
Skip remote log collection. An indication message will be shown in
the report showing that the log collection was omitted.
safe (boolean or False):
Define the mode (safe or threaded) for most of the class methods.
settings_file (str or :const:`t4mon.arguments.DEFAULT_SETTINGS_FILE`)
Define the name of the configuration file.
Attributes:
alldays (boolean or False):
Define whether or not filter remote files on current date.
If ``True``, remote files will be filtered on a timestamp with the
``DDMMMYYY`` format (i.e. ``20may2015``).
conf (configParser.ConfigParser):
Object containing the settings as read from settings_file (passed
as argument).
Default: ``ConfigParser`` object as obtained from sample
configuration file.
data (pandas.DataFrame):
Multiple-index dataframe containing the data collected for all the
systems. The indices are:
- ``Datetime``: sample timestamp
- ``system``: system ID for the current sample
Default: ``pandas.DataFrame()``
filecache (dict):
            (key, value) dictionary containing, for each remote folder of a
            system (key=(system, folder)), the list of files (value) in the
            remote system (or local filesystem if working locally), cached to
            avoid successive file lookups (slow when the number of files is high).
Default: empty dict
logger (Optional[logging.Logger]):
Logger object passed from an external function. A new logger is
created by calling :func:`t4mon.logger.init_logger` if nothing is
passed.
logs (dict):
Output from running remotely the command specified in the
configuration file (``MISC/remote_log_cmd``).
Default: empty dict
nologs (boolean)
Skip remote log collection. An indication message will be shown in
the report showing that the log collection was omitted.
Default: ``False``
        results_queue (queue.Queue):
            Queue containing the system IDs whose data collection is ready.
Default: empty ``Queue`` object
safe (boolean):
Define the mode (safe vs threaded) for most of this class methods.
Default: ``False``
server (SSHTunnel.SSHTunnelForwarder):
Object representing the tunnel server.
Default: ``None``
        settings_file (str):
Name of the file containing the settings
Default: :const:`t4mon.arguments.DEFAULT_SETTINGS_FILE`
systems (list):
List containing the system IDs as configured in the settings file
sections.
Default: empty list
Examples:
>>> with Collector(**options) as col:
# operations
>>> col = Collector(**options)
>>> col.init_tunnels()
>>> # operations
>>> col.stop_server()
"""
def __init__(self,
alldays=False,
logger=None,
loglevel=None,
nologs=False,
safe=False,
settings_file=None,
**kwargs):
self.alldays = alldays
self.conf = arguments.read_config(settings_file)
self.data = pd.DataFrame()
self.filecache = {}
self.logger = logger or init_logger(loglevel)
self.logs = {}
self.nologs = nologs
self.results_queue = queue.Queue()
self.safe = safe
self.settings_file = settings_file or arguments.DEFAULT_SETTINGS_FILE
self.server = None
self.systems = [item for item in self.conf.sections()
if item not in ['GATEWAY', 'MISC']]
add_methods_to_pandas_dataframe(self.logger)
def __enter__(self, system=None):
self.init_tunnels(system)
return self
def __exit__(self, etype, *args):
self.stop_server()
return None
def __str__(self):
return ('alldays/nologs: {0}/{1}\ndata shape: {2}\nlogs (keys): {3}\n'
'threaded: {4}\nserver is set up?: {5}\n'
'Settings file: {6}\n\n{7}'
''.format(self.alldays,
self.nologs,
self.data.shape,
list(self.logs.keys()),
not self.safe,
'Yes' if self.server else 'No',
self.settings_file,
self.dump_config()
)
)
def _check_if_using_gateway(self, system=None):
""" Check if the connection is tunneled over an SSH gateway or not """
try:
return self.conf.getboolean(system or 'DEFAULT', 'use_gateway')
except six.moves.configparser.Error:
return True
def __getstate__(self):
""" Method enabling class pickle """
odict = self.__dict__.copy()
if self.logger:
odict['loggername'] = self.logger.name
for item in ['logger', 'results_queue', 'server']:
del odict[item]
return odict
def __setstate__(self, state):
""" Method enabling class pickle """
state['logger'] = init_logger(name=state.get('loggername'))
if 'loggername' in state:
del state['loggername']
state['results_queue'] = queue.Queue()
state['server'] = None
self.__dict__.update(state)
def dump_config(self):
"""
Return a string with the configuration file contents
"""
config = cStringIO()
self.conf.write(config)
config.seek(0)
return config.read()
def plot(self, *args, **kwargs): # pragma: no cover
"""
Convenience method for calling :meth:`.gen_plot.plot_var`
"""
return gen_plot.plot_var(self.data,
*args,
logger=self.logger,
**kwargs)
def select(self, *args, **kwargs): # pragma: no cover
"""
Convenience method for calling :meth:`.df_tools.select`
"""
return df_tools.select(self.data,
*args,
logger=self.logger,
**kwargs)
def init_tunnels(self, system=None):
"""
Initialize SSH tunnels using ``sshtunnel`` and ``paramiko`` libraries.
Arguments:
- system
Type: string
Default: ``None``
Description:
system to initialize the tunnels. If nothing given it initializes
tunnels for all systems in ``self.systems``.
Return:
``SSHTunnelForwarder`` instance (non-started) with all tunnels
already established
"""
if not self._check_if_using_gateway(system):
return
self.logger.info('Initializing tunnels')
if not self.conf:
self.conf = arguments.read_config(self.settings_file)
jumpbox_addr = self.conf.get('GATEWAY', 'ip_or_hostname')
jumpbox_port = self.conf.getint('GATEWAY', 'ssh_port')
rbal = []
lbal = []
tunnelports = {}
systems = [system] if system else self.systems
sshtunnel.SSH_TIMEOUT = arguments.DEFAULT_SSH_TIMEOUT
        for _sys in systems:
            rbal.append((self.conf.get(_sys, 'ip_or_hostname'),
                         self.conf.getint(_sys, 'ssh_port')))
            tunnelports[_sys] = self.conf.getint(_sys, 'tunnel_port')
            lbal.append(('', tunnelports[_sys]))
if len(tunnelports) != len(set(tunnelports.values())):
self.logger.error('Local tunnel ports MUST be different: {0}'
.format(tunnelports))
raise sshtunnel.BaseSSHTunnelForwarderError
try:
pwd = self.conf.get('GATEWAY', 'password').strip("\"' ") or None \
if self.conf.has_option('GATEWAY', 'password') else None
pkey = self.conf.get('GATEWAY', 'identity_file').strip("\"' ") \
or None if self.conf.has_option('GATEWAY', 'identity_file') \
else None
user = self.conf.get('GATEWAY', 'username') or None \
if self.conf.has_option('GATEWAY', 'username') else None
self.server = sshtunnel.open_tunnel(
ssh_address_or_host=(jumpbox_addr, jumpbox_port),
ssh_username=user,
ssh_password=pwd,
remote_bind_addresses=rbal,
local_bind_addresses=lbal,
threaded=True,
logger=self.logger,
ssh_pkey=pkey,
ssh_private_key_password=pwd,
set_keepalive=15.0,
allow_agent=False,
mute_exceptions=True,
skip_tunnel_checkup=False,
)
self.server.is_use_local_check_up = True # Check local side
self._start_server()
assert self.server.is_alive
# Add the system<>port bindings to the return object
self.server.tunnelports = dict(
list(zip(systems, self.server.local_bind_ports))
)
self.logger.debug('Registered tunnels: {0}'
.format(self.server.tunnelports))
except (sshtunnel.BaseSSHTunnelForwarderError, AssertionError):
self.logger.error('{0}Could not open connection to remote server: '
'{1}:{2}'.format(
'{0} | '.format(system) if system else '',
jumpbox_addr,
jumpbox_port
))
raise sshtunnel.BaseSSHTunnelForwarderError
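    # Illustrative settings fragment, inferred only from the configuration
    # lookups in this method and in _check_if_using_gateway; the key names
    # below are assumptions about layout, not a verbatim copy of a real file:
    #
    #   [GATEWAY]
    #   ip_or_hostname = jumpbox.example.com
    #   ssh_port = 22
    #   username = monitor
    #   identity_file = ~/.ssh/id_rsa
    #
    #   [systemA]
    #   ip_or_hostname = 10.0.0.5
    #   ssh_port = 22
    #   tunnel_port = 12345
    #   use_gateway = True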
def _start_server(self): # pragma: no cover
"""
Start the SSH tunnels
"""
if not self.server:
raise sshtunnel.BaseSSHTunnelForwarderError
try:
self.logger.info('Opening connection to gateway')
self.server.start()
if not self.server.is_alive:
raise sshtunnel.BaseSSHTunnelForwarderError(
"Couldn't start server"
)
except AttributeError as msg:
raise sshtunnel.BaseSSHTunnelForwarderError(msg)
def stop_server(self): # pragma: no cover
"""
Stop the SSH tunnels
"""
try:
self.logger.info('Closing connection to gateway')
self.server.stop()
except AttributeError as msg:
raise sshtunnel.BaseSSHTunnelForwarderError(msg)
def check_if_tunnel_is_up(self, system):
"""
        Return True if there's a tuple in :attr:`.server`'s ``.tunnel_is_up``
such as: ``{('0.0.0.0', port): True}``
where port is the tunnel listen port for ``system``.
Arguments:
system (str): system which tunnel port will be checked
Return:
boolean
"""
if not self.server or system not in self.server.tunnelports:
return False
port = self.server.tunnelports[system]
return any(port in address_tuple for address_tuple
in six.iterkeys(self.server.tunnel_is_up)
if self.server.tunnel_is_up[address_tuple])
def get_sftp_session(self, system):
"""
By default the connection is done via SSH tunnels (controlled by
        configuration item `use_gateway`)
Arguments:
system (str): system where to open the SFTP session
Return:
``paramiko.SFTPClient``
"""
if system not in self.conf.sections():
self.logger.error('{0} | System not found in configuration'
.format(system))
raise SFTPSessionError('connection to {0} failed'.format(system))
use_gateway = self._check_if_using_gateway(system)
if use_gateway:
remote_system_address = '127.0.0.1'
remote_system_port = self.server.tunnelports[system]
else:
remote_system_address = self.conf.get(system, 'ip_or_hostname')
remote_system_port = self.conf.getint(system, 'ssh_port')
self.logger.info('{0} | Connecting to {1}port {2}'
.format(system,
'tunnel ' if use_gateway else '',
remote_system_port))
ssh_pass = self.conf.get(system, 'password').strip("\"' ") or None \
if self.conf.has_option(system, 'password') else None
ssh_key = self.conf.get(system,
'identity_file').strip("\"' ") or None \
if self.conf.has_option(system, 'identity_file') else None
user = self.conf.get(system, 'username') or None \
if self.conf.has_option(system, 'username') else None
try:
if six.PY3:
ssh_timeout = self.conf.get(
system,
'ssh_timeout',
fallback=arguments.DEFAULT_SSH_TIMEOUT
)
else:
ssh_timeout = self.conf.get(
system,
'ssh_timeout'
) if self.conf.has_option(
system,
'ssh_timeout'
) else arguments.DEFAULT_SSH_TIMEOUT
return SftpSession(hostname=remote_system_address,
ssh_user=user,
ssh_pass=ssh_pass,
ssh_key=ssh_key,
ssh_timeout=ssh_timeout,
ssh_port=remote_system_port,
logger=self.logger)
except SFTPSessionError:
raise SFTPSessionError('connection to {0} failed'.format(system))
def files_lookup(self,
hostname=None,
filespec_list=None,
compressed=False,
**kwargs):
"""
Connect to a remote system via SFTP and get a list of files matching
``filespec_list`` in the remote host.
Works locally when ``hostname=None``.
Files that will be returned must match every item in ``filespec_list``,
i.e. ``data*2015*csv`` would match ``data_2015.csv``,
``2015_data_full.csv`` but not ``data_2016.csv``.
When working with the local filesystem, ``filespec_list`` may contain
absolute paths.
        Works with the local filesystem if no valid sftp_session is passed
Arguments:
hostname (Optional[str]):
Remote hostname where to download the CSV files.
Default: working with local filesystem
filespec_list (Optional[list]):
list of files to look for (valid filespecs may contain
wildcards (``*``))
compressed (Optional[boolean]):
whether to look for compressed or plain (CSV) files
Default: ``False`` (look for CSV files)
Keyword Arguments:
sftp_session (paramiko.SFTPClient):
already established sftp session
files_folder: folder where files are located, either on sftp srv or
local filesystem
Return:
list:
files matching the filespec_list in the remote
                host or a string with wildcards (*), i.e. ``data*2015*.csv``
"""
sftp_session = kwargs.get('sftp_session', None)
files_folder = kwargs.get('files_folder', '.')
if files_folder[-1] == os.sep:
files_folder = files_folder[:-1] # remove trailing separator (/)
if filespec_list and isinstance(filespec_list, six.string_types):
filespec_list = filespec_list.split('*')
# default if no filter given is just the extension of the files
spec_list = filespec_list[:] or ['.zip' if compressed else '.csv']
if sftp_session:
self.logger.debug('Using established sftp session...')
self.logger.debug("Looking for remote files ({0}) at '{1}'"
.format(spec_list, files_folder))
folder_files = [get_filename(f) for f in sftp_session.run_command(
'dir /noheading /notrailing {0}'.format(files_folder)
)]
filesource = sftp_session
else:
self.logger.debug('Using local filesystem to get the files')
self.logger.debug("Looking for local files ({0}) at '{1}'"
.format(spec_list,
os.path.abspath(files_folder)))
folder_files = os.listdir(files_folder)
filesource = os
# get file list by filtering with taglist (case insensitive)
try:
with change_dir(directory=files_folder,
module=filesource):
key = (hostname or 'localfs', files_folder)
if key not in self.filecache: # fill the cache in
self.filecache[key] = folder_files
else:
self.logger.debug(
'Using cached file list for {0}'.format(key)
)
files = ['{0}/{1}'.format(filesource.getcwd(), f)
for f in self.filecache[key]
if all([v.upper() in f.upper() for v in spec_list])
]
if not files and not sftp_session:
files = filespec_list # Relative and absolute paths (local)
except EnvironmentError: # files could not be fetched
self.logger.error('{0} | Directory "{1}" not found at destination'
.format(hostname, files_folder))
return
return files
def get_stats_from_host(self,
filespec_list=None,
hostname=None,
compressed=False,
sftp_session=None,
**kwargs):
"""
Optionally connect to a remote system via SFTP to read CSV files, which
might be compressed in ZIP files, then call the CSV-pandas conversion
function.
Arguments:
filespec_list (Optional[list]):
List of strings, each representing a valid file specification
                (wildcards (``*``) allowed)
hostname (Optional[str]):
Remote hostname where to download the CSV files.
Default: working with local filesystem
compressed (Optional[boolean]):
Whether or not the files matching ``filespec_list`` are
compressed (deflate)
Default: ``False`` (not compressed)
sftp_session (Optional[paramiko.SFTPClient]):
SFTP session to the remote ``hostname``
Default: ``None`` (work with local filesystem)
files_folder (Optional[str]):
folder where files are located, either on sftp server or local
filesystem
Return:
``pandas.DataFrame``
"""
_df = pd.DataFrame()
files = self.files_lookup(hostname=hostname,
filespec_list=filespec_list,
compressed=compressed,
sftp_session=sftp_session,
**kwargs)
if not files:
self.logger.debug('Nothing gathered from {0}, no files were '
'selected for pattern "{1}"'
.format(hostname or 'local system',
filespec_list))
return _df
progressbar_prefix = 'Loading {0}files{1}'.format(
'compressed ' if compressed else '',
' from {0}'.format(hostname) if hostname else ''
)
tqdm_call = tqdm.tqdm_notebook if is_running_from_ipython() \
else tqdm.tqdm
for a_file in tqdm_call(files,
leave=True,
desc=progressbar_prefix,
disable=compressed,
unit='Archive' if compressed else 'File'):
if compressed:
_df = _df.combine_first(
self._load_zipfile(zip_file=a_file,
sftp_session=sftp_session)
)
# if no hostname, try to infer it from the file name
regex = 't4_(\w+)[0-9]_\w+_[0-9]{{4}}_[0-9]{{4}}_\w+.{0}'.\
format(os.path.splitext(a_file)[-1])
if not hostname and re.search(regex, a_file):
hostname = re.search(regex, a_file).groups()[0]
if hostname:
_df = df_tools.consolidate_data(_df,
system=hostname)
else:
_df = _df.combine_first(
df_tools.dataframize(data_file=a_file,
session=sftp_session,
logger=self.logger)
)
return _df
def get_system_logs(self, ssh_session, system, command=None):
"""
Get log info from the remote system, assumes an already established
ssh tunnel.
Arguments:
ssh_session (paramiko.SSHClient):
Active SSH client to the remote host where the ``command``
will be invoked
system (str):
System representation as configured in
:attr:`.settings_file` hostname where to download the CSV
files. Working with local filesystem if ``None``
command (Optional[str]):
Command that will be executed in the remote host
Return:
str: stdout text representation (stdin and stderr ignored for
OpenVMS' SSH2)
"""
if not command:
self.logger.error('No command was specified for log collection')
return
self.logger.warning('Getting log output from {0} (Remote command: {1})'
', may take a while...'.format(system, command))
try: # ignoring stdin and stderr for OpenVMS SSH2
(_, stdout, _) = ssh_session.exec_command(command)
return stdout.readlines()
except Exception as _exc:
self.logger.error('{0} | Error occurred while getting logs: {1}'
.format(system, repr(_exc)))
return None
def get_single_day_data(self, given_date=None):
"""
Given a single date, collect all systems data for such date and put the
results in :attr:`.data`.
Arguments:
given_date (datetime):
define for which day the data will be collected from the remote
systems; default: today's datetime.
"""
def _single_day_and_system_data(system, given_date=None):
given_date = get_datetag(given_date)
self.logger.info('Collecting data for system: {0}; day: {1}'
.format(system, given_date))
with self.get_sftp_session(system) as session:
result_data = self.get_system_data(session,
system,
given_date)
self.data = df_tools.consolidate_data(result_data,
dataframe=self.data,
system=system)
self.results_queue.put(system) # flag this system as done
with self: # open tunnels
self._run_systemwide(_single_day_and_system_data,
given_date)
def get_system_data(self, session, system, day=None):
"""
Create pandas DF from current session CSV files downloaded via SFTP.
Arguments:
session (SftpClient):
**Already initialized** sftp session to the remote system
system (str):
remote system hostname, as present in settings file
day (str):
String identifying for which day the data will be collected.
Default: ``datetime.date.today()`` in the format ``%d%b%Y``
Return:
``pandas.DataFrame``
"""
data = pd.DataFrame()
destdir = self.conf.get(system, 'folder') or '.'
# Filter only on '.csv' extension if alldays
tag_list = ['.csv'] + ([] if self.alldays and not day
else [day or get_datetag()])
try: # if present, also filter on cluster id
tag_list.append(self.conf.get(system, 'cluster_id').lower())
except Exception:
pass
data = self.get_stats_from_host(hostname=system,
filespec_list=tag_list,
sftp_session=session,
files_folder=destdir)
if data.empty:
self.logger.warning('{0} | No data was obtained!'.format(system))
else:
self.logger.info('{0} | Dataframe shape obtained: {1}. '
'Now applying calculations...'.format(system,
data.shape))
calc_file = self.conf.get('MISC', 'calculations_file')
if not os.path.isabs(calc_file):
calc_file = '{0}{1}{2}'.format(
os.path.dirname(os.path.abspath(self.settings_file)),
os.sep,
calc_file
)
data.apply_calcs(calc_file, system)
self.logger.info('{0} | Dataframe shape after calculations: {1}'
.format(system, data.shape))
return data
def get_data_and_logs(self, system):
"""
Collect everything needed for a system.
By default the connection is done via SSH tunnels.
Arguments:
system (str): Open an SFTP session to system and collect the CSVs
Return:
``pandas.DataFrame``
"""
# TODO: allow parallel (data | log) collection
try:
self.logger.info('{0} | Collecting statistics...'.format(system))
if (
self._check_if_using_gateway(system) and not
self.check_if_tunnel_is_up(system)
):
self.logger.error('{0} | System not reachable!'.format(system))
raise SFTPSessionError
# Get an sftp session
sftp_session = self.get_sftp_session(system)
if not sftp_session:
raise SFTPSessionError('Cannot open an SFTP session to {0}'
.format(system))
with sftp_session as session: # open the session
# Get data from the remote system
result_data = self.get_system_data(session, system)
# Done gathering data, now get the logs
if self.nologs or result_data.empty \
or not self.conf.has_option('MISC', 'remote_log_cmd'):
result_logs = '{0} | Log collection omitted'.format(system)
self.logger.info(result_logs)
else:
result_logs = self.get_system_logs(
sftp_session.ssh_transport,
system,
self.conf.get('MISC', 'remote_log_cmd')
) or '{0} | Missing logs!'.format(system)
except (IOError, SFTPSessionError):
result_data = pd.DataFrame()
result_logs = 'Could not get information from this system'
self.logger.debug('{0} | Consolidating results'.format(system))
self.data = df_tools.consolidate_data(result_data,
dataframe=self.data,
system=system)
self.logs[system] = result_logs
self.results_queue.put(system)
def _run_systemwide(self, target, *args):
"""
Run a target function systemwide and wait until all of them are
finished.
The target function is supposed to leave the value for 'system' in
self.results_queue.
"""
for system in self.systems:
thread = threading.Thread(target=target,
name=system,
args=tuple([system] + list(args)))
thread.daemon = True
thread.start()
# wait for threads to end, first one to finish will leave
# the result in the queue
for system in self.systems:
self.logger.info('{0} | Done collecting data!'
.format(self.results_queue.get()))
def _threaded_handler(self):
"""
Initialize tunnels and collect data&logs, threaded mode
"""
with self: # calls init_tunnels
self._run_systemwide(self.get_data_and_logs)
def _serial_handler(self):
"""
Get data&logs. Serial (legacy) handler, working inside a for loop
"""
        for system in list(self.systems):  # iterate over a copy, items may be removed below
self.logger.info('{0} | Initializing tunnel'.format(system))
try:
self.init_tunnels(system=system)
self.get_data_and_logs(system=system)
except (sshtunnel.BaseSSHTunnelForwarderError,
IOError,
SFTPSessionError):
self.logger.warning('Continue to next system (if any)')
self.systems.remove(system)
continue
finally:
self.stop_server()
def start(self):
"""
Main method for the data collection
"""
try:
if self.safe:
self._serial_handler()
else:
self._threaded_handler()
except (sshtunnel.BaseSSHTunnelForwarderError, AttributeError) as exc:
self.logger.error('Could not initialize the SSH tunnels, '
'aborting ({0})'.format(repr(exc)))
except SSHException:
self.logger.error('Could not open remote connection')
except Exception as exc:
self.logger.exception(exc)
def to_pickle(self, name, compress=False, version=None):
"""
Save collector object to [optionally] gzipped pickle.
The pickle protocol used by default is the highest supported by the
platform.
Arguments:
name (str): Name of the output file
compress (boolean):
Whether or not compress (deflate) the pickle file, defaults to
``False``
version (int):
pickle version, defaults to :const:`cPickle.HIGHEST_PROTOCOL`
"""
buffer_object = six.BytesIO()
cPickle.dump(obj=self,
file=buffer_object,
protocol=version or cPickle.HIGHEST_PROTOCOL)
buffer_object.flush()
if name.endswith('.gz'):
compress = True
name = name.rsplit('.gz')[0] # will append the .gz extension below
if compress:
output = gzip
name = "{0}.gz".format(name)
else:
output = builtins
with output.open(name, 'wb') as pkl_out:
pkl_out.write(buffer_object.getvalue())
buffer_object.close()
def _load_zipfile(self, zip_file, sftp_session=None):
"""
Inflate a zip file and call :meth:`get_stats_from_host` with the
decompressed CSV files
"""
temp_dir = tempfile.gettempdir()
self.logger.info('Decompressing ZIP file {0} into {1}...'
.format(zip_file, temp_dir))
_df = pd.DataFrame()
if not isinstance(sftp_session, SFTPClient):
sftp_session = builtins # open local file
with sftp_session.open(zip_file, 'rb') as file_descriptor:
c = six.BytesIO()
c.write(file_descriptor.read())
c.seek(0)
decompressed_files = []
try:
with zipfile.ZipFile(c, 'r') as zip_data:
# extract all to a temporary folder
zip_data.extractall(temp_dir)
# Recursive call to get_stats_from_host using localfs
decompressed_files = [os.path.join(temp_dir,
f.filename)
for f in zip_data.filelist]
_df = self.get_stats_from_host(
filespec_list=decompressed_files
)
except (zipfile.BadZipfile, zipfile.LargeZipFile) as exc:
self.logger.error('Bad ZIP file: {0}'.format(zip_file))
self.logger.exception(exc)
finally:
for a_file in decompressed_files:
self.logger.debug('Deleting file {0}'.format(a_file))
os.remove(a_file)
c.close()
return _df
def load_zipfile(zipfile, system=None):
"""
Load T4-CSV files contained inside a zip archive
Arguments:
zipfile
system (Optional[str]):
Hostname where the zip file is located, None for local filesystem
Return:
``pandas.DataFrame``
"""
col = Collector(alldays=True, nologs=True)
return col.get_stats_from_host(zipfile, hostname=system, compressed=True)
def read_pickle(name, compress=False, logger=None):
"""
Restore dataframe plus its metadata from (optionally deflated) pickle store
Arguments:
name(str): Input file name
compress (Optional[boolean]):
Whether or not the file is compressed (``True`` if file extension
ends with '.gz'). Defaults to ``False``.
logger (Optional[logging.Logger]): Optional logger object
Return:
``Collector``
"""
if compress or name.endswith('.gz'):
mode = gzip
else:
mode = builtins
optargs = {'encoding': 'latin1'} if six.PY3 else {}
with mode.open(name, 'rb') as picklein:
collector_ = cPickle.load(picklein, **optargs)
    collector_.logger = logger or init_logger()
return collector_
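# Illustrative round trip between Collector.to_pickle() and read_pickle();
# this is a sketch only, and the output file name and the systems behind the
# collector are assumptions:
#
#   col = Collector(alldays=True, nologs=True)
#   col.start()                       # collect data from every system
#   col.to_pickle('monitoring.gz')    # '.gz' suffix enables compression
#   col2 = read_pickle('monitoring.gz')
#   assert col2.data.shape == col.data.shape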
def __from_t4csv(*args, **kwargs):
return df_tools.reload_from_csv(*args, **kwargs)
def add_methods_to_pandas_dataframe(logger=None):
"""
Add custom methods to pandas.DataFrame, allowing for example running
:meth:`t4mon.calculations.apply_calcs` or
:meth:`t4mon.calculations.clean_calcs` directly from any pandas DataFrame
Arguments:
logger (Optional[logging.Logger]): Optional logger object
"""
pd.DataFrame.oper = calculations.oper
pd.DataFrame.oper_wrapper = calculations.oper_wrapper
pd.DataFrame.recursive_lis = calculations.recursive_lis
pd.DataFrame.apply_calcs = calculations.apply_calcs
pd.DataFrame.clean_calcs = calculations.clean_calcs
pd.DataFrame.logger = logger or init_logger()
pd.DataFrame.select_var = df_tools.select
pd.DataFrame.plot_var = gen_plot.plot_var
pd.DataFrame.from_t4csv = __from_t4csv
pd.DataFrame.from_t4zip = load_zipfile
pd.DataFrame.to_t4csv = df_tools.dataframe_to_t4csv
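# Once add_methods_to_pandas_dataframe() has run (the Collector constructor
# above calls it), the patched helpers are available on any DataFrame, e.g.
# (illustrative only; the calculations file name is an assumption):
#
#   add_methods_to_pandas_dataframe()
#   df = pd.DataFrame()
#   df.apply_calcs('calculations.cfg', 'systemA')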
| mit |
Srisai85/scikit-learn | examples/linear_model/plot_sparse_recovery.py | 243 | 7461 | """
============================================================
Sparse recovery: feature selection for sparse linear models
============================================================
Given a small number of observations, we want to recover which features
of X are relevant to explain y. For this :ref:`sparse linear models
<l1_feature_selection>` can outperform standard statistical tests if the
true model is sparse, i.e. if a small fraction of the features are
relevant.
As detailed in :ref:`the compressive sensing notes
<compressive_sensing>`, the ability of the L1-based approach to identify the
relevant variables depends on the sparsity of the ground truth, the
number of samples, the number of features, the conditioning of the
design matrix on the signal subspace, the amount of noise, and the
absolute value of the smallest non-zero coefficient [Wainwright2006]
(http://statistics.berkeley.edu/tech-reports/709.pdf).
Here we keep all parameters constant and vary the conditioning of the
design matrix. For a well-conditioned design matrix (small mutual
incoherence) we are exactly in compressive sensing conditions (i.i.d
Gaussian sensing matrix), and L1-recovery with the Lasso performs very
well. For an ill-conditioned matrix (high mutual incoherence),
regressors are very correlated, and the Lasso randomly selects one.
However, randomized-Lasso can recover the ground truth well.
In each situation, we first vary the alpha parameter setting the sparsity
of the estimated model and look at the stability scores of the randomized
Lasso. This analysis, knowing the ground truth, shows an optimal regime
in which relevant features stand out from the irrelevant ones. If alpha
is chosen too small, non-relevant variables enter the model. Conversely,
if alpha is selected too large, the Lasso is equivalent to
stepwise regression, and thus brings no advantage over a univariate
F-test.
In a second step, we set alpha and compare the performance of different
feature selection methods, using the area under curve (AUC) of the
precision-recall.
"""
print(__doc__)
# Author: Alexandre Gramfort and Gael Varoquaux
# License: BSD 3 clause
import warnings
import matplotlib.pyplot as plt
import numpy as np
from scipy import linalg
from sklearn.linear_model import (RandomizedLasso, lasso_stability_path,
LassoLarsCV)
from sklearn.feature_selection import f_regression
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import auc, precision_recall_curve
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.utils.extmath import pinvh
from sklearn.utils import ConvergenceWarning
def mutual_incoherence(X_relevant, X_irelevant):
"""Mutual incoherence, as defined by formula (26a) of [Wainwright2006].
"""
projector = np.dot(np.dot(X_irelevant.T, X_relevant),
pinvh(np.dot(X_relevant.T, X_relevant)))
return np.max(np.abs(projector).sum(axis=1))
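# The quantity above is the largest absolute row sum (infinity operator norm)
# of
#   X_irrelevant^T  X_relevant  (X_relevant^T X_relevant)^{-1},
# i.e. how strongly the irrelevant regressors project onto the span of the
# relevant ones; small values correspond to the well-conditioned,
# compressive-sensing-like regime described in the module docstring.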
for conditioning in (1, 1e-4):
###########################################################################
# Simulate regression data with a correlated design
n_features = 501
n_relevant_features = 3
noise_level = .2
coef_min = .2
# The Donoho-Tanner phase transition is around n_samples=25: below we
# will completely fail to recover in the well-conditioned case
n_samples = 25
block_size = n_relevant_features
rng = np.random.RandomState(42)
# The coefficients of our model
coef = np.zeros(n_features)
coef[:n_relevant_features] = coef_min + rng.rand(n_relevant_features)
# The correlation of our design: variables correlated by blocs of 3
corr = np.zeros((n_features, n_features))
for i in range(0, n_features, block_size):
corr[i:i + block_size, i:i + block_size] = 1 - conditioning
corr.flat[::n_features + 1] = 1
corr = linalg.cholesky(corr)
# Our design
X = rng.normal(size=(n_samples, n_features))
X = np.dot(X, corr)
# Keep [Wainwright2006] (26c) constant
X[:n_relevant_features] /= np.abs(
linalg.svdvals(X[:n_relevant_features])).max()
X = StandardScaler().fit_transform(X.copy())
# The output variable
y = np.dot(X, coef)
y /= np.std(y)
# We scale the added noise as a function of the average correlation
# between the design and the output variable
y += noise_level * rng.normal(size=n_samples)
mi = mutual_incoherence(X[:, :n_relevant_features],
X[:, n_relevant_features:])
###########################################################################
# Plot stability selection path, using a high eps for early stopping
# of the path, to save computation time
alpha_grid, scores_path = lasso_stability_path(X, y, random_state=42,
eps=0.05)
plt.figure()
# We plot the path as a function of alpha/alpha_max to the power 1/3: the
# power 1/3 scales the path less brutally than the log, and enables to
# see the progression along the path
hg = plt.plot(alpha_grid[1:] ** .333, scores_path[coef != 0].T[1:], 'r')
hb = plt.plot(alpha_grid[1:] ** .333, scores_path[coef == 0].T[1:], 'k')
ymin, ymax = plt.ylim()
plt.xlabel(r'$(\alpha / \alpha_{max})^{1/3}$')
plt.ylabel('Stability score: proportion of times selected')
plt.title('Stability Scores Path - Mutual incoherence: %.1f' % mi)
plt.axis('tight')
plt.legend((hg[0], hb[0]), ('relevant features', 'irrelevant features'),
loc='best')
###########################################################################
# Plot the estimated stability scores for a given alpha
# Use 6-fold cross-validation rather than the default 3-fold: it leads to
# a better choice of alpha:
# Stop the user warnings outputs- they are not necessary for the example
# as it is specifically set up to be challenging.
with warnings.catch_warnings():
warnings.simplefilter('ignore', UserWarning)
warnings.simplefilter('ignore', ConvergenceWarning)
lars_cv = LassoLarsCV(cv=6).fit(X, y)
    # Run the RandomizedLasso: we use a path going down to .1*alpha_max
# to avoid exploring the regime in which very noisy variables enter
# the model
alphas = np.linspace(lars_cv.alphas_[0], .1 * lars_cv.alphas_[0], 6)
clf = RandomizedLasso(alpha=alphas, random_state=42).fit(X, y)
trees = ExtraTreesRegressor(100).fit(X, y)
# Compare with F-score
F, _ = f_regression(X, y)
plt.figure()
for name, score in [('F-test', F),
('Stability selection', clf.scores_),
('Lasso coefs', np.abs(lars_cv.coef_)),
('Trees', trees.feature_importances_),
]:
precision, recall, thresholds = precision_recall_curve(coef != 0,
score)
plt.semilogy(np.maximum(score / np.max(score), 1e-4),
label="%s. AUC: %.3f" % (name, auc(recall, precision)))
plt.plot(np.where(coef != 0)[0], [2e-4] * n_relevant_features, 'mo',
label="Ground truth")
plt.xlabel("Features")
plt.ylabel("Score")
# Plot only the 100 first coefficients
plt.xlim(0, 100)
plt.legend(loc='best')
plt.title('Feature selection scores - Mutual incoherence: %.1f'
% mi)
plt.show()
| bsd-3-clause |
yanlend/scikit-learn | examples/svm/plot_weighted_samples.py | 188 | 1943 | """
=====================
SVM: Weighted samples
=====================
Plot decision function of a weighted dataset, where the size of points
is proportional to its weight.
The sample weighting rescales the C parameter, which means that the classifier
puts more emphasis on getting these points right. The effect might often be
subtle.
To emphasize the effect here, we particularly weight outliers, making the
deformation of the decision boundary very visible.
"""
print(__doc__)
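# Background for the fits below: the per-sample weights passed through
# fit(..., sample_weight=...) scale each sample's error term, so a weight
# w_i acts roughly like replacing C by C * w_i for that sample (an informal
# reading of the docstring above, not an exact derivation).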
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
def plot_decision_function(classifier, sample_weight, axis, title):
# plot the decision function
xx, yy = np.meshgrid(np.linspace(-4, 5, 500), np.linspace(-4, 5, 500))
Z = classifier.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# plot the line, the points, and the nearest vectors to the plane
axis.contourf(xx, yy, Z, alpha=0.75, cmap=plt.cm.bone)
axis.scatter(X[:, 0], X[:, 1], c=Y, s=100 * sample_weight, alpha=0.9,
cmap=plt.cm.bone)
axis.axis('off')
axis.set_title(title)
# we create 20 points
np.random.seed(0)
X = np.r_[np.random.randn(10, 2) + [1, 1], np.random.randn(10, 2)]
Y = [1] * 10 + [-1] * 10
sample_weight_last_ten = abs(np.random.randn(len(X)))
sample_weight_constant = np.ones(len(X))
# and bigger weights to some outliers
sample_weight_last_ten[15:] *= 5
sample_weight_last_ten[9] *= 15
# for reference, first fit without class weights
# fit the model
clf_weights = svm.SVC()
clf_weights.fit(X, Y, sample_weight=sample_weight_last_ten)
clf_no_weights = svm.SVC()
clf_no_weights.fit(X, Y)
fig, axes = plt.subplots(1, 2, figsize=(14, 6))
plot_decision_function(clf_no_weights, sample_weight_constant, axes[0],
"Constant weights")
plot_decision_function(clf_weights, sample_weight_last_ten, axes[1],
"Modified weights")
plt.show()
| bsd-3-clause |
mlyundin/scikit-learn | sklearn/utils/tests/test_extmath.py | 70 | 16531 | # Authors: Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Denis Engemann <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from scipy import linalg
from scipy import stats
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.extmath import density
from sklearn.utils.extmath import logsumexp
from sklearn.utils.extmath import norm, squared_norm
from sklearn.utils.extmath import randomized_svd
from sklearn.utils.extmath import row_norms
from sklearn.utils.extmath import weighted_mode
from sklearn.utils.extmath import cartesian
from sklearn.utils.extmath import log_logistic
from sklearn.utils.extmath import fast_dot, _fast_dot
from sklearn.utils.extmath import svd_flip
from sklearn.utils.extmath import _batch_mean_variance_update
from sklearn.utils.extmath import _deterministic_vector_sign_flip
from sklearn.utils.extmath import softmax
from sklearn.datasets.samples_generator import make_low_rank_matrix
def test_density():
rng = np.random.RandomState(0)
X = rng.randint(10, size=(10, 5))
X[1, 2] = 0
X[5, 3] = 0
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
X_coo = sparse.coo_matrix(X)
X_lil = sparse.lil_matrix(X)
for X_ in (X_csr, X_csc, X_coo, X_lil):
assert_equal(density(X_), density(X))
def test_uniform_weights():
# with uniform weights, results should be identical to stats.mode
rng = np.random.RandomState(0)
x = rng.randint(10, size=(10, 5))
weights = np.ones(x.shape)
for axis in (None, 0, 1):
mode, score = stats.mode(x, axis)
mode2, score2 = weighted_mode(x, weights, axis)
assert_true(np.all(mode == mode2))
assert_true(np.all(score == score2))
def test_random_weights():
# set this up so that each row should have a weighted mode of 6,
# with a score that is easily reproduced
mode_result = 6
rng = np.random.RandomState(0)
x = rng.randint(mode_result, size=(100, 10))
w = rng.random_sample(x.shape)
x[:, :5] = mode_result
w[:, :5] += 1
mode, score = weighted_mode(x, w, axis=1)
assert_array_equal(mode, mode_result)
assert_array_almost_equal(score.ravel(), w[:, :5].sum(1))
def test_logsumexp():
# Try to add some smallish numbers in logspace
x = np.array([1e-40] * 1000000)
logx = np.log(x)
assert_almost_equal(np.exp(logsumexp(logx)), x.sum())
X = np.vstack([x, x])
logX = np.vstack([logx, logx])
assert_array_almost_equal(np.exp(logsumexp(logX, axis=0)), X.sum(axis=0))
assert_array_almost_equal(np.exp(logsumexp(logX, axis=1)), X.sum(axis=1))
def test_randomized_svd_low_rank():
# Check that extmath.randomized_svd is consistent with linalg.svd
n_samples = 100
n_features = 500
rank = 5
k = 10
# generate a matrix X of approximate effective rank `rank` and no noise
# component (very structured signal):
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.0,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
U, s, V = linalg.svd(X, full_matrices=False)
# compute the singular values of X using the fast approximate method
Ua, sa, Va = randomized_svd(X, k)
assert_equal(Ua.shape, (n_samples, k))
assert_equal(sa.shape, (k,))
assert_equal(Va.shape, (k, n_features))
# ensure that the singular values of both methods are equal up to the real
# rank of the matrix
assert_almost_equal(s[:k], sa)
# check the singular vectors too (while not checking the sign)
assert_almost_equal(np.dot(U[:, :k], V[:k, :]), np.dot(Ua, Va))
# check the sparse matrix representation
X = sparse.csr_matrix(X)
# compute the singular values of X using the fast approximate method
Ua, sa, Va = randomized_svd(X, k)
assert_almost_equal(s[:rank], sa[:rank])
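def _randomized_range_finder_sketch(X, k, n_iter=0, random_state=0):
    # Illustrative sketch of the algorithm exercised by the tests above
    # (Halko-style randomized SVD); it is not used by the tests and is not
    # part of scikit-learn's public API.
    rng = np.random.RandomState(random_state)
    # Project onto k random directions to capture the dominant range of X.
    Y = np.dot(X, rng.normal(size=(X.shape[1], k)))
    # Optional power iterations sharpen the subspace when the spectrum decays
    # slowly (the "noisy" cases tested further below).
    for _ in range(n_iter):
        Y = np.dot(X, np.dot(X.T, Y))
    Q, _ = linalg.qr(Y, mode='economic')
    # Exact SVD of the small projected matrix yields the approximate factors.
    Uhat, s, V = linalg.svd(np.dot(Q.T, X), full_matrices=False)
    return np.dot(Q, Uhat), s, V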
def test_norm_squared_norm():
X = np.random.RandomState(42).randn(50, 63)
X *= 100 # check stability
X += 200
assert_almost_equal(np.linalg.norm(X.ravel()), norm(X))
assert_almost_equal(norm(X) ** 2, squared_norm(X), decimal=6)
assert_almost_equal(np.linalg.norm(X), np.sqrt(squared_norm(X)), decimal=6)
def test_row_norms():
X = np.random.RandomState(42).randn(100, 100)
sq_norm = (X ** 2).sum(axis=1)
assert_array_almost_equal(sq_norm, row_norms(X, squared=True), 5)
assert_array_almost_equal(np.sqrt(sq_norm), row_norms(X))
Xcsr = sparse.csr_matrix(X, dtype=np.float32)
assert_array_almost_equal(sq_norm, row_norms(Xcsr, squared=True), 5)
assert_array_almost_equal(np.sqrt(sq_norm), row_norms(Xcsr))
def test_randomized_svd_low_rank_with_noise():
# Check that extmath.randomized_svd can handle noisy matrices
n_samples = 100
n_features = 500
rank = 5
k = 10
    # generate a matrix X with structure of approximate rank `rank` and an
# important noisy component
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.5,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
_, s, _ = linalg.svd(X, full_matrices=False)
# compute the singular values of X using the fast approximate method
# without the iterated power method
_, sa, _ = randomized_svd(X, k, n_iter=0)
# the approximation does not tolerate the noise:
assert_greater(np.abs(s[:k] - sa).max(), 0.05)
# compute the singular values of X using the fast approximate method with
# iterated power method
_, sap, _ = randomized_svd(X, k, n_iter=5)
# the iterated power method is helping getting rid of the noise:
assert_almost_equal(s[:k], sap, decimal=3)
def test_randomized_svd_infinite_rank():
# Check that extmath.randomized_svd can handle noisy matrices
n_samples = 100
n_features = 500
rank = 5
k = 10
# let us try again without 'low_rank component': just regularly but slowly
# decreasing singular values: the rank of the data matrix is infinite
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=1.0,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
_, s, _ = linalg.svd(X, full_matrices=False)
# compute the singular values of X using the fast approximate method
# without the iterated power method
_, sa, _ = randomized_svd(X, k, n_iter=0)
# the approximation does not tolerate the noise:
assert_greater(np.abs(s[:k] - sa).max(), 0.1)
# compute the singular values of X using the fast approximate method with
# iterated power method
_, sap, _ = randomized_svd(X, k, n_iter=5)
# the iterated power method is still managing to get most of the structure
# at the requested rank
assert_almost_equal(s[:k], sap, decimal=3)
def test_randomized_svd_transpose_consistency():
    # Check that transposing the design matrix has limited impact
n_samples = 100
n_features = 500
rank = 4
k = 10
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.5,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
U1, s1, V1 = randomized_svd(X, k, n_iter=3, transpose=False,
random_state=0)
U2, s2, V2 = randomized_svd(X, k, n_iter=3, transpose=True,
random_state=0)
U3, s3, V3 = randomized_svd(X, k, n_iter=3, transpose='auto',
random_state=0)
U4, s4, V4 = linalg.svd(X, full_matrices=False)
assert_almost_equal(s1, s4[:k], decimal=3)
assert_almost_equal(s2, s4[:k], decimal=3)
assert_almost_equal(s3, s4[:k], decimal=3)
assert_almost_equal(np.dot(U1, V1), np.dot(U4[:, :k], V4[:k, :]),
decimal=2)
assert_almost_equal(np.dot(U2, V2), np.dot(U4[:, :k], V4[:k, :]),
decimal=2)
# in this case 'auto' is equivalent to transpose
assert_almost_equal(s2, s3)
def test_svd_flip():
# Check that svd_flip works in both situations, and reconstructs input.
rs = np.random.RandomState(1999)
n_samples = 20
n_features = 10
X = rs.randn(n_samples, n_features)
# Check matrix reconstruction
U, S, V = linalg.svd(X, full_matrices=False)
U1, V1 = svd_flip(U, V, u_based_decision=False)
assert_almost_equal(np.dot(U1 * S, V1), X, decimal=6)
# Check transposed matrix reconstruction
XT = X.T
U, S, V = linalg.svd(XT, full_matrices=False)
U2, V2 = svd_flip(U, V, u_based_decision=True)
assert_almost_equal(np.dot(U2 * S, V2), XT, decimal=6)
# Check that different flip methods are equivalent under reconstruction
U_flip1, V_flip1 = svd_flip(U, V, u_based_decision=True)
assert_almost_equal(np.dot(U_flip1 * S, V_flip1), XT, decimal=6)
U_flip2, V_flip2 = svd_flip(U, V, u_based_decision=False)
assert_almost_equal(np.dot(U_flip2 * S, V_flip2), XT, decimal=6)
def test_randomized_svd_sign_flip():
a = np.array([[2.0, 0.0], [0.0, 1.0]])
u1, s1, v1 = randomized_svd(a, 2, flip_sign=True, random_state=41)
for seed in range(10):
u2, s2, v2 = randomized_svd(a, 2, flip_sign=True, random_state=seed)
assert_almost_equal(u1, u2)
assert_almost_equal(v1, v2)
assert_almost_equal(np.dot(u2 * s2, v2), a)
assert_almost_equal(np.dot(u2.T, u2), np.eye(2))
assert_almost_equal(np.dot(v2.T, v2), np.eye(2))
def test_cartesian():
# Check if cartesian product delivers the right results
axes = (np.array([1, 2, 3]), np.array([4, 5]), np.array([6, 7]))
true_out = np.array([[1, 4, 6],
[1, 4, 7],
[1, 5, 6],
[1, 5, 7],
[2, 4, 6],
[2, 4, 7],
[2, 5, 6],
[2, 5, 7],
[3, 4, 6],
[3, 4, 7],
[3, 5, 6],
[3, 5, 7]])
out = cartesian(axes)
assert_array_equal(true_out, out)
# check single axis
x = np.arange(3)
assert_array_equal(x[:, np.newaxis], cartesian((x,)))
def test_logistic_sigmoid():
# Check correctness and robustness of logistic sigmoid implementation
naive_logistic = lambda x: 1 / (1 + np.exp(-x))
naive_log_logistic = lambda x: np.log(naive_logistic(x))
x = np.linspace(-2, 2, 50)
assert_array_almost_equal(log_logistic(x), naive_log_logistic(x))
extreme_x = np.array([-100., 100.])
assert_array_almost_equal(log_logistic(extreme_x), [-100, 0])
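# Note on the extreme values checked above: a stable implementation can use
#   log(1 / (1 + exp(-x))) = -log1p(exp(-x))    for x >= 0
#                          =  x - log1p(exp(x)) for x <  0,
# which stays finite for large |x| where the naive formula would overflow or
# underflow first.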
def test_fast_dot():
# Check fast dot blas wrapper function
if fast_dot is np.dot:
return
rng = np.random.RandomState(42)
A = rng.random_sample([2, 10])
B = rng.random_sample([2, 10])
try:
linalg.get_blas_funcs(['gemm'])[0]
has_blas = True
except (AttributeError, ValueError):
has_blas = False
if has_blas:
# Test _fast_dot for invalid input.
# Maltyped data.
for dt1, dt2 in [['f8', 'f4'], ['i4', 'i4']]:
assert_raises(ValueError, _fast_dot, A.astype(dt1),
B.astype(dt2).T)
# Malformed data.
## ndim == 0
E = np.empty(0)
assert_raises(ValueError, _fast_dot, E, E)
## ndim == 1
assert_raises(ValueError, _fast_dot, A, A[0])
## ndim > 2
assert_raises(ValueError, _fast_dot, A.T, np.array([A, A]))
## min(shape) == 1
assert_raises(ValueError, _fast_dot, A, A[0, :][None, :])
# test for matrix mismatch error
assert_raises(ValueError, _fast_dot, A, A)
# Test cov-like use case + dtypes.
for dtype in ['f8', 'f4']:
A = A.astype(dtype)
B = B.astype(dtype)
# col < row
C = np.dot(A.T, A)
C_ = fast_dot(A.T, A)
assert_almost_equal(C, C_, decimal=5)
C = np.dot(A.T, B)
C_ = fast_dot(A.T, B)
assert_almost_equal(C, C_, decimal=5)
C = np.dot(A, B.T)
C_ = fast_dot(A, B.T)
assert_almost_equal(C, C_, decimal=5)
# Test square matrix * rectangular use case.
A = rng.random_sample([2, 2])
for dtype in ['f8', 'f4']:
A = A.astype(dtype)
B = B.astype(dtype)
C = np.dot(A, B)
C_ = fast_dot(A, B)
assert_almost_equal(C, C_, decimal=5)
C = np.dot(A.T, B)
C_ = fast_dot(A.T, B)
assert_almost_equal(C, C_, decimal=5)
if has_blas:
for x in [np.array([[d] * 10] * 2) for d in [np.inf, np.nan]]:
assert_raises(ValueError, _fast_dot, x, x.T)
def test_incremental_variance_update_formulas():
# Test Youngs and Cramer incremental variance formulas.
# Doggie data from http://www.mathsisfun.com/data/standard-deviation.html
A = np.array([[600, 470, 170, 430, 300],
[600, 470, 170, 430, 300],
[600, 470, 170, 430, 300],
[600, 470, 170, 430, 300]]).T
idx = 2
X1 = A[:idx, :]
X2 = A[idx:, :]
old_means = X1.mean(axis=0)
old_variances = X1.var(axis=0)
old_sample_count = X1.shape[0]
final_means, final_variances, final_count = _batch_mean_variance_update(
X2, old_means, old_variances, old_sample_count)
assert_almost_equal(final_means, A.mean(axis=0), 6)
assert_almost_equal(final_variances, A.var(axis=0), 6)
assert_almost_equal(final_count, A.shape[0])
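# For reference, the incremental update verified above merges an old batch
# (n1, mean1, var1) with a new batch (n2, mean2, var2) along the lines of
#   n    = n1 + n2
#   mean = (n1 * mean1 + n2 * mean2) / n
#   var  = (n1 * var1 + n2 * var2 + n1 * n2 * (mean1 - mean2) ** 2 / n) / n
# using biased (ddof=0) variances, which matches np.var's default in the
# assertions.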
def test_incremental_variance_ddof():
# Test that degrees of freedom parameter for calculations are correct.
rng = np.random.RandomState(1999)
X = rng.randn(50, 10)
n_samples, n_features = X.shape
for batch_size in [11, 20, 37]:
steps = np.arange(0, X.shape[0], batch_size)
if steps[-1] != X.shape[0]:
steps = np.hstack([steps, n_samples])
for i, j in zip(steps[:-1], steps[1:]):
batch = X[i:j, :]
if i == 0:
incremental_means = batch.mean(axis=0)
incremental_variances = batch.var(axis=0)
# Assign this twice so that the test logic is consistent
incremental_count = batch.shape[0]
sample_count = batch.shape[0]
else:
result = _batch_mean_variance_update(
batch, incremental_means, incremental_variances,
sample_count)
(incremental_means, incremental_variances,
incremental_count) = result
sample_count += batch.shape[0]
calculated_means = np.mean(X[:j], axis=0)
calculated_variances = np.var(X[:j], axis=0)
assert_almost_equal(incremental_means, calculated_means, 6)
assert_almost_equal(incremental_variances,
calculated_variances, 6)
assert_equal(incremental_count, sample_count)
def test_vector_sign_flip():
# Testing that sign flip is working & largest value has positive sign
data = np.random.RandomState(36).randn(5, 5)
max_abs_rows = np.argmax(np.abs(data), axis=1)
data_flipped = _deterministic_vector_sign_flip(data)
max_rows = np.argmax(data_flipped, axis=1)
assert_array_equal(max_abs_rows, max_rows)
signs = np.sign(data[range(data.shape[0]), max_abs_rows])
assert_array_equal(data, data_flipped * signs[:, np.newaxis])
def test_softmax():
rng = np.random.RandomState(0)
X = rng.randn(3, 5)
exp_X = np.exp(X)
sum_exp_X = np.sum(exp_X, axis=1).reshape((-1, 1))
assert_array_almost_equal(softmax(X), exp_X / sum_exp_X)
| bsd-3-clause |
dgwakeman/mne-python | examples/time_frequency/plot_time_frequency_sensors.py | 7 | 2482 | """
==============================================================
Time-frequency representations on topographies for MEG sensors
==============================================================
Both average power and intertrial coherence are displayed.
"""
# Authors: Alexandre Gramfort <[email protected]>
# Denis Engemann <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.time_frequency import tfr_morlet
from mne.datasets import somato
print(__doc__)
###############################################################################
# Set parameters
data_path = somato.data_path()
raw_fname = data_path + '/MEG/somato/sef_raw_sss.fif'
event_id, tmin, tmax = 1, -1., 3.
# Setup for reading the raw data
raw = io.Raw(raw_fname)
baseline = (None, 0)
events = mne.find_events(raw, stim_channel='STI 014')
# picks MEG gradiometers
picks = mne.pick_types(raw.info, meg='grad', eeg=False, eog=True, stim=False)
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=baseline, reject=dict(grad=4000e-13, eog=350e-6))
###############################################################################
# Calculate power and intertrial coherence
freqs = np.arange(6, 30, 3) # define frequencies of interest
n_cycles = freqs / 2. # different number of cycle per frequency
power, itc = tfr_morlet(epochs, freqs=freqs, n_cycles=n_cycles, use_fft=True,
return_itc=True, decim=3, n_jobs=1)
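# With n_cycles = freqs / 2. each Morlet wavelet spans roughly
# n_cycles / freq = 0.5 s at every frequency of interest, giving an
# approximately constant temporal smoothing window across the 6-30 Hz range.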
# Baseline correction can be applied to power or done in plots
# To illustrate the baseline correction in plots the next line is commented
# power.apply_baseline(baseline=(-0.5, 0), mode='logratio')
# Inspect power
power.plot_topo(baseline=(-0.5, 0), mode='logratio', title='Average power')
power.plot([82], baseline=(-0.5, 0), mode='logratio')
fig, axis = plt.subplots(1, 2, figsize=(7, 4))
power.plot_topomap(ch_type='grad', tmin=0.5, tmax=1.5, fmin=8, fmax=12,
baseline=(-0.5, 0), mode='logratio', axes=axis[0],
title='Alpha', vmin=-0.45, vmax=0.45)
power.plot_topomap(ch_type='grad', tmin=0.5, tmax=1.5, fmin=13, fmax=25,
baseline=(-0.5, 0), mode='logratio', axes=axis[1],
title='Beta', vmin=-0.45, vmax=0.45)
mne.viz.tight_layout()
# Inspect ITC
itc.plot_topo(title='Inter-Trial coherence', vmin=0., vmax=1., cmap='Reds')
| bsd-3-clause |
tmhm/scikit-learn | examples/linear_model/plot_logistic_l1_l2_sparsity.py | 384 | 2601 | """
==============================================
L1 Penalty and Sparsity in Logistic Regression
==============================================
Comparison of the sparsity (percentage of zero coefficients) of solutions when
L1 and L2 penalty are used for different values of C. We can see that large
values of C give more freedom to the model. Conversely, smaller values of C
constrain the model more. In the L1 penalty case, this leads to sparser
solutions.
We classify 8x8 images of digits into two classes: 0-4 against 5-9.
The visualization shows coefficients of the models for varying C.
"""
print(__doc__)
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Andreas Mueller <[email protected]>
# License: BSD 3 clause
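# For reference, LogisticRegression minimizes (roughly)
#   ||w||_1          + C * sum_i log(1 + exp(-y_i * (x_i . w + b)))  for penalty='l1'
#   0.5 * ||w||_2**2 + C * sum_i log(1 + exp(-y_i * (x_i . w + b)))  for penalty='l2'
# so the non-smooth L1 term is what pushes coefficients exactly to zero in
# the plots below.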
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn import datasets
from sklearn.preprocessing import StandardScaler
digits = datasets.load_digits()
X, y = digits.data, digits.target
X = StandardScaler().fit_transform(X)
# classify small against large digits
y = (y > 4).astype(np.int)
# Set regularization parameter
for i, C in enumerate((100, 1, 0.01)):
# turn down tolerance for short training time
clf_l1_LR = LogisticRegression(C=C, penalty='l1', tol=0.01)
clf_l2_LR = LogisticRegression(C=C, penalty='l2', tol=0.01)
clf_l1_LR.fit(X, y)
clf_l2_LR.fit(X, y)
coef_l1_LR = clf_l1_LR.coef_.ravel()
coef_l2_LR = clf_l2_LR.coef_.ravel()
# coef_l1_LR contains zeros due to the
# L1 sparsity inducing norm
sparsity_l1_LR = np.mean(coef_l1_LR == 0) * 100
sparsity_l2_LR = np.mean(coef_l2_LR == 0) * 100
print("C=%.2f" % C)
print("Sparsity with L1 penalty: %.2f%%" % sparsity_l1_LR)
print("score with L1 penalty: %.4f" % clf_l1_LR.score(X, y))
print("Sparsity with L2 penalty: %.2f%%" % sparsity_l2_LR)
print("score with L2 penalty: %.4f" % clf_l2_LR.score(X, y))
l1_plot = plt.subplot(3, 2, 2 * i + 1)
l2_plot = plt.subplot(3, 2, 2 * (i + 1))
if i == 0:
l1_plot.set_title("L1 penalty")
l2_plot.set_title("L2 penalty")
l1_plot.imshow(np.abs(coef_l1_LR.reshape(8, 8)), interpolation='nearest',
cmap='binary', vmax=1, vmin=0)
l2_plot.imshow(np.abs(coef_l2_LR.reshape(8, 8)), interpolation='nearest',
cmap='binary', vmax=1, vmin=0)
plt.text(-8, 3, "C = %.2f" % C)
l1_plot.set_xticks(())
l1_plot.set_yticks(())
l2_plot.set_xticks(())
l2_plot.set_yticks(())
plt.show()
| bsd-3-clause |
probml/pyprobml | scripts/parzen_window_demo2.py | 1 | 2283 | # Demonstrate a non-parametric (parzen) density estimator in 1D
# Author: Gerardo Durán Martín
import numpy as np
import matplotlib.pyplot as plt
from scipy.linalg import norm
plt.rcParams["axes.spines.right"] = False
plt.rcParams["axes.spines.top"] = False
def K(u, axis=0): return np.all(np.abs(u) <= 1/2, axis=axis)
def p1(x, X, h):
"""
KDE under a unit hypercube
"""
N, D = X.shape
xden, _ = x.shape
u = ((x - X.T) / h).reshape(D, xden, N)
ku = K(u).sum(axis=1) / (N * h ** D)
return ku
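# p1 is the textbook Parzen estimate with a unit-hypercube kernel:
#   p(x) = (1 / (N * h**D)) * sum_n K((x - x_n) / h),
#   K(u) = 1 if all |u_j| <= 1/2 else 0,
# so each sample adds 1/(N * h**D) whenever x lies inside the cube of side h
# centred on it, which is what produces the step-shaped estimates below.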
def kdeg(x, X, h, return_components=False):
"""
KDE under a gaussian kernel
"""
N, D = X.shape
nden, _ = x.shape
Xhat = X.reshape(D, 1, N)
xhat = x.reshape(D, nden, 1)
u = xhat - Xhat
    u = norm(u, ord=2, axis=0) ** 2 / (2 * h ** 2) # (nden, N)
px = np.exp(-u)
if not return_components:
px = px.sum(axis=1)
px = px / (N * h * np.sqrt(2 * np.pi))
return px
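# kdeg swaps the cube for an isotropic Gaussian kernel; for the 1-D data in
# main() this is
#   p(x) = (1 / (N * h * sqrt(2*pi))) * sum_n exp(-(x - x_n)**2 / (2 * h**2)),
# and return_components=True returns the per-sample bumps (each already
# scaled by 1 / (N * h * sqrt(2*pi))) that are drawn as dashed red curves.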
def main():
data = np.array([-2.1, -1.3, -0.4, 1.9, 5.1, 6.2])[:, None]
yvals = np.zeros_like(data)
xv = np.linspace(-5, 10, 100)[:, None]
fig, ax = plt.subplots(2, 2)
# Uniform h=1
ax[0,0].scatter(data, yvals, marker="x", c="tab:gray")
ax[0,0].step(xv, p1(xv, data, 1), c="tab:blue", alpha=0.7)
ax[0,0].set_title("unif, h=1.0")
# Uniform h=2
ax[0,1].scatter(data, yvals, marker="x", c="tab:gray")
ax[0,1].step(xv, p1(xv, data, 2), c="tab:blue", alpha=0.7)
ax[0,1].set_title("unif, h=2.0")
# Gaussian h=1
ax[1,0].scatter(data, yvals, marker="x", c="tab:gray", zorder=3)
ax[1,0].plot(xv, kdeg(xv, data, 1), c="tab:blue", alpha=0.7, zorder=2)
ax[1,0].plot(xv, kdeg(xv, data, 1, True), c="tab:red", alpha=0.7,
linestyle="--", zorder=1, linewidth=1)
ax[1,0].set_title("gauss, h=1.0")
# Gaussian h=2
ax[1,1].scatter(data, yvals, marker="x", c="tab:gray", zorder=3)
ax[1,1].plot(xv, kdeg(xv, data, 2), c="tab:blue", alpha=0.7, zorder=2)
ax[1,1].plot(xv, kdeg(xv, data, 2, True), c="tab:red", alpha=0.7,
linestyle="--", zorder=1, linewidth=1)
ax[1,1].set_title("gauss, h=2.0")
plt.tight_layout()
plt.savefig("../figures/parzen_window2.pdf", dpi=300)
plt.show()
if __name__ == "__main__":
main()
| mit |
ganong123/HARK | gn/test_future_wealth.py | 1 | 7055 | # -*- coding: utf-8 -*-
"""
Created on Tue Aug 2 10:34:31 2016
@author: ganong
"""
import os
os.environ["R_HOME"] = "/Library/Frameworks/R.framework/Resources"
os.chdir("/Users/ganong/repo/HARK-comments-and-cleanup/gn")
import settings
import sys
#xxx want to change this default path
#sys.path.remove('/Users/ganong/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages')
#sys.path.remove('/Users/ganong/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/PIL')
sys.path.insert(0,'../')
#sys.path.insert(0,'../ConsumptionSavingModel')
sys.path.insert(0,'../ConsumptionSaving')
sys.path.insert(0,'../SolvingMicroDSOPs')
#test
from copy import copy, deepcopy
import numpy as np
#from HARKcore_gn import AgentType, Solution, NullFunc
#from HARKutilities import warnings # Because of "patch" to warnings modules
from HARKinterpolation import LinearInterp #, LowerEnvelope, CubicInterp
from HARKutilities import plotFuncs
#xx why does this error out on later runs but not on the first run? that's weird.
import ConsumptionSavingModel_gn as Model
#import ConsumerParameters as Params
import EstimationParameters_old as Params
#from time import clock
mystr = lambda number : "{:.4f}".format(number)
do_simulation = True
T_series = 30
baseline_params = Params.init_consumer_objects
import pandas as pd
from rpy2 import robjects
import rpy2.robjects.lib.ggplot2 as gg
from rpy2.robjects import pandas2ri
import make_plots as mp
#xx I'd like to be able to move around parameters here but I haven't figured out how yet! Need to understand self method better.
#NBCExample.assignParameters(self,solution_next,DiscFac,LivPrb,CRRA,Rfree,PermGroFac)
#enable plotting inside of iPython notebook (default rpy2 pushes to a semi-broken R plot-viewer)
import uuid
from rpy2.robjects.packages import importr
from IPython.core.display import Image
grdevices = importr('grDevices')
def ggplot_notebook(gg, width = 800, height = 600):
fn = 'tmp/{uuid}.png'.format(uuid = uuid.uuid4())
grdevices.png(fn, width = width, height = height)
gg.plot()
grdevices.dev_off()
return Image(filename=fn)
###########################################################################
#set economic parameters
age = 45
t_eval = age - 25
###########################################################################
# Solve consumer problems
#does this still work?
settings.init()
settings.t_rebate = age
settings.rebate_size = 0
IndShockExample = Model.IndShockConsumerType(**baseline_params)
IndShockExample.solve()
IndShockExample.unpack_cFunc()
IndShockExample.timeFwd()
settings.rebate_size = 1
settings.init()
FutRebateExample = Model.IndShockConsumerType(**baseline_params)
FutRebateExample.solve()
FutRebateExample.unpack_cFunc()
FutRebateExample.timeFwd()
settings.rebate_size = 0
settings.init()
IndShockExample.cFunc[19](2)
FutRebateExample.cFunc[19](2)
pandas2ri.activate()
loc = robjects.r('c(1,0)')
def gg_funcs(functions,bottom,top,N=1000,labels = ["Baseline"],
title = "Consumption and Cash-on-Hand", ylab = "y", xlab="x",
loc = loc, ltitle = 'Variable',
file_name = None):
if type(functions)==list:
function_list = functions
else:
function_list = [functions]
step = (top-bottom)/N
x = np.arange(bottom,top,step)
fig = pd.DataFrame({'x': x})
#xx there's got to be a better way to scroll through this list
i = 0
for function in function_list:
fig[labels[i]] = function(x)
#print labels[i]
i=i+1
fig = pd.melt(fig, id_vars=['x'])
#print(fig)
g = gg.ggplot(fig) + \
mp.base_plot + mp.line + mp.point + \
mp.theme_bw(base_size=9) + mp.fte_theme +mp.colors + \
gg.labs(title=title,y=ylab,x=xlab) + mp.legend_f(loc) + mp.legend_t_c(ltitle) + mp.legend_t_s(ltitle) #+ \
#
#gg.geom_text(data=pd.DataFrame(data={'l':"test"},index=np.arange(1)), x = "1", y = "1",group="1",colour="1", label = "plot mpg vs. wt")
#gg.geom_text(data=pd.DataFrame(data={'l':"test"},index=np.arange(1)), mapping=gg.aes_string(x="1", y="1",group="1",colour="1",shape="1", mapping="l"))
if file_name is not None:
mp.ggsave(file_name,g)
return(g)
###########################################################################
# Begin Plots
cf_exo = IndShockExample.cFunc[t_eval]
#cf_nbc = NBCExample.cFunc[t_eval]
cf_fut = FutRebateExample.cFunc[t_eval-1]
cf_fut_tm3 = FutRebateExample.cFunc[t_eval-3]
cf_fut_tm5 = FutRebateExample.cFunc[t_eval-5]
#right now takes 0.0855 seconds per run
#in the future, consider saving each function rather than just saving the output for a certain temp inc realization
def c_future_wealth(fut_period = 1, coh = 1, exo = True):
c_list = []
rebate_fut_vals = np.linspace(0, 1, num=11)
rebate_curr_vals = rebate_fut_vals[1:]
for rebate_fut in rebate_fut_vals:
settings.rebate_size = rebate_fut
settings.init()
if exo:
IndShockExample = Model.IndShockConsumerType(**baseline_params)
else :
IndShockExample = Model.IndShockConsumerType(**init_natural_borrowing_constraint)
IndShockExample.solve()
IndShockExample.unpack_cFunc()
IndShockExample.timeFwd()
c_list = np.append(c_list,IndShockExample.cFunc[t_eval-fut_period](coh))
for rebate_cur in rebate_curr_vals:
c_list = np.append(c_list,IndShockExample.cFunc[t_eval-fut_period](coh+rebate_cur))
c_func = LinearInterp(np.linspace(0, 2, num=21),np.array(c_list))
return(c_func)
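# c_future_wealth tabulates consumption on a 21-point grant grid: the first
# 11 points sweep a rebate promised `fut_period` years ahead from 0 to 1,
# while the last 10 points keep that future rebate at 1 and add a further
# 0.1-1.0 of cash-on-hand today; this is what produces the kink at 1.0 in
# the "pseudo-debt forgiveness" plots below.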
yr = gg.ylim(range=robjects.r('c(0.55,1)'))
###########################################################################
#slide 3. consumption function out of future wealth
g = gg_funcs([cf_fut_tm5,cf_fut, cf_exo],0.01,2.5, N=50, loc=robjects.r('c(1,0)'),
ltitle = '',
labels = ['Rebate Arrives In 5 Years','Rebate Arrives Next Year', 'No Rebate'],
title = "Consumption Function With Predictable Rebate of One Year's Income",
ylab = "Consumption", xlab = "Cash-on-Hand")
mp.ggsave("future_rebate",g)
ggplot_notebook(g, height=300,width=400)
###########################################################################
#slide 4 -- convex consumpion function out of debt forgiveness
convex_c_1 = c_future_wealth(fut_period = 1)
convex_c_2 = c_future_wealth(fut_period = 2)
convex_c_3 = c_future_wealth(fut_period = 3)
convex_c_4 = c_future_wealth(fut_period = 4)
g = gg_funcs([convex_c_1,convex_c_2, convex_c_3,convex_c_4],0.0,2, N=50,
labels = ['1 Year','2 Years','3 Years','4 Years'],
xlab="Wealth Grant",
            title = 'Impact of Pseudo-Debt Forgiveness \n \
From 0 to 1.0 is Future Grant. From 1.0 to 2.0 is Present Grant.\n Temp Inc = 1',
ylab = "Consumption", ltitle = "Years Until Future Grant")
g += gg.geom_vline(xintercept=1, linetype=2, colour="red", alpha=0.25)
g += yr
mp.ggsave("convex_cons_func",g)
ggplot_notebook(g, height=300,width=400)
| apache-2.0 |
UASLab/ImageAnalysis | scripts/sandbox/ob3.py | 1 | 7043 | #!/usr/bin/python3
import argparse
import cv2
import math
import os
import random
import numpy as np
import matplotlib.pyplot as plt
import classifier
texture_and_color = False
# goal_step = 160 # this is a tuning dial
parser = argparse.ArgumentParser(description='local binary patterns test.')
parser.add_argument('--image', required=True, help='image name')
parser.add_argument('--scale', type=float, default=0.4, help='scale image before processing')
# parser.add_argument('--model', help='saved learning model name')
args = parser.parse_args()
rgb = cv2.imread(args.image, flags=cv2.IMREAD_ANYCOLOR|cv2.IMREAD_ANYDEPTH|cv2.IMREAD_IGNORE_ORIENTATION)
(h, w) = rgb.shape[:2]
# texture based classifier
tmodel = classifier.Classifier()
tmodel.init_model(basename="ob-tex")
tmodel.compute_lbp(rgb, radius=3)
tmodel.compute_grid(grid_size=128)
cv2.imshow('tmodel', cv2.resize(tmodel.index.astype('uint8'), (int(w*args.scale), int(h*args.scale))))
#tmodel.update_prediction()
# color based classifier
cmodel = classifier.Classifier()
cmodel.init_model(basename="ob-col")
cmodel.compute_redness(rgb)
cmodel.compute_grid(grid_size=128)
cv2.imshow('cmodel', cv2.resize(cmodel.index.astype('uint8'), (int(w*args.scale), int(h*args.scale))))
#cmodel.update_prediction()
# cv2.imshow('index', cv2.resize(tmodel.index, (int(w*args.scale), int(h*args.scale))))
scale_orig = cv2.resize(rgb, (int(w*args.scale), int(h*args.scale)))
scale = scale_orig.copy()
gscale = cv2.cvtColor(scale, cv2.COLOR_BGR2GRAY)
def draw(image, r1, r2, c1, c2, color, width):
cv2.rectangle(image,
(int(c1*args.scale), int(r1*args.scale)),
(int((c2)*args.scale)-1, int((r2)*args.scale)-1),
color=color, thickness=width)
def draw_prediction(image, tex_cells, col_cells, selected_cell, show_mode, alpha=0.25):
cutoff = 0.05
#colors_hex = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd',
# '#8c564b', '#e377c2', '#7f7f7f', '#bcbd22', '#17becf']
colors_hex = ['#2ca02c', '#ff6f0e', '#9467bd', '#1f77b4', '#d62728',
'#8c564b', '#e377c2', '#7f7f7f', '#bcbd22', '#17becf']
colors = []
for c in colors_hex:
r = int(c[1:3], 16)
g = int(c[3:5], 16)
b = int(c[5:7], 16)
colors.append( (r, g, b) )
overlay = image.copy()
for key in tex_cells:
tex_cell = tex_cells[key]
col_cell = col_cells[key]
(r1, r2, c1, c2) = tex_cell["region"]
if show_mode == "user" and tex_cell["user"] != None:
color = colors[tex_cell["user"]]
draw(overlay, r1, r2, c1, c2, color, cv2.FILLED)
elif show_mode == "tmodel" and tex_cell["prediction"] != None:
index = tex_cell["prediction"]
if index >= 0 and abs(tex_cell["score"]) >= cutoff:
color = colors[index]
draw(overlay, r1, r2, c1, c2, color, cv2.FILLED)
elif show_mode == "cmodel" and col_cell["prediction"] != None:
index = col_cell["prediction"]
if index >= 0 and abs(col_cell["score"]) >= cutoff:
color = colors[index]
draw(overlay, r1, r2, c1, c2, color, cv2.FILLED)
elif show_mode == "combined":
tindex = tex_cell["prediction"]
cindex = col_cell["prediction"]
if tindex == cindex:
if abs(tex_cell["score"]) > cutoff and abs(col_cell["score"]) > cutoff:
draw(overlay, r1, r2, c1, c2, colors[tindex], cv2.FILLED)
result = cv2.addWeighted(overlay, alpha, image, 1 - alpha, 0)
if show_mode != "none" and show_mode != "user":
overlay = result.copy()
for key in tex_cells:
tex_cell = tex_cells[key]
(r1, r2, c1, c2) = tex_cell["region"]
if tex_cell["user"] != None:
color = colors[tex_cell["user"]]
draw(overlay, r1, r2, c1, c2, color, 2)
result = cv2.addWeighted(overlay, alpha, result, 1 - alpha, 0)
if selected_cell != None:
(r1, r2, c1, c2) = tex_cells[selected_cell]["region"]
draw(result, r1, r2, c1, c2, (255,255,255), 2)
return result
selected_cell = None
show_modes = ["none", "user", "tmodel", "cmodel", "combined"]
show_mode = "none"
show_index = 0
win = 'scale'
scale = draw_prediction(scale_orig, tmodel.cells, cmodel.cells,
selected_cell, show_mode)
cv2.imshow(win, scale)
def onmouse(event, x, y, flags, param):
global selected_cell
if event == cv2.EVENT_LBUTTONDOWN:
# show region detail
key = tmodel.find_key(int(x/args.scale), int(y/args.scale))
selected_cell = key
(r1, r2, c1, c2) = tmodel.cells[key]["region"]
rgb_region = rgb[r1:r2,c1:c2]
        cv2.imshow('region', cv2.resize(rgb_region, ((c2-c1)*3, (r2-r1)*3)))  # dsize is (width, height)
scale = draw_prediction(scale_orig, tmodel.cells, cmodel.cells,
selected_cell, show_mode)
cv2.imshow(win, scale)
elif event == cv2.EVENT_RBUTTONDOWN:
key = tmodel.find_key(int(x/args.scale), int(y/args.scale))
#if cells[key]["user"] == None:
# cells[key]["user"] = "yes"
#elif cells[key]["user"] == "yes":
# cells[key]["user"] = "no"
#else:
# cells[key]["user"] = None
scale = draw_prediction(scale_orig, tmodel.cells, cmodel.cells,
selected_cell, show_mode)
cv2.imshow(win, scale)
cv2.setMouseCallback(win, onmouse)
# work list
work_list = list(tmodel.cells.keys())
random.shuffle(work_list)
index = 0
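# Key bindings handled in the interactive loop below:
#   0-9    assign the pressed digit as the user label for the selected cell
#   space  skip the current cell
#   g      cycle the overlay view (none / user / tmodel / cmodel / combined)
#   f      refit both models and refresh their predictions
#   q      quit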
while index < len(work_list):
key = work_list[index]
selected_cell = key
scale = draw_prediction(scale_orig, tmodel.cells, cmodel.cells,
selected_cell, show_mode)
(r1, r2, c1, c2) = tmodel.cells[key]["region"]
print(r1, r2, c1, c2)
rgb_region = rgb[r1:r2,c1:c2]
cv2.imshow('gray', gscale)
cv2.imshow('scale', scale)
    cv2.imshow('region', cv2.resize(rgb_region, ((c2-c1)*3, (r2-r1)*3)))  # dsize is (width, height)
keyb = cv2.waitKey()
if keyb >= ord('0') and keyb <= ord('9'):
tmodel.cells[selected_cell]["user"] = keyb - ord('0')
cmodel.cells[selected_cell]["user"] = keyb - ord('0')
if key == selected_cell:
index += 1
elif keyb == ord(' '):
# pass this cell
index += 1
elif keyb == ord('g'):
show_index = (show_index + 1) % 5
show_mode = show_modes[show_index]
print("Show:", show_mode)
elif keyb == ord('f'):
tmodel.update_model()
tmodel.update_prediction()
cmodel.update_model()
cmodel.update_prediction()
elif keyb == ord('q'):
quit()
# if False:
# # dist histogram
# plt.figure()
# y_pos = np.arange(len(hist))
# plt.bar(y_pos, hist, align='center', alpha=0.5)
# plt.xticks(y_pos, range(len(hist)))
# plt.ylabel('count')
# plt.title('total distance histogram')
# plt.show()
| mit |
tartavull/hnn | setup.py | 2 | 1563 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read()
requirements = [
"tqdm>=3.7.1",
"numpy",
"scikit-learn>=0.17",
"sklearn>=0.0",
"scipy>=0.17.0"
]
test_requirements = [
# TODO: put package test requirements here
]
setup(
name='hnn',
version='0.1.0',
description="Hebbian Neural Network",
long_description=readme + '\n\n' + history,
author="Ignacio Tartavull",
author_email='[email protected]',
url='https://github.com/tartavull/hnn',
packages=[
'hnn',
],
package_dir={'hnn':
'hnn'},
include_package_data=True,
install_requires=requirements,
license="ISCL",
zip_safe=False,
keywords='hnn',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: ISC License (ISCL)',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
test_suite='tests',
tests_require=test_requirements
)
| isc |
xubenben/scikit-learn | examples/ensemble/plot_partial_dependence.py | 249 | 4456 | """
========================
Partial Dependence Plots
========================
Partial dependence plots show the dependence between the target function [1]_
and a set of 'target' features, marginalizing over the
values of all other features (the complement features). Due to the limits
of human perception the size of the target feature set must be small (usually,
one or two) thus the target features are usually chosen among the most
important features
(see :attr:`~sklearn.ensemble.GradientBoostingRegressor.feature_importances_`).
This example shows how to obtain partial dependence plots from a
:class:`~sklearn.ensemble.GradientBoostingRegressor` trained on the California
housing dataset. The example is taken from [HTF2009]_.
The plot shows four one-way and one two-way partial dependence plots.
The target variables for the one-way PDP are:
median income (`MedInc`), avg. occupants per household (`AvgOccup`),
median house age (`HouseAge`), and avg. rooms per household (`AveRooms`).
We can clearly see that the median house price shows a linear relationship
with the median income (top left) and that the house price drops when the
avg. occupants per household increases (top middle).
The top right plot shows that the house age in a district does not have
a strong influence on the (median) house price; nor does the average number
of rooms per household.
The tick marks on the x-axis represent the deciles of the feature values
in the training data.
Partial dependence plots with two target features enable us to visualize
interactions among them. The two-way partial dependence plot shows the
dependence of median house price on joint values of house age and avg.
occupants per household. We can clearly see an interaction between the
two features:
For an avg. occupancy greater than two, the house price is nearly independent
of the house age, whereas for values less than two there is a strong dependence
on age.
.. [HTF2009] T. Hastie, R. Tibshirani and J. Friedman,
"Elements of Statistical Learning Ed. 2", Springer, 2009.
.. [1] For classification you can think of it as the regression score before
the link function.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.cross_validation import train_test_split
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble.partial_dependence import plot_partial_dependence
from sklearn.ensemble.partial_dependence import partial_dependence
from sklearn.datasets.california_housing import fetch_california_housing
# fetch California housing dataset
cal_housing = fetch_california_housing()
# split 80/20 train-test
X_train, X_test, y_train, y_test = train_test_split(cal_housing.data,
cal_housing.target,
test_size=0.2,
random_state=1)
names = cal_housing.feature_names
print('_' * 80)
print("Training GBRT...")
clf = GradientBoostingRegressor(n_estimators=100, max_depth=4,
learning_rate=0.1, loss='huber',
random_state=1)
clf.fit(X_train, y_train)
print("done.")
print('_' * 80)
print('Convenience plot with ``partial_dependence_plots``')
print()
features = [0, 5, 1, 2, (5, 1)]
fig, axs = plot_partial_dependence(clf, X_train, features, feature_names=names,
n_jobs=3, grid_resolution=50)
fig.suptitle('Partial dependence of house value on nonlocation features\n'
'for the California housing dataset')
plt.subplots_adjust(top=0.9) # tight_layout causes overlap with suptitle
print('_' * 80)
print('Custom 3d plot via ``partial_dependence``')
print()
fig = plt.figure()
target_feature = (1, 5)
pdp, (x_axis, y_axis) = partial_dependence(clf, target_feature,
X=X_train, grid_resolution=50)
XX, YY = np.meshgrid(x_axis, y_axis)
Z = pdp.T.reshape(XX.shape).T
ax = Axes3D(fig)
surf = ax.plot_surface(XX, YY, Z, rstride=1, cstride=1, cmap=plt.cm.BuPu)
ax.set_xlabel(names[target_feature[0]])
ax.set_ylabel(names[target_feature[1]])
ax.set_zlabel('Partial dependence')
# pretty init view
ax.view_init(elev=22, azim=122)
plt.colorbar(surf)
plt.suptitle('Partial dependence of house value on median age and '
'average occupancy')
plt.subplots_adjust(top=0.9)
plt.show()
| bsd-3-clause |
bizreach/common-ml | common/commonml/text/custom_dict_vectorizer.py | 1 | 5920 | # coding: utf-8
from logging import getLogger
from commonml import es
from commonml.utils import get_nested_value
from scipy.sparse.construct import hstack
from scipy.sparse import csr_matrix
from sklearn.base import BaseEstimator
from sklearn.feature_extraction.text import VectorizerMixin, TfidfVectorizer, \
CountVectorizer
from sklearn.preprocessing import LabelBinarizer
import numpy as np
import json
logger = getLogger(__name__)
class NumberPaththroughVectorizer(object):
def __init__(self, dtype):
self.dtype_text = dtype
self.vocabulary_ = ['number']
def fit(self, raw_documents):
pass
def transform(self, raw_documents):
        if self.dtype_text == 'float32':
            dtype = np.float32
        elif self.dtype_text == 'int32':
            dtype = np.int32
        else:
            raise ValueError('unsupported dtype: %s' % self.dtype_text)
output = [[number] for number in raw_documents]
return csr_matrix(output, dtype=dtype)
def fit_transform(self, raw_documents):
        return self.transform(raw_documents)
def get_feature_names(self):
# TODO what do i return
return self.vocabulary_
class ExtendedLabelBinarizer(LabelBinarizer):
def __init__(self, neg_label=0, pos_label=1,
sparse_output=False, labelindex_path=None):
super(ExtendedLabelBinarizer, self) \
.__init__(neg_label, pos_label, sparse_output)
self.labelindex_path = labelindex_path
if self.labelindex_path is not None:
with open(self.labelindex_path, 'r') as f:
self.labelindex = json.load(f)
def fit(self, y):
if self.labelindex_path is not None:
super(ExtendedLabelBinarizer, self).fit(self.labelindex)
else:
super(ExtendedLabelBinarizer, self).fit(y)
def get_feature_names(self):
return self.classes_
def build_custom_vectorizer(config):
vect_rules = []
for vect_config in config.values():
vect_rule = {}
vect_rule['name'] = vect_config.get('name')
vect_type = vect_config.pop('type')
vect_args = vect_config.get('vectorizer')
analyzer_url = vect_config.get('analyzer')
if analyzer_url is not None:
vect_args['tokenizer'] = es.build_analyzer(analyzer_url)
vectorizer = None
if vect_type == 'count':
vectorizer = CountVectorizer(**vect_args)
elif vect_type == 'tfidf':
vectorizer = TfidfVectorizer(**vect_args)
elif vect_type == 'number':
vectorizer = NumberPaththroughVectorizer(**vect_args)
elif vect_type == 'label':
vectorizer = ExtendedLabelBinarizer(**vect_args)
if vectorizer is not None:
vect_rule['vectorizer'] = vectorizer
vect_rules.append(vect_rule)
return CustomDictVectorizer(vect_rules=vect_rules)
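# Illustrative sketch: a config for build_custom_vectorizer might look roughly
# like the following (the field and option names here are hypothetical examples):
#
#     config = {
#         'title_rule': {
#             'name': 'title',                       # (possibly nested) document field
#             'type': 'tfidf',                       # one of: count, tfidf, number, label
#             'vectorizer': {'max_features': 1000},  # kwargs for the chosen vectorizer
#             'analyzer': None,                      # optional Elasticsearch analyzer URL
#         },
#     }
#     vectorizer = build_custom_vectorizer(config)
#     X = vectorizer.fit_transform(docs)             # docs: a list of (nested) dicts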
def get_nested_str_value(doc, field, default_value=None):
value = get_nested_value(doc, field, None)
if value is None:
return default_value
if isinstance(value, list):
return ' '.join(value)
return value
class CustomDictVectorizer(BaseEstimator, VectorizerMixin):
def __init__(self, vect_rules):
self.vect_rules = vect_rules
def fit(self, raw_documents):
for vect_rule in self.vect_rules:
name = vect_rule.get('name')
vect = vect_rule.get('vectorizer')
if not hasattr(vect, '__call__'):
vect.fit([get_nested_str_value(x, name, '') for x in raw_documents])
def transform(self, raw_documents):
results = []
for vect_rule in self.vect_rules:
name = vect_rule.get('name')
vect = vect_rule.get('vectorizer')
if hasattr(vect, '__call__'):
data = vect([get_nested_str_value(x, name, '') for x in raw_documents])
else:
data = vect.transform([get_nested_str_value(x, name, '') for x in raw_documents])
if 'weight' in vect_rule:
data *= float(vect_rule.get('weight'))
results.append(data)
return hstack(results, format='csr', dtype=np.float32)
def fit_transform(self, raw_documents, y=None):
results = []
for vect_rule in self.vect_rules:
name = vect_rule.get('name')
vect = vect_rule.get('vectorizer')
if hasattr(vect, '__call__'):
data = vect([get_nested_str_value(x, name, '') for x in raw_documents])
else:
data = vect.fit_transform([get_nested_str_value(x, name, '') for x in raw_documents])
if 'weight' in vect_rule:
data *= float(vect_rule.get('weight'))
results.append(data)
return hstack(results, format='csr', dtype=np.float32)
def get_feature_names(self, append_name=True):
results = []
for vect_rule in self.vect_rules:
vect = vect_rule.get('vectorizer')
if hasattr(vect, '__call__'):
results.append(vect_rule.get('name'))
else:
if append_name:
name = vect_rule.get('name')
names = [u'{0}={1}'.format(name, x) for x in vect.get_feature_names()]
else:
names = vect.get_feature_names()
results.extend(names)
return results
def get_feature_size(self):
size = 0
for vect_rule in self.vect_rules:
vect = vect_rule.get('vectorizer')
size += len(vect.vocabulary_)
return size
def inverse_transform(self, X):
names = np.array(self.get_feature_names())
def get_names(x):
indices = np.argwhere(x.toarray().flatten() > 0).flatten()
if len(indices) == 0:
return []
else:
return names[indices]
return [get_names(x) for x in X]
| apache-2.0 |
henrynj/PMLMC | plot/test.py | 1 | 5470 | # -*- coding: utf-8 -*-
"""
Created on Sun Feb 25 21:49:57 2018
@author: jun
"""
import csv
import numpy as np
import matplotlib.pyplot as plt
from scipy import interpolate
#x = np.array([5, 10, 15])
#y = np.array([36, 64, 95])
#f = interpolate.interp1d(x, y, kind='quadratic')
#
#xnew = np.arange(5, 15, 0.05)
#ynew = f(xnew) # use interpolation function returned by `interp1d`
#plt.plot(x, y, 'o', xnew, ynew, '-')
#plt.show()
#
#a = 6
#print f(a)
def sort():
a = np.array( [ [1,2],[3,4],[5,6]] )
b = np.array( [ [3,4,11],[1,2,10],[5,6,12]] )
inds = np.zeros((len(b)), dtype=int)
for i, spoint in enumerate(b[:,0:-1]):
# ind = 0
for j, bpoint in enumerate(a):
if all( np.isclose(spoint, bpoint, rtol=0, atol=1e-8) ):
ind = j
break
inds[i] = ind
# inds = np.array(inds)
print b[inds]
sort()
class test_data_order:
""" test if the order of data changed for the deformed blades"""
def __init__(self, NM, L, case_dir, data_dir):
self.NM = NM
self.L = L
self.case_dir = case_dir #"../TestCase/ls89_euler_0/"
self.data_dir = data_dir # 'data_test01/'
self.dir = case_dir + data_dir
def run(self):
for mesh_level in range(self.L+1):
# nominal data
meshsize = MESHSIZES[mesh_level]
self.read_blade_points(mesh_level)
for stype in range(2):
if (mesh_level==self.L and stype==1):
continue
level_type = 'meshlevel%d_type%d' %(mesh_level, stype)
sample_level = mesh_level + stype
numSamples = self.NM[sample_level]
for it in range(1, numSamples+1 ):
ddir = self.dir + level_type + '/sample%05d/' %it
print '%s, with sample%d' %(level_type, it)
self.get_data(ddir, meshsize)
self.compare_data()
### compare
def compare_data(self):
''' '''
ind = self.sort_points(self.surface.transpose(), self.blade_deformed)
self.delta1 = self.blade + self.disp - self.blade_deformed
self.delta2 = self.blade + self.disp - self.surface.transpose()[ind]
print self.surface.transpose()[ind] - self.blade_deformed
def sort_points(self, surface_points, original_points):
''' '''
inds = []
for i, opoint in enumerate(original_points):
for j, spoint in enumerate(surface_points):
if all( np.isclose(opoint, spoint, rtol=0, atol=1e-8) ):
ind = j
break
inds.append(ind)
return np.array(inds)
def get_data(self, ddir, meshsize):
self.read_displacement(ddir)
self.read_deformed_blade(ddir, meshsize)
self.read_surface_file(ddir)
def read_displacement(self, ddir):
''' read displacement.dat'''
fname = ddir + 'displacement.dat'
data = np.loadtxt(fname)
self.disp = data[:,1:]
def read_blade_points(self, meshlevel):
''' read coordinates of points on blade '''
fname = self.case_dir + 'meshfiles/blade_%05d' %MESHSIZES[meshlevel]
self.blade = np.loadtxt(fname)
def read_deformed_blade(self,ddir,meshsize):
mesh_filename = ddir + 'ls89_euler_%05d_deformed.su2' %meshsize
meshdata = read_mesh(mesh_filename)
mark_tags = ['BLADE']
ndim = meshdata['NDIME']
# list for marker node numbers
markernodes = []
# scan each marker
for this_tag in mark_tags:
# current mark
this_mark = meshdata['MARKS'][this_tag]
# marker elements
markelems = this_mark['ELEM']
# list for marker nodes
marknodes = [ row[1:] for row in markelems ]
# add to mesh node list
markernodes = markernodes + marknodes
# unique check
#markernodes = dict(map(lambda i:(i,1),markernodes)).keys()
markernodes = np.hstack(markernodes)
markernodes = np.unique(markernodes)
markernodes = list(markernodes)
# list for marker points
markerpoints = [meshdata['POIN'][inode][0:ndim]
for inode in markernodes]
self.blade_deformed = np.array(markerpoints)
def read_surface_file(self, ddir):
''' read single surface file'''
filename = ddir + 'surface_flow.csv'
self.surface_data = {}
f = open(filename, 'r')
reader = csv.reader(f)
# skip first line
next(reader)
for l in reader:
self.surface_data.setdefault('x',[]).append( float(l[1]) )
self.surface_data.setdefault('y',[]).append( float(l[2]) )
self.surface_data.setdefault('Pressure',[]).append( float(l[3]) )
self.surface_data.setdefault('Pressure_Coefficient',[]).append( float(l[4]) )
f.close()
self.surface = np.array( [self.surface_data['x'], self.surface_data['y']] ) | gpl-3.0 |
Selameab/opencog | opencog/python/spatiotemporal/demo.py | 33 | 1221 | __author__ = 'sebastian'
from spatiotemporal.temporal_events.trapezium import TemporalEventTrapezium
from spatiotemporal.temporal_events.relation_formulas import FormulaCreator
from spatiotemporal.temporal_events.composition.non_linear_least_squares import DecompositionFitter
import matplotlib.pyplot as plt
all_relations = "pmoFDseSdfOMP"
a = TemporalEventTrapezium(1, 12, 4, 8)
b = TemporalEventTrapezium(9, 17, 13, 15)
# compute relations between events
temporal_relations = a * b
print("Relations: {0}".format(temporal_relations.to_list()))
# print degree for every relation
for relation in all_relations:
print(relation, temporal_relations[relation])
# plot events
a.plot(show_distributions=True).ylim(ymin=-0.1, ymax=1.1)
b.plot(show_distributions=True).figure()
plt.show()
# from the 13 relations, learns parameters for all combinations of the
# before, same, and after relationships between the beginning and
# ending distributions of the two intervals
formula = FormulaCreator(DecompositionFitter(temporal_relations))
# from these relationships, computes the 13 relations again
relations_estimate = formula.calculate_relations()
print("Estimated relations: {0}".format(relations_estimate.to_list())) | agpl-3.0 |
macks22/scikit-learn | examples/exercises/plot_iris_exercise.py | 323 | 1602 | """
================================
SVM Exercise
================================
A tutorial exercise for using different SVM kernels.
This exercise is used in the :ref:`using_kernels_tut` part of the
:ref:`supervised_learning_tut` section of the :ref:`stat_learn_tut_index`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, svm
iris = datasets.load_iris()
X = iris.data
y = iris.target
X = X[y != 0, :2]
y = y[y != 0]
n_sample = len(X)
np.random.seed(0)
order = np.random.permutation(n_sample)
X = X[order]
y = y[order].astype(np.float64)
n_train = int(.9 * n_sample)  # integer split index (float slicing is not valid)
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
# fit the model
for fig_num, kernel in enumerate(('linear', 'rbf', 'poly')):
clf = svm.SVC(kernel=kernel, gamma=10)
clf.fit(X_train, y_train)
plt.figure(fig_num)
plt.clf()
plt.scatter(X[:, 0], X[:, 1], c=y, zorder=10, cmap=plt.cm.Paired)
# Circle out the test data
plt.scatter(X_test[:, 0], X_test[:, 1], s=80, facecolors='none', zorder=10)
plt.axis('tight')
x_min = X[:, 0].min()
x_max = X[:, 0].max()
y_min = X[:, 1].min()
y_max = X[:, 1].max()
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired)
plt.contour(XX, YY, Z, colors=['k', 'k', 'k'], linestyles=['--', '-', '--'],
levels=[-.5, 0, .5])
plt.title(kernel)
plt.show()
| bsd-3-clause |
Zsailer/epistasis | epistasis/pyplot/old/fraction_explained.py | 2 | 2556 | import numpy as np
import matplotlib.pyplot as plt
from matplotlib.path import Path
from matplotlib import patches
import warnings
def fraction_explained(fraction_explained, color_vector=None, num_bins=1000,lw=0.25):
"""
Plot a square "pie" chart where each box is colored according to how often it
occurs in the data set.
    Parameters
    ----------
    fraction_explained : array-like
        Fractions to plot, one per category.  Normalized to sum to 1 if they
        do not already.
    color_vector : list of colors, optional
        One color per entry in fraction_explained.  Defaults to cycling the
        matplotlib color cycle.
    num_bins : int, optional
        Approximate number of boxes; rounded to the nearest perfect square.
    lw : float, optional
        Line width of the box edges.
    Returns
    -------
    fig, ax : the matplotlib Figure and Axes containing the plot.
    """
# Normalize fx_vector so it adds up to 1.0
internal_fx_vector = np.array(np.copy(fraction_explained))
if np.sum(internal_fx_vector) != 1.0:
        warnings.warn("fraction_explained does not add up to 1")
internal_fx_vector = internal_fx_vector/np.sum(internal_fx_vector)
# Create a color vector or grab the one off the command line
if color_vector is None:
# Prepare an cycle of colors
order = len(fraction_explained)
prop_cycle = plt.rcParams['axes.prop_cycle']
color_vector = prop_cycle.by_key()['color']
color_scalar = int(order / len(color_vector)) + 1
color_vector *= color_scalar
else:
        if len(fraction_explained) > len(color_vector):
            err = "len(color_vector) must be >= len(fraction_explained)\n"
raise ValueError(err)
# Discretize the input vector with appropriately scaled
    side_length = int(np.round(np.sqrt(num_bins), 0))
num_bins = side_length*side_length
fx_vector_int = np.cumsum(np.round(internal_fx_vector*num_bins,0)).astype(int)
# Generate the plot
fig = plt.figure()
ax = fig.add_subplot(111)
# path codes for drawing the boxes
box_codes = [Path.MOVETO,Path.LINETO,Path.LINETO,Path.LINETO,Path.CLOSEPOLY]
# Go through each bin and color appropriately
current_index = 0
total_counter = 0
for i in range(side_length-1,-1,-1):
for j in range(side_length):
if total_counter >= fx_vector_int[current_index]:
current_index += 1
# kind of a hack. last entry sometimes has round error and doesn't get given a color.
# use last entry to fill in.
if current_index >= len(fraction_explained):
current_index -= 1
# Draw box
verts = [(j,i),(j+1,i),(j+1,i+1),(j,i+1),(j, i),]
path = Path(verts, box_codes)
patch = patches.PathPatch(path, facecolor=color_vector[current_index], lw=lw)
ax.add_patch(patch)
total_counter += 1
# Clean up plot
ax.axis('equal')
ax.axis('off')
ax.set_xlim(-1,side_length+1)
ax.set_ylim(-1,side_length+1)
return fig, ax
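# Illustrative usage sketch (arbitrary example values):
#
#     import matplotlib.pyplot as plt
#     fractions = [0.6, 0.3, 0.1]                  # should (roughly) sum to 1
#     fig, ax = fraction_explained(fractions, num_bins=400)
#     plt.show()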
| unlicense |
alarmringing/ai-language-acquisition | splice_audio.py | 1 | 1558 | from pydub import AudioSegment
import numpy, scipy, matplotlib.pyplot as plt, sklearn, librosa, mir_eval, urllib
import os
import errno
def splice(audioFile, times):
audio = AudioSegment.from_wav(audioFile)
syllables = []
for i in range(len(times)):
t = times[i]
t = t * 1000 #set to milliseconds, might have to drop sig figs
if i == 0:
syllables.append(audio[:t])
else:
t_prev = times[i - 1] * 1000
syllables.append(audio[t_prev:t])
print str(t_prev) + " " + str(t)
if i == len(times) - 1:
syllables.append(audio[t:])
return syllables
def get_times(timeFile):
times = []
with open(timeFile) as f:
for line in f:
times.append(float(line.strip()))
return times
def splice_audio(inputDir, inputPathName, onset_times):
index = 0
raw_syllables = list()
#print times
for filename in os.listdir(inputDir + "/" + inputPathName + '/'):
if filename.endswith(".wav"):
syllables = splice(inputDir + '/' + inputPathName + '/' + filename, onset_times[index])
for i, syllable in enumerate (syllables):
out_file = "./audio/output/syllables/" + inputPathName + "/" + filename.rstrip('.wav') + "{0}.wav".format(i)
if not os.path.exists(os.path.dirname(out_file)):
try:
os.makedirs(os.path.dirname(out_file))
except OSError as exc: # Guard against race condition
if exc.errno != errno.EEXIST:
raise
syllable.export(out_file, format="wav")
x, fs = librosa.load(out_file)
raw_syllables.append(x)
index += 1
#return syllables as vector
return raw_syllables
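# Illustrative usage sketch (hypothetical paths and variable names):
#
#     times_per_file = [get_times(p) for p in time_file_paths]
#     syllables = splice_audio('./audio/input', 'speaker01', times_per_file)
#
# splice_audio expects one list of onset times (in seconds) per .wav file found
# in <inputDir>/<inputPathName>/, in the order os.listdir returns the files.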
| mit |
kastnerkyle/pylearn2 | pylearn2/scripts/tests/test_print_monitor_cv.py | 48 | 1927 | """
Test print_monitor_cv.py by training on a short TrainCV YAML file and
analyzing the output pickle.
"""
import os
import tempfile
from pylearn2.config import yaml_parse
from pylearn2.scripts import print_monitor_cv
from pylearn2.testing.skip import skip_if_no_sklearn
def test_print_monitor_cv():
"""Test print_monitor_cv.py."""
skip_if_no_sklearn()
handle, filename = tempfile.mkstemp()
trainer = yaml_parse.load(test_print_monitor_cv_yaml %
{'filename': filename})
trainer.main_loop()
# run print_monitor_cv.py main
print_monitor_cv.main(filename)
# run print_monitor_cv.py main with all=True
print_monitor_cv.main(filename, all=True)
# cleanup
os.remove(filename)
test_print_monitor_cv_yaml = """
!obj:pylearn2.cross_validation.TrainCV {
dataset_iterator:
!obj:pylearn2.cross_validation.dataset_iterators.DatasetKFold {
dataset:
!obj:pylearn2.testing.datasets.random_one_hot_dense_design_matrix
{
rng: !obj:numpy.random.RandomState { seed: 1 },
num_examples: 10,
dim: 10,
num_classes: 2,
},
},
model: !obj:pylearn2.models.mlp.MLP {
layers: [
!obj:pylearn2.models.mlp.Sigmoid {
layer_name: h0,
dim: 8,
irange: 0.05,
},
!obj:pylearn2.models.mlp.Softmax {
layer_name: y,
n_classes: 2,
irange: 0.05,
},
],
nvis: 10,
},
algorithm: !obj:pylearn2.training_algorithms.bgd.BGD {
batch_size: 5,
line_search_mode: 'exhaustive',
conjugate: 1,
termination_criterion:
!obj:pylearn2.termination_criteria.EpochCounter {
max_epochs: 1,
},
},
save_path: %(filename)s,
}
"""
| bsd-3-clause |
mwaskom/seaborn | seaborn/algorithms.py | 2 | 4971 | """Algorithms to support fitting routines in seaborn plotting functions."""
import numbers
import numpy as np
import warnings
def bootstrap(*args, **kwargs):
"""Resample one or more arrays with replacement and store aggregate values.
Positional arguments are a sequence of arrays to bootstrap along the first
axis and pass to a summary function.
Keyword arguments:
n_boot : int, default=10000
Number of iterations
axis : int, default=None
Will pass axis to ``func`` as a keyword argument.
units : array, default=None
Array of sampling unit IDs. When used the bootstrap resamples units
and then observations within units instead of individual
datapoints.
func : string or callable, default="mean"
Function to call on the args that are passed in. If string, uses as
name of function in the numpy namespace. If nans are present in the
data, will try to use nan-aware version of named function.
seed : Generator | SeedSequence | RandomState | int | None
Seed for the random number generator; useful if you want
reproducible resamples.
Returns
-------
boot_dist: array
array of bootstrapped statistic values
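    Examples
    --------
    A minimal usage sketch (illustrative only; the data below is arbitrary):
    >>> import numpy as np
    >>> data = np.array([1., 2., 3., 4., 5.])
    >>> boots = bootstrap(data, func="mean", n_boot=100, seed=0)
    >>> boots.shape
    (100,)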
"""
# Ensure list of arrays are same length
if len(np.unique(list(map(len, args)))) > 1:
raise ValueError("All input arrays must have the same length")
n = len(args[0])
# Default keyword arguments
n_boot = kwargs.get("n_boot", 10000)
func = kwargs.get("func", "mean")
axis = kwargs.get("axis", None)
units = kwargs.get("units", None)
random_seed = kwargs.get("random_seed", None)
if random_seed is not None:
msg = "`random_seed` has been renamed to `seed` and will be removed"
warnings.warn(msg)
seed = kwargs.get("seed", random_seed)
if axis is None:
func_kwargs = dict()
else:
func_kwargs = dict(axis=axis)
# Initialize the resampler
rng = _handle_random_seed(seed)
# Coerce to arrays
args = list(map(np.asarray, args))
if units is not None:
units = np.asarray(units)
if isinstance(func, str):
# Allow named numpy functions
f = getattr(np, func)
# Try to use nan-aware version of function if necessary
missing_data = np.isnan(np.sum(np.column_stack(args)))
if missing_data and not func.startswith("nan"):
nanf = getattr(np, f"nan{func}", None)
if nanf is None:
msg = f"Data contain nans but no nan-aware version of `{func}` found"
warnings.warn(msg, UserWarning)
else:
f = nanf
else:
f = func
# Handle numpy changes
try:
integers = rng.integers
except AttributeError:
integers = rng.randint
# Do the bootstrap
if units is not None:
return _structured_bootstrap(args, n_boot, units, f,
func_kwargs, integers)
boot_dist = []
for i in range(int(n_boot)):
resampler = integers(0, n, n, dtype=np.intp) # intp is indexing dtype
sample = [a.take(resampler, axis=0) for a in args]
boot_dist.append(f(*sample, **func_kwargs))
return np.array(boot_dist)
def _structured_bootstrap(args, n_boot, units, func, func_kwargs, integers):
"""Resample units instead of datapoints."""
unique_units = np.unique(units)
n_units = len(unique_units)
args = [[a[units == unit] for unit in unique_units] for a in args]
boot_dist = []
for i in range(int(n_boot)):
resampler = integers(0, n_units, n_units, dtype=np.intp)
sample = [[a[i] for i in resampler] for a in args]
lengths = map(len, sample[0])
resampler = [integers(0, n, n, dtype=np.intp) for n in lengths]
sample = [[c.take(r, axis=0) for c, r in zip(a, resampler)] for a in sample]
sample = list(map(np.concatenate, sample))
boot_dist.append(func(*sample, **func_kwargs))
return np.array(boot_dist)
def _handle_random_seed(seed=None):
"""Given a seed in one of many formats, return a random number generator.
Generalizes across the numpy 1.17 changes, preferring newer functionality.
"""
if isinstance(seed, np.random.RandomState):
rng = seed
else:
try:
# General interface for seeding on numpy >= 1.17
rng = np.random.default_rng(seed)
except AttributeError:
# We are on numpy < 1.17, handle options ourselves
if isinstance(seed, (numbers.Integral, np.integer)):
rng = np.random.RandomState(seed)
elif seed is None:
rng = np.random.RandomState()
else:
                err = "{} cannot be used to seed the random number generator"
raise ValueError(err.format(seed))
return rng
| bsd-3-clause |
chenyyx/scikit-learn-doc-zh | examples/zh/mixture/plot_gmm_covariances.py | 89 | 4724 | """
===============
GMM covariances
===============
Demonstration of several covariances types for Gaussian mixture models.
See :ref:`gmm` for more information on the estimator.
Although GMMs are often used for clustering, we can compare the obtained
clusters with the actual classes from the dataset. We initialize the means
of the Gaussians with the means of the classes from the training set to make
this comparison valid.
We plot predicted labels on both training and held out test data using a
variety of GMM covariance types on the iris dataset.
We compare GMMs with spherical, diagonal, full, and tied covariance
matrices in increasing order of performance. Although one would
expect full covariance to perform best in general, it is prone to
overfitting on small datasets and does not generalize well to held out
test data.
On the plots, train data is shown as dots, while test data is shown as
crosses. The iris dataset is four-dimensional. Only the first two
dimensions are shown here, and thus some points are separated in other
dimensions.
"""
# Author: Ron Weiss <[email protected]>, Gael Varoquaux
# Modified by Thierry Guillemot <[email protected]>
# License: BSD 3 clause
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets
from sklearn.mixture import GaussianMixture
from sklearn.model_selection import StratifiedKFold
print(__doc__)
colors = ['navy', 'turquoise', 'darkorange']
def make_ellipses(gmm, ax):
for n, color in enumerate(colors):
if gmm.covariance_type == 'full':
covariances = gmm.covariances_[n][:2, :2]
elif gmm.covariance_type == 'tied':
covariances = gmm.covariances_[:2, :2]
elif gmm.covariance_type == 'diag':
covariances = np.diag(gmm.covariances_[n][:2])
elif gmm.covariance_type == 'spherical':
covariances = np.eye(gmm.means_.shape[1]) * gmm.covariances_[n]
v, w = np.linalg.eigh(covariances)
u = w[0] / np.linalg.norm(w[0])
angle = np.arctan2(u[1], u[0])
angle = 180 * angle / np.pi # convert to degrees
v = 2. * np.sqrt(2.) * np.sqrt(v)
ell = mpl.patches.Ellipse(gmm.means_[n, :2], v[0], v[1],
180 + angle, color=color)
ell.set_clip_box(ax.bbox)
ell.set_alpha(0.5)
ax.add_artist(ell)
iris = datasets.load_iris()
# Break up the dataset into non-overlapping training (75%) and testing
# (25%) sets.
skf = StratifiedKFold(n_splits=4)
# Only take the first fold.
train_index, test_index = next(iter(skf.split(iris.data, iris.target)))
X_train = iris.data[train_index]
y_train = iris.target[train_index]
X_test = iris.data[test_index]
y_test = iris.target[test_index]
n_classes = len(np.unique(y_train))
# Try GMMs using different types of covariances.
estimators = dict((cov_type, GaussianMixture(n_components=n_classes,
covariance_type=cov_type, max_iter=20, random_state=0))
for cov_type in ['spherical', 'diag', 'tied', 'full'])
n_estimators = len(estimators)
plt.figure(figsize=(3 * n_estimators // 2, 6))
plt.subplots_adjust(bottom=.01, top=0.95, hspace=.15, wspace=.05,
left=.01, right=.99)
for index, (name, estimator) in enumerate(estimators.items()):
# Since we have class labels for the training data, we can
# initialize the GMM parameters in a supervised manner.
estimator.means_init = np.array([X_train[y_train == i].mean(axis=0)
for i in range(n_classes)])
# Train the other parameters using the EM algorithm.
estimator.fit(X_train)
h = plt.subplot(2, n_estimators // 2, index + 1)
make_ellipses(estimator, h)
for n, color in enumerate(colors):
data = iris.data[iris.target == n]
plt.scatter(data[:, 0], data[:, 1], s=0.8, color=color,
label=iris.target_names[n])
# Plot the test data with crosses
for n, color in enumerate(colors):
data = X_test[y_test == n]
plt.scatter(data[:, 0], data[:, 1], marker='x', color=color)
y_train_pred = estimator.predict(X_train)
train_accuracy = np.mean(y_train_pred.ravel() == y_train.ravel()) * 100
plt.text(0.05, 0.9, 'Train accuracy: %.1f' % train_accuracy,
transform=h.transAxes)
y_test_pred = estimator.predict(X_test)
test_accuracy = np.mean(y_test_pred.ravel() == y_test.ravel()) * 100
plt.text(0.05, 0.8, 'Test accuracy: %.1f' % test_accuracy,
transform=h.transAxes)
plt.xticks(())
plt.yticks(())
plt.title(name)
plt.legend(scatterpoints=1, loc='lower right', prop=dict(size=12))
plt.show()
| gpl-3.0 |
dsquareindia/scikit-learn | examples/decomposition/plot_sparse_coding.py | 60 | 4016 | """
===========================================
Sparse coding with a precomputed dictionary
===========================================
Transform a signal as a sparse combination of Ricker wavelets. This example
visually compares different sparse coding methods using the
:class:`sklearn.decomposition.SparseCoder` estimator. The Ricker (also known
as Mexican hat or the second derivative of a Gaussian) is not a particularly
good kernel to represent piecewise constant signals like this one. It can
therefore be seen how much adding different widths of atoms matters and it
therefore motivates learning the dictionary to best fit your type of signals.
The richer dictionary on the right is not larger in size; heavier subsampling
is performed in order to stay on the same order of magnitude.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import SparseCoder
def ricker_function(resolution, center, width):
"""Discrete sub-sampled Ricker (Mexican hat) wavelet"""
x = np.linspace(0, resolution - 1, resolution)
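    # Ricker (Mexican hat) wavelet with center c and width a:
    #     psi(x) = 2 / (sqrt(3 * a) * pi ** 0.25)
    #              * (1 - (x - c) ** 2 / a ** 2) * exp(-(x - c) ** 2 / (2 * a ** 2))
    # (each atom is re-normalised to unit l2 norm in ricker_matrix, so the
    # leading constant only matters up to scale)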
    x = ((2 / ((np.sqrt(3 * width) * np.pi ** 0.25)))
* (1 - ((x - center) ** 2 / width ** 2))
* np.exp((-(x - center) ** 2) / (2 * width ** 2)))
return x
def ricker_matrix(width, resolution, n_components):
"""Dictionary of Ricker (Mexican hat) wavelets"""
centers = np.linspace(0, resolution - 1, n_components)
D = np.empty((n_components, resolution))
for i, center in enumerate(centers):
D[i] = ricker_function(resolution, center, width)
D /= np.sqrt(np.sum(D ** 2, axis=1))[:, np.newaxis]
return D
resolution = 1024
subsampling = 3 # subsampling factor
width = 100
n_components = resolution // subsampling
# Compute a wavelet dictionary
D_fixed = ricker_matrix(width=width, resolution=resolution,
n_components=n_components)
D_multi = np.r_[tuple(ricker_matrix(width=w, resolution=resolution,
n_components=n_components // 5)
for w in (10, 50, 100, 500, 1000))]
# Generate a signal
y = np.linspace(0, resolution - 1, resolution)
first_quarter = y < resolution / 4
y[first_quarter] = 3.
y[np.logical_not(first_quarter)] = -1.
# List the different sparse coding methods in the following format:
# (title, transform_algorithm, transform_alpha, transform_n_nonzero_coefs, color)
estimators = [('OMP', 'omp', None, 15, 'navy'),
('Lasso', 'lasso_cd', 2, None, 'turquoise'), ]
lw = 2
plt.figure(figsize=(13, 6))
for subplot, (D, title) in enumerate(zip((D_fixed, D_multi),
('fixed width', 'multiple widths'))):
plt.subplot(1, 2, subplot + 1)
plt.title('Sparse coding against %s dictionary' % title)
plt.plot(y, lw=lw, linestyle='--', label='Original signal')
# Do a wavelet approximation
for title, algo, alpha, n_nonzero, color in estimators:
coder = SparseCoder(dictionary=D, transform_n_nonzero_coefs=n_nonzero,
transform_alpha=alpha, transform_algorithm=algo)
x = coder.transform(y.reshape(1, -1))
density = len(np.flatnonzero(x))
x = np.ravel(np.dot(x, D))
squared_error = np.sum((y - x) ** 2)
plt.plot(x, color=color, lw=lw,
label='%s: %s nonzero coefs,\n%.2f error'
% (title, density, squared_error))
# Soft thresholding debiasing
coder = SparseCoder(dictionary=D, transform_algorithm='threshold',
transform_alpha=20)
x = coder.transform(y.reshape(1, -1))
_, idx = np.where(x != 0)
x[0, idx], _, _, _ = np.linalg.lstsq(D[idx, :].T, y)
x = np.ravel(np.dot(x, D))
squared_error = np.sum((y - x) ** 2)
plt.plot(x, color='darkorange', lw=lw,
label='Thresholding w/ debiasing:\n%d nonzero coefs, %.2f error'
% (len(idx), squared_error))
plt.axis('tight')
plt.legend(shadow=False, loc='best')
plt.subplots_adjust(.04, .07, .97, .90, .09, .2)
plt.show()
| bsd-3-clause |
elijah513/scikit-learn | sklearn/preprocessing/data.py | 113 | 56747 | # Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Andreas Mueller <[email protected]>
# Eric Martin <[email protected]>
# License: BSD 3 clause
from itertools import chain, combinations
import numbers
import warnings
import numpy as np
from scipy import sparse
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..utils import check_array
from ..utils.extmath import row_norms
from ..utils.fixes import combinations_with_replacement as combinations_w_r
from ..utils.sparsefuncs_fast import (inplace_csr_row_normalize_l1,
inplace_csr_row_normalize_l2)
from ..utils.sparsefuncs import (inplace_column_scale, mean_variance_axis,
min_max_axis, inplace_row_scale)
from ..utils.validation import check_is_fitted, FLOAT_DTYPES
zip = six.moves.zip
map = six.moves.map
range = six.moves.range
__all__ = [
'Binarizer',
'KernelCenterer',
'MinMaxScaler',
'MaxAbsScaler',
'Normalizer',
'OneHotEncoder',
'RobustScaler',
'StandardScaler',
'add_dummy_feature',
'binarize',
'normalize',
'scale',
'robust_scale',
'maxabs_scale',
'minmax_scale',
]
def _mean_and_std(X, axis=0, with_mean=True, with_std=True):
"""Compute mean and std deviation for centering, scaling.
Zero valued std components are reset to 1.0 to avoid NaNs when scaling.
"""
X = np.asarray(X)
Xr = np.rollaxis(X, axis)
if with_mean:
mean_ = Xr.mean(axis=0)
else:
mean_ = None
if with_std:
std_ = Xr.std(axis=0)
std_ = _handle_zeros_in_scale(std_)
else:
std_ = None
return mean_, std_
def _handle_zeros_in_scale(scale):
''' Makes sure that whenever scale is zero, we handle it correctly.
This happens in most scalers when we have constant features.'''
# if we are fitting on 1D arrays, scale might be a scalar
if np.isscalar(scale):
if scale == 0:
scale = 1.
elif isinstance(scale, np.ndarray):
scale[scale == 0.0] = 1.0
scale[~np.isfinite(scale)] = 1.0
return scale
def scale(X, axis=0, with_mean=True, with_std=True, copy=True):
"""Standardize a dataset along any axis
Center to the mean and component wise scale to unit variance.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : array-like or CSR matrix.
The data to center and scale.
axis : int (0 by default)
axis used to compute the means and standard deviations along. If 0,
independently standardize each feature, otherwise (if 1) standardize
each sample.
with_mean : boolean, True by default
If True, center the data before scaling.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_mean=False` (in that case, only variance scaling will be
performed on the features of the CSR matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSR matrix.
See also
--------
:class:`sklearn.preprocessing.StandardScaler` to perform centering and
scaling using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
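    Examples
    --------
    A minimal usage sketch (illustrative only; the data below is arbitrary):
    >>> import numpy as np
    >>> from sklearn.preprocessing import scale
    >>> X = np.array([[1., 2.], [3., 4.], [5., 12.]])
    >>> X_scaled = scale(X)  # columns centered to mean 0 and scaled to unit variance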
"""
X = check_array(X, accept_sparse='csr', copy=copy, ensure_2d=False,
warn_on_dtype=True, estimator='the scale function',
dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if with_mean:
            raise ValueError(
                "Cannot center sparse matrices: pass `with_mean=False` "
                "instead. See docstring for motivation and alternatives.")
if axis != 0:
raise ValueError("Can only scale sparse matrix on axis=0, "
" got axis=%d" % axis)
if not sparse.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
_, var = mean_variance_axis(X, axis=0)
var = _handle_zeros_in_scale(var)
inplace_column_scale(X, 1 / np.sqrt(var))
else:
X = np.asarray(X)
mean_, std_ = _mean_and_std(
X, axis, with_mean=with_mean, with_std=with_std)
if copy:
X = X.copy()
# Xr is a view on the original array that enables easy use of
# broadcasting on the axis in which we are interested in
Xr = np.rollaxis(X, axis)
if with_mean:
Xr -= mean_
mean_1 = Xr.mean(axis=0)
# Verify that mean_1 is 'close to zero'. If X contains very
# large values, mean_1 can also be very large, due to a lack of
# precision of mean_. In this case, a pre-scaling of the
# concerned feature is efficient, for instance by its mean or
# maximum.
if not np.allclose(mean_1, 0):
warnings.warn("Numerical issues were encountered "
"when centering the data "
"and might not be solved. Dataset may "
"contain too large values. You may need "
"to prescale your features.")
Xr -= mean_1
if with_std:
Xr /= std_
if with_mean:
mean_2 = Xr.mean(axis=0)
# If mean_2 is not 'close to zero', it comes from the fact that
# std_ is very small so that mean_2 = mean_1/std_ > 0, even if
# mean_1 was close to zero. The problem is thus essentially due
# to the lack of precision of mean_. A solution is then to
            # subtract the mean again:
if not np.allclose(mean_2, 0):
warnings.warn("Numerical issues were encountered "
"when scaling the data "
"and might not be solved. The standard "
"deviation of the data is probably "
"very close to 0. ")
Xr -= mean_2
return X
class MinMaxScaler(BaseEstimator, TransformerMixin):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
feature_range: tuple (min, max), default=(0, 1)
Desired range of transformed data.
copy : boolean, optional, default True
Set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array).
Attributes
----------
min_ : ndarray, shape (n_features,)
Per feature adjustment for minimum.
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
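    Examples
    --------
    A minimal usage sketch (illustrative only; the data below is arbitrary):
    >>> import numpy as np
    >>> from sklearn.preprocessing import MinMaxScaler
    >>> X = np.array([[1., 2.], [3., 6.], [5., 10.]])
    >>> scaler = MinMaxScaler(feature_range=(0, 1)).fit(X)
    >>> X_scaled = scaler.transform(X)  # each column is rescaled to [0, 1]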
"""
def __init__(self, feature_range=(0, 1), copy=True):
self.feature_range = feature_range
self.copy = copy
def fit(self, X, y=None):
"""Compute the minimum and maximum to be used for later scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
X = check_array(X, copy=self.copy, ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
feature_range = self.feature_range
if feature_range[0] >= feature_range[1]:
raise ValueError("Minimum of desired feature range must be smaller"
" than maximum. Got %s." % str(feature_range))
data_min = np.min(X, axis=0)
data_range = np.max(X, axis=0) - data_min
data_range = _handle_zeros_in_scale(data_range)
self.scale_ = (feature_range[1] - feature_range[0]) / data_range
self.min_ = feature_range[0] - data_min * self.scale_
self.data_range = data_range
self.data_min = data_min
return self
def transform(self, X):
"""Scaling features of X according to feature_range.
Parameters
----------
X : array-like with shape [n_samples, n_features]
Input data that will be transformed.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, ensure_2d=False)
X *= self.scale_
X += self.min_
return X
def inverse_transform(self, X):
"""Undo the scaling of X according to feature_range.
Parameters
----------
X : array-like with shape [n_samples, n_features]
Input data that will be transformed.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, ensure_2d=False)
X -= self.min_
X /= self.scale_
return X
def minmax_scale(X, feature_range=(0, 1), axis=0, copy=True):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
feature_range: tuple (min, max), default=(0, 1)
Desired range of transformed data.
axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
"""
s = MinMaxScaler(feature_range=feature_range, copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
class StandardScaler(BaseEstimator, TransformerMixin):
"""Standardize features by removing the mean and scaling to unit variance
Centering and scaling happen independently on each feature by computing
the relevant statistics on the samples in the training set. Mean and
standard deviation are then stored to be used on later data using the
`transform` method.
Standardization of a dataset is a common requirement for many
machine learning estimators: they might behave badly if the
individual feature do not more or less look like standard normally
distributed data (e.g. Gaussian with 0 mean and unit variance).
For instance many elements used in the objective function of
a learning algorithm (such as the RBF kernel of Support Vector
Machines or the L1 and L2 regularizers of linear models) assume that
all features are centered around 0 and have variance in the same
order. If a feature has a variance that is orders of magnitude larger
that others, it might dominate the objective function and make the
estimator unable to learn from other features correctly as expected.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
with_mean : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
mean_ : array of floats with shape [n_features]
The mean value for each feature in the training set.
std_ : array of floats with shape [n_features]
The standard deviation for each feature in the training set.
Set to one if the standard deviation is zero for a given feature.
See also
--------
:func:`sklearn.preprocessing.scale` to perform centering and
scaling without using the ``Transformer`` object oriented API
:class:`sklearn.decomposition.RandomizedPCA` with `whiten=True`
to further remove the linear correlation across features.
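    Examples
    --------
    A minimal usage sketch (illustrative only; the data below is arbitrary):
    >>> import numpy as np
    >>> from sklearn.preprocessing import StandardScaler
    >>> X = np.array([[0., 0.], [1., 1.], [2., 2.]])
    >>> scaler = StandardScaler().fit(X)
    >>> X_scaled = scaler.transform(X)  # columns now have mean 0 and unit variance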
"""
def __init__(self, copy=True, with_mean=True, with_std=True):
self.with_mean = with_mean
self.with_std = with_std
self.copy = copy
def fit(self, X, y=None):
"""Compute the mean and std to be used for later scaling.
Parameters
----------
X : array-like or CSR matrix with shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
"""
X = check_array(X, accept_sparse='csr', copy=self.copy,
ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
self.mean_ = None
if self.with_std:
var = mean_variance_axis(X, axis=0)[1]
self.std_ = np.sqrt(var)
self.std_ = _handle_zeros_in_scale(self.std_)
else:
self.std_ = None
return self
else:
self.mean_, self.std_ = _mean_and_std(
X, axis=0, with_mean=self.with_mean, with_std=self.with_std)
return self
def transform(self, X, y=None, copy=None):
"""Perform standardization by centering and scaling
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to scale along the features axis.
"""
check_is_fitted(self, 'std_')
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr', copy=copy,
ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
if self.std_ is not None:
inplace_column_scale(X, 1 / self.std_)
else:
if self.with_mean:
X -= self.mean_
if self.with_std:
X /= self.std_
return X
def inverse_transform(self, X, copy=None):
"""Scale back the data to the original representation
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to scale along the features axis.
"""
check_is_fitted(self, 'std_')
copy = copy if copy is not None else self.copy
if sparse.issparse(X):
if self.with_mean:
                raise ValueError(
                    "Cannot uncenter sparse matrices: pass `with_mean=False` "
                    "instead. See docstring for motivation and alternatives.")
if not sparse.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
if self.std_ is not None:
inplace_column_scale(X, self.std_)
else:
X = np.asarray(X)
if copy:
X = X.copy()
if self.with_std:
X *= self.std_
if self.with_mean:
X += self.mean_
return X
class MaxAbsScaler(BaseEstimator, TransformerMixin):
"""Scale each feature by its maximum absolute value.
    This estimator scales each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0. It does not shift/center the data, and
thus does not destroy any sparsity.
This scaler can also be applied to sparse CSR or CSC matrices.
Parameters
----------
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
Attributes
----------
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
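    Examples
    --------
    A minimal usage sketch (illustrative only; the data below is arbitrary):
    >>> import numpy as np
    >>> from sklearn.preprocessing import MaxAbsScaler
    >>> X = np.array([[1., -2.], [2., 4.], [-4., 8.]])
    >>> scaler = MaxAbsScaler().fit(X)
    >>> X_scaled = scaler.transform(X)  # each column divided by its max absolute value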
"""
def __init__(self, copy=True):
self.copy = copy
def fit(self, X, y=None):
"""Compute the minimum and maximum to be used for later scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
mins, maxs = min_max_axis(X, axis=0)
scales = np.maximum(np.abs(mins), np.abs(maxs))
else:
scales = np.abs(X).max(axis=0)
scales = np.array(scales)
scales = scales.reshape(-1)
self.scale_ = _handle_zeros_in_scale(scales)
return self
def transform(self, X, y=None):
"""Scale the data
Parameters
----------
X : array-like or CSR matrix.
The data that should be scaled.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if X.shape[0] == 1:
inplace_row_scale(X, 1.0 / self.scale_)
else:
inplace_column_scale(X, 1.0 / self.scale_)
else:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : array-like or CSR matrix.
The data that should be transformed back.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if X.shape[0] == 1:
inplace_row_scale(X, self.scale_)
else:
inplace_column_scale(X, self.scale_)
else:
X *= self.scale_
return X
def maxabs_scale(X, axis=0, copy=True):
"""Scale each feature to the [-1, 1] range without breaking the sparsity.
This estimator scales each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0.
This scaler can also be applied to sparse CSR or CSC matrices.
Parameters
----------
axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
"""
s = MaxAbsScaler(copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
class RobustScaler(BaseEstimator, TransformerMixin):
"""Scale features using statistics that are robust to outliers.
This Scaler removes the median and scales the data according to
the Interquartile Range (IQR). The IQR is the range between the 1st
quartile (25th quantile) and the 3rd quartile (75th quantile).
Centering and scaling happen independently on each feature (or each
sample, depending on the `axis` argument) by computing the relevant
statistics on the samples in the training set. Median and interquartile
range are then stored to be used on later data using the `transform`
method.
Standardization of a dataset is a common requirement for many
machine learning estimators. Typically this is done by removing the mean
and scaling to unit variance. However, outliers can often influence the
sample mean / variance in a negative way. In such cases, the median and
the interquartile range often give better results.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
with_centering : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_scaling : boolean, True by default
If True, scale the data to interquartile range.
copy : boolean, optional, default is True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
center_ : array of floats
The median value for each feature in the training set.
scale_ : array of floats
The (scaled) interquartile range for each feature in the training set.
See also
--------
:class:`sklearn.preprocessing.StandardScaler` to perform centering
and scaling using mean and variance.
:class:`sklearn.decomposition.RandomizedPCA` with `whiten=True`
to further remove the linear correlation across features.
Notes
-----
See examples/preprocessing/plot_robust_scaling.py for an example.
http://en.wikipedia.org/wiki/Median_(statistics)
http://en.wikipedia.org/wiki/Interquartile_range
"""
def __init__(self, with_centering=True, with_scaling=True, copy=True):
self.with_centering = with_centering
self.with_scaling = with_scaling
self.copy = copy
def _check_array(self, X, copy):
"""Makes sure centering is not enabled for sparse matrices."""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if self.with_centering:
raise ValueError(
"Cannot center sparse matrices: use `with_centering=False`"
" instead. See docstring for motivation and alternatives.")
return X
def fit(self, X, y=None):
"""Compute the median and quantiles to be used for scaling.
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to compute the median and quantiles
used for later scaling along the features axis.
"""
if sparse.issparse(X):
raise TypeError("RobustScaler cannot be fitted on sparse inputs")
X = self._check_array(X, self.copy)
if self.with_centering:
self.center_ = np.median(X, axis=0)
if self.with_scaling:
q = np.percentile(X, (25, 75), axis=0)
self.scale_ = (q[1] - q[0])
self.scale_ = _handle_zeros_in_scale(self.scale_)
return self
def transform(self, X, y=None):
"""Center and scale the data
Parameters
----------
X : array-like or CSR matrix.
The data used to scale along the specified axis.
"""
if self.with_centering:
check_is_fitted(self, 'center_')
if self.with_scaling:
check_is_fitted(self, 'scale_')
X = self._check_array(X, self.copy)
if sparse.issparse(X):
if self.with_scaling:
if X.shape[0] == 1:
inplace_row_scale(X, 1.0 / self.scale_)
                else:
inplace_column_scale(X, 1.0 / self.scale_)
else:
if self.with_centering:
X -= self.center_
if self.with_scaling:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : array-like or CSR matrix.
The data used to scale along the specified axis.
"""
if self.with_centering:
check_is_fitted(self, 'center_')
if self.with_scaling:
check_is_fitted(self, 'scale_')
X = self._check_array(X, self.copy)
if sparse.issparse(X):
if self.with_scaling:
if X.shape[0] == 1:
inplace_row_scale(X, self.scale_)
else:
inplace_column_scale(X, self.scale_)
else:
if self.with_scaling:
X *= self.scale_
if self.with_centering:
X += self.center_
return X
def robust_scale(X, axis=0, with_centering=True, with_scaling=True, copy=True):
"""Standardize a dataset along any axis
Center to the median and component wise scale
according to the interquartile range.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : array-like.
The data to center and scale.
axis : int (0 by default)
axis used to compute the medians and IQR along. If 0,
independently scale each feature, otherwise (if 1) scale
each sample.
with_centering : boolean, True by default
If True, center the data before scaling.
with_scaling : boolean, True by default
        If True, scale the data to the interquartile range.
copy : boolean, optional, default is True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_centering=False` (in that case, only variance scaling will be
performed on the features of the CSR matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSR matrix.
See also
--------
:class:`sklearn.preprocessing.RobustScaler` to perform centering and
scaling using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
s = RobustScaler(with_centering=with_centering, with_scaling=with_scaling,
copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
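# Minimal sketch showing why the median/IQR are used (values are illustrative):
# a single large outlier barely changes the scaling of the other samples.
# The helper name is hypothetical.
def _example_robust_scaling_with_outlier():
    import numpy as np
    X = np.array([[1.], [2.], [3.], [4.], [100.]])
    return RobustScaler().fit_transform(X).ravel()
    # median is 3 and the IQR is 2, so the result is [-1., -0.5, 0., 0.5, 48.5];
    # the inliers keep a compact range despite the outlier.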
class PolynomialFeatures(BaseEstimator, TransformerMixin):
"""Generate polynomial and interaction features.
Generate a new feature matrix consisting of all polynomial combinations
of the features with degree less than or equal to the specified degree.
For example, if an input sample is two dimensional and of the form
[a, b], the degree-2 polynomial features are [1, a, b, a^2, ab, b^2].
Parameters
----------
degree : integer
The degree of the polynomial features. Default = 2.
interaction_only : boolean, default = False
If true, only interaction features are produced: features that are
products of at most ``degree`` *distinct* input features (so not
``x[1] ** 2``, ``x[0] * x[2] ** 3``, etc.).
include_bias : boolean
If True (default), then include a bias column, the feature in which
all polynomial powers are zero (i.e. a column of ones - acts as an
intercept term in a linear model).
Examples
--------
>>> X = np.arange(6).reshape(3, 2)
>>> X
array([[0, 1],
[2, 3],
[4, 5]])
>>> poly = PolynomialFeatures(2)
>>> poly.fit_transform(X)
array([[ 1, 0, 1, 0, 0, 1],
[ 1, 2, 3, 4, 6, 9],
[ 1, 4, 5, 16, 20, 25]])
>>> poly = PolynomialFeatures(interaction_only=True)
>>> poly.fit_transform(X)
array([[ 1, 0, 1, 0],
[ 1, 2, 3, 6],
[ 1, 4, 5, 20]])
Attributes
----------
powers_ : array, shape (n_input_features, n_output_features)
powers_[i, j] is the exponent of the jth input in the ith output.
n_input_features_ : int
The total number of input features.
n_output_features_ : int
The total number of polynomial output features. The number of output
features is computed by iterating over all suitably sized combinations
of input features.
Notes
-----
Be aware that the number of features in the output array scales
polynomially in the number of features of the input array, and
exponentially in the degree. High degrees can cause overfitting.
See :ref:`examples/linear_model/plot_polynomial_interpolation.py
<example_linear_model_plot_polynomial_interpolation.py>`
"""
def __init__(self, degree=2, interaction_only=False, include_bias=True):
self.degree = degree
self.interaction_only = interaction_only
self.include_bias = include_bias
@staticmethod
def _combinations(n_features, degree, interaction_only, include_bias):
comb = (combinations if interaction_only else combinations_w_r)
start = int(not include_bias)
return chain.from_iterable(comb(range(n_features), i)
for i in range(start, degree + 1))
@property
def powers_(self):
check_is_fitted(self, 'n_input_features_')
combinations = self._combinations(self.n_input_features_, self.degree,
self.interaction_only,
self.include_bias)
return np.vstack(np.bincount(c, minlength=self.n_input_features_)
for c in combinations)
def fit(self, X, y=None):
"""
Compute number of output features.
"""
n_samples, n_features = check_array(X).shape
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
self.n_input_features_ = n_features
self.n_output_features_ = sum(1 for _ in combinations)
return self
def transform(self, X, y=None):
"""Transform data to polynomial features
Parameters
----------
X : array with shape [n_samples, n_features]
The data to transform, row by row.
Returns
-------
XP : np.ndarray shape [n_samples, NP]
The matrix of features, where NP is the number of polynomial
features generated from the combination of inputs.
"""
check_is_fitted(self, ['n_input_features_', 'n_output_features_'])
X = check_array(X)
n_samples, n_features = X.shape
if n_features != self.n_input_features_:
raise ValueError("X shape does not match training shape")
# allocate output data
XP = np.empty((n_samples, self.n_output_features_), dtype=X.dtype)
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
for i, c in enumerate(combinations):
XP[:, i] = X[:, c].prod(1)
return XP
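# Minimal sketch relating powers_ to the docstring example above (degree=2,
# two input features a and b). The helper name is hypothetical.
def _example_polynomial_powers():
    import numpy as np
    poly = PolynomialFeatures(degree=2).fit(np.arange(6).reshape(3, 2))
    return poly.powers_
    # expected, one row of exponents per output column:
    # array([[0, 0],   # bias (1)
    #        [1, 0],   # a
    #        [0, 1],   # b
    #        [2, 0],   # a^2
    #        [1, 1],   # a*b
    #        [0, 2]])  # b^2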
def normalize(X, norm='l2', axis=1, copy=True):
"""Scale input vectors individually to unit norm (vector length).
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to normalize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
The norm to use to normalize each non zero sample (or each non-zero
feature if axis is 0).
axis : 0 or 1, optional (1 by default)
axis used to normalize the data along. If 1, independently normalize
each sample, otherwise (if 0) normalize each feature.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
See also
--------
:class:`sklearn.preprocessing.Normalizer` to perform normalization
using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
if norm not in ('l1', 'l2', 'max'):
raise ValueError("'%s' is not a supported norm" % norm)
if axis == 0:
sparse_format = 'csc'
elif axis == 1:
sparse_format = 'csr'
else:
raise ValueError("'%d' is not a supported axis" % axis)
X = check_array(X, sparse_format, copy=copy, warn_on_dtype=True,
estimator='the normalize function', dtype=FLOAT_DTYPES)
if axis == 0:
X = X.T
if sparse.issparse(X):
if norm == 'l1':
inplace_csr_row_normalize_l1(X)
elif norm == 'l2':
inplace_csr_row_normalize_l2(X)
elif norm == 'max':
_, norms = min_max_axis(X, 1)
norms = norms.repeat(np.diff(X.indptr))
mask = norms != 0
X.data[mask] /= norms[mask]
else:
if norm == 'l1':
norms = np.abs(X).sum(axis=1)
elif norm == 'l2':
norms = row_norms(X)
elif norm == 'max':
norms = np.max(X, axis=1)
norms = _handle_zeros_in_scale(norms)
X /= norms[:, np.newaxis]
if axis == 0:
X = X.T
return X
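# Minimal sketch for the function above (values are illustrative): with the
# default l2 norm each row is divided by its Euclidean length. The helper
# name is hypothetical.
def _example_row_normalization():
    import numpy as np
    X = np.array([[3., 4.], [0., 0.]])
    return normalize(X)
    # expected: [[0.6, 0.8], [0., 0.]] -- the all-zero row is left untouched
    # thanks to _handle_zeros_in_scale.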
class Normalizer(BaseEstimator, TransformerMixin):
"""Normalize samples individually to unit norm.
Each sample (i.e. each row of the data matrix) with at least one
non zero component is rescaled independently of other samples so
that its norm (l1 or l2) equals one.
This transformer is able to work both with dense numpy arrays and
scipy.sparse matrix (use CSR format if you want to avoid the burden of
a copy / conversion).
Scaling inputs to unit norms is a common operation for text
classification or clustering for instance. For instance the dot
product of two l2-normalized TF-IDF vectors is the cosine similarity
of the vectors and is the base similarity metric for the Vector
Space Model commonly used by the Information Retrieval community.
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
The norm to use to normalize each non zero sample.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix).
Notes
-----
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
See also
--------
:func:`sklearn.preprocessing.normalize` equivalent function
without the object oriented API
"""
def __init__(self, norm='l2', copy=True):
self.norm = norm
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
X = check_array(X, accept_sparse='csr')
return self
def transform(self, X, y=None, copy=None):
"""Scale each non zero row of X to unit norm
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to normalize, row by row. scipy.sparse matrices should be
in CSR format to avoid an un-necessary copy.
"""
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr')
return normalize(X, norm=self.norm, axis=1, copy=copy)
def binarize(X, threshold=0.0, copy=True):
"""Boolean thresholding of array-like or scipy.sparse matrix
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR or CSC format to avoid an
un-necessary copy.
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy
(if the input is already a numpy array or a scipy.sparse CSR / CSC
matrix and if axis is 1).
See also
--------
:class:`sklearn.preprocessing.Binarizer` to perform binarization
using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
X = check_array(X, accept_sparse=['csr', 'csc'], copy=copy)
if sparse.issparse(X):
if threshold < 0:
raise ValueError('Cannot binarize a sparse matrix with threshold '
'< 0')
cond = X.data > threshold
not_cond = np.logical_not(cond)
X.data[cond] = 1
X.data[not_cond] = 0
X.eliminate_zeros()
else:
cond = X > threshold
not_cond = np.logical_not(cond)
X[cond] = 1
X[not_cond] = 0
return X
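# Minimal sketch for the function above (values are illustrative): entries
# strictly above the threshold become 1, everything else becomes 0. The
# helper name is hypothetical.
def _example_binarize():
    import numpy as np
    X = np.array([[0.5, -1., 2.], [0., 0.1, -0.2]])
    return binarize(X, threshold=0.0)
    # expected:
    # array([[ 1.,  0.,  1.],
    #        [ 0.,  1.,  0.]])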
class Binarizer(BaseEstimator, TransformerMixin):
"""Binarize data (set feature values to 0 or 1) according to a threshold
Values greater than the threshold map to 1, while values less than
or equal to the threshold map to 0. With the default threshold of 0,
only positive values map to 1.
Binarization is a common operation on text count data where the
analyst can decide to only consider the presence or absence of a
feature rather than a quantified number of occurrences for instance.
It can also be used as a pre-processing step for estimators that
consider boolean random variables (e.g. modelled using the Bernoulli
distribution in a Bayesian setting).
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy (if
the input is already a numpy array or a scipy.sparse CSR matrix).
Notes
-----
If the input is a sparse matrix, only the non-zero values are subject
to update by the Binarizer class.
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
"""
def __init__(self, threshold=0.0, copy=True):
self.threshold = threshold
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
check_array(X, accept_sparse='csr')
return self
def transform(self, X, y=None, copy=None):
"""Binarize each element of X
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
"""
copy = copy if copy is not None else self.copy
return binarize(X, threshold=self.threshold, copy=copy)
class KernelCenterer(BaseEstimator, TransformerMixin):
"""Center a kernel matrix
Let K(x, z) be a kernel defined by phi(x)^T phi(z), where phi is a
function mapping x to a Hilbert space. KernelCenterer centers (i.e.,
    normalizes to have zero mean) the data without explicitly computing phi(x).
It is equivalent to centering phi(x) with
sklearn.preprocessing.StandardScaler(with_std=False).
Read more in the :ref:`User Guide <kernel_centering>`.
"""
def fit(self, K, y=None):
"""Fit KernelCenterer
Parameters
----------
K : numpy array of shape [n_samples, n_samples]
Kernel matrix.
Returns
-------
self : returns an instance of self.
"""
K = check_array(K)
n_samples = K.shape[0]
self.K_fit_rows_ = np.sum(K, axis=0) / n_samples
self.K_fit_all_ = self.K_fit_rows_.sum() / n_samples
return self
def transform(self, K, y=None, copy=True):
"""Center kernel matrix.
Parameters
----------
K : numpy array of shape [n_samples1, n_samples2]
Kernel matrix.
copy : boolean, optional, default True
Set to False to perform inplace computation.
Returns
-------
K_new : numpy array of shape [n_samples1, n_samples2]
"""
check_is_fitted(self, 'K_fit_all_')
K = check_array(K)
if copy:
K = K.copy()
K_pred_cols = (np.sum(K, axis=1) /
self.K_fit_rows_.shape[0])[:, np.newaxis]
K -= self.K_fit_rows_
K -= K_pred_cols
K += self.K_fit_all_
return K
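# Minimal consistency sketch (random data, for illustration only): for a
# linear kernel K = X X^T, centering K with the class above is equivalent to
# centering X first and recomputing the kernel. The helper name is hypothetical.
def _example_kernel_centering_linear():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.rand(5, 3)
    K = X.dot(X.T)
    K_centered = KernelCenterer().fit(K).transform(K)
    X_centered = X - X.mean(axis=0)
    return np.allclose(K_centered, X_centered.dot(X_centered.T))  # expected: True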
def add_dummy_feature(X, value=1.0):
"""Augment dataset with an additional dummy feature.
This is useful for fitting an intercept term with implementations which
cannot otherwise fit it directly.
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
Data.
value : float
Value to use for the dummy feature.
Returns
-------
X : array or scipy.sparse matrix with shape [n_samples, n_features + 1]
Same data with dummy feature added as first column.
Examples
--------
>>> from sklearn.preprocessing import add_dummy_feature
>>> add_dummy_feature([[0, 1], [1, 0]])
array([[ 1., 0., 1.],
[ 1., 1., 0.]])
"""
X = check_array(X, accept_sparse=['csc', 'csr', 'coo'])
n_samples, n_features = X.shape
shape = (n_samples, n_features + 1)
if sparse.issparse(X):
if sparse.isspmatrix_coo(X):
# Shift columns to the right.
col = X.col + 1
# Column indices of dummy feature are 0 everywhere.
col = np.concatenate((np.zeros(n_samples), col))
# Row indices of dummy feature are 0, ..., n_samples-1.
row = np.concatenate((np.arange(n_samples), X.row))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.coo_matrix((data, (row, col)), shape)
elif sparse.isspmatrix_csc(X):
# Shift index pointers since we need to add n_samples elements.
indptr = X.indptr + n_samples
# indptr[0] must be 0.
indptr = np.concatenate((np.array([0]), indptr))
# Row indices of dummy feature are 0, ..., n_samples-1.
indices = np.concatenate((np.arange(n_samples), X.indices))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.csc_matrix((data, indices, indptr), shape)
else:
klass = X.__class__
return klass(add_dummy_feature(X.tocoo(), value))
else:
return np.hstack((np.ones((n_samples, 1)) * value, X))
def _transform_selected(X, transform, selected="all", copy=True):
"""Apply a transform function to portion of selected features
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Dense array or sparse matrix.
transform : callable
A callable transform(X) -> X_transformed
copy : boolean, optional
Copy X even if it could be avoided.
selected: "all" or array of indices or mask
Specify which features to apply the transform to.
Returns
-------
X : array or sparse matrix, shape=(n_samples, n_features_new)
"""
if selected == "all":
return transform(X)
X = check_array(X, accept_sparse='csc', copy=copy)
if len(selected) == 0:
return X
n_features = X.shape[1]
ind = np.arange(n_features)
sel = np.zeros(n_features, dtype=bool)
sel[np.asarray(selected)] = True
not_sel = np.logical_not(sel)
n_selected = np.sum(sel)
if n_selected == 0:
# No features selected.
return X
elif n_selected == n_features:
# All features selected.
return transform(X)
else:
X_sel = transform(X[:, ind[sel]])
X_not_sel = X[:, ind[not_sel]]
if sparse.issparse(X_sel) or sparse.issparse(X_not_sel):
return sparse.hstack((X_sel, X_not_sel))
else:
return np.hstack((X_sel, X_not_sel))
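# Minimal sketch for the helper above (values are illustrative): the transform
# is applied only to the selected column, and the untouched columns are stacked
# to the right of the transformed ones. The helper name is hypothetical.
def _example_transform_selected():
    import numpy as np
    X = np.array([[1., 10.], [2., 20.], [3., 30.]])
    return _transform_selected(X, lambda Z: Z ** 2, selected=[1])
    # expected -- the transformed column comes first, the untouched column
    # is stacked to the right:
    # array([[ 100.,    1.],
    #        [ 400.,    2.],
    #        [ 900.,    3.]])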
class OneHotEncoder(BaseEstimator, TransformerMixin):
"""Encode categorical integer features using a one-hot aka one-of-K scheme.
The input to this transformer should be a matrix of integers, denoting
the values taken on by categorical (discrete) features. The output will be
a sparse matrix where each column corresponds to one possible value of one
feature. It is assumed that input features take on values in the range
[0, n_values).
This encoding is needed for feeding categorical data to many scikit-learn
estimators, notably linear models and SVMs with the standard kernels.
Read more in the :ref:`User Guide <preprocessing_categorical_features>`.
Parameters
----------
n_values : 'auto', int or array of ints
Number of values per feature.
- 'auto' : determine value range from training data.
- int : maximum value for all features.
- array : maximum value per feature.
categorical_features: "all" or array of indices or mask
Specify what features are treated as categorical.
- 'all' (default): All features are treated as categorical.
- array of indices: Array of categorical feature indices.
- mask: Array of length n_features and with dtype=bool.
Non-categorical features are always stacked to the right of the matrix.
dtype : number type, default=np.float
Desired dtype of output.
sparse : boolean, default=True
Will return sparse matrix if set True else will return an array.
handle_unknown : str, 'error' or 'ignore'
        Whether to raise an error or ignore if an unknown categorical feature is
present during transform.
Attributes
----------
active_features_ : array
Indices for active features, meaning values that actually occur
in the training set. Only available when n_values is ``'auto'``.
feature_indices_ : array of shape (n_features,)
Indices to feature ranges.
Feature ``i`` in the original data is mapped to features
from ``feature_indices_[i]`` to ``feature_indices_[i+1]``
(and then potentially masked by `active_features_` afterwards)
n_values_ : array of shape (n_features,)
Maximum number of values per feature.
Examples
--------
Given a dataset with three features and two samples, we let the encoder
find the maximum value per feature and transform the data to a binary
one-hot encoding.
>>> from sklearn.preprocessing import OneHotEncoder
>>> enc = OneHotEncoder()
>>> enc.fit([[0, 0, 3], [1, 1, 0], [0, 2, 1], \
[1, 0, 2]]) # doctest: +ELLIPSIS
OneHotEncoder(categorical_features='all', dtype=<... 'float'>,
handle_unknown='error', n_values='auto', sparse=True)
>>> enc.n_values_
array([2, 3, 4])
>>> enc.feature_indices_
array([0, 2, 5, 9])
>>> enc.transform([[0, 1, 1]]).toarray()
array([[ 1., 0., 0., 1., 0., 0., 1., 0., 0.]])
See also
--------
sklearn.feature_extraction.DictVectorizer : performs a one-hot encoding of
dictionary items (also handles string-valued features).
sklearn.feature_extraction.FeatureHasher : performs an approximate one-hot
encoding of dictionary items or strings.
"""
def __init__(self, n_values="auto", categorical_features="all",
dtype=np.float, sparse=True, handle_unknown='error'):
self.n_values = n_values
self.categorical_features = categorical_features
self.dtype = dtype
self.sparse = sparse
self.handle_unknown = handle_unknown
def fit(self, X, y=None):
"""Fit OneHotEncoder to X.
Parameters
----------
X : array-like, shape=(n_samples, n_feature)
Input array of type int.
Returns
-------
self
"""
self.fit_transform(X)
return self
def _fit_transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
if self.n_values == 'auto':
n_values = np.max(X, axis=0) + 1
elif isinstance(self.n_values, numbers.Integral):
if (np.max(X, axis=0) >= self.n_values).any():
raise ValueError("Feature out of bounds for n_values=%d"
% self.n_values)
n_values = np.empty(n_features, dtype=np.int)
n_values.fill(self.n_values)
else:
try:
n_values = np.asarray(self.n_values, dtype=int)
except (ValueError, TypeError):
raise TypeError("Wrong type for parameter `n_values`. Expected"
" 'auto', int or array of ints, got %r"
                                % type(self.n_values))
if n_values.ndim < 1 or n_values.shape[0] != X.shape[1]:
raise ValueError("Shape mismatch: if n_values is an array,"
" it has to be of shape (n_features,).")
self.n_values_ = n_values
n_values = np.hstack([[0], n_values])
indices = np.cumsum(n_values)
self.feature_indices_ = indices
column_indices = (X + indices[:-1]).ravel()
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)
data = np.ones(n_samples * n_features)
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if self.n_values == 'auto':
mask = np.array(out.sum(axis=0)).ravel() != 0
active_features = np.where(mask)[0]
out = out[:, active_features]
self.active_features_ = active_features
return out if self.sparse else out.toarray()
def fit_transform(self, X, y=None):
"""Fit OneHotEncoder to X, then transform X.
Equivalent to self.fit(X).transform(X), but more convenient and more
efficient. See fit for the parameters, transform for the return value.
"""
return _transform_selected(X, self._fit_transform,
self.categorical_features, copy=True)
def _transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
indices = self.feature_indices_
if n_features != indices.shape[0] - 1:
raise ValueError("X has different shape than during fitting."
" Expected %d, got %d."
% (indices.shape[0] - 1, n_features))
        # We use only those categorical features of X that were seen during
        # fit, i.e. those with values less than n_values_, using the mask.
# This means, if self.handle_unknown is "ignore", the row_indices and
# col_indices corresponding to the unknown categorical feature are
# ignored.
mask = (X < self.n_values_).ravel()
if np.any(~mask):
if self.handle_unknown not in ['error', 'ignore']:
raise ValueError("handle_unknown should be either error or "
"unknown got %s" % self.handle_unknown)
if self.handle_unknown == 'error':
raise ValueError("unknown categorical feature present %s "
"during transform." % X[~mask])
column_indices = (X + indices[:-1]).ravel()[mask]
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)[mask]
data = np.ones(np.sum(mask))
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if self.n_values == 'auto':
out = out[:, self.active_features_]
return out if self.sparse else out.toarray()
def transform(self, X):
"""Transform X using one-hot encoding.
Parameters
----------
X : array-like, shape=(n_samples, n_features)
Input array of type int.
Returns
-------
        X_out : sparse matrix if sparse=True else a 2-d array
Transformed input.
"""
return _transform_selected(X, self._transform,
self.categorical_features, copy=True)
| bsd-3-clause |
nooperpudd/trading-with-python | lib/backtest.py | 74 | 7381 | #-------------------------------------------------------------------------------
# Name: backtest
# Purpose: perform routine backtesting tasks.
# This module should be usable as a stand-alone library outside of the TWP package.
#
# Author: Jev Kuznetsov
#
# Created: 03/07/2014
# Copyright: (c) Jev Kuznetsov 2013
# Licence: BSD
#-------------------------------------------------------------------------------
import pandas as pd
import matplotlib.pyplot as plt
import sys
import numpy as np
def tradeBracket(price,entryBar,upper=None, lower=None, timeout=None):
'''
    Trade a bracket on a price series; return the exit price and the exit bar number (relative to the entry bar).
Input
------
price : numpy array of price values
entryBar: entry bar number, *determines entry price*
upper : high stop
lower : low stop
timeout : max number of periods to hold
Returns exit price and number of bars held
'''
assert isinstance(price, np.ndarray) , 'price must be a numpy array'
# create list of exit indices and add max trade duration. Exits are relative to entry bar
if timeout: # set trade length to timeout or series length
exits = [min(timeout,len(price)-entryBar-1)]
else:
exits = [len(price)-entryBar-1]
p = price[entryBar:entryBar+exits[0]+1] # subseries of price
# extend exits list with conditional exits
# check upper bracket
if upper:
assert upper>p[0] , 'Upper bracket must be higher than entry price '
idx = np.where(p>upper)[0] # find where price is higher than the upper bracket
if idx.any():
            exits.append(idx[0]) # append first occurrence
# same for lower bracket
if lower:
assert lower<p[0] , 'Lower bracket must be lower than entry price '
idx = np.where(p<lower)[0]
if idx.any():
exits.append(idx[0])
exitBar = min(exits) # choose first exit
return p[exitBar], exitBar
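# Minimal sketch with made-up prices: enter at bar 1 (price 101) with a take
# profit above 102.5 and a stop below 99.5; the low stop is hit first, two
# bars after entry. The helper name is hypothetical.
def _example_trade_bracket():
    import numpy as np
    prices = np.array([100., 101., 102., 99., 98., 103.])
    return tradeBracket(prices, entryBar=1, upper=102.5, lower=99.5)
    # expected: (99.0, 2)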
class Backtest(object):
"""
Backtest class, simple vectorized one. Works with pandas objects.
"""
def __init__(self,price, signal, signalType='capital',initialCash = 0, roundShares=True):
"""
Arguments:
*price* Series with instrument price.
*signal* Series with capital to invest (long+,short-) or number of shares.
        *signalType* 'capital' to bet an amount of capital or 'shares' to trade a number of shares; 'capital' is the default.
*initialCash* starting cash.
*roundShares* round off number of shares to integers
"""
#TODO: add auto rebalancing
# check for correct input
assert signalType in ['capital','shares'], "Wrong signal type provided, must be 'capital' or 'shares'"
#save internal settings to a dict
self.settings = {'signalType':signalType}
# first thing to do is to clean up the signal, removing nans and duplicate entries or exits
self.signal = signal.ffill().fillna(0)
# now find dates with a trade
tradeIdx = self.signal.diff().fillna(0) !=0 # days with trades are set to True
if signalType == 'shares':
self.trades = self.signal[tradeIdx] # selected rows where tradeDir changes value. trades are in Shares
elif signalType =='capital':
self.trades = (self.signal[tradeIdx]/price[tradeIdx])
if roundShares:
self.trades = self.trades.round()
# now create internal data structure
self.data = pd.DataFrame(index=price.index , columns = ['price','shares','value','cash','pnl'])
self.data['price'] = price
self.data['shares'] = self.trades.reindex(self.data.index).ffill().fillna(0)
self.data['value'] = self.data['shares'] * self.data['price']
delta = self.data['shares'].diff() # shares bought sold
self.data['cash'] = (-delta*self.data['price']).fillna(0).cumsum()+initialCash
self.data['pnl'] = self.data['cash']+self.data['value']-initialCash
@property
def sharpe(self):
''' return annualized sharpe ratio of the pnl '''
pnl = (self.data['pnl'].diff()).shift(-1)[self.data['shares']!=0] # use only days with position.
return sharpe(pnl) # need the diff here as sharpe works on daily returns.
@property
def pnl(self):
'''easy access to pnl data column '''
return self.data['pnl']
def plotTrades(self):
"""
visualise trades on the price chart
long entry : green triangle up
short entry : red triangle down
exit : black circle
"""
l = ['price']
p = self.data['price']
p.plot(style='x-')
# ---plot markers
        # this works, but I'd rather have colored markers for each day in a position than entry-exit signals
# indices = {'g^': self.trades[self.trades > 0].index ,
# 'ko':self.trades[self.trades == 0].index,
# 'rv':self.trades[self.trades < 0].index}
#
#
# for style, idx in indices.iteritems():
# if len(idx) > 0:
# p[idx].plot(style=style)
# --- plot trades
#colored line for long positions
idx = (self.data['shares'] > 0) | (self.data['shares'] > 0).shift(1)
if idx.any():
p[idx].plot(style='go')
l.append('long')
#colored line for short positions
idx = (self.data['shares'] < 0) | (self.data['shares'] < 0).shift(1)
if idx.any():
p[idx].plot(style='ro')
l.append('short')
plt.xlim([p.index[0],p.index[-1]]) # show full axis
plt.legend(l,loc='best')
plt.title('trades')
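# Minimal toy run (made-up prices and signal): buy 100 shares on the second
# day and close the position on the last day. The helper name is hypothetical.
def _example_backtest():
    import numpy as np
    import pandas as pd
    idx = pd.date_range('2014-01-01', periods=5)
    price = pd.Series([10., 11., 12., 11., 13.], index=idx)
    signal = pd.Series([0, 100, np.nan, np.nan, 0], index=idx)
    bt = Backtest(price, signal, signalType='shares')
    return bt.pnl
    # expected pnl per day: 0, 0, 100, 0, 200 -- i.e. (13 - 11) * 100 realised
    # profit after the exit on the last bar.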
class ProgressBar:
def __init__(self, iterations):
self.iterations = iterations
self.prog_bar = '[]'
self.fill_char = '*'
self.width = 50
self.__update_amount(0)
def animate(self, iteration):
print '\r',self,
sys.stdout.flush()
self.update_iteration(iteration + 1)
def update_iteration(self, elapsed_iter):
self.__update_amount((elapsed_iter / float(self.iterations)) * 100.0)
self.prog_bar += ' %d of %s complete' % (elapsed_iter, self.iterations)
def __update_amount(self, new_amount):
percent_done = int(round((new_amount / 100.0) * 100.0))
all_full = self.width - 2
num_hashes = int(round((percent_done / 100.0) * all_full))
self.prog_bar = '[' + self.fill_char * num_hashes + ' ' * (all_full - num_hashes) + ']'
pct_place = (len(self.prog_bar) // 2) - len(str(percent_done))
pct_string = '%d%%' % percent_done
self.prog_bar = self.prog_bar[0:pct_place] + \
(pct_string + self.prog_bar[pct_place + len(pct_string):])
def __str__(self):
return str(self.prog_bar)
def sharpe(pnl):
return np.sqrt(250)*pnl.mean()/pnl.std()
| bsd-3-clause |
QuackenbushDev/Radium | scripts/graph.py | 1 | 1109 | import os
import sys
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import StringIO
import urllib, base64
def graph(title, labels, data):
fig, ax = plt.subplots()
index = np.arange(len(labels))
bar_width = 0.4
opacity = 0.7
plt.figure(figsize=(750/72, 300/72), dpi=72)
ymax=0
for value in data[0][1]:
value = float(value)
if (value > ymax):
ymax = value
ymax+= ymax * 0.20
plt.axis(xmin=0, xmax=len(labels), ymax=ymax)
plt.bar(index, data[0][1], bar_width,
alpha=opacity,
color='g',
label=data[0][0]
)
if (len(data) == 2):
plt.bar(index + bar_width, data[1][1], bar_width,
alpha=opacity,
color='b',
label=data[1][0])
#plt.ylabel('GiB')
plt.xticks(index + bar_width, labels)
plt.legend()
fig = plt.gcf()
imgdata = StringIO.StringIO()
fig.savefig(imgdata, format='png')
imgdata.seek(0)
uri = 'data:image/png;base64,' + urllib.quote(base64.b64encode(imgdata.buf))
print '%s' % uri | agpl-3.0 |
mwil/collision | figs/msk_wave/plot_msk_wave.py | 1 | 2836 | #! /usr/bin/env python2.7
# Copyright 2013-2014 Matthias Wilhelm
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.rc_file('../rc/2fig-rc.txt')
mpl.rc('figure', figsize=(7, 4))
I, Q = 0, 1
T = 1.
dt = 0.01
mark_skip = 25
t = np.arange(-2*T, 11*T, dt)
fc = 1
omega_c = 2 * np.pi * fc
omega_p = np.pi / (2*T)
tau = 0 * T/dt
fig = plt.figure()
axes = []
axes.append(fig.add_subplot('311'))
axes.append(fig.add_subplot('312'))
axes.append(fig.add_subplot('313'))
alpha = np.array([[ 1, 1, -1, -1, 1], [ 1, -1, 1, -1, 1]])
a_i = np.concatenate([np.repeat(a, (2*T)/dt) for a in alpha[I]])
a_i = np.concatenate([np.repeat(0., T/dt), a_i, np.repeat(0., 10*T/dt)])[:len(t)]
a_q = np.concatenate([np.repeat(a, (2*T)/dt) for a in alpha[Q]])
a_q = np.concatenate([np.repeat(0., 2*T/dt), a_q, np.repeat(0., 10*T/dt)])[:len(t)]
for ax in axes:
ax.set_xlim(-2, 11)
ax.set_ylim(-1.2, 1.2)
ax.set_yticklabels([])
ax.grid(axis='x')
ax.set_yticks((-1, 0, 1))
axes[0].set_title('In-phase component', size=12)
axes[0].set_xticks(range(-1, 12, 2))
axes[0].set_xticks(range(0, 12, 2), minor=True)
axes[0].set_xticklabels([])
axes[1].set_title('Quadrature component', size=12)
axes[1].set_xticks(range(0, 12, 2))
axes[1].set_xticks(range(-1, 12, 2), minor=True)
axes[1].set_xticklabels([])
axes[1].set_ylabel('Transmitted signal amplitude')
axes[2].set_title('Passband signal $s(t)$', size=12)
axes[2].set_xlabel('Time $t$ ($/T$)')
axes[2].set_xticks(range(-1, 11))
s_i = a_i*np.cos(t*omega_c)*np.cos(t*omega_p)
s_q = a_q*np.sin(t*omega_c)*np.sin(t*omega_p)
axes[0].plot(t, a_i, c='0.0')
axes[0].plot(t, s_i, c='0.4')
axes[0].plot(t[::mark_skip], s_i[::mark_skip], '^', ms=4, c='0.4')
axes[0].plot(t, a_i*np.cos(t*omega_p), c='0.7')
axes[0].plot(t[::mark_skip], (a_i*np.cos(t*omega_p))[::mark_skip], 'o', ms=4, c='0.7')
axes[1].plot(t, a_q, c='0.0')
axes[1].plot(t, s_q, c='0.4')
axes[1].plot(t[::mark_skip], s_q[::mark_skip], '^', ms=4, c='0.4')
axes[1].plot(t, a_q*np.sin(t*omega_p), c='0.7')
axes[1].plot(t[::mark_skip], (a_q*np.sin(t*omega_p))[::mark_skip], 'o', ms=4, c='0.7')
axes[2].plot(t, s_i + s_q, c='0.1')
plt.savefig('pdf/msk_wave.pdf')
| gpl-3.0 |
PandaStabber/Praetorius_Goldberg_2016 | setup.py | 2 | 3027 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
import io
import re
from glob import glob
from os.path import basename
from os.path import dirname
from os.path import join
from os.path import splitext
from setuptools import find_packages
from setuptools import setup
def read(*names, **kwargs):
return io.open(
join(dirname(__file__), *names),
encoding=kwargs.get('encoding', 'utf8')
).read()
setup(
name='nanogbcdt',
version='0.1.0',
license='Apache Software License',
description='Distinguishing engineered nanomaterials from natural nanomaterials using gradient-boosted '
'classification decision trees',
long_description='%s\n%s' % (
re.compile('^.. start-badges.*^.. end-badges', re.M | re.S).sub('', read('README.rst')),
re.sub(':[a-z]+:`~?(.*?)`', r'``\1``', read('CHANGELOG.rst'))
),
author='Eli Goldberg',
author_email='[email protected]',
url='https://github.com/eli-s-goldberg/Praetorius_Goldberg_2016',
packages=find_packages('src'),
package_dir={'': 'src'},
py_modules=[splitext(basename(path))[0] for path in glob('src/*.py')],
include_package_data=True,
zip_safe=False,
classifiers=[
# complete classifier list: http://pypi.python.org/pypi?%3Aaction=list_classifiers
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Unix',
'Operating System :: POSIX',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
# uncomment if you test on these interpreters:
# 'Programming Language :: Python :: Implementation :: IronPython',
# 'Programming Language :: Python :: Implementation :: Jython',
# 'Programming Language :: Python :: Implementation :: Stackless',
'Topic :: Utilities',
],
keywords=[
# eg: 'keyword1', 'keyword2', 'keyword3',
],
install_requires=[
'click',
'scikit-learn==0.18.1',
'biokit==0.3.2',
'matplotlib==1.5.3',
'pandas==0.19.1',
'numpy==1.11.0',
],
extras_require={
# eg:
# 'rst': ['docutils>=0.11'],
# ':python_version=="2.6"': ['argparse'],
},
entry_points={
'console_scripts': [
'nanogbcdt = nanogbcdt.cli:main',
]
},
)
| mit |
shangwuhencc/scikit-learn | examples/decomposition/plot_pca_vs_fa_model_selection.py | 142 | 4467 | """
===============================================================
Model selection with Probabilistic PCA and Factor Analysis (FA)
===============================================================
Probabilistic PCA and Factor Analysis are probabilistic models.
The consequence is that the likelihood of new data can be used
for model selection and covariance estimation.
Here we compare PCA and FA with cross-validation on low rank data corrupted
with homoscedastic noise (noise variance
is the same for each feature) or heteroscedastic noise (noise variance
is different for each feature). In a second step we compare the model
likelihood to the likelihoods obtained from shrinkage covariance estimators.
One can observe that with homoscedastic noise both FA and PCA succeed
in recovering the size of the low rank subspace. The likelihood with PCA
is higher than FA in this case. However PCA fails and overestimates
the rank when heteroscedastic noise is present. Under appropriate
circumstances the low rank models are more likely than shrinkage models.
The automatic estimation from
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604
by Thomas P. Minka is also compared.
"""
print(__doc__)
# Authors: Alexandre Gramfort
# Denis A. Engemann
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg
from sklearn.decomposition import PCA, FactorAnalysis
from sklearn.covariance import ShrunkCovariance, LedoitWolf
from sklearn.cross_validation import cross_val_score
from sklearn.grid_search import GridSearchCV
###############################################################################
# Create the data
n_samples, n_features, rank = 1000, 50, 10
sigma = 1.
rng = np.random.RandomState(42)
U, _, _ = linalg.svd(rng.randn(n_features, n_features))
X = np.dot(rng.randn(n_samples, rank), U[:, :rank].T)
# Adding homoscedastic noise
X_homo = X + sigma * rng.randn(n_samples, n_features)
# Adding heteroscedastic noise
sigmas = sigma * rng.rand(n_features) + sigma / 2.
X_hetero = X + rng.randn(n_samples, n_features) * sigmas
###############################################################################
# Fit the models
n_components = np.arange(0, n_features, 5) # options for n_components
def compute_scores(X):
pca = PCA()
fa = FactorAnalysis()
pca_scores, fa_scores = [], []
for n in n_components:
pca.n_components = n
fa.n_components = n
pca_scores.append(np.mean(cross_val_score(pca, X)))
fa_scores.append(np.mean(cross_val_score(fa, X)))
return pca_scores, fa_scores
def shrunk_cov_score(X):
shrinkages = np.logspace(-2, 0, 30)
cv = GridSearchCV(ShrunkCovariance(), {'shrinkage': shrinkages})
return np.mean(cross_val_score(cv.fit(X).best_estimator_, X))
def lw_score(X):
return np.mean(cross_val_score(LedoitWolf(), X))
for X, title in [(X_homo, 'Homoscedastic Noise'),
(X_hetero, 'Heteroscedastic Noise')]:
pca_scores, fa_scores = compute_scores(X)
n_components_pca = n_components[np.argmax(pca_scores)]
n_components_fa = n_components[np.argmax(fa_scores)]
pca = PCA(n_components='mle')
pca.fit(X)
n_components_pca_mle = pca.n_components_
print("best n_components by PCA CV = %d" % n_components_pca)
print("best n_components by FactorAnalysis CV = %d" % n_components_fa)
print("best n_components by PCA MLE = %d" % n_components_pca_mle)
plt.figure()
plt.plot(n_components, pca_scores, 'b', label='PCA scores')
plt.plot(n_components, fa_scores, 'r', label='FA scores')
plt.axvline(rank, color='g', label='TRUTH: %d' % rank, linestyle='-')
plt.axvline(n_components_pca, color='b',
label='PCA CV: %d' % n_components_pca, linestyle='--')
plt.axvline(n_components_fa, color='r',
label='FactorAnalysis CV: %d' % n_components_fa, linestyle='--')
plt.axvline(n_components_pca_mle, color='k',
label='PCA MLE: %d' % n_components_pca_mle, linestyle='--')
# compare with other covariance estimators
plt.axhline(shrunk_cov_score(X), color='violet',
label='Shrunk Covariance MLE', linestyle='-.')
plt.axhline(lw_score(X), color='orange',
                label='LedoitWolf MLE', linestyle='-.')
plt.xlabel('nb of components')
plt.ylabel('CV scores')
plt.legend(loc='lower right')
plt.title(title)
plt.show()
| bsd-3-clause |
fulmicoton/pylearn2 | pylearn2/utils/datasets.py | 44 | 9068 | """
Several utilities to evaluate an ALC on the dataset, to iterate over
minibatches from a dataset, or to merge three data with given proportions
"""
# Standard library imports
import logging
import os
import functools
from itertools import repeat
import warnings
# Third-party imports
import numpy
import scipy
from theano.compat.six.moves import reduce, xrange
import theano
try:
from matplotlib import pyplot
from mpl_toolkits.mplot3d import Axes3D
except ImportError:
warnings.warn("Could not import some dependencies.")
# Local imports
from pylearn2.utils.rng import make_np_rng
logger = logging.getLogger(__name__)
##################################################
# 3D Visualization
##################################################
def do_3d_scatter(x, y, z, figno=None, title=None):
"""
Generate a 3D scatterplot figure and optionally give it a title.
Parameters
----------
x : WRITEME
y : WRITEME
z : WRITEME
figno : WRITEME
title : WRITEME
"""
fig = pyplot.figure(figno)
ax = Axes3D(fig)
ax.scatter(x, y, z)
ax.set_xlabel("X")
ax.set_ylabel("Y")
ax.set_zlabel("Z")
pyplot.suptitle(title)
def save_plot(repr, path, name="figure.pdf", title="features"):
"""
.. todo::
WRITEME
"""
# TODO : Maybe run a PCA if shape[1] > 3
assert repr.get_value(borrow=True).shape[1] == 3
# Take the first 3 columns
x, y, z = repr.get_value(borrow=True).T
do_3d_scatter(x, y, z)
# Save the produces figure
filename = os.path.join(path, name)
pyplot.savefig(filename, format="pdf")
logger.info('... figure saved: {0}'.format(filename))
##################################################
# Features or examples filtering
##################################################
def filter_labels(train, label, classes=None):
"""
Filter examples of train for which we have labels
Parameters
----------
train : WRITEME
label : WRITEME
classes : WRITEME
Returns
-------
WRITEME
"""
if isinstance(train, theano.tensor.sharedvar.SharedVariable):
train = train.get_value(borrow=True)
if isinstance(label, theano.tensor.sharedvar.SharedVariable):
label = label.get_value(borrow=True)
if not (isinstance(train, numpy.ndarray) or scipy.sparse.issparse(train)):
raise TypeError('train must be a numpy array, a scipy sparse matrix,'
' or a theano shared array')
# Examples for which any label is set
if classes is not None:
label = label[:, classes]
# Special case for sparse matrices
if scipy.sparse.issparse(train):
idx = label.sum(axis=1).nonzero()[0]
return (train[idx], label[idx])
# Compress train and label arrays according to condition
condition = label.any(axis=1)
return tuple(var.compress(condition, axis=0) for var in (train, label))
def nonzero_features(data, combine=None):
"""
Get features for which there are nonzero entries in the data.
Parameters
----------
data : list of matrices
List of data matrices, either in sparse format or not.
They must have the same number of features (column number).
combine : function, optional
A function to combine elementwise which features to keep.
Default keeps the intersection of each non-zero columns.
Returns
-------
indices : ndarray object
Indices of the nonzero features.
Notes
-----
I would return a mask (bool array) here, but scipy.sparse doesn't appear to
fully support advanced indexing.
"""
if combine is None:
combine = functools.partial(reduce, numpy.logical_and)
# Assumes all values are >0, which is the case for all sparse datasets.
masks = numpy.asarray([subset.sum(axis=0) for subset in data]).squeeze()
nz_feats = combine(masks).nonzero()[0]
return nz_feats
# TODO: Is this a duplicate?
def filter_nonzero(data, combine=None):
"""
Filter non-zero features of data according to a certain combining function
Parameters
----------
data : list of matrices
List of data matrices, either in sparse format or not.
They must have the same number of features (column number).
combine : function
A function to combine elementwise which features to keep.
Default keeps the intersection of each non-zero columns.
Returns
-------
indices : ndarray object
Indices of the nonzero features.
"""
nz_feats = nonzero_features(data, combine)
return [set[:, nz_feats] for set in data]
##################################################
# Iterator object for minibatches of datasets
##################################################
class BatchIterator(object):
"""
Builds an iterator object that can be used to go through the minibatches
    of a dataset, with respect to the proportions given by ``set_proba``.
Parameters
----------
dataset : WRITEME
set_proba : WRITEME
batch_size : WRITEME
seed : WRITEME
"""
def __init__(self, dataset, set_proba, batch_size, seed=300):
# Local shortcuts for array operations
flo = numpy.floor
sub = numpy.subtract
mul = numpy.multiply
div = numpy.divide
mod = numpy.mod
# Record external parameters
self.batch_size = batch_size
if (isinstance(dataset[0], theano.Variable)):
self.dataset = [set.get_value(borrow=True) for set in dataset]
else:
self.dataset = dataset
# Compute maximum number of samples for one loop
set_sizes = [set.shape[0] for set in self.dataset]
set_batch = [float(self.batch_size) for i in xrange(3)]
set_range = div(mul(set_proba, set_sizes), set_batch)
set_range = map(int, numpy.ceil(set_range))
# Upper bounds for each minibatch indexes
set_limit = numpy.ceil(numpy.divide(set_sizes, set_batch))
self.limit = map(int, set_limit)
# Number of rows in the resulting union
set_tsign = sub(set_limit, flo(div(set_sizes, set_batch)))
set_tsize = mul(set_tsign, flo(div(set_range, set_limit)))
l_trun = mul(flo(div(set_range, set_limit)), mod(set_sizes, set_batch))
l_full = mul(sub(set_range, set_tsize), set_batch)
self.length = sum(l_full) + sum(l_trun)
# Random number generation using a permutation
index_tab = []
for i in xrange(3):
index_tab.extend(repeat(i, set_range[i]))
# Use a deterministic seed
self.seed = seed
rng = make_np_rng(seed, which_method="permutation")
self.permut = rng.permutation(index_tab)
def __iter__(self):
"""Generator function to iterate through all minibatches"""
counter = [0, 0, 0]
for chosen in self.permut:
# Retrieve minibatch from chosen set
index = counter[chosen]
minibatch = self.dataset[chosen][
index * self.batch_size:(index + 1) * self.batch_size
]
# Increment the related counter
counter[chosen] = (counter[chosen] + 1) % self.limit[chosen]
# Return the computed minibatch
yield minibatch
def __len__(self):
"""Return length of the weighted union"""
return self.length
def by_index(self):
"""Same generator as __iter__, but yield only the chosen indexes"""
counter = [0, 0, 0]
for chosen in self.permut:
index = counter[chosen]
counter[chosen] = (counter[chosen] + 1) % self.limit[chosen]
yield chosen, index
##################################################
# Miscellaneous
##################################################
def minibatch_map(fn, batch_size, input_data, output_data=None,
output_width=None):
"""
Apply a function on input_data, one minibatch at a time.
Storage for the output can be provided. If it is the case,
it should have appropriate size.
If output_data is not provided, then output_width should be specified.
Parameters
----------
fn : WRITEME
batch_size : WRITEME
input_data : WRITEME
output_data : WRITEME
output_width : WRITEME
Returns
-------
WRITEME
"""
if output_width is None:
if output_data is None:
raise ValueError('output_data or output_width should be provided')
output_width = output_data.shape[1]
output_length = input_data.shape[0]
if output_data is None:
output_data = numpy.empty((output_length, output_width))
else:
assert output_data.shape[0] == input_data.shape[0], ('output_data '
'should have the same length as input_data',
output_data.shape[0], input_data.shape[0])
for i in xrange(0, output_length, batch_size):
output_data[i:i+batch_size] = fn(input_data[i:i+batch_size])
return output_data
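# Minimal sketch for the function above (made-up data): apply a function in
# minibatches of 4 rows and check it matches the full-batch result. The
# helper name is hypothetical.
def _example_minibatch_map():
    import numpy
    data = numpy.arange(30.).reshape(10, 3)
    doubled = minibatch_map(lambda batch: 2 * batch, batch_size=4,
                            input_data=data, output_width=3)
    return numpy.allclose(doubled, 2 * data)  # expected: True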
| bsd-3-clause |
harisbal/pandas | pandas/tests/frame/test_dtypes.py | 1 | 41264 | # -*- coding: utf-8 -*-
from __future__ import print_function
import pytest
from datetime import timedelta
import numpy as np
from pandas import (DataFrame, Series, date_range, Timedelta, Timestamp,
Categorical, compat, concat, option_context)
from pandas.compat import u
from pandas import _np_version_under1p14
from pandas.core.arrays import integer_array
from pandas.core.dtypes.dtypes import DatetimeTZDtype, CategoricalDtype
from pandas.tests.frame.common import TestData
from pandas.util.testing import (assert_series_equal,
assert_frame_equal,
makeCustomDataframe as mkdf)
import pandas.util.testing as tm
import pandas as pd
@pytest.fixture(params=[str, compat.text_type])
def text_dtype(request):
return request.param
class TestDataFrameDataTypes(TestData):
def test_concat_empty_dataframe_dtypes(self):
df = DataFrame(columns=list("abc"))
df['a'] = df['a'].astype(np.bool_)
df['b'] = df['b'].astype(np.int32)
df['c'] = df['c'].astype(np.float64)
result = pd.concat([df, df])
assert result['a'].dtype == np.bool_
assert result['b'].dtype == np.int32
assert result['c'].dtype == np.float64
result = pd.concat([df, df.astype(np.float64)])
assert result['a'].dtype == np.object_
assert result['b'].dtype == np.float64
assert result['c'].dtype == np.float64
def test_empty_frame_dtypes_ftypes(self):
empty_df = pd.DataFrame()
assert_series_equal(empty_df.dtypes, pd.Series(dtype=np.object))
assert_series_equal(empty_df.ftypes, pd.Series(dtype=np.object))
nocols_df = pd.DataFrame(index=[1, 2, 3])
assert_series_equal(nocols_df.dtypes, pd.Series(dtype=np.object))
assert_series_equal(nocols_df.ftypes, pd.Series(dtype=np.object))
norows_df = pd.DataFrame(columns=list("abc"))
assert_series_equal(norows_df.dtypes, pd.Series(
np.object, index=list("abc")))
assert_series_equal(norows_df.ftypes, pd.Series(
'object:dense', index=list("abc")))
norows_int_df = pd.DataFrame(columns=list("abc")).astype(np.int32)
assert_series_equal(norows_int_df.dtypes, pd.Series(
np.dtype('int32'), index=list("abc")))
assert_series_equal(norows_int_df.ftypes, pd.Series(
'int32:dense', index=list("abc")))
odict = compat.OrderedDict
df = pd.DataFrame(odict([('a', 1), ('b', True), ('c', 1.0)]),
index=[1, 2, 3])
ex_dtypes = pd.Series(odict([('a', np.int64),
('b', np.bool),
('c', np.float64)]))
ex_ftypes = pd.Series(odict([('a', 'int64:dense'),
('b', 'bool:dense'),
('c', 'float64:dense')]))
assert_series_equal(df.dtypes, ex_dtypes)
assert_series_equal(df.ftypes, ex_ftypes)
# same but for empty slice of df
assert_series_equal(df[:0].dtypes, ex_dtypes)
assert_series_equal(df[:0].ftypes, ex_ftypes)
def test_datetime_with_tz_dtypes(self):
tzframe = DataFrame({'A': date_range('20130101', periods=3),
'B': date_range('20130101', periods=3,
tz='US/Eastern'),
'C': date_range('20130101', periods=3, tz='CET')})
tzframe.iloc[1, 1] = pd.NaT
tzframe.iloc[1, 2] = pd.NaT
result = tzframe.dtypes.sort_index()
expected = Series([np.dtype('datetime64[ns]'),
DatetimeTZDtype('datetime64[ns, US/Eastern]'),
DatetimeTZDtype('datetime64[ns, CET]')],
['A', 'B', 'C'])
assert_series_equal(result, expected)
def test_dtypes_are_correct_after_column_slice(self):
# GH6525
df = pd.DataFrame(index=range(5), columns=list("abc"), dtype=np.float_)
odict = compat.OrderedDict
assert_series_equal(df.dtypes,
pd.Series(odict([('a', np.float_),
('b', np.float_),
('c', np.float_)])))
assert_series_equal(df.iloc[:, 2:].dtypes,
pd.Series(odict([('c', np.float_)])))
assert_series_equal(df.dtypes,
pd.Series(odict([('a', np.float_),
('b', np.float_),
('c', np.float_)])))
def test_select_dtypes_include_using_list_like(self):
df = DataFrame({'a': list('abc'),
'b': list(range(1, 4)),
'c': np.arange(3, 6).astype('u1'),
'd': np.arange(4.0, 7.0, dtype='float64'),
'e': [True, False, True],
'f': pd.Categorical(list('abc')),
'g': pd.date_range('20130101', periods=3),
'h': pd.date_range('20130101', periods=3,
tz='US/Eastern'),
'i': pd.date_range('20130101', periods=3,
tz='CET'),
'j': pd.period_range('2013-01', periods=3,
freq='M'),
'k': pd.timedelta_range('1 day', periods=3)})
ri = df.select_dtypes(include=[np.number])
ei = df[['b', 'c', 'd', 'k']]
assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=[np.number], exclude=['timedelta'])
ei = df[['b', 'c', 'd']]
assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=[np.number, 'category'],
exclude=['timedelta'])
ei = df[['b', 'c', 'd', 'f']]
assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=['datetime'])
ei = df[['g']]
assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=['datetime64'])
ei = df[['g']]
assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=['datetimetz'])
ei = df[['h', 'i']]
assert_frame_equal(ri, ei)
pytest.raises(NotImplementedError,
lambda: df.select_dtypes(include=['period']))
def test_select_dtypes_exclude_using_list_like(self):
df = DataFrame({'a': list('abc'),
'b': list(range(1, 4)),
'c': np.arange(3, 6).astype('u1'),
'd': np.arange(4.0, 7.0, dtype='float64'),
'e': [True, False, True]})
re = df.select_dtypes(exclude=[np.number])
ee = df[['a', 'e']]
assert_frame_equal(re, ee)
def test_select_dtypes_exclude_include_using_list_like(self):
df = DataFrame({'a': list('abc'),
'b': list(range(1, 4)),
'c': np.arange(3, 6).astype('u1'),
'd': np.arange(4.0, 7.0, dtype='float64'),
'e': [True, False, True],
'f': pd.date_range('now', periods=3).values})
exclude = np.datetime64,
include = np.bool_, 'integer'
r = df.select_dtypes(include=include, exclude=exclude)
e = df[['b', 'c', 'e']]
assert_frame_equal(r, e)
exclude = 'datetime',
include = 'bool', 'int64', 'int32'
r = df.select_dtypes(include=include, exclude=exclude)
e = df[['b', 'e']]
assert_frame_equal(r, e)
def test_select_dtypes_include_using_scalars(self):
df = DataFrame({'a': list('abc'),
'b': list(range(1, 4)),
'c': np.arange(3, 6).astype('u1'),
'd': np.arange(4.0, 7.0, dtype='float64'),
'e': [True, False, True],
'f': pd.Categorical(list('abc')),
'g': pd.date_range('20130101', periods=3),
'h': pd.date_range('20130101', periods=3,
tz='US/Eastern'),
'i': pd.date_range('20130101', periods=3,
tz='CET'),
'j': pd.period_range('2013-01', periods=3,
freq='M'),
'k': pd.timedelta_range('1 day', periods=3)})
ri = df.select_dtypes(include=np.number)
ei = df[['b', 'c', 'd', 'k']]
assert_frame_equal(ri, ei)
ri = df.select_dtypes(include='datetime')
ei = df[['g']]
assert_frame_equal(ri, ei)
ri = df.select_dtypes(include='datetime64')
ei = df[['g']]
assert_frame_equal(ri, ei)
ri = df.select_dtypes(include='category')
ei = df[['f']]
assert_frame_equal(ri, ei)
pytest.raises(NotImplementedError,
lambda: df.select_dtypes(include='period'))
def test_select_dtypes_exclude_using_scalars(self):
df = DataFrame({'a': list('abc'),
'b': list(range(1, 4)),
'c': np.arange(3, 6).astype('u1'),
'd': np.arange(4.0, 7.0, dtype='float64'),
'e': [True, False, True],
'f': pd.Categorical(list('abc')),
'g': pd.date_range('20130101', periods=3),
'h': pd.date_range('20130101', periods=3,
tz='US/Eastern'),
'i': pd.date_range('20130101', periods=3,
tz='CET'),
'j': pd.period_range('2013-01', periods=3,
freq='M'),
'k': pd.timedelta_range('1 day', periods=3)})
ri = df.select_dtypes(exclude=np.number)
ei = df[['a', 'e', 'f', 'g', 'h', 'i', 'j']]
assert_frame_equal(ri, ei)
ri = df.select_dtypes(exclude='category')
ei = df[['a', 'b', 'c', 'd', 'e', 'g', 'h', 'i', 'j', 'k']]
assert_frame_equal(ri, ei)
pytest.raises(NotImplementedError,
lambda: df.select_dtypes(exclude='period'))
def test_select_dtypes_include_exclude_using_scalars(self):
df = DataFrame({'a': list('abc'),
'b': list(range(1, 4)),
'c': np.arange(3, 6).astype('u1'),
'd': np.arange(4.0, 7.0, dtype='float64'),
'e': [True, False, True],
'f': pd.Categorical(list('abc')),
'g': pd.date_range('20130101', periods=3),
'h': pd.date_range('20130101', periods=3,
tz='US/Eastern'),
'i': pd.date_range('20130101', periods=3,
tz='CET'),
'j': pd.period_range('2013-01', periods=3,
freq='M'),
'k': pd.timedelta_range('1 day', periods=3)})
ri = df.select_dtypes(include=np.number, exclude='floating')
ei = df[['b', 'c', 'k']]
assert_frame_equal(ri, ei)
def test_select_dtypes_include_exclude_mixed_scalars_lists(self):
df = DataFrame({'a': list('abc'),
'b': list(range(1, 4)),
'c': np.arange(3, 6).astype('u1'),
'd': np.arange(4.0, 7.0, dtype='float64'),
'e': [True, False, True],
'f': pd.Categorical(list('abc')),
'g': pd.date_range('20130101', periods=3),
'h': pd.date_range('20130101', periods=3,
tz='US/Eastern'),
'i': pd.date_range('20130101', periods=3,
tz='CET'),
'j': pd.period_range('2013-01', periods=3,
freq='M'),
'k': pd.timedelta_range('1 day', periods=3)})
ri = df.select_dtypes(include=np.number,
exclude=['floating', 'timedelta'])
ei = df[['b', 'c']]
assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=[np.number, 'category'],
exclude='floating')
ei = df[['b', 'c', 'f', 'k']]
assert_frame_equal(ri, ei)
def test_select_dtypes_duplicate_columns(self):
# GH20839
odict = compat.OrderedDict
df = DataFrame(odict([('a', list('abc')),
('b', list(range(1, 4))),
('c', np.arange(3, 6).astype('u1')),
('d', np.arange(4.0, 7.0, dtype='float64')),
('e', [True, False, True]),
('f', pd.date_range('now', periods=3).values)]))
df.columns = ['a', 'a', 'b', 'b', 'b', 'c']
expected = DataFrame({'a': list(range(1, 4)),
'b': np.arange(3, 6).astype('u1')})
result = df.select_dtypes(include=[np.number], exclude=['floating'])
assert_frame_equal(result, expected)
def test_select_dtypes_not_an_attr_but_still_valid_dtype(self):
df = DataFrame({'a': list('abc'),
'b': list(range(1, 4)),
'c': np.arange(3, 6).astype('u1'),
'd': np.arange(4.0, 7.0, dtype='float64'),
'e': [True, False, True],
'f': pd.date_range('now', periods=3).values})
df['g'] = df.f.diff()
assert not hasattr(np, 'u8')
r = df.select_dtypes(include=['i8', 'O'], exclude=['timedelta'])
e = df[['a', 'b']]
assert_frame_equal(r, e)
r = df.select_dtypes(include=['i8', 'O', 'timedelta64[ns]'])
e = df[['a', 'b', 'g']]
assert_frame_equal(r, e)
def test_select_dtypes_empty(self):
df = DataFrame({'a': list('abc'), 'b': list(range(1, 4))})
with tm.assert_raises_regex(ValueError, 'at least one of '
'include or exclude '
'must be nonempty'):
df.select_dtypes()
def test_select_dtypes_bad_datetime64(self):
df = DataFrame({'a': list('abc'),
'b': list(range(1, 4)),
'c': np.arange(3, 6).astype('u1'),
'd': np.arange(4.0, 7.0, dtype='float64'),
'e': [True, False, True],
'f': pd.date_range('now', periods=3).values})
with tm.assert_raises_regex(ValueError, '.+ is too specific'):
df.select_dtypes(include=['datetime64[D]'])
with tm.assert_raises_regex(ValueError, '.+ is too specific'):
df.select_dtypes(exclude=['datetime64[as]'])
def test_select_dtypes_datetime_with_tz(self):
df2 = DataFrame(dict(A=Timestamp('20130102', tz='US/Eastern'),
B=Timestamp('20130603', tz='CET')),
index=range(5))
df3 = pd.concat([df2.A.to_frame(), df2.B.to_frame()], axis=1)
result = df3.select_dtypes(include=['datetime64[ns]'])
expected = df3.reindex(columns=[])
assert_frame_equal(result, expected)
@pytest.mark.parametrize("dtype", [
str, "str", np.string_, "S1", "unicode", np.unicode_, "U1",
compat.text_type
])
@pytest.mark.parametrize("arg", ["include", "exclude"])
def test_select_dtypes_str_raises(self, dtype, arg):
df = DataFrame({"a": list("abc"),
"g": list(u("abc")),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.date_range("now", periods=3).values})
msg = "string dtypes are not allowed"
kwargs = {arg: [dtype]}
with tm.assert_raises_regex(TypeError, msg):
df.select_dtypes(**kwargs)
def test_select_dtypes_bad_arg_raises(self):
df = DataFrame({'a': list('abc'),
'g': list(u('abc')),
'b': list(range(1, 4)),
'c': np.arange(3, 6).astype('u1'),
'd': np.arange(4.0, 7.0, dtype='float64'),
'e': [True, False, True],
'f': pd.date_range('now', periods=3).values})
with tm.assert_raises_regex(TypeError, 'data type.'
'*not understood'):
df.select_dtypes(['blargy, blarg, blarg'])
def test_select_dtypes_typecodes(self):
# GH 11990
df = mkdf(30, 3, data_gen_f=lambda x, y: np.random.random())
expected = df
FLOAT_TYPES = list(np.typecodes['AllFloat'])
assert_frame_equal(df.select_dtypes(FLOAT_TYPES), expected)
def test_dtypes_gh8722(self):
self.mixed_frame['bool'] = self.mixed_frame['A'] > 0
result = self.mixed_frame.dtypes
expected = Series({k: v.dtype
for k, v in compat.iteritems(self.mixed_frame)},
index=result.index)
assert_series_equal(result, expected)
# compat, GH 8722
with option_context('use_inf_as_na', True):
df = DataFrame([[1]])
result = df.dtypes
assert_series_equal(result, Series({0: np.dtype('int64')}))
def test_ftypes(self):
frame = self.mixed_float
expected = Series(dict(A='float32:dense',
B='float32:dense',
C='float16:dense',
D='float64:dense')).sort_values()
result = frame.ftypes.sort_values()
assert_series_equal(result, expected)
def test_astype(self):
casted = self.frame.astype(int)
expected = DataFrame(self.frame.values.astype(int),
index=self.frame.index,
columns=self.frame.columns)
assert_frame_equal(casted, expected)
casted = self.frame.astype(np.int32)
expected = DataFrame(self.frame.values.astype(np.int32),
index=self.frame.index,
columns=self.frame.columns)
assert_frame_equal(casted, expected)
self.frame['foo'] = '5'
casted = self.frame.astype(int)
expected = DataFrame(self.frame.values.astype(int),
index=self.frame.index,
columns=self.frame.columns)
assert_frame_equal(casted, expected)
# mixed casting
def _check_cast(df, v):
assert (list({s.dtype.name for
_, s in compat.iteritems(df)})[0] == v)
mn = self.all_mixed._get_numeric_data().copy()
mn['little_float'] = np.array(12345., dtype='float16')
mn['big_float'] = np.array(123456789101112., dtype='float64')
casted = mn.astype('float64')
_check_cast(casted, 'float64')
casted = mn.astype('int64')
_check_cast(casted, 'int64')
casted = self.mixed_float.reindex(columns=['A', 'B']).astype('float32')
_check_cast(casted, 'float32')
casted = mn.reindex(columns=['little_float']).astype('float16')
_check_cast(casted, 'float16')
casted = self.mixed_float.reindex(columns=['A', 'B']).astype('float16')
_check_cast(casted, 'float16')
casted = mn.astype('float32')
_check_cast(casted, 'float32')
casted = mn.astype('int32')
_check_cast(casted, 'int32')
# to object
casted = mn.astype('O')
_check_cast(casted, 'object')
def test_astype_with_exclude_string(self):
df = self.frame.copy()
expected = self.frame.astype(int)
df['string'] = 'foo'
casted = df.astype(int, errors='ignore')
expected['string'] = 'foo'
assert_frame_equal(casted, expected)
df = self.frame.copy()
expected = self.frame.astype(np.int32)
df['string'] = 'foo'
casted = df.astype(np.int32, errors='ignore')
expected['string'] = 'foo'
assert_frame_equal(casted, expected)
def test_astype_with_view(self):
tf = self.mixed_float.reindex(columns=['A', 'B', 'C'])
casted = tf.astype(np.int64)
casted = tf.astype(np.float32)
# this is the only real reason to do it this way
tf = np.round(self.frame).astype(np.int32)
casted = tf.astype(np.float32, copy=False)
# TODO(wesm): verification?
tf = self.frame.astype(np.float64)
casted = tf.astype(np.int64, copy=False) # noqa
@pytest.mark.parametrize("dtype", [np.int32, np.int64])
@pytest.mark.parametrize("val", [np.nan, np.inf])
def test_astype_cast_nan_inf_int(self, val, dtype):
# see gh-14265
#
# Check NaN and inf --> raise error when converting to int.
msg = "Cannot convert non-finite values \\(NA or inf\\) to integer"
df = DataFrame([val])
with tm.assert_raises_regex(ValueError, msg):
df.astype(dtype)
def test_astype_str(self, text_dtype):
# see gh-9757
a = Series(date_range("2010-01-04", periods=5))
b = Series(date_range("3/6/2012 00:00", periods=5, tz="US/Eastern"))
c = Series([Timedelta(x, unit="d") for x in range(5)])
d = Series(range(5))
e = Series([0.0, 0.2, 0.4, 0.6, 0.8])
df = DataFrame({"a": a, "b": b, "c": c, "d": d, "e": e})
# Datetime-like
# Test str and unicode on Python 2.x and just str on Python 3.x
result = df.astype(text_dtype)
expected = DataFrame({
"a": list(map(text_dtype,
map(lambda x: Timestamp(x)._date_repr, a._values))),
"b": list(map(text_dtype, map(Timestamp, b._values))),
"c": list(map(text_dtype,
map(lambda x: Timedelta(x)._repr_base(format="all"),
c._values))),
"d": list(map(text_dtype, d._values)),
"e": list(map(text_dtype, e._values)),
})
assert_frame_equal(result, expected)
def test_astype_str_float(self, text_dtype):
# see gh-11302
result = DataFrame([np.NaN]).astype(text_dtype)
expected = DataFrame(["nan"])
assert_frame_equal(result, expected)
result = DataFrame([1.12345678901234567890]).astype(text_dtype)
# < 1.14 truncates
# >= 1.14 preserves the full repr
val = ("1.12345678901" if _np_version_under1p14
else "1.1234567890123457")
expected = DataFrame([val])
assert_frame_equal(result, expected)
@pytest.mark.parametrize("dtype_class", [dict, Series])
def test_astype_dict_like(self, dtype_class):
# GH7271 & GH16717
a = Series(date_range('2010-01-04', periods=5))
b = Series(range(5))
c = Series([0.0, 0.2, 0.4, 0.6, 0.8])
d = Series(['1.0', '2', '3.14', '4', '5.4'])
df = DataFrame({'a': a, 'b': b, 'c': c, 'd': d})
original = df.copy(deep=True)
# change type of a subset of columns
dt1 = dtype_class({'b': 'str', 'd': 'float32'})
result = df.astype(dt1)
expected = DataFrame({
'a': a,
'b': Series(['0', '1', '2', '3', '4']),
'c': c,
'd': Series([1.0, 2.0, 3.14, 4.0, 5.4], dtype='float32')})
assert_frame_equal(result, expected)
assert_frame_equal(df, original)
dt2 = dtype_class({'b': np.float32, 'c': 'float32', 'd': np.float64})
result = df.astype(dt2)
expected = DataFrame({
'a': a,
'b': Series([0.0, 1.0, 2.0, 3.0, 4.0], dtype='float32'),
'c': Series([0.0, 0.2, 0.4, 0.6, 0.8], dtype='float32'),
'd': Series([1.0, 2.0, 3.14, 4.0, 5.4], dtype='float64')})
assert_frame_equal(result, expected)
assert_frame_equal(df, original)
# change all columns
dt3 = dtype_class({'a': str, 'b': str, 'c': str, 'd': str})
assert_frame_equal(df.astype(dt3),
df.astype(str))
assert_frame_equal(df, original)
# error should be raised when using something other than column labels
# in the keys of the dtype dict
dt4 = dtype_class({'b': str, 2: str})
dt5 = dtype_class({'e': str})
pytest.raises(KeyError, df.astype, dt4)
pytest.raises(KeyError, df.astype, dt5)
assert_frame_equal(df, original)
# if the dtypes provided are the same as the original dtypes, the
# resulting DataFrame should be the same as the original DataFrame
dt6 = dtype_class({col: df[col].dtype for col in df.columns})
equiv = df.astype(dt6)
assert_frame_equal(df, equiv)
assert_frame_equal(df, original)
# GH 16717
# if dtypes provided is empty, the resulting DataFrame
# should be the same as the original DataFrame
dt7 = dtype_class({})
result = df.astype(dt7)
        assert_frame_equal(result, df)
assert_frame_equal(df, original)
def test_astype_duplicate_col(self):
a1 = Series([1, 2, 3, 4, 5], name='a')
b = Series([0.1, 0.2, 0.4, 0.6, 0.8], name='b')
a2 = Series([0, 1, 2, 3, 4], name='a')
df = concat([a1, b, a2], axis=1)
result = df.astype(str)
a1_str = Series(['1', '2', '3', '4', '5'], dtype='str', name='a')
b_str = Series(['0.1', '0.2', '0.4', '0.6', '0.8'], dtype=str,
name='b')
a2_str = Series(['0', '1', '2', '3', '4'], dtype='str', name='a')
expected = concat([a1_str, b_str, a2_str], axis=1)
assert_frame_equal(result, expected)
result = df.astype({'a': 'str'})
expected = concat([a1_str, b, a2_str], axis=1)
assert_frame_equal(result, expected)
@pytest.mark.parametrize('dtype', [
'category',
CategoricalDtype(),
CategoricalDtype(ordered=True),
CategoricalDtype(ordered=False),
CategoricalDtype(categories=list('abcdef')),
CategoricalDtype(categories=list('edba'), ordered=False),
CategoricalDtype(categories=list('edcb'), ordered=True)], ids=repr)
def test_astype_categorical(self, dtype):
# GH 18099
d = {'A': list('abbc'), 'B': list('bccd'), 'C': list('cdde')}
df = DataFrame(d)
result = df.astype(dtype)
expected = DataFrame({k: Categorical(d[k], dtype=dtype) for k in d})
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("cls", [
pd.api.types.CategoricalDtype,
pd.api.types.DatetimeTZDtype,
pd.api.types.IntervalDtype
])
def test_astype_categoricaldtype_class_raises(self, cls):
df = DataFrame({"A": ['a', 'a', 'b', 'c']})
xpr = "Expected an instance of {}".format(cls.__name__)
with tm.assert_raises_regex(TypeError, xpr):
df.astype({"A": cls})
with tm.assert_raises_regex(TypeError, xpr):
df['A'].astype(cls)
@pytest.mark.parametrize("dtype", ['Int64', 'Int32', 'Int16'])
def test_astype_extension_dtypes(self, dtype):
# GH 22578
df = pd.DataFrame([[1., 2.], [3., 4.], [5., 6.]], columns=['a', 'b'])
expected1 = pd.DataFrame({'a': integer_array([1, 3, 5],
dtype=dtype),
'b': integer_array([2, 4, 6],
dtype=dtype)})
tm.assert_frame_equal(df.astype(dtype), expected1)
tm.assert_frame_equal(df.astype('int64').astype(dtype), expected1)
tm.assert_frame_equal(df.astype(dtype).astype('float64'), df)
df = pd.DataFrame([[1., 2.], [3., 4.], [5., 6.]], columns=['a', 'b'])
df['b'] = df['b'].astype(dtype)
expected2 = pd.DataFrame({'a': [1., 3., 5.],
'b': integer_array([2, 4, 6],
dtype=dtype)})
tm.assert_frame_equal(df, expected2)
tm.assert_frame_equal(df.astype(dtype), expected1)
tm.assert_frame_equal(df.astype('int64').astype(dtype), expected1)
@pytest.mark.parametrize("dtype", ['Int64', 'Int32', 'Int16'])
def test_astype_extension_dtypes_1d(self, dtype):
# GH 22578
df = pd.DataFrame({'a': [1., 2., 3.]})
expected1 = pd.DataFrame({'a': integer_array([1, 2, 3],
dtype=dtype)})
tm.assert_frame_equal(df.astype(dtype), expected1)
tm.assert_frame_equal(df.astype('int64').astype(dtype), expected1)
df = pd.DataFrame({'a': [1., 2., 3.]})
df['a'] = df['a'].astype(dtype)
expected2 = pd.DataFrame({'a': integer_array([1, 2, 3],
dtype=dtype)})
tm.assert_frame_equal(df, expected2)
tm.assert_frame_equal(df.astype(dtype), expected1)
tm.assert_frame_equal(df.astype('int64').astype(dtype), expected1)
@pytest.mark.parametrize('dtype', [
{100: 'float64', 200: 'uint64'}, 'category', 'float64'])
def test_astype_column_metadata(self, dtype):
# GH 19920
columns = pd.UInt64Index([100, 200, 300], name='foo')
df = DataFrame(np.arange(15).reshape(5, 3), columns=columns)
df = df.astype(dtype)
tm.assert_index_equal(df.columns, columns)
@pytest.mark.parametrize("dtype", ["M8", "m8"])
@pytest.mark.parametrize("unit", ['ns', 'us', 'ms', 's', 'h', 'm', 'D'])
    def test_astype_from_datetimelike_to_object(self, dtype, unit):
# tests astype to object dtype
# gh-19223 / gh-12425
dtype = "{}[{}]".format(dtype, unit)
arr = np.array([[1, 2, 3]], dtype=dtype)
df = DataFrame(arr)
result = df.astype(object)
assert (result.dtypes == object).all()
if dtype.startswith('M8'):
assert result.iloc[0, 0] == pd.to_datetime(1, unit=unit)
else:
assert result.iloc[0, 0] == pd.to_timedelta(1, unit=unit)
@pytest.mark.parametrize("arr_dtype", [np.int64, np.float64])
@pytest.mark.parametrize("dtype", ["M8", "m8"])
@pytest.mark.parametrize("unit", ['ns', 'us', 'ms', 's', 'h', 'm', 'D'])
def test_astype_to_datetimelike_unit(self, arr_dtype, dtype, unit):
# tests all units from numeric origination
# gh-19223 / gh-12425
dtype = "{}[{}]".format(dtype, unit)
arr = np.array([[1, 2, 3]], dtype=arr_dtype)
df = DataFrame(arr)
result = df.astype(dtype)
expected = DataFrame(arr.astype(dtype))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("unit", ['ns', 'us', 'ms', 's', 'h', 'm', 'D'])
def test_astype_to_datetime_unit(self, unit):
# tests all units from datetime origination
# gh-19223
dtype = "M8[{}]".format(unit)
arr = np.array([[1, 2, 3]], dtype=dtype)
df = DataFrame(arr)
result = df.astype(dtype)
expected = DataFrame(arr.astype(dtype))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("unit", ['ns'])
def test_astype_to_timedelta_unit_ns(self, unit):
        # preserve the timedelta conversion
# gh-19223
dtype = "m8[{}]".format(unit)
arr = np.array([[1, 2, 3]], dtype=dtype)
df = DataFrame(arr)
result = df.astype(dtype)
expected = DataFrame(arr.astype(dtype))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("unit", ['us', 'ms', 's', 'h', 'm', 'D'])
def test_astype_to_timedelta_unit(self, unit):
# coerce to float
# gh-19223
dtype = "m8[{}]".format(unit)
arr = np.array([[1, 2, 3]], dtype=dtype)
df = DataFrame(arr)
result = df.astype(dtype)
expected = DataFrame(df.values.astype(dtype).astype(float))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("unit", ['ns', 'us', 'ms', 's', 'h', 'm', 'D'])
def test_astype_to_incorrect_datetimelike(self, unit):
# trying to astype a m to a M, or vice-versa
# gh-19224
dtype = "M8[{}]".format(unit)
other = "m8[{}]".format(unit)
df = DataFrame(np.array([[1, 2, 3]], dtype=dtype))
with pytest.raises(TypeError):
df.astype(other)
df = DataFrame(np.array([[1, 2, 3]], dtype=other))
with pytest.raises(TypeError):
df.astype(dtype)
def test_timedeltas(self):
df = DataFrame(dict(A=Series(date_range('2012-1-1', periods=3,
freq='D')),
B=Series([timedelta(days=i) for i in range(3)])))
result = df.get_dtype_counts().sort_index()
expected = Series(
{'datetime64[ns]': 1, 'timedelta64[ns]': 1}).sort_index()
assert_series_equal(result, expected)
df['C'] = df['A'] + df['B']
expected = Series(
{'datetime64[ns]': 2, 'timedelta64[ns]': 1}).sort_values()
result = df.get_dtype_counts().sort_values()
assert_series_equal(result, expected)
# mixed int types
df['D'] = 1
expected = Series({'datetime64[ns]': 2,
'timedelta64[ns]': 1,
'int64': 1}).sort_values()
result = df.get_dtype_counts().sort_values()
assert_series_equal(result, expected)
def test_arg_for_errors_in_astype(self):
# issue #14878
df = DataFrame([1, 2, 3])
with pytest.raises(ValueError):
df.astype(np.float64, errors=True)
df.astype(np.int8, errors='ignore')
@pytest.mark.parametrize('input_vals', [
([1, 2]),
(['1', '2']),
(list(pd.date_range('1/1/2011', periods=2, freq='H'))),
(list(pd.date_range('1/1/2011', periods=2, freq='H',
tz='US/Eastern'))),
([pd.Interval(left=0, right=5)]),
])
def test_constructor_list_str(self, input_vals, string_dtype):
# GH 16605
# Ensure that data elements are converted to strings when
# dtype is str, 'str', or 'U'
result = DataFrame({'A': input_vals}, dtype=string_dtype)
expected = DataFrame({'A': input_vals}).astype({'A': string_dtype})
assert_frame_equal(result, expected)
def test_constructor_list_str_na(self, string_dtype):
result = DataFrame({"A": [1.0, 2.0, None]}, dtype=string_dtype)
expected = DataFrame({"A": ['1.0', '2.0', None]}, dtype=object)
assert_frame_equal(result, expected)
@pytest.mark.parametrize("data, expected", [
# empty
(DataFrame(), True),
# multi-same
(DataFrame({"A": [1, 2], "B": [1, 2]}), True),
# multi-object
(DataFrame({"A": np.array([1, 2], dtype=object),
"B": np.array(["a", "b"], dtype=object)}), True),
# multi-extension
(DataFrame({"A": pd.Categorical(['a', 'b']),
"B": pd.Categorical(['a', 'b'])}), True),
# differ types
(DataFrame({"A": [1, 2], "B": [1., 2.]}), False),
# differ sizes
(DataFrame({"A": np.array([1, 2], dtype=np.int32),
"B": np.array([1, 2], dtype=np.int64)}), False),
# multi-extension differ
(DataFrame({"A": pd.Categorical(['a', 'b']),
"B": pd.Categorical(['b', 'c'])}), False),
])
def test_is_homogeneous_type(self, data, expected):
assert data._is_homogeneous_type is expected
def test_asarray_homogenous(self):
df = pd.DataFrame({"A": pd.Categorical([1, 2]),
"B": pd.Categorical([1, 2])})
result = np.asarray(df)
# may change from object in the future
expected = np.array([[1, 1], [2, 2]], dtype='object')
tm.assert_numpy_array_equal(result, expected)
class TestDataFrameDatetimeWithTZ(TestData):
def test_interleave(self):
# interleave with object
result = self.tzframe.assign(D='foo').values
expected = np.array([[Timestamp('2013-01-01 00:00:00'),
Timestamp('2013-01-02 00:00:00'),
Timestamp('2013-01-03 00:00:00')],
[Timestamp('2013-01-01 00:00:00-0500',
tz='US/Eastern'),
pd.NaT,
Timestamp('2013-01-03 00:00:00-0500',
tz='US/Eastern')],
[Timestamp('2013-01-01 00:00:00+0100', tz='CET'),
pd.NaT,
Timestamp('2013-01-03 00:00:00+0100', tz='CET')],
['foo', 'foo', 'foo']], dtype=object).T
tm.assert_numpy_array_equal(result, expected)
# interleave with only datetime64[ns]
result = self.tzframe.values
expected = np.array([[Timestamp('2013-01-01 00:00:00'),
Timestamp('2013-01-02 00:00:00'),
Timestamp('2013-01-03 00:00:00')],
[Timestamp('2013-01-01 00:00:00-0500',
tz='US/Eastern'),
pd.NaT,
Timestamp('2013-01-03 00:00:00-0500',
tz='US/Eastern')],
[Timestamp('2013-01-01 00:00:00+0100', tz='CET'),
pd.NaT,
Timestamp('2013-01-03 00:00:00+0100',
tz='CET')]], dtype=object).T
tm.assert_numpy_array_equal(result, expected)
def test_astype(self):
# astype
expected = np.array([[Timestamp('2013-01-01 00:00:00'),
Timestamp('2013-01-02 00:00:00'),
Timestamp('2013-01-03 00:00:00')],
[Timestamp('2013-01-01 00:00:00-0500',
tz='US/Eastern'),
pd.NaT,
Timestamp('2013-01-03 00:00:00-0500',
tz='US/Eastern')],
[Timestamp('2013-01-01 00:00:00+0100', tz='CET'),
pd.NaT,
Timestamp('2013-01-03 00:00:00+0100',
tz='CET')]],
dtype=object).T
result = self.tzframe.astype(object)
assert_frame_equal(result, DataFrame(
expected, index=self.tzframe.index, columns=self.tzframe.columns))
result = self.tzframe.astype('datetime64[ns]')
expected = DataFrame({'A': date_range('20130101', periods=3),
'B': (date_range('20130101', periods=3,
tz='US/Eastern')
.tz_convert('UTC')
.tz_localize(None)),
'C': (date_range('20130101', periods=3,
tz='CET')
.tz_convert('UTC')
.tz_localize(None))})
expected.iloc[1, 1] = pd.NaT
expected.iloc[1, 2] = pd.NaT
assert_frame_equal(result, expected)
def test_astype_str(self):
# str formatting
result = self.tzframe.astype(str)
expected = DataFrame([['2013-01-01', '2013-01-01 00:00:00-05:00',
'2013-01-01 00:00:00+01:00'],
['2013-01-02', 'NaT', 'NaT'],
['2013-01-03', '2013-01-03 00:00:00-05:00',
'2013-01-03 00:00:00+01:00']],
columns=self.tzframe.columns)
tm.assert_frame_equal(result, expected)
with option_context('display.max_columns', 20):
result = str(self.tzframe)
assert ('0 2013-01-01 2013-01-01 00:00:00-05:00 '
'2013-01-01 00:00:00+01:00') in result
assert ('1 2013-01-02 '
'NaT NaT') in result
assert ('2 2013-01-03 2013-01-03 00:00:00-05:00 '
'2013-01-03 00:00:00+01:00') in result
| bsd-3-clause |
tomlof/scikit-learn | examples/feature_selection/plot_permutation_test_for_classification.py | 94 | 2264 | """
=================================================================
Test with permutations the significance of a classification score
=================================================================
In order to test whether a classification score is significant, a common
technique is to repeat the classification procedure after randomizing
(permuting) the labels. The p-value is then given by the percentage of runs
for which the score obtained is greater than the classification score
obtained in the first place.
"""
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import permutation_test_score
from sklearn import datasets
##############################################################################
# Loading a dataset
iris = datasets.load_iris()
X = iris.data
y = iris.target
n_classes = np.unique(y).size
# Some noisy data not correlated
random = np.random.RandomState(seed=0)
E = random.normal(size=(len(X), 2200))
# Add noisy data to the informative features to make the task harder
X = np.c_[X, E]
svm = SVC(kernel='linear')
cv = StratifiedKFold(2)
score, permutation_scores, pvalue = permutation_test_score(
svm, X, y, scoring="accuracy", cv=cv, n_permutations=100, n_jobs=1)
print("Classification score %s (pvalue : %s)" % (score, pvalue))
###############################################################################
# View histogram of permutation scores
plt.hist(permutation_scores, 20, label='Permutation scores')
ylim = plt.ylim()
# BUG: vlines(..., linestyle='--') fails on older versions of matplotlib
#plt.vlines(score, ylim[0], ylim[1], linestyle='--',
# color='g', linewidth=3, label='Classification Score'
# ' (pvalue %s)' % pvalue)
#plt.vlines(1.0 / n_classes, ylim[0], ylim[1], linestyle='--',
# color='k', linewidth=3, label='Luck')
plt.plot(2 * [score], ylim, '--g', linewidth=3,
label='Classification Score'
' (pvalue %s)' % pvalue)
plt.plot(2 * [1. / n_classes], ylim, '--k', linewidth=3, label='Luck')
plt.ylim(ylim)
plt.legend()
plt.xlabel('Score')
plt.show()
| bsd-3-clause |
navrasio/mxnet | example/rcnn/rcnn/pycocotools/coco.py | 41 | 19083 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
__author__ = 'tylin'
__version__ = '2.0'
# Interface for accessing the Microsoft COCO dataset.
# Microsoft COCO is a large image dataset designed for object detection,
# segmentation, and caption generation. pycocotools is a Python API that
# assists in loading, parsing and visualizing the annotations in COCO.
# Please visit http://mscoco.org/ for more information on COCO, including
# for the data, paper, and tutorials. The exact format of the annotations
# is also described on the COCO website. For example usage of the pycocotools
# please see pycocotools_demo.ipynb. In addition to this API, please download both
# the COCO images and annotations in order to run the demo.
# An alternative to using the API is to load the annotations directly
# into a Python dictionary.
# Using the API provides additional utility functions. Note that this API
# supports both *instance* and *caption* annotations. In the case of
# captions not all functions are defined (e.g. categories are undefined).
# The following API functions are defined:
# COCO - COCO api class that loads COCO annotation file and prepares data structures.
# decodeMask - Decode binary mask M encoded via run-length encoding.
# encodeMask - Encode binary mask M using run-length encoding.
# getAnnIds - Get ann ids that satisfy given filter conditions.
# getCatIds - Get cat ids that satisfy given filter conditions.
# getImgIds - Get img ids that satisfy given filter conditions.
# loadAnns - Load anns with the specified ids.
# loadCats - Load cats with the specified ids.
# loadImgs - Load imgs with the specified ids.
# annToMask - Convert segmentation in an annotation to binary mask.
# showAnns - Display the specified annotations.
# loadRes - Load algorithm results and create API for accessing them.
# download - Download COCO images from mscoco.org server.
# Throughout the API "ann"=annotation, "cat"=category, and "img"=image.
# Help on each function can be accessed by: "help COCO>function".
# See also COCO>decodeMask,
# COCO>encodeMask, COCO>getAnnIds, COCO>getCatIds,
# COCO>getImgIds, COCO>loadAnns, COCO>loadCats,
# COCO>loadImgs, COCO>annToMask, COCO>showAnns
# Microsoft COCO Toolbox. version 2.0
# Data, paper, and tutorials available at: http://mscoco.org/
# Code written by Piotr Dollar and Tsung-Yi Lin, 2014.
# Licensed under the Simplified BSD License [see bsd.txt]
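# A minimal, illustrative usage sketch (commented out, not executed on import); the
# annotation file path below is a hypothetical placeholder and must point at a real
# COCO-format JSON file:
#   coco = COCO('annotations/instances_val2014.json')   # hypothetical path
#   cat_ids = coco.getCatIds(catNms=['person'])
#   img_ids = coco.getImgIds(catIds=cat_ids)
#   ann_ids = coco.getAnnIds(imgIds=img_ids[:1], catIds=cat_ids, iscrowd=None)
#   anns = coco.loadAnns(ann_ids)
#   coco.showAnns(anns)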
import json
import time
import matplotlib.pyplot as plt
from matplotlib.collections import PatchCollection
from matplotlib.patches import Polygon
import numpy as np
import copy
import itertools
from . import mask as maskUtils
import os
from collections import defaultdict
import sys
PYTHON_VERSION = sys.version_info[0]
if PYTHON_VERSION == 2:
from urllib import urlretrieve
elif PYTHON_VERSION == 3:
from urllib.request import urlretrieve
class COCO:
def __init__(self, annotation_file=None):
"""
Constructor of Microsoft COCO helper class for reading and visualizing annotations.
:param annotation_file (str): location of annotation file
:param image_folder (str): location to the folder that hosts images.
:return:
"""
# load dataset
self.dataset,self.anns,self.cats,self.imgs = dict(),dict(),dict(),dict()
self.imgToAnns, self.catToImgs = defaultdict(list), defaultdict(list)
if not annotation_file == None:
print('loading annotations into memory...')
tic = time.time()
dataset = json.load(open(annotation_file, 'r'))
assert type(dataset)==dict, 'annotation file format {} not supported'.format(type(dataset))
print('Done (t={:0.2f}s)'.format(time.time()- tic))
self.dataset = dataset
self.createIndex()
def createIndex(self):
# create index
print('creating index...')
anns, cats, imgs = {}, {}, {}
imgToAnns,catToImgs = defaultdict(list),defaultdict(list)
if 'annotations' in self.dataset:
for ann in self.dataset['annotations']:
imgToAnns[ann['image_id']].append(ann)
anns[ann['id']] = ann
if 'images' in self.dataset:
for img in self.dataset['images']:
imgs[img['id']] = img
if 'categories' in self.dataset:
for cat in self.dataset['categories']:
cats[cat['id']] = cat
if 'annotations' in self.dataset and 'categories' in self.dataset:
for ann in self.dataset['annotations']:
catToImgs[ann['category_id']].append(ann['image_id'])
print('index created!')
# create class members
self.anns = anns
self.imgToAnns = imgToAnns
self.catToImgs = catToImgs
self.imgs = imgs
self.cats = cats
def info(self):
"""
Print information about the annotation file.
:return:
"""
for key, value in self.dataset['info'].items():
print('{}: {}'.format(key, value))
def getAnnIds(self, imgIds=[], catIds=[], areaRng=[], iscrowd=None):
"""
Get ann ids that satisfy given filter conditions. default skips that filter
:param imgIds (int array) : get anns for given imgs
catIds (int array) : get anns for given cats
areaRng (float array) : get anns for given area range (e.g. [0 inf])
iscrowd (boolean) : get anns for given crowd label (False or True)
:return: ids (int array) : integer array of ann ids
"""
imgIds = imgIds if type(imgIds) == list else [imgIds]
catIds = catIds if type(catIds) == list else [catIds]
if len(imgIds) == len(catIds) == len(areaRng) == 0:
anns = self.dataset['annotations']
else:
if not len(imgIds) == 0:
lists = [self.imgToAnns[imgId] for imgId in imgIds if imgId in self.imgToAnns]
anns = list(itertools.chain.from_iterable(lists))
else:
anns = self.dataset['annotations']
anns = anns if len(catIds) == 0 else [ann for ann in anns if ann['category_id'] in catIds]
anns = anns if len(areaRng) == 0 else [ann for ann in anns if ann['area'] > areaRng[0] and ann['area'] < areaRng[1]]
if not iscrowd == None:
ids = [ann['id'] for ann in anns if ann['iscrowd'] == iscrowd]
else:
ids = [ann['id'] for ann in anns]
return ids
def getCatIds(self, catNms=[], supNms=[], catIds=[]):
"""
        Get cat ids that satisfy given filter conditions. default skips that filter.
:param catNms (str array) : get cats for given cat names
:param supNms (str array) : get cats for given supercategory names
:param catIds (int array) : get cats for given cat ids
:return: ids (int array) : integer array of cat ids
"""
catNms = catNms if type(catNms) == list else [catNms]
supNms = supNms if type(supNms) == list else [supNms]
catIds = catIds if type(catIds) == list else [catIds]
if len(catNms) == len(supNms) == len(catIds) == 0:
cats = self.dataset['categories']
else:
cats = self.dataset['categories']
cats = cats if len(catNms) == 0 else [cat for cat in cats if cat['name'] in catNms]
cats = cats if len(supNms) == 0 else [cat for cat in cats if cat['supercategory'] in supNms]
cats = cats if len(catIds) == 0 else [cat for cat in cats if cat['id'] in catIds]
ids = [cat['id'] for cat in cats]
return ids
def getImgIds(self, imgIds=[], catIds=[]):
'''
Get img ids that satisfy given filter conditions.
:param imgIds (int array) : get imgs for given ids
:param catIds (int array) : get imgs with all given cats
:return: ids (int array) : integer array of img ids
'''
imgIds = imgIds if type(imgIds) == list else [imgIds]
catIds = catIds if type(catIds) == list else [catIds]
if len(imgIds) == len(catIds) == 0:
ids = self.imgs.keys()
else:
ids = set(imgIds)
for i, catId in enumerate(catIds):
if i == 0 and len(ids) == 0:
ids = set(self.catToImgs[catId])
else:
ids &= set(self.catToImgs[catId])
return list(ids)
def loadAnns(self, ids=[]):
"""
Load anns with the specified ids.
:param ids (int array) : integer ids specifying anns
:return: anns (object array) : loaded ann objects
"""
if type(ids) == list:
return [self.anns[id] for id in ids]
elif type(ids) == int:
return [self.anns[ids]]
def loadCats(self, ids=[]):
"""
Load cats with the specified ids.
:param ids (int array) : integer ids specifying cats
:return: cats (object array) : loaded cat objects
"""
if type(ids) == list:
return [self.cats[id] for id in ids]
elif type(ids) == int:
return [self.cats[ids]]
def loadImgs(self, ids=[]):
"""
        Load imgs with the specified ids.
:param ids (int array) : integer ids specifying img
:return: imgs (object array) : loaded img objects
"""
if type(ids) == list:
return [self.imgs[id] for id in ids]
elif type(ids) == int:
return [self.imgs[ids]]
def showAnns(self, anns):
"""
Display the specified annotations.
:param anns (array of object): annotations to display
:return: None
"""
if len(anns) == 0:
return 0
if 'segmentation' in anns[0] or 'keypoints' in anns[0]:
datasetType = 'instances'
elif 'caption' in anns[0]:
datasetType = 'captions'
else:
raise Exception('datasetType not supported')
if datasetType == 'instances':
ax = plt.gca()
ax.set_autoscale_on(False)
polygons = []
color = []
for ann in anns:
c = (np.random.random((1, 3))*0.6+0.4).tolist()[0]
if 'segmentation' in ann:
if type(ann['segmentation']) == list:
# polygon
for seg in ann['segmentation']:
poly = np.array(seg).reshape((int(len(seg)/2), 2))
polygons.append(Polygon(poly))
color.append(c)
else:
# mask
t = self.imgs[ann['image_id']]
if type(ann['segmentation']['counts']) == list:
rle = maskUtils.frPyObjects([ann['segmentation']], t['height'], t['width'])
else:
rle = [ann['segmentation']]
m = maskUtils.decode(rle)
img = np.ones( (m.shape[0], m.shape[1], 3) )
if ann['iscrowd'] == 1:
color_mask = np.array([2.0,166.0,101.0])/255
if ann['iscrowd'] == 0:
color_mask = np.random.random((1, 3)).tolist()[0]
for i in range(3):
img[:,:,i] = color_mask[i]
ax.imshow(np.dstack( (img, m*0.5) ))
if 'keypoints' in ann and type(ann['keypoints']) == list:
# turn skeleton into zero-based index
sks = np.array(self.loadCats(ann['category_id'])[0]['skeleton'])-1
kp = np.array(ann['keypoints'])
x = kp[0::3]
y = kp[1::3]
v = kp[2::3]
for sk in sks:
if np.all(v[sk]>0):
plt.plot(x[sk],y[sk], linewidth=3, color=c)
plt.plot(x[v>0], y[v>0],'o',markersize=8, markerfacecolor=c, markeredgecolor='k',markeredgewidth=2)
plt.plot(x[v>1], y[v>1],'o',markersize=8, markerfacecolor=c, markeredgecolor=c, markeredgewidth=2)
p = PatchCollection(polygons, facecolor=color, linewidths=0, alpha=0.4)
ax.add_collection(p)
p = PatchCollection(polygons, facecolor='none', edgecolors=color, linewidths=2)
ax.add_collection(p)
elif datasetType == 'captions':
for ann in anns:
print(ann['caption'])
def loadRes(self, resFile):
"""
Load result file and return a result api object.
:param resFile (str) : file name of result file
:return: res (obj) : result api object
"""
res = COCO()
res.dataset['images'] = [img for img in self.dataset['images']]
print('Loading and preparing results...')
tic = time.time()
        # guard the Python 2-only `unicode` check so the comparison also works on Python 3
        if type(resFile) == str or (PYTHON_VERSION == 2 and type(resFile) == unicode):
anns = json.load(open(resFile))
elif type(resFile) == np.ndarray:
anns = self.loadNumpyAnnotations(resFile)
else:
anns = resFile
        assert type(anns) == list, 'results is not an array of objects'
annsImgIds = [ann['image_id'] for ann in anns]
assert set(annsImgIds) == (set(annsImgIds) & set(self.getImgIds())), \
'Results do not correspond to current coco set'
if 'caption' in anns[0]:
imgIds = set([img['id'] for img in res.dataset['images']]) & set([ann['image_id'] for ann in anns])
res.dataset['images'] = [img for img in res.dataset['images'] if img['id'] in imgIds]
for id, ann in enumerate(anns):
ann['id'] = id+1
elif 'bbox' in anns[0] and not anns[0]['bbox'] == []:
res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
for id, ann in enumerate(anns):
bb = ann['bbox']
x1, x2, y1, y2 = [bb[0], bb[0]+bb[2], bb[1], bb[1]+bb[3]]
if not 'segmentation' in ann:
ann['segmentation'] = [[x1, y1, x1, y2, x2, y2, x2, y1]]
ann['area'] = bb[2]*bb[3]
ann['id'] = id+1
ann['iscrowd'] = 0
elif 'segmentation' in anns[0]:
res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
for id, ann in enumerate(anns):
# now only support compressed RLE format as segmentation results
ann['area'] = maskUtils.area(ann['segmentation'])
if not 'bbox' in ann:
ann['bbox'] = maskUtils.toBbox(ann['segmentation'])
ann['id'] = id+1
ann['iscrowd'] = 0
elif 'keypoints' in anns[0]:
res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
for id, ann in enumerate(anns):
s = ann['keypoints']
x = s[0::3]
y = s[1::3]
x0,x1,y0,y1 = np.min(x), np.max(x), np.min(y), np.max(y)
ann['area'] = (x1-x0)*(y1-y0)
ann['id'] = id + 1
ann['bbox'] = [x0,y0,x1-x0,y1-y0]
print('DONE (t={:0.2f}s)'.format(time.time()- tic))
res.dataset['annotations'] = anns
res.createIndex()
return res
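    # Illustrative (not executed) sketch of loading detection results with loadRes;
    # 'results.json' is a hypothetical placeholder for a COCO-format results file:
    #   coco_dt = coco.loadRes('results.json')
    #   det_ann_ids = coco_dt.getAnnIds(imgIds=coco.getImgIds())
    #   detections = coco_dt.loadAnns(det_ann_ids)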
def download(self, tarDir = None, imgIds = [] ):
'''
Download COCO images from mscoco.org server.
        :param tarDir (str): target directory for the downloaded COCO images
imgIds (list): images to be downloaded
:return:
'''
if tarDir is None:
print('Please specify target directory')
return -1
if len(imgIds) == 0:
imgs = self.imgs.values()
else:
imgs = self.loadImgs(imgIds)
N = len(imgs)
if not os.path.exists(tarDir):
os.makedirs(tarDir)
for i, img in enumerate(imgs):
tic = time.time()
fname = os.path.join(tarDir, img['file_name'])
if not os.path.exists(fname):
urlretrieve(img['coco_url'], fname)
print('downloaded {}/{} images (t={:0.1f}s)'.format(i, N, time.time()- tic))
def loadNumpyAnnotations(self, data):
"""
Convert result data from a numpy array [Nx7] where each row contains {imageID,x1,y1,w,h,score,class}
:param data (numpy.ndarray)
:return: annotations (python nested list)
"""
print('Converting ndarray to lists...')
assert(type(data) == np.ndarray)
print(data.shape)
assert(data.shape[1] == 7)
N = data.shape[0]
ann = []
for i in range(N):
if i % 1000000 == 0:
print('{}/{}'.format(i,N))
ann += [{
'image_id' : int(data[i, 0]),
'bbox' : [ data[i, 1], data[i, 2], data[i, 3], data[i, 4] ],
'score' : data[i, 5],
'category_id': int(data[i, 6]),
}]
return ann
def annToRLE(self, ann):
"""
Convert annotation which can be polygons, uncompressed RLE to RLE.
        :return: RLE (run-length-encoded mask) for the annotation
"""
t = self.imgs[ann['image_id']]
h, w = t['height'], t['width']
segm = ann['segmentation']
if type(segm) == list:
# polygon -- a single object might consist of multiple parts
# we merge all parts into one mask rle code
rles = maskUtils.frPyObjects(segm, h, w)
rle = maskUtils.merge(rles)
elif type(segm['counts']) == list:
# uncompressed RLE
rle = maskUtils.frPyObjects(segm, h, w)
else:
# rle
rle = ann['segmentation']
return rle
def annToMask(self, ann):
"""
Convert annotation which can be polygons, uncompressed RLE, or RLE to binary mask.
:return: binary mask (numpy 2D array)
"""
rle = self.annToRLE(ann)
m = maskUtils.decode(rle)
return m
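    # Illustrative (not executed) sketch of turning annotations into binary masks;
    # it assumes `coco` and `anns` were obtained as in the usage sketch near the top
    # of this file:
    #   masks = [coco.annToMask(ann) for ann in anns]      # list of HxW arrays
    #   combined = np.clip(np.sum(masks, axis=0), 0, 1)    # union of all instance masks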
| apache-2.0 |
AndrewRook/NFLWin | nflwin/model.py | 1 | 25005 | """Tools for creating and running the model."""
from __future__ import print_function, division
import os
import numpy as np
from scipy import integrate
from scipy import stats
import joblib
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.calibration import CalibratedClassifierCV
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.metrics import brier_score_loss
from sklearn.neighbors import KernelDensity
from sklearn.pipeline import Pipeline
from sklearn.utils.validation import NotFittedError
from . import preprocessing, utilities
class WPModel(object):
"""The object that computes win probabilities.
In addition to holding the model itself, it defines some columns names likely to be
used in the model as parameters to allow other users to more easily figure out which
columns go into the model.
Parameters
----------
copy_data : boolean (default=``True``)
Whether or not to copy data when fitting and applying the model. Running the model
in-place (``copy_data=False``) will be faster and have a smaller memory footprint,
but if not done carefully can lead to data integrity issues.
Attributes
----------
model : A Scikit-learn pipeline (or equivalent)
The actual model used to compute WP. Upon initialization it will be set to
a default model, but can be overridden by the user.
column_descriptions : dictionary
A dictionary whose keys are the names of the columns used in the model, and the values are
string descriptions of what the columns mean. Set at initialization to be the default model,
if you create your own model you'll need to update this attribute manually.
training_seasons : A list of ints, or ``None`` (default=``None``)
If the model was trained using data downloaded from nfldb, a list of the seasons
used to train the model. If nfldb was **not** used, an empty list. If no model
has been trained yet, ``None``.
training_season_types : A list of strings or ``None`` (default=``None``)
Same as ``training_seasons``, except for the portions of the seasons used in training the
model ("Preseason", "Regular", and/or "Postseason").
validation_seasons : same as ``training_seasons``, but for validation data.
validation_season_types : same as ``training_season_types``, but for validation data.
sample_probabilities : A numpy array of floats or ``None`` (default=``None``)
After the model has been validated, contains the sampled predicted probabilities used to
compute the validation statistic.
predicted_win_percents : A numpy array of floats or ``None`` (default=``None``)
After the model has been validated, contains the actual probabilities in the test
set at each probability in ``sample_probabilities``.
num_plays_used : A numpy array of floats or ``None`` (default=``None``)
After the model has been validated, contains the number of plays used to compute each
element of ``predicted_win_percents``.
model_directory : string
The directory where all models will be saved to or loaded from.
"""
model_directory = os.path.join(os.path.dirname(os.path.abspath(__file__)), "models")
_default_model_filename = "default_model.nflwin"
def __init__(self,
copy_data=True
):
self.copy_data = copy_data
self.model = self.create_default_pipeline()
self._training_seasons = None
self._training_season_types = None
self._validation_seasons = None
self._validation_season_types = None
self._sample_probabilities = None
self._predicted_win_percents = None
self._num_plays_used = None
@property
def training_seasons(self):
return self._training_seasons
@property
    def training_season_types(self):
return self._training_season_types
@property
def validation_seasons(self):
return self._validation_seasons
@property
    def validation_season_types(self):
return self._validation_season_types
@property
def sample_probabilities(self):
return self._sample_probabilities
@property
def predicted_win_percents(self):
return self._predicted_win_percents
@property
def num_plays_used(self):
return self._num_plays_used
def train_model(self,
source_data="nfldb",
training_seasons=(2009, 2010, 2011, 2012, 2013, 2014),
training_season_types=("Regular", "Postseason"),
target_colname="offense_won"):
"""Train the model.
Once a modeling pipeline is set up (either the default or something
custom-generated), historical data needs to be fed into it in order to
"fit" the model so that it can then be used to predict future results.
This method implements a simple wrapper around the core Scikit-learn functionality
which does this.
The default is to use data from the nfldb database, however that can be changed
to a simple Pandas DataFrame if desired (for instance if you wish to use data
from another source).
There is no particular output from this function, rather the parameters governing
the fit of the model are saved inside the model object itself. If you want to get an
estimate of the quality of the fit, use the ``validate_model`` method after running
this method.
Notes
-----
If you are loading in the default model, **there is no need to re-run this method**.
In fact, doing so will likely result in weird errors and could corrupt the model if you
were to try to save it back to disk.
Parameters
----------
source_data : the string ``"nfldb"`` or a Pandas DataFrame (default=``"nfldb"``)
The data to be used to train the model. If ``"nfldb"``, will query the nfldb
database for the training data (note that this requires a correctly configured
installation of nfldb's database).
training_seasons : list of ints (default=``[2009, 2010, 2011, 2012, 2013, 2014]``)
What seasons to use to train the model if getting data from the nfldb database.
If ``source_data`` is not ``"nfldb"``, this argument will be ignored.
**NOTE:** it is critical not to use all possible data in order to train the
model - some will need to be reserved for a final validation (see the
``validate_model`` method). A good dataset to reserve
for validation is the most recent one or two NFL seasons.
training_season_types : list of strings (default=``["Regular", "Postseason"]``)
If querying from the nfldb database, what parts of the seasons to use.
Options are "Preseason", "Regular", and "Postseason". If ``source_data`` is not
``"nfldb"``, this argument will be ignored.
target_colname : string or integer (default=``"offense_won"``)
The name of the target variable column.
Returns
-------
``None``
"""
self._training_seasons = []
self._training_season_types = []
if isinstance(source_data, str):
if source_data == "nfldb":
source_data = utilities.get_nfldb_play_data(season_years=training_seasons,
season_types=training_season_types)
self._training_seasons = training_seasons
self._training_season_types = training_season_types
else:
raise ValueError("WPModel: if source_data is a string, it must be 'nfldb'")
target_col = source_data[target_colname]
feature_cols = source_data.drop(target_colname, axis=1)
self.model.fit(feature_cols, target_col)
def validate_model(self,
source_data="nfldb",
validation_seasons=(2015,),
validation_season_types=("Regular", "Postseason"),
target_colname="offense_won"):
"""Validate the model.
Once a modeling pipeline is trained, a different dataset must be fed into the trained model
to validate the quality of the fit.
This method implements a simple wrapper around the core Scikit-learn functionality
which does this.
The default is to use data from the nfldb database, however that can be changed
to a simple Pandas DataFrame if desired (for instance if you wish to use data
from another source).
        This method also describes (in a currently commented-out code path) a combined p value which represents
        the confidence at which we can reject the null hypothesis that the model predicts the appropriate win
        probabilities. That number is computed by first smoothing the predicted win probabilities of both the full test set and
        the subset of plays where the offense won with a gaussian `kernel density
estimate <http://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KernelDensity.html#sklearn.neighbors.KernelDensity>`_
with standard deviation = 0.01. Once the data is smooth, ratios at each percentage point from 1% to 99% are computed (i.e.
what fraction of the time did the offense win when the model says they have a 1% chance of winning, 2% chance, etc.). Each of
these ratios should be well approximated by the binomial distribution, since they are essentially independent (not perfectly
but hopefully close enough) weighted coin flips, giving a p value. From there `Fisher's method <https://en.wikipedia.org/wiki/Fisher%27s_method>`_
is used to combine the p values into a global p value. A p value close to zero means that the model is unlikely to be
properly predicting the correct win probabilities. A p value close to one, **while not proof that the model is correct**,
means that the model is at least not inconsistent with the hypothesis that it predicts good win probabilities.
Parameters
----------
source_data : the string ``"nfldb"`` or a Pandas DataFrame (default=``"nfldb"``)
The data to be used to train the model. If ``"nfldb"``, will query the nfldb
database for the training data (note that this requires a correctly configured
installation of nfldb's database).
        validation_seasons : list of ints (default=``[2015]``)
What seasons to use to validate the model if getting data from the nfldb database.
If ``source_data`` is not ``"nfldb"``, this argument will be ignored.
**NOTE:** it is critical not to use the same data to validate the model as was used
in the fit. Generally a good data set to use for validation is one from a time
period more recent than was used to train the model. For instance, if the model was trained
on data from 2009-2014, data from the 2015 season would be a sensible choice to validate the model.
        validation_season_types : list of strings (default=``["Regular", "Postseason"]``)
If querying from the nfldb database, what parts of the seasons to use.
Options are "Preseason", "Regular", and "Postseason". If ``source_data`` is not
``"nfldb"``, this argument will be ignored.
target_colname : string or integer (default=``"offense_won"``)
The name of the target variable column.
Returns
-------
        (float, float)
            A tuple of ``(max_deviation, residual_area)``: the largest absolute difference between the
            predicted and observed win percentages, and the integrated absolute difference across all
            sampled probabilities. The combined p value described above is only produced by the
            commented-out code path at the end of this method.
Raises
------
NotFittedError
If the model hasn't been fit.
Notes
-----
Probabilities are computed between 1 and 99 percent because a single incorrect prediction at 100% or 0% automatically drives
the global p value to zero. Since the model is being smoothed this situation can occur even when there are no model predictions
at those extreme values, and therefore leads to erroneous p values.
While it seems reasonable (to me at least), I am not totally certain that this approach is entirely correct.
It's certainly sub-optimal in that you would ideally reject the null hypothesis that the model predictions
**aren't** appropriate, but that seems to be a much harder problem (and one that would need much more test
data to beat down the uncertainties involved). I'm also not sure if using Fisher's method is appropriate here,
and I wonder if it might be necessary to Monte Carlo this. I would welcome input from others on better ways to do this.
"""
if self.training_seasons is None:
raise NotFittedError("Must fit model before validating.")
self._validation_seasons = []
self._validation_season_types = []
if isinstance(source_data, str):
if source_data == "nfldb":
source_data = utilities.get_nfldb_play_data(season_years=validation_seasons,
season_types=validation_season_types)
self._validation_seasons = validation_seasons
self._validation_season_types = validation_season_types
else:
raise ValueError("WPModel: if source_data is a string, it must be 'nfldb'")
target_col = source_data[target_colname]
feature_cols = source_data.drop(target_colname, axis=1)
predicted_probabilities = self.model.predict_proba(feature_cols)[:,1]
self._sample_probabilities, self._predicted_win_percents, self._num_plays_used = (
WPModel._compute_predicted_percentages(target_col.values, predicted_probabilities))
#Compute the maximal deviation from a perfect prediction as well as the area under the
#curve of the residual between |predicted - perfect|:
max_deviation, residual_area = self._compute_prediction_statistics(self.sample_probabilities,
self.predicted_win_percents)
return max_deviation, residual_area
#Compute p-values for each where null hypothesis is that distributions are same, then combine
#them all to make sure data is not inconsistent with accurate predictions.
# combined_pvalue = self._test_distribution(self.sample_probabilities,
# self.predicted_win_percents,
# self.num_plays_used)
# return combined_pvalue
@staticmethod
def _compute_prediction_statistics(sample_probabilities, predicted_win_percents):
"""Take the KDE'd model estimates, then compute statistics.
Returns
-------
A tuple of (``max_deviation``, ``residual_area``), where ``max_deviation``
is the largest discrepancy between the model and expectation at any WP,
and ``residual_area`` is the total area under the curve of |predicted WP - expected WP|.
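As an illustration (editorial example, numbers made up): for
``sample_probabilities = [10, 50, 90]`` and ``predicted_win_percents = [12, 48, 95]``,
the absolute deviations are ``[2, 2, 5]``, so ``max_deviation`` is 5 and
``residual_area`` is the Simpson's-rule integral of those deviations over
[10, 90], i.e. 200.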
"""
abs_deviations = np.abs(predicted_win_percents - sample_probabilities)
max_deviation = np.max(abs_deviations)
residual_area = integrate.simps(abs_deviations,
sample_probabilities)
return (max_deviation, residual_area)
def predict_wp(self, plays):
"""Estimate the win probability for a set of plays.
Basically a simple wrapper around ``WPModel.model.predict_proba``,
takes in a DataFrame and then spits out an array of predicted
win probabilities.
Parameters
----------
plays : Pandas DataFrame
The input data to use to make the predictions.
Returns
-------
Numpy array, of length ``len(plays)``
Predicted probability that the offensive team in each play
will go on to win the game.
Raises
------
NotFittedError
If the model hasn't been fit.
"""
if self.training_seasons is None:
raise NotFittedError("Must fit model before predicting WP.")
return self.model.predict_proba(plays)[:,1]
def plot_validation(self, axis=None, **kwargs):
"""Plot the validation data.
Parameters
----------
axis : matplotlib.pyplot.axis object or ``None`` (default=``None``)
If provided, the validation line will be overlaid on ``axis``.
Otherwise, a new figure and axis will be generated and plotted on.
**kwargs
Arguments to ``axis.plot``.
Returns
-------
matplotlib.pyplot.axis
The axis the plot was made on.
Raises
------
NotFittedError
If the model hasn't been fit **and** validated.
"""
if self.sample_probabilities is None:
raise NotFittedError("Must validate model before plotting.")
import matplotlib.pyplot as plt
if axis is None:
axis = plt.figure().add_subplot(111)
axis.plot([0, 100], [0, 100], ls="--", lw=2, color="black")
axis.set_xlabel("Predicted WP")
axis.set_ylabel("Actual WP")
axis.plot(self.sample_probabilities,
self.predicted_win_percents,
**kwargs)
return axis
@staticmethod
def _test_distribution(sample_probabilities, predicted_win_percents, num_plays_used):
"""Based off assuming the data at each probability is a Bernoulli distribution."""
#Get the p-values:
p_values = [stats.binom_test(np.round(predicted_win_percents[i] * num_plays_used[i]),
np.round(num_plays_used[i]),
p=sample_probabilities[i]) for i in range(len(sample_probabilities))]
combined_p_value = stats.combine_pvalues(p_values)[1]
return(combined_p_value)
@staticmethod
def _compute_predicted_percentages(actual_results, predicted_win_probabilities):
"""Compute the sample percentages from a validation data set.
"""
kde_offense_won = KernelDensity(kernel='gaussian', bandwidth=0.01).fit(
(predicted_win_probabilities[(actual_results == 1)])[:, np.newaxis])
kde_total = KernelDensity(kernel='gaussian', bandwidth=0.01).fit(
predicted_win_probabilities[:, np.newaxis])
sample_probabilities = np.linspace(0.01, 0.99, 99)
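# The two KDEs estimate, at each sampled predicted-WP value, the density of
# plays the offense went on to win and the density of all plays; rescaling each
# density to its total play count and taking the ratio gives the empirical win
# percentage observed at that predicted probability.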
number_density_offense_won = np.exp(kde_offense_won.score_samples(sample_probabilities[:, np.newaxis])) * np.sum((actual_results))
number_density_total = np.exp(kde_total.score_samples(sample_probabilities[:, np.newaxis])) * len(actual_results)
number_offense_won = number_density_offense_won * np.sum(actual_results) / np.sum(number_density_offense_won)
number_total = number_density_total * len(actual_results) / np.sum(number_density_total)
predicted_win_percents = number_offense_won / number_total
return 100.*sample_probabilities, 100.*predicted_win_percents, number_total
def create_default_pipeline(self):
"""Create the default win probability estimation pipeline.
Returns
-------
Scikit-learn pipeline
The default pipeline, suitable for computing win probabilities
but by no means the best possible model.
This can be run any time a new default pipeline is required,
and either set to the ``model`` attribute or used independently.
"""
steps = []
offense_team_colname = "offense_team"
home_team_colname = "home_team"
home_score_colname = "curr_home_score"
away_score_colname = "curr_away_score"
down_colname = "down"
quarter_colname = "quarter"
time_colname = "seconds_elapsed"
yardline_colname = "yardline"
yards_to_go_colname = "yards_to_go"
self.column_descriptions = {
offense_team_colname: "Abbreviation for the offensive team",
home_team_colname: "Abbreviation for the home team",
home_score_colname: "The current score of the home team",
away_score_colname: "The current score of the away (visiting) team",
down_colname: "The current down",
yards_to_go_colname: "Yards to a first down (or the endzone)",
quarter_colname: "The quarter",
time_colname: "Seconds elapsed in the quarter",
yardline_colname: ("The yardline, given by (yards from own goalline - 50). "
"-49 is your own 1 while 49 is the opponent's 1.")
}
is_offense_home = preprocessing.ComputeIfOffenseIsHome(offense_team_colname,
home_team_colname,
copy=self.copy_data)
steps.append(("compute_offense_home", is_offense_home))
score_differential = preprocessing.CreateScoreDifferential(home_score_colname,
away_score_colname,
is_offense_home.offense_home_team_colname,
copy=self.copy_data)
steps.append(("create_score_differential", score_differential))
steps.append(("map_downs_to_int", preprocessing.MapToInt(down_colname, copy=self.copy_data)))
total_time_elapsed = preprocessing.ComputeElapsedTime(quarter_colname, time_colname, copy=self.copy_data)
steps.append(("compute_total_time_elapsed", total_time_elapsed))
steps.append(("remove_unnecessary_columns", preprocessing.CheckColumnNames(
column_names=[is_offense_home.offense_home_team_colname,
score_differential.score_differential_colname,
total_time_elapsed.total_time_colname,
yardline_colname,
yards_to_go_colname,
down_colname],
copy=self.copy_data)))
steps.append(("encode_categorical_columns", preprocessing.OneHotEncoderFromDataFrame(
categorical_feature_names=[down_colname],
copy=self.copy_data)))
search_grid = {'base_estimator__penalty': ['l1', 'l2'],
'base_estimator__C': [0.01, 0.1, 1, 10, 100]
}
base_model = LogisticRegression()
calibrated_model = CalibratedClassifierCV(base_model, cv=2, method="isotonic")
#grid_search_model = GridSearchCV(calibrated_model, search_grid,
# scoring=self._brier_loss_scorer)
steps.append(("compute_model", calibrated_model))
pipe = Pipeline(steps)
return pipe
def save_model(self, filename=None):
"""Save the WPModel instance to disk.
All models are saved to the same place, with the installed
NFLWin library (given by ``WPModel.model_directory``).
Parameters
----------
filename : string (default=None):
The filename to use for the saved model. If this parameter
is not specified, save to the default filename. Note that if a model
already exists with this filename, it will be overwritten. Note also that
this is a filename only, **not** a full path. If a full path is specified
it is likely (albeit not guaranteed) to cause errors.
Returns
-------
``None``
"""
if filename is None:
filename = self._default_model_filename
joblib.dump(self, os.path.join(self.model_directory, filename))
@classmethod
def load_model(cls, filename=None):
"""Load a saved WPModel.
Parameters
----------
Same as ``save_model``.
Returns
-------
``nflwin.WPModel`` instance.
"""
if filename is None:
filename = cls._default_model_filename
return joblib.load(os.path.join(cls.model_directory, filename))
@staticmethod
def _brier_loss_scorer(estimator, X, y):
"""Use the Brier loss to estimate model score.
For use in GridSearchCV, instead of accuracy.
"""
predicted_positive_probabilities = estimator.predict_proba(X)[:, 1]
return 1. - brier_score_loss(y, predicted_positive_probabilities)
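# ---------------------------------------------------------------------------
# Illustrative usage sketch (editorial addition, not part of the original
# module). It assumes a model has already been fit and saved via
# ``save_model``; the play values below are hypothetical and simply follow the
# column names documented in ``create_default_pipeline``.
if __name__ == "__main__":
    import pandas as pd

    model = WPModel.load_model()
    example_plays = pd.DataFrame({
        "offense_team": ["NE"], "home_team": ["NE"],
        "curr_home_score": [7], "curr_away_score": [3],
        "down": [2], "yards_to_go": [5], "quarter": ["Q2"],
        "seconds_elapsed": [300.0], "yardline": [-20],
    })
    # Predicted probability that the offense goes on to win each play:
    print(model.predict_wp(example_plays))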
| mit |
NunoEdgarGub1/scikit-learn | sklearn/linear_model/randomized_l1.py | 95 | 23365 | """
Randomized Lasso/Logistic: feature selection based on Lasso and
sparse Logistic Regression
"""
# Author: Gael Varoquaux, Alexandre Gramfort
#
# License: BSD 3 clause
import itertools
from abc import ABCMeta, abstractmethod
import warnings
import numpy as np
from scipy.sparse import issparse
from scipy import sparse
from scipy.interpolate import interp1d
from .base import center_data
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.joblib import Memory, Parallel, delayed
from ..utils import (as_float_array, check_random_state, check_X_y,
check_array, safe_mask, ConvergenceWarning)
from ..utils.validation import check_is_fitted
from .least_angle import lars_path, LassoLarsIC
from .logistic import LogisticRegression
###############################################################################
# Randomized linear model: feature selection
def _resample_model(estimator_func, X, y, scaling=.5, n_resampling=200,
n_jobs=1, verbose=False, pre_dispatch='3*n_jobs',
random_state=None, sample_fraction=.75, **params):
random_state = check_random_state(random_state)
# We are generating 1 - weights, and not weights
n_samples, n_features = X.shape
if not (0 < scaling < 1):
raise ValueError(
"'scaling' should be between 0 and 1. Got %r instead." % scaling)
scaling = 1. - scaling
scores_ = 0.0
for active_set in Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)(
delayed(estimator_func)(
X, y, weights=scaling * random_state.random_integers(
0, 1, size=(n_features,)),
mask=(random_state.rand(n_samples) < sample_fraction),
verbose=max(0, verbose - 1),
**params)
for _ in range(n_resampling)):
scores_ += active_set
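# Each feature's final score is its selection frequency: the fraction of
# resamplings in which that feature entered the model.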
scores_ /= n_resampling
return scores_
class BaseRandomizedLinearModel(six.with_metaclass(ABCMeta, BaseEstimator,
TransformerMixin)):
"""Base class to implement randomized linear models for feature selection
This implements the strategy by Meinshausen and Buhlmann:
stability selection with randomized sampling, and random re-weighting of
the penalty.
"""
@abstractmethod
def __init__(self):
pass
_center_data = staticmethod(center_data)
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, sparse matrix shape = [n_samples, n_features]
Training data.
y : array-like, shape = [n_samples]
Target values.
Returns
-------
self : object
Returns an instance of self.
"""
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], y_numeric=True)
X = as_float_array(X, copy=False)
n_samples, n_features = X.shape
X, y, X_mean, y_mean, X_std = self._center_data(X, y,
self.fit_intercept,
self.normalize)
estimator_func, params = self._make_estimator_and_params(X, y)
memory = self.memory
if isinstance(memory, six.string_types):
memory = Memory(cachedir=memory)
scores_ = memory.cache(
_resample_model, ignore=['verbose', 'n_jobs', 'pre_dispatch']
)(
estimator_func, X, y,
scaling=self.scaling, n_resampling=self.n_resampling,
n_jobs=self.n_jobs, verbose=self.verbose,
pre_dispatch=self.pre_dispatch, random_state=self.random_state,
sample_fraction=self.sample_fraction, **params)
if scores_.ndim == 1:
scores_ = scores_[:, np.newaxis]
self.all_scores_ = scores_
self.scores_ = np.max(self.all_scores_, axis=1)
return self
def _make_estimator_and_params(self, X, y):
"""Return the parameters passed to the estimator"""
raise NotImplementedError
def get_support(self, indices=False):
"""Return a mask, or list, of the features/indices selected."""
check_is_fitted(self, 'scores_')
mask = self.scores_ > self.selection_threshold
return mask if not indices else np.where(mask)[0]
# XXX: the two functions below are copy/pasted from feature_selection,
# Should we add an intermediate base class?
def transform(self, X):
"""Transform a new matrix using the selected features"""
mask = self.get_support()
X = check_array(X)
if len(mask) != X.shape[1]:
raise ValueError("X has a different shape than during fitting.")
return check_array(X)[:, safe_mask(X, mask)]
def inverse_transform(self, X):
"""Transform a new matrix using the selected features"""
support = self.get_support()
if X.ndim == 1:
X = X[None, :]
Xt = np.zeros((X.shape[0], support.size))
Xt[:, support] = X
return Xt
###############################################################################
# Randomized lasso: regression settings
def _randomized_lasso(X, y, weights, mask, alpha=1., verbose=False,
precompute=False, eps=np.finfo(np.float).eps,
max_iter=500):
X = X[safe_mask(X, mask)]
y = y[mask]
# Center X and y to avoid fitting the intercept
X -= X.mean(axis=0)
y -= y.mean()
alpha = np.atleast_1d(np.asarray(alpha, dtype=np.float))
X = (1 - weights) * X
with warnings.catch_warnings():
warnings.simplefilter('ignore', ConvergenceWarning)
alphas_, _, coef_ = lars_path(X, y,
Gram=precompute, copy_X=False,
copy_Gram=False, alpha_min=np.min(alpha),
method='lasso', verbose=verbose,
max_iter=max_iter, eps=eps)
if len(alpha) > 1:
if len(alphas_) > 1: # np.min(alpha) < alpha_min
interpolator = interp1d(alphas_[::-1], coef_[:, ::-1],
bounds_error=False, fill_value=0.)
scores = (interpolator(alpha) != 0.0)
else:
scores = np.zeros((X.shape[1], len(alpha)), dtype=np.bool)
else:
scores = coef_[:, -1] != 0.0
return scores
class RandomizedLasso(BaseRandomizedLinearModel):
"""Randomized Lasso.
Randomized Lasso works by resampling the train data and computing
a Lasso on each resampling. In short, the features selected more
often are good features. It is also known as stability selection.
Read more in the :ref:`User Guide <randomized_l1>`.
Parameters
----------
alpha : float, 'aic', or 'bic', optional
The regularization parameter alpha in the Lasso.
Warning: this is not the alpha parameter in the stability selection
article; that parameter corresponds to ``scaling`` here.
scaling : float, optional
The alpha parameter in the stability selection article used to
randomly scale the features. Should be between 0 and 1.
sample_fraction : float, optional
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
n_resampling : int, optional
Number of randomized models.
selection_threshold : float, optional
The score above which features should be selected.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
precompute : True | False | 'auto'
Whether to use a precomputed Gram matrix to speed up
calculations. If set to 'auto' let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform in the Lars algorithm.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the 'tol' parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
memory : Instance of joblib.Memory or string
Used for internal caching. By default, no caching is done.
If a string is given, it is the path to the caching directory.
Attributes
----------
scores_ : array, shape = [n_features]
Feature scores between 0 and 1.
all_scores_ : array, shape = [n_features, n_reg_parameter]
Feature scores between 0 and 1 for all values of the regularization \
parameter. The reference article suggests ``scores_`` is the max of \
``all_scores_``.
Examples
--------
>>> from sklearn.linear_model import RandomizedLasso
>>> randomized_lasso = RandomizedLasso()
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
References
----------
Stability selection
Nicolai Meinshausen, Peter Buhlmann
Journal of the Royal Statistical Society: Series B
Volume 72, Issue 4, pages 417-473, September 2010
DOI: 10.1111/j.1467-9868.2010.00740.x
See also
--------
RandomizedLogisticRegression, LogisticRegression
"""
def __init__(self, alpha='aic', scaling=.5, sample_fraction=.75,
n_resampling=200, selection_threshold=.25,
fit_intercept=True, verbose=False,
normalize=True, precompute='auto',
max_iter=500,
eps=np.finfo(np.float).eps, random_state=None,
n_jobs=1, pre_dispatch='3*n_jobs',
memory=Memory(cachedir=None, verbose=0)):
self.alpha = alpha
self.scaling = scaling
self.sample_fraction = sample_fraction
self.n_resampling = n_resampling
self.fit_intercept = fit_intercept
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.precompute = precompute
self.eps = eps
self.random_state = random_state
self.n_jobs = n_jobs
self.selection_threshold = selection_threshold
self.pre_dispatch = pre_dispatch
self.memory = memory
def _make_estimator_and_params(self, X, y):
assert self.precompute in (True, False, None, 'auto')
alpha = self.alpha
if alpha in ('aic', 'bic'):
model = LassoLarsIC(precompute=self.precompute,
criterion=self.alpha,
max_iter=self.max_iter,
eps=self.eps)
model.fit(X, y)
self.alpha_ = alpha = model.alpha_
return _randomized_lasso, dict(alpha=alpha, max_iter=self.max_iter,
eps=self.eps,
precompute=self.precompute)
###############################################################################
# Randomized logistic: classification settings
def _randomized_logistic(X, y, weights, mask, C=1., verbose=False,
fit_intercept=True, tol=1e-3):
X = X[safe_mask(X, mask)]
y = y[mask]
if issparse(X):
size = len(weights)
weight_dia = sparse.dia_matrix((1 - weights, 0), (size, size))
X = X * weight_dia
else:
X *= (1 - weights)
C = np.atleast_1d(np.asarray(C, dtype=np.float))
scores = np.zeros((X.shape[1], len(C)), dtype=np.bool)
for this_C, this_scores in zip(C, scores.T):
# XXX : would be great to do it with a warm_start ...
clf = LogisticRegression(C=this_C, tol=tol, penalty='l1', dual=False,
fit_intercept=fit_intercept)
clf.fit(X, y)
this_scores[:] = np.any(
np.abs(clf.coef_) > 10 * np.finfo(np.float).eps, axis=0)
return scores
class RandomizedLogisticRegression(BaseRandomizedLinearModel):
"""Randomized Logistic Regression
Randomized Regression works by resampling the train data and computing
a LogisticRegression on each resampling. In short, the features selected
more often are good features. It is also known as stability selection.
Read more in the :ref:`User Guide <randomized_l1>`.
Parameters
----------
C : float, optional, default=1
The regularization parameter C in the LogisticRegression.
scaling : float, optional, default=0.5
The alpha parameter in the stability selection article used to
randomly scale the features. Should be between 0 and 1.
sample_fraction : float, optional, default=0.75
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
n_resampling : int, optional, default=200
Number of randomized models.
selection_threshold : float, optional, default=0.25
The score above which features should be selected.
fit_intercept : boolean, optional, default=True
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default=True
If True, the regressors X will be normalized before regression.
tol : float, optional, default=1e-3
tolerance for stopping criteria of LogisticRegression
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
memory : Instance of joblib.Memory or string
Used for internal caching. By default, no caching is done.
If a string is given, it is the path to the caching directory.
Attributes
----------
scores_ : array, shape = [n_features]
Feature scores between 0 and 1.
all_scores_ : array, shape = [n_features, n_reg_parameter]
Feature scores between 0 and 1 for all values of the regularization \
parameter. The reference article suggests ``scores_`` is the max \
of ``all_scores_``.
Examples
--------
>>> from sklearn.linear_model import RandomizedLogisticRegression
>>> randomized_logistic = RandomizedLogisticRegression()
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
References
----------
Stability selection
Nicolai Meinshausen, Peter Buhlmann
Journal of the Royal Statistical Society: Series B
Volume 72, Issue 4, pages 417-473, September 2010
DOI: 10.1111/j.1467-9868.2010.00740.x
See also
--------
RandomizedLasso, Lasso, ElasticNet
"""
def __init__(self, C=1, scaling=.5, sample_fraction=.75,
n_resampling=200,
selection_threshold=.25, tol=1e-3,
fit_intercept=True, verbose=False,
normalize=True,
random_state=None,
n_jobs=1, pre_dispatch='3*n_jobs',
memory=Memory(cachedir=None, verbose=0)):
self.C = C
self.scaling = scaling
self.sample_fraction = sample_fraction
self.n_resampling = n_resampling
self.fit_intercept = fit_intercept
self.verbose = verbose
self.normalize = normalize
self.tol = tol
self.random_state = random_state
self.n_jobs = n_jobs
self.selection_threshold = selection_threshold
self.pre_dispatch = pre_dispatch
self.memory = memory
def _make_estimator_and_params(self, X, y):
params = dict(C=self.C, tol=self.tol,
fit_intercept=self.fit_intercept)
return _randomized_logistic, params
def _center_data(self, X, y, fit_intercept, normalize=False):
"""Center the data in X but not in y"""
X, _, Xmean, _, X_std = center_data(X, y, fit_intercept,
normalize=normalize)
return X, y, Xmean, y, X_std
###############################################################################
# Stability paths
def _lasso_stability_path(X, y, mask, weights, eps):
"Inner loop of lasso_stability_path"
X = X * weights[np.newaxis, :]
X = X[safe_mask(X, mask), :]
y = y[mask]
alpha_max = np.max(np.abs(np.dot(X.T, y))) / X.shape[0]
alpha_min = eps * alpha_max # set for early stopping in path
with warnings.catch_warnings():
warnings.simplefilter('ignore', ConvergenceWarning)
alphas, _, coefs = lars_path(X, y, method='lasso', verbose=False,
alpha_min=alpha_min)
# Scale alpha by alpha_max
alphas /= alphas[0]
# Sort alphas in ascending order
alphas = alphas[::-1]
coefs = coefs[:, ::-1]
# Get rid of the alphas that are too small
mask = alphas >= eps
# We also want to keep the first one: it should be close to the OLS
# solution
mask[0] = True
alphas = alphas[mask]
coefs = coefs[:, mask]
return alphas, coefs
def lasso_stability_path(X, y, scaling=0.5, random_state=None,
n_resampling=200, n_grid=100,
sample_fraction=0.75,
eps=4 * np.finfo(np.float).eps, n_jobs=1,
verbose=False):
"""Stabiliy path based on randomized Lasso estimates
Read more in the :ref:`User Guide <randomized_l1>`.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data.
y : array-like, shape = [n_samples]
Target values.
scaling : float, optional, default=0.5
The alpha parameter in the stability selection article used to
randomly scale the features. Should be between 0 and 1.
random_state : integer or numpy.random.RandomState, optional
The generator used to randomize the design.
n_resampling : int, optional, default=200
Number of randomized models.
n_grid : int, optional, default=100
Number of grid points. The path is linearly reinterpolated
on a grid between 0 and 1 before computing the scores.
sample_fraction : float, optional, default=0.75
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
eps : float, optional
Smallest value of alpha / alpha_max considered
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
verbose : boolean or integer, optional
Sets the verbosity amount
Returns
-------
alphas_grid : array, shape ~ [n_grid]
The grid points between 0 and 1: alpha/alpha_max
scores_path : array, shape = [n_features, n_grid]
The scores for each feature along the path.
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
"""
rng = check_random_state(random_state)
if not (0 < scaling < 1):
raise ValueError("Parameter 'scaling' should be between 0 and 1."
" Got %r instead." % scaling)
n_samples, n_features = X.shape
paths = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_lasso_stability_path)(
X, y, mask=rng.rand(n_samples) < sample_fraction,
weights=1. - scaling * rng.random_integers(0, 1,
size=(n_features,)),
eps=eps)
for k in range(n_resampling))
all_alphas = sorted(list(set(itertools.chain(*[p[0] for p in paths]))))
# Take approximately n_grid values
stride = int(max(1, int(len(all_alphas) / float(n_grid))))
all_alphas = all_alphas[::stride]
if not all_alphas[-1] == 1:
all_alphas.append(1.)
all_alphas = np.array(all_alphas)
scores_path = np.zeros((n_features, len(all_alphas)))
for alphas, coefs in paths:
if alphas[0] != 0:
alphas = np.r_[0, alphas]
coefs = np.c_[np.ones((n_features, 1)), coefs]
if alphas[-1] != all_alphas[-1]:
alphas = np.r_[alphas, all_alphas[-1]]
coefs = np.c_[coefs, np.zeros((n_features, 1))]
scores_path += (interp1d(alphas, coefs,
kind='nearest', bounds_error=False,
fill_value=0, axis=-1)(all_alphas) != 0)
scores_path /= n_resampling
return all_alphas, scores_path
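# ---------------------------------------------------------------------------
# Illustrative usage sketch (editorial addition, not part of scikit-learn):
# stability selection on synthetic data using the public API defined above.
# The dataset sizes and parameter values are arbitrary.
if __name__ == "__main__":
    from sklearn.datasets import make_regression

    X_demo, y_demo = make_regression(n_samples=200, n_features=30,
                                     n_informative=5, random_state=0)
    selector = RandomizedLasso(alpha='aic', n_resampling=50, random_state=0)
    selector.fit(X_demo, y_demo)
    print("selected feature indices:", np.where(selector.get_support())[0])

    alphas_grid, scores_path = lasso_stability_path(X_demo, y_demo,
                                                    n_resampling=50,
                                                    random_state=0)
    print("stability path shapes:", alphas_grid.shape, scores_path.shape)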
| bsd-3-clause |
ChinaQuants/zipline | zipline/finance/performance/tracker.py | 3 | 22840 | #
# Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Performance Tracking
====================
+-----------------+----------------------------------------------------+
| key | value |
+=================+====================================================+
| period_start | The beginning of the period to be tracked. datetime|
| | in pytz.utc timezone. Will always be 0:00 on the |
| | date in UTC. The fact that the time may be on the |
| | prior day in the exchange's local time is ignored |
+-----------------+----------------------------------------------------+
| period_end | The end of the period to be tracked. datetime |
| | in pytz.utc timezone. Will always be 23:59 on the |
| | date in UTC. The fact that the time may be on the |
| | next day in the exchange's local time is ignored |
+-----------------+----------------------------------------------------+
| progress | percentage of test completed |
+-----------------+----------------------------------------------------+
| capital_base | The initial capital assumed for this tracker. |
+-----------------+----------------------------------------------------+
| cumulative_perf | A dictionary representing the cumulative |
| | performance through all the events delivered to |
| | this tracker. For details see the comments on |
| | :py:meth:`PerformancePeriod.to_dict` |
+-----------------+----------------------------------------------------+
| todays_perf | A dictionary representing the cumulative |
| | performance through all the events delivered to |
| | this tracker with datetime stamps between last_open|
| | and last_close. For details see the comments on |
| | :py:meth:`PerformancePeriod.to_dict` |
| | TODO: adding this because we calculate it. May be |
| | overkill. |
+-----------------+----------------------------------------------------+
| cumulative_risk | A dictionary representing the risk metrics |
| _metrics | calculated based on the positions aggregated |
| | through all the events delivered to this tracker. |
| | For details look at the comments for |
| | :py:meth:`zipline.finance.risk.RiskMetrics.to_dict`|
+-----------------+----------------------------------------------------+
"""
from __future__ import division
import logbook
import pickle
from six import iteritems
from datetime import datetime
import numpy as np
import pandas as pd
from pandas.tseries.tools import normalize_date
import zipline.finance.risk as risk
from . period import PerformancePeriod
from zipline.utils.serialization_utils import (
VERSION_LABEL
)
from . position_tracker import PositionTracker
log = logbook.Logger('Performance')
class PerformanceTracker(object):
"""
Tracks the performance of the algorithm.
"""
def __init__(self, sim_params, env):
self.sim_params = sim_params
self.env = env
self.period_start = self.sim_params.period_start
self.period_end = self.sim_params.period_end
self.last_close = self.sim_params.last_close
first_open = self.sim_params.first_open.tz_convert(
self.env.exchange_tz
)
self.day = pd.Timestamp(datetime(first_open.year, first_open.month,
first_open.day), tz='UTC')
self.market_open, self.market_close = env.get_open_and_close(self.day)
self.total_days = self.sim_params.days_in_period
self.capital_base = self.sim_params.capital_base
self.emission_rate = sim_params.emission_rate
all_trading_days = env.trading_days
mask = ((all_trading_days >= normalize_date(self.period_start)) &
(all_trading_days <= normalize_date(self.period_end)))
self.trading_days = all_trading_days[mask]
self.dividend_frame = pd.DataFrame()
self._dividend_count = 0
self.position_tracker = PositionTracker(asset_finder=env.asset_finder)
self.perf_periods = []
if self.emission_rate == 'daily':
self.all_benchmark_returns = pd.Series(
index=self.trading_days)
self.cumulative_risk_metrics = \
risk.RiskMetricsCumulative(self.sim_params, self.env)
elif self.emission_rate == 'minute':
self.all_benchmark_returns = pd.Series(index=pd.date_range(
self.sim_params.first_open, self.sim_params.last_close,
freq='Min'))
self.cumulative_risk_metrics = \
risk.RiskMetricsCumulative(self.sim_params, self.env,
create_first_day_stats=True)
# this performance period will span the entire simulation from
# inception.
self.cumulative_performance = PerformancePeriod(
# initial cash is your capital base.
starting_cash=self.capital_base,
# the cumulative period will be calculated over the entire test.
period_open=self.period_start,
period_close=self.period_end,
# don't save the transactions for the cumulative
# period
keep_transactions=False,
keep_orders=False,
# don't serialize positions for cumulative period
serialize_positions=False,
asset_finder=self.env.asset_finder,
)
self.cumulative_performance.position_tracker = self.position_tracker
self.perf_periods.append(self.cumulative_performance)
# this performance period will span just the current market day
self.todays_performance = PerformancePeriod(
# initial cash is your capital base.
starting_cash=self.capital_base,
# the daily period will be calculated for the market day
period_open=self.market_open,
period_close=self.market_close,
keep_transactions=True,
keep_orders=True,
serialize_positions=True,
asset_finder=self.env.asset_finder,
)
self.todays_performance.position_tracker = self.position_tracker
self.perf_periods.append(self.todays_performance)
self.saved_dt = self.period_start
# one indexed so that we reach 100%
self.day_count = 0.0
self.txn_count = 0
self.account_needs_update = True
self._account = None
def __repr__(self):
return "%s(%r)" % (
self.__class__.__name__,
{'simulation parameters': self.sim_params})
@property
def progress(self):
if self.emission_rate == 'minute':
# Fake a value
return 1.0
elif self.emission_rate == 'daily':
return self.day_count / self.total_days
def set_date(self, date):
if self.emission_rate == 'minute':
self.saved_dt = date
self.todays_performance.period_close = self.saved_dt
def update_dividends(self, new_dividends):
"""
Update our dividend frame with new dividends. @new_dividends should be
a DataFrame with columns containing at least the entries in
zipline.protocol.DIVIDEND_FIELDS.
"""
# Mark each new dividend with a unique integer id. This ensures that
# we can differentiate dividends whose date/sid fields are otherwise
# identical.
new_dividends['id'] = np.arange(
self._dividend_count,
self._dividend_count + len(new_dividends),
)
self._dividend_count += len(new_dividends)
self.dividend_frame = pd.concat(
[self.dividend_frame, new_dividends]
).sort(['pay_date', 'ex_date']).set_index('id', drop=False)
def initialize_dividends_from_other(self, other):
"""
Helper for copying dividends to a new PerformanceTracker while
preserving dividend count. Useful if a simulation needs to create a
new PerformanceTracker mid-stream and wants to preserve stored dividend
info.
Note that this does not copy unpaid dividends.
"""
self.dividend_frame = other.dividend_frame
self._dividend_count = other._dividend_count
def handle_sid_removed_from_universe(self, sid):
"""
This method handles any behaviors that must occur when a SID leaves the
universe of the TradingAlgorithm.
Parameters
----------
sid : int
The sid of the Asset being removed from the universe.
"""
# Drop any dividends for the sid from the dividends frame
self.dividend_frame = self.dividend_frame[
self.dividend_frame.sid != sid
]
def update_performance(self):
# calculate performance as of last trade
for perf_period in self.perf_periods:
perf_period.calculate_performance()
def get_portfolio(self, performance_needs_update):
if performance_needs_update:
self.update_performance()
self.account_needs_update = True
return self.cumulative_performance.as_portfolio()
def get_account(self, performance_needs_update):
if performance_needs_update:
self.update_performance()
self.account_needs_update = True
if self.account_needs_update:
self._update_account()
return self._account
def _update_account(self):
self._account = self.cumulative_performance.as_account()
self.account_needs_update = False
def to_dict(self, emission_type=None):
"""
Creates a dictionary representing the state of this tracker.
Returns a dict object of the form described in header comments.
"""
# Default to the emission rate of this tracker if no type is provided
if emission_type is None:
emission_type = self.emission_rate
_dict = {
'period_start': self.period_start,
'period_end': self.period_end,
'capital_base': self.capital_base,
'cumulative_perf': self.cumulative_performance.to_dict(),
'progress': self.progress,
'cumulative_risk_metrics': self.cumulative_risk_metrics.to_dict()
}
if emission_type == 'daily':
_dict['daily_perf'] = self.todays_performance.to_dict()
elif emission_type == 'minute':
_dict['minute_perf'] = self.todays_performance.to_dict(
self.saved_dt)
else:
raise ValueError("Invalid emission type: %s" % emission_type)
return _dict
def _handle_event_price(self, event):
# updates last sale, and pays out a cash adjustment if applicable
cash_adjustment = self.position_tracker.update_last_sale(event)
if cash_adjustment != 0:
for perf_period in self.perf_periods:
perf_period.handle_cash_payment(cash_adjustment)
def process_trade(self, event):
self._handle_event_price(event)
def process_transaction(self, event):
self._handle_event_price(event)
self.txn_count += 1
self.position_tracker.execute_transaction(event)
for perf_period in self.perf_periods:
perf_period.handle_execution(event)
def process_dividend(self, dividend):
log.info("Ignoring DIVIDEND event.")
def process_split(self, event):
leftover_cash = self.position_tracker.handle_split(event)
if leftover_cash > 0:
for perf_period in self.perf_periods:
perf_period.handle_cash_payment(leftover_cash)
def process_order(self, event):
for perf_period in self.perf_periods:
perf_period.record_order(event)
def process_commission(self, event):
self.position_tracker.handle_commission(event)
for perf_period in self.perf_periods:
perf_period.handle_commission(event)
def process_benchmark(self, event):
if self.sim_params.data_frequency == 'minute' and \
self.sim_params.emission_rate == 'daily':
# Minute data benchmarks should have a timestamp of market
# close, so that calculations are triggered at the right time.
# However, risk module uses midnight as the 'day'
# marker for returns, so adjust back to midnight.
midnight = pd.tseries.tools.normalize_date(event.dt)
else:
midnight = event.dt
if midnight not in self.all_benchmark_returns.index:
raise AssertionError(
("Date %s not allocated in all_benchmark_returns. "
"Calendar seems to mismatch with benchmark. "
"Benchmark container is=%s" %
(midnight,
self.all_benchmark_returns.index)))
self.all_benchmark_returns[midnight] = event.returns
def process_close_position(self, event):
# CLOSE_POSITION events that contain prices that must be handled as
# a final trade event
if 'price' in event:
self.process_trade(event)
txn = self.position_tracker.\
maybe_create_close_position_transaction(event)
if txn:
self.process_transaction(txn)
def check_upcoming_dividends(self, next_trading_day):
"""
Check if we currently own any stocks with dividends whose ex_date is
the next trading day. Track how much we should be paid on those
dividends' pay dates.
Then check if we are owed cash/stock for any dividends whose pay date
is the next trading day. Apply all such benefits, then recalculate
performance.
"""
if len(self.dividend_frame) == 0:
# We don't currently know about any dividends for this simulation
# period, so bail.
return
# Dividends whose ex_date is the next trading day. We need to check if
# we own any of these stocks so we know to pay them out when the pay
# date comes.
ex_date_mask = (self.dividend_frame['ex_date'] == next_trading_day)
dividends_earnable = self.dividend_frame[ex_date_mask]
# Dividends whose pay date is the next trading day. If we held any of
# these stocks on midnight before the ex_date, we need to pay these out
# now.
pay_date_mask = (self.dividend_frame['pay_date'] == next_trading_day)
dividends_payable = self.dividend_frame[pay_date_mask]
position_tracker = self.position_tracker
if len(dividends_earnable):
position_tracker.earn_dividends(dividends_earnable)
if not len(dividends_payable):
return
net_cash_payment = position_tracker.pay_dividends(dividends_payable)
for period in self.perf_periods:
# notify periods to update their stats
period.handle_dividends_paid(net_cash_payment)
def check_asset_auto_closes(self, next_trading_day):
"""
Check if the position tracker currently owns any Assets with an
auto-close date that is the next trading day. Close those positions.
Parameters
----------
next_trading_day : pandas.Timestamp
The next trading day of the simulation
"""
auto_close_events = self.position_tracker.auto_close_position_events(
next_trading_day=next_trading_day
)
for event in auto_close_events:
self.process_close_position(event)
def handle_minute_close(self, dt):
"""
Handles the close of the given minute. This includes handling
market-close functions if the given minute is the end of the market
day.
Parameters
----------
dt : Timestamp
The minute that is ending
Returns
-------
(dict, dict/None)
A tuple of the minute perf packet and daily perf packet.
If the market day has not ended, the daily perf packet is None.
"""
self.update_performance()
todays_date = normalize_date(dt)
account = self.get_account(False)
bench_returns = self.all_benchmark_returns.loc[todays_date:dt]
# cumulative returns
bench_since_open = (1. + bench_returns).prod() - 1
self.cumulative_risk_metrics.update(todays_date,
self.todays_performance.returns,
bench_since_open,
account)
minute_packet = self.to_dict(emission_type='minute')
# if this is the close, update dividends for the next day.
# Return the performance tuple
if dt == self.market_close:
return (minute_packet, self._handle_market_close(todays_date))
else:
return (minute_packet, None)
def handle_market_close_daily(self):
"""
Function called after handle_data when running with daily emission
rate.
"""
self.update_performance()
completed_date = self.day
account = self.get_account(False)
# update risk metrics for cumulative performance
self.cumulative_risk_metrics.update(
completed_date,
self.todays_performance.returns,
self.all_benchmark_returns[completed_date],
account)
return self._handle_market_close(completed_date)
def _handle_market_close(self, completed_date):
# increment the day counter before we move markers forward.
self.day_count += 1.0
# Get the next trading day and, if it is past the bounds of this
# simulation, return the daily perf packet
next_trading_day = self.env.next_trading_day(completed_date)
# Check if any assets need to be auto-closed before generating today's
# perf period
if next_trading_day:
self.check_asset_auto_closes(next_trading_day=next_trading_day)
# Take a snapshot of our current performance to return to the
# browser.
daily_update = self.to_dict(emission_type='daily')
# On the last day of the test, don't create tomorrow's performance
# period. We may not be able to find the next trading day if we're at
# the end of our historical data
if self.market_close >= self.last_close:
return daily_update
# move the market day markers forward
self.market_open, self.market_close = \
self.env.next_open_and_close(self.day)
self.day = self.env.next_trading_day(self.day)
# Roll over positions to current day.
self.todays_performance.rollover()
self.todays_performance.period_open = self.market_open
self.todays_performance.period_close = self.market_close
# If the next trading day is irrelevant, then return the daily packet
if (next_trading_day is None) or (next_trading_day >= self.last_close):
return daily_update
# Check for any dividends and auto-closes, then return the daily perf
# packet
self.check_upcoming_dividends(next_trading_day=next_trading_day)
return daily_update
def handle_simulation_end(self):
"""
When the simulation is complete, run the full period risk report
and send it out on the results socket.
"""
log_msg = "Simulated {n} trading days out of {m}."
log.info(log_msg.format(n=int(self.day_count), m=self.total_days))
log.info("first open: {d}".format(
d=self.sim_params.first_open))
log.info("last close: {d}".format(
d=self.sim_params.last_close))
bms = pd.Series(
index=self.cumulative_risk_metrics.cont_index,
data=self.cumulative_risk_metrics.benchmark_returns_cont)
ars = pd.Series(
index=self.cumulative_risk_metrics.cont_index,
data=self.cumulative_risk_metrics.algorithm_returns_cont)
acl = self.cumulative_risk_metrics.algorithm_cumulative_leverages
self.risk_report = risk.RiskReport(
ars,
self.sim_params,
benchmark_returns=bms,
algorithm_leverages=acl,
env=self.env)
risk_dict = self.risk_report.to_dict()
return risk_dict
def __getstate__(self):
state_dict = \
{k: v for k, v in iteritems(self.__dict__)
if not k.startswith('_')}
state_dict['dividend_frame'] = pickle.dumps(self.dividend_frame)
state_dict['_dividend_count'] = self._dividend_count
# we already store perf periods as attributes
del state_dict['perf_periods']
STATE_VERSION = 4
state_dict[VERSION_LABEL] = STATE_VERSION
return state_dict
def __setstate__(self, state):
OLDEST_SUPPORTED_STATE = 4
version = state.pop(VERSION_LABEL)
if version < OLDEST_SUPPORTED_STATE:
raise BaseException("PerformanceTracker saved state is too old.")
self.__dict__.update(state)
# Handle the dividend frame specially
self.dividend_frame = pickle.loads(state['dividend_frame'])
# properly setup the perf periods
self.perf_periods = []
p_types = ['cumulative', 'todays', 'minute']
for p_type in p_types:
name = p_type + '_performance'
period = getattr(self, name, None)
if period is None:
continue
period._position_tracker = self.position_tracker
self.perf_periods.append(period)
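# ---------------------------------------------------------------------------
# Editorial note (rough sketch, not part of zipline): the simulation loop
# typically drives this class by constructing PerformanceTracker(sim_params,
# env), feeding each event to process_trade / process_transaction /
# process_order / process_commission / process_split / process_benchmark,
# calling handle_minute_close(dt) (minute emission) or
# handle_market_close_daily() (daily emission) at each period close to obtain
# the performance packets described in the module docstring, and calling
# handle_simulation_end() once at the end to obtain the cumulative risk report.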
| apache-2.0 |
alexsavio/scikit-learn | sklearn/metrics/cluster/tests/test_unsupervised.py | 66 | 5806 | import numpy as np
import scipy.sparse as sp
from scipy.sparse import csr_matrix
from sklearn import datasets
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_greater
from sklearn.metrics.cluster import silhouette_score
from sklearn.metrics.cluster import silhouette_samples
from sklearn.metrics import pairwise_distances
from sklearn.metrics.cluster import calinski_harabaz_score
def test_silhouette():
# Tests the Silhouette Coefficient.
dataset = datasets.load_iris()
X_dense = dataset.data
X_csr = csr_matrix(X_dense)
X_dok = sp.dok_matrix(X_dense)
X_lil = sp.lil_matrix(X_dense)
y = dataset.target
for X in [X_dense, X_csr, X_dok, X_lil]:
D = pairwise_distances(X, metric='euclidean')
# Given that the actual labels are used, we can assume that S would be
# positive.
score_precomputed = silhouette_score(D, y, metric='precomputed')
assert_greater(score_precomputed, 0)
# Test without calculating D
score_euclidean = silhouette_score(X, y, metric='euclidean')
assert_almost_equal(score_precomputed, score_euclidean)
if X is X_dense:
score_dense_without_sampling = score_precomputed
else:
assert_almost_equal(score_euclidean,
score_dense_without_sampling)
# Test with sampling
score_precomputed = silhouette_score(D, y, metric='precomputed',
sample_size=int(X.shape[0] / 2),
random_state=0)
score_euclidean = silhouette_score(X, y, metric='euclidean',
sample_size=int(X.shape[0] / 2),
random_state=0)
assert_greater(score_precomputed, 0)
assert_greater(score_euclidean, 0)
assert_almost_equal(score_euclidean, score_precomputed)
if X is X_dense:
score_dense_with_sampling = score_precomputed
else:
assert_almost_equal(score_euclidean, score_dense_with_sampling)
def test_cluster_size_1():
# Assert Silhouette Coefficient == 0 when there is 1 sample in a cluster
# (cluster 0). We also test the case where there are identical samples
# as the only members of a cluster (cluster 2). To our knowledge, this case
# is not discussed in reference material, and we choose for it a sample
# score of 1.
X = [[0.], [1.], [1.], [2.], [3.], [3.]]
labels = np.array([0, 1, 1, 1, 2, 2])
# Cluster 0: 1 sample -> score of 0 by Rousseeuw's convention
# Cluster 1: intra-cluster = [.5, .5, 1]
# inter-cluster = [1, 1, 1]
# silhouette = [.5, .5, 0]
# Cluster 2: intra-cluster = [0, 0]
# inter-cluster = [arbitrary, arbitrary]
# silhouette = [1., 1.]
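# The mean of those per-sample values, (0 + .5 + .5 + 0 + 1 + 1) / 6 = 0.5,
# is what silhouette_score returns here, so the score is finite rather than NaN.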
silhouette = silhouette_score(X, labels)
assert_false(np.isnan(silhouette))
ss = silhouette_samples(X, labels)
assert_array_equal(ss, [0, .5, .5, 0, 1, 1])
def test_correct_labelsize():
# Assert 1 < n_labels < n_samples
dataset = datasets.load_iris()
X = dataset.data
# n_labels = n_samples
y = np.arange(X.shape[0])
assert_raises_regexp(ValueError,
'Number of labels is %d\. Valid values are 2 '
'to n_samples - 1 \(inclusive\)' % len(np.unique(y)),
silhouette_score, X, y)
# n_labels = 1
y = np.zeros(X.shape[0])
assert_raises_regexp(ValueError,
'Number of labels is %d\. Valid values are 2 '
'to n_samples - 1 \(inclusive\)' % len(np.unique(y)),
silhouette_score, X, y)
def test_non_encoded_labels():
dataset = datasets.load_iris()
X = dataset.data
labels = dataset.target
assert_equal(
silhouette_score(X, labels * 2 + 10), silhouette_score(X, labels))
assert_array_equal(
silhouette_samples(X, labels * 2 + 10), silhouette_samples(X, labels))
def test_non_numpy_labels():
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
assert_equal(
silhouette_score(list(X), list(y)), silhouette_score(X, y))
def test_calinski_harabaz_score():
rng = np.random.RandomState(seed=0)
# Assert message when there is only one label
assert_raise_message(ValueError, "Number of labels is",
calinski_harabaz_score,
rng.rand(10, 2), np.zeros(10))
# Assert message when all point are in different clusters
assert_raise_message(ValueError, "Number of labels is",
calinski_harabaz_score,
rng.rand(10, 2), np.arange(10))
# Assert the value is 1. when all samples are equals
assert_equal(1., calinski_harabaz_score(np.ones((10, 2)),
[0] * 5 + [1] * 5))
# Assert the value is 0. when all the mean cluster are equal
assert_equal(0., calinski_harabaz_score([[-1, -1], [1, 1]] * 10,
[0] * 10 + [1] * 10))
# General case (with non numpy arrays)
X = ([[0, 0], [1, 1]] * 5 + [[3, 3], [4, 4]] * 5 +
[[0, 4], [1, 3]] * 5 + [[3, 1], [4, 0]] * 5)
labels = [0] * 10 + [1] * 10 + [2] * 10 + [3] * 10
assert_almost_equal(calinski_harabaz_score(X, labels),
45 * (40 - 4) / (5 * (4 - 1)))
| bsd-3-clause |
xiaoxiamii/scikit-learn | examples/semi_supervised/plot_label_propagation_versus_svm_iris.py | 286 | 2378 | """
=====================================================================
Decision boundary of label propagation versus SVM on the Iris dataset
=====================================================================
Comparison for decision boundary generated on iris dataset
between Label Propagation and SVM.
This demonstrates Label Propagation learning a good boundary
even with a small amount of labeled data.
"""
print(__doc__)
# Authors: Clay Woolam <[email protected]>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn import svm
from sklearn.semi_supervised import label_propagation
rng = np.random.RandomState(0)
iris = datasets.load_iris()
X = iris.data[:, :2]
y = iris.target
# step size in the mesh
h = .02
y_30 = np.copy(y)
y_30[rng.rand(len(y)) < 0.3] = -1
y_50 = np.copy(y)
y_50[rng.rand(len(y)) < 0.5] = -1
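# A label of -1 marks a sample as unlabeled for scikit-learn's semi-supervised
# estimators, so y_30 and y_50 hide roughly 30% and 50% of the labels.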
# we create an instance of SVM and fit our data. We do not scale our
# data since we want to plot the support vectors
ls30 = (label_propagation.LabelSpreading().fit(X, y_30),
y_30)
ls50 = (label_propagation.LabelSpreading().fit(X, y_50),
y_50)
ls100 = (label_propagation.LabelSpreading().fit(X, y), y)
rbf_svc = (svm.SVC(kernel='rbf').fit(X, y), y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# title for the plots
titles = ['Label Spreading 30% data',
'Label Spreading 50% data',
'Label Spreading 100% data',
'SVC with rbf kernel']
color_map = {-1: (1, 1, 1), 0: (0, 0, .9), 1: (1, 0, 0), 2: (.8, .6, 0)}
for i, (clf, y_train) in enumerate((ls30, ls50, ls100, rbf_svc)):
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, m_max]x[y_min, y_max].
plt.subplot(2, 2, i + 1)
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis('off')
# Plot also the training points
colors = [color_map[y] for y in y_train]
plt.scatter(X[:, 0], X[:, 1], c=colors, cmap=plt.cm.Paired)
plt.title(titles[i])
plt.text(.90, 0, "Unlabeled points are colored white")
plt.show()
| bsd-3-clause |
mozafari/verdict | tests/test_engine_pandasdb.py | 1 | 1930 | import pandas as pd
import time
from keebo.engine.pandasdb import *
db = PandasDB()
# table 1
data = [[1, 2], [1, 3]]
columns = ['col1', 'col2']
db.create_table('table1', pd.DataFrame(data, columns=columns))
# table 2
data = [[4, 5], [4, 6]]
columns = ['col3', 'col4']
db.create_table('table2', pd.DataFrame(data, columns=columns))
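# Each JSON query below is a relational plan node: an "op", a "source" (either
# a "table <name>" reference or a nested node), and an "arg" payload, which is
# how PandasDB.execute appears to interpret the plans in these tests.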
def with_elapsed(func, *args):
    start = time.time()
    result = func(*args)
    elapsed = time.time() - start
    print(f'Elapsed time: {elapsed} secs')
    return result
def test_project():
query = '''\
{
"op": "project",
"source": "table table1",
"arg": {
"alias1": "attr col1",
"alias2": "attr col2"
}
}'''
result = with_elapsed(db.execute, query)
print(result)
def test_agg():
query = '''\
{
"op": "agg",
"source": "table table1",
"arg": {
"alias1": {
"op": "sum",
"arg": [ "attr col2" ]
}
}
}'''
result = with_elapsed(db.execute, query)
print(result)
query = '''\
{
"op": "agg",
"source": {
"op": "groupby",
"source": "table table1",
"arg": [ "attr col1" ]
},
"arg": {
"alias1": {
"op": "sum",
"arg": [ "attr col2" ]
}
}
}'''
result = with_elapsed(db.execute, query)
print(result)
def test_select():
query = '''\
{
"op": "select",
"source": "table table1",
"arg": {
"op": "eq",
"arg": [ "attr col2", 2 ]
}
}'''
result = with_elapsed(db.execute, query)
print(result)
| apache-2.0 |