repo_name | path | copies | size | content | license
---|---|---|---|---|---|
bastibl/gr-ieee802-15-4 | python/qa_preamble_tagger_cc.py | 4 | 2362 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2015 Felix Wunsch, Communications Engineering Lab (CEL) / Karlsruhe Institute of Technology (KIT) <[email protected]>.
#
# This is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest
from gnuradio import blocks
import ieee802_15_4_swig as ieee802_15_4
import numpy as np
import matplotlib.pyplot as plt
class qa_preamble_tagger_cc (gr_unittest.TestCase):
def setUp (self):
self.tb = gr.top_block ()
def tearDown (self):
self.tb = None
def test_001_t (self):
# set up fg
len_preamble = 7
len_payload = 2
len_frame = len_preamble + len_payload
preamble = np.ones((len_preamble,))
payload = np.zeros((len_payload,))
payload[0] = -1
frame0 = np.concatenate((preamble, payload))
frame1 = np.concatenate((preamble, payload))
frame2 = np.concatenate((preamble, payload))
frames = np.concatenate((frame0, frame1, frame2))
data_in = np.concatenate((frames, frames))
src = blocks.vector_source_c(data_in)
tagger = ieee802_15_4.preamble_tagger_cc(len_preamble)
framer = ieee802_15_4.frame_buffer_cc(len_frame)
snk = blocks.vector_sink_c()
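        # (Added note) The chain below streams the synthetic frames through
        # the preamble tagger, which marks frame starts, and the frame
        # buffer, which re-emits whole frames; the sink records the output
        # for comparison against the input.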
self.tb.connect(src, tagger, framer, snk)
self.tb.run ()
# check data
data_out = snk.data()
# plt.plot(data_in, 'b')
# plt.plot(np.real(data_out), 'g')
# plt.grid()
# plt.ylim([-1.5, 1.5])
# plt.show()
self.assertComplexTuplesAlmostEqual(data_in[:len(data_out)], data_out)
if __name__ == '__main__':
gr_unittest.run(qa_preamble_tagger_cc)
| gpl-3.0 |
shirtsgroup/pygo | analysis/MBAR_4_state_pmf.py | 1 | 7847 | #!/usr/bin/python2.4
import numpy
from math import *
import pymbar
import timeseries
import commands
import os
import pdb
#import matplotlib.pyplot as plt
import optparse
import wham
import cPickle
import MBAR_pmfQz
def parse_args():
parser = optparse.OptionParser(description='Calculates the PMF(Q,z)')
# parser.add_option('-t','--temp', dest= 'temp', nargs = 2, type = 'float', help = 'desired temperatures')
parser.add_option('--tfile', dest='tfile', default='T.txt', help = 'simulation temperature file')
parser.add_option('--direc', dest='direc', help='directory of simulation data')
parser.add_option("-n", "--N_max", default=100000, type="int",dest="N_max", help="number of data points to read in (default: 100k)")
parser.add_option("-s", "--skip", default=1, type="int",dest="skip", help="skip every n data points")
# parser.add_option('--f_file', dest='f_file', default='', help='free energy filename, if it exists')
parser.add_option('--cpt', action="store_true", default=False, help="use checkpoint files, if they exist")
(options,args) = parser.parse_args()
return options
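# Example invocation (paths hypothetical; options as defined above):
#   python MBAR_4_state_pmf.py --direc /path/to/simdata --tfile T.txt -n 50000 -s 10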
def get_4_state_bins(bin_centers,K,N_max,indices,Q_kn,z_kn):
print 'Binning data...'
dz = 3.0
dQ = .15
bin_kn = numpy.zeros([K,N_max],numpy.int16)
bin_counts = []
for i,bin in enumerate(bin_centers):
z = bin[0]
Q = bin[1]
in_bin = (Q-dQ/2 <= Q_kn[indices]) & (Q_kn[indices] < Q+dQ/2) & (z-dz/2 <= z_kn[indices]) & (z_kn[indices] < z+dz/2)
bin_count = in_bin.sum()
indices_in_bin = (indices[0][in_bin], indices[1][in_bin])
bin_counts.append(bin_count)
bin_kn[indices_in_bin] = i
return bin_counts, bin_kn
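# Illustrative sketch added for clarity; never called by the script. The toy
# values below (one state, four samples, made-up bin centers) are hypothetical.
def _demo_binning():
    K, N_max = 1, 4
    Q_kn = numpy.array([[0.2, 0.9, 0.25, 0.95]])
    z_kn = numpy.array([[13.0, 14.0, 40.0, 41.0]])
    indices = numpy.where(numpy.ones([K, N_max], bool))
    bin_centers = [(13.5, .225), (13.5, .925), (40.5, .225), (40.5, .925)]
    # each sample falls in exactly one (z, Q) box, so bin_counts == [1, 1, 1, 1]
    return get_4_state_bins(bin_centers, K, N_max, indices, Q_kn, z_kn)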
def get_4_state_bins_alldata(Q_cutoff,z_cutoff,K,N_max,indices,Q_kn,z_kn):
print 'Binning data...'
bin_kn = numpy.zeros([K,N_max],numpy.int16)
bin_counts = []
z_cutoff_adsorbed = 17
z_cutoff_desorbed = 31
# unfolded, adsorbed
in_bin = (Q_kn[indices] <= Q_cutoff) & (z_kn[indices] <= z_cutoff_adsorbed)
bin_count = in_bin.sum()
indices_in_bin = (indices[0][in_bin], indices[1][in_bin])
bin_counts.append(bin_count)
bin_kn[indices_in_bin] = 0
# folded, adsorbed
in_bin = (Q_kn[indices] > Q_cutoff) & (z_kn[indices] <= z_cutoff_desorbed)
bin_count = in_bin.sum()
indices_in_bin = (indices[0][in_bin], indices[1][in_bin])
bin_counts.append(bin_count)
bin_kn[indices_in_bin] = 1
# unfolded, unadsorbed
in_bin = (Q_kn[indices] <= Q_cutoff) & (z_kn[indices] > z_cutoff_desorbed)
bin_count = in_bin.sum()
indices_in_bin = (indices[0][in_bin], indices[1][in_bin])
bin_counts.append(bin_count)
bin_kn[indices_in_bin] = 2
# folded, unadsorbed
in_bin = (Q_kn[indices] > Q_cutoff) & (z_kn[indices] > z_cutoff_desorbed)
bin_count = in_bin.sum()
indices_in_bin = (indices[0][in_bin], indices[1][in_bin])
bin_counts.append(bin_count)
bin_kn[indices_in_bin] = 3
return bin_counts, bin_kn
def get_cutoff(Q_kn,func_file):
data = numpy.loadtxt(func_file)
Q = data[0,:]
bins = len(Q)
assert bins == 51
z = data[1,:]
z_cutoff_index = Q_kn*(bins-1)
z_cutoff_index = z_cutoff_index.astype(int)
z_cutoff = z[z_cutoff_index]
return z_cutoff
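# (Added note) Example: with 51 tabulated points, Q = 0.5 maps to index
# int(0.5 * 50) = 25, i.e. the z cutoff at the midpoint of the Q axis.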
def get_4_state_bins_varz(Q_cutoff,K,N_max,indices,Q_kn,z_kn):
print 'Binning data...'
bin_kn = numpy.zeros([K,N_max],numpy.int16)
bin_counts = []
z_cutoff = get_cutoff(Q_kn,'/home/edz3fz/proteinmontecarlo/z_cutoff_5.txt')
# unfolded, adsorbed
in_bin = (Q_kn[indices] <= Q_cutoff) & (z_kn[indices] <= z_cutoff[indices])
bin_count = in_bin.sum()
indices_in_bin = (indices[0][in_bin], indices[1][in_bin])
bin_counts.append(bin_count)
bin_kn[indices_in_bin] = 0
# folded, adsorbed
in_bin = (Q_kn[indices] > Q_cutoff) & (z_kn[indices] <= z_cutoff[indices])
bin_count = in_bin.sum()
indices_in_bin = (indices[0][in_bin], indices[1][in_bin])
bin_counts.append(bin_count)
bin_kn[indices_in_bin] = 1
# unfolded, unadsorbed
in_bin = (Q_kn[indices] <= Q_cutoff) & (z_kn[indices] > z_cutoff[indices])
bin_count = in_bin.sum()
indices_in_bin = (indices[0][in_bin], indices[1][in_bin])
bin_counts.append(bin_count)
bin_kn[indices_in_bin] = 2
# folded, unadsorbed
in_bin = (Q_kn[indices] > Q_cutoff) & (z_kn[indices] > z_cutoff[indices])
bin_count = in_bin.sum()
indices_in_bin = (indices[0][in_bin], indices[1][in_bin])
bin_counts.append(bin_count)
bin_kn[indices_in_bin] = 3
return bin_counts, bin_kn
def main():
# read in parameters
options = parse_args()
# direc = options.direc
# tfile = options.tfile
# N_max = options.N_max
# skip = options.skip
# set constants
kB = 0.00831447/4.184
nbins_per = 25
spring_constant = 1
# get temperature and distance states
T = numpy.loadtxt(options.tfile)
# T = numpy.array([305.,320.,330.,340.]) # smaller subset for testing purposes
beta_k = 1 / (kB * T)
print 'temperature states are\n', T
Z = numpy.arange(9,31.5,1.5)
Z = numpy.concatenate((Z,numpy.array([33,36,39,42,45,48])))
# Z = numpy.array([15,16.5,18]) # smaller subset for testing purposes
print 'distance states are\n', Z
K = len(T)*len(Z)
# read in data
U_kn, Q_kn, z_kn, N_max = MBAR_pmfQz.read_data(options, K, Z, T, spring_constant)
# produce a histogram of Q and z
# plt.hist(numpy.reshape(z_kn,N_max*K),400)
# plt.savefig('%s/z_hist.png' % options.direc)
# test for statistical inefficiencies
U_kn, Q_kn, z_kn, N_k = MBAR_pmfQz.subsample(U_kn, Q_kn, z_kn, K, N_max)
# generate a list of indices of all configurations in kn-indicing
mask_kn = numpy.zeros([K,N_max], dtype=numpy.bool)
for k in range(0,K):
mask_kn[k,0:N_k[k]] = True
indices = numpy.where(mask_kn)
# compute reduced potential energy of all snapshots at all temperatures and distances
u_kln = MBAR_pmfQz.get_ukln(options, N_max, K, Z, T, spring_constant, U_kn, z_kn, N_k, beta_k)
# bin data for PMF calculation
nbins = 4
#bin_centers = [(10.5,.225),(13.5,.925),(28.5,.225),(28.5,.925)]
bin_centers = [(13.5,.225),(13.5,.925),(40.5,.225),(40.5,.925)]
Q_cutoff = 0.6
#bin_counts, bin_kn = get_4_state_bins_varz(Q_cutoff, K, N_max, indices, Q_kn, z_kn)
bin_counts, bin_kn = get_4_state_bins_alldata(Q_cutoff, 30, K, N_max, indices, Q_kn, z_kn)
print '%i bins were populated:' %nbins
for i in range(nbins):
print 'bin %5i (%6.1f, %6.1f) %12i conformations' % (i, bin_centers[i][0], bin_centers[i][1], bin_counts[i])
# use WHAM to quickly compute an initial guess of dimensionless free energies f_k
# then initialize MBAR
mbar = MBAR_pmfQz.get_mbar(options, beta_k, Z, U_kn, N_k, u_kln)
# calculate PMF at the target temperatures
target_temperatures = numpy.arange(295.,360.,5)
f_i = numpy.zeros((len(target_temperatures),nbins))
df_i = []
for i,temp in enumerate(target_temperatures):
print 'Calculating the PMF at', temp
target_beta = 1.0 / (kB * temp)
u_kn = target_beta * U_kn
f_i[i,:], d2f_i = mbar.computePMF_states(u_kn, bin_kn, nbins)
# imin = f_i.argmin()
# for j in range(nbins):
# df_i[j,i] = sqrt(d2f_i[j,imin]) # uncertainty relative to lowest free energy
df_i.append(d2f_i)
results_file = '%s/dG_raw_noint_2.pkl' % options.direc
f = file(results_file,'wb')
print 'Saving target temperatures, bin centers, f_i, df_i to %s' % results_file
cPickle.dump(target_temperatures,f)
cPickle.dump(bin_centers,f)
cPickle.dump(f_i,f)
cPickle.dump(df_i,f)
f.close()
if __name__ == '__main__':
main()
#plot in separate script by reading in dG_raw.pkl
| gpl-2.0 |
IBT-FMI/SAMRI | samri/plotting/connectivity.py | 1 | 2262 | import numpy as np
import collections
import seaborn as sns
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
from numpy import genfromtxt
from os import path
from pandas import read_csv
def fix_labels_mapping_txt(labels,
):
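    # (Added note) Expected input: the first 14 lines are header; each later
    # line starts with an integer label index, with the region name in the
    # eighth and subsequent whitespace-separated fields.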
ret = {}
for idx, item in enumerate(labels):
if(idx<14):
continue
ret[int(item.split()[0])] = " ".join(item.split()[7:])
ret = collections.OrderedDict(sorted(ret.items()))
ret = np.array(ret.items())[:,1]
return ret
def fix_labels(labels,
):
#TODO: double-check that no labels are missing or duplicated
ret = {}
for label in labels:
ret[int(label[1])] = label[0] + '_right'
ret[int(label[2])] = label[0] + '_left'
ret = collections.OrderedDict(sorted(ret.items()))
ret = np.array(ret.items())[:,1]
return ret
def plot_connectivity_matrix(correlation_matrix,
figsize = (50,50),
labels = '',
save_as = '',
):
"""Plot correlation_matrix
Parameters
----------
correlation_matrix : real matrix
Path to correlation matrix as csv file.
figsize : (int,int)
Tupel defining plotsize.
labels : str
Path to csv file containing annotations for NIFTI atlas.
"""
#TODO: formatting
labels = path.abspath(path.expanduser(labels))
# fix labels loaded from website (through templates.py)
if('itksnap' in labels):
with open(labels) as f:
content = f.readlines()
labels_np = fix_labels_mapping_txt(content)
else:
labels_np = read_csv(labels)
labels_np = fix_labels(labels_np.as_matrix(['Structure','right label','left label']))
if isinstance(correlation_matrix, str):
correlation_matrix = path.abspath(path.expanduser(correlation_matrix))
correlation_matrix = genfromtxt(correlation_matrix, delimiter=',')
plt.figure(figsize=figsize)
np.fill_diagonal(correlation_matrix, 0)
plt.imshow(correlation_matrix,
interpolation="nearest",
cmap="RdBu_r",
vmax=0.8,
vmin=-0.8,
aspect='auto'
)
x_ticks = plt.xticks(range(len(labels_np) - 1), labels_np[1:], rotation=90)
y_ticks = plt.yticks(range(len(labels_np) - 1), labels_np[1:])
plt.gca().yaxis.tick_left()
cbar = plt.colorbar()
cbar.ax.tick_params(labelsize=75)
# plt.subplots_adjust(left=.01, bottom=.3, top=.99, right=.62)
if(save_as):
plt.savefig(path.abspath(path.expanduser(save_as)))
return plt
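# Example usage (hypothetical paths):
#   plot_connectivity_matrix('~/corr_matrix.csv',
#       labels='~/atlas_labels.csv', save_as='~/connectivity.png')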
| gpl-3.0 |
adamrvfisher/TechnicalAnalysisLibrary | ModADXAdviceColumn.py | 1 | 1933 | # -*- coding: utf-8 -*-
"""
Created on Tue Apr 11 23:32:44 2017
@author: AmatVictoriaCuramIII
"""
from DefModADXAdviceGiver import DefModADXAdviceGiver
import numpy as np
import pandas as pd
from pandas_datareader import data
Aggregate = pd.read_pickle('RUTModADXAGGSHARPE065')
Aggregate = Aggregate.loc[:,~Aggregate.columns.duplicated()]
ticker = '^RUT'
#s = data.DataReader(ticker, 'yahoo', start='01/01/1994', end='01/01/2007')
s = pd.read_pickle('RUTModADXAGGAdviceColumn94_07') # this is just for testing with a graph
#s2 = pd.DataFrame({'Open':[1419.57],'High':[1423.52],'Low':[1413.68],'Close':[0],'Volume':[0],
#'Adj Close':[1417.51]},index = ['2017-04-27 00:00:00']) #interday
#s = pd.concat([s,s2],axis = 0)
#ranger = range(1,len(s)+1)
#dictionary = { r : s.loc[s.index[:r],:] for r in ranger}
#triumph = []
#for r in ranger:
# q = dictionary[r]
# result = DefModADXAdviceGiver(Aggregate, q)
# triumph.append(result)
# print(r)
# print(result)
#TheAdvice = pd.Series(triumph, index=s.index)
#s['Advice'] = TheAdvice
s['LogRet'] = np.log(s['Adj Close']/s['Adj Close'].shift(1))
s['LogRet'] = s['LogRet'].fillna(0)
s['Regime'] = np.where(s['Advice'] > -1.969, 1, 0)
s['Regime'] = np.where(s['Advice'] < -1.562601, -1, s['Regime'])
s['Strategy'] = (s['Regime']).shift(1)*s['LogRet']
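# (Added note) The shift(1) above sets each day's position from the previous
# day's advice, avoiding look-ahead bias.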
s['Strategy'] = s['Strategy'].fillna(0)
endgains = 1
endreturns = 1
s['sharpe'] = (s['Strategy'].mean()-abs(s['LogRet'].mean()))/s['Strategy'].std()
for g in s['LogRet']:
slate = endreturns * (1+-g)
endreturns = slate
for h in s['Strategy']:
otherslate = endgains * (1+h)
endgains = otherslate
#For increased accuracy, remove first window values from TheAdvice
s[['LogRet', 'Strategy']].cumsum().apply(np.exp).plot(grid = True,
figsize = (8,5))
print(s)
print(s['sharpe'][-1])
print(endreturns)
print(endgains)
| apache-2.0 |
zijistark/ck2utils | esc/eu4dev_map.py | 1 | 6805 | #!/usr/bin/env python3
from collections import defaultdict
from pathlib import Path
import re
import sys
import matplotlib.cm
import matplotlib.colors
import numpy as np
from PIL import Image, ImageFont, ImageDraw
from ck2parser import rootpath, csv_rows, SimpleParser
from localpaths import eu4dir
from print_time import print_time
@print_time
def main():
parser = SimpleParser()
parser.basedir = eu4dir
if len(sys.argv) > 1:
parser.moddirs.append(Path(sys.argv[1]))
rgb_number_map = {}
default_tree = parser.parse_file('map/default.map')
provinces_path = parser.file('map/' + default_tree['provinces'].val)
climate_path = parser.file('map/' + default_tree['climate'].val)
max_provinces = default_tree['max_provinces'].val
provs_to_label = set()
colors = {
'sea': np.uint8((51, 67, 85)),
'desert': np.uint8((36, 36, 36))
}
prov_color_lut = np.empty(max_provinces, '3u1')
for row in csv_rows(parser.file('map/' + default_tree['definitions'].val)):
try:
number = int(row[0])
except ValueError:
continue
if number < max_provinces:
rgb = tuple(np.uint8(row[1:4]))
rgb_number_map[rgb] = np.uint16(number)
provs_to_label.add(number)
province_values = {}
for path in parser.files('history/provinces/*'):
match = re.match(r'\d+', path.stem)
if not match:
continue
number = int(match.group())
if number >= max_provinces:
continue
if number in province_values:
print('extra province history {}'.format(path), file=sys.stderr)
continue
properties = {
'base_tax': 0,
'base_production': 0,
'base_manpower': 0,
}
history = defaultdict(list)
for n, v in parser.parse_file(path):
if n.val in properties:
properties[n.val] = v.val
elif isinstance(n.val, tuple) and n.val <= (1444, 11, 11):
history[n.val].extend((n2.val, v2.val) for n2, v2 in v
if n2.val in properties)
properties.update(p2 for _, v in sorted(history.items()) for p2 in v)
province_values[number] = properties
for n in parser.parse_file(climate_path)['impassable']:
prov_color_lut[int(n.val)] = colors['desert']
provs_to_label.discard(int(n.val))
for n in default_tree['sea_starts']:
prov_color_lut[int(n.val)] = colors['sea']
provs_to_label.discard(int(n.val))
for n in default_tree['lakes']:
prov_color_lut[int(n.val)] = colors['sea']
provs_to_label.discard(int(n.val))
for n in default_tree['only_used_for_random']:
provs_to_label.discard(int(n.val))
image = Image.open(str(provinces_path))
a = np.array(image).view('u1,u1,u1')[..., 0]
b = np.vectorize(lambda x: rgb_number_map[tuple(x)], otypes=[np.uint16])(a)
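    # (Added note) The 'u1,u1,u1' structured view collapses the RGB axis so
    # each pixel becomes one record; vectorize then maps every (r, g, b)
    # tuple to its province number via rgb_number_map.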
font = ImageFont.truetype(str(rootpath / 'ck2utils/esc/NANOTYPE.ttf'), 16)
mod = parser.moddirs[0].name.lower() + '_' if parser.moddirs else ''
borders_path = rootpath / (mod + 'eu4borderlayer.png')
borders = Image.open(str(borders_path))
for value_func, name in [(lambda x: sum(x.values()), ''),
(lambda x: x['base_tax'], 'tax'),
(lambda x: x['base_production'], 'prod'),
(lambda x: x['base_manpower'], 'man')]:
province_value = {n: value_func(province_values[n])
for n in provs_to_label}
vmin, vmax = 0, max(province_value.values())
cmap = matplotlib.cm.get_cmap('plasma')
norm = matplotlib.colors.Normalize(vmin, vmax * 4 / 3)
colormap = matplotlib.cm.ScalarMappable(cmap=cmap, norm=norm)
for number, value in province_value.items():
prov_color_lut[number] = colormap.to_rgba(value, bytes=True)[:3]
txt = Image.new('RGBA', image.size, (0, 0, 0, 0))
lines = Image.new('RGBA', image.size, (0, 0, 0, 0))
draw_txt = ImageDraw.Draw(txt)
draw_lines = ImageDraw.Draw(lines)
maxlen = len(str(vmax))
e = {(n * 4 - 1, 5): np.ones_like(b, bool)
for n in range(1, maxlen + 1)}
for number in sorted(provs_to_label):
print('\r' + str(number), end='', file=sys.stderr)
value = province_value[number]
size = len(str(value)) * 4 - 1, 5
c = np.nonzero(b == number)
if len(c[0]) == 0:
continue
center = np.mean(c[1]), np.mean(c[0])
pos = [int(round(max(0, min(center[0] - size[0] / 2,
image.width - size[0])))),
int(round(max(0, min(center[1] - size[1] / 2,
image.height - size[1]))))]
pos[2:] = pos[0] + size[0], pos[1] + size[1]
if not e[size][pos[1], pos[0]]:
x1, x2 = max(0, pos[0] - 1), min(pos[0] + 2, image.width)
y1, y2 = max(0, pos[1] - 1), min(pos[1] + 2, image.height)
if not np.any(e[size][y1:y2, x1:x2]):
x1, y1, (x2, y2) = 0, 0, image.size
f = np.nonzero(e[size][y1:y2, x1:x2])
g = (f[0] - pos[1]) ** 2 + (f[1] - pos[0]) ** 2
pos[:2] = np.transpose(f)[np.argmin(g)][::-1] + [x1, y1]
pos[2:] = pos[0] + size[0], pos[1] + size[1]
draw_txt.text((pos[0], pos[1] - 6), str(value),
fill=(255, 255, 255, 255), font=font)
for size2 in e:
rows = slice(max(pos[1] - size2[1] - 1, 0), pos[3] + 2)
cols = slice(max(pos[0] - size2[0] - 1, 0), pos[2] + 2)
e[size2][rows, cols] = False
x = int(round(pos[0] + size[0] / 2))
y = int(round(pos[1] + size[1] / 2))
if b[y, x] != number:
d = (c[0] - y) ** 2 + (c[1] - x) ** 2
dest = tuple(np.transpose(c)[np.argmin(d)][::-1])
start = (max(pos[0] - 1, min(dest[0], pos[2])),
max(pos[1] - 1, min(dest[1], pos[3])))
if start != dest:
print('\rline drawn for {}'.format(number),
file=sys.stderr)
draw_lines.line([start, dest], fill=(176, 176, 176))
print('', file=sys.stderr)
out = Image.fromarray(prov_color_lut[b])
out.paste(borders, mask=borders)
out.paste(lines, mask=lines)
out.paste(txt, mask=txt)
out_path = rootpath / (mod + 'eu4dev{}_map.png'.format(name))
out.save(str(out_path))
if __name__ == '__main__':
main()
| gpl-2.0 |
thunderhoser/GewitterGefahr | gewittergefahr/interpretation_paper_2019/make_saliency_figure.py | 1 | 18459 | """Makes figure with saliency maps."""
import os
import argparse
import numpy
import matplotlib
matplotlib.use('agg')
from matplotlib import pyplot
from PIL import Image
from gewittergefahr.gg_utils import general_utils
from gewittergefahr.gg_utils import radar_utils
from gewittergefahr.gg_utils import file_system_utils
from gewittergefahr.gg_utils import error_checking
from gewittergefahr.deep_learning import cnn
from gewittergefahr.deep_learning import saliency_maps
from gewittergefahr.deep_learning import training_validation_io as trainval_io
from gewittergefahr.plotting import plotting_utils
from gewittergefahr.plotting import saliency_plotting
from gewittergefahr.plotting import imagemagick_utils
from gewittergefahr.scripts import plot_input_examples as plot_examples
RADAR_HEIGHTS_M_AGL = numpy.array([2000, 6000, 10000], dtype=int)
RADAR_FIELD_NAMES = [
radar_utils.REFL_NAME, radar_utils.VORTICITY_NAME,
radar_utils.SPECTRUM_WIDTH_NAME
]
MAX_COLOUR_PERCENTILE = 99.
COLOUR_BAR_LENGTH = 0.25
PANEL_NAME_FONT_SIZE = 30
COLOUR_BAR_FONT_SIZE = 25
CONVERT_EXE_NAME = '/usr/bin/convert'
TITLE_FONT_SIZE = 150
TITLE_FONT_NAME = 'DejaVu-Sans-Bold'
FIGURE_RESOLUTION_DPI = 300
CONCAT_FIGURE_SIZE_PX = int(1e7)
INPUT_FILES_ARG_NAME = 'input_saliency_file_names'
COMPOSITE_NAMES_ARG_NAME = 'composite_names'
COLOUR_MAP_ARG_NAME = 'colour_map_name'
MAX_VALUES_ARG_NAME = 'max_colour_values'
HALF_NUM_CONTOURS_ARG_NAME = 'half_num_contours'
SMOOTHING_RADIUS_ARG_NAME = 'smoothing_radius_grid_cells'
OUTPUT_DIR_ARG_NAME = 'output_dir_name'
INPUT_FILES_HELP_STRING = (
'List of saliency files (each will be read by `saliency.read_file`).'
)
COMPOSITE_NAMES_HELP_STRING = (
'List of composite names (one for each saliency file). This list must be '
'space-separated, but after reading the list, underscores within each item '
'will be replaced by spaces.'
)
COLOUR_MAP_HELP_STRING = (
'Colour scheme for saliency. Must be accepted by '
'`matplotlib.pyplot.get_cmap`.'
)
MAX_VALUES_HELP_STRING = (
'Max absolute saliency in each colour scheme (one per file). Use -1 to'
' let the max value be determined on the fly.'
)
HALF_NUM_CONTOURS_HELP_STRING = (
'Number of saliency contours on either side of zero (positive and '
'negative).'
)
SMOOTHING_RADIUS_HELP_STRING = (
'e-folding radius for Gaussian smoother (num grid cells). If you do not '
'want to smooth saliency maps, make this negative.'
)
OUTPUT_DIR_HELP_STRING = (
'Name of output directory (figures will be saved here).'
)
INPUT_ARG_PARSER = argparse.ArgumentParser()
INPUT_ARG_PARSER.add_argument(
'--' + INPUT_FILES_ARG_NAME, type=str, nargs='+', required=True,
help=INPUT_FILES_HELP_STRING
)
INPUT_ARG_PARSER.add_argument(
'--' + COMPOSITE_NAMES_ARG_NAME, type=str, nargs='+', required=True,
help=COMPOSITE_NAMES_HELP_STRING
)
INPUT_ARG_PARSER.add_argument(
'--' + COLOUR_MAP_ARG_NAME, type=str, required=False, default='binary',
help=COLOUR_MAP_HELP_STRING
)
INPUT_ARG_PARSER.add_argument(
'--' + MAX_VALUES_ARG_NAME, type=float, nargs='+', required=True,
help=MAX_VALUES_HELP_STRING
)
INPUT_ARG_PARSER.add_argument(
'--' + HALF_NUM_CONTOURS_ARG_NAME, type=int, required=False,
default=10, help=HALF_NUM_CONTOURS_HELP_STRING
)
INPUT_ARG_PARSER.add_argument(
'--' + SMOOTHING_RADIUS_ARG_NAME, type=float, required=False,
default=1., help=SMOOTHING_RADIUS_HELP_STRING
)
INPUT_ARG_PARSER.add_argument(
'--' + OUTPUT_DIR_ARG_NAME, type=str, required=True,
help=OUTPUT_DIR_HELP_STRING
)
def _read_one_composite(saliency_file_name, smoothing_radius_grid_cells):
"""Reads saliency map for one composite.
E = number of examples
M = number of rows in grid
N = number of columns in grid
H = number of heights in grid
F = number of radar fields
:param saliency_file_name: Path to input file (will be read by
`saliency.read_file`).
:param smoothing_radius_grid_cells: Radius for Gaussian smoother, used only
for saliency map.
:return: mean_radar_matrix: E-by-M-by-N-by-H-by-F numpy array with mean
radar fields.
:return: mean_saliency_matrix: E-by-M-by-N-by-H-by-F numpy array with mean
saliency fields.
:return: model_metadata_dict: Dictionary returned by
`cnn.read_model_metadata`.
"""
print('Reading data from: "{0:s}"...'.format(saliency_file_name))
saliency_dict = saliency_maps.read_file(saliency_file_name)[0]
mean_radar_matrix = numpy.expand_dims(
saliency_dict[saliency_maps.MEAN_PREDICTOR_MATRICES_KEY][0], axis=0
)
mean_saliency_matrix = numpy.expand_dims(
saliency_dict[saliency_maps.MEAN_SALIENCY_MATRICES_KEY][0], axis=0
)
if smoothing_radius_grid_cells is not None:
print((
'Smoothing saliency maps with Gaussian filter (e-folding radius of '
'{0:.1f} grid cells)...'
).format(
smoothing_radius_grid_cells
))
num_fields = mean_radar_matrix.shape[-1]
for k in range(num_fields):
mean_saliency_matrix[0, ..., k] = (
general_utils.apply_gaussian_filter(
input_matrix=mean_saliency_matrix[0, ..., k],
e_folding_radius_grid_cells=smoothing_radius_grid_cells
)
)
model_file_name = saliency_dict[saliency_maps.MODEL_FILE_KEY]
model_metafile_name = cnn.find_metafile(model_file_name)
print('Reading CNN metadata from: "{0:s}"...'.format(model_metafile_name))
model_metadata_dict = cnn.read_model_metadata(model_metafile_name)
training_option_dict = model_metadata_dict[cnn.TRAINING_OPTION_DICT_KEY]
good_indices = numpy.array([
numpy.where(
training_option_dict[trainval_io.RADAR_HEIGHTS_KEY] == h
)[0][0]
for h in RADAR_HEIGHTS_M_AGL
], dtype=int)
mean_radar_matrix = mean_radar_matrix[..., good_indices, :]
mean_saliency_matrix = mean_saliency_matrix[..., good_indices, :]
good_indices = numpy.array([
training_option_dict[trainval_io.RADAR_FIELDS_KEY].index(f)
for f in RADAR_FIELD_NAMES
], dtype=int)
mean_radar_matrix = mean_radar_matrix[..., good_indices]
mean_saliency_matrix = mean_saliency_matrix[..., good_indices]
training_option_dict[trainval_io.RADAR_HEIGHTS_KEY] = RADAR_HEIGHTS_M_AGL
training_option_dict[trainval_io.RADAR_FIELDS_KEY] = RADAR_FIELD_NAMES
training_option_dict[trainval_io.SOUNDING_FIELDS_KEY] = None
model_metadata_dict[cnn.TRAINING_OPTION_DICT_KEY] = training_option_dict
return mean_radar_matrix, mean_saliency_matrix, model_metadata_dict
def _overlay_text(
image_file_name, x_offset_from_center_px, y_offset_from_top_px,
text_string):
"""Overlays text on image.
:param image_file_name: Path to image file.
:param x_offset_from_center_px: Center-relative x-coordinate (pixels).
:param y_offset_from_top_px: Top-relative y-coordinate (pixels).
:param text_string: String to overlay.
:raises: ValueError: if ImageMagick command (which is ultimately a Unix
command) fails.
"""
command_string = (
'"{0:s}" "{1:s}" -gravity north -pointsize {2:d} -font "{3:s}" '
'-fill "rgb(0, 0, 0)" -annotate {4:+d}{5:+d} "{6:s}" "{1:s}"'
).format(
CONVERT_EXE_NAME, image_file_name, TITLE_FONT_SIZE, TITLE_FONT_NAME,
x_offset_from_center_px, y_offset_from_top_px, text_string
)
exit_code = os.system(command_string)
if exit_code == 0:
return
raise ValueError(imagemagick_utils.ERROR_STRING)
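# (Added note) For illustration, the assembled command resembles (file name
# hypothetical):
#   "/usr/bin/convert" "fig.jpg" -gravity north -pointsize 150
#       -font "DejaVu-Sans-Bold" -fill "rgb(0, 0, 0)"
#       -annotate +0+0 "(a) composite" "fig.jpg"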
def _plot_one_composite(
saliency_file_name, composite_name_abbrev, composite_name_verbose,
colour_map_object, max_colour_value, half_num_contours,
smoothing_radius_grid_cells, output_dir_name):
"""Plots saliency map for one composite.
:param saliency_file_name: Path to input file (will be read by
`saliency.read_file`).
:param composite_name_abbrev: Abbrev composite name (will be used in file
names).
:param composite_name_verbose: Verbose composite name (will be used in
figure title).
:param colour_map_object: See documentation at top of file.
:param max_colour_value: Same.
:param half_num_contours: Same.
:param smoothing_radius_grid_cells: Same.
:param output_dir_name: Name of output directory (figures will be saved
here).
:return: main_figure_file_name: Path to main image file created by this
method.
:return: max_colour_value: See input doc.
"""
mean_radar_matrix, mean_saliency_matrix, model_metadata_dict = (
_read_one_composite(
saliency_file_name=saliency_file_name,
smoothing_radius_grid_cells=smoothing_radius_grid_cells)
)
if numpy.isnan(max_colour_value):
max_colour_value = numpy.percentile(
mean_saliency_matrix, MAX_COLOUR_PERCENTILE
)
training_option_dict = model_metadata_dict[cnn.TRAINING_OPTION_DICT_KEY]
field_names = training_option_dict[trainval_io.RADAR_FIELDS_KEY]
num_fields = mean_radar_matrix.shape[-1]
num_heights = mean_radar_matrix.shape[-2]
handle_dict = plot_examples.plot_one_example(
list_of_predictor_matrices=[mean_radar_matrix],
model_metadata_dict=model_metadata_dict, pmm_flag=True,
allow_whitespace=True, plot_panel_names=True,
panel_name_font_size=PANEL_NAME_FONT_SIZE,
add_titles=False, label_colour_bars=True,
colour_bar_length=COLOUR_BAR_LENGTH,
colour_bar_font_size=COLOUR_BAR_FONT_SIZE,
num_panel_rows=num_heights)
figure_objects = handle_dict[plot_examples.RADAR_FIGURES_KEY]
axes_object_matrices = handle_dict[plot_examples.RADAR_AXES_KEY]
for k in range(num_fields):
this_saliency_matrix = mean_saliency_matrix[0, ..., k]
saliency_plotting.plot_many_2d_grids_with_contours(
saliency_matrix_3d=numpy.flip(this_saliency_matrix, axis=0),
axes_object_matrix=axes_object_matrices[k],
colour_map_object=colour_map_object,
max_absolute_contour_level=max_colour_value,
contour_interval=max_colour_value / half_num_contours
)
panel_file_names = [None] * num_fields
for k in range(num_fields):
panel_file_names[k] = '{0:s}/{1:s}_{2:s}.jpg'.format(
output_dir_name, composite_name_abbrev,
field_names[k].replace('_', '-')
)
print('Saving figure to: "{0:s}"...'.format(panel_file_names[k]))
figure_objects[k].savefig(
panel_file_names[k], dpi=FIGURE_RESOLUTION_DPI,
pad_inches=0, bbox_inches='tight'
)
pyplot.close(figure_objects[k])
main_figure_file_name = '{0:s}/{1:s}_saliency.jpg'.format(
output_dir_name, composite_name_abbrev)
print('Concatenating panels to: "{0:s}"...'.format(main_figure_file_name))
imagemagick_utils.concatenate_images(
input_file_names=panel_file_names,
output_file_name=main_figure_file_name,
num_panel_rows=1, num_panel_columns=num_fields, border_width_pixels=50)
imagemagick_utils.resize_image(
input_file_name=main_figure_file_name,
output_file_name=main_figure_file_name,
output_size_pixels=CONCAT_FIGURE_SIZE_PX)
imagemagick_utils.trim_whitespace(
input_file_name=main_figure_file_name,
output_file_name=main_figure_file_name,
border_width_pixels=TITLE_FONT_SIZE + 25)
_overlay_text(
image_file_name=main_figure_file_name,
x_offset_from_center_px=0, y_offset_from_top_px=0,
text_string=composite_name_verbose)
imagemagick_utils.trim_whitespace(
input_file_name=main_figure_file_name,
output_file_name=main_figure_file_name,
border_width_pixels=10)
return main_figure_file_name, max_colour_value
def _add_colour_bar(figure_file_name, colour_map_object, max_colour_value,
temporary_dir_name):
"""Adds colour bar to saved image file.
:param figure_file_name: Path to saved image file. Colour bar will be added
to this image.
:param colour_map_object: Colour scheme (instance of `matplotlib.pyplot.cm`
or similar).
:param max_colour_value: Max value in colour scheme.
:param temporary_dir_name: Name of temporary output directory.
"""
this_image_matrix = Image.open(figure_file_name)
figure_width_px, figure_height_px = this_image_matrix.size
figure_width_inches = float(figure_width_px) / FIGURE_RESOLUTION_DPI
figure_height_inches = float(figure_height_px) / FIGURE_RESOLUTION_DPI
extra_figure_object, extra_axes_object = pyplot.subplots(
1, 1, figsize=(figure_width_inches, figure_height_inches)
)
extra_axes_object.axis('off')
dummy_values = numpy.array([0., max_colour_value])
colour_bar_object = plotting_utils.plot_linear_colour_bar(
axes_object_or_matrix=extra_axes_object, data_matrix=dummy_values,
colour_map_object=colour_map_object,
min_value=0., max_value=max_colour_value,
orientation_string='vertical', fraction_of_axis_length=1.25,
extend_min=False, extend_max=True, font_size=COLOUR_BAR_FONT_SIZE,
aspect_ratio=50.
)
tick_values = colour_bar_object.get_ticks()
if max_colour_value <= 0.005:
tick_strings = ['{0:.4f}'.format(v) for v in tick_values]
elif max_colour_value <= 0.05:
tick_strings = ['{0:.3f}'.format(v) for v in tick_values]
else:
tick_strings = ['{0:.2f}'.format(v) for v in tick_values]
colour_bar_object.set_ticks(tick_values)
colour_bar_object.set_ticklabels(tick_strings)
extra_file_name = '{0:s}/saliency_colour-bar.jpg'.format(temporary_dir_name)
print('Saving colour bar to: "{0:s}"...'.format(extra_file_name))
extra_figure_object.savefig(
extra_file_name, dpi=FIGURE_RESOLUTION_DPI,
pad_inches=0, bbox_inches='tight'
)
pyplot.close(extra_figure_object)
print('Concatenating colour bar to: "{0:s}"...'.format(figure_file_name))
imagemagick_utils.concatenate_images(
input_file_names=[figure_file_name, extra_file_name],
output_file_name=figure_file_name,
num_panel_rows=1, num_panel_columns=2,
extra_args_string='-gravity Center'
)
os.remove(extra_file_name)
imagemagick_utils.trim_whitespace(
input_file_name=figure_file_name, output_file_name=figure_file_name
)
def _run(saliency_file_names, composite_names, colour_map_name,
max_colour_values, half_num_contours, smoothing_radius_grid_cells,
output_dir_name):
"""Makes figure with saliency maps for MYRORSS model.
This is effectively the main method.
:param saliency_file_names: See documentation at top of file.
:param composite_names: Same.
:param colour_map_name: Same.
:param max_colour_values: Same.
:param half_num_contours: Same.
:param smoothing_radius_grid_cells: Same.
:param output_dir_name: Same.
"""
# Process input args.
file_system_utils.mkdir_recursive_if_necessary(
directory_name=output_dir_name
)
if smoothing_radius_grid_cells <= 0:
smoothing_radius_grid_cells = None
colour_map_object = pyplot.cm.get_cmap(colour_map_name)
error_checking.assert_is_geq(half_num_contours, 5)
num_composites = len(saliency_file_names)
expected_dim = numpy.array([num_composites], dtype=int)
error_checking.assert_is_numpy_array(
numpy.array(composite_names), exact_dimensions=expected_dim
)
max_colour_values[max_colour_values <= 0] = numpy.nan
error_checking.assert_is_numpy_array(
max_colour_values, exact_dimensions=expected_dim
)
composite_names_abbrev = [
n.replace('_', '-').lower() for n in composite_names
]
composite_names_verbose = [
'({0:s}) {1:s}'.format(
chr(ord('a') + i), composite_names[i].replace('_', ' ')
)
for i in range(num_composites)
]
panel_file_names = [None] * num_composites
for i in range(num_composites):
panel_file_names[i], max_colour_values[i] = _plot_one_composite(
saliency_file_name=saliency_file_names[i],
composite_name_abbrev=composite_names_abbrev[i],
composite_name_verbose=composite_names_verbose[i],
colour_map_object=colour_map_object,
max_colour_value=max_colour_values[i],
half_num_contours=half_num_contours,
smoothing_radius_grid_cells=smoothing_radius_grid_cells,
output_dir_name=output_dir_name
)
_add_colour_bar(
figure_file_name=panel_file_names[i],
colour_map_object=colour_map_object,
max_colour_value=max_colour_values[i],
temporary_dir_name=output_dir_name
)
print('\n')
figure_file_name = '{0:s}/saliency_concat.jpg'.format(output_dir_name)
print('Concatenating panels to: "{0:s}"...'.format(figure_file_name))
num_panel_rows = int(numpy.floor(
numpy.sqrt(num_composites)
))
num_panel_columns = int(numpy.ceil(
float(num_composites) / num_panel_rows
))
imagemagick_utils.concatenate_images(
input_file_names=panel_file_names,
output_file_name=figure_file_name, border_width_pixels=25,
num_panel_rows=num_panel_rows, num_panel_columns=num_panel_columns
)
imagemagick_utils.trim_whitespace(
input_file_name=figure_file_name, output_file_name=figure_file_name,
border_width_pixels=10
)
if __name__ == '__main__':
INPUT_ARG_OBJECT = INPUT_ARG_PARSER.parse_args()
_run(
saliency_file_names=getattr(INPUT_ARG_OBJECT, INPUT_FILES_ARG_NAME),
composite_names=getattr(INPUT_ARG_OBJECT, COMPOSITE_NAMES_ARG_NAME),
colour_map_name=getattr(INPUT_ARG_OBJECT, COLOUR_MAP_ARG_NAME),
max_colour_values=numpy.array(
getattr(INPUT_ARG_OBJECT, MAX_VALUES_ARG_NAME), dtype=float
),
half_num_contours=getattr(INPUT_ARG_OBJECT, HALF_NUM_CONTOURS_ARG_NAME),
smoothing_radius_grid_cells=getattr(
INPUT_ARG_OBJECT, SMOOTHING_RADIUS_ARG_NAME
),
output_dir_name=getattr(INPUT_ARG_OBJECT, OUTPUT_DIR_ARG_NAME)
)
| mit |
matrogers/pylearn2 | pylearn2/train_extensions/tests/test_wmape_channel.py | 32 | 2531 | """
Tests for WMAPE.
"""
from pylearn2.config import yaml_parse
from pylearn2.testing.skip import skip_if_no_sklearn
from theano.compile import function
import numpy as np
from numpy.testing import assert_allclose
def test_wmape():
"""Test WMapeChannel."""
skip_if_no_sklearn()
trainer = yaml_parse.load(test_yaml)
trainer.main_loop()
X = trainer.model.get_input_space().make_theano_batch()
Y = trainer.model.fprop(X)
f = function([X], Y, allow_input_downcast=True)
y_hat = f(trainer.dataset.X)
wmape_num_exp = abs(trainer.dataset.y - y_hat).sum()
wmape_den_exp = abs(trainer.dataset.y).sum()
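    # (Added note) WMAPE = sum(|y - y_hat|) / sum(|y|); the test checks the
    # numerator and denominator channels separately against this direct
    # numpy computation.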
exp_array = np.asarray([wmape_num_exp, wmape_den_exp])
wmape_num_real = trainer.model.monitor.channels['train_wmape_num'].\
val_record
wmape_den_real = trainer.model.monitor.channels['train_wmape_den'].\
val_record
real_array = np.asarray([wmape_num_real[-1], wmape_den_real[-1]])
assert_allclose(exp_array, real_array)
test_yaml = """
!obj:pylearn2.train.Train {
dataset:
&train !obj:pylearn2.testing.datasets.\
random_dense_design_matrix_for_regression
{
rng: !obj:numpy.random.RandomState { seed: 1 },
num_examples: 10,
dim: 10,
reg_min: 1,
reg_max: 1000
},
model: !obj:pylearn2.models.mlp.MLP {
nvis: 10,
layers: [
!obj:pylearn2.models.mlp.Sigmoid {
layer_name: h0,
dim: 10,
irange: 0.05,
},
!obj:pylearn2.models.mlp.Linear {
layer_name: y,
dim: 1,
irange: 0.,
}
],
},
algorithm: !obj:pylearn2.training_algorithms.bgd.BGD {
monitoring_dataset: {
'train': *train,
},
batches_per_iter: 1,
monitoring_batches: 1,
termination_criterion: !obj:pylearn2.termination_criteria.And {
criteria: [
!obj:pylearn2.termination_criteria.EpochCounter {
max_epochs: 1,
},
!obj:pylearn2.termination_criteria.MonitorBased {
channel_name: train_wmape_num,
prop_decrease: 0.,
N: 1,
},
],
},
},
extensions: [
!obj:pylearn2.train_extensions.wmape_channel.WMapeNumeratorChannel {},
!obj:pylearn2.train_extensions.wmape_channel.\
WMapeDenominatorChannel {},
],
}
"""
| bsd-3-clause |
sandeepdsouza93/TensorFlow-15712 | tensorflow/examples/learn/text_classification.py | 8 | 4925 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of Estimator for DNN-based text classification with DBpedia data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import numpy as np
import pandas
from sklearn import metrics
import tensorflow as tf
from tensorflow.contrib import learn
FLAGS = None
MAX_DOCUMENT_LENGTH = 10
EMBEDDING_SIZE = 50
n_words = 0
def bag_of_words_model(features, target):
"""A bag-of-words model. Note it disregards the word order in the text."""
target = tf.one_hot(target, 15, 1, 0)
features = tf.contrib.layers.bow_encoder(
features, vocab_size=n_words, embed_dim=EMBEDDING_SIZE)
logits = tf.contrib.layers.fully_connected(features, 15, activation_fn=None)
loss = tf.contrib.losses.softmax_cross_entropy(logits, target)
train_op = tf.contrib.layers.optimize_loss(
loss, tf.contrib.framework.get_global_step(),
optimizer='Adam', learning_rate=0.01)
return (
{'class': tf.argmax(logits, 1), 'prob': tf.nn.softmax(logits)},
loss, train_op)
def rnn_model(features, target):
"""RNN model to predict from sequence of words to a class."""
# Convert indexes of words into embeddings.
# This creates embeddings matrix of [n_words, EMBEDDING_SIZE] and then
# maps word indexes of the sequence into [batch_size, sequence_length,
# EMBEDDING_SIZE].
word_vectors = tf.contrib.layers.embed_sequence(
features, vocab_size=n_words, embed_dim=EMBEDDING_SIZE, scope='words')
# Split into list of embedding per word, while removing doc length dim.
# word_list ends up as a list of tensors, each of shape [batch_size, EMBEDDING_SIZE].
word_list = tf.unstack(word_vectors, axis=1)
# Create a Gated Recurrent Unit cell with hidden size of EMBEDDING_SIZE.
cell = tf.nn.rnn_cell.GRUCell(EMBEDDING_SIZE)
# Create an unrolled Recurrent Neural Networks to length of
# MAX_DOCUMENT_LENGTH and passes word_list as inputs for each unit.
_, encoding = tf.nn.rnn(cell, word_list, dtype=tf.float32)
# Given the RNN encoding, take the output of the last step (i.e. the final
# hidden state of the network) and pass it as features for logistic
# regression over the output classes.
target = tf.one_hot(target, 15, 1, 0)
logits = tf.contrib.layers.fully_connected(encoding, 15, activation_fn=None)
loss = tf.contrib.losses.softmax_cross_entropy(logits, target)
# Create a training op.
train_op = tf.contrib.layers.optimize_loss(
loss, tf.contrib.framework.get_global_step(),
optimizer='Adam', learning_rate=0.01)
return (
{'class': tf.argmax(logits, 1), 'prob': tf.nn.softmax(logits)},
loss, train_op)
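# Illustrative only (added, never called): mirrors the tensor shapes in
# rnn_model with plain numpy.
def _shape_sketch():
  batch_size = 3
  word_vectors = np.zeros((batch_size, MAX_DOCUMENT_LENGTH, EMBEDDING_SIZE))
  # Like tf.unstack(word_vectors, axis=1): MAX_DOCUMENT_LENGTH slices,
  # each of shape [batch_size, EMBEDDING_SIZE].
  word_list = [word_vectors[:, t, :] for t in range(MAX_DOCUMENT_LENGTH)]
  assert len(word_list) == MAX_DOCUMENT_LENGTH
  assert word_list[0].shape == (batch_size, EMBEDDING_SIZE)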
def main(unused_argv):
global n_words
# Prepare training and testing data
dbpedia = learn.datasets.load_dataset(
'dbpedia', test_with_fake_data=FLAGS.test_with_fake_data)
x_train = pandas.DataFrame(dbpedia.train.data)[1]
y_train = pandas.Series(dbpedia.train.target)
x_test = pandas.DataFrame(dbpedia.test.data)[1]
y_test = pandas.Series(dbpedia.test.target)
# Process vocabulary
vocab_processor = learn.preprocessing.VocabularyProcessor(MAX_DOCUMENT_LENGTH)
x_train = np.array(list(vocab_processor.fit_transform(x_train)))
x_test = np.array(list(vocab_processor.transform(x_test)))
n_words = len(vocab_processor.vocabulary_)
print('Total words: %d' % n_words)
# Build model
# Switch between rnn_model and bag_of_words_model to test different models.
model_fn = rnn_model
if FLAGS.bow_model:
model_fn = bag_of_words_model
classifier = learn.Estimator(model_fn=model_fn)
# Train and predict
classifier.fit(x_train, y_train, steps=100)
y_predicted = [
p['class'] for p in classifier.predict(x_test, as_iterable=True)]
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--test_with_fake_data',
default=False,
help='Test the example code with fake data.',
action='store_true'
)
parser.add_argument(
'--bow_model',
default=False,
help='Run with BOW model instead of RNN.',
action='store_true'
)
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
Dapid/scipy | doc/source/tutorial/examples/newton_krylov_preconditioning.py | 99 | 2489 | import numpy as np
from scipy.optimize import root
from scipy.sparse import spdiags, kron
from scipy.sparse.linalg import spilu, LinearOperator
from numpy import cosh, zeros_like, mgrid, zeros, eye
# parameters
nx, ny = 75, 75
hx, hy = 1./(nx-1), 1./(ny-1)
P_left, P_right = 0, 0
P_top, P_bottom = 1, 0
def get_preconditioner():
"""Compute the preconditioner M"""
diags_x = zeros((3, nx))
diags_x[0,:] = 1/hx/hx
diags_x[1,:] = -2/hx/hx
diags_x[2,:] = 1/hx/hx
Lx = spdiags(diags_x, [-1,0,1], nx, nx)
diags_y = zeros((3, ny))
diags_y[0,:] = 1/hy/hy
diags_y[1,:] = -2/hy/hy
diags_y[2,:] = 1/hy/hy
Ly = spdiags(diags_y, [-1,0,1], ny, ny)
J1 = kron(Lx, eye(ny)) + kron(eye(nx), Ly)
# Now we have the matrix `J_1`. We need to find its inverse `M` --
# however, since an approximate inverse is enough, we can use
# the *incomplete LU* decomposition
J1_ilu = spilu(J1)
# This returns an object with a method .solve() that evaluates
# the corresponding matrix-vector product. We need to wrap it into
# a LinearOperator before it can be passed to the Krylov methods:
M = LinearOperator(shape=(nx*ny, nx*ny), matvec=J1_ilu.solve)
return M
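def _ilu_sketch():
    """Minimal sketch (added, not part of the original recipe): wrap an
    incomplete LU factorization of a small 1-D Laplacian as a
    LinearOperator, the same pattern used in get_preconditioner()."""
    n = 5
    diags = zeros((3, n))
    diags[0,:], diags[1,:], diags[2,:] = 1, -2, 1
    A = spdiags(diags, [-1, 0, 1], n, n).tocsc()  # spilu expects CSC
    M = LinearOperator(shape=(n, n), matvec=spilu(A).solve)
    return M.matvec(np.ones(n))  # approximately solves A x = ones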
def solve(preconditioning=True):
"""Compute the solution"""
count = [0]
def residual(P):
count[0] += 1
d2x = zeros_like(P)
d2y = zeros_like(P)
d2x[1:-1] = (P[2:] - 2*P[1:-1] + P[:-2])/hx/hx
d2x[0] = (P[1] - 2*P[0] + P_left)/hx/hx
d2x[-1] = (P_right - 2*P[-1] + P[-2])/hx/hx
d2y[:,1:-1] = (P[:,2:] - 2*P[:,1:-1] + P[:,:-2])/hy/hy
d2y[:,0] = (P[:,1] - 2*P[:,0] + P_bottom)/hy/hy
d2y[:,-1] = (P_top - 2*P[:,-1] + P[:,-2])/hy/hy
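        # (Added note) d2x and d2y are second central differences with
        # Dirichlet boundary values (P_left/P_right, P_bottom/P_top); the
        # cosh term couples all unknowns through the mean.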
return d2x + d2y + 5*cosh(P).mean()**2
# preconditioner
if preconditioning:
M = get_preconditioner()
else:
M = None
# solve
guess = zeros((nx, ny), float)
sol = root(residual, guess, method='krylov',
options={'disp': True,
'jac_options': {'inner_M': M}})
print 'Residual', abs(residual(sol.x)).max()
print 'Evaluations', count[0]
return sol.x
def main():
sol = solve(preconditioning=True)
# visualize
import matplotlib.pyplot as plt
x, y = mgrid[0:1:(nx*1j), 0:1:(ny*1j)]
plt.clf()
plt.pcolor(x, y, sol)
plt.clim(0, 1)
plt.colorbar()
plt.show()
if __name__ == "__main__":
main()
| bsd-3-clause |
pypot/scikit-learn | sklearn/datasets/tests/test_base.py | 205 | 5878 | import os
import shutil
import tempfile
import warnings
import nose
import numpy
from pickle import loads
from pickle import dumps
from sklearn.datasets import get_data_home
from sklearn.datasets import clear_data_home
from sklearn.datasets import load_files
from sklearn.datasets import load_sample_images
from sklearn.datasets import load_sample_image
from sklearn.datasets import load_digits
from sklearn.datasets import load_diabetes
from sklearn.datasets import load_linnerud
from sklearn.datasets import load_iris
from sklearn.datasets import load_boston
from sklearn.datasets.base import Bunch
from sklearn.externals.six import b, u
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
DATA_HOME = tempfile.mkdtemp(prefix="scikit_learn_data_home_test_")
LOAD_FILES_ROOT = tempfile.mkdtemp(prefix="scikit_learn_load_files_test_")
TEST_CATEGORY_DIR1 = ""
TEST_CATEGORY_DIR2 = ""
def _remove_dir(path):
if os.path.isdir(path):
shutil.rmtree(path)
def teardown_module():
"""Test fixture (clean up) run once after all tests of this module"""
for path in [DATA_HOME, LOAD_FILES_ROOT]:
_remove_dir(path)
def setup_load_files():
global TEST_CATEGORY_DIR1
global TEST_CATEGORY_DIR2
TEST_CATEGORY_DIR1 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
TEST_CATEGORY_DIR2 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
sample_file = tempfile.NamedTemporaryFile(dir=TEST_CATEGORY_DIR1,
delete=False)
sample_file.write(b("Hello World!\n"))
sample_file.close()
def teardown_load_files():
_remove_dir(TEST_CATEGORY_DIR1)
_remove_dir(TEST_CATEGORY_DIR2)
def test_data_home():
# get_data_home will point to a pre-existing folder
data_home = get_data_home(data_home=DATA_HOME)
assert_equal(data_home, DATA_HOME)
assert_true(os.path.exists(data_home))
# clear_data_home will delete both the content and the folder it-self
clear_data_home(data_home=data_home)
assert_false(os.path.exists(data_home))
# if the folder is missing it will be created again
data_home = get_data_home(data_home=DATA_HOME)
assert_true(os.path.exists(data_home))
def test_default_empty_load_files():
res = load_files(LOAD_FILES_ROOT)
assert_equal(len(res.filenames), 0)
assert_equal(len(res.target_names), 0)
assert_equal(res.DESCR, None)
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_default_load_files():
res = load_files(LOAD_FILES_ROOT)
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 2)
assert_equal(res.DESCR, None)
assert_equal(res.data, [b("Hello World!\n")])
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_load_files_w_categories_desc_and_encoding():
category = os.path.abspath(TEST_CATEGORY_DIR1).split('/').pop()
res = load_files(LOAD_FILES_ROOT, description="test",
categories=category, encoding="utf-8")
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 1)
assert_equal(res.DESCR, "test")
assert_equal(res.data, [u("Hello World!\n")])
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_load_files_wo_load_content():
res = load_files(LOAD_FILES_ROOT, load_content=False)
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 2)
assert_equal(res.DESCR, None)
assert_equal(res.get('data'), None)
def test_load_sample_images():
try:
res = load_sample_images()
assert_equal(len(res.images), 2)
assert_equal(len(res.filenames), 2)
assert_true(res.DESCR)
except ImportError:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_digits():
digits = load_digits()
assert_equal(digits.data.shape, (1797, 64))
assert_equal(numpy.unique(digits.target).size, 10)
def test_load_digits_n_class_lt_10():
digits = load_digits(9)
assert_equal(digits.data.shape, (1617, 64))
assert_equal(numpy.unique(digits.target).size, 9)
def test_load_sample_image():
try:
china = load_sample_image('china.jpg')
assert_equal(china.dtype, 'uint8')
assert_equal(china.shape, (427, 640, 3))
except ImportError:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_missing_sample_image_error():
have_PIL = True
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
except ImportError:
have_PIL = False
if have_PIL:
assert_raises(AttributeError, load_sample_image,
'blop.jpg')
else:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_diabetes():
res = load_diabetes()
assert_equal(res.data.shape, (442, 10))
assert_true(res.target.size, 442)
def test_load_linnerud():
res = load_linnerud()
assert_equal(res.data.shape, (20, 3))
assert_equal(res.target.shape, (20, 3))
assert_equal(len(res.target_names), 3)
assert_true(res.DESCR)
def test_load_iris():
res = load_iris()
assert_equal(res.data.shape, (150, 4))
assert_equal(res.target.size, 150)
assert_equal(res.target_names.size, 3)
assert_true(res.DESCR)
def test_load_boston():
res = load_boston()
assert_equal(res.data.shape, (506, 13))
assert_equal(res.target.size, 506)
assert_equal(res.feature_names.size, 13)
assert_true(res.DESCR)
def test_loads_dumps_bunch():
bunch = Bunch(x="x")
bunch_from_pkl = loads(dumps(bunch))
bunch_from_pkl.x = "y"
assert_equal(bunch_from_pkl['x'], bunch_from_pkl.x)
| bsd-3-clause |
UCNA/main | Scripts/LEDPulser/PD_LED_Analysis/pd_led_pmt_gain.py | 1 | 5784 | # pd_led_pmt_gain.py
# Author: Simon Slutsky
# Created: 10/21/2013
#
# Plot fitted pmt gains output from pd_led_pmt_batch file
import numpy as np
import matplotlib.pyplot as plt
from pylab import *
from matplotlib.backends.backend_pdf import PdfPages
from math import sqrt
#from ROOT import TCanvas
import sys
sys.path.append('../../') # find RunPlotter.py
from RunPlotter import getTimeForRunlist
import matplotlib.dates as dates
def ReadLEDFile():
imagedir = '/data1/saslutsky/LEDPulser/images_06_10_2015_16way_separate_wavelength_coeff_20254_23173/'
filename = 'PMTGainResults.txt'
path = imagedir + "/" +filename
outputfilename = 'GainsTogether.pdf'
outputpath = imagedir + "/" + outputfilename
# import data.
# TODO: improve this to read the header string from the file
readData = np.genfromtxt(path, skip_header=1,
delimiter = "\t",
names = ['Run','tube', 'A', 'AErr','Mu',
'MuErr','Sigma','SigmaErr', 'Chi2'])
readData.sort(order = 'Run')
return readData, outputpath
def getLEDDataforTube(tubeIn):
dat, _ = ReadLEDFile()  # ReadLEDFile also returns the output path
cutDat = dat[dat['tube'] == tubeIn]
return cutDat
# copy/pasted from pd_led_gain.py - needs updates
if __name__ == "__main__":
savebool = sys.argv[1]
datebool = sys.argv[2]
plt.ion() #turn on interactive mode
rcParams['figure.figsize'] = 10, 10 #Set default fig siz
data, outputpath = ReadLEDFile()
means = data['Mu']
meanErrs = data['MuErr']
average = sum(means[:]/len(means))
print "AVERAGE = " + str(average)
# calculate "gain" arbitrarily normalized to 260
gains = [m/average for m in means]
gainsErr = [mErr/average for mErr in meanErrs]
#fig = plt.figure()
fig0, (ax0) = plt.subplots()
fig1, (ax1) = plt.subplots()
fig2, (ax2) = plt.subplots()
fig3, (ax3) = plt.subplots()
figChi, (ax4) = plt.subplots()
figures = [fig0, fig1, fig2, fig3, figChi]
plt.rc('axes', color_cycle=['r', 'g', 'b', 'y'])
axes = [ax0, ax1, ax2, ax3, ax4]
ax0.set_title("A")
ax1.set_title("Mean Response")
ax2.set_title("Relative Gain")
ax3.set_title("Sigma")
ax4.set_title("Chi2")
marks = 4
if not datebool:
ax0.errorbar(data['Run'], data['A'], yerr=data['AErr'],
linestyle='None', marker='o', markersize=marks)
ax1.errorbar(data['Run'], data['Mu'], yerr=data['MuErr'],
linestyle='None', marker='o', markersize=marks)
ax2.errorbar(data['Run'], gains, gainsErr,
linestyle='None', marker='o', markersize=marks)
ax3.errorbar(data['Run'], data['Sigma'], yerr=data['SigmaErr'],
linestyle='None', marker='o', markersize=marks)
ax4.errorbar(data['Run'], data['Chi2'],
linestyle='None', marker='o', markersize=marks)
# ax0.set_xlim([20800, 24000])
# ax1.set_xlim([20800, 24000])
# ax2.set_xlim([20800, 24000])
# ax3.set_xlim([20800, 24000])
# ax4.set_xlim([20800, 24000])
ax0.set_xlabel('Run Number')
ax1.set_xlabel('Run Number')
ax2.set_xlabel('Run Number')
ax3.set_xlabel('Run Number')
ax4.set_xlabel('Run Number')
if datebool:
timelist = getTimeForRunlist(data['Run'])
ax0.errorbar(timelist, data['A'], yerr=data['AErr'],
linestyle='None', marker='o', markersize=marks)
ax1.errorbar(timelist, data['Mu'], yerr=data['MuErr'],
linestyle='None', marker='o', markersize=marks)
ax2.errorbar(timelist, gains, gainsErr,
linestyle='None', marker='o', markersize=marks)
ax3.errorbar(timelist, data['Sigma'], yerr=data['SigmaErr'],
linestyle='None', marker='o', markersize=marks)
ax4.errorbar(timelist, data['Chi2'],
linestyle='None', marker='o', markersize=marks)
for ax in axes:
ax.xaxis.set_major_formatter(dates.DateFormatter("%m/%d/%y"))
ax.xaxis_date()
ax.set_xlabel('Time')
sepfigs = list()
sepaxes = list()
for i in range(0,8):
tmpfig, (tmpax) = plt.subplots()
if i < 4:
chan = "E" + str(i)
else:
chan = "W" + str(i%4)
tmpax.set_title("PMT Response to LED Gain Pulse: " + chan)
tmpax.set_ylabel("PMT (ADC)")
sepfigs.append(tmpfig)
sepaxes.append(tmpax)
data_cut = data[data['tube'] == i]
# print data_cut
if not datebool:
tmpax.errorbar(data_cut['Run'], data_cut['Mu'], yerr=data_cut['MuErr'],
linestyle='None', marker='o', markersize=marks)
tmpax.set_xlabel('Run Number')
if datebool:
timelist = getTimeForRunlist(data_cut['Run'])
tmpax.errorbar(timelist, data_cut['Mu'], yerr=data_cut['MuErr'],
linestyle='None', marker='o', markersize=marks)
tmpax.set_xlabel('Time')
if savebool:
outputfile = PdfPages(outputpath)
for f in range(0, len(figures)):
outputfile.savefig(figures[f])
outputfile.close()
plt.show(block=True) #block=True keeps the plot window open when in interactive mode
# too hard to do it manually
#run = data['Run']
#_run0 = np.where(data['Channel'] == 0, run, 0)
#run0 = _run0[_run0.nonzero()] #strip zeroes from the array
#p0 = data['p0']
#_p00 = np.where(data['Channel'] == 0, p0, 0)
#p00 = _p00[_p00.nonzero()]
#for i in range (0, len(run)):
# if y[i] > 1e8 or y[i] < -1e2:
# x[i] = 0
# y[i] = 0
# print i
| gpl-3.0 |
wjlei1990/spaceweight | src/spaceweight/sphereweightbase.py | 1 | 21534 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Class that contains sphere weighting.
:copyright:
Wenjie Lei ([email protected]), 2016
:license:
GNU Lesser General Public License, version 3 (LGPLv3)
(http://www.gnu.org/licenses/lgpl-3.0.en.html)
"""
from __future__ import print_function, division, absolute_import
import numpy as np
from math import cos, sin
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from matplotlib import colors
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
from obspy.geodetics import locations2degrees
from obspy.geodetics import gps2dist_azimuth
from . import SpherePoint
from .weightbase import WeightBase
from .plot_util import plot_circular_sector, plot_points_in_polar
from .plot_util import plot_rings_in_polar, plot_two_histograms
from .plot_util import plot_2d_matrix
from .util import sort_array_into_bins, scale_matrix_by_exp
from . import logger
from .util import search_for_ratio
from .spherevoronoi import SphericalVoronoi
def _azimuth(lat1, lon1, lat2, lon2):
"""
The azimuth (unit: degree) from point 1 to
point 2 on the sphere
"""
_, azi, _ = gps2dist_azimuth(lat1, lon1, lat2, lon2)
return azi
def _distance(lat1, lon1, lat2, lon2):
"""
The distance (unit: degree) between two points
on the sphere
"""
return locations2degrees(lat1, lon1, lat2, lon2)
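def _sanity_example():
    """Illustrative check (added, never called): two points on the equator
    ten degrees apart are due east/west of one another."""
    assert abs(_azimuth(0.0, 0.0, 0.0, 10.0) - 90.0) < 1.0
    assert abs(_distance(0.0, 0.0, 0.0, 10.0) - 10.0) < 0.01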
class SphereWeightBase(WeightBase):
"""
The superclass for sphere weight. It handles 2D sphere problem,
which means the points should be on the surface of a globe
"""
def __init__(self, points, center=None, sort_by_tag=False,
remove_duplicate=False, normalize_mode="average"):
"""
:param points: the list of points
:type points: list
:param center: the center point. Not required by this weighting.
If provided, methods involving azimuth (relative to the center)
can be used.
:param sort_by_tag: refer to superclass WeightBase
:param remove_duplicate: refer to superclass WeightBase
:param normalize_mode: refer to superclass WeightBase
:return:
"""
if not isinstance(points[0], SpherePoint):
raise TypeError("Type of points should be SpherePoint")
if center is not None and not isinstance(center, SpherePoint):
raise TypeError("Type of center should be SpherePoint")
WeightBase.__init__(self, points, sort_by_tag=sort_by_tag,
remove_duplicate=remove_duplicate)
self.normalize_mode = normalize_mode
self.center = center
if self.points_dimension != (2,):
raise ValueError("For the sphere problem, dimension of points "
"coordinates should be 2: [latitude, logitude].")
def _calculate_azimuth_array(self):
if self.center is None:
raise ValueError("Center must be specified to calculate azimuth")
azi_array = np.zeros(self.npoints)
for idx, point in enumerate(self.points):
azi = _azimuth(self.center.coordinate[0],
self.center.coordinate[1],
point.coordinate[0],
point.coordinate[1])
azi_array[idx] = azi
return azi_array
def _stats_azimuth_info(self, nbins):
if self.center is None:
raise ValueError("No center information provided. Impossible to"
"calculate azimuth information.")
azi_array = self._calculate_azimuth_array()
azi_bin, azi_bin_dict = \
sort_array_into_bins(azi_array, 0.0, 360.0, nbins=nbins)
return azi_array, azi_bin, azi_bin_dict
def _calculate_distance_array(self):
if self.center is None:
raise ValueError("No center information provied. Impossible to"
"calculate distances from center to points")
npts = self.npoints
dist_array = np.zeros(npts)
for idx, point in enumerate(self.points):
dist = _distance(self.center.coordinate[0],
self.center.coordinate[1],
point.coordinate[0],
point.coordinate[1])
dist_array[idx] = dist
return dist_array
def _stats_distance_info(self, nbins):
if self.center is None:
raise ValueError("No center information provied. Impossible to"
"calculate distances from center to points")
dist_array = self._calculate_distance_array()
dist_bin, dist_bin_dict = \
sort_array_into_bins(dist_array, 0.0, 180.0, nbins=nbins)
return dist_array, dist_bin, dist_bin_dict
def _sort_weight_into_bins(self, nbins):
if self.center is None:
raise ValueError("No event information provided. Impossible to"
"calculate azimuth information")
azi_array, azi_bin, azi_bin_dict = self._stats_azimuth_info(nbins)
azi_weight_bin = np.zeros(nbins)
for bin_idx, station_list in azi_bin_dict.items():
azi_weight_bin[bin_idx] = \
np.sum(self.points_weights[station_list])
dist_array, dist_bin, dist_bin_dict = \
self._stats_distance_info(nbins=nbins)
dist_weight_bin = np.zeros(nbins)
for bin_idx, station_list in dist_bin_dict.items():
dist_weight_bin[bin_idx] = \
np.sum(self.points_weights[station_list])
return azi_array, azi_bin, azi_weight_bin, \
dist_array, dist_bin, dist_weight_bin
def plot_global_map(self, figname=None, lon0=None):
"""
Plot global map of points and centers
"""
from mpl_toolkits.basemap import Basemap
fig = plt.figure(figsize=(10, 4))
if lon0 is None:
if self.center is not None:
lon0 = self.center.coordinate[1]
else:
lon0 = 180.0
m = Basemap(projection='moll', lon_0=lon0, lat_0=0.0,
resolution='c')
m.drawcoastlines()
m.fillcontinents()
m.drawparallels(np.arange(-90., 120., 30.))
m.drawmeridians(np.arange(0., 420., 60.))
m.drawmapboundary()
cm = plt.cm.get_cmap('RdYlBu')
x, y = m(self.points_coordinates[:, 1], self.points_coordinates[:, 0])
        # use c= (not color=) so the weights are mapped through the colormap
        m.scatter(x, y, 100, c=self.points_weights, marker="^",
                  edgecolor="k", linewidth=0.3, zorder=3, cmap=cm,
                  alpha=0.8)
plt.colorbar(shrink=0.95)
if self.center is not None:
center_lat = self.center.coordinate[0]
center_lon = self.center.coordinate[1]
center_x, center_y = m(center_lon, center_lat)
            m.scatter(center_x, center_y, 150, color="g", marker="o",
                      edgecolor="k", linewidth=0.3, zorder=3)
plt.tight_layout()
if figname is None:
plt.show()
else:
plt.savefig(figname)
plt.close(fig)
def plot_station_weight_distribution(self, nbins=12, figname=None):
"""
Plot distribution of station and weight in azimuth bins
"""
if not isinstance(self.center, SpherePoint):
raise ValueError("No event information provided. Impossible to"
"calculate azimuth information")
azi_array, azi_bin, azi_weight_bin, \
dist_array, dist_bin, dist_weight_bin = \
self._sort_weight_into_bins(nbins=nbins)
fig = plt.figure(figsize=(20, 10))
g = gridspec.GridSpec(2, 4)
# plot the stations in polar coords
plt.subplot(g[0, 0])
plot_points_in_polar(dist_array, azi_array)
# plot the station counts in azimuth bins
plt.subplot(g[0, 1], polar=True)
plot_circular_sector(azi_bin, title="Points Azimuthal bins")
# plot the stations weights sum in azimuth bins
plt.subplot(g[0, 2], polar=True)
plot_circular_sector(azi_weight_bin,
title="Weight sum in Azimuthal bins")
# plot the histogram of station counts and weights sum in azimuth bins
plt.subplot(g[0, 3])
plot_two_histograms(azi_bin, azi_weight_bin, tag1="stations",
tag2="weights")
# plot the stations counts in epi-center distance bins
plt.subplot(g[1, 1], polar=True)
bin_edges = np.linspace(0, 180, nbins, endpoint=False)
plot_rings_in_polar(dist_bin, bin_edges,
title="Distance bins")
# plot the stations weights sum in distance bins
plt.subplot(g[1, 2], polar=True)
bin_edges = np.linspace(0, 180, nbins, endpoint=False)
plot_rings_in_polar(dist_weight_bin, bin_edges,
title="Weight in distance bin")
# plot the histogram of station counts and weights in distance bins
plt.subplot(g[1, 3])
bin_edges = np.linspace(0, 180, nbins, endpoint=False)
plot_two_histograms(dist_bin, dist_weight_bin, bin_edges,
tag1="stations", tag2="weights")
if figname is None:
plt.show()
else:
plt.savefig(figname)
plt.close(fig)
class SphereDistRel(SphereWeightBase):
"""
Class that using the distances between points to calculate the weight.
The basic idea is if two points are close to each other, the contribution
to weight will be high(weight will be small).
"""
def __init__(self, points, center=None, sort_by_tag=False,
remove_duplicate=False, normalize_mode="average"):
SphereWeightBase.__init__(self, points, center=center,
sort_by_tag=sort_by_tag,
remove_duplicate=remove_duplicate,
normalize_mode=normalize_mode)
self.exp_matrix = np.zeros([self.npoints, self.npoints])
def _build_distance_matrix(self):
"""
calculate distance matrix
"""
coords = self.points_coordinates
npts = self.npoints
dist_m = np.zeros([npts, npts])
# calculate the upper part
for _i in range(npts):
for _j in range(_i+1, npts):
loc_i = coords[_i]
loc_j = coords[_j]
dist_m[_i, _j] = \
_distance(loc_i[0], loc_i[1],
loc_j[0], loc_j[1])
                # symmetric
dist_m[_j, _i] = dist_m[_i, _j]
        # fill diagonal with zeros (each station's distance to itself)
np.fill_diagonal(dist_m, 0.0)
return dist_m
@staticmethod
def _transfer_dist_to_weight(dist_m, ref_distance):
"""
Transfer the distance matrix into weight matrix by a given
reference distance(distance unit is degree)
:param dist_m:
:param ref_distance:
:return:
"""
exp_matrix, sum_on_row = scale_matrix_by_exp(dist_m, ref_distance,
order=2.0)
weight = 1. / sum_on_row
return weight, exp_matrix
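    # The weighting above implements, for point i (a sketch inferred
    # from the call to scale_matrix_by_exp with order=2.0; the exact
    # form lives in .util):
    #
    #   w_i = 1 / sum_j exp(-(d_ij / ref_distance) ** 2)
    #
    # so a point with many close neighbours accumulates a large sum and
    # receives a small weight.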
def calculate_weight(self, ref_distance):
"""
Calculate the weight based upon a given reference distance
:param ref_distance:
:return:
"""
"""
:param ref_distance:
:return:
"""
dist_m = self._build_distance_matrix()
weight, self.exp_matrix = \
self._transfer_dist_to_weight(dist_m, ref_distance)
self.points_weights = weight
self.normalize_weight(self.normalize_mode)
logger.info("Number of points at this stage: %d" % self.npoints)
logger.info("Condition number of weight array(max/min): %8.2f"
% self.condition_number)
def scan(self, start=1.0, end=50.0, gap=1.0, plot=False, figname=None):
"""
        Scan over a range of ref_distance values and return the
        condition numbers. The condition number is defined as the
        maximum weight divided by the minimum weight.
:param start: the start of ref_distance
:param end: the end of ref_distance
:param gap: the delta value
:param plot: plot flag
:param figname: save the figure to figname
:return: a list of ref_distance and condition numbers
"""
nscans = int((end - start) / gap) + 1
ref_dists = \
[start + gap * i for i in range(nscans)]
cond_nums = np.zeros(nscans)
dist_m = self._build_distance_matrix()
for idx, _ref_dist in enumerate(ref_dists):
weight, _ = self._transfer_dist_to_weight(dist_m, _ref_dist)
cond_nums[idx] = max(weight) / min(weight)
if plot:
plt.plot(ref_dists, cond_nums, 'r-*')
plt.xlabel("Reference distance(degree)")
plt.ylabel("Condition number")
if figname is None:
plt.show()
else:
plt.savefig(figname)
return ref_dists, cond_nums
def smart_scan(self, max_ratio=0.5, start=1.0, gap=0.5, drop_ratio=0.20,
plot=False, figname=None):
"""
        Search for the ref_distance whose condition number satisfies
        our criterion. As ref_distance increases from small values (near
        0), the condition number first increases, reaches its maximum
        and then decreases. The returned ref_distance is the one whose
        condition number is closest to (matching the code below):
            min_cond_number + max_ratio * (max_cond_number - min_cond_number)
        The drop ratio determines the end point of the search:
            end_cond_number = max_cond_number * drop_ratio
        :param max_ratio: determines the optimal ref_distance (return value)
        :param start: search start point
        :param gap: step size
        :param drop_ratio: determines the search end point
        :param plot: plot flag
        :param figname: figure name
        :return: the optimal ref_distance and corresponding condition number
"""
# print("npoints: %d" % self.npoints)
if self.npoints <= 2:
            # with two or fewer points all the weights are 1 anyway
logger.info("Less or equal than two points so the weights are "
"automatically set to 1")
self.points_weights = np.ones(self.npoints)
self.normalize_weight()
return 1, 1
if self.npoints <= 10:
            # relax the drop ratio when there are 10 or fewer points;
            # otherwise the search may overshoot while looking for the
            # drop. Note that this does not impact the final search
            # result.
drop_ratio = 0.99
dist_m = self._build_distance_matrix()
ref_dists = []
cond_nums = []
idx = 0
_ref_dist = start
while True:
weight, _ = self._transfer_dist_to_weight(dist_m, _ref_dist)
_cond_num = max(weight) / min(weight)
ref_dists.append(_ref_dist)
cond_nums.append(_cond_num)
if idx >= 2 and (_cond_num < drop_ratio * max(cond_nums)):
break
if _ref_dist > 200.0:
if np.isclose(max(cond_nums), min(cond_nums)):
print("cond nums are very close to each other")
break
else:
print("Smart scan error with _ref_dist overflow")
return None, None
idx += 1
_ref_dist += gap
minv = min(cond_nums)
minv_idx = cond_nums.index(minv)
maxv = max(cond_nums)
maxv_idx = cond_nums.index(maxv)
logger.info("Min and Max condition number points([ref_dist, cond_num])"
" -- min[%f, %f] -- max[%f, %f]" %
(ref_dists[minv_idx], minv, ref_dists[maxv_idx], maxv))
threshold = minv + max_ratio * (maxv - minv)
best_idx, best_cond_num = search_for_ratio(cond_nums, threshold)
best_ref_dist = ref_dists[best_idx]
logger.info("Best ref_distance and corresponding condition number:"
"[%f, %f]" % (best_ref_dist, best_cond_num))
if plot:
plt.plot(ref_dists, cond_nums, 'r-*')
plt.xlabel("Reference distance(degree)")
plt.ylabel("Condition number")
plt.plot(best_ref_dist, best_cond_num, 'g*', markersize=10)
plt.plot([ref_dists[0], ref_dists[-1]], [threshold, threshold],
'b--')
if figname is None:
plt.show()
else:
plt.savefig(figname)
plt.close()
# calculate weight based on the best ref_dist value
weight, self.exp_matrix = \
self._transfer_dist_to_weight(dist_m, best_ref_dist)
self.points_weights = weight
self.normalize_weight()
return best_ref_dist, best_cond_num
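    # Hedged usage sketch (constructor signature taken from this module;
    # the SpherePoint argument names are an assumption):
    #
    #   points = [SpherePoint([lat, lon], tag=name)
    #             for lat, lon, name in station_table]
    #   weighting = SphereDistRel(points)
    #   ref_dist, cond = weighting.smart_scan(max_ratio=0.5, plot=False)
    #   weights = weighting.points_weights
    #
    # points_weights is assumed to be exposed by WeightBase, since it is
    # read and written throughout this class.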
def plot_exp_matrix(self, figname=None):
plot_2d_matrix(self.exp_matrix,
title="Distance Exponential Matrix",
figname=figname)
class SphereVoronoi(SphereWeightBase):
def __init__(self, points, voronoi_order=1.0, center=None,
sort_by_tag=False, remove_duplicate=True,
normalize_mode="average"):
"""
:param points: a list of SpherePoints
        :param voronoi_order: voronoi order. The weight is determined
            by the surface area of the Voronoi cell, raised to a
            certain power: weight = (surface_area) ** voronoi_order
:param center: center point
:param sort_by_tag:
:param remove_duplicate:
:param normalize_mode:
:return:
"""
SphereWeightBase.__init__(self, points, center=center,
sort_by_tag=sort_by_tag,
remove_duplicate=remove_duplicate,
normalize_mode=normalize_mode)
self.sv = None
self.voronoi_order = voronoi_order
# sphere parameter for voronoi usage
self.sphere_radius = 1.0
self.sphere_center = np.zeros(3)
def _transfer_coordinate(self):
"""
        Transfer (latitude, longitude) to (x, y, z) on a sphere with the
        given radius and center
"""
radius = self.sphere_radius
center = self.sphere_center
sphere_loc = np.zeros([self.npoints, 3])
for _i, point in enumerate(self.points):
lat = np.deg2rad(point.coordinate[0])
lon = np.deg2rad(point.coordinate[1])
sphere_loc[_i, 0] = radius * cos(lat) * cos(lon) + center[0]
sphere_loc[_i, 1] = radius * cos(lat) * sin(lon) + center[1]
sphere_loc[_i, 2] = radius * sin(lat) + center[2]
return sphere_loc
def calculate_weight(self):
trans_points = self._transfer_coordinate()
# for _i in range(self.nstations):
# print("%10s: [%10.5f, %10.5f] -- [%10.5f, %10.5f, %10.5f]"
# % (self.station_tag[_i], self.station_loc[_i][0],
# self.station_loc[_i][1], self.points[_i][0],
# self.points[_i][1], self.points[_i][2]))
self.sv = SphericalVoronoi(trans_points, radius=self.sphere_radius,
center=self.sphere_center)
self.sv.sort_vertices_of_regions()
surface_area, coverage = self.sv.compute_surface_area()
weight = surface_area ** self.voronoi_order
logger.info("Voronoi surface area coverage: %15.5f" % coverage)
self.points_weights = weight
self.normalize_weight()
logger.info("Number of points at this stage: %d" % self.npoints)
logger.info("Condition number of weight array(max/min): %8.2f"
% self.condition_number)
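    # Hedged usage sketch: with voronoi_order=1.0 each point's weight is
    # simply the (normalized) area of its spherical Voronoi cell, so
    # isolated stations receive large weights:
    #
    #   vor = SphereVoronoi(points, voronoi_order=1.0)
    #   vor.calculate_weight()
    #   weights = vor.points_weights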
def plot_sphere(self):
points = self._transfer_coordinate()
sv = self.sv
fig = plt.figure(figsize=(10, 10))
ax = fig.add_subplot(111, projection='3d')
# plot the unit sphere for reference (optional)
u = np.linspace(0, 2 * np.pi, 100)
v = np.linspace(0, np.pi, 100)
x = np.outer(np.cos(u), np.sin(v))
y = np.outer(np.sin(u), np.sin(v))
z = np.outer(np.ones(np.size(u)), np.cos(v))
ax.plot_surface(x, y, z, color='y', alpha=0.05)
# plot Voronoi vertices
# ax.scatter(sv.vertices[:, 0], sv.vertices[:, 1], sv.vertices[:, 2],
# c='g')
# indicate Voronoi regions (as Euclidean polygons)
for region in sv.regions:
random_color = colors.rgb2hex(np.random.rand(3))
polygon = Poly3DCollection([sv.vertices[region]], alpha=1.0)
polygon.set_color(random_color)
ax.add_collection3d(polygon)
# plot generator points
ax.scatter(points[:, 0], points[:, 1], points[:, 2], c='b')
ax.set_xlim(-1, 1)
ax.set_ylim(-1, 1)
ax.set_zlim(-1, 1)
ax.set_xticks([-1, 1])
ax.set_yticks([-1, 1])
ax.set_zticks([-1, 1])
plt.tick_params(axis='both', which='major', labelsize=6)
plt.xlabel("X")
plt.ylabel("Y")
plt.show()
| gpl-3.0 |
emon10005/scikit-image | doc/examples/plot_contours.py | 30 | 1247 | """
===============
Contour finding
===============
``skimage.measure.find_contours`` uses a marching squares method to find
constant valued contours in an image. Array values are linearly interpolated
to provide better precision of the output contours. Contours which intersect
the image edge are open; all others are closed.
The `marching squares algorithm
<http://www.essi.fr/~lingrand/MarchingCubes/algo.html>`__ is a special case of
the marching cubes algorithm (Lorensen, William and Harvey E. Cline. Marching
Cubes: A High Resolution 3D Surface Construction Algorithm. Computer Graphics
(SIGGRAPH 87 Proceedings) 21(4) July 1987, p. 163-170).
"""
import numpy as np
import matplotlib.pyplot as plt
from skimage import measure
# Construct some test data
x, y = np.ogrid[-np.pi:np.pi:100j, -np.pi:np.pi:100j]
r = np.sin(np.exp((np.sin(x)**3 + np.cos(y)**2)))
# Find contours at a constant value of 0.8
contours = measure.find_contours(r, 0.8)
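# Each contour returned by find_contours is an (N, 2) array of
# (row, column) coordinates, which is why the plotting loop below passes
# contour[:, 1] as x and contour[:, 0] as y.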
# Display the image and plot all contours found
fig, ax = plt.subplots()
ax.imshow(r, interpolation='nearest', cmap=plt.cm.gray)
for n, contour in enumerate(contours):
ax.plot(contour[:, 1], contour[:, 0], linewidth=2)
ax.axis('image')
ax.set_xticks([])
ax.set_yticks([])
plt.show()
| bsd-3-clause |
ddboline/kaggle_imdb_sentiment_model | backup/my_model.py | 1 | 2579 | #!/usr/bin/python
import os
import pandas as pd
from KaggleWord2VecUtility import KaggleWord2VecUtility
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_extraction.text import CountVectorizer
#master_word_dict = {}
#number_of_rows = 0
def clean_review_function(review):
    #global master_word_dict, number_of_rows  # stale: the counters above are commented out
list_of_words = KaggleWord2VecUtility.review_to_wordlist(review, remove_stopwords=False)
return ' '.join(list_of_words)
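# Illustrative example (hedged: assumes review_to_wordlist lower-cases
# and strips punctuation as in the Kaggle Word2Vec tutorial utility):
#
#   >>> clean_review_function('"Great movie!", he said.')
#   'great movie he said'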
def my_model(nfeatures=100, run_test_data=False):
print 'nfeatures', nfeatures
labeledtrain_data = pd.read_csv('labeledTrainData.tsv', header=0, delimiter='\t', quoting=3)
print 'labeledtrain_data.shape', labeledtrain_data.shape
clean_labeledtrain_reviews = labeledtrain_data['review'].apply(clean_review_function)
print clean_labeledtrain_reviews.shape
vectorizer = CountVectorizer(analyzer = 'word', tokenizer = None, preprocessor = None, stop_words = None, max_features = nfeatures)
train_review_subset_x = clean_labeledtrain_reviews[::2]
train_review_subset_y = labeledtrain_data['sentiment'][::2]
test_review_subset_x = clean_labeledtrain_reviews[1::2]
test_review_subset_y = labeledtrain_data['sentiment'][1::2]
train_data_features = vectorizer.fit_transform(train_review_subset_x).toarray()
forest = RandomForestClassifier(n_estimators = 100)
forest = forest.fit(train_data_features, train_review_subset_y)
test_data_features = vectorizer.transform(test_review_subset_x).toarray()
print forest.score(test_data_features, test_review_subset_y)
del train_review_subset_x, train_review_subset_y, test_review_subset_x, test_review_subset_y, test_data_features, train_data_features
if run_test_data:
train_data_features = vectorizer.fit_transform(clean_labeledtrain_reviews).toarray()
forest = forest.fit(train_data_features, labeledtrain_data['sentiment'])
test_data = pd.read_csv('testData.tsv', header=0, delimiter='\t', quoting=3)
clean_test_reviews = test_data['review'].apply(clean_review_function)
test_data_features = vectorizer.transform(clean_test_reviews).toarray()
result = forest.predict(test_data_features)
output = pd.DataFrame(data={'id': test_data['id'], 'sentiment': result})
output.to_csv('my_model.csv', index=False, quoting=3)
if __name__ == '__main__':
nfeatures = 100
for arg in os.sys.argv:
try:
nfeatures = int(arg)
except ValueError:
pass
my_model(nfeatures, run_test_data=True)
| mit |
rajegannathan/grasp-lift-eeg-cat-dog-solution-updated | python-packages/mne-python-0.10/mne/tests/test_import_nesting.py | 7 | 1607 | import sys
from subprocess import Popen, PIPE
from mne.utils import run_tests_if_main, requires_version
run_script = """
from __future__ import print_function
import sys
import mne
out = []
# check scipy
ok_scipy_submodules = set(['scipy', 'numpy', # these appear in old scipy
'fftpack', 'lib', 'linalg',
'misc', 'sparse', 'version'])
scipy_submodules = set(x.split('.')[1] for x in sys.modules.keys()
if x.startswith('scipy.') and '__' not in x and
not x.split('.')[1].startswith('_'))
bad = scipy_submodules - ok_scipy_submodules
if len(bad) > 0:
out.append('Found un-nested scipy submodules: %s' % list(bad))
# check sklearn and others
_sklearn = _pandas = _nose = False
for x in sys.modules.keys():
if x.startswith('sklearn') and not _sklearn:
out.append('Found un-nested sklearn import')
_sklearn = True
if x.startswith('pandas') and not _pandas:
out.append('Found un-nested pandas import')
_pandas = True
if x.startswith('nose') and not _nose:
out.append('Found un-nested nose import')
_nose = True
if len(out) > 0:
print('\\n' + '\\n'.join(out), end='')
exit(1)
"""
@requires_version('scipy', '0.11') # old ones not organized properly
def test_module_nesting():
"""Test that module imports are necessary
"""
proc = Popen([sys.executable, '-c', run_script], stdout=PIPE, stderr=PIPE)
stdout, stderr = proc.communicate()
if proc.returncode:
raise AssertionError(stdout)
run_tests_if_main()
| bsd-3-clause |
vortex-ape/scikit-learn | benchmarks/bench_text_vectorizers.py | 36 | 2112 | """
To run this benchmark, you will need,
* scikit-learn
* pandas
* memory_profiler
* psutil (optional, but recommended)
"""
from __future__ import print_function
import timeit
import itertools
import numpy as np
import pandas as pd
from memory_profiler import memory_usage
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import (CountVectorizer, TfidfVectorizer,
HashingVectorizer)
n_repeat = 3
def run_vectorizer(Vectorizer, X, **params):
def f():
vect = Vectorizer(**params)
vect.fit_transform(X)
return f
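# run_vectorizer returns a zero-argument closure because both
# timeit.repeat and memory_profiler.memory_usage below expect a callable
# they can invoke repeatedly, e.g.:
#
#   f = run_vectorizer(CountVectorizer, text, analyzer='word',
#                      ngram_range=(1, 1))
#   f()  # one full fit_transform pass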
text = fetch_20newsgroups(subset='train').data
print("="*80 + '\n#' + " Text vectorizers benchmark" + '\n' + '='*80 + '\n')
print("Using a subset of the 20 newsrgoups dataset ({} documents)."
.format(len(text)))
print("This benchmarks runs in ~20 min ...")
res = []
for Vectorizer, (analyzer, ngram_range) in itertools.product(
[CountVectorizer, TfidfVectorizer, HashingVectorizer],
[('word', (1, 1)),
('word', (1, 2)),
('word', (1, 4)),
('char', (4, 4)),
('char_wb', (4, 4))
]):
bench = {'vectorizer': Vectorizer.__name__}
params = {'analyzer': analyzer, 'ngram_range': ngram_range}
bench.update(params)
dt = timeit.repeat(run_vectorizer(Vectorizer, text, **params),
number=1,
repeat=n_repeat)
bench['time'] = "{:.2f} (+-{:.2f})".format(np.mean(dt), np.std(dt))
mem_usage = memory_usage(run_vectorizer(Vectorizer, text, **params))
bench['memory'] = "{:.1f}".format(np.max(mem_usage))
res.append(bench)
df = pd.DataFrame(res).set_index(['analyzer', 'ngram_range', 'vectorizer'])
print('\n========== Run time performance (sec) ===========\n')
print('Computing the mean and the standard deviation '
'of the run time over {} runs...\n'.format(n_repeat))
print(df['time'].unstack(level=-1))
print('\n=============== Memory usage (MB) ===============\n')
print(df['memory'].unstack(level=-1))
| bsd-3-clause |
acimmarusti/isl_exercises | chap4/chap4ex10.py | 1 | 5716 | from __future__ import print_function, division
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis, QuadraticDiscriminantAnalysis
from sklearn.metrics import confusion_matrix, accuracy_score, precision_score, recall_score
import statsmodels.formula.api as smf
import statsmodels.api as sm
filename = '../Weekly.csv'
data = pd.read_csv(filename)
#All predictors#
allpred = list(data.columns)
allpred.remove('Direction')
#Summary (mean, stdev, range, etc)#
print('\nFull data summary')
print(data.describe())
#Correlations#
print('\nData correlations')
print(data.corr())
#List of predictors#
predictors = list(allpred)
predictors.remove('Year')
predictors.remove('Today')
#Pair plot matrix#
sns.set()
sns.pairplot(data, hue='Direction')
print('\n\n### LOGISTIC REGRESSION###')
## Logistic regression with statsmodels ##
lr_form = 'Direction~' + '+'.join(predictors)
logreg = smf.glm(formula=lr_form, data=data, family=sm.families.Binomial()).fit()
print('\nLogistic regression fit summary')
print(logreg.summary())
## Logistic regression with sklearn ##
#Prepare data#
X_full = np.array(data[predictors])
Y_full = np.array(data['Direction'])
# Initiate logistic regression object
logit = LogisticRegression()
# Fit model. X_full = matrix of predictors, Y_full = vector of responses.
# NOTE: Do not include a column for the intercept when fitting the model.
resLogit = logit.fit(X_full, Y_full)
#Predicted values for training set
Y_pred_full = resLogit.predict(X_full)
#Confusion matrix#
print("\nConfusion matrix full:")
print(confusion_matrix(Y_full, Y_pred_full))
#Accuracy, precision and recall#
print('\nAccuracy full:', np.round(accuracy_score(Y_full, Y_pred_full), 3))
print("Precision full:", np.round(precision_score(Y_full, Y_pred_full, pos_label='Up'), 3))
print("Recall full:", np.round(recall_score(Y_full, Y_pred_full, pos_label='Up'), 3))
## Keeping a test set based in year ##
print('\n\nUsing train/test set')
new_pred = ['Lag2']
#Prepare data#
data_train = data[data['Year'] < 2009]
data_test = data[data['Year'] >= 2009]
X_train = np.array(data_train[new_pred])
Y_train = np.array(data_train['Direction'])
X_test = np.array(data_test[new_pred])
Y_test = np.array(data_test['Direction'])
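#Note: splitting by year (train < 2009, test >= 2009) mimics real
#out-of-sample forecasting and avoids look-ahead bias, unlike a random
#split of weekly returns.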
# Initiate logistic regression object
logit_clf = LogisticRegression()
# Fit model. X_train = matrix of predictors, Y_train = vector of responses.
resLogit_clf = logit_clf.fit(X_train, Y_train)
#Predicted values for the test set
Y_pred = resLogit_clf.predict(X_test)
#Confusion matrix#
print("\nConfusion matrix logit:")
print(confusion_matrix(Y_test, Y_pred))
#Accuracy, precision and recall#
print('\nAccuracy logit:', np.round(accuracy_score(Y_test, Y_pred), 3))
print("Precision logit:", np.round(precision_score(Y_test, Y_pred, pos_label='Up'), 3))
print("Recall logit:", np.round(recall_score(Y_test, Y_pred, pos_label='Up'), 3))
print('\n\n### LINEAR DISCRIMINANT ANALYSIS ###')
# Initiate linear discriminant analysis object
lda_clf = LinearDiscriminantAnalysis()
# Fit model. X_train = predictors in new_pred, Y_train = vector of responses.
reslda_clf = lda_clf.fit(X_train, Y_train)
#Predicted values for the test set
Y_pred_lda = reslda_clf.predict(X_test)
#Prior probabilities#
print("\nPrior probabilities")
print(reslda_clf.classes_)
print(reslda_clf.priors_)
#Group means#
print("\nGroup means")
#print(reslda_clf.classes_)
print(reslda_clf.means_)
#Coefficients#
print("\nCoefficients")
#print(reslda_clf.classes_)
print(reslda_clf.intercept_)
print(reslda_clf.coef_)
#Confusion matrix#
print("\nConfusion matrix LDA:")
print(confusion_matrix(Y_test, Y_pred_lda))
#Accuracy, precision and recall#
print("\nAccuracy LDA:", np.round(accuracy_score(Y_test, Y_pred_lda), 3))
print("Precision LDA:", np.round(precision_score(Y_test, Y_pred_lda, pos_label='Up'), 3))
print("Recall LDA:", np.round(recall_score(Y_test, Y_pred_lda, pos_label='Up'), 3))
print('\n\n### QUADRATIC DISCRIMINANT ANALYSIS ###')
# Initiate QDA object
qda_clf = QuadraticDiscriminantAnalysis()
# Fit model. X_train = predictors in new_pred, Y_train = vector of responses.
resqda_clf = qda_clf.fit(X_train, Y_train)
#Predicted values for the test set
Y_pred_qda = resqda_clf.predict(X_test)
#Prior probabilities#
print("\nPrior probabilities")
print(resqda_clf.classes_)
print(resqda_clf.priors_)
#Group means#
print("\nGroup means")
#print(resqda_clf.classes_)
print(resqda_clf.means_)
#Confusion matrix#
print("\nConfusion matrix QDA:")
print(confusion_matrix(Y_test, Y_pred_qda))
#Accuracy, precision and recall#
print("\nAccuracy QDA:", np.round(accuracy_score(Y_test, Y_pred_qda), 3))
print("Precision QDA:", np.round(precision_score(Y_test, Y_pred_qda, pos_label='Up'), 3))
print("Recall QDA:", np.round(recall_score(Y_test, Y_pred_qda, pos_label='Up'), 3))
print('\n\n### K NEAREST NEIGHBORS ###')
# Initiate KNN object
knn_clf = KNeighborsClassifier(n_neighbors=1)
# Fit model. X_train = predictors in new_pred, Y_train = vector of responses.
resknn_clf = knn_clf.fit(X_train, Y_train)
#Predicted values for the test set
Y_pred_knn = resknn_clf.predict(X_test)
#Confusion matrix#
print("\nConfusion matrix KNN:")
print(confusion_matrix(Y_test, Y_pred_knn))
#Accuracy, precision and recall#
print("\nAccuracy KNN:", np.round(accuracy_score(Y_test, Y_pred_knn), 3))
print("Precision KNN:", np.round(precision_score(Y_test, Y_pred_knn, pos_label='Up'), 3))
print("Recall KNN:", np.round(recall_score(Y_test, Y_pred_knn, pos_label='Up'), 3))
#plt.show()
| gpl-3.0 |
ianatpn/nupictest | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_gtk.py | 69 | 43991 | from __future__ import division
import os, sys
def fn_name(): return sys._getframe(1).f_code.co_name
try:
import gobject
import gtk; gdk = gtk.gdk
import pango
except ImportError:
raise ImportError("Gtk* backend requires pygtk to be installed.")
pygtk_version_required = (2,2,0)
if gtk.pygtk_version < pygtk_version_required:
raise ImportError ("PyGTK %d.%d.%d is installed\n"
"PyGTK %d.%d.%d or later is required"
% (gtk.pygtk_version + pygtk_version_required))
del pygtk_version_required
import matplotlib
from matplotlib import verbose
from matplotlib._pylab_helpers import Gcf
from matplotlib.backend_bases import RendererBase, GraphicsContextBase, \
FigureManagerBase, FigureCanvasBase, NavigationToolbar2, cursors
from matplotlib.backends.backend_gdk import RendererGDK, FigureCanvasGDK
from matplotlib.cbook import is_string_like, is_writable_file_like
from matplotlib.colors import colorConverter
from matplotlib.figure import Figure
from matplotlib.widgets import SubplotTool
from matplotlib import lines
from matplotlib import cbook
backend_version = "%d.%d.%d" % gtk.pygtk_version
_debug = False
#_debug = True
# the true dots per inch on the screen; should be display dependent
# see http://groups.google.com/groups?q=screen+dpi+x11&hl=en&lr=&ie=UTF-8&oe=UTF-8&safe=off&selm=7077.26e81ad5%40swift.cs.tcd.ie&rnum=5 for some info about screen dpi
PIXELS_PER_INCH = 96
cursord = {
cursors.MOVE : gdk.Cursor(gdk.FLEUR),
cursors.HAND : gdk.Cursor(gdk.HAND2),
cursors.POINTER : gdk.Cursor(gdk.LEFT_PTR),
cursors.SELECT_REGION : gdk.Cursor(gdk.TCROSS),
}
# ref gtk+/gtk/gtkwidget.h
def GTK_WIDGET_DRAWABLE(w):
    flags = w.flags()
return flags & gtk.VISIBLE != 0 and flags & gtk.MAPPED != 0
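# A widget is drawable only when it is both visible and mapped; checking
# the two flags together mirrors the GTK_WIDGET_DRAWABLE macro from
# gtk+/gtk/gtkwidget.h referenced above.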
def draw_if_interactive():
"""
Is called after every pylab drawing command
"""
if matplotlib.is_interactive():
figManager = Gcf.get_active()
if figManager is not None:
figManager.canvas.draw()
def show(mainloop=True):
"""
Show all the figures and enter the gtk main loop
This should be the last line of your script
"""
for manager in Gcf.get_all_fig_managers():
manager.window.show()
if mainloop and gtk.main_level() == 0 and \
len(Gcf.get_all_fig_managers())>0:
gtk.main()
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
canvas = FigureCanvasGTK(thisFig)
manager = FigureManagerGTK(canvas, num)
# equals:
#manager = FigureManagerGTK(FigureCanvasGTK(Figure(*args, **kwargs), num)
return manager
class FigureCanvasGTK (gtk.DrawingArea, FigureCanvasBase):
keyvald = {65507 : 'control',
65505 : 'shift',
65513 : 'alt',
65508 : 'control',
65506 : 'shift',
65514 : 'alt',
65361 : 'left',
65362 : 'up',
65363 : 'right',
65364 : 'down',
65307 : 'escape',
65470 : 'f1',
65471 : 'f2',
65472 : 'f3',
65473 : 'f4',
65474 : 'f5',
65475 : 'f6',
65476 : 'f7',
65477 : 'f8',
65478 : 'f9',
65479 : 'f10',
65480 : 'f11',
65481 : 'f12',
65300 : 'scroll_lock',
65299 : 'break',
65288 : 'backspace',
65293 : 'enter',
65379 : 'insert',
65535 : 'delete',
65360 : 'home',
65367 : 'end',
65365 : 'pageup',
65366 : 'pagedown',
65438 : '0',
65436 : '1',
65433 : '2',
65435 : '3',
65430 : '4',
65437 : '5',
65432 : '6',
65429 : '7',
65431 : '8',
65434 : '9',
65451 : '+',
65453 : '-',
65450 : '*',
65455 : '/',
65439 : 'dec',
65421 : 'enter',
}
    # Build the event mask once as a class-level constant; constructing
    # this expression per instance was found to leak
event_mask = (gdk.BUTTON_PRESS_MASK |
gdk.BUTTON_RELEASE_MASK |
gdk.EXPOSURE_MASK |
gdk.KEY_PRESS_MASK |
gdk.KEY_RELEASE_MASK |
gdk.ENTER_NOTIFY_MASK |
gdk.LEAVE_NOTIFY_MASK |
gdk.POINTER_MOTION_MASK |
gdk.POINTER_MOTION_HINT_MASK)
def __init__(self, figure):
if _debug: print 'FigureCanvasGTK.%s' % fn_name()
FigureCanvasBase.__init__(self, figure)
gtk.DrawingArea.__init__(self)
self._idle_draw_id = 0
self._need_redraw = True
self._pixmap_width = -1
self._pixmap_height = -1
self._lastCursor = None
self.connect('scroll_event', self.scroll_event)
self.connect('button_press_event', self.button_press_event)
self.connect('button_release_event', self.button_release_event)
self.connect('configure_event', self.configure_event)
self.connect('expose_event', self.expose_event)
self.connect('key_press_event', self.key_press_event)
self.connect('key_release_event', self.key_release_event)
self.connect('motion_notify_event', self.motion_notify_event)
self.connect('leave_notify_event', self.leave_notify_event)
self.connect('enter_notify_event', self.enter_notify_event)
self.set_events(self.__class__.event_mask)
self.set_double_buffered(False)
self.set_flags(gtk.CAN_FOCUS)
self._renderer_init()
self._idle_event_id = gobject.idle_add(self.idle_event)
def destroy(self):
#gtk.DrawingArea.destroy(self)
gobject.source_remove(self._idle_event_id)
if self._idle_draw_id != 0:
gobject.source_remove(self._idle_draw_id)
def scroll_event(self, widget, event):
if _debug: print 'FigureCanvasGTK.%s' % fn_name()
x = event.x
# flipy so y=0 is bottom of canvas
y = self.allocation.height - event.y
if event.direction==gdk.SCROLL_UP:
step = 1
else:
step = -1
FigureCanvasBase.scroll_event(self, x, y, step)
return False # finish event propagation?
def button_press_event(self, widget, event):
if _debug: print 'FigureCanvasGTK.%s' % fn_name()
x = event.x
# flipy so y=0 is bottom of canvas
y = self.allocation.height - event.y
FigureCanvasBase.button_press_event(self, x, y, event.button)
return False # finish event propagation?
def button_release_event(self, widget, event):
if _debug: print 'FigureCanvasGTK.%s' % fn_name()
x = event.x
# flipy so y=0 is bottom of canvas
y = self.allocation.height - event.y
FigureCanvasBase.button_release_event(self, x, y, event.button)
return False # finish event propagation?
def key_press_event(self, widget, event):
if _debug: print 'FigureCanvasGTK.%s' % fn_name()
key = self._get_key(event)
if _debug: print "hit", key
FigureCanvasBase.key_press_event(self, key)
return False # finish event propagation?
def key_release_event(self, widget, event):
if _debug: print 'FigureCanvasGTK.%s' % fn_name()
key = self._get_key(event)
if _debug: print "release", key
FigureCanvasBase.key_release_event(self, key)
return False # finish event propagation?
def motion_notify_event(self, widget, event):
if _debug: print 'FigureCanvasGTK.%s' % fn_name()
if event.is_hint:
x, y, state = event.window.get_pointer()
else:
x, y, state = event.x, event.y, event.state
# flipy so y=0 is bottom of canvas
y = self.allocation.height - y
FigureCanvasBase.motion_notify_event(self, x, y)
return False # finish event propagation?
def leave_notify_event(self, widget, event):
FigureCanvasBase.leave_notify_event(self, event)
def enter_notify_event(self, widget, event):
FigureCanvasBase.enter_notify_event(self, event)
def _get_key(self, event):
if event.keyval in self.keyvald:
key = self.keyvald[event.keyval]
elif event.keyval <256:
key = chr(event.keyval)
else:
key = None
        # modifier state is computed but not currently used to build
        # composite key names
        ctrl = event.state & gdk.CONTROL_MASK
        shift = event.state & gdk.SHIFT_MASK
return key
def configure_event(self, widget, event):
if _debug: print 'FigureCanvasGTK.%s' % fn_name()
if widget.window is None:
return
w, h = event.width, event.height
if w < 3 or h < 3:
return # empty fig
# resize the figure (in inches)
dpi = self.figure.dpi
self.figure.set_size_inches (w/dpi, h/dpi)
self._need_redraw = True
return False # finish event propagation?
def draw(self):
# Note: FigureCanvasBase.draw() is inconveniently named as it clashes
# with the deprecated gtk.Widget.draw()
self._need_redraw = True
if GTK_WIDGET_DRAWABLE(self):
self.queue_draw()
# do a synchronous draw (its less efficient than an async draw,
# but is required if/when animation is used)
self.window.process_updates (False)
def draw_idle(self):
def idle_draw(*args):
self.draw()
self._idle_draw_id = 0
return False
if self._idle_draw_id == 0:
self._idle_draw_id = gobject.idle_add(idle_draw)
def _renderer_init(self):
"""Override by GTK backends to select a different renderer
Renderer should provide the methods:
set_pixmap ()
set_width_height ()
that are used by
_render_figure() / _pixmap_prepare()
"""
self._renderer = RendererGDK (self, self.figure.dpi)
def _pixmap_prepare(self, width, height):
"""
Make sure _._pixmap is at least width, height,
create new pixmap if necessary
"""
if _debug: print 'FigureCanvasGTK.%s' % fn_name()
create_pixmap = False
if width > self._pixmap_width:
# increase the pixmap in 10%+ (rather than 1 pixel) steps
self._pixmap_width = max (int (self._pixmap_width * 1.1),
width)
create_pixmap = True
if height > self._pixmap_height:
self._pixmap_height = max (int (self._pixmap_height * 1.1),
height)
create_pixmap = True
if create_pixmap:
self._pixmap = gdk.Pixmap (self.window, self._pixmap_width,
self._pixmap_height)
self._renderer.set_pixmap (self._pixmap)
def _render_figure(self, pixmap, width, height):
"""used by GTK and GTKcairo. GTKAgg overrides
"""
self._renderer.set_width_height (width, height)
self.figure.draw (self._renderer)
def expose_event(self, widget, event):
"""Expose_event for all GTK backends. Should not be overridden.
"""
if _debug: print 'FigureCanvasGTK.%s' % fn_name()
if GTK_WIDGET_DRAWABLE(self):
if self._need_redraw:
x, y, w, h = self.allocation
self._pixmap_prepare (w, h)
self._render_figure(self._pixmap, w, h)
self._need_redraw = False
x, y, w, h = event.area
self.window.draw_drawable (self.style.fg_gc[self.state],
self._pixmap, x, y, x, y, w, h)
return False # finish event propagation?
filetypes = FigureCanvasBase.filetypes.copy()
filetypes['jpg'] = 'JPEG'
filetypes['jpeg'] = 'JPEG'
filetypes['png'] = 'Portable Network Graphics'
def print_jpeg(self, filename, *args, **kwargs):
return self._print_image(filename, 'jpeg')
print_jpg = print_jpeg
def print_png(self, filename, *args, **kwargs):
return self._print_image(filename, 'png')
def _print_image(self, filename, format):
if self.flags() & gtk.REALIZED == 0:
            # realize the widget so self.window exists (needed for the
            # pixmap); this has a side effect of altering the figure
            # width/height (via configure-event?)
gtk.DrawingArea.realize(self)
width, height = self.get_width_height()
pixmap = gdk.Pixmap (self.window, width, height)
self._renderer.set_pixmap (pixmap)
self._render_figure(pixmap, width, height)
# jpg colors don't match the display very well, png colors match
# better
pixbuf = gdk.Pixbuf(gdk.COLORSPACE_RGB, 0, 8, width, height)
pixbuf.get_from_drawable(pixmap, pixmap.get_colormap(),
0, 0, 0, 0, width, height)
if is_string_like(filename):
try:
pixbuf.save(filename, format)
except gobject.GError, exc:
error_msg_gtk('Save figure failure:\n%s' % (exc,), parent=self)
elif is_writable_file_like(filename):
if hasattr(pixbuf, 'save_to_callback'):
def save_callback(buf, data=None):
data.write(buf)
try:
pixbuf.save_to_callback(save_callback, format, user_data=filename)
except gobject.GError, exc:
error_msg_gtk('Save figure failure:\n%s' % (exc,), parent=self)
else:
raise ValueError("Saving to a Python file-like object is only supported by PyGTK >= 2.8")
else:
raise ValueError("filename must be a path or a file-like object")
def get_default_filetype(self):
return 'png'
def flush_events(self):
gtk.gdk.threads_enter()
while gtk.events_pending():
gtk.main_iteration(True)
gtk.gdk.flush()
gtk.gdk.threads_leave()
def start_event_loop(self,timeout):
FigureCanvasBase.start_event_loop_default(self,timeout)
start_event_loop.__doc__=FigureCanvasBase.start_event_loop_default.__doc__
def stop_event_loop(self):
FigureCanvasBase.stop_event_loop_default(self)
stop_event_loop.__doc__=FigureCanvasBase.stop_event_loop_default.__doc__
class FigureManagerGTK(FigureManagerBase):
"""
Public attributes
canvas : The FigureCanvas instance
num : The Figure number
toolbar : The gtk.Toolbar (gtk only)
vbox : The gtk.VBox containing the canvas and toolbar (gtk only)
window : The gtk.Window (gtk only)
"""
def __init__(self, canvas, num):
if _debug: print 'FigureManagerGTK.%s' % fn_name()
FigureManagerBase.__init__(self, canvas, num)
self.window = gtk.Window()
self.window.set_title("Figure %d" % num)
self.vbox = gtk.VBox()
self.window.add(self.vbox)
self.vbox.show()
self.canvas.show()
# attach a show method to the figure for pylab ease of use
self.canvas.figure.show = lambda *args: self.window.show()
self.vbox.pack_start(self.canvas, True, True)
self.toolbar = self._get_toolbar(canvas)
# calculate size for window
w = int (self.canvas.figure.bbox.width)
h = int (self.canvas.figure.bbox.height)
if self.toolbar is not None:
self.toolbar.show()
self.vbox.pack_end(self.toolbar, False, False)
tb_w, tb_h = self.toolbar.size_request()
h += tb_h
self.window.set_default_size (w, h)
def destroy(*args):
Gcf.destroy(num)
self.window.connect("destroy", destroy)
self.window.connect("delete_event", destroy)
if matplotlib.is_interactive():
self.window.show()
def notify_axes_change(fig):
'this will be called whenever the current axes is changed'
if self.toolbar is not None: self.toolbar.update()
self.canvas.figure.add_axobserver(notify_axes_change)
self.canvas.grab_focus()
def destroy(self, *args):
if _debug: print 'FigureManagerGTK.%s' % fn_name()
self.vbox.destroy()
self.window.destroy()
self.canvas.destroy()
self.toolbar.destroy()
self.__dict__.clear()
if Gcf.get_num_fig_managers()==0 and \
not matplotlib.is_interactive() and \
gtk.main_level() >= 1:
gtk.main_quit()
def show(self):
# show the figure window
self.window.show()
def full_screen_toggle (self):
self._full_screen_flag = not self._full_screen_flag
if self._full_screen_flag:
self.window.fullscreen()
else:
self.window.unfullscreen()
_full_screen_flag = False
def _get_toolbar(self, canvas):
# must be inited after the window, drawingArea and figure
# attrs are set
if matplotlib.rcParams['toolbar'] == 'classic':
toolbar = NavigationToolbar (canvas, self.window)
elif matplotlib.rcParams['toolbar'] == 'toolbar2':
toolbar = NavigationToolbar2GTK (canvas, self.window)
else:
toolbar = None
return toolbar
def set_window_title(self, title):
self.window.set_title(title)
def resize(self, width, height):
'set the canvas size in pixels'
#_, _, cw, ch = self.canvas.allocation
#_, _, ww, wh = self.window.allocation
#self.window.resize (width-cw+ww, height-ch+wh)
self.window.resize(width, height)
class NavigationToolbar2GTK(NavigationToolbar2, gtk.Toolbar):
# list of toolitems to add to the toolbar, format is:
# text, tooltip_text, image_file, callback(str)
toolitems = (
('Home', 'Reset original view', 'home.png', 'home'),
('Back', 'Back to previous view','back.png', 'back'),
('Forward', 'Forward to next view','forward.png', 'forward'),
('Pan', 'Pan axes with left mouse, zoom with right', 'move.png','pan'),
('Zoom', 'Zoom to rectangle','zoom_to_rect.png', 'zoom'),
(None, None, None, None),
('Subplots', 'Configure subplots','subplots.png', 'configure_subplots'),
('Save', 'Save the figure','filesave.png', 'save_figure'),
)
def __init__(self, canvas, window):
self.win = window
gtk.Toolbar.__init__(self)
NavigationToolbar2.__init__(self, canvas)
self._idle_draw_id = 0
def set_message(self, s):
if self._idle_draw_id == 0:
self.message.set_label(s)
def set_cursor(self, cursor):
self.canvas.window.set_cursor(cursord[cursor])
def release(self, event):
try: del self._imageBack
except AttributeError: pass
def dynamic_update(self):
# legacy method; new method is canvas.draw_idle
self.canvas.draw_idle()
def draw_rubberband(self, event, x0, y0, x1, y1):
'adapted from http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/189744'
drawable = self.canvas.window
if drawable is None:
return
gc = drawable.new_gc()
height = self.canvas.figure.bbox.height
y1 = height - y1
y0 = height - y0
w = abs(x1 - x0)
h = abs(y1 - y0)
        rect = [int(val) for val in (min(x0, x1), min(y0, y1), w, h)]
try: lastrect, imageBack = self._imageBack
except AttributeError:
#snap image back
if event.inaxes is None:
return
ax = event.inaxes
l,b,w,h = [int(val) for val in ax.bbox.bounds]
b = int(height)-(b+h)
axrect = l,b,w,h
self._imageBack = axrect, drawable.get_image(*axrect)
drawable.draw_rectangle(gc, False, *rect)
self._idle_draw_id = 0
else:
def idle_draw(*args):
drawable.draw_image(gc, imageBack, 0, 0, *lastrect)
drawable.draw_rectangle(gc, False, *rect)
self._idle_draw_id = 0
return False
if self._idle_draw_id == 0:
self._idle_draw_id = gobject.idle_add(idle_draw)
def _init_toolbar(self):
self.set_style(gtk.TOOLBAR_ICONS)
if gtk.pygtk_version >= (2,4,0):
self._init_toolbar2_4()
else:
self._init_toolbar2_2()
def _init_toolbar2_2(self):
basedir = os.path.join(matplotlib.rcParams['datapath'],'images')
for text, tooltip_text, image_file, callback in self.toolitems:
if text is None:
self.append_space()
continue
fname = os.path.join(basedir, image_file)
image = gtk.Image()
image.set_from_file(fname)
w = self.append_item(text,
tooltip_text,
'Private',
image,
getattr(self, callback)
)
self.append_space()
self.message = gtk.Label()
self.append_widget(self.message, None, None)
self.message.show()
def _init_toolbar2_4(self):
basedir = os.path.join(matplotlib.rcParams['datapath'],'images')
self.tooltips = gtk.Tooltips()
for text, tooltip_text, image_file, callback in self.toolitems:
if text is None:
self.insert( gtk.SeparatorToolItem(), -1 )
continue
fname = os.path.join(basedir, image_file)
image = gtk.Image()
image.set_from_file(fname)
tbutton = gtk.ToolButton(image, text)
self.insert(tbutton, -1)
tbutton.connect('clicked', getattr(self, callback))
tbutton.set_tooltip(self.tooltips, tooltip_text, 'Private')
toolitem = gtk.SeparatorToolItem()
self.insert(toolitem, -1)
# set_draw() not making separator invisible,
# bug #143692 fixed Jun 06 2004, will be in GTK+ 2.6
toolitem.set_draw(False)
toolitem.set_expand(True)
toolitem = gtk.ToolItem()
self.insert(toolitem, -1)
self.message = gtk.Label()
toolitem.add(self.message)
self.show_all()
def get_filechooser(self):
if gtk.pygtk_version >= (2,4,0):
return FileChooserDialog(
title='Save the figure',
parent=self.win,
filetypes=self.canvas.get_supported_filetypes(),
default_filetype=self.canvas.get_default_filetype())
else:
return FileSelection(title='Save the figure',
parent=self.win,)
def save_figure(self, button):
fname, format = self.get_filechooser().get_filename_from_user()
if fname:
try:
self.canvas.print_figure(fname, format=format)
except Exception, e:
error_msg_gtk(str(e), parent=self)
def configure_subplots(self, button):
toolfig = Figure(figsize=(6,3))
canvas = self._get_canvas(toolfig)
toolfig.subplots_adjust(top=0.9)
tool = SubplotTool(self.canvas.figure, toolfig)
w = int (toolfig.bbox.width)
h = int (toolfig.bbox.height)
window = gtk.Window()
window.set_title("Subplot Configuration Tool")
window.set_default_size(w, h)
vbox = gtk.VBox()
window.add(vbox)
vbox.show()
canvas.show()
vbox.pack_start(canvas, True, True)
window.show()
def _get_canvas(self, fig):
return FigureCanvasGTK(fig)
class NavigationToolbar(gtk.Toolbar):
"""
Public attributes
canvas - the FigureCanvas (gtk.DrawingArea)
win - the gtk.Window
"""
# list of toolitems to add to the toolbar, format is:
# text, tooltip_text, image, callback(str), callback_arg, scroll(bool)
toolitems = (
('Left', 'Pan left with click or wheel mouse (bidirectional)',
gtk.STOCK_GO_BACK, 'panx', -1, True),
('Right', 'Pan right with click or wheel mouse (bidirectional)',
gtk.STOCK_GO_FORWARD, 'panx', 1, True),
('Zoom In X',
'Zoom In X (shrink the x axis limits) with click or wheel'
' mouse (bidirectional)',
gtk.STOCK_ZOOM_IN, 'zoomx', 1, True),
('Zoom Out X',
'Zoom Out X (expand the x axis limits) with click or wheel'
' mouse (bidirectional)',
gtk.STOCK_ZOOM_OUT, 'zoomx', -1, True),
(None, None, None, None, None, None,),
('Up', 'Pan up with click or wheel mouse (bidirectional)',
gtk.STOCK_GO_UP, 'pany', 1, True),
('Down', 'Pan down with click or wheel mouse (bidirectional)',
gtk.STOCK_GO_DOWN, 'pany', -1, True),
('Zoom In Y',
'Zoom in Y (shrink the y axis limits) with click or wheel'
' mouse (bidirectional)',
gtk.STOCK_ZOOM_IN, 'zoomy', 1, True),
('Zoom Out Y',
'Zoom Out Y (expand the y axis limits) with click or wheel'
' mouse (bidirectional)',
gtk.STOCK_ZOOM_OUT, 'zoomy', -1, True),
(None, None, None, None, None, None,),
('Save', 'Save the figure',
gtk.STOCK_SAVE, 'save_figure', None, False),
)
def __init__(self, canvas, window):
"""
figManager is the FigureManagerGTK instance that contains the
toolbar, with attributes figure, window and drawingArea
"""
gtk.Toolbar.__init__(self)
self.canvas = canvas
# Note: gtk.Toolbar already has a 'window' attribute
self.win = window
self.set_style(gtk.TOOLBAR_ICONS)
if gtk.pygtk_version >= (2,4,0):
self._create_toolitems_2_4()
self.update = self._update_2_4
self.fileselect = FileChooserDialog(
title='Save the figure',
parent=self.win,
filetypes=self.canvas.get_supported_filetypes(),
default_filetype=self.canvas.get_default_filetype())
else:
self._create_toolitems_2_2()
self.update = self._update_2_2
self.fileselect = FileSelection(title='Save the figure',
parent=self.win)
self.show_all()
self.update()
def _create_toolitems_2_4(self):
# use the GTK+ 2.4 GtkToolbar API
iconSize = gtk.ICON_SIZE_SMALL_TOOLBAR
self.tooltips = gtk.Tooltips()
for text, tooltip_text, image_num, callback, callback_arg, scroll \
in self.toolitems:
if text is None:
self.insert( gtk.SeparatorToolItem(), -1 )
continue
image = gtk.Image()
image.set_from_stock(image_num, iconSize)
tbutton = gtk.ToolButton(image, text)
self.insert(tbutton, -1)
if callback_arg:
tbutton.connect('clicked', getattr(self, callback),
callback_arg)
else:
tbutton.connect('clicked', getattr(self, callback))
if scroll:
tbutton.connect('scroll_event', getattr(self, callback))
tbutton.set_tooltip(self.tooltips, tooltip_text, 'Private')
# Axes toolitem, is empty at start, update() adds a menu if >=2 axes
self.axes_toolitem = gtk.ToolItem()
self.insert(self.axes_toolitem, 0)
self.axes_toolitem.set_tooltip (
self.tooltips,
tip_text='Select axes that controls affect',
tip_private = 'Private')
align = gtk.Alignment (xalign=0.5, yalign=0.5, xscale=0.0, yscale=0.0)
self.axes_toolitem.add(align)
self.menubutton = gtk.Button ("Axes")
align.add (self.menubutton)
def position_menu (menu):
"""Function for positioning a popup menu.
Place menu below the menu button, but ensure it does not go off
the bottom of the screen.
The default is to popup menu at current mouse position
"""
x0, y0 = self.window.get_origin()
x1, y1, m = self.window.get_pointer()
x2, y2 = self.menubutton.get_pointer()
sc_h = self.get_screen().get_height() # requires GTK+ 2.2 +
w, h = menu.size_request()
x = x0 + x1 - x2
y = y0 + y1 - y2 + self.menubutton.allocation.height
y = min(y, sc_h - h)
return x, y, True
def button_clicked (button, data=None):
self.axismenu.popup (None, None, position_menu, 0,
gtk.get_current_event_time())
self.menubutton.connect ("clicked", button_clicked)
def _update_2_4(self):
# for GTK+ 2.4+
# called by __init__() and FigureManagerGTK
self._axes = self.canvas.figure.axes
if len(self._axes) >= 2:
self.axismenu = self._make_axis_menu()
self.menubutton.show_all()
else:
self.menubutton.hide()
self.set_active(range(len(self._axes)))
def _create_toolitems_2_2(self):
# use the GTK+ 2.2 (and lower) GtkToolbar API
iconSize = gtk.ICON_SIZE_SMALL_TOOLBAR
for text, tooltip_text, image_num, callback, callback_arg, scroll \
in self.toolitems:
if text is None:
self.append_space()
continue
image = gtk.Image()
image.set_from_stock(image_num, iconSize)
item = self.append_item(text, tooltip_text, 'Private', image,
getattr(self, callback), callback_arg)
if scroll:
item.connect("scroll_event", getattr(self, callback))
self.omenu = gtk.OptionMenu()
self.omenu.set_border_width(3)
self.insert_widget(
self.omenu,
'Select axes that controls affect',
'Private', 0)
def _update_2_2(self):
# for GTK+ 2.2 and lower
# called by __init__() and FigureManagerGTK
self._axes = self.canvas.figure.axes
if len(self._axes) >= 2:
# set up the axis menu
self.omenu.set_menu( self._make_axis_menu() )
self.omenu.show_all()
else:
self.omenu.hide()
self.set_active(range(len(self._axes)))
def _make_axis_menu(self):
# called by self._update*()
def toggled(item, data=None):
if item == self.itemAll:
for item in items: item.set_active(True)
elif item == self.itemInvert:
for item in items:
item.set_active(not item.get_active())
ind = [i for i,item in enumerate(items) if item.get_active()]
self.set_active(ind)
menu = gtk.Menu()
self.itemAll = gtk.MenuItem("All")
menu.append(self.itemAll)
self.itemAll.connect("activate", toggled)
self.itemInvert = gtk.MenuItem("Invert")
menu.append(self.itemInvert)
self.itemInvert.connect("activate", toggled)
items = []
for i in range(len(self._axes)):
item = gtk.CheckMenuItem("Axis %d" % (i+1))
menu.append(item)
item.connect("toggled", toggled)
item.set_active(True)
items.append(item)
menu.show_all()
return menu
def set_active(self, ind):
self._ind = ind
self._active = [ self._axes[i] for i in self._ind ]
def panx(self, button, direction):
'panx in direction'
for a in self._active:
a.xaxis.pan(direction)
self.canvas.draw()
return True
def pany(self, button, direction):
'pany in direction'
for a in self._active:
a.yaxis.pan(direction)
self.canvas.draw()
return True
def zoomx(self, button, direction):
'zoomx in direction'
for a in self._active:
a.xaxis.zoom(direction)
self.canvas.draw()
return True
def zoomy(self, button, direction):
'zoomy in direction'
for a in self._active:
a.yaxis.zoom(direction)
self.canvas.draw()
return True
def get_filechooser(self):
if gtk.pygtk_version >= (2,4,0):
return FileChooserDialog(
title='Save the figure',
parent=self.win,
filetypes=self.canvas.get_supported_filetypes(),
default_filetype=self.canvas.get_default_filetype())
else:
return FileSelection(title='Save the figure',
parent=self.win)
def save_figure(self, button):
fname, format = self.get_filechooser().get_filename_from_user()
if fname:
try:
self.canvas.print_figure(fname, format=format)
except Exception, e:
error_msg_gtk(str(e), parent=self)
if gtk.pygtk_version >= (2,4,0):
class FileChooserDialog(gtk.FileChooserDialog):
"""GTK+ 2.4 file selector which remembers the last file/directory
selected and presents the user with a menu of supported image formats
"""
def __init__ (self,
title = 'Save file',
parent = None,
action = gtk.FILE_CHOOSER_ACTION_SAVE,
buttons = (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
gtk.STOCK_SAVE, gtk.RESPONSE_OK),
path = None,
filetypes = [],
default_filetype = None
):
super (FileChooserDialog, self).__init__ (title, parent, action,
buttons)
self.set_default_response (gtk.RESPONSE_OK)
if not path: path = os.getcwd() + os.sep
        self.set_current_folder (path)
        self.set_current_name ('image.' + default_filetype)

        # create an extra widget to list supported image formats
hbox = gtk.HBox (spacing=10)
hbox.pack_start (gtk.Label ("File Format:"), expand=False)
liststore = gtk.ListStore(gobject.TYPE_STRING)
cbox = gtk.ComboBox(liststore)
cell = gtk.CellRendererText()
cbox.pack_start(cell, True)
cbox.add_attribute(cell, 'text', 0)
hbox.pack_start (cbox)
self.filetypes = filetypes
self.sorted_filetypes = filetypes.items()
self.sorted_filetypes.sort()
default = 0
for i, (ext, name) in enumerate(self.sorted_filetypes):
cbox.append_text ("%s (*.%s)" % (name, ext))
if ext == default_filetype:
default = i
cbox.set_active(default)
self.ext = default_filetype
def cb_cbox_changed (cbox, data=None):
"""File extension changed"""
head, filename = os.path.split(self.get_filename())
root, ext = os.path.splitext(filename)
ext = ext[1:]
new_ext = self.sorted_filetypes[cbox.get_active()][0]
self.ext = new_ext
if ext in self.filetypes:
filename = root + '.' + new_ext
elif ext == '':
filename = filename.rstrip('.') + '.' + new_ext
self.set_current_name (filename)
cbox.connect ("changed", cb_cbox_changed)
hbox.show_all()
self.set_extra_widget(hbox)
def get_filename_from_user (self):
while True:
filename = None
if self.run() != int(gtk.RESPONSE_OK):
break
filename = self.get_filename()
break
self.hide()
return filename, self.ext
else:
class FileSelection(gtk.FileSelection):
"""GTK+ 2.2 and lower file selector which remembers the last
file/directory selected
"""
def __init__(self, path=None, title='Select a file', parent=None):
super(FileSelection, self).__init__(title)
if path: self.path = path
else: self.path = os.getcwd() + os.sep
if parent: self.set_transient_for(parent)
def get_filename_from_user(self, path=None, title=None):
if path: self.path = path
if title: self.set_title(title)
self.set_filename(self.path)
filename = None
if self.run() == int(gtk.RESPONSE_OK):
self.path = filename = self.get_filename()
self.hide()
ext = None
if filename is not None:
ext = os.path.splitext(filename)[1]
if ext.startswith('.'):
ext = ext[1:]
return filename, ext
class DialogLineprops:
"""
A GUI dialog for controlling lineprops
"""
signals = (
'on_combobox_lineprops_changed',
'on_combobox_linestyle_changed',
'on_combobox_marker_changed',
'on_colorbutton_linestyle_color_set',
'on_colorbutton_markerface_color_set',
'on_dialog_lineprops_okbutton_clicked',
'on_dialog_lineprops_cancelbutton_clicked',
)
linestyles = [ls for ls in lines.Line2D.lineStyles if ls.strip()]
linestyled = dict([ (s,i) for i,s in enumerate(linestyles)])
markers = [m for m in lines.Line2D.markers if cbook.is_string_like(m)]
markerd = dict([(s,i) for i,s in enumerate(markers)])
def __init__(self, lines):
import gtk.glade
datadir = matplotlib.get_data_path()
gladefile = os.path.join(datadir, 'lineprops.glade')
if not os.path.exists(gladefile):
raise IOError('Could not find gladefile lineprops.glade in %s'%datadir)
self._inited = False
self._updateson = True # suppress updates when setting widgets manually
self.wtree = gtk.glade.XML(gladefile, 'dialog_lineprops')
self.wtree.signal_autoconnect(dict([(s, getattr(self, s)) for s in self.signals]))
self.dlg = self.wtree.get_widget('dialog_lineprops')
self.lines = lines
cbox = self.wtree.get_widget('combobox_lineprops')
cbox.set_active(0)
self.cbox_lineprops = cbox
cbox = self.wtree.get_widget('combobox_linestyles')
for ls in self.linestyles:
cbox.append_text(ls)
cbox.set_active(0)
self.cbox_linestyles = cbox
cbox = self.wtree.get_widget('combobox_markers')
for m in self.markers:
cbox.append_text(m)
cbox.set_active(0)
self.cbox_markers = cbox
self._lastcnt = 0
self._inited = True
def show(self):
'populate the combo box'
self._updateson = False
# flush the old
cbox = self.cbox_lineprops
for i in range(self._lastcnt-1,-1,-1):
cbox.remove_text(i)
# add the new
for line in self.lines:
cbox.append_text(line.get_label())
cbox.set_active(0)
self._updateson = True
self._lastcnt = len(self.lines)
self.dlg.show()
def get_active_line(self):
'get the active line'
ind = self.cbox_lineprops.get_active()
line = self.lines[ind]
return line
def get_active_linestyle(self):
        'get the active linestyle'
ind = self.cbox_linestyles.get_active()
ls = self.linestyles[ind]
return ls
def get_active_marker(self):
        'get the active marker'
ind = self.cbox_markers.get_active()
m = self.markers[ind]
return m
def _update(self):
'update the active line props from the widgets'
if not self._inited or not self._updateson: return
line = self.get_active_line()
ls = self.get_active_linestyle()
marker = self.get_active_marker()
line.set_linestyle(ls)
line.set_marker(marker)
button = self.wtree.get_widget('colorbutton_linestyle')
color = button.get_color()
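        # (added note) gtk.gdk.Color stores each channel as a 16-bit integer
        # (0..65535), so we divide to obtain matplotlib's 0..1 floats below.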
r, g, b = [val/65535. for val in color.red, color.green, color.blue]
line.set_color((r,g,b))
button = self.wtree.get_widget('colorbutton_markerface')
color = button.get_color()
r, g, b = [val/65535. for val in color.red, color.green, color.blue]
line.set_markerfacecolor((r,g,b))
line.figure.canvas.draw()
def on_combobox_lineprops_changed(self, item):
'update the widgets from the active line'
if not self._inited: return
self._updateson = False
line = self.get_active_line()
ls = line.get_linestyle()
if ls is None: ls = 'None'
self.cbox_linestyles.set_active(self.linestyled[ls])
marker = line.get_marker()
if marker is None: marker = 'None'
self.cbox_markers.set_active(self.markerd[marker])
r,g,b = colorConverter.to_rgb(line.get_color())
color = gtk.gdk.Color(*[int(val*65535) for val in r,g,b])
button = self.wtree.get_widget('colorbutton_linestyle')
button.set_color(color)
r,g,b = colorConverter.to_rgb(line.get_markerfacecolor())
color = gtk.gdk.Color(*[int(val*65535) for val in r,g,b])
button = self.wtree.get_widget('colorbutton_markerface')
button.set_color(color)
self._updateson = True
def on_combobox_linestyle_changed(self, item):
self._update()
def on_combobox_marker_changed(self, item):
self._update()
def on_colorbutton_linestyle_color_set(self, button):
self._update()
def on_colorbutton_markerface_color_set(self, button):
'called colorbutton marker clicked'
self._update()
def on_dialog_lineprops_okbutton_clicked(self, button):
self._update()
self.dlg.hide()
def on_dialog_lineprops_cancelbutton_clicked(self, button):
self.dlg.hide()
# set icon used when windows are minimized
# Unfortunately, the SVG renderer (rsvg) leaks memory under earlier
# versions of pygtk, so we have to use a PNG file instead.
try:
if gtk.pygtk_version < (2, 8, 0):
icon_filename = 'matplotlib.png'
else:
icon_filename = 'matplotlib.svg'
gtk.window_set_default_icon_from_file (
os.path.join (matplotlib.rcParams['datapath'], 'images', icon_filename))
except:
verbose.report('Could not load matplotlib icon: %s' % sys.exc_info()[1])
def error_msg_gtk(msg, parent=None):
if parent is not None: # find the toplevel gtk.Window
parent = parent.get_toplevel()
if parent.flags() & gtk.TOPLEVEL == 0:
parent = None
if not is_string_like(msg):
msg = ','.join(map(str,msg))
dialog = gtk.MessageDialog(
parent = parent,
type = gtk.MESSAGE_ERROR,
buttons = gtk.BUTTONS_OK,
message_format = msg)
dialog.run()
dialog.destroy()
FigureManager = FigureManagerGTK
| gpl-3.0 |
andaag/scikit-learn | examples/neighbors/plot_approximate_nearest_neighbors_hyperparameters.py | 227 | 5170 | """
=================================================
Hyper-parameters of Approximate Nearest Neighbors
=================================================
This example demonstrates how the accuracy of nearest neighbor queries with
Locality Sensitive Hashing Forest varies with the number of candidates and
the number of estimators (trees).
In the first plot, accuracy is measured as a function of the number of
candidates. Here, the term "number of candidates" refers to the maximum bound
on the number of distinct points retrieved from each tree to calculate the
distances. Nearest neighbors are selected from this pool of candidates. The
number of estimators is held at three fixed levels (1, 5, 10).
In the second plot, the number of candidates is fixed at 50. The number of
trees is varied and the accuracy is plotted against those values. To measure
the accuracy, the true nearest neighbors are required, so
:class:`sklearn.neighbors.NearestNeighbors` is used to compute the exact
neighbors.
"""
from __future__ import division
print(__doc__)
# Author: Maheshakya Wijewardena <[email protected]>
#
# License: BSD 3 clause
###############################################################################
import numpy as np
from sklearn.datasets.samples_generator import make_blobs
from sklearn.neighbors import LSHForest
from sklearn.neighbors import NearestNeighbors
import matplotlib.pyplot as plt
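# Hedged helper (added for clarity; not part of the original example): the
# "accuracy" used throughout is simply the fraction of queries whose
# approximate 1-nearest-neighbor matches the exact 1-nearest-neighbor.
def knn_match_accuracy(neighbors_approx, neighbors_exact):
    """Return the fraction of rows where approximate and exact NN agree."""
    return np.mean(np.equal(neighbors_approx, neighbors_exact))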
# Initialize size of the database, iterations and required neighbors.
n_samples = 10000
n_features = 100
n_queries = 30
rng = np.random.RandomState(42)
# Generate sample data
X, _ = make_blobs(n_samples=n_samples + n_queries,
n_features=n_features, centers=10,
random_state=0)
X_index = X[:n_samples]
X_query = X[n_samples:]
# Get exact neighbors
nbrs = NearestNeighbors(n_neighbors=1, algorithm='brute',
metric='cosine').fit(X_index)
neighbors_exact = nbrs.kneighbors(X_query, return_distance=False)
# Set `n_candidates` values
n_candidates_values = np.linspace(10, 500, 5).astype(np.int)
n_estimators_for_candidate_value = [1, 5, 10]
n_iter = 10
stds_accuracies = np.zeros((len(n_estimators_for_candidate_value),
n_candidates_values.shape[0]),
dtype=float)
accuracies_c = np.zeros((len(n_estimators_for_candidate_value),
n_candidates_values.shape[0]), dtype=float)
# LSH Forest is a stochastic index: perform several iterations to estimate
# the expected accuracy and the standard deviation displayed as error bars
# in the plots
for j, value in enumerate(n_estimators_for_candidate_value):
for i, n_candidates in enumerate(n_candidates_values):
accuracy_c = []
for seed in range(n_iter):
lshf = LSHForest(n_estimators=value,
n_candidates=n_candidates, n_neighbors=1,
random_state=seed)
# Build the LSH Forest index
lshf.fit(X_index)
# Get neighbors
neighbors_approx = lshf.kneighbors(X_query,
return_distance=False)
accuracy_c.append(np.sum(np.equal(neighbors_approx,
neighbors_exact)) /
n_queries)
stds_accuracies[j, i] = np.std(accuracy_c)
accuracies_c[j, i] = np.mean(accuracy_c)
# Set `n_estimators` values
n_estimators_values = [1, 5, 10, 20, 30, 40, 50]
accuracies_trees = np.zeros(len(n_estimators_values), dtype=float)
# Calculate average accuracy for each value of `n_estimators`
for i, n_estimators in enumerate(n_estimators_values):
lshf = LSHForest(n_estimators=n_estimators, n_neighbors=1)
# Build the LSH Forest index
lshf.fit(X_index)
# Get neighbors
neighbors_approx = lshf.kneighbors(X_query, return_distance=False)
accuracies_trees[i] = np.sum(np.equal(neighbors_approx,
neighbors_exact))/n_queries
###############################################################################
# Plot the accuracy variation with `n_candidates`
plt.figure()
colors = ['c', 'm', 'y']
for i, n_estimators in enumerate(n_estimators_for_candidate_value):
label = 'n_estimators = %d ' % n_estimators
plt.plot(n_candidates_values, accuracies_c[i, :],
'o-', c=colors[i], label=label)
plt.errorbar(n_candidates_values, accuracies_c[i, :],
stds_accuracies[i, :], c=colors[i])
plt.legend(loc='upper left', fontsize='small')
plt.ylim([0, 1.2])
plt.xlim(min(n_candidates_values), max(n_candidates_values))
plt.ylabel("Accuracy")
plt.xlabel("n_candidates")
plt.grid(which='both')
plt.title("Accuracy variation with n_candidates")
# Plot the accuracy variation with `n_estimators`
plt.figure()
plt.scatter(n_estimators_values, accuracies_trees, c='k')
plt.plot(n_estimators_values, accuracies_trees, c='g')
plt.ylim([0, 1.2])
plt.xlim(min(n_estimators_values), max(n_estimators_values))
plt.ylabel("Accuracy")
plt.xlabel("n_estimators")
plt.grid(which='both')
plt.title("Accuracy variation with n_estimators")
plt.show()
| bsd-3-clause |
mblondel/scikit-learn | examples/ensemble/plot_forest_iris.py | 335 | 6271 | """
====================================================================
Plot the decision surfaces of ensembles of trees on the iris dataset
====================================================================
Plot the decision surfaces of forests of randomized trees trained on pairs of
features of the iris dataset.
This plot compares the decision surfaces learned by a decision tree classifier
(first column), by a random forest classifier (second column), by an extra-
trees classifier (third column) and by an AdaBoost classifier (fourth column).
In the first row, the classifiers are built using the sepal width and the sepal
length features only, on the second row using the petal length and sepal length
only, and on the third row using the petal width and the petal length only.
In descending order of quality, when trained (outside of this example) on all
4 features using 30 estimators and scored using 10-fold cross-validation, we see::
ExtraTreesClassifier() # 0.95 score
RandomForestClassifier() # 0.94 score
AdaBoost(DecisionTree(max_depth=3)) # 0.94 score
DecisionTree(max_depth=None) # 0.94 score
Increasing `max_depth` for AdaBoost lowers the standard deviation of the scores (but
the average score does not improve).
See the console's output for further details about each model.
In this example you might try to:
1) vary the ``max_depth`` for the ``DecisionTreeClassifier`` and
``AdaBoostClassifier``, perhaps try ``max_depth=3`` for the
``DecisionTreeClassifier`` or ``max_depth=None`` for ``AdaBoostClassifier``
2) vary ``n_estimators``
It is worth noting that RandomForests and ExtraTrees can be fitted in parallel
on many cores as each tree is built independently of the others. AdaBoost's
estimators are built sequentially and so do not use multiple cores.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import clone
from sklearn.datasets import load_iris
from sklearn.ensemble import (RandomForestClassifier, ExtraTreesClassifier,
AdaBoostClassifier)
from sklearn.externals.six.moves import xrange
from sklearn.tree import DecisionTreeClassifier
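# Hedged sketch (added; not part of the original example): the cross-validation
# scores quoted in the docstring could be reproduced roughly as follows.  The
# helper is defined but never called here, and it assumes the era-appropriate
# ``sklearn.cross_validation`` module.
def report_cv_scores():
    from sklearn.cross_validation import cross_val_score
    data = load_iris()
    for clf in (ExtraTreesClassifier(n_estimators=30),
                RandomForestClassifier(n_estimators=30),
                AdaBoostClassifier(DecisionTreeClassifier(max_depth=3),
                                   n_estimators=30),
                DecisionTreeClassifier(max_depth=None)):
        scores = cross_val_score(clf, data.data, data.target, cv=10)
        print("%s: mean CV score %.2f" % (type(clf).__name__, scores.mean()))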
# Parameters
n_classes = 3
n_estimators = 30
plot_colors = "ryb"
cmap = plt.cm.RdYlBu
plot_step = 0.02 # fine step width for decision surface contours
plot_step_coarser = 0.5 # step widths for coarse classifier guesses
RANDOM_SEED = 13 # fix the seed on each iteration
# Load data
iris = load_iris()
plot_idx = 1
models = [DecisionTreeClassifier(max_depth=None),
RandomForestClassifier(n_estimators=n_estimators),
ExtraTreesClassifier(n_estimators=n_estimators),
AdaBoostClassifier(DecisionTreeClassifier(max_depth=3),
n_estimators=n_estimators)]
for pair in ([0, 1], [0, 2], [2, 3]):
for model in models:
# We only take the two corresponding features
X = iris.data[:, pair]
y = iris.target
# Shuffle
idx = np.arange(X.shape[0])
np.random.seed(RANDOM_SEED)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# Standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
# Train
clf = clone(model)
clf = model.fit(X, y)
scores = clf.score(X, y)
# Create a title for each column and the console by using str() and
# slicing away useless parts of the string
model_title = str(type(model)).split(".")[-1][:-2][:-len("Classifier")]
model_details = model_title
if hasattr(model, "estimators_"):
model_details += " with {} estimators".format(len(model.estimators_))
        print(model_details + " with features", pair, "has a score of", scores)
plt.subplot(3, 4, plot_idx)
if plot_idx <= len(models):
# Add a title at the top of each column
plt.title(model_title)
# Now plot the decision boundary using a fine mesh as input to a
# filled contour plot
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
# Plot either a single DecisionTreeClassifier or alpha blend the
# decision surfaces of the ensemble of classifiers
if isinstance(model, DecisionTreeClassifier):
Z = model.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=cmap)
else:
# Choose alpha blend level with respect to the number of estimators
# that are in use (noting that AdaBoost can use fewer estimators
# than its maximum if it achieves a good enough fit early on)
estimator_alpha = 1.0 / len(model.estimators_)
for tree in model.estimators_:
Z = tree.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, alpha=estimator_alpha, cmap=cmap)
# Build a coarser grid to plot a set of ensemble classifications
# to show how these are different to what we see in the decision
        # surfaces. These points are regularly spaced and do not have a black outline
xx_coarser, yy_coarser = np.meshgrid(np.arange(x_min, x_max, plot_step_coarser),
np.arange(y_min, y_max, plot_step_coarser))
Z_points_coarser = model.predict(np.c_[xx_coarser.ravel(), yy_coarser.ravel()]).reshape(xx_coarser.shape)
cs_points = plt.scatter(xx_coarser, yy_coarser, s=15, c=Z_points_coarser, cmap=cmap, edgecolors="none")
# Plot the training points, these are clustered together and have a
# black outline
for i, c in zip(xrange(n_classes), plot_colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=c, label=iris.target_names[i],
cmap=cmap)
plot_idx += 1 # move on to the next plot in sequence
plt.suptitle("Classifiers on feature subsets of the Iris dataset")
plt.axis("tight")
plt.show()
| bsd-3-clause |
ankurankan/scikit-learn | examples/applications/svm_gui.py | 287 | 11161 | """
==========
Libsvm GUI
==========
A simple graphical frontend for Libsvm mainly intended for didactic
purposes. You can create data points by point and click and visualize
the decision region induced by different kernels and parameter settings.
To create positive examples click the left mouse button; to create
negative examples click the right button.
If all examples are from the same class, it uses a one-class SVM.
"""
from __future__ import division, print_function
print(__doc__)
# Author: Peter Prettenhoer <[email protected]>
#
# License: BSD 3 clause
import matplotlib
matplotlib.use('TkAgg')
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.backends.backend_tkagg import NavigationToolbar2TkAgg
from matplotlib.figure import Figure
from matplotlib.contour import ContourSet
import Tkinter as Tk
import sys
import numpy as np
from sklearn import svm
from sklearn.datasets import dump_svmlight_file
from sklearn.externals.six.moves import xrange
y_min, y_max = -50, 50
x_min, x_max = -50, 50
class Model(object):
"""The Model which hold the data. It implements the
observable in the observer pattern and notifies the
registered observers on change event.
"""
def __init__(self):
self.observers = []
self.surface = None
self.data = []
self.cls = None
self.surface_type = 0
def changed(self, event):
"""Notify the observers. """
for observer in self.observers:
observer.update(event, self)
def add_observer(self, observer):
"""Register an observer. """
self.observers.append(observer)
def set_surface(self, surface):
self.surface = surface
def dump_svmlight_file(self, file):
data = np.array(self.data)
X = data[:, 0:2]
y = data[:, 2]
dump_svmlight_file(X, y, file)
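# Hedged illustration (added; not part of the original GUI): any object with
# an ``update(event, model)`` method can observe the Model above, e.g.:
class _PrintingObserver(object):
    """Example observer that just logs Model change events to stdout."""
    def update(self, event, model):
        print("model changed:", event)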
class Controller(object):
def __init__(self, model):
self.model = model
self.kernel = Tk.IntVar()
self.surface_type = Tk.IntVar()
# Whether or not a model has been fitted
self.fitted = False
def fit(self):
print("fit the model")
train = np.array(self.model.data)
X = train[:, 0:2]
y = train[:, 2]
C = float(self.complexity.get())
gamma = float(self.gamma.get())
coef0 = float(self.coef0.get())
degree = int(self.degree.get())
kernel_map = {0: "linear", 1: "rbf", 2: "poly"}
if len(np.unique(y)) == 1:
clf = svm.OneClassSVM(kernel=kernel_map[self.kernel.get()],
gamma=gamma, coef0=coef0, degree=degree)
clf.fit(X)
else:
clf = svm.SVC(kernel=kernel_map[self.kernel.get()], C=C,
gamma=gamma, coef0=coef0, degree=degree)
clf.fit(X, y)
if hasattr(clf, 'score'):
print("Accuracy:", clf.score(X, y) * 100)
X1, X2, Z = self.decision_surface(clf)
self.model.clf = clf
self.model.set_surface((X1, X2, Z))
self.model.surface_type = self.surface_type.get()
self.fitted = True
self.model.changed("surface")
def decision_surface(self, cls):
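        # (added comment) evaluate the classifier's decision function on a
        # regular grid so the View can draw contour lines / filled surfaces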
delta = 1
x = np.arange(x_min, x_max + delta, delta)
y = np.arange(y_min, y_max + delta, delta)
X1, X2 = np.meshgrid(x, y)
Z = cls.decision_function(np.c_[X1.ravel(), X2.ravel()])
Z = Z.reshape(X1.shape)
return X1, X2, Z
def clear_data(self):
self.model.data = []
self.fitted = False
self.model.changed("clear")
def add_example(self, x, y, label):
self.model.data.append((x, y, label))
self.model.changed("example_added")
# update decision surface if already fitted.
self.refit()
def refit(self):
"""Refit the model if already fitted. """
if self.fitted:
self.fit()
class View(object):
"""Test docstring. """
def __init__(self, root, controller):
f = Figure()
ax = f.add_subplot(111)
ax.set_xticks([])
ax.set_yticks([])
ax.set_xlim((x_min, x_max))
ax.set_ylim((y_min, y_max))
canvas = FigureCanvasTkAgg(f, master=root)
canvas.show()
canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
canvas._tkcanvas.pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
canvas.mpl_connect('button_press_event', self.onclick)
toolbar = NavigationToolbar2TkAgg(canvas, root)
toolbar.update()
self.controllbar = ControllBar(root, controller)
self.f = f
self.ax = ax
self.canvas = canvas
self.controller = controller
self.contours = []
self.c_labels = None
self.plot_kernels()
def plot_kernels(self):
self.ax.text(-50, -60, "Linear: $u^T v$")
self.ax.text(-20, -60, "RBF: $\exp (-\gamma \| u-v \|^2)$")
self.ax.text(10, -60, "Poly: $(\gamma \, u^T v + r)^d$")
def onclick(self, event):
if event.xdata and event.ydata:
if event.button == 1:
self.controller.add_example(event.xdata, event.ydata, 1)
elif event.button == 3:
self.controller.add_example(event.xdata, event.ydata, -1)
def update_example(self, model, idx):
x, y, l = model.data[idx]
if l == 1:
color = 'w'
elif l == -1:
color = 'k'
self.ax.plot([x], [y], "%so" % color, scalex=0.0, scaley=0.0)
def update(self, event, model):
if event == "examples_loaded":
for i in xrange(len(model.data)):
self.update_example(model, i)
if event == "example_added":
self.update_example(model, -1)
if event == "clear":
self.ax.clear()
self.ax.set_xticks([])
self.ax.set_yticks([])
self.contours = []
self.c_labels = None
self.plot_kernels()
if event == "surface":
self.remove_surface()
self.plot_support_vectors(model.clf.support_vectors_)
self.plot_decision_surface(model.surface, model.surface_type)
self.canvas.draw()
def remove_surface(self):
"""Remove old decision surface."""
if len(self.contours) > 0:
for contour in self.contours:
if isinstance(contour, ContourSet):
for lineset in contour.collections:
lineset.remove()
else:
contour.remove()
self.contours = []
def plot_support_vectors(self, support_vectors):
"""Plot the support vectors by placing circles over the
corresponding data points and adds the circle collection
to the contours list."""
cs = self.ax.scatter(support_vectors[:, 0], support_vectors[:, 1],
s=80, edgecolors="k", facecolors="none")
self.contours.append(cs)
def plot_decision_surface(self, surface, type):
X1, X2, Z = surface
if type == 0:
levels = [-1.0, 0.0, 1.0]
linestyles = ['dashed', 'solid', 'dashed']
colors = 'k'
self.contours.append(self.ax.contour(X1, X2, Z, levels,
colors=colors,
linestyles=linestyles))
elif type == 1:
self.contours.append(self.ax.contourf(X1, X2, Z, 10,
cmap=matplotlib.cm.bone,
origin='lower', alpha=0.85))
self.contours.append(self.ax.contour(X1, X2, Z, [0.0], colors='k',
linestyles=['solid']))
else:
raise ValueError("surface type unknown")
class ControllBar(object):
def __init__(self, root, controller):
fm = Tk.Frame(root)
kernel_group = Tk.Frame(fm)
Tk.Radiobutton(kernel_group, text="Linear", variable=controller.kernel,
value=0, command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(kernel_group, text="RBF", variable=controller.kernel,
value=1, command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(kernel_group, text="Poly", variable=controller.kernel,
value=2, command=controller.refit).pack(anchor=Tk.W)
kernel_group.pack(side=Tk.LEFT)
valbox = Tk.Frame(fm)
controller.complexity = Tk.StringVar()
controller.complexity.set("1.0")
c = Tk.Frame(valbox)
Tk.Label(c, text="C:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(c, width=6, textvariable=controller.complexity).pack(
side=Tk.LEFT)
c.pack()
controller.gamma = Tk.StringVar()
controller.gamma.set("0.01")
g = Tk.Frame(valbox)
Tk.Label(g, text="gamma:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(g, width=6, textvariable=controller.gamma).pack(side=Tk.LEFT)
g.pack()
controller.degree = Tk.StringVar()
controller.degree.set("3")
d = Tk.Frame(valbox)
Tk.Label(d, text="degree:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(d, width=6, textvariable=controller.degree).pack(side=Tk.LEFT)
d.pack()
controller.coef0 = Tk.StringVar()
controller.coef0.set("0")
r = Tk.Frame(valbox)
Tk.Label(r, text="coef0:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(r, width=6, textvariable=controller.coef0).pack(side=Tk.LEFT)
r.pack()
valbox.pack(side=Tk.LEFT)
cmap_group = Tk.Frame(fm)
Tk.Radiobutton(cmap_group, text="Hyperplanes",
variable=controller.surface_type, value=0,
command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(cmap_group, text="Surface",
variable=controller.surface_type, value=1,
command=controller.refit).pack(anchor=Tk.W)
cmap_group.pack(side=Tk.LEFT)
train_button = Tk.Button(fm, text='Fit', width=5,
command=controller.fit)
train_button.pack()
fm.pack(side=Tk.LEFT)
Tk.Button(fm, text='Clear', width=5,
command=controller.clear_data).pack(side=Tk.LEFT)
def get_parser():
from optparse import OptionParser
op = OptionParser()
op.add_option("--output",
action="store", type="str", dest="output",
help="Path where to dump data.")
return op
def main(argv):
op = get_parser()
opts, args = op.parse_args(argv[1:])
root = Tk.Tk()
model = Model()
controller = Controller(model)
root.wm_title("Scikit-learn Libsvm GUI")
view = View(root, controller)
model.add_observer(view)
Tk.mainloop()
if opts.output:
model.dump_svmlight_file(opts.output)
if __name__ == "__main__":
main(sys.argv)
| bsd-3-clause |
dwillmer/blaze | blaze/compute/tests/test_pytables_compute.py | 14 | 7657 | from __future__ import absolute_import, division, print_function
import os
import pytest
import pandas as pd
tb = pytest.importorskip('tables')
try:
f = pd.HDFStore('foo')
except (RuntimeError, ImportError) as e:
pytest.skip('skipping test_hdfstore.py %s' % e)
else:
f.close()
os.remove('foo')
from blaze.compatibility import xfail
import numpy as np
from blaze.compute.core import compute
from blaze.expr import symbol
from blaze import drop, discover, create_index
from blaze.utils import tmpfile
t = symbol('t', 'var * {id: int, name: string, amount: int}')
x = np.array([(1, 'Alice', 100),
(2, 'Bob', -200),
(3, 'Charlie', 300),
(4, 'Denis', 400),
(5, 'Edith', -500)],
dtype=[('id', '<i8'), ('name', 'S7'), ('amount', '<i8')])
@pytest.yield_fixture
def data():
with tmpfile('.h5') as filename:
f = tb.open_file(filename, mode='w')
d = f.create_table('/', 'title', x)
yield d
d.close()
f.close()
@pytest.yield_fixture
def csi_data():
with tmpfile('.h5') as filename:
f = tb.open_file(filename, mode='w')
d = f.create_table('/', 'title', x)
d.cols.amount.create_csindex()
d.cols.id.create_csindex()
yield d
d.close()
f.close()
@pytest.yield_fixture
def idx_data():
with tmpfile('.h5') as fn:
f = tb.open_file(fn, mode='w')
d = f.create_table('/', 'title', x)
d.cols.amount.create_index()
d.cols.id.create_index()
yield d
d.close()
f.close()
def eq(a, b):
return (a == b).all()
def test_discover_datashape(data):
ds = discover(data)
t = symbol('t', ds)
columns = t.fields
assert columns is not None
def test_symbol(data):
assert compute(t, data) == data
assert isinstance(data, tb.Table)
def test_single_column(data):
assert eq(compute(t['name'], data), x['name'])
def test_projection(data):
assert eq(compute(t[['name', 'amount']], data), x[['name', 'amount']])
def test_eq(data):
assert eq(compute(t['amount'] == 100, data), x['amount'] == 100)
def test_scalar_ops(data):
from operator import add, sub, mul, truediv
for op in (add, sub, mul, truediv):
assert eq(compute(op(t.amount, 10), data), op(x['amount'], 10))
assert eq(compute(op(t.amount, t.id), data), op(x['amount'], x['id']))
assert eq(compute(op(10.0, t.amount), data), op(10.0, x['amount']))
assert eq(compute(op(10, t.amount), data), op(10, x['amount']))
def test_neg(data):
assert eq(compute(-t.amount, data), -x['amount'])
def test_failing_floordiv(data):
from operator import floordiv as op
with pytest.raises(TypeError):
assert eq(compute(op(t.amount, 10), data), op(x['amount'], 10))
with pytest.raises(TypeError):
assert eq(compute(op(t.amount, t.id), data), op(x['amount'], x['id']))
with pytest.raises(TypeError):
assert eq(compute(op(10.0, t.amount), data), op(10.0, x['amount']))
with pytest.raises(TypeError):
assert eq(compute(op(10, t.amount), data), op(10, x['amount']))
def test_selection(data):
    assert eq(compute(t[t['amount'] == 100], data), x[x['amount'] == 100])
assert eq(compute(t[t['amount'] < 0], data), x[x['amount'] < 0])
def test_arithmetic(data):
assert eq(compute(t['amount'] + t['id'], data), x['amount'] + x['id'])
assert eq(compute(t['amount'] * t['id'], data), x['amount'] * x['id'])
assert eq(compute(t['amount'] % t['id'], data), x['amount'] % x['id'])
assert eq(compute(t['amount'] + t['id'] + 3, data),
x['amount'] + x['id'] + 3)
def test_reductions(data):
assert compute(t['amount'].count(), data) == len(x['amount'])
assert compute(t['amount'].sum(), data) == x['amount'].sum()
assert compute(t['amount'].mean(), data) == x['amount'].mean()
assert compute(t.amount[0], data) == x['amount'][0]
assert compute(t.amount[-1], data) == x['amount'][-1]
class TestTopLevelReductions(object):
def test_count(self, data):
from blaze import count
assert compute(count(t['amount']), data) == len(x['amount'])
def test_sum(self, data):
from blaze import sum
assert compute(sum(t['amount']), data) == x['amount'].sum()
def test_mean(self, data):
from blaze import mean
assert compute(mean(t['amount']), data) == x['amount'].mean()
class TestFailingSort(object):
"""These fail because we haven't created a completely sorted index"""
def test_basic(self, data):
with pytest.raises(ValueError):
compute(t.sort('id'), data)
@xfail(reason='PyTables does not support multiple column sorting')
def test_multiple_columns(self, data):
compute(t.sort(['amount', 'id']), data)
@xfail(reason='PyTables does not support multiple column sorting')
def test_multiple_columns_sorted_data(self, csi_data):
compute(t.sort(['amount', 'id']), csi_data)
class TestCSISort(object):
def test_basic(self, csi_data):
assert eq(compute(t.sort('amount'), csi_data),
np.sort(x, order='amount'))
assert eq(compute(t.sort('id'), csi_data),
np.sort(x, order='id'))
def test_column_expr(self, csi_data):
assert eq(compute(t.sort(t.amount), csi_data),
np.sort(x, order='amount'))
assert eq(compute(t.sort(t.id), csi_data),
np.sort(x, order='id'))
def test_non_existent_column(self, csi_data):
with pytest.raises(AssertionError):
compute(t.sort('not here'), csi_data)
def test_ascending(self, csi_data):
assert eq(compute(t.sort('amount', ascending=False), csi_data),
np.sort(x, order='amount')[::-1])
assert eq(compute(t.sort('amount', ascending=False), csi_data),
np.sort(x, order='amount')[::-1])
class TestIndexSort(object):
"""Fails with a partially sorted index"""
@xfail(reason='PyTables cannot sort with a standard index')
def test_basic(self, idx_data):
compute(t.sort('amount'), idx_data)
@xfail(reason='PyTables cannot sort with a standard index')
def test_ascending(self, idx_data):
compute(t.sort('amount', ascending=False), idx_data)
def test_head(data):
assert eq(compute(t.head(2), data), x[:2])
assert eq(compute(t.amount.head(2), data), x['amount'][:2])
@pytest.yield_fixture
def pyt():
tb = pytest.importorskip('tables')
fn = 'test.pyt.h5'
f = tb.open_file(fn, mode='w')
d = f.create_table('/', 'test', x)
yield d
d.close()
f.close()
try:
os.remove(fn)
except OSError:
pass
def test_drop(pyt):
drop(pyt)
with pytest.raises(tb.ClosedNodeError):
drop(pyt)
def test_create_index(pyt):
create_index(pyt, 'id')
assert 'id' in pyt.colindexes
def test_create_multiple_indexes(pyt):
create_index(pyt, ['id', 'amount'])
assert len(pyt.colindexes) == 2
assert 'id' in pyt.colindexes
assert 'amount' in pyt.colindexes
def test_create_multiple_indexes_fails(pyt):
with pytest.raises(ValueError):
create_index(pyt, ['id', 'blarg'])
with pytest.raises(ValueError):
create_index(pyt, ['foo', 'bar'])
def test_create_index_fails(pyt):
with pytest.raises(AttributeError):
create_index(pyt, 'no column here!')
def test_nrows():
assert compute(t.nrows, x) == len(x)
def test_nelements():
assert compute(t.nelements(axis=0), x) == len(x)
assert compute(t.nelements(), x) == len(x)
| bsd-3-clause |
sunericd/ISTools | GeneTK/main.py | 1 | 24389 | #8/17/17 Selecting RE for gelViz has searchbar and separate selected box.
#8/16/17 Error handling!
#8/15/17 Added GUI for primer design. Changed formatting from pack to grid.
#07/13/2017 Tab view for tools. Additional changes to reflect structure from Exe Toolkit.
#07/08/2017 allows multiple files/sequences to be uploaded.
from tkinter import *
from tkinter.ttk import *
from tkinter import filedialog, messagebox
from PIL import Image, ImageTk
from Bio import SeqIO
import mods.gel_visualizer as gv
import os
import mods.plasmid_builder as pb
import pandas as pd
import mods.seqprop as sp
import mods.MSM as msm
import mods.primer_designer as primo
class Example(Frame):
def __init__(self, parent):
Frame.__init__(self, parent)
self.parent = parent
self.initUI()
def initUI(self):
self.parent.title('Integrated Sciences Gene Toolkit')
self.style=Style()
self.style.theme_use('default')
self.pack(fill=BOTH, expand=1)
mlabel = Label(self, text='Integrated Sciences Gene Toolkit\n\nDo NOT close this window.', font=('Helvetica', 16),justify=CENTER,wraplength=350)
mlabel.grid(row=0,column=0,pady=15,padx=10,sticky=S+N+E+W)
#Navigation
sButton=Button(self,text='Start',command=self.nav)
sButton.grid(row=1,column=0,padx=15,pady=5)
qButton=Button(self,text='Quit',command=self.quit)
qButton.grid(row=2,column=0,padx=15,pady=5)
# Needed for PyInstaller to read files: filepath = resource_path()
def resource_path(self, relative_path):
''' Get absolute path to resource, works for dev and for PyInstaller '''
try:
# PyInstaller creates a temp folder and stores path in _MEIPASS
base_path = sys._MEIPASS
except Exception:
base_path = os.path.abspath('.')
return os.path.join(base_path, relative_path)
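    # (added note) used like self.resource_path('data/logo.gif'); resolves
    # against the PyInstaller temp dir when frozen, else the current directory.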
def quit(self):
quit()
def nav(self):
top=Toplevel()
top.title('Integrated Sciences Gene Toolkit')
reData= pd.read_csv(self.resource_path('data/restriction_sites2.csv'), sep=',')
#######################ALL Page Declarations###################
ntbk=Notebook(top)
mainPg=Frame(ntbk)
gelPg=Frame(ntbk)
plasPg=Frame(ntbk)
credPg=Frame(ntbk)
msmPg=Frame(ntbk)
seqPg=Frame(ntbk)
primPg=Frame(ntbk)
#############################Compile notebook############
self.loadmainPg(mainPg)
self.loadprimPg(primPg)
self.loadseqPg(seqPg, reData)
self.loadmsmPg(msmPg)
self.loadplasPg(plasPg, reData)
self.loadgelPg(gelPg, reData)
self.loadcredPg(credPg)
ntbk.add(mainPg, compound=LEFT, text='Welcome',padding=5)
ntbk.add(gelPg,text='gel.Viz', padding=5)
ntbk.add(msmPg,text='Multiple Sequence Mapper',padding=5)
ntbk.add(plasPg,text='Plasmid BUILDR', padding=5)
ntbk.add(primPg,text='Primer Designer',padding=5)
ntbk.add(seqPg,text='SeqProp',padding=5)
ntbk.add(credPg,text='Credits', padding=5)
ntbk.pack(fill=BOTH)
self.pack(fill=BOTH,expand=True)
def loadmainPg(self,mainPg):
#########################Front page#################################3
#Insert personalized logo
pic = PhotoImage(file=self.resource_path('data/logo.gif'))
pic.zoom(10,10)
mainPg_l1=Label(mainPg,image=pic)
mainPg_l1.image=pic
mainPg_l1.configure(image=pic)
mainPg_l1.grid(row=0, column=0,padx=5, sticky=E+W+S+N)
mainPg_l2=Label(mainPg,text='Welcome to the Integrated Sciences Gene Toolkit. Click on the tabs above to alternate between tools. \n\nSend any questions to [email protected]\n\nVersion 1.0.0\n\nLast updated Aug 15, 2017.', font=('Helvetica', 12), wraplength=250)
mainPg_l2.grid(row=0, column=1, padx=5, sticky=E+W+S+N)
def loadgelPg(self, gelPg, reData):
##############################Gel.Viz Page#################################
#Gene sequence input
gelPg_l1 = Label(gelPg, text='Sequence(s) (DNA bases only). Separate different sequences with spaces.')
gelPg_l1.grid(row=0,column=0,padx=5,pady=5,sticky=W)
gelPg_geneBox = Text(gelPg, height=10)
gelPg_geneBox.grid(row=1,column=0, sticky=E+W+S+N, columnspan=2,padx=5,pady=5)
#Restriction Enzyme list
gelPg_l2 = Label(gelPg, text='Restriction Enzyme(s). Use Add and Remove to select your restriction enzymes.')
gelPg_l2.grid(row=2,column=0,padx=5,pady=5,sticky=W)
#All available REs
allREs=StringVar()
allREs.set(self.getREs(reData))
gelPg_reList = Listbox(gelPg,selectmode='multiple',listvariable=allREs)
gelPg_reList.grid(row=4,column=0,sticky=W+S+N+E,padx=5,pady=5)
#Searchbar
searchKW = StringVar()
searchKW.trace('w', lambda name, index, mode: self.update_list(searchKW, gelPg_reList, self.getREs(reData)))
searchBar = Entry(gelPg, textvariable=searchKW)
searchBar.grid(row=3, column=0, sticky=W+S+N+E, padx=5, pady=5)
#Scrollbar
yScrollRE=Scrollbar(gelPg)
yScrollRE.grid(row=4,column=0,sticky=E+N+S,pady=5)
yScrollRE.configure(command=gelPg_reList.yview)
gelPg_reList.configure(yscrollcommand=yScrollRE.set)
#Selected REs
gelPg_l3 = Label(gelPg, text='Selected Restriction Enzymes')
gelPg_l3.grid(row=2,column=1,padx=5,pady=5,sticky=W+N+S)
gelPg_selectedRE = Listbox(gelPg,selectmode='multiple')
gelPg_selectedRE.grid(row=3,rowspan=2,column=1,sticky=W+S+N+E,padx=5,pady=5)
#Scrollbar
yScrollsel=Scrollbar(gelPg)
yScrollsel.grid(row=3, rowspan=2,column=1,sticky=E+N+S,pady=5)
yScrollsel.configure(command=gelPg_selectedRE.yview)
gelPg_selectedRE.configure(yscrollcommand=yScrollsel.set)
#RE lists interaction
addButton=Button(gelPg,text='Add', command = lambda: self.addREtolist(gelPg_reList,gelPg_selectedRE,reData))
removeButton = Button(gelPg, text='Remove', command= lambda: self.removeRE(gelPg_reList,gelPg_selectedRE,reData))
addButton.grid(row=5, column=0, sticky=E+N+S, pady=5,padx=5)
removeButton.grid(row=5, column=1, sticky=W+N+S, pady=5,padx=5)
#Navigation
gelPg_clr=Button(gelPg,text='Clear',command=lambda:self.loadgelPg(gelPg, reData))
gelPg_clr.grid(row=5,column=0,sticky=W,padx=5,pady=5)
gelPg_ent= Button(gelPg,text='Enter', command=lambda:self.runGV(gelPg_geneBox,gelPg_selectedRE.get(0,END),reData))
gelPg_ent.grid(row=5,column=1,sticky=E,padx=5,pady=5)
#Upload Gene Sequence File Button.
gelPg_upload=Button(gelPg,text='Upload Gene Sequence(s)', command=lambda: self.seqUpload(gelPg_geneBox,True))
gelPg_upload.grid(row=0,column=1,sticky=E,padx=5,pady=5)
def loadplasPg(self,plasPg, reData):
####################################Plasmid BUILDR##################################
#Plasmid sequence input
plasPg_l1 = Label(plasPg, text='One Recipient Plasmid Sequence (DNA bases only)')
plasPg_l1.grid(row=0,column=0, padx=5, pady=5,sticky=W)
plasPg_geneBox = Text(plasPg, height=10)
plasPg_geneBox.grid(row=1,column=0,columnspan=2,padx=5,pady=5,sticky=E+W+S+N)
#Restriction enzymes input
plasPg_l2 = Label(plasPg, text='Markers or Genes (Separate sequences with spaces)')
plasPg_l2.grid(row=2,column=0, padx=5, pady=5,sticky=W)
plasPg_mrkr = Text(plasPg, height=10)
plasPg_mrkr.grid(row=3,column=0, columnspan=2, padx=5, pady=5,sticky=W+E+S+N)
#Navigation
plasPg_clr=Button(plasPg,text='Clear',command=lambda:self.loadplasPg(plasPg, reData))
plasPg_clr.grid(row=4,column=0, padx=5, pady=5,sticky=W)
plasPg_ent= Button(plasPg,text='Enter', command=lambda:self.runBUILDR(plasPg_geneBox,plasPg_mrkr,reData))
plasPg_ent.grid(row=4,column=1, padx=5, pady=5,sticky=E)
plasPg_upl1=Button(plasPg,text='Upload Recipient Plasmid Sequence', command=lambda: self.seqUpload(plasPg_geneBox,False))
plasPg_upl1.grid(row=0,column=1, padx=5, pady=5,sticky=E)
plasPg_upl2=Button(plasPg,text='Upload Marker/Gene Sequence(s)', command=lambda: self.seqUpload(plasPg_mrkr,True))
plasPg_upl2.grid(row=2,column=1, padx=5, pady=5,sticky=E)
def loadmsmPg(self,msmPg):
##############################MSM####################################
#Inputs
msmPg_l1 = Label(msmPg, text='Select a .csv or .tsv file containing the names and the sequences.')
msmPg_l1.grid(row=0,column=0, padx=5, pady=5,sticky=W,columnspan=2)
msmPg_l2 = Label(msmPg, text='Filename: ')
msmPg_l2.grid(row=1,column=0, padx=5, pady=5,sticky=W+N+S)
msmPg_browse=Button(msmPg, text='Browse', command=lambda: self.getDir(msmPg_box))
msmPg_browse.grid(row=1,column=1, padx=5, pady=5,sticky=E)
msmPg_box = Text(msmPg, height=1, state=DISABLED)
msmPg_box.grid(row=2,column=0, columnspan=2, padx=5, pady=5,sticky=N+S+W)
msmPg_l3 = Label(msmPg, text='Motif Length (Optional, default is 10): ')
msmPg_l3.grid(row=3,column=0, padx=5, pady=5,sticky=W)
msmPg_motif = Text(msmPg, height=1)
msmPg_motif.grid(row=4,column=0,padx=5, pady=5,sticky=W+N+S+E,columnspan=2)
msmPg_l4 = Label(msmPg, text='Match Threshold (Optional, default is 3): ')
msmPg_l4.grid(row=5,column=0, padx=5, pady=5,sticky=W)
msmPg_thresh = Text(msmPg, height=1)
msmPg_thresh.grid(row=6,column=0, padx=5, pady=5,sticky=W+N+S+E,columnspan=2)
#Navigation
msmPg_clr=Button(msmPg,text='Clear',command=lambda:self.loadmsmPg(msmPg))
msmPg_clr.grid(row=7,column=0, padx=5, pady=5,sticky=W)
msmPg_ent= Button(msmPg,text='Enter', command=lambda:self.runMSM(msmPg_box,msmPg_motif,msmPg_thresh))
msmPg_ent.grid(row=7,column=1, padx=5, pady=5,sticky=E)
def loadseqPg(self,seqPg, reData):
#############################SeqProp##################################
l1 = Label(seqPg, text='Sequence(s) (DNA bases only). Separate different sequences with spaces.')
l1.grid(row=0,column=0, padx=5, pady=5,sticky=W)
box = Text(seqPg, height=10)
box.grid(row=1,column=0, columnspan=2, padx=5, pady=5,sticky=E+W+N+S)
#Navigation
ul1 = Button(seqPg, text= 'Upload Sequence(s)', command = lambda: self.seqUpload(box, True))
ul1.grid(row=0,column=1, padx=5, pady=5,sticky=E)
clr=Button(seqPg,text='Clear',command=lambda:self.loadseqPg(seqPg, reData))
clr.grid(row=2,column=0, padx=5, pady=5,sticky=W)
ent= Button(seqPg,text='Enter', command=lambda:self.runSeqProp(box,reData))
ent.grid(row=2,column=1, padx=5, pady=5,sticky=E)
def loadcredPg(self,credPg):
##########################Credits#################################
cred = Text(credPg,wrap=WORD)
        cred.insert(END, 'The Integrated Sciences Group is dedicated to the development of free, open-source tools for a range of scientific research. We ship large toolkits with user interfaces in order to make the application of our tools as seamless as possible. We are currently working to improve our current tools and to build new ones!\n\ngel.Viz is a gel visualization tool that generates an image of a gel given a sequence and restriction enzymes. This tool is the perfect resource to plan and verify experiments involving gel electrophoresis.\n\nMSM, or Multiple Sequence Mapping, finds all exact DNA matches between a list of sequences in a specified read frame.\n\nPlasmid BUILDR, or Benchmark Utility for Integrating Long DNA Reads, generates a protocol to build a desired plasmid given the sequences of the recipient plasmid and any insert genes or markers.\n\nPrimer Designer finds the optimal forward and/or reverse primers for amplifying a given sequence.\n\nSeqProp, or Sequence Properties, analyzes a DNA sequence for GC content, melting temperature, restriction enzyme sites, start and stop codons, and possible exons.\n\nDevelopers: Yein Christina Park, Eric Sun, and Yi Chen.\n\nVisit our website at integratedsciences.org.\n\nQuestions? Concerns? Email us at [email protected].\n\nMany thanks to Jimmy Thai and Siavash Zamirpour.\n\nCopyright 2017 Integrated Sciences\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the \'Software\'), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:\nThe above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \'AS IS\', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.')
cred.grid(row=0,column=0, pady=5,sticky=E+W+S+N)
yScrollCredits=Scrollbar(credPg)
yScrollCredits.grid(row=0,column=1, pady=5,sticky=E+N+S)
yScrollCredits.config(command=cred.yview)
cred.config(state=DISABLED,yscrollcommand=yScrollCredits.set)
def loadprimPg(self, primPg):
#############################PrimerDesign##################################
primPg_l1 = Label(primPg, text='One sequence to amplify AND surrounding regions (DNA bases only)')
primPg_l1.grid(row=0,column=0, padx=5, pady=5,sticky=W)
primPg_box = Text(primPg, height=10)
primPg_box.grid(row=1,column=0, columnspan=2, padx=5, pady=5,sticky=E+N+S+W)
primPg_l2 = Label(primPg, text='Start and end indices of sequence to amplify (Ex. 196-291)')
primPg_l2.grid(row=2,column=0, padx=5, pady=5,sticky=W)
primPg_idx = Text(primPg, height=1, width=20)
primPg_idx.grid(row=2,column=1, padx=5, pady=5,sticky=E+W+S+N)
primPg_l3 = Label(primPg, text='GC Content (Optional, default is 40-60%)')
primPg_l3.grid(row=3,column=0, padx=5, pady=5,sticky=W)
primPg_gc = Text(primPg, height=1, width=40)
primPg_gc.grid(row=3,column=1, padx=5, pady=5,sticky=E+N+S+W)
primPg_l4 = Label(primPg, text='Melting Temperature (Optional, default is 52-58C)')
primPg_l4.grid(row=4,column=0, padx=5, pady=5,sticky=W)
primPg_tm = Text(primPg, height=1, width=40)
primPg_tm.grid(row=4,column=1, padx=5, pady=5,sticky=W+S+E+N)
primPg_l5 = Label(primPg, text='Primer length (Optional, default is 18-22)')
primPg_l5.grid(row=5,column=0, padx=5, pady=5,sticky=W)
primPg_len = Text(primPg, height=1, width=40)
primPg_len.grid(row=5,column=1, padx=5, pady=5,sticky=E+W+S+N)
primPg_l6 = Label(primPg, text='Product length (Optional, default is 300-400))')
primPg_l6.grid(row=6,column=0, padx=5, pady=5,sticky=W)
primPg_prod = Text(primPg, height=1, width=40)
primPg_prod.grid(row=6,column=1, padx=5, pady=5,sticky=E+W+S+N)
choice=StringVar()
primPg_f = Radiobutton(primPg,text='Forward primer only', variable=choice, value='f')
primPg_r = Radiobutton(primPg,text='Reverse primer only', variable=choice, value='r')
primPg_fr = Radiobutton(primPg,text='Both forward and reverse primers (Default)', variable=choice, value='fr')
primPg_f.grid(row=7,column=0, columnspan=2,padx=5,pady=5,sticky=W+N+S)
primPg_r.grid(row=7,column=0, columnspan=2,padx=5,pady=5,sticky=N+S)
primPg_fr.grid(row=7,column=0, columnspan=2,padx=5,pady=5,sticky=E+N+S)
primPg_fr.invoke()
#Navigation
primPg_ul1 = Button(primPg, text= 'Upload Sequence', command = lambda: self.seqUpload(primPg_box, True))
primPg_ul1.grid(row=0,column=1, padx=5, pady=5,sticky=E)
primPg_clr=Button(primPg,text='Clear',command=lambda: self.loadprimPg(primPg))
primPg_clr.grid(row=8,column=0, padx=5, pady=5,sticky=W)
primPg_ent= Button(primPg,text='Enter', command=lambda:self.runPrim(primPg_box, primPg_idx, primPg_gc, primPg_tm, primPg_len, primPg_prod, choice.get()))
primPg_ent.grid(row=8,column=1, padx=5, pady=5,sticky=E)
def getREs(self,reData):
re = reData.values.T.tolist()
re = re[0]
return re
def addREtolist(self,optionList,selectedList, reData):
re_idx=optionList.curselection()
allre = optionList.get(0,END)
for idx in re_idx:
selectedList.insert(END,allre[idx])
def removeRE(self, optionList, selectedList, reData):
re_idx = selectedList.curselection()
allre= self.getREs(reData)
adjust = 0
for idx in re_idx:
selectedList.delete(idx-adjust)
adjust=adjust+1
def update_list(self, searchKW, lbox, allREs):
search_term = searchKW.get()
lbox_list = allREs
lbox.delete(0, END)
for item in lbox_list:
if search_term.lower() in item.lower():
lbox.insert(END, item)
def seqUpload(self,tBox, multiple):
ftypes=[('Fasta files','*.fasta'),('Text files','*.txt')]
fname = filedialog.askopenfilename(title='Select file', filetypes=ftypes)
if fname != '':
text=''
if 'fasta' in fname:
for dnaSeq in SeqIO.parse(fname,'fasta'):
temp=(dnaSeq.seq)
text=text+' '+temp
else:
temp= self.readFile(fname)
text=text+' '+temp
if not multiple:
tBox.delete('1.0',END)
tBox.insert(END, text)
def delBox(self,box):
box.configure(state=NORMAL)
box.delete('1.0',END)
box.configure(state=DISABLED)
def readFile(self, filename):
f = open(filename, 'r')
text = f.read()
return text
def getDir(self, tBox):
ftypes=[('CSV files', '*.csv'),('TSV files', '*.tsv')]
fname=filedialog.askopenfilename(title='Select file',filetypes=ftypes)
tBox.configure(state=NORMAL)
tBox.delete('1.0',END)
tBox.insert(END,fname)
tBox.configure(state=DISABLED)
#Get info from text boxes to run gel.Viz.
def runGV(self, geneBox,selRE,reData):
seq=geneBox.get('1.0',END)
seq = str(seq).strip()
if len(seq)>0 and len(selRE)>0:
try:
gv.gel_visualize(seq,selRE,reData)
except UserWarning as errormsg:
messagebox.showerror('Error', errormsg)
''' except Exception as e:
print(e)
messagebox.showerror('Error', 'There was an unexpected error. Please reference the documentation (link) or contact us at [email protected].') '''
else:
messagebox.showerror('Error','Fill out all required fields!')
#Get info from text boxes to run PlasBUILDR
def runBUILDR(self, geneBox,reBox,reData):
        seq=geneBox.get('1.0',END)
        seq = str(seq).strip()
        re=reBox.get('1.0',END)
        re=str(re).strip()
if len(seq)>0 and len(re)>0:
try:
pb.plasmid_builder(seq, re,reData)
except UserWarning as errormsg:
messagebox.showerror('Error', errormsg)
''' except Exception as e:
print(e)
messagebox.showerror('Error', 'There was an unexpected error. Please reference the documentation (link) or contact us at [email protected].') '''
else:
messagebox.showerror('Error','Fill out all required fields!')
#Get info from text boxes to run SeqProp
def runSeqProp(self, seqBox, reData):
seq=seqBox.get('1.0',END)
seq=str(seq).strip()
if len(seq)>0:
try:
sp.multSeqProp(seq, reData)
except UserWarning as errormsg:
messagebox.showerror('Error', errormsg)
''' except Exception as e:
print(e)
messagebox.showerror('Error', 'There was an unexpected error. Please reference the documentation (link) or contact us at [email protected].') '''
else:
messagebox.showerror('Error','Fill out all required fields!')
seqBox.focus()
def runMSM(self,fileBox, motifBox, threshBox):
if len(fileBox.get('1.0',END))>1:
fname=str(self.resource_path(fileBox.get('1.0',END)))
fname=fname.strip()
motif=str(motifBox.get('1.0',END)).strip()
thresh=str(threshBox.get('1.0',END)).strip()
if not len(motif)>0:
motif=int(10)
if not len(thresh)>0:
thresh=int(3)
try:
msm.msm(fname,motif_length=motif,match_threshold=thresh)
except UserWarning as errormsg:
messagebox.showerror('Error', errormsg)
''' except Exception as e:
print(e)
messagebox.showerror('Error', 'There was an unexpected error. Please reference the documentation (link) or contact us at [email protected].') '''
else:
messagebox.showerror('Error','Fill out all required fields!')
#Get info from text boxes to run Primer Design
def runPrim(self,seqBox, idx, gcBox, tmBox, lenBox, prodBox, primType):
seq=seqBox.get('1.0',END)
seq=str(seq).strip()
#seq_idx will be a string. First value is start, last value is end. Separated by '-'
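        # (added example) e.g. the user enters '196-291'; split('-') later
        # yields ['196', '291'] for the start and end indices.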
seq_idx=idx.get('1.0',END)
seq_idx=str(seq_idx).strip()
gc=gcBox.get('1.0',END)
gc=str(gc).strip()
gc=gc.strip('%')
tm=tmBox.get('1.0',END)
tm=str(tm).strip()
tm=tm.strip('C')
length=lenBox.get('1.0',END)
length=str(length).strip()
prodlen=prodBox.get('1.0',END)
prodlen=str(prodlen).strip()
if len(seq)>0 and len(seq_idx)>0 and seq_idx.find('-')!=-1:
if not len(gc)>0:
gc='40-60'
if not len(tm)>0:
tm='52-58'
if not len(length)>0:
length='18-22'
if not len(prodlen)>0:
prodlen='300-400'
try:
primo.primer_designer(primType.strip(), seq, indices=seq_idx.split('-'), primer_length=length.split('-'), temp_range=tm.split('-'), gc_range=gc.split('-'), product_range=prodlen.split('-'))
except UserWarning as errormsg:
messagebox.showerror('Error', errormsg)
''' except Exception as e:
print(e)
messagebox.showerror('Error', 'There was an unexpected error. Please reference the documentation (link) or contact us at [email protected].') '''
elif seq_idx.find('-')>-1:
messagebox.showerror('Error', 'Fill out all required fields!')
else:
messagebox.showerror('Error', 'Input a range for the start and end indices!')
#Runs the program
def main():
root = Tk()
root.geometry('350x300+300+300')
app = Example(root)
root.protocol('WM_DELETE_WINDOW', app.quit)
app.mainloop()
if __name__ == '__main__':
main() | mit |
joernhees/scikit-learn | examples/semi_supervised/plot_label_propagation_structure.py | 55 | 2433 | """
==============================================
Label Propagation learning a complex structure
==============================================
Example of LabelPropagation learning a complex internal structure
to demonstrate "manifold learning". The outer circle should be
labeled "red" and the inner circle "blue". Because both label groups
lie inside their own distinct shape, we can see that the labels
propagate correctly around the circle.
"""
print(__doc__)
# Authors: Clay Woolam <[email protected]>
# Andreas Mueller <[email protected]>
# License: BSD
import numpy as np
import matplotlib.pyplot as plt
from sklearn.semi_supervised import label_propagation
from sklearn.datasets import make_circles
# generate ring with inner circle
n_samples = 200
X, y = make_circles(n_samples=n_samples, shuffle=False)
outer, inner = 0, 1
labels = -np.ones(n_samples)
labels[0] = outer
labels[-1] = inner
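# Note (added): a label of -1 marks a sample as unlabeled for scikit-learn's
# semi-supervised estimators; only the first and last points start labeled.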
###############################################################################
# Learn with LabelSpreading
label_spread = label_propagation.LabelSpreading(kernel='knn', alpha=1.0)
label_spread.fit(X, labels)
###############################################################################
# Plot output labels
output_labels = label_spread.transduction_
plt.figure(figsize=(8.5, 4))
plt.subplot(1, 2, 1)
plt.scatter(X[labels == outer, 0], X[labels == outer, 1], color='navy',
marker='s', lw=0, label="outer labeled", s=10)
plt.scatter(X[labels == inner, 0], X[labels == inner, 1], color='c',
marker='s', lw=0, label='inner labeled', s=10)
plt.scatter(X[labels == -1, 0], X[labels == -1, 1], color='darkorange',
marker='.', label='unlabeled')
plt.legend(scatterpoints=1, shadow=False, loc='upper right')
plt.title("Raw data (2 classes=outer and inner)")
plt.subplot(1, 2, 2)
output_label_array = np.asarray(output_labels)
outer_numbers = np.where(output_label_array == outer)[0]
inner_numbers = np.where(output_label_array == inner)[0]
plt.scatter(X[outer_numbers, 0], X[outer_numbers, 1], color='navy',
marker='s', lw=0, s=10, label="outer learned")
plt.scatter(X[inner_numbers, 0], X[inner_numbers, 1], color='c',
marker='s', lw=0, s=10, label="inner learned")
plt.legend(scatterpoints=1, shadow=False, loc='upper right')
plt.title("Labels learned with Label Spreading (KNN)")
plt.subplots_adjust(left=0.07, bottom=0.07, right=0.93, top=0.92)
plt.show()
| bsd-3-clause |
annoviko/pyclustering | pyclustering/cluster/tests/unit/ut_fcm.py | 1 | 5815 | """!
@brief Unit-tests for Fuzzy C-Means algorithm.
@authors Andrei Novikov ([email protected])
@date 2014-2020
@copyright BSD-3-Clause
"""
import unittest
# Generate images without having a window appear.
import matplotlib
matplotlib.use('Agg')
from pyclustering.cluster.tests.fcm_templates import fcm_test_template
from pyclustering.cluster.fcm import fcm
from pyclustering.samples.definitions import SIMPLE_SAMPLES, FAMOUS_SAMPLES, FCPS_SAMPLES
class fcm_unit_tests(unittest.TestCase):
def test_cluster_allocation_simple01(self):
fcm_test_template.cluster_allocation(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, [[3.7, 5.5], [6.7, 7.5]], 2.0, [5, 5], False)
def test_cluster_allocation_simple01_one_cluster(self):
fcm_test_template.cluster_allocation(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, [[3.7, 5.5]], 2.0, [10], False)
def test_cluster_allocation_simple01_centers_are_points1(self):
fcm_test_template.cluster_allocation(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, [[3.768699, 5.364477], [6.593196, 7.850364]], 2.0, [5, 5], False)
def test_cluster_allocation_simple01_centers_are_points2(self):
fcm_test_template.cluster_allocation(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, [[3.936690, 5.663041], [6.968136, 7.755556]], 2.0, [5, 5], False)
def test_cluster_allocation_simple01_wrong_amount(self):
fcm_test_template.cluster_allocation(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, [[3.7, 5.5], [4.7, 6.5], [3.4, 6.4]], 2.0, [2, 3, 5], False)
def test_cluster_allocation_simple02(self):
fcm_test_template.cluster_allocation(SIMPLE_SAMPLES.SAMPLE_SIMPLE2, [[3.5, 4.8], [6.9, 7], [7.5, 0.5]], 2.0, [10, 5, 8], False)
def test_cluster_allocation_simple02_wrong_amount(self):
fcm_test_template.cluster_allocation(SIMPLE_SAMPLES.SAMPLE_SIMPLE2, [[3.5, 4.8], [6.9, 7], [7.5, 0.5], [4.5, 6.2]], 2.0, [4, 5, 6, 8], False)
def test_cluster_allocation_simple03(self):
fcm_test_template.cluster_allocation(SIMPLE_SAMPLES.SAMPLE_SIMPLE3, [[0.2, 0.1], [4.0, 1.0], [2.0, 2.0], [2.3, 3.9]], 2.0, [10, 10, 10, 30], False)
def test_cluster_allocation_simple04(self):
fcm_test_template.cluster_allocation(SIMPLE_SAMPLES.SAMPLE_SIMPLE4, [[1.5, 0.0], [1.5, 2.0], [1.5, 4.0], [1.5, 6.0], [1.5, 8.0]], 2.0, [15, 15, 15, 15, 15], False)
def test_cluster_allocation_simple05(self):
fcm_test_template.cluster_allocation(SIMPLE_SAMPLES.SAMPLE_SIMPLE5, [[0.0, 1.0], [0.0, 0.0], [1.0, 1.0], [1.0, 0.0]], 2.0, [15, 15, 15, 15], False)
def test_cluster_allocation_simple06(self):
fcm_test_template.cluster_allocation(SIMPLE_SAMPLES.SAMPLE_SIMPLE6, [[2.0, 6.0], [8.5, 4.5]], 2.0, [20, 21], False)
def test_cluster_allocation_simple07(self):
fcm_test_template.cluster_allocation(SIMPLE_SAMPLES.SAMPLE_SIMPLE7, [[-3.0], [2.0]], 2.0, [10, 10], False)
def test_cluster_allocation_simple08(self):
fcm_test_template.cluster_allocation(SIMPLE_SAMPLES.SAMPLE_SIMPLE8, [[-4.0], [3.1], [6.1], [12.0]], 2.0, [15, 30, 20, 80], False)
def test_cluster_allocation_simple09(self):
fcm_test_template.cluster_allocation(SIMPLE_SAMPLES.SAMPLE_SIMPLE9, [[4.0], [8.0]], 2.0, [10, 20], False)
def test_cluster_allocation_simple10(self):
fcm_test_template.cluster_allocation(SIMPLE_SAMPLES.SAMPLE_SIMPLE10, [[0.426, 0.065 ], [5.462, 6.529], [9.539, 11.379]], 2.0, [11, 11, 11], False)
def test_cluster_allocation_simple11(self):
fcm_test_template.cluster_allocation(SIMPLE_SAMPLES.SAMPLE_SIMPLE11, [[1.0, 0.6, 0.8], [4.1, 4.2, 4.3]], 2.0, [10, 10], False)
def test_cluster_allocation_simple12(self):
fcm_test_template.cluster_allocation(SIMPLE_SAMPLES.SAMPLE_SIMPLE12, [[1.0, 1.0], [2.5, 2.5], [4.0, 4.0]], 2.0, [5, 5, 5], False)
def test_cluster_allocation_simple13(self):
fcm_test_template.cluster_allocation(SIMPLE_SAMPLES.SAMPLE_SIMPLE13, [[1.35, 1.21, 0.0], [3.79, 4.21, 0.0]], 2.0, [5, 5], False)
def test_cluster_allocation_simple14(self):
fcm_test_template.cluster_allocation(SIMPLE_SAMPLES.SAMPLE_SIMPLE14, [[5.649, 5.199]], 2.0, [41], False)
def test_cluster_allocation_famous_oldfaithful(self):
fcm_test_template.cluster_allocation(FAMOUS_SAMPLES.SAMPLE_OLD_FAITHFUL, [[4.0, 70], [1.0, 48]], 2.0, None, False)
def test_cluster_allocation_two_diamonds(self):
fcm_test_template.cluster_allocation(FCPS_SAMPLES.SAMPLE_TWO_DIAMONDS, [[0.71, -0.51], [0.99, -0.24]], 2.0, [400, 400], False)
def test_cluster_allocation_tetra(self):
fcm_test_template.cluster_allocation(FCPS_SAMPLES.SAMPLE_TETRA, [[1.001, -0.083, -0.681], [-0.811, 0.476, -0.759], [-0.956, -1.427, -0.020], [0.225, 0.560, 1.794]], 2.0, [100, 100, 100, 100], False)
def test_cluster_allocation_fcps_hepta(self):
fcm_test_template.cluster_allocation(FCPS_SAMPLES.SAMPLE_HEPTA,
[[-0.06,0.02, 0.02], [2.41, 0.49, 0.03], [-2.69, 0.34, 0.29], [0.49, 2.89, 0.78], [-0.60, -2.31, 0.05], [-0.15, 0.77, 3.23], [-0.50, 0.43, -2.60]],
2.0, [30, 30, 30, 30, 30, 30, 32], False)
def test_cluster_allocation_fcps_hepta_wrong_amount(self):
fcm_test_template.cluster_allocation(FCPS_SAMPLES.SAMPLE_HEPTA,
[[-0.06,0.02, 0.02], [2.41, 0.49, 0.03], [-2.69, 0.34, 0.29], [0.49, 2.89, 0.78], [-0.60, -2.31, 0.05], [-0.50, 0.43, -2.60]],
2.0, [30, 30, 30, 30, 30, 62], False)
def test_incorrect_data(self):
self.assertRaises(ValueError, fcm, [], 1)
def test_incorrect_centers(self):
self.assertRaises(ValueError, fcm, [[0], [1], [2]], [])
| gpl-3.0 |
CVML/scikit-learn | sklearn/linear_model/ridge.py | 89 | 39360 | """
Ridge regression
"""
# Author: Mathieu Blondel <[email protected]>
# Reuben Fletcher-Costin <[email protected]>
# Fabian Pedregosa <[email protected]>
# Michael Eickenberg <[email protected]>
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
import warnings
import numpy as np
from scipy import linalg
from scipy import sparse
from scipy.sparse import linalg as sp_linalg
from .base import LinearClassifierMixin, LinearModel
from ..base import RegressorMixin
from ..utils.extmath import safe_sparse_dot
from ..utils import check_X_y
from ..utils import compute_sample_weight
from ..utils import column_or_1d
from ..preprocessing import LabelBinarizer
from ..grid_search import GridSearchCV
from ..externals import six
from ..metrics.scorer import check_scoring
def _solve_sparse_cg(X, y, alpha, max_iter=None, tol=1e-3, verbose=0):
n_samples, n_features = X.shape
X1 = sp_linalg.aslinearoperator(X)
coefs = np.empty((y.shape[1], n_features))
if n_features > n_samples:
def create_mv(curr_alpha):
def _mv(x):
return X1.matvec(X1.rmatvec(x)) + curr_alpha * x
return _mv
else:
def create_mv(curr_alpha):
def _mv(x):
return X1.rmatvec(X1.matvec(x)) + curr_alpha * x
return _mv
for i in range(y.shape[1]):
y_column = y[:, i]
mv = create_mv(alpha[i])
if n_features > n_samples:
# kernel ridge
# w = X.T * inv(X X^t + alpha*Id) y
C = sp_linalg.LinearOperator(
(n_samples, n_samples), matvec=mv, dtype=X.dtype)
coef, info = sp_linalg.cg(C, y_column, tol=tol)
coefs[i] = X1.rmatvec(coef)
else:
# linear ridge
# w = inv(X^t X + alpha*Id) * X.T y
y_column = X1.rmatvec(y_column)
C = sp_linalg.LinearOperator(
(n_features, n_features), matvec=mv, dtype=X.dtype)
coefs[i], info = sp_linalg.cg(C, y_column, maxiter=max_iter,
tol=tol)
if info < 0:
raise ValueError("Failed with error code %d" % info)
if max_iter is None and info > 0 and verbose:
warnings.warn("sparse_cg did not converge after %d iterations." %
info)
return coefs
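# A hedged, self-contained sketch (illustrative only, never called by the
# solver above) of the primal/dual identity behind the two branches:
# X.T * inv(X X^t + alpha*Id) * y equals inv(X^t X + alpha*Id) * X.T * y.
def _sketch_primal_dual_identity():
    rng = np.random.RandomState(0)
    X = rng.randn(5, 8)  # n_features > n_samples, i.e. the kernel branch
    y = rng.randn(5)
    alpha = 0.7
    dual = X.T.dot(np.linalg.solve(X.dot(X.T) + alpha * np.eye(5), y))
    primal = np.linalg.solve(X.T.dot(X) + alpha * np.eye(8), X.T.dot(y))
    assert np.allclose(dual, primal)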
def _solve_lsqr(X, y, alpha, max_iter=None, tol=1e-3):
n_samples, n_features = X.shape
coefs = np.empty((y.shape[1], n_features))
# According to the lsqr documentation, alpha = damp^2.
sqrt_alpha = np.sqrt(alpha)
for i in range(y.shape[1]):
y_column = y[:, i]
coefs[i] = sp_linalg.lsqr(X, y_column, damp=sqrt_alpha[i],
atol=tol, btol=tol, iter_lim=max_iter)[0]
return coefs
def _solve_cholesky(X, y, alpha):
# w = inv(X^t X + alpha*Id) * X.T y
n_samples, n_features = X.shape
n_targets = y.shape[1]
A = safe_sparse_dot(X.T, X, dense_output=True)
Xy = safe_sparse_dot(X.T, y, dense_output=True)
one_alpha = np.array_equal(alpha, len(alpha) * [alpha[0]])
if one_alpha:
A.flat[::n_features + 1] += alpha[0]
return linalg.solve(A, Xy, sym_pos=True,
overwrite_a=True).T
else:
coefs = np.empty([n_targets, n_features])
for coef, target, current_alpha in zip(coefs, Xy.T, alpha):
A.flat[::n_features + 1] += current_alpha
coef[:] = linalg.solve(A, target, sym_pos=True,
overwrite_a=False).ravel()
A.flat[::n_features + 1] -= current_alpha
return coefs
def _solve_cholesky_kernel(K, y, alpha, sample_weight=None, copy=False):
# dual_coef = inv(X X^t + alpha*Id) y
n_samples = K.shape[0]
n_targets = y.shape[1]
if copy:
K = K.copy()
alpha = np.atleast_1d(alpha)
one_alpha = (alpha == alpha[0]).all()
has_sw = isinstance(sample_weight, np.ndarray) \
or sample_weight not in [1.0, None]
if has_sw:
# Unlike other solvers, we need to support sample_weight directly
# because K might be a pre-computed kernel.
sw = np.sqrt(np.atleast_1d(sample_weight))
y = y * sw[:, np.newaxis]
K *= np.outer(sw, sw)
if one_alpha:
# Only one penalty, we can solve multi-target problems in one time.
K.flat[::n_samples + 1] += alpha[0]
try:
# Note: we must use overwrite_a=False in order to be able to
# use the fall-back solution below in case a LinAlgError
# is raised
dual_coef = linalg.solve(K, y, sym_pos=True,
overwrite_a=False)
except np.linalg.LinAlgError:
warnings.warn("Singular matrix in solving dual problem. Using "
"least-squares solution instead.")
dual_coef = linalg.lstsq(K, y)[0]
# K is expensive to compute and store in memory so change it back in
# case it was user-given.
K.flat[::n_samples + 1] -= alpha[0]
if has_sw:
dual_coef *= sw[:, np.newaxis]
return dual_coef
else:
# One penalty per target. We need to solve each target separately.
dual_coefs = np.empty([n_targets, n_samples])
for dual_coef, target, current_alpha in zip(dual_coefs, y.T, alpha):
K.flat[::n_samples + 1] += current_alpha
dual_coef[:] = linalg.solve(K, target, sym_pos=True,
overwrite_a=False).ravel()
K.flat[::n_samples + 1] -= current_alpha
if has_sw:
dual_coefs *= sw[np.newaxis, :]
return dual_coefs.T
def _solve_svd(X, y, alpha):
U, s, Vt = linalg.svd(X, full_matrices=False)
idx = s > 1e-15 # same default value as scipy.linalg.pinv
s_nnz = s[idx][:, np.newaxis]
UTy = np.dot(U.T, y)
d = np.zeros((s.size, alpha.size))
d[idx] = s_nnz / (s_nnz ** 2 + alpha)
d_UT_y = d * UTy
return np.dot(Vt.T, d_UT_y).T
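# Hedged check (illustrative only, not part of the library API) that the SVD
# route above, which shrinks each singular direction by s / (s**2 + alpha),
# agrees with the Cholesky closed form for a small dense problem.
def _sketch_svd_matches_cholesky():
    rng = np.random.RandomState(0)
    X = rng.randn(10, 4)
    y = rng.randn(10, 1)
    alpha = np.array([0.5])
    assert np.allclose(_solve_svd(X, y, alpha), _solve_cholesky(X, y, alpha))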
def _rescale_data(X, y, sample_weight):
"""Rescale data so as to support sample_weight"""
n_samples = X.shape[0]
sample_weight = sample_weight * np.ones(n_samples)
sample_weight = np.sqrt(sample_weight)
sw_matrix = sparse.dia_matrix((sample_weight, 0),
shape=(n_samples, n_samples))
X = safe_sparse_dot(sw_matrix, X)
y = safe_sparse_dot(sw_matrix, y)
return X, y
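# Hedged sketch (illustrative only): rescaling by sqrt(sample_weight)
# reproduces the weighted Gram matrix X^t diag(w) X and the weighted target,
# which is why the solvers above can ignore sample_weight after rescaling.
def _sketch_sample_weight_rescaling():
    rng = np.random.RandomState(0)
    X, y, w = rng.randn(6, 3), rng.randn(6), rng.rand(6)
    Xw, yw = _rescale_data(X, y, w)
    assert np.allclose(np.asarray(Xw.T.dot(Xw)), (X * w[:, np.newaxis]).T.dot(X))
    assert np.allclose(np.asarray(yw), np.sqrt(w) * y)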
def ridge_regression(X, y, alpha, sample_weight=None, solver='auto',
max_iter=None, tol=1e-3, verbose=0):
"""Solve the ridge equation by the method of normal equations.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
X : {array-like, sparse matrix, LinearOperator},
shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
alpha : {float, array-like},
shape = [n_targets] if array-like
The l_2 penalty to be used. If an array is passed, penalties are
assumed to be specific to targets
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
The default value is determined by scipy.sparse.linalg.
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample. If sample_weight is set, then
the solver will automatically be set to 'cholesky'
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg'}
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than
'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution via a Cholesky decomposition of
dot(X.T, X)
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
scipy.sparse.linalg.lsqr. It is the fastest but may not be available
in old scipy versions. It also uses an iterative procedure.
All solvers except 'svd' support both dense and sparse data.
tol : float
Precision of the solution.
verbose : int
Verbosity level. Setting verbose > 0 will display additional information
depending on the solver used.
Returns
-------
coef : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
Notes
-----
This function won't compute the intercept.
"""
n_samples, n_features = X.shape
if y.ndim > 2:
raise ValueError("Target y has the wrong shape %s" % str(y.shape))
ravel = False
if y.ndim == 1:
y = y.reshape(-1, 1)
ravel = True
n_samples_, n_targets = y.shape
if n_samples != n_samples_:
raise ValueError("Number of samples in X and y does not correspond:"
" %d != %d" % (n_samples, n_samples_))
has_sw = sample_weight is not None
if solver == 'auto':
# cholesky if it's a dense array and cg in
# any other case
if not sparse.issparse(X) or has_sw:
solver = 'cholesky'
else:
solver = 'sparse_cg'
elif solver == 'lsqr' and not hasattr(sp_linalg, 'lsqr'):
warnings.warn("""lsqr not available on this machine, falling back
to sparse_cg.""")
solver = 'sparse_cg'
if has_sw:
if np.atleast_1d(sample_weight).ndim > 1:
raise ValueError("Sample weights must be 1D array or scalar")
# Sample weight can be implemented via a simple rescaling.
X, y = _rescale_data(X, y, sample_weight)
# There should be either 1 or n_targets penalties
alpha = np.asarray(alpha).ravel()
if alpha.size not in [1, n_targets]:
raise ValueError("Number of targets and number of penalties "
"do not correspond: %d != %d"
% (alpha.size, n_targets))
if alpha.size == 1 and n_targets > 1:
alpha = np.repeat(alpha, n_targets)
if solver not in ('sparse_cg', 'cholesky', 'svd', 'lsqr'):
raise ValueError('Solver %s not understood' % solver)
if solver == 'sparse_cg':
coef = _solve_sparse_cg(X, y, alpha, max_iter, tol, verbose)
elif solver == "lsqr":
coef = _solve_lsqr(X, y, alpha, max_iter, tol)
elif solver == 'cholesky':
if n_features > n_samples:
K = safe_sparse_dot(X, X.T, dense_output=True)
try:
dual_coef = _solve_cholesky_kernel(K, y, alpha)
coef = safe_sparse_dot(X.T, dual_coef, dense_output=True).T
except linalg.LinAlgError:
# use SVD solver if matrix is singular
solver = 'svd'
else:
try:
coef = _solve_cholesky(X, y, alpha)
except linalg.LinAlgError:
# use SVD solver if matrix is singular
solver = 'svd'
if solver == 'svd':
if sparse.issparse(X):
raise TypeError('SVD solver does not support sparse'
' inputs currently')
coef = _solve_svd(X, y, alpha)
if ravel:
# When y was passed as a 1d-array, we flatten the coefficients.
coef = coef.ravel()
return coef
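# Hedged usage sketch for ridge_regression above (illustrative values only):
# an array of n_targets penalties gives one weight vector per output column.
def _sketch_ridge_regression_usage():
    rng = np.random.RandomState(0)
    X = rng.randn(20, 3)
    Y = rng.randn(20, 2)
    coef = ridge_regression(X, Y, alpha=np.array([0.1, 10.0]), solver='svd')
    assert coef.shape == (2, 3)  # n_targets x n_features, no intercept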
class _BaseRidge(six.with_metaclass(ABCMeta, LinearModel)):
@abstractmethod
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, solver="auto"):
self.alpha = alpha
self.fit_intercept = fit_intercept
self.normalize = normalize
self.copy_X = copy_X
self.max_iter = max_iter
self.tol = tol
self.solver = solver
def fit(self, X, y, sample_weight=None):
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float,
multi_output=True, y_numeric=True)
if ((sample_weight is not None) and
np.atleast_1d(sample_weight).ndim > 1):
raise ValueError("Sample weights must be 1D array or scalar")
X, y, X_mean, y_mean, X_std = self._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X,
sample_weight=sample_weight)
self.coef_ = ridge_regression(X, y,
alpha=self.alpha,
sample_weight=sample_weight,
max_iter=self.max_iter,
tol=self.tol,
solver=self.solver)
self._set_intercept(X_mean, y_mean, X_std)
return self
class Ridge(_BaseRidge, RegressorMixin):
"""Linear least squares with l2 regularization.
This model solves a regression model where the loss function is
the linear least squares function and regularization is given by
the l2-norm. Also known as Ridge Regression or Tikhonov regularization.
This estimator has built-in support for multi-variate regression
(i.e., when y is a 2d-array of shape [n_samples, n_targets]).
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alpha : {float, array-like}
shape = [n_targets]
Small positive values of alpha improve the conditioning of the problem
and reduce the variance of the estimates. Alpha corresponds to
``(2*C)^-1`` in other linear models such as LogisticRegression or
LinearSVC. If an array is passed, penalties are assumed to be specific
to the targets. Hence they must correspond in number.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
The default value is determined by scipy.sparse.linalg.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg'}
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than
'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution.
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
scipy.sparse.linalg.lsqr. It is the fastest but may not be available
in old scipy versions. It also uses an iterative procedure.
All solvers except 'svd' support both dense and sparse data.
tol : float
Precision of the solution.
Attributes
----------
coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
See also
--------
RidgeClassifier, RidgeCV, KernelRidge
Examples
--------
>>> from sklearn.linear_model import Ridge
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = Ridge(alpha=1.0)
>>> clf.fit(X, y) # doctest: +NORMALIZE_WHITESPACE
Ridge(alpha=1.0, copy_X=True, fit_intercept=True, max_iter=None,
normalize=False, solver='auto', tol=0.001)
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, solver="auto"):
super(Ridge, self).__init__(alpha=alpha, fit_intercept=fit_intercept,
normalize=normalize, copy_X=copy_X,
max_iter=max_iter, tol=tol, solver=solver)
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample
Returns
-------
self : returns an instance of self.
"""
return super(Ridge, self).fit(X, y, sample_weight=sample_weight)
class RidgeClassifier(LinearClassifierMixin, _BaseRidge):
"""Classifier using Ridge regression.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alpha : float
Small positive values of alpha improve the conditioning of the problem
and reduce the variance of the estimates. Alpha corresponds to
``(2*C)^-1`` in other linear models such as LogisticRegression or
LinearSVC.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set to false, no
intercept will be used in calculations (e.g. data is expected to be
already centered).
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
The default value is determined by scipy.sparse.linalg.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg'}
Solver to use in the computational
routines. 'svd' will use a Singular value decomposition to obtain
the solution, 'cholesky' will use the standard
scipy.linalg.solve function, 'sparse_cg' will use the
conjugate gradient solver as found in
scipy.sparse.linalg.cg, while 'auto' will choose the most
appropriate depending on the matrix X. 'lsqr' uses
the dedicated regularized least-squares routine scipy.sparse.linalg.lsqr.
tol : float
Precision of the solution.
Attributes
----------
coef_ : array, shape = [n_features] or [n_classes, n_features]
Weight vector(s).
See also
--------
Ridge, RidgeClassifierCV
Notes
-----
For multi-class classification, n_class classifiers are trained in
a one-versus-all approach. Concretely, this is implemented by taking
advantage of the multi-variate response support in Ridge.
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, class_weight=None,
solver="auto"):
super(RidgeClassifier, self).__init__(
alpha=alpha, fit_intercept=fit_intercept, normalize=normalize,
copy_X=copy_X, max_iter=max_iter, tol=tol, solver=solver)
self.class_weight = class_weight
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples,n_features]
Training data
y : array-like, shape = [n_samples]
Target values
sample_weight : float or numpy array of shape (n_samples,)
Sample weight.
Returns
-------
self : returns an instance of self.
"""
self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
Y = self._label_binarizer.fit_transform(y)
if not self._label_binarizer.y_type_.startswith('multilabel'):
y = column_or_1d(y, warn=True)
if self.class_weight:
if sample_weight is None:
sample_weight = 1.
# modify the sample weights with the corresponding class weight
sample_weight = (sample_weight *
compute_sample_weight(self.class_weight, y))
super(RidgeClassifier, self).fit(X, Y, sample_weight=sample_weight)
return self
@property
def classes_(self):
return self._label_binarizer.classes_
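# Hedged usage sketch (illustrative only, not part of the library): in the
# multi-class case the one-versus-all reduction described in the Notes above
# yields one weight vector per class.
def _sketch_ridge_classifier_usage():
    X = np.random.RandomState(0).randn(30, 4)
    y = np.arange(30) % 3  # three classes, all guaranteed present
    clf = RidgeClassifier(alpha=1.0).fit(X, y)
    assert clf.coef_.shape == (3, 4)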
class _RidgeGCV(LinearModel):
"""Ridge regression with built-in Generalized Cross-Validation
It allows efficient Leave-One-Out cross-validation.
This class is not intended to be used directly. Use RidgeCV instead.
Notes
-----
We want to solve (K + alpha*Id)c = y,
where K = X X^T is the kernel matrix.
Let G = (K + alpha*Id)^-1.
Dual solution: c = Gy
Primal solution: w = X^T c
Compute eigendecomposition K = Q V Q^T.
Then G = Q (V + alpha*Id)^-1 Q^T,
where (V + alpha*Id) is diagonal.
It is thus inexpensive to invert for many alphas.
Let loov be the vector of prediction values for each example
when the model was fitted with all examples but this example.
loov = (KGY - diag(KG)Y) / diag(I-KG)
Let looe be the vector of prediction errors for each example
when the model was fitted with all examples but this example.
looe = y - loov = c / diag(G)
References
----------
http://cbcl.mit.edu/projects/cbcl/publications/ps/MIT-CSAIL-TR-2007-025.pdf
http://www.mit.edu/~9.520/spring07/Classes/rlsslides.pdf
"""
def __init__(self, alphas=(0.1, 1.0, 10.0),
fit_intercept=True, normalize=False,
scoring=None, copy_X=True,
gcv_mode=None, store_cv_values=False):
self.alphas = np.asarray(alphas)
self.fit_intercept = fit_intercept
self.normalize = normalize
self.scoring = scoring
self.copy_X = copy_X
self.gcv_mode = gcv_mode
self.store_cv_values = store_cv_values
def _pre_compute(self, X, y):
# even if X is very sparse, K is usually very dense
K = safe_sparse_dot(X, X.T, dense_output=True)
v, Q = linalg.eigh(K)
QT_y = np.dot(Q.T, y)
return v, Q, QT_y
def _decomp_diag(self, v_prime, Q):
# compute diagonal of the matrix: dot(Q, dot(diag(v_prime), Q^T))
return (v_prime * Q ** 2).sum(axis=-1)
def _diag_dot(self, D, B):
# compute dot(diag(D), B)
if len(B.shape) > 1:
# handle case where B is > 1-d
D = D[(slice(None), ) + (np.newaxis, ) * (len(B.shape) - 1)]
return D * B
def _errors(self, alpha, y, v, Q, QT_y):
# don't construct matrix G, instead compute action on y & diagonal
w = 1.0 / (v + alpha)
c = np.dot(Q, self._diag_dot(w, QT_y))
G_diag = self._decomp_diag(w, Q)
# handle case where y is 2-d
if len(y.shape) != 1:
G_diag = G_diag[:, np.newaxis]
return (c / G_diag) ** 2, c
def _values(self, alpha, y, v, Q, QT_y):
# don't construct matrix G, instead compute action on y & diagonal
w = 1.0 / (v + alpha)
c = np.dot(Q, self._diag_dot(w, QT_y))
G_diag = self._decomp_diag(w, Q)
# handle case where y is 2-d
if len(y.shape) != 1:
G_diag = G_diag[:, np.newaxis]
return y - (c / G_diag), c
def _pre_compute_svd(self, X, y):
if sparse.issparse(X):
raise TypeError("SVD not supported for sparse matrices")
U, s, _ = linalg.svd(X, full_matrices=0)
v = s ** 2
UT_y = np.dot(U.T, y)
return v, U, UT_y
def _errors_svd(self, alpha, y, v, U, UT_y):
w = ((v + alpha) ** -1) - (alpha ** -1)
c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha ** -1) * y
G_diag = self._decomp_diag(w, U) + (alpha ** -1)
if len(y.shape) != 1:
# handle case where y is 2-d
G_diag = G_diag[:, np.newaxis]
return (c / G_diag) ** 2, c
def _values_svd(self, alpha, y, v, U, UT_y):
w = ((v + alpha) ** -1) - (alpha ** -1)
c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha ** -1) * y
G_diag = self._decomp_diag(w, U) + (alpha ** -1)
if len(y.shape) != 1:
# handle case when y is 2-d
G_diag = G_diag[:, np.newaxis]
return y - (c / G_diag), c
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or array-like of shape [n_samples]
Sample weight
Returns
-------
self : Returns self.
"""
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float,
multi_output=True, y_numeric=True)
n_samples, n_features = X.shape
X, y, X_mean, y_mean, X_std = LinearModel._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X,
sample_weight=sample_weight)
gcv_mode = self.gcv_mode
with_sw = len(np.shape(sample_weight))
if gcv_mode is None or gcv_mode == 'auto':
if sparse.issparse(X) or n_features > n_samples or with_sw:
gcv_mode = 'eigen'
else:
gcv_mode = 'svd'
elif gcv_mode == "svd" and with_sw:
# FIXME non-uniform sample weights not yet supported
warnings.warn("non-uniform sample weights unsupported for svd, "
"forcing usage of eigen")
gcv_mode = 'eigen'
if gcv_mode == 'eigen':
_pre_compute = self._pre_compute
_errors = self._errors
_values = self._values
elif gcv_mode == 'svd':
# assert n_samples >= n_features
_pre_compute = self._pre_compute_svd
_errors = self._errors_svd
_values = self._values_svd
else:
raise ValueError('bad gcv_mode "%s"' % gcv_mode)
v, Q, QT_y = _pre_compute(X, y)
n_y = 1 if len(y.shape) == 1 else y.shape[1]
cv_values = np.zeros((n_samples * n_y, len(self.alphas)))
C = []
scorer = check_scoring(self, scoring=self.scoring, allow_none=True)
error = scorer is None
for i, alpha in enumerate(self.alphas):
weighted_alpha = (sample_weight * alpha
if sample_weight is not None
else alpha)
if error:
out, c = _errors(weighted_alpha, y, v, Q, QT_y)
else:
out, c = _values(weighted_alpha, y, v, Q, QT_y)
cv_values[:, i] = out.ravel()
C.append(c)
if error:
best = cv_values.mean(axis=0).argmin()
else:
# The scorer wants an object that will make the predictions, but
# they are already computed efficiently by _RidgeGCV. This
# identity_estimator will just return them
def identity_estimator():
pass
identity_estimator.decision_function = lambda y_predict: y_predict
identity_estimator.predict = lambda y_predict: y_predict
out = [scorer(identity_estimator, y.ravel(), cv_values[:, i])
for i in range(len(self.alphas))]
best = np.argmax(out)
self.alpha_ = self.alphas[best]
self.dual_coef_ = C[best]
self.coef_ = safe_sparse_dot(self.dual_coef_.T, X)
self._set_intercept(X_mean, y_mean, X_std)
if self.store_cv_values:
if len(y.shape) == 1:
cv_values_shape = n_samples, len(self.alphas)
else:
cv_values_shape = n_samples, n_y, len(self.alphas)
self.cv_values_ = cv_values.reshape(cv_values_shape)
return self
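# Hedged numeric sketch (illustrative only) of the identity quoted in the
# _RidgeGCV docstring: with G = inv(K + alpha*Id) and c = G y, the exact
# leave-one-out residual of sample i is c[i] / G[i, i], which is what
# _errors/_values compute without ever forming G explicitly.
def _sketch_loo_identity():
    rng = np.random.RandomState(0)
    X, y, alpha = rng.randn(8, 3), rng.randn(8), 1.0
    G = np.linalg.inv(X.dot(X.T) + alpha * np.eye(8))
    c = G.dot(y)
    looe = np.empty(8)  # brute-force leave-one-out residuals
    for i in range(8):
        mask = np.arange(8) != i
        w = np.linalg.solve(X[mask].T.dot(X[mask]) + alpha * np.eye(3),
                            X[mask].T.dot(y[mask]))
        looe[i] = y[i] - X[i].dot(w)
    assert np.allclose(looe, c / np.diag(G))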
class _BaseRidgeCV(LinearModel):
def __init__(self, alphas=(0.1, 1.0, 10.0),
fit_intercept=True, normalize=False, scoring=None,
cv=None, gcv_mode=None,
store_cv_values=False):
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.scoring = scoring
self.cv = cv
self.gcv_mode = gcv_mode
self.store_cv_values = store_cv_values
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or array-like of shape [n_samples]
Sample weight
Returns
-------
self : Returns self.
"""
if self.cv is None:
estimator = _RidgeGCV(self.alphas,
fit_intercept=self.fit_intercept,
normalize=self.normalize,
scoring=self.scoring,
gcv_mode=self.gcv_mode,
store_cv_values=self.store_cv_values)
estimator.fit(X, y, sample_weight=sample_weight)
self.alpha_ = estimator.alpha_
if self.store_cv_values:
self.cv_values_ = estimator.cv_values_
else:
if self.store_cv_values:
raise ValueError("cv!=None and store_cv_values=True "
" are incompatible")
parameters = {'alpha': self.alphas}
fit_params = {'sample_weight' : sample_weight}
gs = GridSearchCV(Ridge(fit_intercept=self.fit_intercept),
parameters, fit_params=fit_params, cv=self.cv)
gs.fit(X, y)
estimator = gs.best_estimator_
self.alpha_ = gs.best_estimator_.alpha
self.coef_ = estimator.coef_
self.intercept_ = estimator.intercept_
return self
class RidgeCV(_BaseRidgeCV, RegressorMixin):
"""Ridge regression with built-in cross-validation.
By default, it performs Generalized Cross-Validation, which is a form of
efficient Leave-One-Out cross-validation.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alphas : numpy array of shape [n_alphas]
Array of alpha values to try.
Small positive values of alpha improve the conditioning of the
problem and reduce the variance of the estimates.
Alpha corresponds to ``(2*C)^-1`` in other linear models such as
LogisticRegression or LinearSVC.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : integer or cross-validation generator, optional
If None, Generalized Cross-Validation (efficient Leave-One-Out)
will be used.
If an integer is passed, it is the number of folds for KFold cross
validation. Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects
gcv_mode : {None, 'auto', 'svd', 'eigen'}, optional
Flag indicating which strategy to use when performing
Generalized Cross-Validation. Options are::
'auto' : use eigen if X is a sparse matrix or n_features > n_samples,
otherwise use svd
'svd' : force computation via singular value decomposition of X
(does not work for sparse matrices)
'eigen' : force computation via eigendecomposition of the Gram matrix X X^T
The 'auto' mode is the default and is intended to pick the cheaper
option of the two depending upon the shape and format of the training
data.
store_cv_values : boolean, default=False
Flag indicating if the cross-validation values corresponding to
each alpha should be stored in the `cv_values_` attribute (see
below). This flag is only compatible with `cv=None` (i.e. using
Generalized Cross-Validation).
Attributes
----------
cv_values_ : array, shape = [n_samples, n_alphas] or \
shape = [n_samples, n_targets, n_alphas], optional
Cross-validation values for each alpha (if `store_cv_values=True` and \
`cv=None`). After `fit()` has been called, this attribute will \
contain the mean squared errors (by default) or the values of the \
`scoring` function (if provided in the constructor).
coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
alpha_ : float
Estimated regularization parameter.
intercept_ : float | array, shape = (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
See also
--------
Ridge: Ridge regression
RidgeClassifier: Ridge classifier
RidgeClassifierCV: Ridge classifier with built-in cross validation
"""
pass
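# Hedged usage sketch (illustrative values only): with cv=None the efficient
# leave-one-out GCV path is used and alpha_ is picked from the supplied grid.
def _sketch_ridge_cv_usage():
    rng = np.random.RandomState(0)
    X = rng.randn(40, 5)
    y = X.dot(rng.randn(5)) + 0.1 * rng.randn(40)
    reg = RidgeCV(alphas=(0.01, 0.1, 1.0)).fit(X, y)
    assert reg.alpha_ in (0.01, 0.1, 1.0)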
class RidgeClassifierCV(LinearClassifierMixin, _BaseRidgeCV):
"""Ridge classifier with built-in cross-validation.
By default, it performs Generalized Cross-Validation, which is a form of
efficient Leave-One-Out cross-validation. Currently, only the n_features >
n_samples case is handled efficiently.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alphas : numpy array of shape [n_alphas]
Array of alpha values to try.
Small positive values of alpha improve the conditioning of the
problem and reduce the variance of the estimates.
Alpha corresponds to ``(2*C)^-1`` in other linear models such as
LogisticRegression or LinearSVC.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : cross-validation generator, optional
If None, Generalized Cross-Validation (efficient Leave-One-Out)
will be used.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
Attributes
----------
cv_values_ : array, shape = [n_samples, n_alphas] or \
shape = [n_samples, n_responses, n_alphas], optional
Cross-validation values for each alpha (if `store_cv_values=True` and
`cv=None`). After `fit()` has been called, this attribute will contain \
the mean squared errors (by default) or the values of the \
`scoring` function (if provided in the constructor).
coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
alpha_ : float
Estimated regularization parameter
See also
--------
Ridge: Ridge regression
RidgeClassifier: Ridge classifier
RidgeCV: Ridge regression with built-in cross validation
Notes
-----
For multi-class classification, n_class classifiers are trained in
a one-versus-all approach. Concretely, this is implemented by taking
advantage of the multi-variate response support in Ridge.
"""
def __init__(self, alphas=(0.1, 1.0, 10.0), fit_intercept=True,
normalize=False, scoring=None, cv=None, class_weight=None):
super(RidgeClassifierCV, self).__init__(
alphas=alphas, fit_intercept=fit_intercept, normalize=normalize,
scoring=scoring, cv=cv)
self.class_weight = class_weight
def fit(self, X, y, sample_weight=None):
"""Fit the ridge classifier.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
sample_weight : float or numpy array of shape (n_samples,)
Sample weight.
Returns
-------
self : object
Returns self.
"""
self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
Y = self._label_binarizer.fit_transform(y)
if not self._label_binarizer.y_type_.startswith('multilabel'):
y = column_or_1d(y, warn=True)
if self.class_weight:
if sample_weight is None:
sample_weight = 1.
# modify the sample weights with the corresponding class weight
sample_weight = (sample_weight *
compute_sample_weight(self.class_weight, y))
_BaseRidgeCV.fit(self, X, Y, sample_weight=sample_weight)
return self
@property
def classes_(self):
return self._label_binarizer.classes_
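# Hedged usage sketch (illustrative only): class_weight='balanced' rescales
# the sample weights by inverse class frequency before the cross-validated fit.
def _sketch_ridge_classifier_cv_usage():
    rng = np.random.RandomState(0)
    X = np.vstack([rng.randn(9, 2), rng.randn(3, 2) + 3.0])
    y = np.array([0] * 9 + [1] * 3)
    clf = RidgeClassifierCV(alphas=(0.1, 1.0), class_weight='balanced').fit(X, y)
    assert set(clf.classes_) == {0, 1}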
| bsd-3-clause |
jmetzen/scikit-learn | sklearn/neural_network/tests/test_mlp.py | 46 | 18585 | """
Testing for Multi-layer Perceptron module (sklearn.neural_network)
"""
# Author: Issam H. Laradji
# Licence: BSD 3 clause
import sys
import warnings
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_equal
from sklearn.datasets import load_digits, load_boston
from sklearn.datasets import make_regression, make_multilabel_classification
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.metrics import roc_auc_score
from sklearn.neural_network import MLPClassifier
from sklearn.neural_network import MLPRegressor
from sklearn.preprocessing import LabelBinarizer
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from scipy.sparse import csr_matrix
from sklearn.utils.testing import (assert_raises, assert_greater, assert_equal,
assert_false)
np.seterr(all='warn')
ACTIVATION_TYPES = ["logistic", "tanh", "relu"]
digits_dataset_multi = load_digits(n_class=3)
X_digits_multi = MinMaxScaler().fit_transform(digits_dataset_multi.data[:200])
y_digits_multi = digits_dataset_multi.target[:200]
digits_dataset_binary = load_digits(n_class=2)
X_digits_binary = MinMaxScaler().fit_transform(
digits_dataset_binary.data[:200])
y_digits_binary = digits_dataset_binary.target[:200]
classification_datasets = [(X_digits_multi, y_digits_multi),
(X_digits_binary, y_digits_binary)]
boston = load_boston()
Xboston = StandardScaler().fit_transform(boston.data)[: 200]
yboston = boston.target[:200]
def test_alpha():
# Test that larger alpha yields weights closer to zero"""
X = X_digits_binary[:100]
y = y_digits_binary[:100]
alpha_vectors = []
alpha_values = np.arange(2)
absolute_sum = lambda x: np.sum(np.abs(x))
for alpha in alpha_values:
mlp = MLPClassifier(hidden_layer_sizes=10, alpha=alpha, random_state=1)
mlp.fit(X, y)
alpha_vectors.append(np.array([absolute_sum(mlp.coefs_[0]),
absolute_sum(mlp.coefs_[1])]))
for i in range(len(alpha_values) - 1):
assert (alpha_vectors[i] > alpha_vectors[i + 1]).all()
def test_fit():
# Test that the algorithm solution is equal to a worked out example."""
X = np.array([[0.6, 0.8, 0.7]])
y = np.array([0])
mlp = MLPClassifier(algorithm='sgd', learning_rate_init=0.1, alpha=0.1,
activation='logistic', random_state=1, max_iter=1,
hidden_layer_sizes=2, momentum=0)
# set weights
mlp.coefs_ = [0] * 2
mlp.intercepts_ = [0] * 2
mlp.classes_ = [0, 1]
mlp.n_outputs_ = 1
mlp.coefs_[0] = np.array([[0.1, 0.2], [0.3, 0.1], [0.5, 0]])
mlp.coefs_[1] = np.array([[0.1], [0.2]])
mlp.intercepts_[0] = np.array([0.1, 0.1])
mlp.intercepts_[1] = np.array([1.0])
mlp._coef_grads = [] * 2
mlp._intercept_grads = [] * 2
mlp.label_binarizer_.y_type_ = 'binary'
# Initialize parameters
mlp.n_iter_ = 0
mlp.learning_rate_ = 0.1
# Compute the number of layers
mlp.n_layers_ = 3
# Pre-allocate gradient matrices
mlp._coef_grads = [0] * (mlp.n_layers_ - 1)
mlp._intercept_grads = [0] * (mlp.n_layers_ - 1)
mlp.out_activation_ = 'logistic'
mlp.t_ = 0
mlp.best_loss_ = np.inf
mlp.loss_curve_ = []
mlp._no_improvement_count = 0
mlp._intercept_velocity = [np.zeros_like(intercepts) for
intercepts in
mlp.intercepts_]
mlp._coef_velocity = [np.zeros_like(coefs) for coefs in
mlp.coefs_]
mlp.partial_fit(X, y, classes=[0, 1])
# Manually worked out example
# h1 = g(X1 * W_i1 + b11) = g(0.6 * 0.1 + 0.8 * 0.3 + 0.7 * 0.5 + 0.1)
# = 0.679178699175393
# h2 = g(X2 * W_i2 + b12) = g(0.6 * 0.2 + 0.8 * 0.1 + 0.7 * 0 + 0.1)
# = 0.574442516811659
# o1 = g(h * W2 + b21) = g(0.679 * 0.1 + 0.574 * 0.2 + 1)
# = 0.7654329236196236
# d21 = -(0 - 0.765) = 0.765
# d11 = (1 - 0.679) * 0.679 * 0.765 * 0.1 = 0.01667
# d12 = (1 - 0.574) * 0.574 * 0.765 * 0.2 = 0.0374
# W1grad11 = X1 * d11 + alpha * W11 = 0.6 * 0.01667 + 0.1 * 0.1 = 0.0200
# W1grad12 = X1 * d12 + alpha * W12 = 0.6 * 0.0374 + 0.1 * 0.2 = 0.04244
# W1grad21 = X2 * d11 + alpha * W13 = 0.8 * 0.01667 + 0.1 * 0.3 = 0.043336
# W1grad22 = X2 * d12 + alpha * W14 = 0.8 * 0.0374 + 0.1 * 0.1 = 0.03992
# W1grad31 = X3 * d11 + alpha * W15 = 0.6 * 0.01667 + 0.1 * 0.5 = 0.060002
# W1grad32 = X3 * d12 + alpha * W16 = 0.6 * 0.0374 + 0.1 * 0 = 0.02244
# W2grad1 = h1 * d21 + alpha * W21 = 0.679 * 0.765 + 0.1 * 0.1 = 0.5294
# W2grad2 = h2 * d21 + alpha * W22 = 0.574 * 0.765 + 0.1 * 0.2 = 0.45911
# b1grad1 = d11 = 0.01667
# b1grad2 = d12 = 0.0374
# b2grad = d21 = 0.765
# W1 = W1 - eta * [W1grad11, .., W1grad32] = [[0.1, 0.2], [0.3, 0.1],
# [0.5, 0]] - 0.1 * [[0.0200, 0.04244], [0.043336, 0.03992],
# [0.060002, 0.02244]] = [[0.098, 0.195756], [0.2956664,
# 0.096008], [0.4939998, -0.002244]]
# W2 = W2 - eta * [W2grad1, W2grad2] = [[0.1], [0.2]] - 0.1 *
# [[0.5294], [0.45911]] = [[0.04706], [0.154089]]
# b1 = b1 - eta * [b1grad1, b1grad2] = 0.1 - 0.1 * [0.01667, 0.0374]
# = [0.098333, 0.09626]
# b2 = b2 - eta * b2grad = 1.0 - 0.1 * 0.765 = 0.9235
assert_almost_equal(mlp.coefs_[0], np.array([[0.098, 0.195756],
[0.2956664, 0.096008],
[0.4939998, -0.002244]]),
decimal=3)
assert_almost_equal(mlp.coefs_[1], np.array([[0.04706], [0.154089]]),
decimal=3)
assert_almost_equal(mlp.intercepts_[0],
np.array([0.098333, 0.09626]), decimal=3)
assert_almost_equal(mlp.intercepts_[1], np.array(0.9235), decimal=3)
# Testing output
# h1 = g(X1 * W_i1 + b11) = g(0.6 * 0.098 + 0.8 * 0.2956664 +
# 0.7 * 0.4939998 + 0.098333) = 0.677
# h2 = g(X2 * W_i2 + b12) = g(0.6 * 0.195756 + 0.8 * 0.096008 +
# 0.7 * -0.002244 + 0.09626) = 0.572
# o1 = h * W2 + b21 = 0.677 * 0.04706 +
# 0.572 * 0.154089 + 0.9235 = 1.043
assert_almost_equal(mlp.decision_function(X), 1.043, decimal=3)
def test_gradient():
# Test gradient.
# This makes sure that the activation functions and their derivatives
# are correct. The numerical and analytical computation of the gradient
# should be close.
for n_labels in [2, 3]:
n_samples = 5
n_features = 10
X = np.random.random((n_samples, n_features))
y = 1 + np.mod(np.arange(n_samples) + 1, n_labels)
Y = LabelBinarizer().fit_transform(y)
for activation in ACTIVATION_TYPES:
mlp = MLPClassifier(activation=activation, hidden_layer_sizes=10,
algorithm='l-bfgs', alpha=1e-5,
learning_rate_init=0.2, max_iter=1,
random_state=1)
mlp.fit(X, y)
theta = np.hstack([l.ravel() for l in mlp.coefs_ +
mlp.intercepts_])
layer_units = ([X.shape[1]] + [mlp.hidden_layer_sizes] +
[mlp.n_outputs_])
activations = []
deltas = []
coef_grads = []
intercept_grads = []
activations.append(X)
for i in range(mlp.n_layers_ - 1):
activations.append(np.empty((X.shape[0],
layer_units[i + 1])))
deltas.append(np.empty((X.shape[0],
layer_units[i + 1])))
fan_in = layer_units[i]
fan_out = layer_units[i + 1]
coef_grads.append(np.empty((fan_in, fan_out)))
intercept_grads.append(np.empty(fan_out))
# analytically compute the gradients
def loss_grad_fun(t):
return mlp._loss_grad_lbfgs(t, X, Y, activations, deltas,
coef_grads, intercept_grads)
[value, grad] = loss_grad_fun(theta)
numgrad = np.zeros(np.size(theta))
n = np.size(theta, 0)
E = np.eye(n)
epsilon = 1e-5
# numerically compute the gradients
for i in range(n):
dtheta = E[:, i] * epsilon
numgrad[i] = ((loss_grad_fun(theta + dtheta)[0] -
loss_grad_fun(theta - dtheta)[0]) /
(epsilon * 2.0))
assert_almost_equal(numgrad, grad)
def test_lbfgs_classification():
# Test lbfgs on classification.
# It should achieve a score higher than 0.95 for the binary and multi-class
# versions of the digits dataset.
for X, y in classification_datasets:
X_train = X[:150]
y_train = y[:150]
X_test = X[150:]
expected_shape_dtype = (X_test.shape[0], y_train.dtype.kind)
for activation in ACTIVATION_TYPES:
mlp = MLPClassifier(algorithm='l-bfgs', hidden_layer_sizes=50,
max_iter=150, shuffle=True, random_state=1,
activation=activation)
mlp.fit(X_train, y_train)
y_predict = mlp.predict(X_test)
assert_greater(mlp.score(X_train, y_train), 0.95)
assert_equal((y_predict.shape[0], y_predict.dtype.kind),
expected_shape_dtype)
def test_lbfgs_regression():
# Test lbfgs on the boston dataset, a regression problems."""
X = Xboston
y = yboston
for activation in ACTIVATION_TYPES:
mlp = MLPRegressor(algorithm='l-bfgs', hidden_layer_sizes=50,
max_iter=150, shuffle=True, random_state=1,
activation=activation)
mlp.fit(X, y)
assert_greater(mlp.score(X, y), 0.95)
def test_learning_rate_warmstart():
# Tests that warm_start reuses past solution."""
X = [[3, 2], [1, 6], [5, 6], [-2, -4]]
y = [1, 1, 1, 0]
for learning_rate in ["invscaling", "constant"]:
mlp = MLPClassifier(algorithm='sgd', hidden_layer_sizes=4,
learning_rate=learning_rate, max_iter=1,
power_t=0.25, warm_start=True)
mlp.fit(X, y)
prev_eta = mlp._optimizer.learning_rate
mlp.fit(X, y)
post_eta = mlp._optimizer.learning_rate
if learning_rate == 'constant':
assert_equal(prev_eta, post_eta)
elif learning_rate == 'invscaling':
assert_equal(mlp.learning_rate_init / pow(8 + 1, mlp.power_t),
post_eta)
def test_multilabel_classification():
# Test that multi-label classification works as expected."""
# test fit method
X, y = make_multilabel_classification(n_samples=50, random_state=0,
return_indicator=True)
mlp = MLPClassifier(algorithm='l-bfgs', hidden_layer_sizes=50, alpha=1e-5,
max_iter=150, random_state=0, activation='logistic',
learning_rate_init=0.2)
mlp.fit(X, y)
assert_equal(mlp.score(X, y), 1)
# test partial fit method
mlp = MLPClassifier(algorithm='sgd', hidden_layer_sizes=50, max_iter=150,
random_state=0, activation='logistic', alpha=1e-5,
learning_rate_init=0.2)
for i in range(100):
mlp.partial_fit(X, y, classes=[0, 1, 2, 3, 4])
assert_greater(mlp.score(X, y), 0.9)
def test_multioutput_regression():
# Test that multi-output regression works as expected"""
X, y = make_regression(n_samples=200, n_targets=5)
mlp = MLPRegressor(algorithm='l-bfgs', hidden_layer_sizes=50, max_iter=200,
random_state=1)
mlp.fit(X, y)
assert_greater(mlp.score(X, y), 0.9)
def test_partial_fit_classes_error():
# Tests that passing different classes to partial_fit raises an error"""
X = [[3, 2]]
y = [0]
clf = MLPClassifier(algorithm='sgd')
clf.partial_fit(X, y, classes=[0, 1])
assert_raises(ValueError, clf.partial_fit, X, y, classes=[1, 2])
def test_partial_fit_classification():
# Test partial_fit on classification.
# `partial_fit` should yield the same results as 'fit' for binary and
# multi-class classification.
for X, y in classification_datasets:
X = X
y = y
mlp = MLPClassifier(algorithm='sgd', max_iter=100, random_state=1,
tol=0, alpha=1e-5, learning_rate_init=0.2)
mlp.fit(X, y)
pred1 = mlp.predict(X)
mlp = MLPClassifier(algorithm='sgd', random_state=1, alpha=1e-5,
learning_rate_init=0.2)
for i in range(100):
mlp.partial_fit(X, y, classes=np.unique(y))
pred2 = mlp.predict(X)
assert_array_equal(pred1, pred2)
assert_greater(mlp.score(X, y), 0.95)
def test_partial_fit_regression():
# Test partial_fit on regression.
# `partial_fit` should yield the same results as 'fit' for regression.
X = Xboston
y = yboston
for momentum in [0, .9]:
mlp = MLPRegressor(algorithm='sgd', max_iter=100, activation='relu',
random_state=1, learning_rate_init=0.01,
batch_size=X.shape[0], momentum=momentum)
with warnings.catch_warnings(record=True):
# catch convergence warning
mlp.fit(X, y)
pred1 = mlp.predict(X)
mlp = MLPRegressor(algorithm='sgd', activation='relu',
learning_rate_init=0.01, random_state=1,
batch_size=X.shape[0], momentum=momentum)
for i in range(100):
mlp.partial_fit(X, y)
pred2 = mlp.predict(X)
assert_almost_equal(pred1, pred2, decimal=2)
score = mlp.score(X, y)
assert_greater(score, 0.75)
def test_partial_fit_errors():
# Test partial_fit error handling."""
X = [[3, 2], [1, 6]]
y = [1, 0]
# no classes passed
assert_raises(ValueError,
MLPClassifier(
algorithm='sgd').partial_fit,
X, y,
classes=[2])
# l-bfgs doesn't support partial_fit
assert_false(hasattr(MLPClassifier(algorithm='l-bfgs'), 'partial_fit'))
def test_params_errors():
# Test that invalid parameters raise value error"""
X = [[3, 2], [1, 6]]
y = [1, 0]
clf = MLPClassifier
assert_raises(ValueError, clf(hidden_layer_sizes=-1).fit, X, y)
assert_raises(ValueError, clf(max_iter=-1).fit, X, y)
assert_raises(ValueError, clf(shuffle='true').fit, X, y)
assert_raises(ValueError, clf(alpha=-1).fit, X, y)
assert_raises(ValueError, clf(learning_rate_init=-1).fit, X, y)
assert_raises(ValueError, clf(algorithm='hadoken').fit, X, y)
assert_raises(ValueError, clf(learning_rate='converge').fit, X, y)
assert_raises(ValueError, clf(activation='cloak').fit, X, y)
def test_predict_proba_binary():
# Test that predict_proba works as expected for binary class."""
X = X_digits_binary[:50]
y = y_digits_binary[:50]
clf = MLPClassifier(hidden_layer_sizes=5)
clf.fit(X, y)
y_proba = clf.predict_proba(X)
y_log_proba = clf.predict_log_proba(X)
(n_samples, n_classes) = y.shape[0], 2
proba_max = y_proba.argmax(axis=1)
proba_log_max = y_log_proba.argmax(axis=1)
assert_equal(y_proba.shape, (n_samples, n_classes))
assert_array_equal(proba_max, proba_log_max)
assert_array_equal(y_log_proba, np.log(y_proba))
assert_equal(roc_auc_score(y, y_proba[:, 1]), 1.0)
def test_predict_proba_multi():
# Test that predict_proba works as expected for multi class."""
X = X_digits_multi[:10]
y = y_digits_multi[:10]
clf = MLPClassifier(hidden_layer_sizes=5)
clf.fit(X, y)
y_proba = clf.predict_proba(X)
y_log_proba = clf.predict_log_proba(X)
(n_samples, n_classes) = y.shape[0], np.unique(y).size
proba_max = y_proba.argmax(axis=1)
proba_log_max = y_log_proba.argmax(axis=1)
assert_equal(y_proba.shape, (n_samples, n_classes))
assert_array_equal(proba_max, proba_log_max)
assert_array_equal(y_log_proba, np.log(y_proba))
def test_sparse_matrices():
# Test that sparse and dense input matrices output the same results."""
X = X_digits_binary[:50]
y = y_digits_binary[:50]
X_sparse = csr_matrix(X)
mlp = MLPClassifier(random_state=1, hidden_layer_sizes=15)
mlp.fit(X, y)
pred1 = mlp.decision_function(X)
mlp.fit(X_sparse, y)
pred2 = mlp.decision_function(X_sparse)
assert_almost_equal(pred1, pred2)
pred1 = mlp.predict(X)
pred2 = mlp.predict(X_sparse)
assert_array_equal(pred1, pred2)
def test_tolerance():
# Test tolerance.
# It should force the algorithm to exit the loop when it converges.
X = [[3, 2], [1, 6]]
y = [1, 0]
clf = MLPClassifier(tol=0.5, max_iter=3000, algorithm='sgd', verbose=10)
clf.fit(X, y)
assert_greater(clf.max_iter, clf.n_iter_)
def test_verbose_sgd():
# Test verbose.
X = [[3, 2], [1, 6]]
y = [1, 0]
clf = MLPClassifier(algorithm='sgd', max_iter=2, verbose=10,
hidden_layer_sizes=2)
old_stdout = sys.stdout
sys.stdout = output = StringIO()
clf.fit(X, y)
clf.partial_fit(X, y)
sys.stdout = old_stdout
assert 'Iteration' in output.getvalue()
def test_early_stopping():
X = X_digits_binary[:100]
y = y_digits_binary[:100]
tol = 0.2
clf = MLPClassifier(tol=tol, max_iter=3000, algorithm='sgd',
early_stopping=True)
clf.fit(X, y)
assert_greater(clf.max_iter, clf.n_iter_)
valid_scores = clf.validation_scores_
best_valid_score = clf.best_validation_score_
assert_equal(max(valid_scores), best_valid_score)
assert_greater(best_valid_score + tol, valid_scores[-2])
assert_greater(best_valid_score + tol, valid_scores[-1])
def test_adaptive_learning_rate():
X = [[3, 2], [1, 6]]
y = [1, 0]
clf = MLPClassifier(tol=0.5, max_iter=3000, algorithm='sgd',
learning_rate='adaptive', verbose=10)
clf.fit(X, y)
assert_greater(clf.max_iter, clf.n_iter_)
assert_greater(1e-6, clf._optimizer.learning_rate)
| bsd-3-clause |
bwpriest/rossmannsales | src/exploratory_regression.py | 1 | 2067 | # -*- coding: utf-8 -*-
"""
Created on Thu Oct 15 12:53:42 2015
@author: Reed
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import statsmodels.formula.api as sm
import patsy
'''
script for basic exploratory analysis and naively pooled regression across all stores
'''
enriched_path = '../data/enriched/'
raw_path = '../data/raw/'
trainset = pd.read_csv(enriched_path + "train.csv")
train_reg = trainset[['Sales','Store','DayOfWeek','Assortment','CompetitionDistance','Promo','DaysOpen','StoreType','LastSchoolHoliday','LastPromo','CurrentPromo']]
train_reg[['DayOfWeek','Store']] = train_reg[['DayOfWeek','Store']].astype(str)
train_reg_np = np.asarray(train_reg)
# run OLS regression
result_full = sm.ols(formula="Sales ~ Assortment + Store + DayOfWeek + CompetitionDistance + Promo + DaysOpen + StoreType + LastSchoolHoliday + LastPromo + CurrentPromo + LastSchoolHoliday", data=train_reg).fit()
result_full.summary()
#look at store 1
store_select = trainset.groupby('Store')
store1_Data = store_select.get_group(1)
store1_Data = store1_Data[['Sales','Date','Assortment','CompetitionDistance','Promo','DaysOpen','StoreType','LastSchoolHoliday','LastPromo','CurrentPromo']]
store1_Data=store1_Data.reset_index()
plt.plot(store1_Data.index, store1_Data['Sales'])
plt.xlabel('time')
plt.ylabel('sales')
plt.title('store 1 sales over time')
#Add labels for month
#xmas 2013
store1_Data[290:310]
#xmas 2014
store1_Data[290:310]
#Easter 2014
store1_Data[385:405]
#Easter 2015
store1_Data[385:405]
#average sales by date
trainset_time_sales=trainset[['Sales','Date']]
trainset_time_sales=trainset_time_sales.groupby('Date').aggregate(np.mean).reset_index()
trainset_time_sales = trainset_time_sales.sort_values(by='Date')  # DataFrame.sort() is deprecated
plt.plot(trainset_time_sales.index, trainset_time_sales['Sales'])
plt.xlabel('time')
plt.ylabel('sales')
plt.title('average sales over time')
#mean absolute residual
result_full.resid.abs().mean()
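# hedged extension (illustrative): quick residual-vs-fitted diagnostic for the
# pooled OLS fit above, using statsmodels' fittedvalues/resid attributes
plt.figure()
plt.scatter(result_full.fittedvalues, result_full.resid, s=2)
plt.xlabel('fitted sales')
plt.ylabel('residual')
plt.title('pooled OLS residuals vs fitted values')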
| apache-2.0 |
adamobeng/ibis | ibis/config.py | 16 | 20779 | # This file has been adapted from pandas/core/config.py. pandas 3-clause BSD
# license. See LICENSES/pandas
#
# Further modifications:
#
# Copyright 2014 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from collections import namedtuple
from contextlib import contextmanager
import pprint
import warnings
import sys
from six import StringIO
PY3 = (sys.version_info[0] >= 3)
if PY3:
def u(s):
return s
else:
def u(s):
return unicode(s, "unicode_escape")
DeprecatedOption = namedtuple('DeprecatedOption', 'key msg rkey removal_ver')
RegisteredOption = namedtuple(
'RegisteredOption', 'key defval doc validator cb')
_deprecated_options = {} # holds deprecated option metadata
_registered_options = {} # holds registered option metadata
_global_config = {} # holds the current values for registered options
_reserved_keys = ['all'] # keys which have a special meaning
class OptionError(AttributeError, KeyError):
"""Exception for ibis.options, backwards compatible with KeyError
checks"""
#
# User API
def _get_single_key(pat, silent):
keys = _select_options(pat)
if len(keys) == 0:
if not silent:
_warn_if_deprecated(pat)
raise OptionError('No such key(s): %r' % pat)
if len(keys) > 1:
raise OptionError('Pattern matched multiple keys')
key = keys[0]
if not silent:
_warn_if_deprecated(key)
key = _translate_key(key)
return key
def _get_option(pat, silent=False):
key = _get_single_key(pat, silent)
# walk the nested dict
root, k = _get_root(key)
return root[k]
def _set_option(*args, **kwargs):
# must have at least 1 arg; deal with constraints later
nargs = len(args)
if not nargs or nargs % 2 != 0:
raise ValueError("Must provide an even number of non-keyword "
"arguments")
# default to false
silent = kwargs.get('silent', False)
for k, v in zip(args[::2], args[1::2]):
key = _get_single_key(k, silent)
o = _get_registered_option(key)
if o and o.validator:
o.validator(v)
# walk the nested dict
root, k = _get_root(key)
root[k] = v
if o.cb:
o.cb(key)
def _describe_option(pat='', _print_desc=True):
keys = _select_options(pat)
if len(keys) == 0:
raise OptionError('No such key(s)')
s = u('')
for k in keys: # filter by pat
s += _build_option_description(k)
if _print_desc:
print(s)
else:
return s
def _reset_option(pat, silent=False):
keys = _select_options(pat)
if len(keys) == 0:
raise OptionError('No such key(s)')
if len(keys) > 1 and len(pat) < 4 and pat != 'all':
raise ValueError('You must specify at least 4 characters when '
'resetting multiple keys, use the special keyword '
'"all" to reset all the options to their default '
'value')
for k in keys:
_set_option(k, _registered_options[k].defval, silent=silent)
def get_default_val(pat):
key = _get_single_key(pat, silent=True)
return _get_registered_option(key).defval
class DictWrapper(object):
""" provide attribute-style access to a nested dict
"""
def __init__(self, d, prefix=""):
object.__setattr__(self, "d", d)
object.__setattr__(self, "prefix", prefix)
def __repr__(self):
buf = StringIO()
pprint.pprint(self.d, stream=buf)
return buf.getvalue()
def __setattr__(self, key, val):
prefix = object.__getattribute__(self, "prefix")
if prefix:
prefix += "."
prefix += key
# you can't set new keys
# and you can't overwrite subtrees
if key in self.d and not isinstance(self.d[key], dict):
_set_option(prefix, val)
else:
raise OptionError("You can only set the value of existing options")
def __getattr__(self, key):
prefix = object.__getattribute__(self, "prefix")
if prefix:
prefix += "."
prefix += key
v = object.__getattribute__(self, "d")[key]
if isinstance(v, dict):
return DictWrapper(v, prefix)
else:
return _get_option(prefix)
def __dir__(self):
return list(self.d.keys())
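# Hedged illustration of the wrapper above ('x.y' is a made-up option key;
# real keys must first be created with register_option below):
#
#   register_option('x.y', 1, 'demo')
#   options.x          # non-leaf -> nested DictWrapper with prefix 'x'
#   options.x.y        # leaf read  -> _get_option('x.y') -> 1
#   options.x.y = 2    # leaf write -> _set_option('x.y', 2)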
# For user convenience, we'd like to have the available options described
# in the docstring. For dev convenience we'd like to generate the docstrings
# dynamically instead of maintaining them by hand. To do this, we use the
# class below which wraps functions inside a callable, and converts
# __doc__ into a property function. The docstrings below are templates
# using the py2.6+ advanced formatting syntax to plug in a concise list
# of options, and option descriptions.
class CallableDynamicDoc(object):
def __init__(self, func, doc_tmpl):
self.__doc_tmpl__ = doc_tmpl
self.__func__ = func
def __call__(self, *args, **kwds):
return self.__func__(*args, **kwds)
@property
def __doc__(self):
opts_desc = _describe_option('all', _print_desc=False)
opts_list = pp_options_list(list(_registered_options.keys()))
return self.__doc_tmpl__.format(opts_desc=opts_desc,
opts_list=opts_list)
_get_option_tmpl = """
get_option(pat)
Retrieves the value of the specified option.
Available options:
{opts_list}
Parameters
----------
pat : str
Regexp which should match a single option.
Note: partial matches are supported for convenience, but unless you use the
full option name (e.g. x.y.z.option_name), your code may break in future
versions if new options with similar names are introduced.
Returns
-------
result : the value of the option
Raises
------
OptionError : if no such option exists
Notes
-----
The available options with their descriptions:
{opts_desc}
"""
_set_option_tmpl = """
set_option(pat, value)
Sets the value of the specified option.
Available options:
{opts_list}
Parameters
----------
pat : str
Regexp which should match a single option.
Note: partial matches are supported for convenience, but unless you use the
full option name (e.g. x.y.z.option_name), your code may break in future
versions if new options with similar names are introduced.
value :
new value of option.
Returns
-------
None
Raises
------
OptionError if no such option exists
Notes
-----
The available options with their descriptions:
{opts_desc}
"""
_describe_option_tmpl = """
describe_option(pat, _print_desc=True)
Prints the description for one or more registered options.
Call with no arguments to get a listing for all registered options.
Available options:
{opts_list}
Parameters
----------
pat : str
Regexp pattern. All matching keys will have their description displayed.
_print_desc : bool, default True
If True (default) the description(s) will be printed to stdout.
Otherwise, the description(s) will be returned as a unicode string
(for testing).
Returns
-------
None by default, the description(s) as a unicode string if _print_desc
is False
Notes
-----
The available options with their descriptions:
{opts_desc}
"""
_reset_option_tmpl = """
reset_option(pat)
Reset one or more options to their default value.
Pass "all" as argument to reset all options.
Available options:
{opts_list}
Parameters
----------
pat : str/regex
If specified only options matching `prefix*` will be reset.
Note: partial matches are supported for convenience, but unless you
use the full option name (e.g. x.y.z.option_name), your code may break
in future versions if new options with similar names are introduced.
Returns
-------
None
Notes
-----
The available options with their descriptions:
{opts_desc}
"""
# bind the functions with their docstrings into a Callable
# and use that as the functions exposed in pd.api
get_option = CallableDynamicDoc(_get_option, _get_option_tmpl)
set_option = CallableDynamicDoc(_set_option, _set_option_tmpl)
reset_option = CallableDynamicDoc(_reset_option, _reset_option_tmpl)
describe_option = CallableDynamicDoc(_describe_option, _describe_option_tmpl)
options = DictWrapper(_global_config)
#
# Functions for use by pandas developers, in addition to User - api
class option_context(object):
"""
Context manager to temporarily set options in the `with` statement context.
You need to invoke as ``option_context(pat, val, [(pat, val), ...])``.
Examples
--------
>>> with option_context('display.max_rows', 10, 'display.max_columns', 5):
...
"""
def __init__(self, *args):
if not (len(args) % 2 == 0 and len(args) >= 2):
raise ValueError(
'Need to invoke as '
'option_context(pat, val, [(pat, val), ...]).'
)
self.ops = list(zip(args[::2], args[1::2]))
def __enter__(self):
undo = []
for pat, val in self.ops:
undo.append((pat, _get_option(pat, silent=True)))
self.undo = undo
for pat, val in self.ops:
_set_option(pat, val, silent=True)
def __exit__(self, *args):
if self.undo:
for pat, val in self.undo:
_set_option(pat, val, silent=True)
def register_option(key, defval, doc='', validator=None, cb=None):
"""Register an option in the package-wide ibis config object
Parameters
----------
key - a fully-qualified key, e.g. "x.y.option - z".
defval - the default value of the option
doc - a string description of the option
validator - a function of a single argument, should raise `ValueError` if
called with a value which is not a legal value for the option.
cb - a function of a single argument "key", which is called
immediately after an option value is set/reset. key is
the full name of the option.
Returns
-------
Nothing.
Raises
------
ValueError if `validator` is specified and `defval` is not a valid value.
"""
import tokenize
import keyword
key = key.lower()
if key in _registered_options:
raise OptionError("Option '%s' has already been registered" % key)
if key in _reserved_keys:
raise OptionError("Option '%s' is a reserved key" % key)
# the default value should be legal
if validator:
validator(defval)
# walk the nested dict, creating dicts as needed along the path
path = key.split('.')
for k in path:
if not bool(re.match('^' + tokenize.Name + '$', k)):
raise ValueError("%s is not a valid identifier" % k)
if keyword.iskeyword(k):
raise ValueError("%s is a python keyword" % k)
cursor = _global_config
for i, p in enumerate(path[:-1]):
if not isinstance(cursor, dict):
raise OptionError("Path prefix to option '%s' is already an option"
% '.'.join(path[:i]))
if p not in cursor:
cursor[p] = {}
cursor = cursor[p]
if not isinstance(cursor, dict):
raise OptionError("Path prefix to option '%s' is already an option"
% '.'.join(path[:-1]))
cursor[path[-1]] = defval # initialize
# save the option metadata
_registered_options[key] = RegisteredOption(key=key, defval=defval,
doc=doc, validator=validator,
cb=cb)
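# A hedged sketch of register_option in use; the key, default value, and
# callback are illustrative only (is_int is one of the validators defined
# near the bottom of this module):
#
# >>> def _on_change(key):
# ...     print('option %s was set' % key)
# >>> register_option('display.precision', 6, doc='float display precision',
# ...                 validator=is_int, cb=_on_change)
# >>> set_option('display.precision', 8)    # validated, then _on_change fires
# >>> set_option('display.precision', 'x')  # is_int raises ValueError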
def deprecate_option(key, msg=None, rkey=None, removal_ver=None):
"""
Mark option `key` as deprecated, if code attempts to access this option,
a warning will be produced, using `msg` if given, or a default message
if not.
if `rkey` is given, any access to the key will be re-routed to `rkey`.
Neither the existence of `key` nor that of `rkey` is checked. If they
do not exist, any subsequent access will fail as usual, after the
deprecation warning is given.
Parameters
----------
key - the name of the option to be deprecated. must be a fully-qualified
option name (e.g "x.y.z.rkey").
msg - (Optional) a warning message to output when the key is referenced.
if no message is given a default message will be emitted.
rkey - (Optional) the name of an option to reroute access to.
If specified, any referenced `key` will be re-routed to `rkey`
including set/get/reset.
rkey must be a fully-qualified option name (e.g "x.y.z.rkey").
used by the default message if no `msg` is specified.
removal_ver - (Optional) specifies the version in which this option will
be removed. used by the default message if no `msg`
is specified.
Returns
-------
Nothing
Raises
------
OptionError - if key has already been deprecated.
"""
key = key.lower()
if key in _deprecated_options:
raise OptionError("Option '%s' has already been defined as deprecated."
% key)
_deprecated_options[key] = DeprecatedOption(key, msg, rkey, removal_ver)
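# A hedged sketch of deprecating and rerouting a key (the names and version
# below are hypothetical):
#
# >>> register_option('display.height', 60)
# >>> register_option('display.max_rows', 60)
# >>> deprecate_option('display.height', rkey='display.max_rows',
# ...                  removal_ver='0.9')
# >>> get_option('display.height')  # warns, then reads display.max_rows
# 60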
#
# functions internal to the module
def _select_options(pat):
"""returns a list of keys matching `pat`
if pat=="all", returns all registered options
"""
# short-circuit for exact key
if pat in _registered_options:
return [pat]
# else look through all of them
keys = sorted(_registered_options.keys())
if pat == 'all': # reserved key
return keys
return [k for k in keys if re.search(pat, k, re.I)]
def _get_root(key):
path = key.split('.')
cursor = _global_config
for p in path[:-1]:
cursor = cursor[p]
return cursor, path[-1]
def _is_deprecated(key):
""" Returns True if the given option has been deprecated """
key = key.lower()
return key in _deprecated_options
def _get_deprecated_option(key):
"""
Retrieves the metadata for a deprecated option, if `key` is deprecated.
Returns
-------
DeprecatedOption (namedtuple) if key is deprecated, None otherwise
"""
try:
d = _deprecated_options[key]
except KeyError:
return None
else:
return d
def _get_registered_option(key):
"""
Retrieves the option metadata if `key` is a registered option.
Returns
-------
RegisteredOption (namedtuple) if key is deprecated, None otherwise
"""
return _registered_options.get(key)
def _translate_key(key):
"""
if key is deprecated and a replacement key is defined, will return the
replacement key, otherwise returns `key` as-is
"""
d = _get_deprecated_option(key)
if d:
return d.rkey or key
else:
return key
def _warn_if_deprecated(key):
"""
Checks if `key` is a deprecated option and if so, prints a warning.
Returns
-------
bool - True if `key` is deprecated, False otherwise.
"""
d = _get_deprecated_option(key)
if d:
if d.msg:
print(d.msg)
warnings.warn(d.msg, DeprecationWarning)
else:
msg = "'%s' is deprecated" % key
if d.removal_ver:
msg += ' and will be removed in %s' % d.removal_ver
if d.rkey:
msg += ", please use '%s' instead." % d.rkey
else:
msg += ', please refrain from using it.'
warnings.warn(msg, DeprecationWarning)
return True
return False
def _build_option_description(k):
""" Builds a formatted description of a registered option and prints it """
o = _get_registered_option(k)
d = _get_deprecated_option(k)
s = u('%s ') % k
if o and o.doc:
s += '\n'.join(o.doc.strip().split('\n'))
else:
s += 'No description available.'
if o:
s += u('\n [default: %s] [currently: %s]') % (o.defval,
_get_option(k, True))
if d:
s += u('\n (Deprecated')
s += (u(', use `%s` instead.') % d.rkey if d.rkey else '')
s += u(')')
s += '\n\n'
return s
def pp_options_list(keys, width=80, _print=False):
""" Builds a concise listing of available options, grouped by prefix """
from textwrap import wrap
from itertools import groupby
def pp(name, ks):
pfx = ('- ' + name + '.[' if name else '')
ls = wrap(', '.join(ks), width, initial_indent=pfx,
subsequent_indent=' ', break_long_words=False)
if ls and ls[-1] and name:
ls[-1] = ls[-1] + ']'
return ls
ls = []
singles = [x for x in sorted(keys) if x.find('.') < 0]
if singles:
ls += pp('', singles)
keys = [x for x in keys if x.find('.') >= 0]
for k, g in groupby(sorted(keys), lambda x: x[:x.rfind('.')]):
ks = [x[len(k) + 1:] for x in list(g)]
ls += pp(k, ks)
s = '\n'.join(ls)
if _print:
print(s)
else:
return s
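# For reference, a small hypothetical example of the grouped listing this
# produces:
#
# >>> pp_options_list(['display.width', 'display.max_rows', 'mode.sim'])
# '- display.[max_rows, width]\n- mode.[sim]'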
#
# helpers
@contextmanager
def config_prefix(prefix):
"""contextmanager for multiple invocations of API with a common prefix
supported API functions: (register / get / set )__option
Warning: This is not thread - safe, and won't work properly if you import
the API functions into your module using the "from x import y" construct.
Example:
import ibis.config as cf
with cf.config_prefix("display.font"):
cf.register_option("color", "red")
cf.register_option("size", " 5 pt")
cf.set_option(size, " 6 pt")
cf.get_option(size)
...
etc'
will register options "display.font.color", "display.font.size", set the
value of "display.font.size"... and so on.
"""
# Note: reset_option relies on set_option, and on key directly
# it does not fit in to this monkey-patching scheme
global register_option, get_option, set_option, reset_option
def wrap(func):
def inner(key, *args, **kwds):
pkey = '%s.%s' % (prefix, key)
return func(pkey, *args, **kwds)
return inner
_register_option = register_option
_get_option = get_option
_set_option = set_option
set_option = wrap(set_option)
get_option = wrap(get_option)
register_option = wrap(register_option)
yield None
set_option = _set_option
get_option = _get_option
register_option = _register_option
# These factories and methods are handy for use as the validator
# arg in register_option
def is_type_factory(_type):
"""
Parameters
----------
`_type` - a type to be compared against (e.g. type(x) == `_type`)
Returns
-------
validator - a function of a single argument x, which raises
ValueError if type(x) is not equal to `_type`
"""
def inner(x):
if type(x) != _type:
raise ValueError("Value must have type '%s'" % str(_type))
return inner
def is_instance_factory(_type):
"""
Parameters
----------
`_type` - the type to be checked against
Returns
-------
validator - a function of a single argument x, which raises
ValueError if x is not an instance of `_type`
"""
if isinstance(_type, (tuple, list)):
_type = tuple(_type)
type_repr = "|".join(map(str, _type))
else:
type_repr = "'%s'" % _type
def inner(x):
if not isinstance(x, _type):
raise ValueError("Value must be an instance of %s" % type_repr)
return inner
def is_one_of_factory(legal_values):
def inner(x):
if x not in legal_values:
pp_values = map(str, legal_values)
raise ValueError("Value must be one of %s"
% str("|".join(pp_values)))
return inner
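# A brief sketch of a factory-built validator in action (values are
# illustrative):
#
# >>> v = is_one_of_factory(['left', 'right'])
# >>> v('left')    # passes silently
# >>> v('center')  # raises ValueError("Value must be one of left|right")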
# common type validators, for convenience
# usage: register_option(... , validator = is_int)
is_int = is_type_factory(int)
is_bool = is_type_factory(bool)
is_float = is_type_factory(float)
is_str = is_type_factory(str)
# is_unicode = is_type_factory(compat.text_type)
is_text = is_instance_factory((str, bytes))
| apache-2.0 |
kleskjr/scipy | scipy/signal/spectral.py | 28 | 34979 | """Tools for spectral analysis.
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy import fftpack
from . import signaltools
from .windows import get_window
from ._spectral import lombscargle
import warnings
from scipy._lib.six import string_types
__all__ = ['periodogram', 'welch', 'lombscargle', 'csd', 'coherence',
'spectrogram']
def periodogram(x, fs=1.0, window=None, nfft=None, detrend='constant',
return_onesided=True, scaling='density', axis=-1):
"""
Estimate power spectral density using a periodogram.
Parameters
----------
x : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. See `get_window` for a list of windows and
required parameters. If `window` is an array it will be used
directly as the window. Defaults to None; equivalent to 'boxcar'.
nfft : int, optional
Length of the FFT used. If None the length of `x` will be used.
detrend : str or function or False, optional
Specifies how to detrend `x` prior to computing the spectrum. If
`detrend` is a string, it is passed as the ``type`` argument to
`detrend`. If it is a function, it should return a detrended array.
If `detrend` is False, no detrending is done. Defaults to 'constant'.
return_onesided : bool, optional
If True, return a one-sided spectrum for real data. If False return
a two-sided spectrum. Note that for complex data, a two-sided
spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the power spectral density ('density')
where `Pxx` has units of V**2/Hz and computing the power spectrum
('spectrum') where `Pxx` has units of V**2, if `x` is measured in V
and fs is measured in Hz. Defaults to 'density'
axis : int, optional
Axis along which the periodogram is computed; the default is over
the last axis (i.e. ``axis=-1``).
Returns
-------
f : ndarray
Array of sample frequencies.
Pxx : ndarray
Power spectral density or power spectrum of `x`.
Notes
-----
.. versionadded:: 0.12.0
See Also
--------
welch: Estimate power spectral density using Welch's method
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> np.random.seed(1234)
Generate a test signal, a 2 Vrms sine wave at 1234 Hz, corrupted by
0.001 V**2/Hz of white noise sampled at 10 kHz.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 2*np.sqrt(2)
>>> freq = 1234.0
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / fs
>>> x = amp*np.sin(2*np.pi*freq*time)
>>> x += np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
Compute and plot the power spectral density.
>>> f, Pxx_den = signal.periodogram(x, fs)
>>> plt.semilogy(f, Pxx_den)
>>> plt.ylim([1e-7, 1e2])
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('PSD [V**2/Hz]')
>>> plt.show()
If we average the last half of the spectral density, to exclude the
peak, we can recover the noise power on the signal.
>>> np.mean(Pxx_den[256:])
0.0018156616014838548
Now compute and plot the power spectrum.
>>> f, Pxx_spec = signal.periodogram(x, fs, 'flattop', scaling='spectrum')
>>> plt.figure()
>>> plt.semilogy(f, np.sqrt(Pxx_spec))
>>> plt.ylim([1e-4, 1e1])
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('Linear spectrum [V RMS]')
>>> plt.show()
The peak height in the power spectrum is an estimate of the RMS amplitude.
>>> np.sqrt(Pxx_spec.max())
2.0077340678640727
"""
x = np.asarray(x)
if x.size == 0:
return np.empty(x.shape), np.empty(x.shape)
if window is None:
window = 'boxcar'
if nfft is None:
nperseg = x.shape[axis]
elif nfft == x.shape[axis]:
nperseg = nfft
elif nfft > x.shape[axis]:
nperseg = x.shape[axis]
elif nfft < x.shape[axis]:
s = [np.s_[:]]*len(x.shape)
s[axis] = np.s_[:nfft]
x = x[s]
nperseg = nfft
nfft = None
return welch(x, fs, window, nperseg, 0, nfft, detrend, return_onesided,
scaling, axis)
def welch(x, fs=1.0, window='hann', nperseg=256, noverlap=None, nfft=None,
detrend='constant', return_onesided=True, scaling='density', axis=-1):
"""
Estimate power spectral density using Welch's method.
Welch's method [1]_ computes an estimate of the power spectral density
by dividing the data into overlapping segments, computing a modified
periodogram for each segment and averaging the periodograms.
Parameters
----------
x : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length will be used for nperseg.
Defaults to 'hann'.
nperseg : int, optional
Length of each segment. Defaults to 256.
noverlap : int, optional
Number of points to overlap between segments. If None,
``noverlap = nperseg // 2``. Defaults to None.
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If None,
the FFT length is `nperseg`. Defaults to None.
detrend : str or function or False, optional
Specifies how to detrend each segment. If `detrend` is a string,
it is passed as the ``type`` argument to `detrend`. If it is a
function, it takes a segment and returns a detrended segment.
If `detrend` is False, no detrending is done. Defaults to 'constant'.
return_onesided : bool, optional
If True, return a one-sided spectrum for real data. If False return
a two-sided spectrum. Note that for complex data, a two-sided
spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the power spectral density ('density')
where `Pxx` has units of V**2/Hz and computing the power spectrum
('spectrum') where `Pxx` has units of V**2, if `x` is measured in V
and fs is measured in Hz. Defaults to 'density'
axis : int, optional
Axis along which the periodogram is computed; the default is over
the last axis (i.e. ``axis=-1``).
Returns
-------
f : ndarray
Array of sample frequencies.
Pxx : ndarray
Power spectral density or power spectrum of x.
See Also
--------
periodogram: Simple, optionally modified periodogram
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
Notes
-----
An appropriate amount of overlap will depend on the choice of window
and on your requirements. For the default 'hann' window an
overlap of 50% is a reasonable trade off between accurately estimating
the signal power, while not over counting any of the data. Narrower
windows may require a larger overlap.
If `noverlap` is 0, this method is equivalent to Bartlett's method [2]_.
.. versionadded:: 0.12.0
References
----------
.. [1] P. Welch, "The use of the fast Fourier transform for the
estimation of power spectra: A method based on time averaging
over short, modified periodograms", IEEE Trans. Audio
Electroacoust. vol. 15, pp. 70-73, 1967.
.. [2] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra",
Biometrika, vol. 37, pp. 1-16, 1950.
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> np.random.seed(1234)
Generate a test signal, a 2 Vrms sine wave at 1234 Hz, corrupted by
0.001 V**2/Hz of white noise sampled at 10 kHz.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 2*np.sqrt(2)
>>> freq = 1234.0
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / fs
>>> x = amp*np.sin(2*np.pi*freq*time)
>>> x += np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
Compute and plot the power spectral density.
>>> f, Pxx_den = signal.welch(x, fs, nperseg=1024)
>>> plt.semilogy(f, Pxx_den)
>>> plt.ylim([0.5e-3, 1])
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('PSD [V**2/Hz]')
>>> plt.show()
If we average the last half of the spectral density, to exclude the
peak, we can recover the noise power on the signal.
>>> np.mean(Pxx_den[256:])
0.0009924865443739191
Now compute and plot the power spectrum.
>>> f, Pxx_spec = signal.welch(x, fs, 'flattop', 1024, scaling='spectrum')
>>> plt.figure()
>>> plt.semilogy(f, np.sqrt(Pxx_spec))
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('Linear spectrum [V RMS]')
>>> plt.show()
The peak height in the power spectrum is an estimate of the RMS amplitude.
>>> np.sqrt(Pxx_spec.max())
2.0077340678640727
"""
freqs, Pxx = csd(x, x, fs, window, nperseg, noverlap, nfft, detrend,
return_onesided, scaling, axis)
return freqs, Pxx.real
def csd(x, y, fs=1.0, window='hann', nperseg=256, noverlap=None, nfft=None,
detrend='constant', return_onesided=True, scaling='density', axis=-1):
"""
Estimate the cross power spectral density, Pxy, using Welch's method.
Parameters
----------
x : array_like
Time series of measurement values
y : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` and `y` time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length will be used for nperseg.
Defaults to 'hann'.
nperseg : int, optional
Length of each segment. Defaults to 256.
noverlap: int, optional
Number of points to overlap between segments. If None,
``noverlap = nperseg // 2``. Defaults to None.
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If None,
the FFT length is `nperseg`. Defaults to None.
detrend : str or function or False, optional
Specifies how to detrend each segment. If `detrend` is a string,
it is passed as the ``type`` argument to `detrend`. If it is a
function, it takes a segment and returns a detrended segment.
If `detrend` is False, no detrending is done. Defaults to 'constant'.
return_onesided : bool, optional
If True, return a one-sided spectrum for real data. If False return
a two-sided spectrum. Note that for complex data, a two-sided
spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the cross spectral density ('density')
where `Pxy` has units of V**2/Hz and computing the cross spectrum
('spectrum') where `Pxy` has units of V**2, if `x` and `y` are
measured in V and fs is measured in Hz. Defaults to 'density'
axis : int, optional
Axis along which the CSD is computed for both inputs; the default is
over the last axis (i.e. ``axis=-1``).
Returns
-------
f : ndarray
Array of sample frequencies.
Pxy : ndarray
Cross spectral density or cross power spectrum of x,y.
See Also
--------
periodogram: Simple, optionally modified periodogram
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
welch: Power spectral density by Welch's method. [Equivalent to csd(x,x)]
coherence: Magnitude squared coherence by Welch's method.
Notes
-----
By convention, Pxy is computed with the conjugate FFT of X multiplied by
the FFT of Y.
If the input series differ in length, the shorter series will be
zero-padded to match.
An appropriate amount of overlap will depend on the choice of window
and on your requirements. For the default 'hann' window an
overlap of 50% is a reasonable trade-off between accurately estimating
the signal power, while not over counting any of the data. Narrower
windows may require a larger overlap.
.. versionadded:: 0.16.0
References
----------
.. [1] P. Welch, "The use of the fast Fourier transform for the
estimation of power spectra: A method based on time averaging
over short, modified periodograms", IEEE Trans. Audio
Electroacoust. vol. 15, pp. 70-73, 1967.
.. [2] Rabiner, Lawrence R., and B. Gold. "Theory and Application of
Digital Signal Processing" Prentice-Hall, pp. 414-419, 1975
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Generate two test signals with some common features.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 20
>>> freq = 1234.0
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / fs
>>> b, a = signal.butter(2, 0.25, 'low')
>>> x = np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
>>> y = signal.lfilter(b, a, x)
>>> x += amp*np.sin(2*np.pi*freq*time)
>>> y += np.random.normal(scale=0.1*np.sqrt(noise_power), size=time.shape)
Compute and plot the magnitude of the cross spectral density.
>>> f, Pxy = signal.csd(x, y, fs, nperseg=1024)
>>> plt.semilogy(f, np.abs(Pxy))
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('CSD [V**2/Hz]')
>>> plt.show()
"""
freqs, _, Pxy = _spectral_helper(x, y, fs, window, nperseg, noverlap, nfft,
detrend, return_onesided, scaling, axis,
mode='psd')
# Average over windows.
if len(Pxy.shape) >= 2 and Pxy.size > 0:
if Pxy.shape[-1] > 1:
Pxy = Pxy.mean(axis=-1)
else:
Pxy = np.reshape(Pxy, Pxy.shape[:-1])
return freqs, Pxy
def spectrogram(x, fs=1.0, window=('tukey',.25), nperseg=256, noverlap=None,
nfft=None, detrend='constant', return_onesided=True,
scaling='density', axis=-1, mode='psd'):
"""
Compute a spectrogram with consecutive Fourier transforms.
Spectrograms can be used as a way of visualizing the change of a
nonstationary signal's frequency content over time.
Parameters
----------
x : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length will be used for nperseg.
Defaults to a Tukey window with shape parameter of 0.25.
nperseg : int, optional
Length of each segment. Defaults to 256.
noverlap : int, optional
Number of points to overlap between segments. If None,
``noverlap = nperseg // 8``. Defaults to None.
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If None,
the FFT length is `nperseg`. Defaults to None.
detrend : str or function or False, optional
Specifies how to detrend each segment. If `detrend` is a string,
it is passed as the ``type`` argument to `detrend`. If it is a
function, it takes a segment and returns a detrended segment.
If `detrend` is False, no detrending is done. Defaults to 'constant'.
return_onesided : bool, optional
If True, return a one-sided spectrum for real data. If False return
a two-sided spectrum. Note that for complex data, a two-sided
spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the power spectral density ('density')
where `Pxx` has units of V**2/Hz and computing the power spectrum
('spectrum') where `Pxx` has units of V**2, if `x` is measured in V
and fs is measured in Hz. Defaults to 'density'
axis : int, optional
Axis along which the spectrogram is computed; the default is over
the last axis (i.e. ``axis=-1``).
mode : str, optional
Defines what kind of return values are expected. Options are ['psd',
'complex', 'magnitude', 'angle', 'phase'].
Returns
-------
f : ndarray
Array of sample frequencies.
t : ndarray
Array of segment times.
Sxx : ndarray
Spectrogram of x. By default, the last axis of Sxx corresponds to the
segment times.
See Also
--------
periodogram: Simple, optionally modified periodogram
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
welch: Power spectral density by Welch's method.
csd: Cross spectral density by Welch's method.
Notes
-----
An appropriate amount of overlap will depend on the choice of window
and on your requirements. In contrast to welch's method, where the entire
data stream is averaged over, one may wish to use a smaller overlap (or
perhaps none at all) when computing a spectrogram, to maintain some
statistical independence between individual segments.
.. versionadded:: 0.16.0
References
----------
.. [1] Oppenheim, Alan V., Ronald W. Schafer, John R. Buck "Discrete-Time
Signal Processing", Prentice Hall, 1999.
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Generate a test signal, a 2 Vrms sine wave whose frequency linearly changes
with time from 1kHz to 2kHz, corrupted by 0.001 V**2/Hz of white noise
sampled at 10 kHz.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 2 * np.sqrt(2)
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / fs
>>> freq = np.linspace(1e3, 2e3, N)
>>> x = amp * np.sin(2*np.pi*freq*time)
>>> x += np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
Compute and plot the spectrogram.
>>> f, t, Sxx = signal.spectrogram(x, fs)
>>> plt.pcolormesh(t, f, Sxx)
>>> plt.ylabel('Frequency [Hz]')
>>> plt.xlabel('Time [sec]')
>>> plt.show()
"""
# Less overlap than welch, so samples are more statistically independent
if noverlap is None:
noverlap = nperseg // 8
freqs, time, Pxy = _spectral_helper(x, x, fs, window, nperseg, noverlap,
nfft, detrend, return_onesided, scaling,
axis, mode=mode)
return freqs, time, Pxy
def coherence(x, y, fs=1.0, window='hann', nperseg=256, noverlap=None,
nfft=None, detrend='constant', axis=-1):
"""
Estimate the magnitude squared coherence estimate, Cxy, of discrete-time
signals X and Y using Welch's method.
Cxy = abs(Pxy)**2/(Pxx*Pyy), where Pxx and Pyy are power spectral density
estimates of X and Y, and Pxy is the cross spectral density estimate of X
and Y.
Parameters
----------
x : array_like
Time series of measurement values
y : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` and `y` time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length will be used for nperseg.
Defaults to 'hann'.
nperseg : int, optional
Length of each segment. Defaults to 256.
noverlap: int, optional
Number of points to overlap between segments. If None,
``noverlap = nperseg // 2``. Defaults to None.
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If None,
the FFT length is `nperseg`. Defaults to None.
detrend : str or function or False, optional
Specifies how to detrend each segment. If `detrend` is a string,
it is passed as the ``type`` argument to `detrend`. If it is a
function, it takes a segment and returns a detrended segment.
If `detrend` is False, no detrending is done. Defaults to 'constant'.
axis : int, optional
Axis along which the coherence is computed for both inputs; the default is
over the last axis (i.e. ``axis=-1``).
Returns
-------
f : ndarray
Array of sample frequencies.
Cxy : ndarray
Magnitude squared coherence of x and y.
See Also
--------
periodogram: Simple, optionally modified periodogram
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
welch: Power spectral density by Welch's method.
csd: Cross spectral density by Welch's method.
Notes
-----
An appropriate amount of overlap will depend on the choice of window
and on your requirements. For the default 'hann' window an
overlap of 50% is a reasonable trade-off between accurately estimating
the signal power, while not over counting any of the data. Narrower
windows may require a larger overlap.
.. versionadded:: 0.16.0
References
----------
.. [1] P. Welch, "The use of the fast Fourier transform for the
estimation of power spectra: A method based on time averaging
over short, modified periodograms", IEEE Trans. Audio
Electroacoust. vol. 15, pp. 70-73, 1967.
.. [2] Stoica, Petre, and Randolph Moses, "Spectral Analysis of Signals"
Prentice Hall, 2005
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Generate two test signals with some common features.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 20
>>> freq = 1234.0
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / fs
>>> b, a = signal.butter(2, 0.25, 'low')
>>> x = np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
>>> y = signal.lfilter(b, a, x)
>>> x += amp*np.sin(2*np.pi*freq*time)
>>> y += np.random.normal(scale=0.1*np.sqrt(noise_power), size=time.shape)
Compute and plot the coherence.
>>> f, Cxy = signal.coherence(x, y, fs, nperseg=1024)
>>> plt.semilogy(f, Cxy)
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('Coherence')
>>> plt.show()
"""
freqs, Pxx = welch(x, fs, window, nperseg, noverlap, nfft, detrend,
axis=axis)
_, Pyy = welch(y, fs, window, nperseg, noverlap, nfft, detrend, axis=axis)
_, Pxy = csd(x, y, fs, window, nperseg, noverlap, nfft, detrend, axis=axis)
Cxy = np.abs(Pxy)**2 / Pxx / Pyy
return freqs, Cxy
def _spectral_helper(x, y, fs=1.0, window='hann', nperseg=256,
noverlap=None, nfft=None, detrend='constant',
return_onesided=True, scaling='spectrum', axis=-1,
mode='psd'):
"""
Calculate various forms of windowed FFTs for PSD, CSD, etc.
This is a helper function that implements the commonality between the
psd, csd, and spectrogram functions. It is not designed to be called
externally. The windows are not averaged over; the result from each window
is returned.
Parameters
----------
x : array_like
Array or sequence containing the data to be analyzed.
y : array_like
Array or sequence containing the data to be analyzed. If this is
the same object in memory as x (i.e. _spectral_helper(x, x, ...)),
the extra computations are spared.
fs : float, optional
Sampling frequency of the time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length will be used for nperseg.
Defaults to 'hann'.
nperseg : int, optional
Length of each segment. Defaults to 256.
noverlap : int, optional
Number of points to overlap between segments. If None,
``noverlap = nperseg // 2``. Defaults to None.
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If None,
the FFT length is `nperseg`. Defaults to None.
detrend : str or function or False, optional
Specifies how to detrend each segment. If `detrend` is a string,
it is passed as the ``type`` argument to `detrend`. If it is a
function, it takes a segment and returns a detrended segment.
If `detrend` is False, no detrending is done. Defaults to 'constant'.
return_onesided : bool, optional
If True, return a one-sided spectrum for real data. If False return
a two-sided spectrum. Note that for complex data, a two-sided
spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the cross spectral density ('density')
where `Pxy` has units of V**2/Hz and computing the cross spectrum
('spectrum') where `Pxy` has units of V**2, if `x` and `y` are
measured in V and fs is measured in Hz. Defaults to 'spectrum'
axis : int, optional
Axis along which the periodogram is computed; the default is over
the last axis (i.e. ``axis=-1``).
mode : str, optional
Defines what kind of return values are expected. Options are ['psd',
'complex', 'magnitude', 'angle', 'phase'].
Returns
-------
freqs : ndarray
Array of sample frequencies.
t : ndarray
Array of times corresponding to each data segment
result : ndarray
Array of output data, contents dependent on the *mode* kwarg.
References
----------
.. [1] Stack Overflow, "Rolling window for 1D arrays in Numpy?",
http://stackoverflow.com/a/6811241
.. [2] Stack Overflow, "Using strides for an efficient moving average
filter", http://stackoverflow.com/a/4947453
Notes
-----
Adapted from matplotlib.mlab
.. versionadded:: 0.16.0
"""
if mode not in ['psd', 'complex', 'magnitude', 'angle', 'phase']:
raise ValueError("Unknown value for mode %s, must be one of: "
"'psd', 'complex', "
"'magnitude', 'angle', 'phase'" % mode)
# If x and y are the same object we can save ourselves some computation.
same_data = y is x
if not same_data and mode != 'psd':
raise ValueError("x and y must be equal if mode is not 'psd'")
axis = int(axis)
# Ensure we have np.arrays, get outdtype
x = np.asarray(x)
if not same_data:
y = np.asarray(y)
outdtype = np.result_type(x,y,np.complex64)
else:
outdtype = np.result_type(x,np.complex64)
if not same_data:
# Check if we can broadcast the outer axes together
xouter = list(x.shape)
youter = list(y.shape)
xouter.pop(axis)
youter.pop(axis)
try:
outershape = np.broadcast(np.empty(xouter), np.empty(youter)).shape
except ValueError:
raise ValueError('x and y cannot be broadcast together.')
if same_data:
if x.size == 0:
return np.empty(x.shape), np.empty(x.shape), np.empty(x.shape)
else:
if x.size == 0 or y.size == 0:
outshape = outershape + (min([x.shape[axis], y.shape[axis]]),)
emptyout = np.rollaxis(np.empty(outshape), -1, axis)
return emptyout, emptyout, emptyout
if x.ndim > 1:
if axis != -1:
x = np.rollaxis(x, axis, len(x.shape))
if not same_data and y.ndim > 1:
y = np.rollaxis(y, axis, len(y.shape))
# Check if x and y are the same length, zero-pad if necessary
if not same_data:
if x.shape[-1] != y.shape[-1]:
if x.shape[-1] < y.shape[-1]:
pad_shape = list(x.shape)
pad_shape[-1] = y.shape[-1] - x.shape[-1]
x = np.concatenate((x, np.zeros(pad_shape)), -1)
else:
pad_shape = list(y.shape)
pad_shape[-1] = x.shape[-1] - y.shape[-1]
y = np.concatenate((y, np.zeros(pad_shape)), -1)
# X and Y are same length now, can test nperseg with either
if x.shape[-1] < nperseg:
warnings.warn('nperseg = {0:d} is greater than input length = {1:d}, '
'using nperseg = {1:d}'.format(nperseg, x.shape[-1]))
nperseg = x.shape[-1]
nperseg = int(nperseg)
if nperseg < 1:
raise ValueError('nperseg must be a positive integer')
if nfft is None:
nfft = nperseg
elif nfft < nperseg:
raise ValueError('nfft must be greater than or equal to nperseg.')
else:
nfft = int(nfft)
if noverlap is None:
noverlap = nperseg//2
elif noverlap >= nperseg:
raise ValueError('noverlap must be less than nperseg.')
else:
noverlap = int(noverlap)
# Handle detrending and window functions
if not detrend:
def detrend_func(d):
return d
elif not hasattr(detrend, '__call__'):
def detrend_func(d):
return signaltools.detrend(d, type=detrend, axis=-1)
elif axis != -1:
# Wrap this function so that it receives a shape that it could
# reasonably expect to receive.
def detrend_func(d):
d = np.rollaxis(d, -1, axis)
d = detrend(d)
return np.rollaxis(d, axis, len(d.shape))
else:
detrend_func = detrend
if isinstance(window, string_types) or type(window) is tuple:
win = get_window(window, nperseg)
else:
win = np.asarray(window)
if len(win.shape) != 1:
raise ValueError('window must be 1-D')
if win.shape[0] != nperseg:
raise ValueError('window must have length of nperseg')
if np.result_type(win,np.complex64) != outdtype:
win = win.astype(outdtype)
if mode == 'psd':
if scaling == 'density':
scale = 1.0 / (fs * (win*win).sum())
elif scaling == 'spectrum':
scale = 1.0 / win.sum()**2
else:
raise ValueError('Unknown scaling: %r' % scaling)
else:
scale = 1
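# For reference: with a boxcar window of length nperseg, the scales above
# reduce to 1/(fs*nperseg) for 'density' and 1/nperseg**2 for 'spectrum',
# the familiar periodogram normalizations.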
if return_onesided is True:
if np.iscomplexobj(x):
sides = 'twosided'
else:
sides = 'onesided'
if not same_data:
if np.iscomplexobj(y):
sides = 'twosided'
else:
sides = 'twosided'
if sides == 'twosided':
num_freqs = nfft
elif sides == 'onesided':
if nfft % 2:
num_freqs = (nfft + 1)//2
else:
num_freqs = nfft//2 + 1
# Perform the windowed FFTs
result = _fft_helper(x, win, detrend_func, nperseg, noverlap, nfft)
result = result[..., :num_freqs]
freqs = fftpack.fftfreq(nfft, 1/fs)[:num_freqs]
if not same_data:
# All the same operations on the y data
result_y = _fft_helper(y, win, detrend_func, nperseg, noverlap, nfft)
result_y = result_y[..., :num_freqs]
result = np.conjugate(result) * result_y
elif mode == 'psd':
result = np.conjugate(result) * result
elif mode == 'magnitude':
result = np.absolute(result)
elif mode == 'angle' or mode == 'phase':
result = np.angle(result)
elif mode == 'complex':
pass
result *= scale
if sides == 'onesided':
if nfft % 2:
result[...,1:] *= 2
else:
# Last point is unpaired Nyquist freq point, don't double
result[...,1:-1] *= 2
t = np.arange(nperseg/2, x.shape[-1] - nperseg/2 + 1, nperseg - noverlap)/float(fs)
if sides != 'twosided' and not nfft % 2:
# get the last value correctly, it is negative otherwise
freqs[-1] *= -1
# we unwrap the phase here to handle the onesided vs. twosided case
if mode == 'phase':
result = np.unwrap(result, axis=-1)
result = result.astype(outdtype)
# All imaginary parts are zero anyways
if same_data and mode != 'complex':
result = result.real
# Output is going to have new last axis for window index
if axis != -1:
# Specify as positive axis index
if axis < 0:
axis = len(result.shape)-1-axis
# Roll frequency axis back to axis where the data came from
result = np.rollaxis(result, -1, axis)
else:
# Make sure window/time index is last axis
result = np.rollaxis(result, -1, -2)
return freqs, t, result
def _fft_helper(x, win, detrend_func, nperseg, noverlap, nfft):
"""
Calculate windowed FFT, for internal use by scipy.signal._spectral_helper
This is a helper function that does the main FFT calculation for
_spectral_helper. All input validation is performed there, and the data
axis is assumed to be the last axis of x. It is not designed to be called
externally. The windows are not averaged over; the result from each window
is returned.
Returns
-------
result : ndarray
Array of FFT data
References
----------
.. [1] Stack Overflow, "Repeat NumPy array without replicating data?",
http://stackoverflow.com/a/5568169
Notes
-----
Adapted from matplotlib.mlab
.. versionadded:: 0.16.0
"""
# Create strided array of data segments
if nperseg == 1 and noverlap == 0:
result = x[..., np.newaxis]
else:
step = nperseg - noverlap
shape = x.shape[:-1]+((x.shape[-1]-noverlap)//step, nperseg)
strides = x.strides[:-1]+(step*x.strides[-1], x.strides[-1])
result = np.lib.stride_tricks.as_strided(x, shape=shape,
strides=strides)
# Detrend each data segment individually
result = detrend_func(result)
# Apply window by multiplication
result = win * result
# Perform the fft. Acts on last axis by default. Zero-pads automatically
result = fftpack.fft(result, n=nfft)
return result
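# A small commented illustration of the segmentation above: for x of length
# 10 with nperseg=4 and noverlap=2, step = 2 and the strided view has shape
# (4, 4), i.e. the segments x[0:4], x[2:6], x[4:8] and x[6:10], which share
# memory with x instead of copying it.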
| bsd-3-clause |
csherwood-usgs/landlab | landlab/components/overland_flow/examples/deAlmeida_DEM_driver.py | 5 | 4529 | #! /usr/env/python
""" deAlmeida_SquareBasin.py
This is an example driver which utilizes the
OverlandFlow class from generate_overland_flow_deAlmeida.py
The driver reads in a square watershed run to steady state using a simple
stream power driver.
It then routes a storm across the square watershed. Storm parameters taken from
Hawk and Eagleson (1992) Poisson parameters for the Denver, CO station.
After the storm, additional time is needed to drain the water from the system.
At the end of the storm, total water depth mass is calculated and compared
against the predicted water mass under steady state conditions. The hydrograph
is plotted and percent error is output.
Written by Jordan M. Adams, April 2016.
"""
from __future__ import print_function
from landlab.components.overland_flow import OverlandFlow
from landlab.io import read_esri_ascii
from matplotlib import pyplot as plt
import os
import time
import numpy as np
# This provides us with an initial time. At the end, it gives us total
# model run time in seconds.
start_time = time.time()
## This is a steady-state landscape generated by simple stream power
## This is a 200 x 200 grid with an outlet at center of the bottom edge.
dem_name ='Square_TestBasin.asc'
## Now we can create and initialize a raster model grid by reading a DEM
## First, this looks for the DEM in the overland_flow folder in Landlab
DATA_FILE = os.path.join(os.path.dirname(__file__), dem_name)
## Now the ASCII is read, assuming that it is standard ESRI format.
(rmg, z) = read_esri_ascii(DATA_FILE)
## Start time 1 second
elapsed_time = 1.0
## Model Run Time in seconds
model_run_time = 216000.0
## Lists for saving data
discharge_at_outlet = []
hydrograph_time_sec = []
hydrograph_time_hrs = []
## Setting initial fields...
rmg['node']['topographic__elevation'] = z
rmg['link']['surface_water__discharge'] = np.zeros(rmg.number_of_links)
rmg['node']['surface_water__depth'] = np.zeros(rmg.number_of_nodes)
## and fixed link boundary conditions...
rmg.set_fixed_link_boundaries_at_grid_edges(True, True, True, True,
fixed_link_value_of='surface_water__discharge')
## Setting the outlet node to OPEN_BOUNDARY
rmg.status_at_node[100] = 1
## Initialize the OverlandFlow() class.
of = OverlandFlow(rmg, use_fixed_links = True, steep_slopes=True)
## Record the start time so we know how long it runs.
start_time = time.time()
## Link to sample at the outlet
link_to_sample = 299
## Storm duration in seconds
storm_duration = 7200.0
## Running the overland flow component.
while elapsed_time < model_run_time:
## The storm starts when the model starts. While the elapsed time is less
## than the storm duration, we add water to the system as rainfall.
if elapsed_time < storm_duration:
of.rainfall_intensity = 4.07222 * (10 ** -7) # Rainfall intensity (m/s)
## Then the elapsed time exceeds the storm duration, rainfall ceases.
else:
of.rainfall_intensity = 0.0
## Generating overland flow based on the deAlmeida solution.
of.overland_flow()
## Append time and discharge to their lists to save data and for plotting.
hydrograph_time_sec.append(elapsed_time)
hydrograph_time_hrs.append(round(elapsed_time/3600., 2))
discharge_at_outlet.append(of.q[link_to_sample])
## Add the time step, repeat until elapsed time >= model_run_time
print(elapsed_time)
elapsed_time += of.dt
plt.figure(1)
plt.imshow(z.reshape(rmg.shape), origin='lower', cmap='pink')
plt.tick_params(axis='both', labelbottom='off', labelleft='off')
cb = plt.colorbar()
cb.set_label('Elevation (m)', rotation=270, labelpad=15)
plt.figure(2)
plt.plot(hydrograph_time_hrs, (np.abs(discharge_at_outlet)*rmg.dx), 'b-')
plt.xlabel('Time (hrs)')
plt.ylabel('Discharge (cms)')
plt.title('Hydrograph')
calc_water_mass = round(np.abs(np.trapz(
(np.abs(discharge_at_outlet) * rmg.dx), hydrograph_time_sec)), 2)
theoretical_water_mass = round(((rmg.number_of_core_nodes * rmg.cellarea) *
(4.07222 * (10 ** -7)) * storm_duration), 2)
percent_error = round(((np.abs(calc_water_mass) - theoretical_water_mass) /
theoretical_water_mass * 100), 2)
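## Rough sketch of the mass balance being checked here: at steady state the
## outlet discharge should equal rainfall rate times drainage area,
## Q = P * A = (4.07222e-7 m/s) * (number_of_core_nodes * cellarea m**2),
## so integrating the hydrograph over the full run should recover
## P * A * storm_duration cubic meters of water.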
print('\n', 'Total calculated water mass: ', calc_water_mass)
print('\n', 'Theoretical water mass (Q = P * A): ', theoretical_water_mass)
print('\n', 'Percent Error: ', percent_error, ' %')
endtime = time.time()
print('\n', 'Total run time: ', round(endtime - start_time, 2), ' seconds')
| mit |
jrversteegh/softsailor | deps/numpy-1.6.1/numpy/linalg/linalg.py | 22 | 61161 | """Lite version of scipy.linalg.
Notes
-----
This module is a lite version of the linalg.py module in SciPy which
contains high-level Python interface to the LAPACK library. The lite
version only accesses the following LAPACK functions: dgesv, zgesv,
dgeev, zgeev, dgesdd, zgesdd, dgelsd, zgelsd, dsyevd, zheevd, dgetrf,
zgetrf, dpotrf, zpotrf, dgeqrf, zgeqrf, zungqr, dorgqr.
"""
__all__ = ['matrix_power', 'solve', 'tensorsolve', 'tensorinv', 'inv',
'cholesky', 'eigvals', 'eigvalsh', 'pinv', 'slogdet', 'det',
'svd', 'eig', 'eigh','lstsq', 'norm', 'qr', 'cond', 'matrix_rank',
'LinAlgError']
from numpy.core import array, asarray, zeros, empty, transpose, \
intc, single, double, csingle, cdouble, inexact, complexfloating, \
newaxis, ravel, all, Inf, dot, add, multiply, identity, sqrt, \
maximum, flatnonzero, diagonal, arange, fastCopyAndTranspose, sum, \
isfinite, size, finfo, absolute, log, exp
from numpy.lib import triu
from numpy.linalg import lapack_lite
from numpy.matrixlib.defmatrix import matrix_power
from numpy.compat import asbytes
# For Python2/3 compatibility
_N = asbytes('N')
_V = asbytes('V')
_A = asbytes('A')
_S = asbytes('S')
_L = asbytes('L')
fortran_int = intc
# Error object
class LinAlgError(Exception):
"""
Generic Python-exception-derived object raised by linalg functions.
General purpose exception class, derived from Python's exception.Exception
class, programmatically raised in linalg functions when a Linear
Algebra-related condition would prevent further correct execution of the
function.
Parameters
----------
None
Examples
--------
>>> from numpy import linalg as LA
>>> LA.inv(np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "...linalg.py", line 350,
in inv return wrap(solve(a, identity(a.shape[0], dtype=a.dtype)))
File "...linalg.py", line 249,
in solve
raise LinAlgError, 'Singular matrix'
numpy.linalg.linalg.LinAlgError: Singular matrix
"""
pass
def _makearray(a):
new = asarray(a)
wrap = getattr(a, "__array_prepare__", new.__array_wrap__)
return new, wrap
def isComplexType(t):
return issubclass(t, complexfloating)
_real_types_map = {single : single,
double : double,
csingle : single,
cdouble : double}
_complex_types_map = {single : csingle,
double : cdouble,
csingle : csingle,
cdouble : cdouble}
def _realType(t, default=double):
return _real_types_map.get(t, default)
def _complexType(t, default=cdouble):
return _complex_types_map.get(t, default)
def _linalgRealType(t):
"""Cast the type t to either double or cdouble."""
return double
def _commonType(*arrays):
# in lite version, use higher precision (always double or cdouble)
result_type = single
is_complex = False
for a in arrays:
if issubclass(a.dtype.type, inexact):
if isComplexType(a.dtype.type):
is_complex = True
rt = _realType(a.dtype.type, default=None)
if rt is None:
# unsupported inexact scalar
raise TypeError("array type %s is unsupported in linalg" %
(a.dtype.name,))
else:
rt = double
if rt is double:
result_type = double
if is_complex:
t = cdouble
result_type = _complex_types_map[result_type]
else:
t = double
return t, result_type
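# A brief doctest-style sketch of the promotion rule above (using the types
# imported at the top of this module): mixing single and csingle inputs
# computes in cdouble but reports csingle as the result type.
#
# >>> t, result_t = _commonType(empty(2, single), empty(2, csingle))
# >>> (t is cdouble, result_t is csingle)
# (True, True)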
# _fastCopyAndTranspose assumes the input is 2D (as all the calls in here are).
_fastCT = fastCopyAndTranspose
def _to_native_byte_order(*arrays):
ret = []
for arr in arrays:
if arr.dtype.byteorder not in ('=', '|'):
ret.append(asarray(arr, dtype=arr.dtype.newbyteorder('=')))
else:
ret.append(arr)
if len(ret) == 1:
return ret[0]
else:
return ret
def _fastCopyAndTranspose(type, *arrays):
cast_arrays = ()
for a in arrays:
if a.dtype.type is type:
cast_arrays = cast_arrays + (_fastCT(a),)
else:
cast_arrays = cast_arrays + (_fastCT(a.astype(type)),)
if len(cast_arrays) == 1:
return cast_arrays[0]
else:
return cast_arrays
def _assertRank2(*arrays):
for a in arrays:
if len(a.shape) != 2:
raise LinAlgError, '%d-dimensional array given. Array must be \
two-dimensional' % len(a.shape)
def _assertSquareness(*arrays):
for a in arrays:
if max(a.shape) != min(a.shape):
raise LinAlgError, 'Array must be square'
def _assertFinite(*arrays):
for a in arrays:
if not (isfinite(a).all()):
raise LinAlgError, "Array must not contain infs or NaNs"
def _assertNonEmpty(*arrays):
for a in arrays:
if size(a) == 0:
raise LinAlgError("Arrays cannot be empty")
# Linear equations
def tensorsolve(a, b, axes=None):
"""
Solve the tensor equation ``a x = b`` for x.
It is assumed that all indices of `x` are summed over in the product,
together with the rightmost indices of `a`, as is done in, for example,
``tensordot(a, x, axes=len(b.shape))``.
Parameters
----------
a : array_like
Coefficient tensor, of shape ``b.shape + Q``. `Q`, a tuple, equals
the shape of that sub-tensor of `a` consisting of the appropriate
number of its rightmost indices, and must be such that
``prod(Q) == prod(b.shape)`` (in which sense `a` is said to be
'square').
b : array_like
Right-hand tensor, which can be of any shape.
axes : tuple of ints, optional
Axes in `a` to reorder to the right, before inversion.
If None (default), no reordering is done.
Returns
-------
x : ndarray, shape Q
Raises
------
LinAlgError
If `a` is singular or not 'square' (in the above sense).
See Also
--------
tensordot, tensorinv, einsum
Examples
--------
>>> a = np.eye(2*3*4)
>>> a.shape = (2*3, 4, 2, 3, 4)
>>> b = np.random.randn(2*3, 4)
>>> x = np.linalg.tensorsolve(a, b)
>>> x.shape
(2, 3, 4)
>>> np.allclose(np.tensordot(a, x, axes=3), b)
True
"""
a,wrap = _makearray(a)
b = asarray(b)
an = a.ndim
if axes is not None:
allaxes = range(0, an)
for k in axes:
allaxes.remove(k)
allaxes.insert(an, k)
a = a.transpose(allaxes)
oldshape = a.shape[-(an-b.ndim):]
prod = 1
for k in oldshape:
prod *= k
a = a.reshape(-1, prod)
b = b.ravel()
res = wrap(solve(a, b))
res.shape = oldshape
return res
def solve(a, b):
"""
Solve a linear matrix equation, or system of linear scalar equations.
Computes the "exact" solution, `x`, of the well-determined, i.e., full
rank, linear matrix equation `ax = b`.
Parameters
----------
a : array_like, shape (M, M)
Coefficient matrix.
b : array_like, shape (M,) or (M, N)
Ordinate or "dependent variable" values.
Returns
-------
x : ndarray, shape (M,) or (M, N) depending on b
Solution to the system a x = b
Raises
------
LinAlgError
If `a` is singular or not square.
Notes
-----
`solve` is a wrapper for the LAPACK routines `dgesv`_ and
`zgesv`_, the former being used if `a` is real-valued, the latter if
it is complex-valued. The solution to the system of linear equations
is computed using an LU decomposition [1]_ with partial pivoting and
row interchanges.
.. _dgesv: http://www.netlib.org/lapack/double/dgesv.f
.. _zgesv: http://www.netlib.org/lapack/complex16/zgesv.f
`a` must be square and of full-rank, i.e., all rows (or, equivalently,
columns) must be linearly independent; if either is not true, use
`lstsq` for the least-squares best "solution" of the
system/equation.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pg. 22.
Examples
--------
Solve the system of equations ``3 * x0 + x1 = 9`` and ``x0 + 2 * x1 = 8``:
>>> a = np.array([[3,1], [1,2]])
>>> b = np.array([9,8])
>>> x = np.linalg.solve(a, b)
>>> x
array([ 2., 3.])
Check that the solution is correct:
>>> (np.dot(a, x) == b).all()
True
"""
a, _ = _makearray(a)
b, wrap = _makearray(b)
one_eq = len(b.shape) == 1
if one_eq:
b = b[:, newaxis]
_assertRank2(a, b)
_assertSquareness(a)
n_eq = a.shape[0]
n_rhs = b.shape[1]
if n_eq != b.shape[0]:
raise LinAlgError, 'Incompatible dimensions'
t, result_t = _commonType(a, b)
# lapack_routine = _findLapackRoutine('gesv', t)
if isComplexType(t):
lapack_routine = lapack_lite.zgesv
else:
lapack_routine = lapack_lite.dgesv
a, b = _fastCopyAndTranspose(t, a, b)
a, b = _to_native_byte_order(a, b)
pivots = zeros(n_eq, fortran_int)
results = lapack_routine(n_eq, n_rhs, a, n_eq, pivots, b, n_eq, 0)
if results['info'] > 0:
raise LinAlgError, 'Singular matrix'
if one_eq:
return wrap(b.ravel().astype(result_t))
else:
return wrap(b.transpose().astype(result_t))
def tensorinv(a, ind=2):
"""
Compute the 'inverse' of an N-dimensional array.
The result is an inverse for `a` relative to the tensordot operation
``tensordot(a, b, ind)``, i. e., up to floating-point accuracy,
``tensordot(tensorinv(a), a, ind)`` is the "identity" tensor for the
tensordot operation.
Parameters
----------
a : array_like
Tensor to 'invert'. Its shape must be 'square', i. e.,
``prod(a.shape[:ind]) == prod(a.shape[ind:])``.
ind : int, optional
Number of first indices that are involved in the inverse sum.
Must be a positive integer, default is 2.
Returns
-------
b : ndarray
`a`'s tensordot inverse, shape ``a.shape[:ind] + a.shape[ind:]``.
Raises
------
LinAlgError
If `a` is singular or not 'square' (in the above sense).
See Also
--------
tensordot, tensorsolve
Examples
--------
>>> a = np.eye(4*6)
>>> a.shape = (4, 6, 8, 3)
>>> ainv = np.linalg.tensorinv(a, ind=2)
>>> ainv.shape
(8, 3, 4, 6)
>>> b = np.random.randn(4, 6)
>>> np.allclose(np.tensordot(ainv, b), np.linalg.tensorsolve(a, b))
True
>>> a = np.eye(4*6)
>>> a.shape = (24, 8, 3)
>>> ainv = np.linalg.tensorinv(a, ind=1)
>>> ainv.shape
(8, 3, 24)
>>> b = np.random.randn(24)
>>> np.allclose(np.tensordot(ainv, b, 1), np.linalg.tensorsolve(a, b))
True
"""
a = asarray(a)
oldshape = a.shape
prod = 1
if ind > 0:
invshape = oldshape[ind:] + oldshape[:ind]
for k in oldshape[ind:]:
prod *= k
else:
raise ValueError, "Invalid ind argument."
a = a.reshape(prod, -1)
ia = inv(a)
return ia.reshape(*invshape)
# Matrix inversion
def inv(a):
"""
Compute the (multiplicative) inverse of a matrix.
Given a square matrix `a`, return the matrix `ainv` satisfying
``dot(a, ainv) = dot(ainv, a) = eye(a.shape[0])``.
Parameters
----------
a : array_like, shape (M, M)
Matrix to be inverted.
Returns
-------
ainv : ndarray or matrix, shape (M, M)
(Multiplicative) inverse of the matrix `a`.
Raises
------
LinAlgError
If `a` is singular or not square.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1., 2.], [3., 4.]])
>>> ainv = LA.inv(a)
>>> np.allclose(np.dot(a, ainv), np.eye(2))
True
>>> np.allclose(np.dot(ainv, a), np.eye(2))
True
If a is a matrix object, then the return value is a matrix as well:
>>> ainv = LA.inv(np.matrix(a))
>>> ainv
matrix([[-2. , 1. ],
[ 1.5, -0.5]])
"""
a, wrap = _makearray(a)
return wrap(solve(a, identity(a.shape[0], dtype=a.dtype)))
# Cholesky decomposition
def cholesky(a):
"""
Cholesky decomposition.
Return the Cholesky decomposition, `L * L.H`, of the square matrix `a`,
where `L` is lower-triangular and .H is the conjugate transpose operator
(which is the ordinary transpose if `a` is real-valued). `a` must be
Hermitian (symmetric if real-valued) and positive-definite. Only `L` is
actually returned.
Parameters
----------
a : array_like, shape (M, M)
Hermitian (symmetric if all elements are real), positive-definite
input matrix.
Returns
-------
L : ndarray, or matrix object if `a` is, shape (M, M)
Lower-triangular Cholesky factor of a.
Raises
------
LinAlgError
If the decomposition fails, for example, if `a` is not
positive-definite.
Notes
-----
The Cholesky decomposition is often used as a fast way of solving
.. math:: A \\mathbf{x} = \\mathbf{b}
(when `A` is both Hermitian/symmetric and positive-definite).
First, we solve for :math:`\\mathbf{y}` in
.. math:: L \\mathbf{y} = \\mathbf{b},
and then for :math:`\\mathbf{x}` in
.. math:: L.H \\mathbf{x} = \\mathbf{y}.
Examples
--------
>>> A = np.array([[1,-2j],[2j,5]])
>>> A
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> L = np.linalg.cholesky(A)
>>> L
array([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
>>> np.dot(L, L.T.conj()) # verify that L * L.H = A
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> A = [[1,-2j],[2j,5]] # what happens if A is only array_like?
>>> np.linalg.cholesky(A) # an ndarray object is returned
array([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
>>> # But a matrix object is returned if A is a matrix object
>>> LA.cholesky(np.matrix(A))
matrix([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
"""
a, wrap = _makearray(a)
_assertRank2(a)
_assertSquareness(a)
t, result_t = _commonType(a)
a = _fastCopyAndTranspose(t, a)
a = _to_native_byte_order(a)
m = a.shape[0]
n = a.shape[1]
if isComplexType(t):
lapack_routine = lapack_lite.zpotrf
else:
lapack_routine = lapack_lite.dpotrf
results = lapack_routine(_L, n, a, m, 0)
if results['info'] > 0:
raise LinAlgError, 'Matrix is not positive definite - \
Cholesky decomposition cannot be computed'
s = triu(a, k=0).transpose()
if (s.dtype != result_t):
s = s.astype(result_t)
return wrap(s)
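# --- Illustrative sketch (added): the two-triangular-solve pattern from the
# cholesky() Notes above, written with plain solve() since this module has no
# dedicated triangular solver. `_demo_cholesky_solve` is an invented name.
def _demo_cholesky_solve():
    import numpy as np
    A = np.array([[4., 2.], [2., 3.]])    # symmetric positive-definite
    b = np.array([1., 2.])
    L = np.linalg.cholesky(A)             # A = L L.H
    y = np.linalg.solve(L, b)             # forward step: solve L y = b
    x = np.linalg.solve(L.T.conj(), y)    # back step:    solve L.H x = y
    assert np.allclose(np.dot(A, x), b)
    return x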
# QR decomposition
def qr(a, mode='full'):
"""
Compute the qr factorization of a matrix.
Factor the matrix `a` as *qr*, where `q` is orthonormal and `r` is
upper-triangular.
Parameters
----------
a : array_like
Matrix to be factored, of shape (M, N).
mode : {'full', 'r', 'economic'}, optional
Specifies the values to be returned. 'full' is the default.
        Economic mode is slightly faster than 'r' mode if only `r` is needed.
Returns
-------
q : ndarray of float or complex, optional
The orthonormal matrix, of shape (M, K). Only returned if
``mode='full'``.
r : ndarray of float or complex, optional
The upper-triangular matrix, of shape (K, N) with K = min(M, N).
Only returned when ``mode='full'`` or ``mode='r'``.
a2 : ndarray of float or complex, optional
        Array of shape (M, N), only returned when ``mode='economic'``.
The diagonal and the upper triangle of `a2` contains `r`, while
the rest of the matrix is undefined.
Raises
------
LinAlgError
If factoring fails.
Notes
-----
This is an interface to the LAPACK routines dgeqrf, zgeqrf,
dorgqr, and zungqr.
For more information on the qr factorization, see for example:
http://en.wikipedia.org/wiki/QR_factorization
Subclasses of `ndarray` are preserved, so if `a` is of type `matrix`,
all the return values will be matrices too.
Examples
--------
>>> a = np.random.randn(9, 6)
>>> q, r = np.linalg.qr(a)
>>> np.allclose(a, np.dot(q, r)) # a does equal qr
True
>>> r2 = np.linalg.qr(a, mode='r')
>>> r3 = np.linalg.qr(a, mode='economic')
>>> np.allclose(r, r2) # mode='r' returns the same r as mode='full'
True
>>> # But only triu parts are guaranteed equal when mode='economic'
>>> np.allclose(r, np.triu(r3[:6,:6], k=0))
True
Example illustrating a common use of `qr`: solving of least squares
problems
What are the least-squares-best `m` and `y0` in ``y = y0 + mx`` for
the following data: {(0,1), (1,0), (1,2), (2,1)}. (Graph the points
    and you'll see that the best fit is the flat line m = 0, y0 = 1.)  The answer is provided
by solving the over-determined matrix equation ``Ax = b``, where::
A = array([[0, 1], [1, 1], [1, 1], [2, 1]])
      x = array([[m], [y0]])
b = array([[1], [0], [2], [1]])
If A = qr such that q is orthonormal (which is always possible via
Gram-Schmidt), then ``x = inv(r) * (q.T) * b``. (In numpy practice,
however, we simply use `lstsq`.)
>>> A = np.array([[0, 1], [1, 1], [1, 1], [2, 1]])
>>> A
array([[0, 1],
[1, 1],
[1, 1],
[2, 1]])
>>> b = np.array([1, 0, 2, 1])
>>> q, r = LA.qr(A)
>>> p = np.dot(q.T, b)
>>> np.dot(LA.inv(r), p)
array([ 1.1e-16, 1.0e+00])
"""
a, wrap = _makearray(a)
_assertRank2(a)
m, n = a.shape
t, result_t = _commonType(a)
a = _fastCopyAndTranspose(t, a)
a = _to_native_byte_order(a)
mn = min(m, n)
tau = zeros((mn,), t)
if isComplexType(t):
lapack_routine = lapack_lite.zgeqrf
routine_name = 'zgeqrf'
else:
lapack_routine = lapack_lite.dgeqrf
routine_name = 'dgeqrf'
# calculate optimal size of work data 'work'
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, n, a, m, tau, work, -1, 0)
if results['info'] != 0:
raise LinAlgError, '%s returns %d' % (routine_name, results['info'])
# do qr decomposition
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
results = lapack_routine(m, n, a, m, tau, work, lwork, 0)
if results['info'] != 0:
raise LinAlgError, '%s returns %d' % (routine_name, results['info'])
# economic mode. Isn't actually economic.
if mode[0] == 'e':
if t != result_t :
a = a.astype(result_t)
return a.T
# generate r
r = _fastCopyAndTranspose(result_t, a[:,:mn])
for i in range(mn):
r[i,:i].fill(0.0)
# 'r'-mode, that is, calculate only r
if mode[0] == 'r':
return r
# from here on: build orthonormal matrix q from a
if isComplexType(t):
lapack_routine = lapack_lite.zungqr
routine_name = 'zungqr'
else:
lapack_routine = lapack_lite.dorgqr
routine_name = 'dorgqr'
# determine optimal lwork
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, mn, mn, a, m, tau, work, -1, 0)
if results['info'] != 0:
raise LinAlgError, '%s returns %d' % (routine_name, results['info'])
# compute q
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
results = lapack_routine(m, mn, mn, a, m, tau, work, lwork, 0)
if results['info'] != 0:
raise LinAlgError, '%s returns %d' % (routine_name, results['info'])
q = _fastCopyAndTranspose(result_t, a[:mn,:])
return wrap(q), wrap(r)
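# --- Illustrative sketch (added): the least-squares recipe from the qr()
# docstring, x = inv(r) * q.T * b, cross-checked against lstsq(). All names
# here are illustrative; only public numpy.linalg calls are used.
def _demo_qr_least_squares():
    import numpy as np
    A = np.array([[0., 1.], [1., 1.], [1., 1.], [2., 1.]])
    b = np.array([1., 0., 2., 1.])
    q, r = np.linalg.qr(A)
    x_qr = np.linalg.solve(r, np.dot(q.T, b))   # solve r x = q.T b
    x_ls = np.linalg.lstsq(A, b)[0]
    assert np.allclose(x_qr, x_ls)
    return x_qr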
# Eigenvalues
def eigvals(a):
"""
Compute the eigenvalues of a general matrix.
Main difference between `eigvals` and `eig`: the eigenvectors aren't
returned.
Parameters
----------
a : array_like, shape (M, M)
A complex- or real-valued matrix whose eigenvalues will be computed.
Returns
-------
w : ndarray, shape (M,)
The eigenvalues, each repeated according to its multiplicity.
They are not necessarily ordered, nor are they necessarily
real for real matrices.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eig : eigenvalues and right eigenvectors of general arrays
eigvalsh : eigenvalues of symmetric or Hermitian arrays.
eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays.
Notes
-----
This is a simple interface to the LAPACK routines dgeev and zgeev
that sets those routines' flags to return only the eigenvalues of
general real and complex arrays, respectively.
Examples
--------
Illustration, using the fact that the eigenvalues of a diagonal matrix
are its diagonal elements, that multiplying a matrix on the left
by an orthogonal matrix, `Q`, and on the right by `Q.T` (the transpose
of `Q`), preserves the eigenvalues of the "middle" matrix. In other words,
if `Q` is orthogonal, then ``Q * A * Q.T`` has the same eigenvalues as
``A``:
>>> from numpy import linalg as LA
>>> x = np.random.random()
>>> Q = np.array([[np.cos(x), -np.sin(x)], [np.sin(x), np.cos(x)]])
>>> LA.norm(Q[0, :]), LA.norm(Q[1, :]), np.dot(Q[0, :],Q[1, :])
(1.0, 1.0, 0.0)
Now multiply a diagonal matrix by Q on one side and by Q.T on the other:
>>> D = np.diag((-1,1))
>>> LA.eigvals(D)
array([-1., 1.])
>>> A = np.dot(Q, D)
>>> A = np.dot(A, Q.T)
>>> LA.eigvals(A)
array([ 1., -1.])
"""
a, wrap = _makearray(a)
_assertRank2(a)
_assertSquareness(a)
_assertFinite(a)
t, result_t = _commonType(a)
real_t = _linalgRealType(t)
a = _fastCopyAndTranspose(t, a)
a = _to_native_byte_order(a)
n = a.shape[0]
dummy = zeros((1,), t)
if isComplexType(t):
lapack_routine = lapack_lite.zgeev
w = zeros((n,), t)
rwork = zeros((n,), real_t)
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(_N, _N, n, a, n, w,
dummy, 1, dummy, 1, work, -1, rwork, 0)
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
results = lapack_routine(_N, _N, n, a, n, w,
dummy, 1, dummy, 1, work, lwork, rwork, 0)
else:
lapack_routine = lapack_lite.dgeev
wr = zeros((n,), t)
wi = zeros((n,), t)
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(_N, _N, n, a, n, wr, wi,
dummy, 1, dummy, 1, work, -1, 0)
lwork = int(work[0])
work = zeros((lwork,), t)
results = lapack_routine(_N, _N, n, a, n, wr, wi,
dummy, 1, dummy, 1, work, lwork, 0)
if all(wi == 0.):
w = wr
result_t = _realType(result_t)
else:
w = wr+1j*wi
result_t = _complexType(result_t)
if results['info'] > 0:
raise LinAlgError, 'Eigenvalues did not converge'
return w.astype(result_t)
def eigvalsh(a, UPLO='L'):
"""
Compute the eigenvalues of a Hermitian or real symmetric matrix.
Main difference from eigh: the eigenvectors are not computed.
Parameters
----------
a : array_like, shape (M, M)
A complex- or real-valued matrix whose eigenvalues are to be
computed.
UPLO : {'L', 'U'}, optional
Specifies whether the calculation is done with the lower triangular
part of `a` ('L', default) or the upper triangular part ('U').
Returns
-------
w : ndarray, shape (M,)
The eigenvalues, not necessarily ordered, each repeated according to
its multiplicity.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays.
eigvals : eigenvalues of general real or complex arrays.
eig : eigenvalues and right eigenvectors of general real or complex
arrays.
Notes
-----
This is a simple interface to the LAPACK routines dsyevd and zheevd
that sets those routines' flags to return only the eigenvalues of
real symmetric and complex Hermitian arrays, respectively.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, -2j], [2j, 5]])
>>> LA.eigvalsh(a)
array([ 0.17157288+0.j, 5.82842712+0.j])
"""
UPLO = asbytes(UPLO)
a, wrap = _makearray(a)
_assertRank2(a)
_assertSquareness(a)
t, result_t = _commonType(a)
real_t = _linalgRealType(t)
a = _fastCopyAndTranspose(t, a)
a = _to_native_byte_order(a)
n = a.shape[0]
liwork = 5*n+3
iwork = zeros((liwork,), fortran_int)
if isComplexType(t):
lapack_routine = lapack_lite.zheevd
w = zeros((n,), real_t)
lwork = 1
work = zeros((lwork,), t)
lrwork = 1
rwork = zeros((lrwork,), real_t)
results = lapack_routine(_N, UPLO, n, a, n, w, work, -1,
rwork, -1, iwork, liwork, 0)
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
lrwork = int(rwork[0])
rwork = zeros((lrwork,), real_t)
results = lapack_routine(_N, UPLO, n, a, n, w, work, lwork,
rwork, lrwork, iwork, liwork, 0)
else:
lapack_routine = lapack_lite.dsyevd
w = zeros((n,), t)
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(_N, UPLO, n, a, n, w, work, -1,
iwork, liwork, 0)
lwork = int(work[0])
work = zeros((lwork,), t)
results = lapack_routine(_N, UPLO, n, a, n, w, work, lwork,
iwork, liwork, 0)
if results['info'] > 0:
raise LinAlgError, 'Eigenvalues did not converge'
return w.astype(result_t)
def _convertarray(a):
t, result_t = _commonType(a)
a = _fastCT(a.astype(t))
return a, t, result_t
# Eigenvectors
def eig(a):
"""
Compute the eigenvalues and right eigenvectors of a square array.
Parameters
----------
a : array_like, shape (M, M)
A square array of real or complex elements.
Returns
-------
w : ndarray, shape (M,)
The eigenvalues, each repeated according to its multiplicity.
The eigenvalues are not necessarily ordered, nor are they
necessarily real for real arrays (though for real arrays
complex-valued eigenvalues should occur in conjugate pairs).
v : ndarray, shape (M, M)
The normalized (unit "length") eigenvectors, such that the
column ``v[:,i]`` is the eigenvector corresponding to the
eigenvalue ``w[i]``.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigvalsh : eigenvalues of a symmetric or Hermitian (conjugate symmetric)
array.
eigvals : eigenvalues of a non-symmetric array.
Notes
-----
This is a simple interface to the LAPACK routines dgeev and zgeev
which compute the eigenvalues and eigenvectors of, respectively,
general real- and complex-valued square arrays.
The number `w` is an eigenvalue of `a` if there exists a vector
`v` such that ``dot(a,v) = w * v``. Thus, the arrays `a`, `w`, and
`v` satisfy the equations ``dot(a[i,:], v[i]) = w[i] * v[:,i]``
for :math:`i \\in \\{0,...,M-1\\}`.
The array `v` of eigenvectors may not be of maximum rank, that is, some
of the columns may be linearly dependent, although round-off error may
obscure that fact. If the eigenvalues are all different, then theoretically
the eigenvectors are linearly independent. Likewise, the (complex-valued)
matrix of eigenvectors `v` is unitary if the matrix `a` is normal, i.e.,
if ``dot(a, a.H) = dot(a.H, a)``, where `a.H` denotes the conjugate
transpose of `a`.
Finally, it is emphasized that `v` consists of the *right* (as in
right-hand side) eigenvectors of `a`. A vector `y` satisfying
``dot(y.T, a) = z * y.T`` for some number `z` is called a *left*
eigenvector of `a`, and, in general, the left and right eigenvectors
of a matrix are not necessarily the (perhaps conjugate) transposes
of each other.
References
----------
G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, FL,
Academic Press, Inc., 1980, Various pp.
Examples
--------
>>> from numpy import linalg as LA
(Almost) trivial example with real e-values and e-vectors.
>>> w, v = LA.eig(np.diag((1, 2, 3)))
>>> w; v
array([ 1., 2., 3.])
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
Real matrix possessing complex e-values and e-vectors; note that the
e-values are complex conjugates of each other.
>>> w, v = LA.eig(np.array([[1, -1], [1, 1]]))
>>> w; v
array([ 1. + 1.j, 1. - 1.j])
array([[ 0.70710678+0.j , 0.70710678+0.j ],
[ 0.00000000-0.70710678j, 0.00000000+0.70710678j]])
Complex-valued matrix with real e-values (but complex-valued e-vectors);
note that a.conj().T = a, i.e., a is Hermitian.
>>> a = np.array([[1, 1j], [-1j, 1]])
>>> w, v = LA.eig(a)
>>> w; v
array([ 2.00000000e+00+0.j, 5.98651912e-36+0.j]) # i.e., {2, 0}
array([[ 0.00000000+0.70710678j, 0.70710678+0.j ],
[ 0.70710678+0.j , 0.00000000+0.70710678j]])
Be careful about round-off error!
>>> a = np.array([[1 + 1e-9, 0], [0, 1 - 1e-9]])
>>> # Theor. e-values are 1 +/- 1e-9
>>> w, v = LA.eig(a)
>>> w; v
array([ 1., 1.])
array([[ 1., 0.],
[ 0., 1.]])
"""
a, wrap = _makearray(a)
_assertRank2(a)
_assertSquareness(a)
_assertFinite(a)
a, t, result_t = _convertarray(a) # convert to double or cdouble type
a = _to_native_byte_order(a)
real_t = _linalgRealType(t)
n = a.shape[0]
dummy = zeros((1,), t)
if isComplexType(t):
# Complex routines take different arguments
lapack_routine = lapack_lite.zgeev
w = zeros((n,), t)
v = zeros((n, n), t)
lwork = 1
work = zeros((lwork,), t)
rwork = zeros((2*n,), real_t)
results = lapack_routine(_N, _V, n, a, n, w,
dummy, 1, v, n, work, -1, rwork, 0)
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
results = lapack_routine(_N, _V, n, a, n, w,
dummy, 1, v, n, work, lwork, rwork, 0)
else:
lapack_routine = lapack_lite.dgeev
wr = zeros((n,), t)
wi = zeros((n,), t)
vr = zeros((n, n), t)
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(_N, _V, n, a, n, wr, wi,
dummy, 1, vr, n, work, -1, 0)
lwork = int(work[0])
work = zeros((lwork,), t)
results = lapack_routine(_N, _V, n, a, n, wr, wi,
dummy, 1, vr, n, work, lwork, 0)
if all(wi == 0.0):
w = wr
v = vr
result_t = _realType(result_t)
else:
w = wr+1j*wi
v = array(vr, w.dtype)
ind = flatnonzero(wi != 0.0) # indices of complex e-vals
for i in range(len(ind)//2):
v[ind[2*i]] = vr[ind[2*i]] + 1j*vr[ind[2*i+1]]
v[ind[2*i+1]] = vr[ind[2*i]] - 1j*vr[ind[2*i+1]]
result_t = _complexType(result_t)
if results['info'] > 0:
raise LinAlgError, 'Eigenvalues did not converge'
vt = v.transpose().astype(result_t)
return w.astype(result_t), wrap(vt)
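# --- Illustrative sketch (added): spot-check of the defining relation
# dot(a, v[:, i]) == w[i] * v[:, i] discussed in the eig() Notes above.
def _demo_eig_check():
    import numpy as np
    a = np.random.randn(5, 5)
    w, v = np.linalg.eig(a)
    for i in range(5):
        assert np.allclose(np.dot(a, v[:, i]), w[i] * v[:, i])
    return w, v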
def eigh(a, UPLO='L'):
"""
Return the eigenvalues and eigenvectors of a Hermitian or symmetric matrix.
Returns two objects, a 1-D array containing the eigenvalues of `a`, and
a 2-D square array or matrix (depending on the input type) of the
corresponding eigenvectors (in columns).
Parameters
----------
a : array_like, shape (M, M)
A complex Hermitian or real symmetric matrix.
UPLO : {'L', 'U'}, optional
Specifies whether the calculation is done with the lower triangular
part of `a` ('L', default) or the upper triangular part ('U').
Returns
-------
w : ndarray, shape (M,)
The eigenvalues, not necessarily ordered.
v : ndarray, or matrix object if `a` is, shape (M, M)
The column ``v[:, i]`` is the normalized eigenvector corresponding
to the eigenvalue ``w[i]``.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigvalsh : eigenvalues of symmetric or Hermitian arrays.
eig : eigenvalues and right eigenvectors for non-symmetric arrays.
eigvals : eigenvalues of non-symmetric arrays.
Notes
-----
This is a simple interface to the LAPACK routines dsyevd and zheevd,
which compute the eigenvalues and eigenvectors of real symmetric and
complex Hermitian arrays, respectively.
The eigenvalues of real symmetric or complex Hermitian matrices are
always real. [1]_ The array `v` of (column) eigenvectors is unitary
and `a`, `w`, and `v` satisfy the equations
``dot(a, v[:, i]) = w[i] * v[:, i]``.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pg. 222.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, -2j], [2j, 5]])
>>> a
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> w, v = LA.eigh(a)
>>> w; v
array([ 0.17157288, 5.82842712])
array([[-0.92387953+0.j , -0.38268343+0.j ],
[ 0.00000000+0.38268343j, 0.00000000-0.92387953j]])
>>> np.dot(a, v[:, 0]) - w[0] * v[:, 0] # verify 1st e-val/vec pair
array([2.77555756e-17 + 0.j, 0. + 1.38777878e-16j])
>>> np.dot(a, v[:, 1]) - w[1] * v[:, 1] # verify 2nd e-val/vec pair
array([ 0.+0.j, 0.+0.j])
>>> A = np.matrix(a) # what happens if input is a matrix object
>>> A
matrix([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> w, v = LA.eigh(A)
>>> w; v
array([ 0.17157288, 5.82842712])
matrix([[-0.92387953+0.j , -0.38268343+0.j ],
[ 0.00000000+0.38268343j, 0.00000000-0.92387953j]])
"""
UPLO = asbytes(UPLO)
a, wrap = _makearray(a)
_assertRank2(a)
_assertSquareness(a)
t, result_t = _commonType(a)
real_t = _linalgRealType(t)
a = _fastCopyAndTranspose(t, a)
a = _to_native_byte_order(a)
n = a.shape[0]
liwork = 5*n+3
iwork = zeros((liwork,), fortran_int)
if isComplexType(t):
lapack_routine = lapack_lite.zheevd
w = zeros((n,), real_t)
lwork = 1
work = zeros((lwork,), t)
lrwork = 1
rwork = zeros((lrwork,), real_t)
results = lapack_routine(_V, UPLO, n, a, n, w, work, -1,
rwork, -1, iwork, liwork, 0)
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
lrwork = int(rwork[0])
rwork = zeros((lrwork,), real_t)
results = lapack_routine(_V, UPLO, n, a, n, w, work, lwork,
rwork, lrwork, iwork, liwork, 0)
else:
lapack_routine = lapack_lite.dsyevd
w = zeros((n,), t)
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(_V, UPLO, n, a, n, w, work, -1,
iwork, liwork, 0)
lwork = int(work[0])
work = zeros((lwork,), t)
results = lapack_routine(_V, UPLO, n, a, n, w, work, lwork,
iwork, liwork, 0)
if results['info'] > 0:
raise LinAlgError, 'Eigenvalues did not converge'
at = a.transpose().astype(result_t)
return w.astype(_realType(result_t)), wrap(at)
# Singular value decomposition
def svd(a, full_matrices=1, compute_uv=1):
"""
Singular Value Decomposition.
Factors the matrix `a` as ``u * np.diag(s) * v``, where `u` and `v`
are unitary and `s` is a 1-d array of `a`'s singular values.
Parameters
----------
a : array_like
A real or complex matrix of shape (`M`, `N`) .
full_matrices : bool, optional
If True (default), `u` and `v` have the shapes (`M`, `M`) and
(`N`, `N`), respectively. Otherwise, the shapes are (`M`, `K`)
and (`K`, `N`), respectively, where `K` = min(`M`, `N`).
compute_uv : bool, optional
Whether or not to compute `u` and `v` in addition to `s`. True
by default.
Returns
-------
u : ndarray
Unitary matrix. The shape of `u` is (`M`, `M`) or (`M`, `K`)
depending on value of ``full_matrices``.
s : ndarray
The singular values, sorted so that ``s[i] >= s[i+1]``. `s` is
a 1-d array of length min(`M`, `N`).
v : ndarray
Unitary matrix of shape (`N`, `N`) or (`K`, `N`), depending on
``full_matrices``.
Raises
------
LinAlgError
If SVD computation does not converge.
Notes
-----
The SVD is commonly written as ``a = U S V.H``. The `v` returned
by this function is ``V.H`` and ``u = U``.
If ``U`` is a unitary matrix, it means that it
satisfies ``U.H = inv(U)``.
The rows of `v` are the eigenvectors of ``a.H a``. The columns
of `u` are the eigenvectors of ``a a.H``. For row ``i`` in
`v` and column ``i`` in `u`, the corresponding eigenvalue is
``s[i]**2``.
If `a` is a `matrix` object (as opposed to an `ndarray`), then so
are all the return values.
Examples
--------
>>> a = np.random.randn(9, 6) + 1j*np.random.randn(9, 6)
Reconstruction based on full SVD:
>>> U, s, V = np.linalg.svd(a, full_matrices=True)
>>> U.shape, V.shape, s.shape
    ((9, 9), (6, 6), (6,))
>>> S = np.zeros((9, 6), dtype=complex)
>>> S[:6, :6] = np.diag(s)
>>> np.allclose(a, np.dot(U, np.dot(S, V)))
True
Reconstruction based on reduced SVD:
>>> U, s, V = np.linalg.svd(a, full_matrices=False)
>>> U.shape, V.shape, s.shape
((9, 6), (6, 6), (6,))
>>> S = np.diag(s)
>>> np.allclose(a, np.dot(U, np.dot(S, V)))
True
"""
a, wrap = _makearray(a)
_assertRank2(a)
_assertNonEmpty(a)
m, n = a.shape
t, result_t = _commonType(a)
real_t = _linalgRealType(t)
a = _fastCopyAndTranspose(t, a)
a = _to_native_byte_order(a)
s = zeros((min(n, m),), real_t)
if compute_uv:
if full_matrices:
nu = m
nvt = n
option = _A
else:
nu = min(n, m)
nvt = min(n, m)
option = _S
u = zeros((nu, m), t)
vt = zeros((n, nvt), t)
else:
option = _N
nu = 1
nvt = 1
u = empty((1, 1), t)
vt = empty((1, 1), t)
iwork = zeros((8*min(m, n),), fortran_int)
if isComplexType(t):
lapack_routine = lapack_lite.zgesdd
lrwork = min(m,n)*max(5*min(m,n)+7, 2*max(m,n)+2*min(m,n)+1)
rwork = zeros((lrwork,), real_t)
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(option, m, n, a, m, s, u, m, vt, nvt,
work, -1, rwork, iwork, 0)
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
results = lapack_routine(option, m, n, a, m, s, u, m, vt, nvt,
work, lwork, rwork, iwork, 0)
else:
lapack_routine = lapack_lite.dgesdd
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(option, m, n, a, m, s, u, m, vt, nvt,
work, -1, iwork, 0)
lwork = int(work[0])
work = zeros((lwork,), t)
results = lapack_routine(option, m, n, a, m, s, u, m, vt, nvt,
work, lwork, iwork, 0)
if results['info'] > 0:
raise LinAlgError, 'SVD did not converge'
s = s.astype(_realType(result_t))
if compute_uv:
u = u.transpose().astype(result_t)
vt = vt.transpose().astype(result_t)
return wrap(u), s, wrap(vt)
else:
return s
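# --- Illustrative sketch (added): rank-k truncation of the SVD factors,
# using the convention documented above that the returned `v` is V.H.
# The value of k is an arbitrary example.
def _demo_svd_low_rank(k=2):
    import numpy as np
    a = np.random.randn(9, 6)
    u, s, vh = np.linalg.svd(a, full_matrices=False)
    a_k = np.dot(u[:, :k] * s[:k], vh[:k, :])   # scale columns of u by s
    assert a_k.shape == a.shape
    return a_k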
def cond(x, p=None):
"""
Compute the condition number of a matrix.
This function is capable of returning the condition number using
one of seven different norms, depending on the value of `p` (see
Parameters below).
Parameters
----------
x : array_like, shape (M, N)
The matrix whose condition number is sought.
p : {None, 1, -1, 2, -2, inf, -inf, 'fro'}, optional
Order of the norm:
===== ============================
p norm for matrices
===== ============================
None 2-norm, computed directly using the ``SVD``
'fro' Frobenius norm
inf max(sum(abs(x), axis=1))
-inf min(sum(abs(x), axis=1))
1 max(sum(abs(x), axis=0))
-1 min(sum(abs(x), axis=0))
2 2-norm (largest sing. value)
-2 smallest singular value
===== ============================
inf means the numpy.inf object, and the Frobenius norm is
the root-of-sum-of-squares norm.
Returns
-------
c : {float, inf}
The condition number of the matrix. May be infinite.
See Also
--------
numpy.linalg.linalg.norm
Notes
-----
The condition number of `x` is defined as the norm of `x` times the
norm of the inverse of `x` [1]_; the norm can be the usual L2-norm
(root-of-sum-of-squares) or one of a number of other matrix norms.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, Orlando, FL,
Academic Press, Inc., 1980, pg. 285.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, 0, -1], [0, 1, 0], [1, 0, 1]])
>>> a
array([[ 1, 0, -1],
[ 0, 1, 0],
[ 1, 0, 1]])
>>> LA.cond(a)
1.4142135623730951
>>> LA.cond(a, 'fro')
3.1622776601683795
>>> LA.cond(a, np.inf)
2.0
>>> LA.cond(a, -np.inf)
1.0
>>> LA.cond(a, 1)
2.0
>>> LA.cond(a, -1)
1.0
>>> LA.cond(a, 2)
1.4142135623730951
>>> LA.cond(a, -2)
0.70710678118654746
>>> min(LA.svd(a, compute_uv=0))*min(LA.svd(LA.inv(a), compute_uv=0))
0.70710678118654746
"""
x = asarray(x) # in case we have a matrix
if p is None:
s = svd(x,compute_uv=False)
return s[0]/s[-1]
else:
return norm(x,p)*norm(inv(x),p)
def matrix_rank(M, tol=None):
"""
Return matrix rank of array using SVD method
Rank of the array is the number of SVD singular values of the
array that are greater than `tol`.
Parameters
----------
M : array_like
array of <=2 dimensions
tol : {None, float}
threshold below which SVD values are considered zero. If `tol` is
None, and ``S`` is an array with singular values for `M`, and
``eps`` is the epsilon value for datatype of ``S``, then `tol` is
set to ``S.max() * eps``.
Notes
-----
Golub and van Loan [1]_ define "numerical rank deficiency" as using
tol=eps*S[0] (where S[0] is the maximum singular value and thus the
2-norm of the matrix). This is one definition of rank deficiency,
and the one we use here. When floating point roundoff is the main
concern, then "numerical rank deficiency" is a reasonable choice. In
some cases you may prefer other definitions. The most useful measure
of the tolerance depends on the operations you intend to use on your
matrix. For example, if your data come from uncertain measurements
with uncertainties greater than floating point epsilon, choosing a
tolerance near that uncertainty may be preferable. The tolerance
may be absolute if the uncertainties are absolute rather than
relative.
References
----------
.. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*.
Baltimore: Johns Hopkins University Press, 1996.
Examples
--------
>>> matrix_rank(np.eye(4)) # Full rank matrix
4
>>> I=np.eye(4); I[-1,-1] = 0. # rank deficient matrix
>>> matrix_rank(I)
3
>>> matrix_rank(np.ones((4,))) # 1 dimension - rank 1 unless all 0
1
>>> matrix_rank(np.zeros((4,)))
0
"""
M = asarray(M)
if M.ndim > 2:
raise TypeError('array should have 2 or fewer dimensions')
if M.ndim < 2:
return int(not all(M==0))
S = svd(M, compute_uv=False)
if tol is None:
tol = S.max() * finfo(S.dtype).eps
return sum(S > tol)
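# --- Illustrative sketch (added): the absolute-tolerance use case from the
# matrix_rank() Notes above -- singular values below a known noise floor are
# treated as zero. The 1e-6 floor is an arbitrary example value.
def _demo_matrix_rank_tol():
    import numpy as np
    M = np.diag([1.0, 0.5, 1e-8])                   # one value near zero
    assert np.linalg.matrix_rank(M) == 3            # default eps-based tol
    assert np.linalg.matrix_rank(M, tol=1e-6) == 2  # noise floor applied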
# Generalized inverse
def pinv(a, rcond=1e-15 ):
"""
Compute the (Moore-Penrose) pseudo-inverse of a matrix.
Calculate the generalized inverse of a matrix using its
singular-value decomposition (SVD) and including all
*large* singular values.
Parameters
----------
a : array_like, shape (M, N)
Matrix to be pseudo-inverted.
rcond : float
Cutoff for small singular values.
Singular values smaller (in modulus) than
`rcond` * largest_singular_value (again, in modulus)
are set to zero.
Returns
-------
B : ndarray, shape (N, M)
The pseudo-inverse of `a`. If `a` is a `matrix` instance, then so
is `B`.
Raises
------
LinAlgError
If the SVD computation does not converge.
Notes
-----
The pseudo-inverse of a matrix A, denoted :math:`A^+`, is
defined as: "the matrix that 'solves' [the least-squares problem]
:math:`Ax = b`," i.e., if :math:`\\bar{x}` is said solution, then
:math:`A^+` is that matrix such that :math:`\\bar{x} = A^+b`.
It can be shown that if :math:`Q_1 \\Sigma Q_2^T = A` is the singular
value decomposition of A, then
:math:`A^+ = Q_2 \\Sigma^+ Q_1^T`, where :math:`Q_{1,2}` are
orthogonal matrices, :math:`\\Sigma` is a diagonal matrix consisting
of A's so-called singular values, (followed, typically, by
zeros), and then :math:`\\Sigma^+` is simply the diagonal matrix
consisting of the reciprocals of A's singular values
(again, followed by zeros). [1]_
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pp. 139-142.
Examples
--------
The following example checks that ``a * a+ * a == a`` and
``a+ * a * a+ == a+``:
>>> a = np.random.randn(9, 6)
>>> B = np.linalg.pinv(a)
>>> np.allclose(a, np.dot(a, np.dot(B, a)))
True
>>> np.allclose(B, np.dot(B, np.dot(a, B)))
True
"""
a, wrap = _makearray(a)
_assertNonEmpty(a)
a = a.conjugate()
u, s, vt = svd(a, 0)
m = u.shape[0]
n = vt.shape[1]
cutoff = rcond*maximum.reduce(s)
for i in range(min(n, m)):
if s[i] > cutoff:
s[i] = 1./s[i]
else:
s[i] = 0.;
res = dot(transpose(vt), multiply(s[:, newaxis],transpose(u)))
return wrap(res)
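# --- Illustrative sketch (added): pinv rebuilt from the SVD formula in the
# Notes above, A+ = V diag(1/s) U.H with small singular values zeroed, and
# compared against pinv() itself. Names are illustrative.
def _demo_pinv_from_svd(rcond=1e-15):
    import numpy as np
    a = np.random.randn(9, 6)
    u, s, vh = np.linalg.svd(a, full_matrices=False)
    s_inv = np.where(s > rcond * s.max(), 1. / s, 0.)
    B = np.dot(vh.T.conj() * s_inv, u.T.conj())   # V diag(1/s) U.H
    assert np.allclose(B, np.linalg.pinv(a, rcond))
    return B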
# Determinant
def slogdet(a):
"""
Compute the sign and (natural) logarithm of the determinant of an array.
    If an array has a very small or very large determinant, then a call to
`det` may overflow or underflow. This routine is more robust against such
issues, because it computes the logarithm of the determinant rather than
the determinant itself.
Parameters
----------
a : array_like
Input array, has to be a square 2-D array.
Returns
-------
sign : float or complex
A number representing the sign of the determinant. For a real matrix,
this is 1, 0, or -1. For a complex matrix, this is a complex number
with absolute value 1 (i.e., it is on the unit circle), or else 0.
logdet : float
The natural log of the absolute value of the determinant.
If the determinant is zero, then `sign` will be 0 and `logdet` will be
-Inf. In all cases, the determinant is equal to ``sign * np.exp(logdet)``.
See Also
--------
det
Notes
-----
The determinant is computed via LU factorization using the LAPACK
routine z/dgetrf.
.. versionadded:: 1.6.0.
Examples
--------
The determinant of a 2-D array ``[[a, b], [c, d]]`` is ``ad - bc``:
>>> a = np.array([[1, 2], [3, 4]])
>>> (sign, logdet) = np.linalg.slogdet(a)
>>> (sign, logdet)
(-1, 0.69314718055994529)
>>> sign * np.exp(logdet)
-2.0
This routine succeeds where ordinary `det` does not:
>>> np.linalg.det(np.eye(500) * 0.1)
0.0
>>> np.linalg.slogdet(np.eye(500) * 0.1)
(1, -1151.2925464970228)
"""
a = asarray(a)
_assertRank2(a)
_assertSquareness(a)
t, result_t = _commonType(a)
a = _fastCopyAndTranspose(t, a)
a = _to_native_byte_order(a)
n = a.shape[0]
if isComplexType(t):
lapack_routine = lapack_lite.zgetrf
else:
lapack_routine = lapack_lite.dgetrf
pivots = zeros((n,), fortran_int)
results = lapack_routine(n, n, a, n, pivots, 0)
info = results['info']
if (info < 0):
raise TypeError, "Illegal input to Fortran routine"
elif (info > 0):
return (t(0.0), _realType(t)(-Inf))
sign = 1. - 2. * (add.reduce(pivots != arange(1, n + 1)) % 2)
d = diagonal(a)
absd = absolute(d)
sign *= multiply.reduce(d / absd)
log(absd, absd)
logdet = add.reduce(absd, axis=-1)
return sign, logdet
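# --- Illustrative sketch (added): the underflow scenario from the slogdet()
# docstring -- det() collapses to 0.0 while slogdet() still recovers the
# log-determinant.
def _demo_slogdet_underflow():
    import numpy as np
    a = np.eye(500) * 0.1
    sign, logdet = np.linalg.slogdet(a)
    assert np.linalg.det(a) == 0.0       # 0.1**500 underflows to zero
    assert sign == 1.0 and np.isfinite(logdet)
    return sign, logdet                  # logdet == 500 * log(0.1)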
def det(a):
"""
Compute the determinant of an array.
Parameters
----------
a : array_like, shape (M, M)
Input array.
Returns
-------
det : ndarray
Determinant of `a`.
Notes
-----
The determinant is computed via LU factorization using the LAPACK
routine z/dgetrf.
Examples
--------
The determinant of a 2-D array [[a, b], [c, d]] is ad - bc:
>>> a = np.array([[1, 2], [3, 4]])
>>> np.linalg.det(a)
-2.0
See Also
--------
    slogdet : Another way of representing the determinant, more suitable
for large matrices where underflow/overflow may occur.
"""
sign, logdet = slogdet(a)
return sign * exp(logdet)
# Linear Least Squares
def lstsq(a, b, rcond=-1):
"""
Return the least-squares solution to a linear matrix equation.
Solves the equation `a x = b` by computing a vector `x` that
minimizes the Euclidean 2-norm `|| b - a x ||^2`. The equation may
be under-, well-, or over- determined (i.e., the number of
linearly independent rows of `a` can be less than, equal to, or
greater than its number of linearly independent columns). If `a`
is square and of full rank, then `x` (but for round-off error) is
the "exact" solution of the equation.
Parameters
----------
a : array_like, shape (M, N)
"Coefficient" matrix.
b : array_like, shape (M,) or (M, K)
Ordinate or "dependent variable" values. If `b` is two-dimensional,
the least-squares solution is calculated for each of the `K` columns
of `b`.
rcond : float, optional
Cut-off ratio for small singular values of `a`.
Singular values are set to zero if they are smaller than `rcond`
times the largest singular value of `a`.
Returns
-------
x : ndarray, shape (N,) or (N, K)
Least-squares solution. The shape of `x` depends on the shape of
`b`.
residues : ndarray, shape (), (1,), or (K,)
Sums of residues; squared Euclidean 2-norm for each column in
``b - a*x``.
        If the rank of `a` is < N or M <= N, this is an empty array.
If `b` is 1-dimensional, this is a (1,) shape array.
Otherwise the shape is (K,).
rank : int
Rank of matrix `a`.
s : ndarray, shape (min(M,N),)
Singular values of `a`.
Raises
------
LinAlgError
If computation does not converge.
Notes
-----
If `b` is a matrix, then all array results are returned as matrices.
Examples
--------
Fit a line, ``y = mx + c``, through some noisy data-points:
>>> x = np.array([0, 1, 2, 3])
>>> y = np.array([-1, 0.2, 0.9, 2.1])
By examining the coefficients, we see that the line should have a
gradient of roughly 1 and cut the y-axis at, more or less, -1.
We can rewrite the line equation as ``y = Ap``, where ``A = [[x 1]]``
and ``p = [[m], [c]]``. Now use `lstsq` to solve for `p`:
>>> A = np.vstack([x, np.ones(len(x))]).T
>>> A
array([[ 0., 1.],
[ 1., 1.],
[ 2., 1.],
[ 3., 1.]])
>>> m, c = np.linalg.lstsq(A, y)[0]
>>> print m, c
1.0 -0.95
Plot the data along with the fitted line:
>>> import matplotlib.pyplot as plt
>>> plt.plot(x, y, 'o', label='Original data', markersize=10)
>>> plt.plot(x, m*x + c, 'r', label='Fitted line')
>>> plt.legend()
>>> plt.show()
"""
import math
a, _ = _makearray(a)
b, wrap = _makearray(b)
is_1d = len(b.shape) == 1
if is_1d:
b = b[:, newaxis]
_assertRank2(a, b)
m = a.shape[0]
n = a.shape[1]
n_rhs = b.shape[1]
ldb = max(n, m)
if m != b.shape[0]:
raise LinAlgError, 'Incompatible dimensions'
t, result_t = _commonType(a, b)
result_real_t = _realType(result_t)
real_t = _linalgRealType(t)
bstar = zeros((ldb, n_rhs), t)
bstar[:b.shape[0],:n_rhs] = b.copy()
a, bstar = _fastCopyAndTranspose(t, a, bstar)
a, bstar = _to_native_byte_order(a, bstar)
s = zeros((min(m, n),), real_t)
nlvl = max( 0, int( math.log( float(min(m, n))/2. ) ) + 1 )
iwork = zeros((3*min(m, n)*nlvl+11*min(m, n),), fortran_int)
if isComplexType(t):
lapack_routine = lapack_lite.zgelsd
lwork = 1
rwork = zeros((lwork,), real_t)
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, -1, rwork, iwork, 0)
lwork = int(abs(work[0]))
rwork = zeros((lwork,), real_t)
a_real = zeros((m, n), real_t)
bstar_real = zeros((ldb, n_rhs,), real_t)
results = lapack_lite.dgelsd(m, n, n_rhs, a_real, m,
bstar_real, ldb, s, rcond,
0, rwork, -1, iwork, 0)
lrwork = int(rwork[0])
work = zeros((lwork,), t)
rwork = zeros((lrwork,), real_t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, lwork, rwork, iwork, 0)
else:
lapack_routine = lapack_lite.dgelsd
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, -1, iwork, 0)
lwork = int(work[0])
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, lwork, iwork, 0)
if results['info'] > 0:
raise LinAlgError, 'SVD did not converge in Linear Least Squares'
resids = array([], result_real_t)
if is_1d:
x = array(ravel(bstar)[:n], dtype=result_t, copy=True)
if results['rank'] == n and m > n:
if isComplexType(t):
resids = array([sum(abs(ravel(bstar)[n:])**2)],
dtype=result_real_t)
else:
resids = array([sum((ravel(bstar)[n:])**2)],
dtype=result_real_t)
else:
x = array(transpose(bstar)[:n,:], dtype=result_t, copy=True)
if results['rank'] == n and m > n:
if isComplexType(t):
resids = sum(abs(transpose(bstar)[n:,:])**2, axis=0).astype(
result_real_t)
else:
resids = sum((transpose(bstar)[n:,:])**2, axis=0).astype(
result_real_t)
st = s[:min(n, m)].copy().astype(result_real_t)
return wrap(x), wrap(resids), results['rank'], st
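# --- Illustrative sketch (added): cross-checks the `residues` value returned
# above against a direct computation of ||b - a x||^2 for a full-rank, tall
# system (the only case in which `residues` is non-empty).
def _demo_lstsq_residues():
    import numpy as np
    a = np.vstack([np.arange(4.), np.ones(4)]).T
    b = np.array([-1., 0.2, 0.9, 2.1])
    x, resids, rank, s = np.linalg.lstsq(a, b)
    direct = np.sum((b - np.dot(a, x)) ** 2)
    assert np.allclose(resids, direct)
    return x, resids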
def norm(x, ord=None):
"""
Matrix or vector norm.
This function is able to return one of seven different matrix norms,
or one of an infinite number of vector norms (described below), depending
on the value of the ``ord`` parameter.
Parameters
----------
x : array_like, shape (M,) or (M, N)
Input array.
ord : {non-zero int, inf, -inf, 'fro'}, optional
Order of the norm (see table under ``Notes``). inf means numpy's
`inf` object.
Returns
-------
n : float
Norm of the matrix or vector.
Notes
-----
For values of ``ord <= 0``, the result is, strictly speaking, not a
mathematical 'norm', but it may still be useful for various numerical
purposes.
The following norms can be calculated:
===== ============================ ==========================
ord norm for matrices norm for vectors
===== ============================ ==========================
None Frobenius norm 2-norm
'fro' Frobenius norm --
inf max(sum(abs(x), axis=1)) max(abs(x))
-inf min(sum(abs(x), axis=1)) min(abs(x))
0 -- sum(x != 0)
1 max(sum(abs(x), axis=0)) as below
-1 min(sum(abs(x), axis=0)) as below
2 2-norm (largest sing. value) as below
-2 smallest singular value as below
other -- sum(abs(x)**ord)**(1./ord)
===== ============================ ==========================
The Frobenius norm is given by [1]_:
:math:`||A||_F = [\\sum_{i,j} abs(a_{i,j})^2]^{1/2}`
References
----------
.. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*,
Baltimore, MD, Johns Hopkins University Press, 1985, pg. 15
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.arange(9) - 4
>>> a
array([-4, -3, -2, -1, 0, 1, 2, 3, 4])
>>> b = a.reshape((3, 3))
>>> b
array([[-4, -3, -2],
[-1, 0, 1],
[ 2, 3, 4]])
>>> LA.norm(a)
7.745966692414834
>>> LA.norm(b)
7.745966692414834
>>> LA.norm(b, 'fro')
7.745966692414834
>>> LA.norm(a, np.inf)
4
>>> LA.norm(b, np.inf)
9
>>> LA.norm(a, -np.inf)
0
>>> LA.norm(b, -np.inf)
2
>>> LA.norm(a, 1)
20
>>> LA.norm(b, 1)
7
>>> LA.norm(a, -1)
-4.6566128774142013e-010
>>> LA.norm(b, -1)
6
>>> LA.norm(a, 2)
7.745966692414834
>>> LA.norm(b, 2)
7.3484692283495345
>>> LA.norm(a, -2)
nan
>>> LA.norm(b, -2)
1.8570331885190563e-016
>>> LA.norm(a, 3)
5.8480354764257312
>>> LA.norm(a, -3)
nan
"""
x = asarray(x)
if ord is None: # check the default case first and handle it immediately
return sqrt(add.reduce((x.conj() * x).ravel().real))
nd = x.ndim
if nd == 1:
if ord == Inf:
return abs(x).max()
elif ord == -Inf:
return abs(x).min()
elif ord == 0:
return (x != 0).sum() # Zero norm
elif ord == 1:
return abs(x).sum() # special case for speedup
elif ord == 2:
return sqrt(((x.conj()*x).real).sum()) # special case for speedup
else:
try:
ord + 1
except TypeError:
raise ValueError, "Invalid norm order for vectors."
return ((abs(x)**ord).sum())**(1.0/ord)
elif nd == 2:
if ord == 2:
return svd(x, compute_uv=0).max()
elif ord == -2:
return svd(x, compute_uv=0).min()
elif ord == 1:
return abs(x).sum(axis=0).max()
elif ord == Inf:
return abs(x).sum(axis=1).max()
elif ord == -1:
return abs(x).sum(axis=0).min()
elif ord == -Inf:
return abs(x).sum(axis=1).min()
elif ord in ['fro','f']:
return sqrt(add.reduce((x.conj() * x).real.ravel()))
else:
raise ValueError, "Invalid norm order for matrices."
else:
raise ValueError, "Improper number of dimensions to norm."
| gpl-3.0 |
goddoe/CADL | session-2/libs/gif.py | 7 | 2381 | """Utility for creating a GIF.
Creative Applications of Deep Learning w/ Tensorflow.
Kadenze, Inc.
Copyright Parag K. Mital, June 2016.
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
def build_gif(imgs, interval=0.1, dpi=72,
save_gif=True, saveto='animation.gif',
show_gif=False, cmap=None):
"""Take an array or list of images and create a GIF.
Parameters
----------
imgs : np.ndarray or list
List of images to create a GIF of
interval : float, optional
Spacing in seconds between successive images.
dpi : int, optional
Dots per inch.
save_gif : bool, optional
Whether or not to save the GIF.
saveto : str, optional
Filename of GIF to save.
show_gif : bool, optional
Whether or not to render the GIF using plt.
cmap : None, optional
Optional colormap to apply to the images.
Returns
-------
ani : matplotlib.animation.ArtistAnimation
The artist animation from matplotlib. Likely not useful.
"""
imgs = np.asarray(imgs)
h, w, *c = imgs[0].shape
fig, ax = plt.subplots(figsize=(np.round(w / dpi), np.round(h / dpi)))
fig.subplots_adjust(bottom=0)
fig.subplots_adjust(top=1)
fig.subplots_adjust(right=1)
fig.subplots_adjust(left=0)
ax.set_axis_off()
if cmap is not None:
axs = list(map(lambda x: [
ax.imshow(x, cmap=cmap)], imgs))
else:
axs = list(map(lambda x: [
ax.imshow(x)], imgs))
ani = animation.ArtistAnimation(
fig, axs, interval=interval*1000, repeat_delay=0, blit=False)
if save_gif:
try:
ani.save(saveto, writer='imagemagick', dpi=dpi)
except:
print('You do not have imagemagick installed.\n\nOn OSX ' +
'you can install this by first installing homebrew: ' +
'http://brew.sh\nThen run: "brew install imagemagick".\n' +
'Windows users can obtain a binary installation here: ' +
'https://www.imagemagick.org/script/binary-releases.php\n' +
'And Linux users should be able to install imagemagick using ' +
'their package manager, e.g.: sudo apt-get install imagemagick.')
if show_gif:
plt.show()
return ani
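# Illustrative usage sketch (added, not part of the original utility): builds
# a short GIF from random frames. The filename is a placeholder, and saving
# assumes an imagemagick writer is available, as warned above.
if __name__ == '__main__':
    demo_frames = [np.random.rand(64, 64) for _ in range(10)]
    build_gif(demo_frames, interval=0.2, saveto='demo.gif',
              show_gif=False, cmap='gray')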
| apache-2.0 |
erikgrinaker/BOUT-dev | examples/elm-pb/Python/plotmode2.py | 4 | 1603 | from __future__ import print_function
from __future__ import division
from builtins import str
from builtins import range
from past.utils import old_div
from numpy import *;
#from scipy.io import readsav;
import matplotlib.pyplot as plt;
from boutdata.collect import collect
# Dynamic matplotlib settings
from matplotlib import rcParams;
rcParams['font.size'] = 20;
rcParams['legend.fontsize'] = 'small';
rcParams['legend.labelspacing'] = 0.1;
rcParams['lines.linewidth'] = 2;
rcParams['savefig.bbox'] = 'tight';
# Create image directory if not exists
import os;
if not os.path.exists('image'):
os.makedirs('image');
path='./data/'
data=collect('P',path=path)
#fphi = transpose(readsav('fphi.idl.dat')['fphi'])[:,:,:,];
fphi = fft.fft(data, axis=3)
plt.figure();
for i in range(1, 9):
print("Growth rate for mode number", i)
print(gradient(log(abs(fphi[:,34, 32, i]))))
plt.semilogy(((abs(fphi[:,34, 32, i]))), label = 'n=' + str(i * 5));
plt.legend(loc=2);
plt.xlabel('Time');
plt.savefig('image/plotmode.png');
plt.savefig('image/plotmode.eps');
plt.show(block=False);
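# Illustrative sketch (added): collapse the per-step gradients printed above
# into a single growth-rate estimate per mode. Averaging over the last half
# of the time trace is an assumption about where the linear phase sits.
for i in range(1, 9):
    rate = gradient(log(abs(fphi[:, 34, 32, i])))
    print('n=' + str(i * 5), 'average growth rate:', mean(rate[len(rate) // 2:]))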
plt.figure();
for i in range(1, 9):
plt.plot(abs(fphi[-1, :, 32, i]), label = 'n=' + str(i * 5));
plt.legend();
plt.xlabel('X index');
plt.savefig('image/plotmodeamp.png');
plt.savefig('image/plotmodeamp.eps');
plt.show(block=False);
plt.figure();
for i in range(1, 9):
plt.plot(old_div(abs(fphi[-1, :, 32, i]),abs(fphi[-1, :, 32, i]).max()), label = 'n=' + str(i * 5));
plt.legend();
plt.xlabel('X index');
plt.savefig('image/plotmodenorm.png');
plt.savefig('image/plotmodenorm.eps');
plt.show();
| gpl-3.0 |
boomsbloom/dtm-fmri | DTM/for_gensim/lib/python2.7/site-packages/mpl_toolkits/tests/test_mplot3d.py | 3 | 10669 | import sys
import nose
from nose.tools import assert_raises
from mpl_toolkits.mplot3d import Axes3D, axes3d
from matplotlib import cm
from matplotlib.testing.decorators import image_comparison, cleanup
import matplotlib.pyplot as plt
import numpy as np
@image_comparison(baseline_images=['bar3d'], remove_text=True)
def test_bar3d():
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
for c, z in zip(['r', 'g', 'b', 'y'], [30, 20, 10, 0]):
xs = np.arange(20)
ys = np.arange(20)
cs = [c] * len(xs)
cs[0] = 'c'
ax.bar(xs, ys, zs=z, zdir='y', color=cs, alpha=0.8)
@image_comparison(baseline_images=['contour3d'], remove_text=True)
def test_contour3d():
fig = plt.figure()
ax = fig.gca(projection='3d')
X, Y, Z = axes3d.get_test_data(0.05)
cset = ax.contour(X, Y, Z, zdir='z', offset=-100, cmap=cm.coolwarm)
cset = ax.contour(X, Y, Z, zdir='x', offset=-40, cmap=cm.coolwarm)
cset = ax.contour(X, Y, Z, zdir='y', offset=40, cmap=cm.coolwarm)
ax.set_xlim(-40, 40)
ax.set_ylim(-40, 40)
ax.set_zlim(-100, 100)
@image_comparison(baseline_images=['contourf3d'], remove_text=True)
def test_contourf3d():
fig = plt.figure()
ax = fig.gca(projection='3d')
X, Y, Z = axes3d.get_test_data(0.05)
cset = ax.contourf(X, Y, Z, zdir='z', offset=-100, cmap=cm.coolwarm)
cset = ax.contourf(X, Y, Z, zdir='x', offset=-40, cmap=cm.coolwarm)
cset = ax.contourf(X, Y, Z, zdir='y', offset=40, cmap=cm.coolwarm)
ax.set_xlim(-40, 40)
ax.set_ylim(-40, 40)
ax.set_zlim(-100, 100)
@image_comparison(baseline_images=['contourf3d_fill'], remove_text=True)
def test_contourf3d_fill():
fig = plt.figure()
ax = fig.gca(projection='3d')
X, Y = np.meshgrid(np.arange(-2, 2, 0.25), np.arange(-2, 2, 0.25))
Z = X.clip(0, 0)
# This produces holes in the z=0 surface that causes rendering errors if
# the Poly3DCollection is not aware of path code information (issue #4784)
Z[::5, ::5] = 0.1
cset = ax.contourf(X, Y, Z, offset=0, levels=[-0.1, 0], cmap=cm.coolwarm)
ax.set_xlim(-2, 2)
ax.set_ylim(-2, 2)
ax.set_zlim(-1, 1)
@image_comparison(baseline_images=['lines3d'], remove_text=True)
def test_lines3d():
fig = plt.figure()
ax = fig.gca(projection='3d')
theta = np.linspace(-4 * np.pi, 4 * np.pi, 100)
z = np.linspace(-2, 2, 100)
r = z ** 2 + 1
x = r * np.sin(theta)
y = r * np.cos(theta)
ax.plot(x, y, z)
@image_comparison(baseline_images=['mixedsubplot'], remove_text=True)
def test_mixedsubplots():
def f(t):
s1 = np.cos(2*np.pi*t)
e1 = np.exp(-t)
return np.multiply(s1, e1)
t1 = np.arange(0.0, 5.0, 0.1)
t2 = np.arange(0.0, 5.0, 0.02)
fig = plt.figure(figsize=plt.figaspect(2.))
ax = fig.add_subplot(2, 1, 1)
l = ax.plot(t1, f(t1), 'bo',
t2, f(t2), 'k--', markerfacecolor='green')
ax.grid(True)
ax = fig.add_subplot(2, 1, 2, projection='3d')
X, Y = np.meshgrid(np.arange(-5, 5, 0.25), np.arange(-5, 5, 0.25))
R = np.sqrt(X ** 2 + Y ** 2)
Z = np.sin(R)
surf = ax.plot_surface(X, Y, Z, rstride=1, cstride=1,
linewidth=0, antialiased=False)
ax.set_zlim3d(-1, 1)
@image_comparison(baseline_images=['scatter3d'], remove_text=True)
def test_scatter3d():
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(np.arange(10), np.arange(10), np.arange(10),
c='r', marker='o')
ax.scatter(np.arange(10, 20), np.arange(10, 20), np.arange(10, 20),
c='b', marker='^')
@image_comparison(baseline_images=['surface3d'], remove_text=True)
def test_surface3d():
fig = plt.figure()
ax = fig.gca(projection='3d')
X = np.arange(-5, 5, 0.25)
Y = np.arange(-5, 5, 0.25)
X, Y = np.meshgrid(X, Y)
R = np.sqrt(X ** 2 + Y ** 2)
Z = np.sin(R)
surf = ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=cm.coolwarm,
lw=0, antialiased=False)
ax.set_zlim(-1.01, 1.01)
fig.colorbar(surf, shrink=0.5, aspect=5)
@image_comparison(baseline_images=['text3d'])
def test_text3d():
fig = plt.figure()
ax = fig.gca(projection='3d')
zdirs = (None, 'x', 'y', 'z', (1, 1, 0), (1, 1, 1))
xs = (2, 6, 4, 9, 7, 2)
ys = (6, 4, 8, 7, 2, 2)
zs = (4, 2, 5, 6, 1, 7)
for zdir, x, y, z in zip(zdirs, xs, ys, zs):
label = '(%d, %d, %d), dir=%s' % (x, y, z, zdir)
ax.text(x, y, z, label, zdir)
ax.text(1, 1, 1, "red", color='red')
ax.text2D(0.05, 0.95, "2D Text", transform=ax.transAxes)
ax.set_xlim3d(0, 10)
ax.set_ylim3d(0, 10)
ax.set_zlim3d(0, 10)
ax.set_xlabel('X axis')
ax.set_ylabel('Y axis')
ax.set_zlabel('Z axis')
@image_comparison(baseline_images=['trisurf3d'], remove_text=True)
def test_trisurf3d():
n_angles = 36
n_radii = 8
radii = np.linspace(0.125, 1.0, n_radii)
angles = np.linspace(0, 2*np.pi, n_angles, endpoint=False)
angles = np.repeat(angles[..., np.newaxis], n_radii, axis=1)
angles[:, 1::2] += np.pi/n_angles
x = np.append(0, (radii*np.cos(angles)).flatten())
y = np.append(0, (radii*np.sin(angles)).flatten())
z = np.sin(-x*y)
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.plot_trisurf(x, y, z, cmap=cm.jet, linewidth=0.2)
@image_comparison(baseline_images=['wireframe3d'], remove_text=True)
def test_wireframe3d():
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
X, Y, Z = axes3d.get_test_data(0.05)
ax.plot_wireframe(X, Y, Z, rstride=10, cstride=10)
@image_comparison(baseline_images=['wireframe3dzerocstride'], remove_text=True,
extensions=['png'])
def test_wireframe3dzerocstride():
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
X, Y, Z = axes3d.get_test_data(0.05)
ax.plot_wireframe(X, Y, Z, rstride=10, cstride=0)
@image_comparison(baseline_images=['wireframe3dzerorstride'], remove_text=True,
extensions=['png'])
def test_wireframe3dzerorstride():
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
X, Y, Z = axes3d.get_test_data(0.05)
ax.plot_wireframe(X, Y, Z, rstride=0, cstride=10)
@cleanup
def test_wireframe3dzerostrideraises():
if sys.version_info[:2] < (2, 7):
raise nose.SkipTest("assert_raises as context manager "
"not supported with Python < 2.7")
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
X, Y, Z = axes3d.get_test_data(0.05)
with assert_raises(ValueError):
ax.plot_wireframe(X, Y, Z, rstride=0, cstride=0)
@image_comparison(baseline_images=['quiver3d'], remove_text=True)
def test_quiver3d():
fig = plt.figure()
ax = fig.gca(projection='3d')
x, y, z = np.ogrid[-1:0.8:10j, -1:0.8:10j, -1:0.6:3j]
u = np.sin(np.pi * x) * np.cos(np.pi * y) * np.cos(np.pi * z)
v = -np.cos(np.pi * x) * np.sin(np.pi * y) * np.cos(np.pi * z)
w = (np.sqrt(2.0 / 3.0) * np.cos(np.pi * x) * np.cos(np.pi * y) *
np.sin(np.pi * z))
ax.quiver(x, y, z, u, v, w, length=0.1)
@image_comparison(baseline_images=['quiver3d_empty'], remove_text=True)
def test_quiver3d_empty():
fig = plt.figure()
ax = fig.gca(projection='3d')
x, y, z = np.ogrid[-1:0.8:0j, -1:0.8:0j, -1:0.6:0j]
u = np.sin(np.pi * x) * np.cos(np.pi * y) * np.cos(np.pi * z)
v = -np.cos(np.pi * x) * np.sin(np.pi * y) * np.cos(np.pi * z)
w = (np.sqrt(2.0 / 3.0) * np.cos(np.pi * x) * np.cos(np.pi * y) *
np.sin(np.pi * z))
ax.quiver(x, y, z, u, v, w, length=0.1)
@image_comparison(baseline_images=['quiver3d_masked'], remove_text=True)
def test_quiver3d_masked():
fig = plt.figure()
ax = fig.gca(projection='3d')
# Using mgrid here instead of ogrid because masked_where doesn't
# seem to like broadcasting very much...
x, y, z = np.mgrid[-1:0.8:10j, -1:0.8:10j, -1:0.6:3j]
u = np.sin(np.pi * x) * np.cos(np.pi * y) * np.cos(np.pi * z)
v = -np.cos(np.pi * x) * np.sin(np.pi * y) * np.cos(np.pi * z)
w = (np.sqrt(2.0 / 3.0) * np.cos(np.pi * x) * np.cos(np.pi * y) *
np.sin(np.pi * z))
u = np.ma.masked_where((-0.4 < x) & (x < 0.1), u, copy=False)
v = np.ma.masked_where((0.1 < y) & (y < 0.7), v, copy=False)
ax.quiver(x, y, z, u, v, w, length=0.1)
@image_comparison(baseline_images=['quiver3d_pivot_middle'], remove_text=True,
extensions=['png'])
def test_quiver3d_pivot_middle():
fig = plt.figure()
ax = fig.gca(projection='3d')
x, y, z = np.ogrid[-1:0.8:10j, -1:0.8:10j, -1:0.6:3j]
u = np.sin(np.pi * x) * np.cos(np.pi * y) * np.cos(np.pi * z)
v = -np.cos(np.pi * x) * np.sin(np.pi * y) * np.cos(np.pi * z)
w = (np.sqrt(2.0 / 3.0) * np.cos(np.pi * x) * np.cos(np.pi * y) *
np.sin(np.pi * z))
ax.quiver(x, y, z, u, v, w, length=0.1, pivot='middle')
@image_comparison(baseline_images=['quiver3d_pivot_tail'], remove_text=True,
extensions=['png'])
def test_quiver3d_pivot_tail():
fig = plt.figure()
ax = fig.gca(projection='3d')
x, y, z = np.ogrid[-1:0.8:10j, -1:0.8:10j, -1:0.6:3j]
u = np.sin(np.pi * x) * np.cos(np.pi * y) * np.cos(np.pi * z)
v = -np.cos(np.pi * x) * np.sin(np.pi * y) * np.cos(np.pi * z)
w = (np.sqrt(2.0 / 3.0) * np.cos(np.pi * x) * np.cos(np.pi * y) *
np.sin(np.pi * z))
ax.quiver(x, y, z, u, v, w, length=0.1, pivot='tail')
@image_comparison(baseline_images=['axes3d_labelpad'], extensions=['png'])
def test_axes3d_labelpad():
from nose.tools import assert_equal
from matplotlib import rcParams
fig = plt.figure()
ax = Axes3D(fig)
# labelpad respects rcParams
assert_equal(ax.xaxis.labelpad, rcParams['axes.labelpad'])
# labelpad can be set in set_label
ax.set_xlabel('X LABEL', labelpad=10)
assert_equal(ax.xaxis.labelpad, 10)
ax.set_ylabel('Y LABEL')
ax.set_zlabel('Z LABEL')
# or manually
ax.yaxis.labelpad = 20
ax.zaxis.labelpad = -40
# Tick labels also respect tick.pad (also from rcParams)
for i, tick in enumerate(ax.yaxis.get_major_ticks()):
tick.set_pad(tick.get_pad() - i * 5)
@image_comparison(baseline_images=['axes3d_cla'], extensions=['png'])
def test_axes3d_cla():
# fixed in pull request 4553
fig = plt.figure()
ax = fig.add_subplot(1,1,1, projection='3d')
ax.set_axis_off()
ax.cla() # make sure the axis displayed is 3D (not 2D)
if __name__ == '__main__':
import nose
nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
| mit |
mssalvador/WorkflowCleaning | semisupervised/batch_generative_model.py | 1 | 3287 | def semi_supervised_batch_single_classifier_generate_approach(data,
featureCols=None,
labelCol='used_label',
predictionCol='prediction',
*args,
**kwargs):
"""
    A first approach to a semi-supervised learning method. Uses k-means clustering
    combined with logistic regression to find the best classification of the data.
    @input: data: spark dataframe with partially missing labels (not every row is labelled)
    @input: featureCols: list of feature column names (strings)
    @input: labelCol: name of the (partially filled) label column, default 'used_label'
    @input: predictionCol: name of the output prediction column, default 'prediction'
    returns spark dataframe with classified data, with help from the clustering method
"""
import numpy as np
import pandas as pd
from pyspark.sql import DataFrame
from pyspark.sql import functions as F
from pyspark.ml import clustering
from pyspark.ml import feature
from pyspark.ml import Pipeline
from pyspark.ml import classification
from pyspark.ml.evaluation import BinaryClassificationEvaluator
from pyspark.ml.tuning import ParamGridBuilder, CrossValidator
    assert labelCol in data.columns, 'Labels are missing, please provide a label column!'
assert isinstance(data, DataFrame), 'Data is not of type Spark.DataFrame, but {}'.format(type(data))
assert featureCols is not None, 'Please give a list of features as string!'
    cluster_model = kwargs.get('clusterModel', 'KMeans')  # TODO: make the cluster model configurable; currently unused
    classification_model = kwargs.get('classificationModel', 'LogisticRegression')  # TODO: same for the classifier; currently unused
k_clusters = (data
.filter((F.col(labelCol) != np.NaN))
.groupBy(labelCol)
.count()
.count()
)
    print(k_clusters)  # debug output: number of distinct (non-NaN) labels found
# Feature vectorizer and k-means model is initialized here!
feature_vector = feature.VectorAssembler(
inputCols=featureCols,
outputCol='features')
k_means = clustering.KMeans(
featuresCol=feature_vector.getOutputCol(),
predictionCol='Kmeans_prediction',
k=k_clusters)
# Classification begins here!
log_reg = classification.LogisticRegression(
featuresCol=feature_vector.getOutputCol(),
labelCol=k_means.getPredictionCol(),
predictionCol=predictionCol)
# Pipeline get assembled here!
pipeline = Pipeline(stages=[feature_vector, k_means, log_reg])
# CrossValidation gets build here!
param_grid = (ParamGridBuilder()
.addGrid(log_reg.regParam, [0.1, 0.01])
.build()
)
evaluator = BinaryClassificationEvaluator(
rawPredictionCol=log_reg.getRawPredictionCol(),
labelCol=k_means.getPredictionCol())
folds = kwargs.get('folds', 3)
cross_validator = CrossValidator(
estimator=pipeline,
estimatorParamMaps=param_grid,
evaluator=evaluator,
numFolds=folds)
evaluated_pipeline = cross_validator.fit(data)
cluster_fitted_data = evaluated_pipeline.transform(data)
    return cluster_fitted_data
| apache-2.0 |
jianrongdeng/LAMOST | ana/scripts/histogram_fits-image.py | 1 | 2680 | """
=======================================
Purpose: make a histogram of fits image
=======================================
Input: fits image file
Output: histogram of pixel values
-------------------
*By: Jianrong Deng 20170601
-------------------
"""
import numpy as np
##############################################################################
# Set up matplotlib and use a nicer set of plot parameters
import matplotlib.pyplot as plt
#from astropy.visualization import astropy_mpl_style
#plt.style.use(astropy_mpl_style)
# read in fits data file
from astropy.io import fits
image_file = '/Users/jdeng/baiduCloudDisk/LAMOST/data/20150923/bias/rb-16r-20150923235754-10000-82496157.fit.gz'
##############################################################################
# Use `astropy.io.fits.info()` to display the structure of the file:
fits.info(image_file)
##############################################################################
# Generally the image information is located in the Primary HDU, also known
# as extension 0. Here, we use `astropy.io.fits.getdata()` to read the image
# data from this first extension using the keyword argument ``ext=0``:
image_data = fits.getdata(image_file, ext=0)
##############################################################################
# The data is now stored as a 2D numpy array. Print the dimensions using the
# shape attribute:
print(image_data.shape)
"""
### option for 'bins' in numpy histogram:
‘auto’: Maximum of the ‘sturges’ and ‘fd’ estimators. Provides good all around performance.
‘fd’ (Freedman Diaconis Estimator): Robust (resilient to outliers) estimator that takes into account data variability and data size.
‘sturges’: R’s default method, only accounts for data size. Only optimal for gaussian data and underestimates number of bins for large non-gaussian datasets.
Returns
-------
hist : array
The values of the histogram. See `density` and `weights` for a
description of the possible semantics.
bin_edges : array of dtype float
Return the bin edges ``(length(hist)+1)``.
Ref: https://docs.scipy.org/doc/numpy/reference/generated/numpy.histogram.html#numpy.histogram
"""
# histogram our data with numpy
#
#hist, bins = np.histogram (image_data, 'auto')
#for i in range(len(bins)-1):
# print ( i, '\t\t:', bins[i], '\t\t: ', hist[i])
##############################################################################
# plot the histogram
plt.figure()
#plt.hist(image_data.flatten(), bins=400, range=[2100, 2500])
plt.hist(image_data.flatten(), bins=50)
#plt.colorbar()
#plt.xscale('log')
plt.yscale('log')
plt.show()
| gpl-3.0 |
cpaulik/scipy | scipy/spatial/_plotutils.py | 53 | 4034 | from __future__ import division, print_function, absolute_import
import numpy as np
from scipy._lib.decorator import decorator as _decorator
__all__ = ['delaunay_plot_2d', 'convex_hull_plot_2d', 'voronoi_plot_2d']
@_decorator
def _held_figure(func, obj, ax=None, **kw):
import matplotlib.pyplot as plt
if ax is None:
fig = plt.figure()
ax = fig.gca()
was_held = ax.ishold()
try:
ax.hold(True)
return func(obj, ax=ax, **kw)
finally:
ax.hold(was_held)
def _adjust_bounds(ax, points):
ptp_bound = points.ptp(axis=0)
ax.set_xlim(points[:,0].min() - 0.1*ptp_bound[0],
points[:,0].max() + 0.1*ptp_bound[0])
ax.set_ylim(points[:,1].min() - 0.1*ptp_bound[1],
points[:,1].max() + 0.1*ptp_bound[1])
@_held_figure
def delaunay_plot_2d(tri, ax=None):
"""
Plot the given Delaunay triangulation in 2-D
Parameters
----------
tri : scipy.spatial.Delaunay instance
Triangulation to plot
ax : matplotlib.axes.Axes instance, optional
Axes to plot on
Returns
-------
fig : matplotlib.figure.Figure instance
Figure for the plot
See Also
--------
Delaunay
matplotlib.pyplot.triplot
Notes
-----
Requires Matplotlib.
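Examples
--------
A minimal sketch (random points, for illustration only):

>>> import numpy as np
>>> from scipy.spatial import Delaunay, delaunay_plot_2d
>>> points = np.random.rand(10, 2)
>>> tri = Delaunay(points)
>>> fig = delaunay_plot_2d(tri)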
"""
if tri.points.shape[1] != 2:
raise ValueError("Delaunay triangulation is not 2-D")
ax.plot(tri.points[:,0], tri.points[:,1], 'o')
ax.triplot(tri.points[:,0], tri.points[:,1], tri.simplices.copy())
_adjust_bounds(ax, tri.points)
return ax.figure
@_held_figure
def convex_hull_plot_2d(hull, ax=None):
"""
Plot the given convex hull diagram in 2-D
Parameters
----------
hull : scipy.spatial.ConvexHull instance
Convex hull to plot
ax : matplotlib.axes.Axes instance, optional
Axes to plot on
Returns
-------
fig : matplotlib.figure.Figure instance
Figure for the plot
See Also
--------
ConvexHull
Notes
-----
Requires Matplotlib.
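Examples
--------
A minimal sketch (random points, for illustration only):

>>> import numpy as np
>>> from scipy.spatial import ConvexHull, convex_hull_plot_2d
>>> points = np.random.rand(30, 2)
>>> hull = ConvexHull(points)
>>> fig = convex_hull_plot_2d(hull)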
"""
if hull.points.shape[1] != 2:
raise ValueError("Convex hull is not 2-D")
ax.plot(hull.points[:,0], hull.points[:,1], 'o')
for simplex in hull.simplices:
ax.plot(hull.points[simplex,0], hull.points[simplex,1], 'k-')
_adjust_bounds(ax, hull.points)
return ax.figure
@_held_figure
def voronoi_plot_2d(vor, ax=None):
"""
Plot the given Voronoi diagram in 2-D
Parameters
----------
vor : scipy.spatial.Voronoi instance
Diagram to plot
ax : matplotlib.axes.Axes instance, optional
Axes to plot on
Returns
-------
fig : matplotlib.figure.Figure instance
Figure for the plot
See Also
--------
Voronoi
Notes
-----
Requires Matplotlib.
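Examples
--------
A minimal sketch (random points, for illustration only):

>>> import numpy as np
>>> from scipy.spatial import Voronoi, voronoi_plot_2d
>>> points = np.random.rand(10, 2)
>>> vor = Voronoi(points)
>>> fig = voronoi_plot_2d(vor)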
"""
if vor.points.shape[1] != 2:
raise ValueError("Voronoi diagram is not 2-D")
ax.plot(vor.points[:,0], vor.points[:,1], '.')
ax.plot(vor.vertices[:,0], vor.vertices[:,1], 'o')
for simplex in vor.ridge_vertices:
simplex = np.asarray(simplex)
if np.all(simplex >= 0):
ax.plot(vor.vertices[simplex,0], vor.vertices[simplex,1], 'k-')
ptp_bound = vor.points.ptp(axis=0)
center = vor.points.mean(axis=0)
for pointidx, simplex in zip(vor.ridge_points, vor.ridge_vertices):
simplex = np.asarray(simplex)
if np.any(simplex < 0):
i = simplex[simplex >= 0][0] # finite end Voronoi vertex
t = vor.points[pointidx[1]] - vor.points[pointidx[0]] # tangent
t /= np.linalg.norm(t)
n = np.array([-t[1], t[0]]) # normal
midpoint = vor.points[pointidx].mean(axis=0)
direction = np.sign(np.dot(midpoint - center, n)) * n
far_point = vor.vertices[i] + direction * ptp_bound.max()
ax.plot([vor.vertices[i,0], far_point[0]],
[vor.vertices[i,1], far_point[1]], 'k--')
_adjust_bounds(ax, vor.points)
return ax.figure
| bsd-3-clause |
YihaoLu/statsmodels | statsmodels/sandbox/examples/try_quantile_regression.py | 33 | 1302 | '''Example to illustrate Quantile Regression
Author: Josef Perktold
'''
import numpy as np
from statsmodels.compat.python import zip
import statsmodels.api as sm
from statsmodels.regression.quantile_regression import QuantReg
sige = 5
nobs, k_vars = 500, 5
x = np.random.randn(nobs, k_vars)
#x[:,0] = 1
y = x.sum(1) + sige * (np.random.randn(nobs)/2 + 1)**3
p = 0.5
exog = np.column_stack((np.ones(nobs), x))
res_qr = QuantReg(y, exog).fit(p)
res_qr2 = QuantReg(y, exog).fit(0.25)
res_qr3 = QuantReg(y, exog).fit(0.75)
res_ols = sm.OLS(y, exog).fit()
##print 'ols ', res_ols.params
##print '0.25', res_qr2
##print '0.5 ', res_qr
##print '0.75', res_qr3
params = [res_ols.params, res_qr2.params, res_qr.params, res_qr3.params]
labels = ['ols', 'qr 0.25', 'qr 0.5', 'qr 0.75']
import matplotlib.pyplot as plt
#sortidx = np.argsort(y)
fitted_ols = np.dot(res_ols.model.exog, params[0])
sortidx = np.argsort(fitted_ols)
x_sorted = res_ols.model.exog[sortidx]
fitted_ols = np.dot(x_sorted, params[0])
plt.figure()
plt.plot(y[sortidx], 'o', alpha=0.75)
for lab, beta in zip(labels, params):
print('%-8s'%lab, np.round(beta, 4))
fitted = np.dot(x_sorted, beta)
lw = 2 if lab == 'ols' else 1
plt.plot(fitted, lw=lw, label=lab)
plt.legend()
plt.show()
| bsd-3-clause |
neishm/EC-CAS-diags | eccas_diags/diagnostics/diurnal_cycle.py | 1 | 6056 | ###############################################################################
# Copyright 2016 - Climate Research Division
# Environment and Climate Change Canada
#
# This file is part of the "EC-CAS diags" package.
#
# "EC-CAS diags" is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# "EC-CAS diags" is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with "EC-CAS diags". If not, see <http://www.gnu.org/licenses/>.
###############################################################################
# Diurnal cycle diagnostic
def have_regular_time_axis(d):
from pygeode.timeaxis import Time
from pygeode.timeutils import reltime
import numpy as np
# Omit data with no time information.
if 'time' not in d: return False
# Omit data where the time is not a proper axis (e.g. for ObsPack).
if not isinstance(d.time,Time): return False
# Check the time of day for regular intervals
hours = sorted(set(reltime(d.time,units='hours')%24))
if len(set(np.diff(hours))) == 1: return True
return False
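# e.g. output sampled at 00, 06, 12 and 18 UTC gives hours == [0, 6, 12, 18]
# and a single unique spacing of 6, hence a regular time axis.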
# Start with the timeseries diagnostic, and further process the data to get
# the diurnal cycle.
from .timeseries import Timeseries
class DiurnalCycle(Timeseries):
"""
Mean diurnal cycle, sampled at obs locations.
"""
# Only use data that has a regularly-spaced time axis.
def _check_dataset (self, dataset):
if super(DiurnalCycle,self)._check_dataset(dataset) is False:
return False
return have_regular_time_axis(dataset)
# Compute a diurnal mean.
# Takes a PyGeode Var object as input.
# Returns the hour of the day, and the diurnal mean data as numpy arrays.
# Note: only works on one timeseries at a time.
@staticmethod
def compute_diurnal_mean_stddev (var):
import numpy as np
from pygeode.timeutils import reltime
assert len(var.axes) == 1
hours = reltime(var.time, units='hours')
hours_mod = hours%24
data = var.get()
diurnal_hours = sorted(set(hours_mod))
mean = []
stddev = []
for h in diurnal_hours:
current_data = data[hours_mod==h]
count = np.nansum(np.isfinite(current_data))
m = np.nansum(current_data) / count
mean.append (m)
v = np.nansum((current_data-m)**2)/(count-1)
stddev.append (np.sqrt(v))
# Wrap around to the start of the next day (complete cycle)
# Also, wrap to the end of the previous day, in case the first hour is > 0.
if len(diurnal_hours) > 0:
diurnal_hours = [diurnal_hours[-1]-24] + diurnal_hours + [diurnal_hours[0]+24]
mean = [mean[-1]] + mean + [mean[0]]
stddev = [stddev[-1]] + stddev + [stddev[0]]
return np.array(diurnal_hours), np.array(mean), np.array(stddev)
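# Example of the wrap-around behaviour (hypothetical 6-hourly samples):
# for diurnal_hours [0, 6, 12, 18] the returned hours are
# [-6, 0, 6, 12, 18, 24], with mean and stddev padded the same way so a
# plotted cycle closes on itself.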
# Do the diurnal cycle plots.
def do (self, inputs):
from ..common import long_monthnames
from matplotlib import pyplot as pl
from os.path import exists
from os import mkdir
outdir = self.outdir + '/diurnal-cycle' + self.suffix + self.end_suffix
if not exists(outdir): mkdir(outdir)
# Determine years for comparisons
years = set()
for inp in inputs:
for d in inp.datasets:
t = d.vars[0].getaxis('time')
for y in set(t.year):
if sum(t.year==y) > 10: years.add(y)
years = sorted(years)
# Extract the data for each station,year,month.
# Compute the diurnal means and do the plot.
nstations = len(inputs[0].datasets)
for i in range(nstations):
station = inputs[0].datasets[i].station.station[0]
for year in years:
outfile = "%s/%s_diurnal_cycle_%s_at_%s_for_%04d%s%s.%s"%(outdir,'_'.join(d.name for d in inputs), self.fieldname, station.replace('/','^'), year, self.suffix, self.end_suffix, self.image_format)
if exists(outfile): continue
fig = pl.figure(figsize=(10,10))
title = "%s diurnal cycle at %s (%04d)"%(self.fieldname,station,year)
# Fix issue with certain characters in station names
title = title.decode('latin-1')
pl.suptitle (title, fontsize=18)
for month, month_string in long_monthnames:
if month <= 6: plotnum = 2*month-1
else: plotnum = 2*(month-6)
pl.subplot(6,2,plotnum)
pl.title(month_string)
for inp in inputs:
data = inp.datasets[i][self.fieldname](station=station)(year=year,month=month).squeeze()
if len(data.axes) == 0: continue
hours, data, std = self.compute_diurnal_mean_stddev(data)
pl.plot(hours, data, color=inp.color, linestyle=inp.linestyle, linewidth=2, marker=inp.marker, markersize=10, markeredgecolor=inp.color, label=inp.title)
if inp.std_style == 'lines':
pl.plot(hours, data+std, color=inp.color, linestyle='--')
pl.plot(hours, data-std, color=inp.color, linestyle='--')
if inp.std_style == 'shade':
pl.fill_between(hours, data-std, data+std, color=inp.color, alpha=0.2, linewidth=0)
hourticks = range(0,26,2)
if plotnum in (11,12):
pl.xticks(hourticks)
pl.xlabel('hour')
else:
pl.xticks(hourticks,['']*len(hourticks))
pl.xlim(0,24)
if plotnum%2 == 1:
pl.ylabel('[%s]'%self.units)
# Don't use matplotlib's axis label offset (looks ugly).
# http://stackoverflow.com/questions/24171064/matplotlib-remove-axis-label-offset-by-default
pl.gca().get_yaxis().get_major_formatter().set_useOffset(False)
fig.savefig(outfile)
pl.close(fig)
from . import table
table['diurnal-cycle'] = DiurnalCycle
| lgpl-3.0 |
quantopian/odo | odo/backends/tests/test_pandas.py | 3 | 4361 | from __future__ import absolute_import, division, print_function
from collections import OrderedDict
from datetime import datetime, timedelta
from distutils.version import LooseVersion
from datashape import (
Categorical,
DateTime,
Option,
Record,
discover,
dshape,
float64,
int64,
coretypes as ct,
)
from datashape.util.testing import assert_dshape_equal
from networkx import NetworkXNoPath
import numpy as np
import pandas as pd
import pytest
from odo import odo
requires_datetimetz = pytest.mark.skipif(
LooseVersion(pd.__version__) < LooseVersion('0.17'),
reason="Pandas before DatetimeTZ",
)
data = [('Alice', 100), ('Bob', 200)]
def test_discover_dataframe():
df = pd.DataFrame([('Alice', 100), ('Bob', 200)],
columns=['name', 'amount'])
assert discover(df) == dshape('2 * {name: ?string, amount: int64}')
def test_discover_series():
s = pd.Series([1, 2, 3])
assert discover(s) == 3 * discover(s[0])
def test_floats_are_not_optional():
df = pd.DataFrame([('Alice', 100), ('Bob', None)],
columns=['name', 'amount'])
ds = discover(df)
assert_dshape_equal(ds[1].types[1], float64)
def test_datetime_to_timestamp():
dt = datetime(2014, 1, 1)
ts = odo(dt, pd.Timestamp)
assert isinstance(ts, pd.Timestamp)
assert ts == pd.Timestamp('2014-01-01')
def test_nan_to_nat():
assert odo(float('nan'), pd.Timestamp) is pd.NaT
assert odo(float('nan'), pd.Timedelta) is pd.NaT
assert odo(np.nan, pd.Timestamp) is pd.NaT
assert odo(np.nan, pd.Timedelta) is pd.NaT
with pytest.raises(NetworkXNoPath):
# Check that only nan can be converted.
odo(0.5, pd.Timestamp)
with pytest.raises(NetworkXNoPath):
# Check that only nan can be converted.
odo(0.5, pd.Timedelta)
def test_none_to_nat():
assert odo(None, pd.Timestamp) is pd.NaT
assert odo(None, pd.Timedelta) is pd.NaT
def test_nat_to_nat():
assert odo(pd.NaT, pd.Timestamp) is pd.NaT
assert odo(pd.NaT, pd.Timedelta) is pd.NaT
def test_timedelta_to_pandas():
assert odo(timedelta(days=1), pd.Timedelta) == pd.Timedelta(days=1)
assert odo(timedelta(hours=1), pd.Timedelta) == pd.Timedelta(hours=1)
assert odo(timedelta(seconds=1), pd.Timedelta) == pd.Timedelta(seconds=1)
def test_categorical_pandas():
df = pd.DataFrame({'x': list('a'*5 + 'b'*5 + 'c'*5),
'y': np.arange(15, dtype=np.int64)},
columns=['x', 'y'])
df.x = df.x.astype('category')
assert_dshape_equal(discover(df), 15 * Record([('x',
Categorical(['a', 'b', 'c'])), ('y', int64)]))
assert_dshape_equal(discover(df.x), 15 * Categorical(['a', 'b', 'c']))
@requires_datetimetz
def test_datetimetz_pandas():
df = pd.DataFrame(
OrderedDict([
('naive', pd.date_range('2014', periods=5)),
('Europe/Moscow', pd.date_range('2014', periods=5, tz='Europe/Moscow')),
('UTC', pd.date_range('2014', periods=5, tz='UTC')),
('US/Eastern', pd.date_range('2014', periods=5, tz='US/Eastern')),
])
)
assert_dshape_equal(
discover(df),
5 * Record[
'naive': Option(DateTime(tz=None)),
'Europe/Moscow': Option(DateTime(tz='Europe/Moscow')),
'UTC': Option(DateTime(tz='UTC')),
'US/Eastern': Option(DateTime(tz='US/Eastern')),
]
)
assert_dshape_equal(discover(df.naive), 5 * Option(DateTime(tz=None)))
for tz in ('Europe/Moscow', 'UTC', 'US/Eastern'):
assert_dshape_equal(
discover(df[tz]),
5 * Option(DateTime(tz=tz))
)
@pytest.mark.parametrize('dtype', ('int64', 'float64'))
def test_numeric_index(dtype):
ix = pd.Index([1, 2, 3], dtype=dtype)
actual = discover(ix)
expected = 3 * getattr(ct, dtype)
assert_dshape_equal(actual, expected)
@pytest.mark.parametrize(
'tz', (
None,
requires_datetimetz('US/Eastern'),
requires_datetimetz('UTC'),
requires_datetimetz('Europe/Moscow'),
),
)
def test_datetime_index(tz):
ix = pd.DatetimeIndex(['2014-01-01', '2014-01-02', '2014-01-03'], tz=tz)
actual = discover(ix)
expected = 3 * Option(DateTime(tz=tz))
assert_dshape_equal(actual, expected)
| bsd-3-clause |
sgmap/openfisca-france | setup.py | 1 | 1859 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
setup(
name = "OpenFisca-France",
version = "42.2.0",
author = "OpenFisca Team",
author_email = "[email protected]",
classifiers = [
"Development Status :: 5 - Production/Stable",
"License :: OSI Approved :: GNU Affero General Public License v3",
"Operating System :: POSIX",
"Programming Language :: Python",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Topic :: Scientific/Engineering :: Information Analysis",
],
description = "French tax and benefit system for OpenFisca",
keywords = "benefit france microsimulation social tax",
license = "http://www.fsf.org/licensing/licenses/agpl-3.0.html",
url = "https://github.com/openfisca/openfisca-france",
data_files = [
("share/openfisca/openfisca-france", ["CHANGELOG.md", "LICENSE.AGPL.txt", "README.md"]),
],
extras_require = {
"inversion_revenus": [
"scipy >= 0.17",
],
"de_net_a_brut": [
"scipy >= 0.17",
],
"taxipp": [
"pandas >= 0.13",
],
"dev": [
"autopep8 ==1.4.4",
"flake8 >=3.7.0,<3.8.0",
"flake8-print",
"pytest <5.0",
"scipy >= 0.17", # Only used to test de_net_a_brut reform
"requests >= 2.8",
"yamllint >=1.11.1,<1.16"
],
},
include_package_data = True, # Will read MANIFEST.in
install_requires = [
"OpenFisca-Core >=31.0,<35.0",
],
message_extractors = {"openfisca_france": [
("**.py", "python", None),
]},
packages = find_packages(exclude=["openfisca_france.tests*"]),
)
| agpl-3.0 |
jzt5132/scikit-learn | sklearn/ensemble/tests/test_partial_dependence.py | 365 | 6996 | """
Testing for the partial dependence module.
"""
import numpy as np
from numpy.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import if_matplotlib
from sklearn.ensemble.partial_dependence import partial_dependence
from sklearn.ensemble.partial_dependence import plot_partial_dependence
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn import datasets
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the boston dataset
boston = datasets.load_boston()
# also load the iris dataset
iris = datasets.load_iris()
def test_partial_dependence_classifier():
# Test partial dependence for classifier
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
pdp, axes = partial_dependence(clf, [0], X=X, grid_resolution=5)
# only 4 grid points instead of 5 because only 4 unique X[:,0] vals
assert pdp.shape == (1, 4)
assert axes[0].shape[0] == 4
# now with our own grid
X_ = np.asarray(X)
grid = np.unique(X_[:, 0])
pdp_2, axes = partial_dependence(clf, [0], grid=grid)
assert axes is None
assert_array_equal(pdp, pdp_2)
def test_partial_dependence_multiclass():
# Test partial dependence for multi-class classifier
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, iris.target)
grid_resolution = 25
n_classes = clf.n_classes_
pdp, axes = partial_dependence(
clf, [0], X=iris.data, grid_resolution=grid_resolution)
assert pdp.shape == (n_classes, grid_resolution)
assert len(axes) == 1
assert axes[0].shape[0] == grid_resolution
def test_partial_dependence_regressor():
# Test partial dependence for regressor
clf = GradientBoostingRegressor(n_estimators=10, random_state=1)
clf.fit(boston.data, boston.target)
grid_resolution = 25
pdp, axes = partial_dependence(
clf, [0], X=boston.data, grid_resolution=grid_resolution)
assert pdp.shape == (1, grid_resolution)
assert axes[0].shape[0] == grid_resolution
def test_partial_dependence_input():
# Test input validation of partial dependence.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
assert_raises(ValueError, partial_dependence,
clf, [0], grid=None, X=None)
assert_raises(ValueError, partial_dependence,
clf, [0], grid=[0, 1], X=X)
# first argument must be an instance of BaseGradientBoosting
assert_raises(ValueError, partial_dependence,
{}, [0], X=X)
# Gradient boosting estimator must be fit
assert_raises(ValueError, partial_dependence,
GradientBoostingClassifier(), [0], X=X)
assert_raises(ValueError, partial_dependence, clf, [-1], X=X)
assert_raises(ValueError, partial_dependence, clf, [100], X=X)
# wrong ndim for grid
grid = np.random.rand(10, 2, 1)
assert_raises(ValueError, partial_dependence, clf, [0], grid=grid)
@if_matplotlib
def test_plot_partial_dependence():
# Test partial dependence plot function.
clf = GradientBoostingRegressor(n_estimators=10, random_state=1)
clf.fit(boston.data, boston.target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, boston.data, [0, 1, (0, 1)],
grid_resolution=grid_resolution,
feature_names=boston.feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
# check with str features and array feature names
fig, axs = plot_partial_dependence(clf, boston.data, ['CRIM', 'ZN',
('CRIM', 'ZN')],
grid_resolution=grid_resolution,
feature_names=boston.feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
# check with list feature_names
feature_names = boston.feature_names.tolist()
fig, axs = plot_partial_dependence(clf, boston.data, ['CRIM', 'ZN',
('CRIM', 'ZN')],
grid_resolution=grid_resolution,
feature_names=feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
@if_matplotlib
def test_plot_partial_dependence_input():
# Test partial dependence plot function input checks.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
# not fitted yet
assert_raises(ValueError, plot_partial_dependence,
clf, X, [0])
clf.fit(X, y)
assert_raises(ValueError, plot_partial_dependence,
clf, np.array(X)[:, :0], [0])
# first argument must be an instance of BaseGradientBoosting
assert_raises(ValueError, plot_partial_dependence,
{}, X, [0])
# must be larger than -1
assert_raises(ValueError, plot_partial_dependence,
clf, X, [-1])
# too large feature value
assert_raises(ValueError, plot_partial_dependence,
clf, X, [100])
# str feature but no feature_names
assert_raises(ValueError, plot_partial_dependence,
clf, X, ['foobar'])
# not valid features value
assert_raises(ValueError, plot_partial_dependence,
clf, X, [{'foo': 'bar'}])
@if_matplotlib
def test_plot_partial_dependence_multiclass():
# Test partial dependence plot function on multi-class input.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, iris.target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, iris.data, [0, 1],
label=0,
grid_resolution=grid_resolution)
assert len(axs) == 2
assert all(ax.has_data for ax in axs)
# now with symbol labels
target = iris.target_names[iris.target]
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, iris.data, [0, 1],
label='setosa',
grid_resolution=grid_resolution)
assert len(axs) == 2
assert all(ax.has_data for ax in axs)
# label not in gbrt.classes_
assert_raises(ValueError, plot_partial_dependence,
clf, iris.data, [0, 1], label='foobar',
grid_resolution=grid_resolution)
# label not provided
assert_raises(ValueError, plot_partial_dependence,
clf, iris.data, [0, 1],
grid_resolution=grid_resolution)
| bsd-3-clause |
robin-lai/scikit-learn | sklearn/feature_selection/tests/test_feature_select.py | 103 | 22297 | """
Todo: cross-check the F-value with statsmodels
"""
from __future__ import division
import itertools
import warnings
import numpy as np
from scipy import stats, sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_not_in
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils import safe_mask
from sklearn.datasets.samples_generator import (make_classification,
make_regression)
from sklearn.feature_selection import (chi2, f_classif, f_oneway, f_regression,
SelectPercentile, SelectKBest,
SelectFpr, SelectFdr, SelectFwe,
GenericUnivariateSelect)
##############################################################################
# Test the score functions
def test_f_oneway_vs_scipy_stats():
# Test that our f_oneway gives the same result as scipy.stats
rng = np.random.RandomState(0)
X1 = rng.randn(10, 3)
X2 = 1 + rng.randn(10, 3)
f, pv = stats.f_oneway(X1, X2)
f2, pv2 = f_oneway(X1, X2)
assert_true(np.allclose(f, f2))
assert_true(np.allclose(pv, pv2))
def test_f_oneway_ints():
# Smoke test f_oneway on integers: that it does raise casting errors
# with recent numpys
rng = np.random.RandomState(0)
X = rng.randint(10, size=(10, 10))
y = np.arange(10)
fint, pint = f_oneway(X, y)
# test that it gives the same result as with float
f, p = f_oneway(X.astype(np.float), y)
assert_array_almost_equal(f, fint, decimal=4)
assert_array_almost_equal(p, pint, decimal=4)
def test_f_classif():
# Test whether the F test yields meaningful results
# on a simple simulated classification problem
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
F, pv = f_classif(X, y)
F_sparse, pv_sparse = f_classif(sparse.csr_matrix(X), y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
assert_array_almost_equal(F_sparse, F)
assert_array_almost_equal(pv_sparse, pv)
def test_f_regression():
# Test whether the F test yields meaningful results
# on a simple simulated regression problem
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0)
F, pv = f_regression(X, y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
# again without centering, compare with sparse
F, pv = f_regression(X, y, center=False)
F_sparse, pv_sparse = f_regression(sparse.csr_matrix(X), y, center=False)
assert_array_almost_equal(F_sparse, F)
assert_array_almost_equal(pv_sparse, pv)
def test_f_regression_input_dtype():
# Test whether f_regression returns the same value
# for any numeric data_type
rng = np.random.RandomState(0)
X = rng.rand(10, 20)
y = np.arange(10).astype(np.int)
F1, pv1 = f_regression(X, y)
F2, pv2 = f_regression(X, y.astype(np.float))
assert_array_almost_equal(F1, F2, 5)
assert_array_almost_equal(pv1, pv2, 5)
def test_f_regression_center():
# Test whether f_regression preserves dof according to 'center' argument
# We use two centered variates so we have a simple relationship between
# F-score with variates centering and F-score without variates centering.
# Create toy example
X = np.arange(-5, 6).reshape(-1, 1) # X has zero mean
n_samples = X.size
Y = np.ones(n_samples)
Y[::2] *= -1.
Y[0] = 0. # have Y mean being null
F1, _ = f_regression(X, Y, center=True)
F2, _ = f_regression(X, Y, center=False)
assert_array_almost_equal(F1 * (n_samples - 1.) / (n_samples - 2.), F2)
assert_almost_equal(F2[0], 0.232558139) # value from statsmodels OLS
def test_f_classif_multi_class():
# Test whether the F test yields meaningful results
# on a simple simulated classification problem
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
F, pv = f_classif(X, y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
def test_select_percentile_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the percentile heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_classif, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(f_classif, mode='percentile',
param=25).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_percentile_classif_sparse():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the percentile heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
X = sparse.csr_matrix(X)
univariate_filter = SelectPercentile(f_classif, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(f_classif, mode='percentile',
param=25).fit(X, y).transform(X)
assert_array_equal(X_r.toarray(), X_r2.toarray())
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
X_r2inv = univariate_filter.inverse_transform(X_r2)
assert_true(sparse.issparse(X_r2inv))
support_mask = safe_mask(X_r2inv, support)
assert_equal(X_r2inv.shape, X.shape)
assert_array_equal(X_r2inv[:, support_mask].toarray(), X_r.toarray())
# Check other columns are empty
assert_equal(X_r2inv.getnnz(), X_r.getnnz())
##############################################################################
# Test univariate selection in classification settings
def test_select_kbest_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the k best heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k=5)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_classif, mode='k_best', param=5).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_kbest_all():
# Test whether k="all" correctly returns all features.
X, y = make_classification(n_samples=20, n_features=10,
shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k='all')
X_r = univariate_filter.fit(X, y).transform(X)
assert_array_equal(X, X_r)
def test_select_kbest_zero():
# Test whether k=0 correctly returns no features.
X, y = make_classification(n_samples=20, n_features=10,
shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k=0)
univariate_filter.fit(X, y)
support = univariate_filter.get_support()
gtruth = np.zeros(10, dtype=bool)
assert_array_equal(support, gtruth)
X_selected = assert_warns_message(UserWarning, 'No features were selected',
univariate_filter.transform, X)
assert_equal(X_selected.shape, (20, 0))
def test_select_heuristics_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the fdr, fwe and fpr heuristics
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectFwe(f_classif, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
gtruth = np.zeros(20)
gtruth[:5] = 1
for mode in ['fdr', 'fpr', 'fwe']:
X_r2 = GenericUnivariateSelect(
f_classif, mode=mode, param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
assert_array_almost_equal(support, gtruth)
##############################################################################
# Test univariate selection in regression settings
def assert_best_scores_kept(score_filter):
scores = score_filter.scores_
support = score_filter.get_support()
assert_array_equal(np.sort(scores[support]),
np.sort(scores)[-support.sum():])
def test_select_percentile_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the percentile heuristic
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_regression, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='percentile', param=25).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
X_2 = X.copy()
X_2[:, np.logical_not(support)] = 0
assert_array_equal(X_2, univariate_filter.inverse_transform(X_r))
# Check inverse_transform respects dtype
assert_array_equal(X_2.astype(bool),
univariate_filter.inverse_transform(X_r.astype(bool)))
def test_select_percentile_regression_full():
# Test whether the relative univariate feature selection
# selects all features when '100%' is asked.
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_regression, percentile=100)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='percentile', param=100).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.ones(20)
assert_array_equal(support, gtruth)
def test_invalid_percentile():
X, y = make_regression(n_samples=10, n_features=20,
n_informative=2, shuffle=False, random_state=0)
assert_raises(ValueError, SelectPercentile(percentile=-1).fit, X, y)
assert_raises(ValueError, SelectPercentile(percentile=101).fit, X, y)
assert_raises(ValueError, GenericUnivariateSelect(mode='percentile',
param=-1).fit, X, y)
assert_raises(ValueError, GenericUnivariateSelect(mode='percentile',
param=101).fit, X, y)
def test_select_kbest_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the k best heuristic
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0, noise=10)
univariate_filter = SelectKBest(f_regression, k=5)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='k_best', param=5).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_heuristics_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the fpr, fdr or fwe heuristics
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0, noise=10)
univariate_filter = SelectFpr(f_regression, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
gtruth = np.zeros(20)
gtruth[:5] = 1
for mode in ['fdr', 'fpr', 'fwe']:
X_r2 = GenericUnivariateSelect(
f_regression, mode=mode, param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
assert_array_equal(support[:5], np.ones((5, ), dtype=np.bool))
assert_less(np.sum(support[5:] == 1), 3)
def test_select_fdr_regression():
# Test that fdr heuristic actually has low FDR.
def single_fdr(alpha, n_informative, random_state):
X, y = make_regression(n_samples=150, n_features=20,
n_informative=n_informative, shuffle=False,
random_state=random_state, noise=10)
with warnings.catch_warnings(record=True):
# Warnings can be raised when no features are selected
# (low alpha or very noisy data)
univariate_filter = SelectFdr(f_regression, alpha=alpha)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_regression, mode='fdr', param=alpha).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
num_false_positives = np.sum(support[n_informative:] == 1)
num_true_positives = np.sum(support[:n_informative] == 1)
if num_false_positives == 0:
return 0.
false_discovery_rate = (num_false_positives /
(num_true_positives + num_false_positives))
return false_discovery_rate
for alpha in [0.001, 0.01, 0.1]:
for n_informative in [1, 5, 10]:
# As per Benjamini-Hochberg, the expected false discovery rate
# should be lower than alpha:
# FDR = E(FP / (TP + FP)) <= alpha
false_discovery_rate = np.mean([single_fdr(alpha, n_informative,
random_state) for
random_state in range(30)])
assert_greater_equal(alpha, false_discovery_rate)
# Make sure that the empirical false discovery rate increases
# with alpha:
if false_discovery_rate != 0:
assert_greater(false_discovery_rate, alpha / 10)
def test_select_fwe_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the fwe heuristic
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectFwe(f_regression, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_regression, mode='fwe', param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support[:5], np.ones((5, ), dtype=np.bool))
assert_less(np.sum(support[5:] == 1), 2)
def test_selectkbest_tiebreaking():
# Test whether SelectKBest actually selects k features in case of ties.
# Prior to 0.11, SelectKBest would return more features than requested.
Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]]
y = [1]
dummy_score = lambda X, y: (X[0], X[0])
for X in Xs:
sel = SelectKBest(dummy_score, k=1)
X1 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X1.shape[1], 1)
assert_best_scores_kept(sel)
sel = SelectKBest(dummy_score, k=2)
X2 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X2.shape[1], 2)
assert_best_scores_kept(sel)
def test_selectpercentile_tiebreaking():
# Test if SelectPercentile selects the right n_features in case of ties.
Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]]
y = [1]
dummy_score = lambda X, y: (X[0], X[0])
for X in Xs:
sel = SelectPercentile(dummy_score, percentile=34)
X1 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X1.shape[1], 1)
assert_best_scores_kept(sel)
sel = SelectPercentile(dummy_score, percentile=67)
X2 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X2.shape[1], 2)
assert_best_scores_kept(sel)
def test_tied_pvalues():
# Test whether k-best and percentiles work with tied pvalues from chi2.
# chi2 will return the same p-values for the following features, but it
# will return different scores.
X0 = np.array([[10000, 9999, 9998], [1, 1, 1]])
y = [0, 1]
for perm in itertools.permutations((0, 1, 2)):
X = X0[:, perm]
Xt = SelectKBest(chi2, k=2).fit_transform(X, y)
assert_equal(Xt.shape, (2, 2))
assert_not_in(9998, Xt)
Xt = SelectPercentile(chi2, percentile=67).fit_transform(X, y)
assert_equal(Xt.shape, (2, 2))
assert_not_in(9998, Xt)
def test_tied_scores():
# Test for stable sorting in k-best with tied scores.
X_train = np.array([[0, 0, 0], [1, 1, 1]])
y_train = [0, 1]
for n_features in [1, 2, 3]:
sel = SelectKBest(chi2, k=n_features).fit(X_train, y_train)
X_test = sel.transform([[0, 1, 2]])
assert_array_equal(X_test[0], np.arange(3)[-n_features:])
def test_nans():
# Assert that SelectKBest and SelectPercentile can handle NaNs.
# First feature has zero variance to confuse f_classif (ANOVA) and
# make it return a NaN.
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
for select in (SelectKBest(f_classif, 2),
SelectPercentile(f_classif, percentile=67)):
ignore_warnings(select.fit)(X, y)
assert_array_equal(select.get_support(indices=True), np.array([1, 2]))
def test_score_func_error():
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
for SelectFeatures in [SelectKBest, SelectPercentile, SelectFwe,
SelectFdr, SelectFpr, GenericUnivariateSelect]:
assert_raises(TypeError, SelectFeatures(score_func=10).fit, X, y)
def test_invalid_k():
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
assert_raises(ValueError, SelectKBest(k=-1).fit, X, y)
assert_raises(ValueError, SelectKBest(k=4).fit, X, y)
assert_raises(ValueError,
GenericUnivariateSelect(mode='k_best', param=-1).fit, X, y)
assert_raises(ValueError,
GenericUnivariateSelect(mode='k_best', param=4).fit, X, y)
def test_f_classif_constant_feature():
# Test that f_classif warns if a feature is constant throughout.
X, y = make_classification(n_samples=10, n_features=5)
X[:, 0] = 2.0
assert_warns(UserWarning, f_classif, X, y)
def test_no_feature_selected():
rng = np.random.RandomState(0)
# Generate random uncorrelated data: a strict univariate test should
# rejects all the features
X = rng.rand(40, 10)
y = rng.randint(0, 4, size=40)
strict_selectors = [
SelectFwe(alpha=0.01).fit(X, y),
SelectFdr(alpha=0.01).fit(X, y),
SelectFpr(alpha=0.01).fit(X, y),
SelectPercentile(percentile=0).fit(X, y),
SelectKBest(k=0).fit(X, y),
]
for selector in strict_selectors:
assert_array_equal(selector.get_support(), np.zeros(10))
X_selected = assert_warns_message(
UserWarning, 'No features were selected', selector.transform, X)
assert_equal(X_selected.shape, (40, 0))
| bsd-3-clause |
marcocaccin/scikit-learn | sklearn/datasets/lfw.py | 141 | 19372 | """Loader for the Labeled Faces in the Wild (LFW) dataset
This dataset is a collection of JPEG pictures of famous people collected
over the internet, all details are available on the official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. The typical task is called
Face Verification: given a pair of two pictures, a binary classifier
must predict whether the two images are from the same person.
An alternative task, Face Recognition or Face Identification is:
given the picture of the face of an unknown person, identify the name
of the person by referring to a gallery of previously seen pictures of
identified persons.
Both Face Verification and Face Recognition are tasks that are typically
performed on the output of a model trained to perform Face Detection. The
most popular model for Face Detection is called Viola-Jones and is
implemented in the OpenCV library. The LFW faces were extracted by this face
detector from various online websites.
"""
# Copyright (c) 2011 Olivier Grisel <[email protected]>
# License: BSD 3 clause
from os import listdir, makedirs, remove
from os.path import join, exists, isdir
from sklearn.utils import deprecated
import logging
import numpy as np
try:
import urllib.request as urllib # for backwards compatibility
except ImportError:
import urllib
from .base import get_data_home, Bunch
from ..externals.joblib import Memory
from ..externals.six import b
logger = logging.getLogger(__name__)
BASE_URL = "http://vis-www.cs.umass.edu/lfw/"
ARCHIVE_NAME = "lfw.tgz"
FUNNELED_ARCHIVE_NAME = "lfw-funneled.tgz"
TARGET_FILENAMES = [
'pairsDevTrain.txt',
'pairsDevTest.txt',
'pairs.txt',
]
def scale_face(face):
"""Scale back to 0-1 range in case of normalization for plotting"""
scaled = face - face.min()
scaled /= scaled.max()
return scaled
#
# Common private utilities for data fetching from the original LFW website
# local disk caching, and image decoding.
#
def check_fetch_lfw(data_home=None, funneled=True, download_if_missing=True):
"""Helper function to download any missing LFW data"""
data_home = get_data_home(data_home=data_home)
lfw_home = join(data_home, "lfw_home")
if funneled:
archive_path = join(lfw_home, FUNNELED_ARCHIVE_NAME)
data_folder_path = join(lfw_home, "lfw_funneled")
archive_url = BASE_URL + FUNNELED_ARCHIVE_NAME
else:
archive_path = join(lfw_home, ARCHIVE_NAME)
data_folder_path = join(lfw_home, "lfw")
archive_url = BASE_URL + ARCHIVE_NAME
if not exists(lfw_home):
makedirs(lfw_home)
for target_filename in TARGET_FILENAMES:
target_filepath = join(lfw_home, target_filename)
if not exists(target_filepath):
if download_if_missing:
url = BASE_URL + target_filename
logger.warning("Downloading LFW metadata: %s", url)
urllib.urlretrieve(url, target_filepath)
else:
raise IOError("%s is missing" % target_filepath)
if not exists(data_folder_path):
if not exists(archive_path):
if download_if_missing:
logger.warning("Downloading LFW data (~200MB): %s", archive_url)
urllib.urlretrieve(archive_url, archive_path)
else:
raise IOError("%s is missing" % archive_path)
import tarfile
logger.info("Decompressing the data archive to %s", data_folder_path)
tarfile.open(archive_path, "r:gz").extractall(path=lfw_home)
remove(archive_path)
return lfw_home, data_folder_path
def _load_imgs(file_paths, slice_, color, resize):
"""Internally used to load images"""
# Try to import imread and imresize from PIL. We do this here to prevent
# the whole sklearn.datasets module from depending on PIL.
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
from scipy.misc import imresize
except ImportError:
raise ImportError("The Python Imaging Library (PIL)"
" is required to load data from jpeg files")
# compute the portion of the images to load to respect the slice_ parameter
# given by the caller
default_slice = (slice(0, 250), slice(0, 250))
if slice_ is None:
slice_ = default_slice
else:
slice_ = tuple(s or ds for s, ds in zip(slice_, default_slice))
h_slice, w_slice = slice_
h = (h_slice.stop - h_slice.start) // (h_slice.step or 1)
w = (w_slice.stop - w_slice.start) // (w_slice.step or 1)
if resize is not None:
resize = float(resize)
h = int(resize * h)
w = int(resize * w)
# allocate some contiguous memory to host the decoded image slices
n_faces = len(file_paths)
if not color:
faces = np.zeros((n_faces, h, w), dtype=np.float32)
else:
faces = np.zeros((n_faces, h, w, 3), dtype=np.float32)
# iterate over the collected file paths to load the jpeg files as numpy
# arrays
for i, file_path in enumerate(file_paths):
if i % 1000 == 0:
logger.info("Loading face #%05d / %05d", i + 1, n_faces)
# Checks if jpeg reading worked. Refer to issue #3594 for more
# details.
img = imread(file_path)
if img.ndim == 0:
raise RuntimeError("Failed to read the image file %s, "
"Please make sure that libjpeg is installed"
% file_path)
face = np.asarray(img[slice_], dtype=np.float32)
face /= 255.0 # scale uint8 coded colors to the [0.0, 1.0] floats
if resize is not None:
face = imresize(face, resize)
if not color:
# average the color channels to compute a gray-level
# representation
face = face.mean(axis=2)
faces[i, ...] = face
return faces
#
# Task #1: Face Identification on picture with names
#
def _fetch_lfw_people(data_folder_path, slice_=None, color=False, resize=None,
min_faces_per_person=0):
"""Perform the actual data loading for the lfw people dataset
This operation is meant to be cached by a joblib wrapper.
"""
# scan the data folder content to retain people with more that
# `min_faces_per_person` face pictures
person_names, file_paths = [], []
for person_name in sorted(listdir(data_folder_path)):
folder_path = join(data_folder_path, person_name)
if not isdir(folder_path):
continue
paths = [join(folder_path, f) for f in listdir(folder_path)]
n_pictures = len(paths)
if n_pictures >= min_faces_per_person:
person_name = person_name.replace('_', ' ')
person_names.extend([person_name] * n_pictures)
file_paths.extend(paths)
n_faces = len(file_paths)
if n_faces == 0:
raise ValueError("min_faces_per_person=%d is too restrictive" %
min_faces_per_person)
target_names = np.unique(person_names)
target = np.searchsorted(target_names, person_names)
faces = _load_imgs(file_paths, slice_, color, resize)
# shuffle the faces with a deterministic RNG scheme to avoid having
# all faces of the same person in a row, as it would break some
# cross validation and learning algorithms such as SGD and online
# k-means that make an IID assumption
indices = np.arange(n_faces)
np.random.RandomState(42).shuffle(indices)
faces, target = faces[indices], target[indices]
return faces, target, target_names
def fetch_lfw_people(data_home=None, funneled=True, resize=0.5,
min_faces_per_person=0, color=False,
slice_=(slice(70, 195), slice(78, 172)),
download_if_missing=True):
"""Loader for the Labeled Faces in the Wild (LFW) people dataset
This dataset is a collection of JPEG pictures of famous people
collected on the internet, all details are available on the
official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. Each pixel of each channel
(color in RGB) is encoded by a float in range 0.0 - 1.0.
The task is called Face Recognition (or Identification): given the
picture of a face, find the name of the person given a training set
(gallery).
The original images are 250 x 250 pixels, but the default slice and resize
arguments reduce them to 62 x 47.
Parameters
----------
data_home : optional, default: None
Specify another download and cache folder for the datasets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
funneled : boolean, optional, default: True
Download and use the funneled variant of the dataset.
resize : float, optional, default 0.5
Ratio used to resize the each face picture.
min_faces_per_person : int, optional, default None
The extracted dataset will only retain pictures of people that have at
least `min_faces_per_person` different pictures.
color : boolean, optional, default False
Keep the 3 RGB channels instead of averaging them to a single
gray level channel. If color is True the shape of the data has
one more dimension than the shape with color = False.
slice_ : optional
Provide a custom 2D slice (height, width) to extract the
'interesting' part of the jpeg files and avoid using statistical
correlation from the background
download_if_missing : optional, True by default
If False, raise a IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
dataset : dict-like object with the following attributes:
dataset.data : numpy array of shape (13233, 2914)
Each row corresponds to a ravelled face image of original size 62 x 47
pixels. Changing the ``slice_`` or resize parameters will change the shape
of the output.
dataset.images : numpy array of shape (13233, 62, 47)
Each row is a face image corresponding to one of the 5749 people in
the dataset. Changing the ``slice_`` or resize parameters will change the shape
of the output.
dataset.target : numpy array of shape (13233,)
Labels associated to each face image. Those labels range from 0-5748
and correspond to the person IDs.
dataset.DESCR : string
Description of the Labeled Faces in the Wild (LFW) dataset.
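
Examples
--------
A minimal sketch (the first call downloads ~200MB, so this is meant as an
illustration rather than a doctest; the exact shape depends on the
dataset version):

>>> from sklearn.datasets import fetch_lfw_people
>>> lfw_people = fetch_lfw_people(min_faces_per_person=70, resize=0.4)
>>> lfw_people.images.shape  # doctest: +SKIP
(1288, 50, 37)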
"""
lfw_home, data_folder_path = check_fetch_lfw(
data_home=data_home, funneled=funneled,
download_if_missing=download_if_missing)
logger.info('Loading LFW people faces from %s', lfw_home)
# wrap the loader in a memoizing function that will return memmaped data
# arrays for optimal memory usage
m = Memory(cachedir=lfw_home, compress=6, verbose=0)
load_func = m.cache(_fetch_lfw_people)
# load and memoize the pairs as np arrays
faces, target, target_names = load_func(
data_folder_path, resize=resize,
min_faces_per_person=min_faces_per_person, color=color, slice_=slice_)
# pack the results as a Bunch instance
return Bunch(data=faces.reshape(len(faces), -1), images=faces,
target=target, target_names=target_names,
DESCR="LFW faces dataset")
#
# Task #2: Face Verification on pairs of face pictures
#
def _fetch_lfw_pairs(index_file_path, data_folder_path, slice_=None,
color=False, resize=None):
"""Perform the actual data loading for the LFW pairs dataset
This operation is meant to be cached by a joblib wrapper.
"""
# parse the index file to find the number of pairs to be able to allocate
# the right amount of memory before starting to decode the jpeg files
with open(index_file_path, 'rb') as index_file:
split_lines = [ln.strip().split(b('\t')) for ln in index_file]
pair_specs = [sl for sl in split_lines if len(sl) > 2]
n_pairs = len(pair_specs)
# iterating over the metadata lines for each pair to find the filename to
# decode and load in memory
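# (a 3-field line "name i j" denotes two pictures of the same person, a
# 4-field line "name1 i name2 j" denotes pictures of two different people;
# picture indices in the file are 1-based, hence the "- 1" below)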
target = np.zeros(n_pairs, dtype=np.int)
file_paths = list()
for i, components in enumerate(pair_specs):
if len(components) == 3:
target[i] = 1
pair = (
(components[0], int(components[1]) - 1),
(components[0], int(components[2]) - 1),
)
elif len(components) == 4:
target[i] = 0
pair = (
(components[0], int(components[1]) - 1),
(components[2], int(components[3]) - 1),
)
else:
raise ValueError("invalid line %d: %r" % (i + 1, components))
for j, (name, idx) in enumerate(pair):
try:
person_folder = join(data_folder_path, name)
except TypeError:
person_folder = join(data_folder_path, str(name, 'UTF-8'))
filenames = list(sorted(listdir(person_folder)))
file_path = join(person_folder, filenames[idx])
file_paths.append(file_path)
pairs = _load_imgs(file_paths, slice_, color, resize)
shape = list(pairs.shape)
n_faces = shape.pop(0)
shape.insert(0, 2)
shape.insert(0, n_faces // 2)
pairs.shape = shape
return pairs, target, np.array(['Different persons', 'Same person'])
@deprecated("Function 'load_lfw_people' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_people(download_if_missing=False) instead.")
def load_lfw_people(download_if_missing=False, **kwargs):
"""Alias for fetch_lfw_people(download_if_missing=False)
Check fetch_lfw_people.__doc__ for the documentation and parameter list.
"""
return fetch_lfw_people(download_if_missing=download_if_missing, **kwargs)
def fetch_lfw_pairs(subset='train', data_home=None, funneled=True, resize=0.5,
color=False, slice_=(slice(70, 195), slice(78, 172)),
download_if_missing=True):
"""Loader for the Labeled Faces in the Wild (LFW) pairs dataset
This dataset is a collection of JPEG pictures of famous people
collected on the internet, all details are available on the
official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. Each pixel of each channel
(color in RGB) is encoded by a float in range 0.0 - 1.0.
The task is called Face Verification: given a pair of two pictures,
a binary classifier must predict whether the two images are from
the same person.
In the official `README.txt`_ this task is described as the
"Restricted" task. As I am not sure as to implement the
"Unrestricted" variant correctly, I left it as unsupported for now.
.. _`README.txt`: http://vis-www.cs.umass.edu/lfw/README.txt
The original images are 250 x 250 pixels, but the default slice and resize
arguments reduce them to 62 x 47.
Read more in the :ref:`User Guide <labeled_faces_in_the_wild>`.
Parameters
----------
subset : optional, default: 'train'
Select the dataset to load: 'train' for the development training
set, 'test' for the development test set, and '10_folds' for the
official evaluation set that is meant to be used with a 10-folds
cross validation.
data_home : optional, default: None
Specify another download and cache folder for the datasets. By
default all scikit learn data is stored in '~/scikit_learn_data'
subfolders.
funneled : boolean, optional, default: True
Download and use the funneled variant of the dataset.
resize : float, optional, default 0.5
Ratio used to resize the each face picture.
color : boolean, optional, default False
Keep the 3 RGB channels instead of averaging them to a single
gray level channel. If color is True the shape of the data has
one more dimension than the shape with color = False.
slice_ : optional
Provide a custom 2D slice (height, width) to extract the
'interesting' part of the jpeg files and avoid using statistical
correlation from the background
download_if_missing : optional, True by default
If False, raise a IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
The data is returned as a Bunch object with the following attributes:
data : numpy array of shape (2200, 5828)
Each row corresponds to 2 ravel'd face images of original size 62 x 47
pixels. Changing the ``slice_`` or resize parameters will change the shape
of the output.
pairs : numpy array of shape (2200, 2, 62, 47)
Each row has 2 face images corresponding to same or different person
from the dataset containing 5749 people. Changing the ``slice_`` or resize
parameters will change the shape of the output.
target : numpy array of shape (13233,)
Labels associated to each pair of images. The two label values being
different persons or the same person.
DESCR : string
Description of the Labeled Faces in the Wild (LFW) dataset.
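
Examples
--------
A minimal sketch (the first call downloads the data, so this is meant as
an illustration rather than a doctest):

>>> from sklearn.datasets import fetch_lfw_pairs
>>> lfw_pairs_train = fetch_lfw_pairs(subset='train')
>>> lfw_pairs_train.pairs.shape  # doctest: +SKIP
(2200, 2, 62, 47)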
"""
lfw_home, data_folder_path = check_fetch_lfw(
data_home=data_home, funneled=funneled,
download_if_missing=download_if_missing)
logger.info('Loading %s LFW pairs from %s', subset, lfw_home)
# wrap the loader in a memoizing function that will return memmaped data
# arrays for optimal memory usage
m = Memory(cachedir=lfw_home, compress=6, verbose=0)
load_func = m.cache(_fetch_lfw_pairs)
# select the right metadata file according to the requested subset
label_filenames = {
'train': 'pairsDevTrain.txt',
'test': 'pairsDevTest.txt',
'10_folds': 'pairs.txt',
}
if subset not in label_filenames:
raise ValueError("subset='%s' is invalid: should be one of %r" % (
subset, list(sorted(label_filenames.keys()))))
index_file_path = join(lfw_home, label_filenames[subset])
# load and memoize the pairs as np arrays
pairs, target, target_names = load_func(
index_file_path, data_folder_path, resize=resize, color=color,
slice_=slice_)
# pack the results as a Bunch instance
return Bunch(data=pairs.reshape(len(pairs), -1), pairs=pairs,
target=target, target_names=target_names,
DESCR="'%s' segment of the LFW pairs dataset" % subset)
@deprecated("Function 'load_lfw_pairs' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_pairs(download_if_missing=False) instead.")
def load_lfw_pairs(download_if_missing=False, **kwargs):
"""Alias for fetch_lfw_pairs(download_if_missing=False)
Check fetch_lfw_pairs.__doc__ for the documentation and parameter list.
"""
return fetch_lfw_pairs(download_if_missing=download_if_missing, **kwargs)
| bsd-3-clause |
Candihub/pixel | apps/explorer/utils/export.py | 1 | 9706 | import pandas
import re
import uuid
import yaml
import zipfile
from io import BytesIO, StringIO
from django.db.models import Q
from django.utils.translation import ugettext as _
from apps.core.models import Pixel
from apps.explorer.templatetags.explorer import highlight_terms
PIXELSET_EXPORT_META_FILENAME = 'meta.yaml'
PIXELSET_EXPORT_PIXELS_FILENAME = 'pixels.csv'
def _get_pixelsets_dataframe_and_metadata(pixel_set_ids,
search_terms=None,
descriptions=dict(),
with_links=False):
"""The function takes Pixel Set IDs and optionally a list of search terms
like Omics Units identifiers or terms in descriptions, a hash map of Pixel
Set descriptions, and a boolean to determine whether to build URLs for
omics units. This function returns a pandas.DataFrame and a hash map
containing metadata related to the Pixel Sets.
    Search terms may be Omics Unit identifiers or words from descriptions and
    are used to filter the pixels.
Parameters
----------
pixel_set_ids: list
A list of Pixel Set ids.
search_terms: list, optional
A list of search terms.
descriptions: dict, optional
A hash map containing Pixel Set descriptions indexed by ID.
with_links: bool, optional
Whether the omics units should have URLs or not.
Returns
-------
df: pandas.DataFrame
A pandas DataFrame.
meta: dict
A hash map indexed by Pixel Set ID. Values are dict with information
for each Pixel Set.
"""
columns = ['Omics Unit', 'Description']
indexes = set()
meta = dict()
pixels = dict()
# we build a dict with the pixel information for each omics unit, and we
# compute the list of indexes and columns to construct the pandas dataframe
    # afterwards. It is better to prepare this information than to build
    # the dataframe dynamically.
for index, pixel_set_id in enumerate(pixel_set_ids):
if not isinstance(pixel_set_id, uuid.UUID):
short_id = uuid.UUID(pixel_set_id).hex[:7]
else:
short_id = pixel_set_id.hex[:7]
value_col = f'Value {short_id}'
score_col = f'QS {short_id}'
# add columns for this pixel set
columns.append(value_col)
columns.append(score_col)
# add metadata for this pixel set
meta[short_id] = {
'columns': [(index * 2) + 1, (index * 2) + 2],
'pixelset': short_id,
'description': descriptions.get(pixel_set_id, ''),
}
if short_id not in pixels:
pixels[short_id] = []
qs = Pixel.objects.filter(
pixel_set_id=pixel_set_id
).select_related(
'omics_unit__reference'
).order_by(
'omics_unit__reference__identifier'
)
qs = get_queryset_filtered_by_search_terms(
qs,
search_terms=search_terms
)
for pixel in qs:
omics_unit = pixel.omics_unit.reference.identifier
description = pixel.omics_unit.reference.description
link = '<a href="{}">{}</a>'.format(
pixel.omics_unit.reference.url,
omics_unit,
)
pixels[short_id].append({
'description': description.replace('\n', ' '),
'link': link,
'omics_unit': omics_unit,
'quality_score': pixel.quality_score,
'value': pixel.value,
})
indexes.add(omics_unit)
df = pandas.DataFrame(index=sorted(indexes), columns=columns)
if indexes:
# populate the dataframe with each pixel information
for short_id in pixels.keys():
for pixel in pixels[short_id]:
df.loc[
pixel['omics_unit'],
[
'Omics Unit',
'Description',
f'Value {short_id}',
f'QS {short_id}',
]
] = [
pixel['link'] if with_links else pixel['omics_unit'],
pixel['description'],
pixel['value'],
pixel['quality_score'],
]
# drop the indexes to get numerical indexes instead of omics units (so
# that we have numbers displayed in the HTML table)
df = df.reset_index(drop=True)
return df, meta
def get_queryset_filtered_by_search_terms(qs, search_terms=None):
# we only filter by search terms when specified
if search_terms:
clauses = Q(
omics_unit__reference__description__icontains=search_terms[0]
)
for term in search_terms[1:]:
clauses &= Q(omics_unit__reference__description__icontains=term)
qs = qs.filter(
Q(omics_unit__reference__identifier__in=search_terms) | clauses
)
return qs
def export_pixelsets(pixel_sets, search_terms=None):
"""This function exports a list of PixelSet objects as a ZIP archive.
The (in-memory) ZIP archive contains a `meta.yaml` file and a `pixels.csv`
file according to this spec: https://github.com/Candihub/pixel/issues/144.
Parameters
----------
pixel_sets : iterable
A sequence, an iterator, or some other object which supports iteration,
containing PixelSet objects.
search_terms: list, optional
A list of search terms.
Returns
-------
io.BytesIO
A Binary I/O containing the ZIP archive.
"""
descriptions = {}
for pixel_set in pixel_sets:
descriptions[pixel_set.id] = pixel_set.description
df, pixelsets_meta = _get_pixelsets_dataframe_and_metadata(
pixel_set_ids=descriptions.keys(),
search_terms=search_terms,
descriptions=descriptions,
)
stream = BytesIO()
archive = zipfile.ZipFile(
stream,
mode='w',
compression=zipfile.ZIP_DEFLATED
)
# add `meta.yaml` file
archive.writestr(
PIXELSET_EXPORT_META_FILENAME,
yaml.dump({'pixelsets': list(pixelsets_meta.values())})
)
csv = StringIO()
df.to_csv(
path_or_buf=csv,
na_rep='NA',
index=False,
)
# add `pixels.csv` file
archive.writestr(PIXELSET_EXPORT_PIXELS_FILENAME, csv.getvalue())
archive.close()
return stream
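# Illustrative only -- a hedged sketch of how ``export_pixelsets`` might be
# consumed (e.g. from a view or a management command). The ``PixelSet``
# import path below is an assumption made for this example.
def _example_export_pixelsets_to_disk(path='pixelsets.zip'):
    from apps.core.models import PixelSet  # assumed location of the model
    stream = export_pixelsets(PixelSet.objects.all())
    with open(path, 'wb') as archive_file:
        archive_file.write(stream.getvalue())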
def export_pixels(pixel_set, search_terms=None, output=None):
"""This function exports the Pixels of a given PixelSet as a CSV file.
If the list of `search_terms` is empty, all Pixels will be exported.
Parameters
----------
pixel_set : apps.core.models.PixelSet
A PixelSet object.
search_terms: list, optional
A list of search terms.
output : String or File handler, optional
A string or file handler to write the CSV content.
Returns
-------
io.StringIO
A String I/O containing the CSV file if `output` is not specified,
`output` otherwise.
"""
qs = pixel_set.pixels.select_related('omics_unit__reference')
qs = get_queryset_filtered_by_search_terms(qs, search_terms=search_terms)
data = list(
qs.values_list(
'omics_unit__reference__identifier',
'value',
'quality_score',
)
)
df = pandas.DataFrame(data, columns=('Omics Unit', 'Value', 'QS', ))
if output is None:
output = StringIO()
df.to_csv(
path_or_buf=output,
na_rep='NA',
index=False,
)
return output
def export_pixelsets_as_html(pixel_set_ids,
search_terms=None,
display_limit=None):
"""This function creates a HTML table displaying the pixels related to the
set of Pixel Sets given as first argument.
    The function takes Pixel Set IDs, and optionally a list of search terms
    and a number of rows to render in the HTML table.
    Search terms may be Omics Unit identifiers or words from descriptions and
    are used to filter the pixels.
Parameters
----------
pixel_set_ids: list
A list of Pixel Set ids.
search_terms: list, optional
A list of search terms.
display_limit: int
The number of rows to render in the HTML table.
Returns
-------
str
A string containing the HTML table (not escaped).
"""
# tell pandas not to truncate strings
pandas.set_option('display.max_colwidth', -1)
# tell pandas not to format floats
pandas.set_option('display.float_format', lambda val: '{}'.format(val))
df, __ = _get_pixelsets_dataframe_and_metadata(
pixel_set_ids,
search_terms=search_terms,
with_links=True,
)
html = df.to_html(
escape=False,
max_rows=display_limit,
formatters={
# Highlight search terms in description column
'Description': lambda description: highlight_terms(
description,
search_terms
),
},
).replace(' border="1"', '') # pandas hardcodes table borders...
# replace the empty table body with a message.
if len(df.index) == 0:
html = re.sub(
r'<tbody>\s*\n\s*</tbody>',
(
'<tbody>'
'<tr class="empty"><td colspan="{}">{}</td></tr>'
'</tbody>'
).format(
len(df.columns)+1,
_("Your selection gave no results"),
),
html
)
return html
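# Illustrative only -- a hedged sketch of rendering the HTML table; the
# Pixel Set ID and search term below are placeholders, not real data.
def _example_render_pixels_table():
    return export_pixelsets_as_html(
        pixel_set_ids=['00000000-0000-0000-0000-000000000000'],
        search_terms=['kinase'],  # example search term
        display_limit=50,
    )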
| bsd-3-clause |
Srisai85/scikit-learn | sklearn/externals/joblib/parallel.py | 86 | 35087 | """
Helpers for embarrassingly parallel code.
"""
# Author: Gael Varoquaux < gael dot varoquaux at normalesup dot org >
# Copyright: 2010, Gael Varoquaux
# License: BSD 3 clause
from __future__ import division
import os
import sys
import gc
import warnings
from math import sqrt
import functools
import time
import threading
import itertools
from numbers import Integral
try:
import cPickle as pickle
except:
import pickle
from ._multiprocessing_helpers import mp
if mp is not None:
from .pool import MemmapingPool
from multiprocessing.pool import ThreadPool
from .format_stack import format_exc, format_outer_frames
from .logger import Logger, short_format_time
from .my_exceptions import TransportableException, _mk_exception
from .disk import memstr_to_kbytes
from ._compat import _basestring
VALID_BACKENDS = ['multiprocessing', 'threading']
# Environment variables to protect against bad situations when nesting
JOBLIB_SPAWNED_PROCESS = "__JOBLIB_SPAWNED_PARALLEL__"
# In seconds, should be big enough to hide multiprocessing dispatching
# overhead.
# This settings was found by running benchmarks/bench_auto_batching.py
# with various parameters on various platforms.
MIN_IDEAL_BATCH_DURATION = .2
# Should not be too high to avoid stragglers: long jobs running alone
# on a single worker while other workers have no work to process any more.
MAX_IDEAL_BATCH_DURATION = 2
class BatchedCalls(object):
"""Wrap a sequence of (func, args, kwargs) tuples as a single callable"""
def __init__(self, iterator_slice):
self.items = list(iterator_slice)
self._size = len(self.items)
def __call__(self):
return [func(*args, **kwargs) for func, args, kwargs in self.items]
def __len__(self):
return self._size
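# Illustrative only -- a tiny sketch showing how BatchedCalls bundles
# several (func, args, kwargs) tasks into a single picklable callable.
def _example_batched_calls():
    tasks = [(pow, (2, n), {}) for n in range(3)]
    batch = BatchedCalls(iter(tasks))
    return len(batch), batch()  # -> (3, [1, 2, 4])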
###############################################################################
# CPU count that works also when multiprocessing has been disabled via
# the JOBLIB_MULTIPROCESSING environment variable
def cpu_count():
""" Return the number of CPUs.
"""
if mp is None:
return 1
return mp.cpu_count()
###############################################################################
# For verbosity
def _verbosity_filter(index, verbose):
""" Returns False for indices increasingly apart, the distance
depending on the value of verbose.
We use a lag increasing as the square of index
"""
if not verbose:
return True
elif verbose > 10:
return False
if index == 0:
return False
verbose = .5 * (11 - verbose) ** 2
scale = sqrt(index / verbose)
next_scale = sqrt((index + 1) / verbose)
return (int(next_scale) == int(scale))
###############################################################################
class WorkerInterrupt(Exception):
""" An exception that is not KeyboardInterrupt to allow subprocesses
to be interrupted.
"""
pass
###############################################################################
class SafeFunction(object):
""" Wraps a function to make it exception with full traceback in
their representation.
Useful for parallel computing with multiprocessing, for which
exceptions cannot be captured.
"""
def __init__(self, func):
self.func = func
def __call__(self, *args, **kwargs):
try:
return self.func(*args, **kwargs)
except KeyboardInterrupt:
# We capture the KeyboardInterrupt and reraise it as
# something different, as multiprocessing does not
# interrupt processing for a KeyboardInterrupt
raise WorkerInterrupt()
except:
e_type, e_value, e_tb = sys.exc_info()
text = format_exc(e_type, e_value, e_tb, context=10,
tb_offset=1)
if issubclass(e_type, TransportableException):
raise
else:
raise TransportableException(text, e_type)
###############################################################################
def delayed(function, check_pickle=True):
"""Decorator used to capture the arguments of a function.
Pass `check_pickle=False` when:
- performing a possibly repeated check is too costly and has been done
already once outside of the call to delayed.
- when used in conjunction `Parallel(backend='threading')`.
"""
# Try to pickle the input function, to catch the problems early when
# using with multiprocessing:
if check_pickle:
pickle.dumps(function)
def delayed_function(*args, **kwargs):
return function, args, kwargs
try:
delayed_function = functools.wraps(function)(delayed_function)
except AttributeError:
" functools.wraps fails on some callable objects "
return delayed_function
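# Illustrative only -- ``delayed`` merely captures the function and its
# arguments without calling it; Parallel later consumes such triples.
def _example_delayed():
    from math import sqrt
    func, args, kwargs = delayed(sqrt)(9)
    return func(*args, **kwargs)  # -> 3.0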
###############################################################################
class ImmediateComputeBatch(object):
"""Sequential computation of a batch of tasks.
This replicates the async computation API but actually does not delay
the computations when joblib.Parallel runs in sequential mode.
"""
def __init__(self, batch):
# Don't delay the application, to avoid keeping the input
# arguments in memory
self.results = batch()
def get(self):
return self.results
###############################################################################
class BatchCompletionCallBack(object):
"""Callback used by joblib.Parallel's multiprocessing backend.
This callable is executed by the parent process whenever a worker process
has returned the results of a batch of tasks.
It is used for progress reporting, to update estimate of the batch
processing duration and to schedule the next batch of tasks to be
processed.
"""
def __init__(self, dispatch_timestamp, batch_size, parallel):
self.dispatch_timestamp = dispatch_timestamp
self.batch_size = batch_size
self.parallel = parallel
def __call__(self, out):
self.parallel.n_completed_tasks += self.batch_size
this_batch_duration = time.time() - self.dispatch_timestamp
if (self.parallel.batch_size == 'auto'
and self.batch_size == self.parallel._effective_batch_size):
# Update the smoothed streaming estimate of the duration of a batch
# from dispatch to completion
old_duration = self.parallel._smoothed_batch_duration
if old_duration == 0:
# First record of duration for this batch size after the last
# reset.
new_duration = this_batch_duration
else:
# Update the exponentially weighted average of the duration of
# batch for the current effective size.
new_duration = 0.8 * old_duration + 0.2 * this_batch_duration
self.parallel._smoothed_batch_duration = new_duration
self.parallel.print_progress()
if self.parallel._original_iterator is not None:
self.parallel.dispatch_next()
###############################################################################
class Parallel(Logger):
''' Helper class for readable parallel mapping.
Parameters
-----------
n_jobs: int, default: 1
The maximum number of concurrently running jobs, such as the number
of Python worker processes when backend="multiprocessing"
or the size of the thread-pool when backend="threading".
If -1 all CPUs are used. If 1 is given, no parallel computing code
is used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all
CPUs but one are used.
backend: str or None, default: 'multiprocessing'
Specify the parallelization backend implementation.
Supported backends are:
- "multiprocessing" used by default, can induce some
communication and memory overhead when exchanging input and
            output data with the worker Python processes.
- "threading" is a very low-overhead backend but it suffers
from the Python Global Interpreter Lock if the called function
relies a lot on Python objects. "threading" is mostly useful
when the execution bottleneck is a compiled extension that
explicitly releases the GIL (for instance a Cython loop wrapped
in a "with nogil" block or an expensive call to a library such
as NumPy).
verbose: int, optional
The verbosity level: if non zero, progress messages are
printed. Above 50, the output is sent to stdout.
The frequency of the messages increases with the verbosity level.
        If it is more than 10, all iterations are reported.
pre_dispatch: {'all', integer, or expression, as in '3*n_jobs'}
The number of batches (of tasks) to be pre-dispatched.
        Default is '2*n_jobs'. When batch_size="auto" this is a reasonable
        default and the multiprocessing workers should never starve.
batch_size: int or 'auto', default: 'auto'
The number of atomic tasks to dispatch at once to each
worker. When individual evaluations are very fast, multiprocessing
can be slower than sequential computation because of the overhead.
Batching fast computations together can mitigate this.
The ``'auto'`` strategy keeps track of the time it takes for a batch
to complete, and dynamically adjusts the batch size to keep the time
on the order of half a second, using a heuristic. The initial batch
size is 1.
``batch_size="auto"`` with ``backend="threading"`` will dispatch
batches of a single task at a time as the threading backend has
very little overhead and using larger batch size has not proved to
bring any gain in that case.
temp_folder: str, optional
Folder to be used by the pool for memmaping large arrays
for sharing memory with worker processes. If None, this will try in
order:
- a folder pointed by the JOBLIB_TEMP_FOLDER environment variable,
- /dev/shm if the folder exists and is writable: this is a RAMdisk
filesystem available by default on modern Linux distributions,
- the default system temporary folder that can be overridden
with TMP, TMPDIR or TEMP environment variables, typically /tmp
under Unix operating systems.
Only active when backend="multiprocessing".
    max_nbytes: int, str, or None, optional, 1M by default
Threshold on the size of arrays passed to the workers that
triggers automated memory mapping in temp_folder. Can be an int
in Bytes, or a human-readable string, e.g., '1M' for 1 megabyte.
Use None to disable memmaping of large arrays.
Only active when backend="multiprocessing".
Notes
-----
This object uses the multiprocessing module to compute in
parallel the application of a function to many different
arguments. The main functionality it brings in addition to
using the raw multiprocessing API are (see examples for details):
* More readable code, in particular since it avoids
          constructing lists of arguments.
* Easier debugging:
- informative tracebacks even when the error happens on
the client side
- using 'n_jobs=1' enables to turn off parallel computing
for debugging without changing the codepath
- early capture of pickling errors
* An optional progress meter.
* Interruption of multiprocesses jobs with 'Ctrl-C'
* Flexible pickling control for the communication to and from
the worker processes.
* Ability to use shared memory efficiently with worker
processes for large numpy-based datastructures.
Examples
--------
A simple example:
>>> from math import sqrt
>>> from sklearn.externals.joblib import Parallel, delayed
>>> Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10))
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]
Reshaping the output when the function has several return
values:
>>> from math import modf
>>> from sklearn.externals.joblib import Parallel, delayed
>>> r = Parallel(n_jobs=1)(delayed(modf)(i/2.) for i in range(10))
>>> res, i = zip(*r)
>>> res
(0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5)
>>> i
(0.0, 0.0, 1.0, 1.0, 2.0, 2.0, 3.0, 3.0, 4.0, 4.0)
The progress meter: the higher the value of `verbose`, the more
messages::
>>> from time import sleep
>>> from sklearn.externals.joblib import Parallel, delayed
>>> r = Parallel(n_jobs=2, verbose=5)(delayed(sleep)(.1) for _ in range(10)) #doctest: +SKIP
[Parallel(n_jobs=2)]: Done 1 out of 10 | elapsed: 0.1s remaining: 0.9s
[Parallel(n_jobs=2)]: Done 3 out of 10 | elapsed: 0.2s remaining: 0.5s
[Parallel(n_jobs=2)]: Done 6 out of 10 | elapsed: 0.3s remaining: 0.2s
[Parallel(n_jobs=2)]: Done 9 out of 10 | elapsed: 0.5s remaining: 0.1s
[Parallel(n_jobs=2)]: Done 10 out of 10 | elapsed: 0.5s finished
Traceback example, note how the line of the error is indicated
as well as the values of the parameter passed to the function that
triggered the exception, even though the traceback happens in the
child process::
>>> from heapq import nlargest
>>> from sklearn.externals.joblib import Parallel, delayed
>>> Parallel(n_jobs=2)(delayed(nlargest)(2, n) for n in (range(4), 'abcde', 3)) #doctest: +SKIP
#...
---------------------------------------------------------------------------
Sub-process traceback:
---------------------------------------------------------------------------
TypeError Mon Nov 12 11:37:46 2012
PID: 12934 Python 2.7.3: /usr/bin/python
...........................................................................
/usr/lib/python2.7/heapq.pyc in nlargest(n=2, iterable=3, key=None)
419 if n >= size:
420 return sorted(iterable, key=key, reverse=True)[:n]
421
422 # When key is none, use simpler decoration
423 if key is None:
--> 424 it = izip(iterable, count(0,-1)) # decorate
425 result = _nlargest(n, it)
426 return map(itemgetter(0), result) # undecorate
427
428 # General case, slowest method
TypeError: izip argument #1 must support iteration
___________________________________________________________________________
Using pre_dispatch in a producer/consumer situation, where the
data is generated on the fly. Note how the producer is first
    called 3 times before the parallel loop is initiated, and then
called to generate new data on the fly. In this case the total
number of iterations cannot be reported in the progress messages::
>>> from math import sqrt
>>> from sklearn.externals.joblib import Parallel, delayed
>>> def producer():
... for i in range(6):
... print('Produced %s' % i)
... yield i
>>> out = Parallel(n_jobs=2, verbose=100, pre_dispatch='1.5*n_jobs')(
... delayed(sqrt)(i) for i in producer()) #doctest: +SKIP
Produced 0
Produced 1
Produced 2
[Parallel(n_jobs=2)]: Done 1 jobs | elapsed: 0.0s
Produced 3
[Parallel(n_jobs=2)]: Done 2 jobs | elapsed: 0.0s
Produced 4
[Parallel(n_jobs=2)]: Done 3 jobs | elapsed: 0.0s
Produced 5
[Parallel(n_jobs=2)]: Done 4 jobs | elapsed: 0.0s
[Parallel(n_jobs=2)]: Done 5 out of 6 | elapsed: 0.0s remaining: 0.0s
[Parallel(n_jobs=2)]: Done 6 out of 6 | elapsed: 0.0s finished
'''
def __init__(self, n_jobs=1, backend='multiprocessing', verbose=0,
pre_dispatch='2 * n_jobs', batch_size='auto', temp_folder=None,
max_nbytes='1M', mmap_mode='r'):
self.verbose = verbose
self._mp_context = None
if backend is None:
# `backend=None` was supported in 0.8.2 with this effect
backend = "multiprocessing"
elif hasattr(backend, 'Pool') and hasattr(backend, 'Lock'):
# Make it possible to pass a custom multiprocessing context as
# backend to change the start method to forkserver or spawn or
# preload modules on the forkserver helper process.
self._mp_context = backend
backend = "multiprocessing"
if backend not in VALID_BACKENDS:
raise ValueError("Invalid backend: %s, expected one of %r"
% (backend, VALID_BACKENDS))
self.backend = backend
self.n_jobs = n_jobs
if (batch_size == 'auto'
or isinstance(batch_size, Integral) and batch_size > 0):
self.batch_size = batch_size
else:
raise ValueError(
"batch_size must be 'auto' or a positive integer, got: %r"
% batch_size)
self.pre_dispatch = pre_dispatch
self._temp_folder = temp_folder
if isinstance(max_nbytes, _basestring):
self._max_nbytes = 1024 * memstr_to_kbytes(max_nbytes)
else:
self._max_nbytes = max_nbytes
self._mmap_mode = mmap_mode
# Not starting the pool in the __init__ is a design decision, to be
# able to close it ASAP, and not burden the user with closing it
# unless they choose to use the context manager API with a with block.
self._pool = None
self._output = None
self._jobs = list()
self._managed_pool = False
        # This lock is used to coordinate the main thread of this process with
        # the async callback thread of the pool.
self._lock = threading.Lock()
def __enter__(self):
self._managed_pool = True
self._initialize_pool()
return self
def __exit__(self, exc_type, exc_value, traceback):
self._terminate_pool()
self._managed_pool = False
def _effective_n_jobs(self):
n_jobs = self.n_jobs
if n_jobs == 0:
raise ValueError('n_jobs == 0 in Parallel has no meaning')
elif mp is None or n_jobs is None:
# multiprocessing is not available or disabled, fallback
# to sequential mode
return 1
elif n_jobs < 0:
n_jobs = max(mp.cpu_count() + 1 + n_jobs, 1)
return n_jobs
def _initialize_pool(self):
"""Build a process or thread pool and return the number of workers"""
n_jobs = self._effective_n_jobs()
# The list of exceptions that we will capture
self.exceptions = [TransportableException]
if n_jobs == 1:
# Sequential mode: do not use a pool instance to avoid any
# useless dispatching overhead
self._pool = None
elif self.backend == 'threading':
self._pool = ThreadPool(n_jobs)
elif self.backend == 'multiprocessing':
if mp.current_process().daemon:
# Daemonic processes cannot have children
self._pool = None
warnings.warn(
'Multiprocessing-backed parallel loops cannot be nested,'
' setting n_jobs=1',
stacklevel=3)
return 1
elif threading.current_thread().name != 'MainThread':
# Prevent posix fork inside in non-main posix threads
self._pool = None
warnings.warn(
'Multiprocessing backed parallel loops cannot be nested'
' below threads, setting n_jobs=1',
stacklevel=3)
return 1
else:
already_forked = int(os.environ.get(JOBLIB_SPAWNED_PROCESS, 0))
if already_forked:
raise ImportError('[joblib] Attempting to do parallel computing '
'without protecting your import on a system that does '
'not support forking. To use parallel-computing in a '
'script, you must protect your main loop using "if '
"__name__ == '__main__'"
'". Please see the joblib documentation on Parallel '
'for more information'
)
# Set an environment variable to avoid infinite loops
os.environ[JOBLIB_SPAWNED_PROCESS] = '1'
# Make sure to free as much memory as possible before forking
gc.collect()
poolargs = dict(
max_nbytes=self._max_nbytes,
mmap_mode=self._mmap_mode,
temp_folder=self._temp_folder,
verbose=max(0, self.verbose - 50),
context_id=0, # the pool is used only for one call
)
if self._mp_context is not None:
# Use Python 3.4+ multiprocessing context isolation
poolargs['context'] = self._mp_context
self._pool = MemmapingPool(n_jobs, **poolargs)
# We are using multiprocessing, we also want to capture
# KeyboardInterrupts
self.exceptions.extend([KeyboardInterrupt, WorkerInterrupt])
else:
raise ValueError("Unsupported backend: %s" % self.backend)
return n_jobs
def _terminate_pool(self):
if self._pool is not None:
self._pool.close()
self._pool.terminate() # terminate does a join()
self._pool = None
if self.backend == 'multiprocessing':
os.environ.pop(JOBLIB_SPAWNED_PROCESS, 0)
def _dispatch(self, batch):
"""Queue the batch for computing, with or without multiprocessing
        WARNING: this method is not thread-safe: it should only be called
indirectly via dispatch_one_batch.
"""
# If job.get() catches an exception, it closes the queue:
if self._aborting:
return
if self._pool is None:
job = ImmediateComputeBatch(batch)
self._jobs.append(job)
self.n_dispatched_batches += 1
self.n_dispatched_tasks += len(batch)
self.n_completed_tasks += len(batch)
if not _verbosity_filter(self.n_dispatched_batches, self.verbose):
self._print('Done %3i tasks | elapsed: %s',
(self.n_completed_tasks,
short_format_time(time.time() - self._start_time)
))
else:
dispatch_timestamp = time.time()
cb = BatchCompletionCallBack(dispatch_timestamp, len(batch), self)
job = self._pool.apply_async(SafeFunction(batch), callback=cb)
self._jobs.append(job)
self.n_dispatched_tasks += len(batch)
self.n_dispatched_batches += 1
def dispatch_next(self):
"""Dispatch more data for parallel processing
This method is meant to be called concurrently by the multiprocessing
callback. We rely on the thread-safety of dispatch_one_batch to protect
against concurrent consumption of the unprotected iterator.
"""
if not self.dispatch_one_batch(self._original_iterator):
self._iterating = False
self._original_iterator = None
def dispatch_one_batch(self, iterator):
"""Prefetch the tasks for the next batch and dispatch them.
The effective size of the batch is computed here.
If there are no more jobs to dispatch, return False, else return True.
The iterator consumption and dispatching is protected by the same
lock so calling this function should be thread safe.
"""
if self.batch_size == 'auto' and self.backend == 'threading':
# Batching is never beneficial with the threading backend
batch_size = 1
elif self.batch_size == 'auto':
old_batch_size = self._effective_batch_size
batch_duration = self._smoothed_batch_duration
if (batch_duration > 0 and
batch_duration < MIN_IDEAL_BATCH_DURATION):
# The current batch size is too small: the duration of the
# processing of a batch of task is not large enough to hide
# the scheduling overhead.
ideal_batch_size = int(
old_batch_size * MIN_IDEAL_BATCH_DURATION / batch_duration)
                # Multiply by two to limit oscillations between min and max.
batch_size = max(2 * ideal_batch_size, 1)
self._effective_batch_size = batch_size
if self.verbose >= 10:
self._print("Batch computation too fast (%.4fs.) "
"Setting batch_size=%d.", (
batch_duration, batch_size))
elif (batch_duration > MAX_IDEAL_BATCH_DURATION and
old_batch_size >= 2):
# The current batch size is too big. If we schedule overly long
# running batches some CPUs might wait with nothing left to do
                # while a couple of CPUs are left processing a few long running
# batches. Better reduce the batch size a bit to limit the
# likelihood of scheduling such stragglers.
self._effective_batch_size = batch_size = old_batch_size // 2
if self.verbose >= 10:
self._print("Batch computation too slow (%.2fs.) "
"Setting batch_size=%d.", (
batch_duration, batch_size))
else:
# No batch size adjustment
batch_size = old_batch_size
if batch_size != old_batch_size:
# Reset estimation of the smoothed mean batch duration: this
# estimate is updated in the multiprocessing apply_async
# CallBack as long as the batch_size is constant. Therefore
# we need to reset the estimate whenever we re-tune the batch
# size.
self._smoothed_batch_duration = 0
else:
# Fixed batch size strategy
batch_size = self.batch_size
with self._lock:
tasks = BatchedCalls(itertools.islice(iterator, batch_size))
if not tasks:
# No more tasks available in the iterator: tell caller to stop.
return False
else:
self._dispatch(tasks)
return True
def _print(self, msg, msg_args):
"""Display the message on stout or stderr depending on verbosity"""
# XXX: Not using the logger framework: need to
# learn to use logger better.
if not self.verbose:
return
if self.verbose < 50:
writer = sys.stderr.write
else:
writer = sys.stdout.write
msg = msg % msg_args
writer('[%s]: %s\n' % (self, msg))
def print_progress(self):
"""Display the process of the parallel execution only a fraction
of time, controlled by self.verbose.
"""
if not self.verbose:
return
elapsed_time = time.time() - self._start_time
        # This is heuristic code to print a message only 'verbose' times
# The challenge is that we may not know the queue length
if self._original_iterator:
if _verbosity_filter(self.n_dispatched_batches, self.verbose):
return
self._print('Done %3i tasks | elapsed: %s',
(self.n_completed_tasks,
short_format_time(elapsed_time),
))
else:
index = self.n_dispatched_batches
# We are finished dispatching
total_tasks = self.n_dispatched_tasks
# We always display the first loop
if not index == 0:
# Display depending on the number of remaining items
# A message as soon as we finish dispatching, cursor is 0
cursor = (total_tasks - index + 1
- self._pre_dispatch_amount)
frequency = (total_tasks // self.verbose) + 1
is_last_item = (index + 1 == total_tasks)
if (is_last_item or cursor % frequency):
return
remaining_time = (elapsed_time / (index + 1) *
(self.n_dispatched_tasks - index - 1.))
self._print('Done %3i out of %3i | elapsed: %s remaining: %s',
(index + 1,
total_tasks,
short_format_time(elapsed_time),
short_format_time(remaining_time),
))
def retrieve(self):
self._output = list()
while self._iterating or len(self._jobs) > 0:
if len(self._jobs) == 0:
# Wait for an async callback to dispatch new jobs
time.sleep(0.01)
continue
# We need to be careful: the job list can be filling up as
            # we empty it and Python lists are not thread-safe by default, hence
# the use of the lock
with self._lock:
job = self._jobs.pop(0)
try:
self._output.extend(job.get())
except tuple(self.exceptions) as exception:
# Stop dispatching any new job in the async callback thread
self._aborting = True
if isinstance(exception, TransportableException):
# Capture exception to add information on the local
# stack in addition to the distant stack
this_report = format_outer_frames(context=10,
stack_start=1)
report = """Multiprocessing exception:
%s
---------------------------------------------------------------------------
Sub-process traceback:
---------------------------------------------------------------------------
%s""" % (this_report, exception.message)
# Convert this to a JoblibException
exception_type = _mk_exception(exception.etype)[0]
exception = exception_type(report)
# Kill remaining running processes without waiting for
# the results as we will raise the exception we got back
# to the caller instead of returning any result.
with self._lock:
self._terminate_pool()
if self._managed_pool:
# In case we had to terminate a managed pool, let
# us start a new one to ensure that subsequent calls
# to __call__ on the same Parallel instance will get
# a working pool as they expect.
self._initialize_pool()
raise exception
def __call__(self, iterable):
if self._jobs:
raise ValueError('This Parallel instance is already running')
# A flag used to abort the dispatching of jobs in case an
# exception is found
self._aborting = False
if not self._managed_pool:
n_jobs = self._initialize_pool()
else:
n_jobs = self._effective_n_jobs()
if self.batch_size == 'auto':
self._effective_batch_size = 1
iterator = iter(iterable)
pre_dispatch = self.pre_dispatch
if pre_dispatch == 'all' or n_jobs == 1:
# prevent further dispatch via multiprocessing callback thread
self._original_iterator = None
self._pre_dispatch_amount = 0
else:
self._original_iterator = iterator
if hasattr(pre_dispatch, 'endswith'):
pre_dispatch = eval(pre_dispatch)
self._pre_dispatch_amount = pre_dispatch = int(pre_dispatch)
# The main thread will consume the first pre_dispatch items and
# the remaining items will later be lazily dispatched by async
# callbacks upon task completions.
iterator = itertools.islice(iterator, pre_dispatch)
self._start_time = time.time()
self.n_dispatched_batches = 0
self.n_dispatched_tasks = 0
self.n_completed_tasks = 0
self._smoothed_batch_duration = 0.0
try:
self._iterating = True
while self.dispatch_one_batch(iterator):
pass
if pre_dispatch == "all" or n_jobs == 1:
                # The iterable was consumed all at once by the above while loop.
# No need to wait for async callbacks to trigger to
# consumption.
self._iterating = False
self.retrieve()
# Make sure that we get a last message telling us we are done
elapsed_time = time.time() - self._start_time
self._print('Done %3i out of %3i | elapsed: %s finished',
(len(self._output), len(self._output),
short_format_time(elapsed_time)))
finally:
if not self._managed_pool:
self._terminate_pool()
self._jobs = list()
output = self._output
self._output = None
return output
def __repr__(self):
return '%s(n_jobs=%s)' % (self.__class__.__name__, self.n_jobs)
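# Illustrative only -- a hedged sketch of the context manager API mentioned
# in the class docstring: the worker pool is reused across calls and closed
# when the block exits.
def _example_parallel_context_manager():
    from math import sqrt
    with Parallel(n_jobs=2) as parallel:
        first = parallel(delayed(sqrt)(i ** 2) for i in range(4))
        second = parallel(delayed(sqrt)(i ** 2) for i in range(4, 8))
    return first, second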
| bsd-3-clause |
shahankhatch/scikit-learn | examples/plot_kernel_approximation.py | 262 | 8004 | """
==================================================
Explicit feature map approximation for RBF kernels
==================================================
An example illustrating the approximation of the feature map
of an RBF kernel.
.. currentmodule:: sklearn.kernel_approximation
It shows how to use :class:`RBFSampler` and :class:`Nystroem` to
approximate the feature map of an RBF kernel for classification with an SVM on
the digits dataset. Results using a linear SVM in the original space, a linear
SVM using the approximate mappings and using a kernelized SVM are compared.
Timings and accuracy for varying amounts of Monte Carlo sampling (in the case
of :class:`RBFSampler`, which uses random Fourier features) and different sized
subsets of the training set (for :class:`Nystroem`) for the approximate mapping
are shown.
Please note that the dataset here is not large enough to show the benefits
of kernel approximation, as the exact SVM is still reasonably fast.
Sampling more dimensions clearly leads to better classification results, but
comes at a greater cost. This means there is a tradeoff between runtime and
accuracy, given by the parameter n_components. Note that solving the Linear
SVM and also the approximate kernel SVM could be greatly accelerated by using
stochastic gradient descent via :class:`sklearn.linear_model.SGDClassifier`.
This is not easily possible for the case of the kernelized SVM.
The second plot visualizes the decision surfaces of the RBF kernel SVM and
the linear SVM with approximate kernel maps.
The plot shows decision surfaces of the classifiers projected onto
the first two principal components of the data. This visualization should
be taken with a grain of salt since it is just an interesting slice through
the decision surface in 64 dimensions. In particular note that
a datapoint (represented as a dot) is not necessarily classified
into the region it lies in, since it will not lie on the plane
that the first two principal components span.
The usage of :class:`RBFSampler` and :class:`Nystroem` is described in detail
in :ref:`kernel_approximation`.
"""
print(__doc__)
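# Illustrative only -- a minimal sketch of the idea benchmarked below:
# RBFSampler maps the input to an n_components-dimensional feature space in
# which a linear model approximates an RBF-kernel SVM. The toy data here is
# random and only demonstrates the transformed shape.
def _minimal_rbf_sampler_sketch():
    import numpy as np
    from sklearn.kernel_approximation import RBFSampler
    X_toy = np.random.RandomState(0).rand(20, 4)
    features = RBFSampler(gamma=.2, n_components=50,
                          random_state=0).fit_transform(X_toy)
    return features.shape  # -> (20, 50)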
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# Andreas Mueller <[email protected]>
# License: BSD 3 clause
# Standard scientific Python imports
import matplotlib.pyplot as plt
import numpy as np
from time import time
# Import datasets, classifiers and performance metrics
from sklearn import datasets, svm, pipeline
from sklearn.kernel_approximation import (RBFSampler,
Nystroem)
from sklearn.decomposition import PCA
# The digits dataset
digits = datasets.load_digits(n_class=9)
# To apply a classifier on this data, we need to flatten the images, to
# turn the data into a (samples, features) matrix:
n_samples = len(digits.data)
data = digits.data / 16.
data -= data.mean(axis=0)
# We learn the digits on the first half of the digits
data_train, targets_train = data[:n_samples / 2], digits.target[:n_samples / 2]
# Now predict the value of the digit on the second half:
data_test, targets_test = data[n_samples / 2:], digits.target[n_samples / 2:]
#data_test = scaler.transform(data_test)
# Create a classifier: a support vector classifier
kernel_svm = svm.SVC(gamma=.2)
linear_svm = svm.LinearSVC()
# create pipeline from kernel approximation
# and linear svm
feature_map_fourier = RBFSampler(gamma=.2, random_state=1)
feature_map_nystroem = Nystroem(gamma=.2, random_state=1)
fourier_approx_svm = pipeline.Pipeline([("feature_map", feature_map_fourier),
("svm", svm.LinearSVC())])
nystroem_approx_svm = pipeline.Pipeline([("feature_map", feature_map_nystroem),
("svm", svm.LinearSVC())])
# fit and predict using linear and kernel svm:
kernel_svm_time = time()
kernel_svm.fit(data_train, targets_train)
kernel_svm_score = kernel_svm.score(data_test, targets_test)
kernel_svm_time = time() - kernel_svm_time
linear_svm_time = time()
linear_svm.fit(data_train, targets_train)
linear_svm_score = linear_svm.score(data_test, targets_test)
linear_svm_time = time() - linear_svm_time
sample_sizes = 30 * np.arange(1, 10)
fourier_scores = []
nystroem_scores = []
fourier_times = []
nystroem_times = []
for D in sample_sizes:
fourier_approx_svm.set_params(feature_map__n_components=D)
nystroem_approx_svm.set_params(feature_map__n_components=D)
start = time()
nystroem_approx_svm.fit(data_train, targets_train)
nystroem_times.append(time() - start)
start = time()
fourier_approx_svm.fit(data_train, targets_train)
fourier_times.append(time() - start)
fourier_score = fourier_approx_svm.score(data_test, targets_test)
nystroem_score = nystroem_approx_svm.score(data_test, targets_test)
nystroem_scores.append(nystroem_score)
fourier_scores.append(fourier_score)
# plot the results:
plt.figure(figsize=(8, 8))
accuracy = plt.subplot(211)
# second subplot for timings
timescale = plt.subplot(212)
accuracy.plot(sample_sizes, nystroem_scores, label="Nystroem approx. kernel")
timescale.plot(sample_sizes, nystroem_times, '--',
label='Nystroem approx. kernel')
accuracy.plot(sample_sizes, fourier_scores, label="Fourier approx. kernel")
timescale.plot(sample_sizes, fourier_times, '--',
label='Fourier approx. kernel')
# horizontal lines for exact rbf and linear kernels:
accuracy.plot([sample_sizes[0], sample_sizes[-1]],
[linear_svm_score, linear_svm_score], label="linear svm")
timescale.plot([sample_sizes[0], sample_sizes[-1]],
[linear_svm_time, linear_svm_time], '--', label='linear svm')
accuracy.plot([sample_sizes[0], sample_sizes[-1]],
[kernel_svm_score, kernel_svm_score], label="rbf svm")
timescale.plot([sample_sizes[0], sample_sizes[-1]],
[kernel_svm_time, kernel_svm_time], '--', label='rbf svm')
# vertical line for dataset dimensionality = 64
accuracy.plot([64, 64], [0.7, 1], label="n_features")
# legends and labels
accuracy.set_title("Classification accuracy")
timescale.set_title("Training times")
accuracy.set_xlim(sample_sizes[0], sample_sizes[-1])
accuracy.set_xticks(())
accuracy.set_ylim(np.min(fourier_scores), 1)
timescale.set_xlabel("Sampling steps = transformed feature dimension")
accuracy.set_ylabel("Classification accuracy")
timescale.set_ylabel("Training time in seconds")
accuracy.legend(loc='best')
timescale.legend(loc='best')
# visualize the decision surface, projected down to the first
# two principal components of the dataset
pca = PCA(n_components=8).fit(data_train)
X = pca.transform(data_train)
# Generate grid along the first two principal components
multiples = np.arange(-2, 2, 0.1)
# steps along first component
first = multiples[:, np.newaxis] * pca.components_[0, :]
# steps along second component
second = multiples[:, np.newaxis] * pca.components_[1, :]
# combine
grid = first[np.newaxis, :, :] + second[:, np.newaxis, :]
flat_grid = grid.reshape(-1, data.shape[1])
# title for the plots
titles = ['SVC with rbf kernel',
'SVC (linear kernel)\n with Fourier rbf feature map\n'
'n_components=100',
'SVC (linear kernel)\n with Nystroem rbf feature map\n'
'n_components=100']
plt.tight_layout()
plt.figure(figsize=(12, 5))
# predict and plot
for i, clf in enumerate((kernel_svm, nystroem_approx_svm,
fourier_approx_svm)):
# Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max] x [y_min, y_max].
plt.subplot(1, 3, i + 1)
Z = clf.predict(flat_grid)
# Put the result into a color plot
Z = Z.reshape(grid.shape[:-1])
plt.contourf(multiples, multiples, Z, cmap=plt.cm.Paired)
plt.axis('off')
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=targets_train, cmap=plt.cm.Paired)
plt.title(titles[i])
plt.tight_layout()
plt.show()
| bsd-3-clause |
josherick/bokeh | examples/glyphs/anscombe.py | 39 | 2945 | from __future__ import print_function
import numpy as np
import pandas as pd
from bokeh.browserlib import view
from bokeh.document import Document
from bokeh.embed import file_html
from bokeh.models.glyphs import Circle, Line
from bokeh.models import (
ColumnDataSource, Grid, GridPlot, LinearAxis, Plot, Range1d
)
from bokeh.resources import INLINE
raw_columns=[
[10.0, 8.04, 10.0, 9.14, 10.0, 7.46, 8.0, 6.58],
[8.0, 6.95, 8.0, 8.14, 8.0, 6.77, 8.0, 5.76],
[13.0, 7.58, 13.0, 8.74, 13.0, 12.74, 8.0, 7.71],
[9.0, 8.81, 9.0, 8.77, 9.0, 7.11, 8.0, 8.84],
[11.0, 8.33, 11.0, 9.26, 11.0, 7.81, 8.0, 8.47],
[14.0, 9.96, 14.0, 8.10, 14.0, 8.84, 8.0, 7.04],
[6.0, 7.24, 6.0, 6.13, 6.0, 6.08, 8.0, 5.25],
[4.0, 4.26, 4.0, 3.10, 4.0, 5.39, 19.0, 12.5],
[12.0, 10.84, 12.0, 9.13, 12.0, 8.15, 8.0, 5.56],
[7.0, 4.82, 7.0, 7.26, 7.0, 6.42, 8.0, 7.91],
[5.0, 5.68, 5.0, 4.74, 5.0, 5.73, 8.0, 6.89]]
quartet = pd.DataFrame(data=raw_columns, columns=
['Ix','Iy','IIx','IIy','IIIx','IIIy','IVx','IVy'])
circles_source = ColumnDataSource(
data = dict(
xi = quartet['Ix'],
yi = quartet['Iy'],
xii = quartet['IIx'],
yii = quartet['IIy'],
xiii = quartet['IIIx'],
yiii = quartet['IIIy'],
xiv = quartet['IVx'],
yiv = quartet['IVy'],
)
)
x = np.linspace(-0.5, 20.5, 10)
y = 3 + 0.5 * x
lines_source = ColumnDataSource(data=dict(x=x, y=y))
xdr = Range1d(start=-0.5, end=20.5)
ydr = Range1d(start=-0.5, end=20.5)
def make_plot(title, xname, yname):
plot = Plot(
x_range=xdr, y_range=ydr,
title=title, plot_width=400, plot_height=400,
border_fill='white', background_fill='#e9e0db'
)
xaxis = LinearAxis(axis_line_color=None)
plot.add_layout(xaxis, 'below')
yaxis = LinearAxis(axis_line_color=None)
plot.add_layout(yaxis, 'left')
plot.add_layout(Grid(dimension=0, ticker=xaxis.ticker))
plot.add_layout(Grid(dimension=1, ticker=yaxis.ticker))
line = Line(x='x', y='y', line_color="#666699", line_width=2)
plot.add_glyph(lines_source, line)
circle = Circle(
x=xname, y=yname, size=12,
fill_color="#cc6633", line_color="#cc6633", fill_alpha=0.5
)
plot.add_glyph(circles_source, circle)
return plot
I = make_plot('I', 'xi', 'yi')
II = make_plot('II', 'xii', 'yii')
III = make_plot('III', 'xiii', 'yiii')
IV = make_plot('IV', 'xiv', 'yiv')
grid = GridPlot(children=[[I, II], [III, IV]])
doc = Document()
doc.add(grid)
if __name__ == "__main__":
filename = "anscombe.html"
with open(filename, "w") as f:
f.write(file_html(doc, INLINE, "Anscombe's Quartet"))
print("Wrote %s" % filename)
view(filename)
| bsd-3-clause |
holsety/tushare | tushare/datayes/future.py | 17 | 1740 | # -*- coding:utf-8 -*-
"""
DataYes (通联数据) data API
Created on 2015/08/24
@author: Jimmy Liu
@group : waditu
@contact: [email protected]
"""
from pandas.compat import StringIO
import pandas as pd
from tushare.util import vars as vs
from tushare.util.common import Client
from tushare.util import upass as up
class Future():
def __init__(self, client=None):
if client is None:
self.client = Client(up.get_token())
else:
self.client = client
def Futu(self, exchangeCD='', secID='', ticker='', contractObject='', field=''):
"""
        Fetch the basic contract information for futures listed on the four
        major domestic futures exchanges, including contract name, contract
        code, contract type, underlying asset, quotation unit, minimum tick
        size, daily price limit, trading currency, contract multiplier,
        trading margin, listing date, last trading day, delivery date,
        delivery method, trading fee, delivery fee, listing base price,
        contract status, etc.
"""
code, result = self.client.getData(vs.FUTU%(exchangeCD, secID, ticker, contractObject, field))
return _ret_data(code, result)
def FutuConvf(self, secID='', ticker='', field=''):
"""
        Fetch conversion factor information for treasury bond futures,
        including the names and trading codes of the deliverable treasury
        bonds and their conversion factors.
"""
code, result = self.client.getData(vs.FUTUCONVF%(secID, ticker, field))
return _ret_data(code, result)
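# Illustrative only -- a hedged usage sketch; it needs a valid DataYes
# token (configured via tushare.util.upass) and network access, and the
# exchange code below is just an example filter.
def _example_future_usage():
    ft = Future()
    # 'CCFX' (China Financial Futures Exchange) is an example exchange code.
    return ft.Futu(exchangeCD='CCFX')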
def _ret_data(code, result):
if code==200:
result = result.decode('utf-8') if vs.PY3 else result
df = pd.read_csv(StringIO(result))
return df
else:
print(result)
return None
| bsd-3-clause |
pschella/scipy | scipy/spatial/_plotutils.py | 23 | 5505 | from __future__ import division, print_function, absolute_import
import numpy as np
from scipy._lib.decorator import decorator as _decorator
__all__ = ['delaunay_plot_2d', 'convex_hull_plot_2d', 'voronoi_plot_2d']
@_decorator
def _held_figure(func, obj, ax=None, **kw):
import matplotlib.pyplot as plt
if ax is None:
fig = plt.figure()
ax = fig.gca()
return func(obj, ax=ax, **kw)
# As of matplotlib 2.0, the "hold" mechanism is deprecated.
# When matplotlib 1.x is no longer supported, this check can be removed.
was_held = ax.ishold()
if was_held:
return func(obj, ax=ax, **kw)
try:
ax.hold(True)
return func(obj, ax=ax, **kw)
finally:
ax.hold(was_held)
def _adjust_bounds(ax, points):
margin = 0.1 * points.ptp(axis=0)
xy_min = points.min(axis=0) - margin
xy_max = points.max(axis=0) + margin
ax.set_xlim(xy_min[0], xy_max[0])
ax.set_ylim(xy_min[1], xy_max[1])
@_held_figure
def delaunay_plot_2d(tri, ax=None):
"""
Plot the given Delaunay triangulation in 2-D
Parameters
----------
tri : scipy.spatial.Delaunay instance
Triangulation to plot
ax : matplotlib.axes.Axes instance, optional
Axes to plot on
Returns
-------
fig : matplotlib.figure.Figure instance
Figure for the plot
See Also
--------
Delaunay
matplotlib.pyplot.triplot
Notes
-----
Requires Matplotlib.
"""
if tri.points.shape[1] != 2:
raise ValueError("Delaunay triangulation is not 2-D")
x, y = tri.points.T
ax.plot(x, y, 'o')
ax.triplot(x, y, tri.simplices.copy())
_adjust_bounds(ax, tri.points)
return ax.figure
@_held_figure
def convex_hull_plot_2d(hull, ax=None):
"""
Plot the given convex hull diagram in 2-D
Parameters
----------
hull : scipy.spatial.ConvexHull instance
Convex hull to plot
ax : matplotlib.axes.Axes instance, optional
Axes to plot on
Returns
-------
fig : matplotlib.figure.Figure instance
Figure for the plot
See Also
--------
ConvexHull
Notes
-----
Requires Matplotlib.
"""
from matplotlib.collections import LineCollection
if hull.points.shape[1] != 2:
raise ValueError("Convex hull is not 2-D")
ax.plot(hull.points[:,0], hull.points[:,1], 'o')
line_segments = [hull.points[simplex] for simplex in hull.simplices]
ax.add_collection(LineCollection(line_segments,
colors='k',
linestyle='solid'))
_adjust_bounds(ax, hull.points)
return ax.figure
@_held_figure
def voronoi_plot_2d(vor, ax=None, **kw):
"""
Plot the given Voronoi diagram in 2-D
Parameters
----------
vor : scipy.spatial.Voronoi instance
Diagram to plot
ax : matplotlib.axes.Axes instance, optional
Axes to plot on
show_points: bool, optional
Add the Voronoi points to the plot.
show_vertices : bool, optional
Add the Voronoi vertices to the plot.
line_colors : string, optional
Specifies the line color for polygon boundaries
line_width : float, optional
Specifies the line width for polygon boundaries
line_alpha: float, optional
Specifies the line alpha for polygon boundaries
Returns
-------
fig : matplotlib.figure.Figure instance
Figure for the plot
See Also
--------
Voronoi
Notes
-----
Requires Matplotlib.
"""
from matplotlib.collections import LineCollection
if vor.points.shape[1] != 2:
raise ValueError("Voronoi diagram is not 2-D")
if kw.get('show_points', True):
ax.plot(vor.points[:,0], vor.points[:,1], '.')
if kw.get('show_vertices', True):
ax.plot(vor.vertices[:,0], vor.vertices[:,1], 'o')
line_colors = kw.get('line_colors', 'k')
line_width = kw.get('line_width', 1.0)
line_alpha = kw.get('line_alpha', 1.0)
center = vor.points.mean(axis=0)
ptp_bound = vor.points.ptp(axis=0)
finite_segments = []
infinite_segments = []
for pointidx, simplex in zip(vor.ridge_points, vor.ridge_vertices):
simplex = np.asarray(simplex)
if np.all(simplex >= 0):
finite_segments.append(vor.vertices[simplex])
else:
i = simplex[simplex >= 0][0] # finite end Voronoi vertex
t = vor.points[pointidx[1]] - vor.points[pointidx[0]] # tangent
t /= np.linalg.norm(t)
n = np.array([-t[1], t[0]]) # normal
midpoint = vor.points[pointidx].mean(axis=0)
direction = np.sign(np.dot(midpoint - center, n)) * n
far_point = vor.vertices[i] + direction * ptp_bound.max()
infinite_segments.append([vor.vertices[i], far_point])
ax.add_collection(LineCollection(finite_segments,
colors=line_colors,
lw=line_width,
alpha=line_alpha,
linestyle='solid'))
ax.add_collection(LineCollection(infinite_segments,
colors=line_colors,
lw=line_width,
alpha=line_alpha,
linestyle='dashed'))
_adjust_bounds(ax, vor.points)
return ax.figure
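# Illustrative only -- a minimal sketch of the helpers above; matplotlib is
# required, and the random points are toy data.
def _example_voronoi_plot():  # pragma: no cover
    from scipy.spatial import Voronoi
    points = np.random.RandomState(0).rand(10, 2)
    return voronoi_plot_2d(Voronoi(points), show_vertices=False)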
| bsd-3-clause |
aflaxman/scikit-learn | sklearn/model_selection/tests/test_split.py | 5 | 54563 | """Test the split module"""
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import coo_matrix, csc_matrix, csr_matrix
from scipy import stats
from itertools import combinations
from itertools import combinations_with_replacement
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.validation import _num_samples
from sklearn.utils.mocking import MockDataFrame
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import GroupKFold
from sklearn.model_selection import TimeSeriesSplit
from sklearn.model_selection import LeaveOneOut
from sklearn.model_selection import LeaveOneGroupOut
from sklearn.model_selection import LeavePOut
from sklearn.model_selection import LeavePGroupsOut
from sklearn.model_selection import ShuffleSplit
from sklearn.model_selection import GroupShuffleSplit
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.model_selection import PredefinedSplit
from sklearn.model_selection import check_cv
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RepeatedKFold
from sklearn.model_selection import RepeatedStratifiedKFold
from sklearn.linear_model import Ridge
from sklearn.model_selection._split import _validate_shuffle_split
from sklearn.model_selection._split import _CVIterableWrapper
from sklearn.model_selection._split import _build_repr
from sklearn.datasets import load_digits
from sklearn.datasets import make_classification
from sklearn.externals import six
from sklearn.externals.six.moves import zip
from sklearn.utils.fixes import comb
from sklearn.svm import SVC
X = np.ones(10)
y = np.arange(10) // 2
P_sparse = coo_matrix(np.eye(5))
test_groups = (
np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]),
np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
[1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3],
['1', '1', '1', '1', '2', '2', '2', '3', '3', '3', '3', '3'])
digits = load_digits()
class MockClassifier(object):
"""Dummy classifier to test the cross-validation"""
def __init__(self, a=0, allow_nd=False):
self.a = a
self.allow_nd = allow_nd
def fit(self, X, Y=None, sample_weight=None, class_prior=None,
sparse_sample_weight=None, sparse_param=None, dummy_int=None,
dummy_str=None, dummy_obj=None, callback=None):
"""The dummy arguments are to test that this fit function can
accept non-array arguments through cross-validation, such as:
- int
- str (this is actually array-like)
- object
- function
"""
self.dummy_int = dummy_int
self.dummy_str = dummy_str
self.dummy_obj = dummy_obj
if callback is not None:
callback(self)
if self.allow_nd:
X = X.reshape(len(X), -1)
if X.ndim >= 3 and not self.allow_nd:
raise ValueError('X cannot be d')
if sample_weight is not None:
assert_true(sample_weight.shape[0] == X.shape[0],
'MockClassifier extra fit_param sample_weight.shape[0]'
' is {0}, should be {1}'.format(sample_weight.shape[0],
X.shape[0]))
if class_prior is not None:
assert_true(class_prior.shape[0] == len(np.unique(y)),
'MockClassifier extra fit_param class_prior.shape[0]'
' is {0}, should be {1}'.format(class_prior.shape[0],
len(np.unique(y))))
if sparse_sample_weight is not None:
fmt = ('MockClassifier extra fit_param sparse_sample_weight'
'.shape[0] is {0}, should be {1}')
assert_true(sparse_sample_weight.shape[0] == X.shape[0],
fmt.format(sparse_sample_weight.shape[0], X.shape[0]))
if sparse_param is not None:
fmt = ('MockClassifier extra fit_param sparse_param.shape '
'is ({0}, {1}), should be ({2}, {3})')
assert_true(sparse_param.shape == P_sparse.shape,
fmt.format(sparse_param.shape[0],
sparse_param.shape[1],
P_sparse.shape[0], P_sparse.shape[1]))
return self
def predict(self, T):
if self.allow_nd:
T = T.reshape(len(T), -1)
return T[:, 0]
def score(self, X=None, Y=None):
return 1. / (1 + np.abs(self.a))
def get_params(self, deep=False):
return {'a': self.a, 'allow_nd': self.allow_nd}
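# Illustrative sketch (not part of the original suite; the helper name is
# hypothetical): MockClassifier.fit above accepts arbitrary keyword fit
# parameters, so it can be driven through cross_val_score directly.
# Array-like fit_params of length n_samples are sliced per train fold
# before being forwarded, which is exactly what the mock's fit() asserts.
def _demo_mock_classifier_fit_params():
    clf = MockClassifier()
    return cross_val_score(clf, X, y,
                           fit_params={'sample_weight': np.ones(_num_samples(X))})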
@ignore_warnings
def test_cross_validator_with_default_params():
n_samples = 4
n_unique_groups = 4
n_splits = 2
p = 2
n_shuffle_splits = 10 # (the default value)
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
X_1d = np.array([1, 2, 3, 4])
y = np.array([1, 1, 2, 2])
groups = np.array([1, 2, 3, 4])
loo = LeaveOneOut()
lpo = LeavePOut(p)
kf = KFold(n_splits)
skf = StratifiedKFold(n_splits)
lolo = LeaveOneGroupOut()
lopo = LeavePGroupsOut(p)
ss = ShuffleSplit(random_state=0)
    ps = PredefinedSplit([1, 1, 2, 2])  # n_splits = no. of unique folds = 2
loo_repr = "LeaveOneOut()"
lpo_repr = "LeavePOut(p=2)"
kf_repr = "KFold(n_splits=2, random_state=None, shuffle=False)"
skf_repr = "StratifiedKFold(n_splits=2, random_state=None, shuffle=False)"
lolo_repr = "LeaveOneGroupOut()"
lopo_repr = "LeavePGroupsOut(n_groups=2)"
ss_repr = ("ShuffleSplit(n_splits=10, random_state=0, "
"test_size='default',\n train_size=None)")
ps_repr = "PredefinedSplit(test_fold=array([1, 1, 2, 2]))"
n_splits_expected = [n_samples, comb(n_samples, p), n_splits, n_splits,
n_unique_groups, comb(n_unique_groups, p),
n_shuffle_splits, 2]
for i, (cv, cv_repr) in enumerate(zip(
[loo, lpo, kf, skf, lolo, lopo, ss, ps],
[loo_repr, lpo_repr, kf_repr, skf_repr, lolo_repr, lopo_repr,
ss_repr, ps_repr])):
# Test if get_n_splits works correctly
assert_equal(n_splits_expected[i], cv.get_n_splits(X, y, groups))
# Test if the cross-validator works as expected even if
# the data is 1d
np.testing.assert_equal(list(cv.split(X, y, groups)),
list(cv.split(X_1d, y, groups)))
# Test that train, test indices returned are integers
for train, test in cv.split(X, y, groups):
assert_equal(np.asarray(train).dtype.kind, 'i')
            assert_equal(np.asarray(test).dtype.kind, 'i')
# Test if the repr works without any errors
assert_equal(cv_repr, repr(cv))
# ValueError for get_n_splits methods
msg = "The 'X' parameter should not be None."
assert_raise_message(ValueError, msg,
loo.get_n_splits, None, y, groups)
assert_raise_message(ValueError, msg,
lpo.get_n_splits, None, y, groups)
def check_valid_split(train, test, n_samples=None):
# Use python sets to get more informative assertion failure messages
train, test = set(train), set(test)
# Train and test split should not overlap
assert_equal(train.intersection(test), set())
if n_samples is not None:
        # Check that the union of train and test split covers all the indices
assert_equal(train.union(test), set(range(n_samples)))
def check_cv_coverage(cv, X, y, groups, expected_n_splits=None):
n_samples = _num_samples(X)
    # Check that all the samples appear at least once in a test fold
if expected_n_splits is not None:
assert_equal(cv.get_n_splits(X, y, groups), expected_n_splits)
else:
expected_n_splits = cv.get_n_splits(X, y, groups)
collected_test_samples = set()
iterations = 0
for train, test in cv.split(X, y, groups):
check_valid_split(train, test, n_samples=n_samples)
iterations += 1
collected_test_samples.update(test)
# Check that the accumulated test samples cover the whole dataset
assert_equal(iterations, expected_n_splits)
if n_samples is not None:
assert_equal(collected_test_samples, set(range(n_samples)))
def test_kfold_valueerrors():
X1 = np.array([[1, 2], [3, 4], [5, 6]])
X2 = np.array([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]])
    # Check that errors are raised if there are not enough samples
    assert_raises(ValueError, next, KFold(4).split(X1))
# Check that a warning is raised if the least populated class has too few
# members.
y = np.array([3, 3, -1, -1, 3])
skf_3 = StratifiedKFold(3)
assert_warns_message(Warning, "The least populated class",
next, skf_3.split(X2, y))
    # Check that despite the warning the folds are still computed even
    # though all the classes are not necessarily represented on each
    # side of the split at each split
with warnings.catch_warnings():
warnings.simplefilter("ignore")
check_cv_coverage(skf_3, X2, y, groups=None, expected_n_splits=3)
# Check that errors are raised if all n_groups for individual
# classes are less than n_splits.
y = np.array([3, 3, -1, -1, 2])
assert_raises(ValueError, next, skf_3.split(X2, y))
# Error when number of folds is <= 1
assert_raises(ValueError, KFold, 0)
assert_raises(ValueError, KFold, 1)
error_string = ("k-fold cross-validation requires at least one"
" train/test split")
assert_raise_message(ValueError, error_string,
StratifiedKFold, 0)
assert_raise_message(ValueError, error_string,
StratifiedKFold, 1)
# When n_splits is not integer:
assert_raises(ValueError, KFold, 1.5)
assert_raises(ValueError, KFold, 2.0)
assert_raises(ValueError, StratifiedKFold, 1.5)
assert_raises(ValueError, StratifiedKFold, 2.0)
# When shuffle is not a bool:
assert_raises(TypeError, KFold, n_splits=4, shuffle=None)
def test_kfold_indices():
# Check all indices are returned in the test folds
X1 = np.ones(18)
kf = KFold(3)
check_cv_coverage(kf, X1, y=None, groups=None, expected_n_splits=3)
# Check all indices are returned in the test folds even when equal-sized
# folds are not possible
X2 = np.ones(17)
kf = KFold(3)
check_cv_coverage(kf, X2, y=None, groups=None, expected_n_splits=3)
# Check if get_n_splits returns the number of folds
assert_equal(5, KFold(5).get_n_splits(X2))
def test_kfold_no_shuffle():
# Manually check that KFold preserves the data ordering on toy datasets
X2 = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]]
splits = KFold(2).split(X2[:-1])
train, test = next(splits)
assert_array_equal(test, [0, 1])
assert_array_equal(train, [2, 3])
train, test = next(splits)
assert_array_equal(test, [2, 3])
assert_array_equal(train, [0, 1])
splits = KFold(2).split(X2)
train, test = next(splits)
assert_array_equal(test, [0, 1, 2])
assert_array_equal(train, [3, 4])
train, test = next(splits)
assert_array_equal(test, [3, 4])
assert_array_equal(train, [0, 1, 2])
def test_stratified_kfold_no_shuffle():
# Manually check that StratifiedKFold preserves the data ordering as much
# as possible on toy datasets in order to avoid hiding sample dependencies
# when possible
X, y = np.ones(4), [1, 1, 0, 0]
splits = StratifiedKFold(2).split(X, y)
train, test = next(splits)
assert_array_equal(test, [0, 2])
assert_array_equal(train, [1, 3])
train, test = next(splits)
assert_array_equal(test, [1, 3])
assert_array_equal(train, [0, 2])
X, y = np.ones(7), [1, 1, 1, 0, 0, 0, 0]
splits = StratifiedKFold(2).split(X, y)
train, test = next(splits)
assert_array_equal(test, [0, 1, 3, 4])
assert_array_equal(train, [2, 5, 6])
train, test = next(splits)
assert_array_equal(test, [2, 5, 6])
assert_array_equal(train, [0, 1, 3, 4])
# Check if get_n_splits returns the number of folds
assert_equal(5, StratifiedKFold(5).get_n_splits(X, y))
# Make sure string labels are also supported
X = np.ones(7)
y1 = ['1', '1', '1', '0', '0', '0', '0']
y2 = [1, 1, 1, 0, 0, 0, 0]
np.testing.assert_equal(
list(StratifiedKFold(2).split(X, y1)),
list(StratifiedKFold(2).split(X, y2)))
def test_stratified_kfold_ratios():
# Check that stratified kfold preserves class ratios in individual splits
# Repeat with shuffling turned off and on
n_samples = 1000
X = np.ones(n_samples)
y = np.array([4] * int(0.10 * n_samples) +
[0] * int(0.89 * n_samples) +
[1] * int(0.01 * n_samples))
for shuffle in (False, True):
for train, test in StratifiedKFold(5, shuffle=shuffle).split(X, y):
assert_almost_equal(np.sum(y[train] == 4) / len(train), 0.10, 2)
assert_almost_equal(np.sum(y[train] == 0) / len(train), 0.89, 2)
assert_almost_equal(np.sum(y[train] == 1) / len(train), 0.01, 2)
assert_almost_equal(np.sum(y[test] == 4) / len(test), 0.10, 2)
assert_almost_equal(np.sum(y[test] == 0) / len(test), 0.89, 2)
assert_almost_equal(np.sum(y[test] == 1) / len(test), 0.01, 2)
def test_kfold_balance():
# Check that KFold returns folds with balanced sizes
for i in range(11, 17):
kf = KFold(5).split(X=np.ones(i))
sizes = []
for _, test in kf:
sizes.append(len(test))
assert_true((np.max(sizes) - np.min(sizes)) <= 1)
assert_equal(np.sum(sizes), i)
def test_stratifiedkfold_balance():
# Check that KFold returns folds with balanced sizes (only when
# stratification is possible)
# Repeat with shuffling turned off and on
X = np.ones(17)
y = [0] * 3 + [1] * 14
for shuffle in (True, False):
cv = StratifiedKFold(3, shuffle=shuffle)
for i in range(11, 17):
skf = cv.split(X[:i], y[:i])
sizes = []
for _, test in skf:
sizes.append(len(test))
assert_true((np.max(sizes) - np.min(sizes)) <= 1)
assert_equal(np.sum(sizes), i)
def test_shuffle_kfold():
# Check the indices are shuffled properly
kf = KFold(3)
kf2 = KFold(3, shuffle=True, random_state=0)
kf3 = KFold(3, shuffle=True, random_state=1)
X = np.ones(300)
all_folds = np.zeros(300)
for (tr1, te1), (tr2, te2), (tr3, te3) in zip(
kf.split(X), kf2.split(X), kf3.split(X)):
for tr_a, tr_b in combinations((tr1, tr2, tr3), 2):
# Assert that there is no complete overlap
assert_not_equal(len(np.intersect1d(tr_a, tr_b)), len(tr1))
# Set all test indices in successive iterations of kf2 to 1
all_folds[te2] = 1
# Check that all indices are returned in the different test folds
assert_equal(sum(all_folds), 300)
def test_shuffle_kfold_stratifiedkfold_reproducibility():
# Check that when the shuffle is True multiple split calls produce the
# same split when random_state is set
X = np.ones(15) # Divisible by 3
y = [0] * 7 + [1] * 8
X2 = np.ones(16) # Not divisible by 3
y2 = [0] * 8 + [1] * 8
kf = KFold(3, shuffle=True, random_state=0)
skf = StratifiedKFold(3, shuffle=True, random_state=0)
for cv in (kf, skf):
np.testing.assert_equal(list(cv.split(X, y)), list(cv.split(X, y)))
np.testing.assert_equal(list(cv.split(X2, y2)), list(cv.split(X2, y2)))
kf = KFold(3, shuffle=True)
skf = StratifiedKFold(3, shuffle=True)
for cv in (kf, skf):
for data in zip((X, X2), (y, y2)):
# Test if the two splits are different
# numpy's assert_equal properly compares nested lists
try:
np.testing.assert_array_equal(list(cv.split(*data)),
list(cv.split(*data)))
except AssertionError:
pass
else:
raise AssertionError("The splits for data, %s, are same even "
"when random state is not set" % data)
def test_shuffle_stratifiedkfold():
# Check that shuffling is happening when requested, and for proper
# sample coverage
X_40 = np.ones(40)
y = [0] * 20 + [1] * 20
kf0 = StratifiedKFold(5, shuffle=True, random_state=0)
kf1 = StratifiedKFold(5, shuffle=True, random_state=1)
for (_, test0), (_, test1) in zip(kf0.split(X_40, y),
kf1.split(X_40, y)):
assert_not_equal(set(test0), set(test1))
check_cv_coverage(kf0, X_40, y, groups=None, expected_n_splits=5)
def test_kfold_can_detect_dependent_samples_on_digits(): # see #2372
# The digits samples are dependent: they are apparently grouped by authors
# although we don't have any information on the groups segment locations
# for this data. We can highlight this fact by computing k-fold cross-
# validation with and without shuffling: we observe that the shuffling case
# wrongly makes the IID assumption and is therefore too optimistic: it
    # estimates a much higher accuracy (around 0.93) than that of the
    # non-shuffling variant (around 0.81).
X, y = digits.data[:600], digits.target[:600]
model = SVC(C=10, gamma=0.005)
n_splits = 3
cv = KFold(n_splits=n_splits, shuffle=False)
mean_score = cross_val_score(model, X, y, cv=cv).mean()
assert_greater(0.92, mean_score)
assert_greater(mean_score, 0.80)
# Shuffling the data artificially breaks the dependency and hides the
# overfitting of the model with regards to the writing style of the authors
# by yielding a seriously overestimated score:
cv = KFold(n_splits, shuffle=True, random_state=0)
mean_score = cross_val_score(model, X, y, cv=cv).mean()
assert_greater(mean_score, 0.92)
cv = KFold(n_splits, shuffle=True, random_state=1)
mean_score = cross_val_score(model, X, y, cv=cv).mean()
assert_greater(mean_score, 0.92)
# Similarly, StratifiedKFold should try to shuffle the data as little
# as possible (while respecting the balanced class constraints)
# and thus be able to detect the dependency by not overestimating
# the CV score either. As the digits dataset is approximately balanced
# the estimated mean score is close to the score measured with
# non-shuffled KFold
cv = StratifiedKFold(n_splits)
mean_score = cross_val_score(model, X, y, cv=cv).mean()
assert_greater(0.93, mean_score)
assert_greater(mean_score, 0.80)
def test_shuffle_split():
ss1 = ShuffleSplit(test_size=0.2, random_state=0).split(X)
ss2 = ShuffleSplit(test_size=2, random_state=0).split(X)
ss3 = ShuffleSplit(test_size=np.int32(2), random_state=0).split(X)
for typ in six.integer_types:
ss4 = ShuffleSplit(test_size=typ(2), random_state=0).split(X)
for t1, t2, t3, t4 in zip(ss1, ss2, ss3, ss4):
assert_array_equal(t1[0], t2[0])
assert_array_equal(t2[0], t3[0])
assert_array_equal(t3[0], t4[0])
assert_array_equal(t1[1], t2[1])
assert_array_equal(t2[1], t3[1])
assert_array_equal(t3[1], t4[1])
@ignore_warnings
def test_stratified_shuffle_split_init():
X = np.arange(7)
y = np.asarray([0, 1, 1, 1, 2, 2, 2])
# Check that error is raised if there is a class with only one sample
assert_raises(ValueError, next,
StratifiedShuffleSplit(3, 0.2).split(X, y))
# Check that error is raised if the test set size is smaller than n_classes
assert_raises(ValueError, next, StratifiedShuffleSplit(3, 2).split(X, y))
# Check that error is raised if the train set size is smaller than
# n_classes
assert_raises(ValueError, next,
StratifiedShuffleSplit(3, 3, 2).split(X, y))
X = np.arange(9)
y = np.asarray([0, 0, 0, 1, 1, 1, 2, 2, 2])
    # Check that errors are raised if there are not enough samples
assert_raises(ValueError, StratifiedShuffleSplit, 3, 0.5, 0.6)
assert_raises(ValueError, next,
StratifiedShuffleSplit(3, 8, 0.6).split(X, y))
assert_raises(ValueError, next,
StratifiedShuffleSplit(3, 0.6, 8).split(X, y))
# Train size or test size too small
assert_raises(ValueError, next,
StratifiedShuffleSplit(train_size=2).split(X, y))
assert_raises(ValueError, next,
StratifiedShuffleSplit(test_size=2).split(X, y))
def test_stratified_shuffle_split_respects_test_size():
y = np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2])
test_size = 5
train_size = 10
sss = StratifiedShuffleSplit(6, test_size=test_size, train_size=train_size,
random_state=0).split(np.ones(len(y)), y)
for train, test in sss:
assert_equal(len(train), train_size)
assert_equal(len(test), test_size)
def test_stratified_shuffle_split_iter():
ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2] * 2),
np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
np.array([-1] * 800 + [1] * 50),
np.concatenate([[i] * (100 + i) for i in range(11)]),
[1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3],
['1', '1', '1', '1', '2', '2', '2', '3', '3', '3', '3', '3'],
]
for y in ys:
sss = StratifiedShuffleSplit(6, test_size=0.33,
random_state=0).split(np.ones(len(y)), y)
y = np.asanyarray(y) # To make it indexable for y[train]
# this is how test-size is computed internally
# in _validate_shuffle_split
test_size = np.ceil(0.33 * len(y))
train_size = len(y) - test_size
for train, test in sss:
assert_array_equal(np.unique(y[train]), np.unique(y[test]))
            # Checks if folds keep class proportions
p_train = (np.bincount(np.unique(y[train],
return_inverse=True)[1]) /
float(len(y[train])))
p_test = (np.bincount(np.unique(y[test],
return_inverse=True)[1]) /
float(len(y[test])))
assert_array_almost_equal(p_train, p_test, 1)
assert_equal(len(train) + len(test), y.size)
assert_equal(len(train), train_size)
assert_equal(len(test), test_size)
assert_array_equal(np.lib.arraysetops.intersect1d(train, test), [])
def test_stratified_shuffle_split_even():
    # Test that StratifiedShuffleSplit draws indices with equal chance
n_folds = 5
n_splits = 1000
def assert_counts_are_ok(idx_counts, p):
# Here we test that the distribution of the counts
# per index is close enough to a binomial
threshold = 0.05 / n_splits
bf = stats.binom(n_splits, p)
for count in idx_counts:
prob = bf.pmf(count)
assert_true(prob > threshold,
"An index is not drawn with chance corresponding "
"to even draws")
for n_samples in (6, 22):
groups = np.array((n_samples // 2) * [0, 1])
splits = StratifiedShuffleSplit(n_splits=n_splits,
test_size=1. / n_folds,
random_state=0)
train_counts = [0] * n_samples
test_counts = [0] * n_samples
n_splits_actual = 0
for train, test in splits.split(X=np.ones(n_samples), y=groups):
n_splits_actual += 1
for counter, ids in [(train_counts, train), (test_counts, test)]:
for id in ids:
counter[id] += 1
assert_equal(n_splits_actual, n_splits)
n_train, n_test = _validate_shuffle_split(
n_samples, test_size=1. / n_folds, train_size=1. - (1. / n_folds))
assert_equal(len(train), n_train)
assert_equal(len(test), n_test)
assert_equal(len(set(train).intersection(test)), 0)
group_counts = np.unique(groups)
assert_equal(splits.test_size, 1.0 / n_folds)
assert_equal(n_train + n_test, len(groups))
assert_equal(len(group_counts), 2)
ex_test_p = float(n_test) / n_samples
ex_train_p = float(n_train) / n_samples
assert_counts_are_ok(train_counts, ex_train_p)
assert_counts_are_ok(test_counts, ex_test_p)
def test_stratified_shuffle_split_overlap_train_test_bug():
# See https://github.com/scikit-learn/scikit-learn/issues/6121 for
# the original bug report
y = [0, 1, 2, 3] * 3 + [4, 5] * 5
X = np.ones_like(y)
sss = StratifiedShuffleSplit(n_splits=1,
test_size=0.5, random_state=0)
train, test = next(sss.split(X=X, y=y))
# no overlap
assert_array_equal(np.intersect1d(train, test), [])
# complete partition
assert_array_equal(np.union1d(train, test), np.arange(len(y)))
def test_stratified_shuffle_split_multilabel():
# fix for issue 9037
for y in [np.array([[0, 1], [1, 0], [1, 0], [0, 1]]),
np.array([[0, 1], [1, 1], [1, 1], [0, 1]])]:
X = np.ones_like(y)
sss = StratifiedShuffleSplit(n_splits=1, test_size=0.5, random_state=0)
train, test = next(sss.split(X=X, y=y))
y_train = y[train]
y_test = y[test]
# no overlap
assert_array_equal(np.intersect1d(train, test), [])
# complete partition
assert_array_equal(np.union1d(train, test), np.arange(len(y)))
# correct stratification of entire rows
# (by design, here y[:, 0] uniquely determines the entire row of y)
expected_ratio = np.mean(y[:, 0])
assert_equal(expected_ratio, np.mean(y_train[:, 0]))
assert_equal(expected_ratio, np.mean(y_test[:, 0]))
def test_predefinedsplit_with_kfold_split():
# Check that PredefinedSplit can reproduce a split generated by Kfold.
folds = -1 * np.ones(10)
kf_train = []
kf_test = []
for i, (train_ind, test_ind) in enumerate(KFold(5, shuffle=True).split(X)):
kf_train.append(train_ind)
kf_test.append(test_ind)
folds[test_ind] = i
ps_train = []
ps_test = []
ps = PredefinedSplit(folds)
# n_splits is simply the no of unique folds
assert_equal(len(np.unique(folds)), ps.get_n_splits())
for train_ind, test_ind in ps.split():
ps_train.append(train_ind)
ps_test.append(test_ind)
assert_array_equal(ps_train, kf_train)
assert_array_equal(ps_test, kf_test)
def test_group_shuffle_split():
for groups_i in test_groups:
X = y = np.ones(len(groups_i))
n_splits = 6
test_size = 1./3
slo = GroupShuffleSplit(n_splits, test_size=test_size, random_state=0)
# Make sure the repr works
repr(slo)
# Test that the length is correct
assert_equal(slo.get_n_splits(X, y, groups=groups_i), n_splits)
l_unique = np.unique(groups_i)
l = np.asarray(groups_i)
for train, test in slo.split(X, y, groups=groups_i):
# First test: no train group is in the test set and vice versa
l_train_unique = np.unique(l[train])
l_test_unique = np.unique(l[test])
assert_false(np.any(np.in1d(l[train], l_test_unique)))
assert_false(np.any(np.in1d(l[test], l_train_unique)))
# Second test: train and test add up to all the data
assert_equal(l[train].size + l[test].size, l.size)
# Third test: train and test are disjoint
assert_array_equal(np.intersect1d(train, test), [])
# Fourth test:
# unique train and test groups are correct, +- 1 for rounding error
assert_true(abs(len(l_test_unique) -
round(test_size * len(l_unique))) <= 1)
assert_true(abs(len(l_train_unique) -
round((1.0 - test_size) * len(l_unique))) <= 1)
def test_leave_one_p_group_out():
logo = LeaveOneGroupOut()
lpgo_1 = LeavePGroupsOut(n_groups=1)
lpgo_2 = LeavePGroupsOut(n_groups=2)
# Make sure the repr works
assert_equal(repr(logo), 'LeaveOneGroupOut()')
assert_equal(repr(lpgo_1), 'LeavePGroupsOut(n_groups=1)')
assert_equal(repr(lpgo_2), 'LeavePGroupsOut(n_groups=2)')
assert_equal(repr(LeavePGroupsOut(n_groups=3)),
'LeavePGroupsOut(n_groups=3)')
for j, (cv, p_groups_out) in enumerate(((logo, 1), (lpgo_1, 1),
(lpgo_2, 2))):
for i, groups_i in enumerate(test_groups):
n_groups = len(np.unique(groups_i))
n_splits = (n_groups if p_groups_out == 1
else n_groups * (n_groups - 1) / 2)
X = y = np.ones(len(groups_i))
# Test that the length is correct
assert_equal(cv.get_n_splits(X, y, groups=groups_i), n_splits)
groups_arr = np.asarray(groups_i)
# Split using the original list / array / list of string groups_i
for train, test in cv.split(X, y, groups=groups_i):
# First test: no train group is in the test set and vice versa
assert_array_equal(np.intersect1d(groups_arr[train],
groups_arr[test]).tolist(),
[])
# Second test: train and test add up to all the data
assert_equal(len(train) + len(test), len(groups_i))
# Third test:
# The number of groups in test must be equal to p_groups_out
                assert_equal(np.unique(groups_arr[test]).shape[0], p_groups_out)
# check get_n_splits() with dummy parameters
assert_equal(logo.get_n_splits(None, None, ['a', 'b', 'c', 'b', 'c']), 3)
assert_equal(logo.get_n_splits(groups=[1.0, 1.1, 1.0, 1.2]), 3)
assert_equal(lpgo_2.get_n_splits(None, None, np.arange(4)), 6)
assert_equal(lpgo_1.get_n_splits(groups=np.arange(4)), 4)
# raise ValueError if a `groups` parameter is illegal
with assert_raises(ValueError):
logo.get_n_splits(None, None, [0.0, np.nan, 0.0])
with assert_raises(ValueError):
lpgo_2.get_n_splits(None, None, [0.0, np.inf, 0.0])
msg = "The 'groups' parameter should not be None."
assert_raise_message(ValueError, msg,
logo.get_n_splits, None, None, None)
assert_raise_message(ValueError, msg,
lpgo_1.get_n_splits, None, None, None)
def test_leave_group_out_changing_groups():
# Check that LeaveOneGroupOut and LeavePGroupsOut work normally if
# the groups variable is changed before calling split
groups = np.array([0, 1, 2, 1, 1, 2, 0, 0])
X = np.ones(len(groups))
groups_changing = np.array(groups, copy=True)
lolo = LeaveOneGroupOut().split(X, groups=groups)
lolo_changing = LeaveOneGroupOut().split(X, groups=groups)
lplo = LeavePGroupsOut(n_groups=2).split(X, groups=groups)
lplo_changing = LeavePGroupsOut(n_groups=2).split(X, groups=groups)
groups_changing[:] = 0
for llo, llo_changing in [(lolo, lolo_changing), (lplo, lplo_changing)]:
for (train, test), (train_chan, test_chan) in zip(llo, llo_changing):
assert_array_equal(train, train_chan)
assert_array_equal(test, test_chan)
# n_splits = no of 2 (p) group combinations of the unique groups = 3C2 = 3
assert_equal(
3, LeavePGroupsOut(n_groups=2).get_n_splits(X, y=X,
groups=groups))
# n_splits = no of unique groups (C(uniq_lbls, 1) = n_unique_groups)
assert_equal(3, LeaveOneGroupOut().get_n_splits(X, y=X,
groups=groups))
def test_leave_one_p_group_out_error_on_fewer_number_of_groups():
X = y = groups = np.ones(0)
assert_raise_message(ValueError, "Found array with 0 sample(s)", next,
LeaveOneGroupOut().split(X, y, groups))
X = y = groups = np.ones(1)
msg = ("The groups parameter contains fewer than 2 unique groups ([ 1.]). "
"LeaveOneGroupOut expects at least 2.")
assert_raise_message(ValueError, msg, next,
LeaveOneGroupOut().split(X, y, groups))
X = y = groups = np.ones(1)
msg = ("The groups parameter contains fewer than (or equal to) n_groups "
"(3) numbers of unique groups ([ 1.]). LeavePGroupsOut expects "
"that at least n_groups + 1 (4) unique groups be present")
assert_raise_message(ValueError, msg, next,
LeavePGroupsOut(n_groups=3).split(X, y, groups))
X = y = groups = np.arange(3)
msg = ("The groups parameter contains fewer than (or equal to) n_groups "
"(3) numbers of unique groups ([0 1 2]). LeavePGroupsOut expects "
"that at least n_groups + 1 (4) unique groups be present")
assert_raise_message(ValueError, msg, next,
LeavePGroupsOut(n_groups=3).split(X, y, groups))
@ignore_warnings
def test_repeated_cv_value_errors():
# n_repeats is not integer or <= 0
for cv in (RepeatedKFold, RepeatedStratifiedKFold):
assert_raises(ValueError, cv, n_repeats=0)
assert_raises(ValueError, cv, n_repeats=1.5)
def test_repeated_kfold_deterministic_split():
X = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]]
random_state = 258173307
rkf = RepeatedKFold(
n_splits=2,
n_repeats=2,
random_state=random_state)
    # split should produce the same, deterministic splits on
    # each call
for _ in range(3):
splits = rkf.split(X)
train, test = next(splits)
assert_array_equal(train, [2, 4])
assert_array_equal(test, [0, 1, 3])
train, test = next(splits)
assert_array_equal(train, [0, 1, 3])
assert_array_equal(test, [2, 4])
train, test = next(splits)
assert_array_equal(train, [0, 1])
assert_array_equal(test, [2, 3, 4])
train, test = next(splits)
assert_array_equal(train, [2, 3, 4])
assert_array_equal(test, [0, 1])
assert_raises(StopIteration, next, splits)
def test_get_n_splits_for_repeated_kfold():
n_splits = 3
n_repeats = 4
rkf = RepeatedKFold(n_splits, n_repeats)
expected_n_splits = n_splits * n_repeats
assert_equal(expected_n_splits, rkf.get_n_splits())
def test_get_n_splits_for_repeated_stratified_kfold():
n_splits = 3
n_repeats = 4
rskf = RepeatedStratifiedKFold(n_splits, n_repeats)
expected_n_splits = n_splits * n_repeats
assert_equal(expected_n_splits, rskf.get_n_splits())
def test_repeated_stratified_kfold_deterministic_split():
X = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]]
y = [1, 1, 1, 0, 0]
random_state = 1944695409
rskf = RepeatedStratifiedKFold(
n_splits=2,
n_repeats=2,
random_state=random_state)
    # split should produce the same, deterministic splits on
    # each call
for _ in range(3):
splits = rskf.split(X, y)
train, test = next(splits)
assert_array_equal(train, [1, 4])
assert_array_equal(test, [0, 2, 3])
train, test = next(splits)
assert_array_equal(train, [0, 2, 3])
assert_array_equal(test, [1, 4])
train, test = next(splits)
assert_array_equal(train, [2, 3])
assert_array_equal(test, [0, 1, 4])
train, test = next(splits)
assert_array_equal(train, [0, 1, 4])
assert_array_equal(test, [2, 3])
assert_raises(StopIteration, next, splits)
def test_train_test_split_errors():
assert_raises(ValueError, train_test_split)
assert_raises(ValueError, train_test_split, range(3), train_size=1.1)
assert_raises(ValueError, train_test_split, range(3), test_size=0.6,
train_size=0.6)
assert_raises(ValueError, train_test_split, range(3),
test_size=np.float32(0.6), train_size=np.float32(0.6))
assert_raises(ValueError, train_test_split, range(3),
test_size="wrong_type")
assert_raises(ValueError, train_test_split, range(3), test_size=2,
train_size=4)
assert_raises(TypeError, train_test_split, range(3),
some_argument=1.1)
assert_raises(ValueError, train_test_split, range(3), range(42))
assert_raises(ValueError, train_test_split, range(10),
shuffle=False, stratify=True)
def test_train_test_split():
X = np.arange(100).reshape((10, 10))
X_s = coo_matrix(X)
y = np.arange(10)
# simple test
split = train_test_split(X, y, test_size=None, train_size=.5)
X_train, X_test, y_train, y_test = split
assert_equal(len(y_test), len(y_train))
# test correspondence of X and y
assert_array_equal(X_train[:, 0], y_train * 10)
assert_array_equal(X_test[:, 0], y_test * 10)
# don't convert lists to anything else by default
split = train_test_split(X, X_s, y.tolist())
X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
assert_true(isinstance(y_train, list))
assert_true(isinstance(y_test, list))
# allow nd-arrays
X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
split = train_test_split(X_4d, y_3d)
assert_equal(split[0].shape, (7, 5, 3, 2))
assert_equal(split[1].shape, (3, 5, 3, 2))
assert_equal(split[2].shape, (7, 7, 11))
assert_equal(split[3].shape, (3, 7, 11))
# test stratification option
y = np.array([1, 1, 1, 1, 2, 2, 2, 2])
for test_size, exp_test_size in zip([2, 4, 0.25, 0.5, 0.75],
[2, 4, 2, 4, 6]):
train, test = train_test_split(y, test_size=test_size,
stratify=y,
random_state=0)
assert_equal(len(test), exp_test_size)
assert_equal(len(test) + len(train), len(y))
# check the 1:1 ratio of ones and twos in the data is preserved
assert_equal(np.sum(train == 1), np.sum(train == 2))
# test unshuffled split
y = np.arange(10)
for test_size in [2, 0.2]:
train, test = train_test_split(y, shuffle=False, test_size=test_size)
assert_array_equal(test, [8, 9])
assert_array_equal(train, [0, 1, 2, 3, 4, 5, 6, 7])
@ignore_warnings
def test_train_test_split_pandas():
# check train_test_split doesn't destroy pandas dataframe
types = [MockDataFrame]
try:
from pandas import DataFrame
types.append(DataFrame)
except ImportError:
pass
for InputFeatureType in types:
# X dataframe
X_df = InputFeatureType(X)
X_train, X_test = train_test_split(X_df)
assert_true(isinstance(X_train, InputFeatureType))
assert_true(isinstance(X_test, InputFeatureType))
def test_train_test_split_sparse():
# check that train_test_split converts scipy sparse matrices
# to csr, as stated in the documentation
X = np.arange(100).reshape((10, 10))
sparse_types = [csr_matrix, csc_matrix, coo_matrix]
for InputFeatureType in sparse_types:
X_s = InputFeatureType(X)
X_train, X_test = train_test_split(X_s)
assert_true(isinstance(X_train, csr_matrix))
assert_true(isinstance(X_test, csr_matrix))
def test_train_test_split_mock_pandas():
# X mock dataframe
X_df = MockDataFrame(X)
X_train, X_test = train_test_split(X_df)
assert_true(isinstance(X_train, MockDataFrame))
assert_true(isinstance(X_test, MockDataFrame))
X_train_arr, X_test_arr = train_test_split(X_df)
def test_train_test_split_list_input():
# Check that when y is a list / list of string labels, it works.
X = np.ones(7)
y1 = ['1'] * 4 + ['0'] * 3
y2 = np.hstack((np.ones(4), np.zeros(3)))
y3 = y2.tolist()
for stratify in (True, False):
X_train1, X_test1, y_train1, y_test1 = train_test_split(
X, y1, stratify=y1 if stratify else None, random_state=0)
X_train2, X_test2, y_train2, y_test2 = train_test_split(
X, y2, stratify=y2 if stratify else None, random_state=0)
X_train3, X_test3, y_train3, y_test3 = train_test_split(
X, y3, stratify=y3 if stratify else None, random_state=0)
np.testing.assert_equal(X_train1, X_train2)
np.testing.assert_equal(y_train2, y_train3)
np.testing.assert_equal(X_test1, X_test3)
np.testing.assert_equal(y_test3, y_test2)
@ignore_warnings
def test_shufflesplit_errors():
# When the {test|train}_size is a float/invalid, error is raised at init
assert_raises(ValueError, ShuffleSplit, test_size=None, train_size=None)
assert_raises(ValueError, ShuffleSplit, test_size=2.0)
assert_raises(ValueError, ShuffleSplit, test_size=1.0)
assert_raises(ValueError, ShuffleSplit, test_size=0.1, train_size=0.95)
assert_raises(ValueError, ShuffleSplit, train_size=1j)
# When the {test|train}_size is an int, validation is based on the input X
# and happens at split(...)
assert_raises(ValueError, next, ShuffleSplit(test_size=11).split(X))
assert_raises(ValueError, next, ShuffleSplit(test_size=10).split(X))
assert_raises(ValueError, next, ShuffleSplit(test_size=8,
train_size=3).split(X))
def test_shufflesplit_reproducible():
# Check that iterating twice on the ShuffleSplit gives the same
# sequence of train-test when the random_state is given
ss = ShuffleSplit(random_state=21)
assert_array_equal(list(a for a, b in ss.split(X)),
list(a for a, b in ss.split(X)))
def test_stratifiedshufflesplit_list_input():
# Check that when y is a list / list of string labels, it works.
sss = StratifiedShuffleSplit(test_size=2, random_state=42)
X = np.ones(7)
y1 = ['1'] * 4 + ['0'] * 3
y2 = np.hstack((np.ones(4), np.zeros(3)))
y3 = y2.tolist()
np.testing.assert_equal(list(sss.split(X, y1)),
list(sss.split(X, y2)))
np.testing.assert_equal(list(sss.split(X, y3)),
list(sss.split(X, y2)))
def test_train_test_split_allow_nans():
# Check that train_test_split allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
    y = np.repeat([0, 1], X.shape[0] // 2)
train_test_split(X, y, test_size=0.2, random_state=42)
def test_check_cv():
X = np.ones(9)
cv = check_cv(3, classifier=False)
# Use numpy.testing.assert_equal which recursively compares
# lists of lists
np.testing.assert_equal(list(KFold(3).split(X)), list(cv.split(X)))
y_binary = np.array([0, 1, 0, 1, 0, 0, 1, 1, 1])
cv = check_cv(3, y_binary, classifier=True)
np.testing.assert_equal(list(StratifiedKFold(3).split(X, y_binary)),
list(cv.split(X, y_binary)))
y_multiclass = np.array([0, 1, 0, 1, 2, 1, 2, 0, 2])
cv = check_cv(3, y_multiclass, classifier=True)
np.testing.assert_equal(list(StratifiedKFold(3).split(X, y_multiclass)),
list(cv.split(X, y_multiclass)))
X = np.ones(5)
y_multilabel = np.array([[0, 0, 0, 0], [0, 1, 1, 0], [0, 0, 0, 1],
[1, 1, 0, 1], [0, 0, 1, 0]])
cv = check_cv(3, y_multilabel, classifier=True)
np.testing.assert_equal(list(KFold(3).split(X)), list(cv.split(X)))
y_multioutput = np.array([[1, 2], [0, 3], [0, 0], [3, 1], [2, 0]])
cv = check_cv(3, y_multioutput, classifier=True)
np.testing.assert_equal(list(KFold(3).split(X)), list(cv.split(X)))
# Check if the old style classes are wrapped to have a split method
X = np.ones(9)
y_multiclass = np.array([0, 1, 0, 1, 2, 1, 2, 0, 2])
cv1 = check_cv(3, y_multiclass, classifier=True)
with warnings.catch_warnings(record=True):
from sklearn.cross_validation import StratifiedKFold as OldSKF
cv2 = check_cv(OldSKF(y_multiclass, n_folds=3))
np.testing.assert_equal(list(cv1.split(X, y_multiclass)),
list(cv2.split()))
assert_raises(ValueError, check_cv, cv="lolo")
def test_cv_iterable_wrapper():
y_multiclass = np.array([0, 1, 0, 1, 2, 1, 2, 0, 2])
with warnings.catch_warnings(record=True):
from sklearn.cross_validation import StratifiedKFold as OldSKF
cv = OldSKF(y_multiclass, n_folds=3)
wrapped_old_skf = _CVIterableWrapper(cv)
# Check if split works correctly
np.testing.assert_equal(list(cv), list(wrapped_old_skf.split()))
# Check if get_n_splits works correctly
assert_equal(len(cv), wrapped_old_skf.get_n_splits())
kf_iter = KFold(n_splits=5).split(X, y)
kf_iter_wrapped = check_cv(kf_iter)
# Since the wrapped iterable is enlisted and stored,
# split can be called any number of times to produce
# consistent results.
np.testing.assert_equal(list(kf_iter_wrapped.split(X, y)),
list(kf_iter_wrapped.split(X, y)))
    # If the splits are randomized, successive calls to split yield different
    # results
kf_randomized_iter = KFold(n_splits=5, shuffle=True).split(X, y)
kf_randomized_iter_wrapped = check_cv(kf_randomized_iter)
    # numpy's assert_equal properly compares nested lists
np.testing.assert_equal(list(kf_randomized_iter_wrapped.split(X, y)),
list(kf_randomized_iter_wrapped.split(X, y)))
try:
np.testing.assert_equal(list(kf_iter_wrapped.split(X, y)),
list(kf_randomized_iter_wrapped.split(X, y)))
splits_are_equal = True
except AssertionError:
splits_are_equal = False
assert_false(splits_are_equal, "If the splits are randomized, "
"successive calls to split should yield different results")
def test_group_kfold():
rng = np.random.RandomState(0)
# Parameters of the test
n_groups = 15
n_samples = 1000
n_splits = 5
X = y = np.ones(n_samples)
# Construct the test data
tolerance = 0.05 * n_samples # 5 percent error allowed
groups = rng.randint(0, n_groups, n_samples)
ideal_n_groups_per_fold = n_samples // n_splits
# Get the test fold indices from the test set indices of each fold
folds = np.zeros(n_samples)
lkf = GroupKFold(n_splits=n_splits)
for i, (_, test) in enumerate(lkf.split(X, y, groups)):
folds[test] = i
# Check that folds have approximately the same size
assert_equal(len(folds), len(groups))
for i in np.unique(folds):
assert_greater_equal(tolerance,
abs(sum(folds == i) - ideal_n_groups_per_fold))
# Check that each group appears only in 1 fold
for group in np.unique(groups):
assert_equal(len(np.unique(folds[groups == group])), 1)
# Check that no group is on both sides of the split
groups = np.asarray(groups, dtype=object)
for train, test in lkf.split(X, y, groups):
assert_equal(len(np.intersect1d(groups[train], groups[test])), 0)
# Construct the test data
groups = np.array(['Albert', 'Jean', 'Bertrand', 'Michel', 'Jean',
'Francis', 'Robert', 'Michel', 'Rachel', 'Lois',
'Michelle', 'Bernard', 'Marion', 'Laura', 'Jean',
'Rachel', 'Franck', 'John', 'Gael', 'Anna', 'Alix',
'Robert', 'Marion', 'David', 'Tony', 'Abel', 'Becky',
'Madmood', 'Cary', 'Mary', 'Alexandre', 'David',
'Francis', 'Barack', 'Abdoul', 'Rasha', 'Xi', 'Silvia'])
n_groups = len(np.unique(groups))
n_samples = len(groups)
n_splits = 5
tolerance = 0.05 * n_samples # 5 percent error allowed
ideal_n_groups_per_fold = n_samples // n_splits
X = y = np.ones(n_samples)
# Get the test fold indices from the test set indices of each fold
folds = np.zeros(n_samples)
for i, (_, test) in enumerate(lkf.split(X, y, groups)):
folds[test] = i
# Check that folds have approximately the same size
assert_equal(len(folds), len(groups))
for i in np.unique(folds):
assert_greater_equal(tolerance,
abs(sum(folds == i) - ideal_n_groups_per_fold))
# Check that each group appears only in 1 fold
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
for group in np.unique(groups):
assert_equal(len(np.unique(folds[groups == group])), 1)
# Check that no group is on both sides of the split
groups = np.asarray(groups, dtype=object)
for train, test in lkf.split(X, y, groups):
assert_equal(len(np.intersect1d(groups[train], groups[test])), 0)
# groups can also be a list
cv_iter = list(lkf.split(X, y, groups.tolist()))
for (train1, test1), (train2, test2) in zip(lkf.split(X, y, groups),
cv_iter):
assert_array_equal(train1, train2)
assert_array_equal(test1, test2)
# Should fail if there are more folds than groups
groups = np.array([1, 1, 1, 2, 2])
X = y = np.ones(len(groups))
assert_raises_regexp(ValueError, "Cannot have number of splits.*greater",
next, GroupKFold(n_splits=3).split(X, y, groups))
def test_time_series_cv():
X = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14]]
# Should fail if there are more folds than samples
assert_raises_regexp(ValueError, "Cannot have number of folds.*greater",
next,
TimeSeriesSplit(n_splits=7).split(X))
tscv = TimeSeriesSplit(2)
# Manually check that Time Series CV preserves the data
# ordering on toy datasets
splits = tscv.split(X[:-1])
train, test = next(splits)
assert_array_equal(train, [0, 1])
assert_array_equal(test, [2, 3])
train, test = next(splits)
assert_array_equal(train, [0, 1, 2, 3])
assert_array_equal(test, [4, 5])
splits = TimeSeriesSplit(2).split(X)
train, test = next(splits)
assert_array_equal(train, [0, 1, 2])
assert_array_equal(test, [3, 4])
train, test = next(splits)
assert_array_equal(train, [0, 1, 2, 3, 4])
assert_array_equal(test, [5, 6])
# Check get_n_splits returns the correct number of splits
splits = TimeSeriesSplit(2).split(X)
n_splits_actual = len(list(splits))
assert_equal(n_splits_actual, tscv.get_n_splits())
assert_equal(n_splits_actual, 2)
def _check_time_series_max_train_size(splits, check_splits, max_train_size):
for (train, test), (check_train, check_test) in zip(splits, check_splits):
assert_array_equal(test, check_test)
assert_true(len(check_train) <= max_train_size)
suffix_start = max(len(train) - max_train_size, 0)
assert_array_equal(check_train, train[suffix_start:])
def test_time_series_max_train_size():
X = np.zeros((6, 1))
    splits = TimeSeriesSplit(n_splits=3).split(X)
    check_splits = TimeSeriesSplit(n_splits=3, max_train_size=3).split(X)
    _check_time_series_max_train_size(splits, check_splits, max_train_size=3)
    # Test for the case where the size of a fold is greater than
    # max_train_size; the generators above are exhausted, so recreate them
    splits = TimeSeriesSplit(n_splits=3).split(X)
    check_splits = TimeSeriesSplit(n_splits=3, max_train_size=2).split(X)
    _check_time_series_max_train_size(splits, check_splits, max_train_size=2)
    # Test for the case where the size of each fold is less than
    # max_train_size
    splits = TimeSeriesSplit(n_splits=3).split(X)
    check_splits = TimeSeriesSplit(n_splits=3, max_train_size=5).split(X)
    _check_time_series_max_train_size(splits, check_splits, max_train_size=5)
def test_nested_cv():
# Test if nested cross validation works with different combinations of cv
rng = np.random.RandomState(0)
X, y = make_classification(n_samples=15, n_classes=2, random_state=0)
groups = rng.randint(0, 5, 15)
cvs = [LeaveOneGroupOut(), LeaveOneOut(), GroupKFold(), StratifiedKFold(),
StratifiedShuffleSplit(n_splits=3, random_state=0)]
for inner_cv, outer_cv in combinations_with_replacement(cvs, 2):
gs = GridSearchCV(Ridge(), param_grid={'alpha': [1, .1]},
cv=inner_cv)
cross_val_score(gs, X=X, y=y, groups=groups, cv=outer_cv,
fit_params={'groups': groups})
def test_train_test_default_warning():
assert_warns(FutureWarning, ShuffleSplit, train_size=0.75)
assert_warns(FutureWarning, GroupShuffleSplit, train_size=0.75)
assert_warns(FutureWarning, StratifiedShuffleSplit, train_size=0.75)
assert_warns(FutureWarning, train_test_split, range(3),
train_size=0.75)
def test_build_repr():
class MockSplitter:
def __init__(self, a, b=0, c=None):
self.a = a
self.b = b
self.c = c
def __repr__(self):
return _build_repr(self)
assert_equal(repr(MockSplitter(5, 6)), "MockSplitter(a=5, b=6, c=None)")
| bsd-3-clause |
itanoss/noritor-core | tests/test_generator.py | 1 | 1245 | import os
import shutil
import pytest
from noritor.configuration import Configuration
from noritor.docker import DockerCommander
from noritor.generator import Generator
work_dir = os.path.join(os.getcwd(), 'test_build')
docker_commander = DockerCommander()
@pytest.fixture
def generator():
configuration = Configuration(root_dir=work_dir)
generator = Generator('Radiator', configuration=configuration, docker_commander=docker_commander)
generator.initialize()
yield generator
# teardown
shutil.rmtree(configuration.root_dir)
def test_run_after_commit(generator):
generator.repository.write_code('''
import pandas_datareader.data as web
import datetime
db_name = 'test_database'
collection_name = 'Quotes'
# Gathering data
start = datetime.datetime(2010, 1, 1)
end = datetime.datetime.now()
f = web.DataReader("F", 'yahoo', start, end)
print(f)
''')
generator.repository.write_requirements('pandas_datareader')
generator.repository.write_dockerfile()
generator.repository.commit('Create crawling from yahoo finance')
container = generator.run()
assert container['Id'] is not None
# teardown
import subprocess
subprocess.call('docker stop $(docker ps -q)', shell=True)
| gpl-3.0 |
perimosocordiae/scipy | doc/source/tutorial/stats/plots/kde_plot3.py | 12 | 1249 | import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
rng = np.random.default_rng()
x1 = rng.normal(size=200) # random data, normal distribution
xs = np.linspace(x1.min()-1, x1.max()+1, 200)
kde1 = stats.gaussian_kde(x1)
kde2 = stats.gaussian_kde(x1, bw_method='silverman')
fig = plt.figure(figsize=(8, 6))
ax1 = fig.add_subplot(211)
ax1.plot(x1, np.zeros(x1.shape), 'b+', ms=12) # rug plot
ax1.plot(xs, kde1(xs), 'k-', label="Scott's Rule")
ax1.plot(xs, kde2(xs), 'b-', label="Silverman's Rule")
ax1.plot(xs, stats.norm.pdf(xs), 'r--', label="True PDF")
ax1.set_xlabel('x')
ax1.set_ylabel('Density')
ax1.set_title("Normal (top) and Student's T$_{df=5}$ (bottom) distributions")
ax1.legend(loc=1)
x2 = stats.t.rvs(5, size=200, random_state=rng) # random data, T distribution
xs = np.linspace(x2.min() - 1, x2.max() + 1, 200)
kde3 = stats.gaussian_kde(x2)
kde4 = stats.gaussian_kde(x2, bw_method='silverman')
ax2 = fig.add_subplot(212)
ax2.plot(x2, np.zeros(x2.shape), 'b+', ms=12) # rug plot
ax2.plot(xs, kde3(xs), 'k-', label="Scott's Rule")
ax2.plot(xs, kde4(xs), 'b-', label="Silverman's Rule")
ax2.plot(xs, stats.t.pdf(xs, 5), 'r--', label="True PDF")
ax2.set_xlabel('x')
ax2.set_ylabel('Density')
plt.show()
| bsd-3-clause |
utiasSTARS/pykitti | demos/demo_raw.py | 1 | 2939 | """Example of pykitti.raw usage."""
import itertools
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import pykitti
__author__ = "Lee Clement"
__email__ = "[email protected]"
# Change this to the directory where you store KITTI data
basedir = '/Users/leeclement/Desktop/KITTI/raw'
# Specify the dataset to load
date = '2011_09_30'
drive = '0034'
# Load the data. Optionally, specify the frame range to load.
# dataset = pykitti.raw(basedir, date, drive)
dataset = pykitti.raw(basedir, date, drive, frames=range(0, 20, 5))
# dataset.calib: Calibration data are accessible as a named tuple
# dataset.timestamps: Timestamps are parsed into a list of datetime objects
# dataset.oxts: List of OXTS packets and 6-dof poses as named tuples
# dataset.camN: Returns a generator that loads individual images from camera N
# dataset.get_camN(idx): Returns the image from camera N at idx
# dataset.gray: Returns a generator that loads monochrome stereo pairs (cam0, cam1)
# dataset.get_gray(idx): Returns the monochrome stereo pair at idx
# dataset.rgb: Returns a generator that loads RGB stereo pairs (cam2, cam3)
# dataset.get_rgb(idx): Returns the RGB stereo pair at idx
# dataset.velo: Returns a generator that loads velodyne scans as [x,y,z,reflectance]
# dataset.get_velo(idx): Returns the velodyne scan at idx
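# Illustrative helper (not part of the original demo; the name is
# hypothetical): the accessors above are lazy generators, so frames can be
# consumed one at a time without loading the whole drive into memory.
def iter_left_gray(ds):
    """Yield left grayscale frames (cam0 in the list above) one by one."""
    for image in ds.cam0:
        yield image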
# Grab some data
second_pose = dataset.oxts[1].T_w_imu
first_gray = next(iter(dataset.gray))
first_cam1 = next(iter(dataset.cam1))
first_rgb = dataset.get_rgb(0)
first_cam2 = dataset.get_cam2(0)
third_velo = dataset.get_velo(2)
# Display some of the data
np.set_printoptions(precision=4, suppress=True)
print('\nDrive: ' + str(dataset.drive))
print('\nFrame range: ' + str(dataset.frames))
print('\nIMU-to-Velodyne transformation:\n' + str(dataset.calib.T_velo_imu))
print('\nGray stereo pair baseline [m]: ' + str(dataset.calib.b_gray))
print('\nRGB stereo pair baseline [m]: ' + str(dataset.calib.b_rgb))
print('\nFirst timestamp: ' + str(dataset.timestamps[0]))
print('\nSecond IMU pose:\n' + str(second_pose))
f, ax = plt.subplots(2, 2, figsize=(15, 5))
ax[0, 0].imshow(first_gray[0], cmap='gray')
ax[0, 0].set_title('Left Gray Image (cam0)')
ax[0, 1].imshow(first_cam1, cmap='gray')
ax[0, 1].set_title('Right Gray Image (cam1)')
ax[1, 0].imshow(first_cam2)
ax[1, 0].set_title('Left RGB Image (cam2)')
ax[1, 1].imshow(first_rgb[1])
ax[1, 1].set_title('Right RGB Image (cam3)')
f2 = plt.figure()
ax2 = f2.add_subplot(111, projection='3d')
# Plot every 100th point so things don't get too bogged down
velo_range = range(0, third_velo.shape[0], 100)
ax2.scatter(third_velo[velo_range, 0],
third_velo[velo_range, 1],
third_velo[velo_range, 2],
c=third_velo[velo_range, 3],
cmap='gray')
ax2.set_title('Third Velodyne scan (subsampled)')
plt.show()
| mit |
sarahgrogan/scikit-learn | examples/linear_model/plot_logistic_l1_l2_sparsity.py | 384 | 2601 | """
==============================================
L1 Penalty and Sparsity in Logistic Regression
==============================================
Comparison of the sparsity (percentage of zero coefficients) of solutions when
L1 and L2 penalty are used for different values of C. We can see that large
values of C give more freedom to the model. Conversely, smaller values of C
constrain the model more. In the L1 penalty case, this leads to sparser
solutions.
We classify 8x8 images of digits into two classes: 0-4 against 5-9.
The visualization shows coefficients of the models for varying C.
"""
print(__doc__)
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Andreas Mueller <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn import datasets
from sklearn.preprocessing import StandardScaler
digits = datasets.load_digits()
X, y = digits.data, digits.target
X = StandardScaler().fit_transform(X)
# classify small against large digits
y = (y > 4).astype(np.int)
# Set regularization parameter
for i, C in enumerate((100, 1, 0.01)):
# turn down tolerance for short training time
clf_l1_LR = LogisticRegression(C=C, penalty='l1', tol=0.01)
clf_l2_LR = LogisticRegression(C=C, penalty='l2', tol=0.01)
clf_l1_LR.fit(X, y)
clf_l2_LR.fit(X, y)
coef_l1_LR = clf_l1_LR.coef_.ravel()
coef_l2_LR = clf_l2_LR.coef_.ravel()
# coef_l1_LR contains zeros due to the
# L1 sparsity inducing norm
sparsity_l1_LR = np.mean(coef_l1_LR == 0) * 100
sparsity_l2_LR = np.mean(coef_l2_LR == 0) * 100
print("C=%.2f" % C)
print("Sparsity with L1 penalty: %.2f%%" % sparsity_l1_LR)
print("score with L1 penalty: %.4f" % clf_l1_LR.score(X, y))
print("Sparsity with L2 penalty: %.2f%%" % sparsity_l2_LR)
print("score with L2 penalty: %.4f" % clf_l2_LR.score(X, y))
l1_plot = plt.subplot(3, 2, 2 * i + 1)
l2_plot = plt.subplot(3, 2, 2 * (i + 1))
if i == 0:
l1_plot.set_title("L1 penalty")
l2_plot.set_title("L2 penalty")
l1_plot.imshow(np.abs(coef_l1_LR.reshape(8, 8)), interpolation='nearest',
cmap='binary', vmax=1, vmin=0)
l2_plot.imshow(np.abs(coef_l2_LR.reshape(8, 8)), interpolation='nearest',
cmap='binary', vmax=1, vmin=0)
plt.text(-8, 3, "C = %.2f" % C)
l1_plot.set_xticks(())
l1_plot.set_yticks(())
l2_plot.set_xticks(())
l2_plot.set_yticks(())
plt.show()
| bsd-3-clause |
chaluemwut/fbserver | venv/lib/python2.7/site-packages/sklearn/tree/export.py | 4 | 4627 | """
This module defines export functions for decision trees.
"""
# Authors: Gilles Louppe <[email protected]>
# Peter Prettenhofer <[email protected]>
# Brian Holt <[email protected]>
# Noel Dawe <[email protected]>
# Satrajit Gosh <[email protected]>
# Licence: BSD 3 clause
from warnings import warn
from ..externals import six
from . import _tree
def export_graphviz(decision_tree, out_file="tree.dot", feature_names=None,
max_depth=None, close=None):
"""Export a decision tree in DOT format.
This function generates a GraphViz representation of the decision tree,
which is then written into `out_file`. Once exported, graphical renderings
can be generated using, for example::
$ dot -Tps tree.dot -o tree.ps (PostScript format)
$ dot -Tpng tree.dot -o tree.png (PNG format)
Parameters
----------
decision_tree : decision tree classifier
The decision tree to be exported to GraphViz.
out_file : file object or string, optional (default="tree.dot")
Handle or name of the output file.
feature_names : list of strings, optional (default=None)
Names of each of the features.
max_depth : int, optional (default=None)
The maximum depth of the representation. If None, the tree is fully
generated.
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn import tree
>>> clf = tree.DecisionTreeClassifier()
>>> iris = load_iris()
>>> clf = clf.fit(iris.data, iris.target)
>>> tree.export_graphviz(clf,
... out_file='tree.dot') # doctest: +SKIP
"""
if close is not None:
warn("The close parameter is deprecated as of version 0.14 "
"and will be removed in 0.16.", DeprecationWarning)
def node_to_str(tree, node_id, criterion):
if not isinstance(criterion, six.string_types):
criterion = "impurity"
value = tree.value[node_id]
if tree.n_outputs == 1:
value = value[0, :]
if tree.children_left[node_id] == _tree.TREE_LEAF:
return "%s = %.4f\\nsamples = %s\\nvalue = %s" \
% (criterion,
tree.impurity[node_id],
tree.n_node_samples[node_id],
value)
else:
if feature_names is not None:
feature = feature_names[tree.feature[node_id]]
else:
feature = "X[%s]" % tree.feature[node_id]
return "%s <= %.4f\\n%s = %s\\nsamples = %s" \
% (feature,
tree.threshold[node_id],
criterion,
tree.impurity[node_id],
tree.n_node_samples[node_id])
def recurse(tree, node_id, criterion, parent=None, depth=0):
if node_id == _tree.TREE_LEAF:
raise ValueError("Invalid node_id %s" % _tree.TREE_LEAF)
left_child = tree.children_left[node_id]
right_child = tree.children_right[node_id]
# Add node with description
if max_depth is None or depth <= max_depth:
out_file.write('%d [label="%s", shape="box"] ;\n' %
(node_id, node_to_str(tree, node_id, criterion)))
if parent is not None:
# Add edge to parent
out_file.write('%d -> %d ;\n' % (parent, node_id))
if left_child != _tree.TREE_LEAF:
recurse(tree, left_child, criterion=criterion, parent=node_id,
depth=depth + 1)
recurse(tree, right_child, criterion=criterion, parent=node_id,
depth=depth + 1)
else:
out_file.write('%d [label="(...)", shape="box"] ;\n' % node_id)
if parent is not None:
# Add edge to parent
out_file.write('%d -> %d ;\n' % (parent, node_id))
own_file = False
try:
if isinstance(out_file, six.string_types):
if six.PY3:
out_file = open(out_file, "w", encoding="utf-8")
else:
out_file = open(out_file, "wb")
own_file = True
out_file.write("digraph Tree {\n")
if isinstance(decision_tree, _tree.Tree):
recurse(decision_tree, 0, criterion="impurity")
else:
recurse(decision_tree.tree_, 0, criterion=decision_tree.criterion)
out_file.write("}")
finally:
if own_file:
out_file.close()
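# Illustrative helper (not part of the scikit-learn API): render the exported
# DOT file with the GraphViz ``dot`` binary, mirroring the shell commands
# shown in the export_graphviz docstring. Assumes ``dot`` is on the PATH.
def _render_dot(dot_path="tree.dot", out_path="tree.png", fmt="png"):
    import subprocess
    subprocess.check_call(["dot", "-T" + fmt, dot_path, "-o", out_path])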
| apache-2.0 |
jblackburne/scikit-learn | sklearn/semi_supervised/label_propagation.py | 17 | 15941 | # coding=utf8
"""
Label propagation in the context of this module refers to a set of
semisupervised classification algorithms. At a high level, these algorithms
work by forming a fully-connected graph between all points given and solving
for the steady-state distribution of labels at each point.
These algorithms perform very well in practice. The running cost can be high,
approximately O(N^3) where N is the number of (labeled and
unlabeled) points. The theory (why they perform so well) is motivated by
intuitions from random walk algorithms and geometric relationships in the data.
For more information see the references below.
Model Features
--------------
Label clamping:
The algorithm tries to learn distributions of labels over the dataset. In the
"Hard Clamp" mode, the true ground labels are never allowed to change. They
are clamped into position. In the "Soft Clamp" mode, they are allowed some
wiggle room, but some alpha of their original value will always be retained.
Hard clamp is the same as soft clamping with alpha set to 1.
Kernel:
A function which projects a vector into some higher dimensional space. This
implementation supports RBF and KNN kernels. Using the RBF kernel generates
a dense matrix of size O(N^2). KNN kernel will generate a sparse matrix of
size O(k*N) which will run much faster. See the documentation for SVMs for
more info on kernels.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.randint(0, 2,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
Notes
-----
References:
[1] Yoshua Bengio, Olivier Delalleau, Nicolas Le Roux. In Semi-Supervised
Learning (2006), pp. 193-216
[2] Olivier Delalleau, Yoshua Bengio, Nicolas Le Roux. Efficient
Non-Parametric Function Induction in Semi-Supervised Learning. AISTAT 2005
"""
# Authors: Clay Woolam <[email protected]>
# License: BSD
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import sparse
from ..base import BaseEstimator, ClassifierMixin
from ..externals import six
from ..metrics.pairwise import rbf_kernel
from ..neighbors.unsupervised import NearestNeighbors
from ..utils.extmath import safe_sparse_dot
from ..utils.graph import graph_laplacian
from ..utils.multiclass import check_classification_targets
from ..utils.validation import check_X_y, check_is_fitted, check_array
# Helper functions
def _not_converged(y_truth, y_prediction, tol=1e-3):
"""basic convergence check"""
return np.abs(y_truth - y_prediction).sum() > tol
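# Illustrative sketch (not part of the module; the helper name is
# hypothetical): at its core, fit() below repeats the update
#   Y <- graph_matrix . Y
# followed by clamping of the labeled rows, until _not_converged() above
# returns False or max_iter is exhausted.
def _demo_propagation_step(graph_matrix, y_current, y_static, clamp_weights):
    y_next = safe_sparse_dot(graph_matrix, y_current)
    # labeled points keep (part of) their original label distribution
    return np.multiply(clamp_weights, y_next) + y_static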
class BaseLabelPropagation(six.with_metaclass(ABCMeta, BaseEstimator,
ClassifierMixin)):
"""Base class for label propagation module.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
        Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
Parameter for rbf kernel
alpha : float
Clamping factor
max_iter : float
Change maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
n_neighbors : integer > 0
Parameter for knn kernel
n_jobs : int, optional (default = 1)
The number of parallel jobs to run.
If ``-1``, then the number of jobs is set to the number of CPU cores.
"""
def __init__(self, kernel='rbf', gamma=20, n_neighbors=7,
alpha=1, max_iter=30, tol=1e-3, n_jobs=1):
self.max_iter = max_iter
self.tol = tol
# kernel parameters
self.kernel = kernel
self.gamma = gamma
self.n_neighbors = n_neighbors
# clamping factor
self.alpha = alpha
self.n_jobs = n_jobs
def _get_kernel(self, X, y=None):
if self.kernel == "rbf":
if y is None:
return rbf_kernel(X, X, gamma=self.gamma)
else:
return rbf_kernel(X, y, gamma=self.gamma)
elif self.kernel == "knn":
if self.nn_fit is None:
self.nn_fit = NearestNeighbors(self.n_neighbors,
n_jobs=self.n_jobs).fit(X)
if y is None:
return self.nn_fit.kneighbors_graph(self.nn_fit._fit_X,
self.n_neighbors,
mode='connectivity')
else:
return self.nn_fit.kneighbors(y, return_distance=False)
else:
raise ValueError("%s is not a valid kernel. Only rbf and knn"
" are supported at this time" % self.kernel)
@abstractmethod
def _build_graph(self):
raise NotImplementedError("Graph construction must be implemented"
" to fit a label propagation model.")
def predict(self, X):
"""Performs inductive inference across the model.
Parameters
----------
X : array_like, shape = [n_samples, n_features]
Returns
-------
y : array_like, shape = [n_samples]
Predictions for input data
"""
probas = self.predict_proba(X)
return self.classes_[np.argmax(probas, axis=1)].ravel()
def predict_proba(self, X):
"""Predict probability for each possible outcome.
Compute the probability estimates for each single sample in X
and each possible outcome seen during training (categorical
distribution).
Parameters
----------
X : array_like, shape = [n_samples, n_features]
Returns
-------
probabilities : array, shape = [n_samples, n_classes]
Normalized probability distributions across
class labels
"""
check_is_fitted(self, 'X_')
X_2d = check_array(X, accept_sparse=['csc', 'csr', 'coo', 'dok',
'bsr', 'lil', 'dia'])
weight_matrices = self._get_kernel(self.X_, X_2d)
if self.kernel == 'knn':
probabilities = []
for weight_matrix in weight_matrices:
ine = np.sum(self.label_distributions_[weight_matrix], axis=0)
probabilities.append(ine)
probabilities = np.array(probabilities)
else:
weight_matrices = weight_matrices.T
probabilities = np.dot(weight_matrices, self.label_distributions_)
normalizer = np.atleast_2d(np.sum(probabilities, axis=1)).T
probabilities /= normalizer
return probabilities
def fit(self, X, y):
"""Fit a semi-supervised label propagation model based
All the input data is provided matrix X (labeled and unlabeled)
and corresponding label matrix y with a dedicated marker value for
unlabeled samples.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
A {n_samples by n_samples} size matrix will be created from this
y : array_like, shape = [n_samples]
n_labeled_samples (unlabeled points are marked as -1)
All unlabeled samples will be transductively assigned labels
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y)
self.X_ = X
check_classification_targets(y)
# actual graph construction (implementations should override this)
graph_matrix = self._build_graph()
# label construction
# construct a categorical distribution for classification only
classes = np.unique(y)
classes = (classes[classes != -1])
self.classes_ = classes
n_samples, n_classes = len(y), len(classes)
y = np.asarray(y)
unlabeled = y == -1
clamp_weights = np.ones((n_samples, 1))
clamp_weights[unlabeled, 0] = self.alpha
# initialize distributions
self.label_distributions_ = np.zeros((n_samples, n_classes))
for label in classes:
self.label_distributions_[y == label, classes == label] = 1
y_static = np.copy(self.label_distributions_)
if self.alpha > 0.:
y_static *= 1 - self.alpha
y_static[unlabeled] = 0
l_previous = np.zeros((self.X_.shape[0], n_classes))
remaining_iter = self.max_iter
if sparse.isspmatrix(graph_matrix):
graph_matrix = graph_matrix.tocsr()
while (_not_converged(self.label_distributions_, l_previous, self.tol)
and remaining_iter > 1):
l_previous = self.label_distributions_
self.label_distributions_ = safe_sparse_dot(
graph_matrix, self.label_distributions_)
# clamp
self.label_distributions_ = np.multiply(
clamp_weights, self.label_distributions_) + y_static
remaining_iter -= 1
normalizer = np.sum(self.label_distributions_, axis=1)[:, np.newaxis]
self.label_distributions_ /= normalizer
# set the transduction item
transduction = self.classes_[np.argmax(self.label_distributions_,
axis=1)]
self.transduction_ = transduction.ravel()
self.n_iter_ = self.max_iter - remaining_iter
return self
class LabelPropagation(BaseLabelPropagation):
"""Label Propagation classifier
Read more in the :ref:`User Guide <label_propagation>`.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
Parameter for rbf kernel
n_neighbors : integer > 0
Parameter for knn kernel
alpha : float
Clamping factor
max_iter : float
Change maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
Attributes
----------
X_ : array, shape = [n_samples, n_features]
Input array.
classes_ : array, shape = [n_classes]
The distinct labels used in classifying instances.
label_distributions_ : array, shape = [n_samples, n_classes]
Categorical distribution for each item.
transduction_ : array, shape = [n_samples]
Label assigned to each item via the transduction.
n_iter_ : int
Number of iterations run.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.randint(0, 2,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
References
----------
Xiaojin Zhu and Zoubin Ghahramani. Learning from labeled and unlabeled data
with label propagation. Technical Report CMU-CALD-02-107, Carnegie Mellon
University, 2002 http://pages.cs.wisc.edu/~jerryzhu/pub/CMU-CALD-02-107.pdf
See Also
--------
LabelSpreading : Alternate label propagation strategy more robust to noise
"""
def _build_graph(self):
"""Matrix representing a fully connected graph between each sample
This basic implementation creates a non-stochastic affinity matrix, so
class distributions will exceed 1 (normalization may be desired).
"""
if self.kernel == 'knn':
self.nn_fit = None
affinity_matrix = self._get_kernel(self.X_)
normalizer = affinity_matrix.sum(axis=0)
if sparse.isspmatrix(affinity_matrix):
affinity_matrix.data /= np.diag(np.array(normalizer))
else:
affinity_matrix /= normalizer[:, np.newaxis]
return affinity_matrix
class LabelSpreading(BaseLabelPropagation):
"""LabelSpreading model for semi-supervised learning
This model is similar to the basic Label Propagation algorithm,
but uses affinity matrix based on the normalized graph Laplacian
and soft clamping across the labels.
Read more in the :ref:`User Guide <label_propagation>`.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
parameter for rbf kernel
n_neighbors : integer > 0
parameter for knn kernel
alpha : float
clamping factor
max_iter : float
maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
n_jobs : int, optional (default = 1)
The number of parallel jobs to run.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Attributes
----------
X_ : array, shape = [n_samples, n_features]
Input array.
classes_ : array, shape = [n_classes]
The distinct labels used in classifying instances.
label_distributions_ : array, shape = [n_samples, n_classes]
Categorical distribution for each item.
transduction_ : array, shape = [n_samples]
Label assigned to each item via the transduction.
n_iter_ : int
Number of iterations run.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelSpreading
>>> label_prop_model = LabelSpreading()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.randint(0, 2,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelSpreading(...)
References
----------
Dengyong Zhou, Olivier Bousquet, Thomas Navin Lal, Jason Weston,
Bernhard Schoelkopf. Learning with local and global consistency (2004)
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.115.3219
See Also
--------
LabelPropagation : Unregularized graph based semi-supervised learning
"""
def __init__(self, kernel='rbf', gamma=20, n_neighbors=7, alpha=0.2,
max_iter=30, tol=1e-3, n_jobs=1):
# this one has different base parameters
super(LabelSpreading, self).__init__(kernel=kernel, gamma=gamma,
n_neighbors=n_neighbors,
alpha=alpha, max_iter=max_iter,
tol=tol,
n_jobs=n_jobs)
def _build_graph(self):
"""Graph matrix for Label Spreading computes the graph laplacian"""
# compute affinity matrix (or gram matrix)
if self.kernel == 'knn':
self.nn_fit = None
n_samples = self.X_.shape[0]
affinity_matrix = self._get_kernel(self.X_)
laplacian = graph_laplacian(affinity_matrix, normed=True)
laplacian = -laplacian
if sparse.isspmatrix(laplacian):
diag_mask = (laplacian.row == laplacian.col)
laplacian.data[diag_mask] = 0.0
else:
laplacian.flat[::n_samples + 1] = 0.0 # set diag to 0.0
return laplacian
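# Aside (editorial): a dense sketch of the matrix _build_graph returns above
# (illustrative only; the helper name is ours). graph_laplacian(W, normed=True)
# gives I - D^-1/2 W D^-1/2; negating it and zeroing the diagonal leaves the
# symmetrically normalized affinity with no self-loops, which is what the
# spreading iteration multiplies by.
def _normalized_affinity_dense(W):
    d = W.sum(axis=1)
    d_inv_sqrt = 1.0 / np.sqrt(np.where(d > 0, d, 1.0))  # guard empty rows
    S = W * d_inv_sqrt[:, np.newaxis] * d_inv_sqrt[np.newaxis, :]
    np.fill_diagonal(S, 0.0)
    return S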
| bsd-3-clause |
soulmachine/scikit-learn | examples/linear_model/plot_sparse_recovery.py | 243 | 7461 | """
============================================================
Sparse recovery: feature selection for sparse linear models
============================================================
Given a small number of observations, we want to recover which features
of X are relevant to explain y. For this :ref:`sparse linear models
<l1_feature_selection>` can outperform standard statistical tests if the
true model is sparse, i.e. if a small fraction of the features are
relevant.
As detailed in :ref:`the compressive sensing notes
<compressive_sensing>`, the ability of the L1-based approach to identify the
relevant variables depends on the sparsity of the ground truth, the
number of samples, the number of features, the conditioning of the
design matrix on the signal subspace, the amount of noise, and the
absolute value of the smallest non-zero coefficient [Wainwright2006]
(http://statistics.berkeley.edu/tech-reports/709.pdf).
Here we keep all parameters constant and vary the conditioning of the
design matrix. For a well-conditioned design matrix (small mutual
incoherence) we are exactly in compressive sensing conditions (i.i.d
Gaussian sensing matrix), and L1-recovery with the Lasso performs very
well. For an ill-conditioned matrix (high mutual incoherence),
regressors are very correlated, and the Lasso randomly selects one.
However, randomized-Lasso can recover the ground truth well.
In each situation, we first vary the alpha parameter setting the sparsity
of the estimated model and look at the stability scores of the randomized
Lasso. This analysis, knowing the ground truth, shows an optimal regime
in which relevant features stand out from the irrelevant ones. If alpha
is chosen too small, non-relevant variables enter the model. On the
opposite, if alpha is selected too large, the Lasso is equivalent to
stepwise regression, and thus brings no advantage over a univariate
F-test.
In a second step, we set alpha and compare the performance of different
feature selection methods, using the area under curve (AUC) of the
precision-recall.
"""
print(__doc__)
# Author: Alexandre Gramfort and Gael Varoquaux
# License: BSD 3 clause
import warnings
import matplotlib.pyplot as plt
import numpy as np
from scipy import linalg
from sklearn.linear_model import (RandomizedLasso, lasso_stability_path,
LassoLarsCV)
from sklearn.feature_selection import f_regression
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import auc, precision_recall_curve
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.utils.extmath import pinvh
from sklearn.utils import ConvergenceWarning
def mutual_incoherence(X_relevant, X_irelevant):
"""Mutual incoherence, as defined by formula (26a) of [Wainwright2006].
"""
projector = np.dot(np.dot(X_irelevant.T, X_relevant),
pinvh(np.dot(X_relevant.T, X_relevant)))
return np.max(np.abs(projector).sum(axis=1))
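# Aside (editorial): lasso_stability_path below refits a randomized Lasso on
# many subsamples and reports how often each feature is selected. A minimal
# hand-rolled version of the subsampling half of that idea (illustrative only;
# names are ours, and the real randomized Lasso also perturbs the per-feature
# penalties, which this sketch omits):
def _manual_stability_scores(X, y, alpha, n_resamples=50, fraction=0.75,
                             seed=0):
    from sklearn.linear_model import Lasso
    rng_ = np.random.RandomState(seed)
    n, p = X.shape
    counts = np.zeros(p)
    for _ in range(n_resamples):
        idx = rng_.choice(n, int(fraction * n), replace=False)
        coef_ = Lasso(alpha=alpha).fit(X[idx], y[idx]).coef_
        counts += (coef_ != 0)
    return counts / n_resamples  # selection frequency per feature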
for conditioning in (1, 1e-4):
###########################################################################
# Simulate regression data with a correlated design
n_features = 501
n_relevant_features = 3
noise_level = .2
coef_min = .2
# The Donoho-Tanner phase transition is around n_samples=25: below we
# will completely fail to recover in the well-conditioned case
n_samples = 25
block_size = n_relevant_features
rng = np.random.RandomState(42)
# The coefficients of our model
coef = np.zeros(n_features)
coef[:n_relevant_features] = coef_min + rng.rand(n_relevant_features)
# The correlation of our design: variables correlated by blocs of 3
corr = np.zeros((n_features, n_features))
for i in range(0, n_features, block_size):
corr[i:i + block_size, i:i + block_size] = 1 - conditioning
corr.flat[::n_features + 1] = 1
corr = linalg.cholesky(corr)
# Our design
X = rng.normal(size=(n_samples, n_features))
X = np.dot(X, corr)
# Keep [Wainwright2006] (26c) constant
X[:n_relevant_features] /= np.abs(
linalg.svdvals(X[:n_relevant_features])).max()
X = StandardScaler().fit_transform(X.copy())
# The output variable
y = np.dot(X, coef)
y /= np.std(y)
# We scale the added noise as a function of the average correlation
# between the design and the output variable
y += noise_level * rng.normal(size=n_samples)
mi = mutual_incoherence(X[:, :n_relevant_features],
X[:, n_relevant_features:])
###########################################################################
# Plot stability selection path, using a high eps for early stopping
# of the path, to save computation time
alpha_grid, scores_path = lasso_stability_path(X, y, random_state=42,
eps=0.05)
plt.figure()
# We plot the path as a function of alpha/alpha_max to the power 1/3: the
# power 1/3 scales the path less brutally than the log, and makes it
# easier to see the progression along the path
hg = plt.plot(alpha_grid[1:] ** .333, scores_path[coef != 0].T[1:], 'r')
hb = plt.plot(alpha_grid[1:] ** .333, scores_path[coef == 0].T[1:], 'k')
ymin, ymax = plt.ylim()
plt.xlabel(r'$(\alpha / \alpha_{max})^{1/3}$')
plt.ylabel('Stability score: proportion of times selected')
plt.title('Stability Scores Path - Mutual incoherence: %.1f' % mi)
plt.axis('tight')
plt.legend((hg[0], hb[0]), ('relevant features', 'irrelevant features'),
loc='best')
###########################################################################
# Plot the estimated stability scores for a given alpha
# Use 6-fold cross-validation rather than the default 3-fold: it leads to
# a better choice of alpha:
# Stop the user warnings outputs- they are not necessary for the example
# as it is specifically set up to be challenging.
with warnings.catch_warnings():
warnings.simplefilter('ignore', UserWarning)
warnings.simplefilter('ignore', ConvergenceWarning)
lars_cv = LassoLarsCV(cv=6).fit(X, y)
# Run the RandomizedLasso: we use a path going down to .1*alpha_max
# to avoid exploring the regime in which very noisy variables enter
# the model
alphas = np.linspace(lars_cv.alphas_[0], .1 * lars_cv.alphas_[0], 6)
clf = RandomizedLasso(alpha=alphas, random_state=42).fit(X, y)
trees = ExtraTreesRegressor(100).fit(X, y)
# Compare with F-score
F, _ = f_regression(X, y)
plt.figure()
for name, score in [('F-test', F),
('Stability selection', clf.scores_),
('Lasso coefs', np.abs(lars_cv.coef_)),
('Trees', trees.feature_importances_),
]:
precision, recall, thresholds = precision_recall_curve(coef != 0,
score)
plt.semilogy(np.maximum(score / np.max(score), 1e-4),
label="%s. AUC: %.3f" % (name, auc(recall, precision)))
plt.plot(np.where(coef != 0)[0], [2e-4] * n_relevant_features, 'mo',
label="Ground truth")
plt.xlabel("Features")
plt.ylabel("Score")
# Plot only the 100 first coefficients
plt.xlim(0, 100)
plt.legend(loc='best')
plt.title('Feature selection scores - Mutual incoherence: %.1f'
% mi)
plt.show()
| bsd-3-clause |
abhishekgahlot/scikit-learn | examples/decomposition/plot_pca_vs_fa_model_selection.py | 30 | 4516 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=================================================================
Model selection with Probabilistic (PCA) and Factor Analysis (FA)
=================================================================
Probabilistic PCA and Factor Analysis are probabilistic models.
The consequence is that the likelihood of new data can be used
for model selection and covariance estimation.
Here we compare PCA and FA with cross-validation on low rank data corrupted
with homoscedastic noise (noise variance
is the same for each feature) or heteroscedastic noise (noise variance
is different for each feature). In a second step we compare the model
likelihood to the likelihoods obtained from shrinkage covariance estimators.
One can observe that with homoscedastic noise both FA and PCA succeed
in recovering the size of the low rank subspace. The likelihood with PCA
is higher than FA in this case. However PCA fails and overestimates
the rank when heteroscedastic noise is present. Under appropriate
circumstances the low rank models are more likely than shrinkage models.
The automatic estimation from
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604
by Thomas P. Minka is also compared.
"""
print(__doc__)
# Authors: Alexandre Gramfort
# Denis A. Engemann
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg
from sklearn.decomposition import PCA, FactorAnalysis
from sklearn.covariance import ShrunkCovariance, LedoitWolf
from sklearn.cross_validation import cross_val_score
from sklearn.grid_search import GridSearchCV
###############################################################################
# Create the data
n_samples, n_features, rank = 1000, 50, 10
sigma = 1.
rng = np.random.RandomState(42)
U, _, _ = linalg.svd(rng.randn(n_features, n_features))
X = np.dot(rng.randn(n_samples, rank), U[:, :rank].T)
# Adding homoscedastic noise
X_homo = X + sigma * rng.randn(n_samples, n_features)
# Adding heteroscedastic noise
sigmas = sigma * rng.rand(n_features) + sigma / 2.
X_hetero = X + rng.randn(n_samples, n_features) * sigmas
###############################################################################
# Fit the models
n_components = np.arange(0, n_features, 5) # options for n_components
def compute_scores(X):
pca = PCA()
fa = FactorAnalysis()
pca_scores, fa_scores = [], []
for n in n_components:
pca.n_components = n
fa.n_components = n
pca_scores.append(np.mean(cross_val_score(pca, X)))
fa_scores.append(np.mean(cross_val_score(fa, X)))
return pca_scores, fa_scores
def shrunk_cov_score(X):
shrinkages = np.logspace(-2, 0, 30)
cv = GridSearchCV(ShrunkCovariance(), {'shrinkage': shrinkages})
return np.mean(cross_val_score(cv.fit(X).best_estimator_, X))
def lw_score(X):
return np.mean(cross_val_score(LedoitWolf(), X))
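# Aside (editorial): with no explicit scorer, cross_val_score above falls back
# on each model's own .score(), which for PCA and FactorAnalysis is the average
# per-sample log-likelihood of held-out data. A manual single-split equivalent
# (illustrative only; the split size is arbitrary):
def _holdout_loglik(model, X, n_train=500):
    model.fit(X[:n_train])
    return model.score(X[n_train:])  # mean log-likelihood per held-out sample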
for X, title in [(X_homo, 'Homoscedastic Noise'),
(X_hetero, 'Heteroscedastic Noise')]:
pca_scores, fa_scores = compute_scores(X)
n_components_pca = n_components[np.argmax(pca_scores)]
n_components_fa = n_components[np.argmax(fa_scores)]
pca = PCA(n_components='mle')
pca.fit(X)
n_components_pca_mle = pca.n_components_
print("best n_components by PCA CV = %d" % n_components_pca)
print("best n_components by FactorAnalysis CV = %d" % n_components_fa)
print("best n_components by PCA MLE = %d" % n_components_pca_mle)
plt.figure()
plt.plot(n_components, pca_scores, 'b', label='PCA scores')
plt.plot(n_components, fa_scores, 'r', label='FA scores')
plt.axvline(rank, color='g', label='TRUTH: %d' % rank, linestyle='-')
plt.axvline(n_components_pca, color='b',
label='PCA CV: %d' % n_components_pca, linestyle='--')
plt.axvline(n_components_fa, color='r',
label='FactorAnalysis CV: %d' % n_components_fa, linestyle='--')
plt.axvline(n_components_pca_mle, color='k',
label='PCA MLE: %d' % n_components_pca_mle, linestyle='--')
# compare with other covariance estimators
plt.axhline(shrunk_cov_score(X), color='violet',
label='Shrunk Covariance MLE', linestyle='-.')
plt.axhline(lw_score(X), color='orange',
label='LedoitWolf MLE', linestyle='-.')
plt.xlabel('nb of components')
plt.ylabel('CV scores')
plt.legend(loc='lower right')
plt.title(title)
plt.show()
| bsd-3-clause |
vantares/trading-with-python | lib/vixFutures.py | 79 | 4157 | # -*- coding: utf-8 -*-
"""
set of tools for working with VIX futures
@author: Jev Kuznetsov
Licence: GPL v2
"""
import datetime as dt
from pandas import *
import os
import urllib2
#from csvDatabase import HistDataCsv
m_codes = dict(zip(range(1,13),['F','G','H','J','K','M','N','Q','U','V','X','Z'])) #month codes of the futures
monthToCode = dict(m_codes) # month number -> futures month-code letter
def getCboeData(year,month):
''' download data from cboe '''
fName = "CFE_{0}{1}_VX.csv".format(m_codes[month],str(year)[-2:])
urlStr = "http://cfe.cboe.com/Publish/ScheduledTask/MktData/datahouse/{0}".format(fName)
try:
lines = urllib2.urlopen(urlStr).readlines()
except Exception, e:
s = "Failed to download:\n{0}".format(e)
print s
raise
# first column is date, second is future , skip these
header = lines[0].strip().split(',')[2:]
dates = []
data = [[] for i in range(len(header))]
for line in lines[1:]:
fields = line.strip().split(',')
dates.append(datetime.strptime( fields[0],'%m/%d/%Y'))
for i,field in enumerate(fields[2:]):
data[i].append(float(field))
data = dict(zip(header,data))
df = DataFrame(data=data, index=Index(dates))
return df
class Future(object):
''' vix future class '''
def __init__(self,year,month):
self.year = year
self.month = month
self.expiration = self._calculateExpirationDate()
self.cboeData = None # daily cboe data
self.intradayDb = None # intraday database (csv)
def _calculateExpirationDate(self):
''' calculate expiration date of the future, (not 100% reliable) '''
t = dt.date(self.year,self.month,1)+datetools.relativedelta(months=1)
offset = datetools.Week(weekday=4)
if t.weekday() != 4:
t_new = t+3*offset
else:
t_new = t+2*offset
t_new = t_new-datetools.relativedelta(days=30)
return t_new
def getCboeData(self, dataDir=None, forceUpdate=False):
''' download interday CBOE data
specify dataDir to save data to csv.
data will not be downloaded if csv file is already present.
This can be overridden with setting forceUpdate to True
'''
if dataDir is not None:
fileFound = os.path.exists(self._csvFilename(dataDir))
if forceUpdate or not fileFound:
self.cboeData = getCboeData(self.year, self.month)
self.to_csv(dataDir)
else:
self.cboeData = DataFrame.from_csv(self._csvFilename(dataDir))
else:
self.cboeData = getCboeData(self.year, self.month)
return self.cboeData
def updateIntradayDb(self,dbDir):
#self.intradayDb =
pass
def to_csv(self,dataDir):
''' save to csv in given dir. Filename is automatically generated '''
self.cboeData.to_csv(self._csvFilename(dataDir))
@property
def dates(self):
''' trading days derived from cboe data '''
if self.cboeData is not None:
dates = [d.date() for d in self.cboeData.index]
else:
dates = None
return dates
def _csvFilename(self,dataDir):
fName = "VIX_future_%i_%i.csv" % (self.year, self.month)
return os.path.join(dataDir,fName)
def __repr__(self):
s = 'Vix future [%i-%i (%s)] exp: %s\n' % (self.year, self.month,monthToCode[self.month], self.expiration.strftime("%B, %d %Y (%A)"))
s+= 'Cboe data: %i days'% len(self.cboeData) if self.cboeData is not None else 'No data downloaded yet'
return s
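# Aside (editorial): an alternative expiration computation using only the
# standard library (illustrative; like _calculateExpirationDate above it
# ignores exchange holidays). VX futures normally settle 30 days before the
# third Friday of the month following the contract month.
def vx_expiration(year, month):
    import calendar
    ny, nm = (year + 1, 1) if month == 12 else (year, month + 1)
    first_weekday = calendar.monthrange(ny, nm)[0]   # Monday == 0
    third_friday = 1 + (4 - first_weekday) % 7 + 14  # day of the month
    return dt.date(ny, nm, third_friday) - dt.timedelta(days=30)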
if __name__ == '__main__':
print 'testing vix futures'
year = 2012
month = 12
f = Future(year,month)
f.getCboeData()
print f
| bsd-3-clause |
TomAugspurger/pandas | pandas/tests/tseries/frequencies/test_to_offset.py | 1 | 4660 | import re
import pytest
from pandas._libs.tslibs import Timedelta, offsets, to_offset
@pytest.mark.parametrize(
"freq_input,expected",
[
(to_offset("10us"), offsets.Micro(10)),
(offsets.Hour(), offsets.Hour()),
((5, "T"), offsets.Minute(5)),
("2h30min", offsets.Minute(150)),
("2h 30min", offsets.Minute(150)),
("2h30min15s", offsets.Second(150 * 60 + 15)),
("2h 60min", offsets.Hour(3)),
("2h 20.5min", offsets.Second(8430)),
("1.5min", offsets.Second(90)),
("0.5S", offsets.Milli(500)),
("15l500u", offsets.Micro(15500)),
("10s75L", offsets.Milli(10075)),
("1s0.25ms", offsets.Micro(1000250)),
("1s0.25L", offsets.Micro(1000250)),
("2800N", offsets.Nano(2800)),
("2SM", offsets.SemiMonthEnd(2)),
("2SM-16", offsets.SemiMonthEnd(2, day_of_month=16)),
("2SMS-14", offsets.SemiMonthBegin(2, day_of_month=14)),
("2SMS-15", offsets.SemiMonthBegin(2)),
],
)
def test_to_offset(freq_input, expected):
result = to_offset(freq_input)
assert result == expected
@pytest.mark.parametrize(
"freqstr,expected", [("-1S", -1), ("-2SM", -2), ("-1SMS", -1), ("-5min10s", -310)]
)
def test_to_offset_negative(freqstr, expected):
result = to_offset(freqstr)
assert result.n == expected
@pytest.mark.parametrize(
"freqstr",
[
"2h20m",
"U1",
"-U",
"3U1",
"-2-3U",
"-2D:3H",
"1.5.0S",
"2SMS-15-15",
"2SMS-15D",
"100foo",
# Invalid leading +/- signs.
"+-1d",
"-+1h",
"+1",
"-7",
"+d",
"-m",
# Invalid shortcut anchors.
"SM-0",
"SM-28",
"SM-29",
"SM-FOO",
"BSM",
"SM--1",
"SMS-1",
"SMS-28",
"SMS-30",
"SMS-BAR",
"SMS-BYR",
"BSMS",
"SMS--2",
],
)
def test_to_offset_invalid(freqstr):
# see gh-13930
# We escape the string because some of our
# inputs contain regex special characters.
msg = re.escape(f"Invalid frequency: {freqstr}")
with pytest.raises(ValueError, match=msg):
to_offset(freqstr)
def test_to_offset_no_evaluate():
with pytest.raises(ValueError, match="Could not evaluate"):
to_offset(("", ""))
@pytest.mark.parametrize(
"freqstr,expected",
[
("2D 3H", offsets.Hour(51)),
("2 D3 H", offsets.Hour(51)),
("2 D 3 H", offsets.Hour(51)),
(" 2 D 3 H ", offsets.Hour(51)),
(" H ", offsets.Hour()),
(" 3 H ", offsets.Hour(3)),
],
)
def test_to_offset_whitespace(freqstr, expected):
result = to_offset(freqstr)
assert result == expected
@pytest.mark.parametrize(
"freqstr,expected", [("00H 00T 01S", 1), ("-00H 03T 14S", -194)]
)
def test_to_offset_leading_zero(freqstr, expected):
result = to_offset(freqstr)
assert result.n == expected
@pytest.mark.parametrize("freqstr,expected", [("+1d", 1), ("+2h30min", 150)])
def test_to_offset_leading_plus(freqstr, expected):
result = to_offset(freqstr)
assert result.n == expected
@pytest.mark.parametrize(
"kwargs,expected",
[
(dict(days=1, seconds=1), offsets.Second(86401)),
(dict(days=-1, seconds=1), offsets.Second(-86399)),
(dict(hours=1, minutes=10), offsets.Minute(70)),
(dict(hours=1, minutes=-10), offsets.Minute(50)),
(dict(weeks=1), offsets.Day(7)),
(dict(hours=1), offsets.Hour(1)),
(dict(hours=1), to_offset("60min")),
(dict(microseconds=1), offsets.Micro(1)),
(dict(microseconds=0), offsets.Nano(0)),
],
)
def test_to_offset_pd_timedelta(kwargs, expected):
# see gh-9064
td = Timedelta(**kwargs)
result = to_offset(td)
assert result == expected
@pytest.mark.parametrize(
"shortcut,expected",
[
("W", offsets.Week(weekday=6)),
("W-SUN", offsets.Week(weekday=6)),
("Q", offsets.QuarterEnd(startingMonth=12)),
("Q-DEC", offsets.QuarterEnd(startingMonth=12)),
("Q-MAY", offsets.QuarterEnd(startingMonth=5)),
("SM", offsets.SemiMonthEnd(day_of_month=15)),
("SM-15", offsets.SemiMonthEnd(day_of_month=15)),
("SM-1", offsets.SemiMonthEnd(day_of_month=1)),
("SM-27", offsets.SemiMonthEnd(day_of_month=27)),
("SMS-2", offsets.SemiMonthBegin(day_of_month=2)),
("SMS-27", offsets.SemiMonthBegin(day_of_month=27)),
],
)
def test_anchored_shortcuts(shortcut, expected):
result = to_offset(shortcut)
assert result == expected
| bsd-3-clause |
ales-erjavec/scipy | doc/source/tutorial/stats/plots/kde_plot4.py | 142 | 1457 | from functools import partial
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
def my_kde_bandwidth(obj, fac=1./5):
"""We use Scott's Rule, multiplied by a constant factor."""
return np.power(obj.n, -1./(obj.d+4)) * fac
loc1, scale1, size1 = (-2, 1, 175)
loc2, scale2, size2 = (2, 0.2, 50)
x2 = np.concatenate([np.random.normal(loc=loc1, scale=scale1, size=size1),
np.random.normal(loc=loc2, scale=scale2, size=size2)])
x_eval = np.linspace(x2.min() - 1, x2.max() + 1, 500)
kde = stats.gaussian_kde(x2)
kde2 = stats.gaussian_kde(x2, bw_method='silverman')
kde3 = stats.gaussian_kde(x2, bw_method=partial(my_kde_bandwidth, fac=0.2))
kde4 = stats.gaussian_kde(x2, bw_method=partial(my_kde_bandwidth, fac=0.5))
pdf = stats.norm.pdf
bimodal_pdf = pdf(x_eval, loc=loc1, scale=scale1) * float(size1) / x2.size + \
pdf(x_eval, loc=loc2, scale=scale2) * float(size2) / x2.size
fig = plt.figure(figsize=(8, 6))
ax = fig.add_subplot(111)
ax.plot(x2, np.zeros(x2.shape), 'b+', ms=12)
ax.plot(x_eval, kde(x_eval), 'k-', label="Scott's Rule")
ax.plot(x_eval, kde2(x_eval), 'b-', label="Silverman's Rule")
ax.plot(x_eval, kde3(x_eval), 'g-', label="Scott * 0.2")
ax.plot(x_eval, kde4(x_eval), 'c-', label="Scott * 0.5")
ax.plot(x_eval, bimodal_pdf, 'r--', label="Actual PDF")
ax.set_xlim([x_eval.min(), x_eval.max()])
ax.legend(loc=2)
ax.set_xlabel('x')
ax.set_ylabel('Density')
plt.show()
| bsd-3-clause |
charleseidsness/eispice | module/plot.py | 1 | 16841 | #
# Copyright (C) 2006-2007 Cooper Street Innovations Inc.
# Charles Eidsness <[email protected]>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
#
"""
This module provides a very basic Tk based plotting tool. It is intended
to be a basic default plotter for eispice, if nothing else is available, but
I recommend using something more advanced, like matplotlib or the plotter
built into eide if it's ever finished.
Classes:
Plot -- A simple Tk based plotter Widget
Functions:
plot -- Creates a new plot based on an eispice circuit's output.
plot_voltage -- Plot voltages at specific nodes
plot_current -- Plot currents through specific devices
"""
import tkinter as tk
import tkinter.font as tkFont
import numpy
from datetime import datetime
# --------------------------------------------------------------------------- #
# Plotter #
# (A very simple Tk based plotting tool) #
# --------------------------------------------------------------------------- #
def callback_hide(event):
item = event.widget.find_withtag('current')
tag = event.widget.gettags(item)[0]
if event.widget.itemconfig(item, 'fill')[4] == 'grey':
event.widget.itemconfig(tag, state=tk.NORMAL)
event.widget.itemconfig(item, fill='black')
else:
event.widget.itemconfig(tag, state=tk.HIDDEN)
event.widget.itemconfig(item, state=tk.NORMAL)
event.widget.itemconfig(item, fill='grey')
def callback_marker(event):
if ((event.x > event.widget.lMargin) and
(event.x < (event.widget.width - event.widget.rMargin)) and
(event.y > event.widget.tMargin) and
(event.y < (event.widget.height - event.widget.bMargin))):
if event.widget.markerIndex == 2:
event.widget.delete('m0')
event.widget.delete('m1')
event.widget.delete('marker')
event.widget.markerIndex = 0
return
marker = 'm' + str(event.widget.markerIndex)
fill = event.widget.markerColour[event.widget.markerIndex]
event.widget.delete(marker)
event.widget.delete('marker')
event.widget.create_line(event.widget.lMargin, event.y,
event.widget.width - event.widget.rMargin, event.y,
width=1, tags=marker, fill=fill)
event.widget.create_line(event.x, event.widget.tMargin,
event.x, event.widget.height - event.widget.bMargin,
width=1, tags=marker, fill=fill)
event.widget.create_text(event.widget.width-2,
(2+(event.widget.axisFont.cget('size')+5)*
(event.widget.markerIndex+1)),
text='(%.4g, %.4g)' % event.widget.pixel2axis((event.x, event.y)),
font=event.widget.axisFont,
anchor=tk.NE, tags=marker, fill=fill)
event.widget.marker[event.widget.markerIndex] = event.widget.pixel2axis((event.x, event.y))
delta = (event.widget.marker[1][0] - event.widget.marker[0][0],
event.widget.marker[1][1] - event.widget.marker[0][1])
event.widget.create_text(event.widget.width-2,2,
text='D(%.4g, %.4g)' % delta,
font=event.widget.axisFont,
anchor=tk.NE, tags='marker')
event.widget.markerIndex = (event.widget.markerIndex+1)%3
class Plot(tk.Canvas):
"""
A very simple Tk based plotter. This class is specifically
designed for use by the plot function.
Usage Example:
root = tk.Tk()
plot = Plot(root, "Test Plot")
plot.title = 'Title'
plot.subTitle = 'Sub Title'
plot.xAxis = 'X-Axis'
plot.yAxis = 'Y-Axis'
plot.data.append([(1,1),(2,2),(3,3)])
plot.legend.append('Test 1')
plot.data.append([(1,5),(3,12),(3.2,7)])
plot.legend.append('Test 2')
plot.plot()
root.mainloop()
"""
def __init__(self, master=None, title='', subTitle='', xAxis = '',
yAxis = '', bg = 'white', width = 550, height = 300,
lMargin = 75, tMargin = 55, rMargin = 25, bMargin = 55,
lineWidth = 2, xTicks = 5, yTicks = 10,
legend = None, data = None):
"""
Arguments:
master -- Tk root
title -- plot title
"""
tk.Canvas.__init__(self, master, width=width, height=height, bg = bg)
self.bind("<Button-1>", callback_marker)
master.title(title)
self.master = master
self.title=title
self.subTitle=subTitle
self.xAxis = xAxis
self.yAxis = yAxis
self.bg = bg
self.width = width
self.height = height
self.lMargin = lMargin
self.tMargin = tMargin
self.rMargin = rMargin
self.bMargin = bMargin
self.lineWidth = lineWidth
self.xTicks = xTicks
self.yTicks = yTicks
self.colours = ['red', 'navy', 'green', 'orange', 'purple',
'cyan', 'magenta', 'yellow', 'darkred']
if legend == None:
self.legend = []
else:
self.legend = legend
if data == None:
self.data = []
else:
self.data = data
self.xRange = [float('inf'), float('-inf')]
self.yRange = [float('inf'), float('-inf')]
self.markerIndex = 0
self.marker = [(0,0),(0,0)]
self.markerColour = ['red', 'blue']
self.titleFont = tkFont.Font(family='Helvetica', size=12, weight='bold')
self.axisFont = tkFont.Font(family='Helvetica', size=9)
def _drawLabels(self):
self.create_text(self.width/2, self.tMargin/2.5,
text=self.title, anchor=tk.CENTER, font=self.titleFont)
self.create_text(self.width/2,
(self.tMargin/2.5+2+self.titleFont.cget('size')),
text=self.subTitle, anchor=tk.CENTER, font=self.axisFont)
self.create_text(self.lMargin,
self.tMargin - 2 -self.titleFont.cget('size'),
text=self.yAxis, anchor=tk.CENTER, font=self.axisFont)
self.create_text(self.width/2, self.height - self.bMargin/2.5,
text=self.xAxis, anchor=tk.CENTER, font=self.axisFont)
def _drawGrid(self):
rEdge = self.width - self.rMargin
bEdge = self.height - self.bMargin
width = self.width - self.lMargin - self.rMargin
height = self.height - self.bMargin - self.tMargin
self.create_line(self.lMargin, bEdge, rEdge, bEdge, width=2)
self.create_line(self.lMargin, bEdge, self.lMargin,
self.tMargin, width=2)
# Create the x-axis
for i in range(self.xTicks + 1):
x = self.lMargin + i * width / self.xTicks
self.create_line(x, bEdge, x, self.tMargin, width=1,
stipple='gray50')
value = (i*(self.xRange[1] - self.xRange[0]) /
self.xTicks + self.xRange[0])
self.create_text(x, bEdge + self.axisFont.cget('size')/1.5,
text='%.4g'% value, anchor=tk.N, font=self.axisFont)
# Create the y-axis
for i in range(self.yTicks + 1):
y = bEdge - i * height / self.yTicks
self.create_line(self.lMargin, y, rEdge, y, width=1,
stipple='gray50')
value = (i*(self.yRange[1] - self.yRange[0]) /
self.yTicks + self.yRange[0])
self.create_text(self.lMargin -
self.axisFont.cget('size')/1.5, y,
text='%.4g'% value, anchor=tk.E, font=self.axisFont)
def _drawPlots(self):
scaled = []
for i in range(len(self.data)):
colour = self.colours[i%len(self.colours)]
scaled = []
tag = 'tag' + str(i)
tagTxt = 'txt' + str(i)
for data in self.data[i]:
scaled.append(self.axis2pixel(data))
self.create_line(scaled, fill=colour, smooth=1,
width=self.lineWidth, tags=tag)
if len(self.legend) > i:
yLocation = self.tMargin + i*(self.axisFont.cget('size') + 5)
self.create_line([((self.width - self.rMargin) + 10, yLocation),
((self.width - self.rMargin) + 20, yLocation)],
fill=colour, width=self.lineWidth, tags=tag)
self.create_text((self.width - self.rMargin) + 25, yLocation,
text=self.legend[i], anchor=tk.W, font=self.axisFont,
activefill='grey', tags=(tag,tagTxt))
self.tag_bind(tagTxt, '<Button-1>', callback_hide)
def _autoAxis(self):
scaled = []
for data in self.data:
for x,y in data:
self.xRange[0] = min(self.xRange[0], x)
self.xRange[1] = max(self.xRange[1], x)
self.yRange[0] = min(self.yRange[0], y)
self.yRange[1] = max(self.yRange[1], y)
# add some extra space and rounding to get a nice grid
# this is kind of convoluted, maybe someone else can think
# up someting better and submit it, I'd be happy to change this
if self.yRange[1]-self.yRange[0] != 0.0:
decimals = numpy.log10(abs(self.yRange[1]-self.yRange[0]))
else:
decimals = 1
if decimals < 0:
decimals = int(numpy.floor(decimals)-1)
else:
decimals = int(numpy.ceil(decimals)+1)
space = 0.1*abs(self.yRange[1]-self.yRange[0])
self.yRange[0] = self.yRange[0] - space
self.yRange[1] = self.yRange[1] + space
self.yRange[0] = numpy.round(self.yRange[0],decimals=abs(decimals))
self.yRange[1] = numpy.round(self.yRange[1],decimals=abs(decimals))
decimals = numpy.log10(abs(self.xRange[1]-self.xRange[0]))
if decimals < 0:
decimals = int(numpy.floor(decimals))-1
else:
decimals = int(numpy.ceil(decimals))+1
self.xRange[0] = numpy.round(self.xRange[0],decimals=abs(decimals))
self.xRange[1] = numpy.round(self.xRange[1],decimals=abs(decimals))
# make room for the legend
maxText = 0
for legend in self.legend:
maxText = max(maxText, len(legend))
self.rMargin = self.rMargin + maxText*self.axisFont.cget('size') + 25
def pixel2axis(self, pixel):
rEdge = self.width - self.rMargin
bEdge = self.height - self.bMargin
width = self.width - self.lMargin - self.rMargin
height = self.height - self.bMargin - self.tMargin
xScale = width/float((self.xRange[1] - self.xRange[0]))
yScale = height/float((self.yRange[1] - self.yRange[0]))
return ((pixel[0] - self.lMargin)/xScale + self.xRange[0],
(pixel[1] - bEdge)/-yScale + self.yRange[0])
def axis2pixel(self, axis):
rEdge = self.width - self.rMargin
bEdge = self.height - self.bMargin
width = self.width - self.lMargin - self.rMargin
height = self.height - self.bMargin - self.tMargin
if (self.xRange[1] - self.xRange[0]) != 0:
xScale = width/float((self.xRange[1] - self.xRange[0]))
else:
xScale = 1
if (self.yRange[1] - self.yRange[0]) != 0:
yScale = height/float((self.yRange[1] - self.yRange[0]))
else:
yScale = 1
return (self.lMargin + (axis[0] - self.xRange[0]) * xScale,
bEdge - (axis[1] - self.yRange[0]) * yScale)
def plot(self):
if len(self.data) > 0:
self._autoAxis()
self._drawLabels()
self._drawGrid()
self._drawPlots()
self.pack()
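# Aside (editorial), prompted by the comment in _autoAxis above: a common
# "nice number" step helper that could replace the log10/decimals rounding
# logic (illustrative sketch only, not wired into the class):
def _nice_step(span, ticks):
    import math
    raw = span / float(ticks)
    if raw <= 0:
        return 1.0
    mag = 10 ** math.floor(math.log10(raw))
    for mult in (1, 2, 5, 10):           # candidate "nice" multipliers
        if mult * mag >= raw:
            return mult * mag
    return 10 * mag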
# --------------------------------------------------------------------------- #
# Functions for Plotting Circuits
# --------------------------------------------------------------------------- #
def plot(*lst):
"""
eispice Basic Plotter
This function will plot the voltage and current results from an
eispice simulation or simulations.
Arguments:
-- any number of eispice Circuits
Example:
>>> import eispice
>>> cct = eispice.Circuit("Plotter Test")
>>> wave = eispice.Pulse(4, 8, '10n', '2n', '3n', '5n', '20n')
>>> cct.Vx = eispice.V(1, eispice.GND, 4, wave)
>>> cct.Cx = eispice.C(1, eispice.GND, '10n')
>>> cct.tran('0.5n', '100n')
>>> eispice.plot(cct)
"""
root = tk.Tk()
for circuit in lst:
if circuit.results is None:
raise RuntimeError("Circuit %s has no results." % circuit.title)
try:
vPlot.title += " ," + circuit.title
except NameError:
vPlot = Plot(root, circuit.title, subTitle=datetime.now(),
xAxis="Time (s)", yAxis="Voltage (V)")
for n in range(0,(circuit.results.shape[1])):
if ((circuit.variables[n][0] == 'v') and
(circuit.variables[n].find('#') == -1) and
(circuit.variables[n].find('@') == -1)):
data = numpy.take(circuit.results, [0, n], 1)
vPlot.data.append(data)
vPlot.legend.append(circuit.variables[n])
try:
iPlot.title += "," + circuit.title
except NameError:
iPlot = Plot(root, circuit.title, subTitle=datetime.now(),
xAxis="Time (s)", yAxis="Current (A)")
for n in range(0,(circuit.results.shape[1])):
if ((circuit.variables[n][0] == 'i') and
(circuit.variables[n].find('#') == -1)):
data = numpy.take(circuit.results, [0, n], 1)
iPlot.data.append(data)
iPlot.legend.append(circuit.variables[n])
vPlot.plot()
iPlot.plot()
root.mainloop()
def plot_voltage(cct, *nodename):
"""
eispice Voltage Plotter
This function will plot the voltage at a node or at a list of nodes
of an eispice simulation.
Arguments:
cct -- an eispice circuit
nodename -- name of the node to plot as a string or list of nodes
to plot
Example:
>>> import eispice
>>> cct = eispice.Circuit("Voltage Plotter Test")
>>> wave = eispice.Pulse(4, 8, '10n', '2n', '3n', '5n', '20n')
>>> cct.Vx = eispice.V(1, eispice.GND, 4, wave)
>>> cct.Cx = eispice.C(1, eispice.GND, '10n')
>>> cct.tran('0.5n', '100n')
>>> eispice.plot_voltage(cct, 1)
"""
root = tk.Tk()
plot = Plot(root, cct.title, subTitle=datetime.now(),
xAxis="Time (s)", yAxis="Voltage (V)")
if isinstance(nodename, str):
plot.data.append(cct.voltage_array(nodename))
else:
for node in nodename:
plot.data.append(cct.voltage_array(node))
plot.legend.append('v('+str(node)+')')
plot.plot()
root.mainloop()
def plot_current(cct, *devicename):
"""
eispice Current Plotter
This function will plot the current through a device or list of
devices of an eispice simulation.
Arguments:
cct -- an eispice circuit
devicename -- name of the device to plot as a string or list of
devices to plot
Example:
>>> import eispice
>>> cct = eispice.Circuit("Current Plotter Test")
>>> wave = eispice.Pulse(4, 8, '10n', '2n', '3n', '5n', '20n')
>>> cct.Vx = eispice.V(1, eispice.GND, 4, wave)
>>> cct.Cx = eispice.C(1, eispice.GND, '10n')
>>> cct.tran('0.5n', '100n')
>>> eispice.plot_current(cct, 'Vx')
"""
root = tk.Tk()
plot = Plot(root, cct.title, subTitle=datetime.now(),
xAxis="Time (s)", yAxis="Current (A)")
if isinstance(devicename, str):
plot.data.append(cct.current_array(devicename))
else:
for device in devicename:
plot.data.append(cct.current_array(device))
plot.legend.append('i('+str(device)+')')
plot.plot()
root.mainloop()
if __name__ == '__main__':
import doctest
doctest.testmod(verbose=False)
print('Testing Complete')
| gpl-2.0 |
AnasGhrab/scikit-learn | examples/linear_model/plot_iris_logistic.py | 283 | 1678 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Logistic Regression 3-class Classifier
=========================================================
Shown below are the decision boundaries of a logistic-regression classifier on the
`iris <http://en.wikipedia.org/wiki/Iris_flower_data_set>`_ dataset. The
datapoints are colored according to their labels.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
Y = iris.target
h = .02 # step size in the mesh
logreg = linear_model.LogisticRegression(C=1e5)
# fit the logistic-regression classifier on the data.
logreg.fit(X, Y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = logreg.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1, figsize=(4, 3))
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, edgecolors='k', cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
credp/lisa | lisa/analysis/idle.py | 2 | 10845 | # SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2015, ARM Limited and contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from functools import reduce
import operator
import warnings
import pandas as pd
import numpy as np
from lisa.datautils import series_integrate, df_split_signals, series_combine, df_add_delta, df_refit_index
from lisa.analysis.base import TraceAnalysisBase
from lisa.trace import requires_events, CPU
from lisa.generic import TypedList
class IdleAnalysis(TraceAnalysisBase):
"""
Support for plotting Idle Analysis data
:param trace: input Trace object
:type trace: :class:`trace.Trace`
"""
name = 'idle'
###############################################################################
# DataFrame Getter Methods
###############################################################################
@TraceAnalysisBase.cache
@requires_events('cpu_idle')
def df_cpus_idle(self, cpus=None):
"""
Dataframe of the ``cpu_idle`` event, with the following columns:
* ``cpu``
* ``state``: Instead of 4294967295, the -1 type independent value
is used.
:param cpus: Optionally, filter on that list of CPUs
:type cpus: list(int) or None
"""
df = self.trace.df_event('cpu_idle')
# Filter before rename to avoid copying data we will ignore
if cpus is not None:
df = df[df['cpu_id'].isin(cpus)]
df = df.rename({'cpu_id': 'cpu'}, axis=1)
# The event uses an unsigned int even though the kernel uses -1, so use
# -1 to avoid being tied to the event field type size
non_idle = (2 ** 32) -1
df['state'].replace(non_idle, -1, inplace=True)
return df
@TraceAnalysisBase.cache
@df_cpus_idle.used_events
def df_cpu_idle(self, cpu=None):
"""
Same as :meth:`df_cpus_idle` but for one CPU.
"""
if cpu is None:
warnings.warn('cpu=None is deprecated, use df_cpus_idle() to get a dataframe for all CPUs', DeprecationWarning)
cpus = None
else:
cpus = [cpu]
return self.df_cpus_idle(cpus=cpus)
@df_cpu_idle.used_events
def signal_cpu_active(self, cpu):
"""
Build a square wave representing the active (i.e. non-idle) CPU time
:param cpu: CPU ID
:type cpu: int
:returns: A :class:`pandas.Series` that equals 1 at timestamps where the
CPU is reported to be non-idle, 0 otherwise
"""
cpu_df = self.df_cpu_idle(cpu)
# Turn -1 into 1 and everything else into 0
cpu_active = cpu_df.state.map({-1: 1})
cpu_active.fillna(value=0, inplace=True)
return cpu_active
@signal_cpu_active.used_events
def signal_cluster_active(self, cluster):
"""
Build a square wave representing the active (i.e. non-idle) cluster time
:param cluster: list of CPU IDs belonging to a cluster
:type cluster: list(int)
:returns: A :class:`pandas.Series` that equals 1 at timestamps where at
least one CPU is reported to be non-idle, 0 otherwise
"""
active = self.signal_cpu_active(cluster[0]).to_frame(name=cluster[0])
for cpu in cluster[1:]:
active = active.join(
self.signal_cpu_active(cpu).to_frame(name=cpu),
how='outer'
)
active.fillna(method='ffill', inplace=True)
# There might be NaNs in the signal where we got data from some CPUs
# before others. That will break the .astype(int) below, so drop rows
# with NaN in them.
active.dropna(inplace=True)
# Cluster active is the OR between the actives on each CPU
# belonging to that specific cluster
cluster_active = reduce(
operator.or_,
[cpu_active.astype(int) for _, cpu_active in
active.items()]
)
return cluster_active
@TraceAnalysisBase.cache
@signal_cpu_active.used_events
def df_cpus_wakeups(self):
"""
Get a DataFrame showing when CPUs have woken from idle
:returns: A :class:`pandas.DataFrame` with
* A ``cpu`` column (the CPU that woke up at the row index)
"""
cpus = list(range(self.trace.cpus_count))
sr = pd.Series(dtype='float64')
for cpu in cpus:
cpu_sr = self.signal_cpu_active(cpu)
cpu_sr = cpu_sr[cpu_sr == 1]
cpu_sr = cpu_sr.replace(1, cpu)
sr = sr.append(cpu_sr)
return pd.DataFrame({'cpu': sr}).sort_index()
@df_cpu_idle.used_events
def df_cpu_idle_state_residency(self, cpu):
"""
Compute time spent by a given CPU in each idle state.
:param cpu: CPU ID
:type cpu: int
:returns: a :class:`pandas.DataFrame` with:
* Idle states as index
* A ``time`` column (The time spent in the idle state)
"""
idle_df = self.df_cpu_idle(cpu)
# Ensure accurate time-based sum of state deltas
idle_df = df_refit_index(idle_df, window=self.trace.window)
# For each state, sum the time spent in it
idle_df = df_add_delta(idle_df)
residency = {
cols['state']: state_df['delta'].sum()
for cols, state_df in df_split_signals(idle_df, ['state'])
}
df = pd.DataFrame.from_dict(residency, orient='index', columns=['time'])
df.index.name = 'idle_state'
return df
@df_cpu_idle.used_events
def df_cluster_idle_state_residency(self, cluster):
"""
Compute time spent by a given cluster in each idle state.
:param cluster: list of CPU IDs
:type cluster: list(int)
:returns: a :class:`pandas.DataFrame` with:
* Idle states as index
* A ``time`` column (The time spent in the idle state)
"""
idle_df = self.df_cpus_idle()
# Create a dataframe with a column per CPU
cols = {
cpu: group['state']
for cpu, group in idle_df.groupby(
'cpu',
sort=False,
observed=True,
)
if cpu in cluster
}
cpus_df = pd.DataFrame(cols, index=idle_df.index)
cpus_df.fillna(method='ffill', inplace=True)
# Ensure accurate time-based sum of state deltas. This will extrapolate
# the known cluster_state both to the left and the right.
cpus_df = df_refit_index(cpus_df, window=self.trace.window)
# Each core in a cluster can be in a different idle state, but the
# cluster lies in the idle state with lowest ID, that is the shallowest
# idle state among the idle states of its CPUs
cluster_state = cpus_df.min(axis='columns')
cluster_state.name = 'cluster_state'
df = cluster_state.to_frame()
# For each state transition, sum the time spent in it
df_add_delta(df, inplace=True)
# For each cluster state, take the sum of the delta column.
# The resulting dataframe is indexed by group keys (cluster_state).
residency = df.groupby('cluster_state', sort=False, observed=True)['delta'].sum()
residency.name = 'time'
residency = residency.to_frame()
residency.index.name = 'idle_state'
return residency
###############################################################################
# Plotting Methods
###############################################################################
@TraceAnalysisBase.plot_method()
@df_cpu_idle_state_residency.used_events
def plot_cpu_idle_state_residency(self, cpu: CPU, axis, local_fig, pct: bool=False):
"""
Plot the idle state residency of a CPU
:param cpu: The CPU
:type cpu: int
:param pct: Plot residencies in percentage
:type pct: bool
"""
df = self.df_cpu_idle_state_residency(cpu)
self._plot_idle_state_residency(df, axis, pct)
axis.set_title(f"CPU{cpu} idle state residency")
@TraceAnalysisBase.plot_method()
@df_cluster_idle_state_residency.used_events
def plot_cluster_idle_state_residency(self, cluster: TypedList[CPU], axis, local_fig, pct: bool=False):
"""
Plot the idle state residency of a cluster
:param cluster: The cluster
:type cpu: list(int)
:param pct: Plot residencies in percentage
:type pct: bool
"""
df = self.df_cluster_idle_state_residency(cluster)
self._plot_idle_state_residency(df, axis, pct)
axis.set_title(f"CPUs {cluster} idle state residency")
@TraceAnalysisBase.plot_method(return_axis=True)
@plot_cluster_idle_state_residency.used_events
def plot_clusters_idle_state_residency(self, pct: bool=False, axis=None, **kwargs):
"""
Plot the idle state residency of all clusters
:param pct: Plot residencies in percentage
:type pct: bool
.. note:: This assumes clusters == frequency domains, which may
not hold true...
"""
clusters = self.trace.plat_info['freq-domains']
def plotter(axes, local_fig):
for axis, cluster in zip(axes, clusters):
self.plot_cluster_idle_state_residency(cluster, pct=pct, axis=axis)
return self.do_plot(plotter, nrows=len(clusters), sharex=True, axis=axis, **kwargs)
###############################################################################
# Utility Methods
###############################################################################
def _plot_idle_state_residency(self, df, axis, pct):
"""
A convenient helper to plot idle state residency
"""
if pct:
df = df * 100 / df.sum()
df["time"].plot.barh(ax=axis)
if pct:
axis.set_xlabel("Time share (%)")
else:
axis.set_xlabel("Time (s)")
axis.set_ylabel("Idle state")
axis.grid(True)
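# Aside (editorial): a minimal pandas-only sketch of the residency computation
# used by df_cpu_idle_state_residency above (illustrative; not used by the
# class). `df` is assumed to hold one state-change event per row, indexed by
# timestamp, with a 'state' column; the time spent in each state is the sum of
# the deltas to the next event, which is what df_add_delta provides.
def _residency_sketch(df):
    out = df.copy()
    out['delta'] = out.index.to_series().diff().shift(-1)  # time to next event
    return out.groupby('state')['delta'].sum()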
# vim :set tabstop=4 shiftwidth=4 expandtab textwidth=80
| apache-2.0 |
jeffrey-newman/Heatrisk_South_Australia_GoyderProjections | Deprecated/EHFRecalc.py | 1 | 15964 | # -*- coding: utf-8 -*-
"""
Created on Tue Aug 16 16:36:40 2016
@author: a1091793
"""
# -*- coding: utf-8 -*-
import numpy as np
import scipy.stats as ss
import scipy as sp
from rpy2.robjects.packages import importr
import rpy2.robjects as ro
# import pandas.rpy.common as com
#gPdtest = importr('gPdtest')
def calcEHF(file_path, path_out, replicate, t95):
# Year, Month, Day, Weather State (you probably won’t use this), Rainfall (mm), Tmax (oC), Tmin (oC), Short wave solar radiation (MJ/m2), Vapour Pressure Deficit (hPa), Morton’s APET (mm).
raw = np.dtype([('year', np.uint), ('month', np.uint), ('day', np.uint), ('wState', np.uint),('rain', np.float_), ('maxT', np.float_), ('minT', np.float_), ('srad', np.float_), ('pres', np.float_), ('apet', np.float_)])
calced = np.dtype([('dmt', np.float_), ('3day', np.float_), ('30day', np.float_), ('EHI_sig', np.float_), ('EHI_accl', np.float_), ('EHF', np.float_), ('EHF_respec', np.float_), ('Heatwave_day', np.uint)])
dt = np.dtype([('raw', raw), ('calced',calced)])
column_header = 'year\tmonth\tday\twState\train\tmaxT\tminT\tsrad\tpres\tapet\tdmt\t3day\t30day\tEHI_sig\tEHI_accl\tEHF\tEHF_respec\n'
raw_data = np.loadtxt(file_path, dtype=raw)
data = np.empty(raw_data.size, dt)
daily_means = np.zeros(raw_data.size)
# load raw data into amalgamated data array
it = np.nditer(data, flags=['f_index'])
while not it.finished:
index = it.index
data[index]['raw'] = raw_data[index]
# Calculate daily mean temperature
# See note in Nairn and Fawcett (2015) The Excess Heat Factor: A Metric for Heatwave Intensity and Its Use in Classifying Heatwave Severity. Int. J. Environ. Res. Public Health 2015, 12, 227-253; doi:10.3390/ijerph120100227
# For heat, it is better to use max and min records for the same day, even though a 'day' for BOM in Australia is based on a 9am to 9am cycle, meaning max and min will usually be on separate days.
# See comments made by Nairn and Fawcett in their introduction.
rd = data[index]['raw']
cd = data[index]['calced']
cd['dmt'] = ( rd['maxT'] + rd['minT'] ) / 2
daily_means[index] = cd['dmt']
# Calculate three day averages
cd['3day'] = 0
if (index > 1):
for i in range(index-2, index+1):
cd['3day'] += data[i]['calced']['dmt']
cd['3day'] /= 3
# Calculate thirty day averages
cd['30day'] = 0
if (index > 28):
for i in range(index-29, index+1):
cd['30day'] += data[i]['calced']['dmt']
cd['30day'] /= 30
#print index
it.iternext()
# #Calculate the 95th percentile
# t95 = np.percentile(daily_means, 95.0, overwrite_input=True)
# f_t95 = open(path_out_t95, 'w')
# f_t95.write(str(t95))
heat_wave_ehfs = []
# Second pass: calculate EHI_sig, EHI_accl and the two EHF variants
it = np.nditer(data, flags=['f_index'])
while not it.finished:
index = it.index
data[index]['raw'] = raw_data[index]
rd = data[index]['raw']
cd = data[index]['calced']
if (index > 1):
cd['EHI_sig'] = cd['3day'] - t95
else:
cd['EHI_sig'] = 0
if (index > 28):
cd['EHI_accl'] = cd['3day'] - cd['30day']
cd['EHF'] = cd['EHI_sig'] * max(1, cd['EHI_accl'])
cd['EHF_respec'] = max(0, cd['EHI_sig']) * max(1, cd['EHI_accl'])
if (cd['EHF_respec'] > 0):
heat_wave_ehfs.append(cd['EHF_respec'])
else:
cd['EHI_accl'] = 0
cd['EHF'] = 0
cd['EHF_respec'] = 0
cd['Heatwave_day'] = 0
#print index
it.iternext()
    with open('heat_wave_ehfs.txt', 'w') as ehf_file:
        for item in heat_wave_ehfs:
            ehf_file.write(str(item) + "\n")
# We can now determine the 85 percentile based on gpd. Use R for this.
# ro.r('')
sum_days_summer = 0 #0
sum_days_autumn = 0 #1
sum_days_winter = 0 #2
sum_days_spring = 0 #3
sum_ehf_summer = 0 #4
sum_ehf_autumn = 0 #5
sum_ehf_winter = 0 #6
sum_ehf_spring = 0 #7
max_ehf_summer = 0 #8
max_ehf_autumn = 0 #9
max_ehf_winter = 0 #10
max_ehf_spring = 0 #11
tot_days_in_summer = 0 #12
tot_days_in_autumn = 0 #13
tot_days_in_winter = 0 #14
tot_days_in_spring = 0 #15
prop_days_in_summer = 0 #16
prop_days_in_autumn = 0 #17
prop_days_in_winter = 0 #18
prop_days_in_spring = 0 #19
avg_ehf_summer = 0 #20
avg_ehf_autumn = 0 #21
avg_ehf_winter = 0 #22
avg_ehf_spring = 0 #23
sum_days = 0 #24
sum_ehf = 0 #25
max_ehf = 0 #26
tot_days = 0 #27
prop_days = 0 #28
avg_ehf = 0 #29
yearly_stats = {}
f = open(path_out, 'w')
f.write(column_header)
# load raw data into amalgamated data array
it = np.nditer(data, flags=['f_index'])
while not it.finished:
index = it.index
        # (see the note on daily mean temperature in the first loop above)
rd = data[index]['raw']
cd = data[index]['calced']
if (cd['EHF_respec'] > 0):
cd['Heatwave_day'] = 1
if ((index - 1 ) > 0 ):
data[index - 1]['calced']['Heatwave_day'] = 1
if ((index - 2 ) > 0 ):
data[index - 2]['calced']['Heatwave_day'] = 1
        # one value per column in column_header (dmt .. EHF_respec)
        dat = str(rd[0]) + "\t" + str(rd[1]) + "\t" + str(rd[2]) + "\t" + str(rd[3]) + "\t" + str(rd[4]) + "\t" + str(rd[5]) + "\t" + str(rd[6]) + "\t" + str(rd[7]) + "\t" + str(rd[8]) + "\t" + str(rd[9]) + "\t" + str(cd[0]) + "\t" + str(cd[1]) + "\t" + str(cd[2]) + "\t" + str(cd[3]) + "\t" + str(cd[4]) + "\t" + str(cd[5]) + "\t" + str(cd[6]) + "\n"
f.write(dat)
it.iternext()
year = rd['year']
if year not in yearly_stats:
yearly_stats[year] = [0.0] * 30
month = rd['month']
if (1 <= month <= 2) or (month == 12):
# summer
yearly_stats[year][12] += 1
tot_days_in_summer += 1
if (cd['EHF_respec'] > 0):
yearly_stats[year][0] += 1
sum_days_summer += 1
yearly_stats[year][4] += cd['EHF_respec']
sum_ehf_summer += cd['EHF_respec']
if (cd['EHF_respec'] > max_ehf_summer):
max_ehf_summer = cd['EHF_respec']
if (cd['EHF_respec'] > yearly_stats[year][8]):
yearly_stats[year][8] = cd['EHF_respec']
if (3 <= month <= 5):
#autumn
yearly_stats[year][13] += 1
tot_days_in_autumn += 1
if (cd['EHF_respec'] > 0):
yearly_stats[year][1] += 1
sum_days_autumn += 1
sum_ehf_autumn += cd['EHF_respec']
yearly_stats[year][5] += cd['EHF_respec']
if (cd['EHF_respec'] > max_ehf_autumn):
max_ehf_autumn = cd['EHF_respec']
if (cd['EHF_respec'] > yearly_stats[year][9]):
yearly_stats[year][9] = cd['EHF_respec']
if (6 <= month <= 8):
#winter
yearly_stats[year][14] += 1
tot_days_in_winter += 1
if (cd['EHF_respec'] > 0):
yearly_stats[year][2] += 1
sum_days_winter += 1
sum_ehf_winter += cd['EHF_respec']
yearly_stats[year][6] += cd['EHF_respec']
if (cd['EHF_respec'] > max_ehf_winter):
max_ehf_winter = cd['EHF_respec']
if (cd['EHF_respec'] > yearly_stats[year][10]):
yearly_stats[year][10] = cd['EHF_respec']
        if (9 <= month <= 11):
            #spring
            yearly_stats[year][15] += 1
            tot_days_in_spring += 1
if (cd['EHF_respec'] > 0):
yearly_stats[year][3] += 1
sum_days_spring += 1
sum_ehf_spring += cd['EHF_respec']
yearly_stats[year][7] += cd['EHF_respec']
if (cd['EHF_respec'] > max_ehf_spring):
max_ehf_spring = cd['EHF_respec']
if (cd['EHF_respec'] > yearly_stats[year][11]):
yearly_stats[year][11] = cd['EHF_respec']
    f.close()
    prop_days_in_summer = sum_days_summer / float(tot_days_in_summer)
prop_days_in_autumn = sum_days_autumn / float(tot_days_in_autumn)
prop_days_in_winter = sum_days_winter / float(tot_days_in_winter)
prop_days_in_spring = sum_days_spring / float(tot_days_in_spring)
if (sum_days_summer > 0):
avg_ehf_summer = sum_ehf_summer / float(sum_days_summer)
else:
avg_ehf_summer = 0
if (sum_days_autumn > 0):
avg_ehf_autumn = sum_ehf_autumn / float(sum_days_autumn)
else:
avg_ehf_autumn = 0
if (sum_days_winter > 0):
avg_ehf_winter = sum_ehf_winter / float(sum_days_winter)
else:
avg_ehf_winter = 0
if (sum_days_spring > 0):
avg_ehf_spring = sum_ehf_spring / float(sum_days_spring)
else:
avg_ehf_spring = 0
sum_days = sum_days_summer + sum_days_autumn + sum_days_winter + sum_days_spring
sum_ehf = sum_ehf_summer + sum_ehf_autumn + sum_ehf_winter + sum_ehf_spring
max_ehf = max(max_ehf_summer, max_ehf_autumn, max_ehf_winter, max_ehf_spring)
tot_days = tot_days_in_summer + tot_days_in_autumn + tot_days_in_winter + tot_days_in_spring
prop_days = sum_days / float(tot_days)
if (sum_days > 0):
avg_ehf = sum_ehf / sum_days
else:
avg_ehf = 0
for year, stats in yearly_stats.items():
stats[16] = stats[0] / float(stats[12])
stats[17] = stats[1] / float(stats[13])
stats[18] = stats[2] / float(stats[14])
stats[19] = stats[3] / float(stats[15])
if (stats[0] > 0):
stats[20] = stats[4] / float(stats[0])
else:
stats[20] = 0
if (stats[1] > 0):
stats[21] = stats[5] / float(stats[1])
else:
stats[21] = 0
if (stats[2] > 0):
stats[22] = stats[6] / float(stats[2])
else:
stats[22] = 0
if (stats[3] > 0):
stats[23] = stats[7] / float(stats[3])
else:
stats[23] = 0
stats[24] = stats[0] + stats[1] + stats[2] + stats[3]
stats[25] = stats[4] + stats[5] + stats[6] + stats[7]
stats[26] = max(stats[8], stats[9], stats[10], stats[11])
stats[27] = stats[12] + stats[13] + stats[14] + stats[15]
stats[28] = stats[24] / float(stats[27])
if (stats[24] > 0):
stats[29] = stats[25] / stats[24]
else:
stats[29] = 0
with open(('stats_rep' + str(replicate) + '.txt'), 'w') as f_stats:
f_stats.write("stat\tval")
f_stats.write("sum_days_summer\t" + str(sum_days_summer) + "\n")
f_stats.write("sum_days_autumn\t" + str(sum_days_autumn) + "\n")
f_stats.write("sum_days_winter\t" + str(sum_days_winter) + "\n")
f_stats.write("sum_days_spring\t" + str(sum_days_spring) + "\n")
f_stats.write("sum_ehf_summer\t" + str(sum_ehf_summer ) + "\n")
f_stats.write("sum_ehf_autumn\t" + str(sum_ehf_autumn ) + "\n")
f_stats.write("sum_ehf_winter\t" + str(sum_ehf_winter ) + "\n")
f_stats.write("sum_ehf_spring\t" + str(sum_ehf_spring ) + "\n")
f_stats.write("max_ehf_summer\t" + str(max_ehf_summer ) + "\n")
f_stats.write("max_ehf_autumn\t" + str(max_ehf_autumn ) + "\n")
f_stats.write("max_ehf_winter\t" + str(max_ehf_winter ) + "\n")
f_stats.write("max_ehf_spring\t" + str(max_ehf_spring ) + "\n")
f_stats.write("tot_days_in_summer\t" + str(tot_days_in_summer) + "\n")
f_stats.write("tot_days_in_autumn\t" + str(tot_days_in_autumn) + "\n")
f_stats.write("tot_days_in_winter\t" + str(tot_days_in_winter) + "\n")
f_stats.write("tot_days_in_spring\t" + str(tot_days_in_spring) + "\n")
f_stats.write("prop_days_in_summer\t" + str(prop_days_in_summer) + "\n")
f_stats.write("prop_days_in_autumn\t" + str(prop_days_in_autumn) + "\n")
f_stats.write("prop_days_in_winter\t" + str(prop_days_in_winter) + "\n")
f_stats.write("prop_days_in_spring\t" + str(prop_days_in_spring) + "\n")
f_stats.write("avg_ehf_summer\t" + str(avg_ehf_summer) + "\n")
f_stats.write("avg_ehf_autumn\t" + str(avg_ehf_autumn) + "\n")
f_stats.write("avg_ehf_winter\t" + str(avg_ehf_winter) + "\n")
f_stats.write("avg_ehf_spring\t" + str(avg_ehf_spring) + "\n")
f_stats.write("sum_days\t" + str(sum_days) + "\n")
f_stats.write("sum_ehf\t" + str(sum_ehf) + "\n")
f_stats.write("max_ehf\t" + str(max_ehf) + "\n")
f_stats.write("tot_days\t" + str(tot_days) + "\n")
f_stats.write("prop_days\t" + str(prop_days) + "\n")
f_stats.write("avg_ehf\t" + str(avg_ehf))
stats = [sum_days_summer, sum_days_autumn, sum_days_winter, sum_days_spring, sum_ehf_summer, sum_ehf_autumn, sum_ehf_winter, sum_ehf_spring, max_ehf_summer, max_ehf_autumn, max_ehf_winter, max_ehf_spring, tot_days_in_summer, tot_days_in_autumn, tot_days_in_winter, tot_days_in_spring, prop_days_in_summer, prop_days_in_autumn, prop_days_in_winter, prop_days_in_spring, avg_ehf_summer, avg_ehf_autumn, avg_ehf_winter, avg_ehf_spring, sum_days, sum_ehf, max_ehf, tot_days, prop_days, avg_ehf]
with open('yearly_stats_rep' + str(replicate) + '.txt', 'w') as f_ystats:
f_ystats.write("year\tsum_days_summer\tsum_days_autumn\tsum_days_winter\tsum_days_spring\tsum_ehf_summer\tsum_ehf_autumn\tsum_ehf_winter\tsum_ehf_spring\tmax_ehf_summer\tmax_ehf_autumn\tmax_ehf_winter\tmax_ehf_spring\ttot_days_in_summer\ttot_days_in_autumn\ttot_days_in_winter\ttot_days_in_spring\tprop_days_in_summer\tprop_days_in_autumn\tprop_days_in_winter\tprop_days_in_spring\tavg_ehf_summer\tavg_ehf_autumn\tavg_ehf_winter\tavg_ehf_spring\tsum_days\tsum_ehf\tmax_ehf\ttot_days\tprop_days\tavg_ehf\t")
for year, stats in yearly_stats.items():
f_ystats.write(str(year)+"\t")
for i in range(0, 30):
f_ystats.write(str(stats[i]) + "\t")
f_ystats.write("\n")
return yearly_stats
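# Illustrative sketch (editor addition), not part of the original script: the
# per-day EHF arithmetic used inside calcEHF, reduced to a single day. dmt3,
# dmt30 and t95 are the 3-day mean, the 30-day mean and the climatological
# 95th percentile of daily mean temperature; any values passed in are
# hypothetical.
def ehf_for_day(dmt3, dmt30, t95):
    ehi_sig = dmt3 - t95     # excess heat relative to climatology
    ehi_accl = dmt3 - dmt30  # excess heat relative to recent acclimatisation
    return max(0.0, ehi_sig) * max(1.0, ehi_accl)  # the EHF_respec form above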
#c=ss.genpareto.fit(heat_wave_ehfs) | gpl-3.0 |
samuroi/SamuROI | samuroi/gui/widgets/canvasbase.py | 1 | 1470 | from contextlib import contextmanager
from PyQt5 import QtCore
from matplotlib.figure import Figure
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
class CanvasBase(FigureCanvas):
"""Plot the actual 2D frame of data with all mask artists and the overlay"""
# idea: we could implement our own context manager which could be used as a decorator to directly disable draw.
@contextmanager
def disable_draw(self):
# store the original draw method
draw = self.draw
def noop(*args):
pass
# override the draw method as noop
self.draw = noop
# yield and run code in context
yield
# restore the original behaviour of draw
self.draw = draw
    @contextmanager
    def draw_on_exit(self):
        # suppress all intermediate draws, then redraw exactly once on exit
        with self.disable_draw():
            yield
        self.draw()
def __init__(self):
# initialize the canvas where the Figure renders into
FigureCanvas.__init__(self, Figure())
# allow this widget to have the focus set by tab or mouse click
self.setFocusPolicy(QtCore.Qt.StrongFocus)
self.axes = self.figure.add_subplot(111)
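# Illustrative sketch (editor addition), not part of the original module: batch
# several artist updates so the canvas redraws only once, on leaving the
# context. `canvas` and `artists` are hypothetical names supplied by a caller.
def _example_batched_update(canvas, artists):
    with canvas.draw_on_exit():
        for artist in artists:
            canvas.axes.add_artist(artist)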
| mit |
chenyyx/scikit-learn-doc-zh | examples/en/plot_missing_values.py | 35 | 3059 | """
======================================================
Imputing missing values before building an estimator
======================================================
This example shows that imputing the missing values can give better
results than discarding the samples containing any missing value.
Imputing does not always improve the predictions, so please check via
cross-validation. Sometimes dropping rows or using marker values is
more effective.
Missing values can be replaced by the mean, the median or the most frequent
value using the ``strategy`` hyper-parameter.
The median is a more robust estimator for data with high magnitude variables
which could dominate results (otherwise known as a 'long tail').
Script output::
Score with the entire dataset = 0.56
Score without the samples containing missing values = 0.48
Score after imputation of the missing values = 0.55
In this case, imputing helps the classifier get close to the original score.
"""
import numpy as np
from sklearn.datasets import load_boston
from sklearn.ensemble import RandomForestRegressor
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import Imputer
from sklearn.model_selection import cross_val_score
rng = np.random.RandomState(0)
dataset = load_boston()
X_full, y_full = dataset.data, dataset.target
n_samples = X_full.shape[0]
n_features = X_full.shape[1]
# Estimate the score on the entire dataset, with no missing values
estimator = RandomForestRegressor(random_state=0, n_estimators=100)
score = cross_val_score(estimator, X_full, y_full).mean()
print("Score with the entire dataset = %.2f" % score)
# Add missing values in 75% of the lines
missing_rate = 0.75
n_missing_samples = int(np.floor(n_samples * missing_rate))
missing_samples = np.hstack((np.zeros(n_samples - n_missing_samples,
dtype=np.bool),
np.ones(n_missing_samples,
dtype=np.bool)))
rng.shuffle(missing_samples)
missing_features = rng.randint(0, n_features, n_missing_samples)
# Estimate the score without the lines containing missing values
X_filtered = X_full[~missing_samples, :]
y_filtered = y_full[~missing_samples]
estimator = RandomForestRegressor(random_state=0, n_estimators=100)
score = cross_val_score(estimator, X_filtered, y_filtered).mean()
print("Score without the samples containing missing values = %.2f" % score)
# Estimate the score after imputation of the missing values
X_missing = X_full.copy()
X_missing[np.where(missing_samples)[0], missing_features] = 0
y_missing = y_full.copy()
estimator = Pipeline([("imputer", Imputer(missing_values=0,
strategy="mean",
axis=0)),
("forest", RandomForestRegressor(random_state=0,
n_estimators=100))])
score = cross_val_score(estimator, X_missing, y_missing).mean()
print("Score after imputation of the missing values = %.2f" % score)
| gpl-3.0 |
ndardenne/pymatgen | pymatgen/analysis/wulff.py | 2 | 20128 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module define a WulffShape class to generate the Wulff shape from
a lattice, a list of indices and their corresponding surface energies,
and the total area and volume of the wulff shape,the weighted surface energy,
the anisotropy and shape_factor can also be calculated.
In support of plotting from a given view in terms of miller index.
The lattice is from the conventional unit cell, and (hkil) for hexagonal
lattices.
If you use this code extensively, consider citing the following:
Tran, R.; Xu, Z.; Radhakrishnan, B.; Winston, D.; Persson, K. A.; Ong, S. P.
(2016). Surface energies of elemental crystals. Scientific Data.
"""
from __future__ import division, unicode_literals
from pymatgen.core.structure import Structure
from pymatgen.core.surface import get_recp_symmetry_operation
from pymatgen.util.coord_utils import get_angle
import numpy as np
import scipy as sp
from scipy.spatial import ConvexHull
import logging
import matplotlib as mpl
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d as mpl3
__author__ = 'Zihan Xu, Richard Tran, Shyue Ping Ong'
__copyright__ = 'Copyright 2013, The Materials Virtual Lab'
__version__ = '0.1'
__maintainer__ = 'Zihan Xu'
__email__ = '[email protected]'
__date__ = 'May 5 2016'
logger = logging.getLogger(__name__)
def hkl_tuple_to_str(hkl):
"""
Prepare for display on plots
"(hkl)" for surfaces
    Args:
hkl: in the form of [h, k, l] or (h, k, l)
"""
str_format = '($'
for x in hkl:
if x < 0:
str_format += '\overline{' + str(-x) + '}'
else:
str_format += str(x)
str_format += '$)'
return str_format
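# Illustrative sketch (editor addition): hkl_tuple_to_str((1, -1, 0)) returns
# '($1\overline{1}0$)', i.e. a LaTeX label with a bar over each negative index.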
def get_tri_area(pts):
"""
Given a list of coords for 3 points,
Compute the area of this triangle.
Args:
pts: [a, b, c] three points
"""
a, b, c = pts[0], pts[1], pts[2]
v1 = np.array(b) - np.array(a)
v2 = np.array(c) - np.array(a)
area_tri = abs(sp.linalg.norm(sp.cross(v1, v2)) / 2)
return area_tri
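# Illustrative sketch (editor addition): a right triangle with unit legs has
# area 0.5, e.g. get_tri_area([[0, 0, 0], [1, 0, 0], [0, 1, 0]]) == 0.5.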
class WulffFacet(object):
"""
Helper container for each Wulff plane.
"""
def __init__(self, normal, e_surf, normal_pt, dual_pt, index, m_ind_orig,
miller):
self.normal = normal
self.e_surf = e_surf
self.normal_pt = normal_pt
self.dual_pt = dual_pt
self.index = index
self.m_ind_orig = m_ind_orig
self.miller = miller
self.points = []
self.outer_lines = []
class WulffShape(object):
"""
Generate Wulff Shape from list of miller index and surface energies,
with given conventional unit cell.
surface energy (Jm^2) is the length of normal.
Wulff shape is the convex hull.
Based on:
http://scipy.github.io/devdocs/generated/scipy.spatial.ConvexHull.html
Process:
1. get wulff simplices
2. label with color
3. get wulff_area and other properties
.. attribute:: debug (bool)
.. attribute:: alpha
transparency
.. attribute:: color_set
.. attribute:: grid_off (bool)
.. attribute:: axis_off (bool)
.. attribute:: show_area
.. attribute:: off_color
color of facets off wulff
.. attribute:: structure
Structure object, input conventional unit cell (with H ) from lattice
.. attribute:: miller_list
list of input miller index, for hcp in the form of hkil
.. attribute:: hkl_list
        hkil reduced to hkl, in the same order as input_miller
.. attribute:: e_surf_list
list of input surface energies, in the same order with input_miller
.. attribute:: lattice
Lattice object, the input lattice for the conventional unit cell
.. attribute:: facets
[WulffFacet] for all facets considering symm
.. attribute:: dual_cv_simp
simplices from the dual convex hull (dual_pt)
.. attribute:: wulff_pt_list
.. attribute:: wulff_cv_simp
simplices from the convex hull of wulff_pt_list
.. attribute:: on_wulff
list for all input_miller, True is on wulff.
.. attribute:: color_area
list for all input_miller, total area on wulff, off_wulff = 0.
.. attribute:: miller_area
($hkl$): area for all input_miller
"""
def __init__(self, lattice, miller_list, e_surf_list, symprec=1e-5):
"""
Args:
lattice: Lattice object of the conventional unit cell
            miller_list ([(hkl), ...]): list of hkl or hkil for hcp
e_surf_list ([float]): list of corresponding surface energies
symprec (float): for recp_operation, default is 1e-5.
"""
self.color_ind = list(range(len(miller_list)))
self.input_miller_fig = [hkl_tuple_to_str(x) for x in miller_list]
# store input data
self.structure = Structure(lattice, ["H"], [[0, 0, 0]])
self.miller_list = tuple([tuple(x) for x in miller_list])
self.hkl_list = tuple([(x[0], x[1], x[-1]) for x in miller_list])
self.e_surf_list = tuple(e_surf_list)
self.lattice = lattice
self.symprec = symprec
# 2. get all the data for wulff construction
# get all the surface normal from get_all_miller_e()
self.facets = self._get_all_miller_e()
logger.debug(len(self.facets))
# 3. consider the dual condition
dual_pts = [x.dual_pt for x in self.facets]
dual_convex = ConvexHull(dual_pts)
dual_cv_simp = dual_convex.simplices
# simplices (ndarray of ints, shape (nfacet, ndim))
# list of [i, j, k] , ndim = 3
# i, j, k: ind for normal_e_m
# recalculate the dual of dual, get the wulff shape.
        # corner <-> surface
# get cross point from the simplices of the dual convex hull
wulff_pt_list = [self._get_cross_pt_dual_simp(dual_simp)
for dual_simp in dual_cv_simp]
wulff_convex = ConvexHull(wulff_pt_list)
wulff_cv_simp = wulff_convex.simplices
logger.debug(", ".join([str(len(x)) for x in wulff_cv_simp]))
# store simplices and convex
self.dual_cv_simp = dual_cv_simp
self.wulff_pt_list = wulff_pt_list
self.wulff_cv_simp = wulff_cv_simp
self.wulff_convex = wulff_convex
self.on_wulff, self.color_area = self._get_simpx_plane()
miller_area = []
for m, in_mill_fig in enumerate(self.input_miller_fig):
miller_area.append(
in_mill_fig + ' : ' + str(round(self.color_area[m], 4)))
self.miller_area = miller_area
def _get_all_miller_e(self):
"""
from self:
get miller_list(unique_miller), e_surf_list and symmetry
operations(symmops) according to lattice
apply symmops to get all the miller index, then get normal,
get all the facets functions for wulff shape calculation:
|normal| = 1, e_surf is plane's distance to (0, 0, 0),
normal[0]x + normal[1]y + normal[2]z = e_surf
return:
[WulffFacet]
"""
all_hkl = []
color_ind = self.color_ind
planes = []
recp = self.structure.lattice.reciprocal_lattice_crystallographic
recp_symmops = get_recp_symmetry_operation(self.structure, self.symprec)
for i, (hkl, energy) in enumerate(zip(self.hkl_list,
self.e_surf_list)):
for op in recp_symmops:
miller = tuple([int(x) for x in op.operate(hkl)])
if miller not in all_hkl:
all_hkl.append(miller)
normal = recp.get_cartesian_coords(miller)
normal /= sp.linalg.norm(normal)
normal_pt = [x * energy for x in normal]
dual_pt = [x / energy for x in normal]
color_plane = color_ind[divmod(i, len(color_ind))[1]]
planes.append(WulffFacet(normal, energy, normal_pt,
dual_pt, color_plane, i, hkl))
# sort by e_surf
planes.sort(key=lambda x: x.e_surf)
return planes
def _get_cross_pt_dual_simp(self, dual_simp):
"""
|normal| = 1, e_surf is plane's distance to (0, 0, 0),
plane function:
normal[0]x + normal[1]y + normal[2]z = e_surf
from self:
normal_e_m to get the plane functions
dual_simp: (i, j, k) simplices from the dual convex hull
i, j, k: plane index(same order in normal_e_m)
"""
matrix_surfs = [self.facets[dual_simp[i]].normal for i in range(3)]
matrix_e = [self.facets[dual_simp[i]].e_surf for i in range(3)]
cross_pt = sp.dot(sp.linalg.inv(matrix_surfs), matrix_e)
return cross_pt
def _get_simpx_plane(self):
"""
Locate the plane for simpx of on wulff_cv, by comparing the center of
the simpx triangle with the plane functions.
"""
on_wulff = [False] * len(self.miller_list)
surface_area = [0.0] * len(self.miller_list)
for simpx in self.wulff_cv_simp:
pts = [self.wulff_pt_list[simpx[i]] for i in range(3)]
center = np.sum(pts, 0) / 3.0
# check whether the center of the simplices is on one plane
for plane in self.facets:
abs_diff = abs(np.dot(plane.normal, center) - plane.e_surf)
if abs_diff < 1e-5:
on_wulff[plane.index] = True
surface_area[plane.index] += get_tri_area(pts)
plane.points.append(pts)
plane.outer_lines.append([simpx[0], simpx[1]])
plane.outer_lines.append([simpx[1], simpx[2]])
plane.outer_lines.append([simpx[0], simpx[2]])
# already find the plane, move to the next simplices
break
for plane in self.facets:
plane.outer_lines.sort()
plane.outer_lines = [line for line in plane.outer_lines
if plane.outer_lines.count(line) != 2]
return on_wulff, surface_area
def _get_colors(self, color_set, alpha, off_color):
"""
assign colors according to the surface energies of on_wulff facets.
return:
(color_list, color_proxy, color_proxy_on_wulff, miller_on_wulff,
e_surf_on_wulff_list)
"""
color_list = [off_color] * len(self.hkl_list)
color_proxy_on_wulff = []
miller_on_wulff = []
e_surf_on_wulff = [(i, e_surf)
for i, e_surf in enumerate(self.e_surf_list)
if self.on_wulff[i]]
c_map = plt.get_cmap(color_set)
e_surf_on_wulff.sort(key=lambda x: x[1], reverse=False)
e_surf_on_wulff_list = [x[1] for x in e_surf_on_wulff]
if len(e_surf_on_wulff) > 1:
cnorm = mpl.colors.Normalize(vmin=min(e_surf_on_wulff_list),
vmax=max(e_surf_on_wulff_list))
else:
# if there is only one hkl on wulff, choose the color of the median
cnorm = mpl.colors.Normalize(vmin=min(e_surf_on_wulff_list) - 0.1,
vmax=max(e_surf_on_wulff_list) + 0.1)
scalar_map = mpl.cm.ScalarMappable(norm=cnorm, cmap=c_map)
for i, e_surf in e_surf_on_wulff:
plane_color = scalar_map.to_rgba(e_surf, alpha=alpha)
color_list[i] = plane_color
color_proxy_on_wulff.append(
plt.Rectangle((2, 2), 1, 1, fc=plane_color, alpha=alpha))
miller_on_wulff.append(self.input_miller_fig[i])
scalar_map.set_array([x[1] for x in e_surf_on_wulff])
color_proxy = [plt.Rectangle((2, 2), 1, 1, fc=x, alpha=alpha)
for x in color_list]
return color_list, color_proxy, color_proxy_on_wulff, miller_on_wulff, \
e_surf_on_wulff_list
def show(self, *args, **kwargs):
"""
Show the Wulff plot.
Args:
\*args: Passed to get_plot.
\*\*kwargs: Passed to get_plot.
"""
self.get_plot(*args, **kwargs).show()
def get_plot(self, color_set='PuBu', grid_off=True, axis_off=True,
show_area=False, alpha=1, off_color='red', direction=None,
bar_pos=(0.75, 0.15, 0.05, 0.65), bar_on=False,
legend_on=True, aspect_ratio=(8, 8)):
"""
Get the Wulff shape plot.
Args:
color_set: default is 'PuBu'
grid_off (bool): default is True
            axis_off (bool): default is True
show_area (bool): default is False
alpha (float): chosen from 0 to 1 (float), default is 1
off_color: color_legend for off_wulff facets on show_area legend
direction: default is (1, 1, 1)
bar_pos: default is [0.75, 0.15, 0.05, 0.65]
bar_on (bool): default is False
legend_on (bool): default is True
aspect_ratio: default is (8, 8)
Return:
(matplotlib.pyplot)
"""
color_list, color_proxy, color_proxy_on_wulff, \
miller_on_wulff, e_surf_on_wulff = self._get_colors(
color_set, alpha, off_color)
if not direction:
# If direction is not specified, use the miller indices of
# maximum area.
direction = max(self.area_fraction_dict.items(),
key=lambda x: x[1])[0]
fig = plt.figure()
fig.set_size_inches(aspect_ratio[0], aspect_ratio[1])
azim, elev = self._get_azimuth_elev([direction[0], direction[1],
direction[-1]])
wulff_pt_list = self.wulff_pt_list
ax = mpl3.Axes3D(fig, azim=azim, elev=elev)
for plane in self.facets:
# check whether [pts] is empty
if len(plane.points) < 1:
# empty, plane is not on_wulff.
continue
# assign the color for on_wulff facets according to its
# index and the color_list for on_wulff
plane_color = color_list[plane.index]
lines = list(plane.outer_lines)
pt = []
prev = None
while len(lines) > 0:
if prev is None:
l = lines.pop(0)
else:
for i, l in enumerate(lines):
if prev in l:
l = lines.pop(i)
if l[1] == prev:
l.reverse()
break
# make sure the lines are connected one by one.
# find the way covering all pts and facets
pt.append(self.wulff_pt_list[l[0]].tolist())
pt.append(self.wulff_pt_list[l[1]].tolist())
prev = l[1]
# plot from the sorted pts from [simpx]
tri = mpl3.art3d.Poly3DCollection([pt])
tri.set_color(plane_color)
tri.set_edgecolor("#808080")
ax.add_collection3d(tri)
# set ranges of x, y, z
# find the largest distance between on_wulff pts and the origin,
# to ensure complete and consistent display for all directions
r_range = max([np.linalg.norm(x) for x in wulff_pt_list])
ax.set_xlim([-r_range * 1.1, r_range * 1.1])
ax.set_ylim([-r_range * 1.1, r_range * 1.1])
ax.set_zlim([-r_range * 1.1, r_range * 1.1])
# add legend
if legend_on:
color_proxy = color_proxy
if show_area:
ax.legend(color_proxy, self.miller_area, loc='upper left',
bbox_to_anchor=(0, 1), fancybox=True, shadow=False)
else:
ax.legend(color_proxy_on_wulff, miller_on_wulff,
loc='upper center',
bbox_to_anchor=(0.5, 1), ncol=3, fancybox=True,
shadow=False)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
# Add colorbar
if bar_on:
cmap = plt.get_cmap(color_set)
cmap.set_over('0.25')
cmap.set_under('0.75')
bounds = [round(e, 2) for e in e_surf_on_wulff]
bounds.append(1.2 * bounds[-1])
norm = mpl.colors.BoundaryNorm(bounds, cmap.N)
# display surface energies
ax1 = fig.add_axes(bar_pos)
cbar = mpl.colorbar.ColorbarBase(
ax1, cmap=cmap, norm=norm, boundaries=[0] + bounds + [10],
extend='both', ticks=bounds[:-1], spacing='proportional',
orientation='vertical')
cbar.set_label('Surface Energies ($J/m^2$)', fontsize=100)
if grid_off:
ax.grid('off')
if axis_off:
ax.axis('off')
return plt
def _get_azimuth_elev(self, miller_index):
"""
Args:
miller_index: viewing direction
Returns:
azim, elev for plotting
"""
if miller_index == (0, 0, 1) or miller_index == (0, 0, 0, 1):
return 0, 90
else:
cart = self.lattice.get_cartesian_coords(miller_index)
azim = get_angle([cart[0], cart[1], 0], (1, 0, 0))
v = [cart[0], cart[1], 0]
elev = get_angle(cart, v)
return azim, elev
@property
def volume(self):
"""
Volume of the Wulff shape
"""
return self.wulff_convex.volume
@property
def miller_area_dict(self):
"""
Returns {hkl: area_hkl on wulff}
"""
return dict(zip(self.miller_list, self.color_area))
@property
def miller_energy_dict(self):
"""
Returns {hkl: surface energy_hkl}
"""
return dict(zip(self.miller_list, self.e_surf_list))
@property
def surface_area(self):
"""
Total surface area of Wulff shape.
"""
return sum(self.miller_area_dict.values())
@property
def weighted_surface_energy(self):
"""
Returns:
sum(surface_energy_hkl * area_hkl)/ sum(area_hkl)
"""
tot_area_energy = 0
for hkl in self.miller_energy_dict.keys():
tot_area_energy += self.miller_energy_dict[hkl] * \
self.miller_area_dict[hkl]
return tot_area_energy / self.surface_area
@property
def area_fraction_dict(self):
"""
Returns:
(dict): {hkl: area_hkl/total area on wulff}
"""
return {hkl: self.miller_area_dict[hkl] / self.surface_area
for hkl in self.miller_area_dict.keys()}
@property
def anisotropy(self):
"""
Returns:
(float) Coefficient of Variation from weighted surface energy
The ideal sphere is 0.
"""
square_diff_energy = 0
weighted_energy = self.weighted_surface_energy
area_frac_dict = self.area_fraction_dict
miller_energy_dict = self.miller_energy_dict
for hkl in miller_energy_dict.keys():
square_diff_energy += (miller_energy_dict[hkl] - weighted_energy)\
** 2 * area_frac_dict[hkl]
return np.sqrt(square_diff_energy) / weighted_energy
@property
def shape_factor(self):
"""
This is useful for determining the critical nucleus size.
A large shape factor indicates great anisotropy.
See Ballufi, R. W., Allen, S. M. & Carter, W. C. Kinetics
of Materials. (John Wiley & Sons, 2005), p.461
Returns:
(float) Shape factor.
"""
return self.surface_area / (self.volume ** (2 / 3))
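# Illustrative sketch (editor addition), not part of the original module:
# minimal WulffShape usage with made-up surface energies for a hypothetical
# cubic crystal; the numbers below are not real data.
def _example_wulff_shape():
    from pymatgen.core.lattice import Lattice
    lattice = Lattice.cubic(3.5)                   # hypothetical lattice constant
    millers = [(1, 0, 0), (1, 1, 0), (1, 1, 1)]
    e_surf = [1.2, 1.0, 0.9]                       # hypothetical J/m^2 values
    ws = WulffShape(lattice, millers, e_surf)
    return ws.weighted_surface_energy, ws.anisotropy, ws.shape_factor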
| mit |
Tong-Chen/scikit-learn | examples/plot_kernel_approximation.py | 6 | 7976 | """
==================================================
Explicit feature map approximation for RBF kernels
==================================================
An example illustrating the approximation of the feature map
of an RBF kernel.
.. currentmodule:: sklearn.kernel_approximation
It shows how to use :class:`RBFSampler` and :class:`Nystroem` to
approximate the feature map of an RBF kernel for classification with an SVM on
the digits dataset. Results using a linear SVM in the original space, a linear
SVM using the approximate mappings and using a kernelized SVM are compared.
Timings and accuracy for varying amounts of Monte Carlo samplings (in the case
of :class:`RBFSampler`, which uses random Fourier features) and different sized
subsets of the training set (for :class:`Nystroem`) for the approximate mapping
are shown.
Please note that the dataset here is not large enough to show the benefits
of kernel approximation, as the exact SVM is still reasonably fast.
Sampling more dimensions clearly leads to better classification results, but
comes at a greater cost. This means there is a tradeoff between runtime and
accuracy, given by the parameter n_components. Note that solving the Linear
SVM and also the approximate kernel SVM could be greatly accelerated by using
stochastic gradient descent via :class:`sklearn.linear_model.SGDClassifier`.
This is not easily possible for the case of the kernelized SVM.
The second plot visualizes the decision surfaces of the RBF kernel SVM and
the linear SVM with approximate kernel maps.
The plot shows decision surfaces of the classifiers projected onto
the first two principal components of the data. This visualization should
be taken with a grain of salt since it is just an interesting slice through
the decision surface in 64 dimensions. In particular note that
a datapoint (represented as a dot) is not necessarily classified
into the region it appears to lie in, since it will not lie on the plane
that the first two principal components span.
The usage of :class:`RBFSampler` and :class:`Nystroem` is described in detail
in :ref:`kernel_approximation`.
"""
print(__doc__)
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# Andreas Mueller <[email protected]>
# License: BSD 3 clause
# Standard scientific Python imports
import pylab as pl
import numpy as np
from time import time
# Import datasets, classifiers and performance metrics
from sklearn import datasets, svm, pipeline
from sklearn.kernel_approximation import (RBFSampler,
Nystroem)
from sklearn.decomposition import PCA
# The digits dataset
digits = datasets.load_digits(n_class=9)
# To apply a classifier on this data, we need to flatten the image, to
# turn the data in a (samples, feature) matrix:
n_samples = len(digits.data)
data = digits.data / 16.
data -= data.mean(axis=0)
# We learn the digits on the first half of the digits
data_train, targets_train = data[:n_samples // 2], digits.target[:n_samples // 2]
# Now predict the value of the digit on the second half:
data_test, targets_test = data[n_samples // 2:], digits.target[n_samples // 2:]
#data_test = scaler.transform(data_test)
# Create a classifier: a support vector classifier
kernel_svm = svm.SVC(gamma=.2)
linear_svm = svm.LinearSVC()
# create pipeline from kernel approximation
# and linear svm
feature_map_fourier = RBFSampler(gamma=.2, random_state=1)
feature_map_nystroem = Nystroem(gamma=.2, random_state=1)
fourier_approx_svm = pipeline.Pipeline([("feature_map", feature_map_fourier),
("svm", svm.LinearSVC())])
nystroem_approx_svm = pipeline.Pipeline([("feature_map", feature_map_nystroem),
("svm", svm.LinearSVC())])
# fit and predict using linear and kernel svm:
kernel_svm_time = time()
kernel_svm.fit(data_train, targets_train)
kernel_svm_score = kernel_svm.score(data_test, targets_test)
kernel_svm_time = time() - kernel_svm_time
linear_svm_time = time()
linear_svm.fit(data_train, targets_train)
linear_svm_score = linear_svm.score(data_test, targets_test)
linear_svm_time = time() - linear_svm_time
sample_sizes = 30 * np.arange(1, 10)
fourier_scores = []
nystroem_scores = []
fourier_times = []
nystroem_times = []
for D in sample_sizes:
fourier_approx_svm.set_params(feature_map__n_components=D)
nystroem_approx_svm.set_params(feature_map__n_components=D)
start = time()
nystroem_approx_svm.fit(data_train, targets_train)
nystroem_times.append(time() - start)
start = time()
fourier_approx_svm.fit(data_train, targets_train)
fourier_times.append(time() - start)
fourier_score = fourier_approx_svm.score(data_test, targets_test)
nystroem_score = nystroem_approx_svm.score(data_test, targets_test)
nystroem_scores.append(nystroem_score)
fourier_scores.append(fourier_score)
# plot the results:
pl.figure(figsize=(8, 8))
accuracy = pl.subplot(211)
# second subplot for timings
timescale = pl.subplot(212)
accuracy.plot(sample_sizes, nystroem_scores, label="Nystroem approx. kernel")
timescale.plot(sample_sizes, nystroem_times, '--',
label='Nystroem approx. kernel')
accuracy.plot(sample_sizes, fourier_scores, label="Fourier approx. kernel")
timescale.plot(sample_sizes, fourier_times, '--',
label='Fourier approx. kernel')
# horizontal lines for exact rbf and linear kernels:
accuracy.plot([sample_sizes[0], sample_sizes[-1]],
[linear_svm_score, linear_svm_score], label="linear svm")
timescale.plot([sample_sizes[0], sample_sizes[-1]],
[linear_svm_time, linear_svm_time], '--', label='linear svm')
accuracy.plot([sample_sizes[0], sample_sizes[-1]],
[kernel_svm_score, kernel_svm_score], label="rbf svm")
timescale.plot([sample_sizes[0], sample_sizes[-1]],
[kernel_svm_time, kernel_svm_time], '--', label='rbf svm')
# vertical line for dataset dimensionality = 64
accuracy.plot([64, 64], [0.7, 1], label="n_features")
# legends and labels
accuracy.set_title("Classification accuracy")
timescale.set_title("Training times")
accuracy.set_xlim(sample_sizes[0], sample_sizes[-1])
accuracy.set_xticks(())
accuracy.set_ylim(np.min(fourier_scores), 1)
timescale.set_xlabel("Sampling steps = transformed feature dimension")
accuracy.set_ylabel("Classification accuracy")
timescale.set_ylabel("Training time in seconds")
accuracy.legend(loc='best')
timescale.legend(loc='best')
# visualize the decision surface, projected down to the first
# two principal components of the dataset
pca = PCA(n_components=8).fit(data_train)
X = pca.transform(data_train)
# Generate grid along first two principal components
multiples = np.arange(-2, 2, 0.1)
# steps along first component
first = multiples[:, np.newaxis] * pca.components_[0, :]
# steps along second component
second = multiples[:, np.newaxis] * pca.components_[1, :]
# combine
grid = first[np.newaxis, :, :] + second[:, np.newaxis, :]
flat_grid = grid.reshape(-1, data.shape[1])
# title for the plots
titles = ['SVC with rbf kernel',
'SVC (linear kernel)\n with Fourier rbf feature map\n'
'n_components=100',
'SVC (linear kernel)\n with Nystroem rbf feature map\n'
'n_components=100']
pl.tight_layout()
pl.figure(figsize=(12, 5))
# predict and plot
for i, clf in enumerate((kernel_svm, nystroem_approx_svm,
fourier_approx_svm)):
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, m_max]x[y_min, y_max].
pl.subplot(1, 3, i + 1)
Z = clf.predict(flat_grid)
# Put the result into a color plot
Z = Z.reshape(grid.shape[:-1])
pl.contourf(multiples, multiples, Z, cmap=pl.cm.Paired)
pl.axis('off')
# Plot also the training points
pl.scatter(X[:, 0], X[:, 1], c=targets_train, cmap=pl.cm.Paired)
pl.title(titles[i])
pl.tight_layout()
pl.show()
| bsd-3-clause |
TakakiNishio/grasp_planning | rgb/evaluator/cnn03a/visualizer.py | 3 | 2072 | # python library
import numpy as np
from scipy import misc
from PIL import Image, ImageDraw, ImageFont
import shutil
import os
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import json
#visualize loss reduction
def loss_visualizer():
epoch = []
train_loss = []
test_loss = []
train_accuracy = []
test_accuracy = []
f = open('./result/log', 'r') #load log file
data = json.load(f)
f.close()
value = []
for i in range(0,len(data)):
value = data[i]
epoch.append(value["epoch"])
train_loss.append(value["main/loss"])
test_loss.append(value["validation/main/loss"])
train_accuracy.append(value["main/accuracy"])
test_accuracy.append(value["validation/main/accuracy"])
#fig1 = plt.figure(1)
fig1 = plt.figure(1,figsize=(8,6))
plt.plot(epoch,train_loss,"b",linewidth=2,label = "train LOSS")
plt.plot(epoch,test_loss,"g",linewidth=2,label = "validation LOSS")
plt.yscale('log')
plt.grid(which="both")
#plt.title("LOSS reduction")
plt.legend(fontsize=20) #18
plt.tick_params(labelsize=22) #18
plt.xlabel("epoch",fontname='roman', fontsize=26)
plt.ylabel("LOSS",fontname='roman', fontsize=26)
fig1.subplots_adjust(left=0.15,bottom=0.15)
ax = fig1.add_subplot(111)
#fig2 = plt.figure(2)
fig2 = plt.figure(2,figsize=(8,6))
plt.plot(epoch,train_accuracy,"b",linewidth=2,label = "train accuracy")
plt.plot(epoch,test_accuracy,"g",linewidth=2,label = "validation accuracy ")
#plt.title("accuracy increase")
plt.legend(loc = "lower right",fontsize=20)
plt.tick_params(labelsize=22)
plt.xlabel("epoch",fontname='roman',fontsize=26)
plt.ylabel("accuracy",fontname='roman',fontsize=26)
plt.yticks([i*0.1 for i in range(5,10,1)])
fig2.subplots_adjust(left=0.15,bottom=0.15)
ax = fig2.add_subplot(111)
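# Illustrative sketch (editor addition): each entry in ./result/log is expected
# to look like
#   {"epoch": 1, "main/loss": 0.92, "validation/main/loss": 1.01,
#    "main/accuracy": 0.61, "validation/main/accuracy": 0.55}
# (a typical training-log record; the values shown here are hypothetical).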
#main
if __name__ == '__main__':
loss_visualizer()
plt.show()
| gpl-3.0 |
GaZ3ll3/numpy | numpy/fft/fftpack.py | 72 | 45497 | """
Discrete Fourier Transforms
Routines in this module:
fft(a, n=None, axis=-1)
ifft(a, n=None, axis=-1)
rfft(a, n=None, axis=-1)
irfft(a, n=None, axis=-1)
hfft(a, n=None, axis=-1)
ihfft(a, n=None, axis=-1)
fftn(a, s=None, axes=None)
ifftn(a, s=None, axes=None)
rfftn(a, s=None, axes=None)
irfftn(a, s=None, axes=None)
fft2(a, s=None, axes=(-2,-1))
ifft2(a, s=None, axes=(-2, -1))
rfft2(a, s=None, axes=(-2,-1))
irfft2(a, s=None, axes=(-2, -1))
i = inverse transform
r = transform of purely real data
h = Hermite transform
n = n-dimensional transform
2 = 2-dimensional transform
(Note: 2D routines are just nD routines with different default
behavior.)
The underlying code for these functions is an f2c-translated and modified
version of the FFTPACK routines.
"""
from __future__ import division, absolute_import, print_function
__all__ = ['fft', 'ifft', 'rfft', 'irfft', 'hfft', 'ihfft', 'rfftn',
'irfftn', 'rfft2', 'irfft2', 'fft2', 'ifft2', 'fftn', 'ifftn']
from numpy.core import (array, asarray, zeros, swapaxes, shape, conjugate,
take, sqrt)
from . import fftpack_lite as fftpack
_fft_cache = {}
_real_fft_cache = {}
def _raw_fft(a, n=None, axis=-1, init_function=fftpack.cffti,
work_function=fftpack.cfftf, fft_cache=_fft_cache):
a = asarray(a)
if n is None:
n = a.shape[axis]
if n < 1:
raise ValueError("Invalid number of FFT data points (%d) specified."
% n)
try:
# Thread-safety note: We rely on list.pop() here to atomically
# retrieve-and-remove a wsave from the cache. This ensures that no
# other thread can get the same wsave while we're using it.
wsave = fft_cache.setdefault(n, []).pop()
except (IndexError):
wsave = init_function(n)
if a.shape[axis] != n:
s = list(a.shape)
if s[axis] > n:
index = [slice(None)]*len(s)
index[axis] = slice(0, n)
a = a[index]
else:
index = [slice(None)]*len(s)
index[axis] = slice(0, s[axis])
s[axis] = n
z = zeros(s, a.dtype.char)
z[index] = a
a = z
if axis != -1:
a = swapaxes(a, axis, -1)
r = work_function(a, wsave)
if axis != -1:
r = swapaxes(r, axis, -1)
# As soon as we put wsave back into the cache, another thread could pick it
# up and start using it, so we must not do this until after we're
# completely done using it ourselves.
fft_cache[n].append(wsave)
return r
def _unitary(norm):
if norm not in (None, "ortho"):
raise ValueError("Invalid norm value %s, should be None or \"ortho\"."
% norm)
return norm is not None
def fft(a, n=None, axis=-1, norm=None):
"""
Compute the one-dimensional discrete Fourier Transform.
This function computes the one-dimensional *n*-point discrete Fourier
Transform (DFT) with the efficient Fast Fourier Transform (FFT)
algorithm [CT].
Parameters
----------
a : array_like
Input array, can be complex.
n : int, optional
Length of the transformed axis of the output.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input along the axis specified by `axis` is used.
axis : int, optional
Axis over which to compute the FFT. If not given, the last axis is
used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
Raises
------
IndexError
        If `axes` is larger than the last axis of `a`.
See Also
--------
numpy.fft : for definition of the DFT and conventions used.
ifft : The inverse of `fft`.
fft2 : The two-dimensional FFT.
fftn : The *n*-dimensional FFT.
rfftn : The *n*-dimensional FFT of real input.
fftfreq : Frequency bins for given FFT parameters.
Notes
-----
FFT (Fast Fourier Transform) refers to a way the discrete Fourier
Transform (DFT) can be calculated efficiently, by using symmetries in the
calculated terms. The symmetry is highest when `n` is a power of 2, and
the transform is therefore most efficient for these sizes.
The DFT is defined, with the conventions used in this implementation, in
the documentation for the `numpy.fft` module.
References
----------
.. [CT] Cooley, James W., and John W. Tukey, 1965, "An algorithm for the
machine calculation of complex Fourier series," *Math. Comput.*
19: 297-301.
Examples
--------
>>> np.fft.fft(np.exp(2j * np.pi * np.arange(8) / 8))
array([ -3.44505240e-16 +1.14383329e-17j,
8.00000000e+00 -5.71092652e-15j,
2.33482938e-16 +1.22460635e-16j,
1.64863782e-15 +1.77635684e-15j,
9.95839695e-17 +2.33482938e-16j,
0.00000000e+00 +1.66837030e-15j,
1.14383329e-17 +1.22460635e-16j,
-1.64863782e-15 +1.77635684e-15j])
>>> import matplotlib.pyplot as plt
>>> t = np.arange(256)
>>> sp = np.fft.fft(np.sin(t))
>>> freq = np.fft.fftfreq(t.shape[-1])
>>> plt.plot(freq, sp.real, freq, sp.imag)
[<matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>]
>>> plt.show()
In this example, real input has an FFT which is Hermitian, i.e., symmetric
in the real part and anti-symmetric in the imaginary part, as described in
the `numpy.fft` documentation.
"""
a = asarray(a).astype(complex)
if n is None:
n = a.shape[axis]
output = _raw_fft(a, n, axis, fftpack.cffti, fftpack.cfftf, _fft_cache)
if _unitary(norm):
output *= 1 / sqrt(n)
return output
def ifft(a, n=None, axis=-1, norm=None):
"""
Compute the one-dimensional inverse discrete Fourier Transform.
This function computes the inverse of the one-dimensional *n*-point
discrete Fourier transform computed by `fft`. In other words,
``ifft(fft(a)) == a`` to within numerical accuracy.
For a general description of the algorithm and definitions,
see `numpy.fft`.
The input should be ordered in the same way as is returned by `fft`,
i.e., ``a[0]`` should contain the zero frequency term,
``a[1:n/2+1]`` should contain the positive-frequency terms, and
``a[n/2+1:]`` should contain the negative-frequency terms, in order of
decreasingly negative frequency. See `numpy.fft` for details.
Parameters
----------
a : array_like
Input array, can be complex.
n : int, optional
Length of the transformed axis of the output.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input along the axis specified by `axis` is used.
See notes about padding issues.
axis : int, optional
Axis over which to compute the inverse DFT. If not given, the last
axis is used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
Raises
------
IndexError
If `axes` is larger than the last axis of `a`.
See Also
--------
numpy.fft : An introduction, with definitions and general explanations.
fft : The one-dimensional (forward) FFT, of which `ifft` is the inverse
ifft2 : The two-dimensional inverse FFT.
ifftn : The n-dimensional inverse FFT.
Notes
-----
If the input parameter `n` is larger than the size of the input, the input
is padded by appending zeros at the end. Even though this is the common
approach, it might lead to surprising results. If a different padding is
desired, it must be performed before calling `ifft`.
Examples
--------
>>> np.fft.ifft([0, 4, 0, 0])
array([ 1.+0.j, 0.+1.j, -1.+0.j, 0.-1.j])
Create and plot a band-limited signal with random phases:
>>> import matplotlib.pyplot as plt
>>> t = np.arange(400)
>>> n = np.zeros((400,), dtype=complex)
>>> n[40:60] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20,)))
>>> s = np.fft.ifft(n)
>>> plt.plot(t, s.real, 'b-', t, s.imag, 'r--')
[<matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>]
>>> plt.legend(('real', 'imaginary'))
<matplotlib.legend.Legend object at 0x...>
>>> plt.show()
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=complex)
if n is None:
n = a.shape[axis]
unitary = _unitary(norm)
output = _raw_fft(a, n, axis, fftpack.cffti, fftpack.cfftb, _fft_cache)
return output * (1 / (sqrt(n) if unitary else n))
def rfft(a, n=None, axis=-1, norm=None):
"""
Compute the one-dimensional discrete Fourier Transform for real input.
This function computes the one-dimensional *n*-point discrete Fourier
Transform (DFT) of a real-valued array by means of an efficient algorithm
called the Fast Fourier Transform (FFT).
Parameters
----------
a : array_like
Input array
n : int, optional
Number of points along transformation axis in the input to use.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input along the axis specified by `axis` is used.
axis : int, optional
Axis over which to compute the FFT. If not given, the last axis is
used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
If `n` is even, the length of the transformed axis is ``(n/2)+1``.
If `n` is odd, the length is ``(n+1)/2``.
Raises
------
IndexError
If `axis` is larger than the last axis of `a`.
See Also
--------
numpy.fft : For definition of the DFT and conventions used.
irfft : The inverse of `rfft`.
fft : The one-dimensional FFT of general (complex) input.
fftn : The *n*-dimensional FFT.
rfftn : The *n*-dimensional FFT of real input.
Notes
-----
When the DFT is computed for purely real input, the output is
Hermitian-symmetric, i.e. the negative frequency terms are just the complex
conjugates of the corresponding positive-frequency terms, and the
negative-frequency terms are therefore redundant. This function does not
compute the negative frequency terms, and the length of the transformed
axis of the output is therefore ``n//2 + 1``.
When ``A = rfft(a)`` and fs is the sampling frequency, ``A[0]`` contains
the zero-frequency term 0*fs, which is real due to Hermitian symmetry.
If `n` is even, ``A[-1]`` contains the term representing both positive
and negative Nyquist frequency (+fs/2 and -fs/2), and must also be purely
real. If `n` is odd, there is no term at fs/2; ``A[-1]`` contains
the largest positive frequency (fs/2*(n-1)/n), and is complex in the
general case.
If the input `a` contains an imaginary part, it is silently discarded.
Examples
--------
>>> np.fft.fft([0, 1, 0, 0])
array([ 1.+0.j, 0.-1.j, -1.+0.j, 0.+1.j])
>>> np.fft.rfft([0, 1, 0, 0])
array([ 1.+0.j, 0.-1.j, -1.+0.j])
Notice how the final element of the `fft` output is the complex conjugate
of the second element, for real input. For `rfft`, this symmetry is
exploited to compute only the non-negative frequency terms.
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=float)
output = _raw_fft(a, n, axis, fftpack.rffti, fftpack.rfftf,
_real_fft_cache)
if _unitary(norm):
output *= 1 / sqrt(a.shape[axis])
return output
def irfft(a, n=None, axis=-1, norm=None):
"""
Compute the inverse of the n-point DFT for real input.
This function computes the inverse of the one-dimensional *n*-point
discrete Fourier Transform of real input computed by `rfft`.
In other words, ``irfft(rfft(a), len(a)) == a`` to within numerical
accuracy. (See Notes below for why ``len(a)`` is necessary here.)
The input is expected to be in the form returned by `rfft`, i.e. the
real zero-frequency term followed by the complex positive frequency terms
in order of increasing frequency. Since the discrete Fourier Transform of
real input is Hermitian-symmetric, the negative frequency terms are taken
to be the complex conjugates of the corresponding positive frequency terms.
Parameters
----------
a : array_like
The input array.
n : int, optional
Length of the transformed axis of the output.
For `n` output points, ``n//2+1`` input points are necessary. If the
input is longer than this, it is cropped. If it is shorter than this,
it is padded with zeros. If `n` is not given, it is determined from
the length of the input along the axis specified by `axis`.
axis : int, optional
Axis over which to compute the inverse FFT. If not given, the last
axis is used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
The length of the transformed axis is `n`, or, if `n` is not given,
``2*(m-1)`` where ``m`` is the length of the transformed axis of the
input. To get an odd number of output points, `n` must be specified.
Raises
------
IndexError
If `axis` is larger than the last axis of `a`.
See Also
--------
numpy.fft : For definition of the DFT and conventions used.
rfft : The one-dimensional FFT of real input, of which `irfft` is inverse.
fft : The one-dimensional FFT.
irfft2 : The inverse of the two-dimensional FFT of real input.
irfftn : The inverse of the *n*-dimensional FFT of real input.
Notes
-----
Returns the real valued `n`-point inverse discrete Fourier transform
of `a`, where `a` contains the non-negative frequency terms of a
Hermitian-symmetric sequence. `n` is the length of the result, not the
input.
If you specify an `n` such that `a` must be zero-padded or truncated, the
extra/removed values will be added/removed at high frequencies. One can
thus resample a series to `m` points via Fourier interpolation by:
``a_resamp = irfft(rfft(a), m)``.
Examples
--------
>>> np.fft.ifft([1, -1j, -1, 1j])
array([ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j])
>>> np.fft.irfft([1, -1j, -1])
array([ 0., 1., 0., 0.])
Notice how the last term in the input to the ordinary `ifft` is the
complex conjugate of the second term, and the output has zero imaginary
part everywhere. When calling `irfft`, the negative frequencies are not
specified, and the output array is purely real.
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=complex)
if n is None:
n = (a.shape[axis] - 1) * 2
unitary = _unitary(norm)
output = _raw_fft(a, n, axis, fftpack.rffti, fftpack.rfftb,
_real_fft_cache)
return output * (1 / (sqrt(n) if unitary else n))
def hfft(a, n=None, axis=-1, norm=None):
"""
Compute the FFT of a signal which has Hermitian symmetry (real spectrum).
Parameters
----------
a : array_like
The input array.
n : int, optional
Length of the transformed axis of the output.
For `n` output points, ``n//2+1`` input points are necessary. If the
input is longer than this, it is cropped. If it is shorter than this,
it is padded with zeros. If `n` is not given, it is determined from
the length of the input along the axis specified by `axis`.
axis : int, optional
Axis over which to compute the FFT. If not given, the last
axis is used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
The length of the transformed axis is `n`, or, if `n` is not given,
``2*(m-1)`` where ``m`` is the length of the transformed axis of the
input. To get an odd number of output points, `n` must be specified.
Raises
------
IndexError
If `axis` is larger than the last axis of `a`.
See also
--------
rfft : Compute the one-dimensional FFT for real input.
ihfft : The inverse of `hfft`.
Notes
-----
`hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the
opposite case: here the signal has Hermitian symmetry in the time domain
and is real in the frequency domain. So here it's `hfft` for which
you must supply the length of the result if it is to be odd:
``ihfft(hfft(a), len(a)) == a``, within numerical accuracy.
Examples
--------
>>> signal = np.array([1, 2, 3, 4, 3, 2])
>>> np.fft.fft(signal)
array([ 15.+0.j, -4.+0.j, 0.+0.j, -1.-0.j, 0.+0.j, -4.+0.j])
>>> np.fft.hfft(signal[:4]) # Input first half of signal
array([ 15., -4., 0., -1., 0., -4.])
>>> np.fft.hfft(signal, 6) # Input entire signal and truncate
array([ 15., -4., 0., -1., 0., -4.])
>>> signal = np.array([[1, 1.j], [-1.j, 2]])
>>> np.conj(signal.T) - signal # check Hermitian symmetry
array([[ 0.-0.j, 0.+0.j],
[ 0.+0.j, 0.-0.j]])
>>> freq_spectrum = np.fft.hfft(signal)
>>> freq_spectrum
array([[ 1., 1.],
[ 2., -2.]])
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=complex)
if n is None:
n = (a.shape[axis] - 1) * 2
unitary = _unitary(norm)
return irfft(conjugate(a), n, axis) * (sqrt(n) if unitary else n)
def ihfft(a, n=None, axis=-1, norm=None):
"""
Compute the inverse FFT of a signal which has Hermitian symmetry.
Parameters
----------
a : array_like
Input array.
n : int, optional
Length of the inverse FFT.
Number of points along transformation axis in the input to use.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input along the axis specified by `axis` is used.
axis : int, optional
Axis over which to compute the inverse FFT. If not given, the last
axis is used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
If `n` is even, the length of the transformed axis is ``(n/2)+1``.
If `n` is odd, the length is ``(n+1)/2``.
See also
--------
hfft, irfft
Notes
-----
`hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the
opposite case: here the signal has Hermitian symmetry in the time domain
and is real in the frequency domain. So here it's `hfft` for which
you must supply the length of the result if it is to be odd:
``ihfft(hfft(a), len(a)) == a``, within numerical accuracy.
Examples
--------
>>> spectrum = np.array([ 15, -4, 0, -1, 0, -4])
>>> np.fft.ifft(spectrum)
array([ 1.+0.j, 2.-0.j, 3.+0.j, 4.+0.j, 3.+0.j, 2.-0.j])
>>> np.fft.ihfft(spectrum)
array([ 1.-0.j, 2.-0.j, 3.-0.j, 4.-0.j])
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=float)
if n is None:
n = a.shape[axis]
unitary = _unitary(norm)
output = conjugate(rfft(a, n, axis))
return output * (1 / (sqrt(n) if unitary else n))
def _cook_nd_args(a, s=None, axes=None, invreal=0):
# Normalize the shape/axes arguments shared by the n-dimensional
# transforms; ``shapeless`` records whether ``s`` was omitted so the
# inverse real transforms can fix up the last-axis length below.
if s is None:
shapeless = 1
if axes is None:
s = list(a.shape)
else:
s = take(a.shape, axes)
else:
shapeless = 0
s = list(s)
if axes is None:
axes = list(range(-len(s), 0))
if len(s) != len(axes):
raise ValueError("Shape and axes have different lengths.")
if invreal and shapeless:
# The complex input of an inverse real transform holds only
# ``n//2 + 1`` points along the last axis, so the real output
# length is ``2*(m - 1)``.
s[-1] = (a.shape[axes[-1]] - 1) * 2
return s, axes
def _raw_fftnd(a, s=None, axes=None, function=fft, norm=None):
# Apply the given one-dimensional transform along each requested axis
# in turn, iterating from the last axis back to the first.
a = asarray(a)
s, axes = _cook_nd_args(a, s, axes)
itl = list(range(len(axes)))
itl.reverse()
for ii in itl:
a = function(a, n=s[ii], axis=axes[ii], norm=norm)
return a
def fftn(a, s=None, axes=None, norm=None):
"""
Compute the N-dimensional discrete Fourier Transform.
This function computes the *N*-dimensional discrete Fourier Transform over
any number of axes in an *M*-dimensional array by means of the Fast Fourier
Transform (FFT).
Parameters
----------
a : array_like
Input array, can be complex.
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(`s[0]` refers to axis 0, `s[1]` to axis 1, etc.).
This corresponds to `n` for `fft(x, n)`.
Along any axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
If `s` is not given, the shape of the input along the axes specified
by `axes` is used.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last ``len(s)``
axes are used, or all axes if `s` is also not specified.
Repeated indices in `axes` means that the transform over that axis is
performed multiple times.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` and `a`,
as explained in the parameters section above.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
ifftn : The inverse of `fftn`, the inverse *n*-dimensional FFT.
fft : The one-dimensional FFT, with definitions and conventions used.
rfftn : The *n*-dimensional FFT of real input.
fft2 : The two-dimensional FFT.
fftshift : Shifts zero-frequency terms to centre of array
Notes
-----
The output, analogously to `fft`, contains the term for zero frequency in
the low-order corner of all axes, the positive frequency terms in the
first half of all axes, the term for the Nyquist frequency in the middle
of all axes and the negative frequency terms in the second half of all
axes, in order of decreasingly negative frequency.
See `numpy.fft` for details, definitions and conventions used.
Examples
--------
>>> a = np.mgrid[:3, :3, :3][0]
>>> np.fft.fftn(a, axes=(1, 2))
array([[[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]],
[[ 9.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]],
[[ 18.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]]])
>>> np.fft.fftn(a, (2, 2), axes=(0, 1))
array([[[ 2.+0.j, 2.+0.j, 2.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]],
[[-2.+0.j, -2.+0.j, -2.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]]])
>>> import matplotlib.pyplot as plt
>>> [X, Y] = np.meshgrid(2 * np.pi * np.arange(200) / 12,
... 2 * np.pi * np.arange(200) / 34)
>>> S = np.sin(X) + np.cos(Y) + np.random.uniform(0, 1, X.shape)
>>> FS = np.fft.fftn(S)
>>> plt.imshow(np.log(np.abs(np.fft.fftshift(FS))**2))
<matplotlib.image.AxesImage object at 0x...>
>>> plt.show()
"""
return _raw_fftnd(a, s, axes, fft, norm)
def ifftn(a, s=None, axes=None, norm=None):
"""
Compute the N-dimensional inverse discrete Fourier Transform.
This function computes the inverse of the N-dimensional discrete
Fourier Transform over any number of axes in an M-dimensional array by
means of the Fast Fourier Transform (FFT). In other words,
``ifftn(fftn(a)) == a`` to within numerical accuracy.
For a description of the definitions and conventions used, see `numpy.fft`.
The input, analogously to `ifft`, should be ordered in the same way as is
returned by `fftn`, i.e. it should have the term for zero frequency
in all axes in the low-order corner, the positive frequency terms in the
first half of all axes, the term for the Nyquist frequency in the middle
of all axes and the negative frequency terms in the second half of all
axes, in order of decreasingly negative frequency.
Parameters
----------
a : array_like
Input array, can be complex.
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
This corresponds to ``n`` for ``ifft(x, n)``.
Along any axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
If `s` is not given, the shape of the input along the axes specified
by `axes` is used. See notes for issue on `ifft` zero padding.
axes : sequence of ints, optional
Axes over which to compute the IFFT. If not given, the last ``len(s)``
axes are used, or all axes if `s` is also not specified.
Repeated indices in `axes` means that the inverse transform over that
axis is performed multiple times.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` or `a`,
as explained in the parameters section above.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
fftn : The forward *n*-dimensional FFT, of which `ifftn` is the inverse.
ifft : The one-dimensional inverse FFT.
ifft2 : The two-dimensional inverse FFT.
ifftshift : Undoes `fftshift`, shifts zero-frequency terms to beginning
of array.
Notes
-----
See `numpy.fft` for definitions and conventions used.
Zero-padding, analogously with `ifft`, is performed by appending zeros to
the input along the specified dimension. Although this is the common
approach, it might lead to surprising results. If another form of zero
padding is desired, it must be performed before `ifftn` is called.
Examples
--------
>>> a = np.eye(4)
>>> np.fft.ifftn(np.fft.fftn(a, axes=(0,)), axes=(1,))
array([[ 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j]])
Create and plot an image with band-limited frequency content:
>>> import matplotlib.pyplot as plt
>>> n = np.zeros((200,200), dtype=complex)
>>> n[60:80, 20:40] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20, 20)))
>>> im = np.fft.ifftn(n).real
>>> plt.imshow(im)
<matplotlib.image.AxesImage object at 0x...>
>>> plt.show()
"""
return _raw_fftnd(a, s, axes, ifft, norm)
def fft2(a, s=None, axes=(-2, -1), norm=None):
"""
Compute the 2-dimensional discrete Fourier Transform
This function computes the *n*-dimensional discrete Fourier Transform
over any axes in an *M*-dimensional array by means of the
Fast Fourier Transform (FFT). By default, the transform is computed over
the last two axes of the input array, i.e., a 2-dimensional FFT.
Parameters
----------
a : array_like
Input array, can be complex
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(`s[0]` refers to axis 0, `s[1]` to axis 1, etc.).
This corresponds to `n` for `fft(x, n)`.
Along each axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
If `s` is not given, the shape of the input along the axes specified
by `axes` is used.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last two
axes are used. A repeated index in `axes` means the transform over
that axis is performed multiple times. A one-element sequence means
that a one-dimensional FFT is performed.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or the last two axes if `axes` is not given.
Raises
------
ValueError
If `s` and `axes` have different length, or `axes` not given and
``len(s) != 2``.
IndexError
If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
ifft2 : The inverse two-dimensional FFT.
fft : The one-dimensional FFT.
fftn : The *n*-dimensional FFT.
fftshift : Shifts zero-frequency terms to the center of the array.
For two-dimensional input, swaps first and third quadrants, and second
and fourth quadrants.
Notes
-----
`fft2` is just `fftn` with a different default for `axes`.
The output, analogously to `fft`, contains the term for zero frequency in
the low-order corner of the transformed axes, the positive frequency terms
in the first half of these axes, the term for the Nyquist frequency in the
middle of the axes and the negative frequency terms in the second half of
the axes, in order of decreasingly negative frequency.
See `fftn` for details and a plotting example, and `numpy.fft` for
definitions and conventions used.
Examples
--------
>>> a = np.mgrid[:5, :5][0]
>>> np.fft.fft2(a)
array([[ 50.0 +0.j , 0.0 +0.j , 0.0 +0.j ,
0.0 +0.j , 0.0 +0.j ],
[-12.5+17.20477401j, 0.0 +0.j , 0.0 +0.j ,
0.0 +0.j , 0.0 +0.j ],
[-12.5 +4.0614962j , 0.0 +0.j , 0.0 +0.j ,
0.0 +0.j , 0.0 +0.j ],
[-12.5 -4.0614962j , 0.0 +0.j , 0.0 +0.j ,
0.0 +0.j , 0.0 +0.j ],
[-12.5-17.20477401j, 0.0 +0.j , 0.0 +0.j ,
0.0 +0.j , 0.0 +0.j ]])
"""
return _raw_fftnd(a, s, axes, fft, norm)
def ifft2(a, s=None, axes=(-2, -1), norm=None):
"""
Compute the 2-dimensional inverse discrete Fourier Transform.
This function computes the inverse of the 2-dimensional discrete Fourier
Transform over any number of axes in an M-dimensional array by means of
the Fast Fourier Transform (FFT). In other words, ``ifft2(fft2(a)) == a``
to within numerical accuracy. By default, the inverse transform is
computed over the last two axes of the input array.
The input, analogously to `ifft`, should be ordered in the same way as is
returned by `fft2`, i.e. it should have the term for zero frequency
in the low-order corner of the two axes, the positive frequency terms in
the first half of these axes, the term for the Nyquist frequency in the
middle of the axes and the negative frequency terms in the second half of
both axes, in order of decreasingly negative frequency.
Parameters
----------
a : array_like
Input array, can be complex.
s : sequence of ints, optional
Shape (length of each axis) of the output (``s[0]`` refers to axis 0,
``s[1]`` to axis 1, etc.). This corresponds to `n` for ``ifft(x, n)``.
Along each axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
If `s` is not given, the shape of the input along the axes specified
by `axes` is used. See notes for issue on `ifft` zero padding.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last two
axes are used. A repeated index in `axes` means the transform over
that axis is performed multiple times. A one-element sequence means
that a one-dimensional FFT is performed.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or the last two axes if `axes` is not given.
Raises
------
ValueError
If `s` and `axes` have different length, or `axes` not given and
``len(s) != 2``.
IndexError
If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
fft2 : The forward 2-dimensional FFT, of which `ifft2` is the inverse.
ifftn : The inverse of the *n*-dimensional FFT.
fft : The one-dimensional FFT.
ifft : The one-dimensional inverse FFT.
Notes
-----
`ifft2` is just `ifftn` with a different default for `axes`.
See `ifftn` for details and a plotting example, and `numpy.fft` for
definition and conventions used.
Zero-padding, analogously with `ifft`, is performed by appending zeros to
the input along the specified dimension. Although this is the common
approach, it might lead to surprising results. If another form of zero
padding is desired, it must be performed before `ifft2` is called.
Examples
--------
>>> a = 4 * np.eye(4)
>>> np.fft.ifft2(a)
array([[ 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j],
[ 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j],
[ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j]])
"""
return _raw_fftnd(a, s, axes, ifft, norm)
def rfftn(a, s=None, axes=None, norm=None):
"""
Compute the N-dimensional discrete Fourier Transform for real input.
This function computes the N-dimensional discrete Fourier Transform over
any number of axes in an M-dimensional real array by means of the Fast
Fourier Transform (FFT). By default, all axes are transformed, with the
real transform performed over the last axis, while the remaining
transforms are complex.
Parameters
----------
a : array_like
Input array, taken to be real.
s : sequence of ints, optional
Shape (length along each transformed axis) to use from the input.
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
The final element of `s` corresponds to `n` for ``rfft(x, n)``, while
for the remaining axes, it corresponds to `n` for ``fft(x, n)``.
Along any axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
If `s` is not given, the shape of the input along the axes specified
by `axes` is used.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last ``len(s)``
axes are used, or all axes if `s` is also not specified.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` and `a`,
as explained in the parameters section above.
The length of the last axis transformed will be ``s[-1]//2+1``,
while the remaining transformed axes will have lengths according to
`s`, or unchanged from the input.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
irfftn : The inverse of `rfftn`, i.e. the inverse of the n-dimensional FFT
of real input.
fft : The one-dimensional FFT, with definitions and conventions used.
rfft : The one-dimensional FFT of real input.
fftn : The n-dimensional FFT.
rfft2 : The two-dimensional FFT of real input.
Notes
-----
The transform for real input is performed over the last transformation
axis, as by `rfft`, then the transform over the remaining axes is
performed as by `fftn`. The order of the output is as for `rfft` for the
final transformation axis, and as for `fftn` for the remaining
transformation axes.
See `fft` for details, definitions and conventions used.
Examples
--------
>>> a = np.ones((2, 2, 2))
>>> np.fft.rfftn(a)
array([[[ 8.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j]],
[[ 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j]]])
>>> np.fft.rfftn(a, axes=(2, 0))
array([[[ 4.+0.j, 0.+0.j],
[ 4.+0.j, 0.+0.j]],
[[ 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j]]])
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=float)
s, axes = _cook_nd_args(a, s, axes)
a = rfft(a, s[-1], axes[-1], norm)
for ii in range(len(axes)-1):
a = fft(a, s[ii], axes[ii], norm)
return a
def rfft2(a, s=None, axes=(-2, -1), norm=None):
"""
Compute the 2-dimensional FFT of a real array.
Parameters
----------
a : array
Input array, taken to be real.
s : sequence of ints, optional
Shape of the FFT.
axes : sequence of ints, optional
Axes over which to compute the FFT.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : ndarray
The result of the real 2-D FFT.
See Also
--------
rfftn : Compute the N-dimensional discrete Fourier Transform for real
input.
Notes
-----
This is really just `rfftn` with different default behavior.
For more details see `rfftn`.
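Examples
--------
A minimal sketch: for a constant real array, all of the energy ends up
in the zero-frequency term (values assume the default normalization).
>>> a = np.ones((2, 2))
>>> np.fft.rfft2(a)
array([[ 4.+0.j,  0.+0.j],
       [ 0.+0.j,  0.+0.j]])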
"""
return rfftn(a, s, axes, norm)
def irfftn(a, s=None, axes=None, norm=None):
"""
Compute the inverse of the N-dimensional FFT of real input.
This function computes the inverse of the N-dimensional discrete
Fourier Transform for real input over any number of axes in an
M-dimensional array by means of the Fast Fourier Transform (FFT). In
other words, ``irfftn(rfftn(a), a.shape) == a`` to within numerical
accuracy. (The ``a.shape`` is necessary like ``len(a)`` is for `irfft`,
and for the same reason.)
The input should be ordered in the same way as is returned by `rfftn`,
i.e. as for `irfft` for the final transformation axis, and as for `ifftn`
along all the other axes.
Parameters
----------
a : array_like
Input array.
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.). `s` is also the
number of input points used along this axis, except for the last axis,
where ``s[-1]//2+1`` points of the input are used.
Along any axis, if the shape indicated by `s` is smaller than that of
the input, the input is cropped. If it is larger, the input is padded
with zeros. If `s` is not given, the shape of the input along the
axes specified by `axes` is used.
axes : sequence of ints, optional
Axes over which to compute the inverse FFT. If not given, the last
`len(s)` axes are used, or all axes if `s` is also not specified.
Repeated indices in `axes` means that the inverse transform over that
axis is performed multiple times.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` or `a`,
as explained in the parameters section above.
The length of each transformed axis is as given by the corresponding
element of `s`, or the length of the input in every axis except for the
last one if `s` is not given. In the final transformed axis the length
of the output when `s` is not given is ``2*(m-1)`` where ``m`` is the
length of the final transformed axis of the input. To get an odd
number of output points in the final axis, `s` must be specified.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
rfftn : The forward n-dimensional FFT of real input,
of which `ifftn` is the inverse.
fft : The one-dimensional FFT, with definitions and conventions used.
irfft : The inverse of the one-dimensional FFT of real input.
irfft2 : The inverse of the two-dimensional FFT of real input.
Notes
-----
See `fft` for definitions and conventions used.
See `rfft` for definitions and conventions used for real input.
Examples
--------
>>> a = np.zeros((3, 2, 2))
>>> a[0, 0, 0] = 3 * 2 * 2
>>> np.fft.irfftn(a)
array([[[ 1., 1.],
[ 1., 1.]],
[[ 1., 1.],
[ 1., 1.]],
[[ 1., 1.],
[ 1., 1.]]])
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=complex)
s, axes = _cook_nd_args(a, s, axes, invreal=1)
for ii in range(len(axes)-1):
a = ifft(a, s[ii], axes[ii], norm)
a = irfft(a, s[-1], axes[-1], norm)
return a
def irfft2(a, s=None, axes=(-2, -1), norm=None):
"""
Compute the 2-dimensional inverse FFT of a real array.
Parameters
----------
a : array_like
The input array
s : sequence of ints, optional
Shape of the inverse FFT.
axes : sequence of ints, optional
The axes over which to compute the inverse fft.
Default is the last two axes.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : ndarray
The result of the inverse real 2-D FFT.
See Also
--------
irfftn : Compute the inverse of the N-dimensional FFT of real input.
Notes
-----
This is really `irfftn` with different defaults.
For more details see `irfftn`.
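Examples
--------
A minimal sketch: a spectrum with only a zero-frequency component
inverts to a constant array (values assume the default normalization).
>>> a = np.zeros((2, 2), dtype=complex)
>>> a[0, 0] = 4
>>> np.fft.irfft2(a)
array([[ 1.,  1.],
       [ 1.,  1.]])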
"""
return irfftn(a, s, axes, norm)
| bsd-3-clause |
herilalaina/scikit-learn | sklearn/feature_selection/tests/test_chi2.py | 41 | 3087 | """
Tests for chi2, currently the only feature selection function designed
specifically to work with sparse matrices.
"""
import warnings
import numpy as np
from scipy.sparse import coo_matrix, csr_matrix
import scipy.stats
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.feature_selection.univariate_selection import _chisquare
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import clean_warning_registry
# Feature 0 is highly informative for class 1;
# feature 1 is the same everywhere;
# feature 2 is a bit informative for class 2.
X = [[2, 1, 2],
[9, 1, 1],
[6, 1, 2],
[0, 1, 2]]
y = [0, 1, 2, 2]
def mkchi2(k):
"""Make k-best chi2 selector"""
return SelectKBest(chi2, k=k)
def test_chi2():
# Test Chi2 feature extraction
chi2 = mkchi2(k=1).fit(X, y)
assert_array_equal(chi2.get_support(indices=True), [0])
assert_array_equal(chi2.transform(X), np.array(X)[:, [0]])
chi2 = mkchi2(k=2).fit(X, y)
assert_array_equal(sorted(chi2.get_support(indices=True)), [0, 2])
Xsp = csr_matrix(X, dtype=np.float64)
chi2 = mkchi2(k=2).fit(Xsp, y)
assert_array_equal(sorted(chi2.get_support(indices=True)), [0, 2])
Xtrans = chi2.transform(Xsp)
assert_array_equal(Xtrans.shape, [Xsp.shape[0], 2])
# == doesn't work on scipy.sparse matrices
Xtrans = Xtrans.toarray()
Xtrans2 = mkchi2(k=2).fit_transform(Xsp, y).toarray()
assert_array_almost_equal(Xtrans, Xtrans2)
def test_chi2_coo():
# Check that chi2 works with a COO matrix
# (as returned by CountVectorizer, DictVectorizer)
Xcoo = coo_matrix(X)
mkchi2(k=2).fit_transform(Xcoo, y)
# if we got here without an exception, we're safe
def test_chi2_negative():
# Check for proper error on negative numbers in the input X.
X, y = [[0, 1], [-1e-20, 1]], [0, 1]
for X in (X, np.array(X), csr_matrix(X)):
assert_raises(ValueError, chi2, X, y)
def test_chi2_unused_feature():
# Unused feature should evaluate to NaN
# and should issue no runtime warning
clean_warning_registry()
with warnings.catch_warnings(record=True) as warned:
warnings.simplefilter('always')
chi, p = chi2([[1, 0], [0, 0]], [1, 0])
for w in warned:
if 'divide by zero' in repr(w):
raise AssertionError('Found unexpected warning %s' % w)
assert_array_equal(chi, [1, np.nan])
assert_array_equal(p[1], np.nan)
def test_chisquare():
# Test replacement for scipy.stats.chisquare against the original.
obs = np.array([[2., 2.],
[1., 1.]])
exp = np.array([[1.5, 1.5],
[1.5, 1.5]])
# call SciPy first because our version overwrites obs
chi_scp, p_scp = scipy.stats.chisquare(obs, exp)
chi_our, p_our = _chisquare(obs, exp)
assert_array_almost_equal(chi_scp, chi_our)
assert_array_almost_equal(p_scp, p_our)
| bsd-3-clause |
Lawrence-Liu/scikit-learn | sklearn/feature_selection/tests/test_chi2.py | 221 | 2398 | """
Tests for chi2, currently the only feature selection function designed
specifically to work with sparse matrices.
"""
import numpy as np
from scipy.sparse import coo_matrix, csr_matrix
import scipy.stats
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.feature_selection.univariate_selection import _chisquare
from nose.tools import assert_raises
from numpy.testing import assert_equal, assert_array_almost_equal
# Feature 0 is highly informative for class 1;
# feature 1 is the same everywhere;
# feature 2 is a bit informative for class 2.
X = [[2, 1, 2],
[9, 1, 1],
[6, 1, 2],
[0, 1, 2]]
y = [0, 1, 2, 2]
def mkchi2(k):
"""Make k-best chi2 selector"""
return SelectKBest(chi2, k=k)
def test_chi2():
# Test Chi2 feature extraction
chi2 = mkchi2(k=1).fit(X, y)
assert_equal(chi2.get_support(indices=True), [0])
assert_equal(chi2.transform(X), np.array(X)[:, [0]])
chi2 = mkchi2(k=2).fit(X, y)
assert_equal(sorted(chi2.get_support(indices=True)), [0, 2])
Xsp = csr_matrix(X, dtype=np.float64)
chi2 = mkchi2(k=2).fit(Xsp, y)
assert_equal(sorted(chi2.get_support(indices=True)), [0, 2])
Xtrans = chi2.transform(Xsp)
assert_equal(Xtrans.shape, [Xsp.shape[0], 2])
# == doesn't work on scipy.sparse matrices
Xtrans = Xtrans.toarray()
Xtrans2 = mkchi2(k=2).fit_transform(Xsp, y).toarray()
assert_equal(Xtrans, Xtrans2)
def test_chi2_coo():
# Check that chi2 works with a COO matrix
# (as returned by CountVectorizer, DictVectorizer)
Xcoo = coo_matrix(X)
mkchi2(k=2).fit_transform(Xcoo, y)
# if we got here without an exception, we're safe
def test_chi2_negative():
# Check for proper error on negative numbers in the input X.
X, y = [[0, 1], [-1e-20, 1]], [0, 1]
for X in (X, np.array(X), csr_matrix(X)):
assert_raises(ValueError, chi2, X, y)
def test_chisquare():
# Test replacement for scipy.stats.chisquare against the original.
obs = np.array([[2., 2.],
[1., 1.]])
exp = np.array([[1.5, 1.5],
[1.5, 1.5]])
# call SciPy first because our version overwrites obs
chi_scp, p_scp = scipy.stats.chisquare(obs, exp)
chi_our, p_our = _chisquare(obs, exp)
assert_array_almost_equal(chi_scp, chi_our)
assert_array_almost_equal(p_scp, p_our)
| bsd-3-clause |
pkainz/pylearn2 | pylearn2/testing/skip.py | 49 | 1363 | """
Helper functions for determining which tests to skip.
"""
__authors__ = "Ian Goodfellow"
__copyright__ = "Copyright 2010-2012, Universite de Montreal"
__credits__ = ["Ian Goodfellow"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
__email__ = "pylearn-dev@googlegroups.com"
from nose.plugins.skip import SkipTest
import os
from theano.sandbox import cuda
scipy_works = True
try:
import scipy
except ImportError:
# pyflakes gets mad if you set scipy to None here
scipy_works = False
sklearn_works = True
try:
import sklearn
except ImportError:
sklearn_works = False
h5py_works = True
try:
import h5py
except ImportError:
h5py_works = False
matplotlib_works = True
try:
from matplotlib import pyplot
except ImportError:
matplotlib_works = False
def skip_if_no_data():
if 'PYLEARN2_DATA_PATH' not in os.environ:
raise SkipTest()
def skip_if_no_scipy():
if not scipy_works:
raise SkipTest()
def skip_if_no_sklearn():
if not sklearn_works:
raise SkipTest()
def skip_if_no_gpu():
if not cuda.cuda_available:
raise SkipTest('Optional package cuda disabled.')
def skip_if_no_h5py():
if not h5py_works:
raise SkipTest()
def skip_if_no_matplotlib():
if not matplotlib_works:
raise SkipTest("matplotlib and pyplot are not available")
| bsd-3-clause |
aleksandr-bakanov/astropy | astropy/visualization/wcsaxes/core.py | 2 | 30823 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from functools import partial
from collections import defaultdict
import numpy as np
from matplotlib import rcParams
from matplotlib.artist import Artist
from matplotlib.axes import Axes, subplot_class_factory
from matplotlib.transforms import Affine2D, Bbox, Transform
import astropy.units as u
from astropy.coordinates import SkyCoord, BaseCoordinateFrame
from astropy.wcs import WCS
from astropy.wcs.wcsapi import BaseHighLevelWCS
from .transforms import CoordinateTransform
from .coordinates_map import CoordinatesMap
from .utils import get_coord_meta, transform_contour_set_inplace
from .frame import RectangularFrame, RectangularFrame1D
from .wcsapi import IDENTITY, transform_coord_meta_from_wcs
__all__ = ['WCSAxes', 'WCSAxesSubplot']
VISUAL_PROPERTIES = ['facecolor', 'edgecolor', 'linewidth', 'alpha', 'linestyle']
class _WCSAxesArtist(Artist):
"""This is a dummy artist to enforce the correct z-order of axis ticks,
tick labels, and gridlines.
FIXME: This is a bit of a hack. ``Axes.draw`` sorts the artists by zorder
and then renders them in sequence. For normal Matplotlib axes, the ticks,
tick labels, and gridlines are included in this list of artists and hence
are automatically drawn in the correct order. However, ``WCSAxes`` disables
the native ticks, labels, and gridlines. Instead, ``WCSAxes.draw`` renders
ersatz ticks, labels, and gridlines by explicitly calling the functions
``CoordinateHelper._draw_ticks``, ``CoordinateHelper._draw_grid``, etc.
This hack would not be necessary if ``WCSAxes`` drew ticks, tick labels,
and gridlines in the standard way."""
def draw(self, renderer, *args, **kwargs):
self.axes.draw_wcsaxes(renderer)
class WCSAxes(Axes):
"""
The main axes class that can be used to show world coordinates from a WCS.
Parameters
----------
fig : `~matplotlib.figure.Figure`
The figure to add the axes to
rect : list
The position of the axes in the figure in relative units. Should be
given as ``[left, bottom, width, height]``.
wcs : :class:`~astropy.wcs.WCS`, optional
The WCS for the data. If this is specified, ``transform`` cannot be
specified.
transform : `~matplotlib.transforms.Transform`, optional
The transform for the data. If this is specified, ``wcs`` cannot be
specified.
coord_meta : dict, optional
A dictionary providing additional metadata when ``transform`` is
specified. This should include the keys ``type``, ``wrap``, and
``unit``. Each of these should be a list with as many items as the
dimension of the WCS. The ``type`` entries should be one of
``longitude``, ``latitude``, or ``scalar``, the ``wrap`` entries should
give, for the longitude, the angle at which the coordinate wraps (and
`None` otherwise), and the ``unit`` should give the unit of the
coordinates as :class:`~astropy.units.Unit` instances. This can
optionally also include a ``format_unit`` entry giving the units to use
for the tick labels (if not specified, this defaults to ``unit``).
transData : `~matplotlib.transforms.Transform`, optional
Can be used to override the default data -> pixel mapping.
slices : tuple, optional
For WCS transformations with more than two dimensions, we need to
choose which dimensions are being shown in the 2D image. The slice
should contain one ``x`` entry, one ``y`` entry, and the rest of the
values should be integers indicating the slice through the data. The
order of the items in the slice should be the same as the order of the
dimensions in the :class:`~astropy.wcs.WCS`, and the opposite of the
order of the dimensions in Numpy. For example, ``(50, 'x', 'y')`` means
that the first WCS dimension (last Numpy dimension) will be sliced at
an index of 50, the second WCS and Numpy dimension will be shown on the
x axis, and the final WCS dimension (first Numpy dimension) will be
shown on the y-axis (and therefore the data will be plotted using
``data[:, :, 50].transpose()``)
frame_class : type, optional
The class for the frame, which should be a subclass of
:class:`~astropy.visualization.wcsaxes.frame.BaseFrame`. The default is to use a
:class:`~astropy.visualization.wcsaxes.frame.RectangularFrame`
"""
def __init__(self, fig, rect, wcs=None, transform=None, coord_meta=None,
transData=None, slices=None, frame_class=None,
**kwargs):
"""
"""
super().__init__(fig, rect, **kwargs)
self._bboxes = []
if frame_class is not None:
self.frame_class = frame_class
elif (wcs is not None and (wcs.pixel_n_dim == 1 or
(slices is not None and 'y' not in slices))):
self.frame_class = RectangularFrame1D
else:
self.frame_class = RectangularFrame
if transData is not None:
# User wants to override the transform for the final
# data->pixel mapping
self.transData = transData
self.reset_wcs(wcs=wcs, slices=slices, transform=transform, coord_meta=coord_meta)
self._hide_parent_artists()
self.format_coord = self._display_world_coords
self._display_coords_index = 0
fig.canvas.mpl_connect('key_press_event', self._set_cursor_prefs)
self.patch = self.coords.frame.patch
self._wcsaxesartist = _WCSAxesArtist()
self.add_artist(self._wcsaxesartist)
self._drawn = False
def _display_world_coords(self, x, y):
if not self._drawn:
return ""
if self._display_coords_index == -1:
return f"{x} {y} (pixel)"
pixel = np.array([x, y])
coords = self._all_coords[self._display_coords_index]
world = coords._transform.transform(np.array([pixel]))[0]
coord_strings = []
for idx, coord in enumerate(coords):
if coord.coord_index is not None:
coord_strings.append(coord.format_coord(world[coord.coord_index], format='ascii'))
coord_string = ' '.join(coord_strings)
if self._display_coords_index == 0:
system = "world"
else:
system = f"world, overlay {self._display_coords_index}"
coord_string = f"{coord_string} ({system})"
return coord_string
def _set_cursor_prefs(self, event, **kwargs):
if event.key == 'w':
self._display_coords_index += 1
if self._display_coords_index + 1 > len(self._all_coords):
self._display_coords_index = -1
def _hide_parent_artists(self):
# Turn off spines and current axes
for s in self.spines.values():
s.set_visible(False)
self.xaxis.set_visible(False)
if self.frame_class is not RectangularFrame1D:
self.yaxis.set_visible(False)
# We now overload ``imshow`` because we need to make sure that origin is
# set to ``lower`` for all images, which means that we need to flip RGB
# images.
def imshow(self, X, *args, **kwargs):
"""
Wrapper to Matplotlib's :meth:`~matplotlib.axes.Axes.imshow`.
If an RGB image is passed as a PIL object, it will be flipped
vertically and ``origin`` will be set to ``lower``, since WCS
transformations - like FITS files - assume that the origin is the lower
left pixel of the image (whereas RGB images have the origin in the top
left).
All arguments are passed to :meth:`~matplotlib.axes.Axes.imshow`.
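Examples
--------
A minimal sketch (``ax`` is assumed to be a `WCSAxes` instance and
``data`` a 2-D array matching its WCS)::
    ax.imshow(data, cmap='gray')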
"""
origin = kwargs.pop('origin', 'lower')
# plt.imshow passes origin as None, which we should default to lower.
if origin is None:
origin = 'lower'
elif origin == 'upper':
raise ValueError("Cannot use images with origin='upper' in WCSAxes.")
# To check whether the image is a PIL image we can check if the data
# has a 'getpixel' attribute - this is what Matplotlib's AxesImage does
try:
from PIL.Image import Image, FLIP_TOP_BOTTOM
except ImportError:
# We don't need to worry since PIL is not installed, so user cannot
# have passed RGB image.
pass
else:
if isinstance(X, Image) or hasattr(X, 'getpixel'):
X = X.transpose(FLIP_TOP_BOTTOM)
return super().imshow(X, *args, origin=origin, **kwargs)
def contour(self, *args, **kwargs):
"""
Plot contours.
This is a custom implementation of :meth:`~matplotlib.axes.Axes.contour`
which applies the transform (if specified) to all contours in one go for
performance rather than to each contour line individually. All
positional and keyword arguments are the same as for
:meth:`~matplotlib.axes.Axes.contour`.
"""
# In Matplotlib, when calling contour() with a transform, each
# individual path in the contour map is transformed separately. However,
# this is much too slow for us since each call to the transforms results
# in an Astropy coordinate transformation, which has a non-negligible
# overhead - therefore a better approach is to override contour(), call
# the Matplotlib one with no transform, then apply the transform in one
# go to all the segments that make up the contour map.
transform = kwargs.pop('transform', None)
cset = super().contour(*args, **kwargs)
if transform is not None:
# The transform passed to self.contour will normally include
# a transData component at the end, but we can remove that since
# we are already working in data space.
transform = transform - self.transData
transform_contour_set_inplace(cset, transform)
return cset
def contourf(self, *args, **kwargs):
"""
Plot filled contours.
This is a custom implementation of :meth:`~matplotlib.axes.Axes.contourf`
which applies the transform (if specified) to all contours in one go for
performance rather than to each contour line individually. All
positional and keyword arguments are the same as for
:meth:`~matplotlib.axes.Axes.contourf`.
"""
# See notes for contour above.
transform = kwargs.pop('transform', None)
cset = super().contourf(*args, **kwargs)
if transform is not None:
# The transform passed to self.contour will normally include
# a transData component at the end, but we can remove that since
# we are already working in data space.
transform = transform - self.transData
transform_contour_set_inplace(cset, transform)
return cset
def plot_coord(self, *args, **kwargs):
"""
Plot `~astropy.coordinates.SkyCoord` or
`~astropy.coordinates.BaseCoordinateFrame` objects onto the axes.
The first argument to
:meth:`~astropy.visualization.wcsaxes.WCSAxes.plot_coord` should be a
coordinate, which will then be converted to the first two parameters to
`matplotlib.axes.Axes.plot`. All other arguments are the same as
`matplotlib.axes.Axes.plot`. If not specified a ``transform`` keyword
argument will be created based on the coordinate.
Parameters
----------
coordinate : `~astropy.coordinates.SkyCoord` or `~astropy.coordinates.BaseCoordinateFrame`
The coordinate object to plot on the axes. This is converted to the
first two arguments to `matplotlib.axes.Axes.plot`.
See Also
--------
matplotlib.axes.Axes.plot : This method is called from this function with all arguments passed to it.
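Examples
--------
A minimal sketch (``ax`` is assumed to be a `WCSAxes` instance with a
celestial WCS)::
    from astropy.coordinates import SkyCoord
    import astropy.units as u
    c = SkyCoord(ra=10.68 * u.deg, dec=41.27 * u.deg, frame='icrs')
    ax.plot_coord(c, 'o')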
"""
if isinstance(args[0], (SkyCoord, BaseCoordinateFrame)):
# Extract the frame from the first argument.
frame0 = args[0]
if isinstance(frame0, SkyCoord):
frame0 = frame0.frame
native_frame = self._transform_pixel2world.frame_out
# Transform to the native frame of the plot
frame0 = frame0.transform_to(native_frame)
plot_data = []
for coord in self.coords:
if coord.coord_type == 'longitude':
plot_data.append(frame0.spherical.lon.to_value(u.deg))
elif coord.coord_type == 'latitude':
plot_data.append(frame0.spherical.lat.to_value(u.deg))
else:
raise NotImplementedError("Coordinates cannot be plotted with this "
"method because the WCS does not represent longitude/latitude.")
if 'transform' in kwargs.keys():
raise TypeError("The 'transform' keyword argument is not allowed,"
" as it is automatically determined by the input coordinate frame.")
transform = self.get_transform(native_frame)
kwargs.update({'transform': transform})
args = tuple(plot_data) + args[1:]
return super().plot(*args, **kwargs)
def reset_wcs(self, wcs=None, slices=None, transform=None, coord_meta=None):
"""
Reset the current Axes to use a new WCS object.
"""
# Here determine all the coordinate axes that should be shown.
if wcs is None and transform is None:
self.wcs = IDENTITY
else:
# We now force call 'set', which ensures the WCS object is
# consistent, which will only be important if the WCS has been set
# by hand. For example if the user sets a celestial WCS by hand and
# forgets to set the units, WCS.wcs.set() will do this.
if wcs is not None:
# Check if the WCS object is an instance of `astropy.wcs.WCS`
# This check is necessary as only `astropy.wcs.WCS` supports
# wcs.set() method
if isinstance(wcs, WCS):
wcs.wcs.set()
if isinstance(wcs, BaseHighLevelWCS):
wcs = wcs.low_level_wcs
self.wcs = wcs
# If we are making a new WCS, we need to preserve the path object since
# it may already be used by objects that have been plotted, and we need
# to continue updating it. CoordinatesMap will create a new frame
# instance, but we can tell that instance to keep using the old path.
if hasattr(self, 'coords'):
previous_frame = {'path': self.coords.frame._path,
'color': self.coords.frame.get_color(),
'linewidth': self.coords.frame.get_linewidth()}
else:
previous_frame = {'path': None}
if self.wcs is not None:
transform, coord_meta = transform_coord_meta_from_wcs(self.wcs, self.frame_class, slices=slices)
self.coords = CoordinatesMap(self,
transform=transform,
coord_meta=coord_meta,
frame_class=self.frame_class,
previous_frame_path=previous_frame['path'])
self._transform_pixel2world = transform
if previous_frame['path'] is not None:
self.coords.frame.set_color(previous_frame['color'])
self.coords.frame.set_linewidth(previous_frame['linewidth'])
self._all_coords = [self.coords]
# Common default settings for Rectangular Frame
for ind, pos in enumerate(coord_meta.get('default_axislabel_position', ['b', 'l'])):
self.coords[ind].set_axislabel_position(pos)
for ind, pos in enumerate(coord_meta.get('default_ticklabel_position', ['b', 'l'])):
self.coords[ind].set_ticklabel_position(pos)
for ind, pos in enumerate(coord_meta.get('default_ticks_position', ['bltr', 'bltr'])):
self.coords[ind].set_ticks_position(pos)
if rcParams['axes.grid']:
self.grid()
def draw_wcsaxes(self, renderer):
if not self.axison:
return
# Here need to find out range of all coordinates, and update range for
# each coordinate axis. For now, just assume it covers the whole sky.
self._bboxes = []
# This generates a structure like [coords][axis] = [...]
ticklabels_bbox = defaultdict(partial(defaultdict, list))
ticks_locs = defaultdict(partial(defaultdict, list))
visible_ticks = []
for coords in self._all_coords:
coords.frame.update()
for coord in coords:
coord._draw_grid(renderer)
for coords in self._all_coords:
for coord in coords:
coord._draw_ticks(renderer, bboxes=self._bboxes,
ticklabels_bbox=ticklabels_bbox[coord],
ticks_locs=ticks_locs[coord])
visible_ticks.extend(coord.ticklabels.get_visible_axes())
for coords in self._all_coords:
for coord in coords:
coord._draw_axislabels(renderer, bboxes=self._bboxes,
ticklabels_bbox=ticklabels_bbox,
ticks_locs=ticks_locs[coord],
visible_ticks=visible_ticks)
self.coords.frame.draw(renderer)
def draw(self, renderer, inframe=False):
# In Axes.draw, the following code can result in the xlim and ylim
# values changing, so we need to force call this here to make sure that
# the limits are correct before we update the patch.
locator = self.get_axes_locator()
if locator:
pos = locator(self, renderer)
self.apply_aspect(pos)
else:
self.apply_aspect()
if self._axisbelow is True:
self._wcsaxesartist.set_zorder(0.5)
elif self._axisbelow is False:
self._wcsaxesartist.set_zorder(2.5)
else:
# 'line': above patches, below lines
self._wcsaxesartist.set_zorder(1.5)
# We need to make sure that that frame path is up to date
self.coords.frame._update_patch_path()
super().draw(renderer, inframe=inframe)
self._drawn = True
# MATPLOTLIB_LT_30: The ``kwargs.pop('label', None)`` is to ensure
# compatibility with Matplotlib 2.x (which has label) and 3.x (which has
# xlabel). While these are meant to be a single positional argument,
# Matplotlib internally sometimes specifies e.g. set_xlabel(xlabel=...).
def set_xlabel(self, xlabel=None, labelpad=1, **kwargs):
if xlabel is None:
xlabel = kwargs.pop('label', None)
if xlabel is None:
raise TypeError("set_xlabel() missing 1 required positional argument: 'xlabel'")
for coord in self.coords:
if 'b' in coord.axislabels.get_visible_axes():
coord.set_axislabel(xlabel, minpad=labelpad, **kwargs)
break
def set_ylabel(self, ylabel=None, labelpad=1, **kwargs):
if ylabel is None:
ylabel = kwargs.pop('label', None)
if ylabel is None:
raise TypeError("set_ylabel() missing 1 required positional argument: 'ylabel'")
if self.frame_class is RectangularFrame1D:
return super().set_ylabel(ylabel, labelpad=labelpad, **kwargs)
for coord in self.coords:
if 'l' in coord.axislabels.get_visible_axes():
coord.set_axislabel(ylabel, minpad=labelpad, **kwargs)
break
def get_xlabel(self):
for coord in self.coords:
if 'b' in coord.axislabels.get_visible_axes():
return coord.get_axislabel()
def get_ylabel(self):
if self.frame_class is RectangularFrame1D:
return super().get_ylabel()
for coord in self.coords:
if 'l' in coord.axislabels.get_visible_axes():
return coord.get_axislabel()
def get_coords_overlay(self, frame, coord_meta=None):
"""
Create and return a new coordinate overlay for the given frame, as a
:class:`~astropy.visualization.wcsaxes.coordinates_map.CoordinatesMap`
drawn on top of these axes.
"""
# Here we can't use get_transform because that deals with
# pixel-to-pixel transformations when passing a WCS object.
if isinstance(frame, WCS):
transform, coord_meta = transform_coord_meta_from_wcs(frame, self.frame_class)
else:
transform = self._get_transform_no_transdata(frame)
if coord_meta is None:
coord_meta = get_coord_meta(frame)
coords = CoordinatesMap(self, transform=transform,
coord_meta=coord_meta,
frame_class=self.frame_class)
self._all_coords.append(coords)
# Common settings for overlay
coords[0].set_axislabel_position('t')
coords[1].set_axislabel_position('r')
coords[0].set_ticklabel_position('t')
coords[1].set_ticklabel_position('r')
self.overlay_coords = coords
return coords
def get_transform(self, frame):
"""
Return a transform from the specified frame to display coordinates.
This does not include the transData transformation
Parameters
----------
frame : :class:`~astropy.wcs.WCS` or :class:`~matplotlib.transforms.Transform` or str
The ``frame`` parameter can have several possible types:
* :class:`~astropy.wcs.WCS` instance: assumed to be a
transformation from pixel to world coordinates, where the
world coordinates are the same as those in the WCS
transformation used for this ``WCSAxes`` instance. This is
used for example to show contours, since this involves
plotting an array in pixel coordinates that are not the
final data coordinate and have to be transformed to the
common world coordinate system first.
* :class:`~matplotlib.transforms.Transform` instance: it is
assumed to be a transform to the world coordinates that are
part of the WCS used to instantiate this ``WCSAxes``
instance.
* ``'pixel'`` or ``'world'``: return a transformation that
allows users to plot in pixel/data coordinates (essentially
an identity transform) and ``world`` (the default
world-to-pixel transformation used to instantiate the
``WCSAxes`` instance).
* ``'fk5'`` or ``'galactic'``: return a transformation from
the specified frame to the pixel/data coordinates.
* :class:`~astropy.coordinates.BaseCoordinateFrame` instance.
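Examples
--------
A minimal sketch (``ax`` is assumed to be a `WCSAxes` instance with a
celestial WCS), overplotting a marker given in FK5 world coordinates::
    ax.scatter([266.4], [-29.0], transform=ax.get_transform('fk5'))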
"""
return self._get_transform_no_transdata(frame).inverted() + self.transData
def _get_transform_no_transdata(self, frame):
"""
Return a transform from data to the specified frame
"""
if isinstance(frame, WCS):
transform, coord_meta = transform_coord_meta_from_wcs(frame, self.frame_class)
transform_world2pixel = transform.inverted()
if self._transform_pixel2world.frame_out == transform_world2pixel.frame_in:
return self._transform_pixel2world + transform_world2pixel
else:
return (self._transform_pixel2world +
CoordinateTransform(self._transform_pixel2world.frame_out,
transform_world2pixel.frame_in) +
transform_world2pixel)
elif frame == 'pixel':
return Affine2D()
elif isinstance(frame, Transform):
return self._transform_pixel2world + frame
else:
if frame == 'world':
return self._transform_pixel2world
else:
coordinate_transform = CoordinateTransform(self._transform_pixel2world.frame_out, frame)
if coordinate_transform.same_frames:
return self._transform_pixel2world
else:
return self._transform_pixel2world + coordinate_transform
def get_tightbbox(self, renderer, *args, **kwargs):
# FIXME: we should determine what to do with the extra arguments here.
# Note that the expected signature of this method is different in
# Matplotlib 3.x compared to 2.x.
if not self.get_visible():
return
bb = [b for b in self._bboxes if b and (b.width != 0 or b.height != 0)]
if bb:
_bbox = Bbox.union(bb)
return _bbox
else:
return self.get_window_extent(renderer)
def grid(self, b=None, axis='both', *, which='major', **kwargs):
"""
Plot gridlines for both coordinates.
Standard matplotlib appearance options (color, alpha, etc.) can be
passed as keyword arguments. This behaves like `matplotlib.axes.Axes`
except that if no arguments are specified, the grid is shown rather
than toggled.
Parameters
----------
b : bool
Whether to show the gridlines.
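Examples
--------
A minimal sketch (``ax`` is assumed to be a `WCSAxes` instance)::
    ax.grid(color='white', ls='dotted')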
"""
if not hasattr(self, 'coords'):
return
if which != 'major':
raise NotImplementedError('Plotting the grid for the minor ticks is '
'not supported.')
if axis == 'both':
self.coords.grid(draw_grid=b, **kwargs)
elif axis == 'x':
self.coords[0].grid(draw_grid=b, **kwargs)
elif axis == 'y':
self.coords[1].grid(draw_grid=b, **kwargs)
else:
raise ValueError('axis should be one of x/y/both')
def tick_params(self, axis='both', **kwargs):
"""
Method to set the tick and tick label parameters in the same way as the
:meth:`~matplotlib.axes.Axes.tick_params` method in Matplotlib.
This is provided for convenience, but the recommended API is to use
:meth:`~astropy.visualization.wcsaxes.CoordinateHelper.set_ticks`,
:meth:`~astropy.visualization.wcsaxes.CoordinateHelper.set_ticklabel`,
:meth:`~astropy.visualization.wcsaxes.CoordinateHelper.set_ticks_position`,
:meth:`~astropy.visualization.wcsaxes.CoordinateHelper.set_ticklabel_position`,
and :meth:`~astropy.visualization.wcsaxes.CoordinateHelper.grid`.
Parameters
----------
axis : int or str, optional
Which axis to apply the parameters to. This defaults to 'both'
but this can also be set to an `int` or `str` that refers to the
axis to apply it to, following the valid values that can index
``ax.coords``. Note that ``'x'`` and ``'y'`` are also accepted in
the case of rectangular axes.
which : {'both', 'major', 'minor'}, optional
Which ticks to apply the settings to. By default, setting are
applied to both major and minor ticks. Note that if ``'minor'`` is
specified, only the length of the ticks can be set currently.
direction : {'in', 'out'}, optional
Puts ticks inside the axes, or outside the axes.
length : float, optional
Tick length in points.
width : float, optional
Tick width in points.
color : color, optional
Tick color (accepts any valid Matplotlib color)
pad : float, optional
Distance in points between tick and label.
labelsize : float or str, optional
Tick label font size in points or as a string (e.g., 'large').
labelcolor : color, optional
Tick label color (accepts any valid Matplotlib color)
colors : color, optional
Changes the tick color and the label color to the same value
(accepts any valid Matplotlib color).
bottom, top, left, right : bool, optional
Where to draw the ticks. Note that this can only be given if a
specific coordinate is specified via the ``axis`` argument, and it
will not work correctly if the frame is not rectangular.
labelbottom, labeltop, labelleft, labelright : bool, optional
Where to draw the tick labels. Note that this can only be given if a
specific coordinate is specified via the ``axis`` argument, and it
will not work correctly if the frame is not rectangular.
grid_color : color, optional
The color of the grid lines (accepts any valid Matplotlib color).
grid_alpha : float, optional
Transparency of grid lines: 0 (transparent) to 1 (opaque).
grid_linewidth : float, optional
Width of grid lines in points.
grid_linestyle : str, optional
The style of the grid lines (accepts any valid Matplotlib line
style).
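Examples
--------
A minimal sketch (``ax`` is assumed to be a `WCSAxes` instance)::
    ax.tick_params(direction='in', length=6, colors='gray')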
"""
if not hasattr(self, 'coords'):
# Axes haven't been fully initialized yet, so just ignore, as
# Axes.__init__ calls this method
return
if axis == 'both':
for pos in ('bottom', 'left', 'top', 'right'):
if pos in kwargs:
raise ValueError(f"Cannot specify {pos}= when axis='both'")
if 'label' + pos in kwargs:
raise ValueError(f"Cannot specify label{pos}= when axis='both'")
for coord in self.coords:
coord.tick_params(**kwargs)
elif axis in self.coords:
self.coords[axis].tick_params(**kwargs)
elif axis in ('x', 'y') and self.frame_class is RectangularFrame:
spine = 'b' if axis == 'x' else 'l'
for coord in self.coords:
if spine in coord.axislabels.get_visible_axes():
coord.tick_params(**kwargs)
# In the following, we put the generated subplot class in a temporary class and
# we then inherit it - if we don't do this, the generated class appears to
# belong in matplotlib, not in WCSAxes, from the API's point of view.
class WCSAxesSubplot(subplot_class_factory(WCSAxes)):
"""
A subclass class for WCSAxes
"""
pass
| bsd-3-clause |
pyNLO/PyNLO | src/examples/supercontinuum_with_FROG.py | 2 | 4038 | import numpy as np
import matplotlib.pyplot as plt
import pynlo
FWHM = 0.050 # pulse duration (ps)
pulseWL = 1550 # pulse central wavelength (nm)
EPP = 50e-12 # Energy per pulse (J)
GDD = 0.0 # Group delay dispersion (ps^2)
TOD = 0.0 # Third order dispersion (ps^3)
Window = 10.0 # simulation window (ps)
Steps = 40 # simulation steps
Points = 2**13 # simulation points
error = 0.2
beta2 = -120 # (ps^2/km)
beta3 = 0.00 # (ps^3/km)
beta4 = 0.005 # (ps^4/km)
Length = 10 # length in mm
Alpha = 0.0 # attenuation coefficient (dB/cm)
Gamma = 1000 # nonlinear coefficient Gamma (1/(W km))
fibWL = pulseWL # Center WL of fiber (nm)
Raman = True # Enable Raman effect?
Steep = True # Enable self steepening?
alpha = np.log((10**(Alpha * 0.1))) * 100 # convert from dB/cm to 1/m: ln(10**(A_dB/10)) per cm, times 100 cm/m
# set up plots for the results:
fig = plt.figure(figsize=(8,8))
ax0 = plt.subplot2grid((3,2), (0, 0), rowspan=1)
ax1 = plt.subplot2grid((3,2), (0, 1), rowspan=1)
ax2 = plt.subplot2grid((3,2), (1, 0), rowspan=2, sharex=ax0)
ax3 = plt.subplot2grid((3,2), (1, 1), rowspan=2, sharex=ax1)
######## This is where the PyNLO magic happens! ############################
# create the pulse!
pulse = pynlo.light.DerivedPulses.SechPulse(power = 1, # Power will be scaled by set_epp
T0_ps = FWHM/1.76,
center_wavelength_nm = pulseWL,
time_window_ps = Window,
GDD=GDD, TOD=TOD,
NPTS = Points,
frep_MHz = 100,
power_is_avg = False)
# set the pulse energy!
pulse.set_epp(EPP)
# create the fiber!
fiber1 = pynlo.media.fibers.fiber.FiberInstance()
fiber1.generate_fiber(Length * 1e-3, center_wl_nm=fibWL, betas=(beta2, beta3, beta4),
gamma_W_m=Gamma * 1e-3, gvd_units='ps^n/km', gain=-alpha)
# Propagation
evol = pynlo.interactions.FourWaveMixing.SSFM.SSFM(local_error=error, USE_SIMPLE_RAMAN=True,
disable_Raman = np.logical_not(Raman),
disable_self_steepening = np.logical_not(Steep))
y, AW, AT, pulse_out = evol.propagate(pulse_in=pulse, fiber=fiber1, n_steps=Steps)
########## That's it! Physics complete. Just plotting commands from here! ################
F = pulse.F_THz # Frequency grid of pulse (THz)
def dB(num):
# power in decibels: 10*log10 of the squared magnitude
return 10 * np.log10(np.abs(num)**2)
zW = dB( np.transpose(AW)[:, (F > 0)] )
zT = dB( np.transpose(AT) )
y_mm = y * 1e3 # convert distance to mm
ax0.plot(pulse_out.F_THz, dB(pulse_out.AW), color = 'r')
ax1.plot(pulse_out.T_ps, dB(pulse_out.AT), color = 'r')
ax0.plot(pulse.F_THz, dB(pulse.AW), color = 'b')
ax1.plot(pulse.T_ps, dB(pulse.AT), color = 'b')
extent = (np.min(F[F > 0]), np.max(F[F > 0]), 0, Length)
ax2.imshow(zW, extent=extent,
vmin=np.max(zW) - 40.0, vmax=np.max(zW),
aspect='auto', origin='lower')
extent = (np.min(pulse.T_ps), np.max(pulse.T_ps), np.min(y_mm), Length)
ax3.imshow(zT, extent=extent,
vmin=np.max(zT) - 40.0, vmax=np.max(zT),
aspect='auto', origin='lower')
ax0.set_ylabel('Intensity (dB)')
ax0.set_ylim( - 80, 0)
ax1.set_ylim( - 40, 40)
ax2.set_ylabel('Propagation distance (mm)')
ax2.set_xlabel('Frequency (THz)')
ax2.set_xlim(0,400)
ax3.set_xlabel('Time (ps)')
fig, axs = plt.subplots(1,2,figsize=(10,5))
for ax, gate_type in zip(axs,('xfrog', 'frog')):
DELAYS, FREQS, extent, spectrogram = pulse_out.spectrogram(gate_type=gate_type, gate_function_width_ps=0.05, time_steps=1000)
ax.imshow(spectrogram, aspect='auto', extent=extent)
ax.set_xlabel('Time (ps)')
ax.set_ylabel('Frequency (THz)')
ax.set_title(gate_type)
plt.show() | gpl-3.0 |
orbkit/orbkit | doc/literature/parse_bib.py | 1 | 7296 | from __future__ import division, print_function
import numpy
import io
class Bibliography:
'''Class handling parsing, plotting and printing of ORBKIT bibliographic data for the website'''
def __init__(self):
self.unique_entries = None
self.entries = []
self.filenames = []
self.keys = [key for key in self.get_template()]
self.months = {'jan': 0, 'feb': 1, 'mar': 2, 'apr': 3,
'may': 4, 'jun': 5, 'jul': 6, 'aug': 7,
'sep': 8, 'oct': 9, 'nov': 10, 'dec': 11,}
self.journals = {'Molecular Physics': 'Mol. Phys.',
'Physical Review A': 'Phys. Rev. A',
'Physical Review B': 'Phys. Rev. B',
'The Journal of Physical Chemistry A': 'J. Phys. Chem. A',
'The Journal of Physical Chemistry C': 'J. Phys. Chem. C',
'Molecules': 'Molecules',
'Journal of Computational Chemistry': 'J. Comput. Chem.',
'Chemical Physics': 'Chem. Phys.',
'Chemical Physics Letters': 'Chem. Phys. Lett.',
'Journal of Molecular Graphics and Modelling': 'J. Mol. Graph. Model.',
'The Journal of Chemical Physics': 'J. Chem. Phys.',
'Journal of Computer-Aided Molecular Design': 'J. Comput. Aided Mol. Des.',
'The Journal of Physical Chemistry Letters': 'J. Phys. Chem. Lett.',
'Physica B: Condensed Matter': 'Physica B',
'Physical Chemistry Chemical Physics': 'Phys. Chem. Chem. Phys.',
'International Journal of Quantum Chemistry': 'Int. J. Quantum Chem.',
'Inorganic Chemistry': 'Inorg. Chem.',
'Journal of the American Chemical Society': 'J. Am. Chem. Soc.',
'Journal of Chemical Theory and Computation': 'J. Chem. Theory Comput.',
}
def get_template(self):
return {'year': None,
'month': None,
'ym': None,
'title': None,
'author': None,
'volume': None,
'number': None,
'journal': None,
'pages': None,
'doi': None,
'url': None
}
def read_bib(self, filename):
'''Reads a bib file and parses the data.
'''
with io.open(filename + '.bib', 'r',encoding='ISO-8859-1') as fd:
entry = []
for line in fd.readlines():
if '@article' in line:
entry.append(self.get_template())
else:
splitline = line.split('=')
for key in self.keys:
if key in splitline[0]:
for i in range(10):
if splitline[1][0] in [',', '{', ' ', '\t']:
splitline[1] = splitline[1][1:]
if splitline[1][-1] in [',', '}', ' ', '\t', '\n']:
splitline[1] = splitline[1][:-1]
entry[-1][key] = splitline[1]
for i in range(len(entry)):
name = entry[i]['author'].split('and')[0]
if ',' in name:
name = name.split(',')
name = ' '.join([name[1].strip(),name[0].strip()])
if len(entry[i]['author'].split('and')) == 1:
entry[i]['author'] = name + ' '
else:
entry[i]['author'] = name + ' *et al.* '
if 'arXiv' in entry[i]['journal']:
entry[i]['pages'] = entry[i]['journal'].split()[-1]
entry[i]['journal'] = 'arXiv preprint'
else:
entry[i]['journal'] = self.journals[entry[i]['journal']]
entry[i]['ym'] = int(entry[i]['year']) + self.months[entry[i]['month'].lower()] / 12.
self.entries.append(entry)
self.filenames.append(filename)
def sort(self):
'''Sorts bib entries by year and month of publication and writes the results to a .csv file.
'''
for ie, entry in enumerate(self.entries):
entry_sort = []
ym = numpy.zeros(len(entry))
for i, item in enumerate(entry):
ym[i] = item['ym']
for i in numpy.argsort(ym):
entry_sort.append(self.entries[ie][i])
self.entries[ie] = entry_sort
  def url_exists(self, url):
    for entry in self.unique_entries:
      if entry['url'] == url:
        return True
    return False
def clean(self):
'''Uses url to determine unique citations'''
self.unique_entries = []
for entry in self.entries:
for item in entry:
if not self.url_exists(item['url']):
self.unique_entries.append(item)
  def write_csv(self):
with io.open('citations.csv', 'w',encoding='ISO-8859-1') as fd:
for i, entry in enumerate(self.unique_entries):
        # collapse doubled spaces left by the trailing space in the author field
        if 'arxiv' in entry['journal'].lower():
          print(u'{i},"{author} `{journal} <{url}>`__ {pages} ({year})."'.format(i=i+1,**entry).replace(u'  ', u' '), file=fd)
        elif entry['volume'] is None or entry['pages'] is None:
          print(u'{i},"{author} `{journal} <{url}>`__ {year}."'.format(i=i+1,**entry).replace(u'  ', u' '), file=fd)
        else:
          print(u'{i},"{author} `{journal} <{url}>`__ **{volume}**, {pages} ({year})."'.format(i=i+1,**entry).replace(u'  ', u' '), file=fd)
def plot(self):
'''Creates a plot from the .csv files containing the bibliographic data.
'''
# These are the "Tableau 20" colors as RGB.
tableau20 = [(31, 119, 180), (174, 199, 232), (255, 127, 14), (255, 187, 120),
(44, 160, 44), (152, 223, 138), (214, 39, 40), (255, 152, 150),
(148, 103, 189), (197, 176, 213), (140, 86, 75), (196, 156, 148),
(227, 119, 194), (247, 182, 210), (127, 127, 127), (199, 199, 199),
(188, 189, 34), (219, 219, 141), (23, 190, 207), (158, 218, 229)]
# Scale the RGB values to the [0, 1] range, which is the format matplotlib accepts.
for i in range(len(tableau20)):
r, g, b = tableau20[i]
tableau20[i] = (r / 255., g / 255., b / 255.)
labels = {'orbkit': 'ORBKIT', 'detci': 'detCI@ORBKIT'}
import matplotlib.pyplot as plt
plt.figure(figsize=(8,6))
xticks = []
max_y = float('-inf')
for ie, entry in enumerate(self.entries):
ydata = []
xdata = []
for i, item in enumerate(entry):
xdata.append(item['ym'])
ydata.append(i+1)
xdata = numpy.array(xdata)
ydata = numpy.array(ydata)
if numpy.amax(ydata) > max_y:
max_y = int(round(numpy.amax(ydata),0))
for x in xdata:
if int(round(x,0)) not in xticks:
xticks.append(int(round(x,0)))
plt.plot(xdata, ydata, linewidth=2.5, color=tableau20[2*ie], label=labels[self.filenames[ie]])
plt.plot(xdata, ydata,'ro',marker='o', ms=8 , color=tableau20[2*ie])
#plt.tight_layout()
plt.rc('font',family='Serif')
plt.legend(bbox_to_anchor=(1.0, 1.0), loc=1, borderaxespad=0.0)
dx = int(round((xticks[-1] - xticks[0]) / 6, 0))
dy = int(round(max_y / 6, 0))
plt.xticks(xticks, [str(x) for x in xticks], fontsize=14)
plt.axis([xticks[0]-.17, xticks[-1] + .17, -dy/5, max_y + 1.2*dy])
plt.xlabel('date', fontsize=14)
plt.ylabel('total citations', fontsize=14)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.savefig('citations.png', dpi=100)
#plt.show()
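# Illustrative usage of this module (the .bib base names are assumptions taken
# from the `labels` dict in plot() above):
# bib = Bibliography()
# for name in ('orbkit', 'detci'):
#     bib.read_bib(name)  # parses <name>.bib
# bib.sort()
# bib.clean()
# bib.write_csv()         # writes citations.csv
# bib.plot()              # writes citations.png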
| lgpl-3.0 |
Chilipp/nc2map | tests/onecbar_test.py | 1 | 1813 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""this script tests whether the creation and update of one colorbar works.
Check the output (onecbar_test.pdf and onecbar_test.gif)"""
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import nc2map
import time
import datetime as dt
testdict = {
'bounds': ['rounded',11,[25,75]],
'clabel': '%(long_name)s [%(units)s] in %B, %Y',
'cmap': 'winter',
'cticksize': 'x-large',
'ctickweight': 'bold',
'extend': 'both',
'labelsize': 'large',
'labelweight': 'bold',
'plotcbar': 'r',
'ticklabels': ['test%i' % i for i in xrange(5)],
'ticks': 2,
}
output = "onecbar_test.pdf"
pdf = PdfPages(output)
t = dt.datetime.now()
mymaps = nc2map.Maps('../demo/demo-t2m-u-v.nc', vlst=['u', 'v'], ax=(1,2))
plt.show(block=False)
# test creation and removing of cbar
mymaps.update(title='Colorbar created for %(var)s')
mymaps.update_cbar(var='u', clabel='%(var)s')
pdf.savefig(plt.gcf())
mymaps.update(title='Colorbar removed')
mymaps.removecbars(var='u')
pdf.savefig(plt.gcf())
mymaps.update(title='Colorbar recreated for %(var)s')
mymaps.update_cbar(var=['u', 'v'])
pdf.savefig(plt.gcf())
for key, val in testdict.items():
print("updating " + str(key) + " to " + str(val))
strval = str(val).replace('{', '{{').replace('}', '}}')
mymaps.update(title=str(key) + ": " + strval)
mymaps.update_cbar(**{key: val})
pdf.savefig(plt.gcf())
#time.sleep(2)
mymaps.update_cbar(todefault=True, var=['u', 'v'])
mymaps.update(title='Done')
pdf.savefig(plt.gcf())
print("Saving to " + output)
pdf.close()
print("Time needed: " + str(dt.datetime.now()-t))
mymaps.update(title='%(long_name)s (%(var)s)')
mymaps.update_cbar(clabel='%B, %Y')
mymaps.make_movie(output.replace('.pdf', '')+'.gif')
| gpl-2.0 |
SitiBanc/1061_NCTU_IOMDS | 1101/Homework 6/HW6.py | 1 | 6195 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 1 21:07:02 2017
@author: sitibanc
"""
import os, math, random
import numpy as np
from PIL import Image
from matplotlib import pyplot as plt
def loadIMG(file_path):
# Load File
os.chdir(file_path) # Change directory
    filelist = os.listdir()                 # list of file names in the directory
    x = np.zeros((len(filelist), 19 * 19))  # one row per file, 19*19 = 361 pixels each
# Read Image
for i in range(len(filelist)):
IMG = Image.open(filelist[i])
        x[i, :] = np.array(IMG.getdata())   # flatten the 19x19 image into a 1-D array
return x
def BPNNtrain(pf, nf, hn, lr, iteration):
    # positive features, negative features, hidden node count, learning rate, iteration count
pn = pf.shape[0]
nn = nf.shape[0]
fn = pf.shape[1] # feature number
model = dict()
# Randomly assign initial weights
    WI = np.random.normal(0, 1, (fn + 1, hn)) # input-to-hidden weights (the +1 is the bias w0, which shifts the sigmoid)
WO = np.random.normal(0, 1, (hn + 1, 1)) # hidden to output layer weights
feature = np.append(pf, nf, axis = 0)
target = np.append(np.ones((pn, 1)), np.zeros((nn, 1)), axis = 0)
for t in range(iteration):
s = random.sample(range(pn + nn), pn + nn) # shuffle training data (mix positive and negative data)
for i in range(pn + nn):
            ins = np.append(feature[s[i], :], 1) # input signal with bias constant appended
            ho = ins.dot(WI) # hidden layer output (matrix multiplication)
            for j in range(hn):
                ho[j] = 1 / (1 + math.exp(-ho[j])) # sigmoid function (constrains values to 0~1)
            hs = np.append(ho, 1) # hidden layer signal with bias constant appended
out = hs.dot(WO) # matrix multiplication (multiply weights)
out = 1 / (1 + math.exp(-out))
# Gradient descent
dk = out * (1 - out) * (target[s[i]] - out) # delta value of output node
dh = ho * (1 - ho) * WO[0:hn, 0] * dk # delta value of hidden nodes
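            # These are the standard backprop delta rules for sigmoid units:
            # dk = y*(1-y)*(t-y) at the output node and dh_j = h_j*(1-h_j)*w_j*dk
            # at hidden node j, i.e. the sigmoid derivative times the backpropagated error.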
# Update weights
WO[:, 0] = WO[:, 0] + lr * dk * hs
for j in range(hn):
WI[:, j] = WI[:, j] + lr * dh[j] * ins
model['WI'] = WI
model['WO'] = WO
return model
def BPNNtest(feature, model):
sn = feature.shape[0] # sample number
WI = model['WI'] # input to hidden layer weights
WO = model['WO'] # hidden to output layer weights
hn = WI.shape[1] # hidden nodes number
out = np.zeros((sn, 1)) # model predict value
for i in range(sn):
ins = np.append(feature[i, :], 1) # adding constant
ho = ins.dot(WI) # multiply input-to-hidden weights
for j in range(hn):
ho[j] = 1 / (1 + math.exp(-ho[j])) # apply sigmoid function
hs = np.append(ho, 1) # adding constant
out[i] = hs.dot(WO) # multiply hidden-to-output weights
out[i] = 1 / (1 + math.exp(-out[i])) # apply sigmoid function
return out
def calROC(network, p, n, hn, lr, t):
pscore = BPNNtest(p, network)
nscore = BPNNtest(n, network)
# Calculate TPR & FPR with different thresholds
ROC = np.zeros((100, 2))
    for ti in range(100):
        # Calculate True Positive Rate & False Positive Rate at this threshold
        threshold = (ti + 1) / 100
        for i in range(len(pscore)):
            if pscore[i, 0] >= threshold:
                ROC[ti, 0] += 1 / len(pscore) # True Positive / Actual Positive
        for i in range(len(nscore)):
            if nscore[i, 0] >= threshold:
                ROC[ti, 1] += 1 / len(nscore) # False Positive / Actual Negative
return ROC
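# Note: each row of ROC holds (TPR, FPR) for one threshold; row ti corresponds to
# threshold (ti + 1) / 100, so e.g. ROC[49] is the operating point at 0.50.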
# Load Data
trainface = loadIMG('/home/sitibanc/1061_NCTU_IOMDS/1101/Course Material/CBCL/train/face') / 255
trainnonface = loadIMG('/home/sitibanc/1061_NCTU_IOMDS/1101/Course Material/CBCL/train/non-face') / 255
testface = loadIMG('/home/sitibanc/1061_NCTU_IOMDS/1101/Course Material/CBCL/test/face') / 255
testnonface = loadIMG('/home/sitibanc/1061_NCTU_IOMDS/1101/Course Material/CBCL/test/non-face') / 255
plt.xlabel('FPR')
plt.ylabel('TPR')
# Test hidden nodes number
hn = [20, 30, 40]
lr = [0.01] * 3
it = [10] * 3
for i in range(len(hn)):
network = BPNNtrain(trainface, trainnonface, hn[i], lr[i], it[i])
ROC = calROC(network, trainface, trainnonface, hn[i], lr[i], it[i])
plt.plot(ROC[:, 1], ROC[:, 0])
ROC = calROC(network, testface, testnonface, hn[i], lr[i], it[i])
plt.plot(ROC[:, 1], ROC[:, 0])
print('\nTest Result:\nHidden Nodes Number:', hn[i], '\nLearning Rate:', lr[i], '\nIteration Times:', it[i])
plt.show()
# Test learning rate
hn = [20] * 3
lr = [0.01, 0.1, 0.2]
it = [10] * 3
for i in range(len(hn)):
network = BPNNtrain(trainface, trainnonface, hn[i], lr[i], it[i])
ROC = calROC(network, trainface, trainnonface, hn[i], lr[i], it[i])
plt.plot(ROC[:, 1], ROC[:, 0])
ROC = calROC(network, testface, testnonface, hn[i], lr[i], it[i])
plt.plot(ROC[:, 1], ROC[:, 0])
print('\nTest Result:\nHidden Nodes Number:', hn[i], '\nLearning Rate:', lr[i], '\nIteration Times:', it[i])
plt.show()
# Test Iteration Times
hn = [20] * 3
lr = [0.01] * 3
it = [10, 20, 30]
for i in range(len(hn)):
network = BPNNtrain(trainface, trainnonface, hn[i], lr[i], it[i])
ROC = calROC(network, trainface, trainnonface, hn[i], lr[i], it[i])
plt.plot(ROC[:, 1], ROC[:, 0])
ROC = calROC(network, testface, testnonface, hn[i], lr[i], it[i])
plt.plot(ROC[:, 1], ROC[:, 0])
print('\nTest Result:\nHidden Nodes Number:', hn[i], '\nLearning Rate:', lr[i], '\nIteration Times:', it[i])
plt.show() | apache-2.0 |
lsst-dm/great3-public | great3sims/galaxies.py | 1 | 52001 | # Copyright (c) 2014, the GREAT3 executive committee (http://www.great3challenge.info/?q=contacts)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted
# provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions
# and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of
# conditions and the following disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to
# endorse or promote products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""File containing the classes that generate parameters and catalogs for galaxies."""
import galsim
import pyfits
import os
import numpy as np
import math
from . import constants
def makeBuilder(real_galaxy, obs_type, shear_type, multiepoch, gal_dir, preload, gal_pairs):
"""Return a GalaxyBuilder appropriate for the given options.
@param[in] real_galaxy If True, we should use real galaxy images instead of analytic models.
@param[in] obs_type Observation type: either "ground" or "space".
@param[in] shear_type Shear type: either "constant" or "variable".
(This information is used to decide on a scheme for shape
noise cancellation.)
@param[in] multiepoch If True, this is a multiepoch simulation.
@param[in] gal_dir Directory with galaxy catalog information.
@param[in] preload Preload the RealGalaxyCatalog for realistic galaxy branches?
@param[in] gal_pairs For constant shear branches, should it use 90 degree rotated pairs to
cancel out shape noise, or not?
"""
# The COSMOSGalaxyBuilder is the builder for both parametric and real galaxies based on an HST
# training set, so we return one in either case.
return COSMOSGalaxyBuilder(real_galaxy=real_galaxy, obs_type=obs_type,
shear_type=shear_type, multiepoch=multiepoch,
gal_dir=gal_dir, preload=preload, gal_pairs=gal_pairs)
class GalaxyBuilder(object):
"""A GalaxyBuilder is a class that can carry out the steps necessary to define a galaxy
population for GREAT3. It must be able to generate parameters of the galaxy population, and
make a galaxy catalog (even a very simple one)."""
def generateSubfieldParameters(self, rng, subfield_index):
"""Return a dict of metaparameters for the given subfield. These will be passed to
generateCatalog() when it is called.
@param[in] rng A galsim.UniformDeviate to be used for any random numbers.
@param[in] subfield_index Index of the simulated patch of sky.
A "schema" entry is required in the returned dict: a list of (name, type) tuples that define
the catalog fields that will be filled by this builder. These field names may be used in
catalogs that also have fields from other builders, so care should be taken to ensure the
schema entries are unique.
"""
raise NotImplementedError("GalaxyBuilder is abstract.")
def generateCatalog(self, rng, catalog, parameters, variance, noise_mult, seeing):
"""Fill columns of the given catalog with per-object galaxy parameters.
@param[in] rng A galsim.UniformDeviate to be used for any random numbers.
@param[in,out] catalog A structured NumPy array to fill. The 'index', 'x', and 'y'
columns will already be filled, and should be used when ensuring
that the shape noise is pure B-mode for variable shear branches.
All columns defined by the 'schema' entry in the dict returned by
generateSubfieldParameters() will also be present, and should be
filled. Other columns may be present as well, and should be
ignored.
@param[in] parameters A dict of metaparameters, as returned by the
generateSubfieldParameters() method.
@param[in] variance A typical noise variance that will be added, so we can avoid those
galaxies with too high / low S/N. This does not include any
reduction in noise for the deep fields or for multiepoch imaging,
so we can impose galaxy selection to get only those that would be
seen in the non-deep fields at reasonable S/N.
@param[in] noise_mult A factor by which the noise variance will be multiplied in actual
image generation (will be <1 for deep fields). This will be
necessary so we can check that the requested noise variance is
not below that in the real images, which would make image
generation impossible for the real galaxy case.
@param[in] seeing A value of seeing to use when applying cuts. Can be None for
space data (since in that case, the input seeing is ignored).
"""
raise NotImplementedError("GalaxyBuilder is abstract.")
def makeConfigDict(self):
"""Make the 'gal' portion of the config dict that will be used by GalSim to generate the
images.
"""
raise NotImplementedError("GalaxyBuilder is abstract.")
def makeGalSimObject(self, record, parameters, xsize, ysize, rng):
"""Given a catalog record, a dict of metaparameters (as generated
by generateCatalog() and generateSubfieldParameters(), respectively), and a
galsim.UniformDeviate, return the following objects:
- a galsim.GSObject that represents the galaxy (not including the shear due to lensing).
- a galsim.CorrelatedNoise object that can be used to whiten the noise already present in
the galaxy object, after the same shear and convolutions are applied to it as the galaxy
object, or None if there is no noise in the galaxy object.
The returned galaxy should have all sizes in arcsec, though positional offsets can be
specified in pixels when drawing in builder.py. Conversions can be performed using
constants.pixel_scale when necessary.
The maximum size of the postage stamp image that will be created is passed as well, to allow
a RealGalaxy object to be noise-padded before the original pixel response is deconvolved
from the noise (which should be done before it is returned).
NOTE: This function was not used when making the GREAT3 images, since we used the GalSim
config interface rather than generating the images in python scripts. However, this
function can be used in small-scale tests for which it's not too expensive to generate the
images in python in a non-parallel way.
"""
raise NotImplementedError("GalaxyBuilder is abstract.")
def _gammafn(x):
"""The gamma function is present in python2.7's math module, but not 2.6. So try using that,
and if it fails, use some code from RosettaCode:
http://rosettacode.org/wiki/Gamma_function#Python
"""
try:
import math
return math.gamma(x)
except:
        y = float(x) - 1.0
        sm = _gammafn._a[-1]
        for an in _gammafn._a[-2::-1]:
            sm = sm * y + an
        return 1.0 / sm
_gammafn._a = ( 1.00000000000000000000, 0.57721566490153286061, -0.65587807152025388108,
-0.04200263503409523553, 0.16653861138229148950, -0.04219773455554433675,
-0.00962197152787697356, 0.00721894324666309954, -0.00116516759185906511,
-0.00021524167411495097, 0.00012805028238811619, -0.00002013485478078824,
-0.00000125049348214267, 0.00000113302723198170, -0.00000020563384169776,
0.00000000611609510448, 0.00000000500200764447, -0.00000000118127457049,
0.00000000010434267117, 0.00000000000778226344, -0.00000000000369680562,
0.00000000000051003703, -0.00000000000002058326, -0.00000000000000534812,
0.00000000000000122678, -0.00000000000000011813, 0.00000000000000000119,
0.00000000000000000141, -0.00000000000000000023, 0.00000000000000000002
)
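# Sanity check (illustrative): _gammafn(5.0) == 24.0 and _gammafn(0.5) is close to
# math.sqrt(math.pi) ~ 1.77245, for both the math.gamma path and the series fallback.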
class COSMOSGalaxyBuilder(GalaxyBuilder):
"""A GalaxyBuilder subclass for making COSMOS-based galaxies. It uses keyword arguments to
decide whether to use a parametric version of a particular galaxy, or the real HST image.
"""
## Decide on some meta-parameters.
# What fraction of flux should we require there to be in the postage stamp?
min_flux_frac = 0.99
# What is minimum resolution factor to use?
min_resolution = 1./3
# Size rescaling to apply to simulate a fainter sample. See the GREAT3 handbook for details.
size_rescale = 0.6
# Names for RealGalaxyCatalog files, including selection criteria.
rgc_file = 'real_galaxy_catalog_23.5.fits'
rgc_fits_file = 'real_galaxy_catalog_23.5_fits.fits'
rgc_im_sel_file = 'real_galaxy_image_selection_info.fits'
rgc_sel_file = 'real_galaxy_selection_info.fits'
rgc_shapes_file = 'real_galaxy_23.5_shapes.fits'
rgc_dmag_file = 'real_galaxy_deltamag_info.fits'
rgc_mask_file = 'real_galaxy_mask_info.fits'
# Minimum S/N to allow: something a bit below 20, so we don't have an absurdly sharp cutoff at
# our target.
sn_min = 17.0
# Maximum S/N to allow: let's not try to make super bright objects that might not be used in a
# typical shear analysis.
sn_max = 100.0
# Values of seeing for which we have precomputed results.
min_ground_ind = 2 # array index 2 is the first for ground
min_ground_fwhm = 0.5 # minimum value of FWHM for which results are tabulated
ground_dfwhm = 0.15 # spacing between tabulated FWHM values
ground_nfwhm = 4 # number of FWHM values for ground
noise_fail_val = 1.e-10 # number to assign for negative noise variance values, then discard
# Set up empty cache for B-mode shape noise galsim.PowerSpectrum object (will only use if
# variable shear)
cached_ps = None
# And define parameters needed for PS generation for B-mode intrinsic shear field, for the
# variable shear case.
kmin_factor = 1
kmax_factor = 16
def __init__(self, real_galaxy, obs_type, shear_type, multiepoch, gal_dir, preload, gal_pairs):
"""Construct for this type of branch.
"""
# Basic parameters used by GalaxyBuilder to make decisions about galaxy population
self.real_galaxy = real_galaxy
self.obs_type = obs_type
self.shear_type = shear_type
self.multiepoch = multiepoch
self.gal_dir = gal_dir
if self.real_galaxy == True:
self.preload = preload
else:
self.preload = False
self.gal_pairs = gal_pairs
def generateSubfieldParameters(self, rng, subfield_index):
# At this point, we only want to generate schema. Everything else happens when making the
# catalog. The schema are different for real_galaxy and parametric galaxy branches.
# The schema will include a place-holder for the approximate S/N value per galaxy based on
# pre-computed numbers. We used this in our tests of the catalog.
if self.real_galaxy:
gal_schema = [("rot_angle_radians", float), ("gal_sn", float), ("cosmos_ident", int),
("size_rescale", float), ("flux_rescale", float),
("g1_intrinsic", float), ("g2_intrinsic", float)]
else:
gal_schema=[("bulge_n", float), ("bulge_hlr", float),
("bulge_q", float), ("bulge_beta_radians", float), ("bulge_flux", float),
("disk_hlr", float), ("disk_q", float),
("disk_beta_radians", float), ("disk_flux", float), ("gal_sn", float),
("cosmos_ident", int), ("g1_intrinsic", float), ("g2_intrinsic", float)]
return dict(schema=gal_schema, subfield_index=subfield_index)
def generateCatalog(self, rng, catalog, parameters, variance, noise_mult, seeing=None):
# Set up basic selection.
# For space, the resolution and other selection criteria are one-dimensional arrays.
# For ground, they are lists of galsim.LookupTables that can be used to interpolate to our
# value of FWHM. However, we should watch out for the min / max values of FWHM, so let's
# check for those first.
if self.obs_type == "ground":
tmp_seeing = seeing
if tmp_seeing < self.min_ground_fwhm:
tmp_seeing = self.min_ground_fwhm
max_ground_fwhm = self.min_ground_fwhm + self.ground_dfwhm*(self.ground_nfwhm-1)
if tmp_seeing > max_ground_fwhm:
tmp_seeing = max_ground_fwhm
# Note: For the multi-epoch case, the `seeing` value that was passed in was some effective
# value over all the epochs; for variable PSF, it was an effective value taking into account
# variation within the image.
# If we haven't yet read in the real galaxy catalog yet (meaning this is the first catalog
# we're generating for this branch), then do so now, and save it for later calls of
# generateCatalog():
if not hasattr(self,'rgc'):
# Read in RealGalaxyCatalog, fits.
self.rgc = galsim.RealGalaxyCatalog(self.rgc_file, dir=self.gal_dir,
preload=self.preload)
self.fit_catalog = pyfits.getdata(os.path.join(self.gal_dir, self.rgc_fits_file))
# Read in precomputed shapes file, to use for B-mode shape noise and for overall
# selection.
self.shapes_catalog = pyfits.getdata(os.path.join(self.gal_dir,
self.rgc_shapes_file))
# Read in basic selection flags.
self.selection_catalog = pyfits.getdata(os.path.join(self.gal_dir, self.rgc_sel_file))
# This vector is just a predetermined choice of whether to use bulgefit or sersicfit.
self.use_bulgefit = self.selection_catalog.field('use_bulgefit')[:,0]
# Read in selection flags based on the images:
# Note: technically this isn't necessary for parametric fit branches, but in reality
# these remove objects that we probably don't want in either place (e.g., too-low
# surface brightness objects that show up as UFOs) and keep object selection consistent.
self.im_selection_catalog = pyfits.getdata(os.path.join(self.gal_dir,
self.rgc_im_sel_file))
# Get the S/N in the original image, measured with an elliptical Gaussian filter
# function.
self.original_sn = self.im_selection_catalog.field('sn_ellip_gauss')
# If it's a ground-based catalog, set up LookupTables to interpolate the minimum
# variance post-whitening between FWHM values. It is important to maintain consistency
# between the FWHM values used for the precomputation of minimum variances and the
# `fwhm_arr` that we build here. The FWHM values that were used are specified as
# command-line arguments to the run_props.py script in inputs/galdata/; to see which
# arguments were used and therefore FWHM values adopted, see the files pbs_props*.sh in
# that directory.
if self.obs_type == "ground":
fwhm_arr = self.min_ground_fwhm + self.ground_dfwhm*np.arange(self.ground_nfwhm)
self.noise_min_var = []
for obj in self.im_selection_catalog:
tmp_min_var = obj.field('min_var_white')[2:]
self.noise_min_var.append(galsim.LookupTable(fwhm_arr,tmp_min_var,f_log=True))
# Otherwise, for space, save a single set of results depending on whether it's single
# epoch (smaller pixels) or multiepoch (bigger pixels).
else:
# Note, if we wanted to use the actual minimum variances post-whitening on a
# per-experiment basis, we'd do
# if self.multiepoch:
# self.noise_min_var = self.im_selection_catalog.field('min_var_white')[:,1]
# else:
# self.noise_min_var = self.im_selection_catalog.field('min_var_white')[:,0]
# However, this would result in different minimum variances and galaxy selection for
# single vs. multiepoch because the pixel scales are different for space sims for
# the two cases. So, we just use the single-epoch minimum variances, which are
# higher, eliminating more objects from the sample. This is conservative for the
# multiepoch sims, but it means the selection is consistent for the two cases, which
# will be helpful in interpreting results.
self.noise_min_var = self.im_selection_catalog.field('min_var_white')[:,0]
# Read in catalog that tells us how the galaxy magnitude from Claire's fits differs from
# that in the actual COSMOS catalog. This can be used to exclude total screwiness,
# objects overly affected by blends, and other junk.
dmag_catalog = pyfits.getdata(os.path.join(self.gal_dir, self.rgc_dmag_file))
self.dmag = dmag_catalog.field('delta_mag')
# Read in the catalog that tells us which galaxies might have masking issues that make
# the postage stamps too funky to use.
mask_catalog = pyfits.getdata(os.path.join(self.gal_dir, self.rgc_mask_file))
self.average_mask_adjacent_pixel_count = \
mask_catalog['average_mask_adjacent_pixel_count']
self.peak_image_pixel_count = mask_catalog['peak_image_pixel_count']
self.peak_image_pixel_count[self.peak_image_pixel_count == 0.] = 1.e-4
self.min_mask_dist_pixels = mask_catalog['min_mask_dist_pixels']
# If this is a ground-based calculation, then set up LookupTables to interpolate
# max_variance and resolutions between FWHM values.
if self.obs_type == "ground":
self.noise_max_var = []
self.flux_frac = []
self.resolution = []
for obj in self.selection_catalog:
tmp_max_var = obj.field('max_var')[1:]
if np.any(tmp_max_var < self.noise_fail_val):
tmp_max_var = np.zeros_like(tmp_max_var) + self.noise_fail_val
self.noise_max_var.append(galsim.LookupTable(fwhm_arr,tmp_max_var,f_log=True))
self.flux_frac.append(galsim.LookupTable(fwhm_arr,obj.field('flux_frac')[1:]))
self.resolution.append(galsim.LookupTable(fwhm_arr,obj.field('resolution')[1:]))
# But if it's a space-based catalog, then just have arrays for each of the selection
# flags.
else:
self.noise_max_var = self.selection_catalog.field('max_var')[:,0]
self.flux_frac = self.selection_catalog.field('flux_frac')[:,0]
self.resolution = self.selection_catalog.field('resolution')[:,0]
# First we set up the quantities that we need to apply basic selection, and that depend on
# the type of simulation (ground / space, and ground-based seeing):
# able to measure shapes for basic tests of catalog
# fraction of flux in our postage stamp size [given seeing]
# resolution [given seeing]
# min noise variance post-whitening
indices = np.arange(self.rgc.nobjects)
if self.obs_type == "space":
noise_max_var = self.noise_max_var
flux_frac = self.flux_frac
resolution = self.resolution
noise_min_var = self.noise_min_var
else:
noise_max_var = np.zeros(self.rgc.nobjects)
flux_frac = np.zeros(self.rgc.nobjects)
resolution = np.zeros(self.rgc.nobjects)
noise_min_var = np.zeros(self.rgc.nobjects)
for gal_ind in range(self.rgc.nobjects):
noise_max_var[gal_ind] = self.noise_max_var[gal_ind](tmp_seeing)
flux_frac[gal_ind] = self.flux_frac[gal_ind](tmp_seeing)
resolution[gal_ind] = self.resolution[gal_ind](tmp_seeing)
noise_min_var[gal_ind] = self.noise_min_var[gal_ind](tmp_seeing)
# We need to estimate approximate S/N values for each object, by comparing with a
# precalculated noise variance for S/N=20 that comes from using the fits. Some of the
# values are junk for galaxies that have failure flags, so we will only do the calculation
# for those with useful values of noise_max_var. Here we use the variance that isn't for
# deep fields even if we're in a deep field, because we want to require that the object
# would be seen at S/N>=20 if the field weren't deep.
approx_sn_gal = np.zeros(self.rgc.nobjects)
approx_sn_gal[noise_max_var > self.noise_fail_val] = \
20.0*np.sqrt(noise_max_var[noise_max_var > self.noise_fail_val] / variance)
# Apply all selections:
# (1) no problematic flags,
# (2) magnitudes in the parametric fit catalog and COSMOS catalog should differ by <=1,
# (3) a large fraction of the flux should be in the postage stamp,
# (4) object should be resolved,
# (5, 6) SN should be in the [min, max] range that we want for the simulations,
# (7) maximum noise variance to add should not be nonsense.
#
# And for variable shear, we need to require a shape to be used for B-mode shape noise.
# This means proper shape measurement flags, and |e| < 1. However, for uniformity of
# selection we will also impose this cut on constant shear sims.
#
# In addition, for realistic galaxies, we require
# (a) that the S/N in the original image be >=20 [really we want higher since we're adding
# noise, but mostly we're using this as a loose filter to get rid of junk], and
# (b) that the requested noise variance in the sims should be > the minimum noise variance
# that is possible post-whitening.
# We impose these even for parametric fits, just because the failures tend to be ones with
# problematic fits as well, and impose the cut for the variance in the deep fields since we
# want to represent the same variance in both deep and wide. We don't include any change in
# variance for multiepoch because the original noise gets decreased by some factor as well.
# We include a 4% fudge factor here because the minimum noise variance post-whitening was
# estimated in a preprocessing step that didn't include some details of the real
# simulations.
#
# And yet another set of cuts: to avoid postage stamps with poor masking of nearby objects /
# shredding of the central object, we exclude objects that have a mask pixel nearer to the
# center than 11 pixels (0.33"). And we exclude objects whose nearest masked pixel has a
# flux brighter than 0.2 * the brightest unmasked pixel.
# The `mask_cond` array is True for all objects that are not excluded.
e1 = self.shapes_catalog.field('e1')
e2 = self.shapes_catalog.field('e2')
e_test = np.sqrt(e1**2 + e2**2)
mask_cond = np.logical_or.reduce(
[self.min_mask_dist_pixels > 11,
self.average_mask_adjacent_pixel_count/self.peak_image_pixel_count < 0.2
])
# Impose alllll of the above conditions on the catalog.
cond = np.logical_and.reduce(
[self.selection_catalog.field('to_use') == 1,
np.abs(self.dmag) < 0.8,
flux_frac >= self.min_flux_frac,
resolution >= self.min_resolution,
approx_sn_gal >= self.sn_min,
approx_sn_gal <= self.sn_max,
noise_max_var > self.noise_fail_val,
self.shapes_catalog.field('do_meas') > -0.5,
e_test < 1.,
self.original_sn >= 20.,
noise_min_var <= 0.96*variance*constants.deep_variance_mult,
mask_cond
])
useful_indices = indices[cond]
print " / Possible galaxies: ",len(useful_indices)
# Note on the two image-based cuts: without them, for some example run, we lost a few %
# (ground) and ~20% (space) of the sample. For the latter, the change is driven by the fact
# that more noise has to be added to whiten, so it's harder to pass the minimum-variance cut
# for the deep fields.
# Note on the two mask cuts: when we impose these, the sample for space-based sims decreases
# by another 1%.
# In the next bit, we choose a random selection of objects to use out of the above
# candidates. Note that this part depends on constant vs. variable shear and on whether or
# not we're using 90 degree rotated pairs.
n_to_select = constants.nrows*constants.ncols
if self.shear_type == "constant" and self.gal_pairs:
n_to_select /= 2
# Select an index out of these, at random, and with replacement; however, need to apply
# size-dependent weight because of failure to make postage stamps preferentially for large
# galaxies. Note: no weighting to account for known LSS fluctuations in COSMOS field.
use_indices = np.zeros(n_to_select)
for ind in range(n_to_select):
# Select a random value in [0...len(useful_indices)-1], which tells the index in the
# rgc.
rand_value = int(np.floor(rng() * len(useful_indices)))
rand_index = np.int(useful_indices[rand_value])
# Also select a test random number from 0-1.
test_rand = rng()
# If that test random number is > the weight for that galaxy in the rgc, then try again;
# otherwise, keep.
while test_rand > self.rgc.weight[rand_index]:
rand_value = int(np.floor(rng() * len(useful_indices)))
rand_index = useful_indices[rand_value]
test_rand = rng()
use_indices[ind] = np.int(rand_index)
# Set up arrays with indices and rotation angles to ensure shape noise cancellation. The
# method of doing this depends on the shear type.
all_indices = np.zeros(constants.nrows*constants.ncols)
rot_angle = np.zeros(constants.nrows*constants.ncols)
# However, we first get some basic information about the galaxies which will be necessary
# for tests of shape noise cancellation, whether for constant or variable shear.
e1 = self.shapes_catalog.field('e1')
e2 = self.shapes_catalog.field('e2')
emag = np.sqrt(e1**2 + e2**2)
ephi = 0.5 * np.arctan2(e2, e1)
# Only do e->g conversion for those with |e|<1; those that violate that condition should
# already have been excluded using flags.
gmag = np.zeros_like(emag)
gmag[emag<1.] = emag[emag<1.] / (1.0+np.sqrt(1.0 - emag[emag<1.]**2))
if self.shear_type == "constant":
if self.gal_pairs:
# Make an array containing all indices (each repeated twice) but with rotation angle
# of pi/2 for the second set. Include a random rotation to get rid of any coherent
# shear in the COSMOS galaxies.
all_indices[0:n_to_select] = use_indices
all_indices[n_to_select:constants.nrows*constants.ncols] = use_indices
for ind in range(0,n_to_select):
rot_angle[ind] = rng() * np.pi
rot_angle[n_to_select:constants.nrows*constants.ncols] = np.pi/2. + \
rot_angle[0:n_to_select]
# But it would be kind of silly to include them in this order, so scramble them. My
# favorite python routine for this is np.random.permutation, but we have to make
# sure to give it a seed (chosen from our own RNG) so that this process will be
# repeatable.
np.random.seed(int(rng() * 1000))
perm_array = np.random.permutation(constants.nrows*constants.ncols)
all_indices = all_indices[perm_array]
rot_angle = rot_angle[perm_array]
else:
all_indices = use_indices
for ind in range(0,n_to_select):
rot_angle[ind] = rng() * np.pi
else:
# For variable shear, it's more complicated: we need B-mode shape noise. To generate
# it, get the intrinsic shears from our precomputed tables.
g1 = gmag[use_indices.astype(int)] * np.cos(2.*ephi[use_indices.astype(int)])
g2 = gmag[use_indices.astype(int)] * np.sin(2.*ephi[use_indices.astype(int)])
gvar = g1.var() + g2.var()
# First, generate a B-mode shape noise field, or use the tabulated one if we're not the
# first subfield in a field. We have to choose a variance based on the p(|g|) for the
# galaxies that we're actually using (will assume this is basically constant across
# subfields, which should be true when selecting ~10k galaxies).
#
# First check if cache is empty or if this is the first subfield in a field, so we know
# whether to use cached shape noise field (we use n_subfields_per_field based on
# variable shear, which is the same for constant or variable PSF, so fudge this since
# galaxy builders don't have a variable_psf attribute). Much of the code below comes
# from shear.py, which does operationally the same caching process to the cosmological
# shear field, since that also needs to be determined at the field level.
n_subfields_per_field = constants.n_subfields_per_field['variable'][True]
if self.cached_ps is None or \
parameters["galaxy"]["subfield_index"] % n_subfields_per_field == 0:
# If this is the first subfield in the field, generate a new B-mode shape noise
# field.
#
# Begin by calculating the grid_spacing, as this impacts the scaling of the PS.
n_grid = constants.subfield_grid_subsampling * constants.nrows
grid_spacing = constants.image_size_deg / n_grid
# Then build the power spectrum.
self.cached_ps = galsim.PowerSpectrum(
b_power_function=lambda k_arr : (
gvar * np.ones_like(k_arr) * grid_spacing**2
/ (float(self.kmax_factor**2) - 1. / (self.kmin_factor**2))), # Get the right variance
units=galsim.degrees
)
# Define the grid on which we want to get the intrinsic shears.
# This is a little tricky: we have a setup for subfield locations within the field
# that is defined in builder.py function generateSubfieldOffsets(). The first
# subfield is located at the origin, and to represent it alone, we would need a
# constants.nrows x constants.ncols grid of shears. But since we subsample by a
# parameter given as constants.subfield_grid_subsampling, each grid dimension must
# be larger by that amount.
if constants.nrows != constants.ncols:
raise NotImplementedError("Currently variable shear grids require nrows=ncols")
# Run buildGrid() to get the shears and convergences on this grid. We use a value
# of `kmax_factor` that is relatively large, motivated by tests that suggested that
# B-mode shape noise was not as effective as we need it to be until large values
# were chosen.
grid_center = 0.5 * (constants.image_size_deg - grid_spacing)
self.cached_ps.buildGrid(grid_spacing = grid_spacing,
ngrid = n_grid,
units = galsim.degrees,
rng = rng,
center = (grid_center, grid_center),
kmin_factor = self.kmin_factor,
kmax_factor = self.kmax_factor)
# At this point, we have either built up a new cached B-mode shape noise field, or
# ascertained that we should use a cached one. We can now obtain g1 and g2 values for
# this B-mode shape noise field at the positions of the galaxies in this particular
# subfield. This is fastest if done all at once, with one call to getLensing(). And
# this is actually slightly tricky, because we have to take into account:
# (1) The position of the galaxy within the subfield.
# (2) The offset of the subfield with respect to the field.
# And make sure we've gotten the units right for both of these. We are ignoring
# centroid shifts of order 1 pixel (max 0.2" for ground data) which can occur within an
# image.
#
# We can define object indices in x, y directions - i.e., make indices that range
# from 0 to constants.nrows-1.
xsize = constants.xsize[self.obs_type][self.multiepoch]
ysize = constants.ysize[self.obs_type][self.multiepoch]
x_ind = (catalog["x"]+1+0.5*xsize)/xsize-1
y_ind = (catalog["y"]+1+0.5*ysize)/ysize-1
# Turn this into (x, y) positions within the subfield, in degrees.
x_pos = x_ind * constants.image_size_deg / constants.nrows
y_pos = y_ind * constants.image_size_deg / constants.ncols
# But now we have to add the subfield offset. These are calculated as a fraction of the
# separation between galaxies, so we have to convert to degrees.
x_pos += parameters["subfield_offset"][0] * constants.image_size_deg / constants.nrows
y_pos += parameters["subfield_offset"][1] * constants.image_size_deg / constants.ncols
g1_b, g2_b = self.cached_ps.getShear(pos=(x_pos, y_pos), units=galsim.degrees)
gmag_b = np.sqrt(g1_b**2 + g2_b**2)
if False:
# DEBUG: Plot the histogram of gmag to check it is reasonable
import matplotlib.pyplot as plt
print "Mean, median gmag_b = "+str(gmag_b.mean())+", "+str(np.median(gmag_b))
plt.hist(gmag_b, range=(0, 1), bins=50); plt.show()
if np.any(gmag_b > 1.):
# The shear field generated with this B-mode power function is not limited to
# |g|<1. We have to fix these:
fix_ind = gmag_b > 1.
g1_b[fix_ind] /= gmag_b[fix_ind]**2
g2_b[fix_ind] /= gmag_b[fix_ind]**2
gmag_b[fix_ind] = np.sqrt(g1_b[fix_ind]**2 + g2_b[fix_ind]**2)
gphi_b = 0.5 * np.arctan2(g2_b, g1_b)
# Match |g| between real galaxies and B-mode shape noise field according to ranking.
ind_sorted_gmag_b = np.argsort(gmag_b.flatten())
ind_sorted_gmag = np.argsort(gmag[use_indices.astype(int)].flatten())
sorted_gmag_dict = {}
for ind in range(constants.nrows * constants.ncols):
sorted_gmag_dict[ind_sorted_gmag_b[ind]] = use_indices[ind_sorted_gmag[ind]]
sorted_use_indices = np.array([sorted_gmag_dict[ind]
for ind in range(constants.nrows*constants.ncols)])
# Get the rotation angles right, once we've done the matching.
target_beta = gphi_b.flatten()
actual_beta = ephi[sorted_use_indices.astype(int)]
all_indices = sorted_use_indices.astype(int)
rot_angle = target_beta - actual_beta
# To populate catalog with fluxes, we need the number of epochs into which the flux should
# be split.
if self.multiepoch:
n_epochs = constants.n_epochs
else:
n_epochs = 1
# Now that we know which galaxies to use in which order, and with what rotation angles, we
# will populate the catalog. The next bit of code depends quite a bit on whether it is a
# real_galaxy or parametric galaxy experiment. This is also where we specify flux and size
# rescalings to mimic the deeper I<25 sample.
ind = 0
for record in catalog:
# Save COSMOS ID and intrinsic shape information, regardless of whether this is a real
# galaxy or a parametric one.
record["cosmos_ident"] = self.fit_catalog[all_indices[ind]].field('ident')
if self.shear_type == "variable":
final_g = galsim.Shear(g = gmag[all_indices[ind]],
beta=target_beta[ind]*galsim.radians)
else:
final_g = galsim.Shear(
g = gmag[all_indices[ind]],
beta=(ephi[all_indices[ind]]+rot_angle[ind])*galsim.radians
)
record["g1_intrinsic"] = final_g.g1
record["g2_intrinsic"] = final_g.g2
# Now specialize to save the appropriate info for real galaxies or parametric ones.
if self.real_galaxy:
record["gal_sn"] = approx_sn_gal[all_indices[ind]]
record["rot_angle_radians"] = rot_angle[ind]
record["size_rescale"] = self.size_rescale
record["flux_rescale"] = 1. / n_epochs
else:
# Information that we will save for parametric galaxies depends on whether we use 1-
# or 2-component fits.
if self.use_bulgefit[all_indices[ind]] == 1.:
params = self.fit_catalog[all_indices[ind]].field('bulgefit')
(fit_disk_flux, fit_disk_hlr, fit_disk_n, fit_disk_q, _, _, _, fit_disk_beta,
fit_bulge_flux, fit_bulge_hlr, fit_bulge_n, fit_bulge_q, _, _, _,
fit_bulge_beta) = params
bulge_q = fit_bulge_q
# Fit files store position angles as radians.
bulge_beta = fit_bulge_beta*galsim.radians + rot_angle[ind]*galsim.radians
# Half-light radii in files need several corrections:
# (1) They are in pixels, so we multiply by 0.03" (the coadded pixel scale)
# to get arcsec.
# (2) We are rescaling the galaxy sizes by self.size_rescale in order to
# mimic a fainter galaxy sample in which galaxies are naturally smaller,
# as described in the handbook.
# (3) The files give the half-light radius along the major axis, but for
# GalSim we want the azimuthally-averaged half-light radius, so we
# multiply by sqrt(q)=sqrt(b/a).
bulge_hlr = 0.03*self.size_rescale*np.sqrt(bulge_q)*fit_bulge_hlr
# Fluxes in the files require several corrections:
# (1) The "flux" values are actually surface brightness at the half-light
# radius along the major axis. Thus we need to integrate the
# surface-brightness profile to get the total flux, which introduces
# 2*pi*(half-light radius)^2 * some Sersic n-dependent fudge factors
# (Gamma functions, etc.). The 3.607 in the line below is the Sersic
# n-dependent factor for n=4. Note that the full expression is given in
# the lines of code below for the Sersic-fit profiles.
# (2) The division by self.size_rescale**2 is just to correct for the fact
# that the bulge half-light radii have already been decreased by this
# factor, but that factor wasn't in the original fit profiles and hence
# should not go into the flux calculation.
# (3) The division by 0.03**2 is because the fits assumed the images
# were flux when really they were surface brightness, so the fluxes from
# the fit outputs are too low by 0.03**2.
bulge_flux = \
2.0*np.pi*3.607*(bulge_hlr**2)*fit_bulge_flux/self.size_rescale**2/(0.03**2)
disk_q = fit_disk_q
disk_beta = fit_disk_beta*galsim.radians + rot_angle[ind]*galsim.radians
disk_hlr = 0.03*self.size_rescale*np.sqrt(disk_q)*fit_disk_hlr # arcsec
# Here the 1.901 is the Sersic n-dependent factor described above, but for n=1.
disk_flux = \
2.0*np.pi*1.901*(disk_hlr**2)*fit_disk_flux/self.size_rescale**2/(0.03**2)
record["gal_sn"] = approx_sn_gal[all_indices[ind]]
bulge_frac = bulge_flux / (bulge_flux + disk_flux)
record["bulge_n"] = 4.0
record["bulge_hlr"] = bulge_hlr
record["bulge_q"] = bulge_q
record["bulge_beta_radians"] = bulge_beta/galsim.radians
record["bulge_flux"] = bulge_flux / n_epochs
record["disk_hlr"] = disk_hlr
record["disk_q"] = disk_q
record["disk_beta_radians"] = disk_beta/galsim.radians
record["disk_flux"] = disk_flux / n_epochs
else:
# Make a single Sersic model instead
params = self.fit_catalog[all_indices[ind]].field('sersicfit')
(fit_gal_flux, fit_gal_hlr, fit_gal_n, fit_gal_q, _, _, _, fit_gal_beta) = \
params
gal_n = fit_gal_n
# Fudge this if it is at the edge of the allowed n values. Now that GalSim #325
# and #449 allow Sersic n in the range 0.3<=n<=6, the only problem is that the
# fits occasionally go as low as n=0.2.
if gal_n < 0.3: gal_n = 0.3
gal_q = fit_gal_q
gal_beta = fit_gal_beta*galsim.radians + rot_angle[ind]*galsim.radians
gal_hlr = 0.03*self.size_rescale*np.sqrt(gal_q)*fit_gal_hlr
# Below is the calculation of the full Sersic n-dependent quantity that goes
# into the conversion from surface brightness to flux, which here we're calling
# 'prefactor'. In the n=4 and n=1 cases above, this was precomputed, but here
# we have to calculate for each value of n.
tmp_ser = galsim.Sersic(gal_n, half_light_radius=1.)
gal_bn = (1./tmp_ser.getScaleRadius())**(1./gal_n)
prefactor = gal_n * _gammafn(2.*gal_n) * math.exp(gal_bn) / (gal_bn**(2.*gal_n))
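                    # Equivalently: total flux = 2*pi * n * Gamma(2n) * exp(b_n) / b_n**(2n)
                    #               * r_e**2 * (surface brightness at r_e),
                    # which reduces to the precomputed factors 3.607 (n=4) and 1.901 (n=1)
                    # used for the two-component fits above.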
gal_flux = 2.*np.pi*prefactor*(gal_hlr**2)*fit_gal_flux/self.size_rescale**2/0.03**2
record["gal_sn"] = approx_sn_gal[all_indices[ind]]
record["bulge_n"] = gal_n
record["bulge_hlr"] = gal_hlr
record["bulge_q"] = gal_q
record["bulge_beta_radians"] = gal_beta/galsim.radians
record["bulge_flux"] = gal_flux / n_epochs
record["disk_hlr"] = 1.0
record["disk_q"] = 1.0
record["disk_beta_radians"] = 0.0
record["disk_flux"] = 0.0
ind += 1
def makeConfigDict(self):
"""Routine to write the galaxy-related parts of the config file used by GalSim to generate
images. In practice, it checks whether the galaxies are real or parametric, and then calls
functions that are specialized to those cases."""
if self.real_galaxy:
return self.makeConfigDictReal()
else:
return self.makeConfigDictParametric()
def makeConfigDictParametric(self):
"""Routine to write the galaxy-related parts of the config file used by GalSim to generate
images for parametric galaxies."""
d = {
'type' : 'Sum',
'items' : [
{
'type' : 'Sersic',
'n' : { 'type' : 'Catalog', 'col' : 'bulge_n' },
'half_light_radius' : { 'type' : 'Catalog', 'col' : 'bulge_hlr' },
'ellip' : {
'type' : 'QBeta',
'q' : { 'type' : 'Catalog', 'col' : 'bulge_q' },
'beta' : { 'type' : 'Rad',
'theta' : { 'type' : 'Catalog', 'col' : 'bulge_beta_radians' }
},
},
'flux' : { 'type' : 'Catalog', 'col' : 'bulge_flux' }
},
{
'type' : 'Exponential',
'half_light_radius' : { 'type' : 'Catalog', 'col' : 'disk_hlr' },
'ellip' : {
'type' : 'QBeta',
'q' : { 'type' : 'Catalog', 'col' : 'disk_q' },
'beta' : { 'type' : 'Rad',
'theta' : { 'type' : 'Catalog', 'col' : 'disk_beta_radians' }
},
},
'flux' : { 'type' : 'Catalog', 'col' : 'disk_flux' }
}
]
}
return d
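    # Note (illustrative, not from the original source): the dict returned above is
    # meant to serve as the 'gal' field of a GalSim config dict, with each
    # {'type': 'Catalog', 'col': ...} entry resolved against the catalog filled in
    # by generateCatalog().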
def makeConfigDictReal(self):
"""Routine to write the galaxy-related parts of the config file used by GalSim to generate
images for real galaxies."""
noise_pad_size = int(np.ceil(constants.xsize[self.obs_type][self.multiepoch] *
np.sqrt(2.) *
constants.pixel_scale[self.obs_type][self.multiepoch]))
d = {
'type' : 'RealGalaxy',
'id' : { 'type' : 'Catalog', 'col' : 'cosmos_ident' },
'noise_pad_size' : noise_pad_size,
'dilate' : { 'type' : 'Catalog', 'col' : 'size_rescale' },
'scale_flux' : { 'type' : 'Catalog', 'col' : 'flux_rescale' },
'rotate' : { 'type' : 'Rad',
'theta' : { 'type' : 'Catalog', 'col' : 'rot_angle_radians' }
},
# remove this line because Galsim doesn't like it: 'whiten' : True
}
return d
def makeGalSimObject(self, record, parameters, xsize, ysize, rng):
"""Routine to return a galsim.GSObject corresponding to a particular galaxy in the catalog."""
if self.real_galaxy:
return self.makeGalSimObjectReal(record, parameters, xsize, ysize, rng)
else:
return self.makeGalSimObjectParametric(record, parameters, xsize, ysize, rng)
def makeGalSimObjectParametric(self, record, parameters, xsize, ysize, rng):
"""Routine to return a galsim.GSObject corresponding to a particular (parametric) galaxy in
the catalog."""
# Specify sizes in arcsec.
if record['bulge_flux'] > 0.:
# First make a bulge
bulge = galsim.Sersic(record['bulge_n'], flux = record['bulge_flux'],
half_light_radius = record['bulge_hlr'])
if record['bulge_q'] < 1.:
bulge.applyShear(q=record['bulge_q'],
beta=record['bulge_beta_radians']*galsim.radians)
# Then optionally make a disk
if record['disk_flux'] > 0.:
disk = galsim.Exponential(flux = record['disk_flux'],
half_light_radius = record['disk_hlr'])
if record['disk_q'] < 1.:
disk.applyShear(q=record['disk_q'],
beta=record['disk_beta_radians']*galsim.radians)
if record['bulge_flux'] > 0.:
return bulge+disk
else:
return disk
else:
return bulge
def makeGalSimObjectReal(self, record, parameters, xsize, ysize, rng):
"""Routine to return a galsim.GSObject corresponding to a particular (real) galaxy in
the catalog."""
# First set up the basic RealGalaxy. But actually, to do that, we need to check that we
# have a RealGalaxyCatalog already read in. This happens in the generateCatalog step, but
# if we are running great3.run() only for later steps in the analysis process, then
# generateCatalog isn't run, so the galaxy builder won't have a stored RealGalaxyCatalog
# attribute. Check and read it in if necessary, before trying to make a RealGalaxy.
if not hasattr(self,'rgc'):
# Read in RealGalaxyCatalog, fits.
self.rgc = galsim.RealGalaxyCatalog(self.rgc_file, dir=self.gal_dir,
preload=self.preload)
noise_pad_size = int(np.ceil(constants.xsize[self.obs_type][self.multiepoch] *
np.sqrt(2.) *
constants.pixel_scale[self.obs_type][self.multiepoch]))
gal = galsim.RealGalaxy(self.rgc, rng=rng, id=record['cosmos_ident'],
noise_pad_size=noise_pad_size)
# Rescale its size.
gal.applyDilation(record['size_rescale'])
# Rotate.
gal.applyRotation(record['rot_angle_radians']*galsim.radians)
# Rescale its flux.
gal *= record['flux_rescale']
return gal
| bsd-3-clause |
shyamalschandra/scikit-learn | sklearn/utils/extmath.py | 22 | 25569 | """
Extended math utilities.
"""
# Authors: Gael Varoquaux
# Alexandre Gramfort
# Alexandre T. Passos
# Olivier Grisel
# Lars Buitinck
# Stefan van der Walt
# Kyle Kastner
# Giorgio Patrini
# License: BSD 3 clause
from __future__ import division
from functools import partial
import warnings
import numpy as np
from scipy import linalg
from scipy.sparse import issparse
from . import check_random_state
from .fixes import np_version
from ._logistic_sigmoid import _log_logistic_sigmoid
from ..externals.six.moves import xrange
from .sparsefuncs_fast import csr_row_norms
from .validation import check_array
from ..exceptions import NonBLASDotWarning
def norm(x):
"""Compute the Euclidean or Frobenius norm of x.
Returns the Euclidean norm when x is a vector, the Frobenius norm when x
is a matrix (2-d array). More precise than sqrt(squared_norm(x)).
"""
x = np.asarray(x)
nrm2, = linalg.get_blas_funcs(['nrm2'], [x])
return nrm2(x)
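# Example (illustrative): norm(np.array([3., 4.])) == 5.0, computed via BLAS nrm2.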
# Newer NumPy has a ravel that needs less copying.
if np_version < (1, 7, 1):
_ravel = np.ravel
else:
_ravel = partial(np.ravel, order='K')
def squared_norm(x):
"""Squared Euclidean or Frobenius norm of x.
Returns the Euclidean norm when x is a vector, the Frobenius norm when x
is a matrix (2-d array). Faster than norm(x) ** 2.
"""
x = _ravel(x)
return np.dot(x, x)
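# Example (illustrative): squared_norm(np.array([3., 4.])) == 25.0; for a 2-d array
# this is the squared Frobenius norm, e.g. squared_norm(np.ones((2, 2))) == 4.0.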
def row_norms(X, squared=False):
"""Row-wise (squared) Euclidean norm of X.
Equivalent to np.sqrt((X * X).sum(axis=1)), but also supports CSR sparse
matrices and does not create an X.shape-sized temporary.
Performs no input validation.
"""
if issparse(X):
norms = csr_row_norms(X)
else:
norms = np.einsum('ij,ij->i', X, X)
if not squared:
np.sqrt(norms, norms)
return norms
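# Example (illustrative): row_norms(np.array([[3., 4.], [0., 12.]])) gives
# array([5., 12.]); pass squared=True to skip the final sqrt.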
def fast_logdet(A):
"""Compute log(det(A)) for A symmetric
Equivalent to : np.log(nl.det(A)) but more robust.
It returns -Inf if det(A) is non positive or is not defined.
"""
sign, ld = np.linalg.slogdet(A)
if not sign > 0:
return -np.inf
return ld
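# Example (illustrative): fast_logdet(np.eye(3)) == 0.0, while a singular or
# negative-determinant input returns -inf instead of raising.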
def _impose_f_order(X):
"""Helper Function"""
# important to access flags instead of calling np.isfortran,
# this catches corner cases.
if X.flags.c_contiguous:
return check_array(X.T, copy=False, order='F'), True
else:
return check_array(X, copy=False, order='F'), False
def _fast_dot(A, B):
if B.shape[0] != A.shape[A.ndim - 1]: # check adopted from '_dotblas.c'
raise ValueError
if A.dtype != B.dtype or any(x.dtype not in (np.float32, np.float64)
for x in [A, B]):
warnings.warn('Falling back to np.dot. '
'Data must be of same type of either '
'32 or 64 bit float for the BLAS function, gemm, to be '
'used for an efficient dot operation. ',
NonBLASDotWarning)
raise ValueError
if min(A.shape) == 1 or min(B.shape) == 1 or A.ndim != 2 or B.ndim != 2:
raise ValueError
# scipy 0.9 compliant API
dot = linalg.get_blas_funcs(['gemm'], (A, B))[0]
A, trans_a = _impose_f_order(A)
B, trans_b = _impose_f_order(B)
return dot(alpha=1.0, a=A, b=B, trans_a=trans_a, trans_b=trans_b)
def _have_blas_gemm():
try:
linalg.get_blas_funcs(['gemm'])
return True
except (AttributeError, ValueError):
warnings.warn('Could not import BLAS, falling back to np.dot')
return False
# Only use fast_dot for older NumPy; newer ones have tackled the speed issue.
if np_version < (1, 7, 2) and _have_blas_gemm():
def fast_dot(A, B):
"""Compute fast dot products directly calling BLAS.
        This function calls BLAS directly while ensuring Fortran contiguity.
        This helps avoid the extra copies `np.dot` would have created.
For details see section `Linear Algebra on large Arrays`:
http://wiki.scipy.org/PerformanceTips
Parameters
----------
A, B: instance of np.ndarray
Input arrays. Arrays are supposed to be of the same dtype and to
have exactly 2 dimensions. Currently only floats are supported.
In case these requirements aren't met np.dot(A, B) is returned
instead. To activate the related warning issued in this case
execute the following lines of code:
>> import warnings
>> from sklearn.exceptions import NonBLASDotWarning
>> warnings.simplefilter('always', NonBLASDotWarning)
"""
try:
return _fast_dot(A, B)
except ValueError:
# Maltyped or malformed data.
return np.dot(A, B)
else:
fast_dot = np.dot
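# Illustrative sketch (not part of the original module): fast_dot silently
# falls back to np.dot for inputs the BLAS gemm path cannot handle, such as
# mixed dtypes, so callers always get a plain dot product back.
def _demo_fast_dot_fallback():
    A = np.ones((3, 4), dtype=np.float64)
    B = np.ones((4, 2), dtype=np.float32)  # mixed dtypes trigger the fallback
    assert np.allclose(fast_dot(A, B), np.dot(A, B))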
def density(w, **kwargs):
"""Compute density of a sparse vector
Return a value between 0 and 1
"""
if hasattr(w, "toarray"):
d = float(w.nnz) / (w.shape[0] * w.shape[1])
else:
d = 0 if w is None else float((w != 0).sum()) / w.size
return d
def safe_sparse_dot(a, b, dense_output=False):
"""Dot product that handle the sparse matrix case correctly
Uses BLAS GEMM as replacement for numpy.dot where possible
to avoid unnecessary copies.
"""
if issparse(a) or issparse(b):
ret = a * b
if dense_output and hasattr(ret, "toarray"):
ret = ret.toarray()
return ret
else:
return fast_dot(a, b)
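# Illustrative sketch (not part of the original module): with sparse operands,
# safe_sparse_dot dispatches to sparse matrix multiplication; dense_output=True
# densifies the result when it supports toarray().
def _demo_safe_sparse_dot():
    from scipy import sparse
    a = sparse.csr_matrix(np.eye(3))
    b = sparse.csr_matrix(np.arange(9.).reshape(3, 3))
    out = safe_sparse_dot(a, b, dense_output=True)
    assert isinstance(out, np.ndarray)
    assert np.allclose(out, b.toarray())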
def randomized_range_finder(A, size, n_iter=2,
power_iteration_normalizer='auto',
random_state=None):
"""Computes an orthonormal matrix whose range approximates the range of A.
Parameters
----------
A: 2D array
The input data matrix.
size: integer
Size of the return array
n_iter: integer
Number of power iterations used to stabilize the result
power_iteration_normalizer: 'auto' (default), 'QR', 'LU', 'none'
Whether the power iterations are normalized with step-by-step
QR factorization (the slowest but most accurate), 'none'
(the fastest but numerically unstable when `n_iter` is large, e.g.
typically 5 or larger), or 'LU' factorization (numerically stable
but can lose slightly in accuracy). The 'auto' mode applies no
normalization if `n_iter`<=2 and switches to LU otherwise.
.. versionadded:: 0.18
random_state: RandomState or an int seed (0 by default)
A random number generator instance
Returns
-------
    Q: 2D array
        An (A.shape[0] x size) array with orthonormal columns, the range of
        which approximates well the range of the input matrix A.
Notes
-----
Follows Algorithm 4.3 of
Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
    Halko, et al., 2009 (arXiv:0909.4061) http://arxiv.org/pdf/0909.4061
An implementation of a randomized algorithm for principal component
analysis
A. Szlam et al. 2014
"""
random_state = check_random_state(random_state)
# Generating normal random vectors with shape: (A.shape[1], size)
Q = random_state.normal(size=(A.shape[1], size))
# Deal with "auto" mode
if power_iteration_normalizer == 'auto':
if n_iter <= 2:
power_iteration_normalizer = 'none'
else:
power_iteration_normalizer = 'LU'
# Perform power iterations with Q to further 'imprint' the top
# singular vectors of A in Q
for i in range(n_iter):
if power_iteration_normalizer == 'none':
Q = safe_sparse_dot(A, Q)
Q = safe_sparse_dot(A.T, Q)
elif power_iteration_normalizer == 'LU':
Q, _ = linalg.lu(safe_sparse_dot(A, Q), permute_l=True)
Q, _ = linalg.lu(safe_sparse_dot(A.T, Q), permute_l=True)
elif power_iteration_normalizer == 'QR':
Q, _ = linalg.qr(safe_sparse_dot(A, Q), mode='economic')
Q, _ = linalg.qr(safe_sparse_dot(A.T, Q), mode='economic')
    # Sample the range of A by linear projection of Q
# Extract an orthonormal basis
Q, _ = linalg.qr(safe_sparse_dot(A, Q), mode='economic')
return Q
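# Illustrative sketch (not part of the original module): for a low-rank A, the
# Q returned above has orthonormal columns and captures nearly all of A.
def _demo_randomized_range_finder():
    rng = np.random.RandomState(0)
    A = rng.randn(100, 10).dot(rng.randn(10, 50))  # rank-10 matrix
    Q = randomized_range_finder(A, size=12, n_iter=4, random_state=0)
    assert np.allclose(Q.T.dot(Q), np.eye(Q.shape[1]), atol=1e-10)
    # Projecting A onto range(Q) loses almost nothing for a rank-10 matrix.
    residual = np.linalg.norm(A - Q.dot(Q.T).dot(A))
    assert residual < 1e-6 * np.linalg.norm(A)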
def randomized_svd(M, n_components, n_oversamples=10, n_iter=2,
power_iteration_normalizer='auto', transpose='auto',
flip_sign=True, random_state=0):
"""Computes a truncated randomized SVD
Parameters
----------
M: ndarray or sparse matrix
Matrix to decompose
n_components: int
Number of singular values and vectors to extract.
n_oversamples: int (default is 10)
Additional number of random vectors to sample the range of M so as
to ensure proper conditioning. The total number of random vectors
used to find the range of M is n_components + n_oversamples. Smaller
number can improve speed but can negatively impact the quality of
approximation of singular vectors and singular values.
n_iter: int (default is 2)
Number of power iterations (can be used to deal with very noisy
problems).
.. versionchanged:: 0.18
power_iteration_normalizer: 'auto' (default), 'QR', 'LU', 'none'
Whether the power iterations are normalized with step-by-step
QR factorization (the slowest but most accurate), 'none'
(the fastest but numerically unstable when `n_iter` is large, e.g.
typically 5 or larger), or 'LU' factorization (numerically stable
but can lose slightly in accuracy). The 'auto' mode applies no
normalization if `n_iter`<=2 and switches to LU otherwise.
.. versionadded:: 0.18
transpose: True, False or 'auto' (default)
Whether the algorithm should be applied to M.T instead of M. The
result should approximately be the same. The 'auto' mode will
trigger the transposition if M.shape[1] > M.shape[0] since this
implementation of randomized SVD tend to be a little faster in that
case.
.. versionchanged:: 0.18
flip_sign: boolean, (True by default)
The output of a singular value decomposition is only unique up to a
permutation of the signs of the singular vectors. If `flip_sign` is
set to `True`, the sign ambiguity is resolved by making the largest
loadings for each component in the left singular vectors positive.
random_state: RandomState or an int seed (0 by default)
A random number generator instance to make behavior
Notes
-----
This algorithm finds a (usually very good) approximate truncated
singular value decomposition using randomization to speed up the
computations. It is particularly fast on large matrices on which
you wish to extract only a small number of components.
References
----------
* Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
      Halko, et al., 2009 http://arxiv.org/abs/0909.4061
* A randomized algorithm for the decomposition of matrices
Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert
* An implementation of a randomized algorithm for principal component
analysis
A. Szlam et al. 2014
"""
random_state = check_random_state(random_state)
n_random = n_components + n_oversamples
n_samples, n_features = M.shape
if transpose == 'auto':
transpose = n_samples < n_features
if transpose:
# this implementation is a bit faster with smaller shape[1]
M = M.T
Q = randomized_range_finder(M, n_random, n_iter,
power_iteration_normalizer, random_state)
# project M to the (k + p) dimensional space using the basis vectors
B = safe_sparse_dot(Q.T, M)
# compute the SVD on the thin matrix: (k + p) wide
Uhat, s, V = linalg.svd(B, full_matrices=False)
del B
U = np.dot(Q, Uhat)
if flip_sign:
U, V = svd_flip(U, V)
if transpose:
# transpose back the results according to the input convention
return V[:n_components, :].T, s[:n_components], U[:, :n_components].T
else:
return U[:, :n_components], s[:n_components], V[:n_components, :]
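# Illustrative sketch (not part of the original module): on an exactly rank-5
# matrix, randomized_svd recovers the top spectrum and the reconstruction to
# floating-point precision.
def _demo_randomized_svd():
    rng = np.random.RandomState(0)
    M = rng.randn(80, 5).dot(rng.randn(5, 60))
    U, s, V = randomized_svd(M, n_components=5, n_iter=4, random_state=0)
    s_exact = linalg.svd(M, compute_uv=False)[:5]
    assert np.allclose(s, s_exact)
    assert np.allclose(np.dot(U * s, V), M, atol=1e-8)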
def logsumexp(arr, axis=0):
"""Computes the sum of arr assuming arr is in the log domain.
Returns log(sum(exp(arr))) while minimizing the possibility of
over/underflow.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.extmath import logsumexp
>>> a = np.arange(10)
>>> np.log(np.sum(np.exp(a)))
9.4586297444267107
>>> logsumexp(a)
9.4586297444267107
"""
arr = np.rollaxis(arr, axis)
    # Use the max to normalize, as with the log this is what accumulates
    # the fewest errors
vmax = arr.max(axis=0)
out = np.log(np.sum(np.exp(arr - vmax), axis=0))
out += vmax
return out
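# Illustrative sketch (not part of the original module): the max-shift above is
# what keeps the computation finite; the naive formula overflows.
def _demo_logsumexp_stability():
    a = np.array([1000.0, 1000.0])
    with np.errstate(over='ignore'):
        naive = np.log(np.sum(np.exp(a)))  # exp(1000.) overflows to inf
    stable = logsumexp(a)                  # 1000 + log(2), computed safely
    return naive, stable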
def weighted_mode(a, w, axis=0):
"""Returns an array of the weighted modal (most common) value in a
If there is more than one such value, only the first is returned.
The bin-count for the modal bins is also returned.
This is an extension of the algorithm in scipy.stats.mode.
Parameters
----------
a : array_like
n-dimensional array of which to find mode(s).
w : array_like
n-dimensional array of weights for each value
axis : int, optional
Axis along which to operate. Default is 0, i.e. the first axis.
Returns
-------
vals : ndarray
Array of modal values.
score : ndarray
Array of weighted counts for each mode.
Examples
--------
>>> from sklearn.utils.extmath import weighted_mode
>>> x = [4, 1, 4, 2, 4, 2]
>>> weights = [1, 1, 1, 1, 1, 1]
>>> weighted_mode(x, weights)
(array([ 4.]), array([ 3.]))
The value 4 appears three times: with uniform weights, the result is
simply the mode of the distribution.
>>> weights = [1, 3, 0.5, 1.5, 1, 2] # deweight the 4's
>>> weighted_mode(x, weights)
(array([ 2.]), array([ 3.5]))
The value 2 has the highest score: it appears twice with weights of
1.5 and 2: the sum of these is 3.
See Also
--------
scipy.stats.mode
"""
if axis is None:
a = np.ravel(a)
w = np.ravel(w)
axis = 0
    else:
        a = np.asarray(a)
        w = np.asarray(w)
if a.shape != w.shape:
w = np.zeros(a.shape, dtype=w.dtype) + w
scores = np.unique(np.ravel(a)) # get ALL unique values
testshape = list(a.shape)
testshape[axis] = 1
oldmostfreq = np.zeros(testshape)
oldcounts = np.zeros(testshape)
for score in scores:
template = np.zeros(a.shape)
ind = (a == score)
template[ind] = w[ind]
counts = np.expand_dims(np.sum(template, axis), axis)
mostfrequent = np.where(counts > oldcounts, score, oldmostfreq)
oldcounts = np.maximum(counts, oldcounts)
oldmostfreq = mostfrequent
return mostfrequent, oldcounts
def pinvh(a, cond=None, rcond=None, lower=True):
"""Compute the (Moore-Penrose) pseudo-inverse of a hermetian matrix.
Calculate a generalized inverse of a symmetric matrix using its
eigenvalue decomposition and including all 'large' eigenvalues.
Parameters
----------
a : array, shape (N, N)
        Real symmetric or complex Hermitian matrix to be pseudo-inverted
cond : float or None, default None
        Cutoff for 'small' eigenvalues.
        Eigenvalues (in absolute value) smaller than ``cond * largest_eigenvalue``
        are considered zero.
If None or -1, suitable machine precision is used.
rcond : float or None, default None (deprecated)
Cutoff for 'small' eigenvalues.
        Eigenvalues (in absolute value) smaller than ``rcond * largest_eigenvalue``
        are considered zero.
If None or -1, suitable machine precision is used.
lower : boolean
Whether the pertinent array data is taken from the lower or upper
triangle of a. (Default: lower)
Returns
-------
B : array, shape (N, N)
Raises
------
LinAlgError
If eigenvalue does not converge
Examples
--------
>>> import numpy as np
>>> a = np.random.randn(9, 6)
>>> a = np.dot(a, a.T)
>>> B = pinvh(a)
>>> np.allclose(a, np.dot(a, np.dot(B, a)))
True
>>> np.allclose(B, np.dot(B, np.dot(a, B)))
True
"""
a = np.asarray_chkfinite(a)
s, u = linalg.eigh(a, lower=lower)
if rcond is not None:
cond = rcond
if cond in [None, -1]:
t = u.dtype.char.lower()
factor = {'f': 1E3, 'd': 1E6}
cond = factor[t] * np.finfo(t).eps
# unlike svd case, eigh can lead to negative eigenvalues
above_cutoff = (abs(s) > cond * np.max(abs(s)))
psigma_diag = np.zeros_like(s)
psigma_diag[above_cutoff] = 1.0 / s[above_cutoff]
return np.dot(u * psigma_diag, np.conjugate(u).T)
def cartesian(arrays, out=None):
"""Generate a cartesian product of input arrays.
Parameters
----------
arrays : list of array-like
1-D arrays to form the cartesian product of.
out : ndarray
Array to place the cartesian product in.
Returns
-------
out : ndarray
2-D array of shape (M, len(arrays)) containing cartesian products
formed of input arrays.
Examples
--------
>>> cartesian(([1, 2, 3], [4, 5], [6, 7]))
array([[1, 4, 6],
[1, 4, 7],
[1, 5, 6],
[1, 5, 7],
[2, 4, 6],
[2, 4, 7],
[2, 5, 6],
[2, 5, 7],
[3, 4, 6],
[3, 4, 7],
[3, 5, 6],
[3, 5, 7]])
"""
arrays = [np.asarray(x) for x in arrays]
shape = (len(x) for x in arrays)
dtype = arrays[0].dtype
ix = np.indices(shape)
ix = ix.reshape(len(arrays), -1).T
if out is None:
out = np.empty_like(ix, dtype=dtype)
    for n, arr in enumerate(arrays):
        out[:, n] = arr[ix[:, n]]
return out
def svd_flip(u, v, u_based_decision=True):
"""Sign correction to ensure deterministic output from SVD.
Adjusts the columns of u and the rows of v such that the loadings in the
columns in u that are largest in absolute value are always positive.
Parameters
----------
u, v : ndarray
u and v are the output of `linalg.svd` or
`sklearn.utils.extmath.randomized_svd`, with matching inner dimensions
so one can compute `np.dot(u * s, v)`.
u_based_decision : boolean, (default=True)
If True, use the columns of u as the basis for sign flipping.
Otherwise, use the rows of v. The choice of which variable to base the
decision on is generally algorithm dependent.
Returns
-------
u_adjusted, v_adjusted : arrays with the same dimensions as the input.
"""
if u_based_decision:
# columns of u, rows of v
max_abs_cols = np.argmax(np.abs(u), axis=0)
signs = np.sign(u[max_abs_cols, xrange(u.shape[1])])
u *= signs
v *= signs[:, np.newaxis]
else:
# rows of v, columns of u
max_abs_rows = np.argmax(np.abs(v), axis=1)
signs = np.sign(v[xrange(v.shape[0]), max_abs_rows])
u *= signs
v *= signs[:, np.newaxis]
return u, v
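# Illustrative sketch (not part of the original module): the sign correction
# above leaves the reconstruction unchanged while fixing the sign convention.
def _demo_svd_flip():
    rng = np.random.RandomState(0)
    X = rng.randn(6, 4)
    u, s, v = linalg.svd(X, full_matrices=False)
    u2, v2 = svd_flip(u.copy(), v.copy())
    assert np.allclose(np.dot(u2 * s, v2), X)
    # The largest-magnitude loading in each column of u2 is now positive.
    assert (u2[np.argmax(np.abs(u2), axis=0), range(u2.shape[1])] >= 0).all()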
def log_logistic(X, out=None):
"""Compute the log of the logistic function, ``log(1 / (1 + e ** -x))``.
This implementation is numerically stable because it splits positive and
negative values::
-log(1 + exp(-x_i)) if x_i > 0
x_i - log(1 + exp(x_i)) if x_i <= 0
For the ordinary logistic function, use ``sklearn.utils.fixes.expit``.
Parameters
----------
X: array-like, shape (M, N) or (M, )
Argument to the logistic function
out: array-like, shape: (M, N) or (M, ), optional:
Preallocated output array.
Returns
-------
out: array, shape (M, N) or (M, )
Log of the logistic function evaluated at every point in x
Notes
-----
See the blog post describing this implementation:
http://fa.bianp.net/blog/2013/numerical-optimizers-for-logistic-regression/
"""
is_1d = X.ndim == 1
X = np.atleast_2d(X)
X = check_array(X, dtype=np.float64)
n_samples, n_features = X.shape
if out is None:
out = np.empty_like(X)
_log_logistic_sigmoid(n_samples, n_features, X, out)
if is_1d:
return np.squeeze(out)
return out
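# Illustrative sketch (not part of the original module): the two-branch split
# above matches a direct evaluation where the latter is safe, and stays finite
# where it is not.
def _demo_log_logistic():
    x = np.array([-5.0, 0.0, 5.0])
    assert np.allclose(log_logistic(x), -np.log1p(np.exp(-x)))
    # The direct form gives -inf at x = -1000; the split form gives ~ -1000.
    assert np.isfinite(log_logistic(np.array([-1000.0])))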
def softmax(X, copy=True):
"""
Calculate the softmax function.
The softmax function is calculated by
np.exp(X) / np.sum(np.exp(X), axis=1)
This will cause overflow when large values are exponentiated.
Hence the largest value in each row is subtracted from each data
point to prevent this.
Parameters
----------
X: array-like, shape (M, N)
Argument to the logistic function
copy: bool, optional
Copy X or not.
Returns
-------
out: array, shape (M, N)
Softmax function evaluated at every point in x
"""
if copy:
X = np.copy(X)
max_prob = np.max(X, axis=1).reshape((-1, 1))
X -= max_prob
np.exp(X, X)
sum_prob = np.sum(X, axis=1).reshape((-1, 1))
X /= sum_prob
return X
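# Illustrative sketch (not part of the original module): subtracting the row
# maximum keeps the exponentials representable even for very large inputs.
def _demo_softmax_stability():
    X = np.array([[1000.0, 1001.0, 1002.0]])
    p = softmax(X)
    assert np.isfinite(p).all()
    assert np.allclose(p.sum(axis=1), 1.0)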
def safe_min(X):
"""Returns the minimum value of a dense or a CSR/CSC matrix.
    Adapted from http://stackoverflow.com/q/13426580
"""
if issparse(X):
if len(X.data) == 0:
return 0
m = X.data.min()
return m if X.getnnz() == X.size else min(m, 0)
else:
return X.min()
def make_nonnegative(X, min_value=0):
"""Ensure `X.min()` >= `min_value`."""
min_ = safe_min(X)
if min_ < min_value:
if issparse(X):
raise ValueError("Cannot make the data matrix"
" nonnegative because it is sparse."
" Adding a value to every entry would"
" make it no longer sparse.")
X = X + (min_value - min_)
return X
def _incremental_mean_and_var(X, last_mean=.0, last_variance=None,
last_sample_count=0):
"""Calculate mean update and a Youngs and Cramer variance update.
last_mean and last_variance are statistics computed at the last step by the
function. Both must be initialized to 0.0. In case no scaling is required
    last_variance can be None. The mean is always required and returned because
    it is necessary for the calculation of the variance. `last_sample_count` is
    the number of samples encountered until now.
From the paper "Algorithms for computing the sample variance: analysis and
recommendations", by Chan, Golub, and LeVeque.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data to use for variance update
last_mean : array-like, shape: (n_features,)
last_variance : array-like, shape: (n_features,)
last_sample_count : int
Returns
-------
updated_mean : array, shape (n_features,)
updated_variance : array, shape (n_features,)
If None, only mean is computed
updated_sample_count : int
References
----------
    T. Chan, G. Golub, R. LeVeque. Algorithms for computing the sample
    variance: analysis and recommendations, The American Statistician, Vol. 37,
    No. 3, pp. 242-247
Also, see the sparse implementation of this in
`utils.sparsefuncs.incr_mean_variance_axis` and
`utils.sparsefuncs_fast.incr_mean_variance_axis0`
"""
# old = stats until now
# new = the current increment
# updated = the aggregated stats
last_sum = last_mean * last_sample_count
new_sum = X.sum(axis=0)
new_sample_count = X.shape[0]
updated_sample_count = last_sample_count + new_sample_count
updated_mean = (last_sum + new_sum) / updated_sample_count
if last_variance is None:
updated_variance = None
else:
new_unnormalized_variance = X.var(axis=0) * new_sample_count
if last_sample_count == 0: # Avoid division by 0
updated_unnormalized_variance = new_unnormalized_variance
else:
last_over_new_count = last_sample_count / new_sample_count
last_unnormalized_variance = last_variance * last_sample_count
updated_unnormalized_variance = (
last_unnormalized_variance +
new_unnormalized_variance +
last_over_new_count / updated_sample_count *
(last_sum / last_over_new_count - new_sum) ** 2)
updated_variance = updated_unnormalized_variance / updated_sample_count
return updated_mean, updated_variance, updated_sample_count
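# Illustrative sketch (not part of the original module): feeding a matrix in
# two chunks through the update above reproduces the batch statistics.
def _demo_incremental_mean_and_var():
    rng = np.random.RandomState(0)
    X = rng.randn(100, 3)
    mean, var, n = _incremental_mean_and_var(X[:40], 0.0, 0.0, 0)
    mean, var, n = _incremental_mean_and_var(X[40:], mean, var, n)
    assert n == 100
    assert np.allclose(mean, X.mean(axis=0))
    assert np.allclose(var, X.var(axis=0))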
def _deterministic_vector_sign_flip(u):
"""Modify the sign of vectors for reproducibility
Flips the sign of elements of all the vectors (rows of u) such that
the absolute maximum element of each vector is positive.
Parameters
----------
u : ndarray
Array with vectors as its rows.
Returns
-------
u_flipped : ndarray with same shape as u
Array with the sign flipped vectors as its rows.
"""
max_abs_rows = np.argmax(np.abs(u), axis=1)
signs = np.sign(u[range(u.shape[0]), max_abs_rows])
u *= signs[:, np.newaxis]
return u
| bsd-3-clause |
neerajvashistha/pa-dude | lib/python2.7/site-packages/numpy/fft/fftpack.py | 72 | 45497 | """
Discrete Fourier Transforms
Routines in this module:
fft(a, n=None, axis=-1)
ifft(a, n=None, axis=-1)
rfft(a, n=None, axis=-1)
irfft(a, n=None, axis=-1)
hfft(a, n=None, axis=-1)
ihfft(a, n=None, axis=-1)
fftn(a, s=None, axes=None)
ifftn(a, s=None, axes=None)
rfftn(a, s=None, axes=None)
irfftn(a, s=None, axes=None)
fft2(a, s=None, axes=(-2,-1))
ifft2(a, s=None, axes=(-2, -1))
rfft2(a, s=None, axes=(-2,-1))
irfft2(a, s=None, axes=(-2, -1))
i = inverse transform
r = transform of purely real data
h = Hermite transform
n = n-dimensional transform
2 = 2-dimensional transform
(Note: 2D routines are just nD routines with different default
behavior.)
The underlying code for these functions is an f2c-translated and modified
version of the FFTPACK routines.
"""
from __future__ import division, absolute_import, print_function
__all__ = ['fft', 'ifft', 'rfft', 'irfft', 'hfft', 'ihfft', 'rfftn',
'irfftn', 'rfft2', 'irfft2', 'fft2', 'ifft2', 'fftn', 'ifftn']
from numpy.core import (array, asarray, zeros, swapaxes, shape, conjugate,
take, sqrt)
from . import fftpack_lite as fftpack
_fft_cache = {}
_real_fft_cache = {}
def _raw_fft(a, n=None, axis=-1, init_function=fftpack.cffti,
work_function=fftpack.cfftf, fft_cache=_fft_cache):
a = asarray(a)
if n is None:
n = a.shape[axis]
if n < 1:
raise ValueError("Invalid number of FFT data points (%d) specified."
% n)
try:
# Thread-safety note: We rely on list.pop() here to atomically
# retrieve-and-remove a wsave from the cache. This ensures that no
# other thread can get the same wsave while we're using it.
wsave = fft_cache.setdefault(n, []).pop()
except (IndexError):
wsave = init_function(n)
if a.shape[axis] != n:
s = list(a.shape)
if s[axis] > n:
index = [slice(None)]*len(s)
index[axis] = slice(0, n)
a = a[index]
else:
index = [slice(None)]*len(s)
index[axis] = slice(0, s[axis])
s[axis] = n
z = zeros(s, a.dtype.char)
z[index] = a
a = z
if axis != -1:
a = swapaxes(a, axis, -1)
r = work_function(a, wsave)
if axis != -1:
r = swapaxes(r, axis, -1)
# As soon as we put wsave back into the cache, another thread could pick it
# up and start using it, so we must not do this until after we're
# completely done using it ourselves.
fft_cache[n].append(wsave)
return r
def _unitary(norm):
if norm not in (None, "ortho"):
raise ValueError("Invalid norm value %s, should be None or \"ortho\"."
% norm)
return norm is not None
def fft(a, n=None, axis=-1, norm=None):
"""
Compute the one-dimensional discrete Fourier Transform.
This function computes the one-dimensional *n*-point discrete Fourier
Transform (DFT) with the efficient Fast Fourier Transform (FFT)
algorithm [CT].
Parameters
----------
a : array_like
Input array, can be complex.
n : int, optional
Length of the transformed axis of the output.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input along the axis specified by `axis` is used.
axis : int, optional
Axis over which to compute the FFT. If not given, the last axis is
used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
Raises
------
IndexError
if `axes` is larger than the last axis of `a`.
See Also
--------
numpy.fft : for definition of the DFT and conventions used.
ifft : The inverse of `fft`.
fft2 : The two-dimensional FFT.
fftn : The *n*-dimensional FFT.
rfftn : The *n*-dimensional FFT of real input.
fftfreq : Frequency bins for given FFT parameters.
Notes
-----
FFT (Fast Fourier Transform) refers to a way the discrete Fourier
Transform (DFT) can be calculated efficiently, by using symmetries in the
calculated terms. The symmetry is highest when `n` is a power of 2, and
the transform is therefore most efficient for these sizes.
The DFT is defined, with the conventions used in this implementation, in
the documentation for the `numpy.fft` module.
References
----------
.. [CT] Cooley, James W., and John W. Tukey, 1965, "An algorithm for the
machine calculation of complex Fourier series," *Math. Comput.*
19: 297-301.
Examples
--------
>>> np.fft.fft(np.exp(2j * np.pi * np.arange(8) / 8))
array([ -3.44505240e-16 +1.14383329e-17j,
8.00000000e+00 -5.71092652e-15j,
2.33482938e-16 +1.22460635e-16j,
1.64863782e-15 +1.77635684e-15j,
9.95839695e-17 +2.33482938e-16j,
0.00000000e+00 +1.66837030e-15j,
1.14383329e-17 +1.22460635e-16j,
-1.64863782e-15 +1.77635684e-15j])
>>> import matplotlib.pyplot as plt
>>> t = np.arange(256)
>>> sp = np.fft.fft(np.sin(t))
>>> freq = np.fft.fftfreq(t.shape[-1])
>>> plt.plot(freq, sp.real, freq, sp.imag)
[<matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>]
>>> plt.show()
In this example, real input has an FFT which is Hermitian, i.e., symmetric
in the real part and anti-symmetric in the imaginary part, as described in
the `numpy.fft` documentation.
"""
a = asarray(a).astype(complex)
if n is None:
n = a.shape[axis]
output = _raw_fft(a, n, axis, fftpack.cffti, fftpack.cfftf, _fft_cache)
if _unitary(norm):
output *= 1 / sqrt(n)
return output
def ifft(a, n=None, axis=-1, norm=None):
"""
Compute the one-dimensional inverse discrete Fourier Transform.
This function computes the inverse of the one-dimensional *n*-point
discrete Fourier transform computed by `fft`. In other words,
``ifft(fft(a)) == a`` to within numerical accuracy.
For a general description of the algorithm and definitions,
see `numpy.fft`.
The input should be ordered in the same way as is returned by `fft`,
i.e., ``a[0]`` should contain the zero frequency term,
``a[1:n/2+1]`` should contain the positive-frequency terms, and
``a[n/2+1:]`` should contain the negative-frequency terms, in order of
decreasingly negative frequency. See `numpy.fft` for details.
Parameters
----------
a : array_like
Input array, can be complex.
n : int, optional
Length of the transformed axis of the output.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input along the axis specified by `axis` is used.
See notes about padding issues.
axis : int, optional
Axis over which to compute the inverse DFT. If not given, the last
axis is used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
Raises
------
IndexError
If `axes` is larger than the last axis of `a`.
See Also
--------
numpy.fft : An introduction, with definitions and general explanations.
fft : The one-dimensional (forward) FFT, of which `ifft` is the inverse
ifft2 : The two-dimensional inverse FFT.
ifftn : The n-dimensional inverse FFT.
Notes
-----
If the input parameter `n` is larger than the size of the input, the input
is padded by appending zeros at the end. Even though this is the common
approach, it might lead to surprising results. If a different padding is
desired, it must be performed before calling `ifft`.
Examples
--------
>>> np.fft.ifft([0, 4, 0, 0])
array([ 1.+0.j, 0.+1.j, -1.+0.j, 0.-1.j])
Create and plot a band-limited signal with random phases:
>>> import matplotlib.pyplot as plt
>>> t = np.arange(400)
>>> n = np.zeros((400,), dtype=complex)
>>> n[40:60] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20,)))
>>> s = np.fft.ifft(n)
>>> plt.plot(t, s.real, 'b-', t, s.imag, 'r--')
[<matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>]
>>> plt.legend(('real', 'imaginary'))
<matplotlib.legend.Legend object at 0x...>
>>> plt.show()
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=complex)
if n is None:
n = a.shape[axis]
unitary = _unitary(norm)
output = _raw_fft(a, n, axis, fftpack.cffti, fftpack.cfftb, _fft_cache)
return output * (1 / (sqrt(n) if unitary else n))
def rfft(a, n=None, axis=-1, norm=None):
"""
Compute the one-dimensional discrete Fourier Transform for real input.
This function computes the one-dimensional *n*-point discrete Fourier
Transform (DFT) of a real-valued array by means of an efficient algorithm
called the Fast Fourier Transform (FFT).
Parameters
----------
a : array_like
Input array
n : int, optional
Number of points along transformation axis in the input to use.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input along the axis specified by `axis` is used.
axis : int, optional
Axis over which to compute the FFT. If not given, the last axis is
used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
If `n` is even, the length of the transformed axis is ``(n/2)+1``.
If `n` is odd, the length is ``(n+1)/2``.
Raises
------
IndexError
If `axis` is larger than the last axis of `a`.
See Also
--------
numpy.fft : For definition of the DFT and conventions used.
irfft : The inverse of `rfft`.
fft : The one-dimensional FFT of general (complex) input.
fftn : The *n*-dimensional FFT.
rfftn : The *n*-dimensional FFT of real input.
Notes
-----
When the DFT is computed for purely real input, the output is
Hermitian-symmetric, i.e. the negative frequency terms are just the complex
conjugates of the corresponding positive-frequency terms, and the
negative-frequency terms are therefore redundant. This function does not
compute the negative frequency terms, and the length of the transformed
axis of the output is therefore ``n//2 + 1``.
When ``A = rfft(a)`` and fs is the sampling frequency, ``A[0]`` contains
the zero-frequency term 0*fs, which is real due to Hermitian symmetry.
If `n` is even, ``A[-1]`` contains the term representing both positive
and negative Nyquist frequency (+fs/2 and -fs/2), and must also be purely
real. If `n` is odd, there is no term at fs/2; ``A[-1]`` contains
the largest positive frequency (fs/2*(n-1)/n), and is complex in the
general case.
If the input `a` contains an imaginary part, it is silently discarded.
Examples
--------
>>> np.fft.fft([0, 1, 0, 0])
array([ 1.+0.j, 0.-1.j, -1.+0.j, 0.+1.j])
>>> np.fft.rfft([0, 1, 0, 0])
array([ 1.+0.j, 0.-1.j, -1.+0.j])
Notice how the final element of the `fft` output is the complex conjugate
of the second element, for real input. For `rfft`, this symmetry is
exploited to compute only the non-negative frequency terms.
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=float)
output = _raw_fft(a, n, axis, fftpack.rffti, fftpack.rfftf,
_real_fft_cache)
if _unitary(norm):
output *= 1 / sqrt(a.shape[axis])
return output
def irfft(a, n=None, axis=-1, norm=None):
"""
Compute the inverse of the n-point DFT for real input.
This function computes the inverse of the one-dimensional *n*-point
discrete Fourier Transform of real input computed by `rfft`.
In other words, ``irfft(rfft(a), len(a)) == a`` to within numerical
accuracy. (See Notes below for why ``len(a)`` is necessary here.)
The input is expected to be in the form returned by `rfft`, i.e. the
real zero-frequency term followed by the complex positive frequency terms
in order of increasing frequency. Since the discrete Fourier Transform of
real input is Hermitian-symmetric, the negative frequency terms are taken
to be the complex conjugates of the corresponding positive frequency terms.
Parameters
----------
a : array_like
The input array.
n : int, optional
Length of the transformed axis of the output.
For `n` output points, ``n//2+1`` input points are necessary. If the
input is longer than this, it is cropped. If it is shorter than this,
it is padded with zeros. If `n` is not given, it is determined from
the length of the input along the axis specified by `axis`.
axis : int, optional
Axis over which to compute the inverse FFT. If not given, the last
axis is used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
The length of the transformed axis is `n`, or, if `n` is not given,
``2*(m-1)`` where ``m`` is the length of the transformed axis of the
input. To get an odd number of output points, `n` must be specified.
Raises
------
IndexError
If `axis` is larger than the last axis of `a`.
See Also
--------
numpy.fft : For definition of the DFT and conventions used.
rfft : The one-dimensional FFT of real input, of which `irfft` is inverse.
fft : The one-dimensional FFT.
irfft2 : The inverse of the two-dimensional FFT of real input.
irfftn : The inverse of the *n*-dimensional FFT of real input.
Notes
-----
Returns the real valued `n`-point inverse discrete Fourier transform
of `a`, where `a` contains the non-negative frequency terms of a
Hermitian-symmetric sequence. `n` is the length of the result, not the
input.
If you specify an `n` such that `a` must be zero-padded or truncated, the
extra/removed values will be added/removed at high frequencies. One can
thus resample a series to `m` points via Fourier interpolation by:
``a_resamp = irfft(rfft(a), m)``.
Examples
--------
>>> np.fft.ifft([1, -1j, -1, 1j])
array([ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j])
>>> np.fft.irfft([1, -1j, -1])
array([ 0., 1., 0., 0.])
Notice how the last term in the input to the ordinary `ifft` is the
complex conjugate of the second term, and the output has zero imaginary
part everywhere. When calling `irfft`, the negative frequencies are not
specified, and the output array is purely real.
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=complex)
if n is None:
n = (a.shape[axis] - 1) * 2
unitary = _unitary(norm)
output = _raw_fft(a, n, axis, fftpack.rffti, fftpack.rfftb,
_real_fft_cache)
return output * (1 / (sqrt(n) if unitary else n))
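# Illustrative sketch (not part of this module's API): the Fourier
# interpolation trick from the notes above; the factor m/n compensates for
# irfft's 1/n normalization when the length changes.
def _demo_fourier_resample():
    import numpy as np
    n, m = 8, 16
    a = np.sin(2 * np.pi * np.arange(n) / n)  # band-limited test signal
    a_resamp = irfft(rfft(a), m) * (float(m) / n)
    expected = np.sin(2 * np.pi * np.arange(m) / m)
    assert np.allclose(a_resamp, expected, atol=1e-12)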
def hfft(a, n=None, axis=-1, norm=None):
"""
Compute the FFT of a signal which has Hermitian symmetry (real spectrum).
Parameters
----------
a : array_like
The input array.
n : int, optional
Length of the transformed axis of the output.
For `n` output points, ``n//2+1`` input points are necessary. If the
input is longer than this, it is cropped. If it is shorter than this,
it is padded with zeros. If `n` is not given, it is determined from
the length of the input along the axis specified by `axis`.
axis : int, optional
Axis over which to compute the FFT. If not given, the last
axis is used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
The length of the transformed axis is `n`, or, if `n` is not given,
``2*(m-1)`` where ``m`` is the length of the transformed axis of the
input. To get an odd number of output points, `n` must be specified.
Raises
------
IndexError
If `axis` is larger than the last axis of `a`.
See also
--------
rfft : Compute the one-dimensional FFT for real input.
ihfft : The inverse of `hfft`.
Notes
-----
`hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the
opposite case: here the signal has Hermitian symmetry in the time domain
and is real in the frequency domain. So here it's `hfft` for which
you must supply the length of the result if it is to be odd:
``ihfft(hfft(a), len(a)) == a``, within numerical accuracy.
Examples
--------
>>> signal = np.array([1, 2, 3, 4, 3, 2])
>>> np.fft.fft(signal)
array([ 15.+0.j, -4.+0.j, 0.+0.j, -1.-0.j, 0.+0.j, -4.+0.j])
>>> np.fft.hfft(signal[:4]) # Input first half of signal
array([ 15., -4., 0., -1., 0., -4.])
>>> np.fft.hfft(signal, 6) # Input entire signal and truncate
array([ 15., -4., 0., -1., 0., -4.])
>>> signal = np.array([[1, 1.j], [-1.j, 2]])
>>> np.conj(signal.T) - signal # check Hermitian symmetry
array([[ 0.-0.j, 0.+0.j],
[ 0.+0.j, 0.-0.j]])
>>> freq_spectrum = np.fft.hfft(signal)
>>> freq_spectrum
array([[ 1., 1.],
[ 2., -2.]])
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=complex)
if n is None:
n = (a.shape[axis] - 1) * 2
unitary = _unitary(norm)
return irfft(conjugate(a), n, axis) * (sqrt(n) if unitary else n)
def ihfft(a, n=None, axis=-1, norm=None):
"""
Compute the inverse FFT of a signal which has Hermitian symmetry.
Parameters
----------
a : array_like
Input array.
n : int, optional
Length of the inverse FFT.
Number of points along transformation axis in the input to use.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input along the axis specified by `axis` is used.
axis : int, optional
Axis over which to compute the inverse FFT. If not given, the last
axis is used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
If `n` is even, the length of the transformed axis is ``(n/2)+1``.
If `n` is odd, the length is ``(n+1)/2``.
See also
--------
hfft, irfft
Notes
-----
`hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the
opposite case: here the signal has Hermitian symmetry in the time domain
and is real in the frequency domain. So here it's `hfft` for which
you must supply the length of the result if it is to be odd:
``ihfft(hfft(a), len(a)) == a``, within numerical accuracy.
Examples
--------
>>> spectrum = np.array([ 15, -4, 0, -1, 0, -4])
>>> np.fft.ifft(spectrum)
array([ 1.+0.j, 2.-0.j, 3.+0.j, 4.+0.j, 3.+0.j, 2.-0.j])
>>> np.fft.ihfft(spectrum)
array([ 1.-0.j, 2.-0.j, 3.-0.j, 4.-0.j])
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=float)
if n is None:
n = a.shape[axis]
unitary = _unitary(norm)
output = conjugate(rfft(a, n, axis))
return output * (1 / (sqrt(n) if unitary else n))
def _cook_nd_args(a, s=None, axes=None, invreal=0):
if s is None:
shapeless = 1
if axes is None:
s = list(a.shape)
else:
s = take(a.shape, axes)
else:
shapeless = 0
s = list(s)
if axes is None:
axes = list(range(-len(s), 0))
if len(s) != len(axes):
raise ValueError("Shape and axes have different lengths.")
if invreal and shapeless:
s[-1] = (a.shape[axes[-1]] - 1) * 2
return s, axes
def _raw_fftnd(a, s=None, axes=None, function=fft, norm=None):
a = asarray(a)
s, axes = _cook_nd_args(a, s, axes)
itl = list(range(len(axes)))
itl.reverse()
for ii in itl:
a = function(a, n=s[ii], axis=axes[ii], norm=norm)
return a
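# Illustrative sketch (not part of this module's API): an n-dimensional FFT is
# the 1-d transform applied along each requested axis in turn, which is all
# _raw_fftnd does.
def _demo_fftn_as_nested_ffts():
    import numpy as np
    a = np.arange(24.0).reshape(4, 6)
    nested = fft(fft(a, axis=1), axis=0)
    assert np.allclose(fftn(a), nested)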
def fftn(a, s=None, axes=None, norm=None):
"""
Compute the N-dimensional discrete Fourier Transform.
This function computes the *N*-dimensional discrete Fourier Transform over
any number of axes in an *M*-dimensional array by means of the Fast Fourier
Transform (FFT).
Parameters
----------
a : array_like
Input array, can be complex.
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(`s[0]` refers to axis 0, `s[1]` to axis 1, etc.).
This corresponds to `n` for `fft(x, n)`.
Along any axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
if `s` is not given, the shape of the input along the axes specified
by `axes` is used.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last ``len(s)``
axes are used, or all axes if `s` is also not specified.
Repeated indices in `axes` means that the transform over that axis is
performed multiple times.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` and `a`,
as explained in the parameters section above.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
ifftn : The inverse of `fftn`, the inverse *n*-dimensional FFT.
fft : The one-dimensional FFT, with definitions and conventions used.
rfftn : The *n*-dimensional FFT of real input.
fft2 : The two-dimensional FFT.
    fftshift : Shifts zero-frequency terms to the center of the array.
Notes
-----
The output, analogously to `fft`, contains the term for zero frequency in
the low-order corner of all axes, the positive frequency terms in the
first half of all axes, the term for the Nyquist frequency in the middle
of all axes and the negative frequency terms in the second half of all
axes, in order of decreasingly negative frequency.
See `numpy.fft` for details, definitions and conventions used.
Examples
--------
>>> a = np.mgrid[:3, :3, :3][0]
>>> np.fft.fftn(a, axes=(1, 2))
array([[[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]],
[[ 9.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]],
[[ 18.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]]])
>>> np.fft.fftn(a, (2, 2), axes=(0, 1))
array([[[ 2.+0.j, 2.+0.j, 2.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]],
[[-2.+0.j, -2.+0.j, -2.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]]])
>>> import matplotlib.pyplot as plt
>>> [X, Y] = np.meshgrid(2 * np.pi * np.arange(200) / 12,
... 2 * np.pi * np.arange(200) / 34)
>>> S = np.sin(X) + np.cos(Y) + np.random.uniform(0, 1, X.shape)
>>> FS = np.fft.fftn(S)
>>> plt.imshow(np.log(np.abs(np.fft.fftshift(FS))**2))
<matplotlib.image.AxesImage object at 0x...>
>>> plt.show()
"""
return _raw_fftnd(a, s, axes, fft, norm)
def ifftn(a, s=None, axes=None, norm=None):
"""
Compute the N-dimensional inverse discrete Fourier Transform.
This function computes the inverse of the N-dimensional discrete
Fourier Transform over any number of axes in an M-dimensional array by
means of the Fast Fourier Transform (FFT). In other words,
``ifftn(fftn(a)) == a`` to within numerical accuracy.
For a description of the definitions and conventions used, see `numpy.fft`.
The input, analogously to `ifft`, should be ordered in the same way as is
returned by `fftn`, i.e. it should have the term for zero frequency
in all axes in the low-order corner, the positive frequency terms in the
first half of all axes, the term for the Nyquist frequency in the middle
of all axes and the negative frequency terms in the second half of all
axes, in order of decreasingly negative frequency.
Parameters
----------
a : array_like
Input array, can be complex.
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
This corresponds to ``n`` for ``ifft(x, n)``.
Along any axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
if `s` is not given, the shape of the input along the axes specified
by `axes` is used. See notes for issue on `ifft` zero padding.
axes : sequence of ints, optional
Axes over which to compute the IFFT. If not given, the last ``len(s)``
axes are used, or all axes if `s` is also not specified.
Repeated indices in `axes` means that the inverse transform over that
axis is performed multiple times.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` or `a`,
as explained in the parameters section above.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
fftn : The forward *n*-dimensional FFT, of which `ifftn` is the inverse.
ifft : The one-dimensional inverse FFT.
ifft2 : The two-dimensional inverse FFT.
ifftshift : Undoes `fftshift`, shifts zero-frequency terms to beginning
of array.
Notes
-----
See `numpy.fft` for definitions and conventions used.
Zero-padding, analogously with `ifft`, is performed by appending zeros to
the input along the specified dimension. Although this is the common
approach, it might lead to surprising results. If another form of zero
padding is desired, it must be performed before `ifftn` is called.
Examples
--------
>>> a = np.eye(4)
>>> np.fft.ifftn(np.fft.fftn(a, axes=(0,)), axes=(1,))
array([[ 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j]])
Create and plot an image with band-limited frequency content:
>>> import matplotlib.pyplot as plt
>>> n = np.zeros((200,200), dtype=complex)
>>> n[60:80, 20:40] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20, 20)))
>>> im = np.fft.ifftn(n).real
>>> plt.imshow(im)
<matplotlib.image.AxesImage object at 0x...>
>>> plt.show()
"""
return _raw_fftnd(a, s, axes, ifft, norm)
def fft2(a, s=None, axes=(-2, -1), norm=None):
"""
Compute the 2-dimensional discrete Fourier Transform
This function computes the *n*-dimensional discrete Fourier Transform
over any axes in an *M*-dimensional array by means of the
Fast Fourier Transform (FFT). By default, the transform is computed over
the last two axes of the input array, i.e., a 2-dimensional FFT.
Parameters
----------
a : array_like
Input array, can be complex
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(`s[0]` refers to axis 0, `s[1]` to axis 1, etc.).
This corresponds to `n` for `fft(x, n)`.
Along each axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
if `s` is not given, the shape of the input along the axes specified
by `axes` is used.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last two
axes are used. A repeated index in `axes` means the transform over
that axis is performed multiple times. A one-element sequence means
that a one-dimensional FFT is performed.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or the last two axes if `axes` is not given.
Raises
------
ValueError
If `s` and `axes` have different length, or `axes` not given and
``len(s) != 2``.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
ifft2 : The inverse two-dimensional FFT.
fft : The one-dimensional FFT.
fftn : The *n*-dimensional FFT.
fftshift : Shifts zero-frequency terms to the center of the array.
For two-dimensional input, swaps first and third quadrants, and second
and fourth quadrants.
Notes
-----
`fft2` is just `fftn` with a different default for `axes`.
The output, analogously to `fft`, contains the term for zero frequency in
the low-order corner of the transformed axes, the positive frequency terms
in the first half of these axes, the term for the Nyquist frequency in the
middle of the axes and the negative frequency terms in the second half of
the axes, in order of decreasingly negative frequency.
See `fftn` for details and a plotting example, and `numpy.fft` for
definitions and conventions used.
Examples
--------
>>> a = np.mgrid[:5, :5][0]
>>> np.fft.fft2(a)
array([[ 50.0 +0.j , 0.0 +0.j , 0.0 +0.j ,
0.0 +0.j , 0.0 +0.j ],
[-12.5+17.20477401j, 0.0 +0.j , 0.0 +0.j ,
0.0 +0.j , 0.0 +0.j ],
[-12.5 +4.0614962j , 0.0 +0.j , 0.0 +0.j ,
0.0 +0.j , 0.0 +0.j ],
[-12.5 -4.0614962j , 0.0 +0.j , 0.0 +0.j ,
0.0 +0.j , 0.0 +0.j ],
[-12.5-17.20477401j, 0.0 +0.j , 0.0 +0.j ,
0.0 +0.j , 0.0 +0.j ]])
"""
return _raw_fftnd(a, s, axes, fft, norm)
def ifft2(a, s=None, axes=(-2, -1), norm=None):
"""
Compute the 2-dimensional inverse discrete Fourier Transform.
This function computes the inverse of the 2-dimensional discrete Fourier
Transform over any number of axes in an M-dimensional array by means of
the Fast Fourier Transform (FFT). In other words, ``ifft2(fft2(a)) == a``
to within numerical accuracy. By default, the inverse transform is
computed over the last two axes of the input array.
The input, analogously to `ifft`, should be ordered in the same way as is
returned by `fft2`, i.e. it should have the term for zero frequency
in the low-order corner of the two axes, the positive frequency terms in
the first half of these axes, the term for the Nyquist frequency in the
middle of the axes and the negative frequency terms in the second half of
both axes, in order of decreasingly negative frequency.
Parameters
----------
a : array_like
Input array, can be complex.
s : sequence of ints, optional
Shape (length of each axis) of the output (``s[0]`` refers to axis 0,
``s[1]`` to axis 1, etc.). This corresponds to `n` for ``ifft(x, n)``.
Along each axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
if `s` is not given, the shape of the input along the axes specified
by `axes` is used. See notes for issue on `ifft` zero padding.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last two
axes are used. A repeated index in `axes` means the transform over
that axis is performed multiple times. A one-element sequence means
that a one-dimensional FFT is performed.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or the last two axes if `axes` is not given.
Raises
------
ValueError
If `s` and `axes` have different length, or `axes` not given and
``len(s) != 2``.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
fft2 : The forward 2-dimensional FFT, of which `ifft2` is the inverse.
ifftn : The inverse of the *n*-dimensional FFT.
fft : The one-dimensional FFT.
ifft : The one-dimensional inverse FFT.
Notes
-----
`ifft2` is just `ifftn` with a different default for `axes`.
See `ifftn` for details and a plotting example, and `numpy.fft` for
definition and conventions used.
Zero-padding, analogously with `ifft`, is performed by appending zeros to
the input along the specified dimension. Although this is the common
approach, it might lead to surprising results. If another form of zero
padding is desired, it must be performed before `ifft2` is called.
Examples
--------
>>> a = 4 * np.eye(4)
>>> np.fft.ifft2(a)
array([[ 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j],
[ 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j],
[ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j]])
"""
return _raw_fftnd(a, s, axes, ifft, norm)
def rfftn(a, s=None, axes=None, norm=None):
"""
Compute the N-dimensional discrete Fourier Transform for real input.
This function computes the N-dimensional discrete Fourier Transform over
any number of axes in an M-dimensional real array by means of the Fast
Fourier Transform (FFT). By default, all axes are transformed, with the
real transform performed over the last axis, while the remaining
transforms are complex.
Parameters
----------
a : array_like
Input array, taken to be real.
s : sequence of ints, optional
Shape (length along each transformed axis) to use from the input.
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
The final element of `s` corresponds to `n` for ``rfft(x, n)``, while
for the remaining axes, it corresponds to `n` for ``fft(x, n)``.
Along any axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
if `s` is not given, the shape of the input along the axes specified
by `axes` is used.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last ``len(s)``
axes are used, or all axes if `s` is also not specified.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` and `a`,
as explained in the parameters section above.
The length of the last axis transformed will be ``s[-1]//2+1``,
while the remaining transformed axes will have lengths according to
`s`, or unchanged from the input.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
irfftn : The inverse of `rfftn`, i.e. the inverse of the n-dimensional FFT
of real input.
fft : The one-dimensional FFT, with definitions and conventions used.
rfft : The one-dimensional FFT of real input.
fftn : The n-dimensional FFT.
rfft2 : The two-dimensional FFT of real input.
Notes
-----
The transform for real input is performed over the last transformation
axis, as by `rfft`, then the transform over the remaining axes is
performed as by `fftn`. The order of the output is as for `rfft` for the
final transformation axis, and as for `fftn` for the remaining
transformation axes.
See `fft` for details, definitions and conventions used.
Examples
--------
>>> a = np.ones((2, 2, 2))
>>> np.fft.rfftn(a)
array([[[ 8.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j]],
[[ 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j]]])
>>> np.fft.rfftn(a, axes=(2, 0))
array([[[ 4.+0.j, 0.+0.j],
[ 4.+0.j, 0.+0.j]],
[[ 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j]]])
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=float)
s, axes = _cook_nd_args(a, s, axes)
a = rfft(a, s[-1], axes[-1], norm)
for ii in range(len(axes)-1):
a = fft(a, s[ii], axes[ii], norm)
return a
def rfft2(a, s=None, axes=(-2, -1), norm=None):
"""
Compute the 2-dimensional FFT of a real array.
Parameters
----------
a : array
Input array, taken to be real.
s : sequence of ints, optional
Shape of the FFT.
axes : sequence of ints, optional
Axes over which to compute the FFT.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : ndarray
The result of the real 2-D FFT.
See Also
--------
rfftn : Compute the N-dimensional discrete Fourier Transform for real
input.
Notes
-----
This is really just `rfftn` with different default behavior.
For more details see `rfftn`.
"""
return rfftn(a, s, axes, norm)
def irfftn(a, s=None, axes=None, norm=None):
"""
Compute the inverse of the N-dimensional FFT of real input.
This function computes the inverse of the N-dimensional discrete
Fourier Transform for real input over any number of axes in an
M-dimensional array by means of the Fast Fourier Transform (FFT). In
other words, ``irfftn(rfftn(a), a.shape) == a`` to within numerical
accuracy. (The ``a.shape`` is necessary like ``len(a)`` is for `irfft`,
and for the same reason.)
The input should be ordered in the same way as is returned by `rfftn`,
i.e. as for `irfft` for the final transformation axis, and as for `ifftn`
along all the other axes.
Parameters
----------
a : array_like
Input array.
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.). `s` is also the
number of input points used along this axis, except for the last axis,
where ``s[-1]//2+1`` points of the input are used.
Along any axis, if the shape indicated by `s` is smaller than that of
the input, the input is cropped. If it is larger, the input is padded
with zeros. If `s` is not given, the shape of the input along the
axes specified by `axes` is used.
axes : sequence of ints, optional
Axes over which to compute the inverse FFT. If not given, the last
`len(s)` axes are used, or all axes if `s` is also not specified.
Repeated indices in `axes` means that the inverse transform over that
axis is performed multiple times.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` or `a`,
as explained in the parameters section above.
The length of each transformed axis is as given by the corresponding
element of `s`, or the length of the input in every axis except for the
last one if `s` is not given. In the final transformed axis the length
of the output when `s` is not given is ``2*(m-1)`` where ``m`` is the
length of the final transformed axis of the input. To get an odd
number of output points in the final axis, `s` must be specified.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
rfftn : The forward n-dimensional FFT of real input,
of which `ifftn` is the inverse.
fft : The one-dimensional FFT, with definitions and conventions used.
irfft : The inverse of the one-dimensional FFT of real input.
irfft2 : The inverse of the two-dimensional FFT of real input.
Notes
-----
See `fft` for definitions and conventions used.
See `rfft` for definitions and conventions used for real input.
Examples
--------
>>> a = np.zeros((3, 2, 2))
>>> a[0, 0, 0] = 3 * 2 * 2
>>> np.fft.irfftn(a)
array([[[ 1., 1.],
[ 1., 1.]],
[[ 1., 1.],
[ 1., 1.]],
[[ 1., 1.],
[ 1., 1.]]])
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=complex)
s, axes = _cook_nd_args(a, s, axes, invreal=1)
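    # Invert the complex FFTs over all leading axes first; the last axis
    # holds the half-spectrum of the real transform, so finish with irfft.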
for ii in range(len(axes)-1):
a = ifft(a, s[ii], axes[ii], norm)
a = irfft(a, s[-1], axes[-1], norm)
return a
def irfft2(a, s=None, axes=(-2, -1), norm=None):
"""
Compute the 2-dimensional inverse FFT of a real array.
Parameters
----------
a : array_like
The input array
s : sequence of ints, optional
Shape of the inverse FFT.
axes : sequence of ints, optional
The axes over which to compute the inverse fft.
Default is the last two axes.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : ndarray
The result of the inverse real 2-D FFT.
See Also
--------
irfftn : Compute the inverse of the N-dimensional FFT of real input.
Notes
-----
This is really `irfftn` with different defaults.
For more details see `irfftn`.
"""
return irfftn(a, s, axes, norm)
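# --- Illustrative round-trip check (added for exposition; not part of the
# original source) ---
if __name__ == "__main__":
    import numpy as np
    x = np.linspace(0.0, 1.0, 15).reshape(3, 5)
    # With an odd-length last axis, the original shape cannot be inferred
    # from the half-spectrum, so pass it explicitly via `s`.
    assert np.allclose(np.fft.irfft2(np.fft.rfft2(x), s=x.shape), x)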
| mit |
ericvandenbergfb/spark | python/pyspark/sql/udf.py | 4 | 6654 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
User-defined function related classes and functions
"""
import functools
from pyspark import SparkContext
from pyspark.rdd import _prepare_for_python_RDD, PythonEvalType
from pyspark.sql.column import Column, _to_java_column, _to_seq
from pyspark.sql.types import StringType, DataType, StructType, _parse_datatype_string
def _wrap_function(sc, func, returnType):
command = (func, returnType)
pickled_command, broadcast_vars, env, includes = _prepare_for_python_RDD(sc, command)
return sc._jvm.PythonFunction(bytearray(pickled_command), env, includes, sc.pythonExec,
sc.pythonVer, broadcast_vars, sc._javaAccumulator)
def _create_udf(f, returnType, evalType):
if evalType == PythonEvalType.SQL_PANDAS_SCALAR_UDF:
import inspect
argspec = inspect.getargspec(f)
if len(argspec.args) == 0 and argspec.varargs is None:
raise ValueError(
"Invalid function: 0-arg pandas_udfs are not supported. "
"Instead, create a 1-arg pandas_udf and ignore the arg in your function."
)
elif evalType == PythonEvalType.SQL_PANDAS_GROUP_MAP_UDF:
import inspect
argspec = inspect.getargspec(f)
if len(argspec.args) != 1:
raise ValueError(
"Invalid function: pandas_udfs with function type GROUP_MAP "
"must take a single arg that is a pandas DataFrame."
)
# Set the name of the UserDefinedFunction object to be the name of function f
udf_obj = UserDefinedFunction(f, returnType=returnType, name=None, evalType=evalType)
return udf_obj._wrapped()
class UserDefinedFunction(object):
"""
User defined function in Python
.. versionadded:: 1.3
"""
def __init__(self, func,
returnType=StringType(), name=None,
evalType=PythonEvalType.SQL_BATCHED_UDF):
if not callable(func):
raise TypeError(
"Invalid function: not a function or callable (__call__ is not defined): "
"{0}".format(type(func)))
if not isinstance(returnType, (DataType, str)):
raise TypeError(
"Invalid returnType: returnType should be DataType or str "
"but is {}".format(returnType))
if not isinstance(evalType, int):
raise TypeError(
"Invalid evalType: evalType should be an int but is {}".format(evalType))
self.func = func
self._returnType = returnType
        # Lazily-initialized placeholders: the parsed return type and the
        # JVM UserDefinedPythonFunction (judf)
self._returnType_placeholder = None
self._judf_placeholder = None
self._name = name or (
func.__name__ if hasattr(func, '__name__')
else func.__class__.__name__)
self.evalType = evalType
@property
def returnType(self):
# This makes sure this is called after SparkContext is initialized.
# ``_parse_datatype_string`` accesses to JVM for parsing a DDL formatted string.
if self._returnType_placeholder is None:
if isinstance(self._returnType, DataType):
self._returnType_placeholder = self._returnType
else:
self._returnType_placeholder = _parse_datatype_string(self._returnType)
if self.evalType == PythonEvalType.SQL_PANDAS_GROUP_MAP_UDF \
and not isinstance(self._returnType_placeholder, StructType):
raise ValueError("Invalid returnType: returnType must be a StructType for "
"pandas_udf with function type GROUP_MAP")
return self._returnType_placeholder
@property
def _judf(self):
        # Concurrent access to a newly created UDF can initialize multiple
        # UserDefinedPythonFunctions. This is unlikely, does not affect
        # correctness, and should have a minimal performance impact.
if self._judf_placeholder is None:
self._judf_placeholder = self._create_judf()
return self._judf_placeholder
def _create_judf(self):
from pyspark.sql import SparkSession
spark = SparkSession.builder.getOrCreate()
sc = spark.sparkContext
wrapped_func = _wrap_function(sc, self.func, self.returnType)
jdt = spark._jsparkSession.parseDataType(self.returnType.json())
judf = sc._jvm.org.apache.spark.sql.execution.python.UserDefinedPythonFunction(
self._name, wrapped_func, jdt, self.evalType)
return judf
def __call__(self, *cols):
judf = self._judf
sc = SparkContext._active_spark_context
return Column(judf.apply(_to_seq(sc, cols, _to_java_column)))
def _wrapped(self):
"""
Wrap this udf with a function and attach docstring from func
"""
# It is possible for a callable instance without __name__ attribute or/and
# __module__ attribute to be wrapped here. For example, functools.partial. In this case,
# we should avoid wrapping the attributes from the wrapped function to the wrapper
# function. So, we take out these attribute names from the default names to set and
# then manually assign it after being wrapped.
assignments = tuple(
a for a in functools.WRAPPER_ASSIGNMENTS if a != '__name__' and a != '__module__')
@functools.wraps(self.func, assigned=assignments)
def wrapper(*args):
return self(*args)
wrapper.__name__ = self._name
wrapper.__module__ = (self.func.__module__ if hasattr(self.func, '__module__')
else self.func.__class__.__module__)
wrapper.func = self.func
wrapper.returnType = self.returnType
wrapper.evalType = self.evalType
return wrapper
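# --- Illustrative sketch (added for exposition; not part of the original
# module). Minimal usage; requires a local Spark installation, and the
# session, data and column names below are made up for the demo. ---
if __name__ == "__main__":
    from pyspark.sql import SparkSession
    from pyspark.sql.types import IntegerType
    spark = SparkSession.builder.master("local[1]").appName("udf-demo").getOrCreate()
    # Wrap a plain Python function as a SQL UDF that returns an integer.
    slen = UserDefinedFunction(lambda s: len(s), IntegerType(), name="slen")
    df = spark.createDataFrame([("abc",), ("de",)], ["word"])
    df.select(slen(df.word).alias("n")).show()
    spark.stop()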
| apache-2.0 |
LeeKamentsky/CellProfiler | cellprofiler/modules/saveimages.py | 1 | 60223 | '''<b>Save Images </b> saves image or movie files.
<hr>
Because CellProfiler usually performs many image analysis steps on many
groups of images, it does <i>not</i> save any of the resulting images to the
hard drive unless you specifically choose to do so with the <b>SaveImages</b>
module. You can save any of the
processed images created by CellProfiler during the analysis using this module.
<p>You can choose from many different image formats for saving your files. This
allows you to use the module as a file format converter, by loading files
in their original format and then saving them in an alternate format.</p>
<p>Note that saving images in 12-bit format is not supported, and 16-bit format
is supported for TIFF only.</p>
See also <b>NamesAndTypes</b>, <b>ConserveMemory</b>.
'''
# CellProfiler is distributed under the GNU General Public License.
# See the accompanying file LICENSE for details.
#
# Copyright (c) 2003-2009 Massachusetts Institute of Technology
# Copyright (c) 2009-2015 Broad Institute
#
# Please see the AUTHORS file for credits.
#
# Website: http://www.cellprofiler.org
import logging
import matplotlib
import numpy as np
import re
import os
import sys
import scipy.io.matlab.mio
import traceback
logger = logging.getLogger(__name__)
import cellprofiler.cpmodule as cpm
import cellprofiler.measurements as cpmeas
import cellprofiler.settings as cps
from cellprofiler.settings import YES, NO
import cellprofiler.preferences as cpp
from cellprofiler.gui.help import USING_METADATA_TAGS_REF, USING_METADATA_HELP_REF
from cellprofiler.preferences import \
standardize_default_folder_names, DEFAULT_INPUT_FOLDER_NAME, \
DEFAULT_OUTPUT_FOLDER_NAME, ABSOLUTE_FOLDER_NAME, \
DEFAULT_INPUT_SUBFOLDER_NAME, DEFAULT_OUTPUT_SUBFOLDER_NAME, \
IO_FOLDER_CHOICE_HELP_TEXT, IO_WITH_METADATA_HELP_TEXT, \
get_default_image_directory
from cellprofiler.utilities.relpath import relpath
from cellprofiler.modules.loadimages import C_FILE_NAME, C_PATH_NAME, C_URL
from cellprofiler.modules.loadimages import \
C_OBJECTS_FILE_NAME, C_OBJECTS_PATH_NAME, C_OBJECTS_URL
from cellprofiler.modules.loadimages import pathname2url
from cellprofiler.cpmath.cpmorphology import distance_color_labels
from cellprofiler.utilities.version import get_version
from bioformats.formatwriter import write_image
import bioformats.omexml as ome
IF_IMAGE = "Image"
IF_MASK = "Mask"
IF_CROPPING = "Cropping"
IF_FIGURE = "Module window"
IF_MOVIE = "Movie"
IF_OBJECTS = "Objects"
IF_ALL = [IF_IMAGE, IF_MASK, IF_CROPPING, IF_MOVIE, IF_OBJECTS]
OLD_BIT_DEPTH_8 = "8"
OLD_BIT_DEPTH_16 = "16"
BIT_DEPTH_8 = "8-bit integer"
BIT_DEPTH_16 = "16-bit integer"
BIT_DEPTH_FLOAT = "32-bit floating point"
FN_FROM_IMAGE = "From image filename"
FN_SEQUENTIAL = "Sequential numbers"
FN_SINGLE_NAME = "Single name"
SINGLE_NAME_TEXT = "Enter single file name"
FN_WITH_METADATA = "Name with metadata"
FN_IMAGE_FILENAME_WITH_METADATA = "Image filename with metadata"
METADATA_NAME_TEXT = ("""Enter file name with metadata""")
SEQUENTIAL_NUMBER_TEXT = "Enter file prefix"
FF_BMP = "bmp"
FF_JPG = "jpg"
FF_JPEG = "jpeg"
FF_PBM = "pbm"
FF_PCX = "pcx"
FF_PGM = "pgm"
FF_PNG = "png"
FF_PNM = "pnm"
FF_PPM = "ppm"
FF_RAS = "ras"
FF_TIF = "tif"
FF_TIFF = "tiff"
FF_XWD = "xwd"
FF_AVI = "avi"
FF_MAT = "mat"
FF_MOV = "mov"
FF_SUPPORTING_16_BIT = [FF_TIF, FF_TIFF]
PC_WITH_IMAGE = "Same folder as image"
OLD_PC_WITH_IMAGE_VALUES = ["Same folder as image"]
PC_CUSTOM = "Custom"
PC_WITH_METADATA = "Custom with metadata"
WS_EVERY_CYCLE = "Every cycle"
WS_FIRST_CYCLE = "First cycle"
WS_LAST_CYCLE = "Last cycle"
CM_GRAY = "gray"
GC_GRAYSCALE = "Grayscale"
GC_COLOR = "Color"
'''Offset to the directory path setting'''
OFFSET_DIRECTORY_PATH = 11
'''Offset to the bit depth setting in version 11'''
OFFSET_BIT_DEPTH_V11 = 12
class SaveImages(cpm.CPModule):
module_name = "SaveImages"
variable_revision_number = 11
category = "File Processing"
def create_settings(self):
self.save_image_or_figure = cps.Choice(
"Select the type of image to save",
IF_ALL,
IF_IMAGE,doc="""
The following types of images can be saved as a file on the hard drive:
<ul>
<li><i>%(IF_IMAGE)s:</i> Any of the images produced upstream of <b>SaveImages</b> can be selected for saving.
Outlines created by <b>Identify</b> modules can also be saved with this option, but you must
select "Retain outlines..." of identified objects within the <b>Identify</b> module. You might
also want to use the <b>OverlayOutlines</b> module prior to saving images.</li>
<li><i>%(IF_MASK)s:</i> Relevant only if the <b>Crop</b> module is used. The <b>Crop</b> module
creates a mask of the pixels of interest in the image. Saving the mask will produce a
binary image in which the pixels of interest are set to 1; all other pixels are
set to 0.</li>
<li><i>%(IF_CROPPING)s:</i> Relevant only if the <b>Crop</b> module is used. The <b>Crop</b>
module also creates a cropping image which is typically the same size as the original
image. However, since the <b>Crop</b> permits removal of the rows and columns that are left
blank, the cropping can be of a different size than the mask.</li>
            <li><i>%(IF_MOVIE)s:</i> A sequence of images can be saved as a movie file in AVI, MOV, or TIF-stack format (see the <i>Saved movie format</i> setting).
Each image becomes a frame of the movie.</li>
<li><i>%(IF_OBJECTS)s:</i> Objects can be saved as an image. The image
is saved as grayscale unless you select a color map other than
gray. Background pixels appear as black and
each object is assigned an intensity level corresponding to
its object number. The resulting image can be loaded as objects
by the <b>NamesAndTypes</b> module. Objects are best saved as TIF
files. <b>SaveImages</b> will use an 8-bit TIF file if there
are fewer than 256 objects and will use a 16-bit TIF otherwise.
Results may be unpredictable if you save using PNG and there
are more than 255 objects or if you save using one of the other
file formats.</li>
</ul>"""%globals())
self.image_name = cps.ImageNameSubscriber(
"Select the image to save",cps.NONE, doc = """
<i>(Used only if "%(IF_IMAGE)s", "%(IF_MASK)s" or "%(IF_CROPPING)s" are selected to save)</i><br>
Select the image you want to save."""%globals())
self.objects_name = cps.ObjectNameSubscriber(
"Select the objects to save", cps.NONE,doc = """
<i>(Used only if saving "%(IF_OBJECTS)s")</i><br>
Select the objects that you want to save."""%globals())
self.figure_name = cps.FigureSubscriber(
"Select the module display window to save",cps.NONE,doc="""
<i>(Used only if saving "%(IF_FIGURE)s")</i><br>
Enter the module number/name for which you want to
save the module display window."""%globals())
self.file_name_method = cps.Choice(
"Select method for constructing file names",
[FN_FROM_IMAGE, FN_SEQUENTIAL,
FN_SINGLE_NAME],
FN_FROM_IMAGE,doc="""
<i>(Used only if saving non-movie files)</i><br>
Several choices are available for constructing the image file name:
<ul>
<li><i>%(FN_FROM_IMAGE)s:</i> The filename will be constructed based
on the original filename of an input image specified in <b>NamesAndTypes</b>.
You will have the opportunity to prefix or append
additional text.
            <p>If you have metadata associated with your images, you can append text
to the image filename using a metadata tag. This is especially useful if you
want your output given a unique label according to the metadata corresponding
to an image group. The name of the metadata to substitute can be provided for
each image for each cycle using the <b>Metadata</b> module.
%(USING_METADATA_TAGS_REF)s%(USING_METADATA_HELP_REF)s.</p></li>
<li><i>%(FN_SEQUENTIAL)s:</i> Same as above, but in addition, each filename
will have a number appended to the end that corresponds to
the image cycle number (starting at 1).</li>
<li><i>%(FN_SINGLE_NAME)s:</i> A single name will be given to the
file. Since the filename is fixed, this file will be overwritten with each cycle.
In this case, you would probably want to save the image on the last cycle
(see the <i>Select how often to save</i> setting). The exception to this is to
use a metadata tag to provide a unique label, as mentioned
in the <i>%(FN_FROM_IMAGE)s</i> option.</li>
</ul>"""%globals())
self.file_image_name = cps.FileImageNameSubscriber(
"Select image name for file prefix",
cps.NONE,doc="""
            <i>(Used only when "%(FN_FROM_IMAGE)s" is selected for constructing the filename)</i><br>
Select an image loaded using <b>NamesAndTypes</b>. The original filename will be
used as the prefix for the output filename."""%globals())
self.single_file_name = cps.Text(
SINGLE_NAME_TEXT, "OrigBlue",
metadata = True, doc="""
            <i>(Used only when "%(FN_SEQUENTIAL)s" or "%(FN_SINGLE_NAME)s" are selected for constructing the filename)</i><br>
Specify the filename text here. If you have metadata
associated with your images, enter the filename text with the metadata tags. %(USING_METADATA_TAGS_REF)s<br>
Do not enter the file extension in this setting; it will be appended automatically."""%globals())
self.number_of_digits = cps.Integer(
"Number of digits", 4, doc="""
            <i>(Used only when "%(FN_SEQUENTIAL)s" is selected for constructing the filename)</i><br>
Specify the number of digits to be used for the sequential numbering. Zeros will be
used to left-pad the digits. If the number specified here is less than that needed to
contain the number of image sets, the latter will override the value entered."""%globals())
self.wants_file_name_suffix = cps.Binary(
"Append a suffix to the image file name?", False, doc = """
Select <i>%(YES)s</i> to add a suffix to the image's file name.
Select <i>%(NO)s</i> to use the image name as-is."""%globals())
self.file_name_suffix = cps.Text(
"Text to append to the image name",
"", metadata = True, doc="""
<i>(Used only when constructing the filename from the image filename)</i><br>
Enter the text that should be appended to the filename specified above.""")
self.file_format = cps.Choice(
"Saved file format",
[FF_BMP, FF_JPG, FF_JPEG, FF_PNG, FF_TIF, FF_TIFF, FF_MAT],
value = FF_TIF, doc="""
<i>(Used only when saving non-movie files)</i><br>
Select the image or movie format to save the image(s). Most common
image formats are available; MAT-files are readable by MATLAB.""")
self.movie_format = cps.Choice(
"Saved movie format",
[FF_AVI, FF_TIF, FF_MOV],
value = FF_AVI, doc="""
<i>(Used only when saving movie files)</i><br>
Select the movie format to use when saving movies. AVI and MOV
store images from successive image sets as movie frames. TIF
stores each image as an image plane in a TIF stack.
""")
self.pathname = SaveImagesDirectoryPath(
"Output file location", self.file_image_name,doc = """
<i>(Used only when saving non-movie files)</i><br>
This setting lets you choose the folder for the output
files. %(IO_FOLDER_CHOICE_HELP_TEXT)s
<p>An additional option is the following:
<ul>
<li><i>Same folder as image</i>: Place the output file in the same folder
that the source image is located.</li>
</ul></p>
<p>%(IO_WITH_METADATA_HELP_TEXT)s %(USING_METADATA_TAGS_REF)s.
For instance, if you have a metadata tag named
"Plate", you can create a per-plate folder by selecting one the subfolder options
and then specifying the subfolder name as "\g<Plate>". The module will
substitute the metadata values for the current image set for any metadata tags in the
folder name.%(USING_METADATA_HELP_REF)s.</p>
<p>If the subfolder does not exist when the pipeline is run, CellProfiler will
create it.</p>
<p>If you are creating nested subfolders using the sub-folder options, you can
specify the additional folders separated with slashes. For example, "Outlines/Plate1" will create
a "Plate1" folder in the "Outlines" folder, which in turn is under the Default
Input/Output Folder. The use of a forward slash ("/") as a folder separator will
avoid ambiguity between the various operating systems.</p>"""%globals())
# TODO:
self.bit_depth = cps.Choice(
"Image bit depth",
[BIT_DEPTH_8, BIT_DEPTH_16, BIT_DEPTH_FLOAT],doc="""
<i>(Used only when saving files in a non-MAT format)</i><br>
Select the bit-depth at which you want to save the images.
<i>%(BIT_DEPTH_FLOAT)s</i> saves the image as floating-point decimals
with 32-bit precision in its raw form, typically scaled between
0 and 1.
<b>%(BIT_DEPTH_16)s and %(BIT_DEPTH_FLOAT)s images are supported only
for TIF formats. Currently, saving images in 12-bit is not supported.</b>""" %
globals())
self.overwrite = cps.Binary(
"Overwrite existing files without warning?",False,doc="""
Select <i>%(YES)s</i> to automatically overwrite a file if it already exists.
Select <i>%(NO)s</i> to be prompted for confirmation first.
<p>If you are running the pipeline on a computing cluster,
select <i>%(YES)s</i> since you will not be able to intervene and answer the confirmation prompt.</p>"""%globals())
self.when_to_save = cps.Choice(
"When to save",
[WS_EVERY_CYCLE,WS_FIRST_CYCLE,WS_LAST_CYCLE],
WS_EVERY_CYCLE, doc="""<a name='when_to_save'>
<i>(Used only when saving non-movie files)</i><br>
Specify at what point during pipeline execution to save file(s). </a>
<ul>
<li><i>%(WS_EVERY_CYCLE)s:</i> Useful for when the image of interest is created every cycle and is
not dependent on results from a prior cycle.</li>
<li><i>%(WS_FIRST_CYCLE)s:</i> Useful for when you are saving an aggregate image created
on the first cycle, e.g., <b>CorrectIlluminationCalculate</b> with the <i>All</i>
setting used on images obtained directly from <b>NamesAndTypes</b>.</li>
<li><i>%(WS_LAST_CYCLE)s</i> Useful for when you are saving an aggregate image completed
on the last cycle, e.g., <b>CorrectIlluminationCalculate</b> with the <i>All</i>
setting used on intermediate images generated during each cycle.</li>
</ul> """%globals())
self.rescale = cps.Binary(
"Rescale the images? ",False,doc="""
<i>(Used only when saving non-MAT file images)</i><br>
Select <i>%(YES)s</i> if you want the image to occupy the full dynamic range of the bit
depth you have chosen. For example, if you save an image to an 8-bit file, the
smallest grayscale value will be mapped to 0 and the largest value will be mapped
to 2<sup>8</sup>-1 = 255.
<p>This will increase the contrast of the output image but will also effectively
stretch the image data, which may not be desirable in some
circumstances. See <b>RescaleIntensity</b> for other rescaling options.</p>"""%globals())
self.gray_or_color = cps.Choice(
"Save as grayscale or color image?",
[GC_GRAYSCALE, GC_COLOR],doc = """
<i>(Used only when saving "%(IF_OBJECTS)s")</i><br>
You can save objects as a grayscale image or as a color image.
<ul>
<li><i>%(GC_GRAYSCALE)s: </i> Use the pixel's object number
(label) for the grayscale intensity. Background pixels are
colored black. Grayscale images are more
suitable if you are going to load the image as objects using
<b>NamesAndTypes</b> or some other program that will be used to
relate object measurements to the pixels in the image.
You should save grayscale images using the .TIF or .MAT formats
if possible; otherwise you may have problems saving files
with more than 255 objects.</li>
<li><i>%(GC_COLOR)s:</i> Assigns different colors to different
objects.</li>
</ul>"""%globals())
self.colormap = cps.Colormap(
'Select colormap',
value = CM_GRAY,doc= """
<i>(Used only when saving non-MAT file images)</i><br>
This affects how images color intensities are displayed. All available colormaps can be seen
<a href="http://www.scipy.org/Cookbook/Matplotlib/Show_colormaps">here</a>.""")
self.update_file_names = cps.Binary(
"Record the file and path information to the saved image?",False,doc="""
            Select <i>%(YES)s</i> to store filename and pathname data for each of the new files created
via this module as a per-image measurement.
<p>Instances in which this information may be useful include:
<ul>
<li>Exporting measurements to a database, allowing
access to the saved image. If you are using the machine-learning tools or image
viewer in CellProfiler Analyst, for example, you will want to enable this setting if you want
the saved images to be displayed along with the original images.</li>
<li>Allowing downstream modules (e.g., <b>CreateWebPage</b>) to access
the newly saved files.</li>
</ul></p>"""%globals())
self.create_subdirectories = cps.Binary(
"Create subfolders in the output folder?",False,doc = """
Select <i>%(YES)s</i> to create subfolders to match the input image folder structure."""%globals())
self.root_dir = cps.DirectoryPath(
"Base image folder", doc = """
            <i>(Used only if creating subfolders in the output folder)</i><br>
In subfolder mode, <b>SaveImages</b> determines the folder for
an image file by examining the path of the matching input file.
The path that SaveImages uses is relative to the image folder
chosen using this setting. As an example, input images might be stored
in a folder structure of "images%(sep)s<i>experiment-name</i>%(sep)s
<i>date</i>%(sep)s<i>plate-name</i>". If the image folder is
"images", <b>SaveImages</b> will store images in the subfolder,
"<i>experiment-name</i>%(sep)s<i>date</i>%(sep)s<i>plate-name</i>".
If the image folder is "images%(sep)s<i>experiment-name</i>",
<b>SaveImages</b> will store images in the subfolder,
<i>date</i>%(sep)s<i>plate-name</i>".
""" % dict(sep=os.path.sep))
def settings(self):
"""Return the settings in the order to use when saving"""
return [self.save_image_or_figure, self.image_name,
self.objects_name, self.figure_name,
self.file_name_method, self.file_image_name,
self.single_file_name, self.number_of_digits,
self.wants_file_name_suffix,
self.file_name_suffix, self.file_format,
self.pathname, self.bit_depth,
self.overwrite, self.when_to_save,
self.rescale, self.gray_or_color, self.colormap,
self.update_file_names, self.create_subdirectories,
self.root_dir, self.movie_format]
def visible_settings(self):
"""Return only the settings that should be shown"""
result = [self.save_image_or_figure]
if self.save_image_or_figure == IF_FIGURE:
result.append(self.figure_name)
elif self.save_image_or_figure == IF_OBJECTS:
result.append(self.objects_name)
else:
result.append(self.image_name)
result.append(self.file_name_method)
if self.file_name_method == FN_FROM_IMAGE:
result += [self.file_image_name, self.wants_file_name_suffix]
if self.wants_file_name_suffix:
result.append(self.file_name_suffix)
elif self.file_name_method == FN_SEQUENTIAL:
self.single_file_name.text = SEQUENTIAL_NUMBER_TEXT
# XXX - Change doc, as well!
result.append(self.single_file_name)
result.append(self.number_of_digits)
elif self.file_name_method == FN_SINGLE_NAME:
self.single_file_name.text = SINGLE_NAME_TEXT
result.append(self.single_file_name)
else:
raise NotImplementedError("Unhandled file name method: %s"%(self.file_name_method))
if self.save_image_or_figure == IF_MOVIE:
result.append(self.movie_format)
else:
result.append(self.file_format)
supports_16_bit = (self.file_format in FF_SUPPORTING_16_BIT and
self.save_image_or_figure == IF_IMAGE)
if supports_16_bit:
# TIFF supports 8 & 16-bit, all others are written 8-bit
result.append(self.bit_depth)
result.append(self.pathname)
result.append(self.overwrite)
if self.save_image_or_figure != IF_MOVIE:
result.append(self.when_to_save)
if (self.save_image_or_figure == IF_IMAGE and
self.file_format != FF_MAT):
result.append(self.rescale)
if self.get_bit_depth() == "8":
result.append(self.colormap)
elif self.save_image_or_figure == IF_OBJECTS:
result.append(self.gray_or_color)
if self.gray_or_color == GC_COLOR:
result.append(self.colormap)
result.append(self.update_file_names)
if self.file_name_method == FN_FROM_IMAGE:
result.append(self.create_subdirectories)
if self.create_subdirectories:
result.append(self.root_dir)
return result
@property
def module_key(self):
return "%s_%d"%(self.module_name, self.module_num)
def prepare_group(self, workspace, grouping, image_numbers):
d = self.get_dictionary(workspace.image_set_list)
if self.save_image_or_figure == IF_MOVIE:
d['N_FRAMES'] = len(image_numbers)
d['CURRENT_FRAME'] = 0
return True
def prepare_to_create_batch(self, workspace, fn_alter_path):
self.pathname.alter_for_create_batch_files(fn_alter_path)
if self.create_subdirectories:
self.root_dir.alter_for_create_batch_files(fn_alter_path)
def run(self,workspace):
"""Run the module
pipeline - instance of CellProfiler.Pipeline for this run
workspace - the workspace contains:
image_set - the images in the image set being processed
object_set - the objects (labeled masks) in this image set
measurements - the measurements for this run
frame - display within this frame (or None to not display)
"""
if self.save_image_or_figure.value in (IF_IMAGE, IF_MASK, IF_CROPPING):
should_save = self.run_image(workspace)
elif self.save_image_or_figure == IF_MOVIE:
should_save = self.run_movie(workspace)
elif self.save_image_or_figure == IF_OBJECTS:
should_save = self.run_objects(workspace)
else:
raise NotImplementedError(("Saving a %s is not yet supported"%
(self.save_image_or_figure)))
workspace.display_data.filename = self.get_filename(
workspace, make_dirs = False, check_overwrite = False)
def is_aggregation_module(self):
'''SaveImages is an aggregation module when it writes movies'''
return self.save_image_or_figure == IF_MOVIE or \
self.when_to_save == WS_LAST_CYCLE
def display(self, workspace, figure):
if self.show_window:
if self.save_image_or_figure == IF_MOVIE:
return
figure.set_subplots((1, 1))
outcome = ("Wrote %s" if workspace.display_data.wrote_image
else "Did not write %s")
figure.subplot_table(0, 0, [[outcome %
(workspace.display_data.filename)]])
def run_image(self,workspace):
"""Handle saving an image"""
#
# First, check to see if we should save this image
#
if self.when_to_save == WS_FIRST_CYCLE:
d = self.get_dictionary(workspace.image_set_list)
if workspace.measurements[cpmeas.IMAGE, cpmeas.GROUP_INDEX] > 1:
workspace.display_data.wrote_image = False
self.save_filename_measurements(workspace)
return
d["FIRST_IMAGE"] = False
elif self.when_to_save == WS_LAST_CYCLE:
workspace.display_data.wrote_image = False
self.save_filename_measurements( workspace)
return
self.save_image(workspace)
return True
def run_movie(self, workspace):
out_file = self.get_filename(workspace, check_overwrite=False)
# overwrite checks are made only for first frame.
d = self.get_dictionary(workspace.image_set_list)
if d["CURRENT_FRAME"] == 0 and os.path.exists(out_file):
if not self.check_overwrite(out_file, workspace):
d["CURRENT_FRAME"] = "Ignore"
return
else:
# Have to delete the old movie before making the new one
os.remove(out_file)
elif d["CURRENT_FRAME"] == "Ignore":
return
image = workspace.image_set.get_image(self.image_name.value)
pixels = image.pixel_data
pixels = pixels * 255
frames = d['N_FRAMES']
current_frame = d["CURRENT_FRAME"]
d["CURRENT_FRAME"] += 1
self.do_save_image(workspace, out_file, pixels, ome.PT_UINT8,
t = current_frame, size_t = frames)
def run_objects(self, workspace):
#
# First, check to see if we should save this image
#
if self.when_to_save == WS_FIRST_CYCLE:
if workspace.measurements[cpmeas.IMAGE, cpmeas.GROUP_INDEX] > 1:
workspace.display_data.wrote_image = False
self.save_filename_measurements(workspace)
return
elif self.when_to_save == WS_LAST_CYCLE:
workspace.display_data.wrote_image = False
self.save_filename_measurements( workspace)
return
self.save_objects(workspace)
def save_objects(self, workspace):
objects_name = self.objects_name.value
objects = workspace.object_set.get_objects(objects_name)
filename = self.get_filename(workspace)
if filename is None: # failed overwrite check
return
labels = [l for l, c in objects.get_labels()]
if self.get_file_format() == FF_MAT:
pixels = objects.segmented
scipy.io.matlab.mio.savemat(filename,{"Image":pixels},format='5')
elif self.gray_or_color == GC_GRAYSCALE:
if objects.count > 255:
pixel_type = ome.PT_UINT16
else:
pixel_type = ome.PT_UINT8
for i, l in enumerate(labels):
self.do_save_image(
workspace, filename, l, pixel_type, t=i, size_t=len(labels))
else:
if self.colormap == cps.DEFAULT:
colormap = cpp.get_default_colormap()
else:
colormap = self.colormap.value
cm = matplotlib.cm.get_cmap(colormap)
cpixels = np.zeros((labels[0].shape[0], labels[0].shape[1], 3))
counts = np.zeros(labels[0].shape, int)
mapper = matplotlib.cm.ScalarMappable(cmap=cm)
for pixels in labels:
cpixels[pixels != 0, :] += \
mapper.to_rgba(distance_color_labels(pixels),
bytes=True)[pixels != 0, :3]
counts[pixels != 0] += 1
counts[counts == 0] = 1
cpixels = cpixels / counts[:, :, np.newaxis]
self.do_save_image(workspace, filename, cpixels, ome.PT_UINT8)
self.save_filename_measurements(workspace)
if self.show_window:
workspace.display_data.wrote_image = True
def post_group(self, workspace, *args):
if (self.when_to_save == WS_LAST_CYCLE and
self.save_image_or_figure != IF_MOVIE):
if self.save_image_or_figure == IF_OBJECTS:
self.save_objects(workspace)
else:
self.save_image(workspace)
def do_save_image(self, workspace, filename, pixels, pixel_type,
c = 0, z = 0, t = 0,
size_c = 1, size_z = 1, size_t = 1,
channel_names = None):
'''Save image using bioformats
workspace - the current workspace
filename - save to this filename
pixels - the image to save
pixel_type - save using this pixel type
c - the image's channel index
z - the image's z index
t - the image's t index
        size_c - # of channels in the stack
        size_z - # of z planes in the stack
        size_t - # of timepoints in the stack
        channel_names - names of the channels (names are made up if not given)
'''
write_image(filename, pixels, pixel_type,
c = c, z = z, t = t,
size_c = size_c, size_z = size_z, size_t = size_t,
channel_names = channel_names)
def save_image(self, workspace):
if self.show_window:
workspace.display_data.wrote_image = False
image = workspace.image_set.get_image(self.image_name.value)
if self.save_image_or_figure == IF_IMAGE:
pixels = image.pixel_data
u16hack = (self.get_bit_depth() == BIT_DEPTH_16 and
pixels.dtype.kind in ('u', 'i'))
if self.file_format != FF_MAT:
if self.rescale.value:
pixels = pixels.copy()
# Normalize intensities for each channel
if pixels.ndim == 3:
# RGB
for i in range(3):
img_min = np.min(pixels[:,:,i])
img_max = np.max(pixels[:,:,i])
if img_max > img_min:
pixels[:,:,i] = (pixels[:,:,i] - img_min) / (img_max - img_min)
else:
# Grayscale
img_min = np.min(pixels)
img_max = np.max(pixels)
if img_max > img_min:
pixels = (pixels - img_min) / (img_max - img_min)
elif not (u16hack or self.get_bit_depth() == BIT_DEPTH_FLOAT):
# Clip at 0 and 1
if np.max(pixels) > 1 or np.min(pixels) < 0:
sys.stderr.write(
"Warning, clipping image %s before output. Some intensities are outside of range 0-1" %
self.image_name.value)
pixels = pixels.copy()
pixels[pixels < 0] = 0
pixels[pixels > 1] = 1
if pixels.ndim == 2 and self.colormap != CM_GRAY and\
self.get_bit_depth() == BIT_DEPTH_8:
# Convert grayscale image to rgb for writing
if self.colormap == cps.DEFAULT:
colormap = cpp.get_default_colormap()
else:
colormap = self.colormap.value
cm = matplotlib.cm.get_cmap(colormap)
mapper = matplotlib.cm.ScalarMappable(cmap=cm)
pixels = mapper.to_rgba(pixels, bytes=True)
pixel_type = ome.PT_UINT8
elif self.get_bit_depth() == BIT_DEPTH_8:
pixels = (pixels*255).astype(np.uint8)
pixel_type = ome.PT_UINT8
elif self.get_bit_depth() == BIT_DEPTH_FLOAT:
pixel_type = ome.PT_FLOAT
else:
if not u16hack:
pixels = (pixels*65535)
pixel_type = ome.PT_UINT16
elif self.save_image_or_figure == IF_MASK:
pixels = image.mask.astype(np.uint8) * 255
pixel_type = ome.PT_UINT8
elif self.save_image_or_figure == IF_CROPPING:
pixels = image.crop_mask.astype(np.uint8) * 255
pixel_type = ome.PT_UINT8
filename = self.get_filename(workspace)
if filename is None: # failed overwrite check
return
if self.get_file_format() == FF_MAT:
scipy.io.matlab.mio.savemat(filename,{"Image":pixels},format='5')
elif self.get_file_format() == FF_BMP:
save_bmp(filename, pixels)
else:
self.do_save_image(workspace, filename, pixels, pixel_type)
if self.show_window:
workspace.display_data.wrote_image = True
if self.when_to_save != WS_LAST_CYCLE:
self.save_filename_measurements(workspace)
def check_overwrite(self, filename, workspace):
'''Check to see if it's legal to overwrite a file
Throws an exception if can't overwrite and no interaction available.
Returns False if can't overwrite, otherwise True.
'''
if not self.overwrite.value and os.path.isfile(filename):
try:
return (workspace.interaction_request(self, workspace.measurements.image_set_number, filename) == "Yes")
except workspace.NoInteractionException:
raise ValueError('SaveImages: trying to overwrite %s in headless mode, but Overwrite files is set to "No"' % (filename))
return True
def handle_interaction(self, image_set_number, filename):
'''handle an interaction request from check_overwrite()'''
import wx
dlg = wx.MessageDialog(wx.GetApp().TopWindow,
"%s #%d, set #%d - Do you want to overwrite %s?" % \
(self.module_name, self.module_num, image_set_number, filename),
"Warning: overwriting file", wx.YES_NO | wx.ICON_QUESTION)
result = dlg.ShowModal() == wx.ID_YES
return "Yes" if result else "No"
def save_filename_measurements(self, workspace):
if self.update_file_names.value:
filename = self.get_filename(workspace, make_dirs = False,
check_overwrite = False)
pn, fn = os.path.split(filename)
url = pathname2url(filename)
workspace.measurements.add_measurement(cpmeas.IMAGE,
self.file_name_feature,
fn,
can_overwrite=True)
workspace.measurements.add_measurement(cpmeas.IMAGE,
self.path_name_feature,
pn,
can_overwrite=True)
workspace.measurements.add_measurement(cpmeas.IMAGE,
self.url_feature,
url,
can_overwrite=True)
@property
def file_name_feature(self):
'''The file name measurement for the output file'''
if self.save_image_or_figure == IF_OBJECTS:
return '_'.join((C_OBJECTS_FILE_NAME, self.objects_name.value))
return '_'.join((C_FILE_NAME, self.image_name.value))
@property
def path_name_feature(self):
'''The path name measurement for the output file'''
if self.save_image_or_figure == IF_OBJECTS:
return '_'.join((C_OBJECTS_PATH_NAME, self.objects_name.value))
return '_'.join((C_PATH_NAME, self.image_name.value))
@property
def url_feature(self):
'''The URL measurement for the output file'''
if self.save_image_or_figure == IF_OBJECTS:
return '_'.join((C_OBJECTS_URL, self.objects_name.value))
return '_'.join((C_URL, self.image_name.value))
@property
def source_file_name_feature(self):
'''The file name measurement for the exemplar disk image'''
return '_'.join((C_FILE_NAME, self.file_image_name.value))
def source_path(self, workspace):
'''The path for the image data, or its first parent with a path'''
if self.file_name_method.value == FN_FROM_IMAGE:
path_feature = '%s_%s' % (C_PATH_NAME, self.file_image_name.value)
assert workspace.measurements.has_feature(cpmeas.IMAGE, path_feature),\
"Image %s does not have a path!" % (self.file_image_name.value)
return workspace.measurements.get_current_image_measurement(path_feature)
# ... otherwise, chase the cpimage hierarchy looking for an image with a path
cur_image = workspace.image_set.get_image(self.image_name.value)
while cur_image.path_name is None:
cur_image = cur_image.parent_image
        assert cur_image is not None, "Could not determine source path for image %s" % (self.image_name.value)
return cur_image.path_name
def get_measurement_columns(self, pipeline):
if self.update_file_names.value:
return [(cpmeas.IMAGE,
self.file_name_feature,
cpmeas.COLTYPE_VARCHAR_FILE_NAME),
(cpmeas.IMAGE,
self.path_name_feature,
cpmeas.COLTYPE_VARCHAR_PATH_NAME)]
else:
return []
def get_filename(self, workspace, make_dirs=True, check_overwrite=True):
"Concoct a filename for the current image based on the user settings"
measurements=workspace.measurements
if self.file_name_method == FN_SINGLE_NAME:
filename = self.single_file_name.value
filename = workspace.measurements.apply_metadata(filename)
elif self.file_name_method == FN_SEQUENTIAL:
filename = self.single_file_name.value
filename = workspace.measurements.apply_metadata(filename)
n_image_sets = workspace.measurements.image_set_count
ndigits = int(np.ceil(np.log10(n_image_sets+1)))
ndigits = max((ndigits,self.number_of_digits.value))
padded_num_string = str(measurements.image_set_number).zfill(ndigits)
filename = '%s%s'%(filename, padded_num_string)
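            # e.g. prefix "Orig" with 4 digits on image set 7 yields "Orig0007";
            # the pad width grows automatically if the set count needs more digits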
else:
file_name_feature = self.source_file_name_feature
filename = measurements.get_current_measurement('Image',
file_name_feature)
filename = os.path.splitext(filename)[0]
if self.wants_file_name_suffix:
suffix = self.file_name_suffix.value
suffix = workspace.measurements.apply_metadata(suffix)
filename += suffix
filename = "%s.%s"%(filename,self.get_file_format())
pathname = self.pathname.get_absolute_path(measurements)
if self.create_subdirectories:
image_path = self.source_path(workspace)
subdir = relpath(image_path, self.root_dir.get_absolute_path())
pathname = os.path.join(pathname, subdir)
if len(pathname) and not os.path.isdir(pathname) and make_dirs:
try:
os.makedirs(pathname)
except:
#
# On cluster, this can fail if the path was created by
# another process after this process found it did not exist.
#
if not os.path.isdir(pathname):
raise
result = os.path.join(pathname, filename)
if check_overwrite and not self.check_overwrite(result, workspace):
return
if check_overwrite and os.path.isfile(result):
try:
os.remove(result)
except:
import bioformats
bioformats.clear_image_reader_cache()
os.remove(result)
return result
def get_file_format(self):
"""Return the file format associated with the extension in self.file_format
"""
if self.save_image_or_figure == IF_MOVIE:
return self.movie_format.value
return self.file_format.value
def get_bit_depth(self):
if (self.save_image_or_figure == IF_IMAGE and
self.get_file_format() in FF_SUPPORTING_16_BIT):
return self.bit_depth.value
else:
return BIT_DEPTH_8
def upgrade_settings(self, setting_values, variable_revision_number,
module_name, from_matlab):
"""Adjust the setting values to be backwards-compatible with old versions
"""
PC_DEFAULT = "Default output folder"
#################################
#
# Matlab legacy
#
#################################
if from_matlab and variable_revision_number == 12:
# self.create_subdirectories.value is already False by default.
variable_revision_number = 13
        if from_matlab and variable_revision_number == 13:
            new_setting_values = list(setting_values)
            for i in [3, 12]:
                if setting_values[i] == '\\':
                    new_setting_values[i] = cps.DO_NOT_USE
            setting_values = new_setting_values
            variable_revision_number = 14
if from_matlab and variable_revision_number == 14:
new_setting_values = []
if setting_values[0].isdigit():
new_setting_values.extend([IF_FIGURE,setting_values[1]])
elif setting_values[3] == 'avi':
new_setting_values.extend([IF_MOVIE, setting_values[0]])
elif setting_values[0].startswith("Cropping"):
new_setting_values.extend([IF_CROPPING,
setting_values[0][len("Cropping"):]])
elif setting_values[0].startswith("CropMask"):
new_setting_values.extend([IF_MASK,
setting_values[0][len("CropMask"):]])
else:
new_setting_values.extend([IF_IMAGE, setting_values[0]])
new_setting_values.append(new_setting_values[1])
if setting_values[1] == 'N':
new_setting_values.extend([FN_SEQUENTIAL,"None","None"])
elif setting_values[1][0] == '=':
new_setting_values.extend([FN_SINGLE_NAME,setting_values[1][1:],
setting_values[1][1:]])
else:
if len(cpmeas.find_metadata_tokens(setting_values[1])):
new_setting_values.extend([FN_WITH_METADATA, setting_values[1],
setting_values[1]])
else:
new_setting_values.extend([FN_FROM_IMAGE, setting_values[1],
setting_values[1]])
new_setting_values.extend(setting_values[2:4])
if setting_values[4] == '.':
new_setting_values.extend([PC_DEFAULT, "None"])
elif setting_values[4] == '&':
new_setting_values.extend([PC_WITH_IMAGE, "None"])
else:
if len(cpmeas.find_metadata_tokens(setting_values[1])):
new_setting_values.extend([PC_WITH_METADATA,
setting_values[4]])
else:
new_setting_values.extend([PC_CUSTOM, setting_values[4]])
new_setting_values.extend(setting_values[5:11])
#
# Last value is there just to display some text in Matlab
#
new_setting_values.extend(setting_values[12:-1])
setting_values = new_setting_values
from_matlab = False
variable_revision_number = 1
##########################
#
# Version 2
#
##########################
if not from_matlab and variable_revision_number == 1:
# The logic of the question about overwriting was reversed.
if setting_values[11] == cps.YES:
setting_values[11] = cps.NO
else:
setting_values[11] = cps.YES
variable_revision_number = 2
#########################
#
# Version 3
#
#########################
if (not from_matlab) and variable_revision_number == 2:
# Default image/output directory -> Default Image Folder
if setting_values[8].startswith("Default output"):
setting_values = (setting_values[:8] +
[PC_DEFAULT]+ setting_values[9:])
elif setting_values[8].startswith("Same"):
setting_values = (setting_values[:8] +
[PC_WITH_IMAGE] + setting_values[9:])
variable_revision_number = 3
#########################
#
# Version 4
#
#########################
if (not from_matlab) and variable_revision_number == 3:
# Changed save type from "Figure" to "Module window"
if setting_values[0] == "Figure":
setting_values[0] = IF_FIGURE
setting_values = standardize_default_folder_names(setting_values,8)
variable_revision_number = 4
#########################
#
# Version 5
#
#########################
if (not from_matlab) and variable_revision_number == 4:
save_image_or_figure, image_name, figure_name,\
file_name_method, file_image_name, \
single_file_name, file_name_suffix, file_format, \
pathname_choice, pathname, bit_depth, \
overwrite, when_to_save, \
when_to_save_movie, rescale, colormap, \
update_file_names, create_subdirectories = setting_values
pathname = SaveImagesDirectoryPath.static_join_string(
pathname_choice, pathname)
setting_values = [
save_image_or_figure, image_name, figure_name,
file_name_method, file_image_name, single_file_name,
file_name_suffix != cps.DO_NOT_USE,
file_name_suffix, file_format,
pathname, bit_depth, overwrite, when_to_save,
rescale, colormap, update_file_names, create_subdirectories]
variable_revision_number = 5
#######################
#
# Version 6
#
#######################
if (not from_matlab) and variable_revision_number == 5:
setting_values = list(setting_values)
file_name_method = setting_values[3]
single_file_name = setting_values[5]
wants_file_suffix = setting_values[6]
file_name_suffix = setting_values[7]
if file_name_method == FN_IMAGE_FILENAME_WITH_METADATA:
file_name_suffix = single_file_name
wants_file_suffix = cps.YES
file_name_method = FN_FROM_IMAGE
elif file_name_method == FN_WITH_METADATA:
file_name_method = FN_SINGLE_NAME
setting_values[3] = file_name_method
setting_values[6] = wants_file_suffix
setting_values[7] = file_name_suffix
variable_revision_number = 6
######################
#
# Version 7 - added objects
#
######################
if (not from_matlab) and (variable_revision_number == 6):
setting_values = (
setting_values[:2] + ["None"] + setting_values[2:14] +
[ GC_GRAYSCALE ] + setting_values[14:])
variable_revision_number = 7
######################
#
# Version 8 - added root_dir
#
######################
if (not from_matlab) and (variable_revision_number == 7):
setting_values = setting_values + [DEFAULT_INPUT_FOLDER_NAME]
variable_revision_number = 8
######################
#
# Version 9 - FF_TIF now outputs .tif files (go figure), so
# change FF_TIF in settings to FF_TIFF to maintain ultimate
        # backwards compatibility.
#
######################
if (not from_matlab) and (variable_revision_number == 8):
if setting_values[9] == FF_TIF:
setting_values = setting_values[:9] + [FF_TIFF] + \
setting_values[10:]
variable_revision_number = 9
######################
#
# Version 10 - Add number of digits for sequential numbering
#
######################
if (not from_matlab) and (variable_revision_number == 9):
setting_values = setting_values[:7] + ["4"] + \
setting_values[7:]
variable_revision_number = 10
######################
#
# Version 11 - Allow selection of movie format
#
######################
if (not from_matlab) and (variable_revision_number == 10):
setting_values = setting_values + [ FF_AVI ]
variable_revision_number = 11
######################
#
# Version 11.5 - name of bit depth changed
# (can fix w/o version change)
#
######################
if variable_revision_number == 11:
bit_depth = setting_values[OFFSET_BIT_DEPTH_V11]
bit_depth = {
OLD_BIT_DEPTH_8:BIT_DEPTH_8,
OLD_BIT_DEPTH_16:BIT_DEPTH_16 }.get(bit_depth, bit_depth)
setting_values = setting_values[:OFFSET_BIT_DEPTH_V11] + \
[bit_depth] + setting_values[OFFSET_BIT_DEPTH_V11+1:]
setting_values[OFFSET_DIRECTORY_PATH] = \
SaveImagesDirectoryPath.upgrade_setting(setting_values[OFFSET_DIRECTORY_PATH])
return setting_values, variable_revision_number, from_matlab
def validate_module(self, pipeline):
if (self.save_image_or_figure in (IF_IMAGE, IF_MASK, IF_CROPPING) and
self.when_to_save in (WS_FIRST_CYCLE, WS_EVERY_CYCLE)):
#
# Make sure that the image name is available on every cycle
#
for setting in cps.get_name_providers(pipeline,
self.image_name):
if setting.provided_attributes.get(cps.AVAILABLE_ON_LAST_ATTRIBUTE):
#
# If we fell through, then you can only save on the last cycle
#
raise cps.ValidationError("%s is only available after processing all images in an image group" %
self.image_name.value,
self.when_to_save)
# XXX - should check that if file_name_method is
# FN_FROM_IMAGE, that the named image actually has the
# required path measurement
# Make sure metadata tags exist
if self.file_name_method == FN_SINGLE_NAME or \
(self.file_name_method == FN_FROM_IMAGE and self.wants_file_name_suffix.value):
text_str = self.single_file_name.value if self.file_name_method == FN_SINGLE_NAME else self.file_name_suffix.value
undefined_tags = pipeline.get_undefined_metadata_tags(text_str)
if len(undefined_tags) > 0:
raise cps.ValidationError("%s is not a defined metadata tag. Check the metadata specifications in your load modules" %
undefined_tags[0],
self.single_file_name if self.file_name_method == FN_SINGLE_NAME else self.file_name_suffix)
class SaveImagesDirectoryPath(cps.DirectoryPath):
'''A specialized version of DirectoryPath to handle saving in the image dir'''
def __init__(self, text, file_image_name, doc):
'''Constructor
text - explanatory text to display
file_image_name - the file_image_name setting so we can save in same dir
doc - documentation for user
'''
super(SaveImagesDirectoryPath, self).__init__(
text, dir_choices = [
cps.DEFAULT_OUTPUT_FOLDER_NAME, cps.DEFAULT_INPUT_FOLDER_NAME,
PC_WITH_IMAGE, cps.ABSOLUTE_FOLDER_NAME,
cps.DEFAULT_OUTPUT_SUBFOLDER_NAME,
cps.DEFAULT_INPUT_SUBFOLDER_NAME], doc=doc)
self.file_image_name = file_image_name
def get_absolute_path(self, measurements=None, image_set_index=None):
if self.dir_choice == PC_WITH_IMAGE:
path_name_feature = "PathName_%s" % self.file_image_name.value
return measurements.get_current_image_measurement(path_name_feature)
return super(SaveImagesDirectoryPath, self).get_absolute_path(
measurements, image_set_index)
def test_valid(self, pipeline):
if self.dir_choice not in self.dir_choices:
raise cps.ValidationError("%s is not a valid directory option" %
self.dir_choice, self)
@staticmethod
def upgrade_setting(value):
'''Upgrade setting from previous version'''
dir_choice, custom_path = cps.DirectoryPath.split_string(value)
if dir_choice in OLD_PC_WITH_IMAGE_VALUES:
dir_choice = PC_WITH_IMAGE
elif dir_choice in (PC_CUSTOM, PC_WITH_METADATA):
if custom_path.startswith('.'):
dir_choice = cps.DEFAULT_OUTPUT_SUBFOLDER_NAME
elif custom_path.startswith('&'):
dir_choice = cps.DEFAULT_INPUT_SUBFOLDER_NAME
custom_path = '.' + custom_path[1:]
else:
dir_choice = cps.ABSOLUTE_FOLDER_NAME
else:
return cps.DirectoryPath.upgrade_setting(value)
return cps.DirectoryPath.static_join_string(dir_choice, custom_path)
def save_bmp(path, img):
'''Save an image as a Microsoft .bmp file
path - path to file to save
img - either a 2d, uint8 image or a 2d + 3 plane uint8 RGB color image
Saves file as an uncompressed 8-bit or 24-bit .bmp image
'''
#
# Details from
# http://en.wikipedia.org/wiki/BMP_file_format#cite_note-DIBHeaderTypes-3
#
# BITMAPFILEHEADER
# http://msdn.microsoft.com/en-us/library/dd183374(v=vs.85).aspx
#
# BITMAPINFOHEADER
# http://msdn.microsoft.com/en-us/library/dd183376(v=vs.85).aspx
#
BITMAPINFOHEADER_SIZE = 40
img = img.astype(np.uint8)
w = img.shape[1]
h = img.shape[0]
#
# Convert RGB to interleaved
#
if img.ndim == 3:
rgb = True
#
        # Compute padded raster length (BMP rows are padded to 4-byte boundaries)
#
raster_length = (w * 3 + 3) & ~ 3
tmp = np.zeros((h, raster_length), np.uint8)
#
        # BMP stores pixel channels in BGR order, so swap R and B here
#
tmp[:, 2:(w*3):3] = img[:, :, 0]
tmp[:, 1:(w*3):3] = img[:, :, 1]
tmp[:, 0:(w*3):3] = img[:, :, 2]
img = tmp
else:
rgb = False
if w % 4 != 0:
raster_length = (w + 3) & ~ 3
tmp = np.zeros((h, raster_length), np.uint8)
tmp[:, :w] = img
img = tmp
#
# The image is upside-down in .BMP
#
bmp = np.ascontiguousarray(np.flipud(img)).data
with open(path, "wb") as fd:
def write2(value):
'''write a two-byte little-endian value to the file'''
fd.write(np.array([value], "<u2").data[:2])
def write4(value):
'''write a four-byte little-endian value to the file'''
fd.write(np.array([value], "<u4").data[:4])
#
# Bitmap file header (1st pass)
# byte
# 0-1 = "BM"
# 2-5 = length of file
# 6-9 = 0
# 10-13 = offset from beginning of file to bitmap bits
fd.write("BM")
length = 14 # BITMAPFILEHEADER
length += BITMAPINFOHEADER_SIZE
if not rgb:
length += 4 * 256 # 256 color table entries
hdr_length = length
length += len(bmp)
write4(length)
write4(0)
write4(hdr_length)
#
# BITMAPINFOHEADER
#
write4(BITMAPINFOHEADER_SIZE) # biSize
write4(w) # biWidth
write4(h) # biHeight
write2(1) # biPlanes = 1
write2(24 if rgb else 8) # biBitCount
write4(0) # biCompression = BI_RGB
write4(len(bmp)) # biSizeImage
write4(7200) # biXPelsPerMeter
write4(7200) # biYPelsPerMeter
        write4(0 if rgb else 256)  # biClrUsed (0 = no palette for 24-bit; 256 grayscale entries for 8-bit)
write4(0) # biClrImportant
if not rgb:
# The color table
color_table = np.column_stack(
[np.arange(256)]* 3 +
[np.zeros(256, np.uint32)]).astype(np.uint8)
fd.write(np.ascontiguousarray(color_table, np.uint8).data)
fd.write(bmp)
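# --- Illustrative smoke test for save_bmp (added for exposition; not part of
# the original module; the output paths are hypothetical). ---
if __name__ == "__main__":
    import tempfile
    # 8-bit grayscale gradient; save_bmp pads each row to a 4-byte boundary.
    gray = np.tile(np.arange(64, dtype=np.uint8) * 4, (16, 1))
    save_bmp(os.path.join(tempfile.gettempdir(), "gray_demo.bmp"), gray)
    # 24-bit color: a solid red block (stored channel-order BGR on disk).
    rgb = np.zeros((16, 16, 3), np.uint8)
    rgb[:, :, 0] = 255
    save_bmp(os.path.join(tempfile.gettempdir(), "rgb_demo.bmp"), rgb)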
| gpl-2.0 |
akki-31/ml_lab_ecsc_306 | labwork/lab2/sci-learn/linear_regression.py | 104 | 1936 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Linear Regression Example
=========================================================
This example uses only the first feature of the `diabetes` dataset, in
order to illustrate a two-dimensional plot of this regression technique. The
straight line can be seen in the plot, showing how linear regression attempts
to draw a straight line that will best minimize the residual sum of squares
between the observed responses in the dataset, and the responses predicted by
the linear approximation.
The coefficients, the residual sum of squares and the variance score are also
calculated.
"""
print(__doc__)
# Code source: Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets, linear_model
# Load the diabetes dataset
diabetes = datasets.load_diabetes()
# Use only one feature
diabetes_X = diabetes.data[:, np.newaxis, 2]
# Split the data into training/testing sets
diabetes_X_train = diabetes_X[:-20]
diabetes_X_test = diabetes_X[-20:]
# Split the targets into training/testing sets
diabetes_y_train = diabetes.target[:-20]
diabetes_y_test = diabetes.target[-20:]
# Create linear regression object
regr = linear_model.LinearRegression()
# Train the model using the training sets
regr.fit(diabetes_X_train, diabetes_y_train)
# The coefficients
print('Coefficients: \n', regr.coef_)
# The mean squared error
print("Mean squared error: %.2f"
% np.mean((regr.predict(diabetes_X_test) - diabetes_y_test) ** 2))
# Explained variance score: 1 is perfect prediction
print('Variance score: %.2f' % regr.score(diabetes_X_test, diabetes_y_test))
# Plot outputs
plt.scatter(diabetes_X_test, diabetes_y_test, color='black')
plt.plot(diabetes_X_test, regr.predict(diabetes_X_test), color='blue',
linewidth=3)
plt.xticks(())
plt.yticks(())
plt.show()
| apache-2.0 |
OpenDA-Association/OpenDA | course/exercise_double_pendulum_part1/pendulum.py | 2 | 1676 | # -*- coding: utf-8 -*-
"""
use as:
from pendulum import *
import simulation_unperturbed_results as sim
plot_movie(sim.model_time, sim.x)
Created on Mon Jul 3 2017
@author: verlaanm
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import math
import time
# Constants hard-coded here for now. TODO: make these configurable.
g=9.81
l=0.3
w=0.02
rad2deg=180./math.pi
def plot_pendulum(ax1,x,color='b'):
''' plot one state x=[th1, th2, pth1, pth2] '''
p1=patches.Rectangle((0.0,0.0),height=l,width=w,angle=rad2deg*x[0]-180.0,facecolor=color)
ax1.add_patch(p1)
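    # Elbow position: the lower end of the first rod (angles are measured
    # from the downward vertical, so th = 0 hangs straight down).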
x1=l*math.sin(x[0])
y1=-l*math.cos(x[0])
p2=patches.Rectangle((x1,y1),height=l,width=w,angle=rad2deg*x[1]-180.0,facecolor=color)
ax1.add_patch(p2)
def plot_movie(times,states,more_states=None):
    ''' plot a sequence of frames of a double pendulum, like a movie '''
fig1 = plt.figure()
ax1 = fig1.add_subplot(111, aspect='equal')
ax1.set_xlim([-2.0*l,2.0*l])
ax1.set_ylim([-2.0*l,2.0*l])
#plt.ion()
for i in range(len(times)):
ax1.clear()
plot_pendulum(ax1,states[i,:])
ax1.set_xlim([-2.0*l,2.0*l])
ax1.set_ylim([-2.0*l,2.0*l])
if(more_states is not None):
plot_pendulum(ax1,more_states[i,:],color='g')
plt.title('time = %.2f'%times[i])
plt.draw()
plt.pause(0.05)
#plt.ioff()
if __name__ == '__main__':
#only used for testing
#plot first frame only
fig1 = plt.figure()
ax1 = fig1.add_subplot(111, aspect='equal')
ax1.set_xlim([-2.0*l,2.0*l])
ax1.set_ylim([-2.0*l,2.0*l])
plot_pendulum(ax1,[0.25*math.pi,0.5*math.pi,0.0,0.0])
plt.show()
| lgpl-3.0 |
2bt/haec | index/messung/time_chart.py | 2 | 1030 | #!/usr/bin/env python
# coding=utf-8
import pylab as pl
import numpy as np
from matplotlib.legend_handler import HandlerLine2D
f = file("table3")
next(f)
next(f)
a = [map(eval,l.split()[::2]) for l in f]
a = [x for x in a if x[0] > 0 and x[3] == 25]
pl.figure(figsize=(10, 5), dpi=80)
pl.subplots_adjust(bottom=0.2, left=0.1, top=0.9, right=0.95)
hm = {}
for i, q in enumerate(sorted(set((x[0], x[1]) for x in a))):
X = [x[2] for x in a if tuple(x[:2]) == q]
Y = [x[4] for x in a if tuple(x[:2]) == q]
    l, = pl.plot(X, Y, "pos*hd"[i], label="%d core%s, %d thread%s" % (q[0], "s"*(q[0]!=1), q[1] + 1, "s"*(q[1]>0)))
hm[l] = HandlerLine2D(numpoints=1)
xticks = X
pl.xlabel(u"Taktfrequenz in MHz")
pl.ylabel(u"Ausführungszeit in s")
pl.legend(loc='upper right', prop={"size": 12}, handler_map=hm)
pl.grid(True, which='major')
pl.xticks(xticks, [240, '', '', '', 360, '', '', 480, '', 600, '', '', '', 720, '', '', 816, '', 912, '', 1008])
#pl.xlim(200, 1008 + 40)
pl.ylim(0, 100)
pl.savefig("cubie-time.pdf")
pl.show()
| mit |
bundgus/python-playground | matplotlib-playground/examples/pylab_examples/multipage_pdf.py | 1 | 1626 | """
This is a demo of creating a pdf file with several pages,
as well as adding metadata and annotations to pdf files.
"""
import datetime
import numpy as np
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.pyplot as plt
# Create the PdfPages object to which we will save the pages:
# The with statement makes sure that the PdfPages object is closed properly at
# the end of the block, even if an Exception occurs.
with PdfPages('multipage_pdf.pdf') as pdf:
plt.figure(figsize=(3, 3))
plt.plot(range(7), [3, 1, 4, 1, 5, 9, 2], 'r-o')
plt.title('Page One')
pdf.savefig() # saves the current figure into a pdf page
plt.close()
plt.rc('text', usetex=True)
plt.figure(figsize=(8, 6))
x = np.arange(0, 5, 0.1)
plt.plot(x, np.sin(x), 'b-')
plt.title('Page Two')
pdf.attach_note("plot of sin(x)") # you can add a pdf note to
# attach metadata to a page
pdf.savefig()
plt.close()
plt.rc('text', usetex=False)
fig = plt.figure(figsize=(4, 5))
plt.plot(x, x*x, 'ko')
plt.title('Page Three')
pdf.savefig(fig) # or you can pass a Figure object to pdf.savefig
plt.close()
# We can also set the file's metadata via the PdfPages object:
d = pdf.infodict()
d['Title'] = 'Multipage PDF Example'
d['Author'] = u'Jouni K. Sepp\xe4nen'
d['Subject'] = 'How to create a multipage pdf file and set its metadata'
d['Keywords'] = 'PdfPages multipage keywords author title subject'
d['CreationDate'] = datetime.datetime(2009, 11, 13)
d['ModDate'] = datetime.datetime.today()
| mit |
hehongliang/tensorflow | tensorflow/contrib/learn/python/learn/learn_io/data_feeder_test.py | 25 | 13554 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `DataFeeder`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
# pylint: disable=wildcard-import
from tensorflow.contrib.learn.python.learn.learn_io import *
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.lib.io import file_io
from tensorflow.python.platform import test
# pylint: enable=wildcard-import
class DataFeederTest(test.TestCase):
# pylint: disable=undefined-variable
"""Tests for `DataFeeder`."""
def setUp(self):
self._base_dir = os.path.join(self.get_temp_dir(), 'base_dir')
file_io.create_dir(self._base_dir)
def tearDown(self):
file_io.delete_recursively(self._base_dir)
def _wrap_dict(self, data, prepend=''):
return {prepend + '1': data, prepend + '2': data}
def _assert_raises(self, input_data):
with self.assertRaisesRegexp(TypeError, 'annot convert'):
data_feeder.DataFeeder(input_data, None, n_classes=0, batch_size=1)
def _assert_dtype(self, expected_np_dtype, expected_tf_dtype, input_data):
feeder = data_feeder.DataFeeder(input_data, None, n_classes=0, batch_size=1)
if isinstance(input_data, dict):
for v in list(feeder.input_dtype.values()):
self.assertEqual(expected_np_dtype, v)
else:
self.assertEqual(expected_np_dtype, feeder.input_dtype)
with ops.Graph().as_default() as g, self.session(g):
inp, _ = feeder.input_builder()
if isinstance(inp, dict):
for v in list(inp.values()):
self.assertEqual(expected_tf_dtype, v.dtype)
else:
self.assertEqual(expected_tf_dtype, inp.dtype)
def test_input_int8(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.int8)
self._assert_dtype(np.int8, dtypes.int8, data)
self._assert_dtype(np.int8, dtypes.int8, self._wrap_dict(data))
def test_input_int16(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.int16)
self._assert_dtype(np.int16, dtypes.int16, data)
self._assert_dtype(np.int16, dtypes.int16, self._wrap_dict(data))
def test_input_int32(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.int32)
self._assert_dtype(np.int32, dtypes.int32, data)
self._assert_dtype(np.int32, dtypes.int32, self._wrap_dict(data))
def test_input_int64(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.int64)
self._assert_dtype(np.int64, dtypes.int64, data)
self._assert_dtype(np.int64, dtypes.int64, self._wrap_dict(data))
def test_input_uint32(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.uint32)
self._assert_dtype(np.uint32, dtypes.uint32, data)
self._assert_dtype(np.uint32, dtypes.uint32, self._wrap_dict(data))
def test_input_uint64(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.uint64)
self._assert_dtype(np.uint64, dtypes.uint64, data)
self._assert_dtype(np.uint64, dtypes.uint64, self._wrap_dict(data))
def test_input_uint8(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.uint8)
self._assert_dtype(np.uint8, dtypes.uint8, data)
self._assert_dtype(np.uint8, dtypes.uint8, self._wrap_dict(data))
def test_input_uint16(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.uint16)
self._assert_dtype(np.uint16, dtypes.uint16, data)
self._assert_dtype(np.uint16, dtypes.uint16, self._wrap_dict(data))
def test_input_float16(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.float16)
self._assert_dtype(np.float16, dtypes.float16, data)
self._assert_dtype(np.float16, dtypes.float16, self._wrap_dict(data))
def test_input_float32(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.float32)
self._assert_dtype(np.float32, dtypes.float32, data)
self._assert_dtype(np.float32, dtypes.float32, self._wrap_dict(data))
def test_input_float64(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.float64)
self._assert_dtype(np.float64, dtypes.float64, data)
self._assert_dtype(np.float64, dtypes.float64, self._wrap_dict(data))
def test_input_bool(self):
data = np.array([[False for _ in xrange(2)] for _ in xrange(2)])
self._assert_dtype(np.bool, dtypes.bool, data)
self._assert_dtype(np.bool, dtypes.bool, self._wrap_dict(data))
def test_input_string(self):
input_data = np.array([['str%d' % i for i in xrange(2)] for _ in xrange(2)])
self._assert_dtype(input_data.dtype, dtypes.string, input_data)
self._assert_dtype(input_data.dtype, dtypes.string,
self._wrap_dict(input_data))
def _assertAllClose(self, src, dest, src_key_of=None, src_prop=None):
def func(x):
val = getattr(x, src_prop) if src_prop else x
return val if src_key_of is None else src_key_of[val]
if isinstance(src, dict):
for k in list(src.keys()):
self.assertAllClose(func(src[k]), dest)
else:
self.assertAllClose(func(src), dest)
def test_unsupervised(self):
def func(feeder):
with self.cached_session():
inp, _ = feeder.input_builder()
feed_dict_fn = feeder.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self._assertAllClose(inp, [[1, 2]], feed_dict, 'name')
data = np.matrix([[1, 2], [2, 3], [3, 4]])
func(data_feeder.DataFeeder(data, None, n_classes=0, batch_size=1))
func(
data_feeder.DataFeeder(
self._wrap_dict(data), None, n_classes=0, batch_size=1))
def test_data_feeder_regression(self):
def func(df):
inp, out = df.input_builder()
feed_dict_fn = df.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self._assertAllClose(inp, [[3, 4], [1, 2]], feed_dict, 'name')
self._assertAllClose(out, [2, 1], feed_dict, 'name')
x = np.matrix([[1, 2], [3, 4]])
y = np.array([1, 2])
func(data_feeder.DataFeeder(x, y, n_classes=0, batch_size=3))
func(
data_feeder.DataFeeder(
self._wrap_dict(x, 'in'),
self._wrap_dict(y, 'out'),
n_classes=self._wrap_dict(0, 'out'),
batch_size=3))
def test_epoch(self):
def func(feeder):
with self.cached_session():
feeder.input_builder()
epoch = feeder.make_epoch_variable()
feed_dict_fn = feeder.get_feed_dict_fn()
# First input
feed_dict = feed_dict_fn()
self.assertAllClose(feed_dict[epoch.name], [0])
# Second input
feed_dict = feed_dict_fn()
self.assertAllClose(feed_dict[epoch.name], [0])
# Third input
feed_dict = feed_dict_fn()
self.assertAllClose(feed_dict[epoch.name], [0])
# Back to the first input again, so new epoch.
feed_dict = feed_dict_fn()
self.assertAllClose(feed_dict[epoch.name], [1])
data = np.matrix([[1, 2], [2, 3], [3, 4]])
labels = np.array([0, 0, 1])
func(data_feeder.DataFeeder(data, labels, n_classes=0, batch_size=1))
func(
data_feeder.DataFeeder(
self._wrap_dict(data, 'in'),
self._wrap_dict(labels, 'out'),
n_classes=self._wrap_dict(0, 'out'),
batch_size=1))
def test_data_feeder_multioutput_regression(self):
def func(df):
inp, out = df.input_builder()
feed_dict_fn = df.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self._assertAllClose(inp, [[3, 4], [1, 2]], feed_dict, 'name')
self._assertAllClose(out, [[3, 4], [1, 2]], feed_dict, 'name')
x = np.matrix([[1, 2], [3, 4]])
y = np.array([[1, 2], [3, 4]])
func(data_feeder.DataFeeder(x, y, n_classes=0, batch_size=2))
func(
data_feeder.DataFeeder(
self._wrap_dict(x, 'in'),
self._wrap_dict(y, 'out'),
n_classes=self._wrap_dict(0, 'out'),
batch_size=2))
def test_data_feeder_multioutput_classification(self):
def func(df):
inp, out = df.input_builder()
feed_dict_fn = df.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self._assertAllClose(inp, [[3, 4], [1, 2]], feed_dict, 'name')
self._assertAllClose(
out, [[[0, 0, 1, 0, 0], [0, 0, 0, 1, 0], [0, 0, 0, 0, 1]],
[[1, 0, 0, 0, 0], [0, 1, 0, 0, 0], [0, 0, 1, 0, 0]]], feed_dict,
'name')
x = np.matrix([[1, 2], [3, 4]])
y = np.array([[0, 1, 2], [2, 3, 4]])
func(data_feeder.DataFeeder(x, y, n_classes=5, batch_size=2))
func(
data_feeder.DataFeeder(
self._wrap_dict(x, 'in'),
self._wrap_dict(y, 'out'),
n_classes=self._wrap_dict(5, 'out'),
batch_size=2))
def test_streaming_data_feeder(self):
def func(df):
inp, out = df.input_builder()
feed_dict_fn = df.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self._assertAllClose(inp, [[[1, 2]], [[3, 4]]], feed_dict, 'name')
self._assertAllClose(out, [[[1], [2]], [[2], [2]]], feed_dict, 'name')
def x_iter(wrap_dict=False):
yield np.array([[1, 2]]) if not wrap_dict else self._wrap_dict(
np.array([[1, 2]]), 'in')
yield np.array([[3, 4]]) if not wrap_dict else self._wrap_dict(
np.array([[3, 4]]), 'in')
def y_iter(wrap_dict=False):
yield np.array([[1], [2]]) if not wrap_dict else self._wrap_dict(
np.array([[1], [2]]), 'out')
yield np.array([[2], [2]]) if not wrap_dict else self._wrap_dict(
np.array([[2], [2]]), 'out')
func(
data_feeder.StreamingDataFeeder(
x_iter(), y_iter(), n_classes=0, batch_size=2))
func(
data_feeder.StreamingDataFeeder(
x_iter(True),
y_iter(True),
n_classes=self._wrap_dict(0, 'out'),
batch_size=2))
# Test non-full batches.
func(
data_feeder.StreamingDataFeeder(
x_iter(), y_iter(), n_classes=0, batch_size=10))
func(
data_feeder.StreamingDataFeeder(
x_iter(True),
y_iter(True),
n_classes=self._wrap_dict(0, 'out'),
batch_size=10))
def test_dask_data_feeder(self):
if HAS_PANDAS and HAS_DASK:
x = pd.DataFrame(
dict(
a=np.array([.1, .3, .4, .6, .2, .1, .6]),
b=np.array([.7, .8, .1, .2, .5, .3, .9])))
x = dd.from_pandas(x, npartitions=2)
y = pd.DataFrame(dict(labels=np.array([1, 0, 2, 1, 0, 1, 2])))
y = dd.from_pandas(y, npartitions=2)
# TODO(ipolosukhin): Remove or restore this.
# x = extract_dask_data(x)
# y = extract_dask_labels(y)
df = data_feeder.DaskDataFeeder(x, y, n_classes=2, batch_size=2)
inp, out = df.input_builder()
feed_dict_fn = df.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self.assertAllClose(feed_dict[inp.name], [[0.40000001, 0.1],
[0.60000002, 0.2]])
self.assertAllClose(feed_dict[out.name], [[0., 0., 1.], [0., 1., 0.]])
# TODO(rohanj): Fix this test by fixing data_feeder. Currently, h5py doesn't
# support permutation based indexing lookups (More documentation at
# http://docs.h5py.org/en/latest/high/dataset.html#fancy-indexing)
def DISABLED_test_hdf5_data_feeder(self):
def func(df):
inp, out = df.input_builder()
feed_dict_fn = df.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self._assertAllClose(inp, [[3, 4], [1, 2]], feed_dict, 'name')
      self._assertAllClose(out, [2, 1], feed_dict, 'name')
try:
import h5py # pylint: disable=g-import-not-at-top
x = np.matrix([[1, 2], [3, 4]])
y = np.array([1, 2])
file_path = os.path.join(self._base_dir, 'test_hdf5.h5')
h5f = h5py.File(file_path, 'w')
h5f.create_dataset('x', data=x)
h5f.create_dataset('y', data=y)
h5f.close()
h5f = h5py.File(file_path, 'r')
x = h5f['x']
y = h5f['y']
func(data_feeder.DataFeeder(x, y, n_classes=0, batch_size=3))
func(
data_feeder.DataFeeder(
self._wrap_dict(x, 'in'),
self._wrap_dict(y, 'out'),
n_classes=self._wrap_dict(0, 'out'),
batch_size=3))
except ImportError:
print("Skipped test for hdf5 since it's not installed.")
class SetupPredictDataFeederTest(DataFeederTest):
"""Tests for `DataFeeder.setup_predict_data_feeder`."""
def test_iterable_data(self):
# pylint: disable=undefined-variable
def func(df):
self._assertAllClose(six.next(df), [[1, 2], [3, 4]])
self._assertAllClose(six.next(df), [[5, 6]])
data = [[1, 2], [3, 4], [5, 6]]
x = iter(data)
x_dict = iter([self._wrap_dict(v) for v in iter(data)])
func(data_feeder.setup_predict_data_feeder(x, batch_size=2))
func(data_feeder.setup_predict_data_feeder(x_dict, batch_size=2))
if __name__ == '__main__':
test.main()
| apache-2.0 |
ashhher3/scikit-learn | sklearn/semi_supervised/label_propagation.py | 24 | 15181 | # coding=utf8
"""
Label propagation in the context of this module refers to a set of
semi-supervised classification algorithms. At a high level, these algorithms
work by forming a fully-connected graph between all given points and solving
for the steady-state distribution of labels at each point.
These algorithms perform very well in practice, but running them can be
expensive, at approximately O(N^3) where N is the number of (labeled and
unlabeled) points. The theory of why they perform so well is motivated by
intuitions from random walk algorithms and geometric relationships in the data.
For more information see the references below.
Model Features
--------------
Label clamping:
  The algorithm tries to learn distributions of labels over the dataset. In
  "Hard Clamp" mode, the ground-truth labels are never allowed to change; they
  are clamped into position. In "Soft Clamp" mode, they are allowed some
  wiggle room, but a fraction alpha of their original value is always
  retained. Hard clamping is the same as soft clamping with alpha set to 1.
Kernel:
  A function which projects a vector into some higher-dimensional space. This
  implementation supports RBF and KNN kernels. The RBF kernel generates a
  dense matrix of size O(N^2), whereas the KNN kernel generates a sparse
  matrix of size O(k*N) and runs much faster. See the documentation for SVMs
  for more info on kernels.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
Notes
-----
References:
[1] Yoshua Bengio, Olivier Delalleau, Nicolas Le Roux. In Semi-Supervised
Learning (2006), pp. 193-216
[2] Olivier Delalleau, Yoshua Bengio, Nicolas Le Roux. Efficient
Non-Parametric Function Induction in Semi-Supervised Learning. AISTAT 2005
"""
# Authors: Clay Woolam <[email protected]>
# Licence: BSD
from abc import ABCMeta, abstractmethod
from scipy import sparse
import numpy as np
from ..base import BaseEstimator, ClassifierMixin
from ..metrics.pairwise import rbf_kernel
from ..utils.graph import graph_laplacian
from ..utils.extmath import safe_sparse_dot
from ..utils.validation import check_X_y, check_is_fitted
from ..externals import six
from ..neighbors.unsupervised import NearestNeighbors
### Helper functions
def _not_converged(y_truth, y_prediction, tol=1e-3):
"""basic convergence check"""
return np.abs(y_truth - y_prediction).sum() > tol
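# Example (illustrative): with the default tol=1e-3,
#   _not_converged(np.array([1., 2.]), np.array([1., 2.0005]))  # -> False
#   _not_converged(np.array([1., 2.]), np.array([1., 2.5]))     # -> True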
class BaseLabelPropagation(six.with_metaclass(ABCMeta, BaseEstimator,
ClassifierMixin)):
"""Base class for label propagation module.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
        Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
Parameter for rbf kernel
alpha : float
Clamping factor
max_iter : float
Change maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
n_neighbors : integer > 0
Parameter for knn kernel
"""
def __init__(self, kernel='rbf', gamma=20, n_neighbors=7,
alpha=1, max_iter=30, tol=1e-3):
self.max_iter = max_iter
self.tol = tol
# kernel parameters
self.kernel = kernel
self.gamma = gamma
self.n_neighbors = n_neighbors
# clamping factor
self.alpha = alpha
def _get_kernel(self, X, y=None):
if self.kernel == "rbf":
if y is None:
return rbf_kernel(X, X, gamma=self.gamma)
else:
return rbf_kernel(X, y, gamma=self.gamma)
elif self.kernel == "knn":
if self.nn_fit is None:
self.nn_fit = NearestNeighbors(self.n_neighbors).fit(X)
if y is None:
return self.nn_fit.kneighbors_graph(self.nn_fit._fit_X,
self.n_neighbors,
mode='connectivity')
else:
return self.nn_fit.kneighbors(y, return_distance=False)
else:
raise ValueError("%s is not a valid kernel. Only rbf and knn"
" are supported at this time" % self.kernel)
@abstractmethod
def _build_graph(self):
raise NotImplementedError("Graph construction must be implemented"
" to fit a label propagation model.")
def predict(self, X):
"""Performs inductive inference across the model.
Parameters
----------
X : array_like, shape = [n_samples, n_features]
Returns
-------
y : array_like, shape = [n_samples]
Predictions for input data
"""
probas = self.predict_proba(X)
return self.classes_[np.argmax(probas, axis=1)].ravel()
def predict_proba(self, X):
"""Predict probability for each possible outcome.
Compute the probability estimates for each single sample in X
and each possible outcome seen during training (categorical
distribution).
Parameters
----------
X : array_like, shape = [n_samples, n_features]
Returns
-------
probabilities : array, shape = [n_samples, n_classes]
Normalized probability distributions across
class labels
"""
check_is_fitted(self, 'X_')
if sparse.isspmatrix(X):
X_2d = X
else:
X_2d = np.atleast_2d(X)
weight_matrices = self._get_kernel(self.X_, X_2d)
if self.kernel == 'knn':
probabilities = []
for weight_matrix in weight_matrices:
ine = np.sum(self.label_distributions_[weight_matrix], axis=0)
probabilities.append(ine)
probabilities = np.array(probabilities)
else:
weight_matrices = weight_matrices.T
probabilities = np.dot(weight_matrices, self.label_distributions_)
normalizer = np.atleast_2d(np.sum(probabilities, axis=1)).T
probabilities /= normalizer
return probabilities
def fit(self, X, y):
"""Fit a semi-supervised label propagation model based
All the input data is provided matrix X (labeled and unlabeled)
and corresponding label matrix y with a dedicated marker value for
unlabeled samples.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
A {n_samples by n_samples} size matrix will be created from this
y : array_like, shape = [n_samples]
n_labeled_samples (unlabeled points are marked as -1)
All unlabeled samples will be transductively assigned labels
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y)
self.X_ = X
# actual graph construction (implementations should override this)
graph_matrix = self._build_graph()
# label construction
# construct a categorical distribution for classification only
classes = np.unique(y)
classes = (classes[classes != -1])
self.classes_ = classes
n_samples, n_classes = len(y), len(classes)
y = np.asarray(y)
unlabeled = y == -1
clamp_weights = np.ones((n_samples, 1))
clamp_weights[unlabeled, 0] = self.alpha
# initialize distributions
self.label_distributions_ = np.zeros((n_samples, n_classes))
for label in classes:
self.label_distributions_[y == label, classes == label] = 1
y_static = np.copy(self.label_distributions_)
if self.alpha > 0.:
y_static *= 1 - self.alpha
y_static[unlabeled] = 0
l_previous = np.zeros((self.X_.shape[0], n_classes))
remaining_iter = self.max_iter
if sparse.isspmatrix(graph_matrix):
graph_matrix = graph_matrix.tocsr()
while (_not_converged(self.label_distributions_, l_previous, self.tol)
and remaining_iter > 1):
l_previous = self.label_distributions_
self.label_distributions_ = safe_sparse_dot(
graph_matrix, self.label_distributions_)
# clamp
self.label_distributions_ = np.multiply(
clamp_weights, self.label_distributions_) + y_static
remaining_iter -= 1
normalizer = np.sum(self.label_distributions_, axis=1)[:, np.newaxis]
self.label_distributions_ /= normalizer
# set the transduction item
transduction = self.classes_[np.argmax(self.label_distributions_,
axis=1)]
self.transduction_ = transduction.ravel()
self.n_iter_ = self.max_iter - remaining_iter
return self
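# In plain numpy terms, each pass of the loop in ``fit`` computes (an
# illustrative restatement, with W standing for graph_matrix and Y for
# label_distributions_):
#
#     Y = clamp_weights * W.dot(Y) + y_static
#
# i.e. labels diffuse one step along the graph and are then pulled back
# toward the clamped ground truth before the convergence check.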
class LabelPropagation(BaseLabelPropagation):
"""Label Propagation classifier
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
        Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
Parameter for rbf kernel
n_neighbors : integer > 0
Parameter for knn kernel
alpha : float
Clamping factor
max_iter : float
Change maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
Attributes
----------
X_ : array, shape = [n_samples, n_features]
Input array.
classes_ : array, shape = [n_classes]
The distinct labels used in classifying instances.
label_distributions_ : array, shape = [n_samples, n_classes]
Categorical distribution for each item.
transduction_ : array, shape = [n_samples]
Label assigned to each item via the transduction.
n_iter_ : int
Number of iterations run.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
References
----------
Xiaojin Zhu and Zoubin Ghahramani. Learning from labeled and unlabeled data
with label propagation. Technical Report CMU-CALD-02-107, Carnegie Mellon
University, 2002 http://pages.cs.wisc.edu/~jerryzhu/pub/CMU-CALD-02-107.pdf
See Also
--------
LabelSpreading : Alternate label propagation strategy more robust to noise
"""
def _build_graph(self):
"""Matrix representing a fully connected graph between each sample
This basic implementation creates a non-stochastic affinity matrix, so
class distributions will exceed 1 (normalization may be desired).
"""
if self.kernel == 'knn':
self.nn_fit = None
affinity_matrix = self._get_kernel(self.X_)
normalizer = affinity_matrix.sum(axis=0)
if sparse.isspmatrix(affinity_matrix):
affinity_matrix.data /= np.diag(np.array(normalizer))
else:
affinity_matrix /= normalizer[:, np.newaxis]
return affinity_matrix
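# For a symmetric affinity matrix such as [[2., 1.], [1., 3.]] (an
# illustrative example), the column sums [3., 4.] equal the row sums, and
# the division above yields [[0.667, 0.333], [0.25, 0.75]]: each row
# becomes a distribution over that sample's neighbors.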
class LabelSpreading(BaseLabelPropagation):
"""LabelSpreading model for semi-supervised learning
    This model is similar to the basic Label Propagation algorithm,
    but uses an affinity matrix based on the normalized graph Laplacian
and soft clamping across the labels.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
parameter for rbf kernel
n_neighbors : integer > 0
parameter for knn kernel
alpha : float
clamping factor
max_iter : float
maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
Attributes
----------
X_ : array, shape = [n_samples, n_features]
Input array.
classes_ : array, shape = [n_classes]
The distinct labels used in classifying instances.
label_distributions_ : array, shape = [n_samples, n_classes]
Categorical distribution for each item.
transduction_ : array, shape = [n_samples]
Label assigned to each item via the transduction.
n_iter_ : int
Number of iterations run.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelSpreading
>>> label_prop_model = LabelSpreading()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelSpreading(...)
References
----------
Dengyong Zhou, Olivier Bousquet, Thomas Navin Lal, Jason Weston,
Bernhard Schoelkopf. Learning with local and global consistency (2004)
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.115.3219
See Also
--------
LabelPropagation : Unregularized graph based semi-supervised learning
"""
def __init__(self, kernel='rbf', gamma=20, n_neighbors=7, alpha=0.2,
max_iter=30, tol=1e-3):
# this one has different base parameters
super(LabelSpreading, self).__init__(kernel=kernel, gamma=gamma,
n_neighbors=n_neighbors,
alpha=alpha, max_iter=max_iter,
tol=tol)
def _build_graph(self):
"""Graph matrix for Label Spreading computes the graph laplacian"""
# compute affinity matrix (or gram matrix)
if self.kernel == 'knn':
self.nn_fit = None
n_samples = self.X_.shape[0]
affinity_matrix = self._get_kernel(self.X_)
laplacian = graph_laplacian(affinity_matrix, normed=True)
laplacian = -laplacian
if sparse.isspmatrix(laplacian):
diag_mask = (laplacian.row == laplacian.col)
laplacian.data[diag_mask] = 0.0
else:
laplacian.flat[::n_samples + 1] = 0.0 # set diag to 0.0
return laplacian
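# (Illustrative note) with normed=True, graph_laplacian returns
# I - D^{-1/2} W D^{-1/2}; negating it and zeroing the diagonal therefore
# leaves the off-diagonal part of the symmetric normalized affinity
# D^{-1/2} W D^{-1/2}, which is the matrix the spreading iteration
# repeatedly multiplies against.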
| bsd-3-clause |
adamgreenhall/scikit-learn | examples/svm/plot_weighted_samples.py | 188 | 1943 | """
=====================
SVM: Weighted samples
=====================
Plot decision function of a weighted dataset, where the size of points
is proportional to its weight.
The sample weighting rescales the C parameter, which means that the classifier
puts more emphasis on getting these points right. The effect is often
subtle.
To emphasize the effect here, we particularly weight outliers, making the
deformation of the decision boundary very visible.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
def plot_decision_function(classifier, sample_weight, axis, title):
# plot the decision function
xx, yy = np.meshgrid(np.linspace(-4, 5, 500), np.linspace(-4, 5, 500))
Z = classifier.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# plot the line, the points, and the nearest vectors to the plane
axis.contourf(xx, yy, Z, alpha=0.75, cmap=plt.cm.bone)
axis.scatter(X[:, 0], X[:, 1], c=Y, s=100 * sample_weight, alpha=0.9,
cmap=plt.cm.bone)
axis.axis('off')
axis.set_title(title)
# we create 20 points
np.random.seed(0)
X = np.r_[np.random.randn(10, 2) + [1, 1], np.random.randn(10, 2)]
Y = [1] * 10 + [-1] * 10
sample_weight_last_ten = abs(np.random.randn(len(X)))
sample_weight_constant = np.ones(len(X))
# and bigger weights to some outliers
sample_weight_last_ten[15:] *= 5
sample_weight_last_ten[9] *= 15
# for reference, first fit without class weights
# fit the model
clf_weights = svm.SVC()
clf_weights.fit(X, Y, sample_weight=sample_weight_last_ten)
clf_no_weights = svm.SVC()
clf_no_weights.fit(X, Y)
fig, axes = plt.subplots(1, 2, figsize=(14, 6))
plot_decision_function(clf_no_weights, sample_weight_constant, axes[0],
"Constant weights")
plot_decision_function(clf_weights, sample_weight_last_ten, axes[1],
"Modified weights")
plt.show()
| bsd-3-clause |
s-gupta/fast-rcnn | tools/train_svms.py | 42 | 13247 | #!/usr/bin/env python
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""
Train post-hoc SVMs using the algorithm and hyper-parameters from
traditional R-CNN.
"""
import _init_paths
from fast_rcnn.config import cfg, cfg_from_file
from datasets.factory import get_imdb
from fast_rcnn.test import im_detect
from utils.timer import Timer
import caffe
import argparse
import pprint
import numpy as np
import numpy.random as npr
import cv2
from sklearn import svm
import os, sys
class SVMTrainer(object):
"""
Trains post-hoc detection SVMs for all classes using the algorithm
and hyper-parameters of traditional R-CNN.
"""
def __init__(self, net, imdb):
self.imdb = imdb
self.net = net
self.layer = 'fc7'
self.hard_thresh = -1.0001
self.neg_iou_thresh = 0.3
dim = net.params['cls_score'][0].data.shape[1]
scale = self._get_feature_scale()
print('Feature dim: {}'.format(dim))
print('Feature scale: {:.3f}'.format(scale))
self.trainers = [SVMClassTrainer(cls, dim, feature_scale=scale)
for cls in imdb.classes]
def _get_feature_scale(self, num_images=100):
TARGET_NORM = 20.0 # Magic value from traditional R-CNN
_t = Timer()
roidb = self.imdb.roidb
total_norm = 0.0
count = 0.0
inds = npr.choice(xrange(self.imdb.num_images), size=num_images,
replace=False)
for i_, i in enumerate(inds):
im = cv2.imread(self.imdb.image_path_at(i))
if roidb[i]['flipped']:
im = im[:, ::-1, :]
_t.tic()
scores, boxes = im_detect(self.net, im, roidb[i]['boxes'])
_t.toc()
feat = self.net.blobs[self.layer].data
total_norm += np.sqrt((feat ** 2).sum(axis=1)).sum()
count += feat.shape[0]
print('{}/{}: avg feature norm: {:.3f}'.format(i_ + 1, num_images,
total_norm / count))
return TARGET_NORM * 1.0 / (total_norm / count)
def _get_pos_counts(self):
counts = np.zeros((len(self.imdb.classes)), dtype=np.int)
roidb = self.imdb.roidb
for i in xrange(len(roidb)):
for j in xrange(1, self.imdb.num_classes):
I = np.where(roidb[i]['gt_classes'] == j)[0]
counts[j] += len(I)
for j in xrange(1, self.imdb.num_classes):
print('class {:s} has {:d} positives'.
format(self.imdb.classes[j], counts[j]))
return counts
def get_pos_examples(self):
counts = self._get_pos_counts()
for i in xrange(len(counts)):
self.trainers[i].alloc_pos(counts[i])
_t = Timer()
roidb = self.imdb.roidb
num_images = len(roidb)
# num_images = 100
for i in xrange(num_images):
im = cv2.imread(self.imdb.image_path_at(i))
if roidb[i]['flipped']:
im = im[:, ::-1, :]
gt_inds = np.where(roidb[i]['gt_classes'] > 0)[0]
gt_boxes = roidb[i]['boxes'][gt_inds]
_t.tic()
scores, boxes = im_detect(self.net, im, gt_boxes)
_t.toc()
feat = self.net.blobs[self.layer].data
for j in xrange(1, self.imdb.num_classes):
cls_inds = np.where(roidb[i]['gt_classes'][gt_inds] == j)[0]
if len(cls_inds) > 0:
cls_feat = feat[cls_inds, :]
self.trainers[j].append_pos(cls_feat)
print 'get_pos_examples: {:d}/{:d} {:.3f}s' \
.format(i + 1, len(roidb), _t.average_time)
def initialize_net(self):
# Start all SVM parameters at zero
self.net.params['cls_score'][0].data[...] = 0
self.net.params['cls_score'][1].data[...] = 0
        # Initialize SVMs in a smart way. Not doing this because it's such
# a good initialization that we might not learn something close to
# the SVM solution.
# # subtract background weights and biases for the foreground classes
# w_bg = self.net.params['cls_score'][0].data[0, :]
# b_bg = self.net.params['cls_score'][1].data[0]
# self.net.params['cls_score'][0].data[1:, :] -= w_bg
# self.net.params['cls_score'][1].data[1:] -= b_bg
# # set the background weights and biases to 0 (where they shall remain)
# self.net.params['cls_score'][0].data[0, :] = 0
# self.net.params['cls_score'][1].data[0] = 0
def update_net(self, cls_ind, w, b):
self.net.params['cls_score'][0].data[cls_ind, :] = w
self.net.params['cls_score'][1].data[cls_ind] = b
def train_with_hard_negatives(self):
_t = Timer()
roidb = self.imdb.roidb
num_images = len(roidb)
# num_images = 100
for i in xrange(num_images):
im = cv2.imread(self.imdb.image_path_at(i))
if roidb[i]['flipped']:
im = im[:, ::-1, :]
_t.tic()
scores, boxes = im_detect(self.net, im, roidb[i]['boxes'])
_t.toc()
feat = self.net.blobs[self.layer].data
for j in xrange(1, self.imdb.num_classes):
hard_inds = \
np.where((scores[:, j] > self.hard_thresh) &
(roidb[i]['gt_overlaps'][:, j].toarray().ravel() <
self.neg_iou_thresh))[0]
if len(hard_inds) > 0:
hard_feat = feat[hard_inds, :].copy()
new_w_b = \
self.trainers[j].append_neg_and_retrain(feat=hard_feat)
if new_w_b is not None:
self.update_net(j, new_w_b[0], new_w_b[1])
print(('train_with_hard_negatives: '
'{:d}/{:d} {:.3f}s').format(i + 1, len(roidb),
_t.average_time))
def train(self):
# Initialize SVMs using
# a. w_i = fc8_w_i - fc8_w_0
# b. b_i = fc8_b_i - fc8_b_0
# c. Install SVMs into net
self.initialize_net()
# Pass over roidb to count num positives for each class
# a. Pre-allocate arrays for positive feature vectors
# Pass over roidb, computing features for positives only
self.get_pos_examples()
# Pass over roidb
# a. Compute cls_score with forward pass
# b. For each class
# i. Select hard negatives
# ii. Add them to cache
# c. For each class
# i. If SVM retrain criteria met, update SVM
# ii. Install new SVM into net
self.train_with_hard_negatives()
# One final SVM retraining for each class
# Install SVMs into net
for j in xrange(1, self.imdb.num_classes):
new_w_b = self.trainers[j].append_neg_and_retrain(force=True)
self.update_net(j, new_w_b[0], new_w_b[1])
class SVMClassTrainer(object):
"""Manages post-hoc SVM training for a single object class."""
def __init__(self, cls, dim, feature_scale=1.0,
C=0.001, B=10.0, pos_weight=2.0):
self.pos = np.zeros((0, dim), dtype=np.float32)
self.neg = np.zeros((0, dim), dtype=np.float32)
self.B = B
self.C = C
self.cls = cls
self.pos_weight = pos_weight
self.dim = dim
self.feature_scale = feature_scale
self.svm = svm.LinearSVC(C=C, class_weight={1: 2, -1: 1},
intercept_scaling=B, verbose=1,
penalty='l2', loss='l1',
random_state=cfg.RNG_SEED, dual=True)
self.pos_cur = 0
self.num_neg_added = 0
self.retrain_limit = 2000
self.evict_thresh = -1.1
self.loss_history = []
def alloc_pos(self, count):
self.pos_cur = 0
self.pos = np.zeros((count, self.dim), dtype=np.float32)
def append_pos(self, feat):
num = feat.shape[0]
self.pos[self.pos_cur:self.pos_cur + num, :] = feat
self.pos_cur += num
def train(self):
print('>>> Updating {} detector <<<'.format(self.cls))
num_pos = self.pos.shape[0]
num_neg = self.neg.shape[0]
print('Cache holds {} pos examples and {} neg examples'.
format(num_pos, num_neg))
X = np.vstack((self.pos, self.neg)) * self.feature_scale
y = np.hstack((np.ones(num_pos),
-np.ones(num_neg)))
self.svm.fit(X, y)
w = self.svm.coef_
b = self.svm.intercept_[0]
scores = self.svm.decision_function(X)
pos_scores = scores[:num_pos]
neg_scores = scores[num_pos:]
pos_loss = (self.C * self.pos_weight *
np.maximum(0, 1 - pos_scores).sum())
neg_loss = self.C * np.maximum(0, 1 + neg_scores).sum()
reg_loss = 0.5 * np.dot(w.ravel(), w.ravel()) + 0.5 * b ** 2
tot_loss = pos_loss + neg_loss + reg_loss
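        # (Illustrative note) tot_loss is the weighted hinge objective
        # evaluated on the current cache: the L2 penalty on (w, b) plus
        # C-scaled hinge terms, with positives up-weighted by pos_weight.
        # Tracking it across retrainings shows whether hard-negative
        # mining is still making progress.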
self.loss_history.append((tot_loss, pos_loss, neg_loss, reg_loss))
for i, losses in enumerate(self.loss_history):
print((' {:d}: obj val: {:.3f} = {:.3f} '
'(pos) + {:.3f} (neg) + {:.3f} (reg)').format(i, *losses))
return ((w * self.feature_scale, b * self.feature_scale),
pos_scores, neg_scores)
def append_neg_and_retrain(self, feat=None, force=False):
if feat is not None:
num = feat.shape[0]
self.neg = np.vstack((self.neg, feat))
self.num_neg_added += num
if self.num_neg_added > self.retrain_limit or force:
self.num_neg_added = 0
new_w_b, pos_scores, neg_scores = self.train()
# scores = np.dot(self.neg, new_w_b[0].T) + new_w_b[1]
# easy_inds = np.where(neg_scores < self.evict_thresh)[0]
not_easy_inds = np.where(neg_scores >= self.evict_thresh)[0]
if len(not_easy_inds) > 0:
self.neg = self.neg[not_easy_inds, :]
# self.neg = np.delete(self.neg, easy_inds)
print(' Pruning easy negatives')
print(' Cache holds {} pos examples and {} neg examples'.
format(self.pos.shape[0], self.neg.shape[0]))
print(' {} pos support vectors'.format((pos_scores <= 1).sum()))
print(' {} neg support vectors'.format((neg_scores >= -1).sum()))
return new_w_b
else:
return None
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(description='Train SVMs (old skool)')
parser.add_argument('--gpu', dest='gpu_id', help='GPU device id to use [0]',
default=0, type=int)
parser.add_argument('--def', dest='prototxt',
help='prototxt file defining the network',
default=None, type=str)
parser.add_argument('--net', dest='caffemodel',
help='model to test',
default=None, type=str)
parser.add_argument('--cfg', dest='cfg_file',
help='optional config file', default=None, type=str)
parser.add_argument('--imdb', dest='imdb_name',
help='dataset to train on',
default='voc_2007_trainval', type=str)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
return args
if __name__ == '__main__':
# Must turn this off to prevent issues when digging into the net blobs to
# pull out features (tricky!)
cfg.DEDUP_BOXES = 0
# Must turn this on because we use the test im_detect() method to harvest
# hard negatives
cfg.TEST.SVM = True
args = parse_args()
print('Called with args:')
print(args)
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
print('Using config:')
pprint.pprint(cfg)
# fix the random seed for reproducibility
np.random.seed(cfg.RNG_SEED)
# set up caffe
caffe.set_mode_gpu()
if args.gpu_id is not None:
caffe.set_device(args.gpu_id)
net = caffe.Net(args.prototxt, args.caffemodel, caffe.TEST)
net.name = os.path.splitext(os.path.basename(args.caffemodel))[0]
out = os.path.splitext(os.path.basename(args.caffemodel))[0] + '_svm'
out_dir = os.path.dirname(args.caffemodel)
imdb = get_imdb(args.imdb_name)
print 'Loaded dataset `{:s}` for training'.format(imdb.name)
# enhance roidb to contain flipped examples
if cfg.TRAIN.USE_FLIPPED:
print 'Appending horizontally-flipped training examples...'
imdb.append_flipped_roidb()
print 'done'
SVMTrainer(net, imdb).train()
filename = '{}/{}.caffemodel'.format(out_dir, out)
net.save(filename)
print 'Wrote svm model to: {:s}'.format(filename)
| mit |
memo/tensorflow | tensorflow/contrib/learn/python/learn/learn_io/__init__.py | 79 | 2464 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tools to allow different io formats."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn.learn_io.dask_io import extract_dask_data
from tensorflow.contrib.learn.python.learn.learn_io.dask_io import extract_dask_labels
from tensorflow.contrib.learn.python.learn.learn_io.dask_io import HAS_DASK
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import queue_parsed_features
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import read_batch_examples
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import read_batch_features
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import read_batch_record_features
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import read_keyed_batch_examples
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import read_keyed_batch_examples_shared_queue
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import read_keyed_batch_features
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import read_keyed_batch_features_shared_queue
from tensorflow.contrib.learn.python.learn.learn_io.numpy_io import numpy_input_fn
from tensorflow.contrib.learn.python.learn.learn_io.pandas_io import extract_pandas_data
from tensorflow.contrib.learn.python.learn.learn_io.pandas_io import extract_pandas_labels
from tensorflow.contrib.learn.python.learn.learn_io.pandas_io import extract_pandas_matrix
from tensorflow.contrib.learn.python.learn.learn_io.pandas_io import HAS_PANDAS
from tensorflow.contrib.learn.python.learn.learn_io.pandas_io import pandas_input_fn
from tensorflow.contrib.learn.python.learn.learn_io.generator_io import generator_input_fn
| apache-2.0 |
pkruskal/scikit-learn | examples/feature_selection/plot_rfe_with_cross_validation.py | 226 | 1384 | """
===================================================
Recursive feature elimination with cross-validation
===================================================
A recursive feature elimination example with automatic tuning of the
number of features selected with cross-validation.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.cross_validation import StratifiedKFold
from sklearn.feature_selection import RFECV
from sklearn.datasets import make_classification
# Build a classification task using 3 informative features
X, y = make_classification(n_samples=1000, n_features=25, n_informative=3,
n_redundant=2, n_repeated=0, n_classes=8,
n_clusters_per_class=1, random_state=0)
# Create the RFE object and compute a cross-validated score.
svc = SVC(kernel="linear")
# The "accuracy" scoring is proportional to the number of correct
# classifications
rfecv = RFECV(estimator=svc, step=1, cv=StratifiedKFold(y, 2),
scoring='accuracy')
rfecv.fit(X, y)
print("Optimal number of features : %d" % rfecv.n_features_)
# Plot number of features VS. cross-validation scores
plt.figure()
plt.xlabel("Number of features selected")
plt.ylabel("Cross validation score (nb of correct classifications)")
plt.plot(range(1, len(rfecv.grid_scores_) + 1), rfecv.grid_scores_)
plt.show()
| bsd-3-clause |
arhik/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/_mathtext_data.py | 69 | 57988 | """
font data tables for truetype and afm computer modern fonts
"""
# this dict maps symbol names to fontnames, glyphindex. To get the
# glyph index from the character code, you have to use get_charmap
"""
from matplotlib.ft2font import FT2Font
font = FT2Font('/usr/local/share/matplotlib/cmr10.ttf')
items = font.get_charmap().items()
items.sort()
for charcode, glyphind in items:
print charcode, glyphind
"""
latex_to_bakoma = {
r'\oint' : ('cmex10', 45),
r'\bigodot' : ('cmex10', 50),
r'\bigoplus' : ('cmex10', 55),
r'\bigotimes' : ('cmex10', 59),
r'\sum' : ('cmex10', 51),
r'\prod' : ('cmex10', 24),
r'\int' : ('cmex10', 56),
r'\bigcup' : ('cmex10', 28),
r'\bigcap' : ('cmex10', 60),
r'\biguplus' : ('cmex10', 32),
r'\bigwedge' : ('cmex10', 4),
r'\bigvee' : ('cmex10', 37),
r'\coprod' : ('cmex10', 42),
r'\__sqrt__' : ('cmex10', 48),
r'\leftbrace' : ('cmex10', 92),
r'{' : ('cmex10', 92),
r'\{' : ('cmex10', 92),
r'\rightbrace' : ('cmex10', 130),
r'}' : ('cmex10', 130),
r'\}' : ('cmex10', 130),
r'\leftangle' : ('cmex10', 97),
r'\rightangle' : ('cmex10', 64),
r'\langle' : ('cmex10', 97),
r'\rangle' : ('cmex10', 64),
r'\widehat' : ('cmex10', 15),
r'\widetilde' : ('cmex10', 52),
r'\omega' : ('cmmi10', 29),
r'\varepsilon' : ('cmmi10', 20),
r'\vartheta' : ('cmmi10', 22),
r'\varrho' : ('cmmi10', 61),
r'\varsigma' : ('cmmi10', 41),
r'\varphi' : ('cmmi10', 6),
r'\leftharpoonup' : ('cmmi10', 108),
r'\leftharpoondown' : ('cmmi10', 68),
r'\rightharpoonup' : ('cmmi10', 117),
r'\rightharpoondown' : ('cmmi10', 77),
r'\triangleright' : ('cmmi10', 130),
r'\triangleleft' : ('cmmi10', 89),
r'.' : ('cmmi10', 51),
r',' : ('cmmi10', 44),
r'<' : ('cmmi10', 99),
r'/' : ('cmmi10', 98),
r'>' : ('cmmi10', 107),
r'\flat' : ('cmmi10', 131),
r'\natural' : ('cmmi10', 90),
r'\sharp' : ('cmmi10', 50),
r'\smile' : ('cmmi10', 97),
r'\frown' : ('cmmi10', 58),
r'\ell' : ('cmmi10', 102),
r'\imath' : ('cmmi10', 8),
r'\jmath' : ('cmmi10', 65),
r'\wp' : ('cmmi10', 14),
r'\alpha' : ('cmmi10', 13),
r'\beta' : ('cmmi10', 35),
r'\gamma' : ('cmmi10', 24),
r'\delta' : ('cmmi10', 38),
r'\epsilon' : ('cmmi10', 54),
r'\zeta' : ('cmmi10', 10),
r'\eta' : ('cmmi10', 5),
r'\theta' : ('cmmi10', 18),
r'\iota' : ('cmmi10', 28),
r'\lambda' : ('cmmi10', 9),
r'\mu' : ('cmmi10', 32),
r'\nu' : ('cmmi10', 34),
r'\xi' : ('cmmi10', 7),
r'\pi' : ('cmmi10', 36),
r'\kappa' : ('cmmi10', 30),
r'\rho' : ('cmmi10', 39),
r'\sigma' : ('cmmi10', 21),
r'\tau' : ('cmmi10', 43),
r'\upsilon' : ('cmmi10', 25),
r'\phi' : ('cmmi10', 42),
r'\chi' : ('cmmi10', 17),
r'\psi' : ('cmmi10', 31),
r'|' : ('cmsy10', 47),
r'\|' : ('cmsy10', 47),
r'(' : ('cmr10', 119),
r'\leftparen' : ('cmr10', 119),
r'\rightparen' : ('cmr10', 68),
r')' : ('cmr10', 68),
r'+' : ('cmr10', 76),
r'0' : ('cmr10', 40),
r'1' : ('cmr10', 100),
r'2' : ('cmr10', 49),
r'3' : ('cmr10', 110),
r'4' : ('cmr10', 59),
r'5' : ('cmr10', 120),
r'6' : ('cmr10', 69),
r'7' : ('cmr10', 127),
r'8' : ('cmr10', 77),
r'9' : ('cmr10', 22),
r':' : ('cmr10', 85),
r';' : ('cmr10', 31),
r'=' : ('cmr10', 41),
r'\leftbracket' : ('cmr10', 62),
r'[' : ('cmr10', 62),
r'\rightbracket' : ('cmr10', 72),
r']' : ('cmr10', 72),
r'\%' : ('cmr10', 48),
r'%' : ('cmr10', 48),
r'\$' : ('cmr10', 99),
r'@' : ('cmr10', 111),
r'\_' : ('cmtt10', 79),
r'\Gamma' : ('cmr10', 19),
r'\Delta' : ('cmr10', 6),
r'\Theta' : ('cmr10', 7),
r'\Lambda' : ('cmr10', 14),
r'\Xi' : ('cmr10', 3),
r'\Pi' : ('cmr10', 17),
r'\Sigma' : ('cmr10', 10),
r'\Upsilon' : ('cmr10', 11),
r'\Phi' : ('cmr10', 9),
r'\Psi' : ('cmr10', 15),
r'\Omega' : ('cmr10', 12),
# these are mathml names, I think. I'm just using them for the
# tex methods noted
r'\circumflexaccent' : ('cmr10', 124), # for \hat
r'\combiningbreve' : ('cmr10', 81), # for \breve
r'\combiningoverline' : ('cmr10', 131), # for \bar
r'\combininggraveaccent' : ('cmr10', 114), # for \grave
r'\combiningacuteaccent' : ('cmr10', 63), # for \accute
r'\combiningdiaeresis' : ('cmr10', 91), # for \ddot
r'\combiningtilde' : ('cmr10', 75), # for \tilde
r'\combiningrightarrowabove' : ('cmmi10', 110), # for \vec
r'\combiningdotabove' : ('cmr10', 26), # for \dot
r'\leftarrow' : ('cmsy10', 10),
r'\uparrow' : ('cmsy10', 25),
r'\downarrow' : ('cmsy10', 28),
r'\leftrightarrow' : ('cmsy10', 24),
r'\nearrow' : ('cmsy10', 99),
r'\searrow' : ('cmsy10', 57),
r'\simeq' : ('cmsy10', 108),
r'\Leftarrow' : ('cmsy10', 104),
r'\Rightarrow' : ('cmsy10', 112),
r'\Uparrow' : ('cmsy10', 60),
r'\Downarrow' : ('cmsy10', 68),
r'\Leftrightarrow' : ('cmsy10', 51),
r'\nwarrow' : ('cmsy10', 65),
r'\swarrow' : ('cmsy10', 116),
r'\propto' : ('cmsy10', 15),
r'\prime' : ('cmsy10', 73),
r"'" : ('cmsy10', 73),
r'\infty' : ('cmsy10', 32),
r'\in' : ('cmsy10', 59),
r'\ni' : ('cmsy10', 122),
r'\bigtriangleup' : ('cmsy10', 80),
r'\bigtriangledown' : ('cmsy10', 132),
r'\slash' : ('cmsy10', 87),
r'\forall' : ('cmsy10', 21),
r'\exists' : ('cmsy10', 5),
r'\neg' : ('cmsy10', 20),
r'\emptyset' : ('cmsy10', 33),
r'\Re' : ('cmsy10', 95),
r'\Im' : ('cmsy10', 52),
r'\top' : ('cmsy10', 100),
r'\bot' : ('cmsy10', 11),
r'\aleph' : ('cmsy10', 26),
r'\cup' : ('cmsy10', 6),
r'\cap' : ('cmsy10', 19),
r'\uplus' : ('cmsy10', 58),
r'\wedge' : ('cmsy10', 43),
r'\vee' : ('cmsy10', 96),
r'\vdash' : ('cmsy10', 109),
r'\dashv' : ('cmsy10', 66),
r'\lfloor' : ('cmsy10', 117),
r'\rfloor' : ('cmsy10', 74),
r'\lceil' : ('cmsy10', 123),
r'\rceil' : ('cmsy10', 81),
r'\lbrace' : ('cmsy10', 92),
r'\rbrace' : ('cmsy10', 105),
r'\mid' : ('cmsy10', 47),
r'\vert' : ('cmsy10', 47),
r'\Vert' : ('cmsy10', 44),
r'\updownarrow' : ('cmsy10', 94),
r'\Updownarrow' : ('cmsy10', 53),
r'\backslash' : ('cmsy10', 126),
r'\wr' : ('cmsy10', 101),
r'\nabla' : ('cmsy10', 110),
r'\sqcup' : ('cmsy10', 67),
r'\sqcap' : ('cmsy10', 118),
r'\sqsubseteq' : ('cmsy10', 75),
r'\sqsupseteq' : ('cmsy10', 124),
r'\S' : ('cmsy10', 129),
r'\dag' : ('cmsy10', 71),
r'\ddag' : ('cmsy10', 127),
r'\P' : ('cmsy10', 130),
r'\clubsuit' : ('cmsy10', 18),
r'\diamondsuit' : ('cmsy10', 34),
r'\heartsuit' : ('cmsy10', 22),
r'-' : ('cmsy10', 17),
r'\cdot' : ('cmsy10', 78),
r'\times' : ('cmsy10', 13),
r'*' : ('cmsy10', 9),
r'\ast' : ('cmsy10', 9),
r'\div' : ('cmsy10', 31),
r'\diamond' : ('cmsy10', 48),
r'\pm' : ('cmsy10', 8),
r'\mp' : ('cmsy10', 98),
r'\oplus' : ('cmsy10', 16),
r'\ominus' : ('cmsy10', 56),
r'\otimes' : ('cmsy10', 30),
r'\oslash' : ('cmsy10', 107),
r'\odot' : ('cmsy10', 64),
r'\bigcirc' : ('cmsy10', 115),
r'\circ' : ('cmsy10', 72),
r'\bullet' : ('cmsy10', 84),
r'\asymp' : ('cmsy10', 121),
r'\equiv' : ('cmsy10', 35),
r'\subseteq' : ('cmsy10', 103),
r'\supseteq' : ('cmsy10', 42),
r'\leq' : ('cmsy10', 14),
r'\geq' : ('cmsy10', 29),
r'\preceq' : ('cmsy10', 79),
r'\succeq' : ('cmsy10', 131),
r'\sim' : ('cmsy10', 27),
r'\approx' : ('cmsy10', 23),
r'\subset' : ('cmsy10', 50),
r'\supset' : ('cmsy10', 86),
r'\ll' : ('cmsy10', 85),
r'\gg' : ('cmsy10', 40),
r'\prec' : ('cmsy10', 93),
r'\succ' : ('cmsy10', 49),
r'\rightarrow' : ('cmsy10', 12),
r'\to' : ('cmsy10', 12),
r'\spadesuit' : ('cmsy10', 7),
}
latex_to_cmex = {
r'\__sqrt__' : 112,
r'\bigcap' : 92,
r'\bigcup' : 91,
r'\bigodot' : 75,
r'\bigoplus' : 77,
r'\bigotimes' : 79,
r'\biguplus' : 93,
r'\bigvee' : 95,
r'\bigwedge' : 94,
r'\coprod' : 97,
r'\int' : 90,
r'\leftangle' : 173,
r'\leftbrace' : 169,
r'\oint' : 73,
r'\prod' : 89,
r'\rightangle' : 174,
r'\rightbrace' : 170,
r'\sum' : 88,
r'\widehat' : 98,
r'\widetilde' : 101,
}
latex_to_standard = {
r'\cong' : ('psyr', 64),
r'\Delta' : ('psyr', 68),
r'\Phi' : ('psyr', 70),
r'\Gamma' : ('psyr', 89),
r'\alpha' : ('psyr', 97),
r'\beta' : ('psyr', 98),
r'\chi' : ('psyr', 99),
r'\delta' : ('psyr', 100),
r'\varepsilon' : ('psyr', 101),
r'\phi' : ('psyr', 102),
r'\gamma' : ('psyr', 103),
r'\eta' : ('psyr', 104),
r'\iota' : ('psyr', 105),
r'\varpsi' : ('psyr', 106),
r'\kappa' : ('psyr', 108),
r'\nu' : ('psyr', 110),
r'\pi' : ('psyr', 112),
r'\theta' : ('psyr', 113),
r'\rho' : ('psyr', 114),
r'\sigma' : ('psyr', 115),
r'\tau' : ('psyr', 116),
r'\upsilon' : ('psyr', 117),
r'\varpi' : ('psyr', 118),
r'\omega' : ('psyr', 119),
r'\xi' : ('psyr', 120),
r'\psi' : ('psyr', 121),
r'\zeta' : ('psyr', 122),
r'\sim' : ('psyr', 126),
r'\leq' : ('psyr', 163),
r'\infty' : ('psyr', 165),
r'\clubsuit' : ('psyr', 167),
r'\diamondsuit' : ('psyr', 168),
r'\heartsuit' : ('psyr', 169),
r'\spadesuit' : ('psyr', 170),
r'\leftrightarrow' : ('psyr', 171),
r'\leftarrow' : ('psyr', 172),
r'\uparrow' : ('psyr', 173),
r'\rightarrow' : ('psyr', 174),
r'\downarrow' : ('psyr', 175),
r'\pm' : ('psyr', 176),
r'\geq' : ('psyr', 179),
r'\times' : ('psyr', 180),
r'\propto' : ('psyr', 181),
r'\partial' : ('psyr', 182),
r'\bullet' : ('psyr', 183),
r'\div' : ('psyr', 184),
r'\neq' : ('psyr', 185),
r'\equiv' : ('psyr', 186),
r'\approx' : ('psyr', 187),
r'\ldots' : ('psyr', 188),
r'\aleph' : ('psyr', 192),
r'\Im' : ('psyr', 193),
r'\Re' : ('psyr', 194),
r'\wp' : ('psyr', 195),
r'\otimes' : ('psyr', 196),
r'\oplus' : ('psyr', 197),
r'\oslash' : ('psyr', 198),
r'\cap' : ('psyr', 199),
r'\cup' : ('psyr', 200),
r'\supset' : ('psyr', 201),
r'\supseteq' : ('psyr', 202),
r'\subset' : ('psyr', 204),
r'\subseteq' : ('psyr', 205),
r'\in' : ('psyr', 206),
r'\notin' : ('psyr', 207),
r'\angle' : ('psyr', 208),
r'\nabla' : ('psyr', 209),
r'\textregistered' : ('psyr', 210),
r'\copyright' : ('psyr', 211),
r'\texttrademark' : ('psyr', 212),
r'\Pi' : ('psyr', 213),
r'\prod' : ('psyr', 213),
r'\surd' : ('psyr', 214),
r'\__sqrt__' : ('psyr', 214),
r'\cdot' : ('psyr', 215),
r'\urcorner' : ('psyr', 216),
r'\vee' : ('psyr', 217),
r'\wedge' : ('psyr', 218),
r'\Leftrightarrow' : ('psyr', 219),
r'\Leftarrow' : ('psyr', 220),
r'\Uparrow' : ('psyr', 221),
r'\Rightarrow' : ('psyr', 222),
r'\Downarrow' : ('psyr', 223),
r'\Diamond' : ('psyr', 224),
r'\langle' : ('psyr', 225),
r'\Sigma' : ('psyr', 229),
r'\sum' : ('psyr', 229),
r'\forall' : ('psyr', 34),
r'\exists' : ('psyr', 36),
r'\lceil' : ('psyr', 233),
r'\lbrace' : ('psyr', 123),
r'\Psi' : ('psyr', 89),
r'\bot' : ('psyr', 0136),
r'\Omega' : ('psyr', 0127),
r'\leftbracket' : ('psyr', 0133),
r'\rightbracket' : ('psyr', 0135),
r'\leftbrace' : ('psyr', 123),
r'\leftparen' : ('psyr', 050),
r'\prime' : ('psyr', 0242),
r'\sharp' : ('psyr', 043),
r'\slash' : ('psyr', 057),
r'\Lambda' : ('psyr', 0114),
r'\neg' : ('psyr', 0330),
r'\Upsilon' : ('psyr', 0241),
r'\rightbrace' : ('psyr', 0175),
r'\rfloor' : ('psyr', 0373),
r'\lambda' : ('psyr', 0154),
r'\to' : ('psyr', 0256),
r'\Xi' : ('psyr', 0130),
r'\emptyset' : ('psyr', 0306),
r'\lfloor' : ('psyr', 0353),
r'\rightparen' : ('psyr', 051),
r'\rceil' : ('psyr', 0371),
r'\ni' : ('psyr', 047),
r'\epsilon' : ('psyr', 0145),
r'\Theta' : ('psyr', 0121),
r'\langle' : ('psyr', 0341),
r'\leftangle' : ('psyr', 0341),
r'\rangle' : ('psyr', 0361),
r'\rightangle' : ('psyr', 0361),
r'\rbrace' : ('psyr', 0175),
r'\circ' : ('psyr', 0260),
r'\diamond' : ('psyr', 0340),
r'\mu' : ('psyr', 0155),
r'\mid' : ('psyr', 0352),
r'\imath' : ('pncri8a', 105),
r'\%' : ('pncr8a', 37),
r'\$' : ('pncr8a', 36),
r'\{' : ('pncr8a', 123),
r'\}' : ('pncr8a', 125),
r'\backslash' : ('pncr8a', 92),
r'\ast' : ('pncr8a', 42),
r'\circumflexaccent' : ('pncri8a', 124), # for \hat
r'\combiningbreve' : ('pncri8a', 81), # for \breve
r'\combininggraveaccent' : ('pncri8a', 114), # for \grave
r'\combiningacuteaccent' : ('pncri8a', 63), # for \accute
r'\combiningdiaeresis' : ('pncri8a', 91), # for \ddot
r'\combiningtilde' : ('pncri8a', 75), # for \tilde
r'\combiningrightarrowabove' : ('pncri8a', 110), # for \vec
r'\combiningdotabove' : ('pncri8a', 26), # for \dot
}
# Automatically generated.
type12uni = {'uni24C8': 9416,
'aring': 229,
'uni22A0': 8864,
'uni2292': 8850,
'quotedblright': 8221,
'uni03D2': 978,
'uni2215': 8725,
'uni03D0': 976,
'V': 86,
'dollar': 36,
'uni301E': 12318,
'uni03D5': 981,
'four': 52,
'uni25A0': 9632,
'uni013C': 316,
'uni013B': 315,
'uni013E': 318,
'Yacute': 221,
'uni25DE': 9694,
'uni013F': 319,
'uni255A': 9562,
'uni2606': 9734,
'uni0180': 384,
'uni22B7': 8887,
'uni044F': 1103,
'uni22B5': 8885,
'uni22B4': 8884,
'uni22AE': 8878,
'uni22B2': 8882,
'uni22B1': 8881,
'uni22B0': 8880,
'uni25CD': 9677,
'uni03CE': 974,
'uni03CD': 973,
'uni03CC': 972,
'uni03CB': 971,
'uni03CA': 970,
'uni22B8': 8888,
'uni22C9': 8905,
'uni0449': 1097,
'uni20DD': 8413,
'uni20DC': 8412,
'uni20DB': 8411,
'uni2231': 8753,
'uni25CF': 9679,
'uni306E': 12398,
'uni03D1': 977,
'uni01A1': 417,
'uni20D7': 8407,
'uni03D6': 982,
'uni2233': 8755,
'uni20D2': 8402,
'uni20D1': 8401,
'uni20D0': 8400,
'P': 80,
'uni22BE': 8894,
'uni22BD': 8893,
'uni22BC': 8892,
'uni22BB': 8891,
'underscore': 95,
'uni03C8': 968,
'uni03C7': 967,
'uni0328': 808,
'uni03C5': 965,
'uni03C4': 964,
'uni03C3': 963,
'uni03C2': 962,
'uni03C1': 961,
'uni03C0': 960,
'uni2010': 8208,
'uni0130': 304,
'uni0133': 307,
'uni0132': 306,
'uni0135': 309,
'uni0134': 308,
'uni0137': 311,
'uni0136': 310,
'uni0139': 313,
'uni0138': 312,
'uni2244': 8772,
'uni229A': 8858,
'uni2571': 9585,
'uni0278': 632,
'uni2239': 8761,
'p': 112,
'uni3019': 12313,
'uni25CB': 9675,
'uni03DB': 987,
'uni03DC': 988,
'uni03DA': 986,
'uni03DF': 991,
'uni03DD': 989,
'uni013D': 317,
'uni220A': 8714,
'uni220C': 8716,
'uni220B': 8715,
'uni220E': 8718,
'uni220D': 8717,
'uni220F': 8719,
'uni22CC': 8908,
'Otilde': 213,
'uni25E5': 9701,
'uni2736': 10038,
'perthousand': 8240,
'zero': 48,
'uni279B': 10139,
'dotlessi': 305,
'uni2279': 8825,
'Scaron': 352,
'zcaron': 382,
'uni21D8': 8664,
'egrave': 232,
'uni0271': 625,
'uni01AA': 426,
'uni2332': 9010,
'section': 167,
'uni25E4': 9700,
'Icircumflex': 206,
'ntilde': 241,
'uni041E': 1054,
'ampersand': 38,
'uni041C': 1052,
'uni041A': 1050,
'uni22AB': 8875,
'uni21DB': 8667,
'dotaccent': 729,
'uni0416': 1046,
'uni0417': 1047,
'uni0414': 1044,
'uni0415': 1045,
'uni0412': 1042,
'uni0413': 1043,
'degree': 176,
'uni0411': 1041,
'K': 75,
'uni25EB': 9707,
'uni25EF': 9711,
'uni0418': 1048,
'uni0419': 1049,
'uni2263': 8803,
'uni226E': 8814,
'uni2251': 8785,
'uni02C8': 712,
'uni2262': 8802,
'acircumflex': 226,
'uni22B3': 8883,
'uni2261': 8801,
'uni2394': 9108,
'Aring': 197,
'uni2260': 8800,
'uni2254': 8788,
'uni0436': 1078,
'uni2267': 8807,
'k': 107,
'uni22C8': 8904,
'uni226A': 8810,
'uni231F': 8991,
'smalltilde': 732,
'uni2201': 8705,
'uni2200': 8704,
'uni2203': 8707,
'uni02BD': 701,
'uni2205': 8709,
'uni2204': 8708,
'Agrave': 192,
'uni2206': 8710,
'uni2209': 8713,
'uni2208': 8712,
'uni226D': 8813,
'uni2264': 8804,
'uni263D': 9789,
'uni2258': 8792,
'uni02D3': 723,
'uni02D2': 722,
'uni02D1': 721,
'uni02D0': 720,
'uni25E1': 9697,
'divide': 247,
'uni02D5': 725,
'uni02D4': 724,
'ocircumflex': 244,
'uni2524': 9508,
'uni043A': 1082,
'uni24CC': 9420,
'asciitilde': 126,
'uni22B9': 8889,
'uni24D2': 9426,
'uni211E': 8478,
'uni211D': 8477,
'uni24DD': 9437,
'uni211A': 8474,
'uni211C': 8476,
'uni211B': 8475,
'uni25C6': 9670,
'uni017F': 383,
'uni017A': 378,
'uni017C': 380,
'uni017B': 379,
'uni0346': 838,
'uni22F1': 8945,
'uni22F0': 8944,
'two': 50,
'uni2298': 8856,
'uni24D1': 9425,
'E': 69,
'uni025D': 605,
'scaron': 353,
'uni2322': 8994,
'uni25E3': 9699,
'uni22BF': 8895,
'F': 70,
'uni0440': 1088,
'uni255E': 9566,
'uni22BA': 8890,
'uni0175': 373,
'uni0174': 372,
'uni0177': 375,
'uni0176': 374,
'bracketleft': 91,
'uni0170': 368,
'uni0173': 371,
'uni0172': 370,
'asciicircum': 94,
'uni0179': 377,
'uni2590': 9616,
'uni25E2': 9698,
'uni2119': 8473,
'uni2118': 8472,
'uni25CC': 9676,
'f': 102,
'ordmasculine': 186,
'uni229B': 8859,
'uni22A1': 8865,
'uni2111': 8465,
'uni2110': 8464,
'uni2113': 8467,
'uni2112': 8466,
'mu': 181,
'uni2281': 8833,
'paragraph': 182,
'nine': 57,
'uni25EC': 9708,
'v': 118,
'uni040C': 1036,
'uni0113': 275,
'uni22D0': 8912,
'uni21CC': 8652,
'uni21CB': 8651,
'uni21CA': 8650,
'uni22A5': 8869,
'uni21CF': 8655,
'uni21CE': 8654,
'uni21CD': 8653,
'guilsinglleft': 8249,
'backslash': 92,
'uni2284': 8836,
'uni224E': 8782,
'uni224D': 8781,
'uni224F': 8783,
'uni224A': 8778,
'uni2287': 8839,
'uni224C': 8780,
'uni224B': 8779,
'uni21BD': 8637,
'uni2286': 8838,
'uni030F': 783,
'uni030D': 781,
'uni030E': 782,
'uni030B': 779,
'uni030C': 780,
'uni030A': 778,
'uni026E': 622,
'uni026D': 621,
'six': 54,
'uni026A': 618,
'uni026C': 620,
'uni25C1': 9665,
'uni20D6': 8406,
'uni045B': 1115,
'uni045C': 1116,
'uni256B': 9579,
'uni045A': 1114,
'uni045F': 1119,
'uni045E': 1118,
'A': 65,
'uni2569': 9577,
'uni0458': 1112,
'uni0459': 1113,
'uni0452': 1106,
'uni0453': 1107,
'uni2562': 9570,
'uni0451': 1105,
'uni0456': 1110,
'uni0457': 1111,
'uni0454': 1108,
'uni0455': 1109,
'icircumflex': 238,
'uni0307': 775,
'uni0304': 772,
'uni0305': 773,
'uni0269': 617,
'uni0268': 616,
'uni0300': 768,
'uni0301': 769,
'uni0265': 613,
'uni0264': 612,
'uni0267': 615,
'uni0266': 614,
'uni0261': 609,
'uni0260': 608,
'uni0263': 611,
'uni0262': 610,
'a': 97,
'uni2207': 8711,
'uni2247': 8775,
'uni2246': 8774,
'uni2241': 8769,
'uni2240': 8768,
'uni2243': 8771,
'uni2242': 8770,
'uni2312': 8978,
'ogonek': 731,
'uni2249': 8777,
'uni2248': 8776,
'uni3030': 12336,
'q': 113,
'uni21C2': 8642,
'uni21C1': 8641,
'uni21C0': 8640,
'uni21C7': 8647,
'uni21C6': 8646,
'uni21C5': 8645,
'uni21C4': 8644,
'uni225F': 8799,
'uni212C': 8492,
'uni21C8': 8648,
'uni2467': 9319,
'oacute': 243,
'uni028F': 655,
'uni028E': 654,
'uni026F': 623,
'uni028C': 652,
'uni028B': 651,
'uni028A': 650,
'uni2510': 9488,
'ograve': 242,
'edieresis': 235,
'uni22CE': 8910,
'uni22CF': 8911,
'uni219F': 8607,
'comma': 44,
'uni22CA': 8906,
'uni0429': 1065,
'uni03C6': 966,
'uni0427': 1063,
'uni0426': 1062,
'uni0425': 1061,
'uni0424': 1060,
'uni0423': 1059,
'uni0422': 1058,
'uni0421': 1057,
'uni0420': 1056,
'uni2465': 9317,
'uni24D0': 9424,
'uni2464': 9316,
'uni0430': 1072,
'otilde': 245,
'uni2661': 9825,
'uni24D6': 9430,
'uni2466': 9318,
'uni24D5': 9429,
'uni219A': 8602,
'uni2518': 9496,
'uni22B6': 8886,
'uni2461': 9313,
'uni24D4': 9428,
'uni2460': 9312,
'uni24EA': 9450,
'guillemotright': 187,
'ecircumflex': 234,
'greater': 62,
'uni2011': 8209,
'uacute': 250,
'uni2462': 9314,
'L': 76,
'bullet': 8226,
'uni02A4': 676,
'uni02A7': 679,
'cedilla': 184,
'uni02A2': 674,
'uni2015': 8213,
'uni22C4': 8900,
'uni22C5': 8901,
'uni22AD': 8877,
'uni22C7': 8903,
'uni22C0': 8896,
'uni2016': 8214,
'uni22C2': 8898,
'uni22C3': 8899,
'uni24CF': 9423,
'uni042F': 1071,
'uni042E': 1070,
'uni042D': 1069,
'ydieresis': 255,
'l': 108,
'logicalnot': 172,
'uni24CA': 9418,
'uni0287': 647,
'uni0286': 646,
'uni0285': 645,
'uni0284': 644,
'uni0283': 643,
'uni0282': 642,
'uni0281': 641,
'uni027C': 636,
'uni2664': 9828,
'exclamdown': 161,
'uni25C4': 9668,
'uni0289': 649,
'uni0288': 648,
'uni039A': 922,
'endash': 8211,
'uni2640': 9792,
'uni20E4': 8420,
'uni0473': 1139,
'uni20E1': 8417,
'uni2642': 9794,
'uni03B8': 952,
'uni03B9': 953,
'agrave': 224,
'uni03B4': 948,
'uni03B5': 949,
'uni03B6': 950,
'uni03B7': 951,
'uni03B0': 944,
'uni03B1': 945,
'uni03B2': 946,
'uni03B3': 947,
'uni2555': 9557,
'Adieresis': 196,
'germandbls': 223,
'Odieresis': 214,
'space': 32,
'uni0126': 294,
'uni0127': 295,
'uni0124': 292,
'uni0125': 293,
'uni0122': 290,
'uni0123': 291,
'uni0120': 288,
'uni0121': 289,
'quoteright': 8217,
'uni2560': 9568,
'uni2556': 9558,
'ucircumflex': 251,
'uni2561': 9569,
'uni2551': 9553,
'uni25B2': 9650,
'uni2550': 9552,
'uni2563': 9571,
'uni2553': 9555,
'G': 71,
'uni2564': 9572,
'uni2552': 9554,
'quoteleft': 8216,
'uni2565': 9573,
'uni2572': 9586,
'uni2568': 9576,
'uni2566': 9574,
'W': 87,
'uni214A': 8522,
'uni012F': 303,
'uni012D': 301,
'uni012E': 302,
'uni012B': 299,
'uni012C': 300,
'uni255C': 9564,
'uni012A': 298,
'uni2289': 8841,
'Q': 81,
'uni2320': 8992,
'uni2321': 8993,
'g': 103,
'uni03BD': 957,
'uni03BE': 958,
'uni03BF': 959,
'uni2282': 8834,
'uni2285': 8837,
'uni03BA': 954,
'uni03BB': 955,
'uni03BC': 956,
'uni2128': 8488,
'uni25B7': 9655,
'w': 119,
'uni0302': 770,
'uni03DE': 990,
'uni25DA': 9690,
'uni0303': 771,
'uni0463': 1123,
'uni0462': 1122,
'uni3018': 12312,
'uni2514': 9492,
'question': 63,
'uni25B3': 9651,
'uni24E1': 9441,
'one': 49,
'uni200A': 8202,
'uni2278': 8824,
'ring': 730,
'uni0195': 405,
'figuredash': 8210,
'uni22EC': 8940,
'uni0339': 825,
'uni0338': 824,
'uni0337': 823,
'uni0336': 822,
'uni0335': 821,
'uni0333': 819,
'uni0332': 818,
'uni0331': 817,
'uni0330': 816,
'uni01C1': 449,
'uni01C0': 448,
'uni01C3': 451,
'uni01C2': 450,
'uni2353': 9043,
'uni0308': 776,
'uni2218': 8728,
'uni2219': 8729,
'uni2216': 8726,
'uni2217': 8727,
'uni2214': 8724,
'uni0309': 777,
'uni2609': 9737,
'uni2213': 8723,
'uni2210': 8720,
'uni2211': 8721,
'uni2245': 8773,
'B': 66,
'uni25D6': 9686,
'iacute': 237,
'uni02E6': 742,
'uni02E7': 743,
'uni02E8': 744,
'uni02E9': 745,
'uni221D': 8733,
'uni221E': 8734,
'Ydieresis': 376,
'uni221C': 8732,
'uni22D7': 8919,
'uni221A': 8730,
'R': 82,
'uni24DC': 9436,
'uni033F': 831,
'uni033E': 830,
'uni033C': 828,
'uni033B': 827,
'uni033A': 826,
'b': 98,
'uni228A': 8842,
'uni22DB': 8923,
'uni2554': 9556,
'uni046B': 1131,
'uni046A': 1130,
'r': 114,
'uni24DB': 9435,
'Ccedilla': 199,
'minus': 8722,
'uni24DA': 9434,
'uni03F0': 1008,
'uni03F1': 1009,
'uni20AC': 8364,
'uni2276': 8822,
'uni24C0': 9408,
'uni0162': 354,
'uni0163': 355,
'uni011E': 286,
'uni011D': 285,
'uni011C': 284,
'uni011B': 283,
'uni0164': 356,
'uni0165': 357,
'Lslash': 321,
'uni0168': 360,
'uni0169': 361,
'uni25C9': 9673,
'uni02E5': 741,
'uni21C3': 8643,
'uni24C4': 9412,
'uni24E2': 9442,
'uni2277': 8823,
'uni013A': 314,
'uni2102': 8450,
'Uacute': 218,
'uni2317': 8983,
'uni2107': 8455,
'uni221F': 8735,
'yacute': 253,
'uni3012': 12306,
'Ucircumflex': 219,
'uni015D': 349,
'quotedbl': 34,
'uni25D9': 9689,
'uni2280': 8832,
'uni22AF': 8879,
'onehalf': 189,
'uni221B': 8731,
'Thorn': 222,
'uni2226': 8742,
'M': 77,
'uni25BA': 9658,
'uni2463': 9315,
'uni2336': 9014,
'eight': 56,
'uni2236': 8758,
'multiply': 215,
'uni210C': 8460,
'uni210A': 8458,
'uni21C9': 8649,
'grave': 96,
'uni210E': 8462,
'uni0117': 279,
'uni016C': 364,
'uni0115': 277,
'uni016A': 362,
'uni016F': 367,
'uni0112': 274,
'uni016D': 365,
'uni016E': 366,
'Ocircumflex': 212,
'uni2305': 8965,
'm': 109,
'uni24DF': 9439,
'uni0119': 281,
'uni0118': 280,
'uni20A3': 8355,
'uni20A4': 8356,
'uni20A7': 8359,
'uni2288': 8840,
'uni24C3': 9411,
'uni251C': 9500,
'uni228D': 8845,
'uni222F': 8751,
'uni222E': 8750,
'uni222D': 8749,
'uni222C': 8748,
'uni222B': 8747,
'uni222A': 8746,
'uni255B': 9563,
'Ugrave': 217,
'uni24DE': 9438,
'guilsinglright': 8250,
'uni250A': 9482,
'Ntilde': 209,
'uni0279': 633,
'questiondown': 191,
'uni256C': 9580,
'Atilde': 195,
'uni0272': 626,
'uni0273': 627,
'uni0270': 624,
'ccedilla': 231,
'uni0276': 630,
'uni0277': 631,
'uni0274': 628,
'uni0275': 629,
'uni2252': 8786,
'uni041F': 1055,
'uni2250': 8784,
'Z': 90,
'uni2256': 8790,
'uni2257': 8791,
'copyright': 169,
'uni2255': 8789,
'uni043D': 1085,
'uni043E': 1086,
'uni043F': 1087,
'yen': 165,
'uni041D': 1053,
'uni043B': 1083,
'uni043C': 1084,
'uni21B0': 8624,
'uni21B1': 8625,
'uni21B2': 8626,
'uni21B3': 8627,
'uni21B4': 8628,
'uni21B5': 8629,
'uni21B6': 8630,
'uni21B7': 8631,
'uni21B8': 8632,
'Eacute': 201,
'uni2311': 8977,
'uni2310': 8976,
'uni228F': 8847,
'uni25DB': 9691,
'uni21BA': 8634,
'uni21BB': 8635,
'uni21BC': 8636,
'uni2017': 8215,
'uni21BE': 8638,
'uni21BF': 8639,
'uni231C': 8988,
'H': 72,
'uni0293': 659,
'uni2202': 8706,
'uni22A4': 8868,
'uni231E': 8990,
'uni2232': 8754,
'uni225B': 8795,
'uni225C': 8796,
'uni24D9': 9433,
'uni225A': 8794,
'uni0438': 1080,
'uni0439': 1081,
'uni225D': 8797,
'uni225E': 8798,
'uni0434': 1076,
'X': 88,
'uni007F': 127,
'uni0437': 1079,
'Idieresis': 207,
'uni0431': 1073,
'uni0432': 1074,
'uni0433': 1075,
'uni22AC': 8876,
'uni22CD': 8909,
'uni25A3': 9635,
'bar': 124,
'uni24BB': 9403,
'uni037E': 894,
'uni027B': 635,
'h': 104,
'uni027A': 634,
'uni027F': 639,
'uni027D': 637,
'uni027E': 638,
'uni2227': 8743,
'uni2004': 8196,
'uni2225': 8741,
'uni2224': 8740,
'uni2223': 8739,
'uni2222': 8738,
'uni2221': 8737,
'uni2220': 8736,
'x': 120,
'uni2323': 8995,
'uni2559': 9561,
'uni2558': 9560,
'uni2229': 8745,
'uni2228': 8744,
'udieresis': 252,
'uni029D': 669,
'ordfeminine': 170,
'uni22CB': 8907,
'uni233D': 9021,
'uni0428': 1064,
'uni24C6': 9414,
'uni22DD': 8925,
'uni24C7': 9415,
'uni015C': 348,
'uni015B': 347,
'uni015A': 346,
'uni22AA': 8874,
'uni015F': 351,
'uni015E': 350,
'braceleft': 123,
'uni24C5': 9413,
'uni0410': 1040,
'uni03AA': 938,
'uni24C2': 9410,
'uni03AC': 940,
'uni03AB': 939,
'macron': 175,
'uni03AD': 941,
'uni03AF': 943,
'uni0294': 660,
'uni0295': 661,
'uni0296': 662,
'uni0297': 663,
'uni0290': 656,
'uni0291': 657,
'uni0292': 658,
'atilde': 227,
'Acircumflex': 194,
'uni2370': 9072,
'uni24C1': 9409,
'uni0298': 664,
'uni0299': 665,
'Oslash': 216,
'uni029E': 670,
'C': 67,
'quotedblleft': 8220,
'uni029B': 667,
'uni029C': 668,
'uni03A9': 937,
'uni03A8': 936,
'S': 83,
'uni24C9': 9417,
'uni03A1': 929,
'uni03A0': 928,
'exclam': 33,
'uni03A5': 933,
'uni03A4': 932,
'uni03A7': 935,
'Zcaron': 381,
'uni2133': 8499,
'uni2132': 8498,
'uni0159': 345,
'uni0158': 344,
'uni2137': 8503,
'uni2005': 8197,
'uni2135': 8501,
'uni2134': 8500,
'uni02BA': 698,
'uni2033': 8243,
'uni0151': 337,
'uni0150': 336,
'uni0157': 343,
'equal': 61,
'uni0155': 341,
'uni0154': 340,
's': 115,
'uni233F': 9023,
'eth': 240,
'uni24BE': 9406,
'uni21E9': 8681,
'uni2060': 8288,
'Egrave': 200,
'uni255D': 9565,
'uni24CD': 9421,
'uni21E1': 8673,
'uni21B9': 8633,
'hyphen': 45,
'uni01BE': 446,
'uni01BB': 443,
'period': 46,
'igrave': 236,
'uni01BA': 442,
'uni2296': 8854,
'uni2297': 8855,
'uni2294': 8852,
'uni2295': 8853,
'colon': 58,
'uni2293': 8851,
'uni2290': 8848,
'uni2291': 8849,
'uni032D': 813,
'uni032E': 814,
'uni032F': 815,
'uni032A': 810,
'uni032B': 811,
'uni032C': 812,
'uni231D': 8989,
'Ecircumflex': 202,
'uni24D7': 9431,
'uni25DD': 9693,
'trademark': 8482,
'Aacute': 193,
'cent': 162,
'uni0445': 1093,
'uni266E': 9838,
'uni266D': 9837,
'uni266B': 9835,
'uni03C9': 969,
'uni2003': 8195,
'uni2047': 8263,
'lslash': 322,
'uni03A6': 934,
'uni2043': 8259,
'uni250C': 9484,
'uni2040': 8256,
'uni255F': 9567,
'uni24CB': 9419,
'uni0472': 1138,
'uni0446': 1094,
'uni0474': 1140,
'uni0475': 1141,
'uni2508': 9480,
'uni2660': 9824,
'uni2506': 9478,
'uni2502': 9474,
'c': 99,
'uni2500': 9472,
'N': 78,
'uni22A6': 8870,
'uni21E7': 8679,
'uni2130': 8496,
'uni2002': 8194,
'breve': 728,
'uni0442': 1090,
'Oacute': 211,
'uni229F': 8863,
'uni25C7': 9671,
'uni229D': 8861,
'uni229E': 8862,
'guillemotleft': 171,
'uni0329': 809,
'uni24E5': 9445,
'uni011F': 287,
'uni0324': 804,
'uni0325': 805,
'uni0326': 806,
'uni0327': 807,
'uni0321': 801,
'uni0322': 802,
'n': 110,
'uni2032': 8242,
'uni2269': 8809,
'uni2268': 8808,
'uni0306': 774,
'uni226B': 8811,
'uni21EA': 8682,
'uni0166': 358,
'uni203B': 8251,
'uni01B5': 437,
'idieresis': 239,
'uni02BC': 700,
'uni01B0': 432,
'braceright': 125,
'seven': 55,
'uni02BB': 699,
'uni011A': 282,
'uni29FB': 10747,
'brokenbar': 166,
'uni2036': 8246,
'uni25C0': 9664,
'uni0156': 342,
'uni22D5': 8917,
'uni0258': 600,
'ugrave': 249,
'uni22D6': 8918,
'uni22D1': 8913,
'uni2034': 8244,
'uni22D3': 8915,
'uni22D2': 8914,
'uni203C': 8252,
'uni223E': 8766,
'uni02BF': 703,
'uni22D9': 8921,
'uni22D8': 8920,
'uni25BD': 9661,
'uni25BE': 9662,
'uni25BF': 9663,
'uni041B': 1051,
'periodcentered': 183,
'uni25BC': 9660,
'uni019E': 414,
'uni019B': 411,
'uni019A': 410,
'uni2007': 8199,
'uni0391': 913,
'uni0390': 912,
'uni0393': 915,
'uni0392': 914,
'uni0395': 917,
'uni0394': 916,
'uni0397': 919,
'uni0396': 918,
'uni0399': 921,
'uni0398': 920,
'uni25C8': 9672,
'uni2468': 9320,
'sterling': 163,
'uni22EB': 8939,
'uni039C': 924,
'uni039B': 923,
'uni039E': 926,
'uni039D': 925,
'uni039F': 927,
'I': 73,
'uni03E1': 993,
'uni03E0': 992,
'uni2319': 8985,
'uni228B': 8843,
'uni25B5': 9653,
'uni25B6': 9654,
'uni22EA': 8938,
'uni24B9': 9401,
'uni044E': 1102,
'uni0199': 409,
'uni2266': 8806,
'Y': 89,
'uni22A2': 8866,
'Eth': 208,
'uni266F': 9839,
'emdash': 8212,
'uni263B': 9787,
'uni24BD': 9405,
'uni22DE': 8926,
'uni0360': 864,
'uni2557': 9559,
'uni22DF': 8927,
'uni22DA': 8922,
'uni22DC': 8924,
'uni0361': 865,
'i': 105,
'uni24BF': 9407,
'uni0362': 866,
'uni263E': 9790,
'uni028D': 653,
'uni2259': 8793,
'uni0323': 803,
'uni2265': 8805,
'daggerdbl': 8225,
'y': 121,
'uni010A': 266,
'plusminus': 177,
'less': 60,
'uni21AE': 8622,
'uni0315': 789,
'uni230B': 8971,
'uni21AF': 8623,
'uni21AA': 8618,
'uni21AC': 8620,
'uni21AB': 8619,
'uni01FB': 507,
'uni01FC': 508,
'uni223A': 8762,
'uni01FA': 506,
'uni01FF': 511,
'uni01FD': 509,
'uni01FE': 510,
'uni2567': 9575,
'uni25E0': 9696,
'uni0104': 260,
'uni0105': 261,
'uni0106': 262,
'uni0107': 263,
'uni0100': 256,
'uni0101': 257,
'uni0102': 258,
'uni0103': 259,
'uni2038': 8248,
'uni2009': 8201,
'uni2008': 8200,
'uni0108': 264,
'uni0109': 265,
'uni02A1': 673,
'uni223B': 8763,
'uni226C': 8812,
'uni25AC': 9644,
'uni24D3': 9427,
'uni21E0': 8672,
'uni21E3': 8675,
'Udieresis': 220,
'uni21E2': 8674,
'D': 68,
'uni21E5': 8677,
'uni2621': 9761,
'uni21D1': 8657,
'uni203E': 8254,
'uni22C6': 8902,
'uni21E4': 8676,
'uni010D': 269,
'uni010E': 270,
'uni010F': 271,
'five': 53,
'T': 84,
'uni010B': 267,
'uni010C': 268,
'uni2605': 9733,
'uni2663': 9827,
'uni21E6': 8678,
'uni24B6': 9398,
'uni22C1': 8897,
'oslash': 248,
'acute': 180,
'uni01F0': 496,
'd': 100,
'OE': 338,
'uni22E3': 8931,
'Igrave': 204,
'uni2308': 8968,
'uni2309': 8969,
'uni21A9': 8617,
't': 116,
'uni2313': 8979,
'uni03A3': 931,
'uni21A4': 8612,
'uni21A7': 8615,
'uni21A6': 8614,
'uni21A1': 8609,
'uni21A0': 8608,
'uni21A3': 8611,
'uni21A2': 8610,
'parenright': 41,
'uni256A': 9578,
'uni25DC': 9692,
'uni24CE': 9422,
'uni042C': 1068,
'uni24E0': 9440,
'uni042B': 1067,
'uni0409': 1033,
'uni0408': 1032,
'uni24E7': 9447,
'uni25B4': 9652,
'uni042A': 1066,
'uni228E': 8846,
'uni0401': 1025,
'adieresis': 228,
'uni0403': 1027,
'quotesingle': 39,
'uni0405': 1029,
'uni0404': 1028,
'uni0407': 1031,
'uni0406': 1030,
'uni229C': 8860,
'uni2306': 8966,
'uni2253': 8787,
'twodotenleader': 8229,
'uni2131': 8497,
'uni21DA': 8666,
'uni2234': 8756,
'uni2235': 8757,
'uni01A5': 421,
'uni2237': 8759,
'uni2230': 8752,
'uni02CC': 716,
'slash': 47,
'uni01A0': 416,
'ellipsis': 8230,
'uni2299': 8857,
'uni2238': 8760,
'numbersign': 35,
'uni21A8': 8616,
'uni223D': 8765,
'uni01AF': 431,
'uni223F': 8767,
'uni01AD': 429,
'uni01AB': 427,
'odieresis': 246,
'uni223C': 8764,
'uni227D': 8829,
'uni0280': 640,
'O': 79,
'uni227E': 8830,
'uni21A5': 8613,
'uni22D4': 8916,
'uni25D4': 9684,
'uni227F': 8831,
'uni0435': 1077,
'uni2302': 8962,
'uni2669': 9833,
'uni24E3': 9443,
'uni2720': 10016,
'uni22A8': 8872,
'uni22A9': 8873,
'uni040A': 1034,
'uni22A7': 8871,
'oe': 339,
'uni040B': 1035,
'uni040E': 1038,
'uni22A3': 8867,
'o': 111,
'uni040F': 1039,
'Edieresis': 203,
'uni25D5': 9685,
'plus': 43,
'uni044D': 1101,
'uni263C': 9788,
'uni22E6': 8934,
'uni2283': 8835,
'uni258C': 9612,
'uni219E': 8606,
'uni24E4': 9444,
'uni2136': 8502,
'dagger': 8224,
'uni24B7': 9399,
'uni219B': 8603,
'uni22E5': 8933,
'three': 51,
'uni210B': 8459,
'uni2534': 9524,
'uni24B8': 9400,
'uni230A': 8970,
'hungarumlaut': 733,
'parenleft': 40,
'uni0148': 328,
'uni0149': 329,
'uni2124': 8484,
'uni2125': 8485,
'uni2126': 8486,
'uni2127': 8487,
'uni0140': 320,
'uni2129': 8489,
'uni25C5': 9669,
'uni0143': 323,
'uni0144': 324,
'uni0145': 325,
'uni0146': 326,
'uni0147': 327,
'uni210D': 8461,
'fraction': 8260,
'uni2031': 8241,
'uni2196': 8598,
'uni2035': 8245,
'uni24E6': 9446,
'uni016B': 363,
'uni24BA': 9402,
'uni266A': 9834,
'uni0116': 278,
'uni2115': 8469,
'registered': 174,
'J': 74,
'uni25DF': 9695,
'uni25CE': 9678,
'uni273D': 10045,
'dieresis': 168,
'uni212B': 8491,
'uni0114': 276,
'uni212D': 8493,
'uni212E': 8494,
'uni212F': 8495,
'uni014A': 330,
'uni014B': 331,
'uni014C': 332,
'uni014D': 333,
'uni014E': 334,
'uni014F': 335,
'uni025E': 606,
'uni24E8': 9448,
'uni0111': 273,
'uni24E9': 9449,
'Ograve': 210,
'j': 106,
'uni2195': 8597,
'uni2194': 8596,
'uni2197': 8599,
'uni2037': 8247,
'uni2191': 8593,
'uni2190': 8592,
'uni2193': 8595,
'uni2192': 8594,
'uni29FA': 10746,
'uni2713': 10003,
'z': 122,
'uni2199': 8601,
'uni2198': 8600,
'uni2667': 9831,
'ae': 230,
'uni0448': 1096,
'semicolon': 59,
'uni2666': 9830,
'uni038F': 911,
'uni0444': 1092,
'uni0447': 1095,
'uni038E': 910,
'uni0441': 1089,
'uni038C': 908,
'uni0443': 1091,
'uni038A': 906,
'uni0250': 592,
'uni0251': 593,
'uni0252': 594,
'uni0253': 595,
'uni0254': 596,
'at': 64,
'uni0256': 598,
'uni0257': 599,
'uni0167': 359,
'uni0259': 601,
'uni228C': 8844,
'uni2662': 9826,
'uni0319': 793,
'uni0318': 792,
'uni24BC': 9404,
'uni0402': 1026,
'uni22EF': 8943,
'Iacute': 205,
'uni22ED': 8941,
'uni22EE': 8942,
'uni0311': 785,
'uni0310': 784,
'uni21E8': 8680,
'uni0312': 786,
'percent': 37,
'uni0317': 791,
'uni0316': 790,
'uni21D6': 8662,
'uni21D7': 8663,
'uni21D4': 8660,
'uni21D5': 8661,
'uni21D2': 8658,
'uni21D3': 8659,
'uni21D0': 8656,
'uni2138': 8504,
'uni2270': 8816,
'uni2271': 8817,
'uni2272': 8818,
'uni2273': 8819,
'uni2274': 8820,
'uni2275': 8821,
'bracketright': 93,
'uni21D9': 8665,
'uni21DF': 8671,
'uni21DD': 8669,
'uni21DE': 8670,
'AE': 198,
'uni03AE': 942,
'uni227A': 8826,
'uni227B': 8827,
'uni227C': 8828,
'asterisk': 42,
'aacute': 225,
'uni226F': 8815,
'uni22E2': 8930,
'uni0386': 902,
'uni22E0': 8928,
'uni22E1': 8929,
'U': 85,
'uni22E7': 8935,
'uni22E4': 8932,
'uni0387': 903,
'uni031A': 794,
'eacute': 233,
'uni22E8': 8936,
'uni22E9': 8937,
'uni24D8': 9432,
'uni025A': 602,
'uni025B': 603,
'uni025C': 604,
'e': 101,
'uni0128': 296,
'uni025F': 607,
'uni2665': 9829,
'thorn': 254,
'uni0129': 297,
'uni253C': 9532,
'uni25D7': 9687,
'u': 117,
'uni0388': 904,
'uni0389': 905,
'uni0255': 597,
'uni0171': 369,
'uni0384': 900,
'uni0385': 901,
'uni044A': 1098,
'uni252C': 9516,
'uni044C': 1100,
'uni044B': 1099}
uni2type1 = {v: k for k, v in type12uni.items()}
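# Inverse mapping: Unicode code point -> Type 1 glyph name,
# e.g. uni2type1[229] == 'aring'.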
tex2uni = {
'widehat': 0x0302,
'widetilde': 0x0303,
'langle': 0x27e8,
'rangle': 0x27e9,
'perp': 0x27c2,
'neq': 0x2260,
'Join': 0x2a1d,
'leqslant': 0x2a7d,
'geqslant': 0x2a7e,
'lessapprox': 0x2a85,
'gtrapprox': 0x2a86,
'lesseqqgtr': 0x2a8b,
'gtreqqless': 0x2a8c,
'triangleeq': 0x225c,
'eqslantless': 0x2a95,
'eqslantgtr': 0x2a96,
'backepsilon': 0x03f6,
'precapprox': 0x2ab7,
'succapprox': 0x2ab8,
'fallingdotseq': 0x2252,
'subseteqq': 0x2ac5,
'supseteqq': 0x2ac6,
'varpropto': 0x221d,
'precnapprox': 0x2ab9,
'succnapprox': 0x2aba,
'subsetneqq': 0x2acb,
'supsetneqq': 0x2acc,
'lnapprox': 0x2ab9,
'gnapprox': 0x2aba,
'longleftarrow': 0x27f5,
'longrightarrow': 0x27f6,
'longleftrightarrow': 0x27f7,
'Longleftarrow': 0x27f8,
'Longrightarrow': 0x27f9,
'Longleftrightarrow': 0x27fa,
'longmapsto': 0x27fc,
'leadsto': 0x21dd,
'dashleftarrow': 0x290e,
'dashrightarrow': 0x290f,
'circlearrowleft': 0x21ba,
'circlearrowright': 0x21bb,
'leftrightsquigarrow': 0x21ad,
'leftsquigarrow': 0x219c,
'rightsquigarrow': 0x219d,
'Game': 0x2141,
'hbar': 0x0127,
'hslash': 0x210f,
'ldots': 0x22ef,
'vdots': 0x22ee,
'doteqdot': 0x2251,
'doteq': 8784,
'partial': 8706,
'gg': 8811,
'asymp': 8781,
'blacktriangledown': 9662,
'otimes': 8855,
'nearrow': 8599,
'varpi': 982,
'vee': 8744,
'vec': 8407,
'smile': 8995,
'succnsim': 8937,
'gimel': 8503,
'vert': 124,
'|': 124,
'varrho': 1009,
'P': 182,
'approxident': 8779,
'Swarrow': 8665,
'textasciicircum': 94,
'imageof': 8887,
'ntriangleleft': 8938,
'nleq': 8816,
'div': 247,
'nparallel': 8742,
'Leftarrow': 8656,
'lll': 8920,
'oiint': 8751,
'ngeq': 8817,
'Theta': 920,
'origof': 8886,
'blacksquare': 9632,
'solbar': 9023,
'neg': 172,
'sum': 8721,
'Vdash': 8873,
'coloneq': 8788,
'degree': 176,
'bowtie': 8904,
'blacktriangleright': 9654,
'varsigma': 962,
'leq': 8804,
'ggg': 8921,
'lneqq': 8808,
'scurel': 8881,
'stareq': 8795,
'BbbN': 8469,
'nLeftarrow': 8653,
'nLeftrightarrow': 8654,
'k': 808,
'bot': 8869,
'BbbC': 8450,
'Lsh': 8624,
'leftleftarrows': 8647,
'BbbZ': 8484,
'digamma': 989,
'BbbR': 8477,
'BbbP': 8473,
'BbbQ': 8474,
'vartriangleright': 8883,
'succsim': 8831,
'wedge': 8743,
'lessgtr': 8822,
'veebar': 8891,
'mapsdown': 8615,
'Rsh': 8625,
'chi': 967,
'prec': 8826,
'nsubseteq': 8840,
'therefore': 8756,
'eqcirc': 8790,
'textexclamdown': 161,
'nRightarrow': 8655,
'flat': 9837,
'notin': 8713,
'llcorner': 8990,
'varepsilon': 949,
'bigtriangleup': 9651,
'aleph': 8501,
'dotminus': 8760,
'upsilon': 965,
'Lambda': 923,
'cap': 8745,
'barleftarrow': 8676,
'mu': 956,
'boxplus': 8862,
'mp': 8723,
'circledast': 8859,
'tau': 964,
'in': 8712,
'backslash': 92,
'varnothing': 8709,
'sharp': 9839,
'eqsim': 8770,
'gnsim': 8935,
'Searrow': 8664,
'updownarrows': 8645,
'heartsuit': 9825,
'trianglelefteq': 8884,
'ddag': 8225,
'sqsubseteq': 8849,
'mapsfrom': 8612,
'boxbar': 9707,
'sim': 8764,
'Nwarrow': 8662,
'nequiv': 8802,
'succ': 8827,
'vdash': 8866,
'Leftrightarrow': 8660,
'parallel': 8741,
'invnot': 8976,
'natural': 9838,
'ss': 223,
'uparrow': 8593,
'nsim': 8769,
'hookrightarrow': 8618,
'Equiv': 8803,
'approx': 8776,
'Vvdash': 8874,
'nsucc': 8833,
'leftrightharpoons': 8651,
'Re': 8476,
'boxminus': 8863,
'equiv': 8801,
'Lleftarrow': 8666,
'thinspace': 8201,
'll': 8810,
'Cup': 8915,
'measeq': 8798,
'upharpoonleft': 8639,
'lq': 8216,
'Upsilon': 933,
'subsetneq': 8842,
'greater': 62,
'supsetneq': 8843,
'Cap': 8914,
'L': 321,
'spadesuit': 9824,
'lrcorner': 8991,
'not': 824,
'bar': 772,
'rightharpoonaccent': 8401,
'boxdot': 8865,
'l': 322,
'leftharpoondown': 8637,
'bigcup': 8899,
'iint': 8748,
'bigwedge': 8896,
'downharpoonleft': 8643,
'textasciitilde': 126,
'subset': 8834,
'leqq': 8806,
'mapsup': 8613,
'nvDash': 8877,
'looparrowleft': 8619,
'nless': 8814,
'rightarrowbar': 8677,
'Vert': 8214,
'downdownarrows': 8650,
'uplus': 8846,
'simeq': 8771,
'napprox': 8777,
'ast': 8727,
'twoheaduparrow': 8607,
'doublebarwedge': 8966,
'Sigma': 931,
'leftharpoonaccent': 8400,
'ntrianglelefteq': 8940,
'nexists': 8708,
'times': 215,
'measuredangle': 8737,
'bumpeq': 8783,
'carriagereturn': 8629,
'adots': 8944,
'checkmark': 10003,
'lambda': 955,
'xi': 958,
'rbrace': 125,
'rbrack': 93,
'Nearrow': 8663,
'maltese': 10016,
'clubsuit': 9827,
'top': 8868,
'overarc': 785,
'varphi': 966,
'Delta': 916,
'iota': 953,
'nleftarrow': 8602,
'candra': 784,
'supset': 8835,
'triangleleft': 9665,
'gtreqless': 8923,
'ntrianglerighteq': 8941,
'quad': 8195,
'Xi': 926,
'gtrdot': 8919,
'leftthreetimes': 8907,
'minus': 8722,
'preccurlyeq': 8828,
'nleftrightarrow': 8622,
'lambdabar': 411,
'blacktriangle': 9652,
'kernelcontraction': 8763,
'Phi': 934,
'angle': 8736,
'spadesuitopen': 9828,
'eqless': 8924,
'mid': 8739,
'varkappa': 1008,
'Ldsh': 8626,
'updownarrow': 8597,
'beta': 946,
'textquotedblleft': 8220,
'rho': 961,
'alpha': 945,
'intercal': 8890,
'beth': 8502,
'grave': 768,
'acwopencirclearrow': 8634,
'nmid': 8740,
'nsupset': 8837,
'sigma': 963,
'dot': 775,
'Rightarrow': 8658,
'turnednot': 8985,
'backsimeq': 8909,
'leftarrowtail': 8610,
'approxeq': 8778,
'curlyeqsucc': 8927,
'rightarrowtail': 8611,
'Psi': 936,
'copyright': 169,
'yen': 165,
'vartriangleleft': 8882,
'rasp': 700,
'triangleright': 9655,
'precsim': 8830,
'infty': 8734,
'geq': 8805,
'updownarrowbar': 8616,
'precnsim': 8936,
'H': 779,
'ulcorner': 8988,
'looparrowright': 8620,
'ncong': 8775,
'downarrow': 8595,
'circeq': 8791,
'subseteq': 8838,
'bigstar': 9733,
'prime': 8242,
'lceil': 8968,
'Rrightarrow': 8667,
'oiiint': 8752,
'curlywedge': 8911,
'vDash': 8872,
'lfloor': 8970,
'ddots': 8945,
'exists': 8707,
'underbar': 817,
'Pi': 928,
'leftrightarrows': 8646,
'sphericalangle': 8738,
'coprod': 8720,
'circledcirc': 8858,
'gtrsim': 8819,
'gneqq': 8809,
'between': 8812,
'theta': 952,
'complement': 8705,
'arceq': 8792,
'nVdash': 8878,
'S': 167,
'wr': 8768,
'wp': 8472,
'backcong': 8780,
'lasp': 701,
'c': 807,
'nabla': 8711,
'dotplus': 8724,
'eta': 951,
'forall': 8704,
'eth': 240,
'colon': 58,
'sqcup': 8852,
'rightrightarrows': 8649,
'sqsupset': 8848,
'mapsto': 8614,
'bigtriangledown': 9661,
'sqsupseteq': 8850,
'propto': 8733,
'pi': 960,
'pm': 177,
'dots': 8230,
'nrightarrow': 8603,
'textasciiacute': 180,
'Doteq': 8785,
'breve': 774,
'sqcap': 8851,
'twoheadrightarrow': 8608,
'kappa': 954,
'vartriangle': 9653,
'diamondsuit': 9826,
'pitchfork': 8916,
'blacktriangleleft': 9664,
'nprec': 8832,
'curvearrowright': 8631,
'barwedge': 8892,
'multimap': 8888,
'textquestiondown': 191,
'cong': 8773,
'rtimes': 8906,
'rightzigzagarrow': 8669,
'rightarrow': 8594,
'leftarrow': 8592,
'__sqrt__': 8730,
'twoheaddownarrow': 8609,
'oint': 8750,
'bigvee': 8897,
'eqdef': 8797,
'sterling': 163,
'phi': 981,
'Updownarrow': 8661,
'backprime': 8245,
'emdash': 8212,
'Gamma': 915,
'i': 305,
'rceil': 8969,
'leftharpoonup': 8636,
'Im': 8465,
'curvearrowleft': 8630,
'wedgeq': 8793,
'curlyeqprec': 8926,
'questeq': 8799,
'less': 60,
'upuparrows': 8648,
'tilde': 771,
'textasciigrave': 96,
'smallsetminus': 8726,
'ell': 8467,
'cup': 8746,
'danger': 9761,
'nVDash': 8879,
'cdotp': 183,
'cdots': 8943,
'hat': 770,
'eqgtr': 8925,
'enspace': 8194,
'psi': 968,
'frown': 8994,
'acute': 769,
'downzigzagarrow': 8623,
'ntriangleright': 8939,
'cupdot': 8845,
'circleddash': 8861,
'oslash': 8856,
'mho': 8487,
'd': 803,
'sqsubset': 8847,
'cdot': 8901,
'Omega': 937,
'OE': 338,
'veeeq': 8794,
'Finv': 8498,
't': 865,
'leftrightarrow': 8596,
'swarrow': 8601,
'rightthreetimes': 8908,
'rightleftharpoons': 8652,
'lesssim': 8818,
'searrow': 8600,
'because': 8757,
'gtrless': 8823,
'star': 8902,
'nsubset': 8836,
'zeta': 950,
'dddot': 8411,
'bigcirc': 9675,
'Supset': 8913,
'circ': 8728,
'slash': 8725,
'ocirc': 778,
'prod': 8719,
'twoheadleftarrow': 8606,
'daleth': 8504,
'upharpoonright': 8638,
'odot': 8857,
'Uparrow': 8657,
'O': 216,
'hookleftarrow': 8617,
'trianglerighteq': 8885,
'nsime': 8772,
'oe': 339,
'nwarrow': 8598,
'o': 248,
'ddddot': 8412,
'downharpoonright': 8642,
'succcurlyeq': 8829,
'gamma': 947,
'scrR': 8475,
'dag': 8224,
'thickspace': 8197,
'frakZ': 8488,
'lessdot': 8918,
'triangledown': 9663,
'ltimes': 8905,
'scrB': 8492,
'endash': 8211,
'scrE': 8496,
'scrF': 8497,
'scrH': 8459,
'scrI': 8464,
'rightharpoondown': 8641,
'scrL': 8466,
'scrM': 8499,
'frakC': 8493,
'nsupseteq': 8841,
'circledR': 174,
'circledS': 9416,
'ngtr': 8815,
'bigcap': 8898,
'scre': 8495,
'Downarrow': 8659,
'scrg': 8458,
'overleftrightarrow': 8417,
'scro': 8500,
'lnsim': 8934,
'eqcolon': 8789,
'curlyvee': 8910,
'urcorner': 8989,
'lbrace': 123,
'Bumpeq': 8782,
'delta': 948,
'boxtimes': 8864,
'overleftarrow': 8406,
'prurel': 8880,
'clubsuitopen': 9831,
'cwopencirclearrow': 8635,
'geqq': 8807,
'rightleftarrows': 8644,
'ac': 8766,
'ae': 230,
'int': 8747,
'rfloor': 8971,
'risingdotseq': 8787,
'nvdash': 8876,
'diamond': 8900,
'ddot': 776,
'backsim': 8765,
'oplus': 8853,
'triangleq': 8796,
'check': 780,
'ni': 8715,
'iiint': 8749,
'ne': 8800,
'lesseqgtr': 8922,
'obar': 9021,
'supseteq': 8839,
'nu': 957,
'AA': 8491,
'AE': 198,
'models': 8871,
'ominus': 8854,
'dashv': 8867,
'omega': 969,
'rq': 8217,
'Subset': 8912,
'rightharpoonup': 8640,
'Rdsh': 8627,
'bullet': 8729,
'divideontimes': 8903,
'lbrack': 91,
'textquotedblright': 8221,
'Colon': 8759,
'%': 37,
'$': 36,
'{': 123,
'}': 125,
'_': 95,
'imath': 0x131,
'circumflexaccent' : 770,
'combiningbreve' : 774,
'combiningoverline' : 772,
'combininggraveaccent' : 768,
'combiningacuteaccent' : 769,
'combiningdiaeresis' : 776,
'combiningtilde' : 771,
'combiningrightarrowabove' : 8407,
'combiningdotabove' : 775,
'to': 8594,
'succeq': 8829,
'emptyset': 8709,
'leftparen': 40,
'rightparen': 41,
'bigoplus': 10753,
'leftangle': 10216,
'rightangle': 10217,
'leftbrace': 123,
'rightbrace': 125,
'jmath': 567,
'bigodot': 10752,
'preceq': 8828,
'biguplus': 10756,
'epsilon': 949,
'vartheta': 977,
'bigotimes': 10754
}
# Each element is a 4-tuple of the form:
# src_start, src_end, dst_font, dst_start
#
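# For example, in the 'bb' table below, the 'rm' entry
# (0x0041, 0x0042, 'rm', 0x1d538) maps the source range 'A'-'B' of the
# roman font onto the double-struck alphabet starting at U+1D538, so
# chr(0x41 + i) is drawn as chr(0x1d538 + i) for i in 0..1.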
stix_virtual_fonts = {
'bb':
{
'rm':
[
(0x0030, 0x0039, 'rm', 0x1d7d8), # 0-9
(0x0041, 0x0042, 'rm', 0x1d538), # A-B
(0x0043, 0x0043, 'rm', 0x2102), # C
(0x0044, 0x0047, 'rm', 0x1d53b), # D-G
(0x0048, 0x0048, 'rm', 0x210d), # H
(0x0049, 0x004d, 'rm', 0x1d540), # I-M
(0x004e, 0x004e, 'rm', 0x2115), # N
(0x004f, 0x004f, 'rm', 0x1d546), # O
(0x0050, 0x0051, 'rm', 0x2119), # P-Q
(0x0052, 0x0052, 'rm', 0x211d), # R
(0x0053, 0x0059, 'rm', 0x1d54a), # S-Y
(0x005a, 0x005a, 'rm', 0x2124), # Z
(0x0061, 0x007a, 'rm', 0x1d552), # a-z
(0x0393, 0x0393, 'rm', 0x213e), # \Gamma
(0x03a0, 0x03a0, 'rm', 0x213f), # \Pi
(0x03a3, 0x03a3, 'rm', 0x2140), # \Sigma
(0x03b3, 0x03b3, 'rm', 0x213d), # \gamma
(0x03c0, 0x03c0, 'rm', 0x213c), # \pi
],
'it':
[
(0x0030, 0x0039, 'rm', 0x1d7d8), # 0-9
(0x0041, 0x0042, 'it', 0xe154), # A-B
(0x0043, 0x0043, 'it', 0x2102), # C (missing in beta STIX fonts)
(0x0044, 0x0044, 'it', 0x2145), # D
(0x0045, 0x0047, 'it', 0xe156), # E-G
(0x0048, 0x0048, 'it', 0x210d), # H (missing in beta STIX fonts)
(0x0049, 0x004d, 'it', 0xe159), # I-M
(0x004e, 0x004e, 'it', 0x2115), # N (missing in beta STIX fonts)
(0x004f, 0x004f, 'it', 0xe15e), # O
(0x0050, 0x0051, 'it', 0x2119), # P-Q (missing in beta STIX fonts)
(0x0052, 0x0052, 'it', 0x211d), # R (missing in beta STIX fonts)
(0x0053, 0x0059, 'it', 0xe15f), # S-Y
(0x005a, 0x005a, 'it', 0x2124), # Z (missing in beta STIX fonts)
(0x0061, 0x0063, 'it', 0xe166), # a-c
(0x0064, 0x0065, 'it', 0x2146), # d-e
(0x0066, 0x0068, 'it', 0xe169), # f-h
(0x0069, 0x006a, 'it', 0x2148), # i-j
(0x006b, 0x007a, 'it', 0xe16c), # k-z
(0x0393, 0x0393, 'it', 0x213e), # \Gamma (missing in beta STIX fonts)
(0x03a0, 0x03a0, 'it', 0x213f), # \Pi
(0x03a3, 0x03a3, 'it', 0x2140), # \Sigma (missing in beta STIX fonts)
(0x03b3, 0x03b3, 'it', 0x213d), # \gamma (missing in beta STIX fonts)
(0x03c0, 0x03c0, 'it', 0x213c), # \pi
],
'bf':
[
(0x0030, 0x0039, 'rm', 0x1d7d8), # 0-9
(0x0041, 0x005a, 'bf', 0xe38a), # A-Z
(0x0061, 0x007a, 'bf', 0xe39d), # a-z
(0x0393, 0x0393, 'bf', 0x213e), # \Gamma
(0x03a0, 0x03a0, 'bf', 0x213f), # \Pi
(0x03a3, 0x03a3, 'bf', 0x2140), # \Sigma
(0x03b3, 0x03b3, 'bf', 0x213d), # \gamma
(0x03c0, 0x03c0, 'bf', 0x213c), # \pi
],
},
'cal':
[
(0x0041, 0x005a, 'it', 0xe22d), # A-Z
],
'circled':
{
'rm':
[
(0x0030, 0x0030, 'rm', 0x24ea), # 0
(0x0031, 0x0039, 'rm', 0x2460), # 1-9
(0x0041, 0x005a, 'rm', 0x24b6), # A-Z
(0x0061, 0x007a, 'rm', 0x24d0) # a-z
],
'it':
[
(0x0030, 0x0030, 'rm', 0x24ea), # 0
(0x0031, 0x0039, 'rm', 0x2460), # 1-9
(0x0041, 0x005a, 'it', 0x24b6), # A-Z
(0x0061, 0x007a, 'it', 0x24d0) # a-z
],
'bf':
[
(0x0030, 0x0030, 'bf', 0x24ea), # 0
(0x0031, 0x0039, 'bf', 0x2460), # 1-9
(0x0041, 0x005a, 'bf', 0x24b6), # A-Z
(0x0061, 0x007a, 'bf', 0x24d0) # a-z
],
},
'frak':
{
'rm':
[
(0x0041, 0x0042, 'rm', 0x1d504), # A-B
(0x0043, 0x0043, 'rm', 0x212d), # C
(0x0044, 0x0047, 'rm', 0x1d507), # D-G
(0x0048, 0x0048, 'rm', 0x210c), # H
(0x0049, 0x0049, 'rm', 0x2111), # I
(0x004a, 0x0051, 'rm', 0x1d50d), # J-Q
(0x0052, 0x0052, 'rm', 0x211c), # R
(0x0053, 0x0059, 'rm', 0x1d516), # S-Y
(0x005a, 0x005a, 'rm', 0x2128), # Z
(0x0061, 0x007a, 'rm', 0x1d51e), # a-z
],
'it':
[
(0x0041, 0x0042, 'rm', 0x1d504), # A-B
(0x0043, 0x0043, 'rm', 0x212d), # C
(0x0044, 0x0047, 'rm', 0x1d507), # D-G
(0x0048, 0x0048, 'rm', 0x210c), # H
(0x0049, 0x0049, 'rm', 0x2111), # I
(0x004a, 0x0051, 'rm', 0x1d50d), # J-Q
(0x0052, 0x0052, 'rm', 0x211c), # R
(0x0053, 0x0059, 'rm', 0x1d516), # S-Y
(0x005a, 0x005a, 'rm', 0x2128), # Z
(0x0061, 0x007a, 'rm', 0x1d51e), # a-z
],
'bf':
[
(0x0041, 0x005a, 'bf', 0x1d56c), # A-Z
(0x0061, 0x007a, 'bf', 0x1d586), # a-z
],
},
'scr':
[
(0x0041, 0x0041, 'it', 0x1d49c), # A
(0x0042, 0x0042, 'it', 0x212c), # B
(0x0043, 0x0044, 'it', 0x1d49e), # C-D
(0x0045, 0x0046, 'it', 0x2130), # E-F
(0x0047, 0x0047, 'it', 0x1d4a2), # G
(0x0048, 0x0048, 'it', 0x210b), # H
(0x0049, 0x0049, 'it', 0x2110), # I
(0x004a, 0x004b, 'it', 0x1d4a5), # J-K
(0x004c, 0x004c, 'it', 0x2112), # L
(0x004d, 0x004d, 'it', 0x2133), # M
(0x004e, 0x0051, 'it', 0x1d4a9), # N-Q
(0x0052, 0x0052, 'it', 0x211b), # R
(0x0053, 0x005a, 'it', 0x1d4ae), # S-Z
(0x0061, 0x0064, 'it', 0x1d4b6), # a-d
(0x0065, 0x0065, 'it', 0x212f), # e
(0x0066, 0x0066, 'it', 0x1d4bb), # f
(0x0067, 0x0067, 'it', 0x210a), # g
(0x0068, 0x006e, 'it', 0x1d4bd), # h-n
(0x006f, 0x006f, 'it', 0x2134), # o
(0x0070, 0x007a, 'it', 0x1d4c5), # p-z
],
'sf':
{
'rm':
[
(0x0030, 0x0039, 'rm', 0x1d7e2), # 0-9
(0x0041, 0x005a, 'rm', 0x1d5a0), # A-Z
(0x0061, 0x007a, 'rm', 0x1d5ba), # a-z
(0x0391, 0x03a9, 'rm', 0xe17d), # \Alpha-\Omega
(0x03b1, 0x03c9, 'rm', 0xe196), # \alpha-\omega
(0x03d1, 0x03d1, 'rm', 0xe1b0), # theta variant
(0x03d5, 0x03d5, 'rm', 0xe1b1), # phi variant
(0x03d6, 0x03d6, 'rm', 0xe1b3), # pi variant
(0x03f1, 0x03f1, 'rm', 0xe1b2), # rho variant
(0x03f5, 0x03f5, 'rm', 0xe1af), # lunate epsilon
(0x2202, 0x2202, 'rm', 0xe17c), # partial differential
],
'it':
[
# These numerals are actually upright. We don't actually
# want italic numerals ever.
(0x0030, 0x0039, 'rm', 0x1d7e2), # 0-9
(0x0041, 0x005a, 'it', 0x1d608), # A-Z
(0x0061, 0x007a, 'it', 0x1d622), # a-z
(0x0391, 0x03a9, 'rm', 0xe17d), # \Alpha-\Omega
(0x03b1, 0x03c9, 'it', 0xe1d8), # \alpha-\omega
(0x03d1, 0x03d1, 'it', 0xe1f2), # theta variant
(0x03d5, 0x03d5, 'it', 0xe1f3), # phi variant
(0x03d6, 0x03d6, 'it', 0xe1f5), # pi variant
(0x03f1, 0x03f1, 'it', 0xe1f4), # rho variant
(0x03f5, 0x03f5, 'it', 0xe1f1), # lunate epsilon
],
'bf':
[
(0x0030, 0x0039, 'bf', 0x1d7ec), # 0-9
(0x0041, 0x005a, 'bf', 0x1d5d4), # A-Z
(0x0061, 0x007a, 'bf', 0x1d5ee), # a-z
(0x0391, 0x03a9, 'bf', 0x1d756), # \Alpha-\Omega
(0x03b1, 0x03c9, 'bf', 0x1d770), # \alpha-\omega
(0x03d1, 0x03d1, 'bf', 0x1d78b), # theta variant
(0x03d5, 0x03d5, 'bf', 0x1d78d), # phi variant
(0x03d6, 0x03d6, 'bf', 0x1d78f), # pi variant
(0x03f0, 0x03f0, 'bf', 0x1d78c), # kappa variant
(0x03f1, 0x03f1, 'bf', 0x1d78e), # rho variant
(0x03f5, 0x03f5, 'bf', 0x1d78a), # lunate epsilon
(0x2202, 0x2202, 'bf', 0x1d789), # partial differential
(0x2207, 0x2207, 'bf', 0x1d76f), # \Nabla
],
},
'tt':
[
(0x0030, 0x0039, 'rm', 0x1d7f6), # 0-9
(0x0041, 0x005a, 'rm', 0x1d670), # A-Z
(0x0061, 0x007a, 'rm', 0x1d68a) # a-z
],
}
| agpl-3.0 |
kastnerkyle/PyCon2015 | ex3_digits.py | 2 | 2229 | from sklearn.datasets import load_digits
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
from matplotlib import offsetbox
import numpy as np
"""
This example tweaked from
http://scikit-learn.org/stable/auto_examples/manifold/plot_lle_digits.html
Original Authors:
Fabian Pedregosa <[email protected]>
Olivier Grisel <[email protected]>
Mathieu Blondel <[email protected]>
Gael Varoquaux
"""
digits = load_digits(n_class=6)
X = digits.data
y = digits.target
n_img_per_row = 20
img = np.zeros((10 * n_img_per_row, 10 * n_img_per_row))
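# tile the first n_img_per_row**2 digits (8x8 pixels each, on a 10-pixel
# grid) into a single mosaic image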
for i in range(n_img_per_row):
ix = 10 * i + 1
for j in range(n_img_per_row):
iy = 10 * j + 1
img[ix:ix + 8, iy:iy + 8] = X[i * n_img_per_row + j].reshape((8, 8))
plt.imshow(img, cmap=plt.cm.binary)
plt.xticks([])
plt.yticks([])
plt.title('A selection from the 64-dimensional digits dataset')
def plot_embedding(X, title=None):
x_min, x_max = np.min(X, 0), np.max(X, 0)
X = (X - x_min) / (x_max - x_min)
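    # rescale the embedding to the unit square so text and thumbnail
    # placement is comparable across embedding methods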
plt.figure()
ax = plt.subplot(111)
for i in range(X.shape[0]):
plt.text(X[i, 0], X[i, 1], str(digits.target[i]),
color=plt.cm.Set1(y[i] / 10.),
fontdict={'weight': 'bold', 'size': 9})
if hasattr(offsetbox, 'AnnotationBbox'):
# only print thumbnails with matplotlib > 1.0
shown_images = np.array([[1., 1.]]) # just something big
for i in range(digits.data.shape[0]):
dist = np.sum((X[i] - shown_images) ** 2, 1)
if np.min(dist) < 4e-3:
# don't show points that are too close
continue
shown_images = np.r_[shown_images, [X[i]]]
imagebox = offsetbox.AnnotationBbox(
offsetbox.OffsetImage(digits.images[i], cmap=plt.cm.gray_r),
X[i])
ax.add_artist(imagebox)
plt.xticks([]), plt.yticks([])
if title is not None:
plt.title(title)
X_tsne = TSNE(n_components=2, init="pca", random_state=1999).fit_transform(X)
plot_embedding(X_tsne, title="TSNE embedding")
X_pca = PCA(n_components=2).fit_transform(X)
plot_embedding(X_pca, title="PCA embedding")
plt.show()
| bsd-3-clause |
NerdWalletOSS/Q | ML/DT/python/DTree_sklearn_ramesh_dataset_train_test.py | 1 | 2773 |
# coding: utf-8
# In[15]:
import sklearn_dt_utils as utils
from sklearn.tree import export_graphviz
import pandas as pd
import os
import sys
# In[16]:
q_src_dir = os.getenv('Q_SRC_ROOT')
if not q_src_dir:
print("'Q_SRC_ROOT' is not set")
exit(-1)
train_csv_file_path = "%s/ML/KNN/data/from_ramesh/ds1_11709_13248_train.csv" % q_src_dir
test_csv_file_path = "%s/ML/KNN/data/from_ramesh/ds1_11709_13248_test.csv" % q_src_dir
graphviz_gini = "graphviz_gini.txt"
graphviz_entropy = "graphviz_entropy.txt"
goal_col_name = "class"
# In[17]:
print("Train data shape")
train_data = utils.import_data(train_csv_file_path)
print("Test data shape")
test_data = utils.import_data(test_csv_file_path)
# In[18]:
X, Y, X_train, temp_X_train, y_train, temp_y_train = utils.split_dataset(train_data, goal_col_name, 1)
X, Y, X_test, temp_X_test, y_test, temp_y_test = utils.split_dataset(test_data, goal_col_name, 1)
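# split_dataset (from sklearn_dt_utils) is assumed to return
# (X, Y, X_train, X_test, y_train, y_test); with a ratio of 1 all rows stay
# in the first split, so the temp_* outputs are unused below.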
# In[19]:
#print(len(X_train))
#print(len(X_test))
# In[20]:
# cross validation
# cross_validate_dt_new(X, Y)
# In[21]:
# cross validation
# cross_validate_dt(X, Y)
# In[22]:
#calling gridsearchcv
grid = utils.grid_search_cv(X_train, y_train, scoring_method="f1_weighted")
# pickle_path = "category1_f1_wt.pkl"
# saving model to pkl file
# utils.save(grid, pickle_path)
# loading model from pkl file
# grid = utils.restore(pickle_path)
"""
print(grid.cv_results_)
print("============================")
print(grid.best_estimator_)
print("============================")
print(grid.best_score_)
print("============================")
print(grid.best_params_)
print("============================")
"""
# Prediction using gini
y_pred_gini = utils.prediction(X_test, grid.best_estimator_)
print("Results for grid search algo")
utils.cal_accuracy(y_test, y_pred_gini)
export_graphviz(grid.best_estimator_, out_file="best_fit_graphviz_ramesh_acr.txt", filled=True, rounded=True, special_characters=True, feature_names=X_train.columns)
# Train using gini
clf_gini = utils.train_using_gini(X_train, y_train)
# print(X_train[1])
export_graphviz(clf_gini, out_file=graphviz_gini, filled=True, rounded=True, special_characters=True, feature_names=X_train.columns)
# In[23]:
# Prediction using gini
y_pred_gini = utils.prediction(X_test, clf_gini)
print("Results for gini algo")
utils.cal_accuracy(y_test, y_pred_gini)
# In[24]:
# Train using entropy
clf_entropy = utils.tarin_using_entropy(X_train, y_train)
# print(clf_entropy)
export_graphviz(clf_entropy, out_file=graphviz_entropy, filled=True, rounded=True, special_characters=True, feature_names=X_train.columns)
# In[25]:
# Prediction using entropy
y_pred_entropy = utils.prediction(X_test, clf_entropy)
print("Results for entropy algo")
utils.cal_accuracy(y_test, y_pred_entropy)
| mit |
idaholab/raven | tests/framework/unit_tests/TSA/testFourier.py | 1 | 13409 | # Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This Module performs Unit Tests for the TSA.Fourier class.
  It cannot be considered part of the active code but is part of the regression test system
"""
import os
import sys
import copy
import numpy as np
# add RAVEN to path
frameworkDir = os.path.abspath(os.path.join(*([os.path.dirname(__file__)] + [os.pardir]*4 + ['framework'])))
if frameworkDir not in sys.path:
sys.path.append(frameworkDir)
from utils.utils import find_crow
find_crow(frameworkDir)
from utils import xmlUtils
from TSA import Fourier
plot = False
print('Module undergoing testing:')
print(Fourier)
print('')
results = {"pass":0,"fail":0}
def checkFloat(comment, value, expected, tol=1e-10, update=True):
"""
This method is aimed to compare two floats given a certain tolerance
@ In, comment, string, a comment printed out if it fails
@ In, value, float, the value to compare
@ In, expected, float, the expected value
@ In, tol, float, optional, the tolerance
@ In, update, bool, optional, if False then don't update results counter
@ Out, res, bool, True if same
"""
if np.isnan(value) and np.isnan(expected):
res = True
elif np.isnan(value) or np.isnan(expected):
res = False
else:
res = abs(value - expected) <= tol
if update:
if not res:
print("checking float",comment,'|',value,"!=",expected)
results["fail"] += 1
else:
results["pass"] += 1
return res
def checkTrue(comment, res, update=True):
"""
This method is a pass-through for consistency and updating
@ In, comment, string, a comment printed out if it fails
@ In, res, bool, the tested value
@ In, update, bool, optional, if False then don't update results counter
@ Out, res, bool, True if test
"""
if update:
if res:
results["pass"] += 1
else:
print("checking bool",comment,'|',res,'is not True!')
results["fail"] += 1
return res
def checkSame(comment, value, expected, update=True):
"""
This method is aimed to compare two identical things
@ In, comment, string, a comment printed out if it fails
@ In, value, float, the value to compare
@ In, expected, float, the expected value
@ In, update, bool, optional, if False then don't update results counter
@ Out, res, bool, True if same
"""
res = value == expected
if update:
if res:
results["pass"] += 1
else:
print("checking string",comment,'|',value,"!=",expected)
results["fail"] += 1
return res
def checkArray(comment, first, second, dtype, tol=1e-10, update=True):
"""
This method is aimed to compare two arrays
@ In, comment, string, a comment printed out if it fails
@ In, value, float, the value to compare
@ In, expected, float, the expected value
@ In, tol, float, optional, the tolerance
@ In, update, bool, optional, if False then don't update results counter
@ Out, res, bool, True if same
"""
res = True
if len(first) != len(second):
res = False
print("checking answer",comment,'|','lengths do not match:',len(first),len(second))
else:
for i in range(len(first)):
if dtype == float:
pres = checkFloat('',first[i],second[i],tol,update=False)
      elif dtype == str:
pres = checkSame('',first[i],second[i],update=False)
if not pres:
print('checking array',comment,'|','entry "{}" does not match: {} != {}'.format(i,first[i],second[i]))
res = False
if update:
if res:
results["pass"] += 1
else:
results["fail"] += 1
return res
def checkNone(comment, entry, update=True):
"""
Checks if entry is None.
@ In, comment, string, a comment printed out if it fails
@ In, entry, object, to test if against None
@ In, update, bool, optional, if False then don't update results counter
@ Out, res, bool, True if None
"""
res = entry is None
if update:
if res:
results["pass"] += 1
else:
print("checking answer",comment,'|','"{}" is not None!'.format(entry))
results["fail"] += 1
def checkFails(comment, errstr, function, update=True, args=None, kwargs=None):
"""
Checks if expected error occurs
@ In, comment, string, a comment printed out if it fails
@ In, errstr, str, expected fail message
@ In, function, method, method to run to test for failure
@ In, update, bool, optional, if False then don't update results counter
@ In, args, list, arguments to pass to function
@ In, kwargs, dict, keyword arguments to pass to function
@ Out, res, bool, True if failed as expected
"""
print('Error testing ...')
if args is None:
args = []
if kwargs is None:
kwargs = {}
try:
function(*args,**kwargs)
res = False
msg = 'Function call did not error!'
except Exception as e:
res = checkSame('',e.args[0],errstr,update=False)
if not res:
msg = 'Unexpected error message. \n Received: "{}"\n Expected: "{}"'.format(e.args[0],errstr)
if update:
if res:
results["pass"] += 1
print(' ... end Error testing (PASSED)')
else:
print("checking error",comment,'|',msg)
results["fail"] += 1
print(' ... end Error testing (FAILED)')
print('')
return res
######################################
# CONSTRUCTION #
######################################
def createFourierXML(targets, periods):
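  """
    Creates an XML node mimicking the user input for a Fourier TSA object.
    @ In, targets, list(str), names of the target variables
    @ In, periods, list(float), Fourier periods to request
    @ Out, xml, xml.etree.ElementTree.Element, Fourier input node
  """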
xml = xmlUtils.newNode('Fourier', attrib={'target':','.join(targets)})
xml.append(xmlUtils.newNode('periods', text=','.join(str(k) for k in periods)))
return xml
def createFromXML(xml):
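  """
    Constructs and initializes a Fourier object from its XML input node.
    @ In, xml, xml.etree.ElementTree.Element, Fourier input node
    @ Out, fourier, TSA.Fourier, initialized Fourier object
  """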
fourier = Fourier()
inputSpec = Fourier.getInputSpecification()()
inputSpec.parseNode(xml)
fourier.handleInput(inputSpec)
return fourier
def createFourier(targets, periods):
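  """
    Convenience constructor: builds the XML node, then the Fourier object.
    @ In, targets, list(str), names of the target variables
    @ In, periods, list(float), Fourier periods to request
    @ Out, fourier, TSA.Fourier, initialized Fourier object
  """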
xml = createFourierXML(targets, periods)
fourier = createFromXML(xml)
return fourier
def createFourierSignal(amps, periods, phases, pivot, intercept=0, plot=False):
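  """
    Builds a synthetic signal as a sum of sinusoids plus a constant intercept.
    @ In, amps, list(float), amplitude of each sinusoid
    @ In, periods, list(float), period of each sinusoid
    @ In, phases, list(float), phase of each sinusoid
    @ In, pivot, np.array, pivot parameter (e.g. time) values
    @ In, intercept, float, optional, constant offset added to the signal
    @ In, plot, bool, optional, if True then plot the components and their sum
    @ Out, signal, np.array, synthesized signal
  """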
if plot:
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
signal = np.zeros(len(pivot)) + intercept
for k, period in enumerate(periods):
new = amps[k] * np.sin(2 * np.pi / period * pivot + phases[k])
if plot:
ax.plot(pivot, new, ':')
signal += new
if plot:
ax.plot(pivot, signal, 'k-')
plt.show()
return signal
###################
# Simple #
###################
# generate signal
targets = ['A', 'B', 'C']
pivot = np.arange(100) / 10.
periods = [2, 5, 10]
amps = [0.5, 1, 2]
phasesA = [0, np.pi, 0]
signalA = createFourierSignal(amps, periods, phasesA, pivot, plot=plot)
phasesB = [np.pi, 0, np.pi/4]
signalB = createFourierSignal(amps, periods, phasesB, pivot, plot=plot)
phasesC = [np.pi, np.pi/4, -np.pi/4]
interceptC = 2
signalC = createFourierSignal(amps, periods, phasesC, pivot, intercept=interceptC, plot=plot)
signals = np.zeros((len(pivot), 3))
signals[:, 0] = signalA
signals[:, 1] = signalB
signals[:, 2] = signalC
fourier = createFourier(targets, periods)
settings = {'periods': periods}
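# characterize fits, for each target, an intercept plus an amplitude and a
# phase per requested period; the fitted values are checked below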
params = fourier.characterize(signals, pivot, targets, settings)
checkTrue("fourier can generate", fourier.canGenerate())
checkTrue("fourier can characterize", fourier.canCharacterize())
# intercepts
checkFloat('Signal A intercept', params['A']['intercept'], 0)
checkFloat('Signal B intercept', params['B']['intercept'], 0)
checkFloat('Signal C intercept', params['C']['intercept'], interceptC)
# amplitudes
checkFloat('Signal A period 0 amplitude', params['A']['coeffs'][periods[0]]['amplitude'], amps[0])
checkFloat('Signal A period 1 amplitude', params['A']['coeffs'][periods[1]]['amplitude'], amps[1])
checkFloat('Signal A period 2 amplitude', params['A']['coeffs'][periods[2]]['amplitude'], amps[2])
checkFloat('Signal B period 0 amplitude', params['B']['coeffs'][periods[0]]['amplitude'], amps[0])
checkFloat('Signal B period 1 amplitude', params['B']['coeffs'][periods[1]]['amplitude'], amps[1])
checkFloat('Signal B period 2 amplitude', params['B']['coeffs'][periods[2]]['amplitude'], amps[2])
checkFloat('Signal C period 0 amplitude', params['C']['coeffs'][periods[0]]['amplitude'], amps[0])
checkFloat('Signal C period 1 amplitude', params['C']['coeffs'][periods[1]]['amplitude'], amps[1])
checkFloat('Signal C period 2 amplitude', params['C']['coeffs'][periods[2]]['amplitude'], amps[2])
# phases
# check absolute value of phase pi since -pi and pi are often converged on separately
checkFloat('Signal A period 0 phase', params['A']['coeffs'][periods[0]]['phase'] , phasesA[0])
checkFloat('Signal A period 1 phase', abs(params['A']['coeffs'][periods[1]]['phase']), phasesA[1])
checkFloat('Signal A period 2 phase', params['A']['coeffs'][periods[2]]['phase'] , phasesA[2])
checkFloat('Signal B period 0 phase', abs(params['B']['coeffs'][periods[0]]['phase']), phasesB[0])
checkFloat('Signal B period 1 phase', params['B']['coeffs'][periods[1]]['phase'] , phasesB[1])
checkFloat('Signal B period 2 phase', params['B']['coeffs'][periods[2]]['phase'] , phasesB[2])
checkFloat('Signal C period 0 phase', abs(params['C']['coeffs'][periods[0]]['phase']), phasesC[0])
checkFloat('Signal C period 1 phase', params['C']['coeffs'][periods[1]]['phase'] , phasesC[1])
checkFloat('Signal C period 2 phase', params['C']['coeffs'][periods[2]]['phase'] , phasesC[2])
# residual
## add constant to training, make sure we get constant back
const = 42.0
residSig = signals + const
resid = fourier.getResidual(residSig, params, pivot, settings)
checkFloat('Residual check', (resid-const).sum(), 0)
# recreate signals
res = fourier.generate(params, pivot, None)
for tg, target in enumerate(targets):
checkArray(f'Signal {target} replication', res[:, tg], signals[:, tg], float)
##### now redo with non-simultaneous fitting
params = fourier.characterize(signals, pivot, targets, settings, simultFit=False)
# intercepts
checkFloat('Signal A intercept', params['A']['intercept'], 0)
checkFloat('Signal B intercept', params['B']['intercept'], 0)
checkFloat('Signal C intercept', params['C']['intercept'], interceptC)
# amplitudes
checkFloat('Signal A period 0 amplitude', params['A']['coeffs'][periods[0]]['amplitude'], amps[0])
checkFloat('Signal A period 1 amplitude', params['A']['coeffs'][periods[1]]['amplitude'], amps[1])
checkFloat('Signal A period 2 amplitude', params['A']['coeffs'][periods[2]]['amplitude'], amps[2])
checkFloat('Signal B period 0 amplitude', params['B']['coeffs'][periods[0]]['amplitude'], amps[0])
checkFloat('Signal B period 1 amplitude', params['B']['coeffs'][periods[1]]['amplitude'], amps[1])
checkFloat('Signal B period 2 amplitude', params['B']['coeffs'][periods[2]]['amplitude'], amps[2])
checkFloat('Signal C period 0 amplitude', params['C']['coeffs'][periods[0]]['amplitude'], amps[0])
checkFloat('Signal C period 1 amplitude', params['C']['coeffs'][periods[1]]['amplitude'], amps[1])
checkFloat('Signal C period 2 amplitude', params['C']['coeffs'][periods[2]]['amplitude'], amps[2])
# phases
# check absolute value of phase pi since -pi and pi are often converged on separately
checkFloat('Signal A period 0 phase', params['A']['coeffs'][periods[0]]['phase'] , phasesA[0])
checkFloat('Signal A period 1 phase', abs(params['A']['coeffs'][periods[1]]['phase']), phasesA[1])
checkFloat('Signal A period 2 phase', params['A']['coeffs'][periods[2]]['phase'] , phasesA[2])
checkFloat('Signal B period 0 phase', abs(params['B']['coeffs'][periods[0]]['phase']), phasesB[0])
checkFloat('Signal B period 1 phase', params['B']['coeffs'][periods[1]]['phase'] , phasesB[1])
checkFloat('Signal B period 2 phase', params['B']['coeffs'][periods[2]]['phase'] , phasesB[2])
checkFloat('Signal C period 0 phase', abs(params['C']['coeffs'][periods[0]]['phase']), phasesC[0])
checkFloat('Signal C period 1 phase', params['C']['coeffs'][periods[1]]['phase'] , phasesC[1])
checkFloat('Signal C period 2 phase', params['C']['coeffs'][periods[2]]['phase'] , phasesC[2])
# recreate signals
res = fourier.generate(params, pivot, settings)
for tg, target in enumerate(targets):
checkArray(f'Signal {target} replication', res[:, tg], signals[:, tg], float)
# check residual
# -> generate random noise to add to signal, then check it is returned in residual
r = np.random.rand(pivot.size, len(targets))
new = r + signals
res = fourier.getResidual(new, params, pivot, None)
for tg, target in enumerate(targets):
checkArray(f'Signal {target} residual', res[:, tg], r[:, tg], float)
print(results)
sys.exit(results["fail"])
"""
<TestInfo>
<name>framework.unit_tests.TSA.Fourier</name>
<author>talbpaul</author>
<created>2021-01-05</created>
<classesTested>TSA.Fourier</classesTested>
<description>
This test is a Unit Test for the Fourier TimeSeriesAnalyzer classes.
</description>
</TestInfo>
"""
| apache-2.0 |