repo_name (stringlengths 6-112) | path (stringlengths 4-204) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 714-810k) | license (stringclasses, 15 values) |
---|---|---|---|---|---|
glasperfan/thesis | bach_code/logit.py | 1 | 6793 | from sklearn import linear_model
from sklearn import naive_bayes
from sklearn.preprocessing import OneHotEncoder
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
import h5py
import numpy
from collections import Counter
def encode(train, test):
encoder = OneHotEncoder()
encoder.fit(numpy.vstack((train, test)))
trainencoded = encoder.transform(train)
testencoded = encoder.transform(test)
return encoder, trainencoded, testencoded
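# Illustrative sketch (hypothetical arrays, not part of the original script):
# fitting a single OneHotEncoder on the stacked matrices keeps the train and
# test encodings aligned, e.g.
#   enc, tr, te = encode(numpy.array([[0, 1], [1, 2]]), numpy.array([[1, 1]]))
#   # tr and te are sparse and share the same number of one-hot columns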
def runLogitAndNB(Xtrainsparse, Xtestsparse):
for i in range(len(ytrainraw[0])):
print "Output type %i" % i
logit1 = linear_model.LogisticRegression(multi_class='multinomial', solver='lbfgs', C=1)
logit2 = linear_model.LogisticRegression(multi_class='multinomial', solver='lbfgs', C=100)
logit3 = linear_model.LogisticRegression(multi_class='multinomial', solver='lbfgs', C=10000)
nb1 = naive_bayes.MultinomialNB(alpha=0.01, fit_prior=True, class_prior=None)
nb2 = naive_bayes.MultinomialNB(alpha=0.1, fit_prior=True, class_prior=None)
nb3 = naive_bayes.MultinomialNB(alpha=1, fit_prior=True, class_prior=None)
RF1 = RandomForestClassifier(1, "entropy", None)
RF2 = RandomForestClassifier(10, "entropy", None)
RF3 = RandomForestClassifier(20, "entropy", None)
ytrain = numpy.hstack((ytrainraw[:, i], ydevraw[:, i]))
ytest = ytestraw[:, i]
RF1.fit(Xtrainsparse, ytrain)
RF2.fit(Xtrainsparse, ytrain)
RF3.fit(Xtrainsparse, ytrain)
scores = [RF1.score(Xtestsparse, ytest), RF2.score(Xtestsparse, ytest), RF3.score(Xtestsparse, ytest)]
print "R-FOREST: Best score %.2f%%, min of %.2f%%" % (max(scores) * 100, min(scores) * 100)
ERF = ExtraTreesClassifier(n_estimators=40, max_depth=None, min_samples_split=1, random_state=0)
ERF.fit(Xtrainsparse, ytrain)
print "EXTRA TREES: Best score %.2f%%" % (ERF.score(Xtestsparse, ytest) * 100)
nb1.fit(Xtrainsparse, ytrain)
nb2.fit(Xtrainsparse, ytrain)
nb3.fit(Xtrainsparse, ytrain)
scores = [nb1.score(Xtestsparse, ytest), nb2.score(Xtestsparse, ytest), nb3.score(Xtestsparse, ytest)]
print "MULTI-NB: Best score %.2f%%" % (max(scores) * 100)
logit1.fit(Xtrainsparse, ytrain)
logit2.fit(Xtrainsparse, ytrain)
logit3.fit(Xtrainsparse, ytrain)
scores = [logit1.score(Xtestsparse, ytest), logit2.score(Xtestsparse, ytest), logit3.score(Xtestsparse, ytest)]
print "LOGIT: Best score %.2f%%" % (max(scores) * 100)
most_common = lambda lst : max(set(list(lst)), key=list(lst).count)
print "Most common class frequency: %.1f%% (train) %.1f%% (test)" % \
(Counter(ytrain)[most_common(ytrain)] / float(len(ytrain)) * 100., \
Counter(ytest)[most_common(ytest)] / float(len(ytest)) * 100.)
print
# Load data
with h5py.File('data/chorales.hdf5', "r", libver='latest') as f:
Xtrainraw = f['Xtrain'].value
ytrainraw = f['ytrain'].value
Xdevraw = f['Xdev'].value
ydevraw = f['ydev'].value
Xtestraw = f['Xtest'].value
ytestraw = f['ytest'].value
# Cycle through the output targets and see how logistic regression performs based solely on the melody
def test1():
print("1. Testing learning on melody alone...")
Xtrain = numpy.vstack((Xtrainraw[:, range(0,10)], Xdevraw[:, range(0,10)]))
Xtest = Xtestraw[:, range(0,10)]
encoder, Xtrainsparse, Xtestsparse = encode(Xtrain, Xtest)
runLogitAndNB(Xtrainsparse, Xtestsparse)
# Oracle experiments
def test2():
print("2. Performing oracle experiment...")
Xtrain = numpy.vstack((Xtrainraw, Xdevraw))
Xtest = Xtestraw
encoder, Xtrainsparse, Xtestsparse = encode(Xtrain, Xtest)
runLogitAndNB(Xtrainsparse, Xtestsparse)
def load_dataset(name, data_file):
dataX, datay = [], []
with h5py.File(data_file, "r", libver='latest') as f:
counter = 0
while True:
try:
dataX.append(f['%s/chorale%d_X' % (name, counter)].value)
datay.append(f['%s/chorale%d_y' % (name, counter)].value)
            except KeyError:
break
counter += 1
return dataX, datay
# Score without counting padding
def score_with_padding(pred, ytest, ypadding):
correct = 0.0
for idx, p in enumerate(pred):
if ytest[idx] == p and ytest[idx] != ypadding:
correct += 1
return correct / ytest.shape[0]
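# Illustrative sketch (hypothetical values): padded targets never count as
# correct but stay in the denominator, e.g.
#   score_with_padding([1, 2, 9], numpy.array([1, 2, 9]), ypadding=9) == 2 / 3.0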
# Full harmonization
def test3():
print("3. Testing softmax for full harmonization...")
trainXc, trainyc = load_dataset("train", "data/chorales_rnn.hdf5")
devXc, devyc = load_dataset("dev", "data/chorales_rnn.hdf5")
testXc, testyc = load_dataset("test", "data/chorales_rnn.hdf5")
stack = lambda x1, x2: numpy.vstack((x1, x2))
hstack = lambda x1, x2: numpy.hstack((x1, x2))
# Remove Oracle features
trainXc = [X[:, range(0,10)] for X in trainXc]
devXc = [X[:, range(0,10)] for X in devXc]
testXc = [X[:, range(0,10)] for X in testXc]
# Aggregate data
Xtrain = stack(reduce(stack, trainXc), reduce(stack, devXc))
ytrain = hstack(reduce(hstack, trainyc), reduce(hstack, devyc))
Xtest, ytest = reduce(stack, testXc), reduce(hstack, testyc)
# Remove padding
ypadding = ytest.max()
Xtrain_up, ytrain_up, Xtest_up, ytest_up = [], [], [], []
for idx, p in enumerate(ytrain):
if p != ypadding:
Xtrain_up.append(Xtrain[idx])
ytrain_up.append(ytrain[idx])
for idx, p in enumerate(ytest):
if p != ypadding:
Xtest_up.append(Xtest[idx])
ytest_up.append(ytest[idx])
Xtrain, ytrain, Xtest, ytest = numpy.array(Xtrain_up), numpy.array(ytrain_up), \
numpy.array(Xtest_up), numpy.array(ytest_up)
encoder, Xtrainsparse, Xtestsparse = encode(Xtrain, Xtest)
RF = RandomForestClassifier(10, "entropy", None)
RF.fit(Xtrain, ytrain)
# Write full harmonization data
with h5py.File('data/chorales_sm.hdf5', "w", libver="latest") as f:
f.create_dataset("Xtrain", Xtrain.shape, dtype="i", data=Xtrain)
f.create_dataset("ytrain", ytrain.shape, dtype="i", data=ytrain)
f.create_dataset("Xtest", Xtest.shape, dtype="i", data=Xtest)
f.create_dataset("ytest", ytest.shape, dtype="i", data=ytest)
print "Full harmonization data written"
score_RF_train = RF.score(Xtrain, ytrain)
score_RF_test = RF.score(Xtest, ytest)
print "R-FOREST: %.2f%% training, %.2f%% test" % (score_RF_train * 100, score_RF_test * 100)
ERF = ExtraTreesClassifier(n_estimators=40, max_depth=None, min_samples_split=1, random_state=0)
ERF.fit(Xtrainsparse, ytrain)
score_ERF_train = ERF.score(Xtrainsparse, ytrain)
score_ERF_test = ERF.score(Xtestsparse, ytest)
print "EXTRA TREES: %.2f%% training, %.2f%% test" % (score_ERF_train * 100, score_ERF_test * 100)
logit = linear_model.LogisticRegression(multi_class='multinomial', solver='lbfgs', C=1)
logit.fit(Xtrainsparse, ytrain)
score_logit_train = logit.score(Xtrainsparse, ytrain)
score_logit_test = logit.score(Xtestsparse, ytest)
print "LOGIT: %.2f%% training, %.2f%% test" % (score_logit_train * 100, score_logit_test * 100)
def run():
# test1()
test2()
# test3()
run()
| apache-2.0 |
lucasrodes/whatstk | tests/whatsapp/test_generation.py | 1 | 2441 | import numpy as np
import pandas as pd
from datetime import datetime
from whatstk.whatsapp.objects import WhatsAppChat
from whatstk.whatsapp.generation import ChatGenerator, generate_chats_hformats
USERS = ['laurent', 'anna', 'lua', 'miquel']
def test_generate_messages():
cg = ChatGenerator(size=10, users=USERS)
messages = cg._generate_messages()
assert(isinstance(messages, (list, np.ndarray)))
assert(all([isinstance(m, str) for m in messages]))
def test_generate_emojis():
cg = ChatGenerator(size=10, users=USERS)
emojis = cg._generate_emojis()
assert(isinstance(emojis, (list, np.ndarray)))
assert(all([isinstance(e, str) for e in emojis]))
def test_generate_timestamps_1():
cg = ChatGenerator(size=10, users=USERS)
timestamps = cg._generate_timestamps()
assert(isinstance(timestamps, (list, np.ndarray)))
assert(all([isinstance(ts, datetime) for ts in timestamps]))
def test_generate_timestamps_2():
cg = ChatGenerator(size=10, users=USERS)
timestamps = cg._generate_timestamps(last=datetime.now())
assert(isinstance(timestamps, (list, np.ndarray)))
assert(all([isinstance(ts, datetime) for ts in timestamps]))
def test_generate_users():
cg = ChatGenerator(size=10, users=USERS)
users = cg._generate_users()
assert(isinstance(users, (list, np.ndarray)))
assert(all([isinstance(u, str) for u in users]))
def test_generate_df():
cg = ChatGenerator(size=10, users=USERS)
df = cg._generate_df()
assert(isinstance(df, pd.DataFrame))
def test_generate_1():
cg = ChatGenerator(size=10, users=USERS)
chat = cg.generate()
assert(isinstance(chat, WhatsAppChat))
def test_generate_2():
cg = ChatGenerator(size=10, users=USERS)
chat = cg.generate(hformat='y-%m-%d, %H:%M - %name:')
assert(isinstance(chat, WhatsAppChat))
def test_generate_3(tmpdir):
cg = ChatGenerator(size=10, users=USERS)
filepath = tmpdir.join("export.txt")
chat = cg.generate(filepath=str(filepath))
assert(isinstance(chat, WhatsAppChat))
def test_generate_chats_hformats(tmpdir):
output_path = tmpdir.mkdir("output")
generate_chats_hformats(output_path, size=2, verbose=False)
def test_generate_chats_hformats_2(tmpdir):
output_path = tmpdir.mkdir("output")
hformat = '%Y-%m-%d, %H:%M - %name:'
generate_chats_hformats(output_path, size=2, hformats=[hformat], filepaths=['file.txt'], verbose=False) | gpl-3.0 |
sameersingh/onebusaway | ml/oba_ml/lasso_regression.py | 1 | 1186 | from __future__ import division
import numpy as np
from common import *
from sklearn import linear_model
def main():
np.set_printoptions(threshold=np.nan)
feature_names = get_feature_names()
x_train, y_train, = get_data("training.dat")
# run linear regression with l1 regularization
clf = linear_model.Lasso(alpha=1000)
clf.fit(x_train, y_train)
#print clf.alpha_
w = clf.coef_
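    # Note: Lasso fits an intercept by default, but the dot products below use
    # only the coefficient vector w, so clf.intercept_ is not added back;
    # clf.predict(x_train) would include it.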
y_hat_train = x_train.dot(w)
rmse_our_train, rmse_oba_train = get_rmse(y_train, y_hat_train)
x_test, y_test = get_data("test.dat")
y_hat_test = x_test.dot(w)
rmse_our_test, rmse_oba_test = get_rmse(y_test, y_hat_test)
print "RMSE OUR Train ", rmse_our_train
print "RMSE OBA Train ", rmse_oba_train
print "RMSE OUR Test ", rmse_our_test
print "RMSE OBA Test ", rmse_oba_test
save_scatter_plot(y_train, y_hat_train, "train")
save_scatter_plot(y_test, y_hat_test, "test")
build_output_files(y_hat_train, y_hat_test, y_train, y_test)
print_weights(w, feature_names);
report_range(y_train)
report_range(y_test)
if __name__ == '__main__':
main() | apache-2.0 |
ryfeus/lambda-packs | Tensorflow_Pandas_Numpy/source3.6/pandas/util/_doctools.py | 5 | 6806 | import numpy as np
import pandas as pd
import pandas.compat as compat
class TablePlotter(object):
"""
Layout some DataFrames in vertical/horizontal layout for explanation.
Used in merging.rst
"""
def __init__(self, cell_width=0.37, cell_height=0.25, font_size=7.5):
self.cell_width = cell_width
self.cell_height = cell_height
self.font_size = font_size
def _shape(self, df):
"""
        Calculate table shape considering index levels.
"""
row, col = df.shape
return row + df.columns.nlevels, col + df.index.nlevels
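    # Illustrative example: for a DataFrame with 3 rows, 2 columns and
    # single-level index/columns, _shape returns (3 + 1, 2 + 1) == (4, 3) --
    # one extra row for the column header and one extra column for the index.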
def _get_cells(self, left, right, vertical):
"""
Calculate appropriate figure size based on left and right data.
"""
if vertical:
# calculate required number of cells
vcells = max(sum(self._shape(l)[0] for l in left),
self._shape(right)[0])
hcells = (max(self._shape(l)[1] for l in left) +
self._shape(right)[1])
else:
vcells = max([self._shape(l)[0] for l in left] +
[self._shape(right)[0]])
hcells = sum([self._shape(l)[1] for l in left] +
[self._shape(right)[1]])
return hcells, vcells
def plot(self, left, right, labels=None, vertical=True):
"""
Plot left / right DataFrames in specified layout.
Parameters
----------
left : list of DataFrames before operation is applied
right : DataFrame of operation result
labels : list of str to be drawn as titles of left DataFrames
vertical : bool
If True, use vertical layout. If False, use horizontal layout.
"""
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
if not isinstance(left, list):
left = [left]
left = [self._conv(l) for l in left]
right = self._conv(right)
hcells, vcells = self._get_cells(left, right, vertical)
if vertical:
figsize = self.cell_width * hcells, self.cell_height * vcells
else:
# include margin for titles
figsize = self.cell_width * hcells, self.cell_height * vcells
fig = plt.figure(figsize=figsize)
if vertical:
gs = gridspec.GridSpec(len(left), hcells)
# left
max_left_cols = max(self._shape(l)[1] for l in left)
max_left_rows = max(self._shape(l)[0] for l in left)
for i, (l, label) in enumerate(zip(left, labels)):
ax = fig.add_subplot(gs[i, 0:max_left_cols])
self._make_table(ax, l, title=label,
height=1.0 / max_left_rows)
# right
ax = plt.subplot(gs[:, max_left_cols:])
self._make_table(ax, right, title='Result', height=1.05 / vcells)
fig.subplots_adjust(top=0.9, bottom=0.05, left=0.05, right=0.95)
else:
max_rows = max(self._shape(df)[0] for df in left + [right])
height = 1.0 / np.max(max_rows)
gs = gridspec.GridSpec(1, hcells)
# left
i = 0
for l, label in zip(left, labels):
sp = self._shape(l)
ax = fig.add_subplot(gs[0, i:i + sp[1]])
self._make_table(ax, l, title=label, height=height)
i += sp[1]
# right
ax = plt.subplot(gs[0, i:])
self._make_table(ax, right, title='Result', height=height)
fig.subplots_adjust(top=0.85, bottom=0.05, left=0.05, right=0.95)
return fig
def _conv(self, data):
"""Convert each input to appropriate for table outplot"""
if isinstance(data, pd.Series):
if data.name is None:
data = data.to_frame(name='')
else:
data = data.to_frame()
data = data.fillna('NaN')
return data
def _insert_index(self, data):
# insert is destructive
data = data.copy()
idx_nlevels = data.index.nlevels
if idx_nlevels == 1:
data.insert(0, 'Index', data.index)
else:
for i in range(idx_nlevels):
data.insert(i, 'Index{0}'.format(i),
data.index._get_level_values(i))
col_nlevels = data.columns.nlevels
if col_nlevels > 1:
col = data.columns._get_level_values(0)
values = [data.columns._get_level_values(i).values
for i in range(1, col_nlevels)]
col_df = pd.DataFrame(values)
data.columns = col_df.columns
data = pd.concat([col_df, data])
data.columns = col
return data
def _make_table(self, ax, df, title, height=None):
if df is None:
ax.set_visible(False)
return
import pandas.plotting as plotting
idx_nlevels = df.index.nlevels
col_nlevels = df.columns.nlevels
        # must be converted here to get index levels for colorization
df = self._insert_index(df)
tb = plotting.table(ax, df, loc=9)
tb.set_fontsize(self.font_size)
if height is None:
height = 1.0 / (len(df) + 1)
props = tb.properties()
for (r, c), cell in compat.iteritems(props['celld']):
if c == -1:
cell.set_visible(False)
elif r < col_nlevels and c < idx_nlevels:
cell.set_visible(False)
elif r < col_nlevels or c < idx_nlevels:
cell.set_facecolor('#AAAAAA')
cell.set_height(height)
ax.set_title(title, size=self.font_size)
ax.axis('off')
if __name__ == "__main__":
import matplotlib.pyplot as plt
p = TablePlotter()
df1 = pd.DataFrame({'A': [10, 11, 12],
'B': [20, 21, 22],
'C': [30, 31, 32]})
df2 = pd.DataFrame({'A': [10, 12],
'C': [30, 32]})
p.plot([df1, df2], pd.concat([df1, df2]),
labels=['df1', 'df2'], vertical=True)
plt.show()
df3 = pd.DataFrame({'X': [10, 12],
'Z': [30, 32]})
p.plot([df1, df3], pd.concat([df1, df3], axis=1),
labels=['df1', 'df2'], vertical=False)
plt.show()
idx = pd.MultiIndex.from_tuples([(1, 'A'), (1, 'B'), (1, 'C'),
(2, 'A'), (2, 'B'), (2, 'C')])
col = pd.MultiIndex.from_tuples([(1, 'A'), (1, 'B')])
df3 = pd.DataFrame({'v1': [1, 2, 3, 4, 5, 6],
'v2': [5, 6, 7, 8, 9, 10]},
index=idx)
df3.columns = col
p.plot(df3, df3, labels=['df3'])
plt.show()
| mit |
speed-of-light/pyslider | lib/exp/evaluator/ground_truth.py | 1 | 5773 | import pandas as pd
from lib.exp.base import ExpCommon
from lib.exp.summary import Summary
from base import DfExt
class GroundTruth(ExpCommon, Summary):
def __init__(self, root, name):
"""
        Mainly designed to contain 3 dataframes:
`abs_pairs`, `rel_pairs`, `segments`
"""
ExpCommon.__init__(self, root, name)
Summary.__init__(self)
def univ_df(self):
"""
Convert univ_df raw ground_truth to dataframe
"""
coll = []
gnd = "data/{}/{}/ground_truth".format(self.root, self.name)
with open(gnd, 'r') as f:
for ln in f.readlines():
cols = [int(x) for x in ln.split(',')]
coll.append(cols)
gf = pd.DataFrame(data=coll,
columns=['fid', 'sid', 'slide_type', 'cam_status'])
gcf = self.__aggregate(gf.copy())
return gcf
def __aggregate(self, df):
"""
Get the univ aggregated result
"""
f_slid = -1
f_keep = -1
f_cnt = 1
for inx in df.index:
if f_slid < 0:
f_slid = df.ix[inx]['sid']
continue
if f_slid == df.ix[inx]['sid']:
if f_cnt == 1:
f_cnt = 2
f_keep = inx
continue
if f_cnt > 1:
df = df.drop(f_keep)
f_keep = inx
else:
f_cnt = 1
f_slid = df.ix[inx]['sid']
continue
return df
def __ftyping(self, fsid, feid, ftype):
if ftype == "duration":
return feid - fsid
elif ftype == "end":
return feid
def __ftuple(self, stat, ftp, gnd, ftype):
if (stat == "init") or (stat == "singular"):
ftv = self.__ftyping(gnd.fid, gnd.fid, ftype)
return [gnd.fid, ftv, gnd.sid]
elif stat == "found_pair":
ftv = self.__ftyping(ftp[0], gnd.fid, ftype)
ftp[1] = ftv
return ftp
def segments_df(self, df, ftype="duration"):
"""
Return dataframe version of segments
"""
segs = self.segments(df, ftype)
cols = ['fstart', ftype, 'sid']
df = pd.DataFrame(segs, columns=cols)
return df
def segments(self, df, ftype="duration"):
"""
df: columns should be like `abs_pairs`.
ftype: control the return data with "duration"(default) or
just "end" frame id
Return ground truth of segments, should return a list of
[fstart, duration, sid]
"""
f_start = -1
seg = [] # segment for global use
flp = [df.iloc[0].fid] # segment for local use
for si in df.index:
gnd = df.ix[si]
if f_start < 0:
flp = self.__ftuple("init", flp, gnd, ftype)
f_start = gnd.sid
elif f_start == gnd.sid:
flp = self.__ftuple("found_pair", flp, gnd, ftype)
seg.append(flp[:])
f_start = -1
else: # single point
seg.append(flp[:])
flp = self.__ftuple("singular", flp, gnd, ftype)
f_start = gnd.sid
if flp is not None:
seg.append(flp[:])
return seg
def shrink(self, df):
"""
df: `fid`, `sid` pairs
Get shrink data from original matched pairs
Return a cloned copy of original input
example:
df.sid = [1, 1, 1, 1, 2, 2, 3, 3, 4, 4]
return should be [1, 2, 3, 4]
"""
f_sid = -1
ret = df.copy(deep=True)
for di, dd in df.iterrows():
if f_sid < 0: # init
f_sid = dd.sid
elif f_sid == dd.sid:
ret = ret.drop(di)
continue
elif f_sid != dd.sid:
f_sid = dd.sid
return ret
def add_mark(self, df, sid=None, fid=None, ftype=None):
"""
Add mark to dataframe table, **not saved**.
df: should come from `abs_pairs`
"""
db = DfExt(df)
result = db.insert(sid, fid, ftype)
return result
def answer(self, fid):
"""
Get sid by given fid
"""
df = self._preload("segments")
dfi = df[df.fstart <= fid]
ret = -1
if len(dfi) > 0:
dfi = dfi.iloc[-1]
fs = dfi.fstart
fe = dfi.fstart + dfi.duration
if fs <= fid <= fe:
ret = dfi.sid
return ret
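    # Illustrative example (hypothetical segment row): given a segment with
    # fstart=100, duration=50, sid=3, answer(120) returns 3 because
    # 100 <= 120 <= 150, while a fid outside every segment returns -1.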
def guess(self, fid, sid):
"""
Return true if a correct match
"""
asid = self.answer(fid)
return asid == sid
def __load_seg(self):
seg = self.load("segments")
self.__check_segments(seg)
return seg
def __check_segments(self, seg):
if (seg is None) or (len(seg) == 0):
raise Exception("Error", "Segments not exist")
def update_segment(self):
"""
        Fix unmarked frames from the univ_07 ground truth
"""
seg = self.__load_seg()
gfp = "data/{}/{}/ground_truth".format(self.root, self.name)
gdf = pd.read_csv(gfp, names=["fid", "sid", "stype", "ctype"])
pgd = gdf.iloc[0]
drs = "duration"
for gi, gd in gdf[1:].iterrows():
if (pgd.sid != gd.sid) & (pgd.sid != -1):
fdiff = gd.fid - pgd.fid - 1
st = seg[seg.fstart < pgd.fid].iloc[-1]
seg.ix[st.name, drs] = seg.ix[st.name][drs] + fdiff
pgd = gd
self.save("segments", seg)
print "Finished update segments"
| agpl-3.0 |
alekz112/statsmodels | examples/incomplete/arima.py | 34 | 1605 | from __future__ import print_function
from statsmodels.datasets.macrodata import load_pandas
from statsmodels.tsa.base.datetools import dates_from_range
from statsmodels.tsa.arima_model import ARIMA
import matplotlib.pyplot as plt
import numpy as np
import statsmodels.api as sm
plt.interactive(False)
# let's examine an ARIMA model of CPI
cpi = load_pandas().data['cpi']
dates = dates_from_range('1959q1', '2009q3')
cpi.index = dates
res = ARIMA(cpi, (1, 1, 1), freq='Q').fit()
print(res.summary())
# we can look at the series
cpi.diff().plot()
# maybe logs are better
log_cpi = np.log(cpi)
# check the ACF and PCF plots
acf, confint_acf = sm.tsa.acf(log_cpi.diff().values[1:], confint=95)
# center the confidence intervals about zero
#confint_acf -= confint_acf.mean(1)[:, None]
pacf = sm.tsa.pacf(log_cpi.diff().values[1:], method='ols')
# confidence interval is now an option to pacf
from scipy import stats
confint_pacf = stats.norm.ppf(1 - .025) * np.sqrt(1 / 202.)
fig = plt.figure()
ax = fig.add_subplot(121)
ax.set_title('Autocorrelation')
ax.plot(range(41), acf, 'bo', markersize=5)
ax.vlines(range(41), 0, acf)
ax.fill_between(range(41), confint_acf[:, 0], confint_acf[:, 1], alpha=.25)
fig.tight_layout()
ax = fig.add_subplot(122, sharey=ax)
ax.vlines(range(41), 0, pacf)
ax.plot(range(41), pacf, 'bo', markersize=5)
ax.fill_between(range(41), -confint_pacf, confint_pacf, alpha=.25)
#NOTE: you'll be able to just do this when tsa-plots is in master
#sm.graphics.acf_plot(x, nlags=40)
#sm.graphics.pacf_plot(x, nlags=40)
# still some seasonality
# try an arma(1, 1) with ma(4) term
| bsd-3-clause |
jmargeta/scikit-learn | sklearn/ensemble/tests/test_weight_boosting.py | 4 | 6956 | """
Testing for the boost module (sklearn.ensemble.boost).
"""
import numpy as np
from numpy.testing import assert_array_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_equal
from nose.tools import assert_raises
from sklearn.dummy import DummyClassifier
from sklearn.dummy import DummyRegressor
from sklearn.grid_search import GridSearchCV
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import AdaBoostRegressor
from sklearn.utils import shuffle
from sklearn import datasets
# Common random state
rng = np.random.RandomState(0)
# Toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# Load the iris dataset and randomly permute it
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data, iris.target = shuffle(iris.data, iris.target, random_state=rng)
# Load the boston dataset and randomly permute it
boston = datasets.load_boston()
boston.data, boston.target = shuffle(boston.data, boston.target,
random_state=rng)
def test_classification_toy():
"""Check classification on a toy dataset."""
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
def test_regression_toy():
"""Check classification on a toy dataset."""
clf = AdaBoostRegressor()
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
def test_iris():
"""Check consistency on dataset iris."""
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert score > 0.9, "Failed with algorithm %s and score = %f" % \
(alg, score)
def test_boston():
"""Check consistency on dataset boston house prices."""
clf = AdaBoostRegressor()
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert score > 0.85
def test_staged_predict():
"""Check staged predictions."""
# AdaBoost classification
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg, n_estimators=10)
clf.fit(iris.data, iris.target)
predictions = clf.predict(iris.data)
staged_predictions = [p for p in clf.staged_predict(iris.data)]
proba = clf.predict_proba(iris.data)
staged_probas = [p for p in clf.staged_predict_proba(iris.data)]
score = clf.score(iris.data, iris.target)
staged_scores = [s for s in clf.staged_score(iris.data, iris.target)]
assert_equal(len(staged_predictions), 10)
assert_array_almost_equal(predictions, staged_predictions[-1])
assert_equal(len(staged_probas), 10)
assert_array_almost_equal(proba, staged_probas[-1])
assert_equal(len(staged_scores), 10)
assert_array_almost_equal(score, staged_scores[-1])
# AdaBoost regression
clf = AdaBoostRegressor(n_estimators=10)
clf.fit(boston.data, boston.target)
predictions = clf.predict(boston.data)
staged_predictions = [p for p in clf.staged_predict(boston.data)]
score = clf.score(boston.data, boston.target)
staged_scores = [s for s in clf.staged_score(boston.data, boston.target)]
assert_equal(len(staged_predictions), 10)
assert_array_almost_equal(predictions, staged_predictions[-1])
assert_equal(len(staged_scores), 10)
assert_array_almost_equal(score, staged_scores[-1])
def test_gridsearch():
"""Check that base trees can be grid-searched."""
# AdaBoost classification
boost = AdaBoostClassifier()
parameters = {'n_estimators': (1, 2),
'base_estimator__max_depth': (1, 2),
'algorithm': ('SAMME', 'SAMME.R')}
clf = GridSearchCV(boost, parameters)
clf.fit(iris.data, iris.target)
# AdaBoost regression
boost = AdaBoostRegressor()
parameters = {'n_estimators': (1, 2),
'base_estimator__max_depth': (1, 2)}
clf = GridSearchCV(boost, parameters)
clf.fit(boston.data, boston.target)
def test_pickle():
"""Check pickability."""
import pickle
# Adaboost classifier
for alg in ['SAMME', 'SAMME.R']:
obj = AdaBoostClassifier(algorithm=alg)
obj.fit(iris.data, iris.target)
score = obj.score(iris.data, iris.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(iris.data, iris.target)
assert_equal(score, score2)
# Adaboost regressor
obj = AdaBoostRegressor()
obj.fit(boston.data, boston.target)
score = obj.score(boston.data, boston.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(boston.data, boston.target)
assert_equal(score, score2)
def test_importances():
"""Check variable importances."""
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=1)
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg)
clf.fit(X, y)
importances = clf.feature_importances_
assert_equal(importances.shape[0], 10)
assert_equal((importances[:3, np.newaxis] >= importances[3:]).all(),
True)
def test_error():
"""Test that it gives proper exception on deficient input."""
assert_raises(ValueError,
AdaBoostClassifier(learning_rate=-1).fit,
X, y)
assert_raises(ValueError,
AdaBoostClassifier(algorithm="foo").fit,
X, y)
assert_raises(TypeError,
AdaBoostClassifier(base_estimator=DummyRegressor()).fit,
X, y)
assert_raises(TypeError,
AdaBoostRegressor(base_estimator=DummyClassifier()).fit,
X, y)
def test_base_estimator():
"""Test different base estimators."""
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
clf = AdaBoostClassifier(RandomForestClassifier())
clf.fit(X, y)
clf = AdaBoostClassifier(SVC(), algorithm="SAMME")
clf.fit(X, y)
from sklearn.ensemble import RandomForestRegressor
from sklearn.svm import SVR
clf = AdaBoostRegressor(RandomForestRegressor())
clf.fit(X, y)
clf = AdaBoostRegressor(SVR())
clf.fit(X, y)
if __name__ == "__main__":
import nose
nose.runmodule()
| bsd-3-clause |
domenkavran/scikit-learn_projects | scikit-learn-MNIST_SVM_one-vs-all/SVM_classification.py | 1 | 1778 |
# coding: utf-8
# In[1]:
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn import datasets
import numpy as np
from sklearn.svm import SVC
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import cross_val_score, cross_val_predict
from sklearn.model_selection import train_test_split
# In[2]:
from sklearn.datasets import fetch_mldata
mnist = fetch_mldata('MNIST original')
mnist
X, y = mnist["data"], mnist["target"]
X_train, X_test, y_train, y_test = X[:60000], X[60000:], y[:60000],y[60000:]
shuffle_index = np.random.permutation(60000)
X_train, y_train = X_train[shuffle_index], y_train[shuffle_index]
# In[3]:
classificators = [None] * 10
Cs = [0.001, 0.01, 0.1, 1, 10]
kernels = ['linear']
probability = [True]
X_train, X_test, y_train, y_test = train_test_split(X_train, y_train, train_size=6000, test_size=1000, random_state=42)
#smaller train and test sets to speed up the process - consequently smaller precision and recall
for index in range(10):
y_train_tmp = (y_train == index)
svc = SVC()
parameters = {'C': Cs,'kernel':kernels, 'probability':probability}
grid_search = GridSearchCV(svc, parameters, n_jobs=-1, cv=3)
grid_search.fit(X_train, y_train_tmp)
svc = grid_search.best_estimator_
classificators[index] = svc
# In[4]:
probabilities = [None] * 10
for index, classificator in enumerate(classificators):
probabilities[index] = classificator.predict_proba(X_test)[:,1]
probabilities = np.asarray(probabilities)
predictions = probabilities.argmax(axis=0)
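# Note: each row of `probabilities` holds one binary classifier's estimated
# P(digit == index) for the test samples, so taking argmax over axis 0 picks,
# for every sample, the digit whose one-vs-rest classifier is most confident.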
# In[6]:
print(classification_report(predictions,y_test),"\n",confusion_matrix(y_test,predictions)) #around 0.86 both precision and recall
| mit |
omdv/robinhood-portfolio | backend/robinhood_data.py | 1 | 9271 | """
Download and prepare robinhood data
"""
import pandas as pd
import numpy as np
class RobinhoodData:
"""
Wrapper to download orders and dividends from Robinhood accounts
Downloads two dataframes and saves to datafolder
----------
Parameters:
datafolder : location of data
client : connected pyrh client
"""
def __init__(self, datafolder, client):
self.datafolder = datafolder
self.client = client
def _get_symbol_from_instrument_url(self, url):
return self._fetch_json_by_url(url)['symbol']
# private method for getting all orders
def _fetch_json_by_url(self, url):
return self.client.session.get(url).json()
# deleting sensitive or redundant fields
def _delete_sensitive_fields(self, df):
for col in ['account', 'url', 'id', 'instrument']:
if col in df:
del df[col]
return df
# download orders and fields requiring RB client
def _download_orders(self):
print("Downloading orders from Robinhood")
orders = []
past_orders = self.client.order_history()
orders.extend(past_orders['results'])
while past_orders['next']:
next_url = past_orders['next']
past_orders = self._fetch_json_by_url(next_url)
orders.extend(past_orders['results'])
df = pd.DataFrame(orders)
df['symbol'] = df['instrument'].apply(
self._get_symbol_from_instrument_url)
df.sort_values(by='created_at', inplace=True)
df.reset_index(inplace=True, drop=True)
df_ord = self._delete_sensitive_fields(df)
return df_ord
# download dividends and fields requiring RB client
def _download_dividends(self):
print("Downloading dividends from Robinhood")
dividends = self.client.dividends()
dividends = [x for x in dividends['results']]
df = pd.DataFrame(dividends)
if df.shape[0] > 0:
df['symbol'] = df['instrument'].apply(
self._get_symbol_from_instrument_url)
df.sort_values(by='paid_at', inplace=True)
df.reset_index(inplace=True, drop=True)
df_div = self._delete_sensitive_fields(df)
else:
df_div = pd.DataFrame(columns=['symbol', 'amount', 'position',
'rate', 'paid_at', 'payable_date'])
return df_div
# process orders
def _process_orders(self, df_ord):
# assign to df and reduce the number of fields
df = df_ord.copy()
fields = [
'created_at',
'average_price', 'cumulative_quantity', 'fees',
'symbol', 'side']
df = df[fields]
# convert types
for field in ['average_price', 'cumulative_quantity', 'fees']:
df[field] = pd.to_numeric(df[field])
for field in ['created_at']:
df[field] = pd.to_datetime(df[field])
# normalize dates
idx = pd.Index(df['created_at']).normalize()
df['date'] = idx
# rename columns for consistency
df.rename(columns={
'cumulative_quantity': 'current_size'
}, inplace=True)
# quantity accounting for side of transaction for cumsum later
df['signed_size'] = np.where(
df.side == 'buy',
df['current_size'],
-df['current_size'])
df['signed_size'] = df['signed_size'].astype(np.int64)
# initialize columns
df['realized_gains'] = 0.
df['current_cost_basis'] = 0.
return df
# process_orders
def _process_dividends(self, df_div):
df = df_div.copy()
# convert types
for field in ['amount', 'position', 'rate']:
df[field] = pd.to_numeric(df[field])
for field in ['paid_at', 'payable_date']:
df[field] = pd.to_datetime(df[field])
# normalize dates
idx = pd.Index(df['paid_at']).normalize()
df['date'] = idx
return df
def _generate_positions(self, df_ord):
"""
Process orders dataframe and generate open and closed positions.
For all open positions close those which were later sold, so that
the cost_basis for open can be calculated correctly. For closed
positions calculate the cost_basis based on the closed open positions.
        Note: the oldest open positions are the first to be closed. The logic here
is to reduce the tax exposure.
-----
Parameters:
- Pre-processed df_ord
Return:
- Two dataframes with open and closed positions correspondingly
"""
# prepare dataframe for open and closed positions
df_open = df_ord[df_ord.side == 'buy'].copy()
df_closed = df_ord[df_ord.side == 'sell'].copy()
# create a new column for today's position size
df_open['final_size'] = df_open['current_size']
df_closed['final_size'] = df_closed['current_size']
# main loop
for i_closed, row_closed in df_closed.iterrows():
sell_size = row_closed.final_size
sell_cost_basis = 0
for i_open, _ in df_open[
(df_open.symbol == row_closed.symbol) &
(df_open.date < row_closed.date)].iterrows():
new_sell_size = sell_size - df_open.loc[i_open, 'final_size']
new_sell_size = 0 if new_sell_size < 0 else new_sell_size
new_open_size = df_open.loc[i_open, 'final_size'] - sell_size
new_open_size = new_open_size if new_open_size > 0 else 0
# updating open positions
df_open.loc[i_open, 'final_size'] = new_open_size
# updating closed positions
df_closed.loc[i_closed, 'final_size'] = new_sell_size
sold_size = sell_size - new_sell_size
sell_cost_basis +=\
df_open.loc[i_open, 'average_price'] * sold_size
sell_size = new_sell_size
# assign a cost_basis to the closed position
df_closed.loc[i_closed, 'current_cost_basis'] = -sell_cost_basis
# calculate cost_basis for open positions
df_open['current_cost_basis'] =\
df_open['current_size'] * df_open['average_price']
df_open['final_cost_basis'] =\
df_open['final_size'] * df_open['average_price']
# calculate capital gains for closed positions
if df_closed.shape[0] != 0:
df_closed['realized_gains'] =\
df_closed['current_size'] * df_closed['average_price'] +\
df_closed['current_cost_basis']
df_closed['final_cost_basis'] = 0
return df_open, df_closed
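    # Illustrative walk-through (hypothetical numbers): with two buys of 100
    # shares each and a later sell of 120 shares, the sell consumes the older
    # lot entirely and 20 shares of the newer one, leaving final_size 0 and 80
    # on the open side and a cost basis combining both lots on the closed side.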
def download(self, orders=None, dividends=None):
"""
Download and parse method
"""
if dividends is None:
dividends = self._download_dividends()
df_div = self._process_dividends(dividends)
df_div.to_pickle(self.datafolder + "dividends.pkl")
if orders is None:
orders = self._download_orders()
df_ord = self._process_orders(orders)
df_ord.to_pickle(self.datafolder + "orders.pkl")
df_open, df_closed = self._generate_positions(df_ord)
df_open.to_pickle(self.datafolder + "open.pkl")
df_closed.to_pickle(self.datafolder + "closed.pkl")
return df_div, df_ord, df_open, df_closed
def demo_orders(self):
"""
Generate demo orders dataframe for testing
"""
orders = pd.DataFrame(index=range(11))
orders['created_at'] = pd.Timestamp('2018-01-02', tz='UTC')
orders['date'] = pd.Timestamp('2018-01-02', tz='UTC')
orders['symbol'] = ['MSFT', 'AAPL', 'CVX', 'XOM', 'BND', 'CAT', 'BA', 'TIF', 'BAC', 'JPM', 'MSFT']
orders['current_size'] = 100
orders['signed_size'] = 100
orders['average_price'] = 100.0
orders['fees'] = 0
orders['cumulative_quantity'] = 100
orders['side'] = 'buy'
orders.to_pickle(self.datafolder + 'orders.pkl')
# one sell order
orders.loc[10, 'side'] = 'sell'
orders.loc[10, 'average_price'] = 120.
orders.loc[10, 'created_at'] = pd.Timestamp('2020-01-03', tz='UTC')
orders.loc[10, 'date'] = pd.Timestamp('2020-01-03', tz='UTC')
orders.loc[10, 'cumulative_quantity'] = 20
orders.loc[10, 'signed_size'] = -20
orders.loc[10, 'current_size'] = 20
orders.loc[10, 'fees'] = 0
return orders
def demo_dividends(self):
"""
Generate demo dividends dataframe for testing
"""
dividends = pd.DataFrame(index=range(10))
dividends['symbol'] = ['MSFT', 'AAPL', 'CVX', 'XOM', 'BND', 'CAT', 'BA', 'TIF', 'BAC', 'JPM']
dividends['position'] = 100
dividends['amount'] = [20, 30, 40, 50, 60, 70, 80, 90, 100, 110]
dividends['rate'] = dividends['amount'] / dividends['position']
dividends['paid_at'] = pd.Timestamp('2019-01-15', tz='UTC')
dividends['payable_date'] = pd.Timestamp('2019-01-02', tz='UTC')
return dividends
if __name__ == "__main__":
pass
| mit |
Sentient07/scikit-learn | examples/exercises/plot_cv_diabetes.py | 27 | 2775 | """
===============================================
Cross-validation on diabetes Dataset Exercise
===============================================
A tutorial exercise which uses cross-validation with linear models.
This exercise is used in the :ref:`cv_estimators_tut` part of the
:ref:`model_selection_tut` section of the :ref:`stat_learn_tut_index`.
"""
from __future__ import print_function
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.linear_model import LassoCV
from sklearn.linear_model import Lasso
from sklearn.model_selection import KFold
from sklearn.model_selection import GridSearchCV
diabetes = datasets.load_diabetes()
X = diabetes.data[:150]
y = diabetes.target[:150]
lasso = Lasso(random_state=0)
alphas = np.logspace(-4, -0.5, 30)
tuned_parameters = [{'alpha': alphas}]
n_folds = 3
clf = GridSearchCV(lasso, tuned_parameters, cv=n_folds, refit=False)
clf.fit(X, y)
scores = clf.cv_results_['mean_test_score']
scores_std = clf.cv_results_['std_test_score']
plt.figure().set_size_inches(8, 6)
plt.semilogx(alphas, scores)
# plot error lines showing +/- std. errors of the scores
std_error = scores_std / np.sqrt(n_folds)
plt.semilogx(alphas, scores + std_error, 'b--')
plt.semilogx(alphas, scores - std_error, 'b--')
# alpha=0.2 controls the translucency of the fill color
plt.fill_between(alphas, scores + std_error, scores - std_error, alpha=0.2)
plt.ylabel('CV score +/- std error')
plt.xlabel('alpha')
plt.axhline(np.max(scores), linestyle='--', color='.5')
plt.xlim([alphas[0], alphas[-1]])
##############################################################################
# Bonus: how much can you trust the selection of alpha?
# To answer this question we use the LassoCV object that sets its alpha
# parameter automatically from the data by internal cross-validation (i.e. it
# performs cross-validation on the training data it receives).
# We use external cross-validation to see how much the automatically obtained
# alphas differ across different cross-validation folds.
lasso_cv = LassoCV(alphas=alphas, random_state=0)
k_fold = KFold(3)
print("Answer to the bonus question:",
"how much can you trust the selection of alpha?")
print()
print("Alpha parameters maximising the generalization score on different")
print("subsets of the data:")
for k, (train, test) in enumerate(k_fold.split(X, y)):
lasso_cv.fit(X[train], y[train])
print("[fold {0}] alpha: {1:.5f}, score: {2:.5f}".
format(k, lasso_cv.alpha_, lasso_cv.score(X[test], y[test])))
print()
print("Answer: Not very much since we obtained different alphas for different")
print("subsets of the data and moreover, the scores for these alphas differ")
print("quite substantially.")
plt.show()
| bsd-3-clause |
prheenan/Research | Perkins/Projects/PythonCommandLine/InverseWeierstrass/UnitTest/test_equality.py | 1 | 1086 | # force floating point division. Can still use integer with //
from __future__ import division
# other good compatibility requirements for python3
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
# This file is used for importing the common utilities classes.
import numpy as np
import matplotlib.pyplot as plt
import sys
def run():
"""
    Load two free-energy CSV files given on the command line, plot both
    curves for visual comparison, and assert that they agree within tolerance.
    Args:
        None; the two file paths are read from sys.argv[1] and sys.argv[2].
    Returns:
        Nothing; raises an AssertionError if the arrays differ beyond tol.
"""
f1 = sys.argv[1]
f2 = sys.argv[2]
arr1 = np.loadtxt(f1,delimiter=",",skiprows=2)
arr2 = np.loadtxt(f2,delimiter=",",skiprows=2)
plt.plot(arr1[:,0]*1e9,arr1[:,1]/4.1e-21,'r--',label=r"$\Delta G^0$, debugged")
plt.plot(arr2[:,0]*1e9,arr2[:,1]/4.1e-21,label=r"$\Delta G^0$, previous")
plt.xlabel("Extension (nm)")
plt.ylabel("Delta G (kT)")
plt.legend()
plt.show()
tol = 1e-6
np.testing.assert_allclose(arr1,arr2,rtol=tol,atol=0,verbose=True)
if __name__ == "__main__":
run()
| gpl-3.0 |
beni55/copperhead | samples/of_cg.py | 5 | 5389 | #
# Copyright 2008-2010 NVIDIA Corporation
# Copyright 2009-2010 University of California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from copperhead import *
import numpy as np
import matplotlib as mat
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import plac
import urllib
@cu
def axpy(a, x, y):
return [a * xi + yi for xi, yi in zip(x, y)]
@cu
def dot(x, y):
return sum(map(op_mul, x, y))
@cu
def vadd(x, y):
return map(op_add, x, y)
@cu
def vmul(x, y):
return map(op_mul, x, y)
@cu
def vsub(x, y):
return map(op_sub, x, y)
@cu
def of_spmv((du, dv), width, (m1, m2, m3, m4, m5, m6, m7)):
e = vadd(vmul(m1, du), vmul(m2, dv))
f = vadd(vmul(m2, du), vmul(m3, dv))
e = vadd(e, vmul(m4, shift(du, -width, float32(0.0))))
f = vadd(f, vmul(m4, shift(dv, -width, float32(0.0))))
e = vadd(e, vmul(m5, shift(du, -1, float32(0.0))))
f = vadd(f, vmul(m5, shift(dv, -1, float32(0.0))))
e = vadd(e, vmul(m6, shift(du, 1, float32(0.0))))
f = vadd(f, vmul(m6, shift(dv, 1, float32(0.0))))
e = vadd(e, vmul(m7, shift(du, width, float32(0.0))))
f = vadd(f, vmul(m7, shift(dv, width, float32(0.0))))
return (e, f)
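# Note: of_spmv is an implicit sparse matrix-vector product. m1..m3 form the
# symmetric 2x2 block coupling du and dv at each pixel, while m4..m7 weight the
# four grid neighbours reached by shifting the flattened field by +/-1
# (horizontal) and +/-width (vertical), with zero padding at the image borders.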
@cu
def zeros(x):
return [float32(0.0) for xi in x]
eps = 1e-6
@cu
def init_cg(V, D, width, A):
u, v = of_spmv(V, width, A)
du, dv = D
ur = vsub(du, u)
vr = vsub(dv, v)
return ur, vr
@cu
def precondition(u, v, (p1, p2, p3)):
e = vadd(vmul(p1, u), vmul(p2, v))
f = vadd(vmul(p2, u), vmul(p3, v))
return e, f
@cu
def pre_cg_iteration(width, V, R, D, Z, A, P):
ux, vx = V
ur, vr = R
uz, vz = Z
ud, vd = D
uAdi, vAdi = of_spmv(D, width, A)
urnorm = dot(ur, uz)
vrnorm = dot(vr, vz)
rnorm = urnorm + vrnorm
udtAdi = dot(ud, uAdi)
vdtAdi = dot(vd, vAdi)
dtAdi = udtAdi + vdtAdi
alpha = rnorm / dtAdi
ux = axpy(alpha, ud, ux)
vx = axpy(alpha, vd, vx)
urp1 = axpy(-alpha, uAdi, ur)
vrp1 = axpy(-alpha, vAdi, vr)
uzp1, vzp1 = precondition(urp1, vrp1, P)
urp1norm = dot(urp1, uzp1)
vrp1norm = dot(vrp1, vzp1)
beta = (urp1norm + vrp1norm)/rnorm
udp1 = axpy(beta, uzp1, urp1)
vdp1 = axpy(beta, vzp1, vrp1)
return (ux, vx), (urp1, vrp1), (udp1, vdp1), (uzp1, vzp1), rnorm
@cu
def form_preconditioner(m1, m2, m3):
def indet(a, b, c):
return 1.0/(a * c - b * b)
indets = map(indet, m1, m2, m3)
p1 = map(op_mul, indets, m3)
p2 = map(lambda a, b: -a * b, indets, m2)
p3 = map(op_mul, indets, m1)
return p1, p2, p3
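# Note: this acts as a block-Jacobi preconditioner -- for every pixel it
# inverts the symmetric 2x2 block [[m1, m2], [m2, m3]] in closed form,
# (p1, p2, p3) = (m3, -m2, m1) / (m1*m3 - m2**2), which `precondition`
# then applies to the residual.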
@cu
def pre_cg_solver(it, width, V, R, D, Z, A, P):
if (it > 0):
V, R, D, Z, rnorm = \
pre_cg_iteration(width, V, R, D, Z, A, P)
return pre_cg_solver(it-1, width, V, R, D, Z, A, P)
else:
return V
def cg(it, A, width, V, D):
print("Solving...")
m1, m2, m3, m4, m5, m6, m7 = A
P = form_preconditioner(m1, m2, m3)
R = init_cg(V, D, width, A)
ur, vr = R
Z = precondition(ur, vr, P)
D = R
return pre_cg_solver(it, width, V, R, D, Z, A, P)
def initialize_data(file_name):
print("Reading data from file")
if not file_name:
file_name, headers = urllib.urlretrieve('http://copperhead.github.com/data/Urban331.npz')
npz = np.load(file_name)
width = npz['width'].item()
height = npz['height'].item()
npixels = width * height
m1 = cuarray(npz['m1'])
m2 = cuarray(npz['m2'])
m3 = cuarray(npz['m3'])
m4 = cuarray(npz['m4'])
m5 = cuarray(npz['m5'])
m6 = cuarray(npz['m6'])
m7 = cuarray(npz['m7'])
A = (m1, m2, m3, m4, m5, m6, m7)
du = cuarray(npz['du'])
dv = cuarray(npz['dv'])
D = (du, dv)
ux = cuarray(np.zeros(npixels, dtype=np.float32))
vx = cuarray(np.zeros(npixels, dtype=np.float32))
V = (ux, vx)
img = npz['img']
return(A, V, D, width, height, img)
def plot_data(image, width, height, V):
plt.subplot(121)
plt.imshow(image[10:110, 10:110])
plt.subplot(122)
ux, vx = V
u = to_numpy(ux)
v = to_numpy(vx)
u = np.reshape(u, [height,width])
v = np.reshape(v, [height,width])
x, y = np.meshgrid(np.arange(0, 100), np.arange(99, -1, -1))
plt.quiver(x, y, u[10:110,10:110], v[10:110, 10:110], angles='xy')
plt.show()
@plac.annotations(data_file="""Filename of Numpy data file for this problem.
If none is found, a default dataset will be loaded from
http://copperhead.github.com/data/Urban331.npz""")
def main(data_file=None):
"""Performs a Preconditioned Conjugate Gradient solver for a particular
problem found in Variational Optical Flow methods for video analysis."""
A, V, D, width, height, image = initialize_data(data_file)
VX = cg(100, A, width, V, D)
plot_data(image, width, height, VX)
if __name__ == '__main__':
plac.call(main)
| apache-2.0 |
futurulus/scipy | scipy/spatial/kdtree.py | 11 | 37913 | # Copyright Anne M. Archibald 2008
# Released under the scipy license
from __future__ import division, print_function, absolute_import
import sys
import numpy as np
from heapq import heappush, heappop
import scipy.sparse
__all__ = ['minkowski_distance_p', 'minkowski_distance',
'distance_matrix',
'Rectangle', 'KDTree']
def minkowski_distance_p(x, y, p=2):
"""
Compute the p-th power of the L**p distance between two arrays.
For efficiency, this function computes the L**p distance but does
not extract the pth root. If `p` is 1 or infinity, this is equal to
the actual L**p distance.
Parameters
----------
x : (M, K) array_like
Input array.
y : (N, K) array_like
Input array.
p : float, 1 <= p <= infinity
Which Minkowski p-norm to use.
Examples
--------
>>> from scipy.spatial import minkowski_distance_p
>>> minkowski_distance_p([[0,0],[0,0]], [[1,1],[0,1]])
array([2, 1])
"""
x = np.asarray(x)
y = np.asarray(y)
if p == np.inf:
return np.amax(np.abs(y-x), axis=-1)
elif p == 1:
return np.sum(np.abs(y-x), axis=-1)
else:
return np.sum(np.abs(y-x)**p, axis=-1)
def minkowski_distance(x, y, p=2):
"""
Compute the L**p distance between two arrays.
Parameters
----------
x : (M, K) array_like
Input array.
y : (N, K) array_like
Input array.
p : float, 1 <= p <= infinity
Which Minkowski p-norm to use.
Examples
--------
>>> from scipy.spatial import minkowski_distance
>>> minkowski_distance([[0,0],[0,0]], [[1,1],[0,1]])
array([ 1.41421356, 1. ])
"""
x = np.asarray(x)
y = np.asarray(y)
if p == np.inf or p == 1:
return minkowski_distance_p(x, y, p)
else:
return minkowski_distance_p(x, y, p)**(1./p)
class Rectangle(object):
"""Hyperrectangle class.
Represents a Cartesian product of intervals.
"""
def __init__(self, maxes, mins):
"""Construct a hyperrectangle."""
self.maxes = np.maximum(maxes,mins).astype(float)
self.mins = np.minimum(maxes,mins).astype(float)
self.m, = self.maxes.shape
def __repr__(self):
return "<Rectangle %s>" % list(zip(self.mins, self.maxes))
def volume(self):
"""Total volume."""
return np.prod(self.maxes-self.mins)
def split(self, d, split):
"""
Produce two hyperrectangles by splitting.
In general, if you need to compute maximum and minimum
distances to the children, it can be done more efficiently
by updating the maximum and minimum distances to the parent.
Parameters
----------
d : int
Axis to split hyperrectangle along.
split : float
Position along axis `d` to split at.
"""
mid = np.copy(self.maxes)
mid[d] = split
less = Rectangle(self.mins, mid)
mid = np.copy(self.mins)
mid[d] = split
greater = Rectangle(mid, self.maxes)
return less, greater
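    # Illustrative example: Rectangle([1, 1], [0, 0]).split(0, 0.3) yields the
    # sub-rectangles [0, 0.3] x [0, 1] and [0.3, 1] x [0, 1]; only the bound
    # along the split axis d changes.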
def min_distance_point(self, x, p=2.):
"""
Return the minimum distance between input and points in the hyperrectangle.
Parameters
----------
x : array_like
Input.
p : float, optional
Input.
"""
return minkowski_distance(0, np.maximum(0,np.maximum(self.mins-x,x-self.maxes)),p)
def max_distance_point(self, x, p=2.):
"""
Return the maximum distance between input and points in the hyperrectangle.
Parameters
----------
x : array_like
Input array.
p : float, optional
Input.
"""
return minkowski_distance(0, np.maximum(self.maxes-x,x-self.mins),p)
def min_distance_rectangle(self, other, p=2.):
"""
Compute the minimum distance between points in the two hyperrectangles.
Parameters
----------
other : hyperrectangle
Input.
p : float
Input.
"""
return minkowski_distance(0, np.maximum(0,np.maximum(self.mins-other.maxes,other.mins-self.maxes)),p)
def max_distance_rectangle(self, other, p=2.):
"""
Compute the maximum distance between points in the two hyperrectangles.
Parameters
----------
other : hyperrectangle
Input.
p : float, optional
Input.
"""
return minkowski_distance(0, np.maximum(self.maxes-other.mins,other.maxes-self.mins),p)
class KDTree(object):
"""
kd-tree for quick nearest-neighbor lookup
This class provides an index into a set of k-dimensional points which
can be used to rapidly look up the nearest neighbors of any point.
Parameters
----------
data : (N,K) array_like
The data points to be indexed. This array is not copied, and
so modifying this data will result in bogus results.
leafsize : int, optional
The number of points at which the algorithm switches over to
brute-force. Has to be positive.
Raises
------
RuntimeError
The maximum recursion limit can be exceeded for large data
sets. If this happens, either increase the value for the `leafsize`
parameter or increase the recursion limit by::
>>> import sys
>>> sys.setrecursionlimit(10000)
Notes
-----
The algorithm used is described in Maneewongvatana and Mount 1999.
The general idea is that the kd-tree is a binary tree, each of whose
nodes represents an axis-aligned hyperrectangle. Each node specifies
an axis and splits the set of points based on whether their coordinate
along that axis is greater than or less than a particular value.
During construction, the axis and splitting point are chosen by the
"sliding midpoint" rule, which ensures that the cells do not all
become long and thin.
The tree can be queried for the r closest neighbors of any given point
(optionally returning only those within some maximum distance of the
point). It can also be queried, with a substantial gain in efficiency,
for the r approximate closest neighbors.
For large dimensions (20 is already large) do not expect this to run
significantly faster than brute force. High-dimensional nearest-neighbor
queries are a substantial open problem in computer science.
The tree also supports all-neighbors queries, both with arrays of points
and with other kd-trees. These do use a reasonably efficient algorithm,
but the kd-tree is not necessarily the best data structure for this
sort of calculation.
"""
def __init__(self, data, leafsize=10):
self.data = np.asarray(data)
self.n, self.m = np.shape(self.data)
self.leafsize = int(leafsize)
if self.leafsize < 1:
raise ValueError("leafsize must be at least 1")
self.maxes = np.amax(self.data,axis=0)
self.mins = np.amin(self.data,axis=0)
self.tree = self.__build(np.arange(self.n), self.maxes, self.mins)
class node(object):
if sys.version_info[0] >= 3:
def __lt__(self, other):
return id(self) < id(other)
def __gt__(self, other):
return id(self) > id(other)
def __le__(self, other):
return id(self) <= id(other)
def __ge__(self, other):
return id(self) >= id(other)
def __eq__(self, other):
return id(self) == id(other)
class leafnode(node):
def __init__(self, idx):
self.idx = idx
self.children = len(idx)
class innernode(node):
def __init__(self, split_dim, split, less, greater):
self.split_dim = split_dim
self.split = split
self.less = less
self.greater = greater
self.children = less.children+greater.children
def __build(self, idx, maxes, mins):
if len(idx) <= self.leafsize:
return KDTree.leafnode(idx)
else:
data = self.data[idx]
# maxes = np.amax(data,axis=0)
# mins = np.amin(data,axis=0)
d = np.argmax(maxes-mins)
maxval = maxes[d]
minval = mins[d]
if maxval == minval:
# all points are identical; warn user?
return KDTree.leafnode(idx)
data = data[:,d]
# sliding midpoint rule; see Maneewongvatana and Mount 1999
# for arguments that this is a good idea.
split = (maxval+minval)/2
less_idx = np.nonzero(data <= split)[0]
greater_idx = np.nonzero(data > split)[0]
if len(less_idx) == 0:
split = np.amin(data)
less_idx = np.nonzero(data <= split)[0]
greater_idx = np.nonzero(data > split)[0]
if len(greater_idx) == 0:
split = np.amax(data)
less_idx = np.nonzero(data < split)[0]
greater_idx = np.nonzero(data >= split)[0]
if len(less_idx) == 0:
# _still_ zero? all must have the same value
if not np.all(data == data[0]):
raise ValueError("Troublesome data array: %s" % data)
split = data[0]
less_idx = np.arange(len(data)-1)
greater_idx = np.array([len(data)-1])
lessmaxes = np.copy(maxes)
lessmaxes[d] = split
greatermins = np.copy(mins)
greatermins[d] = split
return KDTree.innernode(d, split,
self.__build(idx[less_idx],lessmaxes,mins),
self.__build(idx[greater_idx],maxes,greatermins))
def __query(self, x, k=1, eps=0, p=2, distance_upper_bound=np.inf):
side_distances = np.maximum(0,np.maximum(x-self.maxes,self.mins-x))
if p != np.inf:
side_distances **= p
min_distance = np.sum(side_distances)
else:
min_distance = np.amax(side_distances)
# priority queue for chasing nodes
# entries are:
# minimum distance between the cell and the target
# distances between the nearest side of the cell and the target
# the head node of the cell
q = [(min_distance,
tuple(side_distances),
self.tree)]
# priority queue for the nearest neighbors
# furthest known neighbor first
# entries are (-distance**p, i)
neighbors = []
if eps == 0:
epsfac = 1
elif p == np.inf:
epsfac = 1/(1+eps)
else:
epsfac = 1/(1+eps)**p
if p != np.inf and distance_upper_bound != np.inf:
distance_upper_bound = distance_upper_bound**p
while q:
min_distance, side_distances, node = heappop(q)
if isinstance(node, KDTree.leafnode):
# brute-force
data = self.data[node.idx]
ds = minkowski_distance_p(data,x[np.newaxis,:],p)
for i in range(len(ds)):
if ds[i] < distance_upper_bound:
if len(neighbors) == k:
heappop(neighbors)
heappush(neighbors, (-ds[i], node.idx[i]))
if len(neighbors) == k:
distance_upper_bound = -neighbors[0][0]
else:
# we don't push cells that are too far onto the queue at all,
# but since the distance_upper_bound decreases, we might get
# here even if the cell's too far
if min_distance > distance_upper_bound*epsfac:
# since this is the nearest cell, we're done, bail out
break
# compute minimum distances to the children and push them on
if x[node.split_dim] < node.split:
near, far = node.less, node.greater
else:
near, far = node.greater, node.less
# near child is at the same distance as the current node
heappush(q,(min_distance, side_distances, near))
# far child is further by an amount depending only
# on the split value
sd = list(side_distances)
if p == np.inf:
min_distance = max(min_distance, abs(node.split-x[node.split_dim]))
elif p == 1:
sd[node.split_dim] = np.abs(node.split-x[node.split_dim])
min_distance = min_distance - side_distances[node.split_dim] + sd[node.split_dim]
else:
sd[node.split_dim] = np.abs(node.split-x[node.split_dim])**p
min_distance = min_distance - side_distances[node.split_dim] + sd[node.split_dim]
# far child might be too far, if so, don't bother pushing it
if min_distance <= distance_upper_bound*epsfac:
heappush(q,(min_distance, tuple(sd), far))
if p == np.inf:
return sorted([(-d,i) for (d,i) in neighbors])
else:
return sorted([((-d)**(1./p),i) for (d,i) in neighbors])
def query(self, x, k=1, eps=0, p=2, distance_upper_bound=np.inf):
"""
Query the kd-tree for nearest neighbors
Parameters
----------
x : array_like, last dimension self.m
An array of points to query.
k : int, optional
The number of nearest neighbors to return.
eps : nonnegative float, optional
Return approximate nearest neighbors; the kth returned value
is guaranteed to be no further than (1+eps) times the
distance to the real kth nearest neighbor.
p : float, 1<=p<=infinity, optional
Which Minkowski p-norm to use.
1 is the sum-of-absolute-values "Manhattan" distance
2 is the usual Euclidean distance
infinity is the maximum-coordinate-difference distance
distance_upper_bound : nonnegative float, optional
Return only neighbors within this distance. This is used to prune
tree searches, so if you are doing a series of nearest-neighbor
queries, it may help to supply the distance to the nearest neighbor
of the most recent point.
Returns
-------
d : float or array of floats
The distances to the nearest neighbors.
If x has shape tuple+(self.m,), then d has shape tuple if
k is one, or tuple+(k,) if k is larger than one. Missing
neighbors (e.g. when k > n or distance_upper_bound is
given) are indicated with infinite distances. If k is None,
then d is an object array of shape tuple, containing lists
of distances. In either case the hits are sorted by distance
(nearest first).
i : integer or array of integers
The locations of the neighbors in self.data. i is the same
shape as d.
Examples
--------
>>> from scipy import spatial
>>> x, y = np.mgrid[0:5, 2:8]
>>> tree = spatial.KDTree(list(zip(x.ravel(), y.ravel())))
>>> tree.data
array([[0, 2],
[0, 3],
[0, 4],
[0, 5],
[0, 6],
[0, 7],
[1, 2],
[1, 3],
[1, 4],
[1, 5],
[1, 6],
[1, 7],
[2, 2],
[2, 3],
[2, 4],
[2, 5],
[2, 6],
[2, 7],
[3, 2],
[3, 3],
[3, 4],
[3, 5],
[3, 6],
[3, 7],
[4, 2],
[4, 3],
[4, 4],
[4, 5],
[4, 6],
[4, 7]])
>>> pts = np.array([[0, 0], [2.1, 2.9]])
>>> tree.query(pts)
(array([ 2. , 0.14142136]), array([ 0, 13]))
>>> tree.query(pts[0])
(2.0, 0)
"""
x = np.asarray(x)
if np.shape(x)[-1] != self.m:
raise ValueError("x must consist of vectors of length %d but has shape %s" % (self.m, np.shape(x)))
if p < 1:
raise ValueError("Only p-norms with 1<=p<=infinity permitted")
retshape = np.shape(x)[:-1]
if retshape != ():
if k is None:
dd = np.empty(retshape,dtype=object)
ii = np.empty(retshape,dtype=object)
elif k > 1:
dd = np.empty(retshape+(k,),dtype=float)
dd.fill(np.inf)
ii = np.empty(retshape+(k,),dtype=int)
ii.fill(self.n)
elif k == 1:
dd = np.empty(retshape,dtype=float)
dd.fill(np.inf)
ii = np.empty(retshape,dtype=int)
ii.fill(self.n)
else:
raise ValueError("Requested %s nearest neighbors; acceptable numbers are integers greater than or equal to one, or None" % k)
for c in np.ndindex(retshape):
hits = self.__query(x[c], k=k, eps=eps, p=p, distance_upper_bound=distance_upper_bound)
if k is None:
dd[c] = [d for (d,i) in hits]
ii[c] = [i for (d,i) in hits]
elif k > 1:
for j in range(len(hits)):
dd[c+(j,)], ii[c+(j,)] = hits[j]
elif k == 1:
if len(hits) > 0:
dd[c], ii[c] = hits[0]
else:
dd[c] = np.inf
ii[c] = self.n
return dd, ii
else:
hits = self.__query(x, k=k, eps=eps, p=p, distance_upper_bound=distance_upper_bound)
if k is None:
return [d for (d,i) in hits], [i for (d,i) in hits]
elif k == 1:
if len(hits) > 0:
return hits[0]
else:
return np.inf, self.n
elif k > 1:
dd = np.empty(k,dtype=float)
dd.fill(np.inf)
ii = np.empty(k,dtype=int)
ii.fill(self.n)
for j in range(len(hits)):
dd[j], ii[j] = hits[j]
return dd, ii
else:
raise ValueError("Requested %s nearest neighbors; acceptable numbers are integers greater than or equal to one, or None" % k)
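# A hedged sketch (not part of the docstring above): with k=2 the returned
# arrays gain a trailing axis of length k, as described under "Returns";
# the random points below are an illustrative assumption.
# >>> from scipy import spatial
# >>> tree = spatial.KDTree(np.random.rand(50, 3))
# >>> dd, ii = tree.query(np.random.rand(4, 3), k=2)
# >>> dd.shape, ii.shape
# ((4, 2), (4, 2))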
def __query_ball_point(self, x, r, p=2., eps=0):
R = Rectangle(self.maxes, self.mins)
def traverse_checking(node, rect):
if rect.min_distance_point(x, p) > r / (1. + eps):
return []
elif rect.max_distance_point(x, p) < r * (1. + eps):
return traverse_no_checking(node)
elif isinstance(node, KDTree.leafnode):
d = self.data[node.idx]
return node.idx[minkowski_distance(d, x, p) <= r].tolist()
else:
less, greater = rect.split(node.split_dim, node.split)
return traverse_checking(node.less, less) + \
traverse_checking(node.greater, greater)
def traverse_no_checking(node):
if isinstance(node, KDTree.leafnode):
return node.idx.tolist()
else:
return traverse_no_checking(node.less) + \
traverse_no_checking(node.greater)
return traverse_checking(self.tree, R)
def query_ball_point(self, x, r, p=2., eps=0):
"""Find all points within distance r of point(s) x.
Parameters
----------
x : array_like, shape tuple + (self.m,)
The point or points to search for neighbors of.
r : positive float
The radius of points to return.
p : float, optional
Which Minkowski p-norm to use. Should be in the range [1, inf].
eps : nonnegative float, optional
Approximate search. Branches of the tree are not explored if their
nearest points are further than ``r / (1 + eps)``, and branches are
added in bulk if their furthest points are nearer than
``r * (1 + eps)``.
Returns
-------
results : list or array of lists
If `x` is a single point, returns a list of the indices of the
neighbors of `x`. If `x` is an array of points, returns an object
array of shape tuple containing lists of neighbors.
Notes
-----
If you have many points whose neighbors you want to find, you may save
substantial amounts of time by putting them in a KDTree and using
query_ball_tree.
Examples
--------
>>> from scipy import spatial
>>> x, y = np.mgrid[0:5, 0:5]
>>> points = zip(x.ravel(), y.ravel())
>>> tree = spatial.KDTree(points)
>>> tree.query_ball_point([2, 0], 1)
[5, 10, 11, 15]
Query multiple points and plot the results:
>>> import matplotlib.pyplot as plt
>>> points = np.asarray(points)
>>> plt.plot(points[:,0], points[:,1], '.')
>>> for results in tree.query_ball_point(([2, 0], [3, 3]), 1):
... nearby_points = points[results]
... plt.plot(nearby_points[:,0], nearby_points[:,1], 'o')
>>> plt.margins(0.1, 0.1)
>>> plt.show()
"""
x = np.asarray(x)
if x.shape[-1] != self.m:
raise ValueError("Searching for a %d-dimensional point in a "
"%d-dimensional KDTree" % (x.shape[-1], self.m))
if len(x.shape) == 1:
return self.__query_ball_point(x, r, p, eps)
else:
retshape = x.shape[:-1]
result = np.empty(retshape, dtype=object)
for c in np.ndindex(retshape):
result[c] = self.__query_ball_point(x[c], r, p=p, eps=eps)
return result
def query_ball_tree(self, other, r, p=2., eps=0):
"""Find all pairs of points whose distance is at most r
Parameters
----------
other : KDTree instance
The tree containing points to search against.
r : float
The maximum distance, has to be positive.
p : float, optional
Which Minkowski norm to use. `p` has to meet the condition
``1 <= p <= infinity``.
eps : float, optional
Approximate search. Branches of the tree are not explored
if their nearest points are further than ``r/(1+eps)``, and
branches are added in bulk if their furthest points are nearer
than ``r * (1+eps)``. `eps` has to be non-negative.
Returns
-------
results : list of lists
For each element ``self.data[i]`` of this tree, ``results[i]`` is a
list of the indices of its neighbors in ``other.data``.
"""
results = [[] for i in range(self.n)]
def traverse_checking(node1, rect1, node2, rect2):
if rect1.min_distance_rectangle(rect2, p) > r/(1.+eps):
return
elif rect1.max_distance_rectangle(rect2, p) < r*(1.+eps):
traverse_no_checking(node1, node2)
elif isinstance(node1, KDTree.leafnode):
if isinstance(node2, KDTree.leafnode):
d = other.data[node2.idx]
for i in node1.idx:
results[i] += node2.idx[minkowski_distance(d,self.data[i],p) <= r].tolist()
else:
less, greater = rect2.split(node2.split_dim, node2.split)
traverse_checking(node1,rect1,node2.less,less)
traverse_checking(node1,rect1,node2.greater,greater)
elif isinstance(node2, KDTree.leafnode):
less, greater = rect1.split(node1.split_dim, node1.split)
traverse_checking(node1.less,less,node2,rect2)
traverse_checking(node1.greater,greater,node2,rect2)
else:
less1, greater1 = rect1.split(node1.split_dim, node1.split)
less2, greater2 = rect2.split(node2.split_dim, node2.split)
traverse_checking(node1.less,less1,node2.less,less2)
traverse_checking(node1.less,less1,node2.greater,greater2)
traverse_checking(node1.greater,greater1,node2.less,less2)
traverse_checking(node1.greater,greater1,node2.greater,greater2)
def traverse_no_checking(node1, node2):
if isinstance(node1, KDTree.leafnode):
if isinstance(node2, KDTree.leafnode):
for i in node1.idx:
results[i] += node2.idx.tolist()
else:
traverse_no_checking(node1, node2.less)
traverse_no_checking(node1, node2.greater)
else:
traverse_no_checking(node1.less, node2)
traverse_no_checking(node1.greater, node2)
traverse_checking(self.tree, Rectangle(self.maxes, self.mins),
other.tree, Rectangle(other.maxes, other.mins))
return results
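# Hedged usage sketch (not from the original file): for each point of one
# tree, list its neighbours in another tree within radius 0.3; the random
# point clouds are illustrative assumptions.
# >>> from scipy import spatial
# >>> t1 = spatial.KDTree(np.random.rand(10, 2))
# >>> t2 = spatial.KDTree(np.random.rand(15, 2))
# >>> hits = t1.query_ball_tree(t2, r=0.3)
# >>> len(hits)      # one list of indices into t2.data per point of t1
# 10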
def query_pairs(self, r, p=2., eps=0):
"""
Find all pairs of points within a distance.
Parameters
----------
r : positive float
The maximum distance.
p : float, optional
Which Minkowski norm to use. `p` has to meet the condition
``1 <= p <= infinity``.
eps : float, optional
Approximate search. Branches of the tree are not explored
if their nearest points are further than ``r/(1+eps)``, and
branches are added in bulk if their furthest points are nearer
than ``r * (1+eps)``. `eps` has to be non-negative.
Returns
-------
results : set
Set of pairs ``(i,j)``, with ``i < j``, for which the corresponding
positions are close.
"""
results = set()
def traverse_checking(node1, rect1, node2, rect2):
if rect1.min_distance_rectangle(rect2, p) > r/(1.+eps):
return
elif rect1.max_distance_rectangle(rect2, p) < r*(1.+eps):
traverse_no_checking(node1, node2)
elif isinstance(node1, KDTree.leafnode):
if isinstance(node2, KDTree.leafnode):
# Special care to avoid duplicate pairs
if id(node1) == id(node2):
d = self.data[node2.idx]
for i in node1.idx:
for j in node2.idx[minkowski_distance(d,self.data[i],p) <= r]:
if i < j:
results.add((i,j))
else:
d = self.data[node2.idx]
for i in node1.idx:
for j in node2.idx[minkowski_distance(d,self.data[i],p) <= r]:
if i < j:
results.add((i,j))
elif j < i:
results.add((j,i))
else:
less, greater = rect2.split(node2.split_dim, node2.split)
traverse_checking(node1,rect1,node2.less,less)
traverse_checking(node1,rect1,node2.greater,greater)
elif isinstance(node2, KDTree.leafnode):
less, greater = rect1.split(node1.split_dim, node1.split)
traverse_checking(node1.less,less,node2,rect2)
traverse_checking(node1.greater,greater,node2,rect2)
else:
less1, greater1 = rect1.split(node1.split_dim, node1.split)
less2, greater2 = rect2.split(node2.split_dim, node2.split)
traverse_checking(node1.less,less1,node2.less,less2)
traverse_checking(node1.less,less1,node2.greater,greater2)
# Avoid traversing (node1.less, node2.greater) and
# (node1.greater, node2.less) (it's the same node pair twice
# over, which is the source of the complication in the
# original KDTree.query_pairs)
if id(node1) != id(node2):
traverse_checking(node1.greater,greater1,node2.less,less2)
traverse_checking(node1.greater,greater1,node2.greater,greater2)
def traverse_no_checking(node1, node2):
if isinstance(node1, KDTree.leafnode):
if isinstance(node2, KDTree.leafnode):
# Special care to avoid duplicate pairs
if id(node1) == id(node2):
for i in node1.idx:
for j in node2.idx:
if i < j:
results.add((i,j))
else:
for i in node1.idx:
for j in node2.idx:
if i < j:
results.add((i,j))
elif j < i:
results.add((j,i))
else:
traverse_no_checking(node1, node2.less)
traverse_no_checking(node1, node2.greater)
else:
# Avoid traversing (node1.less, node2.greater) and
# (node1.greater, node2.less) (it's the same node pair twice
# over, which is the source of the complication in the
# original KDTree.query_pairs)
if id(node1) == id(node2):
traverse_no_checking(node1.less, node2.less)
traverse_no_checking(node1.less, node2.greater)
traverse_no_checking(node1.greater, node2.greater)
else:
traverse_no_checking(node1.less, node2)
traverse_no_checking(node1.greater, node2)
traverse_checking(self.tree, Rectangle(self.maxes, self.mins),
self.tree, Rectangle(self.maxes, self.mins))
return results
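# Hedged sketch: collect index pairs of points lying within 0.2 of each
# other; per the docstring the result is a set of (i, j) tuples with i < j.
# The random data is an illustrative assumption.
# >>> from scipy import spatial
# >>> t = spatial.KDTree(np.random.rand(30, 2))
# >>> close = t.query_pairs(0.2)
# >>> all(i < j for i, j in close)
# True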
def count_neighbors(self, other, r, p=2.):
"""
Count how many nearby pairs can be formed.
Count the number of pairs ``(x1, x2)`` that can be formed, with x1 drawn
from self and x2 drawn from `other`, and where
``distance(x1, x2, p) <= r``.
This is the "two-point correlation" described in Gray and Moore 2000,
"N-body problems in statistical learning", and the code here is based
on their algorithm.
Parameters
----------
other : KDTree instance
The other tree to draw points from.
r : float or one-dimensional array of floats
The radius to produce a count for. Multiple radii are searched with
a single tree traversal.
p : float, 1<=p<=infinity, optional
Which Minkowski p-norm to use
Returns
-------
result : int or 1-D array of ints
The number of pairs. Note that this is internally stored in a numpy
int, and so may overflow if very large (2e9).
"""
def traverse(node1, rect1, node2, rect2, idx):
min_r = rect1.min_distance_rectangle(rect2,p)
max_r = rect1.max_distance_rectangle(rect2,p)
c_greater = r[idx] > max_r
result[idx[c_greater]] += node1.children*node2.children
idx = idx[(min_r <= r[idx]) & (r[idx] <= max_r)]
if len(idx) == 0:
return
if isinstance(node1,KDTree.leafnode):
if isinstance(node2,KDTree.leafnode):
ds = minkowski_distance(self.data[node1.idx][:,np.newaxis,:],
other.data[node2.idx][np.newaxis,:,:],
p).ravel()
ds.sort()
result[idx] += np.searchsorted(ds,r[idx],side='right')
else:
less, greater = rect2.split(node2.split_dim, node2.split)
traverse(node1, rect1, node2.less, less, idx)
traverse(node1, rect1, node2.greater, greater, idx)
else:
if isinstance(node2,KDTree.leafnode):
less, greater = rect1.split(node1.split_dim, node1.split)
traverse(node1.less, less, node2, rect2, idx)
traverse(node1.greater, greater, node2, rect2, idx)
else:
less1, greater1 = rect1.split(node1.split_dim, node1.split)
less2, greater2 = rect2.split(node2.split_dim, node2.split)
traverse(node1.less,less1,node2.less,less2,idx)
traverse(node1.less,less1,node2.greater,greater2,idx)
traverse(node1.greater,greater1,node2.less,less2,idx)
traverse(node1.greater,greater1,node2.greater,greater2,idx)
R1 = Rectangle(self.maxes, self.mins)
R2 = Rectangle(other.maxes, other.mins)
if np.shape(r) == ():
r = np.array([r])
result = np.zeros(1,dtype=int)
traverse(self.tree, R1, other.tree, R2, np.arange(1))
return result[0]
elif len(np.shape(r)) == 1:
r = np.asarray(r)
n, = r.shape
result = np.zeros(n,dtype=int)
traverse(self.tree, R1, other.tree, R2, np.arange(n))
return result
else:
raise ValueError("r must be either a single value or a one-dimensional array of values")
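# Hedged sketch of the two-point correlation count described above; the
# point clouds and radii are illustrative assumptions.
# >>> from scipy import spatial
# >>> t1 = spatial.KDTree(np.random.rand(100, 3))
# >>> t2 = spatial.KDTree(np.random.rand(100, 3))
# >>> t1.count_neighbors(t2, 0.1)                 # single radius -> int
# >>> t1.count_neighbors(t2, [0.05, 0.1, 0.2])    # several radii -> 1-D int array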
def sparse_distance_matrix(self, other, max_distance, p=2.):
"""
Compute a sparse distance matrix
Computes a distance matrix between two KDTrees, leaving as zero
any distance greater than max_distance.
Parameters
----------
other : KDTree
max_distance : positive float
p : float, optional
Returns
-------
result : dok_matrix
Sparse matrix representing the results in "dictionary of keys" format.
"""
result = scipy.sparse.dok_matrix((self.n,other.n))
def traverse(node1, rect1, node2, rect2):
if rect1.min_distance_rectangle(rect2, p) > max_distance:
return
elif isinstance(node1, KDTree.leafnode):
if isinstance(node2, KDTree.leafnode):
for i in node1.idx:
for j in node2.idx:
d = minkowski_distance(self.data[i],other.data[j],p)
if d <= max_distance:
result[i,j] = d
else:
less, greater = rect2.split(node2.split_dim, node2.split)
traverse(node1,rect1,node2.less,less)
traverse(node1,rect1,node2.greater,greater)
elif isinstance(node2, KDTree.leafnode):
less, greater = rect1.split(node1.split_dim, node1.split)
traverse(node1.less,less,node2,rect2)
traverse(node1.greater,greater,node2,rect2)
else:
less1, greater1 = rect1.split(node1.split_dim, node1.split)
less2, greater2 = rect2.split(node2.split_dim, node2.split)
traverse(node1.less,less1,node2.less,less2)
traverse(node1.less,less1,node2.greater,greater2)
traverse(node1.greater,greater1,node2.less,less2)
traverse(node1.greater,greater1,node2.greater,greater2)
traverse(self.tree, Rectangle(self.maxes, self.mins),
other.tree, Rectangle(other.maxes, other.mins))
return result
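# Hedged sketch: pairs farther apart than max_distance are simply left out
# of the returned dictionary-of-keys matrix. Inputs are illustrative
# assumptions.
# >>> from scipy import spatial
# >>> t1 = spatial.KDTree(np.random.rand(20, 2))
# >>> t2 = spatial.KDTree(np.random.rand(25, 2))
# >>> m = t1.sparse_distance_matrix(t2, 0.3)
# >>> m.shape
# (20, 25)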
def distance_matrix(x, y, p=2, threshold=1000000):
"""
Compute the distance matrix.
Returns the matrix of all pair-wise distances.
Parameters
----------
x : (M, K) array_like
Matrix of M vectors in K dimensions.
y : (N, K) array_like
Matrix of N vectors in K dimensions.
p : float, 1 <= p <= infinity
Which Minkowski p-norm to use.
threshold : positive int
If ``M * N * K`` > `threshold`, algorithm uses a Python loop instead
of large temporary arrays.
Returns
-------
result : (M, N) ndarray
Distance matrix.
Examples
--------
>>> from scipy.spatial import distance_matrix
>>> distance_matrix([[0,0],[0,1]], [[1,0],[1,1]])
array([[ 1. , 1.41421356],
[ 1.41421356, 1. ]])
"""
x = np.asarray(x)
m, k = x.shape
y = np.asarray(y)
n, kk = y.shape
if k != kk:
raise ValueError("x contains %d-dimensional vectors but y contains %d-dimensional vectors" % (k, kk))
if m*n*k <= threshold:
return minkowski_distance(x[:,np.newaxis,:],y[np.newaxis,:,:],p)
else:
result = np.empty((m,n),dtype=float) # FIXME: figure out the best dtype
if m < n:
for i in range(m):
result[i,:] = minkowski_distance(x[i],y,p)
else:
for j in range(n):
result[:,j] = minkowski_distance(x,y[j],p)
return result
| bsd-3-clause |
maestrotf/pymepps | docs/examples/example_plot_stationnc.py | 2 | 2009 | """
Load station data based on NetCDF files
=======================================
In this example we show how to load station data based on NetCDF files.
The data is loaded with the pymepps package. Thanks to Ingo Lange we
could use original data from the Wettermast for this example. In the
following, the data is loaded, plotted and saved as a json file.
"""
import pymepps
import matplotlib.pyplot as plt
######################################################################
# We could use the global pymepps open\_station\_dataset function to open
# the Wettermast data. We have to specify the data path and the data type.
#
wm_ds = pymepps.open_station_dataset('../data/station/wettermast.nc', 'nc')
print(wm_ds)
######################################################################
# Now we could extract the temperature in 2 m height. For this we use the
# select method of the resulted dataset.
#
t2m = wm_ds.select('TT002_M10')
print(type(t2m))
print(t2m.describe())
######################################################################
# We could see that the resulting temperature is a normal pandas.Series.
# So it is possible to use all pandas methods, e.g. plotting of the
# Series.
#
t2m.plot()
plt.xlabel('Date')
plt.ylabel('Temperature in °C')
plt.title('Temperature at the Wettermast Hamburg')
plt.show()
######################################################################
# Pymepps uses an accessor to extend the pandas functionality. The
# accessor is available as ``Series.pp``. At the moment only a lonlat
# attribute and update, save and load methods are defined, but it is
# planned to add further methods.
#
print(t2m.pp.lonlat)
######################################################################
# We could see that the longitude and latitude are None at the moment,
# because we haven't set them yet. We could either set them directly or set
# the coordinates in the open\_station\_dataset function with the lonlat
# argument.
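######################################################################
# As a hedged sketch of the "saved as a json file" step mentioned in the
# header: the extracted Series is ordinary pandas data, so pandas' own
# ``to_json`` can write it out. The file name is an illustrative assumption;
# pymepps' own ``pp.save``/``pp.load`` methods mentioned above could be used
# instead, but their signatures are not shown in this example.
#
t2m.to_json('wettermast_t2m.json')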
# | gpl-3.0 |
Chinmoy07/ML-Term-Project-Team-Pikachu | Python-PHP-Codes/svm_classifier.py | 1 | 1555 | from sklearn import svm
import matplotlib.pyplot as plt
import json
import numpy as np
training_length = 0.60
with open("../Output-files/features.json", "r") as features_file:
data = json.load( features_file )
X = []
Y = []
index_feat = data["Index Features"]
features_key = [
"Moving Average",
"Price Momentum Oscillator",
"Relative Strength Index",
"On Balance Volume",
"Stochastic Oscillator",
"Sentiment Strength"
]
for i in range( 0, len( index_feat ) ):
temp_arr = []
for j in range( 0, len( features_key ) ):
temp_arr.append( index_feat[i][ features_key[j] ] )
X.append( temp_arr )
Y.append( index_feat[i]["Target"] )
X = np.array( X )
Y = np.array( Y )
X_train = X[ : int( training_length*len( X ) ) ]
Y_train = Y[ : int( training_length*len( Y ) ) ]
X_test = X[ int( training_length*len( X ) ) : ]
Y_test = Y[ int( training_length*len( Y ) ) : ]
clf = svm.SVC( kernel='rbf' )
clf.fit( X_train, Y_train )
Y_pred = clf.predict( X_test )
cnt = 0
pred_file = open("../Output-files/Predictions.txt", "w")
pred_file.write("Actual Predicted\n")
for i in range( 0, len( Y_pred ) ):
pred_file.write( (' '+str(Y_test[i]))[-2:] + " " + (' '+str(Y_pred[i]))[-2:] + "\n" )
if( Y_pred[i] != Y_test[i] ):
cnt+=1
print( "Prediction done with accuracy : " + str( ( len( Y_pred ) - cnt )/( len( Y_pred ) ) ) + " and predictions saved to Output-files/Predictions.txt")
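# Hedged cross-check (not in the original script): scikit-learn's built-in
# scorers should report the same accuracy as the manual count above, and the
# predictions file opened earlier is closed here.
from sklearn.metrics import accuracy_score
print( "accuracy_score : " + str( accuracy_score( Y_test, Y_pred ) ) )
print( "clf.score : " + str( clf.score( X_test, Y_test ) ) )
pred_file.close()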
| mit |
alfonsokim/nupic | src/nupic/algorithms/monitor_mixin/plot.py | 20 | 5229 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014-2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Plot class used in monitor mixin framework.
"""
import os
try:
# We import in here to avoid creating a matplotlib dependency in nupic.
import matplotlib.pyplot as plt
import matplotlib.cm as cm
except ImportError:
# Suppress this optional dependency on matplotlib. NOTE we don't log this,
# because python logging implicitly adds the StreamHandler to root logger when
# calling `logging.debug`, etc., which may undermine an application's logging
# configuration.
plt = None
cm = None
class Plot(object):
def __init__(self, monitor, title, show=True):
"""
@param monitor (MonitorMixinBase) Monitor Mixin instance that generated
this plot
@param title (string) Plot title
"""
self._monitor = monitor
self._title = title
self._fig = self._initFigure()
self._show = show
if self._show:
plt.ion()
plt.show()
def _initFigure(self):
fig = plt.figure()
fig.suptitle(self._prettyPrintTitle())
return fig
def _prettyPrintTitle(self):
if self._monitor.mmName is not None:
return "[{0}] {1}".format(self._monitor.mmName, self._title)
return self._title
def addGraph(self, data, position=111, xlabel=None, ylabel=None):
""" Adds a graph to the plot's figure.
@param data See matplotlib.Axes.plot documentation.
@param position A 3-digit number. The first two digits define a 2D grid
where subplots may be added. The final digit specifies the nth grid
location for the added subplot
@param xlabel text to be displayed on the x-axis
@param ylabel text to be displayed on the y-axis
"""
ax = self._addBase(position, xlabel=xlabel, ylabel=ylabel)
ax.plot(data)
plt.draw()
def addHistogram(self, data, position=111, xlabel=None, ylabel=None,
bins=None):
""" Adds a histogram to the plot's figure.
@param data See matplotlib.Axes.hist documentation.
@param position A 3-digit number. The first two digits define a 2D grid
where subplots may be added. The final digit specifies the nth grid
location for the added subplot
@param xlabel text to be displayed on the x-axis
@param ylabel text to be displayed on the y-axis
"""
ax = self._addBase(position, xlabel=xlabel, ylabel=ylabel)
ax.hist(data, bins=bins, color="green", alpha=0.8)
plt.draw()
def add2DArray(self, data, position=111, xlabel=None, ylabel=None, cmap=None,
aspect="auto", interpolation="nearest", name=None):
""" Adds an image to the plot's figure.
@param data a 2D array. See matplotlib.Axes.imshow documentation.
@param position A 3-digit number. The first two digits define a 2D grid
where subplots may be added. The final digit specifies the nth grid
location for the added subplot
@param xlabel text to be displayed on the x-axis
@param ylabel text to be displayed on the y-axis
@param cmap color map used in the rendering
@param aspect how aspect ratio is handled during resize
@param interpolation interpolation method
"""
if cmap is None:
# The default colormodel is an ugly blue-red model.
cmap = cm.Greys
ax = self._addBase(position, xlabel=xlabel, ylabel=ylabel)
ax.imshow(data, cmap=cmap, aspect=aspect, interpolation=interpolation)
if self._show:
plt.draw()
if name is not None:
if not os.path.exists("log"):
os.mkdir("log")
plt.savefig("log/{name}.png".format(name=name), bbox_inches="tight",
figsize=(8, 6), dpi=400)
def _addBase(self, position, xlabel=None, ylabel=None):
""" Adds a subplot to the plot's figure at specified position.
@param position A 3-digit number. The first two digits define a 2D grid
where subplots may be added. The final digit specifies the nth grid
location for the added subplot
@param xlabel text to be displayed on the x-axis
@param ylabel text to be displayed on the y-axis
@returns (matplotlib.Axes) Axes instance
"""
ax = self._fig.add_subplot(position)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
return ax
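# Hedged usage sketch (not part of the original module): any object exposing
# an ``mmName`` attribute can stand in for the monitor mixin when trying the
# class interactively; the stand-in and data below are illustrative
# assumptions (and assume matplotlib is importable).
# >>> class _FakeMonitor(object):
# ...   mmName = "demo"
# >>> plot = Plot(_FakeMonitor(), "demo data", show=False)
# >>> plot.addGraph([0, 1, 1, 2, 3, 5, 8], position=211, xlabel="step", ylabel="value")
# >>> plot.addHistogram([1, 1, 2, 2, 2, 3], position=212, bins=3)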
| agpl-3.0 |
goulu/networkx | networkx/drawing/tests/test_pylab.py | 9 | 1133 | """Unit tests for matplotlib drawing functions."""
import os
from nose import SkipTest
import networkx as nx
class TestPylab(object):
@classmethod
def setupClass(cls):
global plt
try:
import matplotlib as mpl
mpl.use('PS', warn=False)
import matplotlib.pyplot as plt
plt.rcParams['text.usetex'] = False
except ImportError:
raise SkipTest('matplotlib not available.')
except RuntimeError:
raise SkipTest('matplotlib not available.')
def setUp(self):
self.G = nx.barbell_graph(5,10)
def test_draw(self):
try:
N = self.G
nx.draw_spring(N)
plt.savefig('test.ps')
nx.draw_random(N)
plt.savefig('test.ps')
nx.draw_circular(N)
plt.savefig('test.ps')
nx.draw_spectral(N)
plt.savefig('test.ps')
nx.draw_spring(N.to_directed())
plt.savefig('test.ps')
finally:
try:
os.unlink('test.ps')
except OSError:
pass
| bsd-3-clause |
matthewalbani/scipy | scipy/interpolate/interpolate.py | 4 | 106038 | """ Classes for interpolating values.
"""
from __future__ import division, print_function, absolute_import
__all__ = ['interp1d', 'interp2d', 'spline', 'spleval', 'splmake', 'spltopp',
'ppform', 'lagrange', 'PPoly', 'BPoly', 'NdPPoly',
'RegularGridInterpolator', 'interpn']
import itertools
from numpy import (shape, sometrue, array, transpose, searchsorted,
ones, logical_or, atleast_1d, atleast_2d, ravel,
dot, poly1d, asarray, intp)
import numpy as np
import scipy.linalg
import scipy.special as spec
from scipy.special import comb
import math
import warnings
import functools
import operator
from scipy._lib.six import xrange, integer_types, string_types
from . import fitpack
from . import dfitpack
from . import _fitpack
from .polyint import _Interpolator1D
from . import _ppoly
from .fitpack2 import RectBivariateSpline
from .interpnd import _ndim_coords_from_arrays
def reduce_sometrue(a):
all = a
while len(shape(all)) > 1:
all = sometrue(all, axis=0)
return all
def prod(x):
"""Product of a list of numbers; ~40x faster vs np.prod for Python tuples"""
if len(x) == 0:
return 1
return functools.reduce(operator.mul, x)
def lagrange(x, w):
"""
Return a Lagrange interpolating polynomial.
Given two 1-D arrays `x` and `w,` returns the Lagrange interpolating
polynomial through the points ``(x, w)``.
Warning: This implementation is numerically unstable. Do not expect to
be able to use more than about 20 points even if they are chosen optimally.
Parameters
----------
x : array_like
`x` represents the x-coordinates of a set of datapoints.
w : array_like
`w` represents the y-coordinates of a set of datapoints, i.e. f(`x`).
Returns
-------
lagrange : numpy.poly1d instance
The Lagrange interpolating polynomial.
"""
M = len(x)
p = poly1d(0.0)
for j in xrange(M):
pt = poly1d(w[j])
for k in xrange(M):
if k == j:
continue
fac = x[j]-x[k]
pt *= poly1d([1.0, -x[k]])/fac
p += pt
return p
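# Hedged sketch (not from the docstring above): interpolating three points
# that lie on the parabola y = x**2 recovers, up to rounding, the
# coefficients [1, 0, 0] of that parabola.
# >>> from scipy.interpolate import lagrange
# >>> poly = lagrange([0, 1, 2], [0, 1, 4])
# >>> poly.coeffs
# array([ 1., 0., 0.])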
# !! Need to find argument for keeping initialize. If it isn't
# !! found, get rid of it!
class interp2d(object):
"""
interp2d(x, y, z, kind='linear', copy=True, bounds_error=False,
fill_value=nan)
Interpolate over a 2-D grid.
`x`, `y` and `z` are arrays of values used to approximate some function
f: ``z = f(x, y)``. This class returns a function whose call method uses
spline interpolation to find the value of new points.
If `x` and `y` represent a regular grid, consider using
RectBivariateSpline.
Note that calling `interp2d` with NaNs present in input values results in
undefined behaviour.
Methods
-------
__call__
Parameters
----------
x, y : array_like
Arrays defining the data point coordinates.
If the points lie on a regular grid, `x` can specify the column
coordinates and `y` the row coordinates, for example::
>>> x = [0,1,2]; y = [0,3]; z = [[1,2,3], [4,5,6]]
Otherwise, `x` and `y` must specify the full coordinates for each
point, for example::
>>> x = [0,1,2,0,1,2]; y = [0,0,0,3,3,3]; z = [1,2,3,4,5,6]
If `x` and `y` are multi-dimensional, they are flattened before use.
z : array_like
The values of the function to interpolate at the data points. If
`z` is a multi-dimensional array, it is flattened before use. The
length of a flattened `z` array is either
len(`x`)*len(`y`) if `x` and `y` specify the column and row coordinates
or ``len(z) == len(x) == len(y)`` if `x` and `y` specify coordinates
for each point.
kind : {'linear', 'cubic', 'quintic'}, optional
The kind of spline interpolation to use. Default is 'linear'.
copy : bool, optional
If True, the class makes internal copies of x, y and z.
If False, references may be used. The default is to copy.
bounds_error : bool, optional
If True, when interpolated values are requested outside of the
domain of the input data (x,y), a ValueError is raised.
If False, then `fill_value` is used.
fill_value : number, optional
If provided, the value to use for points outside of the
interpolation domain. If omitted (None), values outside
the domain are extrapolated.
See Also
--------
RectBivariateSpline :
Much faster 2D interpolation if your input data is on a grid
bisplrep, bisplev :
Spline interpolation based on FITPACK
BivariateSpline : a more recent wrapper of the FITPACK routines
interp1d : one dimension version of this function
Notes
-----
The minimum number of data points required along the interpolation
axis is ``(k+1)**2``, with k=1 for linear, k=3 for cubic and k=5 for
quintic interpolation.
The interpolator is constructed by `bisplrep`, with a smoothing factor
of 0. If more control over smoothing is needed, `bisplrep` should be
used directly.
Examples
--------
Construct a 2-D grid and interpolate on it:
>>> from scipy import interpolate
>>> x = np.arange(-5.01, 5.01, 0.25)
>>> y = np.arange(-5.01, 5.01, 0.25)
>>> xx, yy = np.meshgrid(x, y)
>>> z = np.sin(xx**2+yy**2)
>>> f = interpolate.interp2d(x, y, z, kind='cubic')
Now use the obtained interpolation function and plot the result:
>>> import matplotlib.pyplot as plt
>>> xnew = np.arange(-5.01, 5.01, 1e-2)
>>> ynew = np.arange(-5.01, 5.01, 1e-2)
>>> znew = f(xnew, ynew)
>>> plt.plot(x, z[0, :], 'ro-', xnew, znew[0, :], 'b-')
>>> plt.show()
"""
def __init__(self, x, y, z, kind='linear', copy=True, bounds_error=False,
fill_value=None):
x = ravel(x)
y = ravel(y)
z = asarray(z)
rectangular_grid = (z.size == len(x) * len(y))
if rectangular_grid:
if z.ndim == 2:
if z.shape != (len(y), len(x)):
raise ValueError("When on a regular grid with x.size = m "
"and y.size = n, if z.ndim == 2, then z "
"must have shape (n, m)")
if not np.all(x[1:] >= x[:-1]):
j = np.argsort(x)
x = x[j]
z = z[:, j]
if not np.all(y[1:] >= y[:-1]):
j = np.argsort(y)
y = y[j]
z = z[j, :]
z = ravel(z.T)
else:
z = ravel(z)
if len(x) != len(y):
raise ValueError(
"x and y must have equal lengths for non rectangular grid")
if len(z) != len(x):
raise ValueError(
"Invalid length for input z for non rectangular grid")
try:
kx = ky = {'linear': 1,
'cubic': 3,
'quintic': 5}[kind]
except KeyError:
raise ValueError("Unsupported interpolation type.")
if not rectangular_grid:
# TODO: surfit is really not meant for interpolation!
self.tck = fitpack.bisplrep(x, y, z, kx=kx, ky=ky, s=0.0)
else:
nx, tx, ny, ty, c, fp, ier = dfitpack.regrid_smth(
x, y, z, None, None, None, None,
kx=kx, ky=ky, s=0.0)
self.tck = (tx[:nx], ty[:ny], c[:(nx - kx - 1) * (ny - ky - 1)],
kx, ky)
self.bounds_error = bounds_error
self.fill_value = fill_value
self.x, self.y, self.z = [array(a, copy=copy) for a in (x, y, z)]
self.x_min, self.x_max = np.amin(x), np.amax(x)
self.y_min, self.y_max = np.amin(y), np.amax(y)
def __call__(self, x, y, dx=0, dy=0, assume_sorted=False):
"""Interpolate the function.
Parameters
----------
x : 1D array
x-coordinates of the mesh on which to interpolate.
y : 1D array
y-coordinates of the mesh on which to interpolate.
dx : int >= 0, < kx
Order of partial derivatives in x.
dy : int >= 0, < ky
Order of partial derivatives in y.
assume_sorted : bool, optional
If False, values of `x` and `y` can be in any order and they are
sorted first.
If True, `x` and `y` have to be arrays of monotonically
increasing values.
Returns
-------
z : 2D array with shape (len(y), len(x))
The interpolated values.
"""
x = atleast_1d(x)
y = atleast_1d(y)
if x.ndim != 1 or y.ndim != 1:
raise ValueError("x and y should both be 1-D arrays")
if not assume_sorted:
x = np.sort(x)
y = np.sort(y)
if self.bounds_error or self.fill_value is not None:
out_of_bounds_x = (x < self.x_min) | (x > self.x_max)
out_of_bounds_y = (y < self.y_min) | (y > self.y_max)
any_out_of_bounds_x = np.any(out_of_bounds_x)
any_out_of_bounds_y = np.any(out_of_bounds_y)
if self.bounds_error and (any_out_of_bounds_x or any_out_of_bounds_y):
raise ValueError("Values out of range; x must be in %r, y in %r"
% ((self.x_min, self.x_max),
(self.y_min, self.y_max)))
z = fitpack.bisplev(x, y, self.tck, dx, dy)
z = atleast_2d(z)
z = transpose(z)
if self.fill_value is not None:
if any_out_of_bounds_x:
z[:, out_of_bounds_x] = self.fill_value
if any_out_of_bounds_y:
z[out_of_bounds_y, :] = self.fill_value
if len(z) == 1:
z = z[0]
return array(z)
def _check_broadcast_up_to(arr_from, shape_to, name):
"""Helper to check that arr_from broadcasts up to shape_to"""
shape_from = arr_from.shape
if len(shape_to) >= len(shape_from):
for t, f in zip(shape_to[::-1], shape_from[::-1]):
if f != 1 and f != t:
break
else: # all checks pass, do the upcasting that we need later
if arr_from.size != 1 and arr_from.shape != shape_to:
arr_from = np.ones(shape_to, arr_from.dtype) * arr_from
return arr_from.ravel()
# at least one check failed
raise ValueError('%s argument must be able to broadcast up '
'to shape %s but had shape %s'
% (name, shape_to, shape_from))
def _do_extrapolate(fill_value):
"""Helper to check if fill_value == "extrapolate" without warnings"""
return (isinstance(fill_value, string_types) and
fill_value == 'extrapolate')
class interp1d(_Interpolator1D):
"""
Interpolate a 1-D function.
`x` and `y` are arrays of values used to approximate some function f:
``y = f(x)``. This class returns a function whose call method uses
interpolation to find the value of new points.
Note that calling `interp1d` with NaNs present in input values results in
undefined behaviour.
Parameters
----------
x : (N,) array_like
A 1-D array of real values.
y : (...,N,...) array_like
A N-D array of real values. The length of `y` along the interpolation
axis must be equal to the length of `x`.
kind : str or int, optional
Specifies the kind of interpolation as a string
('linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic'
where 'slinear', 'quadratic' and 'cubic' refer to a spline
interpolation of first, second or third order) or as an integer
specifying the order of the spline interpolator to use.
Default is 'linear'.
axis : int, optional
Specifies the axis of `y` along which to interpolate.
Interpolation defaults to the last axis of `y`.
copy : bool, optional
If True, the class makes internal copies of x and y.
If False, references to `x` and `y` are used. The default is to copy.
bounds_error : bool, optional
If True, a ValueError is raised any time interpolation is attempted on
a value outside of the range of x (where extrapolation is
necessary). If False, out of bounds values are assigned `fill_value`.
By default, an error is raised unless `fill_value="extrapolate"`.
fill_value : array-like or (array-like, array_like) or "extrapolate", optional
- if a ndarray (or float), this value will be used to fill in for
requested points outside of the data range. If not provided, then
the default is NaN. The array-like must broadcast properly to the
dimensions of the non-interpolation axes.
- If a two-element tuple, then the first element is used as a
fill value for ``x_new < x[0]`` and the second element is used for
``x_new > x[-1]``. Anything that is not a 2-element tuple (e.g.,
list or ndarray, regardless of shape) is taken to be a single
array-like argument meant to be used for both bounds as
``below, above = fill_value, fill_value``.
.. versionadded:: 0.17.0
- If "extrapolate", then points outside the data range will be
extrapolated. ("nearest" and "linear" kinds only.)
.. versionadded:: 0.17.0
assume_sorted : bool, optional
If False, values of `x` can be in any order and they are sorted first.
If True, `x` has to be an array of monotonically increasing values.
Methods
-------
__call__
See Also
--------
splrep, splev
Spline interpolation/smoothing based on FITPACK.
UnivariateSpline : An object-oriented wrapper of the FITPACK routines.
interp2d : 2-D interpolation
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy import interpolate
>>> x = np.arange(0, 10)
>>> y = np.exp(-x/3.0)
>>> f = interpolate.interp1d(x, y)
>>> xnew = np.arange(0, 9, 0.1)
>>> ynew = f(xnew) # use interpolation function returned by `interp1d`
>>> plt.plot(x, y, 'o', xnew, ynew, '-')
>>> plt.show()
"""
def __init__(self, x, y, kind='linear', axis=-1,
copy=True, bounds_error=None, fill_value=np.nan,
assume_sorted=False):
""" Initialize a 1D linear interpolation class."""
_Interpolator1D.__init__(self, x, y, axis=axis)
self.bounds_error = bounds_error # used by fill_value setter
self.copy = copy
if kind in ['zero', 'slinear', 'quadratic', 'cubic']:
order = {'nearest': 0, 'zero': 0, 'slinear': 1,
'quadratic': 2, 'cubic': 3}[kind]
kind = 'spline'
elif isinstance(kind, int):
order = kind
kind = 'spline'
elif kind not in ('linear', 'nearest'):
raise NotImplementedError("%s is unsupported: Use fitpack "
"routines for other types." % kind)
x = array(x, copy=self.copy)
y = array(y, copy=self.copy)
if not assume_sorted:
ind = np.argsort(x)
x = x[ind]
y = np.take(y, ind, axis=axis)
if x.ndim != 1:
raise ValueError("the x array must have exactly one dimension.")
if y.ndim == 0:
raise ValueError("the y array must have at least one dimension.")
# Force-cast y to a floating-point type, if it's not yet one
if not issubclass(y.dtype.type, np.inexact):
y = y.astype(np.float_)
# Backward compatibility
self.axis = axis % y.ndim
# Interpolation goes internally along the first axis
self.y = y
self._y = self._reshape_yi(self.y)
self.x = x
del y, x # clean up namespace to prevent misuse; use attributes
self._kind = kind
self.fill_value = fill_value # calls the setter, can modify bounds_err
# Adjust to interpolation kind; store reference to *unbound*
# interpolation methods, in order to avoid circular references to self
# stored in the bound instance methods, and therefore delayed garbage
# collection. See: http://docs.python.org/2/reference/datamodel.html
if kind in ('linear', 'nearest'):
# Make a "view" of the y array that is rotated to the interpolation
# axis.
minval = 2
if kind == 'nearest':
# Do division before addition to prevent possible integer overflow
self.x_bds = self.x / 2.0
self.x_bds = self.x_bds[1:] + self.x_bds[:-1]
self._call = self.__class__._call_nearest
else:
# Check if we can delegate to numpy.interp (2x-10x faster).
cond = self.x.dtype == np.float_ and self.y.dtype == np.float_
cond = cond and self.y.ndim == 1
cond = cond and not _do_extrapolate(fill_value)
if cond:
self._call = self.__class__._call_linear_np
else:
self._call = self.__class__._call_linear
else:
minval = order + 1
self._spline = splmake(self.x, self._y, order=order)
self._call = self.__class__._call_spline
if len(self.x) < minval:
raise ValueError("x and y arrays must have at "
"least %d entries" % minval)
@property
def fill_value(self):
# backwards compat: mimic a public attribute
return self._fill_value_orig
@fill_value.setter
def fill_value(self, fill_value):
# extrapolation only works for nearest neighbor and linear methods
if _do_extrapolate(fill_value):
if self._kind not in ('nearest', 'linear'):
raise ValueError("Extrapolation does not work with "
"kind=%s" % self._kind)
if self.bounds_error:
raise ValueError("Cannot extrapolate and raise "
"at the same time.")
self.bounds_error = False
self._extrapolate = True
else:
broadcast_shape = (self.y.shape[:self.axis] +
self.y.shape[self.axis + 1:])
if len(broadcast_shape) == 0:
broadcast_shape = (1,)
# it's either a pair (_below_range, _above_range) or a single value
# for both above and below range
if isinstance(fill_value, tuple) and len(fill_value) == 2:
below_above = [np.asarray(fill_value[0]),
np.asarray(fill_value[1])]
names = ('fill_value (below)', 'fill_value (above)')
for ii in range(2):
below_above[ii] = _check_broadcast_up_to(
below_above[ii], broadcast_shape, names[ii])
else:
fill_value = np.asarray(fill_value)
below_above = [_check_broadcast_up_to(
fill_value, broadcast_shape, 'fill_value')] * 2
self._fill_value_below, self._fill_value_above = below_above
self._extrapolate = False
if self.bounds_error is None:
self.bounds_error = True
# backwards compat: fill_value was a public attr; make it writeable
self._fill_value_orig = fill_value
def _call_linear_np(self, x_new):
# Note that out-of-bounds values are taken care of in self._evaluate
return np.interp(x_new, self.x, self.y)
def _call_linear(self, x_new):
# 2. Find where in the original data the values to interpolate
# would be inserted.
# Note: If x_new[n] == x[m], then m is returned by searchsorted.
x_new_indices = searchsorted(self.x, x_new)
# 3. Clip x_new_indices so that they are within the range of
# self.x indices and at least 1. Removes mis-interpolation
# of x_new[n] = x[0]
x_new_indices = x_new_indices.clip(1, len(self.x)-1).astype(int)
# 4. Calculate the slope of regions that each x_new value falls in.
lo = x_new_indices - 1
hi = x_new_indices
x_lo = self.x[lo]
x_hi = self.x[hi]
y_lo = self._y[lo]
y_hi = self._y[hi]
# Note that the following two expressions rely on the specifics of the
# broadcasting semantics.
slope = (y_hi - y_lo) / (x_hi - x_lo)[:, None]
# 5. Calculate the actual value for each entry in x_new.
y_new = slope*(x_new - x_lo)[:, None] + y_lo
return y_new
def _call_nearest(self, x_new):
""" Find nearest neighbour interpolated y_new = f(x_new)."""
# 2. Find where in the averaged data the values to interpolate
# would be inserted.
# Note: use side='left' (right) to searchsorted() to define the
# halfway point to be nearest to the left (right) neighbour
x_new_indices = searchsorted(self.x_bds, x_new, side='left')
# 3. Clip x_new_indices so that they are within the range of x indices.
x_new_indices = x_new_indices.clip(0, len(self.x)-1).astype(intp)
# 4. Calculate the actual value for each entry in x_new.
y_new = self._y[x_new_indices]
return y_new
def _call_spline(self, x_new):
return spleval(self._spline, x_new)
def _evaluate(self, x_new):
# 1. Handle values in x_new that are outside of x. Throw error,
# or return a list of mask array indicating the outofbounds values.
# The behavior is set by the bounds_error variable.
x_new = asarray(x_new)
y_new = self._call(self, x_new)
if not self._extrapolate:
below_bounds, above_bounds = self._check_bounds(x_new)
if len(y_new) > 0:
# Note fill_value must be broadcast up to the proper size
# and flattened to work here
y_new[below_bounds] = self._fill_value_below
y_new[above_bounds] = self._fill_value_above
return y_new
def _check_bounds(self, x_new):
"""Check the inputs for being in the bounds of the interpolated data.
Parameters
----------
x_new : array
Returns
-------
out_of_bounds : bool array
The mask on x_new of values that are out of the bounds.
"""
# If self.bounds_error is True, we raise an error if any x_new values
# fall outside the range of x. Otherwise, we return an array indicating
# which values are outside the boundary region.
below_bounds = x_new < self.x[0]
above_bounds = x_new > self.x[-1]
# !! Could provide more information about which values are out of bounds
if self.bounds_error and below_bounds.any():
raise ValueError("A value in x_new is below the interpolation "
"range.")
if self.bounds_error and above_bounds.any():
raise ValueError("A value in x_new is above the interpolation "
"range.")
# !! Should we emit a warning if some values are out of bounds?
# !! matlab does not.
return below_bounds, above_bounds
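# Hedged sketch of the ``fill_value="extrapolate"`` behaviour documented in
# the interp1d docstring; the sample data is an illustrative assumption.
# >>> x = np.arange(0, 10)
# >>> f = interp1d(x, 2 * x, fill_value="extrapolate")
# >>> float(f(12))       # linear extrapolation beyond x[-1]
# 24.0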
class _PPolyBase(object):
"""Base class for piecewise polynomials."""
__slots__ = ('c', 'x', 'extrapolate', 'axis')
def __init__(self, c, x, extrapolate=None, axis=0):
self.c = np.asarray(c)
self.x = np.ascontiguousarray(x, dtype=np.float64)
if extrapolate is None:
extrapolate = True
elif extrapolate != 'periodic':
extrapolate = bool(extrapolate)
self.extrapolate = extrapolate
if not (0 <= axis < self.c.ndim - 1):
raise ValueError("%s must be between 0 and %s" % (axis, c.ndim-1))
self.axis = axis
if axis != 0:
# roll the interpolation axis to be the first one in self.c
# More specifically, the target shape for self.c is (k, m, ...),
# and axis !=0 means that we have c.shape (..., k, m, ...)
# ^
# axis
# So we roll two of them.
self.c = np.rollaxis(self.c, axis+1)
self.c = np.rollaxis(self.c, axis+1)
if self.x.ndim != 1:
raise ValueError("x must be 1-dimensional")
if self.x.size < 2:
raise ValueError("at least 2 breakpoints are needed")
if self.c.ndim < 2:
raise ValueError("c must have at least 2 dimensions")
if self.c.shape[0] == 0:
raise ValueError("polynomial must be at least of order 0")
if self.c.shape[1] != self.x.size-1:
raise ValueError("number of coefficients != len(x)-1")
dx = np.diff(self.x)
if not (np.all(dx >= 0) or np.all(dx <= 0)):
raise ValueError("`x` must be strictly increasing or decreasing.")
dtype = self._get_dtype(self.c.dtype)
self.c = np.ascontiguousarray(self.c, dtype=dtype)
def _get_dtype(self, dtype):
if np.issubdtype(dtype, np.complexfloating) \
or np.issubdtype(self.c.dtype, np.complexfloating):
return np.complex_
else:
return np.float_
@classmethod
def construct_fast(cls, c, x, extrapolate=None, axis=0):
"""
Construct the piecewise polynomial without making checks.
Takes the same parameters as the constructor. Input arguments
`c` and `x` must be arrays of the correct shape and type. The
`c` array can only be of dtypes float and complex, and `x`
array must have dtype float.
"""
self = object.__new__(cls)
self.c = c
self.x = x
self.axis = axis
if extrapolate is None:
extrapolate = True
self.extrapolate = extrapolate
return self
def _ensure_c_contiguous(self):
"""
c and x may be modified by the user. The Cython code expects
that they are C contiguous.
"""
if not self.x.flags.c_contiguous:
self.x = self.x.copy()
if not self.c.flags.c_contiguous:
self.c = self.c.copy()
def extend(self, c, x, right=None):
"""
Add additional breakpoints and coefficients to the polynomial.
Parameters
----------
c : ndarray, size (k, m, ...)
Additional coefficients for polynomials in intervals. Note that
the first additional interval will be formed using one of the
`self.x` end points.
x : ndarray, size (m,)
Additional breakpoints. Must be sorted in the same order as
`self.x` and either to the right or to the left of the current
breakpoints.
right
Deprecated argument. Has no effect.
.. deprecated:: 0.19
"""
if right is not None:
warnings.warn("`right` is deprecated and will be removed.")
c = np.asarray(c)
x = np.asarray(x)
if c.ndim < 2:
raise ValueError("invalid dimensions for c")
if x.ndim != 1:
raise ValueError("invalid dimensions for x")
if x.shape[0] != c.shape[1]:
raise ValueError("x and c have incompatible sizes")
if c.shape[2:] != self.c.shape[2:] or c.ndim != self.c.ndim:
raise ValueError("c and self.c have incompatible shapes")
if c.size == 0:
return
dx = np.diff(x)
if not (np.all(dx >= 0) or np.all(dx <= 0)):
raise ValueError("`x` is not sorted.")
if self.x[-1] >= self.x[0]:
if not x[-1] >= x[0]:
raise ValueError("`x` is in a different order "
"than `self.x`.")
if x[0] >= self.x[-1]:
action = 'append'
elif x[-1] <= self.x[0]:
action = 'prepend'
else:
raise ValueError("`x` is neither to the left nor to the right "
"of `self.x`.")
else:
if not x[-1] <= x[0]:
raise ValueError("`x` is in a different order "
"than `self.x`.")
if x[0] <= self.x[-1]:
action = 'append'
elif x[-1] >= self.x[0]:
action = 'prepend'
else:
raise ValueError("`x` is neither to the left nor to the right "
"of `self.x`.")
dtype = self._get_dtype(c.dtype)
k2 = max(c.shape[0], self.c.shape[0])
c2 = np.zeros((k2, self.c.shape[1] + c.shape[1]) + self.c.shape[2:],
dtype=dtype)
if action == 'append':
c2[k2-self.c.shape[0]:, :self.c.shape[1]] = self.c
c2[k2-c.shape[0]:, self.c.shape[1]:] = c
self.x = np.r_[self.x, x]
elif action == 'prepend':
c2[k2-self.c.shape[0]:, :c.shape[1]] = c
c2[k2-c.shape[0]:, c.shape[1]:] = self.c
self.x = np.r_[x, self.x]
self.c = c2
def __call__(self, x, nu=0, extrapolate=None):
"""
Evaluate the piecewise polynomial or its derivative.
Parameters
----------
x : array_like
Points to evaluate the interpolant at.
nu : int, optional
Order of derivative to evaluate. Must be non-negative.
extrapolate : {bool, 'periodic', None}, optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs.
If 'periodic', periodic extrapolation is used.
If None (default), use `self.extrapolate`.
Returns
-------
y : array_like
Interpolated values. Shape is determined by replacing
the interpolation axis in the original array with the shape of x.
Notes
-----
Derivatives are evaluated piecewise for each polynomial
segment, even if the polynomial is not differentiable at the
breakpoints. The polynomial intervals are considered half-open,
``[a, b)``, except for the last interval which is closed
``[a, b]``.
"""
if extrapolate is None:
extrapolate = self.extrapolate
x = np.asarray(x)
x_shape, x_ndim = x.shape, x.ndim
x = np.ascontiguousarray(x.ravel(), dtype=np.float_)
# With periodic extrapolation we map x to the segment
# [self.x[0], self.x[-1]].
if extrapolate == 'periodic':
x = self.x[0] + (x - self.x[0]) % (self.x[-1] - self.x[0])
extrapolate = False
out = np.empty((len(x), prod(self.c.shape[2:])), dtype=self.c.dtype)
self._ensure_c_contiguous()
self._evaluate(x, nu, extrapolate, out)
out = out.reshape(x_shape + self.c.shape[2:])
if self.axis != 0:
# transpose to move the calculated values to the interpolation axis
l = list(range(out.ndim))
l = l[x_ndim:x_ndim+self.axis] + l[:x_ndim] + l[x_ndim+self.axis:]
out = out.transpose(l)
return out
class PPoly(_PPolyBase):
"""
Piecewise polynomial in terms of coefficients and breakpoints
The polynomial between ``x[i]`` and ``x[i + 1]`` is written in the
local power basis::
S = sum(c[m, i] * (xp - x[i])**(k-m) for m in range(k+1))
where ``k`` is the degree of the polynomial.
Parameters
----------
c : ndarray, shape (k, m, ...)
Polynomial coefficients, order `k` and `m` intervals
x : ndarray, shape (m+1,)
Polynomial breakpoints. Must be sorted in either increasing or
decreasing order.
extrapolate : bool or 'periodic', optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs. If 'periodic',
periodic extrapolation is used. Default is True.
axis : int, optional
Interpolation axis. Default is zero.
Attributes
----------
x : ndarray
Breakpoints.
c : ndarray
Coefficients of the polynomials. They are reshaped
to a 3-dimensional array with the last dimension representing
the trailing dimensions of the original coefficient array.
axis : int
Interpolation axis.
Methods
-------
__call__
derivative
antiderivative
integrate
solve
roots
extend
from_spline
from_bernstein_basis
construct_fast
See also
--------
BPoly : piecewise polynomials in the Bernstein basis
Notes
-----
High-order polynomials in the power basis can be numerically
unstable. Precision problems can start to appear for orders
larger than 20-30.
"""
def _evaluate(self, x, nu, extrapolate, out):
_ppoly.evaluate(self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, x, nu, bool(extrapolate), out)
def derivative(self, nu=1):
"""
Construct a new piecewise polynomial representing the derivative.
Parameters
----------
nu : int, optional
Order of derivative to evaluate. Default is 1, i.e. compute the
first derivative. If negative, the antiderivative is returned.
Returns
-------
pp : PPoly
Piecewise polynomial of order k2 = k - nu representing the derivative
of this polynomial.
Notes
-----
Derivatives are evaluated piecewise for each polynomial
segment, even if the polynomial is not differentiable at the
breakpoints. The polynomial intervals are considered half-open,
``[a, b)``, except for the last interval which is closed
``[a, b]``.
"""
if nu < 0:
return self.antiderivative(-nu)
# reduce order
if nu == 0:
c2 = self.c.copy()
else:
c2 = self.c[:-nu,:].copy()
if c2.shape[0] == 0:
# derivative of order 0 is zero
c2 = np.zeros((1,) + c2.shape[1:], dtype=c2.dtype)
# multiply by the correct rising factorials
factor = spec.poch(np.arange(c2.shape[0], 0, -1), nu)
c2 *= factor[(slice(None),) + (None,)*(c2.ndim-1)]
# construct a compatible polynomial
return self.construct_fast(c2, self.x, self.extrapolate, self.axis)
def antiderivative(self, nu=1):
"""
Construct a new piecewise polynomial representing the antiderivative.
Antiderivative is also the indefinite integral of the function,
and derivative is its inverse operation.
Parameters
----------
nu : int, optional
Order of antiderivative to evaluate. Default is 1, i.e. compute
the first integral. If negative, the derivative is returned.
Returns
-------
pp : PPoly
Piecewise polynomial of order k2 = k + nu representing
the antiderivative of this polynomial.
Notes
-----
The antiderivative returned by this function is continuous and
continuously differentiable to order n-1, up to floating point
rounding error.
If antiderivative is computed and ``self.extrapolate='periodic'``,
it will be set to False for the returned instance. This is done because
the antiderivative is no longer periodic and its correct evaluation
outside of the initially given x interval is difficult.
"""
if nu <= 0:
return self.derivative(-nu)
c = np.zeros((self.c.shape[0] + nu, self.c.shape[1]) + self.c.shape[2:],
dtype=self.c.dtype)
c[:-nu] = self.c
# divide by the correct rising factorials
factor = spec.poch(np.arange(self.c.shape[0], 0, -1), nu)
c[:-nu] /= factor[(slice(None),) + (None,)*(c.ndim-1)]
# fix continuity of added degrees of freedom
self._ensure_c_contiguous()
_ppoly.fix_continuity(c.reshape(c.shape[0], c.shape[1], -1),
self.x, nu - 1)
if self.extrapolate == 'periodic':
extrapolate = False
else:
extrapolate = self.extrapolate
# construct a compatible polynomial
return self.construct_fast(c, self.x, extrapolate, self.axis)
def integrate(self, a, b, extrapolate=None):
"""
Compute a definite integral over a piecewise polynomial.
Parameters
----------
a : float
Lower integration bound
b : float
Upper integration bound
extrapolate : {bool, 'periodic', None}, optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs.
If 'periodic', periodic extrapolation is used.
If None (default), use `self.extrapolate`.
Returns
-------
ig : array_like
Definite integral of the piecewise polynomial over [a, b]
"""
if extrapolate is None:
extrapolate = self.extrapolate
# Swap integration bounds if needed
sign = 1
if b < a:
a, b = b, a
sign = -1
range_int = np.empty((prod(self.c.shape[2:]),), dtype=self.c.dtype)
self._ensure_c_contiguous()
# Compute the integral.
if extrapolate == 'periodic':
# Split the integral into the part over period (can be several
# of them) and the remaining part.
xs, xe = self.x[0], self.x[-1]
period = xe - xs
interval = b - a
n_periods, left = divmod(interval, period)
if n_periods > 0:
_ppoly.integrate(
self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, xs, xe, False, out=range_int)
range_int *= n_periods
else:
range_int.fill(0)
# Map a to [xs, xe], b is always a + left.
a = xs + (a - xs) % period
b = a + left
# If b <= xe then we need to integrate over [a, b], otherwise
# over [a, xe] and from xs to what is remained.
remainder_int = np.empty_like(range_int)
if b <= xe:
_ppoly.integrate(
self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, a, b, False, out=remainder_int)
range_int += remainder_int
else:
_ppoly.integrate(
self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, a, xe, False, out=remainder_int)
range_int += remainder_int
_ppoly.integrate(
self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, xs, xs + left + a - xe, False, out=remainder_int)
range_int += remainder_int
else:
_ppoly.integrate(
self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, a, b, bool(extrapolate), out=range_int)
# Return
range_int *= sign
return range_int.reshape(self.c.shape[2:])
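# Hedged sketch (not from the original docstrings) tying the calculus
# methods together on an assumed quadratic piece, x**2 on [0, 2]:
# >>> pp = PPoly([[1.], [0.], [0.]], [0., 2.])
# >>> pp.integrate(0, 2)                               # 8/3
# >>> pp.antiderivative()(2) - pp.antiderivative()(0)  # same value
# >>> pp.derivative()(1.0)                             # 2.0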
def solve(self, y=0., discontinuity=True, extrapolate=None):
"""
        Find real solutions of the equation ``pp(x) == y``.
Parameters
----------
y : float, optional
Right-hand side. Default is zero.
discontinuity : bool, optional
Whether to report sign changes across discontinuities at
breakpoints as roots.
extrapolate : {bool, 'periodic', None}, optional
If bool, determines whether to return roots from the polynomial
extrapolated based on first and last intervals, 'periodic' works
the same as False. If None (default), use `self.extrapolate`.
Returns
-------
roots : ndarray
Roots of the polynomial(s).
            If the PPoly object describes multiple polynomials, the
            return value is an object array in which each element is an
            ndarray containing the roots.
Notes
-----
This routine works only on real-valued polynomials.
If the piecewise polynomial contains sections that are
identically zero, the root list will contain the start point
of the corresponding interval, followed by a ``nan`` value.
If the polynomial is discontinuous across a breakpoint, and
there is a sign change across the breakpoint, this is reported
        if the `discontinuity` parameter is True.
Examples
--------
Finding roots of ``[x**2 - 1, (x - 1)**2]`` defined on intervals
``[-2, 1], [1, 2]``:
>>> from scipy.interpolate import PPoly
>>> pp = PPoly(np.array([[1, -4, 3], [1, 0, 0]]).T, [-2, 1, 2])
>>> pp.roots()
array([-1., 1.])
"""
if extrapolate is None:
extrapolate = self.extrapolate
self._ensure_c_contiguous()
if np.issubdtype(self.c.dtype, np.complexfloating):
raise ValueError("Root finding is only for "
"real-valued polynomials")
y = float(y)
r = _ppoly.real_roots(self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, y, bool(discontinuity),
bool(extrapolate))
if self.c.ndim == 2:
return r[0]
else:
r2 = np.empty(prod(self.c.shape[2:]), dtype=object)
# this for-loop is equivalent to ``r2[...] = r``, but that's broken
# in numpy 1.6.0
for ii, root in enumerate(r):
r2[ii] = root
return r2.reshape(self.c.shape[2:])
def roots(self, discontinuity=True, extrapolate=None):
"""
        Find real roots of the piecewise polynomial.
Parameters
----------
discontinuity : bool, optional
Whether to report sign changes across discontinuities at
breakpoints as roots.
extrapolate : {bool, 'periodic', None}, optional
If bool, determines whether to return roots from the polynomial
extrapolated based on first and last intervals, 'periodic' works
the same as False. If None (default), use `self.extrapolate`.
Returns
-------
roots : ndarray
Roots of the polynomial(s).
            If the PPoly object describes multiple polynomials, the
            return value is an object array in which each element is an
            ndarray containing the roots.
See Also
--------
PPoly.solve
"""
return self.solve(0, discontinuity, extrapolate)
@classmethod
def from_spline(cls, tck, extrapolate=None):
"""
Construct a piecewise polynomial from a spline
Parameters
----------
tck
A spline, as returned by `splrep`
extrapolate : bool or 'periodic', optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs.
If 'periodic', periodic extrapolation is used. Default is True.
"""
t, c, k = tck
cvals = np.empty((k + 1, len(t)-1), dtype=c.dtype)
for m in xrange(k, -1, -1):
y = fitpack.splev(t[:-1], tck, der=m)
cvals[k - m, :] = y/spec.gamma(m+1)
return cls.construct_fast(cvals, t, extrapolate)
@classmethod
def from_bernstein_basis(cls, bp, extrapolate=None):
"""
Construct a piecewise polynomial in the power basis
from a polynomial in Bernstein basis.
Parameters
----------
bp : BPoly
A Bernstein basis polynomial, as created by BPoly
extrapolate : bool or 'periodic', optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs.
If 'periodic', periodic extrapolation is used. Default is True.
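        Examples
        --------
        A small sketch of the conversion; the Bernstein polynomial below
        equals ``1 + 2*x`` on ``[0, 1]``, so the converted polynomial
        should give 2 at ``x = 0.5``:
        >>> import numpy as np
        >>> from scipy.interpolate import BPoly, PPoly
        >>> bp = BPoly(np.array([[1.0], [2.0], [3.0]]), [0.0, 1.0])
        >>> pp = PPoly.from_bernstein_basis(bp)
        >>> pp(0.5)  # doctest: +SKIP
        array(2.0)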
"""
dx = np.diff(bp.x)
k = bp.c.shape[0] - 1 # polynomial order
rest = (None,)*(bp.c.ndim-2)
c = np.zeros_like(bp.c)
for a in range(k+1):
factor = (-1)**(a) * comb(k, a) * bp.c[a]
for s in range(a, k+1):
val = comb(k-a, s-a) * (-1)**s
c[k-s] += factor * val / dx[(slice(None),)+rest]**s
if extrapolate is None:
extrapolate = bp.extrapolate
return cls.construct_fast(c, bp.x, extrapolate, bp.axis)
class BPoly(_PPolyBase):
"""Piecewise polynomial in terms of coefficients and breakpoints.
The polynomial between ``x[i]`` and ``x[i + 1]`` is written in the
Bernstein polynomial basis::
S = sum(c[a, i] * b(a, k; x) for a in range(k+1)),
where ``k`` is the degree of the polynomial, and::
b(a, k; x) = binom(k, a) * t**a * (1 - t)**(k - a),
with ``t = (x - x[i]) / (x[i+1] - x[i])`` and ``binom`` is the binomial
coefficient.
Parameters
----------
c : ndarray, shape (k, m, ...)
Polynomial coefficients, order `k` and `m` intervals
x : ndarray, shape (m+1,)
Polynomial breakpoints. Must be sorted in either increasing or
decreasing order.
extrapolate : bool, optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs. If 'periodic',
periodic extrapolation is used. Default is True.
axis : int, optional
Interpolation axis. Default is zero.
Attributes
----------
x : ndarray
Breakpoints.
c : ndarray
Coefficients of the polynomials. They are reshaped
to a 3-dimensional array with the last dimension representing
the trailing dimensions of the original coefficient array.
axis : int
Interpolation axis.
Methods
-------
__call__
extend
derivative
antiderivative
integrate
construct_fast
from_power_basis
from_derivatives
See also
--------
PPoly : piecewise polynomials in the power basis
Notes
-----
Properties of Bernstein polynomials are well documented in the literature.
Here's a non-exhaustive list:
.. [1] http://en.wikipedia.org/wiki/Bernstein_polynomial
.. [2] Kenneth I. Joy, Bernstein polynomials,
http://www.idav.ucdavis.edu/education/CAGDNotes/Bernstein-Polynomials.pdf
.. [3] E. H. Doha, A. H. Bhrawy, and M. A. Saker, Boundary Value Problems,
vol 2011, article ID 829546, :doi:`10.1155/2011/829543`.
Examples
--------
>>> from scipy.interpolate import BPoly
>>> x = [0, 1]
>>> c = [[1], [2], [3]]
>>> bp = BPoly(c, x)
This creates a 2nd order polynomial
.. math::
B(x) = 1 \\times b_{0, 2}(x) + 2 \\times b_{1, 2}(x) + 3 \\times b_{2, 2}(x) \\\\
= 1 \\times (1-x)^2 + 2 \\times 2 x (1 - x) + 3 \\times x^2
"""
def _evaluate(self, x, nu, extrapolate, out):
_ppoly.evaluate_bernstein(
self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, x, nu, bool(extrapolate), out)
def derivative(self, nu=1):
"""
Construct a new piecewise polynomial representing the derivative.
Parameters
----------
nu : int, optional
Order of derivative to evaluate. Default is 1, i.e. compute the
first derivative. If negative, the antiderivative is returned.
Returns
-------
bp : BPoly
Piecewise polynomial of order k - nu representing the derivative of
this polynomial.
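        Examples
        --------
        A brief sketch; the Bernstein polynomial below equals ``1 + 2*x``
        on ``[0, 1]``, so its derivative is the constant 2 (value shown is
        the expected one):
        >>> import numpy as np
        >>> from scipy.interpolate import BPoly
        >>> bp = BPoly(np.array([[1.0], [2.0], [3.0]]), [0.0, 1.0])
        >>> bp.derivative()(0.25)  # doctest: +SKIP
        array(2.0)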
"""
if nu < 0:
return self.antiderivative(-nu)
if nu > 1:
bp = self
for k in range(nu):
bp = bp.derivative()
return bp
# reduce order
if nu == 0:
c2 = self.c.copy()
else:
# For a polynomial
# B(x) = \sum_{a=0}^{k} c_a b_{a, k}(x),
# we use the fact that
# b'_{a, k} = k ( b_{a-1, k-1} - b_{a, k-1} ),
# which leads to
# B'(x) = \sum_{a=0}^{k-1} (c_{a+1} - c_a) b_{a, k-1}
#
# finally, for an interval [y, y + dy] with dy != 1,
# we need to correct for an extra power of dy
rest = (None,)*(self.c.ndim-2)
k = self.c.shape[0] - 1
dx = np.diff(self.x)[(None, slice(None))+rest]
c2 = k * np.diff(self.c, axis=0) / dx
if c2.shape[0] == 0:
            # all coefficients were differentiated away, so the derivative
            # is identically zero
c2 = np.zeros((1,) + c2.shape[1:], dtype=c2.dtype)
# construct a compatible polynomial
return self.construct_fast(c2, self.x, self.extrapolate, self.axis)
def antiderivative(self, nu=1):
"""
Construct a new piecewise polynomial representing the antiderivative.
Parameters
----------
nu : int, optional
Order of antiderivative to evaluate. Default is 1, i.e. compute
the first integral. If negative, the derivative is returned.
Returns
-------
bp : BPoly
Piecewise polynomial of order k + nu representing the
antiderivative of this polynomial.
Notes
-----
If antiderivative is computed and ``self.extrapolate='periodic'``,
it will be set to False for the returned instance. This is done because
the antiderivative is no longer periodic and its correct evaluation
outside of the initially given x interval is difficult.
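        Examples
        --------
        A minimal sketch; for the constant polynomial ``B(x) = 1`` on
        ``[0, 1]`` the antiderivative should equal ``x`` (value shown is
        the expected one):
        >>> import numpy as np
        >>> from scipy.interpolate import BPoly
        >>> bp = BPoly(np.array([[1.0]]), [0.0, 1.0])
        >>> bp.antiderivative()(1.0)  # doctest: +SKIP
        array(1.0)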
"""
if nu <= 0:
return self.derivative(-nu)
if nu > 1:
bp = self
for k in range(nu):
bp = bp.antiderivative()
return bp
# Construct the indefinite integrals on individual intervals
c, x = self.c, self.x
k = c.shape[0]
c2 = np.zeros((k+1,) + c.shape[1:], dtype=c.dtype)
c2[1:, ...] = np.cumsum(c, axis=0) / k
delta = x[1:] - x[:-1]
c2 *= delta[(None, slice(None)) + (None,)*(c.ndim-2)]
# Now fix continuity: on the very first interval, take the integration
# constant to be zero; on an interval [x_j, x_{j+1}) with j>0,
# the integration constant is then equal to the jump of the `bp` at x_j.
# The latter is given by the coefficient of B_{n+1, n+1}
# *on the previous interval* (other B. polynomials are zero at the breakpoint)
# Finally, use the fact that BPs form a partition of unity.
c2[:,1:] += np.cumsum(c2[k,:], axis=0)[:-1]
if self.extrapolate == 'periodic':
extrapolate = False
else:
extrapolate = self.extrapolate
return self.construct_fast(c2, x, extrapolate, axis=self.axis)
def integrate(self, a, b, extrapolate=None):
"""
Compute a definite integral over a piecewise polynomial.
Parameters
----------
a : float
Lower integration bound
b : float
Upper integration bound
extrapolate : {bool, 'periodic', None}, optional
Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs. If 'periodic', periodic
extrapolation is used. If None (default), use `self.extrapolate`.
Returns
-------
array_like
Definite integral of the piecewise polynomial over [a, b]
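        Examples
        --------
        A minimal sketch; integrating the constant polynomial 1 over
        ``[0, 1]`` should give 1 (value shown is the expected one):
        >>> import numpy as np
        >>> from scipy.interpolate import BPoly
        >>> bp = BPoly(np.array([[1.0]]), [0.0, 1.0])
        >>> bp.integrate(0, 1)  # doctest: +SKIP
        array(1.0)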
"""
# XXX: can probably use instead the fact that
# \int_0^{1} B_{j, n}(x) \dx = 1/(n+1)
ib = self.antiderivative()
if extrapolate is None:
extrapolate = self.extrapolate
        # ib.extrapolate shouldn't be 'periodic'; it is converted to
        # False for 'periodic' in the antiderivative() call.
if extrapolate != 'periodic':
ib.extrapolate = extrapolate
if extrapolate == 'periodic':
# Split the integral into the part over period (can be several
# of them) and the remaining part.
# For simplicity and clarity convert to a <= b case.
if a <= b:
sign = 1
else:
a, b = b, a
sign = -1
xs, xe = self.x[0], self.x[-1]
period = xe - xs
interval = b - a
n_periods, left = divmod(interval, period)
res = n_periods * (ib(xe) - ib(xs))
# Map a and b to [xs, xe].
a = xs + (a - xs) % period
b = a + left
            # If b <= xe then we need to integrate over [a, b], otherwise
            # over [a, xe] and then from xs over what remains.
if b <= xe:
res += ib(b) - ib(a)
else:
res += ib(xe) - ib(a) + ib(xs + left + a - xe) - ib(xs)
return sign * res
else:
return ib(b) - ib(a)
def extend(self, c, x, right=None):
k = max(self.c.shape[0], c.shape[0])
self.c = self._raise_degree(self.c, k - self.c.shape[0])
c = self._raise_degree(c, k - c.shape[0])
return _PPolyBase.extend(self, c, x, right)
extend.__doc__ = _PPolyBase.extend.__doc__
@classmethod
def from_power_basis(cls, pp, extrapolate=None):
"""
Construct a piecewise polynomial in Bernstein basis
from a power basis polynomial.
Parameters
----------
pp : PPoly
A piecewise polynomial in the power basis
extrapolate : bool or 'periodic', optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs.
If 'periodic', periodic extrapolation is used. Default is True.
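        Examples
        --------
        A small sketch of the conversion; the power-basis polynomial below
        is ``1 + 2*x`` on ``[0, 1]``, so the Bernstein form should give 2
        at ``x = 0.5``:
        >>> import numpy as np
        >>> from scipy.interpolate import BPoly, PPoly
        >>> pp = PPoly(np.array([[2.0], [1.0]]), [0.0, 1.0])
        >>> bp = BPoly.from_power_basis(pp)
        >>> bp(0.5)  # doctest: +SKIP
        array(2.0)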
"""
dx = np.diff(pp.x)
k = pp.c.shape[0] - 1 # polynomial order
rest = (None,)*(pp.c.ndim-2)
c = np.zeros_like(pp.c)
for a in range(k+1):
factor = pp.c[a] / comb(k, k-a) * dx[(slice(None),)+rest]**(k-a)
for j in range(k-a, k+1):
c[j] += factor * comb(j, k-a)
if extrapolate is None:
extrapolate = pp.extrapolate
return cls.construct_fast(c, pp.x, extrapolate, pp.axis)
@classmethod
def from_derivatives(cls, xi, yi, orders=None, extrapolate=None):
"""Construct a piecewise polynomial in the Bernstein basis,
compatible with the specified values and derivatives at breakpoints.
Parameters
----------
xi : array_like
sorted 1D array of x-coordinates
yi : array_like or list of array_likes
``yi[i][j]`` is the ``j``-th derivative known at ``xi[i]``
orders : None or int or array_like of ints. Default: None.
Specifies the degree of local polynomials. If not None, some
derivatives are ignored.
extrapolate : bool or 'periodic', optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs.
If 'periodic', periodic extrapolation is used. Default is True.
Notes
-----
If ``k`` derivatives are specified at a breakpoint ``x``, the
constructed polynomial is exactly ``k`` times continuously
differentiable at ``x``, unless the ``order`` is provided explicitly.
In the latter case, the smoothness of the polynomial at
the breakpoint is controlled by the ``order``.
Deduces the number of derivatives to match at each end
from ``order`` and the number of derivatives available. If
possible it uses the same number of derivatives from
each end; if the number is odd it tries to take the
extra one from y2. In any case if not enough derivatives
are available at one end or another it draws enough to
make up the total from the other end.
If the order is too high and not enough derivatives are available,
an exception is raised.
Examples
--------
>>> from scipy.interpolate import BPoly
>>> BPoly.from_derivatives([0, 1], [[1, 2], [3, 4]])
Creates a polynomial `f(x)` of degree 3, defined on `[0, 1]`
such that `f(0) = 1, df/dx(0) = 2, f(1) = 3, df/dx(1) = 4`
>>> BPoly.from_derivatives([0, 1, 2], [[0, 1], [0], [2]])
Creates a piecewise polynomial `f(x)`, such that
`f(0) = f(1) = 0`, `f(2) = 2`, and `df/dx(0) = 1`.
Based on the number of derivatives provided, the order of the
local polynomials is 2 on `[0, 1]` and 1 on `[1, 2]`.
Notice that no restriction is imposed on the derivatives at
`x = 1` and `x = 2`.
Indeed, the explicit form of the polynomial is::
f(x) = | x * (1 - x), 0 <= x < 1
| 2 * (x - 1), 1 <= x <= 2
So that f'(1-0) = -1 and f'(1+0) = 2
"""
xi = np.asarray(xi)
if len(xi) != len(yi):
raise ValueError("xi and yi need to have the same length")
        if np.any(xi[1:] - xi[:-1] <= 0):
raise ValueError("x coordinates are not in increasing order")
# number of intervals
m = len(xi) - 1
# global poly order is k-1, local orders are <=k and can vary
try:
k = max(len(yi[i]) + len(yi[i+1]) for i in range(m))
except TypeError:
raise ValueError("Using a 1D array for y? Please .reshape(-1, 1).")
if orders is None:
orders = [None] * m
else:
if isinstance(orders, (integer_types, np.integer)):
orders = [orders] * m
k = max(k, max(orders))
if any(o <= 0 for o in orders):
raise ValueError("Orders must be positive.")
c = []
for i in range(m):
y1, y2 = yi[i], yi[i+1]
if orders[i] is None:
n1, n2 = len(y1), len(y2)
else:
n = orders[i]+1
n1 = min(n//2, len(y1))
n2 = min(n - n1, len(y2))
                n1 = min(n - n2, len(y1))
if n1+n2 != n:
raise ValueError("Point %g has %d derivatives, point %g"
" has %d derivatives, but order %d requested" %
(xi[i], len(y1), xi[i+1], len(y2), orders[i]))
if not (n1 <= len(y1) and n2 <= len(y2)):
raise ValueError("`order` input incompatible with"
" length y1 or y2.")
b = BPoly._construct_from_derivatives(xi[i], xi[i+1], y1[:n1], y2[:n2])
if len(b) < k:
b = BPoly._raise_degree(b, k - len(b))
c.append(b)
c = np.asarray(c)
return cls(c.swapaxes(0, 1), xi, extrapolate)
@staticmethod
def _construct_from_derivatives(xa, xb, ya, yb):
"""Compute the coefficients of a polynomial in the Bernstein basis
given the values and derivatives at the edges.
Return the coefficients of a polynomial in the Bernstein basis
defined on `[xa, xb]` and having the values and derivatives at the
endpoints ``xa`` and ``xb`` as specified by ``ya`` and ``yb``.
The polynomial constructed is of the minimal possible degree, i.e.,
if the lengths of ``ya`` and ``yb`` are ``na`` and ``nb``, the degree
of the polynomial is ``na + nb - 1``.
Parameters
----------
xa : float
Left-hand end point of the interval
xb : float
Right-hand end point of the interval
ya : array_like
Derivatives at ``xa``. ``ya[0]`` is the value of the function, and
``ya[i]`` for ``i > 0`` is the value of the ``i``-th derivative.
yb : array_like
Derivatives at ``xb``.
Returns
-------
array
coefficient array of a polynomial having specified derivatives
Notes
-----
        This uses several properties of Bernstein basis functions.
        First of all,
.. math:: b'_{a, n} = n (b_{a-1, n-1} - b_{a, n-1})
If B(x) is a linear combination of the form
.. math:: B(x) = \sum_{a=0}^{n} c_a b_{a, n},
        then :math:`B'(x) = n \sum_{a=0}^{n-1} (c_{a+1} - c_{a}) b_{a, n-1}`.
Iterating the latter one, one finds for the q-th derivative
.. math:: B^{q}(x) = n!/(n-q)! \sum_{a=0}^{n-q} Q_a b_{a, n-q},
with
        .. math:: Q_a = \sum_{j=0}^{q} (-1)^{j+q} comb(q, j) c_{j+a}
        This way, only `a=0` contributes to :math:`B^{q}(x = xa)`, and
`c_q` are found one by one by iterating `q = 0, ..., na`.
At `x = xb` it's the same with `a = n - q`.
"""
ya, yb = np.asarray(ya), np.asarray(yb)
if ya.shape[1:] != yb.shape[1:]:
raise ValueError('ya and yb have incompatible dimensions.')
dta, dtb = ya.dtype, yb.dtype
if (np.issubdtype(dta, np.complexfloating) or
np.issubdtype(dtb, np.complexfloating)):
dt = np.complex_
else:
dt = np.float_
na, nb = len(ya), len(yb)
n = na + nb
c = np.empty((na+nb,) + ya.shape[1:], dtype=dt)
# compute coefficients of a polynomial degree na+nb-1
# walk left-to-right
for q in range(0, na):
c[q] = ya[q] / spec.poch(n - q, q) * (xb - xa)**q
for j in range(0, q):
c[q] -= (-1)**(j+q) * comb(q, j) * c[j]
# now walk right-to-left
for q in range(0, nb):
c[-q-1] = yb[q] / spec.poch(n - q, q) * (-1)**q * (xb - xa)**q
for j in range(0, q):
c[-q-1] -= (-1)**(j+1) * comb(q, j+1) * c[-q+j]
return c
@staticmethod
def _raise_degree(c, d):
"""Raise a degree of a polynomial in the Bernstein basis.
Given the coefficients of a polynomial degree `k`, return (the
coefficients of) the equivalent polynomial of degree `k+d`.
Parameters
----------
c : array_like
coefficient array, 1D
d : integer
Returns
-------
array
coefficient array, 1D array of length `c.shape[0] + d`
Notes
-----
This uses the fact that a Bernstein polynomial `b_{a, k}` can be
identically represented as a linear combination of polynomials of
a higher degree `k+d`:
.. math:: b_{a, k} = comb(k, a) \sum_{j=0}^{d} b_{a+j, k+d} \
comb(d, j) / comb(k+d, a+j)
"""
if d == 0:
return c
k = c.shape[0] - 1
out = np.zeros((c.shape[0] + d,) + c.shape[1:], dtype=c.dtype)
for a in range(c.shape[0]):
f = c[a] * comb(k, a)
for j in range(d+1):
out[a+j] += f * comb(d, j) / comb(k+d, a+j)
return out
class NdPPoly(object):
"""
Piecewise tensor product polynomial
The value at point `xp = (x', y', z', ...)` is evaluated by first
computing the interval indices `i` such that::
x[0][i[0]] <= x' < x[0][i[0]+1]
x[1][i[1]] <= y' < x[1][i[1]+1]
...
and then computing::
S = sum(c[k0-m0-1,...,kn-mn-1,i[0],...,i[n]]
* (xp[0] - x[0][i[0]])**m0
* ...
* (xp[n] - x[n][i[n]])**mn
for m0 in range(k[0]+1)
...
for mn in range(k[n]+1))
where ``k[j]`` is the degree of the polynomial in dimension j. This
representation is the piecewise multivariate power basis.
Parameters
----------
    c : ndarray, shape (k0, ..., kn, m0, ..., mn, ...)
        Polynomial coefficients, with polynomial order `kj` and
        `mj` intervals for each dimension `j`.
x : ndim-tuple of ndarrays, shapes (mj+1,)
Polynomial breakpoints for each dimension. These must be
sorted in increasing order.
extrapolate : bool, optional
Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs. Default: True.
Attributes
----------
x : tuple of ndarrays
Breakpoints.
c : ndarray
Coefficients of the polynomials.
Methods
-------
__call__
construct_fast
See also
--------
PPoly : piecewise polynomials in 1D
Notes
-----
High-order polynomials in the power basis can be numerically
unstable.
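    Examples
    --------
    A small illustrative sketch: a 2-d piecewise polynomial of degree
    (0, 0) with coefficient 1 on a single cell evaluates to 1 everywhere
    inside the grid (output shown is the expected value):
    >>> import numpy as np
    >>> from scipy.interpolate import NdPPoly
    >>> x = (np.array([0.0, 1.0]), np.array([0.0, 1.0]))
    >>> c = np.ones((1, 1, 1, 1))
    >>> p = NdPPoly(c, x)
    >>> p(np.array([[0.3, 0.7]]))  # doctest: +SKIP
    array([ 1.])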
"""
def __init__(self, c, x, extrapolate=None):
self.x = tuple(np.ascontiguousarray(v, dtype=np.float64) for v in x)
self.c = np.asarray(c)
if extrapolate is None:
extrapolate = True
self.extrapolate = bool(extrapolate)
ndim = len(self.x)
if any(v.ndim != 1 for v in self.x):
raise ValueError("x arrays must all be 1-dimensional")
if any(v.size < 2 for v in self.x):
raise ValueError("x arrays must all contain at least 2 points")
if c.ndim < 2*ndim:
raise ValueError("c must have at least 2*len(x) dimensions")
if any(np.any(v[1:] - v[:-1] < 0) for v in self.x):
raise ValueError("x-coordinates are not in increasing order")
if any(a != b.size - 1 for a, b in zip(c.shape[ndim:2*ndim], self.x)):
raise ValueError("x and c do not agree on the number of intervals")
dtype = self._get_dtype(self.c.dtype)
self.c = np.ascontiguousarray(self.c, dtype=dtype)
@classmethod
def construct_fast(cls, c, x, extrapolate=None):
"""
Construct the piecewise polynomial without making checks.
Takes the same parameters as the constructor. Input arguments
`c` and `x` must be arrays of the correct shape and type. The
`c` array can only be of dtypes float and complex, and `x`
array must have dtype float.
"""
self = object.__new__(cls)
self.c = c
self.x = x
if extrapolate is None:
extrapolate = True
self.extrapolate = extrapolate
return self
def _get_dtype(self, dtype):
if np.issubdtype(dtype, np.complexfloating) \
or np.issubdtype(self.c.dtype, np.complexfloating):
return np.complex_
else:
return np.float_
def _ensure_c_contiguous(self):
if not self.c.flags.c_contiguous:
self.c = self.c.copy()
if not isinstance(self.x, tuple):
self.x = tuple(self.x)
def __call__(self, x, nu=None, extrapolate=None):
"""
Evaluate the piecewise polynomial or its derivative
Parameters
----------
x : array-like
Points to evaluate the interpolant at.
nu : tuple, optional
Orders of derivatives to evaluate. Each must be non-negative.
extrapolate : bool, optional
Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs.
Returns
-------
y : array-like
Interpolated values. Shape is determined by replacing
the interpolation axis in the original array with the shape of x.
Notes
-----
Derivatives are evaluated piecewise for each polynomial
segment, even if the polynomial is not differentiable at the
breakpoints. The polynomial intervals are considered half-open,
``[a, b)``, except for the last interval which is closed
``[a, b]``.
"""
if extrapolate is None:
extrapolate = self.extrapolate
else:
extrapolate = bool(extrapolate)
ndim = len(self.x)
x = _ndim_coords_from_arrays(x)
x_shape = x.shape
x = np.ascontiguousarray(x.reshape(-1, x.shape[-1]), dtype=np.float_)
if nu is None:
nu = np.zeros((ndim,), dtype=np.intc)
else:
nu = np.asarray(nu, dtype=np.intc)
if nu.ndim != 1 or nu.shape[0] != ndim:
raise ValueError("invalid number of derivative orders nu")
dim1 = prod(self.c.shape[:ndim])
dim2 = prod(self.c.shape[ndim:2*ndim])
dim3 = prod(self.c.shape[2*ndim:])
ks = np.array(self.c.shape[:ndim], dtype=np.intc)
out = np.empty((x.shape[0], dim3), dtype=self.c.dtype)
self._ensure_c_contiguous()
_ppoly.evaluate_nd(self.c.reshape(dim1, dim2, dim3),
self.x,
ks,
x,
nu,
bool(extrapolate),
out)
return out.reshape(x_shape[:-1] + self.c.shape[2*ndim:])
def _derivative_inplace(self, nu, axis):
"""
Compute 1D derivative along a selected dimension in-place
        May result in a non-contiguous c array.
"""
if nu < 0:
return self._antiderivative_inplace(-nu, axis)
ndim = len(self.x)
axis = axis % ndim
# reduce order
if nu == 0:
# noop
return
else:
sl = [slice(None)]*ndim
sl[axis] = slice(None, -nu, None)
c2 = self.c[sl]
if c2.shape[axis] == 0:
            # all coefficients were differentiated away, so the derivative
            # is identically zero
shp = list(c2.shape)
shp[axis] = 1
c2 = np.zeros(shp, dtype=c2.dtype)
# multiply by the correct rising factorials
factor = spec.poch(np.arange(c2.shape[axis], 0, -1), nu)
sl = [None]*c2.ndim
sl[axis] = slice(None)
c2 *= factor[sl]
self.c = c2
def _antiderivative_inplace(self, nu, axis):
"""
Compute 1D antiderivative along a selected dimension
        May result in a non-contiguous c array.
"""
if nu <= 0:
return self._derivative_inplace(-nu, axis)
ndim = len(self.x)
axis = axis % ndim
perm = list(range(ndim))
perm[0], perm[axis] = perm[axis], perm[0]
perm = perm + list(range(ndim, self.c.ndim))
c = self.c.transpose(perm)
c2 = np.zeros((c.shape[0] + nu,) + c.shape[1:],
dtype=c.dtype)
c2[:-nu] = c
# divide by the correct rising factorials
factor = spec.poch(np.arange(c.shape[0], 0, -1), nu)
c2[:-nu] /= factor[(slice(None),) + (None,)*(c.ndim-1)]
# fix continuity of added degrees of freedom
perm2 = list(range(c2.ndim))
perm2[1], perm2[ndim+axis] = perm2[ndim+axis], perm2[1]
c2 = c2.transpose(perm2)
c2 = c2.copy()
_ppoly.fix_continuity(c2.reshape(c2.shape[0], c2.shape[1], -1),
self.x[axis], nu-1)
c2 = c2.transpose(perm2)
c2 = c2.transpose(perm)
# Done
self.c = c2
def derivative(self, nu):
"""
Construct a new piecewise polynomial representing the derivative.
Parameters
----------
nu : ndim-tuple of int
Order of derivatives to evaluate for each dimension.
If negative, the antiderivative is returned.
Returns
-------
pp : NdPPoly
Piecewise polynomial of orders (k[0] - nu[0], ..., k[n] - nu[n])
representing the derivative of this polynomial.
Notes
-----
Derivatives are evaluated piecewise for each polynomial
segment, even if the polynomial is not differentiable at the
breakpoints. The polynomial intervals in each dimension are
considered half-open, ``[a, b)``, except for the last interval
which is closed ``[a, b]``.
"""
p = self.construct_fast(self.c.copy(), self.x, self.extrapolate)
for axis, n in enumerate(nu):
p._derivative_inplace(n, axis)
p._ensure_c_contiguous()
return p
def antiderivative(self, nu):
"""
Construct a new piecewise polynomial representing the antiderivative.
        Antiderivative is also the indefinite integral of the function,
        and derivative is its inverse operation.
Parameters
----------
nu : ndim-tuple of int
            Order of antiderivatives to evaluate for each dimension.
If negative, the derivative is returned.
Returns
-------
        pp : NdPPoly
            Piecewise polynomial of orders (k[0] + nu[0], ..., k[n] + nu[n])
            representing the antiderivative of this polynomial.
Notes
-----
The antiderivative returned by this function is continuous and
continuously differentiable to order n-1, up to floating point
rounding error.
"""
p = self.construct_fast(self.c.copy(), self.x, self.extrapolate)
for axis, n in enumerate(nu):
p._antiderivative_inplace(n, axis)
p._ensure_c_contiguous()
return p
def integrate_1d(self, a, b, axis, extrapolate=None):
r"""
Compute NdPPoly representation for one dimensional definite integral
The result is a piecewise polynomial representing the integral:
.. math::
p(y, z, ...) = \int_a^b dx\, p(x, y, z, ...)
where the dimension integrated over is specified with the
`axis` parameter.
Parameters
----------
a, b : float
Lower and upper bound for integration.
axis : int
Dimension over which to compute the 1D integrals
extrapolate : bool, optional
Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs.
Returns
-------
ig : NdPPoly or array-like
Definite integral of the piecewise polynomial over [a, b].
If the polynomial was 1-dimensional, an array is returned,
otherwise, an NdPPoly object.
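        Examples
        --------
        A hedged sketch: integrating a constant 2-d polynomial over
        ``x`` in ``[0, 1]`` leaves a 1-d piecewise polynomial equal to 1
        (output shown is the expected value):
        >>> import numpy as np
        >>> from scipy.interpolate import NdPPoly
        >>> x = (np.array([0.0, 1.0]), np.array([0.0, 1.0]))
        >>> p = NdPPoly(np.ones((1, 1, 1, 1)), x)
        >>> q = p.integrate_1d(0, 1, axis=0)
        >>> q(np.array([[0.5]]))  # doctest: +SKIP
        array([ 1.])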
"""
if extrapolate is None:
extrapolate = self.extrapolate
else:
extrapolate = bool(extrapolate)
ndim = len(self.x)
axis = int(axis) % ndim
# Reuse 1D integration routines
c = self.c
swap = list(range(c.ndim))
swap.insert(0, swap[axis])
del swap[axis + 1]
swap.insert(1, swap[ndim + axis])
del swap[ndim + axis + 1]
c = c.transpose(swap)
p = PPoly.construct_fast(c.reshape(c.shape[0], c.shape[1], -1),
self.x[axis],
extrapolate=extrapolate)
out = p.integrate(a, b, extrapolate=extrapolate)
# Construct result
if ndim == 1:
return out.reshape(c.shape[2:])
else:
c = out.reshape(c.shape[2:])
x = self.x[:axis] + self.x[axis+1:]
return self.construct_fast(c, x, extrapolate=extrapolate)
def integrate(self, ranges, extrapolate=None):
"""
Compute a definite integral over a piecewise polynomial.
Parameters
----------
ranges : ndim-tuple of 2-tuples float
Sequence of lower and upper bounds for each dimension,
``[(a[0], b[0]), ..., (a[ndim-1], b[ndim-1])]``
extrapolate : bool, optional
Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs.
Returns
-------
ig : array_like
Definite integral of the piecewise polynomial over
[a[0], b[0]] x ... x [a[ndim-1], b[ndim-1]]
"""
ndim = len(self.x)
if extrapolate is None:
extrapolate = self.extrapolate
else:
extrapolate = bool(extrapolate)
if not hasattr(ranges, '__len__') or len(ranges) != ndim:
raise ValueError("Range not a sequence of correct length")
self._ensure_c_contiguous()
# Reuse 1D integration routine
c = self.c
for n, (a, b) in enumerate(ranges):
swap = list(range(c.ndim))
swap.insert(1, swap[ndim - n])
del swap[ndim - n + 1]
c = c.transpose(swap)
p = PPoly.construct_fast(c, self.x[n], extrapolate=extrapolate)
out = p.integrate(a, b, extrapolate=extrapolate)
c = out.reshape(c.shape[2:])
return c
class RegularGridInterpolator(object):
"""
Interpolation on a regular grid in arbitrary dimensions
The data must be defined on a regular grid; the grid spacing however may be
uneven. Linear and nearest-neighbour interpolation are supported. After
setting up the interpolator object, the interpolation method (*linear* or
*nearest*) may be chosen at each evaluation.
Parameters
----------
points : tuple of ndarray of float, with shapes (m1, ), ..., (mn, )
The points defining the regular grid in n dimensions.
values : array_like, shape (m1, ..., mn, ...)
The data on the regular grid in n dimensions.
method : str, optional
The method of interpolation to perform. Supported are "linear" and
"nearest". This parameter will become the default for the object's
``__call__`` method. Default is "linear".
bounds_error : bool, optional
If True, when interpolated values are requested outside of the
domain of the input data, a ValueError is raised.
If False, then `fill_value` is used.
fill_value : number, optional
If provided, the value to use for points outside of the
interpolation domain. If None, values outside
the domain are extrapolated.
Methods
-------
__call__
Notes
-----
Contrary to LinearNDInterpolator and NearestNDInterpolator, this class
avoids expensive triangulation of the input data by taking advantage of the
regular grid structure.
.. versionadded:: 0.14
Examples
--------
Evaluate a simple example function on the points of a 3D grid:
>>> from scipy.interpolate import RegularGridInterpolator
>>> def f(x,y,z):
... return 2 * x**3 + 3 * y**2 - z
>>> x = np.linspace(1, 4, 11)
>>> y = np.linspace(4, 7, 22)
>>> z = np.linspace(7, 9, 33)
>>> data = f(*np.meshgrid(x, y, z, indexing='ij', sparse=True))
``data`` is now a 3D array with ``data[i,j,k] = f(x[i], y[j], z[k])``.
Next, define an interpolating function from this data:
>>> my_interpolating_function = RegularGridInterpolator((x, y, z), data)
Evaluate the interpolating function at the two points
``(x,y,z) = (2.1, 6.2, 8.3)`` and ``(3.3, 5.2, 7.1)``:
>>> pts = np.array([[2.1, 6.2, 8.3], [3.3, 5.2, 7.1]])
>>> my_interpolating_function(pts)
array([ 125.80469388, 146.30069388])
which is indeed a close approximation to
``[f(2.1, 6.2, 8.3), f(3.3, 5.2, 7.1)]``.
See also
--------
NearestNDInterpolator : Nearest neighbour interpolation on unstructured
data in N dimensions
LinearNDInterpolator : Piecewise linear interpolant on unstructured data
in N dimensions
References
----------
.. [1] Python package *regulargrid* by Johannes Buchner, see
https://pypi.python.org/pypi/regulargrid/
.. [2] Trilinear interpolation. (2013, January 17). In Wikipedia, The Free
Encyclopedia. Retrieved 27 Feb 2013 01:28.
http://en.wikipedia.org/w/index.php?title=Trilinear_interpolation&oldid=533448871
.. [3] Weiser, Alan, and Sergio E. Zarantonello. "A note on piecewise linear
and multilinear table interpolation in many dimensions." MATH.
COMPUT. 50.181 (1988): 189-196.
http://www.ams.org/journals/mcom/1988-50-181/S0025-5718-1988-0917826-0/S0025-5718-1988-0917826-0.pdf
"""
# this class is based on code originally programmed by Johannes Buchner,
# see https://github.com/JohannesBuchner/regulargrid
def __init__(self, points, values, method="linear", bounds_error=True,
fill_value=np.nan):
if method not in ["linear", "nearest"]:
raise ValueError("Method '%s' is not defined" % method)
self.method = method
self.bounds_error = bounds_error
if not hasattr(values, 'ndim'):
# allow reasonable duck-typed values
values = np.asarray(values)
if len(points) > values.ndim:
raise ValueError("There are %d point arrays, but values has %d "
"dimensions" % (len(points), values.ndim))
if hasattr(values, 'dtype') and hasattr(values, 'astype'):
if not np.issubdtype(values.dtype, np.inexact):
values = values.astype(float)
self.fill_value = fill_value
if fill_value is not None:
fill_value_dtype = np.asarray(fill_value).dtype
if (hasattr(values, 'dtype') and not
np.can_cast(fill_value_dtype, values.dtype,
casting='same_kind')):
raise ValueError("fill_value must be either 'None' or "
"of a type compatible with values")
for i, p in enumerate(points):
if not np.all(np.diff(p) > 0.):
raise ValueError("The points in dimension %d must be strictly "
"ascending" % i)
if not np.asarray(p).ndim == 1:
raise ValueError("The points in dimension %d must be "
"1-dimensional" % i)
if not values.shape[i] == len(p):
raise ValueError("There are %d points and %d values in "
"dimension %d" % (len(p), values.shape[i], i))
self.grid = tuple([np.asarray(p) for p in points])
self.values = values
def __call__(self, xi, method=None):
"""
Interpolation at coordinates
Parameters
----------
xi : ndarray of shape (..., ndim)
The coordinates to sample the gridded data at
method : str
The method of interpolation to perform. Supported are "linear" and
"nearest".
"""
method = self.method if method is None else method
if method not in ["linear", "nearest"]:
raise ValueError("Method '%s' is not defined" % method)
ndim = len(self.grid)
xi = _ndim_coords_from_arrays(xi, ndim=ndim)
if xi.shape[-1] != len(self.grid):
raise ValueError("The requested sample points xi have dimension "
"%d, but this RegularGridInterpolator has "
"dimension %d" % (xi.shape[1], ndim))
xi_shape = xi.shape
xi = xi.reshape(-1, xi_shape[-1])
if self.bounds_error:
for i, p in enumerate(xi.T):
if not np.logical_and(np.all(self.grid[i][0] <= p),
np.all(p <= self.grid[i][-1])):
raise ValueError("One of the requested xi is out of bounds "
"in dimension %d" % i)
indices, norm_distances, out_of_bounds = self._find_indices(xi.T)
if method == "linear":
result = self._evaluate_linear(indices, norm_distances, out_of_bounds)
elif method == "nearest":
result = self._evaluate_nearest(indices, norm_distances, out_of_bounds)
if not self.bounds_error and self.fill_value is not None:
result[out_of_bounds] = self.fill_value
return result.reshape(xi_shape[:-1] + self.values.shape[ndim:])
def _evaluate_linear(self, indices, norm_distances, out_of_bounds):
# slice for broadcasting over trailing dimensions in self.values
vslice = (slice(None),) + (None,)*(self.values.ndim - len(indices))
# find relevant values
        # each i and i+1 represents an edge
edges = itertools.product(*[[i, i + 1] for i in indices])
values = 0.
for edge_indices in edges:
weight = 1.
for ei, i, yi in zip(edge_indices, indices, norm_distances):
weight *= np.where(ei == i, 1 - yi, yi)
values += np.asarray(self.values[edge_indices]) * weight[vslice]
return values
def _evaluate_nearest(self, indices, norm_distances, out_of_bounds):
idx_res = []
for i, yi in zip(indices, norm_distances):
idx_res.append(np.where(yi <= .5, i, i + 1))
return self.values[idx_res]
def _find_indices(self, xi):
# find relevant edges between which xi are situated
indices = []
# compute distance to lower edge in unity units
norm_distances = []
# check for out of bounds xi
out_of_bounds = np.zeros((xi.shape[1]), dtype=bool)
# iterate through dimensions
for x, grid in zip(xi, self.grid):
i = np.searchsorted(grid, x) - 1
i[i < 0] = 0
i[i > grid.size - 2] = grid.size - 2
indices.append(i)
norm_distances.append((x - grid[i]) /
(grid[i + 1] - grid[i]))
if not self.bounds_error:
out_of_bounds += x < grid[0]
out_of_bounds += x > grid[-1]
return indices, norm_distances, out_of_bounds
def interpn(points, values, xi, method="linear", bounds_error=True,
fill_value=np.nan):
"""
Multidimensional interpolation on regular grids.
Parameters
----------
points : tuple of ndarray of float, with shapes (m1, ), ..., (mn, )
The points defining the regular grid in n dimensions.
values : array_like, shape (m1, ..., mn, ...)
The data on the regular grid in n dimensions.
xi : ndarray of shape (..., ndim)
The coordinates to sample the gridded data at
method : str, optional
        The method of interpolation to perform. Supported are "linear",
        "nearest", and "splinef2d". "splinef2d" is only supported for
        2-dimensional data.
bounds_error : bool, optional
If True, when interpolated values are requested outside of the
domain of the input data, a ValueError is raised.
If False, then `fill_value` is used.
fill_value : number, optional
If provided, the value to use for points outside of the
interpolation domain. If None, values outside
the domain are extrapolated. Extrapolation is not supported by method
"splinef2d".
Returns
-------
values_x : ndarray, shape xi.shape[:-1] + values.shape[ndim:]
Interpolated values at input coordinates.
Notes
-----
.. versionadded:: 0.14
See also
--------
NearestNDInterpolator : Nearest neighbour interpolation on unstructured
data in N dimensions
LinearNDInterpolator : Piecewise linear interpolant on unstructured data
in N dimensions
RegularGridInterpolator : Linear and nearest-neighbor Interpolation on a
regular grid in arbitrary dimensions
RectBivariateSpline : Bivariate spline approximation over a rectangular mesh
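    Examples
    --------
    A minimal illustrative sketch; since the data are linear, the value
    shown is the exact analytic result:
    >>> import numpy as np
    >>> from scipy.interpolate import interpn
    >>> x = np.linspace(0, 1, 5)
    >>> y = np.linspace(0, 1, 5)
    >>> values = x[:, None] + y[None, :]
    >>> interpn((x, y), values, np.array([[0.5, 0.25]]))  # doctest: +SKIP
    array([ 0.75])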
"""
# sanity check 'method' kwarg
if method not in ["linear", "nearest", "splinef2d"]:
raise ValueError("interpn only understands the methods 'linear', "
"'nearest', and 'splinef2d'. You provided %s." %
method)
if not hasattr(values, 'ndim'):
values = np.asarray(values)
ndim = values.ndim
if ndim > 2 and method == "splinef2d":
raise ValueError("The method spline2fd can only be used for "
"2-dimensional input data")
if not bounds_error and fill_value is None and method == "splinef2d":
raise ValueError("The method spline2fd does not support extrapolation.")
# sanity check consistency of input dimensions
if len(points) > ndim:
raise ValueError("There are %d point arrays, but values has %d "
"dimensions" % (len(points), ndim))
if len(points) != ndim and method == 'splinef2d':
raise ValueError("The method spline2fd can only be used for "
"scalar data with one point per coordinate")
# sanity check input grid
for i, p in enumerate(points):
if not np.all(np.diff(p) > 0.):
raise ValueError("The points in dimension %d must be strictly "
"ascending" % i)
if not np.asarray(p).ndim == 1:
raise ValueError("The points in dimension %d must be "
"1-dimensional" % i)
if not values.shape[i] == len(p):
raise ValueError("There are %d points and %d values in "
"dimension %d" % (len(p), values.shape[i], i))
grid = tuple([np.asarray(p) for p in points])
# sanity check requested xi
xi = _ndim_coords_from_arrays(xi, ndim=len(grid))
if xi.shape[-1] != len(grid):
raise ValueError("The requested sample points xi have dimension "
"%d, but this RegularGridInterpolator has "
"dimension %d" % (xi.shape[1], len(grid)))
for i, p in enumerate(xi.T):
if bounds_error and not np.logical_and(np.all(grid[i][0] <= p),
np.all(p <= grid[i][-1])):
raise ValueError("One of the requested xi is out of bounds "
"in dimension %d" % i)
# perform interpolation
if method == "linear":
interp = RegularGridInterpolator(points, values, method="linear",
bounds_error=bounds_error,
fill_value=fill_value)
return interp(xi)
elif method == "nearest":
interp = RegularGridInterpolator(points, values, method="nearest",
bounds_error=bounds_error,
fill_value=fill_value)
return interp(xi)
elif method == "splinef2d":
xi_shape = xi.shape
xi = xi.reshape(-1, xi.shape[-1])
# RectBivariateSpline doesn't support fill_value; we need to wrap here
idx_valid = np.all((grid[0][0] <= xi[:, 0], xi[:, 0] <= grid[0][-1],
grid[1][0] <= xi[:, 1], xi[:, 1] <= grid[1][-1]),
axis=0)
result = np.empty_like(xi[:, 0])
# make a copy of values for RectBivariateSpline
interp = RectBivariateSpline(points[0], points[1], values[:])
result[idx_valid] = interp.ev(xi[idx_valid, 0], xi[idx_valid, 1])
result[np.logical_not(idx_valid)] = fill_value
return result.reshape(xi_shape[:-1])
# backward compatibility wrapper
class ppform(PPoly):
"""
Deprecated piecewise polynomial class.
New code should use the `PPoly` class instead.
"""
def __init__(self, coeffs, breaks, fill=0.0, sort=False):
warnings.warn("ppform is deprecated -- use PPoly instead",
category=DeprecationWarning)
if sort:
breaks = np.sort(breaks)
else:
breaks = np.asarray(breaks)
PPoly.__init__(self, coeffs, breaks)
self.coeffs = self.c
self.breaks = self.x
self.K = self.coeffs.shape[0]
self.fill = fill
self.a = self.breaks[0]
self.b = self.breaks[-1]
def __call__(self, x):
return PPoly.__call__(self, x, 0, False)
def _evaluate(self, x, nu, extrapolate, out):
PPoly._evaluate(self, x, nu, extrapolate, out)
out[~((x >= self.a) & (x <= self.b))] = self.fill
return out
@classmethod
def fromspline(cls, xk, cvals, order, fill=0.0):
# Note: this spline representation is incompatible with FITPACK
N = len(xk)-1
sivals = np.empty((order+1, N), dtype=float)
for m in xrange(order, -1, -1):
fact = spec.gamma(m+1)
res = _fitpack._bspleval(xk[:-1], xk, cvals, order, m)
res /= fact
sivals[order-m, :] = res
return cls(sivals, xk, fill=fill)
def _dot0(a, b):
"""Similar to numpy.dot, but sum over last axis of a and 1st axis of b"""
if b.ndim <= 2:
return dot(a, b)
else:
axes = list(range(b.ndim))
axes.insert(-1, 0)
axes.pop(0)
return dot(a, b.transpose(axes))
def _find_smoothest(xk, yk, order, conds=None, B=None):
# construct Bmatrix, and Jmatrix
# e = J*c
# minimize norm(e,2) given B*c=yk
# if desired B can be given
# conds is ignored
N = len(xk)-1
K = order
if B is None:
B = _fitpack._bsplmat(order, xk)
J = _fitpack._bspldismat(order, xk)
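    # Split coefficient space using an SVD of B: the leading right singular
    # vectors (V1) span coefficients fixed by the interpolation conditions
    # B*c = yk, while the trailing ones (V2) span the null-space directions,
    # which are then chosen below to minimize the roughness measure ||J*c||_2.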
u, s, vh = scipy.linalg.svd(B)
ind = K-1
V2 = vh[-ind:,:].T
V1 = vh[:-ind,:].T
A = dot(J.T,J)
tmp = dot(V2.T,A)
Q = dot(tmp,V2)
p = scipy.linalg.solve(Q, tmp)
tmp = dot(V2,p)
tmp = np.eye(N+K) - tmp
tmp = dot(tmp,V1)
tmp = dot(tmp,np.diag(1.0/s))
tmp = dot(tmp,u.T)
return _dot0(tmp, yk)
def _setdiag(a, k, v):
if not a.ndim == 2:
raise ValueError("Input array should be 2-D.")
M,N = a.shape
if k > 0:
start = k
num = N - k
else:
num = M + k
start = abs(k)*N
end = start + num*(N+1)-1
a.flat[start:end:(N+1)] = v
# Return the spline that minimizes the discontinuity of the
# "order-th" derivative; for order >= 2.
def _find_smoothest2(xk, yk):
N = len(xk) - 1
Np1 = N + 1
# find pseudo-inverse of B directly.
Bd = np.empty((Np1, N))
for k in range(-N,N):
if (k < 0):
l = np.arange(-k, Np1)
v = (l+k+1)
if ((k+1) % 2):
v = -v
else:
l = np.arange(k,N)
v = N - l
if ((k % 2)):
v = -v
_setdiag(Bd, k, v)
Bd /= (Np1)
V2 = np.ones((Np1,))
V2[1::2] = -1
V2 /= math.sqrt(Np1)
dk = np.diff(xk)
b = 2*np.diff(yk, axis=0)/dk
J = np.zeros((N-1,N+1))
idk = 1.0/dk
_setdiag(J,0,idk[:-1])
_setdiag(J,1,-idk[1:]-idk[:-1])
_setdiag(J,2,idk[1:])
A = dot(J.T,J)
val = dot(V2,dot(A,V2))
res1 = dot(np.outer(V2,V2)/val,A)
mk = dot(np.eye(Np1)-res1, _dot0(Bd,b))
return mk
def _get_spline2_Bb(xk, yk, kind, conds):
Np1 = len(xk)
dk = xk[1:]-xk[:-1]
if kind == 'not-a-knot':
# use banded-solver
nlu = (1,1)
B = ones((3,Np1))
alpha = 2*(yk[1:]-yk[:-1])/dk
zrs = np.zeros((1,)+yk.shape[1:])
row = (Np1-1)//2
b = np.concatenate((alpha[:row],zrs,alpha[row:]),axis=0)
B[0,row+2:] = 0
B[2,:(row-1)] = 0
B[0,row+1] = dk[row-1]
B[1,row] = -dk[row]-dk[row-1]
B[2,row-1] = dk[row]
return B, b, None, nlu
else:
raise NotImplementedError("quadratic %s is not available" % kind)
def _get_spline3_Bb(xk, yk, kind, conds):
# internal function to compute different tri-diagonal system
# depending on the kind of spline requested.
# conds is only used for 'second' and 'first'
Np1 = len(xk)
if kind in ['natural', 'second']:
if kind == 'natural':
m0, mN = 0.0, 0.0
else:
m0, mN = conds
# the matrix to invert is (N-1,N-1)
# use banded solver
beta = 2*(xk[2:]-xk[:-2])
alpha = xk[1:]-xk[:-1]
nlu = (1,1)
B = np.empty((3,Np1-2))
B[0,1:] = alpha[2:]
B[1,:] = beta
B[2,:-1] = alpha[1:-1]
dyk = yk[1:]-yk[:-1]
b = (dyk[1:]/alpha[1:] - dyk[:-1]/alpha[:-1])
b *= 6
b[0] -= m0
b[-1] -= mN
def append_func(mk):
# put m0 and mN into the correct shape for
# concatenation
ma = array(m0,copy=0,ndmin=yk.ndim)
mb = array(mN,copy=0,ndmin=yk.ndim)
if ma.shape[1:] != yk.shape[1:]:
ma = ma*(ones(yk.shape[1:])[np.newaxis,...])
if mb.shape[1:] != yk.shape[1:]:
mb = mb*(ones(yk.shape[1:])[np.newaxis,...])
mk = np.concatenate((ma,mk),axis=0)
mk = np.concatenate((mk,mb),axis=0)
return mk
return B, b, append_func, nlu
elif kind in ['clamped', 'endslope', 'first', 'not-a-knot', 'runout',
'parabolic']:
if kind == 'endslope':
# match slope of lagrange interpolating polynomial of
# order 3 at end-points.
x0,x1,x2,x3 = xk[:4]
sl_0 = (1./(x0-x1)+1./(x0-x2)+1./(x0-x3))*yk[0]
sl_0 += (x0-x2)*(x0-x3)/((x1-x0)*(x1-x2)*(x1-x3))*yk[1]
            sl_0 += (x0-x1)*(x0-x3)/((x2-x0)*(x2-x1)*(x2-x3))*yk[2]
sl_0 += (x0-x1)*(x0-x2)/((x3-x0)*(x3-x1)*(x3-x2))*yk[3]
xN3,xN2,xN1,xN0 = xk[-4:]
sl_N = (1./(xN0-xN1)+1./(xN0-xN2)+1./(xN0-xN3))*yk[-1]
sl_N += (xN0-xN2)*(xN0-xN3)/((xN1-xN0)*(xN1-xN2)*(xN1-xN3))*yk[-2]
            sl_N += (xN0-xN1)*(xN0-xN3)/((xN2-xN0)*(xN2-xN1)*(xN2-xN3))*yk[-3]
sl_N += (xN0-xN1)*(xN0-xN2)/((xN3-xN0)*(xN3-xN1)*(xN3-xN2))*yk[-4]
elif kind == 'clamped':
sl_0, sl_N = 0.0, 0.0
elif kind == 'first':
sl_0, sl_N = conds
# Now set up the (N+1)x(N+1) system of equations
beta = np.r_[0,2*(xk[2:]-xk[:-2]),0]
alpha = xk[1:]-xk[:-1]
gamma = np.r_[0,alpha[1:]]
B = np.diag(alpha,k=-1) + np.diag(beta) + np.diag(gamma,k=1)
d1 = alpha[0]
dN = alpha[-1]
if kind == 'not-a-knot':
d2 = alpha[1]
dN1 = alpha[-2]
B[0,:3] = [d2,-d1-d2,d1]
B[-1,-3:] = [dN,-dN1-dN,dN1]
elif kind == 'runout':
B[0,:3] = [1,-2,1]
B[-1,-3:] = [1,-2,1]
elif kind == 'parabolic':
B[0,:2] = [1,-1]
B[-1,-2:] = [-1,1]
elif kind == 'periodic':
raise NotImplementedError
elif kind == 'symmetric':
raise NotImplementedError
else:
B[0,:2] = [2*d1,d1]
B[-1,-2:] = [dN,2*dN]
# Set up RHS (b)
b = np.empty((Np1,)+yk.shape[1:])
dyk = (yk[1:]-yk[:-1])*1.0
if kind in ['not-a-knot', 'runout', 'parabolic']:
b[0] = b[-1] = 0.0
elif kind == 'periodic':
raise NotImplementedError
elif kind == 'symmetric':
raise NotImplementedError
else:
b[0] = (dyk[0]/d1 - sl_0)
b[-1] = -(dyk[-1]/dN - sl_N)
b[1:-1,...] = (dyk[1:]/alpha[1:]-dyk[:-1]/alpha[:-1])
b *= 6.0
return B, b, None, None
else:
raise ValueError("%s not supported" % kind)
# conds is a tuple of an array and a vector
# giving the left-hand and the right-hand side
# of the additional equations to add to B
def _find_user(xk, yk, order, conds, B):
lh = conds[0]
rh = conds[1]
B = np.concatenate((B, lh), axis=0)
w = np.concatenate((yk, rh), axis=0)
M, N = B.shape
if (M > N):
raise ValueError("over-specification of conditions")
elif (M < N):
return _find_smoothest(xk, yk, order, None, B)
else:
return scipy.linalg.solve(B, w)
# If conds is None, then use the not_a_knot condition
# at K-1 farthest separated points in the interval
def _find_not_a_knot(xk, yk, order, conds, B):
raise NotImplementedError
return _find_user(xk, yk, order, conds, B)
# If conds is None, then ensure zero-valued second
# derivative at K-1 farthest separated points
def _find_natural(xk, yk, order, conds, B):
raise NotImplementedError
return _find_user(xk, yk, order, conds, B)
# If conds is None, then ensure zero-valued first
# derivative at K-1 farthest separated points
def _find_clamped(xk, yk, order, conds, B):
raise NotImplementedError
return _find_user(xk, yk, order, conds, B)
def _find_fixed(xk, yk, order, conds, B):
raise NotImplementedError
return _find_user(xk, yk, order, conds, B)
# If conds is None, then use coefficient periodicity
# If conds is 'function' then use function periodicity
def _find_periodic(xk, yk, order, conds, B):
raise NotImplementedError
return _find_user(xk, yk, order, conds, B)
# Doesn't use conds
def _find_symmetric(xk, yk, order, conds, B):
raise NotImplementedError
return _find_user(xk, yk, order, conds, B)
# conds is a dictionary with multiple values
def _find_mixed(xk, yk, order, conds, B):
raise NotImplementedError
return _find_user(xk, yk, order, conds, B)
def splmake(xk, yk, order=3, kind='smoothest', conds=None):
"""
Return a representation of a spline given data-points at internal knots
Parameters
----------
xk : array_like
The input array of x values of rank 1
yk : array_like
The input array of y values of rank N. `yk` can be an N-d array to
represent more than one curve, through the same `xk` points. The first
dimension is assumed to be the interpolating dimension and is the same
length of `xk`.
order : int, optional
Order of the spline
kind : str, optional
Can be 'smoothest', 'not_a_knot', 'fixed', 'clamped', 'natural',
'periodic', 'symmetric', 'user', 'mixed' and it is ignored if order < 2
    conds : optional
        Extra conditions for the fit; interpretation depends on `kind`.
Returns
-------
splmake : tuple
Return a (`xk`, `cvals`, `k`) representation of a spline given
data-points where the (internal) knots are at the data-points.
"""
yk = np.asanyarray(yk)
order = int(order)
if order < 0:
raise ValueError("order must not be negative")
if order == 0:
return xk, yk[:-1], order
elif order == 1:
return xk, yk, order
try:
func = eval('_find_%s' % kind)
    except (NameError, SyntaxError):
        raise NotImplementedError("unknown kind %r" % kind)
# the constraint matrix
B = _fitpack._bsplmat(order, xk)
coefs = func(xk, yk, order, conds, B)
return xk, coefs, order
def spleval(xck, xnew, deriv=0):
"""
Evaluate a fixed spline represented by the given tuple at the new x-values
The `xj` values are the interior knot points. The approximation
region is `xj[0]` to `xj[-1]`. If N+1 is the length of `xj`, then `cvals`
should have length N+k where `k` is the order of the spline.
Parameters
----------
    xck : tuple
        Parameters (`xj`, `cvals`, `k`) that define the fixed spline
xj : array_like
Interior knot points
cvals : array_like
        Spline coefficients
k : int
Order of the spline
xnew : array_like
Locations to calculate spline
deriv : int
        Order of the derivative to evaluate. Default is 0.
Returns
-------
spleval : ndarray
If `cvals` represents more than one curve (`cvals.ndim` > 1) and/or
`xnew` is N-d, then the result is `xnew.shape` + `cvals.shape[1:]`
providing the interpolation of multiple curves.
Notes
-----
Internally, an additional `k`-1 knot points are added on either side of
the spline.
"""
(xj,cvals,k) = xck
oldshape = np.shape(xnew)
xx = np.ravel(xnew)
sh = cvals.shape[1:]
res = np.empty(xx.shape + sh, dtype=cvals.dtype)
for index in np.ndindex(*sh):
sl = (slice(None),)+index
if issubclass(cvals.dtype.type, np.complexfloating):
res[sl].real = _fitpack._bspleval(xx,xj,cvals.real[sl],k,deriv)
res[sl].imag = _fitpack._bspleval(xx,xj,cvals.imag[sl],k,deriv)
else:
res[sl] = _fitpack._bspleval(xx,xj,cvals[sl],k,deriv)
res.shape = oldshape + sh
return res
def spltopp(xk, cvals, k):
"""Return a piece-wise polynomial object from a fixed-spline tuple.
"""
return ppform.fromspline(xk, cvals, k)
def spline(xk, yk, xnew, order=3, kind='smoothest', conds=None):
"""
Interpolate a curve at new points using a spline fit
Parameters
----------
xk, yk : array_like
The x and y values that define the curve.
xnew : array_like
The x values where spline should estimate the y values.
    order : int, optional
        Order of the spline fit. Default is 3.
kind : string
One of {'smoothest'}
    conds : optional
        Extra conditions passed through to `splmake`; interpretation
        depends on `kind`.
Returns
-------
spline : ndarray
An array of y values; the spline evaluated at the positions `xnew`.
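    Examples
    --------
    A hedged usage sketch of this legacy helper (exact numbers depend on
    the spline fit and are not shown):
    >>> import numpy as np
    >>> from scipy.interpolate import spline
    >>> xk = np.linspace(0, 2 * np.pi, 10)
    >>> yk = np.sin(xk)
    >>> xnew = np.linspace(0, 2 * np.pi, 50)
    >>> ynew = spline(xk, yk, xnew)
    >>> ynew.shape  # doctest: +SKIP
    (50,)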
"""
return spleval(splmake(xk,yk,order=order,kind=kind,conds=conds),xnew)
| bsd-3-clause |
KordingLab/spykes | tests/ml/test_neuropop.py | 2 | 1911 | from __future__ import absolute_import
import numpy as np
import matplotlib.pyplot as p
from nose.tools import (
assert_true,
assert_equal,
assert_raises,
)
from spykes.ml.neuropop import NeuroPop
from spykes.utils import train_test_split
np.random.seed(42)
p.switch_backend('Agg')
def test_neuropop():
np.random.seed(1738)
num_neurons = 10
for num_neurons in [1, 10]:
for tunemodel in ['glm', 'gvm']:
for i in range(2):
pop = NeuroPop(tunemodel=tunemodel, n_neurons=num_neurons,
verbose=True)
if i == 0:
pop.set_params()
else:
pop.set_params(mu=np.random.randn(),
k0=np.random.randn(),
k=np.random.randn(),
g=np.random.randn(),
b=np.random.randn())
x, Y, mu, k0, k, g, b = pop.simulate(tunemodel)
_helper_test_neuropop(pop, num_neurons, x, Y)
def _helper_test_neuropop(pop, num_neurons, x, Y):
# Splits into training and testing parts.
x_split, Y_split = train_test_split(x, Y, percent=0.5)
(x_train, x_test), (Y_train, Y_test) = x_split, Y_split
pop.fit(x_train, Y_train)
Yhat_test = pop.predict(x_test)
assert_equal(Yhat_test.shape[0], x_test.shape[0])
assert_equal(Yhat_test.shape[1], num_neurons)
Ynull = np.mean(Y_train, axis=0)
score = pop.score(Y_test, Yhat_test, Ynull, method='pseudo_R2')
assert_equal(len(score), num_neurons)
xhat_test = pop.decode(Y_test)
assert_equal(xhat_test.shape[0], Y_test.shape[0])
for method in ['circ_corr', 'cosine_dist']:
score = pop.score(x_test, xhat_test, method=method)
pop.display(x, Y, 0)
pop.display(x, Y, 0, xjitter=True, yjitter=True)
| mit |
justincassidy/scikit-learn | sklearn/feature_extraction/dict_vectorizer.py | 234 | 12267 | # Authors: Lars Buitinck
# Dan Blanchard <[email protected]>
# License: BSD 3 clause
from array import array
from collections import Mapping
from operator import itemgetter
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.six.moves import xrange
from ..utils import check_array, tosequence
from ..utils.fixes import frombuffer_empty
def _tosequence(X):
"""Turn X into a sequence or ndarray, avoiding a copy if possible."""
if isinstance(X, Mapping): # single sample
return [X]
else:
return tosequence(X)
class DictVectorizer(BaseEstimator, TransformerMixin):
"""Transforms lists of feature-value mappings to vectors.
This transformer turns lists of mappings (dict-like objects) of feature
names to feature values into Numpy arrays or scipy.sparse matrices for use
with scikit-learn estimators.
When feature values are strings, this transformer will do a binary one-hot
(aka one-of-K) coding: one boolean-valued feature is constructed for each
of the possible string values that the feature can take on. For instance,
a feature "f" that can take on the values "ham" and "spam" will become two
features in the output, one signifying "f=ham", the other "f=spam".
Features that do not occur in a sample (mapping) will have a zero value
in the resulting array/matrix.
Read more in the :ref:`User Guide <dict_feature_extraction>`.
Parameters
----------
dtype : callable, optional
The type of feature values. Passed to Numpy array/scipy.sparse matrix
constructors as the dtype argument.
separator: string, optional
Separator string used when constructing new features for one-hot
coding.
sparse: boolean, optional.
Whether transform should produce scipy.sparse matrices.
True by default.
sort: boolean, optional.
Whether ``feature_names_`` and ``vocabulary_`` should be sorted when fitting.
True by default.
Attributes
----------
vocabulary_ : dict
A dictionary mapping feature names to feature indices.
feature_names_ : list
A list of length n_features containing the feature names (e.g., "f=ham"
and "f=spam").
Examples
--------
>>> from sklearn.feature_extraction import DictVectorizer
>>> v = DictVectorizer(sparse=False)
>>> D = [{'foo': 1, 'bar': 2}, {'foo': 3, 'baz': 1}]
>>> X = v.fit_transform(D)
>>> X
array([[ 2., 0., 1.],
[ 0., 1., 3.]])
>>> v.inverse_transform(X) == \
[{'bar': 2.0, 'foo': 1.0}, {'baz': 1.0, 'foo': 3.0}]
True
>>> v.transform({'foo': 4, 'unseen_feature': 3})
array([[ 0., 0., 4.]])
See also
--------
FeatureHasher : performs vectorization using only a hash function.
sklearn.preprocessing.OneHotEncoder : handles nominal/categorical features
encoded as columns of integers.
"""
def __init__(self, dtype=np.float64, separator="=", sparse=True,
sort=True):
self.dtype = dtype
self.separator = separator
self.sparse = sparse
self.sort = sort
def fit(self, X, y=None):
"""Learn a list of feature name -> indices mappings.
Parameters
----------
X : Mapping or iterable over Mappings
Dict(s) or Mapping(s) from feature names (arbitrary Python
objects) to feature values (strings or convertible to dtype).
y : (ignored)
Returns
-------
self
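        Examples
        --------
        A short sketch of fitting on its own (mirrors the class-level
        example; feature names are sorted by default):
        >>> from sklearn.feature_extraction import DictVectorizer
        >>> v = DictVectorizer(sparse=False)
        >>> _ = v.fit([{'foo': 1, 'bar': 2}, {'foo': 3, 'baz': 1}])
        >>> v.feature_names_
        ['bar', 'baz', 'foo']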
"""
feature_names = []
vocab = {}
for x in X:
for f, v in six.iteritems(x):
if isinstance(v, six.string_types):
f = "%s%s%s" % (f, self.separator, v)
if f not in vocab:
feature_names.append(f)
vocab[f] = len(vocab)
if self.sort:
feature_names.sort()
vocab = dict((f, i) for i, f in enumerate(feature_names))
self.feature_names_ = feature_names
self.vocabulary_ = vocab
return self
def _transform(self, X, fitting):
# Sanity check: Python's array has no way of explicitly requesting the
# signed 32-bit integers that scipy.sparse needs, so we use the next
# best thing: typecode "i" (int). However, if that gives larger or
# smaller integers than 32-bit ones, np.frombuffer screws up.
assert array("i").itemsize == 4, (
"sizeof(int) != 4 on your platform; please report this at"
" https://github.com/scikit-learn/scikit-learn/issues and"
" include the output from platform.platform() in your bug report")
dtype = self.dtype
if fitting:
feature_names = []
vocab = {}
else:
feature_names = self.feature_names_
vocab = self.vocabulary_
# Process everything as sparse regardless of setting
X = [X] if isinstance(X, Mapping) else X
indices = array("i")
indptr = array("i", [0])
# XXX we could change values to an array.array as well, but it
# would require (heuristic) conversion of dtype to typecode...
values = []
# collect all the possible feature names and build sparse matrix at
# same time
for x in X:
for f, v in six.iteritems(x):
if isinstance(v, six.string_types):
f = "%s%s%s" % (f, self.separator, v)
v = 1
if f in vocab:
indices.append(vocab[f])
values.append(dtype(v))
else:
if fitting:
feature_names.append(f)
vocab[f] = len(vocab)
indices.append(vocab[f])
values.append(dtype(v))
indptr.append(len(indices))
if len(indptr) == 1:
raise ValueError("Sample sequence X is empty.")
indices = frombuffer_empty(indices, dtype=np.intc)
indptr = np.frombuffer(indptr, dtype=np.intc)
shape = (len(indptr) - 1, len(vocab))
result_matrix = sp.csr_matrix((values, indices, indptr),
shape=shape, dtype=dtype)
# Sort everything if asked
if fitting and self.sort:
feature_names.sort()
map_index = np.empty(len(feature_names), dtype=np.int32)
for new_val, f in enumerate(feature_names):
map_index[new_val] = vocab[f]
vocab[f] = new_val
result_matrix = result_matrix[:, map_index]
if self.sparse:
result_matrix.sort_indices()
else:
result_matrix = result_matrix.toarray()
if fitting:
self.feature_names_ = feature_names
self.vocabulary_ = vocab
return result_matrix
def fit_transform(self, X, y=None):
"""Learn a list of feature name -> indices mappings and transform X.
Like fit(X) followed by transform(X), but does not require
materializing X in memory.
Parameters
----------
X : Mapping or iterable over Mappings
Dict(s) or Mapping(s) from feature names (arbitrary Python
objects) to feature values (strings or convertible to dtype).
y : (ignored)
Returns
-------
Xa : {array, sparse matrix}
Feature vectors; always 2-d.
"""
return self._transform(X, fitting=True)
def inverse_transform(self, X, dict_type=dict):
"""Transform array or sparse matrix X back to feature mappings.
X must have been produced by this DictVectorizer's transform or
fit_transform method; it may only have passed through transformers
that preserve the number of features and their order.
In the case of one-hot/one-of-K coding, the constructed feature
names and values are returned rather than the original ones.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Sample matrix.
dict_type : callable, optional
Constructor for feature mappings. Must conform to the
collections.Mapping API.
Returns
-------
D : list of dict_type objects, length = n_samples
Feature mappings for the samples in X.
"""
# COO matrix is not subscriptable
X = check_array(X, accept_sparse=['csr', 'csc'])
n_samples = X.shape[0]
names = self.feature_names_
dicts = [dict_type() for _ in xrange(n_samples)]
if sp.issparse(X):
for i, j in zip(*X.nonzero()):
dicts[i][names[j]] = X[i, j]
else:
for i, d in enumerate(dicts):
for j, v in enumerate(X[i, :]):
if v != 0:
d[names[j]] = X[i, j]
return dicts
def transform(self, X, y=None):
"""Transform feature->value dicts to array or sparse matrix.
Named features not encountered during fit or fit_transform will be
silently ignored.
Parameters
----------
X : Mapping or iterable over Mappings, length = n_samples
Dict(s) or Mapping(s) from feature names (arbitrary Python
objects) to feature values (strings or convertible to dtype).
y : (ignored)
Returns
-------
Xa : {array, sparse matrix}
Feature vectors; always 2-d.
"""
if self.sparse:
return self._transform(X, fitting=False)
else:
dtype = self.dtype
vocab = self.vocabulary_
X = _tosequence(X)
Xa = np.zeros((len(X), len(vocab)), dtype=dtype)
for i, x in enumerate(X):
for f, v in six.iteritems(x):
if isinstance(v, six.string_types):
f = "%s%s%s" % (f, self.separator, v)
v = 1
try:
Xa[i, vocab[f]] = dtype(v)
except KeyError:
pass
return Xa
def get_feature_names(self):
"""Returns a list of feature names, ordered by their indices.
If one-of-K coding is applied to categorical features, this will
include the constructed feature names but not the original ones.
"""
return self.feature_names_
def restrict(self, support, indices=False):
"""Restrict the features to those in support using feature selection.
This function modifies the estimator in-place.
Parameters
----------
support : array-like
Boolean mask or list of indices (as returned by the get_support
member of feature selectors).
indices : boolean, optional
Whether support is a list of indices.
Returns
-------
self
Examples
--------
>>> from sklearn.feature_extraction import DictVectorizer
>>> from sklearn.feature_selection import SelectKBest, chi2
>>> v = DictVectorizer()
>>> D = [{'foo': 1, 'bar': 2}, {'foo': 3, 'baz': 1}]
>>> X = v.fit_transform(D)
>>> support = SelectKBest(chi2, k=2).fit(X, [0, 1])
>>> v.get_feature_names()
['bar', 'baz', 'foo']
>>> v.restrict(support.get_support()) # doctest: +ELLIPSIS
DictVectorizer(dtype=..., separator='=', sort=True,
sparse=True)
>>> v.get_feature_names()
['bar', 'foo']
"""
if not indices:
support = np.where(support)[0]
names = self.feature_names_
new_vocab = {}
for i in support:
new_vocab[names[i]] = len(new_vocab)
self.vocabulary_ = new_vocab
self.feature_names_ = [f for f, i in sorted(six.iteritems(new_vocab),
key=itemgetter(1))]
return self
| bsd-3-clause |
mailhexu/pyDFTutils | build/lib/pyDFTutils/phonon/parser.py | 2 | 16721 | #!/usr/bin/env python
import os
import numpy as np
from ase.data import chemical_symbols
import matplotlib.pyplot as plt
from abipy.abilab import abiopen
from pyDFTutils.perovskite.perovskite_mode import label_zone_boundary, label_Gamma
from ase.units import Ha
from spglib import spglib
def displacement_cart_to_evec(displ_cart,
masses,
scaled_positions,
qpoint=None,
add_phase=True):
"""
    displ_cart: cartesian displacement. (atom1_x, atom1_y, atom1_z, atom2_x, ...)
    masses: masses of atoms.
    scaled_positions: scaled positions of atoms.
qpoint: if phase needs to be added, qpoint must be given.
add_phase: whether to add phase to the eigenvectors.
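    Illustrative call (mirrors the usage in phonon_band below; the argument
    names are placeholders for arrays of the shapes described above):
        evec = displacement_cart_to_evec(phmode.displ_cart, masses,
                                         scaled_positions,
                                         qpoint=qpt, add_phase=False)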
"""
if add_phase and qpoint is None:
raise ValueError('qpoint must be given if adding phase is needed')
m = np.sqrt(np.kron(masses, [1, 1, 1]))
evec = displ_cart * m
if add_phase:
phase = [
np.exp(-2j * np.pi * np.dot(pos, qpoint))
for pos in scaled_positions
]
phase = np.kron(phase, [1, 1, 1])
evec *= phase
evec /= np.linalg.norm(evec)
return evec
def ixc_to_xc(ixc):
"""
translate ixc (positive: abinit. negative: libxc) to XC.
"""
xcdict = {
0: 'NO-XC',
1: 'LDA',
2: 'LDA-PZCA',
3: 'LDA-CA',
        4: 'LDA-Wigner',
5: 'LDA-Hedin-Lundqvist',
6: 'LDA-X-alpha',
7: 'LDA-PW92',
8: 'LDA-PW92-xonly',
9: 'LDA-PW92-xRPA',
11: 'GGA-PBE',
12: 'GGA-PBE-xonly',
14: 'GGA-revPBE',
15: 'GGA-RPBE',
16: 'GGA-HTCH93',
17: 'GGA-HTCH120',
23: 'GGA-WC',
40: 'Hartree-Fock',
41: 'GGA-PBE0',
42: 'GGA-PBE0-1/3',
-1009: 'LDA-PZCA',
-101130: 'GGA-PBE',
-106131: 'GGA-BLYP',
-106132: 'GGA-BP86',
-116133: 'GGA-PBEsol',
-118130: 'GGA-WC',
}
if ixc in xcdict:
return xcdict[ixc]
else:
return 'libxc_%s' % ixc
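# Quick illustration of the mapping above:
#   ixc_to_xc(11)      -> 'GGA-PBE'
#   ixc_to_xc(-116133) -> 'GGA-PBEsol'
#   ixc_to_xc(99999)   -> 'libxc_99999'  (unknown codes fall back to a libxc label)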
class mat_data():
def __init__(self,
name,
mag='PM',
description="None",
author='High Throughput Bot',
email='[email protected]',
is_verified=False,
verification_info="",
tags=[]
):
self._already_in_db = False
self.name = name
self.db_directory = None
self.all_data_directory = None
self.mag = mag
self.insert_time = None
self.update_time = None
self.log = ""
self.description = description
self.author = author
self.email = email
self.tags=tags
self.is_verified = is_verified
self.verification_info = verification_info
# properties in database. should be band | phonon
self.has_ebands = False
self.has_phonon = False
self.is_cubic_perovskite = True
self.cellpar = [0] * 6
self.cell = [0]*9
self.natoms = 0
self.chemical_symbols = []
self.masses = []
self.scaled_positions = []
self.ispin = 0
self.spinat = []
self.spgroup = 1
self.spgroup_name = 'P1'
self.ixc = 1
self.XC = 'PBEsol'
self.pp_type = 'ONCV'
self.pp_info = 'Not implemented yet.'
self.U_type=0
self.species=[]
self.zion=[]
self.U_l=[]
self.U_u=[]
self.U_j=[]
self.GSR_parameters = {}
self.energy = 0
self.efermi = 0
self.bandgap = 0
self.ebands = {}
self.kptrlatt=[]
self.usepaw=0
self.pawecutdg=0.0
self.nsppol=1
self.nspden=1
self.emacro = [0.0] * 9
self.becs = {}
self.elastic = []
self.nqpts = [1, 1, 1]
self.special_qpts = {}
self.phonon_mode_freqs = {}
self.phonon_mode_names = {}
self.phonon_mode_evecs = {}
self.phonon_mode_phdispl = {}
self.phonon_mode_freqs_LOTO = {}
self.phonon_mode_names_LOTO = {}
self.phonon_mode_evecs_LOTO = {}
self.phonon_mode_phdispl_LOTO = {}
def read_BAND_nc(self, fname, outputfile='Ebands.png', plot_ebands=True):
try:
band_file = abiopen(fname)
self.has_ebands = True
except Exception:
raise IOError("can't read %s" % fname)
self.efermi = band_file.energy_terms.e_fermie
gap = band_file.ebands.fundamental_gaps
if len(gap) != 0:
for g in gap:
self.gap = g.energy
self.is_direct_gap = g.is_direct
self.bandgap = self.gap
if plot_ebands:
fig, ax = plt.subplots()
fig = band_file.ebands.plot(ax=ax, show=False, ylims=[-7, 5])
fig.savefig(outputfile)
def read_OUT_nc(self, fname):
f = abiopen(fname)
self.invars = f.get_allvars()
for key in self.invars:
if isinstance(self.invars[key], np.ndarray):
self.invars[key] = tuple(self.invars[key])
self.spgroup = f.spgroup[0]
self.ixc = f.ixc[0]
self.XC = ixc_to_xc(self.ixc)
self.ecut = f.ecut[0]
self.species = [chemical_symbols[int(i)] for i in f.znucl]
if 'usepawu' in self.invars:
self.U_type= f.usepawu[0]
else:
self.U_type= 0
if self.U_type:
self.U_l = f.lpawu
self.U_u= [ x * Ha for x in f.upawu]
self.U_j= [ x* Ha for x in f.jpawu ]
#self.nband = f.nband[0]
self.kptrlatt = tuple(f.kptrlatt)
def print_scf_info(self):
for key, val in self.invars:
print("%s : %s\n" % (key, val))
def read_GSR_nc(self, fname):
f = abiopen(fname)
self.energy = f.energy
self.stress_tensor = f.cart_stress_tensor # unit ?
self.forces = np.array(f.cart_forces) # unit eV/ang
def read_DDB(self,
fname=None,
do_label=True,
workdir=None,
phonon_output_dipdip='phonon_band_dipdip.png',
phonon_output_nodipdip='phonon_band_nodipdip.png'):
"""
read phonon related properties from DDB file.
"""
self.has_phonon = True
ddb = abiopen(fname)
self.ddb_header = ddb.header
self.atoms = ddb.structure.to_ase_atoms()
self.natoms = len(self.atoms)
self.cellpar = self.atoms.get_cell_lengths_and_angles()
self.cell=self.atoms.get_cell().flatten()
self.masses = self.atoms.get_masses()
self.scaled_positions = self.atoms.get_scaled_positions()
self.chemical_symbols = self.atoms.get_chemical_symbols()
self.spgroup_name = spglib.get_spacegroup(self.atoms,symprec=1e-4)
self.ixc = self.ddb_header['ixc']
self.XC = ixc_to_xc( self.ixc)
self.ispin = self.ddb_header['nsppol']
self.spinat = self.ddb_header['spinat']
self.nband = self.ddb_header['nband']
self.ecut = self.ddb_header['ecut']
self.tsmear =self.ddb_header['tsmear']
self.usepaw =self.ddb_header['usepaw']
        self.pawecutdg = self.ddb_header['pawecutdg']
self.nsppol = self.ddb_header['nsppol']
self.nspden= self.ddb_header['nspden']
self.species = [chemical_symbols[int(i)] for i in self.ddb_header['znucl']]
self.zion = [int(x) for x in self.ddb_header['zion']]
self.znucl = [int(x) for x in self.ddb_header['znucl']]
emacror, becsr = ddb.anaget_emacro_and_becs()
emacro = emacror[0].cartesian_tensor
becs_array = becsr.values
becs = {}
for i, bec in enumerate(becs_array):
becs[str(i)] = bec
nqpts = ddb._guess_ngqpt()
qpts = tuple(ddb.qpoints.frac_coords)
self.emacro = emacro
self.becs = becs
self.nqpts = nqpts
self.qpts = qpts
for qpt in qpts:
qpt = tuple(qpt)
m = ddb.anaget_phmodes_at_qpoint(qpt)
#self.results['phonon'][qpt]['frequencies'] = m.phfreqs
#self.results['phonon'][qpt][
# 'eigen_displacements'] = m.phdispl_cart
qpoints, evals, evecs, edisps = self.phonon_band(
ddb,
lo_to_splitting=False,
phonon_output_dipdip=phonon_output_dipdip,
phonon_output_nodipdip=phonon_output_nodipdip)
#for i in range(15):
# print(evecs[0, :, i])
self.special_qpts = {
'X': (0, 0.5, 0.0),
'M': (0.5, 0.5, 0),
'R': (0.5, 0.5, 0.5)
}
zb_modes = self.label_zone_boundary_all(
qpoints, evals, evecs, label=do_label)
for qname in self.special_qpts:
self.phonon_mode_freqs[qname] = zb_modes[qname][0]
self.phonon_mode_names[qname] = zb_modes[qname][1]
self.phonon_mode_evecs[qname] = zb_modes[qname][2]
Gmodes = self.label_Gamma_all(qpoints, evals, evecs, label=do_label)
self.phonon_mode_freqs['Gamma'] = Gmodes[0]
self.phonon_mode_names['Gamma'] = Gmodes[1]
self.phonon_mode_evecs['Gamma'] = Gmodes[2]
def get_zb_mode(self, qname, mode_name):
"""
        return the branch indices and frequencies of the zone-boundary
        mode with the given name at q-point qname.
"""
ibranches = []
freqs = []
        # note: self.results is never populated in this class; use the
        # labelled zone-boundary modes gathered in read_DDB instead.
        for imode, mode in enumerate(
                zip(self.phonon_mode_freqs[qname],
                    self.phonon_mode_names[qname])):
freq, mname = mode
if mname == mode_name:
ibranches.append(imode)
freqs.append(freq)
return ibranches, freqs
def get_gamma_modes(self):
"""
return (Freqs, names, evecs)
"""
return self.phonon_mode_freqs['Gamma'], self.phonon_mode_names['Gamma'], self.phonon_mode_evecs['Gamma'],
def get_gamma_mode(self, mode_name):
"""
        return the branch indices and frequencies of the Gamma mode
        with the given name.
"""
ibranches = []
freqs = []
for imode, mode in enumerate(zip(self.phonon_mode_freqs['Gamma'], self.phonon_mode_names['Gamma'])):
freq, mname = mode
if mname == mode_name:
ibranches.append(imode)
freqs.append(freq)
return ibranches, freqs
def label_Gamma_all(self, qpoints, evals, evecs, label=True):
Gamma_mode_freqs = []
Gamma_mode_names = []
Gamma_mode_evecs = []
for i, qpt in enumerate(qpoints):
if np.isclose(qpt, [0, 0, 0], rtol=1e-5, atol=1e-3).all():
evecq = evecs[i]
for j, evec in enumerate(evecq.T):
freq = evals[i][j]
if label:
mode = label_Gamma(
evec=evec, masses=self.atoms.get_masses())
Gamma_mode_names.append(mode)
else:
Gamma_mode_names.append('')
Gamma_mode_freqs.append(freq)
Gamma_mode_evecs.append(np.real(evec))
return Gamma_mode_freqs, Gamma_mode_names, Gamma_mode_evecs
if Gamma_mode_names == []:
print("Warning: No Gamma point found in qpoints.\n")
return Gamma_mode_freqs, Gamma_mode_names, Gamma_mode_evecs
def label_zone_boundary_all(self, qpoints, evals, evecs, label=True):
mode_dict = {}
qdict = {'X': (0, 0.5, 0.0), 'M': (0.5, 0.5, 0), 'R': (0.5, 0.5, 0.5)}
for i, qpt in enumerate(qpoints):
for qname in qdict:
if np.isclose(qpt, qdict[qname], rtol=1e-5, atol=1e-3).all():
mode_freqs = []
mode_names = []
mode_evecs = []
#print "===================================="
#print qname
evecq = evecs[i]
for j, evec in enumerate(evecq.T):
freq = evals[i][j]
mode_freqs.append(freq)
if label:
mode = label_zone_boundary(qname, evec=evec)
mode_names.append(mode)
else:
mode_names.append('')
mode_evecs.append(np.real(evec))
mode_dict[qname] = (mode_freqs, mode_names, mode_evecs)
return mode_dict
def phonon_band(self,
ddb,
lo_to_splitting=False,
workdir=None,
phonon_output_dipdip='phonon_band_dipdip.png',
phonon_output_nodipdip='phonon_band_nodipdip.png',
show=False):
atoms = ddb.structure.to_ase_atoms()
if workdir is not None:
            # a leading slash would make os.path.join discard workdir entirely
            workdir_dip = os.path.join(workdir, 'phbst_dipdip')
#if os.path.exists(workdir_dip):
# os.system('rm -r %s' % workdir_dip)
else:
workdir_dip = None
phbst, phdos = ddb.anaget_phbst_and_phdos_files(
nqsmall=10,
asr=1,
chneut=1,
dipdip=1,
verbose=1,
lo_to_splitting=True,
anaddb_kwargs={'alphon': 1},
workdir=workdir_dip,
#qptbounds=kpath_bounds,
)
fig, ax = plt.subplots(nrows=1, ncols=1)
#plt.tight_layout(pad=2.19)
#plt.axis('tight')
plt.gcf().subplots_adjust(left=0.17)
ax.axhline(0, linestyle='--', color='black')
ax.set_title(self.name)
ticks, labels = phbst.phbands._make_ticks_and_labels(qlabels=None)
fig.axes[0].set_xlim([ticks[0],ticks[-1]])
fig = phbst.phbands.plot(
ax=ax,
units='cm-1',
match_bands=False,
linewidth=1.7,
color='blue',
show=False)
fig.axes[0].grid(False)
if show:
plt.show()
if phonon_output_dipdip:
fig.savefig(phonon_output_dipdip)
plt.close()
if workdir is not None:
workdir_nodip = os.path.join(workdir, 'phbst_nodipdip')
#if os.path.exists(workdir_dip):
# os.system('rm -r %s' % workdir_nodip)
else:
workdir_nodip = None
phbst, phdos = ddb.anaget_phbst_and_phdos_files(
nqsmall=5,
asr=1,
chneut=1,
dipdip=0,
verbose=1,
lo_to_splitting=False,
anaddb_kwargs={'alphon': 1},
workdir=workdir_nodip
#qptbounds=kpath_bounds,
)
fig, ax = plt.subplots(nrows=1, ncols=1)
#plt.tight_layout(pad=2.19)
#plt.axis('tight')
plt.gcf().subplots_adjust(left=0.17)
ax.axhline(0, linestyle='--', color='black')
        ax.set_title(self.name)
ticks, labels = phbst.phbands._make_ticks_and_labels(qlabels=None)
fig.axes[0].set_xlim([ticks[0],ticks[-1]])
fig = phbst.phbands.plot(
ax=ax,
units='cm-1',
match_bands=False,
linewidth=1.4,
color='blue',
show=False)
fig.axes[0].grid(False)
if show:
plt.show()
if phonon_output_dipdip:
fig.savefig(phonon_output_nodipdip)
plt.close()
qpoints = phbst.qpoints.frac_coords
nqpts = len(qpoints)
nbranch = 3 * len(atoms)
evals = np.zeros([nqpts, nbranch])
evecs = np.zeros([nqpts, nbranch, nbranch], dtype='complex128')
edisps = np.zeros([nqpts, nbranch, nbranch], dtype='complex128')
masses = atoms.get_masses()
scaled_positions = atoms.get_scaled_positions()
for iqpt, qpt in enumerate(qpoints):
for ibranch in range(nbranch):
phmode = phbst.get_phmode(qpt, ibranch)
evals[iqpt, ibranch] = phmode.freq * 8065.6
evec = displacement_cart_to_evec(
phmode.displ_cart,
masses,
scaled_positions,
qpoint=qpt,
add_phase=False)
evecs[iqpt, :, ibranch] = evec / np.linalg.norm(evec)
edisps[iqpt, :, ibranch] = phmode.displ_cart
return qpoints, evals, evecs, edisps
def test():
    m = mat_data('test_material')  # name is required; placeholder value for this smoke test
m.read_BAND_nc('./BAND_GSR.nc')
m.read_OUT_nc('./OUT.nc')
m.read_DDB('out_DDB')
#test()
| lgpl-3.0 |
michigraber/scikit-learn | examples/ensemble/plot_adaboost_regression.py | 311 | 1529 | """
======================================
Decision Tree Regression with AdaBoost
======================================
A decision tree is boosted using the AdaBoost.R2 [1] algorithm on a 1D
sinusoidal dataset with a small amount of Gaussian noise.
A regressor using 299 boosts (300 decision trees) is compared with a single
decision tree regressor. As the number of boosts is increased, the regressor
can fit more detail.
.. [1] H. Drucker, "Improving Regressors using Boosting Techniques", 1997.
"""
print(__doc__)
# Author: Noel Dawe <[email protected]>
#
# License: BSD 3 clause
# importing necessary libraries
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import AdaBoostRegressor
# Create the dataset
rng = np.random.RandomState(1)
X = np.linspace(0, 6, 100)[:, np.newaxis]
y = np.sin(X).ravel() + np.sin(6 * X).ravel() + rng.normal(0, 0.1, X.shape[0])
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=4)
regr_2 = AdaBoostRegressor(DecisionTreeRegressor(max_depth=4),
n_estimators=300, random_state=rng)
regr_1.fit(X, y)
regr_2.fit(X, y)
# Predict
y_1 = regr_1.predict(X)
y_2 = regr_2.predict(X)
# Plot the results
plt.figure()
plt.scatter(X, y, c="k", label="training samples")
plt.plot(X, y_1, c="g", label="n_estimators=1", linewidth=2)
plt.plot(X, y_2, c="r", label="n_estimators=300", linewidth=2)
plt.xlabel("data")
plt.ylabel("target")
plt.title("Boosted Decision Tree Regression")
plt.legend()
plt.show()
| bsd-3-clause |
ky822/scikit-learn | sklearn/feature_extraction/tests/test_feature_hasher.py | 258 | 2861 | from __future__ import unicode_literals
import numpy as np
from sklearn.feature_extraction import FeatureHasher
from nose.tools import assert_raises, assert_true
from numpy.testing import assert_array_equal, assert_equal
def test_feature_hasher_dicts():
h = FeatureHasher(n_features=16)
assert_equal("dict", h.input_type)
raw_X = [{"dada": 42, "tzara": 37}, {"gaga": 17}]
X1 = FeatureHasher(n_features=16).transform(raw_X)
gen = (iter(d.items()) for d in raw_X)
X2 = FeatureHasher(n_features=16, input_type="pair").transform(gen)
assert_array_equal(X1.toarray(), X2.toarray())
def test_feature_hasher_strings():
# mix byte and Unicode strings; note that "foo" is a duplicate in row 0
raw_X = [["foo", "bar", "baz", "foo".encode("ascii")],
["bar".encode("ascii"), "baz", "quux"]]
for lg_n_features in (7, 9, 11, 16, 22):
n_features = 2 ** lg_n_features
it = (x for x in raw_X) # iterable
h = FeatureHasher(n_features, non_negative=True, input_type="string")
X = h.transform(it)
assert_equal(X.shape[0], len(raw_X))
assert_equal(X.shape[1], n_features)
assert_true(np.all(X.data > 0))
assert_equal(X[0].sum(), 4)
assert_equal(X[1].sum(), 3)
assert_equal(X.nnz, 6)
def test_feature_hasher_pairs():
raw_X = (iter(d.items()) for d in [{"foo": 1, "bar": 2},
{"baz": 3, "quux": 4, "foo": -1}])
h = FeatureHasher(n_features=16, input_type="pair")
x1, x2 = h.transform(raw_X).toarray()
x1_nz = sorted(np.abs(x1[x1 != 0]))
x2_nz = sorted(np.abs(x2[x2 != 0]))
assert_equal([1, 2], x1_nz)
assert_equal([1, 3, 4], x2_nz)
def test_hash_empty_input():
n_features = 16
raw_X = [[], (), iter(range(0))]
h = FeatureHasher(n_features=n_features, input_type="string")
X = h.transform(raw_X)
assert_array_equal(X.A, np.zeros((len(raw_X), n_features)))
def test_hasher_invalid_input():
assert_raises(ValueError, FeatureHasher, input_type="gobbledygook")
assert_raises(ValueError, FeatureHasher, n_features=-1)
assert_raises(ValueError, FeatureHasher, n_features=0)
assert_raises(TypeError, FeatureHasher, n_features='ham')
h = FeatureHasher(n_features=np.uint16(2 ** 6))
assert_raises(ValueError, h.transform, [])
assert_raises(Exception, h.transform, [[5.5]])
assert_raises(Exception, h.transform, [[None]])
def test_hasher_set_params():
# Test delayed input validation in fit (useful for grid search).
hasher = FeatureHasher()
hasher.set_params(n_features=np.inf)
assert_raises(TypeError, hasher.fit)
def test_hasher_zeros():
# Assert that no zeros are materialized in the output.
X = FeatureHasher().transform([{'foo': 0}])
assert_equal(X.data.shape, (0,))
| bsd-3-clause |
jseabold/scikit-learn | sklearn/utils/tests/test_testing.py | 107 | 4210 | import warnings
import unittest
import sys
from nose.tools import assert_raises
from sklearn.utils.testing import (
_assert_less,
_assert_greater,
assert_less_equal,
assert_greater_equal,
assert_warns,
assert_no_warnings,
assert_equal,
set_random_state,
assert_raise_message)
from sklearn.tree import DecisionTreeClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
try:
from nose.tools import assert_less
def test_assert_less():
# Check that the nose implementation of assert_less gives the
# same thing as the scikit's
assert_less(0, 1)
_assert_less(0, 1)
assert_raises(AssertionError, assert_less, 1, 0)
assert_raises(AssertionError, _assert_less, 1, 0)
except ImportError:
pass
try:
from nose.tools import assert_greater
def test_assert_greater():
        # Check that the nose implementation of assert_greater gives the
        # same thing as the scikit's
assert_greater(1, 0)
_assert_greater(1, 0)
assert_raises(AssertionError, assert_greater, 0, 1)
assert_raises(AssertionError, _assert_greater, 0, 1)
except ImportError:
pass
def test_assert_less_equal():
assert_less_equal(0, 1)
assert_less_equal(1, 1)
assert_raises(AssertionError, assert_less_equal, 1, 0)
def test_assert_greater_equal():
assert_greater_equal(1, 0)
assert_greater_equal(1, 1)
assert_raises(AssertionError, assert_greater_equal, 0, 1)
def test_set_random_state():
lda = LinearDiscriminantAnalysis()
tree = DecisionTreeClassifier()
# Linear Discriminant Analysis doesn't have random state: smoke test
set_random_state(lda, 3)
set_random_state(tree, 3)
assert_equal(tree.random_state, 3)
def test_assert_raise_message():
def _raise_ValueError(message):
raise ValueError(message)
def _no_raise():
pass
assert_raise_message(ValueError, "test",
_raise_ValueError, "test")
assert_raises(AssertionError,
assert_raise_message, ValueError, "something else",
_raise_ValueError, "test")
assert_raises(ValueError,
assert_raise_message, TypeError, "something else",
_raise_ValueError, "test")
assert_raises(AssertionError,
assert_raise_message, ValueError, "test",
_no_raise)
# multiple exceptions in a tuple
assert_raises(AssertionError,
assert_raise_message, (ValueError, AttributeError),
"test", _no_raise)
# This class is inspired from numpy 1.7 with an alteration to check
# the reset warning filters after calls to assert_warns.
# This assert_warns behavior is specific to scikit-learn because
#`clean_warning_registry()` is called internally by assert_warns
# and clears all previous filters.
class TestWarns(unittest.TestCase):
def test_warn(self):
def f():
warnings.warn("yo")
return 3
# Test that assert_warns is not impacted by externally set
# filters and is reset internally.
# This is because `clean_warning_registry()` is called internally by
# assert_warns and clears all previous filters.
warnings.simplefilter("ignore", UserWarning)
assert_equal(assert_warns(UserWarning, f), 3)
# Test that the warning registry is empty after assert_warns
assert_equal(sys.modules['warnings'].filters, [])
assert_raises(AssertionError, assert_no_warnings, f)
assert_equal(assert_no_warnings(lambda x: x, 1), 1)
def test_warn_wrong_warning(self):
def f():
warnings.warn("yo", DeprecationWarning)
failed = False
filters = sys.modules['warnings'].filters[:]
try:
try:
# Should raise an AssertionError
assert_warns(UserWarning, f)
failed = True
except AssertionError:
pass
finally:
sys.modules['warnings'].filters = filters
if failed:
raise AssertionError("wrong warning caught by assert_warn")
| bsd-3-clause |
tequa/ammisoft | ammimain/WinPython-64bit-2.7.13.1Zero/python-2.7.13.amd64/Lib/site-packages/matplotlib/backends/__init__.py | 21 | 2487 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import matplotlib
import inspect
import warnings
# ipython relies on interactive_bk being defined here
from matplotlib.rcsetup import interactive_bk
__all__ = ['backend','show','draw_if_interactive',
'new_figure_manager', 'backend_version']
backend = matplotlib.get_backend() # validates, to match all_backends
def pylab_setup():
'return new_figure_manager, draw_if_interactive and show for pylab'
# Import the requested backend into a generic module object
if backend.startswith('module://'):
backend_name = backend[9:]
else:
backend_name = 'backend_'+backend
backend_name = backend_name.lower() # until we banish mixed case
backend_name = 'matplotlib.backends.%s'%backend_name.lower()
    # the last argument specifies whether to use absolute or relative
    # imports; 0 means only perform absolute imports.
backend_mod = __import__(backend_name,
globals(),locals(),[backend_name],0)
# Things we pull in from all backends
new_figure_manager = backend_mod.new_figure_manager
# image backends like pdf, agg or svg do not need to do anything
# for "show" or "draw_if_interactive", so if they are not defined
# by the backend, just do nothing
def do_nothing_show(*args, **kwargs):
frame = inspect.currentframe()
fname = frame.f_back.f_code.co_filename
if fname in ('<stdin>', '<ipython console>'):
warnings.warn("""
Your currently selected backend, '%s' does not support show().
Please select a GUI backend in your matplotlibrc file ('%s')
or with matplotlib.use()""" %
(backend, matplotlib.matplotlib_fname()))
def do_nothing(*args, **kwargs): pass
backend_version = getattr(backend_mod,'backend_version', 'unknown')
show = getattr(backend_mod, 'show', do_nothing_show)
draw_if_interactive = getattr(backend_mod, 'draw_if_interactive', do_nothing)
# Additional imports which only happen for certain backends. This section
# should probably disappear once all backends are uniform.
if backend.lower() in ['wx','wxagg']:
Toolbar = backend_mod.Toolbar
__all__.append('Toolbar')
matplotlib.verbose.report('backend %s version %s' % (backend,backend_version))
return backend_mod, new_figure_manager, draw_if_interactive, show
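# Sketch of how this is typically consumed (matplotlib.pyplot does roughly this):
#     backend_mod, new_figure_manager, draw_if_interactive, show = pylab_setup()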
| bsd-3-clause |
IshankGulati/scikit-learn | examples/gaussian_process/plot_gpr_prior_posterior.py | 104 | 2878 | """
==========================================================================
Illustration of prior and posterior Gaussian process for different kernels
==========================================================================
This example illustrates the prior and posterior of a GPR with different
kernels. Mean, standard deviation, and 10 samples are shown for both prior
and posterior.
"""
print(__doc__)
# Authors: Jan Hendrik Metzen <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import (RBF, Matern, RationalQuadratic,
ExpSineSquared, DotProduct,
ConstantKernel)
kernels = [1.0 * RBF(length_scale=1.0, length_scale_bounds=(1e-1, 10.0)),
1.0 * RationalQuadratic(length_scale=1.0, alpha=0.1),
1.0 * ExpSineSquared(length_scale=1.0, periodicity=3.0,
length_scale_bounds=(0.1, 10.0),
periodicity_bounds=(1.0, 10.0)),
ConstantKernel(0.1, (0.01, 10.0))
* (DotProduct(sigma_0=1.0, sigma_0_bounds=(0.0, 10.0)) ** 2),
1.0 * Matern(length_scale=1.0, length_scale_bounds=(1e-1, 10.0),
nu=1.5)]
for fig_index, kernel in enumerate(kernels):
# Specify Gaussian Process
gp = GaussianProcessRegressor(kernel=kernel)
# Plot prior
plt.figure(fig_index, figsize=(8, 8))
plt.subplot(2, 1, 1)
X_ = np.linspace(0, 5, 100)
y_mean, y_std = gp.predict(X_[:, np.newaxis], return_std=True)
plt.plot(X_, y_mean, 'k', lw=3, zorder=9)
plt.fill_between(X_, y_mean - y_std, y_mean + y_std,
alpha=0.5, color='k')
y_samples = gp.sample_y(X_[:, np.newaxis], 10)
plt.plot(X_, y_samples, lw=1)
plt.xlim(0, 5)
plt.ylim(-3, 3)
plt.title("Prior (kernel: %s)" % kernel, fontsize=12)
# Generate data and fit GP
rng = np.random.RandomState(4)
X = rng.uniform(0, 5, 10)[:, np.newaxis]
y = np.sin((X[:, 0] - 2.5) ** 2)
gp.fit(X, y)
# Plot posterior
plt.subplot(2, 1, 2)
X_ = np.linspace(0, 5, 100)
y_mean, y_std = gp.predict(X_[:, np.newaxis], return_std=True)
plt.plot(X_, y_mean, 'k', lw=3, zorder=9)
plt.fill_between(X_, y_mean - y_std, y_mean + y_std,
alpha=0.5, color='k')
y_samples = gp.sample_y(X_[:, np.newaxis], 10)
plt.plot(X_, y_samples, lw=1)
plt.scatter(X[:, 0], y, c='r', s=50, zorder=10)
plt.xlim(0, 5)
plt.ylim(-3, 3)
plt.title("Posterior (kernel: %s)\n Log-Likelihood: %.3f"
% (gp.kernel_, gp.log_marginal_likelihood(gp.kernel_.theta)),
fontsize=12)
plt.tight_layout()
plt.show()
| bsd-3-clause |
nathanielvarona/airflow | docs/conf.py | 1 | 22928 | # flake8: noqa
# Disable Flake8 because of all the sphinx imports
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Airflow documentation build configuration file, created by
# sphinx-quickstart on Thu Oct 9 20:50:01 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
"""Configuration of Airflow Docs"""
import glob
import json
import os
import sys
from collections import defaultdict
from typing import Any, Dict, List, Optional, Tuple
import yaml
try:
from yaml import CSafeLoader as SafeLoader
except ImportError:
from yaml import SafeLoader # type: ignore[misc]
import airflow
from airflow.configuration import AirflowConfigParser, default_config_yaml
from docs.exts.docs_build.third_party_inventories import ( # pylint: disable=no-name-in-module,wrong-import-order
THIRD_PARTY_INDEXES,
)
sys.path.append(os.path.join(os.path.dirname(__file__), 'exts'))
CONF_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__)))
INVENTORY_CACHE_DIR = os.path.join(CONF_DIR, '_inventory_cache')
ROOT_DIR = os.path.abspath(os.path.join(CONF_DIR, os.pardir))
FOR_PRODUCTION = os.environ.get('AIRFLOW_FOR_PRODUCTION', 'false') == 'true'
# By default (e.g. on RTD), build docs for `airflow` package
PACKAGE_NAME = os.environ.get('AIRFLOW_PACKAGE_NAME', 'apache-airflow')
PACKAGE_DIR: Optional[str]
if PACKAGE_NAME == 'apache-airflow':
PACKAGE_DIR = os.path.join(ROOT_DIR, 'airflow')
PACKAGE_VERSION = airflow.__version__
elif PACKAGE_NAME.startswith('apache-airflow-providers-'):
from provider_yaml_utils import load_package_data # pylint: disable=no-name-in-module
ALL_PROVIDER_YAMLS = load_package_data()
try:
CURRENT_PROVIDER = next(
provider_yaml
for provider_yaml in ALL_PROVIDER_YAMLS
if provider_yaml['package-name'] == PACKAGE_NAME
)
except StopIteration:
raise Exception(f"Could not find provider.yaml file for package: {PACKAGE_NAME}")
PACKAGE_DIR = CURRENT_PROVIDER['package-dir']
PACKAGE_VERSION = 'devel'
elif PACKAGE_NAME == 'helm-chart':
PACKAGE_DIR = os.path.join(ROOT_DIR, 'chart')
PACKAGE_VERSION = 'devel' # TODO do we care? probably
else:
PACKAGE_DIR = None
PACKAGE_VERSION = 'devel'
# Adds to environment variables for easy access from other plugins like airflow_intersphinx.
os.environ['AIRFLOW_PACKAGE_NAME'] = PACKAGE_NAME
if PACKAGE_DIR:
os.environ['AIRFLOW_PACKAGE_DIR'] = PACKAGE_DIR
os.environ['AIRFLOW_PACKAGE_VERSION'] = PACKAGE_VERSION
# Hack to allow changing for piece of the code to behave differently while
# the docs are being built. The main objective was to alter the
# behavior of the utils.apply_default that was hiding function headers
os.environ['BUILDING_AIRFLOW_DOCS'] = 'TRUE'
# == Sphinx configuration ======================================================
# -- Project information -------------------------------------------------------
# See: https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information
# General information about the project.
project = PACKAGE_NAME
# # The version info for the project you're documenting
version = PACKAGE_VERSION
# The full version, including alpha/beta/rc tags.
release = PACKAGE_VERSION
rst_epilog = f"""
.. |version| replace:: {version}
"""
# -- General configuration -----------------------------------------------------
# See: https://www.sphinx-doc.org/en/master/usage/configuration.html
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'provider_init_hack',
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
'sphinxarg.ext',
'sphinx.ext.intersphinx',
'exampleinclude',
'docroles',
'removemarktransform',
'sphinx_copybutton',
'airflow_intersphinx',
"sphinxcontrib.spelling",
'sphinx_airflow_theme',
'redirects',
'substitution_extensions',
]
if PACKAGE_NAME == 'apache-airflow':
extensions.extend(
[
'sphinxcontrib.jinja',
'sphinx.ext.graphviz',
            'sphinxcontrib.httpdomain',
'extra_files_with_substitutions',
# First, generate redoc
'sphinxcontrib.redoc',
# Second, update redoc script
"sphinx_script_update",
]
)
if PACKAGE_NAME == "apache-airflow-providers":
extensions.extend(
[
'operators_and_hooks_ref',
'providers_packages_ref',
]
)
elif PACKAGE_NAME == "helm-chart":
extensions.append("sphinxcontrib.jinja")
elif PACKAGE_NAME == "docker-stack":
# No extra extensions
pass
else:
extensions.append('autoapi.extension')
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns: List[str]
if PACKAGE_NAME == 'apache-airflow':
exclude_patterns = [
# We only link to selected subpackages.
'_api/airflow/index.rst',
'README.rst',
]
elif PACKAGE_NAME.startswith('apache-airflow-providers-'):
exclude_patterns = ['operators/_partials']
else:
exclude_patterns = []
def _get_rst_filepath_from_path(filepath: str):
if os.path.isdir(filepath):
result = filepath
elif os.path.isfile(filepath) and filepath.endswith('/__init__.py'):
result = filepath.rpartition("/")[0]
else:
result = filepath.rpartition(".")[0]
result += "/index.rst"
result = f"_api/{os.path.relpath(result, ROOT_DIR)}"
return result
if PACKAGE_NAME == 'apache-airflow':
# Exclude top-level packages
# do not exclude these top-level modules from the doc build:
_allowed_top_level = ("exceptions.py",)
for path in glob.glob(f"{ROOT_DIR}/airflow/*"):
name = os.path.basename(path)
if os.path.isfile(path) and not path.endswith(_allowed_top_level):
exclude_patterns.append(f"_api/airflow/{name.rpartition('.')[0]}")
browsable_packages = ["operators", "hooks", "sensors", "providers", "executors", "models", "secrets"]
if os.path.isdir(path) and name not in browsable_packages:
exclude_patterns.append(f"_api/airflow/{name}")
else:
exclude_patterns.extend(
_get_rst_filepath_from_path(f) for f in glob.glob(f"{PACKAGE_DIR}/**/example_dags/**/*.py")
)
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# If true, keep warnings as "system message" paragraphs in the built documents.
keep_warnings = True
# -- Options for HTML output ---------------------------------------------------
# See: https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_airflow_theme'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
if PACKAGE_NAME == 'apache-airflow':
html_title = "Airflow Documentation"
else:
html_title = f"{PACKAGE_NAME} Documentation"
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = ""
# given, this must be the name of an image file (path relative to the
# configuration directory) that is the favicon of the docs. Modern browsers
# use this as the icon for tabs, windows and bookmarks. It should be a
# Windows-style icon file (.ico), which is 16x16 or 32x32 pixels large.
html_favicon = "../airflow/www/static/pin_32.png"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
if PACKAGE_NAME == 'apache-airflow':
html_static_path = ['apache-airflow/static']
else:
html_static_path = []
# A list of JavaScript filename. The entry must be a filename string or a
# tuple containing the filename string and the attributes dictionary. The
# filename must be relative to the html_static_path, or a full URI with
# scheme like http://example.org/script.js.
if PACKAGE_NAME == 'apache-airflow':
html_js_files = ['jira-links.js']
else:
html_js_files = []
if PACKAGE_NAME == 'apache-airflow':
html_extra_path = [
f"{ROOT_DIR}/docs/apache-airflow/start/airflow.sh",
]
html_extra_with_substitutions = [
f"{ROOT_DIR}/docs/apache-airflow/start/docker-compose.yaml",
]
manual_substitutions_in_generated_html = [
"installation.html",
]
# -- Theme configuration -------------------------------------------------------
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'**': [
'version-selector.html',
'searchbox.html',
'globaltoc.html',
]
if FOR_PRODUCTION
else [
'searchbox.html',
'globaltoc.html',
]
}
# If false, no index is generated.
html_use_index = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = False
# Theme configuration
html_theme_options: Dict[str, Any] = {
'hide_website_buttons': True,
}
if FOR_PRODUCTION:
html_theme_options['navbar_links'] = [
{'href': '/community/', 'text': 'Community'},
{'href': '/meetups/', 'text': 'Meetups'},
{'href': '/docs/', 'text': 'Documentation'},
{'href': '/use-cases/', 'text': 'Use-cases'},
{'href': '/announcements/', 'text': 'Announcements'},
{'href': '/blog/', 'text': 'Blog'},
{'href': '/ecosystem/', 'text': 'Ecosystem'},
]
# A dictionary of values to pass into the template engine’s context for all pages.
html_context = {
# Google Analytics ID.
# For more information look at:
# https://github.com/readthedocs/sphinx_rtd_theme/blob/master/sphinx_rtd_theme/layout.html#L222-L232
'theme_analytics_id': 'UA-140539454-1',
# Variables used to build a button for editing the source code
#
# The path is created according to the following template:
#
# https://{{ github_host|default("github.com") }}/{{ github_user }}/{{ github_repo }}/
# {{ theme_vcs_pageview_mode|default("blob") }}/{{ github_version }}{{ conf_py_path }}
# {{ pagename }}{{ suffix }}
#
# More information:
# https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/doc_builder/templates/doc_builder/conf.py.tmpl#L100-L103
# https://github.com/readthedocs/sphinx_rtd_theme/blob/master/sphinx_rtd_theme/breadcrumbs.html#L45
# https://github.com/apache/airflow-site/blob/91f760c/sphinx_airflow_theme/sphinx_airflow_theme/suggest_change_button.html#L36-L40
#
'theme_vcs_pageview_mode': 'edit',
'conf_py_path': f'/docs/{PACKAGE_NAME}/',
'github_user': 'apache',
'github_repo': 'airflow',
'github_version': 'devel',
'display_github': 'devel',
'suffix': '.rst',
}
# == Extensions configuration ==================================================
# -- Options for sphinxcontrib.jinjac ------------------------------------------
# See: https://github.com/tardyp/sphinx-jinja
# Jinja context
if PACKAGE_NAME == 'apache-airflow':
deprecated_options: Dict[str, Dict[str, Tuple[str, str, str]]] = defaultdict(dict)
for (section, key), (
(deprecated_section, deprecated_key, since_version)
) in AirflowConfigParser.deprecated_options.items():
deprecated_options[deprecated_section][deprecated_key] = section, key, since_version
jinja_contexts = {
'config_ctx': {"configs": default_config_yaml(), "deprecated_options": deprecated_options},
'quick_start_ctx': {
'doc_root_url': f'https://airflow.apache.org/docs/apache-airflow/{PACKAGE_VERSION}/'
if FOR_PRODUCTION
else (
'http://apache-airflow-docs.s3-website.eu-central-1.amazonaws.com/docs/apache-airflow/latest/'
)
},
}
elif PACKAGE_NAME.startswith('apache-airflow-providers-'):
def _load_config():
templates_dir = os.path.join(PACKAGE_DIR, 'config_templates')
file_path = os.path.join(templates_dir, "config.yml")
if not os.path.exists(file_path):
return {}
with open(file_path) as f:
return yaml.load(f, SafeLoader)
config = _load_config()
if config:
jinja_contexts = {'config_ctx': {"configs": config}}
extensions.append('sphinxcontrib.jinja')
elif PACKAGE_NAME == 'helm-chart':
def _str_representer(dumper, data):
style = "|" if "\n" in data else None # show as a block scalar if we have more than 1 line
return dumper.represent_scalar("tag:yaml.org,2002:str", data, style)
yaml.add_representer(str, _str_representer)
def _format_default(value: Any) -> str:
if value == "":
return '""'
if value is None:
return '~'
return str(value)
def _format_examples(param_name: str, schema: dict) -> Optional[str]:
if not schema.get("examples"):
return None
# Nicer to have the parameter name shown as well
out = ""
for ex in schema["examples"]:
if schema["type"] == "array":
ex = [ex]
out += yaml.dump({param_name: ex})
return out
def _get_params(root_schema: dict, prefix: str = "", default_section: str = "") -> List[dict]:
"""
        Given a jsonschema object's properties dict, return a flattened list of all
        parameters from that object and any nested objects.
"""
# TODO: handle arrays? probably missing more cases too
out = []
for param_name, schema in root_schema.items():
prefixed_name = f"{prefix}.{param_name}" if prefix else param_name
section_name = schema["x-docsSection"] if "x-docsSection" in schema else default_section
if section_name and schema["description"] and "default" in schema:
out.append(
{
"section": section_name,
"name": prefixed_name,
"description": schema["description"],
"default": _format_default(schema["default"]),
"examples": _format_examples(param_name, schema),
}
)
if schema.get("properties"):
out += _get_params(schema["properties"], prefixed_name, section_name)
return out
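    # Illustrative note (hypothetical schema, not the real chart schema): a nested
    # property such as {"images": {..., "properties": {"airflow": {...}}}} is emitted
    # with the dotted name "images.airflow" and inherits its parent's "x-docsSection".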
schema_file = os.path.join(PACKAGE_DIR, "values.schema.json") # type: ignore
with open(schema_file) as config_file:
chart_schema = json.load(config_file)
params = _get_params(chart_schema["properties"])
# Now, split into sections
sections: Dict[str, List[Dict[str, str]]] = {}
for param in params:
if param["section"] not in sections:
sections[param["section"]] = []
sections[param["section"]].append(param)
# and order each section
for section in sections.values(): # type: ignore
section.sort(key=lambda i: i["name"]) # type: ignore
# and finally order the sections!
ordered_sections = []
for name in chart_schema["x-docsSectionOrder"]:
if name not in sections:
raise ValueError(f"Unable to find any parameters for section: {name}")
ordered_sections.append({"name": name, "params": sections.pop(name)})
if sections:
raise ValueError(f"Found section(s) which were not in `section_order`: {list(sections.keys())}")
jinja_contexts = {"params_ctx": {"sections": ordered_sections}}
# -- Options for sphinx.ext.autodoc --------------------------------------------
# See: https://www.sphinx-doc.org/en/master/usage/extensions/autodoc.html
# This value contains a list of modules to be mocked up. This is useful when some external dependencies
# are not met at build time and break the building process.
autodoc_mock_imports = [
'MySQLdb',
'adal',
'analytics',
'azure',
'azure.cosmos',
'azure.datalake',
'azure.kusto',
'azure.mgmt',
'boto3',
'botocore',
'bson',
'cassandra',
'celery',
'cloudant',
'cryptography',
'cx_Oracle',
'datadog',
'distributed',
'docker',
'google',
'google_auth_httplib2',
'googleapiclient',
'grpc',
'hdfs',
'httplib2',
'jaydebeapi',
'jenkins',
'jira',
'kubernetes',
'msrestazure',
'pandas',
'pandas_gbq',
'paramiko',
'pinotdb',
'psycopg2',
'pydruid',
'pyhive',
'pyhive',
'pymongo',
'pymssql',
'pysftp',
'qds_sdk',
'redis',
'simple_salesforce',
'slack_sdk',
'smbclient',
'snowflake',
'sshtunnel',
'telegram',
'tenacity',
'vertica_python',
'winrm',
'zdesk',
]
# The default options for autodoc directives. They are applied to all autodoc directives automatically.
autodoc_default_options = {'show-inheritance': True, 'members': True}
# -- Options for sphinx.ext.intersphinx ----------------------------------------
# See: https://www.sphinx-doc.org/en/master/usage/extensions/intersphinx.html
# This config value contains names of other projects that should
# be linked to in this documentation.
# Inventories are only downloaded once by docs/exts/docs_build/fetch_inventories.py.
intersphinx_mapping = {
pkg_name: (f"{THIRD_PARTY_INDEXES[pkg_name]}/", (f'{INVENTORY_CACHE_DIR}/{pkg_name}/objects.inv',))
for pkg_name in [
'boto3',
'celery',
'docker',
'hdfs',
'jinja2',
'mongodb',
'pandas',
'python',
'requests',
'sqlalchemy',
]
}
if PACKAGE_NAME in ('apache-airflow-providers-google', 'apache-airflow'):
intersphinx_mapping.update(
{
pkg_name: (
f"{THIRD_PARTY_INDEXES[pkg_name]}/",
(f'{INVENTORY_CACHE_DIR}/{pkg_name}/objects.inv',),
)
for pkg_name in [
'google-api-core',
'google-cloud-automl',
'google-cloud-bigquery',
'google-cloud-bigquery-datatransfer',
'google-cloud-bigquery-storage',
'google-cloud-bigtable',
'google-cloud-container',
'google-cloud-core',
'google-cloud-datacatalog',
'google-cloud-datastore',
'google-cloud-dlp',
'google-cloud-kms',
'google-cloud-language',
'google-cloud-monitoring',
'google-cloud-pubsub',
'google-cloud-redis',
'google-cloud-spanner',
'google-cloud-speech',
'google-cloud-storage',
'google-cloud-tasks',
'google-cloud-texttospeech',
'google-cloud-translate',
'google-cloud-videointelligence',
'google-cloud-vision',
]
}
)
# -- Options for sphinx.ext.viewcode -------------------------------------------
# See: https://www.sphinx-doc.org/es/master/usage/extensions/viewcode.html
# If this is True, viewcode extension will emit viewcode-follow-imported event to resolve the name of
# the module by other extensions. The default is True.
viewcode_follow_imported_members = True
# -- Options for sphinx-autoapi ------------------------------------------------
# See: https://sphinx-autoapi.readthedocs.io/en/latest/config.html
# Paths (relative or absolute) to the source code that you wish to generate
# your API documentation from.
autoapi_dirs = [
PACKAGE_DIR,
]
# A directory that has user-defined templates to override our default templates.
if PACKAGE_NAME == 'apache-airflow':
autoapi_template_dir = 'autoapi_templates'
# A list of patterns to ignore when finding files
autoapi_ignore = [
'airflow/configuration/',
'*/example_dags/*',
'*/_internal*',
'*/node_modules/*',
'*/migrations/*',
'*/contrib/*',
]
if PACKAGE_NAME == 'apache-airflow':
autoapi_ignore.append('*/airflow/providers/*')
# Keep the AutoAPI generated files on the filesystem after the run.
# Useful for debugging.
autoapi_keep_files = True
# Relative path to output the AutoAPI files into. This can also be used to place the generated documentation
# anywhere in your documentation hierarchy.
autoapi_root = '_api'
# Whether to insert the generated documentation into the TOC tree. If this is False, the default AutoAPI
# index page is not generated and you will need to include the generated documentation in a
# TOC tree entry yourself.
autoapi_add_toctree_entry = False
# -- Options for ext.exampleinclude --------------------------------------------
exampleinclude_sourceroot = os.path.abspath('..')
# -- Options for ext.redirects -------------------------------------------------
redirects_file = 'redirects.txt'
# -- Options for sphinxcontrib-spelling ----------------------------------------
spelling_word_list_filename = [os.path.join(CONF_DIR, 'spelling_wordlist.txt')]
# -- Options for sphinxcontrib.redoc -------------------------------------------
# See: https://sphinxcontrib-redoc.readthedocs.io/en/stable/
if PACKAGE_NAME == 'apache-airflow':
OPENAPI_FILE = os.path.join(
os.path.dirname(__file__), "..", "airflow", "api_connexion", "openapi", "v1.yaml"
)
redoc = [
{
'name': 'Airflow REST API',
'page': 'stable-rest-api-ref',
'spec': OPENAPI_FILE,
'opts': {
'hide-hostname': True,
'no-auto-auth': True,
},
},
]
# Options for script updater
redoc_script_url = "https://cdn.jsdelivr.net/npm/[email protected]/bundles/redoc.standalone.js"
| apache-2.0 |
kalvdans/scipy | scipy/interpolate/_cubic.py | 37 | 29281 | """Interpolation algorithms using piecewise cubic polynomials."""
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy._lib.six import string_types
from . import BPoly, PPoly
from .polyint import _isscalar
from scipy._lib._util import _asarray_validated
from scipy.linalg import solve_banded, solve
__all__ = ["PchipInterpolator", "pchip_interpolate", "pchip",
"Akima1DInterpolator", "CubicSpline"]
class PchipInterpolator(BPoly):
r"""PCHIP 1-d monotonic cubic interpolation.
`x` and `y` are arrays of values used to approximate some function f,
with ``y = f(x)``. The interpolant uses monotonic cubic splines
to find the value of new points. (PCHIP stands for Piecewise Cubic
Hermite Interpolating Polynomial).
Parameters
----------
x : ndarray
A 1-D array of monotonically increasing real values. `x` cannot
include duplicate values (otherwise f is overspecified)
y : ndarray
A 1-D array of real values. `y`'s length along the interpolation
axis must be equal to the length of `x`. If N-D array, use `axis`
parameter to select correct axis.
axis : int, optional
Axis in the y array corresponding to the x-coordinate values.
extrapolate : bool, optional
Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs.
Methods
-------
__call__
derivative
antiderivative
roots
See Also
--------
Akima1DInterpolator
CubicSpline
BPoly
Notes
-----
The interpolator preserves monotonicity in the interpolation data and does
not overshoot if the data is not smooth.
The first derivatives are guaranteed to be continuous, but the second
derivatives may jump at :math:`x_k`.
Determines the derivatives at the points :math:`x_k`, :math:`f'_k`,
by using PCHIP algorithm [1]_.
Let :math:`h_k = x_{k+1} - x_k`, and :math:`d_k = (y_{k+1} - y_k) / h_k`
are the slopes at internal points :math:`x_k`.
If the signs of :math:`d_k` and :math:`d_{k-1}` are different or either of
them equals zero, then :math:`f'_k = 0`. Otherwise, it is given by the
weighted harmonic mean
.. math::
\frac{w_1 + w_2}{f'_k} = \frac{w_1}{d_{k-1}} + \frac{w_2}{d_k}
where :math:`w_1 = 2 h_k + h_{k-1}` and :math:`w_2 = h_k + 2 h_{k-1}`.
The end slopes are set using a one-sided scheme [2]_.
References
----------
.. [1] F. N. Fritsch and R. E. Carlson, Monotone Piecewise Cubic Interpolation,
SIAM J. Numer. Anal., 17(2), 238 (1980).
:doi:`10.1137/0717021`.
.. [2] see, e.g., C. Moler, Numerical Computing with Matlab, 2004.
:doi:`10.1137/1.9780898717952`
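    Examples
    --------
    A minimal usage sketch (the data values here are purely illustrative):
    >>> import numpy as np
    >>> from scipy.interpolate import PchipInterpolator
    >>> x = np.arange(10)
    >>> y = np.sin(x)
    >>> interp = PchipInterpolator(x, y)
    >>> xnew = np.linspace(0, 9, 101)
    >>> ynew = interp(xnew)                 # interpolated values
    >>> dydx = interp.derivative()(xnew)    # values of the first derivative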
"""
def __init__(self, x, y, axis=0, extrapolate=None):
x = _asarray_validated(x, check_finite=False, as_inexact=True)
y = _asarray_validated(y, check_finite=False, as_inexact=True)
axis = axis % y.ndim
xp = x.reshape((x.shape[0],) + (1,)*(y.ndim-1))
yp = np.rollaxis(y, axis)
dk = self._find_derivatives(xp, yp)
data = np.hstack((yp[:, None, ...], dk[:, None, ...]))
_b = BPoly.from_derivatives(x, data, orders=None)
super(PchipInterpolator, self).__init__(_b.c, _b.x,
extrapolate=extrapolate)
self.axis = axis
def roots(self):
"""
Return the roots of the interpolated function.
"""
return (PPoly.from_bernstein_basis(self)).roots()
@staticmethod
def _edge_case(h0, h1, m0, m1):
# one-sided three-point estimate for the derivative
d = ((2*h0 + h1)*m0 - h0*m1) / (h0 + h1)
# try to preserve shape
mask = np.sign(d) != np.sign(m0)
mask2 = (np.sign(m0) != np.sign(m1)) & (np.abs(d) > 3.*np.abs(m0))
mmm = (~mask) & mask2
d[mask] = 0.
d[mmm] = 3.*m0[mmm]
return d
@staticmethod
def _find_derivatives(x, y):
# Determine the derivatives at the points y_k, d_k, by using
        # the PCHIP algorithm:
# We choose the derivatives at the point x_k by
# Let m_k be the slope of the kth segment (between k and k+1)
# If m_k=0 or m_{k-1}=0 or sgn(m_k) != sgn(m_{k-1}) then d_k == 0
# else use weighted harmonic mean:
# w_1 = 2h_k + h_{k-1}, w_2 = h_k + 2h_{k-1}
# 1/d_k = 1/(w_1 + w_2)*(w_1 / m_k + w_2 / m_{k-1})
# where h_k is the spacing between x_k and x_{k+1}
y_shape = y.shape
if y.ndim == 1:
# So that _edge_case doesn't end up assigning to scalars
x = x[:, None]
y = y[:, None]
hk = x[1:] - x[:-1]
mk = (y[1:] - y[:-1]) / hk
if y.shape[0] == 2:
# edge case: only have two points, use linear interpolation
dk = np.zeros_like(y)
dk[0] = mk
dk[1] = mk
return dk.reshape(y_shape)
smk = np.sign(mk)
condition = (smk[1:] != smk[:-1]) | (mk[1:] == 0) | (mk[:-1] == 0)
w1 = 2*hk[1:] + hk[:-1]
w2 = hk[1:] + 2*hk[:-1]
# values where division by zero occurs will be excluded
# by 'condition' afterwards
with np.errstate(divide='ignore'):
whmean = (w1/mk[:-1] + w2/mk[1:]) / (w1 + w2)
dk = np.zeros_like(y)
dk[1:-1][condition] = 0.0
dk[1:-1][~condition] = 1.0 / whmean[~condition]
# special case endpoints, as suggested in
# Cleve Moler, Numerical Computing with MATLAB, Chap 3.4
dk[0] = PchipInterpolator._edge_case(hk[0], hk[1], mk[0], mk[1])
dk[-1] = PchipInterpolator._edge_case(hk[-1], hk[-2], mk[-1], mk[-2])
return dk.reshape(y_shape)
def pchip_interpolate(xi, yi, x, der=0, axis=0):
"""
Convenience function for pchip interpolation.
xi and yi are arrays of values used to approximate some function f,
with ``yi = f(xi)``. The interpolant uses monotonic cubic splines
to find the value of new points x and the derivatives there.
See `PchipInterpolator` for details.
Parameters
----------
xi : array_like
A sorted list of x-coordinates, of length N.
yi : array_like
A 1-D array of real values. `yi`'s length along the interpolation
axis must be equal to the length of `xi`. If N-D array, use axis
parameter to select correct axis.
x : scalar or array_like
Of length M.
der : int or list, optional
Derivatives to extract. The 0-th derivative can be included to
return the function value.
axis : int, optional
Axis in the yi array corresponding to the x-coordinate values.
See Also
--------
PchipInterpolator
Returns
-------
y : scalar or array_like
The result, of length R or length M or M by R.
"""
P = PchipInterpolator(xi, yi, axis=axis)
if der == 0:
return P(x)
elif _isscalar(der):
return P.derivative(der)(x)
else:
return [P.derivative(nu)(x) for nu in der]
# Backwards compatibility
pchip = PchipInterpolator
class Akima1DInterpolator(PPoly):
"""
Akima interpolator
Fit piecewise cubic polynomials, given vectors x and y. The interpolation
method by Akima uses a continuously differentiable sub-spline built from
piecewise cubic polynomials. The resultant curve passes through the given
data points and will appear smooth and natural.
Parameters
----------
x : ndarray, shape (m, )
1-D array of monotonically increasing real values.
y : ndarray, shape (m, ...)
N-D array of real values. The length of `y` along the first axis must
be equal to the length of `x`.
axis : int, optional
Specifies the axis of `y` along which to interpolate. Interpolation
defaults to the first axis of `y`.
Methods
-------
__call__
derivative
antiderivative
roots
See Also
--------
PchipInterpolator
CubicSpline
PPoly
Notes
-----
.. versionadded:: 0.14
Use only for precise data, as the fitted curve passes through the given
points exactly. This routine is useful for drawing a pleasingly smooth
curve through a few given points.
References
----------
.. [1] A new method of interpolation and smooth curve fitting based
on local procedures. Hiroshi Akima, J. ACM, October 1970, 17(4),
589-602.
"""
def __init__(self, x, y, axis=0):
# Original implementation in MATLAB by N. Shamsundar (BSD licensed), see
# http://www.mathworks.de/matlabcentral/fileexchange/1814-akima-interpolation
x, y = map(np.asarray, (x, y))
axis = axis % y.ndim
if np.any(np.diff(x) < 0.):
raise ValueError("x must be strictly ascending")
if x.ndim != 1:
raise ValueError("x must be 1-dimensional")
if x.size < 2:
raise ValueError("at least 2 breakpoints are needed")
if x.size != y.shape[axis]:
raise ValueError("x.shape must equal y.shape[%s]" % axis)
# move interpolation axis to front
y = np.rollaxis(y, axis)
# determine slopes between breakpoints
m = np.empty((x.size + 3, ) + y.shape[1:])
dx = np.diff(x)
dx = dx[(slice(None), ) + (None, ) * (y.ndim - 1)]
m[2:-2] = np.diff(y, axis=0) / dx
# add two additional points on the left ...
m[1] = 2. * m[2] - m[3]
m[0] = 2. * m[1] - m[2]
# ... and on the right
m[-2] = 2. * m[-3] - m[-4]
m[-1] = 2. * m[-2] - m[-3]
# if m1 == m2 != m3 == m4, the slope at the breakpoint is not defined.
# This is the fill value:
t = .5 * (m[3:] + m[:-3])
# get the denominator of the slope t
dm = np.abs(np.diff(m, axis=0))
f1 = dm[2:]
f2 = dm[:-2]
f12 = f1 + f2
# This is the mask of where the slope at the breakpoint is defined:
ind = np.nonzero(f12 > 1e-9 * np.max(f12))
x_ind, y_ind = ind[0], ind[1:]
# Set the slope at breakpoint
t[ind] = (f1[ind] * m[(x_ind + 1,) + y_ind] +
f2[ind] * m[(x_ind + 2,) + y_ind]) / f12[ind]
# calculate the higher order coefficients
c = (3. * m[2:-2] - 2. * t[:-1] - t[1:]) / dx
d = (t[:-1] + t[1:] - 2. * m[2:-2]) / dx ** 2
coeff = np.zeros((4, x.size - 1) + y.shape[1:])
coeff[3] = y[:-1]
coeff[2] = t[:-1]
coeff[1] = c
coeff[0] = d
super(Akima1DInterpolator, self).__init__(coeff, x, extrapolate=False)
self.axis = axis
def extend(self, c, x, right=True):
raise NotImplementedError("Extending a 1D Akima interpolator is not "
"yet implemented")
# These are inherited from PPoly, but they do not produce an Akima
# interpolator. Hence stub them out.
@classmethod
def from_spline(cls, tck, extrapolate=None):
raise NotImplementedError("This method does not make sense for "
"an Akima interpolator.")
@classmethod
def from_bernstein_basis(cls, bp, extrapolate=None):
raise NotImplementedError("This method does not make sense for "
"an Akima interpolator.")
class CubicSpline(PPoly):
"""Cubic spline data interpolator.
Interpolate data with a piecewise cubic polynomial which is twice
continuously differentiable [1]_. The result is represented as a `PPoly`
instance with breakpoints matching the given data.
Parameters
----------
x : array_like, shape (n,)
1-d array containing values of the independent variable.
Values must be real, finite and in strictly increasing order.
y : array_like
Array containing values of the dependent variable. It can have
arbitrary number of dimensions, but the length along `axis` (see below)
must match the length of `x`. Values must be finite.
axis : int, optional
Axis along which `y` is assumed to be varying. Meaning that for
``x[i]`` the corresponding values are ``np.take(y, i, axis=axis)``.
Default is 0.
bc_type : string or 2-tuple, optional
Boundary condition type. Two additional equations, given by the
boundary conditions, are required to determine all coefficients of
polynomials on each segment [2]_.
If `bc_type` is a string, then the specified condition will be applied
at both ends of a spline. Available conditions are:
* 'not-a-knot' (default): The first and second segment at a curve end
are the same polynomial. It is a good default when there is no
information on boundary conditions.
* 'periodic': The interpolated function is assumed to be periodic
of period ``x[-1] - x[0]``. The first and last value of `y` must be
identical: ``y[0] == y[-1]``. This boundary condition will result in
``y'[0] == y'[-1]`` and ``y''[0] == y''[-1]``.
* 'clamped': The first derivative at curve ends is zero. Assuming
a 1D `y`, ``bc_type=((1, 0.0), (1, 0.0))`` is the same condition.
* 'natural': The second derivative at curve ends is zero. Assuming
a 1D `y`, ``bc_type=((2, 0.0), (2, 0.0))`` is the same condition.
If `bc_type` is a 2-tuple, the first and the second value will be
applied at the curve start and end respectively. The tuple values can
be one of the previously mentioned strings (except 'periodic') or a
tuple `(order, deriv_values)`, which allows specifying arbitrary
derivatives at curve ends:
* `order`: the derivative order, 1 or 2.
* `deriv_value`: array_like containing derivative values, shape must
be the same as `y`, excluding `axis` dimension. For example, if `y`
is 1D, then `deriv_value` must be a scalar. If `y` is 3D with the
shape (n0, n1, n2) and axis=2, then `deriv_value` must be 2D
and have the shape (n0, n1).
extrapolate : {bool, 'periodic', None}, optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs. If 'periodic',
periodic extrapolation is used. If None (default), `extrapolate` is
set to 'periodic' for ``bc_type='periodic'`` and to True otherwise.
Attributes
----------
x : ndarray, shape (n,)
Breakpoints. The same `x` which was passed to the constructor.
c : ndarray, shape (4, n-1, ...)
Coefficients of the polynomials on each segment. The trailing
dimensions match the dimensions of `y`, excluding `axis`. For example,
if `y` is 1-d, then ``c[k, i]`` is a coefficient for
``(x-x[i])**(3-k)`` on the segment between ``x[i]`` and ``x[i+1]``.
axis : int
Interpolation axis. The same `axis` which was passed to the
constructor.
Methods
-------
__call__
derivative
antiderivative
integrate
roots
See Also
--------
Akima1DInterpolator
PchipInterpolator
PPoly
Notes
-----
Parameters `bc_type` and `extrapolate` work independently, i.e. the former
controls only construction of a spline, and the latter only evaluation.
When a boundary condition is 'not-a-knot' and n = 2, it is replaced by
a condition that the first derivative is equal to the linear interpolant
slope. When both boundary conditions are 'not-a-knot' and n = 3, the
solution is sought as a parabola passing through given points.
When 'not-a-knot' boundary conditions are applied to both ends, the
resulting spline will be the same as returned by `splrep` (with ``s=0``)
and `InterpolatedUnivariateSpline`, but these two methods use a
representation in B-spline basis.
.. versionadded:: 0.18.0
Examples
--------
In this example the cubic spline is used to interpolate a sampled sinusoid.
You can see that the spline continuity property holds for the first and
second derivatives and is violated only for the third derivative.
>>> from scipy.interpolate import CubicSpline
>>> import matplotlib.pyplot as plt
>>> x = np.arange(10)
>>> y = np.sin(x)
>>> cs = CubicSpline(x, y)
>>> xs = np.arange(-0.5, 9.6, 0.1)
>>> plt.figure(figsize=(6.5, 4))
>>> plt.plot(x, y, 'o', label='data')
>>> plt.plot(xs, np.sin(xs), label='true')
>>> plt.plot(xs, cs(xs), label="S")
>>> plt.plot(xs, cs(xs, 1), label="S'")
>>> plt.plot(xs, cs(xs, 2), label="S''")
>>> plt.plot(xs, cs(xs, 3), label="S'''")
>>> plt.xlim(-0.5, 9.5)
>>> plt.legend(loc='lower left', ncol=2)
>>> plt.show()
In the second example, the unit circle is interpolated with a spline. A
periodic boundary condition is used. You can see that the first derivative
values, ds/dx=0, ds/dy=1 at the periodic point (1, 0) are correctly
computed. Note that a circle cannot be exactly represented by a cubic
spline. To increase precision, more breakpoints would be required.
>>> theta = 2 * np.pi * np.linspace(0, 1, 5)
>>> y = np.c_[np.cos(theta), np.sin(theta)]
>>> cs = CubicSpline(theta, y, bc_type='periodic')
>>> print("ds/dx={:.1f} ds/dy={:.1f}".format(cs(0, 1)[0], cs(0, 1)[1]))
ds/dx=0.0 ds/dy=1.0
>>> xs = 2 * np.pi * np.linspace(0, 1, 100)
>>> plt.figure(figsize=(6.5, 4))
>>> plt.plot(y[:, 0], y[:, 1], 'o', label='data')
>>> plt.plot(np.cos(xs), np.sin(xs), label='true')
>>> plt.plot(cs(xs)[:, 0], cs(xs)[:, 1], label='spline')
>>> plt.axes().set_aspect('equal')
>>> plt.legend(loc='center')
>>> plt.show()
The third example is the interpolation of a polynomial y = x**3 on the
interval 0 <= x <= 1. A cubic spline can represent this function exactly.
To achieve that we need to specify values and first derivatives at
endpoints of the interval. Note that y' = 3 * x**2 and thus y'(0) = 0 and
y'(1) = 3.
>>> cs = CubicSpline([0, 1], [0, 1], bc_type=((1, 0), (1, 3)))
>>> x = np.linspace(0, 1)
>>> np.allclose(x**3, cs(x))
True
References
----------
.. [1] `Cubic Spline Interpolation
<https://en.wikiversity.org/wiki/Cubic_Spline_Interpolation>`_
on Wikiversity.
.. [2] Carl de Boor, "A Practical Guide to Splines", Springer-Verlag, 1978.
"""
def __init__(self, x, y, axis=0, bc_type='not-a-knot', extrapolate=None):
x, y = map(np.asarray, (x, y))
if np.issubdtype(x.dtype, np.complexfloating):
raise ValueError("`x` must contain real values.")
if np.issubdtype(y.dtype, np.complexfloating):
dtype = complex
else:
dtype = float
y = y.astype(dtype, copy=False)
axis = axis % y.ndim
if x.ndim != 1:
raise ValueError("`x` must be 1-dimensional.")
if x.shape[0] < 2:
raise ValueError("`x` must contain at least 2 elements.")
if x.shape[0] != y.shape[axis]:
raise ValueError("The length of `y` along `axis`={0} doesn't "
"match the length of `x`".format(axis))
if not np.all(np.isfinite(x)):
raise ValueError("`x` must contain only finite values.")
if not np.all(np.isfinite(y)):
raise ValueError("`y` must contain only finite values.")
dx = np.diff(x)
if np.any(dx <= 0):
raise ValueError("`x` must be strictly increasing sequence.")
n = x.shape[0]
y = np.rollaxis(y, axis)
bc, y = self._validate_bc(bc_type, y, y.shape[1:], axis)
if extrapolate is None:
if bc[0] == 'periodic':
extrapolate = 'periodic'
else:
extrapolate = True
dxr = dx.reshape([dx.shape[0]] + [1] * (y.ndim - 1))
slope = np.diff(y, axis=0) / dxr
# If bc is 'not-a-knot' this change is just a convention.
# If bc is 'periodic' then we already checked that y[0] == y[-1],
# and the spline is just a constant; we handle this case in the same
# way by setting the first derivatives to slope, which is 0.
if n == 2:
if bc[0] in ['not-a-knot', 'periodic']:
bc[0] = (1, slope[0])
if bc[1] in ['not-a-knot', 'periodic']:
bc[1] = (1, slope[0])
# This is a very special case, when both conditions are 'not-a-knot'
# and n == 3. In this case 'not-a-knot' can't be handled regularly
# as the both conditions are identical. We handle this case by
# constructing a parabola passing through given points.
if n == 3 and bc[0] == 'not-a-knot' and bc[1] == 'not-a-knot':
A = np.zeros((3, 3)) # This is a standard matrix.
b = np.empty((3,) + y.shape[1:], dtype=y.dtype)
A[0, 0] = 1
A[0, 1] = 1
A[1, 0] = dx[1]
A[1, 1] = 2 * (dx[0] + dx[1])
A[1, 2] = dx[0]
A[2, 1] = 1
A[2, 2] = 1
b[0] = 2 * slope[0]
b[1] = 3 * (dxr[0] * slope[1] + dxr[1] * slope[0])
b[2] = 2 * slope[1]
s = solve(A, b, overwrite_a=True, overwrite_b=True,
check_finite=False)
else:
# Find derivative values at each x[i] by solving a tridiagonal
# system.
A = np.zeros((3, n)) # This is a banded matrix representation.
b = np.empty((n,) + y.shape[1:], dtype=y.dtype)
# Filling the system for i=1..n-2
# (x[i-1] - x[i]) * s[i-1] +\
# 2 * ((x[i] - x[i-1]) + (x[i+1] - x[i])) * s[i] +\
# (x[i] - x[i-1]) * s[i+1] =\
# 3 * ((x[i+1] - x[i])*(y[i] - y[i-1])/(x[i] - x[i-1]) +\
# (x[i] - x[i-1])*(y[i+1] - y[i])/(x[i+1] - x[i]))
A[1, 1:-1] = 2 * (dx[:-1] + dx[1:]) # The diagonal
A[0, 2:] = dx[:-1] # The upper diagonal
A[-1, :-2] = dx[1:] # The lower diagonal
b[1:-1] = 3 * (dxr[1:] * slope[:-1] + dxr[:-1] * slope[1:])
bc_start, bc_end = bc
if bc_start == 'periodic':
# Due to the periodicity, and because y[-1] = y[0], the linear
# system has (n-1) unknowns/equations instead of n:
A = A[:, 0:-1]
A[1, 0] = 2 * (dx[-1] + dx[0])
A[0, 1] = dx[-1]
b = b[:-1]
# Also, due to the periodicity, the system is not tri-diagonal.
# We need to compute a "condensed" matrix of shape (n-2, n-2).
# See http://www.cfm.brown.edu/people/gk/chap6/node14.html for
# more explanations.
# The condensed matrix is obtained by removing the last column
# and last row of the (n-1, n-1) system matrix. The removed
# values are saved in scalar variables with the (n-1, n-1)
# system matrix indices forming their names:
a_m1_0 = dx[-2] # lower left corner value: A[-1, 0]
a_m1_m2 = dx[-1]
a_m1_m1 = 2 * (dx[-1] + dx[-2])
a_m2_m1 = dx[-2]
a_0_m1 = dx[0]
b[0] = 3 * (dxr[0] * slope[-1] + dxr[-1] * slope[0])
b[-1] = 3 * (dxr[-1] * slope[-2] + dxr[-2] * slope[-1])
Ac = A[:, :-1]
b1 = b[:-1]
b2 = np.zeros_like(b1)
b2[0] = -a_0_m1
b2[-1] = -a_m2_m1
# s1 and s2 are the solutions of (n-2, n-2) system
s1 = solve_banded((1, 1), Ac, b1, overwrite_ab=False,
overwrite_b=False, check_finite=False)
s2 = solve_banded((1, 1), Ac, b2, overwrite_ab=False,
overwrite_b=False, check_finite=False)
# computing the s[n-2] solution:
s_m1 = ((b[-1] - a_m1_0 * s1[0] - a_m1_m2 * s1[-1]) /
(a_m1_m1 + a_m1_0 * s2[0] + a_m1_m2 * s2[-1]))
# s is the solution of the (n, n) system:
s = np.empty((n,) + y.shape[1:], dtype=y.dtype)
s[:-2] = s1 + s_m1 * s2
s[-2] = s_m1
s[-1] = s[0]
else:
if bc_start == 'not-a-knot':
A[1, 0] = dx[1]
A[0, 1] = x[2] - x[0]
d = x[2] - x[0]
b[0] = ((dxr[0] + 2*d) * dxr[1] * slope[0] +
dxr[0]**2 * slope[1]) / d
elif bc_start[0] == 1:
A[1, 0] = 1
A[0, 1] = 0
b[0] = bc_start[1]
elif bc_start[0] == 2:
A[1, 0] = 2 * dx[0]
A[0, 1] = dx[0]
b[0] = -0.5 * bc_start[1] * dx[0]**2 + 3 * (y[1] - y[0])
if bc_end == 'not-a-knot':
A[1, -1] = dx[-2]
A[-1, -2] = x[-1] - x[-3]
d = x[-1] - x[-3]
b[-1] = ((dxr[-1]**2*slope[-2] +
(2*d + dxr[-1])*dxr[-2]*slope[-1]) / d)
elif bc_end[0] == 1:
A[1, -1] = 1
A[-1, -2] = 0
b[-1] = bc_end[1]
elif bc_end[0] == 2:
A[1, -1] = 2 * dx[-1]
A[-1, -2] = dx[-1]
b[-1] = 0.5 * bc_end[1] * dx[-1]**2 + 3 * (y[-1] - y[-2])
s = solve_banded((1, 1), A, b, overwrite_ab=True,
overwrite_b=True, check_finite=False)
# Compute coefficients in PPoly form.
t = (s[:-1] + s[1:] - 2 * slope) / dxr
c = np.empty((4, n - 1) + y.shape[1:], dtype=t.dtype)
c[0] = t / dxr
c[1] = (slope - s[:-1]) / dxr - t
c[2] = s[:-1]
c[3] = y[:-1]
super(CubicSpline, self).__init__(c, x, extrapolate=extrapolate)
self.axis = axis
@staticmethod
def _validate_bc(bc_type, y, expected_deriv_shape, axis):
"""Validate and prepare boundary conditions.
Returns
-------
validated_bc : 2-tuple
Boundary conditions for a curve start and end.
y : ndarray
y casted to complex dtype if one of the boundary conditions has
complex dtype.
"""
if isinstance(bc_type, string_types):
if bc_type == 'periodic':
if not np.allclose(y[0], y[-1], rtol=1e-15, atol=1e-15):
raise ValueError(
"The first and last `y` point along axis {} must "
"be identical (within machine precision) when "
"bc_type='periodic'.".format(axis))
bc_type = (bc_type, bc_type)
else:
if len(bc_type) != 2:
raise ValueError("`bc_type` must contain 2 elements to "
"specify start and end conditions.")
if 'periodic' in bc_type:
raise ValueError("'periodic' `bc_type` is defined for both "
"curve ends and cannot be used with other "
"boundary conditions.")
validated_bc = []
for bc in bc_type:
if isinstance(bc, string_types):
if bc == 'clamped':
validated_bc.append((1, np.zeros(expected_deriv_shape)))
elif bc == 'natural':
validated_bc.append((2, np.zeros(expected_deriv_shape)))
elif bc in ['not-a-knot', 'periodic']:
validated_bc.append(bc)
else:
raise ValueError("bc_type={} is not allowed.".format(bc))
else:
try:
deriv_order, deriv_value = bc
except Exception:
raise ValueError("A specified derivative value must be "
"given in the form (order, value).")
if deriv_order not in [1, 2]:
raise ValueError("The specified derivative order must "
"be 1 or 2.")
deriv_value = np.asarray(deriv_value)
if deriv_value.shape != expected_deriv_shape:
raise ValueError(
"`deriv_value` shape {} is not the expected one {}."
.format(deriv_value.shape, expected_deriv_shape))
if np.issubdtype(deriv_value.dtype, np.complexfloating):
y = y.astype(complex, copy=False)
validated_bc.append((deriv_order, deriv_value))
return validated_bc, y
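# --- Illustrative sketch (added for exposition, not part of the original
# module): the effect of the `bc_type` options described in the CubicSpline
# docstring.  The helper name `_demo_cubic_spline_bc` is hypothetical.
def _demo_cubic_spline_bc():
    x = np.arange(5, dtype=float)
    y = x ** 2
    cs_natural = CubicSpline(x, y, bc_type='natural')   # y'' = 0 at both ends
    cs_clamped = CubicSpline(x, y, bc_type='clamped')   # y'  = 0 at both ends
    # arbitrary end derivatives: y'(0) = 0 and y'(4) = 8 recover x**2 exactly
    cs_exact = CubicSpline(x, y, bc_type=((1, 0.0), (1, 8.0)))
    xs = np.linspace(0., 4., 41)
    assert np.allclose(cs_exact(xs), xs ** 2)
    return cs_natural(xs), cs_clamped(xs), cs_exact(xs)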
| bsd-3-clause |
shangwuhencc/scikit-learn | benchmarks/bench_20newsgroups.py | 377 | 3555 | from __future__ import print_function, division
from time import time
import argparse
import numpy as np
from sklearn.dummy import DummyClassifier
from sklearn.datasets import fetch_20newsgroups_vectorized
from sklearn.metrics import accuracy_score
from sklearn.utils.validation import check_array
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import MultinomialNB
ESTIMATORS = {
"dummy": DummyClassifier(),
"random_forest": RandomForestClassifier(n_estimators=100,
max_features="sqrt",
min_samples_split=10),
"extra_trees": ExtraTreesClassifier(n_estimators=100,
max_features="sqrt",
min_samples_split=10),
"logistic_regression": LogisticRegression(),
"naive_bayes": MultinomialNB(),
"adaboost": AdaBoostClassifier(n_estimators=10),
}
###############################################################################
# Data
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-e', '--estimators', nargs="+", required=True,
choices=ESTIMATORS)
args = vars(parser.parse_args())
data_train = fetch_20newsgroups_vectorized(subset="train")
data_test = fetch_20newsgroups_vectorized(subset="test")
X_train = check_array(data_train.data, dtype=np.float32,
accept_sparse="csc")
X_test = check_array(data_test.data, dtype=np.float32, accept_sparse="csr")
y_train = data_train.target
y_test = data_test.target
print("20 newsgroups")
print("=============")
print("X_train.shape = {0}".format(X_train.shape))
print("X_train.format = {0}".format(X_train.format))
print("X_train.dtype = {0}".format(X_train.dtype))
print("X_train density = {0}"
"".format(X_train.nnz / np.product(X_train.shape)))
print("y_train {0}".format(y_train.shape))
print("X_test {0}".format(X_test.shape))
print("X_test.format = {0}".format(X_test.format))
print("X_test.dtype = {0}".format(X_test.dtype))
print("y_test {0}".format(y_test.shape))
print()
print("Classifier Training")
print("===================")
accuracy, train_time, test_time = {}, {}, {}
for name in sorted(args["estimators"]):
clf = ESTIMATORS[name]
try:
clf.set_params(random_state=0)
except (TypeError, ValueError):
pass
print("Training %s ... " % name, end="")
t0 = time()
clf.fit(X_train, y_train)
train_time[name] = time() - t0
t0 = time()
y_pred = clf.predict(X_test)
test_time[name] = time() - t0
accuracy[name] = accuracy_score(y_test, y_pred)
print("done")
print()
print("Classification performance:")
print("===========================")
print()
print("%s %s %s %s" % ("Classifier ", "train-time", "test-time",
"Accuracy"))
print("-" * 44)
for name in sorted(accuracy, key=accuracy.get):
print("%s %s %s %s" % (name.ljust(16),
("%.4fs" % train_time[name]).center(10),
("%.4fs" % test_time[name]).center(10),
("%.4f" % accuracy[name]).center(10)))
print()
| bsd-3-clause |
sonnyhu/scikit-learn | examples/cluster/plot_dbscan.py | 346 | 2479 | # -*- coding: utf-8 -*-
"""
===================================
Demo of DBSCAN clustering algorithm
===================================
Finds core samples of high density and expands clusters from them.
"""
print(__doc__)
import numpy as np
from sklearn.cluster import DBSCAN
from sklearn import metrics
from sklearn.datasets.samples_generator import make_blobs
from sklearn.preprocessing import StandardScaler
##############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, labels_true = make_blobs(n_samples=750, centers=centers, cluster_std=0.4,
random_state=0)
X = StandardScaler().fit_transform(X)
##############################################################################
# Compute DBSCAN
db = DBSCAN(eps=0.3, min_samples=10).fit(X)
core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
core_samples_mask[db.core_sample_indices_] = True
labels = db.labels_
# Number of clusters in labels, ignoring noise if present.
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
print('Estimated number of clusters: %d' % n_clusters_)
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels))
print("Completeness: %0.3f" % metrics.completeness_score(labels_true, labels))
print("V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels))
print("Adjusted Rand Index: %0.3f"
% metrics.adjusted_rand_score(labels_true, labels))
print("Adjusted Mutual Information: %0.3f"
% metrics.adjusted_mutual_info_score(labels_true, labels))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, labels))
##############################################################################
# Plot result
import matplotlib.pyplot as plt
# Black removed and is used for noise instead.
unique_labels = set(labels)
colors = plt.cm.Spectral(np.linspace(0, 1, len(unique_labels)))
for k, col in zip(unique_labels, colors):
if k == -1:
# Black used for noise.
col = 'k'
class_member_mask = (labels == k)
xy = X[class_member_mask & core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
xy = X[class_member_mask & ~core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
| bsd-3-clause |
CanisMajoris/ThinkStats2 | code/thinkstats2.py | 68 | 68825 | """This file contains code for use with "Think Stats" and
"Think Bayes", both by Allen B. Downey, available from greenteapress.com
Copyright 2014 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function, division
"""This file contains class definitions for:
Hist: represents a histogram (map from values to integer frequencies).
Pmf: represents a probability mass function (map from values to probs).
_DictWrapper: private parent class for Hist and Pmf.
Cdf: represents a discrete cumulative distribution function
Pdf: represents a continuous probability density function
"""
import bisect
import copy
import logging
import math
import random
import re
from collections import Counter
from operator import itemgetter
import thinkplot
import numpy as np
import pandas
import scipy
from scipy import stats
from scipy import special
from scipy import ndimage
from io import open
ROOT2 = math.sqrt(2)
def RandomSeed(x):
"""Initialize the random and np.random generators.
x: int seed
"""
random.seed(x)
np.random.seed(x)
def Odds(p):
"""Computes odds for a given probability.
Example: p=0.75 means 75 for and 25 against, or 3:1 odds in favor.
Note: when p=1, the formula for odds divides by zero, which is
normally undefined. But I think it is reasonable to define Odds(1)
to be infinity, so that's what this function does.
p: float 0-1
Returns: float odds
"""
if p == 1:
return float('inf')
return p / (1 - p)
def Probability(o):
"""Computes the probability corresponding to given odds.
Example: o=2 means 2:1 odds in favor, or 2/3 probability
o: float odds, strictly positive
Returns: float probability
"""
return o / (o + 1)
def Probability2(yes, no):
"""Computes the probability corresponding to given odds.
Example: yes=2, no=1 means 2:1 odds in favor, or 2/3 probability.
yes, no: int or float odds in favor
"""
return yes / (yes + no)
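# --- Illustrative sketch (added for exposition, not part of the original
# module): round-tripping between probabilities and odds with the helpers
# above.  The function name `_demo_odds` is hypothetical.
def _demo_odds():
    p = 0.75
    o = Odds(p)                             # 3.0, i.e. 3:1 in favor
    assert abs(Probability(o) - p) < 1e-12
    assert abs(Probability2(3, 1) - p) < 1e-12
    return o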
class Interpolator(object):
"""Represents a mapping between sorted sequences; performs linear interp.
Attributes:
xs: sorted list
ys: sorted list
"""
def __init__(self, xs, ys):
self.xs = xs
self.ys = ys
def Lookup(self, x):
"""Looks up x and returns the corresponding value of y."""
return self._Bisect(x, self.xs, self.ys)
def Reverse(self, y):
"""Looks up y and returns the corresponding value of x."""
return self._Bisect(y, self.ys, self.xs)
def _Bisect(self, x, xs, ys):
"""Helper function."""
if x <= xs[0]:
return ys[0]
if x >= xs[-1]:
return ys[-1]
i = bisect.bisect(xs, x)
frac = 1.0 * (x - xs[i - 1]) / (xs[i] - xs[i - 1])
y = ys[i - 1] + frac * 1.0 * (ys[i] - ys[i - 1])
return y
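# --- Illustrative sketch (added for exposition, not part of the original
# module): Interpolator maps between two sorted sequences by linear
# interpolation, in either direction.  `_demo_interpolator` is hypothetical.
def _demo_interpolator():
    interp = Interpolator(xs=[0, 10, 20], ys=[0, 100, 400])
    assert interp.Lookup(5) == 50.0       # halfway between ys[0] and ys[1]
    assert interp.Reverse(250) == 15.0    # halfway between xs[1] and xs[2]
    return interp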
class _DictWrapper(object):
"""An object that contains a dictionary."""
def __init__(self, obj=None, label=None):
"""Initializes the distribution.
obj: Hist, Pmf, Cdf, Pdf, dict, pandas Series, list of pairs
label: string label
"""
self.label = label if label is not None else '_nolegend_'
self.d = {}
# flag whether the distribution is under a log transform
self.log = False
if obj is None:
return
if isinstance(obj, (_DictWrapper, Cdf, Pdf)):
self.label = label if label is not None else obj.label
if isinstance(obj, dict):
self.d.update(obj.items())
elif isinstance(obj, (_DictWrapper, Cdf, Pdf)):
self.d.update(obj.Items())
elif isinstance(obj, pandas.Series):
self.d.update(obj.value_counts().iteritems())
else:
# finally, treat it like a list
self.d.update(Counter(obj))
if len(self) > 0 and isinstance(self, Pmf):
self.Normalize()
def __hash__(self):
return id(self)
def __str__(self):
cls = self.__class__.__name__
return '%s(%s)' % (cls, str(self.d))
__repr__ = __str__
def __eq__(self, other):
return self.d == other.d
def __len__(self):
return len(self.d)
def __iter__(self):
return iter(self.d)
def iterkeys(self):
"""Returns an iterator over keys."""
return iter(self.d)
def __contains__(self, value):
return value in self.d
def __getitem__(self, value):
return self.d.get(value, 0)
def __setitem__(self, value, prob):
self.d[value] = prob
def __delitem__(self, value):
del self.d[value]
def Copy(self, label=None):
"""Returns a copy.
Make a shallow copy of d. If you want a deep copy of d,
use copy.deepcopy on the whole object.
label: string label for the new Hist
returns: new _DictWrapper with the same type
"""
new = copy.copy(self)
new.d = copy.copy(self.d)
new.label = label if label is not None else self.label
return new
def Scale(self, factor):
"""Multiplies the values by a factor.
factor: what to multiply by
Returns: new object
"""
new = self.Copy()
new.d.clear()
for val, prob in self.Items():
new.Set(val * factor, prob)
return new
def Log(self, m=None):
"""Log transforms the probabilities.
Removes values with probability 0.
Normalizes so that the largest logprob is 0.
"""
if self.log:
raise ValueError("Pmf/Hist already under a log transform")
self.log = True
if m is None:
m = self.MaxLike()
for x, p in self.d.items():
if p:
self.Set(x, math.log(p / m))
else:
self.Remove(x)
def Exp(self, m=None):
"""Exponentiates the probabilities.
m: how much to shift the ps before exponentiating
If m is None, normalizes so that the largest prob is 1.
"""
if not self.log:
raise ValueError("Pmf/Hist not under a log transform")
self.log = False
if m is None:
m = self.MaxLike()
for x, p in self.d.items():
self.Set(x, math.exp(p - m))
def GetDict(self):
"""Gets the dictionary."""
return self.d
def SetDict(self, d):
"""Sets the dictionary."""
self.d = d
def Values(self):
"""Gets an unsorted sequence of values.
Note: one source of confusion is that the keys of this
dictionary are the values of the Hist/Pmf, and the
values of the dictionary are frequencies/probabilities.
"""
return self.d.keys()
def Items(self):
"""Gets an unsorted sequence of (value, freq/prob) pairs."""
return self.d.items()
def Render(self, **options):
"""Generates a sequence of points suitable for plotting.
Note: options are ignored
Returns:
tuple of (sorted value sequence, freq/prob sequence)
"""
if min(self.d.keys()) is np.nan:
logging.warning('Hist: contains NaN, may not render correctly.')
return zip(*sorted(self.Items()))
def MakeCdf(self, label=None):
"""Makes a Cdf."""
label = label if label is not None else self.label
return Cdf(self, label=label)
def Print(self):
"""Prints the values and freqs/probs in ascending order."""
for val, prob in sorted(self.d.items()):
print(val, prob)
def Set(self, x, y=0):
"""Sets the freq/prob associated with the value x.
Args:
x: number value
y: number freq or prob
"""
self.d[x] = y
def Incr(self, x, term=1):
"""Increments the freq/prob associated with the value x.
Args:
x: number value
term: how much to increment by
"""
self.d[x] = self.d.get(x, 0) + term
def Mult(self, x, factor):
"""Scales the freq/prob associated with the value x.
Args:
x: number value
factor: how much to multiply by
"""
self.d[x] = self.d.get(x, 0) * factor
def Remove(self, x):
"""Removes a value.
Throws an exception if the value is not there.
Args:
x: value to remove
"""
del self.d[x]
def Total(self):
"""Returns the total of the frequencies/probabilities in the map."""
total = sum(self.d.values())
return total
def MaxLike(self):
"""Returns the largest frequency/probability in the map."""
return max(self.d.values())
def Largest(self, n=10):
"""Returns the largest n values, with frequency/probability.
n: number of items to return
"""
return sorted(self.d.items(), reverse=True)[:n]
def Smallest(self, n=10):
"""Returns the smallest n values, with frequency/probability.
n: number of items to return
"""
return sorted(self.d.items(), reverse=False)[:n]
class Hist(_DictWrapper):
"""Represents a histogram, which is a map from values to frequencies.
Values can be any hashable type; frequencies are integer counters.
"""
def Freq(self, x):
"""Gets the frequency associated with the value x.
Args:
x: number value
Returns:
int frequency
"""
return self.d.get(x, 0)
def Freqs(self, xs):
"""Gets frequencies for a sequence of values."""
return [self.Freq(x) for x in xs]
def IsSubset(self, other):
"""Checks whether the values in this histogram are a subset of
the values in the given histogram."""
for val, freq in self.Items():
if freq > other.Freq(val):
return False
return True
def Subtract(self, other):
"""Subtracts the values in the given histogram from this histogram."""
for val, freq in other.Items():
self.Incr(val, -freq)
class Pmf(_DictWrapper):
"""Represents a probability mass function.
Values can be any hashable type; probabilities are floating-point.
Pmfs are not necessarily normalized.
"""
def Prob(self, x, default=0):
"""Gets the probability associated with the value x.
Args:
x: number value
default: value to return if the key is not there
Returns:
float probability
"""
return self.d.get(x, default)
def Probs(self, xs):
"""Gets probabilities for a sequence of values."""
return [self.Prob(x) for x in xs]
def Percentile(self, percentage):
"""Computes a percentile of a given Pmf.
Note: this is not super efficient. If you are planning
to compute more than a few percentiles, compute the Cdf.
percentage: float 0-100
returns: value from the Pmf
"""
p = percentage / 100.0
total = 0
for val, prob in sorted(self.Items()):
total += prob
if total >= p:
return val
def ProbGreater(self, x):
"""Probability that a sample from this Pmf exceeds x.
x: number
returns: float probability
"""
if isinstance(x, _DictWrapper):
return PmfProbGreater(self, x)
else:
t = [prob for (val, prob) in self.d.items() if val > x]
return sum(t)
def ProbLess(self, x):
"""Probability that a sample from this Pmf is less than x.
x: number
returns: float probability
"""
if isinstance(x, _DictWrapper):
return PmfProbLess(self, x)
else:
t = [prob for (val, prob) in self.d.items() if val < x]
return sum(t)
def __lt__(self, obj):
"""Less than.
obj: number or _DictWrapper
returns: float probability
"""
return self.ProbLess(obj)
def __gt__(self, obj):
"""Greater than.
obj: number or _DictWrapper
returns: float probability
"""
return self.ProbGreater(obj)
def __ge__(self, obj):
"""Greater than or equal.
obj: number or _DictWrapper
returns: float probability
"""
return 1 - (self < obj)
def __le__(self, obj):
"""Less than or equal.
obj: number or _DictWrapper
returns: float probability
"""
return 1 - (self > obj)
def Normalize(self, fraction=1.0):
"""Normalizes this PMF so the sum of all probs is fraction.
Args:
fraction: what the total should be after normalization
Returns: the total probability before normalizing
"""
if self.log:
raise ValueError("Normalize: Pmf is under a log transform")
total = self.Total()
if total == 0.0:
raise ValueError('Normalize: total probability is zero.')
#logging.warning('Normalize: total probability is zero.')
#return total
factor = fraction / total
for x in self.d:
self.d[x] *= factor
return total
def Random(self):
"""Chooses a random element from this PMF.
Note: this is not very efficient. If you plan to call
this more than a few times, consider converting to a CDF.
Returns:
float value from the Pmf
"""
target = random.random()
total = 0.0
for x, p in self.d.items():
total += p
if total >= target:
return x
# we shouldn't get here
raise ValueError('Random: Pmf might not be normalized.')
def Mean(self):
"""Computes the mean of a PMF.
Returns:
float mean
"""
mean = 0.0
for x, p in self.d.items():
mean += p * x
return mean
def Var(self, mu=None):
"""Computes the variance of a PMF.
mu: the point around which the variance is computed;
if omitted, computes the mean
returns: float variance
"""
if mu is None:
mu = self.Mean()
var = 0.0
for x, p in self.d.items():
var += p * (x - mu) ** 2
return var
def Std(self, mu=None):
"""Computes the standard deviation of a PMF.
mu: the point around which the variance is computed;
if omitted, computes the mean
returns: float standard deviation
"""
var = self.Var(mu)
return math.sqrt(var)
def MaximumLikelihood(self):
"""Returns the value with the highest probability.
Returns: float probability
"""
_, val = max((prob, val) for val, prob in self.Items())
return val
def CredibleInterval(self, percentage=90):
"""Computes the central credible interval.
If percentage=90, computes the 90% CI.
Args:
percentage: float between 0 and 100
Returns:
sequence of two floats, low and high
"""
cdf = self.MakeCdf()
return cdf.CredibleInterval(percentage)
def __add__(self, other):
"""Computes the Pmf of the sum of values drawn from self and other.
other: another Pmf or a scalar
returns: new Pmf
"""
try:
return self.AddPmf(other)
except AttributeError:
return self.AddConstant(other)
def AddPmf(self, other):
"""Computes the Pmf of the sum of values drawn from self and other.
other: another Pmf
returns: new Pmf
"""
pmf = Pmf()
for v1, p1 in self.Items():
for v2, p2 in other.Items():
pmf.Incr(v1 + v2, p1 * p2)
return pmf
def AddConstant(self, other):
"""Computes the Pmf of the sum a constant and values from self.
other: a number
returns: new Pmf
"""
pmf = Pmf()
for v1, p1 in self.Items():
pmf.Set(v1 + other, p1)
return pmf
def __sub__(self, other):
"""Computes the Pmf of the diff of values drawn from self and other.
other: another Pmf
returns: new Pmf
"""
try:
return self.SubPmf(other)
except AttributeError:
return self.AddConstant(-other)
def SubPmf(self, other):
"""Computes the Pmf of the diff of values drawn from self and other.
other: another Pmf
returns: new Pmf
"""
pmf = Pmf()
for v1, p1 in self.Items():
for v2, p2 in other.Items():
pmf.Incr(v1 - v2, p1 * p2)
return pmf
def __mul__(self, other):
"""Computes the Pmf of the product of values drawn from self and other.
other: another Pmf
returns: new Pmf
"""
try:
return self.MulPmf(other)
except AttributeError:
return self.MulConstant(other)
def MulPmf(self, other):
"""Computes the Pmf of the diff of values drawn from self and other.
other: another Pmf
returns: new Pmf
"""
pmf = Pmf()
for v1, p1 in self.Items():
for v2, p2 in other.Items():
pmf.Incr(v1 * v2, p1 * p2)
return pmf
def MulConstant(self, other):
"""Computes the Pmf of the product of a constant and values from self.
other: a number
returns: new Pmf
"""
pmf = Pmf()
for v1, p1 in self.Items():
pmf.Set(v1 * other, p1)
return pmf
def __div__(self, other):
"""Computes the Pmf of the ratio of values drawn from self and other.
other: another Pmf
returns: new Pmf
"""
try:
return self.DivPmf(other)
except AttributeError:
return self.MulConstant(1/other)
__truediv__ = __div__
def DivPmf(self, other):
"""Computes the Pmf of the ratio of values drawn from self and other.
other: another Pmf
returns: new Pmf
"""
pmf = Pmf()
for v1, p1 in self.Items():
for v2, p2 in other.Items():
pmf.Incr(v1 / v2, p1 * p2)
return pmf
def Max(self, k):
"""Computes the CDF of the maximum of k selections from this dist.
k: int
returns: new Cdf
"""
cdf = self.MakeCdf()
return cdf.Max(k)
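# --- Illustrative sketch (added for exposition, not part of the original
# module): building a Pmf for a fair six-sided die and combining two of them
# with the overloaded operators above.  `_demo_pmf_die` is hypothetical.
def _demo_pmf_die():
    die = Pmf(range(1, 7), label='d6')     # normalized to 1/6 per face
    assert abs(die.Mean() - 3.5) < 1e-12
    two_dice = die + die                   # Pmf of the sum of two rolls
    assert abs(two_dice.Prob(7) - 6.0 / 36.0) < 1e-12
    return die, two_dice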
class Joint(Pmf):
"""Represents a joint distribution.
The values are sequences (usually tuples)
"""
def Marginal(self, i, label=None):
"""Gets the marginal distribution of the indicated variable.
i: index of the variable we want
Returns: Pmf
"""
pmf = Pmf(label=label)
for vs, prob in self.Items():
pmf.Incr(vs[i], prob)
return pmf
def Conditional(self, i, j, val, label=None):
"""Gets the conditional distribution of the indicated variable.
Distribution of vs[i], conditioned on vs[j] = val.
i: index of the variable we want
j: which variable is conditioned on
val: the value the jth variable has to have
Returns: Pmf
"""
pmf = Pmf(label=label)
for vs, prob in self.Items():
if vs[j] != val:
continue
pmf.Incr(vs[i], prob)
pmf.Normalize()
return pmf
def MaxLikeInterval(self, percentage=90):
"""Returns the maximum-likelihood credible interval.
If percentage=90, computes a 90% CI containing the values
with the highest likelihoods.
percentage: float between 0 and 100
Returns: list of values from the suite
"""
interval = []
total = 0
t = [(prob, val) for val, prob in self.Items()]
t.sort(reverse=True)
for prob, val in t:
interval.append(val)
total += prob
if total >= percentage / 100.0:
break
return interval
def MakeJoint(pmf1, pmf2):
"""Joint distribution of values from pmf1 and pmf2.
Assumes that the PMFs represent independent random variables.
Args:
pmf1: Pmf object
pmf2: Pmf object
Returns:
Joint pmf of value pairs
"""
joint = Joint()
for v1, p1 in pmf1.Items():
for v2, p2 in pmf2.Items():
joint.Set((v1, v2), p1 * p2)
return joint
def MakeHistFromList(t, label=None):
"""Makes a histogram from an unsorted sequence of values.
Args:
t: sequence of numbers
label: string label for this histogram
Returns:
Hist object
"""
return Hist(t, label=label)
def MakeHistFromDict(d, label=None):
"""Makes a histogram from a map from values to frequencies.
Args:
d: dictionary that maps values to frequencies
label: string label for this histogram
Returns:
Hist object
"""
return Hist(d, label)
def MakePmfFromList(t, label=None):
"""Makes a PMF from an unsorted sequence of values.
Args:
t: sequence of numbers
label: string label for this PMF
Returns:
Pmf object
"""
return Pmf(t, label=label)
def MakePmfFromDict(d, label=None):
"""Makes a PMF from a map from values to probabilities.
Args:
d: dictionary that maps values to probabilities
label: string label for this PMF
Returns:
Pmf object
"""
return Pmf(d, label=label)
def MakePmfFromItems(t, label=None):
"""Makes a PMF from a sequence of value-probability pairs
Args:
t: sequence of value-probability pairs
label: string label for this PMF
Returns:
Pmf object
"""
return Pmf(dict(t), label=label)
def MakePmfFromHist(hist, label=None):
"""Makes a normalized PMF from a Hist object.
Args:
hist: Hist object
label: string label
Returns:
Pmf object
"""
if label is None:
label = hist.label
return Pmf(hist, label=label)
def MakeMixture(metapmf, label='mix'):
"""Make a mixture distribution.
Args:
metapmf: Pmf that maps from Pmfs to probs.
label: string label for the new Pmf.
Returns: Pmf object.
"""
mix = Pmf(label=label)
for pmf, p1 in metapmf.Items():
for x, p2 in pmf.Items():
mix.Incr(x, p1 * p2)
return mix
def MakeUniformPmf(low, high, n):
"""Make a uniform Pmf.
low: lowest value (inclusive)
high: highest value (inclusive)
n: number of values
"""
pmf = Pmf()
for x in np.linspace(low, high, n):
pmf.Set(x, 1)
pmf.Normalize()
return pmf
class Cdf(object):
"""Represents a cumulative distribution function.
Attributes:
xs: sequence of values
ps: sequence of probabilities
label: string used as a graph label.
"""
def __init__(self, obj=None, ps=None, label=None):
"""Initializes.
If ps is provided, obj must be the corresponding list of values.
obj: Hist, Pmf, Cdf, Pdf, dict, pandas Series, list of pairs
ps: list of cumulative probabilities
label: string label
"""
self.label = label if label is not None else '_nolegend_'
if isinstance(obj, (_DictWrapper, Cdf, Pdf)):
if not label:
self.label = label if label is not None else obj.label
if obj is None:
# caller does not provide obj, make an empty Cdf
self.xs = np.asarray([])
self.ps = np.asarray([])
if ps is not None:
logging.warning("Cdf: can't pass ps without also passing xs.")
return
else:
# if the caller provides xs and ps, just store them
if ps is not None:
if isinstance(ps, str):
logging.warning("Cdf: ps can't be a string")
self.xs = np.asarray(obj)
self.ps = np.asarray(ps)
return
# caller has provided just obj, not ps
if isinstance(obj, Cdf):
self.xs = copy.copy(obj.xs)
self.ps = copy.copy(obj.ps)
return
if isinstance(obj, _DictWrapper):
dw = obj
else:
dw = Hist(obj)
if len(dw) == 0:
self.xs = np.asarray([])
self.ps = np.asarray([])
return
xs, freqs = zip(*sorted(dw.Items()))
self.xs = np.asarray(xs)
self.ps = np.cumsum(freqs, dtype=np.float)
self.ps /= self.ps[-1]
def __str__(self):
return 'Cdf(%s, %s)' % (str(self.xs), str(self.ps))
__repr__ = __str__
def __len__(self):
return len(self.xs)
def __getitem__(self, x):
return self.Prob(x)
def __setitem__(self):
raise UnimplementedMethodException()
def __delitem__(self):
raise UnimplementedMethodException()
def __eq__(self, other):
return np.all(self.xs == other.xs) and np.all(self.ps == other.ps)
def Copy(self, label=None):
"""Returns a copy of this Cdf.
label: string label for the new Cdf
"""
if label is None:
label = self.label
return Cdf(list(self.xs), list(self.ps), label=label)
def MakePmf(self, label=None):
"""Makes a Pmf."""
if label is None:
label = self.label
return Pmf(self, label=label)
def Values(self):
"""Returns a sorted list of values.
"""
return self.xs
def Items(self):
"""Returns a sorted sequence of (value, probability) pairs.
Note: in Python3, returns an iterator.
"""
a = self.ps
b = np.roll(a, 1)
b[0] = 0
return zip(self.xs, a-b)
def Shift(self, term):
"""Adds a term to the xs.
term: how much to add
"""
new = self.Copy()
# don't use +=, or else an int array + float yields int array
new.xs = new.xs + term
return new
def Scale(self, factor):
"""Multiplies the xs by a factor.
factor: what to multiply by
"""
new = self.Copy()
# don't use *=, or else an int array * float yields int array
new.xs = new.xs * factor
return new
def Prob(self, x):
"""Returns CDF(x), the probability that corresponds to value x.
Args:
x: number
Returns:
float probability
"""
if x < self.xs[0]:
return 0.0
index = bisect.bisect(self.xs, x)
p = self.ps[index-1]
return p
def Probs(self, xs):
"""Gets probabilities for a sequence of values.
xs: any sequence that can be converted to NumPy array
returns: NumPy array of cumulative probabilities
"""
xs = np.asarray(xs)
index = np.searchsorted(self.xs, xs, side='right')
ps = self.ps[index-1]
ps[xs < self.xs[0]] = 0.0
return ps
ProbArray = Probs
def Value(self, p):
"""Returns InverseCDF(p), the value that corresponds to probability p.
Args:
p: number in the range [0, 1]
Returns:
number value
"""
if p < 0 or p > 1:
raise ValueError('Probability p must be in range [0, 1]')
index = bisect.bisect_left(self.ps, p)
return self.xs[index]
def ValueArray(self, ps):
"""Returns InverseCDF(p), the value that corresponds to probability p.
Args:
ps: NumPy array of numbers in the range [0, 1]
Returns:
NumPy array of values
"""
ps = np.asarray(ps)
if np.any(ps < 0) or np.any(ps > 1):
raise ValueError('Probability p must be in range [0, 1]')
index = np.searchsorted(self.ps, ps, side='left')
return self.xs[index]
def Percentile(self, p):
"""Returns the value that corresponds to percentile p.
Args:
p: number in the range [0, 100]
Returns:
number value
"""
return self.Value(p / 100.0)
def PercentileRank(self, x):
"""Returns the percentile rank of the value x.
x: potential value in the CDF
returns: percentile rank in the range 0 to 100
"""
return self.Prob(x) * 100.0
def Random(self):
"""Chooses a random value from this distribution."""
return self.Value(random.random())
def Sample(self, n):
"""Generates a random sample from this distribution.
n: int length of the sample
returns: NumPy array
"""
ps = np.random.random(n)
return self.ValueArray(ps)
def Mean(self):
"""Computes the mean of a CDF.
Returns:
float mean
"""
old_p = 0
total = 0.0
for x, new_p in zip(self.xs, self.ps):
p = new_p - old_p
total += p * x
old_p = new_p
return total
def CredibleInterval(self, percentage=90):
"""Computes the central credible interval.
If percentage=90, computes the 90% CI.
Args:
percentage: float between 0 and 100
Returns:
sequence of two floats, low and high
"""
prob = (1 - percentage / 100.0) / 2
interval = self.Value(prob), self.Value(1 - prob)
return interval
ConfidenceInterval = CredibleInterval
def _Round(self, multiplier=1000.0):
"""
An entry is added to the cdf only if the percentile differs
from the previous value in a significant digit, where the number
of significant digits is determined by multiplier. The
default is 1000, which keeps log10(1000) = 3 significant digits.
"""
# TODO(write this method)
raise UnimplementedMethodException()
def Render(self, **options):
"""Generates a sequence of points suitable for plotting.
An empirical CDF is a step function; linear interpolation
can be misleading.
Note: options are ignored
Returns:
tuple of (xs, ps)
"""
def interleave(a, b):
c = np.empty(a.shape[0] + b.shape[0])
c[::2] = a
c[1::2] = b
return c
a = np.array(self.xs)
xs = interleave(a, a)
shift_ps = np.roll(self.ps, 1)
shift_ps[0] = 0
ps = interleave(shift_ps, self.ps)
return xs, ps
def Max(self, k):
"""Computes the CDF of the maximum of k selections from this dist.
k: int
returns: new Cdf
"""
cdf = self.Copy()
cdf.ps **= k
return cdf
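# --- Illustrative sketch (added for exposition, not part of the original
# module): constructing a Cdf from raw data and querying it both ways.
# `_demo_cdf` is hypothetical.
def _demo_cdf():
    sample = [1, 2, 2, 3, 5]
    cdf = Cdf(sample, label='sample')
    assert abs(cdf.Prob(2) - 0.6) < 1e-12      # 3 of 5 values are <= 2
    assert cdf.Value(0.5) == 2                 # inverse lookup
    assert abs(cdf.PercentileRank(3) - 80.0) < 1e-9
    draws = cdf.Sample(10)                     # 10 random draws from the data
    return cdf, draws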
def MakeCdfFromItems(items, label=None):
"""Makes a cdf from an unsorted sequence of (value, frequency) pairs.
Args:
items: unsorted sequence of (value, frequency) pairs
label: string label for this CDF
Returns:
cdf: list of (value, fraction) pairs
"""
return Cdf(dict(items), label=label)
def MakeCdfFromDict(d, label=None):
"""Makes a CDF from a dictionary that maps values to frequencies.
Args:
d: dictionary that maps values to frequencies.
label: string label for the data.
Returns:
Cdf object
"""
return Cdf(d, label=label)
def MakeCdfFromList(seq, label=None):
"""Creates a CDF from an unsorted sequence.
Args:
seq: unsorted sequence of sortable values
label: string label for the cdf
Returns:
Cdf object
"""
return Cdf(seq, label=label)
def MakeCdfFromHist(hist, label=None):
"""Makes a CDF from a Hist object.
Args:
hist: Pmf.Hist object
label: string label for the data.
Returns:
Cdf object
"""
if label is None:
label = hist.label
return Cdf(hist, label=label)
def MakeCdfFromPmf(pmf, label=None):
"""Makes a CDF from a Pmf object.
Args:
pmf: Pmf.Pmf object
label: string label for the data.
Returns:
Cdf object
"""
if label is None:
label = pmf.label
return Cdf(pmf, label=label)
class UnimplementedMethodException(Exception):
"""Exception if someone calls a method that should be overridden."""
class Suite(Pmf):
"""Represents a suite of hypotheses and their probabilities."""
def Update(self, data):
"""Updates each hypothesis based on the data.
data: any representation of the data
returns: the normalizing constant
"""
for hypo in self.Values():
like = self.Likelihood(data, hypo)
self.Mult(hypo, like)
return self.Normalize()
def LogUpdate(self, data):
"""Updates a suite of hypotheses based on new data.
Modifies the suite directly; if you want to keep the original, make
a copy.
Note: unlike Update, LogUpdate does not normalize.
Args:
data: any representation of the data
"""
for hypo in self.Values():
like = self.LogLikelihood(data, hypo)
self.Incr(hypo, like)
def UpdateSet(self, dataset):
"""Updates each hypothesis based on the dataset.
This is more efficient than calling Update repeatedly because
it waits until the end to Normalize.
Modifies the suite directly; if you want to keep the original, make
a copy.
dataset: a sequence of data
returns: the normalizing constant
"""
for data in dataset:
for hypo in self.Values():
like = self.Likelihood(data, hypo)
self.Mult(hypo, like)
return self.Normalize()
def LogUpdateSet(self, dataset):
"""Updates each hypothesis based on the dataset.
Modifies the suite directly; if you want to keep the original, make
a copy.
dataset: a sequence of data
returns: None
"""
for data in dataset:
self.LogUpdate(data)
def Likelihood(self, data, hypo):
"""Computes the likelihood of the data under the hypothesis.
hypo: some representation of the hypothesis
data: some representation of the data
"""
raise UnimplementedMethodException()
def LogLikelihood(self, data, hypo):
"""Computes the log likelihood of the data under the hypothesis.
hypo: some representation of the hypothesis
data: some representation of the data
"""
raise UnimplementedMethodException()
def Print(self):
"""Prints the hypotheses and their probabilities."""
for hypo, prob in sorted(self.Items()):
print(hypo, prob)
def MakeOdds(self):
"""Transforms from probabilities to odds.
Values with prob=0 are removed.
"""
for hypo, prob in self.Items():
if prob:
self.Set(hypo, Odds(prob))
else:
self.Remove(hypo)
def MakeProbs(self):
"""Transforms from odds to probabilities."""
for hypo, odds in self.Items():
self.Set(hypo, Probability(odds))
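# --- Illustrative sketch (added for exposition, not part of the original
# module): a minimal Suite subclass in the spirit of "Think Bayes".  The
# hypotheses are values of the probability of heads (in percent); the
# subclass supplies Likelihood and Update applies Bayes's rule.  The names
# `_DemoCoin` and `_demo_suite` are hypothetical.
class _DemoCoin(Suite):
    def Likelihood(self, data, hypo):
        # hypo: probability of heads in percent; data: 'H' or 'T'
        p = hypo / 100.0
        return p if data == 'H' else 1 - p
def _demo_suite():
    suite = _DemoCoin(range(0, 101), label='coin')
    for outcome in 'HHTH':
        suite.Update(outcome)
    return suite.Mean(), suite.CredibleInterval(90)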
def MakeSuiteFromList(t, label=None):
"""Makes a suite from an unsorted sequence of values.
Args:
t: sequence of numbers
label: string label for this suite
Returns:
Suite object
"""
hist = MakeHistFromList(t, label=label)
d = hist.GetDict()
return MakeSuiteFromDict(d)
def MakeSuiteFromHist(hist, label=None):
"""Makes a normalized suite from a Hist object.
Args:
hist: Hist object
label: string label
Returns:
Suite object
"""
if label is None:
label = hist.label
# make a copy of the dictionary
d = dict(hist.GetDict())
return MakeSuiteFromDict(d, label)
def MakeSuiteFromDict(d, label=None):
"""Makes a suite from a map from values to probabilities.
Args:
d: dictionary that maps values to probabilities
label: string label for this suite
Returns:
Suite object
"""
suite = Suite(label=label)
suite.SetDict(d)
suite.Normalize()
return suite
class Pdf(object):
"""Represents a probability density function (PDF)."""
def Density(self, x):
"""Evaluates this Pdf at x.
Returns: float or NumPy array of probability density
"""
raise UnimplementedMethodException()
def GetLinspace(self):
"""Get a linspace for plotting.
Not all subclasses of Pdf implement this.
Returns: numpy array
"""
raise UnimplementedMethodException()
def MakePmf(self, **options):
"""Makes a discrete version of this Pdf.
options can include
label: string
low: low end of range
high: high end of range
n: number of places to evaluate
Returns: new Pmf
"""
label = options.pop('label', '')
xs, ds = self.Render(**options)
return Pmf(dict(zip(xs, ds)), label=label)
def Render(self, **options):
"""Generates a sequence of points suitable for plotting.
If options includes low and high, it must also include n;
in that case the density is evaluated at n locations between
low and high, including both.
If options includes xs, the density is evaluated at those locations.
Otherwise, self.GetLinspace is invoked to provide the locations.
Returns:
tuple of (xs, densities)
"""
low, high = options.pop('low', None), options.pop('high', None)
if low is not None and high is not None:
n = options.pop('n', 101)
xs = np.linspace(low, high, n)
else:
xs = options.pop('xs', None)
if xs is None:
xs = self.GetLinspace()
ds = self.Density(xs)
return xs, ds
def Items(self):
"""Generates a sequence of (value, probability) pairs.
"""
return zip(*self.Render())
class NormalPdf(Pdf):
"""Represents the PDF of a Normal distribution."""
def __init__(self, mu=0, sigma=1, label=None):
"""Constructs a Normal Pdf with given mu and sigma.
mu: mean
sigma: standard deviation
label: string
"""
self.mu = mu
self.sigma = sigma
self.label = label if label is not None else '_nolegend_'
def __str__(self):
return 'NormalPdf(%f, %f)' % (self.mu, self.sigma)
def GetLinspace(self):
"""Get a linspace for plotting.
Returns: numpy array
"""
low, high = self.mu-3*self.sigma, self.mu+3*self.sigma
return np.linspace(low, high, 101)
def Density(self, xs):
"""Evaluates this Pdf at xs.
xs: scalar or sequence of floats
returns: float or NumPy array of probability density
"""
return stats.norm.pdf(xs, self.mu, self.sigma)
class ExponentialPdf(Pdf):
"""Represents the PDF of an exponential distribution."""
def __init__(self, lam=1, label=None):
"""Constructs an exponential Pdf with given parameter.
lam: rate parameter
label: string
"""
self.lam = lam
self.label = label if label is not None else '_nolegend_'
def __str__(self):
return 'ExponentialPdf(%f)' % (self.lam)
def GetLinspace(self):
"""Get a linspace for plotting.
Returns: numpy array
"""
low, high = 0, 5.0/self.lam
return np.linspace(low, high, 101)
def Density(self, xs):
"""Evaluates this Pdf at xs.
xs: scalar or sequence of floats
returns: float or NumPy array of probability density
"""
return stats.expon.pdf(xs, scale=1.0/self.lam)
class EstimatedPdf(Pdf):
"""Represents a PDF estimated by KDE."""
def __init__(self, sample, label=None):
"""Estimates the density function based on a sample.
sample: sequence of data
label: string
"""
self.label = label if label is not None else '_nolegend_'
self.kde = stats.gaussian_kde(sample)
low = min(sample)
high = max(sample)
self.linspace = np.linspace(low, high, 101)
def __str__(self):
return 'EstimatedPdf(label=%s)' % str(self.label)
def GetLinspace(self):
"""Get a linspace for plotting.
Returns: numpy array
"""
return self.linspace
def Density(self, xs):
"""Evaluates this Pdf at xs.
returns: float or NumPy array of probability density
"""
return self.kde.evaluate(xs)
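# --- Illustrative sketch (added for exposition, not part of the original
# module): estimating a density by KDE from a sample and discretizing it
# into a Pmf.  `_demo_estimated_pdf` is hypothetical.
def _demo_estimated_pdf():
    sample = np.random.normal(loc=3.0, scale=1.0, size=500)
    pdf = EstimatedPdf(sample, label='kde')
    # discrete approximation evaluated at 101 points between 0 and 6
    pmf = pdf.MakePmf(low=0.0, high=6.0, n=101, label='kde pmf')
    return pdf, pmf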
def CredibleInterval(pmf, percentage=90):
"""Computes a credible interval for a given distribution.
If percentage=90, computes the 90% CI.
Args:
pmf: Pmf object representing a posterior distribution
percentage: float between 0 and 100
Returns:
sequence of two floats, low and high
"""
cdf = pmf.MakeCdf()
prob = (1 - percentage / 100.0) / 2
interval = cdf.Value(prob), cdf.Value(1 - prob)
return interval
def PmfProbLess(pmf1, pmf2):
"""Probability that a value from pmf1 is less than a value from pmf2.
Args:
pmf1: Pmf object
pmf2: Pmf object
Returns:
float probability
"""
total = 0.0
for v1, p1 in pmf1.Items():
for v2, p2 in pmf2.Items():
if v1 < v2:
total += p1 * p2
return total
def PmfProbGreater(pmf1, pmf2):
"""Probability that a value from pmf1 is less than a value from pmf2.
Args:
pmf1: Pmf object
pmf2: Pmf object
Returns:
float probability
"""
total = 0.0
for v1, p1 in pmf1.Items():
for v2, p2 in pmf2.Items():
if v1 > v2:
total += p1 * p2
return total
def PmfProbEqual(pmf1, pmf2):
"""Probability that a value from pmf1 equals a value from pmf2.
Args:
pmf1: Pmf object
pmf2: Pmf object
Returns:
float probability
"""
total = 0.0
for v1, p1 in pmf1.Items():
for v2, p2 in pmf2.Items():
if v1 == v2:
total += p1 * p2
return total
def RandomSum(dists):
"""Chooses a random value from each dist and returns the sum.
dists: sequence of Pmf or Cdf objects
returns: numerical sum
"""
total = sum(dist.Random() for dist in dists)
return total
def SampleSum(dists, n):
"""Draws a sample of sums from a list of distributions.
dists: sequence of Pmf or Cdf objects
n: sample size
returns: new Pmf of sums
"""
pmf = Pmf(RandomSum(dists) for i in range(n))
return pmf
def EvalNormalPdf(x, mu, sigma):
"""Computes the PDF of the normal distribution.
x: value
mu: mean
sigma: standard deviation
returns: float probability density
"""
return stats.norm.pdf(x, mu, sigma)
def MakeNormalPmf(mu, sigma, num_sigmas, n=201):
"""Makes a PMF discrete approx to a Normal distribution.
mu: float mean
sigma: float standard deviation
num_sigmas: how many sigmas to extend in each direction
n: number of values in the Pmf
returns: normalized Pmf
"""
pmf = Pmf()
low = mu - num_sigmas * sigma
high = mu + num_sigmas * sigma
for x in np.linspace(low, high, n):
p = EvalNormalPdf(x, mu, sigma)
pmf.Set(x, p)
pmf.Normalize()
return pmf
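# Illustrative sketch, not part of the original module: shows how MakeNormalPmf
# above discretizes a Normal distribution. The helper name _example_normal_pmf
# is an addition for demonstration only; it assumes the Pmf class defined
# earlier in this module.
def _example_normal_pmf():
    """Discretizes N(0, 1) and returns the mass within one sigma (about 0.68)."""
    pmf = MakeNormalPmf(mu=0.0, sigma=1.0, num_sigmas=4)
    # Items() yields (value, probability) pairs of the normalized Pmf.
    return sum(p for x, p in pmf.Items() if -1.0 <= x <= 1.0)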
def EvalBinomialPmf(k, n, p):
"""Evaluates the binomial PMF.
Returns the probability of k successes in n trials with probability p.
"""
return stats.binom.pmf(k, n, p)
def EvalHypergeomPmf(k, N, K, n):
"""Evaluates the hypergeometric PMF.
Returns the probability of k successes in n trials from a population
N with K successes in it.
"""
return stats.hypergeom.pmf(k, N, K, n)
def EvalPoissonPmf(k, lam):
"""Computes the Poisson PMF.
k: number of events
lam: parameter lambda in events per unit time
returns: float probability
"""
# don't use the scipy function (yet). for lam=0 it returns NaN;
# should be 0.0
# return stats.poisson.pmf(k, lam)
return lam ** k * math.exp(-lam) / special.gamma(k+1)
def MakePoissonPmf(lam, high, step=1):
"""Makes a PMF discrete approx to a Poisson distribution.
lam: parameter lambda in events per unit time
high: upper bound of the Pmf
returns: normalized Pmf
"""
pmf = Pmf()
for k in range(0, high + 1, step):
p = EvalPoissonPmf(k, lam)
pmf.Set(k, p)
pmf.Normalize()
return pmf
def EvalExponentialPdf(x, lam):
"""Computes the exponential PDF.
x: value
lam: parameter lambda in events per unit time
returns: float probability density
"""
return lam * math.exp(-lam * x)
def EvalExponentialCdf(x, lam):
"""Evaluates CDF of the exponential distribution with parameter lam."""
return 1 - math.exp(-lam * x)
def MakeExponentialPmf(lam, high, n=200):
"""Makes a PMF discrete approx to an exponential distribution.
lam: parameter lambda in events per unit time
high: upper bound
n: number of values in the Pmf
returns: normalized Pmf
"""
pmf = Pmf()
for x in np.linspace(0, high, n):
p = EvalExponentialPdf(x, lam)
pmf.Set(x, p)
pmf.Normalize()
return pmf
def StandardNormalCdf(x):
"""Evaluates the CDF of the standard Normal distribution.
See http://en.wikipedia.org/wiki/Normal_distribution
#Cumulative_distribution_function
Args:
x: float
Returns:
float
"""
return (math.erf(x / ROOT2) + 1) / 2
def EvalNormalCdf(x, mu=0, sigma=1):
"""Evaluates the CDF of the normal distribution.
Args:
x: float
mu: mean parameter
sigma: standard deviation parameter
Returns:
float
"""
return stats.norm.cdf(x, loc=mu, scale=sigma)
def EvalNormalCdfInverse(p, mu=0, sigma=1):
"""Evaluates the inverse CDF of the normal distribution.
See http://en.wikipedia.org/wiki/Normal_distribution#Quantile_function
Args:
p: float
mu: mean parameter
sigma: standard deviation parameter
Returns:
float
"""
return stats.norm.ppf(p, loc=mu, scale=sigma)
def EvalLognormalCdf(x, mu=0, sigma=1):
"""Evaluates the CDF of the lognormal distribution.
x: float or sequence
mu: mean parameter
sigma: standard deviation parameter
Returns: float or sequence
"""
return stats.lognorm.cdf(x, loc=mu, scale=sigma)
def RenderExpoCdf(lam, low, high, n=101):
"""Generates sequences of xs and ps for an exponential CDF.
lam: parameter
low: float
high: float
n: number of points to render
returns: numpy arrays (xs, ps)
"""
xs = np.linspace(low, high, n)
ps = 1 - np.exp(-lam * xs)
#ps = stats.expon.cdf(xs, scale=1.0/lam)
return xs, ps
def RenderNormalCdf(mu, sigma, low, high, n=101):
"""Generates sequences of xs and ps for a Normal CDF.
mu: parameter
sigma: parameter
low: float
high: float
n: number of points to render
returns: numpy arrays (xs, ps)
"""
xs = np.linspace(low, high, n)
ps = stats.norm.cdf(xs, mu, sigma)
return xs, ps
def RenderParetoCdf(xmin, alpha, low, high, n=50):
"""Generates sequences of xs and ps for a Pareto CDF.
xmin: parameter
alpha: parameter
low: float
high: float
n: number of points to render
returns: numpy arrays (xs, ps)
"""
if low < xmin:
low = xmin
xs = np.linspace(low, high, n)
ps = 1 - (xs / xmin) ** -alpha
#ps = stats.pareto.cdf(xs, scale=xmin, b=alpha)
return xs, ps
class Beta(object):
"""Represents a Beta distribution.
See http://en.wikipedia.org/wiki/Beta_distribution
"""
def __init__(self, alpha=1, beta=1, label=None):
"""Initializes a Beta distribution."""
self.alpha = alpha
self.beta = beta
self.label = label if label is not None else '_nolegend_'
def Update(self, data):
"""Updates a Beta distribution.
data: pair of int (heads, tails)
"""
heads, tails = data
self.alpha += heads
self.beta += tails
def Mean(self):
"""Computes the mean of this distribution."""
return self.alpha / (self.alpha + self.beta)
def Random(self):
"""Generates a random variate from this distribution."""
return random.betavariate(self.alpha, self.beta)
def Sample(self, n):
"""Generates a random sample from this distribution.
n: int sample size
"""
size = n,
return np.random.beta(self.alpha, self.beta, size)
def EvalPdf(self, x):
"""Evaluates the PDF at x."""
return x ** (self.alpha - 1) * (1 - x) ** (self.beta - 1)
def MakePmf(self, steps=101, label=None):
"""Returns a Pmf of this distribution.
Note: Normally, we just evaluate the PDF at a sequence
of points and treat the probability density as a probability
mass.
But if alpha or beta is less than one, we have to be
more careful because the PDF goes to infinity at x=0
and x=1. In that case we evaluate the CDF and compute
differences.
"""
if self.alpha < 1 or self.beta < 1:
cdf = self.MakeCdf()
pmf = cdf.MakePmf()
return pmf
xs = [i / (steps - 1.0) for i in range(steps)]
probs = [self.EvalPdf(x) for x in xs]
pmf = Pmf(dict(zip(xs, probs)), label=label)
return pmf
def MakeCdf(self, steps=101):
"""Returns the CDF of this distribution."""
xs = [i / (steps - 1.0) for i in range(steps)]
ps = [special.betainc(self.alpha, self.beta, x) for x in xs]
cdf = Cdf(xs, ps)
return cdf
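# Illustrative sketch, not part of the original module: demonstrates the
# conjugate update implemented by the Beta class above, where Update() simply
# adds observed heads and tails to alpha and beta. The helper name is an
# addition for demonstration only.
def _example_beta_update():
    """Updates a flat Beta(1, 1) prior with 140 heads and 110 tails."""
    beta = Beta(1, 1)
    beta.Update((140, 110))
    # Posterior mean is (1 + 140) / (2 + 250) = 141 / 252, roughly 0.56.
    return beta.Mean()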
class Dirichlet(object):
"""Represents a Dirichlet distribution.
See http://en.wikipedia.org/wiki/Dirichlet_distribution
"""
def __init__(self, n, conc=1, label=None):
"""Initializes a Dirichlet distribution.
n: number of dimensions
conc: concentration parameter (smaller yields more concentration)
label: string label
"""
if n < 2:
raise ValueError('A Dirichlet distribution with '
'n<2 makes no sense')
self.n = n
self.params = np.ones(n, dtype=np.float) * conc
self.label = label if label is not None else '_nolegend_'
def Update(self, data):
"""Updates a Dirichlet distribution.
data: sequence of observations, in order corresponding to params
"""
m = len(data)
self.params[:m] += data
def Random(self):
"""Generates a random variate from this distribution.
Returns: normalized vector of fractions
"""
p = np.random.gamma(self.params)
return p / p.sum()
def Likelihood(self, data):
"""Computes the likelihood of the data.
Selects a random vector of probabilities from this distribution.
Returns: float probability
"""
m = len(data)
if self.n < m:
return 0
x = data
p = self.Random()
q = p[:m] ** x
return q.prod()
def LogLikelihood(self, data):
"""Computes the log likelihood of the data.
Selects a random vector of probabilities from this distribution.
Returns: float log probability
"""
m = len(data)
if self.n < m:
return float('-inf')
x = self.Random()
y = np.log(x[:m]) * data
return y.sum()
def MarginalBeta(self, i):
"""Computes the marginal distribution of the ith element.
See http://en.wikipedia.org/wiki/Dirichlet_distribution
#Marginal_distributions
i: int
Returns: Beta object
"""
alpha0 = self.params.sum()
alpha = self.params[i]
return Beta(alpha, alpha0 - alpha)
def PredictivePmf(self, xs, label=None):
"""Makes a predictive distribution.
xs: values to go into the Pmf
Returns: Pmf that maps from x to the mean prevalence of x
"""
alpha0 = self.params.sum()
ps = self.params / alpha0
return Pmf(zip(xs, ps), label=label)
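# Illustrative sketch, not part of the original module: shows the usual
# Dirichlet workflow, observing counts per category and reading off a
# per-category Beta marginal. The helper name is an addition for
# demonstration only.
def _example_dirichlet_update():
    """Observes counts for three categories and returns one marginal mean."""
    dirichlet = Dirichlet(3)
    dirichlet.Update((3, 2, 1))
    # With a concentration of 1, the first marginal is Beta(1 + 3, 9 - 4).
    marginal = dirichlet.MarginalBeta(0)
    return marginal.Mean()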
def BinomialCoef(n, k):
"""Compute the binomial coefficient "n choose k".
n: number of trials
k: number of successes
Returns: float
"""
return scipy.misc.comb(n, k)
def LogBinomialCoef(n, k):
"""Computes the log of the binomial coefficient.
http://math.stackexchange.com/questions/64716/
approximating-the-logarithm-of-the-binomial-coefficient
n: number of trials
k: number of successes
Returns: float
"""
return n * math.log(n) - k * math.log(k) - (n - k) * math.log(n - k)
def NormalProbability(ys, jitter=0.0):
"""Generates data for a normal probability plot.
ys: sequence of values
jitter: float magnitude of jitter added to the ys
returns: numpy arrays xs, ys
"""
n = len(ys)
xs = np.random.normal(0, 1, n)
xs.sort()
if jitter:
ys = Jitter(ys, jitter)
else:
ys = np.array(ys)
ys.sort()
return xs, ys
def Jitter(values, jitter=0.5):
"""Jitters the values by adding a uniform variate in (-jitter, jitter).
values: sequence
jitter: scalar magnitude of jitter
returns: new numpy array
"""
n = len(values)
return np.random.uniform(-jitter, +jitter, n) + values
def NormalProbabilityPlot(sample, fit_color='0.8', **options):
"""Makes a normal probability plot with a fitted line.
sample: sequence of numbers
fit_color: color string for the fitted line
options: passed along to Plot
"""
xs, ys = NormalProbability(sample)
mean, var = MeanVar(sample)
std = math.sqrt(var)
fit = FitLine(xs, mean, std)
thinkplot.Plot(*fit, color=fit_color, label='model')
xs, ys = NormalProbability(sample)
thinkplot.Plot(xs, ys, **options)
def Mean(xs):
"""Computes mean.
xs: sequence of values
returns: float mean
"""
return np.mean(xs)
def Var(xs, mu=None, ddof=0):
"""Computes variance.
xs: sequence of values
mu: optional known mean
ddof: delta degrees of freedom
returns: float
"""
xs = np.asarray(xs)
if mu is None:
mu = xs.mean()
ds = xs - mu
return np.dot(ds, ds) / (len(xs) - ddof)
def Std(xs, mu=None, ddof=0):
"""Computes standard deviation.
xs: sequence of values
mu: optional known mean
ddof: delta degrees of freedom
returns: float
"""
var = Var(xs, mu, ddof)
return math.sqrt(var)
def MeanVar(xs, ddof=0):
"""Computes mean and variance.
Based on http://stackoverflow.com/questions/19391149/
numpy-mean-and-variance-from-single-function
xs: sequence of values
ddof: delta degrees of freedom
returns: pair of float, mean and var
"""
xs = np.asarray(xs)
mean = xs.mean()
s2 = Var(xs, mean, ddof)
return mean, s2
def Trim(t, p=0.01):
"""Trims the largest and smallest elements of t.
Args:
t: sequence of numbers
p: fraction of values to trim off each end
Returns:
sequence of values
"""
n = int(p * len(t))
t = sorted(t)[n:-n]
return t
def TrimmedMean(t, p=0.01):
"""Computes the trimmed mean of a sequence of numbers.
Args:
t: sequence of numbers
p: fraction of values to trim off each end
Returns:
float
"""
t = Trim(t, p)
return Mean(t)
def TrimmedMeanVar(t, p=0.01):
"""Computes the trimmed mean and variance of a sequence of numbers.
Side effect: sorts the list.
Args:
t: sequence of numbers
p: fraction of values to trim off each end
Returns:
pair of float, mean and var
"""
t = Trim(t, p)
mu, var = MeanVar(t)
return mu, var
def CohenEffectSize(group1, group2):
"""Compute Cohen's d.
group1: Series or NumPy array
group2: Series or NumPy array
returns: float
"""
diff = group1.mean() - group2.mean()
n1, n2 = len(group1), len(group2)
var1 = group1.var()
var2 = group2.var()
pooled_var = (n1 * var1 + n2 * var2) / (n1 + n2)
d = diff / math.sqrt(pooled_var)
return d
def Cov(xs, ys, meanx=None, meany=None):
"""Computes Cov(X, Y).
Args:
xs: sequence of values
ys: sequence of values
meanx: optional float mean of xs
meany: optional float mean of ys
Returns:
Cov(X, Y)
"""
xs = np.asarray(xs)
ys = np.asarray(ys)
if meanx is None:
meanx = np.mean(xs)
if meany is None:
meany = np.mean(ys)
cov = np.dot(xs-meanx, ys-meany) / len(xs)
return cov
def Corr(xs, ys):
"""Computes Corr(X, Y).
Args:
xs: sequence of values
ys: sequence of values
Returns:
Corr(X, Y)
"""
xs = np.asarray(xs)
ys = np.asarray(ys)
meanx, varx = MeanVar(xs)
meany, vary = MeanVar(ys)
corr = Cov(xs, ys, meanx, meany) / math.sqrt(varx * vary)
return corr
def SerialCorr(series, lag=1):
"""Computes the serial correlation of a series.
series: Series
lag: integer number of intervals to shift
returns: float correlation
"""
xs = series[lag:]
ys = series.shift(lag)[lag:]
corr = Corr(xs, ys)
return corr
def SpearmanCorr(xs, ys):
"""Computes Spearman's rank correlation.
Args:
xs: sequence of values
ys: sequence of values
Returns:
float Spearman's correlation
"""
xranks = pandas.Series(xs).rank()
yranks = pandas.Series(ys).rank()
return Corr(xranks, yranks)
def MapToRanks(t):
"""Returns a list of ranks corresponding to the elements in t.
Args:
t: sequence of numbers
Returns:
list of integer ranks, starting at 1
"""
# pair up each value with its index
pairs = enumerate(t)
# sort by value
sorted_pairs = sorted(pairs, key=itemgetter(1))
# pair up each pair with its rank
ranked = enumerate(sorted_pairs)
# sort by index
resorted = sorted(ranked, key=lambda trip: trip[1][0])
# extract the ranks
ranks = [trip[0]+1 for trip in resorted]
return ranks
def LeastSquares(xs, ys):
"""Computes a linear least squares fit for ys as a function of xs.
Args:
xs: sequence of values
ys: sequence of values
Returns:
tuple of (intercept, slope)
"""
meanx, varx = MeanVar(xs)
meany = Mean(ys)
slope = Cov(xs, ys, meanx, meany) / varx
inter = meany - slope * meanx
return inter, slope
def FitLine(xs, inter, slope):
"""Fits a line to the given data.
xs: sequence of x
returns: tuple of numpy arrays (sorted xs, fit ys)
"""
fit_xs = np.sort(xs)
fit_ys = inter + slope * fit_xs
return fit_xs, fit_ys
def Residuals(xs, ys, inter, slope):
"""Computes residuals for a linear fit with parameters inter and slope.
Args:
xs: independent variable
ys: dependent variable
inter: float intercept
slope: float slope
Returns:
list of residuals
"""
xs = np.asarray(xs)
ys = np.asarray(ys)
res = ys - (inter + slope * xs)
return res
def CoefDetermination(ys, res):
"""Computes the coefficient of determination (R^2) for given residuals.
Args:
ys: dependent variable
res: residuals
Returns:
float coefficient of determination
"""
return 1 - Var(res) / Var(ys)
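# Illustrative sketch, not part of the original module: ties together the
# regression helpers above (LeastSquares, Residuals, CoefDetermination). The
# synthetic data and helper name are assumptions for demonstration only.
def _example_least_squares():
    """Fits y = 1 + 2x plus noise and returns (intercept, slope, R^2)."""
    xs = np.linspace(0, 10, 50)
    ys = 1.0 + 2.0 * xs + np.random.normal(0, 0.5, len(xs))
    inter, slope = LeastSquares(xs, ys)
    res = Residuals(xs, ys, inter, slope)
    return inter, slope, CoefDetermination(ys, res)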
def CorrelatedGenerator(rho):
"""Generates standard normal variates with serial correlation.
rho: target coefficient of correlation
Returns: iterable
"""
x = random.gauss(0, 1)
yield x
sigma = math.sqrt(1 - rho**2)
while True:
x = random.gauss(x * rho, sigma)
yield x
def CorrelatedNormalGenerator(mu, sigma, rho):
"""Generates normal variates with serial correlation.
mu: mean of variate
sigma: standard deviation of variate
rho: target coefficient of correlation
Returns: iterable
"""
for x in CorrelatedGenerator(rho):
yield x * sigma + mu
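# Illustrative sketch, not part of the original module: draws a finite sample
# from the infinite CorrelatedGenerator above and checks the lag-1 serial
# correlation it targets. The helper name is an addition for demonstration
# only; itertools comes from the standard library.
def _example_correlated_sample(rho=0.8, n=10000):
    """Returns the estimated lag-1 correlation of n correlated variates."""
    import itertools
    xs = list(itertools.islice(CorrelatedGenerator(rho), n))
    # SerialCorr expects a pandas Series; the estimate should be close to rho.
    return SerialCorr(pandas.Series(xs), lag=1)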
def RawMoment(xs, k):
"""Computes the kth raw moment of xs.
"""
return sum(x**k for x in xs) / len(xs)
def CentralMoment(xs, k):
"""Computes the kth central moment of xs.
"""
mean = RawMoment(xs, 1)
return sum((x - mean)**k for x in xs) / len(xs)
def StandardizedMoment(xs, k):
"""Computes the kth standardized moment of xs.
"""
var = CentralMoment(xs, 2)
std = math.sqrt(var)
return CentralMoment(xs, k) / std**k
def Skewness(xs):
"""Computes skewness.
"""
return StandardizedMoment(xs, 3)
def Median(xs):
"""Computes the median (50th percentile) of a sequence.
xs: sequence or anything else that can initialize a Cdf
returns: float
"""
cdf = Cdf(xs)
return cdf.Value(0.5)
def IQR(xs):
"""Computes the interquartile range of a sequence.
xs: sequence or anything else that can initialize a Cdf
returns: pair of floats
"""
cdf = Cdf(xs)
return cdf.Value(0.25), cdf.Value(0.75)
def PearsonMedianSkewness(xs):
"""Computes the Pearson median skewness.
"""
median = Median(xs)
mean = RawMoment(xs, 1)
var = CentralMoment(xs, 2)
std = math.sqrt(var)
gp = 3 * (mean - median) / std
return gp
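# Illustrative sketch, not part of the original module: applies the robust
# summary statistics above to a right-skewed sample, where the mean exceeds
# the median and the Pearson median skewness is positive. The helper name is
# an addition for demonstration only.
def _example_skew_summary():
    """Returns (median, IQR bounds, Pearson median skewness) for expo data."""
    xs = np.random.exponential(1.0, 10000)
    return Median(xs), IQR(xs), PearsonMedianSkewness(xs)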
class FixedWidthVariables(object):
"""Represents a set of variables in a fixed width file."""
def __init__(self, variables, index_base=0):
"""Initializes.
variables: DataFrame
index_base: are the indices 0 or 1 based?
Attributes:
colspecs: list of (start, end) index tuples
names: list of string variable names
"""
self.variables = variables
# note: by default, subtract 1 from colspecs
self.colspecs = variables[['start', 'end']] - index_base
# convert colspecs to a list of pair of int
self.colspecs = self.colspecs.astype(np.int).values.tolist()
self.names = variables['name']
def ReadFixedWidth(self, filename, **options):
"""Reads a fixed width ASCII file.
filename: string filename
returns: DataFrame
"""
df = pandas.read_fwf(filename,
colspecs=self.colspecs,
names=self.names,
**options)
return df
def ReadStataDct(dct_file, **options):
"""Reads a Stata dictionary file.
dct_file: string filename
options: dict of options passed to open()
returns: FixedWidthVariables object
"""
type_map = dict(byte=int, int=int, long=int, float=float, double=float)
var_info = []
for line in open(dct_file, **options):
match = re.search( r'_column\(([^)]*)\)', line)
if match:
start = int(match.group(1))
t = line.split()
vtype, name, fstring = t[1:4]
name = name.lower()
if vtype.startswith('str'):
vtype = str
else:
vtype = type_map[vtype]
long_desc = ' '.join(t[4:]).strip('"')
var_info.append((start, vtype, name, fstring, long_desc))
columns = ['start', 'type', 'name', 'fstring', 'desc']
variables = pandas.DataFrame(var_info, columns=columns)
# fill in the end column by shifting the start column
variables['end'] = variables.start.shift(-1)
variables.loc[len(variables)-1, 'end'] = 0
dct = FixedWidthVariables(variables, index_base=1)
return dct
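# Illustrative sketch, not part of the original module: shows the intended
# two-step flow of ReadStataDct, parsing a Stata dictionary and then reading
# the matching fixed width data file. The file names are hypothetical
# placeholders, not files shipped with this module.
def _example_read_stata(dct_path='survey.dct', dat_path='survey.dat.gz'):
    """Reads a gzipped fixed width data file described by a Stata dictionary."""
    dct = ReadStataDct(dct_path)
    return dct.ReadFixedWidth(dat_path, compression='gzip')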
def Resample(xs, n=None):
"""Draw a sample from xs with the same length as xs.
xs: sequence
n: sample size (default: len(xs))
returns: NumPy array
"""
if n is None:
n = len(xs)
return np.random.choice(xs, n, replace=True)
def SampleRows(df, nrows, replace=False):
"""Choose a sample of rows from a DataFrame.
df: DataFrame
nrows: number of rows
replace: whether to sample with replacement
returns: DataFrame
"""
indices = np.random.choice(df.index, nrows, replace=replace)
sample = df.loc[indices]
return sample
def ResampleRows(df):
"""Resamples rows from a DataFrame.
df: DataFrame
returns: DataFrame
"""
return SampleRows(df, len(df), replace=True)
def ResampleRowsWeighted(df, column='finalwgt'):
"""Resamples a DataFrame using probabilities proportional to given column.
df: DataFrame
column: string column name to use as weights
returns: DataFrame
"""
weights = df[column]
cdf = Cdf(dict(weights))
indices = cdf.Sample(len(weights))
sample = df.loc[indices]
return sample
def PercentileRow(array, p):
"""Selects the row from a sorted array that maps to percentile p.
p: float 0--100
returns: NumPy array (one row)
"""
rows, cols = array.shape
index = int(rows * p / 100)
return array[index,]
def PercentileRows(ys_seq, percents):
"""Given a collection of lines, selects percentiles along vertical axis.
For example, if ys_seq contains simulation results like ys as a
function of time, and percents contains (5, 95), the result would
be a 90% CI for each vertical slice of the simulation results.
ys_seq: sequence of lines (y values)
percents: list of percentiles (0-100) to select
returns: list of NumPy arrays, one for each percentile
"""
nrows = len(ys_seq)
ncols = len(ys_seq[0])
array = np.zeros((nrows, ncols))
for i, ys in enumerate(ys_seq):
array[i,] = ys
array = np.sort(array, axis=0)
rows = [PercentileRow(array, p) for p in percents]
return rows
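# Illustrative sketch, not part of the original module: shows the intended use
# of PercentileRows, running many simulations that each yield a curve and
# keeping the 5th and 95th percentile curves as a 90% band. The random-walk
# simulation and helper name are assumptions for demonstration only.
def _example_percentile_band(iters=100, length=50):
    """Returns (low, high) curves bounding 90% of simulated random walks."""
    ys_seq = [np.cumsum(np.random.normal(0, 1, length)) for _ in range(iters)]
    low, high = PercentileRows(ys_seq, [5, 95])
    return low, high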
def Smooth(xs, sigma=2, **options):
"""Smooths a NumPy array with a Gaussian filter.
xs: sequence
sigma: standard deviation of the filter
"""
return ndimage.filters.gaussian_filter1d(xs, sigma, **options)
class HypothesisTest(object):
"""Represents a hypothesis test."""
def __init__(self, data):
"""Initializes.
data: data in whatever form is relevant
"""
self.data = data
self.MakeModel()
self.actual = self.TestStatistic(data)
self.test_stats = None
self.test_cdf = None
def PValue(self, iters=1000):
"""Computes the distribution of the test statistic and p-value.
iters: number of iterations
returns: float p-value
"""
self.test_stats = [self.TestStatistic(self.RunModel())
for _ in range(iters)]
self.test_cdf = Cdf(self.test_stats)
count = sum(1 for x in self.test_stats if x >= self.actual)
return count / iters
def MaxTestStat(self):
"""Returns the largest test statistic seen during simulations.
"""
return max(self.test_stats)
def PlotCdf(self, label=None):
"""Draws a Cdf with vertical lines at the observed test stat.
"""
def VertLine(x):
"""Draws a vertical line at x."""
thinkplot.Plot([x, x], [0, 1], color='0.8')
VertLine(self.actual)
thinkplot.Cdf(self.test_cdf, label=label)
def TestStatistic(self, data):
"""Computes the test statistic.
data: data in whatever form is relevant
"""
raise UnimplementedMethodException()
def MakeModel(self):
"""Build a model of the null hypothesis.
"""
pass
def RunModel(self):
"""Run the model of the null hypothesis.
returns: simulated data
"""
raise UnimplementedMethodException()
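# Illustrative sketch, not part of the original module: shows how the
# HypothesisTest skeleton above is meant to be subclassed, overriding
# TestStatistic and RunModel. The class name and the (140, 110) data used in
# the comment are assumptions for demonstration only.
class _ExampleCoinTest(HypothesisTest):
    """Tests whether an observed (heads, tails) split fits a fair coin."""
    def TestStatistic(self, data):
        heads, tails = data
        return abs(heads - tails)
    def RunModel(self):
        heads, tails = self.data
        n = heads + tails
        flips = np.random.random(n) < 0.5
        heads_sim = int(flips.sum())
        return heads_sim, n - heads_sim
# For example, _ExampleCoinTest((140, 110)).PValue() typically lands near 0.07.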
def main():
pass
if __name__ == '__main__':
main()
| gpl-3.0 |
yuruofeifei/mxnet | example/ssd/dataset/pycocotools/coco.py | 29 | 19564 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
__author__ = 'tylin'
__version__ = '2.0'
# Interface for accessing the Microsoft COCO dataset.
# Microsoft COCO is a large image dataset designed for object detection,
# segmentation, and caption generation. pycocotools is a Python API that
# assists in loading, parsing and visualizing the annotations in COCO.
# Please visit http://mscoco.org/ for more information on COCO, including
# for the data, paper, and tutorials. The exact format of the annotations
# is also described on the COCO website. For example usage of the pycocotools
# please see pycocotools_demo.ipynb. In addition to this API, please download both
# the COCO images and annotations in order to run the demo.
# An alternative to using the API is to load the annotations directly
# into a Python dictionary.
# Using the API provides additional utility functions. Note that this API
# supports both *instance* and *caption* annotations. In the case of
# captions not all functions are defined (e.g. categories are undefined).
# The following API functions are defined:
# COCO - COCO api class that loads COCO annotation file and prepare data structures.
# decodeMask - Decode binary mask M encoded via run-length encoding.
# encodeMask - Encode binary mask M using run-length encoding.
# getAnnIds - Get ann ids that satisfy given filter conditions.
# getCatIds - Get cat ids that satisfy given filter conditions.
# getImgIds - Get img ids that satisfy given filter conditions.
# loadAnns - Load anns with the specified ids.
# loadCats - Load cats with the specified ids.
# loadImgs - Load imgs with the specified ids.
# annToMask - Convert segmentation in an annotation to binary mask.
# showAnns - Display the specified annotations.
# loadRes - Load algorithm results and create API for accessing them.
# download - Download COCO images from mscoco.org server.
# Throughout the API "ann"=annotation, "cat"=category, and "img"=image.
# Help on each function can be accessed by: "help COCO>function".
# See also COCO>decodeMask,
# COCO>encodeMask, COCO>getAnnIds, COCO>getCatIds,
# COCO>getImgIds, COCO>loadAnns, COCO>loadCats,
# COCO>loadImgs, COCO>annToMask, COCO>showAnns
# Microsoft COCO Toolbox. version 2.0
# Data, paper, and tutorials available at: http://mscoco.org/
# Code written by Piotr Dollar and Tsung-Yi Lin, 2014.
# Licensed under the Simplified BSD License [see bsd.txt]
import json
import time
import matplotlib.pyplot as plt
from matplotlib.collections import PatchCollection
from matplotlib.patches import Polygon
import numpy as np
import copy
import itertools
# from . import mask as maskUtils
import os
from collections import defaultdict
import sys
PYTHON_VERSION = sys.version_info[0]
if PYTHON_VERSION == 2:
from urllib import urlretrieve
elif PYTHON_VERSION == 3:
from urllib.request import urlretrieve
class COCO:
def __init__(self, annotation_file=None):
"""
Constructor of Microsoft COCO helper class for reading and visualizing annotations.
:param annotation_file (str): location of annotation file
:param image_folder (str): location to the folder that hosts images.
:return:
"""
# load dataset
self.dataset,self.anns,self.cats,self.imgs = dict(),dict(),dict(),dict()
self.imgToAnns, self.catToImgs = defaultdict(list), defaultdict(list)
if not annotation_file == None:
print('loading annotations into memory...')
tic = time.time()
dataset = json.load(open(annotation_file, 'r'))
assert type(dataset)==dict, 'annotation file format {} not supported'.format(type(dataset))
print('Done (t={:0.2f}s)'.format(time.time()- tic))
self.dataset = dataset
self.createIndex()
def createIndex(self):
# create index
print('creating index...')
anns, cats, imgs = {}, {}, {}
imgToAnns,catToImgs = defaultdict(list),defaultdict(list)
if 'annotations' in self.dataset:
for ann in self.dataset['annotations']:
imgToAnns[ann['image_id']].append(ann)
anns[ann['id']] = ann
if 'images' in self.dataset:
for img in self.dataset['images']:
imgs[img['id']] = img
if 'categories' in self.dataset:
for cat in self.dataset['categories']:
cats[cat['id']] = cat
if 'annotations' in self.dataset and 'categories' in self.dataset:
for ann in self.dataset['annotations']:
catToImgs[ann['category_id']].append(ann['image_id'])
print('index created!')
# create class members
self.anns = anns
self.imgToAnns = imgToAnns
self.catToImgs = catToImgs
self.imgs = imgs
self.cats = cats
def info(self):
"""
Print information about the annotation file.
:return:
"""
for key, value in self.dataset['info'].items():
print('{}: {}'.format(key, value))
def getAnnIds(self, imgIds=[], catIds=[], areaRng=[], iscrowd=None):
"""
Get ann ids that satisfy given filter conditions. default skips that filter
:param imgIds (int array) : get anns for given imgs
catIds (int array) : get anns for given cats
areaRng (float array) : get anns for given area range (e.g. [0 inf])
iscrowd (boolean) : get anns for given crowd label (False or True)
:return: ids (int array) : integer array of ann ids
"""
imgIds = imgIds if type(imgIds) == list else [imgIds]
catIds = catIds if type(catIds) == list else [catIds]
if len(imgIds) == len(catIds) == len(areaRng) == 0:
anns = self.dataset['annotations']
else:
if not len(imgIds) == 0:
lists = [self.imgToAnns[imgId] for imgId in imgIds if imgId in self.imgToAnns]
anns = list(itertools.chain.from_iterable(lists))
else:
anns = self.dataset['annotations']
anns = anns if len(catIds) == 0 else [ann for ann in anns if ann['category_id'] in catIds]
anns = anns if len(areaRng) == 0 else [ann for ann in anns if ann['area'] > areaRng[0] and ann['area'] < areaRng[1]]
if not iscrowd == None:
ids = [ann['id'] for ann in anns if ann['iscrowd'] == iscrowd]
else:
ids = [ann['id'] for ann in anns]
return ids
def getCatIds(self, catNms=[], supNms=[], catIds=[]):
"""
Get cat ids that satisfy given filter conditions. default skips that filter.
:param catNms (str array) : get cats for given cat names
:param supNms (str array) : get cats for given supercategory names
:param catIds (int array) : get cats for given cat ids
:return: ids (int array) : integer array of cat ids
"""
catNms = catNms if type(catNms) == list else [catNms]
supNms = supNms if type(supNms) == list else [supNms]
catIds = catIds if type(catIds) == list else [catIds]
if len(catNms) == len(supNms) == len(catIds) == 0:
cats = self.dataset['categories']
else:
cats = self.dataset['categories']
cats = cats if len(catNms) == 0 else [cat for cat in cats if cat['name'] in catNms]
cats = cats if len(supNms) == 0 else [cat for cat in cats if cat['supercategory'] in supNms]
cats = cats if len(catIds) == 0 else [cat for cat in cats if cat['id'] in catIds]
ids = [cat['id'] for cat in cats]
return ids
def getImgIds(self, imgIds=[], catIds=[]):
'''
Get img ids that satisfy given filter conditions.
:param imgIds (int array) : get imgs for given ids
:param catIds (int array) : get imgs with all given cats
:return: ids (int array) : integer array of img ids
'''
imgIds = imgIds if type(imgIds) == list else [imgIds]
catIds = catIds if type(catIds) == list else [catIds]
if len(imgIds) == len(catIds) == 0:
ids = self.imgs.keys()
else:
ids = set(imgIds)
for i, catId in enumerate(catIds):
if i == 0 and len(ids) == 0:
ids = set(self.catToImgs[catId])
else:
ids &= set(self.catToImgs[catId])
return list(ids)
def loadAnns(self, ids=[]):
"""
Load anns with the specified ids.
:param ids (int array) : integer ids specifying anns
:return: anns (object array) : loaded ann objects
"""
if type(ids) == list:
return [self.anns[id] for id in ids]
elif type(ids) == int:
return [self.anns[ids]]
def loadCats(self, ids=[]):
"""
Load cats with the specified ids.
:param ids (int array) : integer ids specifying cats
:return: cats (object array) : loaded cat objects
"""
if type(ids) == list:
return [self.cats[id] for id in ids]
elif type(ids) == int:
return [self.cats[ids]]
def loadImgs(self, ids=[]):
"""
Load imgs with the specified ids.
:param ids (int array) : integer ids specifying img
:return: imgs (object array) : loaded img objects
"""
if type(ids) == list:
return [self.imgs[id] for id in ids]
elif type(ids) == int:
return [self.imgs[ids]]
def showAnns(self, anns):
"""
Display the specified annotations.
:param anns (array of object): annotations to display
:return: None
"""
if len(anns) == 0:
return 0
if 'segmentation' in anns[0] or 'keypoints' in anns[0]:
datasetType = 'instances'
elif 'caption' in anns[0]:
datasetType = 'captions'
else:
raise Exception('datasetType not supported')
if datasetType == 'instances':
ax = plt.gca()
ax.set_autoscale_on(False)
polygons = []
color = []
for ann in anns:
c = (np.random.random((1, 3))*0.6+0.4).tolist()[0]
if 'segmentation' in ann:
if type(ann['segmentation']) == list:
# polygon
for seg in ann['segmentation']:
poly = np.array(seg).reshape((int(len(seg)/2), 2))
polygons.append(Polygon(poly))
color.append(c)
else:
# mask
t = self.imgs[ann['image_id']]
if type(ann['segmentation']['counts']) == list:
# rle = maskUtils.frPyObjects([ann['segmentation']], t['height'], t['width'])
raise NotImplementedError("maskUtils disabled!")
else:
rle = [ann['segmentation']]
# m = maskUtils.decode(rle)
raise NotImplementedError("maskUtils disabled!")
img = np.ones( (m.shape[0], m.shape[1], 3) )
if ann['iscrowd'] == 1:
color_mask = np.array([2.0,166.0,101.0])/255
if ann['iscrowd'] == 0:
color_mask = np.random.random((1, 3)).tolist()[0]
for i in range(3):
img[:,:,i] = color_mask[i]
ax.imshow(np.dstack( (img, m*0.5) ))
if 'keypoints' in ann and type(ann['keypoints']) == list:
# turn skeleton into zero-based index
sks = np.array(self.loadCats(ann['category_id'])[0]['skeleton'])-1
kp = np.array(ann['keypoints'])
x = kp[0::3]
y = kp[1::3]
v = kp[2::3]
for sk in sks:
if np.all(v[sk]>0):
plt.plot(x[sk],y[sk], linewidth=3, color=c)
plt.plot(x[v>0], y[v>0],'o',markersize=8, markerfacecolor=c, markeredgecolor='k',markeredgewidth=2)
plt.plot(x[v>1], y[v>1],'o',markersize=8, markerfacecolor=c, markeredgecolor=c, markeredgewidth=2)
p = PatchCollection(polygons, facecolor=color, linewidths=0, alpha=0.4)
ax.add_collection(p)
p = PatchCollection(polygons, facecolor='none', edgecolors=color, linewidths=2)
ax.add_collection(p)
elif datasetType == 'captions':
for ann in anns:
print(ann['caption'])
def loadRes(self, resFile):
"""
Load result file and return a result api object.
:param resFile (str) : file name of result file
:return: res (obj) : result api object
"""
res = COCO()
res.dataset['images'] = [img for img in self.dataset['images']]
print('Loading and preparing results...')
tic = time.time()
if type(resFile) == str or type(resFile) == unicode:
anns = json.load(open(resFile))
elif type(resFile) == np.ndarray:
anns = self.loadNumpyAnnotations(resFile)
else:
anns = resFile
assert type(anns) == list, 'results is not an array of objects'
annsImgIds = [ann['image_id'] for ann in anns]
assert set(annsImgIds) == (set(annsImgIds) & set(self.getImgIds())), \
'Results do not correspond to current coco set'
if 'caption' in anns[0]:
imgIds = set([img['id'] for img in res.dataset['images']]) & set([ann['image_id'] for ann in anns])
res.dataset['images'] = [img for img in res.dataset['images'] if img['id'] in imgIds]
for id, ann in enumerate(anns):
ann['id'] = id+1
elif 'bbox' in anns[0] and not anns[0]['bbox'] == []:
res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
for id, ann in enumerate(anns):
bb = ann['bbox']
x1, x2, y1, y2 = [bb[0], bb[0]+bb[2], bb[1], bb[1]+bb[3]]
if not 'segmentation' in ann:
ann['segmentation'] = [[x1, y1, x1, y2, x2, y2, x2, y1]]
ann['area'] = bb[2]*bb[3]
ann['id'] = id+1
ann['iscrowd'] = 0
elif 'segmentation' in anns[0]:
res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
for id, ann in enumerate(anns):
# now only support compressed RLE format as segmentation results
# ann['area'] = maskUtils.area(ann['segmentation'])
raise NotImplementedError("maskUtils disabled!")
if not 'bbox' in ann:
# ann['bbox'] = maskUtils.toBbox(ann['segmentation'])
raise NotImplementedError("maskUtils disabled!")
ann['id'] = id+1
ann['iscrowd'] = 0
elif 'keypoints' in anns[0]:
res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
for id, ann in enumerate(anns):
s = ann['keypoints']
x = s[0::3]
y = s[1::3]
x0,x1,y0,y1 = np.min(x), np.max(x), np.min(y), np.max(y)
ann['area'] = (x1-x0)*(y1-y0)
ann['id'] = id + 1
ann['bbox'] = [x0,y0,x1-x0,y1-y0]
print('DONE (t={:0.2f}s)'.format(time.time()- tic))
res.dataset['annotations'] = anns
res.createIndex()
return res
def download(self, tarDir = None, imgIds = [] ):
'''
Download COCO images from mscoco.org server.
:param tarDir (str): COCO results directory name
imgIds (list): images to be downloaded
:return:
'''
if tarDir is None:
print('Please specify target directory')
return -1
if len(imgIds) == 0:
imgs = self.imgs.values()
else:
imgs = self.loadImgs(imgIds)
N = len(imgs)
if not os.path.exists(tarDir):
os.makedirs(tarDir)
for i, img in enumerate(imgs):
tic = time.time()
fname = os.path.join(tarDir, img['file_name'])
if not os.path.exists(fname):
urlretrieve(img['coco_url'], fname)
print('downloaded {}/{} images (t={:0.1f}s)'.format(i, N, time.time()- tic))
def loadNumpyAnnotations(self, data):
"""
Convert result data from a numpy array [Nx7] where each row contains {imageID,x1,y1,w,h,score,class}
:param data (numpy.ndarray)
:return: annotations (python nested list)
"""
print('Converting ndarray to lists...')
assert(type(data) == np.ndarray)
print(data.shape)
assert(data.shape[1] == 7)
N = data.shape[0]
ann = []
for i in range(N):
if i % 1000000 == 0:
print('{}/{}'.format(i,N))
ann += [{
'image_id' : int(data[i, 0]),
'bbox' : [ data[i, 1], data[i, 2], data[i, 3], data[i, 4] ],
'score' : data[i, 5],
'category_id': int(data[i, 6]),
}]
return ann
def annToRLE(self, ann):
"""
Convert annotation which can be polygons, uncompressed RLE to RLE.
:return: RLE (run-length encoded segmentation)
"""
t = self.imgs[ann['image_id']]
h, w = t['height'], t['width']
segm = ann['segmentation']
if type(segm) == list:
# polygon -- a single object might consist of multiple parts
# we merge all parts into one mask rle code
# rles = maskUtils.frPyObjects(segm, h, w)
# rle = maskUtils.merge(rles)
raise NotImplementedError("maskUtils disabled!")
elif type(segm['counts']) == list:
# uncompressed RLE
# rle = maskUtils.frPyObjects(segm, h, w)
raise NotImplementedError("maskUtils disabled!")
else:
# rle
rle = ann['segmentation']
return rle
def annToMask(self, ann):
"""
Convert annotation which can be polygons, uncompressed RLE, or RLE to binary mask.
:return: binary mask (numpy 2D array)
"""
rle = self.annToRLE(ann)
# m = maskUtils.decode(rle)
raise NotImplementedError("maskUtils disabled!")
return m
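# Illustrative sketch, not part of the original file: shows the typical
# read-only workflow for the COCO class above. The annotation path is a
# hypothetical placeholder and the helper name is an addition for
# demonstration only.
def _example_coco_usage(ann_file='annotations/instances_val2017.json'):
    """Returns the 'person' annotations of the first image containing one."""
    coco = COCO(ann_file)
    cat_ids = coco.getCatIds(catNms=['person'])
    img_ids = coco.getImgIds(catIds=cat_ids)
    ann_ids = coco.getAnnIds(imgIds=img_ids[:1], catIds=cat_ids, iscrowd=None)
    return coco.loadAnns(ann_ids)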
| apache-2.0 |
jstoxrocky/statsmodels | statsmodels/tools/data.py | 23 | 3369 | """
Compatibility tools for various data structure inputs
"""
from statsmodels.compat.python import range
import numpy as np
import pandas as pd
def _check_period_index(x, freq="M"):
from pandas import PeriodIndex, DatetimeIndex
if not isinstance(x.index, (DatetimeIndex, PeriodIndex)):
raise ValueError("The index must be a DatetimeIndex or PeriodIndex")
from statsmodels.tsa.base.datetools import _infer_freq
inferred_freq = _infer_freq(x.index)
if not inferred_freq.startswith(freq):
raise ValueError("Expected frequency {}. Got {}".format(freq,
inferred_freq))
def is_data_frame(obj):
return isinstance(obj, pd.DataFrame)
def is_design_matrix(obj):
from patsy import DesignMatrix
return isinstance(obj, DesignMatrix)
def _is_structured_ndarray(obj):
return isinstance(obj, np.ndarray) and obj.dtype.names is not None
def interpret_data(data, colnames=None, rownames=None):
"""
Convert passed data structure to form required by estimation classes
Parameters
----------
data : ndarray-like
colnames : sequence or None
May be part of data structure
rownames : sequence or None
Returns
-------
(values, colnames, rownames) : (homogeneous ndarray, list)
"""
if isinstance(data, np.ndarray):
if _is_structured_ndarray(data):
if colnames is None:
colnames = data.dtype.names
values = struct_to_ndarray(data)
else:
values = data
if colnames is None:
colnames = ['Y_%d' % i for i in range(values.shape[1])]
elif is_data_frame(data):
# XXX: hack
data = data.dropna()
values = data.values
colnames = data.columns
rownames = data.index
else: # pragma: no cover
raise Exception('cannot handle other input types at the moment')
if not isinstance(colnames, list):
colnames = list(colnames)
# sanity check
if len(colnames) != values.shape[1]:
raise ValueError('length of colnames does not match number '
'of columns in data')
if rownames is not None and len(rownames) != len(values):
raise ValueError('length of rownames does not match number '
'of rows in data')
return values, colnames, rownames
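# Illustrative sketch, not part of the original module: shows what
# interpret_data returns for a plain ndarray versus a DataFrame; default
# names Y_0, Y_1 are generated only in the ndarray case. The helper name is
# an addition for demonstration only.
def _example_interpret_data():
    """Runs interpret_data on an ndarray and on an equivalent DataFrame."""
    arr = np.arange(6.0).reshape(3, 2)
    values, colnames, rownames = interpret_data(arr)
    # colnames == ['Y_0', 'Y_1'] and rownames is None for a bare ndarray.
    df = pd.DataFrame(arr, columns=['a', 'b'], index=['r0', 'r1', 'r2'])
    values_df, colnames_df, rownames_df = interpret_data(df)
    # colnames_df == ['a', 'b'] and rownames_df is the DataFrame index.
    return colnames, colnames_df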
def struct_to_ndarray(arr):
return arr.view((float, len(arr.dtype.names)))
def _is_using_ndarray_type(endog, exog):
return (type(endog) is np.ndarray and
(type(exog) is np.ndarray or exog is None))
def _is_using_ndarray(endog, exog):
return (isinstance(endog, np.ndarray) and
(isinstance(exog, np.ndarray) or exog is None))
def _is_using_pandas(endog, exog):
klasses = (pd.Series, pd.DataFrame, pd.WidePanel)
return (isinstance(endog, klasses) or isinstance(exog, klasses))
def _is_array_like(endog, exog):
try: # do it like this in case of mixed types, ie., ndarray and list
endog = np.asarray(endog)
exog = np.asarray(exog)
return True
except:
return False
def _is_using_patsy(endog, exog):
# we get this when a structured array is passed through a formula
return (is_design_matrix(endog) and
(is_design_matrix(exog) or exog is None))
| bsd-3-clause |
dvro/UnbalancedDataset | imblearn/ensemble/tests/test_balance_cascade.py | 2 | 29514 | """Test the module balance cascade."""
from __future__ import print_function
import os
import numpy as np
from numpy.testing import assert_raises
from numpy.testing import assert_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_warns
from sklearn.datasets import make_classification
from sklearn.utils.estimator_checks import check_estimator
from imblearn.ensemble import BalanceCascade
# Generate a global dataset to use
RND_SEED = 0
X = np.array([[0.11622591, -0.0317206],
[0.77481731, 0.60935141],
[1.25192108, -0.22367336],
[0.53366841, -0.30312976],
[1.52091956, -0.49283504],
[-0.28162401, -2.10400981],
[0.83680821, 1.72827342],
[0.3084254, 0.33299982],
[0.70472253, -0.73309052],
[0.28893132, -0.38761769],
[1.15514042, 0.0129463],
[0.88407872, 0.35454207],
[1.31301027, -0.92648734],
[-1.11515198, -0.93689695],
[-0.18410027, -0.45194484],
[0.9281014, 0.53085498],
[-0.14374509, 0.27370049],
[-0.41635887, -0.38299653],
[0.08711622, 0.93259929],
[1.70580611, -0.11219234]])
Y = np.array([0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0])
def test_bc_sk_estimator():
"""Test the sklearn estimator compatibility"""
check_estimator(BalanceCascade)
def test_bc_bad_ratio():
"""Test whether an error is raised with a wrong decimal value for
the ratio"""
# Define a negative ratio
ratio = -1.0
bc = BalanceCascade(ratio=ratio)
assert_raises(ValueError, bc.fit, X, Y)
# Define a ratio greater than 1
ratio = 100.0
bc = BalanceCascade(ratio=ratio)
assert_raises(ValueError, bc.fit, X, Y)
# Define ratio as an unknown string
ratio = 'rnd'
bc = BalanceCascade(ratio=ratio)
assert_raises(ValueError, bc.fit, X, Y)
# Define ratio as a list which is not supported
ratio = [.5, .5]
bc = BalanceCascade(ratio=ratio)
assert_raises(ValueError, bc.fit, X, Y)
def test_bc_init():
"""Test the initialisation of the object"""
# Define a ratio
ratio = 1.
bc = BalanceCascade(ratio=ratio, random_state=RND_SEED)
assert_equal(bc.ratio, ratio)
assert_equal(bc.bootstrap, True)
assert_equal(bc.n_max_subset, None)
assert_equal(bc.random_state, RND_SEED)
def test_bc_fit_single_class():
"""Test whether an error is raised when there is a single class"""
# Define the parameter for the under-sampling
ratio = 'auto'
# Create the object
bc = BalanceCascade(ratio=ratio, random_state=RND_SEED)
# Resample the data
# Create a wrong y
y_single_class = np.zeros((X.shape[0], ))
assert_warns(RuntimeWarning, bc.fit, X, y_single_class)
def test_bc_fit_invalid_ratio():
"""Test whether an error is raised when the balancing ratio to fit is
smaller than the one of the data"""
# Create the object
ratio = 1. / 10000.
bc = BalanceCascade(ratio=ratio, random_state=RND_SEED)
# Fit the data
assert_raises(RuntimeError, bc.fit_sample, X, Y)
def test_bc_fit():
"""Test the fitting method"""
# Define the parameter for the under-sampling
ratio = 'auto'
# Create the object
bc = BalanceCascade(ratio=ratio, random_state=RND_SEED)
# Fit the data
bc.fit(X, Y)
# Check if the data information have been computed
assert_equal(bc.min_c_, 0)
assert_equal(bc.maj_c_, 1)
assert_equal(bc.stats_c_[0], 8)
assert_equal(bc.stats_c_[1], 12)
def test_sample_wt_fit():
"""Test whether an error is raised when sample is called before
fitting"""
# Define the parameter for the under-sampling
ratio = 'auto'
# Create the object
bc = BalanceCascade(ratio=ratio, random_state=RND_SEED)
assert_raises(RuntimeError, bc.sample, X, Y)
def test_fit_sample_auto():
"""Test the fit and sample routine with auto ratio."""
# Define the ratio parameter
ratio = 'auto'
# Create the sampling object
bc = BalanceCascade(ratio=ratio, random_state=RND_SEED,
return_indices=True)
# Get the different subset
X_resampled, y_resampled, idx_under = bc.fit_sample(X, Y)
currdir = os.path.dirname(os.path.abspath(__file__))
X_gt = np.array([np.array([[0.11622591, -0.0317206],
[1.25192108, -0.22367336],
[0.53366841, -0.30312976],
[1.52091956, -0.49283504],
[0.88407872, 0.35454207],
[1.31301027, -0.92648734],
[-0.41635887, -0.38299653],
[1.70580611, -0.11219234],
[1.15514042, 0.0129463],
[0.08711622, 0.93259929],
[0.70472253, -0.73309052],
[-0.14374509, 0.27370049],
[0.83680821, 1.72827342],
[-0.18410027, -0.45194484],
[-0.28162401, -2.10400981],
[-1.11515198, -0.93689695]]),
np.array([[0.11622591, -0.0317206],
[1.25192108, -0.22367336],
[0.53366841, -0.30312976],
[1.52091956, -0.49283504],
[0.88407872, 0.35454207],
[1.31301027, -0.92648734],
[-0.41635887, -0.38299653],
[1.70580611, -0.11219234],
[1.15514042, 0.0129463],
[0.70472253, -0.73309052],
[-0.18410027, -0.45194484],
[0.77481731, 0.60935141],
[0.3084254, 0.33299982],
[0.28893132, -0.38761769],
[0.9281014, 0.53085498]])], dtype=object)
y_gt = np.array([np.array([0, 0, 0, 0, 0, 0, 0, 0, 1, 1,
1, 1, 1, 1, 1, 1]),
np.array([0, 0, 0, 0, 0, 0, 0, 0, 1, 1,
1, 1, 1, 1, 1])], dtype=object)
idx_gt = np.array([np.array([0, 2, 3, 4, 11, 12, 17, 19, 6, 11, 4, 10, 2,
8, 1, 7]),
np.array([0, 2, 3, 4, 11, 12, 17, 19, 6, 4, 8, 0, 3, 5,
9])], dtype=object)
# Check each array
for idx in range(X_gt.size):
assert_array_equal(X_resampled[idx], X_gt[idx])
assert_array_equal(y_resampled[idx], y_gt[idx])
assert_array_equal(idx_under[idx], idx_gt[idx])
def test_fit_sample_half():
"""Test the fit and sample routine with 0.5 ratio."""
# Define the ratio parameter
ratio = 0.8
# Create the sampling object
bc = BalanceCascade(ratio=ratio, random_state=RND_SEED)
# Get the different subset
X_resampled, y_resampled = bc.fit_sample(X, Y)
X_gt = np.array([np.array([[0.11622591, -0.0317206],
[1.25192108, -0.22367336],
[0.53366841, -0.30312976],
[1.52091956, -0.49283504],
[0.88407872, 0.35454207],
[1.31301027, -0.92648734],
[-0.41635887, -0.38299653],
[1.70580611, -0.11219234],
[1.15514042, 0.0129463],
[0.08711622, 0.93259929],
[0.70472253, -0.73309052],
[-0.14374509, 0.27370049],
[0.83680821, 1.72827342],
[-0.18410027, -0.45194484],
[-0.28162401, -2.10400981],
[-1.11515198, -0.93689695],
[0.9281014, 0.53085498],
[0.3084254, 0.33299982]]),
np.array([[0.11622591, -0.0317206],
[1.25192108, -0.22367336],
[0.53366841, -0.30312976],
[1.52091956, -0.49283504],
[0.88407872, 0.35454207],
[1.31301027, -0.92648734],
[-0.41635887, -0.38299653],
[1.70580611, -0.11219234],
[1.15514042, 0.0129463],
[0.70472253, -0.73309052],
[-0.18410027, -0.45194484],
[0.77481731, 0.60935141],
[0.28893132, -0.38761769]])], dtype=object)
y_gt = np.array([np.array([0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1]),
np.array([0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1,
1])], dtype=object)
# Check each array
for idx in range(X_gt.size):
assert_array_equal(X_resampled[idx], X_gt[idx])
assert_array_equal(y_resampled[idx], y_gt[idx])
def test_fit_sample_auto_decision_tree():
"""Test the fit and sample routine with auto ratio with a decision
tree."""
# Define the ratio parameter
ratio = 'auto'
classifier = 'decision-tree'
# Create the sampling object
bc = BalanceCascade(ratio=ratio, random_state=RND_SEED,
return_indices=True, classifier=classifier)
# Get the different subset
X_resampled, y_resampled, idx_under = bc.fit_sample(X, Y)
X_gt = np.array([np.array([[0.11622591, -0.0317206],
[1.25192108, -0.22367336],
[0.53366841, -0.30312976],
[1.52091956, -0.49283504],
[0.88407872, 0.35454207],
[1.31301027, -0.92648734],
[-0.41635887, -0.38299653],
[1.70580611, -0.11219234],
[1.15514042, 0.0129463],
[0.08711622, 0.93259929],
[0.70472253, -0.73309052],
[-0.14374509, 0.27370049],
[0.83680821, 1.72827342],
[-0.18410027, -0.45194484],
[-0.28162401, -2.10400981],
[-1.11515198, -0.93689695]]),
np.array([[0.11622591, -0.0317206],
[1.25192108, -0.22367336],
[0.53366841, -0.30312976],
[1.52091956, -0.49283504],
[0.88407872, 0.35454207],
[1.31301027, -0.92648734],
[-0.41635887, -0.38299653],
[1.70580611, -0.11219234],
[-1.11515198, -0.93689695],
[0.77481731, 0.60935141],
[0.3084254, 0.33299982],
[0.28893132, -0.38761769],
[0.9281014, 0.53085498]])], dtype=object)
y_gt = np.array([np.array([0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1,
1, 1, 1, 1]),
np.array([0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1,
1])], dtype=object)
idx_gt = np.array([np.array([0, 2, 3, 4, 11, 12, 17, 19, 6, 11, 4,
10, 2, 8, 1, 7]),
np.array([0, 2, 3, 4, 11, 12, 17, 19, 7, 0, 3, 5,
9])], dtype=object)
# Check each array
for idx in range(X_gt.size):
assert_array_equal(X_resampled[idx], X_gt[idx])
assert_array_equal(y_resampled[idx], y_gt[idx])
assert_array_equal(idx_under[idx], idx_gt[idx])
def test_fit_sample_auto_random_forest():
"""Test the fit and sample routine with auto ratio with a random
forest."""
# Define the ratio parameter
ratio = 'auto'
classifier = 'random-forest'
# Create the sampling object
bc = BalanceCascade(ratio=ratio, random_state=RND_SEED,
return_indices=True, classifier=classifier)
# Get the different subset
X_resampled, y_resampled, idx_under = bc.fit_sample(X, Y)
X_gt = np.array([np.array([[0.11622591, -0.0317206],
[1.25192108, -0.22367336],
[0.53366841, -0.30312976],
[1.52091956, -0.49283504],
[0.88407872, 0.35454207],
[1.31301027, -0.92648734],
[-0.41635887, -0.38299653],
[1.70580611, -0.11219234],
[1.15514042, 0.0129463],
[0.08711622, 0.93259929],
[0.70472253, -0.73309052],
[-0.14374509, 0.27370049],
[0.83680821, 1.72827342],
[-0.18410027, -0.45194484],
[-0.28162401, -2.10400981],
[-1.11515198, -0.93689695]]),
np.array([[0.11622591, -0.0317206],
[1.25192108, -0.22367336],
[0.53366841, -0.30312976],
[1.52091956, -0.49283504],
[0.88407872, 0.35454207],
[1.31301027, -0.92648734],
[-0.41635887, -0.38299653],
[1.70580611, -0.11219234],
[-0.14374509, 0.27370049],
[0.77481731, 0.60935141],
[0.3084254, 0.33299982],
[0.28893132, -0.38761769],
[0.9281014, 0.53085498]])], dtype=object)
y_gt = np.array([np.array([0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1,
1, 1, 1, 1]),
np.array([0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1,
1])], dtype=object)
idx_gt = np.array([np.array([0, 2, 3, 4, 11, 12, 17, 19, 6, 11, 4, 10,
2, 8, 1, 7]),
np.array([0, 2, 3, 4, 11, 12, 17, 19, 10, 0, 3, 5,
9])], dtype=object)
# Check each array
for idx in range(X_gt.size):
assert_array_equal(X_resampled[idx], X_gt[idx])
assert_array_equal(y_resampled[idx], y_gt[idx])
assert_array_equal(idx_under[idx], idx_gt[idx])
def test_fit_sample_auto_adaboost():
"""Test the fit and sample routine with auto ratio with a adaboost."""
# Define the ratio parameter
ratio = 'auto'
classifier = 'adaboost'
# Create the sampling object
bc = BalanceCascade(ratio=ratio, random_state=RND_SEED,
return_indices=True, classifier=classifier)
# Get the different subset
X_resampled, y_resampled, idx_under = bc.fit_sample(X, Y)
X_gt = np.array([np.array([[0.11622591, -0.0317206],
[1.25192108, -0.22367336],
[0.53366841, -0.30312976],
[1.52091956, -0.49283504],
[0.88407872, 0.35454207],
[1.31301027, -0.92648734],
[-0.41635887, -0.38299653],
[1.70580611, -0.11219234],
[1.15514042, 0.0129463],
[0.08711622, 0.93259929],
[0.70472253, -0.73309052],
[-0.14374509, 0.27370049],
[0.83680821, 1.72827342],
[-0.18410027, -0.45194484],
[-0.28162401, -2.10400981],
[-1.11515198, -0.93689695]]),
np.array([[0.11622591, -0.0317206],
[1.25192108, -0.22367336],
[0.53366841, -0.30312976],
[1.52091956, -0.49283504],
[0.88407872, 0.35454207],
[1.31301027, -0.92648734],
[-0.41635887, -0.38299653],
[1.70580611, -0.11219234],
[-0.14374509, 0.27370049],
[-1.11515198, -0.93689695],
[0.77481731, 0.60935141],
[0.3084254, 0.33299982],
[0.28893132, -0.38761769],
[0.9281014, 0.53085498]])], dtype=object)
y_gt = np.array([np.array([0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1,
1, 1]),
np.array([0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
1])], dtype=object)
idx_gt = np.array([np.array([0, 2, 3, 4, 11, 12, 17, 19, 6, 11, 4, 10, 2,
8, 1, 7]),
np.array([0, 2, 3, 4, 11, 12, 17, 19, 10, 7, 0, 3, 5,
9])], dtype=object)
# Check each array
for idx in range(X_gt.size):
assert_array_equal(X_resampled[idx], X_gt[idx])
assert_array_equal(y_resampled[idx], y_gt[idx])
assert_array_equal(idx_under[idx], idx_gt[idx])
def test_fit_sample_auto_gradient_boosting():
"""Test the fit and sample routine with auto ratio with a gradient
boosting."""
# Define the ratio parameter
ratio = 'auto'
classifier = 'gradient-boosting'
# Create the sampling object
bc = BalanceCascade(ratio=ratio, random_state=RND_SEED,
return_indices=True, classifier=classifier)
# Get the different subset
X_resampled, y_resampled, idx_under = bc.fit_sample(X, Y)
X_gt = np.array([np.array([[0.11622591, -0.0317206],
[1.25192108, -0.22367336],
[0.53366841, -0.30312976],
[1.52091956, -0.49283504],
[0.88407872, 0.35454207],
[1.31301027, -0.92648734],
[-0.41635887, -0.38299653],
[1.70580611, -0.11219234],
[1.15514042, 0.0129463],
[0.08711622, 0.93259929],
[0.70472253, -0.73309052],
[-0.14374509, 0.27370049],
[0.83680821, 1.72827342],
[-0.18410027, -0.45194484],
[-0.28162401, -2.10400981],
[-1.11515198, -0.93689695]]),
np.array([[0.11622591, -0.0317206],
[1.25192108, -0.22367336],
[0.53366841, -0.30312976],
[1.52091956, -0.49283504],
[0.88407872, 0.35454207],
[1.31301027, -0.92648734],
[-0.41635887, -0.38299653],
[1.70580611, -0.11219234],
[-0.14374509, 0.27370049],
[-1.11515198, -0.93689695],
[0.77481731, 0.60935141],
[0.3084254, 0.33299982],
[0.28893132, -0.38761769],
[0.9281014, 0.53085498]])], dtype=object)
y_gt = np.array([np.array([0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
1, 1, 1]),
np.array([0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
1])], dtype=object)
idx_gt = np.array([np.array([0, 2, 3, 4, 11, 12, 17, 19, 6, 11, 4, 10,
2, 8, 1, 7]),
np.array([0, 2, 3, 4, 11, 12, 17, 19, 10, 7, 0, 3,
5, 9])], dtype=object)
# Check each array
for idx in range(X_gt.size):
assert_array_equal(X_resampled[idx], X_gt[idx])
assert_array_equal(y_resampled[idx], y_gt[idx])
assert_array_equal(idx_under[idx], idx_gt[idx])
def test_fit_sample_auto_linear_svm():
"""Test the fit and sample routine with auto ratio with a linear
svm."""
# Define the ratio parameter
ratio = 'auto'
classifier = 'linear-svm'
# Create the sampling object
bc = BalanceCascade(ratio=ratio, random_state=RND_SEED,
return_indices=True, classifier=classifier)
# Get the different subset
X_resampled, y_resampled, idx_under = bc.fit_sample(X, Y)
X_gt = np.array([np.array([[0.11622591, -0.0317206],
[1.25192108, -0.22367336],
[0.53366841, -0.30312976],
[1.52091956, -0.49283504],
[0.88407872, 0.35454207],
[1.31301027, -0.92648734],
[-0.41635887, -0.38299653],
[1.70580611, -0.11219234],
[1.15514042, 0.0129463],
[0.08711622, 0.93259929],
[0.70472253, -0.73309052],
[-0.14374509, 0.27370049],
[0.83680821, 1.72827342],
[-0.18410027, -0.45194484],
[-0.28162401, -2.10400981],
[-1.11515198, -0.93689695]]),
np.array([[0.11622591, -0.0317206],
[1.25192108, -0.22367336],
[0.53366841, -0.30312976],
[1.52091956, -0.49283504],
[0.88407872, 0.35454207],
[1.31301027, -0.92648734],
[-0.41635887, -0.38299653],
[1.70580611, -0.11219234],
[1.15514042, 0.0129463],
[0.70472253, -0.73309052],
[0.77481731, 0.60935141],
[0.3084254, 0.33299982],
[0.28893132, -0.38761769],
[0.9281014, 0.53085498]])], dtype=object)
y_gt = np.array([np.array([0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1,
1, 1]),
np.array([0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
1])], dtype=object)
idx_gt = np.array([np.array([0, 2, 3, 4, 11, 12, 17, 19, 6, 11, 4, 10,
2, 8, 1, 7]),
np.array([0, 2, 3, 4, 11, 12, 17, 19, 6, 4, 0, 3,
5, 9])], dtype=object)
# Check each array
for idx in range(X_gt.size):
assert_array_equal(X_resampled[idx], X_gt[idx])
assert_array_equal(y_resampled[idx], y_gt[idx])
assert_array_equal(idx_under[idx], idx_gt[idx])
def test_init_wrong_classifier():
"""Test whether an error is raised when the classifier provided is unknown."""
# Define the ratio parameter
classifier = 'rnd'
bc = BalanceCascade(classifier=classifier)
assert_raises(NotImplementedError, bc.fit_sample, X, Y)
def test_fit_sample_auto_early_stop():
"""Test the fit and sample routine with auto ratio with 1 subset."""
# Define the ratio parameter
ratio = 'auto'
n_subset = 1
# Create the sampling object
bc = BalanceCascade(ratio=ratio, random_state=RND_SEED,
return_indices=True, n_max_subset=n_subset)
# Get the different subset
X_resampled, y_resampled, idx_under = bc.fit_sample(X, Y)
X_gt = np.array([[[0.11622591, -0.0317206],
[1.25192108, -0.22367336],
[0.53366841, -0.30312976],
[1.52091956, -0.49283504],
[0.88407872, 0.35454207],
[1.31301027, -0.92648734],
[-0.41635887, -0.38299653],
[1.70580611, -0.11219234],
[1.15514042, 0.0129463],
[0.08711622, 0.93259929],
[0.70472253, -0.73309052],
[-0.14374509, 0.27370049],
[0.83680821, 1.72827342],
[-0.18410027, -0.45194484],
[-0.28162401, -2.10400981],
[-1.11515198, -0.93689695]]])
y_gt = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1]])
idx_gt = np.array([[0, 2, 3, 4, 11, 12, 17, 19, 6, 11, 4, 10, 2, 8, 1, 7]])
# Check each array
assert_array_equal(X_resampled, X_gt)
assert_array_equal(y_resampled, y_gt)
assert_array_equal(idx_under, idx_gt)
def test_fit_sample_auto_early_stop_2():
"""Test the fit and sample routine with auto ratio with a 2 subsets."""
# Define the ratio parameter
ratio = 'auto'
n_subset = 2
# Create the sampling object
bc = BalanceCascade(ratio=ratio, random_state=RND_SEED,
return_indices=True, n_max_subset=n_subset)
# Get the different subset
X_resampled, y_resampled, idx_under = bc.fit_sample(X, Y)
X_gt = np.array([np.array([[0.11622591, -0.0317206],
[1.25192108, -0.22367336],
[0.53366841, -0.30312976],
[1.52091956, -0.49283504],
[0.88407872, 0.35454207],
[1.31301027, -0.92648734],
[-0.41635887, -0.38299653],
[1.70580611, -0.11219234],
[1.15514042, 0.0129463],
[0.08711622, 0.93259929],
[0.70472253, -0.73309052],
[-0.14374509, 0.27370049],
[0.83680821, 1.72827342],
[-0.18410027, -0.45194484],
[-0.28162401, -2.10400981],
[-1.11515198, -0.93689695]]),
np.array([[0.11622591, -0.0317206],
[1.25192108, -0.22367336],
[0.53366841, -0.30312976],
[1.52091956, -0.49283504],
[0.88407872, 0.35454207],
[1.31301027, -0.92648734],
[-0.41635887, -0.38299653],
[1.70580611, -0.11219234],
[1.15514042, 0.0129463],
[0.70472253, -0.73309052],
[-0.18410027, -0.45194484],
[0.77481731, 0.60935141],
[0.3084254, 0.33299982],
[0.28893132, -0.38761769],
[0.9281014, 0.53085498]])], dtype=object)
y_gt = np.array([np.array([0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
1, 1, 1]),
np.array([0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
1, 1])], dtype=object)
idx_gt = np.array([np.array([0, 2, 3, 4, 11, 12, 17, 19, 6, 11, 4, 10,
2, 8, 1, 7]),
np.array([0, 2, 3, 4, 11, 12, 17, 19, 6, 4, 8, 0, 3,
5, 9])], dtype=object)
# Check each array
for idx in range(X_gt.size):
assert_array_equal(X_resampled[idx], X_gt[idx])
assert_array_equal(y_resampled[idx], y_gt[idx])
assert_array_equal(idx_under[idx], idx_gt[idx])
def test_sample_wrong_X():
"""Test either if an error is raised when X is different at fitting
and sampling"""
# Create the object
bc = BalanceCascade(random_state=RND_SEED)
bc.fit(X, Y)
assert_raises(RuntimeError, bc.sample, np.random.random((100, 40)),
np.array([0] * 50 + [1] * 50))
def test_multiclass_error():
""" Test either if an error is raised when the target are not binary
type. """
# continuous case
y = np.linspace(0, 1, 20)
bc = BalanceCascade(random_state=RND_SEED)
assert_warns(UserWarning, bc.fit, X, y)
# multiclass case
y = np.array([0] * 3 + [1] * 2 + [2] * 15)
bc = BalanceCascade(random_state=RND_SEED)
assert_warns(UserWarning, bc.fit, X, y)
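# A minimal usage sketch, not a test: roughly how the BalanceCascade API
# exercised above could be applied to a fresh imbalanced dataset. The
# make_classification parameters below are illustrative assumptions only.
def _example_balance_cascade_usage():
    from sklearn.datasets import make_classification
    X_new, y_new = make_classification(n_samples=200, n_features=2,
                                       n_informative=2, n_redundant=0,
                                       weights=[0.9, 0.1], random_state=0)
    bc = BalanceCascade(ratio='auto', random_state=0)
    # Each entry of the returned arrays is one balanced subset of the data.
    X_subsets, y_subsets = bc.fit_sample(X_new, y_new)
    return X_subsets, y_subsets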
| mit |
hmendozap/auto-sklearn | autosklearn/pipeline/components/classification/adaboost.py | 1 | 3362 | from autosklearn.pipeline.implementations.MultilabelClassifier import \
MultilabelClassifier
from HPOlibConfigSpace.configuration_space import ConfigurationSpace
from HPOlibConfigSpace.hyperparameters import UniformFloatHyperparameter, \
UniformIntegerHyperparameter, CategoricalHyperparameter
from autosklearn.pipeline.components.base import AutoSklearnClassificationAlgorithm
from autosklearn.pipeline.constants import *
class AdaboostClassifier(AutoSklearnClassificationAlgorithm):
def __init__(self, n_estimators, learning_rate, algorithm, max_depth,
random_state=None):
self.n_estimators = int(n_estimators)
self.learning_rate = float(learning_rate)
self.algorithm = algorithm
self.random_state = random_state
self.max_depth = max_depth
self.estimator = None
def fit(self, X, Y, sample_weight=None):
import sklearn.ensemble
import sklearn.tree
self.n_estimators = int(self.n_estimators)
self.learning_rate = float(self.learning_rate)
self.max_depth = int(self.max_depth)
base_estimator = sklearn.tree.DecisionTreeClassifier(max_depth=self.max_depth)
estimator = sklearn.ensemble.AdaBoostClassifier(
base_estimator=base_estimator,
n_estimators=self.n_estimators,
learning_rate=self.learning_rate,
algorithm=self.algorithm,
random_state=self.random_state
)
if len(Y.shape) == 2 and Y.shape[1] > 1:
estimator = MultilabelClassifier(estimator, n_jobs=1)
estimator.fit(X, Y, sample_weight=sample_weight)
else:
estimator.fit(X, Y, sample_weight=sample_weight)
self.estimator = estimator
return self
def predict(self, X):
if self.estimator is None:
raise NotImplementedError
return self.estimator.predict(X)
def predict_proba(self, X):
if self.estimator is None:
raise NotImplementedError()
return self.estimator.predict_proba(X)
@staticmethod
def get_properties(dataset_properties=None):
return {'shortname': 'AB',
'name': 'AdaBoost Classifier',
'handles_regression': False,
'handles_classification': True,
'handles_multiclass': True,
'handles_multilabel': True,
'is_deterministic': True,
'input': (DENSE, SPARSE, UNSIGNED_DATA),
'output': (PREDICTIONS,)}
@staticmethod
def get_hyperparameter_search_space(dataset_properties=None):
cs = ConfigurationSpace()
# base_estimator = Constant(name="base_estimator", value="None")
n_estimators = cs.add_hyperparameter(UniformIntegerHyperparameter(
name="n_estimators", lower=50, upper=500, default=50, log=False))
learning_rate = cs.add_hyperparameter(UniformFloatHyperparameter(
name="learning_rate", lower=0.0001, upper=2, default=0.1, log=True))
algorithm = cs.add_hyperparameter(CategoricalHyperparameter(
name="algorithm", choices=["SAMME.R", "SAMME"], default="SAMME.R"))
max_depth = cs.add_hyperparameter(UniformIntegerHyperparameter(
name="max_depth", lower=1, upper=10, default=1, log=False))
return cs
| bsd-3-clause |
deepakantony/sms-tools | software/models_interface/sineModel_function.py | 21 | 2749 | # function to call the main analysis/synthesis functions in software/models/sineModel.py
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import get_window
import os, sys
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../models/'))
import utilFunctions as UF
import sineModel as SM
def main(inputFile='../../sounds/bendir.wav', window='hamming', M=2001, N=2048, t=-80, minSineDur=0.02,
maxnSines=150, freqDevOffset=10, freqDevSlope=0.001):
"""
Perform analysis/synthesis using the sinusoidal model
inputFile: input sound file (monophonic with sampling rate of 44100)
window: analysis window type (rectangular, hanning, hamming, blackman, blackmanharris)
M: analysis window size; N: fft size (power of two, bigger or equal than M)
t: magnitude threshold of spectral peaks; minSineDur: minimum duration of sinusoidal tracks
maxnSines: maximum number of parallel sinusoids
freqDevOffset: frequency deviation allowed in the sinusoids from frame to frame at frequency 0
freqDevSlope: slope of the frequency deviation, higher frequencies have bigger deviation
"""
# size of fft used in synthesis
Ns = 512
# hop size (has to be 1/4 of Ns)
H = 128
# read input sound
fs, x = UF.wavread(inputFile)
# compute analysis window
w = get_window(window, M)
# analyze the sound with the sinusoidal model
tfreq, tmag, tphase = SM.sineModelAnal(x, fs, w, N, H, t, maxnSines, minSineDur, freqDevOffset, freqDevSlope)
# synthesize the output sound from the sinusoidal representation
y = SM.sineModelSynth(tfreq, tmag, tphase, Ns, H, fs)
# output sound file name
outputFile = 'output_sounds/' + os.path.basename(inputFile)[:-4] + '_sineModel.wav'
# write the synthesized sound obtained from the sinusoidal synthesis
UF.wavwrite(y, fs, outputFile)
# create figure to show plots
plt.figure(figsize=(12, 9))
# frequency range to plot
maxplotfreq = 5000.0
# plot the input sound
plt.subplot(3,1,1)
plt.plot(np.arange(x.size)/float(fs), x)
plt.axis([0, x.size/float(fs), min(x), max(x)])
plt.ylabel('amplitude')
plt.xlabel('time (sec)')
plt.title('input sound: x')
# plot the sinusoidal frequencies
plt.subplot(3,1,2)
if (tfreq.shape[1] > 0):
numFrames = tfreq.shape[0]
frmTime = H*np.arange(numFrames)/float(fs)
tfreq[tfreq<=0] = np.nan
plt.plot(frmTime, tfreq)
plt.axis([0, x.size/float(fs), 0, maxplotfreq])
plt.title('frequencies of sinusoidal tracks')
# plot the output sound
plt.subplot(3,1,3)
plt.plot(np.arange(y.size)/float(fs), y)
plt.axis([0, y.size/float(fs), min(y), max(y)])
plt.ylabel('amplitude')
plt.xlabel('time (sec)')
plt.title('output sound: y')
plt.tight_layout()
plt.show()
if __name__ == "__main__":
main()
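    # Illustrative only: the same analysis/synthesis can be run on another
    # monophonic 44100 Hz sound by overriding the defaults; the file name and
    # settings below are assumptions, adjust them to files actually present
    # under ../../sounds/.
    # main(inputFile='../../sounds/oboe-A4.wav', window='blackman',
    #      M=601, N=1024, t=-90, minSineDur=0.1)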
| agpl-3.0 |
AlexanderFabisch/scikit-learn | examples/text/document_classification_20newsgroups.py | 27 | 10521 | """
======================================================
Classification of text documents using sparse features
======================================================
This is an example showing how scikit-learn can be used to classify documents
by topics using a bag-of-words approach. This example uses a scipy.sparse
matrix to store the features and demonstrates various classifiers that can
efficiently handle sparse matrices.
The dataset used in this example is the 20 newsgroups dataset. It will be
automatically downloaded, then cached.
The bar plot indicates the accuracy, training time (normalized) and test time
(normalized) of each classifier.
"""
# Author: Peter Prettenhofer <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Lars Buitinck <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
import logging
import numpy as np
from optparse import OptionParser
import sys
from time import time
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.linear_model import RidgeClassifier
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.naive_bayes import BernoulliNB, MultinomialNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neighbors import NearestCentroid
from sklearn.ensemble import RandomForestClassifier
from sklearn.utils.extmath import density
from sklearn import metrics
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
# parse commandline arguments
op = OptionParser()
op.add_option("--report",
action="store_true", dest="print_report",
help="Print a detailed classification report.")
op.add_option("--chi2_select",
action="store", type="int", dest="select_chi2",
help="Select some number of features using a chi-squared test")
op.add_option("--confusion_matrix",
action="store_true", dest="print_cm",
help="Print the confusion matrix.")
op.add_option("--top10",
action="store_true", dest="print_top10",
help="Print ten most discriminative terms per class"
" for every classifier.")
op.add_option("--all_categories",
action="store_true", dest="all_categories",
help="Whether to use all categories or not.")
op.add_option("--use_hashing",
action="store_true",
help="Use a hashing vectorizer.")
op.add_option("--n_features",
action="store", type=int, default=2 ** 16,
help="n_features when using the hashing vectorizer.")
op.add_option("--filtered",
action="store_true",
help="Remove newsgroup information that is easily overfit: "
"headers, signatures, and quoting.")
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
print(__doc__)
op.print_help()
print()
###############################################################################
# Load some categories from the training set
if opts.all_categories:
categories = None
else:
categories = [
'alt.atheism',
'talk.religion.misc',
'comp.graphics',
'sci.space',
]
if opts.filtered:
remove = ('headers', 'footers', 'quotes')
else:
remove = ()
print("Loading 20 newsgroups dataset for categories:")
print(categories if categories else "all")
data_train = fetch_20newsgroups(subset='train', categories=categories,
shuffle=True, random_state=42,
remove=remove)
data_test = fetch_20newsgroups(subset='test', categories=categories,
shuffle=True, random_state=42,
remove=remove)
print('data loaded')
categories = data_train.target_names # for case categories == None
def size_mb(docs):
return sum(len(s.encode('utf-8')) for s in docs) / 1e6
data_train_size_mb = size_mb(data_train.data)
data_test_size_mb = size_mb(data_test.data)
print("%d documents - %0.3fMB (training set)" % (
len(data_train.data), data_train_size_mb))
print("%d documents - %0.3fMB (test set)" % (
len(data_test.data), data_test_size_mb))
print("%d categories" % len(categories))
print()
# split a training set and a test set
y_train, y_test = data_train.target, data_test.target
print("Extracting features from the training data using a sparse vectorizer")
t0 = time()
if opts.use_hashing:
vectorizer = HashingVectorizer(stop_words='english', non_negative=True,
n_features=opts.n_features)
X_train = vectorizer.transform(data_train.data)
else:
vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=0.5,
stop_words='english')
X_train = vectorizer.fit_transform(data_train.data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_train_size_mb / duration))
print("n_samples: %d, n_features: %d" % X_train.shape)
print()
print("Extracting features from the test data using the same vectorizer")
t0 = time()
X_test = vectorizer.transform(data_test.data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_test_size_mb / duration))
print("n_samples: %d, n_features: %d" % X_test.shape)
print()
# mapping from integer feature name to original token string
if opts.use_hashing:
feature_names = None
else:
feature_names = vectorizer.get_feature_names()
if opts.select_chi2:
print("Extracting %d best features by a chi-squared test" %
opts.select_chi2)
t0 = time()
ch2 = SelectKBest(chi2, k=opts.select_chi2)
X_train = ch2.fit_transform(X_train, y_train)
X_test = ch2.transform(X_test)
if feature_names:
# keep selected feature names
feature_names = [feature_names[i] for i
in ch2.get_support(indices=True)]
print("done in %fs" % (time() - t0))
print()
if feature_names:
feature_names = np.asarray(feature_names)
def trim(s):
"""Trim string to fit on terminal (assuming 80-column display)"""
return s if len(s) <= 80 else s[:77] + "..."
###############################################################################
# Benchmark classifiers
def benchmark(clf):
print('_' * 80)
print("Training: ")
print(clf)
t0 = time()
clf.fit(X_train, y_train)
train_time = time() - t0
print("train time: %0.3fs" % train_time)
t0 = time()
pred = clf.predict(X_test)
test_time = time() - t0
print("test time: %0.3fs" % test_time)
score = metrics.accuracy_score(y_test, pred)
print("accuracy: %0.3f" % score)
if hasattr(clf, 'coef_'):
print("dimensionality: %d" % clf.coef_.shape[1])
print("density: %f" % density(clf.coef_))
if opts.print_top10 and feature_names is not None:
print("top 10 keywords per class:")
for i, category in enumerate(categories):
top10 = np.argsort(clf.coef_[i])[-10:]
print(trim("%s: %s"
% (category, " ".join(feature_names[top10]))))
print()
if opts.print_report:
print("classification report:")
print(metrics.classification_report(y_test, pred,
target_names=categories))
if opts.print_cm:
print("confusion matrix:")
print(metrics.confusion_matrix(y_test, pred))
print()
clf_descr = str(clf).split('(')[0]
return clf_descr, score, train_time, test_time
results = []
for clf, name in (
(RidgeClassifier(tol=1e-2, solver="lsqr"), "Ridge Classifier"),
(Perceptron(n_iter=50), "Perceptron"),
(PassiveAggressiveClassifier(n_iter=50), "Passive-Aggressive"),
(KNeighborsClassifier(n_neighbors=10), "kNN"),
(RandomForestClassifier(n_estimators=100), "Random forest")):
print('=' * 80)
print(name)
results.append(benchmark(clf))
for penalty in ["l2", "l1"]:
print('=' * 80)
print("%s penalty" % penalty.upper())
# Train Liblinear model
results.append(benchmark(LinearSVC(loss='l2', penalty=penalty,
dual=False, tol=1e-3)))
# Train SGD model
results.append(benchmark(SGDClassifier(alpha=.0001, n_iter=50,
penalty=penalty)))
# Train SGD with Elastic Net penalty
print('=' * 80)
print("Elastic-Net penalty")
results.append(benchmark(SGDClassifier(alpha=.0001, n_iter=50,
penalty="elasticnet")))
# Train NearestCentroid without threshold
print('=' * 80)
print("NearestCentroid (aka Rocchio classifier)")
results.append(benchmark(NearestCentroid()))
# Train sparse Naive Bayes classifiers
print('=' * 80)
print("Naive Bayes")
results.append(benchmark(MultinomialNB(alpha=.01)))
results.append(benchmark(BernoulliNB(alpha=.01)))
print('=' * 80)
print("LinearSVC with L1-based feature selection")
# The smaller C, the stronger the regularization.
# The more regularization, the more sparsity.
results.append(benchmark(Pipeline([
('feature_selection', LinearSVC(penalty="l1", dual=False, tol=1e-3)),
('classification', LinearSVC())
])))
# make some plots
indices = np.arange(len(results))
results = [[x[i] for x in results] for i in range(4)]
clf_names, score, training_time, test_time = results
training_time = np.array(training_time) / np.max(training_time)
test_time = np.array(test_time) / np.max(test_time)
plt.figure(figsize=(12, 8))
plt.title("Score")
plt.barh(indices, score, .2, label="score", color='navy')
plt.barh(indices + .3, training_time, .2, label="training time",
color='c')
plt.barh(indices + .6, test_time, .2, label="test time", color='darkorange')
plt.yticks(())
plt.legend(loc='best')
plt.subplots_adjust(left=.25)
plt.subplots_adjust(top=.95)
plt.subplots_adjust(bottom=.05)
for i, c in zip(indices, clf_names):
plt.text(-.3, i, c)
plt.show()
| bsd-3-clause |
rajat1994/scikit-learn | examples/applications/plot_stock_market.py | 227 | 8284 | """
=======================================
Visualizing the stock market structure
=======================================
This example employs several unsupervised learning techniques to extract
the stock market structure from variations in historical quotes.
The quantity that we use is the daily variation in quote price: quotes
that are linked tend to cofluctuate during a day.
.. _stock_market:
Learning a graph structure
--------------------------
We use sparse inverse covariance estimation to find which quotes are
correlated conditionally on the others. Specifically, sparse inverse
covariance gives us a graph, that is, a list of connections. For each
symbol, the symbols that it is connected to are those useful to explain
its fluctuations.
Clustering
----------
We use clustering to group together quotes that behave similarly. Here,
amongst the :ref:`various clustering techniques <clustering>` available
in the scikit-learn, we use :ref:`affinity_propagation` as it does
not enforce equal-size clusters, and it can choose automatically the
number of clusters from the data.
Note that this gives us a different indication than the graph, as the
graph reflects conditional relations between variables, while the
clustering reflects marginal properties: variables clustered together can
be considered as having a similar impact at the level of the full stock
market.
Embedding in 2D space
---------------------
For visualization purposes, we need to lay out the different symbols on a
2D canvas. For this we use :ref:`manifold` techniques to retrieve 2D
embedding.
Visualization
-------------
The output of the 3 models is combined in a 2D graph where nodes
represent the stocks and edges the links between them:
- cluster labels are used to define the color of the nodes
- the sparse covariance model is used to display the strength of the edges
- the 2D embedding is used to position the nodes in the plane
This example has a fair amount of visualization-related code, as
visualization is crucial here to display the graph. One of the challenges
is to position the labels while minimizing overlap. For this we use a
heuristic based on the direction of the nearest neighbor along each
axis.
"""
print(__doc__)
# Author: Gael Varoquaux [email protected]
# License: BSD 3 clause
import datetime
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import finance
from matplotlib.collections import LineCollection
from sklearn import cluster, covariance, manifold
###############################################################################
# Retrieve the data from Internet
# Choose a time period reasonably calm (not too long ago so that we get
# high-tech firms, and before the 2008 crash)
d1 = datetime.datetime(2003, 1, 1)
d2 = datetime.datetime(2008, 1, 1)
# kraft symbol has now changed from KFT to MDLZ in yahoo
symbol_dict = {
'TOT': 'Total',
'XOM': 'Exxon',
'CVX': 'Chevron',
'COP': 'ConocoPhillips',
'VLO': 'Valero Energy',
'MSFT': 'Microsoft',
'IBM': 'IBM',
'TWX': 'Time Warner',
'CMCSA': 'Comcast',
'CVC': 'Cablevision',
'YHOO': 'Yahoo',
'DELL': 'Dell',
'HPQ': 'HP',
'AMZN': 'Amazon',
'TM': 'Toyota',
'CAJ': 'Canon',
'MTU': 'Mitsubishi',
'SNE': 'Sony',
'F': 'Ford',
'HMC': 'Honda',
'NAV': 'Navistar',
'NOC': 'Northrop Grumman',
'BA': 'Boeing',
'KO': 'Coca Cola',
'MMM': '3M',
'MCD': 'Mc Donalds',
'PEP': 'Pepsi',
'MDLZ': 'Kraft Foods',
'K': 'Kellogg',
'UN': 'Unilever',
'MAR': 'Marriott',
'PG': 'Procter Gamble',
'CL': 'Colgate-Palmolive',
'GE': 'General Electrics',
'WFC': 'Wells Fargo',
'JPM': 'JPMorgan Chase',
'AIG': 'AIG',
'AXP': 'American express',
'BAC': 'Bank of America',
'GS': 'Goldman Sachs',
'AAPL': 'Apple',
'SAP': 'SAP',
'CSCO': 'Cisco',
'TXN': 'Texas instruments',
'XRX': 'Xerox',
    'LMT': 'Lockheed Martin',
'WMT': 'Wal-Mart',
'WBA': 'Walgreen',
'HD': 'Home Depot',
'GSK': 'GlaxoSmithKline',
'PFE': 'Pfizer',
'SNY': 'Sanofi-Aventis',
'NVS': 'Novartis',
'KMB': 'Kimberly-Clark',
'R': 'Ryder',
'GD': 'General Dynamics',
'RTN': 'Raytheon',
'CVS': 'CVS',
'CAT': 'Caterpillar',
'DD': 'DuPont de Nemours'}
symbols, names = np.array(list(symbol_dict.items())).T
quotes = [finance.quotes_historical_yahoo(symbol, d1, d2, asobject=True)
for symbol in symbols]
open = np.array([q.open for q in quotes]).astype(np.float)
close = np.array([q.close for q in quotes]).astype(np.float)
# The daily variations of the quotes are what carry most information
variation = close - open
###############################################################################
# Learn a graphical structure from the correlations
edge_model = covariance.GraphLassoCV()
# standardize the time series: using correlations rather than covariance
# is more efficient for structure recovery
X = variation.copy().T
X /= X.std(axis=0)
edge_model.fit(X)
###############################################################################
# Cluster using affinity propagation
_, labels = cluster.affinity_propagation(edge_model.covariance_)
n_labels = labels.max()
for i in range(n_labels + 1):
print('Cluster %i: %s' % ((i + 1), ', '.join(names[labels == i])))
###############################################################################
# Find a low-dimension embedding for visualization: find the best position of
# the nodes (the stocks) on a 2D plane
# We use a dense eigen_solver to achieve reproducibility (arpack is
# initiated with random vectors that we don't control). In addition, we
# use a large number of neighbors to capture the large-scale structure.
node_position_model = manifold.LocallyLinearEmbedding(
n_components=2, eigen_solver='dense', n_neighbors=6)
embedding = node_position_model.fit_transform(X.T).T
###############################################################################
# Visualization
plt.figure(1, facecolor='w', figsize=(10, 8))
plt.clf()
ax = plt.axes([0., 0., 1., 1.])
plt.axis('off')
# Display a graph of the partial correlations
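# (Reading aid, added note: the precision matrix P is rescaled on both sides
# by d = 1/sqrt(diag(P)), so entry (i, j) becomes P_ij / sqrt(P_ii * P_jj),
# i.e. the partial correlation between stocks i and j up to sign; only pairs
# whose absolute partial correlation exceeds 0.02 are kept as graph edges.)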
partial_correlations = edge_model.precision_.copy()
d = 1 / np.sqrt(np.diag(partial_correlations))
partial_correlations *= d
partial_correlations *= d[:, np.newaxis]
non_zero = (np.abs(np.triu(partial_correlations, k=1)) > 0.02)
# Plot the nodes using the coordinates of our embedding
plt.scatter(embedding[0], embedding[1], s=100 * d ** 2, c=labels,
cmap=plt.cm.spectral)
# Plot the edges
start_idx, end_idx = np.where(non_zero)
#a sequence of (*line0*, *line1*, *line2*), where::
# linen = (x0, y0), (x1, y1), ... (xm, ym)
segments = [[embedding[:, start], embedding[:, stop]]
for start, stop in zip(start_idx, end_idx)]
values = np.abs(partial_correlations[non_zero])
lc = LineCollection(segments,
zorder=0, cmap=plt.cm.hot_r,
norm=plt.Normalize(0, .7 * values.max()))
lc.set_array(values)
lc.set_linewidths(15 * values)
ax.add_collection(lc)
# Add a label to each node. The challenge here is that we want to
# position the labels to avoid overlap with other labels
for index, (name, label, (x, y)) in enumerate(
zip(names, labels, embedding.T)):
dx = x - embedding[0]
dx[index] = 1
dy = y - embedding[1]
dy[index] = 1
this_dx = dx[np.argmin(np.abs(dy))]
this_dy = dy[np.argmin(np.abs(dx))]
if this_dx > 0:
horizontalalignment = 'left'
x = x + .002
else:
horizontalalignment = 'right'
x = x - .002
if this_dy > 0:
verticalalignment = 'bottom'
y = y + .002
else:
verticalalignment = 'top'
y = y - .002
plt.text(x, y, name, size=10,
horizontalalignment=horizontalalignment,
verticalalignment=verticalalignment,
bbox=dict(facecolor='w',
edgecolor=plt.cm.spectral(label / float(n_labels)),
alpha=.6))
plt.xlim(embedding[0].min() - .15 * embedding[0].ptp(),
embedding[0].max() + .10 * embedding[0].ptp(),)
plt.ylim(embedding[1].min() - .03 * embedding[1].ptp(),
embedding[1].max() + .03 * embedding[1].ptp())
plt.show()
| bsd-3-clause |
marcocaccin/scikit-learn | examples/linear_model/plot_omp.py | 385 | 2263 | """
===========================
Orthogonal Matching Pursuit
===========================
Using orthogonal matching pursuit for recovering a sparse signal from a noisy
measurement encoded with a dictionary
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import OrthogonalMatchingPursuit
from sklearn.linear_model import OrthogonalMatchingPursuitCV
from sklearn.datasets import make_sparse_coded_signal
n_components, n_features = 512, 100
n_nonzero_coefs = 17
# generate the data
###################
# y = Xw
# |x|_0 = n_nonzero_coefs
y, X, w = make_sparse_coded_signal(n_samples=1,
n_components=n_components,
n_features=n_features,
n_nonzero_coefs=n_nonzero_coefs,
random_state=0)
idx, = w.nonzero()
# distort the clean signal
##########################
y_noisy = y + 0.05 * np.random.randn(len(y))
# plot the sparse signal
########################
plt.figure(figsize=(7, 7))
plt.subplot(4, 1, 1)
plt.xlim(0, 512)
plt.title("Sparse signal")
plt.stem(idx, w[idx])
# plot the noise-free reconstruction
####################################
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs)
omp.fit(X, y)
coef = omp.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 2)
plt.xlim(0, 512)
plt.title("Recovered signal from noise-free measurements")
plt.stem(idx_r, coef[idx_r])
# plot the noisy reconstruction
###############################
omp.fit(X, y_noisy)
coef = omp.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 3)
plt.xlim(0, 512)
plt.title("Recovered signal from noisy measurements")
plt.stem(idx_r, coef[idx_r])
# plot the noisy reconstruction with number of non-zeros set by CV
##################################################################
omp_cv = OrthogonalMatchingPursuitCV()
omp_cv.fit(X, y_noisy)
coef = omp_cv.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 4)
plt.xlim(0, 512)
plt.title("Recovered signal from noisy measurements with CV")
plt.stem(idx_r, coef[idx_r])
plt.subplots_adjust(0.06, 0.04, 0.94, 0.90, 0.20, 0.38)
plt.suptitle('Sparse signal recovery with Orthogonal Matching Pursuit',
fontsize=16)
plt.show()
| bsd-3-clause |
abgoswam/data-science-from-scratch | code-python3/nearest_neighbors.py | 4 | 7323 | from collections import Counter
from linear_algebra import distance
from statistics import mean
import math, random
import matplotlib.pyplot as plt
def raw_majority_vote(labels):
votes = Counter(labels)
winner, _ = votes.most_common(1)[0]
return winner
def majority_vote(labels):
"""assumes that labels are ordered from nearest to farthest"""
vote_counts = Counter(labels)
winner, winner_count = vote_counts.most_common(1)[0]
num_winners = len([count
for count in vote_counts.values()
if count == winner_count])
if num_winners == 1:
return winner # unique winner, so return it
else:
return majority_vote(labels[:-1]) # try again without the farthest
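# Worked illustration of the tie-breaking above (labels ordered nearest-first):
# majority_vote(['a', 'b', 'b', 'a']) sees a 2-2 tie, drops the farthest 'a',
# and returns 'b'.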
def knn_classify(k, labeled_points, new_point):
"""each labeled point should be a pair (point, label)"""
# order the labeled points from nearest to farthest
by_distance = sorted(labeled_points,
key=lambda point_label: distance(point_label[0], new_point))
# find the labels for the k closest
k_nearest_labels = [label for _, label in by_distance[:k]]
# and let them vote
return majority_vote(k_nearest_labels)
cities = [(-86.75,33.5666666666667,'Python'),(-88.25,30.6833333333333,'Python'),(-112.016666666667,33.4333333333333,'Java'),(-110.933333333333,32.1166666666667,'Java'),(-92.2333333333333,34.7333333333333,'R'),(-121.95,37.7,'R'),(-118.15,33.8166666666667,'Python'),(-118.233333333333,34.05,'Java'),(-122.316666666667,37.8166666666667,'R'),(-117.6,34.05,'Python'),(-116.533333333333,33.8166666666667,'Python'),(-121.5,38.5166666666667,'R'),(-117.166666666667,32.7333333333333,'R'),(-122.383333333333,37.6166666666667,'R'),(-121.933333333333,37.3666666666667,'R'),(-122.016666666667,36.9833333333333,'Python'),(-104.716666666667,38.8166666666667,'Python'),(-104.866666666667,39.75,'Python'),(-72.65,41.7333333333333,'R'),(-75.6,39.6666666666667,'Python'),(-77.0333333333333,38.85,'Python'),(-80.2666666666667,25.8,'Java'),(-81.3833333333333,28.55,'Java'),(-82.5333333333333,27.9666666666667,'Java'),(-84.4333333333333,33.65,'Python'),(-116.216666666667,43.5666666666667,'Python'),(-87.75,41.7833333333333,'Java'),(-86.2833333333333,39.7333333333333,'Java'),(-93.65,41.5333333333333,'Java'),(-97.4166666666667,37.65,'Java'),(-85.7333333333333,38.1833333333333,'Python'),(-90.25,29.9833333333333,'Java'),(-70.3166666666667,43.65,'R'),(-76.6666666666667,39.1833333333333,'R'),(-71.0333333333333,42.3666666666667,'R'),(-72.5333333333333,42.2,'R'),(-83.0166666666667,42.4166666666667,'Python'),(-84.6,42.7833333333333,'Python'),(-93.2166666666667,44.8833333333333,'Python'),(-90.0833333333333,32.3166666666667,'Java'),(-94.5833333333333,39.1166666666667,'Java'),(-90.3833333333333,38.75,'Python'),(-108.533333333333,45.8,'Python'),(-95.9,41.3,'Python'),(-115.166666666667,36.0833333333333,'Java'),(-71.4333333333333,42.9333333333333,'R'),(-74.1666666666667,40.7,'R'),(-106.616666666667,35.05,'Python'),(-78.7333333333333,42.9333333333333,'R'),(-73.9666666666667,40.7833333333333,'R'),(-80.9333333333333,35.2166666666667,'Python'),(-78.7833333333333,35.8666666666667,'Python'),(-100.75,46.7666666666667,'Java'),(-84.5166666666667,39.15,'Java'),(-81.85,41.4,'Java'),(-82.8833333333333,40,'Java'),(-97.6,35.4,'Python'),(-122.666666666667,45.5333333333333,'Python'),(-75.25,39.8833333333333,'Python'),(-80.2166666666667,40.5,'Python'),(-71.4333333333333,41.7333333333333,'R'),(-81.1166666666667,33.95,'R'),(-96.7333333333333,43.5666666666667,'Python'),(-90,35.05,'R'),(-86.6833333333333,36.1166666666667,'R'),(-97.7,30.3,'Python'),(-96.85,32.85,'Java'),(-95.35,29.9666666666667,'Java'),(-98.4666666666667,29.5333333333333,'Java'),(-111.966666666667,40.7666666666667,'Python'),(-73.15,44.4666666666667,'R'),(-77.3333333333333,37.5,'Python'),(-122.3,47.5333333333333,'Python'),(-89.3333333333333,43.1333333333333,'R'),(-104.816666666667,41.15,'Java')]
cities = [([longitude, latitude], language) for longitude, latitude, language in cities]
def plot_state_borders(plt, color='0.8'):
pass
def plot_cities():
# key is language, value is pair (longitudes, latitudes)
plots = { "Java" : ([], []), "Python" : ([], []), "R" : ([], []) }
# we want each language to have a different marker and color
markers = { "Java" : "o", "Python" : "s", "R" : "^" }
colors = { "Java" : "r", "Python" : "b", "R" : "g" }
for (longitude, latitude), language in cities:
plots[language][0].append(longitude)
plots[language][1].append(latitude)
# create a scatter series for each language
for language, (x, y) in plots.items():
plt.scatter(x, y, color=colors[language], marker=markers[language],
label=language, zorder=10)
plot_state_borders(plt) # assume we have a function that does this
plt.legend(loc=0) # let matplotlib choose the location
plt.axis([-130,-60,20,55]) # set the axes
plt.title("Favorite Programming Languages")
plt.show()
def classify_and_plot_grid(k=1):
plots = { "Java" : ([], []), "Python" : ([], []), "R" : ([], []) }
markers = { "Java" : "o", "Python" : "s", "R" : "^" }
colors = { "Java" : "r", "Python" : "b", "R" : "g" }
for longitude in range(-130, -60):
for latitude in range(20, 55):
predicted_language = knn_classify(k, cities, [longitude, latitude])
plots[predicted_language][0].append(longitude)
plots[predicted_language][1].append(latitude)
# create a scatter series for each language
for language, (x, y) in plots.items():
plt.scatter(x, y, color=colors[language], marker=markers[language],
label=language, zorder=0)
plot_state_borders(plt, color='black') # assume we have a function that does this
plt.legend(loc=0) # let matplotlib choose the location
plt.axis([-130,-60,20,55]) # set the axes
plt.title(str(k) + "-Nearest Neighbor Programming Languages")
plt.show()
#
# the curse of dimensionality
#
def random_point(dim):
return [random.random() for _ in range(dim)]
def random_distances(dim, num_pairs):
return [distance(random_point(dim), random_point(dim))
for _ in range(num_pairs)]
if __name__ == "__main__":
# try several different values for k
for k in [1, 3, 5, 7]:
num_correct = 0
for location, actual_language in cities:
other_cities = [other_city
for other_city in cities
if other_city != (location, actual_language)]
predicted_language = knn_classify(k, other_cities, location)
if predicted_language == actual_language:
num_correct += 1
print(k, "neighbor[s]:", num_correct, "correct out of", len(cities))
dimensions = range(1, 101, 5)
avg_distances = []
min_distances = []
random.seed(0)
for dim in dimensions:
distances = random_distances(dim, 10000) # 10,000 random pairs
avg_distances.append(mean(distances)) # track the average
min_distances.append(min(distances)) # track the minimum
print(dim, min(distances), mean(distances), min(distances) / mean(distances))
| unlicense |
jerkos/cobrapy | setup.py | 1 | 7289 | from os.path import isfile, abspath, dirname, join
from sys import argv, path
# To temporarily modify sys.path
SETUP_DIR = abspath(dirname(__file__))
try:
from setuptools import setup, find_packages
except ImportError:
path.insert(0, SETUP_DIR)
import ez_setup
path.pop(0)
ez_setup.use_setuptools()
from setuptools import setup, find_packages
# for running parallel tests due to a bug in python 2.7.3
# http://bugs.python.org/issue15881#msg170215
try:
import multiprocessing
except:
None
# import version to get the version string
path.insert(0, join(SETUP_DIR, "cobra"))
from version import get_version, update_release_version
path.pop(0)
version = get_version(pep440=True)
# If building something for distribution, ensure the VERSION
# file is up to date
if "sdist" in argv or "bdist_wheel" in argv:
update_release_version()
# cython is optional for building. The c file can be used directly. However,
# for certain functions, the c file must be generated, which requires cython.
try:
from Cython.Build import cythonize
from distutils.version import StrictVersion
import Cython
try:
cython_version = StrictVersion(Cython.__version__)
except ValueError:
raise ImportError("Cython version not parseable")
else:
if cython_version < StrictVersion("0.21"):
raise ImportError("Cython version too old to use")
except ImportError:
cythonize = None
for k in ["sdist", "develop"]:
if k in argv:
raise Exception("Cython >= 0.21 required for " + k)
# Begin constructing arguments for building
setup_kwargs = {}
# for building the cglpk solver
try:
from distutils.extension import Extension
from distutils.command.build_ext import build_ext
from os import name
    from platform import system
    from warnings import warn
class FailBuild(build_ext):
"""allow building of the C extension to fail"""
def run(self):
try:
build_ext.run(self)
except Exception as e:
warn(e)
def build_extension(self, ext):
try:
build_ext.build_extension(self, ext)
except:
None
build_args = {}
setup_kwargs["cmdclass"] = {"build_ext": FailBuild}
# MAC OS X needs some additional configuration tweaks
# Build should be run with the python.org python
# Cython will output C which could generate warnings in clang
# due to the addition of additional unneeded functions. Because
# this is a known phenomenon, these warnings are silenced to
# make other potential warnings which do signal errors stand
# out.
if system() == "Darwin":
build_args["extra_compile_args"] = ["-Wno-unused-function"]
build_args["libraries"] = ["glpk"]
# It is possible to statically link libglpk to the built extension. This
# allows for simplified installation without the need to install libglpk to
# the system, and is also usueful when installing a particular version of
# glpk which conflicts with thesystem version. A static libglpk.a can be
# built by running configure with the export CLFAGS="-fPIC" and copying the
# file from src/.libs to either the default lib directory or to the build
# directory. For an example script, see
# https://gist.github.com/aebrahim/94a2b231d86821f7f225
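    # A rough sketch of that static build (commands and paths are
    # illustrative only):
    #     export CFLAGS="-fPIC"
    #     ./configure && make
    #     cp src/.libs/libglpk.a /path/to/cobrapy/   # next to setup.py
    # Optionally also place glpk.h next to setup.py so the isfile() checks
    # below pick up both the library and the header.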
include_dirs = []
library_dirs = []
if isfile("libglpk.a"):
library_dirs.append(abspath("."))
if isfile("glpk.h"):
include_dirs.append(abspath("."))
if name == "posix":
from subprocess import check_output
try:
glpksol_path = check_output(["which", "glpsol"]).strip()
glpk_path = abspath(join(dirname(glpksol_path), ".."))
include_dirs.append(join(glpk_path, "include"))
library_dirs.append(join(glpk_path, "lib"))
except:
None
if len(include_dirs) > 0:
build_args["include_dirs"] = include_dirs
if len(library_dirs) > 0:
build_args["library_dirs"] = library_dirs
# use cython if present, otherwise use c file
if cythonize:
ext_modules = cythonize([Extension("cobra.solvers.cglpk",
["cobra/solvers/cglpk.pyx"],
**build_args)],
force=True)
else:
ext_modules = [Extension("cobra.solvers.cglpk",
["cobra/solvers/cglpk.c"], **build_args)]
except:
ext_modules = None
extras = {
'matlab': ["pymatbridge"],
'sbml': ["python-libsbml", "lxml"],
'array': ["numpy>=1.6", "scipy>=11.0"],
'display': ["matplotlib", "brewer2mpl", "pandas"]
}
all_extras = {'Cython>=0.21'}
for extra in extras.values():
all_extras.update(extra)
extras["all"] = list(all_extras)
# If using bdist_wininst, the installer will not get dependencies like
# a setuptools installation does. Therefore, for the one external dependency,
# which is six.py, we can just download it here and include it in the
# installer.
# The file six.py will need to be manually downloaded and placed in the
# same directory as setup.py.
if "bdist_wininst" in argv:
setup_kwargs["py_modules"] = ["six"]
try:
import pypandoc
readme = pypandoc.convert("README.md", "rst")
install = pypandoc.convert("INSTALL.md", "rst")
setup_kwargs["long_description"] = readme + "\n\n" + install
except:
with open("README.md", "r") as infile:
setup_kwargs["long_description"] = infile.read()
setup(
name="cobra",
version=version,
packages=find_packages(exclude=['cobra.oven', 'cobra.oven*']),
setup_requires=[],
install_requires=["six"],
tests_require=["jsonschema > 2.5"],
extras_require=extras,
ext_modules=ext_modules,
package_data={
'': ['test/data/*',
'VERSION',
'mlab/matlab_scripts/*m']},
author="Daniel Robert Hyduke <[email protected]>, "
"Ali Ebrahim <[email protected]>",
author_email="[email protected]",
description="COBRApy is a package for constraints-based modeling of "
"biological networks",
license="LGPL/GPL v2+",
keywords="metabolism biology linear programming optimization flux"
" balance analysis fba",
url="https://opencobra.github.io/cobrapy",
test_suite="cobra.test.suite",
download_url='https://pypi.python.org/pypi/cobra',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: GNU Lesser General Public License v2'
' or later (LGPLv2+)',
'License :: OSI Approved :: GNU General Public License v2'
' or later (GPLv2+)',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Programming Language :: Cython',
'Programming Language :: Python :: Implementation :: CPython',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Bio-Informatics'
],
platforms="GNU/Linux, Mac OS X >= 10.7, Microsoft Windows >= 7",
**setup_kwargs)
| lgpl-2.1 |
seagoat/mining | mining/controllers/stream.py | 2 | 4185 | # -*- coding: utf-8 -*-
from gevent import monkey
monkey.patch_all()
import json
import gc
from bottle import Bottle, abort, request
from bottle.ext.websocket import websocket
from bottle.ext.mongo import MongoPlugin
from pandas import DataFrame
from mining.utils import conf
from mining.utils._pandas import df_generate, DataFrameSearchColumn
from mining.db import DataWarehouse
stream_app = Bottle()
mongo = MongoPlugin(
uri=conf("mongodb")["uri"],
db=conf("mongodb")["db"],
json_mongo=True)
stream_app.install(mongo)
@stream_app.route('/data/<slug>', apply=[websocket])
def data(ws, mongodb, slug):
if not ws:
abort(400, 'Expected WebSocket request.')
DW = DataWarehouse()
element = mongodb['element'].find_one({'slug': slug})
element['page_limit'] = 50
if request.GET.get('limit', True) is False:
element['page_limit'] = 9999999999
if element['type'] == 'grid':
page = int(request.GET.get('page', 1))
page_start = 0
page_end = element['page_limit']
if page >= 2:
page_end = element['page_limit'] * page
page_start = page_end - element['page_limit']
else:
page = 1
page_start = None
page_end = None
filters = [i[0] for i in request.GET.iteritems()
if len(i[0].split('filter__')) > 1]
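    # (Illustrative note: a query-string key of the form
    # filter__<field>__<operator>, e.g. ?filter__name__like=foo, is split
    # further below into field 'name', operator 'like' and the given value.)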
if not DW.search:
data = DW.get(element.get('cube'), page=page)
else:
data = DW.get(element.get('cube'), filters=filters, page=page)
columns = data.get('columns') or []
fields = columns
if request.GET.get('fields', None):
fields = request.GET.get('fields').split(',')
cube_last_update = mongodb['cube'].find_one({'slug': element.get('cube')})
ws.send(json.dumps({'type': 'last_update',
'data': str(cube_last_update.get('lastupdate', ''))}))
ws.send(json.dumps({'type': 'columns', 'data': fields}))
df = DataFrame(data.get('data') or {}, columns=fields)
if len(filters) >= 1:
for f in filters:
s = f.split('__')
field = s[1]
operator = s[2]
value = request.GET.get(f)
if operator == 'like':
df = df[df[field].str.contains(value)]
elif operator == 'regex':
df = DataFrameSearchColumn(df, field, value, operator)
else:
df = df.query(df_generate(df, value, f))
groupby = []
if request.GET.get('groupby', None):
groupby = request.GET.get('groupby', "").split(',')
if len(groupby) >= 1:
df = DataFrame(df.groupby(groupby).grouper.get_group_levels())
if request.GET.get('orderby',
element.get('orderby', None)) and request.GET.get(
'orderby', element.get('orderby', None)) in fields:
orderby = request.GET.get('orderby', element.get('orderby', ''))
if type(orderby) == str:
orderby = orderby.split(',')
orderby__order = request.GET.get('orderby__order',
element.get('orderby__order', ''))
if type(orderby__order) == str:
orderby__order = orderby__order.split(',')
ind = 0
for orde in orderby__order:
if orde == '0':
orderby__order[ind] = False
else:
orderby__order[ind] = True
ind += 1
df = df.sort(orderby, ascending=orderby__order)
ws.send(json.dumps({'type': 'max_page',
'data': data.get('count', len(df))}))
# CLEAN MEMORY
del filters, fields, columns
gc.collect()
categories = []
records = df.to_dict(orient='records')
if not DW.search:
records = records[page_start:page_end]
for i in records:
if element.get('categories', None):
categories.append(i[element.get('categories')])
ws.send(json.dumps({'type': 'data', 'data': i}))
# CLEAN MEMORY
del df
gc.collect()
ws.send(json.dumps({'type': 'categories', 'data': categories}))
ws.send(json.dumps({'type': 'close'}))
# CLEAN MEMORY
del categories
gc.collect()
| mit |
billy-inn/scikit-learn | examples/model_selection/plot_validation_curve.py | 229 | 1823 | """
==========================
Plotting Validation Curves
==========================
In this plot you can see the training scores and validation scores of an SVM
for different values of the kernel parameter gamma. For very low values of
gamma, you can see that both the training score and the validation score are
low. This is called underfitting. Medium values of gamma will result in high
values for both scores, i.e. the classifier is performing fairly well. If gamma
is too high, the classifier will overfit, which means that the training score
is good but the validation score is poor.
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import load_digits
from sklearn.svm import SVC
from sklearn.learning_curve import validation_curve
digits = load_digits()
X, y = digits.data, digits.target
param_range = np.logspace(-6, -1, 5)
train_scores, test_scores = validation_curve(
SVC(), X, y, param_name="gamma", param_range=param_range,
cv=10, scoring="accuracy", n_jobs=1)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.title("Validation Curve with SVM")
plt.xlabel("$\gamma$")
plt.ylabel("Score")
plt.ylim(0.0, 1.1)
plt.semilogx(param_range, train_scores_mean, label="Training score", color="r")
plt.fill_between(param_range, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.2, color="r")
plt.semilogx(param_range, test_scores_mean, label="Cross-validation score",
color="g")
plt.fill_between(param_range, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.2, color="g")
plt.legend(loc="best")
plt.show()
| bsd-3-clause |
kalvdans/scipy | scipy/special/basic.py | 2 | 71090 | #
# Author: Travis Oliphant, 2002
#
from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
import math
from scipy._lib.six import xrange
from numpy import (pi, asarray, floor, isscalar, iscomplex, real,
imag, sqrt, where, mgrid, sin, place, issubdtype,
extract, less, inexact, nan, zeros, sinc)
from ._ufuncs import (ellipkm1, mathieu_a, mathieu_b, iv, jv, gamma,
psi, _zeta, hankel1, hankel2, yv, kv, _gammaln,
ndtri, errprint, poch, binom, hyp0f1)
from . import specfun
from . import orthogonal
from ._comb import _comb_int
__all__ = ['agm', 'ai_zeros', 'assoc_laguerre', 'bei_zeros', 'beip_zeros',
'ber_zeros', 'bernoulli', 'berp_zeros', 'bessel_diff_formula',
'bi_zeros', 'clpmn', 'comb', 'digamma', 'diric', 'ellipk',
'erf_zeros', 'erfcinv', 'erfinv', 'errprint', 'euler', 'factorial',
'factorialk', 'factorial2', 'fresnel_zeros',
'fresnelc_zeros', 'fresnels_zeros', 'gamma', 'gammaln', 'h1vp',
'h2vp', 'hankel1', 'hankel2', 'hyp0f1', 'iv', 'ivp', 'jn_zeros',
'jnjnp_zeros', 'jnp_zeros', 'jnyn_zeros', 'jv', 'jvp', 'kei_zeros',
'keip_zeros', 'kelvin_zeros', 'ker_zeros', 'kerp_zeros', 'kv',
'kvp', 'lmbda', 'lpmn', 'lpn', 'lqmn', 'lqn', 'mathieu_a',
'mathieu_b', 'mathieu_even_coef', 'mathieu_odd_coef', 'ndtri',
'obl_cv_seq', 'pbdn_seq', 'pbdv_seq', 'pbvv_seq', 'perm',
'polygamma', 'pro_cv_seq', 'psi', 'riccati_jn', 'riccati_yn',
'sinc', 'sph_in', 'sph_inkn',
'sph_jn', 'sph_jnyn', 'sph_kn', 'sph_yn', 'y0_zeros', 'y1_zeros',
'y1p_zeros', 'yn_zeros', 'ynp_zeros', 'yv', 'yvp', 'zeta',
'SpecialFunctionWarning']
class SpecialFunctionWarning(Warning):
"""Warning that can be issued with ``errprint(True)``"""
pass
warnings.simplefilter("always", category=SpecialFunctionWarning)
def diric(x, n):
"""Periodic sinc function, also called the Dirichlet function.
The Dirichlet function is defined as::
diric(x) = sin(x * n/2) / (n * sin(x / 2)),
where `n` is a positive integer.
Parameters
----------
x : array_like
Input data
n : int
Integer defining the periodicity.
Returns
-------
diric : ndarray
Examples
--------
>>> from scipy import special
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-8*np.pi, 8*np.pi, num=201)
>>> plt.figure(figsize=(8, 8));
>>> for idx, n in enumerate([2, 3, 4, 9]):
... plt.subplot(2, 2, idx+1)
... plt.plot(x, special.diric(x, n))
... plt.title('diric, n={}'.format(n))
>>> plt.show()
The following example demonstrates that `diric` gives the magnitudes
(modulo the sign and scaling) of the Fourier coefficients of a
rectangular pulse.
Suppress output of values that are effectively 0:
>>> np.set_printoptions(suppress=True)
Create a signal `x` of length `m` with `k` ones:
>>> m = 8
>>> k = 3
>>> x = np.zeros(m)
>>> x[:k] = 1
Use the FFT to compute the Fourier transform of `x`, and
inspect the magnitudes of the coefficients:
>>> np.abs(np.fft.fft(x))
array([ 3. , 2.41421356, 1. , 0.41421356, 1. ,
0.41421356, 1. , 2.41421356])
Now find the same values (up to sign) using `diric`. We multiply
by `k` to account for the different scaling conventions of
`numpy.fft.fft` and `diric`:
>>> theta = np.linspace(0, 2*np.pi, m, endpoint=False)
>>> k * special.diric(theta, k)
array([ 3. , 2.41421356, 1. , -0.41421356, -1. ,
-0.41421356, 1. , 2.41421356])
"""
x, n = asarray(x), asarray(n)
n = asarray(n + (x-x))
x = asarray(x + (n-n))
if issubdtype(x.dtype, inexact):
ytype = x.dtype
else:
ytype = float
y = zeros(x.shape, ytype)
# empirical minval for 32, 64 or 128 bit float computations
# where sin(x/2) < minval, result is fixed at +1 or -1
if np.finfo(ytype).eps < 1e-18:
minval = 1e-11
elif np.finfo(ytype).eps < 1e-15:
minval = 1e-7
else:
minval = 1e-3
mask1 = (n <= 0) | (n != floor(n))
place(y, mask1, nan)
x = x / 2
denom = sin(x)
mask2 = (1-mask1) & (abs(denom) < minval)
xsub = extract(mask2, x)
nsub = extract(mask2, n)
zsub = xsub / pi
place(y, mask2, pow(-1, np.round(zsub)*(nsub-1)))
mask = (1-mask1) & (1-mask2)
xsub = extract(mask, x)
nsub = extract(mask, n)
dsub = extract(mask, denom)
place(y, mask, sin(nsub*xsub)/(nsub*dsub))
return y
def gammaln(x):
"""
Logarithm of the absolute value of the Gamma function for real inputs.
Parameters
----------
x : array-like
Values on the real line at which to compute ``gammaln``
Returns
-------
gammaln : ndarray
Values of ``gammaln`` at x.
See Also
--------
gammasgn : sign of the gamma function
loggamma : principal branch of the logarithm of the gamma function
Notes
-----
When used in conjunction with `gammasgn`, this function is useful
for working in logspace on the real axis without having to deal with
complex numbers, via the relation ``exp(gammaln(x)) = gammasgn(x)*gamma(x)``.
Note that `gammaln` currently accepts complex-valued inputs, but it is not
the same function as for real-valued inputs, and the branch is not
well-defined --- using `gammaln` with complex is deprecated and will be
disallowed in future Scipy versions.
For complex-valued log-gamma, use `loggamma` instead of `gammaln`.
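    Examples
    --------
    A quick numerical check of the relation quoted above (a sketch assuming
    `gamma` and `gammasgn` are imported alongside `gammaln`):
    >>> from scipy.special import gamma, gammaln, gammasgn
    >>> x = np.array([-2.5, 1.5, 10.5])
    >>> np.allclose(gammasgn(x) * np.exp(gammaln(x)), gamma(x))
    True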
"""
if np.iscomplexobj(x):
warnings.warn(("Use of gammaln for complex arguments is "
"deprecated as of scipy 0.18.0. Use "
"scipy.special.loggamma instead."),
DeprecationWarning)
return _gammaln(x)
def jnjnp_zeros(nt):
"""Compute zeros of integer-order Bessel functions Jn and Jn'.
Results are arranged in order of the magnitudes of the zeros.
Parameters
----------
nt : int
Number (<=1200) of zeros to compute
Returns
-------
zo[l-1] : ndarray
Value of the lth zero of Jn(x) and Jn'(x). Of length `nt`.
n[l-1] : ndarray
Order of the Jn(x) or Jn'(x) associated with lth zero. Of length `nt`.
m[l-1] : ndarray
Serial number of the zeros of Jn(x) or Jn'(x) associated
with lth zero. Of length `nt`.
t[l-1] : ndarray
0 if lth zero in zo is zero of Jn(x), 1 if it is a zero of Jn'(x). Of
length `nt`.
See Also
--------
jn_zeros, jnp_zeros : to get separated arrays of zeros.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt > 1200):
raise ValueError("Number must be integer <= 1200.")
nt = int(nt)
n, m, t, zo = specfun.jdzo(nt)
return zo[1:nt+1], n[:nt], m[:nt], t[:nt]
def jnyn_zeros(n, nt):
"""Compute nt zeros of Bessel functions Jn(x), Jn'(x), Yn(x), and Yn'(x).
Returns 4 arrays of length `nt`, corresponding to the first `nt` zeros of
Jn(x), Jn'(x), Yn(x), and Yn'(x), respectively.
Parameters
----------
n : int
Order of the Bessel functions
nt : int
Number (<=1200) of zeros to compute
See jn_zeros, jnp_zeros, yn_zeros, ynp_zeros to get separate arrays.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(nt) and isscalar(n)):
raise ValueError("Arguments must be scalars.")
if (floor(n) != n) or (floor(nt) != nt):
raise ValueError("Arguments must be integers.")
if (nt <= 0):
raise ValueError("nt > 0")
return specfun.jyzo(abs(n), nt)
def jn_zeros(n, nt):
"""Compute zeros of integer-order Bessel function Jn(x).
Parameters
----------
n : int
Order of Bessel function
nt : int
Number of zeros to return
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
"""
return jnyn_zeros(n, nt)[0]
def jnp_zeros(n, nt):
"""Compute zeros of integer-order Bessel function derivative Jn'(x).
Parameters
----------
n : int
Order of Bessel function
nt : int
Number of zeros to return
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
"""
return jnyn_zeros(n, nt)[1]
def yn_zeros(n, nt):
"""Compute zeros of integer-order Bessel function Yn(x).
Parameters
----------
n : int
Order of Bessel function
nt : int
Number of zeros to return
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
"""
return jnyn_zeros(n, nt)[2]
def ynp_zeros(n, nt):
"""Compute zeros of integer-order Bessel function derivative Yn'(x).
Parameters
----------
n : int
Order of Bessel function
nt : int
Number of zeros to return
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
"""
return jnyn_zeros(n, nt)[3]
def y0_zeros(nt, complex=False):
"""Compute nt zeros of Bessel function Y0(z), and derivative at each zero.
The derivatives are given by Y0'(z0) = -Y1(z0) at each zero z0.
Parameters
----------
nt : int
Number of zeros to return
complex : bool, default False
Set to False to return only the real zeros; set to True to return only
the complex zeros with negative real part and positive imaginary part.
Note that the complex conjugates of the latter are also zeros of the
function, but are not returned by this routine.
Returns
-------
z0n : ndarray
Location of nth zero of Y0(z)
y0pz0n : ndarray
Value of derivative Y0'(z0) for nth zero
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("Arguments must be scalar positive integer.")
kf = 0
kc = not complex
return specfun.cyzo(nt, kf, kc)
def y1_zeros(nt, complex=False):
"""Compute nt zeros of Bessel function Y1(z), and derivative at each zero.
The derivatives are given by Y1'(z1) = Y0(z1) at each zero z1.
Parameters
----------
nt : int
Number of zeros to return
complex : bool, default False
Set to False to return only the real zeros; set to True to return only
the complex zeros with negative real part and positive imaginary part.
Note that the complex conjugates of the latter are also zeros of the
function, but are not returned by this routine.
Returns
-------
z1n : ndarray
Location of nth zero of Y1(z)
y1pz1n : ndarray
Value of derivative Y1'(z1) for nth zero
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("Arguments must be scalar positive integer.")
kf = 1
kc = not complex
return specfun.cyzo(nt, kf, kc)
def y1p_zeros(nt, complex=False):
"""Compute nt zeros of Bessel derivative Y1'(z), and value at each zero.
The values are given by Y1(z1) at each z1 where Y1'(z1)=0.
Parameters
----------
nt : int
Number of zeros to return
complex : bool, default False
Set to False to return only the real zeros; set to True to return only
the complex zeros with negative real part and positive imaginary part.
Note that the complex conjugates of the latter are also zeros of the
function, but are not returned by this routine.
Returns
-------
z1pn : ndarray
Location of nth zero of Y1'(z)
y1z1pn : ndarray
Value of derivative Y1(z1) for nth zero
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("Arguments must be scalar positive integer.")
kf = 2
kc = not complex
return specfun.cyzo(nt, kf, kc)
def _bessel_diff_formula(v, z, n, L, phase):
# from AMS55.
# L(v, z) = J(v, z), Y(v, z), H1(v, z), H2(v, z), phase = -1
# L(v, z) = I(v, z) or exp(v*pi*i)K(v, z), phase = 1
# For K, you can pull out the exp((v-k)*pi*i) into the caller
v = asarray(v)
p = 1.0
s = L(v-n, z)
for i in xrange(1, n+1):
p = phase * (p * (n-i+1)) / i # = choose(k, i)
s += p*L(v-n + i*2, z)
return s / (2.**n)
bessel_diff_formula = np.deprecate(_bessel_diff_formula,
message="bessel_diff_formula is a private function, do not use it!")
def jvp(v, z, n=1):
"""Compute nth derivative of Bessel function Jv(z) with respect to `z`.
Parameters
----------
v : float
Order of Bessel function
z : complex
Argument at which to evaluate the derivative
n : int, default 1
Order of derivative
Notes
-----
The derivative is computed using the relation DLMF 10.6.7 [2]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
.. [2] NIST Digital Library of Mathematical Functions.
http://dlmf.nist.gov/10.6.E7
"""
if not isinstance(n, int) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if n == 0:
return jv(v, z)
else:
return _bessel_diff_formula(v, z, n, jv, -1)
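# --- Illustrative check (added for exposition; not part of the original module). ---
# The recurrence above (DLMF 10.6.7) gives analytic derivatives; a quick sanity check is to
# compare the first derivative with a central finite difference of `jv`.  This is a hedged
# sketch: the sample point and step size below are arbitrary choices, not library API.
def _jvp_finite_difference_check(v=2.5, z=1.7, h=1e-6):
    """Return (analytic, numeric) first derivatives of Jv at z for a rough comparison."""
    from scipy.special import jv
    numeric = (jv(v, z + h) - jv(v, z - h)) / (2.0 * h)
    return jvp(v, z, 1), numeric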
def yvp(v, z, n=1):
"""Compute nth derivative of Bessel function Yv(z) with respect to `z`.
Parameters
----------
v : float
Order of Bessel function
z : complex
Argument at which to evaluate the derivative
n : int, default 1
Order of derivative
Notes
-----
The derivative is computed using the relation DLMF 10.6.7 [2]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
.. [2] NIST Digital Library of Mathematical Functions.
http://dlmf.nist.gov/10.6.E7
"""
if not isinstance(n, int) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if n == 0:
return yv(v, z)
else:
return _bessel_diff_formula(v, z, n, yv, -1)
def kvp(v, z, n=1):
"""Compute nth derivative of real-order modified Bessel function Kv(z)
Kv(z) is the modified Bessel function of the second kind.
Derivative is calculated with respect to `z`.
Parameters
----------
v : array_like of float
Order of Bessel function
z : array_like of complex
Argument at which to evaluate the derivative
n : int
Order of derivative. Default is first derivative.
Returns
-------
out : ndarray
The results
Examples
--------
Calculate multiple values at order 5:
>>> from scipy.special import kvp
>>> kvp(5, (1, 2, 3+5j))
array([-1849.0354+0.j , -25.7735+0.j , -0.0307+0.0875j])
Calculate for a single value at multiple orders:
>>> kvp((4, 4.5, 5), 1)
array([ -184.0309, -568.9585, -1849.0354])
Notes
-----
The derivative is computed using the relation DLMF 10.29.5 [2]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 6.
http://jin.ece.illinois.edu/specfunc.html
.. [2] NIST Digital Library of Mathematical Functions.
http://dlmf.nist.gov/10.29.E5
"""
if not isinstance(n, int) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if n == 0:
return kv(v, z)
else:
return (-1)**n * _bessel_diff_formula(v, z, n, kv, 1)
def ivp(v, z, n=1):
"""Compute nth derivative of modified Bessel function Iv(z) with respect
to `z`.
Parameters
----------
v : array_like of float
Order of Bessel function
z : array_like of complex
Argument at which to evaluate the derivative
n : int, default 1
Order of derivative
Notes
-----
The derivative is computed using the relation DLMF 10.29.5 [2]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 6.
http://jin.ece.illinois.edu/specfunc.html
.. [2] NIST Digital Library of Mathematical Functions.
http://dlmf.nist.gov/10.29.E5
"""
if not isinstance(n, int) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if n == 0:
return iv(v, z)
else:
return _bessel_diff_formula(v, z, n, iv, 1)
def h1vp(v, z, n=1):
"""Compute nth derivative of Hankel function H1v(z) with respect to `z`.
Parameters
----------
v : float
Order of Hankel function
z : complex
Argument at which to evaluate the derivative
n : int, default 1
Order of derivative
Notes
-----
The derivative is computed using the relation DLMF 10.6.7 [2]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
.. [2] NIST Digital Library of Mathematical Functions.
http://dlmf.nist.gov/10.6.E7
"""
if not isinstance(n, int) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if n == 0:
return hankel1(v, z)
else:
return _bessel_diff_formula(v, z, n, hankel1, -1)
def h2vp(v, z, n=1):
"""Compute nth derivative of Hankel function H2v(z) with respect to `z`.
Parameters
----------
v : float
Order of Hankel function
z : complex
Argument at which to evaluate the derivative
n : int, default 1
Order of derivative
Notes
-----
The derivative is computed using the relation DLMF 10.6.7 [2]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
.. [2] NIST Digital Library of Mathematical Functions.
http://dlmf.nist.gov/10.6.E7
"""
if not isinstance(n, int) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if n == 0:
return hankel2(v, z)
else:
return _bessel_diff_formula(v, z, n, hankel2, -1)
@np.deprecate(message="scipy.special.sph_jn is deprecated in scipy 0.18.0. "
"Use scipy.special.spherical_jn instead. "
"Note that the new function has a different signature.")
def sph_jn(n, z):
"""Compute spherical Bessel function jn(z) and derivative.
This function computes the value and first derivative of jn(z) for all
orders up to and including n.
Parameters
----------
n : int
Maximum order of jn to compute
z : complex
Argument at which to evaluate
Returns
-------
jn : ndarray
Value of j0(z), ..., jn(z)
jnp : ndarray
First derivative j0'(z), ..., jn'(z)
See also
--------
spherical_jn
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 8.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n < 1):
n1 = 1
else:
n1 = n
if iscomplex(z):
nm, jn, jnp, yn, ynp = specfun.csphjy(n1, z)
else:
nm, jn, jnp = specfun.sphj(n1, z)
return jn[:(n+1)], jnp[:(n+1)]
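# --- Migration sketch (added for exposition; not part of the original module). ---
# The deprecation message above points to `spherical_jn`, whose signature differs: the order
# comes first and derivatives are requested with `derivative=True`.  Assuming scipy >= 0.18,
# where `spherical_jn` is available, the deprecated return pair can be reproduced like this:
def _sph_jn_via_spherical_jn(n, z):
    """Return (values, first derivatives) of j_0(z)..j_n(z) using the replacement API."""
    import numpy as np
    from scipy.special import spherical_jn
    orders = np.arange(n + 1)
    return spherical_jn(orders, z), spherical_jn(orders, z, derivative=True)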
@np.deprecate(message="scipy.special.sph_yn is deprecated in scipy 0.18.0. "
"Use scipy.special.spherical_yn instead. "
"Note that the new function has a different signature.")
def sph_yn(n, z):
"""Compute spherical Bessel function yn(z) and derivative.
This function computes the value and first derivative of yn(z) for all
orders up to and including n.
Parameters
----------
n : int
Maximum order of yn to compute
z : complex
Argument at which to evaluate
Returns
-------
yn : ndarray
Value of y0(z), ..., yn(z)
ynp : ndarray
First derivative y0'(z), ..., yn'(z)
See also
--------
spherical_yn
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 8.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n < 1):
n1 = 1
else:
n1 = n
if iscomplex(z) or less(z, 0):
nm, jn, jnp, yn, ynp = specfun.csphjy(n1, z)
else:
nm, yn, ynp = specfun.sphy(n1, z)
return yn[:(n+1)], ynp[:(n+1)]
@np.deprecate(message="scipy.special.sph_jnyn is deprecated in scipy 0.18.0. "
"Use scipy.special.spherical_jn and "
"scipy.special.spherical_yn instead. "
"Note that the new function has a different signature.")
def sph_jnyn(n, z):
"""Compute spherical Bessel functions jn(z) and yn(z) and derivatives.
This function computes the value and first derivative of jn(z) and yn(z)
for all orders up to and including n.
Parameters
----------
n : int
Maximum order of jn and yn to compute
z : complex
Argument at which to evaluate
Returns
-------
jn : ndarray
Value of j0(z), ..., jn(z)
jnp : ndarray
First derivative j0'(z), ..., jn'(z)
yn : ndarray
Value of y0(z), ..., yn(z)
ynp : ndarray
First derivative y0'(z), ..., yn'(z)
See also
--------
spherical_jn
spherical_yn
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 8.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n < 1):
n1 = 1
else:
n1 = n
if iscomplex(z) or less(z, 0):
nm, jn, jnp, yn, ynp = specfun.csphjy(n1, z)
else:
nm, yn, ynp = specfun.sphy(n1, z)
nm, jn, jnp = specfun.sphj(n1, z)
return jn[:(n+1)], jnp[:(n+1)], yn[:(n+1)], ynp[:(n+1)]
@np.deprecate(message="scipy.special.sph_in is deprecated in scipy 0.18.0. "
"Use scipy.special.spherical_in instead. "
"Note that the new function has a different signature.")
def sph_in(n, z):
"""Compute spherical Bessel function in(z) and derivative.
This function computes the value and first derivative of in(z) for all
orders up to and including n.
Parameters
----------
n : int
Maximum order of in to compute
z : complex
Argument at which to evaluate
Returns
-------
in : ndarray
Value of i0(z), ..., in(z)
inp : ndarray
First derivative i0'(z), ..., in'(z)
See also
--------
spherical_in
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 8.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n < 1):
n1 = 1
else:
n1 = n
if iscomplex(z):
nm, In, Inp, kn, knp = specfun.csphik(n1, z)
else:
nm, In, Inp = specfun.sphi(n1, z)
return In[:(n+1)], Inp[:(n+1)]
@np.deprecate(message="scipy.special.sph_kn is deprecated in scipy 0.18.0. "
"Use scipy.special.spherical_kn instead. "
"Note that the new function has a different signature.")
def sph_kn(n, z):
"""Compute spherical Bessel function kn(z) and derivative.
This function computes the value and first derivative of kn(z) for all
orders up to and including n.
Parameters
----------
n : int
Maximum order of kn to compute
z : complex
Argument at which to evaluate
Returns
-------
kn : ndarray
Value of k0(z), ..., kn(z)
knp : ndarray
First derivative k0'(z), ..., kn'(z)
See also
--------
spherical_kn
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 8.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n < 1):
n1 = 1
else:
n1 = n
if iscomplex(z) or less(z, 0):
nm, In, Inp, kn, knp = specfun.csphik(n1, z)
else:
nm, kn, knp = specfun.sphk(n1, z)
return kn[:(n+1)], knp[:(n+1)]
@np.deprecate(message="scipy.special.sph_inkn is deprecated in scipy 0.18.0. "
"Use scipy.special.spherical_in and "
"scipy.special.spherical_kn instead. "
"Note that the new function has a different signature.")
def sph_inkn(n, z):
"""Compute spherical Bessel functions in(z), kn(z), and derivatives.
This function computes the value and first derivative of in(z) and kn(z)
for all orders up to and including n.
Parameters
----------
n : int
Maximum order of in and kn to compute
z : complex
Argument at which to evaluate
Returns
-------
in : ndarray
Value of i0(z), ..., in(z)
inp : ndarray
First derivative i0'(z), ..., in'(z)
kn : ndarray
Value of k0(z), ..., kn(z)
knp : ndarray
First derivative k0'(z), ..., kn'(z)
See also
--------
spherical_in
spherical_kn
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 8.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n < 1):
n1 = 1
else:
n1 = n
if iscomplex(z) or less(z, 0):
nm, In, Inp, kn, knp = specfun.csphik(n1, z)
else:
nm, In, Inp = specfun.sphi(n1, z)
nm, kn, knp = specfun.sphk(n1, z)
return In[:(n+1)], Inp[:(n+1)], kn[:(n+1)], knp[:(n+1)]
def riccati_jn(n, x):
r"""Compute Ricatti-Bessel function of the first kind and its derivative.
The Ricatti-Bessel function of the first kind is defined as :math:`x
j_n(x)`, where :math:`j_n` is the spherical Bessel function of the first
kind of order :math:`n`.
This function computes the value and first derivative of the
Riccati-Bessel function for all orders up to and including `n`.
Parameters
----------
n : int
Maximum order of function to compute
x : float
Argument at which to evaluate
Returns
-------
jn : ndarray
Value of j0(x), ..., jn(x)
jnp : ndarray
First derivative j0'(x), ..., jn'(x)
Notes
-----
The computation is carried out via backward recurrence, using the
relation DLMF 10.51.1 [2]_.
Wrapper for a Fortran routine created by Shanjie Zhang and Jianming
Jin [1]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
.. [2] NIST Digital Library of Mathematical Functions.
http://dlmf.nist.gov/10.51.E1
"""
if not (isscalar(n) and isscalar(x)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n == 0):
n1 = 1
else:
n1 = n
nm, jn, jnp = specfun.rctj(n1, x)
return jn[:(n+1)], jnp[:(n+1)]
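# --- Illustrative check (added for exposition; not part of the original module). ---
# Since the Riccati-Bessel function of the first kind is x * j_n(x), the values returned by
# `riccati_jn` can be cross-checked against `spherical_jn` (assumed available in scipy >= 0.18):
def _riccati_jn_cross_check(n=4, x=2.0):
    """Return (riccati_jn values, x * j_k(x) for k = 0..n) for comparison."""
    import numpy as np
    from scipy.special import spherical_jn
    values, _ = riccati_jn(n, x)
    return values, x * spherical_jn(np.arange(n + 1), x)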
def riccati_yn(n, x):
"""Compute Ricatti-Bessel function of the second kind and its derivative.
The Ricatti-Bessel function of the second kind is defined as :math:`x
y_n(x)`, where :math:`y_n` is the spherical Bessel function of the second
kind of order :math:`n`.
This function computes the value and first derivative of the function for
all orders up to and including `n`.
Parameters
----------
n : int
Maximum order of function to compute
x : float
Argument at which to evaluate
Returns
-------
yn : ndarray
Value of y0(x), ..., yn(x)
ynp : ndarray
First derivative y0'(x), ..., yn'(x)
Notes
-----
The computation is carried out via ascending recurrence, using the
relation DLMF 10.51.1 [2]_.
Wrapper for a Fortran routine created by Shanjie Zhang and Jianming
Jin [1]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
.. [2] NIST Digital Library of Mathematical Functions.
http://dlmf.nist.gov/10.51.E1
"""
if not (isscalar(n) and isscalar(x)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n == 0):
n1 = 1
else:
n1 = n
nm, jn, jnp = specfun.rcty(n1, x)
return jn[:(n+1)], jnp[:(n+1)]
def erfinv(y):
"""Inverse function for erf.
"""
return ndtri((y+1)/2.0)/sqrt(2)
def erfcinv(y):
"""Inverse function for erfc.
"""
return -ndtri(0.5*y)/sqrt(2)
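# --- Illustrative check (added for exposition; not part of the original module). ---
# Both inverses are thin wrappers around `ndtri`, the inverse of the standard normal CDF.
# A simple round-trip shows the relationship; the sample value 0.3 is an arbitrary choice.
def _erfinv_roundtrip(y=0.3):
    """erf(erfinv(y)) should recover y; returns both values for comparison."""
    from scipy.special import erf
    return erf(erfinv(y)), y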
def erf_zeros(nt):
"""Compute nt complex zeros of error function erf(z).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if (floor(nt) != nt) or (nt <= 0) or not isscalar(nt):
raise ValueError("Argument must be positive scalar integer.")
return specfun.cerzo(nt)
def fresnelc_zeros(nt):
"""Compute nt complex zeros of cosine Fresnel integral C(z).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if (floor(nt) != nt) or (nt <= 0) or not isscalar(nt):
raise ValueError("Argument must be positive scalar integer.")
return specfun.fcszo(1, nt)
def fresnels_zeros(nt):
"""Compute nt complex zeros of sine Fresnel integral S(z).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if (floor(nt) != nt) or (nt <= 0) or not isscalar(nt):
raise ValueError("Argument must be positive scalar integer.")
return specfun.fcszo(2, nt)
def fresnel_zeros(nt):
"""Compute nt complex zeros of sine and cosine Fresnel integrals S(z) and C(z).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if (floor(nt) != nt) or (nt <= 0) or not isscalar(nt):
raise ValueError("Argument must be positive scalar integer.")
return specfun.fcszo(2, nt), specfun.fcszo(1, nt)
def assoc_laguerre(x, n, k=0.0):
"""Compute the generalized (associated) Laguerre polynomial of degree n and order k.
The polynomial :math:`L^{(k)}_n(x)` is orthogonal over ``[0, inf)``,
with weighting function ``exp(-x) * x**k`` with ``k > -1``.
Notes
-----
`assoc_laguerre` is a simple wrapper around `eval_genlaguerre`, with
reversed argument order ``(x, n, k=0.0) --> (n, k, x)``.
"""
return orthogonal.eval_genlaguerre(n, k, x)
digamma = psi
def polygamma(n, x):
"""Polygamma function n.
This is the nth derivative of the digamma (psi) function.
Parameters
----------
n : array_like of int
The order of the derivative of `psi`.
x : array_like
Where to evaluate the polygamma function.
Returns
-------
polygamma : ndarray
The result.
Examples
--------
>>> from scipy import special
>>> x = [2, 3, 25.5]
>>> special.polygamma(1, x)
array([ 0.64493407, 0.39493407, 0.03999467])
>>> special.polygamma(0, x) == special.psi(x)
array([ True, True, True], dtype=bool)
"""
n, x = asarray(n), asarray(x)
fac2 = (-1.0)**(n+1) * gamma(n+1.0) * zeta(n+1, x)
return where(n == 0, psi(x), fac2)
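# --- Illustrative check (added for exposition; not part of the original module). ---
# The branch above uses (-1)**(n+1) * n! * zeta(n+1, x).  One easy consequence to verify is
# the classical trigamma value at 1, polygamma(1, 1) = pi**2 / 6.
def _trigamma_at_one():
    """Return (polygamma(1, 1), pi**2 / 6); the two should agree to double precision."""
    import numpy as np
    return polygamma(1, 1.0), np.pi ** 2 / 6.0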
def mathieu_even_coef(m, q):
r"""Fourier coefficients for even Mathieu and modified Mathieu functions.
The Fourier series of the even solutions of the Mathieu differential
equation are of the form
.. math:: \mathrm{ce}_{2n}(z, q) = \sum_{k=0}^{\infty} A_{(2n)}^{(2k)} \cos 2kz
.. math:: \mathrm{ce}_{2n+1}(z, q) = \sum_{k=0}^{\infty} A_{(2n+1)}^{(2k+1)} \cos (2k+1)z
This function returns the coefficients :math:`A_{(2n)}^{(2k)}` for even
input m=2n, and the coefficients :math:`A_{(2n+1)}^{(2k+1)}` for odd input
m=2n+1.
Parameters
----------
m : int
Order of Mathieu functions. Must be non-negative.
q : float (>=0)
Parameter of Mathieu functions. Must be non-negative.
Returns
-------
Ak : ndarray
Even or odd Fourier coefficients, corresponding to even or odd m.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
.. [2] NIST Digital Library of Mathematical Functions
http://dlmf.nist.gov/28.4#i
"""
if not (isscalar(m) and isscalar(q)):
raise ValueError("m and q must be scalars.")
if (q < 0):
raise ValueError("q >=0")
if (m != floor(m)) or (m < 0):
raise ValueError("m must be an integer >=0.")
if (q <= 1):
qm = 7.5 + 56.1*sqrt(q) - 134.7*q + 90.7*sqrt(q)*q
else:
qm = 17.0 + 3.1*sqrt(q) - .126*q + .0037*sqrt(q)*q
km = int(qm + 0.5*m)
if km > 251:
print("Warning, too many predicted coefficients.")
kd = 1
m = int(floor(m))
if m % 2:
kd = 2
a = mathieu_a(m, q)
fc = specfun.fcoef(kd, m, q, a)
return fc[:km]
def mathieu_odd_coef(m, q):
r"""Fourier coefficients for even Mathieu and modified Mathieu functions.
The Fourier series of the odd solutions of the Mathieu differential
equation are of the form
.. math:: \mathrm{se}_{2n+1}(z, q) = \sum_{k=0}^{\infty} B_{(2n+1)}^{(2k+1)} \sin (2k+1)z
.. math:: \mathrm{se}_{2n+2}(z, q) = \sum_{k=0}^{\infty} B_{(2n+2)}^{(2k+2)} \sin (2k+2)z
This function returns the coefficients :math:`B_{(2n+2)}^{(2k+2)}` for even
input m=2n+2, and the coefficients :math:`B_{(2n+1)}^{(2k+1)}` for odd
input m=2n+1.
Parameters
----------
m : int
Order of Mathieu functions. Must be non-negative.
q : float (>=0)
Parameter of Mathieu functions. Must be non-negative.
Returns
-------
Bk : ndarray
Even or odd Fourier coefficients, corresponding to even or odd m.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(m) and isscalar(q)):
raise ValueError("m and q must be scalars.")
if (q < 0):
raise ValueError("q >=0")
if (m != floor(m)) or (m <= 0):
raise ValueError("m must be an integer > 0")
if (q <= 1):
qm = 7.5 + 56.1*sqrt(q) - 134.7*q + 90.7*sqrt(q)*q
else:
qm = 17.0 + 3.1*sqrt(q) - .126*q + .0037*sqrt(q)*q
km = int(qm + 0.5*m)
if km > 251:
print("Warning, too many predicted coefficients.")
kd = 4
m = int(floor(m))
if m % 2:
kd = 3
b = mathieu_b(m, q)
fc = specfun.fcoef(kd, m, q, b)
return fc[:km]
def lpmn(m, n, z):
"""Sequence of associated Legendre functions of the first kind.
Computes the associated Legendre function of the first kind of order m and
degree n, ``Pmn(z)`` = :math:`P_n^m(z)`, and its derivative, ``Pmn'(z)``.
Returns two arrays of size ``(m+1, n+1)`` containing ``Pmn(z)`` and
``Pmn'(z)`` for all orders from ``0..m`` and degrees from ``0..n``.
This function takes a real argument ``z``. For complex arguments ``z``
use clpmn instead.
Parameters
----------
m : int
``|m| <= n``; the order of the Legendre function.
n : int
where ``n >= 0``; the degree of the Legendre function. Often
called ``l`` (lower case L) in descriptions of the associated
Legendre function
z : float
Input value.
Returns
-------
Pmn_z : (m+1, n+1) array
Values for all orders 0..m and degrees 0..n
Pmn_d_z : (m+1, n+1) array
Derivatives for all orders 0..m and degrees 0..n
See Also
--------
clpmn: associated Legendre functions of the first kind for complex z
Notes
-----
In the interval (-1, 1), Ferrer's function of the first kind is
returned. The phase convention used for the intervals (1, inf)
and (-inf, -1) is such that the result is always real.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
.. [2] NIST Digital Library of Mathematical Functions
http://dlmf.nist.gov/14.3
"""
if not isscalar(m) or (abs(m) > n):
raise ValueError("m must be <= n.")
if not isscalar(n) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if not isscalar(z):
raise ValueError("z must be scalar.")
if iscomplex(z):
raise ValueError("Argument must be real. Use clpmn instead.")
if (m < 0):
mp = -m
mf, nf = mgrid[0:mp+1, 0:n+1]
sv = errprint(0)
if abs(z) < 1:
# Ferrer function; DLMF 14.9.3
fixarr = where(mf > nf, 0.0,
(-1)**mf * gamma(nf-mf+1) / gamma(nf+mf+1))
else:
# Match to clpmn; DLMF 14.9.13
fixarr = where(mf > nf, 0.0, gamma(nf-mf+1) / gamma(nf+mf+1))
sv = errprint(sv)
else:
mp = m
p, pd = specfun.lpmn(mp, n, z)
if (m < 0):
p = p * fixarr
pd = pd * fixarr
return p, pd
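# --- Illustrative check (added for exposition; not part of the original module). ---
# For order m = 0 the associated Legendre functions reduce to the Legendre polynomials, so the
# first row returned by `lpmn` can be compared with `eval_legendre` (an existing scipy routine):
def _lpmn_order_zero_check(n=3, z=0.4):
    """Return (m = 0 row of lpmn, P_0(z)..P_n(z)) for comparison."""
    import numpy as np
    from scipy.special import eval_legendre
    p, _ = lpmn(0, n, z)
    return p[0], eval_legendre(np.arange(n + 1), z)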
def clpmn(m, n, z, type=3):
"""Associated Legendre function of the first kind for complex arguments.
Computes the associated Legendre function of the first kind of order m and
degree n, ``Pmn(z)`` = :math:`P_n^m(z)`, and its derivative, ``Pmn'(z)``.
Returns two arrays of size ``(m+1, n+1)`` containing ``Pmn(z)`` and
``Pmn'(z)`` for all orders from ``0..m`` and degrees from ``0..n``.
Parameters
----------
m : int
``|m| <= n``; the order of the Legendre function.
n : int
where ``n >= 0``; the degree of the Legendre function. Often
called ``l`` (lower case L) in descriptions of the associated
Legendre function
z : float or complex
Input value.
type : int, optional
takes values 2 or 3
2: cut on the real axis ``|x| > 1``
3: cut on the real axis ``-1 < x < 1`` (default)
Returns
-------
Pmn_z : (m+1, n+1) array
Values for all orders ``0..m`` and degrees ``0..n``
Pmn_d_z : (m+1, n+1) array
Derivatives for all orders ``0..m`` and degrees ``0..n``
See Also
--------
lpmn: associated Legendre functions of the first kind for real z
Notes
-----
By default, i.e. for ``type=3``, phase conventions are chosen according
to [1]_ such that the function is analytic. The cut lies on the interval
(-1, 1). Approaching the cut from above or below in general yields a phase
factor with respect to Ferrer's function of the first kind
(cf. `lpmn`).
For ``type=2`` a cut at ``|x| > 1`` is chosen. Approaching the real values
on the interval (-1, 1) in the complex plane yields Ferrer's function
of the first kind.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
.. [2] NIST Digital Library of Mathematical Functions
http://dlmf.nist.gov/14.21
"""
if not isscalar(m) or (abs(m) > n):
raise ValueError("m must be <= n.")
if not isscalar(n) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if not isscalar(z):
raise ValueError("z must be scalar.")
if not(type == 2 or type == 3):
raise ValueError("type must be either 2 or 3.")
if (m < 0):
mp = -m
mf, nf = mgrid[0:mp+1, 0:n+1]
sv = errprint(0)
if type == 2:
fixarr = where(mf > nf, 0.0,
(-1)**mf * gamma(nf-mf+1) / gamma(nf+mf+1))
else:
fixarr = where(mf > nf, 0.0, gamma(nf-mf+1) / gamma(nf+mf+1))
sv = errprint(sv)
else:
mp = m
p, pd = specfun.clpmn(mp, n, real(z), imag(z), type)
if (m < 0):
p = p * fixarr
pd = pd * fixarr
return p, pd
def lqmn(m, n, z):
"""Sequence of associated Legendre functions of the second kind.
Computes the associated Legendre function of the second kind of order m and
degree n, ``Qmn(z)`` = :math:`Q_n^m(z)`, and its derivative, ``Qmn'(z)``.
Returns two arrays of size ``(m+1, n+1)`` containing ``Qmn(z)`` and
``Qmn'(z)`` for all orders from ``0..m`` and degrees from ``0..n``.
Parameters
----------
m : int
``|m| <= n``; the order of the Legendre function.
n : int
where ``n >= 0``; the degree of the Legendre function. Often
called ``l`` (lower case L) in descriptions of the associated
Legendre function
z : complex
Input value.
Returns
-------
Qmn_z : (m+1, n+1) array
Values for all orders 0..m and degrees 0..n
Qmn_d_z : (m+1, n+1) array
Derivatives for all orders 0..m and degrees 0..n
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(m) or (m < 0):
raise ValueError("m must be a non-negative integer.")
if not isscalar(n) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if not isscalar(z):
raise ValueError("z must be scalar.")
m = int(m)
n = int(n)
# Ensure neither m nor n == 0
mm = max(1, m)
nn = max(1, n)
if iscomplex(z):
q, qd = specfun.clqmn(mm, nn, z)
else:
q, qd = specfun.lqmn(mm, nn, z)
return q[:(m+1), :(n+1)], qd[:(m+1), :(n+1)]
def bernoulli(n):
"""Bernoulli numbers B0..Bn (inclusive).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(n) or (n < 0):
raise ValueError("n must be a non-negative integer.")
n = int(n)
if (n < 2):
n1 = 2
else:
n1 = n
return specfun.bernob(int(n1))[:(n+1)]
def euler(n):
"""Euler numbers E0..En (inclusive).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(n) or (n < 0):
raise ValueError("n must be a non-negative integer.")
n = int(n)
if (n < 2):
n1 = 2
else:
n1 = n
return specfun.eulerb(n1)[:(n+1)]
def lpn(n, z):
"""Legendre function of the first kind.
Compute sequence of Legendre functions of the first kind (polynomials),
Pn(z) and derivatives for all degrees from 0 to n (inclusive).
See also special.legendre for polynomial class.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n < 1):
n1 = 1
else:
n1 = n
if iscomplex(z):
pn, pd = specfun.clpn(n1, z)
else:
pn, pd = specfun.lpn(n1, z)
return pn[:(n+1)], pd[:(n+1)]
def lqn(n, z):
"""Legendre function of the second kind.
Compute sequence of Legendre functions of the second kind, Qn(z) and
derivatives for all degrees from 0 to n (inclusive).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n < 1):
n1 = 1
else:
n1 = n
if iscomplex(z):
qn, qd = specfun.clqn(n1, z)
else:
qn, qd = specfun.lqnb(n1, z)
return qn[:(n+1)], qd[:(n+1)]
def ai_zeros(nt):
"""
Compute `nt` zeros and values of the Airy function Ai and its derivative.
Computes the first `nt` zeros, `a`, of the Airy function Ai(x);
first `nt` zeros, `ap`, of the derivative of the Airy function Ai'(x);
the corresponding values Ai(a');
and the corresponding values Ai'(a).
Parameters
----------
nt : int
Number of zeros to compute
Returns
-------
a : ndarray
First `nt` zeros of Ai(x)
ap : ndarray
First `nt` zeros of Ai'(x)
ai : ndarray
Values of Ai(x) evaluated at first `nt` zeros of Ai'(x)
aip : ndarray
Values of Ai'(x) evaluated at first `nt` zeros of Ai(x)
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
kf = 1
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be a positive integer scalar.")
return specfun.airyzo(nt, kf)
def bi_zeros(nt):
"""
Compute `nt` zeros and values of the Airy function Bi and its derivative.
Computes the first `nt` zeros, b, of the Airy function Bi(x);
first `nt` zeros, b', of the derivative of the Airy function Bi'(x);
the corresponding values Bi(b');
and the corresponding values Bi'(b).
Parameters
----------
nt : int
Number of zeros to compute
Returns
-------
b : ndarray
First `nt` zeros of Bi(x)
bp : ndarray
First `nt` zeros of Bi'(x)
bi : ndarray
Values of Bi(x) evaluated at first `nt` zeros of Bi'(x)
bip : ndarray
Values of Bi'(x) evaluated at first `nt` zeros of Bi(x)
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
kf = 2
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be a positive integer scalar.")
return specfun.airyzo(nt, kf)
def lmbda(v, x):
r"""Jahnke-Emden Lambda function, Lambdav(x).
This function is defined as [2]_,
.. math:: \Lambda_v(x) = \Gamma(v+1) \frac{J_v(x)}{(x/2)^v},
where :math:`\Gamma` is the gamma function and :math:`J_v` is the
Bessel function of the first kind.
Parameters
----------
v : float
Order of the Lambda function
x : float
Value at which to evaluate the function and derivatives
Returns
-------
vl : ndarray
Values of Lambda_vi(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v.
dl : ndarray
Derivatives Lambda_vi'(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
.. [2] Jahnke, E. and Emde, F. "Tables of Functions with Formulae and
Curves" (4th ed.), Dover, 1945
"""
if not (isscalar(v) and isscalar(x)):
raise ValueError("arguments must be scalars.")
if (v < 0):
raise ValueError("argument must be > 0.")
n = int(v)
v0 = v - n
if (n < 1):
n1 = 1
else:
n1 = n
v1 = n1 + v0
if (v != floor(v)):
vm, vl, dl = specfun.lamv(v1, x)
else:
vm, vl, dl = specfun.lamn(v1, x)
return vl[:(n+1)], dl[:(n+1)]
def pbdv_seq(v, x):
"""Parabolic cylinder functions Dv(x) and derivatives.
Parameters
----------
v : float
Order of the parabolic cylinder function
x : float
Value at which to evaluate the function and derivatives
Returns
-------
dv : ndarray
Values of D_vi(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v.
dp : ndarray
Derivatives D_vi'(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 13.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(v) and isscalar(x)):
raise ValueError("arguments must be scalars.")
n = int(v)
v0 = v-n
if (n < 1):
n1 = 1
else:
n1 = n
v1 = n1 + v0
dv, dp, pdf, pdd = specfun.pbdv(v1, x)
return dv[:n1+1], dp[:n1+1]
def pbvv_seq(v, x):
"""Parabolic cylinder functions Vv(x) and derivatives.
Parameters
----------
v : float
Order of the parabolic cylinder function
x : float
Value at which to evaluate the function and derivatives
Returns
-------
dv : ndarray
Values of V_vi(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v.
dp : ndarray
Derivatives V_vi'(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 13.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(v) and isscalar(x)):
raise ValueError("arguments must be scalars.")
n = int(v)
v0 = v-n
if (n <= 1):
n1 = 1
else:
n1 = n
v1 = n1 + v0
dv, dp, pdf, pdd = specfun.pbvv(v1, x)
return dv[:n1+1], dp[:n1+1]
def pbdn_seq(n, z):
"""Parabolic cylinder functions Dn(z) and derivatives.
Parameters
----------
n : int
Order of the parabolic cylinder function
z : complex
Value at which to evaluate the function and derivatives
Returns
-------
dv : ndarray
Values of D_i(z), for i=0, ..., i=n.
dp : ndarray
Derivatives D_i'(z), for i=0, ..., i=n.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 13.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (floor(n) != n):
raise ValueError("n must be an integer.")
if (abs(n) <= 1):
n1 = 1
else:
n1 = n
cpb, cpd = specfun.cpbdn(n1, z)
return cpb[:n1+1], cpd[:n1+1]
def ber_zeros(nt):
"""Compute nt zeros of the Kelvin function ber(x).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be positive integer scalar.")
return specfun.klvnzo(nt, 1)
def bei_zeros(nt):
"""Compute nt zeros of the Kelvin function bei(x).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be positive integer scalar.")
return specfun.klvnzo(nt, 2)
def ker_zeros(nt):
"""Compute nt zeros of the Kelvin function ker(x).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be positive integer scalar.")
return specfun.klvnzo(nt, 3)
def kei_zeros(nt):
"""Compute nt zeros of the Kelvin function kei(x).
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be positive integer scalar.")
return specfun.klvnzo(nt, 4)
def berp_zeros(nt):
"""Compute nt zeros of the Kelvin function ber'(x).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be positive integer scalar.")
return specfun.klvnzo(nt, 5)
def beip_zeros(nt):
"""Compute nt zeros of the Kelvin function bei'(x).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be positive integer scalar.")
return specfun.klvnzo(nt, 6)
def kerp_zeros(nt):
"""Compute nt zeros of the Kelvin function ker'(x).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be positive integer scalar.")
return specfun.klvnzo(nt, 7)
def keip_zeros(nt):
"""Compute nt zeros of the Kelvin function kei'(x).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be positive integer scalar.")
return specfun.klvnzo(nt, 8)
def kelvin_zeros(nt):
"""Compute nt zeros of all Kelvin functions.
Returned in a length-8 tuple of arrays of length nt. The tuple contains
the arrays of zeros of (ber, bei, ker, kei, ber', bei', ker', kei').
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be positive integer scalar.")
return (specfun.klvnzo(nt, 1),
specfun.klvnzo(nt, 2),
specfun.klvnzo(nt, 3),
specfun.klvnzo(nt, 4),
specfun.klvnzo(nt, 5),
specfun.klvnzo(nt, 6),
specfun.klvnzo(nt, 7),
specfun.klvnzo(nt, 8))
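# --- Illustrative check (added for exposition; not part of the original module). ---
# `kelvin_zeros` simply bundles the eight individual helpers above in the documented order
# (ber, bei, ker, kei, ber', bei', ker', kei'); the sketch below confirms the correspondence.
def _kelvin_zeros_bundle_check(nt=3):
    """Return True if kelvin_zeros(nt) matches the eight individual *_zeros results."""
    bundled = kelvin_zeros(nt)
    separate = (ber_zeros(nt), bei_zeros(nt), ker_zeros(nt), kei_zeros(nt),
                berp_zeros(nt), beip_zeros(nt), kerp_zeros(nt), keip_zeros(nt))
    return all((b == s).all() for b, s in zip(bundled, separate))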
def pro_cv_seq(m, n, c):
"""Characteristic values for prolate spheroidal wave functions.
Compute a sequence of characteristic values for the prolate
spheroidal wave functions for mode m and n'=m..n and spheroidal
parameter c.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(m) and isscalar(n) and isscalar(c)):
raise ValueError("Arguments must be scalars.")
if (n != floor(n)) or (m != floor(m)):
raise ValueError("Modes must be integers.")
if (n-m > 199):
raise ValueError("Difference between n and m is too large.")
maxL = n-m+1
return specfun.segv(m, n, c, 1)[1][:maxL]
def obl_cv_seq(m, n, c):
"""Characteristic values for oblate spheroidal wave functions.
Compute a sequence of characteristic values for the oblate
spheroidal wave functions for mode m and n'=m..n and spheroidal
parameter c.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(m) and isscalar(n) and isscalar(c)):
raise ValueError("Arguments must be scalars.")
if (n != floor(n)) or (m != floor(m)):
raise ValueError("Modes must be integers.")
if (n-m > 199):
raise ValueError("Difference between n and m is too large.")
maxL = n-m+1
return specfun.segv(m, n, c, -1)[1][:maxL]
def ellipk(m):
"""Complete elliptic integral of the first kind.
This function is defined as
.. math:: K(m) = \\int_0^{\\pi/2} [1 - m \\sin(t)^2]^{-1/2} dt
Parameters
----------
m : array_like
The parameter of the elliptic integral.
Returns
-------
K : array_like
Value of the elliptic integral.
Notes
-----
For more precision around point m = 1, use `ellipkm1`, which this
function calls.
See Also
--------
ellipkm1 : Complete elliptic integral of the first kind around m = 1
ellipkinc : Incomplete elliptic integral of the first kind
ellipe : Complete elliptic integral of the second kind
ellipeinc : Incomplete elliptic integral of the second kind
"""
return ellipkm1(1 - asarray(m))
def agm(a, b):
"""Arithmetic, Geometric Mean.
Start with a_0=a and b_0=b and iteratively compute
a_{n+1} = (a_n+b_n)/2
b_{n+1} = sqrt(a_n*b_n)
until a_n=b_n. The result is agm(a, b)
agm(a, b)=agm(b, a)
agm(a, a) = a
min(a, b) < agm(a, b) < max(a, b)
"""
s = a + b + 0.0
return (pi / 4) * s / ellipkm1(4 * a * b / s ** 2)
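# --- Illustrative sketch (added for exposition; not part of the original module). ---
# The docstring describes the defining iteration; spelling it out makes the closed form above
# (via ellipkm1) easy to cross-check.  Assumes a, b > 0; the tolerance is an arbitrary choice.
def _agm_by_iteration(a, b, tol=1e-15):
    """Iterate a <- (a+b)/2, b <- sqrt(a*b) until convergence and return the common limit."""
    from math import sqrt
    while abs(a - b) > tol * max(abs(a), abs(b)):
        a, b = 0.5 * (a + b), sqrt(a * b)
    return a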
def comb(N, k, exact=False, repetition=False):
"""The number of combinations of N things taken k at a time.
This is often expressed as "N choose k".
Parameters
----------
N : int, ndarray
Number of things.
k : int, ndarray
Number of elements taken.
exact : bool, optional
If `exact` is False, then floating point precision is used, otherwise
exact long integer is computed.
repetition : bool, optional
If `repetition` is True, then the number of combinations with
repetition is computed.
Returns
-------
val : int, ndarray
The total number of combinations.
See Also
--------
binom : Binomial coefficient ufunc
Notes
-----
- Array arguments accepted only for exact=False case.
- If k > N, N < 0, or k < 0, then a 0 is returned.
Examples
--------
>>> from scipy.special import comb
>>> k = np.array([3, 4])
>>> n = np.array([10, 10])
>>> comb(n, k, exact=False)
array([ 120., 210.])
>>> comb(10, 3, exact=True)
120L
>>> comb(10, 3, exact=True, repetition=True)
220L
"""
if repetition:
return comb(N + k - 1, k, exact)
if exact:
return _comb_int(N, k)
else:
k, N = asarray(k), asarray(N)
cond = (k <= N) & (N >= 0) & (k >= 0)
vals = binom(N, k)
if isinstance(vals, np.ndarray):
vals[~cond] = 0
elif not cond:
vals = np.float64(0)
return vals
def perm(N, k, exact=False):
"""Permutations of N things taken k at a time, i.e., k-permutations of N.
It's also known as "partial permutations".
Parameters
----------
N : int, ndarray
Number of things.
k : int, ndarray
Number of elements taken.
exact : bool, optional
If `exact` is False, then floating point precision is used, otherwise
exact long integer is computed.
Returns
-------
val : int, ndarray
The number of k-permutations of N.
Notes
-----
- Array arguments accepted only for exact=False case.
- If k > N, N < 0, or k < 0, then a 0 is returned.
Examples
--------
>>> from scipy.special import perm
>>> k = np.array([3, 4])
>>> n = np.array([10, 10])
>>> perm(n, k)
array([ 720., 5040.])
>>> perm(10, 3, exact=True)
720
"""
if exact:
if (k > N) or (N < 0) or (k < 0):
return 0
val = 1
for i in xrange(N - k + 1, N + 1):
val *= i
return val
else:
k, N = asarray(k), asarray(N)
cond = (k <= N) & (N >= 0) & (k >= 0)
vals = poch(N - k + 1, k)
if isinstance(vals, np.ndarray):
vals[~cond] = 0
elif not cond:
vals = np.float64(0)
return vals
# http://stackoverflow.com/a/16327037/125507
def _range_prod(lo, hi):
"""
Product of a range of numbers.
Returns the product of
lo * (lo+1) * (lo+2) * ... * (hi-2) * (hi-1) * hi
= hi! / (lo-1)!
Breaks into smaller products first for speed:
_range_prod(2, 9) = ((2*3)*(4*5))*((6*7)*(8*9))
"""
if lo + 1 < hi:
mid = (hi + lo) // 2
return _range_prod(lo, mid) * _range_prod(mid + 1, hi)
if lo == hi:
return lo
return lo * hi
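# --- Illustrative check (added for exposition; not part of the original module). ---
# The divide-and-conquer product above equals hi! / (lo-1)!, which is easy to confirm with
# math.factorial for small arguments (the specific numbers below are arbitrary choices).
def _range_prod_check(lo=3, hi=8):
    """Return (_range_prod(lo, hi), hi! // (lo-1)!) for comparison."""
    import math
    return _range_prod(lo, hi), math.factorial(hi) // math.factorial(lo - 1)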
def factorial(n, exact=False):
"""
The factorial of a number or array of numbers.
The factorial of non-negative integer `n` is the product of all
positive integers less than or equal to `n`::
n! = n * (n - 1) * (n - 2) * ... * 1
Parameters
----------
n : int or array_like of ints
Input values. If ``n < 0``, the return value is 0.
exact : bool, optional
If True, calculate the answer exactly using long integer arithmetic.
If False, result is approximated in floating point rapidly using the
`gamma` function.
Default is False.
Returns
-------
nf : float or int or ndarray
Factorial of `n`, as integer or float depending on `exact`.
Notes
-----
For arrays with ``exact=True``, the factorial is computed only once, for
the largest input, with each other result computed in the process.
The output dtype is increased to ``int64`` or ``object`` if necessary.
With ``exact=False`` the factorial is approximated using the gamma
function:
.. math:: n! = \\Gamma(n+1)
Examples
--------
>>> from scipy.special import factorial
>>> arr = np.array([3, 4, 5])
>>> factorial(arr, exact=False)
array([ 6., 24., 120.])
>>> factorial(arr, exact=True)
array([ 6, 24, 120])
>>> factorial(5, exact=True)
120L
"""
if exact:
if np.ndim(n) == 0:
return 0 if n < 0 else math.factorial(n)
else:
n = asarray(n)
un = np.unique(n).astype(object)
# Convert to object array of long ints if np.int can't handle size
if un[-1] > 20:
dt = object
elif un[-1] > 12:
dt = np.int64
else:
dt = np.int
out = np.empty_like(n, dtype=dt)
# Handle invalid/trivial values
un = un[un > 1]
out[n < 2] = 1
out[n < 0] = 0
# Calculate products of each range of numbers
if un.size:
val = math.factorial(un[0])
out[n == un[0]] = val
for i in xrange(len(un) - 1):
prev = un[i] + 1
current = un[i + 1]
val *= _range_prod(prev, current)
out[n == current] = val
return out
else:
n = asarray(n)
vals = gamma(n + 1)
return where(n >= 0, vals, 0)
def factorial2(n, exact=False):
"""Double factorial.
This is the factorial with every second value skipped. E.g., ``7!! = 7 * 5
* 3 * 1``. It can be approximated numerically as::
n!! = special.gamma(n/2+1)*2**((n+1)/2)/sqrt(pi)  n odd
= 2**(n/2) * (n/2)! n even
Parameters
----------
n : int or array_like
Calculate ``n!!``. Arrays are only supported with `exact` set
to False. If ``n < 0``, the return value is 0.
exact : bool, optional
The result can be approximated rapidly using the gamma-formula
above (default). If `exact` is set to True, calculate the
answer exactly using integer arithmetic.
Returns
-------
nff : float or int
Double factorial of `n`, as an int or a float depending on
`exact`.
Examples
--------
>>> from scipy.special import factorial2
>>> factorial2(7, exact=False)
array(105.00000000000001)
>>> factorial2(7, exact=True)
105L
"""
if exact:
if n < -1:
return 0
if n <= 0:
return 1
val = 1
for k in xrange(n, 0, -2):
val *= k
return val
else:
n = asarray(n)
vals = zeros(n.shape, 'd')
cond1 = (n % 2) & (n >= -1)
cond2 = (1-(n % 2)) & (n >= -1)
oddn = extract(cond1, n)
evenn = extract(cond2, n)
nd2o = oddn / 2.0
nd2e = evenn / 2.0
place(vals, cond1, gamma(nd2o + 1) / sqrt(pi) * pow(2.0, nd2o + 0.5))
place(vals, cond2, gamma(nd2e + 1) * pow(2.0, nd2e))
return vals
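# --- Illustrative check (added for exposition; not part of the original module). ---
# The gamma-based branch above approximates the exact integer result; comparing the two for a
# small odd argument shows the agreement (7!! = 105).
def _factorial2_check(n=7):
    """Return (exact double factorial, gamma-based approximation) for comparison."""
    return factorial2(n, exact=True), float(factorial2(n, exact=False))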
def factorialk(n, k, exact=True):
"""Multifactorial of n of order k, n(!!...!).
This is the multifactorial of n skipping k values. For example,
factorialk(17, 4) = 17!!!! = 17 * 13 * 9 * 5 * 1
In particular, for any integer ``n``, we have
factorialk(n, 1) = factorial(n)
factorialk(n, 2) = factorial2(n)
Parameters
----------
n : int
Calculate multifactorial. If `n` < 0, the return value is 0.
k : int
Order of multifactorial.
exact : bool, optional
If exact is set to True, calculate the answer exactly using
integer arithmetic.
Returns
-------
val : int
Multifactorial of `n`.
Raises
------
NotImplementedError
Raises when exact is False
Examples
--------
>>> from scipy.special import factorialk
>>> factorialk(5, 1, exact=True)
120L
>>> factorialk(5, 3, exact=True)
10L
"""
if exact:
if n < 1-k:
return 0
if n <= 0:
return 1
val = 1
for j in xrange(n, 0, -k):
val = val*j
return val
else:
raise NotImplementedError
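# --- Illustrative check (added for exposition; not part of the original module). ---
# As the docstring notes, the multifactorial reduces to factorial and factorial2 for k = 1 and
# k = 2; the sketch below exercises both identities for one small argument.
def _factorialk_reductions(n=9):
    """Return (factorialk(n,1) == n!, factorialk(n,2) == n!!) as booleans."""
    return (factorialk(n, 1, exact=True) == factorial(n, exact=True),
            factorialk(n, 2, exact=True) == factorial2(n, exact=True))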
def zeta(x, q=None, out=None):
r"""
Riemann zeta function.
The two-argument version is the Hurwitz zeta function:
.. math:: \zeta(x, q) = \sum_{k=0}^{\infty} \frac{1}{(k + q)^x},
Riemann zeta function corresponds to ``q = 1``.
See also
--------
zetac
"""
if q is None:
q = 1
return _zeta(x, q, out)
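# --- Illustrative check (added for exposition; not part of the original module). ---
# With q = 1 the Hurwitz sum reduces to the Riemann zeta function, so zeta(2) should give the
# Basel value pi**2 / 6; the helper returns both for comparison.
def _zeta_basel_check():
    """Return (zeta(2), pi**2 / 6)."""
    import numpy as np
    return zeta(2.0), np.pi ** 2 / 6.0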
| bsd-3-clause |
terkkila/scikit-learn | examples/linear_model/plot_ols_ridge_variance.py | 387 | 2060 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Ordinary Least Squares and Ridge Regression Variance
=========================================================
Due to the few points in each dimension and the straight
line that linear regression uses to follow these points
as well as it can, noise on the observations will cause
great variance as shown in the first plot. Every line's slope
can vary quite a bit for each prediction due to the noise
induced in the observations.
Ridge regression is basically minimizing a penalised version
of the least-squares function. The penalisation `shrinks` the
value of the regression coefficients.
Despite the few data points in each dimension, the slope
of the prediction is much more stable and the variance
in the line itself is greatly reduced, in comparison to that
of the standard linear regression.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
X_train = np.c_[.5, 1].T
y_train = [.5, 1]
X_test = np.c_[0, 2].T
np.random.seed(0)
classifiers = dict(ols=linear_model.LinearRegression(),
ridge=linear_model.Ridge(alpha=.1))
fignum = 1
for name, clf in classifiers.items():
fig = plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.title(name)
ax = plt.axes([.12, .12, .8, .8])
for _ in range(6):
this_X = .1 * np.random.normal(size=(2, 1)) + X_train
clf.fit(this_X, y_train)
ax.plot(X_test, clf.predict(X_test), color='.5')
ax.scatter(this_X, y_train, s=3, c='.5', marker='o', zorder=10)
clf.fit(X_train, y_train)
ax.plot(X_test, clf.predict(X_test), linewidth=2, color='blue')
ax.scatter(X_train, y_train, s=30, c='r', marker='+', zorder=10)
ax.set_xticks(())
ax.set_yticks(())
ax.set_ylim((0, 1.6))
ax.set_xlabel('X')
ax.set_ylabel('y')
ax.set_xlim(0, 2)
fignum += 1
plt.show()
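# Illustrative sketch (added for exposition; not part of the original example): the point of
# the figures above is that the ridge penalty shrinks the fitted slope relative to OLS, which
# is what stabilises the prediction lines.  The helper below just exposes the two slopes; the
# alpha value mirrors the one used in the plots.
def _compare_slopes():
    ols = linear_model.LinearRegression().fit(X_train, y_train)
    ridge = linear_model.Ridge(alpha=.1).fit(X_train, y_train)
    return ols.coef_, ridge.coef_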
| bsd-3-clause |
robin-lai/scikit-learn | sklearn/utils/tests/test_fixes.py | 281 | 1829 | # Authors: Gael Varoquaux <[email protected]>
# Justin Vincent
# Lars Buitinck
# License: BSD 3 clause
import numpy as np
from nose.tools import assert_equal
from nose.tools import assert_false
from nose.tools import assert_true
from numpy.testing import (assert_almost_equal,
assert_array_almost_equal)
from sklearn.utils.fixes import divide, expit
from sklearn.utils.fixes import astype
def test_expit():
# Check numerical stability of expit (logistic function).
# Simulate our previous Cython implementation, based on
#http://fa.bianp.net/blog/2013/numerical-optimizers-for-logistic-regression
assert_almost_equal(expit(1000.), 1. / (1. + np.exp(-1000.)), decimal=16)
assert_almost_equal(expit(-1000.), np.exp(-1000.) / (1. + np.exp(-1000.)),
decimal=16)
x = np.arange(10)
out = np.zeros_like(x, dtype=np.float32)
assert_array_almost_equal(expit(x), expit(x, out=out))
def test_divide():
assert_equal(divide(.6, 1), .600000000000)
def test_astype_copy_memory():
a_int32 = np.ones(3, np.int32)
# Check that dtype conversion works
b_float32 = astype(a_int32, dtype=np.float32, copy=False)
assert_equal(b_float32.dtype, np.float32)
# Changing dtype forces a copy even if copy=False
assert_false(np.may_share_memory(b_float32, a_int32))
# Check that copy can be skipped if requested dtype match
c_int32 = astype(a_int32, dtype=np.int32, copy=False)
assert_true(c_int32 is a_int32)
# Check that copy can be forced, and is the case by default:
d_int32 = astype(a_int32, dtype=np.int32, copy=True)
assert_false(np.may_share_memory(d_int32, a_int32))
e_int32 = astype(a_int32, dtype=np.int32)
assert_false(np.may_share_memory(e_int32, a_int32))
| bsd-3-clause |
NMTHydro/Recharge | zobs/orecharge/Point_Analysis/ETRM_Point_SAUA_spider_only.py | 1 | 9527 | # ===============================================================================
# Copyright 2016 dgketchum
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance
# with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# =================================IMPORTS=======================================
from datetime import datetime
import matplotlib.pyplot as plt
from matplotlib import rc
from osgeo import ogr
import etrm_daily_SA_2MAY16
import extract_readIn
import numpy as np
import pandas
rc('mathtext', default='regular')
save_path = 'C:\\Users\\David\\Documents\\ArcGIS\\results\\Sensitivity_analysis\\data'
pandas.set_option('display.max_rows', 3000)
pandas.set_option('display.max_columns', 3000)
pandas.set_option('display.width', 10000)
pandas.set_option('display.precision', 3)
pandas.options.display.float_format = '${:,.2f}'.format
np.set_printoptions(threshold=3000, edgeitems=5000, precision=3)
pandas.set_option('display.height', 5000)
pandas.set_option('display.max_rows', 5000)
startTime = datetime.now()
print startTime
def print_full(x):
pandas.set_option('display.max_rows', len(x))
print(x)
pandas.reset_option('display.max_rows')
print (x)
def round_to_value(number, roundto):
return round(number / roundto) * roundto
def dfv(begin_ind, end_ind):
return df.iloc[begin_ind, end_ind]
def save_df(df, save_path):
df.to_csv('{}\\data1.csv'.format(save_path), sep=',')
np.set_printoptions(linewidth=700, precision=2, threshold=2500)
# Set start datetime object
start, end = datetime(2000, 1, 1), datetime(2013, 12, 31)
# Define winter and summer for SNOW algorithm
sWin, eWin = datetime(start.year, 11, 1), datetime(end.year, 3, 30)
# Define monsoon for Ksat, presumed storm intensity
sMon, eMon = datetime(start.year, 6, 1), datetime(start.year, 10, 1)
temps = range(-5, 6)
all_pct = [x * 0.1 for x in range(5, 16)]
ndvi_range = np.linspace(0.9, 1.7, 11)
ndvi_range = np.array([round_to_value(x, 0.05) for x in ndvi_range])
var_arrs = []
y = 0
for x in range(0, 6):
ones = np.ones((5, 11), dtype=float)
zeros = [x * 0.0 for x in range(5, 16)]
norm_ndvi = np.array([1.25 for x in zeros])
if y == 0:
arr = np.insert(ones, y, temps, axis=0)
arr = np.insert(arr, 4, norm_ndvi, axis=0)
arr = arr[0:6]
var_arrs.append(arr)
arr = []
elif y == 4:
arr = np.insert(ones, 0, zeros, axis=0)
arr = np.insert(arr, y, ndvi_range, axis=0)
arr = arr[0:6]
var_arrs.append(arr)
arr = []
elif y == 5:
arr = np.insert(ones, 0, zeros, axis=0)
arr = np.insert(arr, 4, norm_ndvi, axis=0)
arr = arr[0:5]
arr = np.insert(arr, y, all_pct, axis=0)
var_arrs.append(arr)
arr = []
else:
arr = np.insert(ones, 0, zeros, axis=0)
arr = np.insert(arr, y, all_pct, axis=0)
arr = np.insert(arr, 4, norm_ndvi, axis=0)
arr = arr[0:6]
var_arrs.append(arr)
arr = []
y += 1
factors = ['Temperature', 'Precipitation', 'Reference ET', 'Total Water Storage (TAW)',
'Vegetation Density (NDVI)', 'Soil Evaporation Depth']
normalize_list = [2, 0.20, 0.20, 2, 0.20, 0.50]
normalize_list = [1 for x in range(0, len(normalize_list) + 1)]
site_list = ['Bateman', 'Navajo_Whiskey_Ck', 'Quemazon', 'Sierra_Blanca', 'SB_1', 'SB_2', 'SB_4', 'SB_5', 'VC_1',
'VC_2', 'VC_3', 'CH_1', 'CH_3', 'MG_1', 'MG_2', 'WHLR_PK', 'LP', 'South_Baldy',
'Water_Canyon', 'La_Jencia', 'Socorro']
df = pandas.DataFrame(columns=factors, index=site_list)
df_norm = pandas.DataFrame(columns=factors, index=site_list)
yy = 0
for var_arr in var_arrs:
factor = factors[yy]
print factor
print ''
shp_filename = 'C:\\Recharge_GIS\\qgis_layers\\sensitivity_points\\SA_pnts29APR16_UTM.shp'
ds = ogr.Open(shp_filename)
lyr = ds.GetLayer()
defs = lyr.GetLayerDefn()
for feat in lyr:
name = feat.GetField("Name")
name = name.replace(' ', '_')
geom = feat.GetGeometryRef()
mx, my = geom.GetX(), geom.GetY()
path = 'C:\Users\David\Documents\Recharge\Sensitivity_analysis\SA_extracts'
file_name = '{}\\{}_extract.csv'.format(path, name)
print file_name
extract_data = extract_readIn.read_std_extract_csv(file_name)
rslts = []
for col in var_arr.T:
pt_data, tot_data, mass_data = etrm_daily_SA_2MAY16.run_daily_etrm(start, end, extract_data,
sMon, eMon, col)
rech = np.sum(pt_data[:, 9])
rslts.append(rech)
df.iloc[site_list.index(name), factors.index(factor)] = np.divide(np.array(rslts), 14.0)
# tot_data : precip, et, tot_transp, tot_evap, infil, runoff, snow_fall, cum_mass, end_mass
yy += 1
# "SI = [Q(Po + delP] -Q(Po - delP] / (2 * delP)"
# where SI = Sensitivity Index, Q = recharge, Po = base value of input parameter, delP = change in value of input var
# find sensitivity index
xx = 0
for param in df.iteritems():
data_cube = param[1]
var_arr = var_arrs[xx]
yy = 0
for site in data_cube:
print site
site_name = site_list[yy]
normal = normalize_list[xx]
site_obj = [x for x in site]
sens_list = []
zz = 0
for var in var_arr[xx]:
if var != var_arr[xx][5]:
base = var_arr[xx][5]
deltaP = var - base
obj = site_obj[zz]
sen = ((obj * (base + deltaP) - obj * (base - deltaP)) / (2 * deltaP)) * normal
sens_list.append(sen)
zz += 1
else:
sens_list.append(site_obj[zz])
zz += 1
sens_list = np.array(sens_list)
df_norm.iloc[site_list.index(site_name), factors.index(param[0])] = sens_list
if yy == 20:
print 'done'
break
yy += 1
xx += 1
fig_path = 'C:\\Users\\David\\Documents\\ArcGIS\\results\\Sensitivity_analysis\\normalized'
disp_pct = [(int(x)) for x in np.add(np.multiply(all_pct, 100.0), -100)]
# disp_pct.remove(0)
temps = range(-5, 6)
# temps.remove(0)
all_pct = [x * 0.1 for x in range(5, 16)]
# all_pct.remove(1.0)
ndvi_range = np.linspace(0.9, 1.7, 11)
ndvi_range = [round_to_value(x, 0.05) for x in ndvi_range]
# ndvi_range.remove(1.3)
ndvi_range = np.array(ndvi_range)
for index, row in df_norm.iterrows():
if row.name == 'La_Jencia': # ['South_Baldy', 'Water_Canyon', 'La_Jencia', 'Socorro']:
print index, row
fig = plt.figure(xx, figsize=(20, 10))
ax1 = fig.add_subplot(111)
ax2 = ax1.twiny()
ax3 = ax1.twiny()
fig.subplots_adjust(bottom=0.2)
ax2.plot(temps, row[0], 'k', marker='x', label='Temperature (+/- 5 deg C)')
ax1.plot(disp_pct, row[1], 'blue', marker='o', label='Precipitation (+/- 50%)')
ax1.plot(disp_pct, row[2], 'purple', marker='^', label='Reference Evapotranspiration (+/- 50%)')
ax1.plot(disp_pct, row[3], 'brown', marker='h', label='Total Available Water (+/- 50%)')
ax3.plot(ndvi_range, row[4], 'green', marker='s', linestyle='-.', label='Normalized Density Vegetation\n'
' Index Conversion Factor (0.9 - 1.8)')
ax1.plot(disp_pct, row[5], 'red', marker='*', label='Soil Evaporation Layer Thickness (+/- 50%)')
ax1.set_xlabel(r"Parameter Change (%)", fontsize=16)
ax1.set_ylabel(r"Total Recharge per Year (mm)", fontsize=16)
ax2.set_xlabel(r"Temperature Change (C)", fontsize=16)
ax2.xaxis.set_ticks_position("bottom")
ax2.xaxis.set_label_position("bottom")
ax2.spines["bottom"].set_position(("axes", -0.15))
ax2.set_frame_on(True)
ax2.patch.set_visible(False)
for sp in ax2.spines.itervalues():
sp.set_visible(False)
ax2.spines['bottom'].set_visible(True)
ax3.set_xlabel(r"NDVI to Crop Coefficient Conversion Factor", fontsize=16)
ax3.xaxis.set_ticks_position("bottom")
ax3.xaxis.set_label_position("bottom")
ax3.spines["bottom"].set_position(("axes", -0.3))
ax3.set_frame_on(True)
ax3.patch.set_visible(False)
for sp in ax3.spines.itervalues():
sp.set_visible(False)
ax3.spines['bottom'].set_visible(True)
plt.title('Variation of ETRM Physical Parameters at {}'.format(index.replace('_', ' ')), y=1.08, fontsize=20)
handle1, label1 = ax1.get_legend_handles_labels()
handle2, label2 = ax2.get_legend_handles_labels()
handle3, label3 = ax3.get_legend_handles_labels()
handles, labels = handle1 + handle2 + handle3, label1 + label2 + label3
ax1.legend(handles, labels, loc=0)
        plt.savefig('{}\\{}_spider_10JUL16_2'.format(fig_path, index), ext='png', figsize=(20, 10))
        plt.show()
plt.close(fig)
| apache-2.0 |
FrancoisRheaultUS/dipy | doc/examples/reconst_qtdmri.py | 4 | 18834 | # -*- coding: utf-8 -*-
"""
=================================================================
Estimating diffusion time dependent q-space indices using qt-dMRI
=================================================================
Effective representation of the four-dimensional diffusion MRI signal --
varying over three-dimensional q-space and diffusion time -- is a sought-after
and still unsolved challenge in diffusion MRI (dMRI). We propose a functional
basis approach that is specifically designed to represent the dMRI signal in
this qtau-space [Fick2017]_. Following recent terminology, we refer to our
qtau-functional basis as :math:`q\tau`-dMRI. We use GraphNet regularization --
imposing both signal smoothness and sparsity -- to drastically reduce the
number of diffusion-weighted images (DWIs) that is needed to represent the dMRI
signal in the qtau-space. As the main contribution, :math:`q\tau`-dMRI provides
the framework to -- without making biophysical assumptions -- represent the
:math:`q\tau`-space signal and estimate time-dependent q-space indices
(:math:`q\tau`-indices), providing a new means for studying diffusion in
nervous tissue. :math:`q\tau`-dMRI is the first of its kind in being
specifically designed to provide open interpretation of the
:math:`q\tau`-diffusion signal.
:math:`q\tau`-dMRI can be seen as a time-dependent extension of the MAP-MRI
functional basis [Ozarslan2013]_, and all the previously proposed q-space
indices can be estimated for any diffusion time. These include rotationally
invariant quantities such as the Mean Squared Displacement (MSD), Q-space
Inverse Variance (QIV) and Return-To-Origin Probability (RTOP). Also
directional indices such as the Return To the Axis Probability (RTAP) and
Return To the Plane Probability (RTPP) are available, as well as the
Orientation Distribution Function (ODF).
In this example we illustrate how to use the :math:`q\tau`-dMRI to estimate
time-dependent q-space indices from a :math:`q\tau`-acquisition of a mouse.
First import the necessary modules:
"""
from dipy.data.fetcher import (fetch_qtdMRI_test_retest_2subjects,
read_qtdMRI_test_retest_2subjects)
from dipy.reconst import qtdmri, dti
import matplotlib.pyplot as plt
import numpy as np
"""
Download and read the data for this tutorial.
:math:`q\tau`-dMRI requires data with multiple gradient directions, gradient
strength and diffusion times. We will use the test-retest acquisitions of two
mice that were used in the test-retest study by [Fick2017]_. The data itself
is freely available and citeable at [Wassermann2017]_.
"""
fetch_qtdMRI_test_retest_2subjects()
data, cc_masks, gtabs = read_qtdMRI_test_retest_2subjects()
"""
data contains 4 qt-dMRI datasets of size [80, 160, 5, 515]. The first two are
the test-retest datasets of the first mouse and the second two are those of the
second mouse. cc_masks contains 4 corresponding binary masks for the corpus
callosum voxels in the middle slice that were used in the test-retest study.
Finally, gtab contains the qt-dMRI gradient tables for the DWIs in the dataset.
The data consists of 515 DWIs, divided over 35 shells, with 7 "gradient
strength shells" up to 491 mT/m, 5 equally spaced "pulse separation shells"
(big_delta) between [10.8-20] ms and a pulse duration (small_delta) of 5ms.
To visualize qt-dMRI acquisition schemes in an intuitive way, the qtdmri module
provides a visualization function to illustrate the relationship between
gradient strength (G), pulse separation (big_delta) and b-value:
"""
plt.figure()
qtdmri.visualise_gradient_table_G_Delta_rainbow(gtabs[0])
plt.savefig('qt-dMRI_acquisition_scheme.png')
"""
.. figure:: qt-dMRI_acquisition_scheme.png
:align: center
In the figure the dots represent measured DWIs in any direction, for a given
gradient strength and pulse separation. The background isolines represent the
corresponding b-values for different combinations of G and big_delta.
Next, we visualize the middle slices of the test-retest data sets with their
corresponding masks. To better illustrate the white matter architecture in the
data, we calculate DTI's fractional anisotropy (FA) over the whole slice and
project the corpus callosum mask on the FA image.:
"""
subplot_titles = ["Subject1 Test", "Subject1 Retest",
"Subject2 Test", "Subject2 Tetest"]
fig = plt.figure()
plt.subplots(nrows=2, ncols=2)
for i, (data_, mask_, gtab_) in enumerate(zip(data, cc_masks, gtabs)):
# take the middle slice
data_middle_slice = data_[:, :, 2]
mask_middle_slice = mask_[:, :, 2]
# estimate fractional anisotropy (FA) for this slice
tenmod = dti.TensorModel(gtab_)
tenfit = tenmod.fit(data_middle_slice, data_middle_slice[..., 0] > 0)
fa = tenfit.fa
# set mask color to green with 0.5 opacity as overlay
mask_template = np.zeros(np.r_[mask_middle_slice.shape, 4])
mask_template[mask_middle_slice == 1] = np.r_[0., 1., 0., .5]
# produce the FA images with corpus callosum masks.
plt.subplot(2, 2, 1 + i)
plt.title(subplot_titles[i], fontsize=15)
plt.imshow(fa, cmap='Greys_r', origin=True, interpolation='nearest')
plt.imshow(mask_template, origin=True, interpolation='nearest')
plt.axis('off')
plt.tight_layout()
plt.savefig('qt-dMRI_datasets_fa_with_ccmasks.png')
"""
.. figure:: qt-dMRI_datasets_fa_with_ccmasks.png
:align: center
Next, we use qt-dMRI to estimate time-dependent q-space indices
(q$\tau$-indices) for the masked voxels in the corpus callosum of each dataset.
In particular, we estimate the Return-to-Original, Return-to-Axis and
Return-to-Plane Probability (RTOP, RTAP and RTPP), as well as the Mean Squared
Displacement (MSD).
In this example we don't extrapolate the data beyond the maximum diffusion
time, so we estimate :math:`q\tau` indices between the minimum and maximum
diffusion times of the data at 5 equally spaced points. However, it should be
noted that qt-dMRI's combined smoothness and sparsity regularization allows
for smooth interpolation at any :math:`q\tau` position. In other words, once
the basis is fitted to the data, its coefficients describe the entire
:math:`q\tau`-space, and any :math:`q\tau`-position can be freely recovered.
This includes points beyond the dataset's maximum :math:`q\tau` value
(although this should be done with caution).
"""
tau_min = gtabs[0].tau.min()
tau_max = gtabs[0].tau.max()
taus = np.linspace(tau_min, tau_max, 5)
qtdmri_fits = []
msds = []
rtops = []
rtaps = []
rtpps = []
for i, (data_, mask_, gtab_) in enumerate(zip(data, cc_masks, gtabs)):
    # select the corpus callosum voxels for every dataset
cc_voxels = data_[mask_ == 1]
# initialize the qt-dMRI model.
# recommended basis orders are radial_order=6 and time_order=2.
# The combined Laplacian and l1-regularization using Generalized
# Cross-Validation (GCV) and Cross-Validation (CV) settings is most robust,
# but can be used separately and with weightings preset to any positive
# value to optimize for speed.
qtdmri_mod = qtdmri.QtdmriModel(
gtab_, radial_order=6, time_order=2,
laplacian_regularization=True, laplacian_weighting='GCV',
l1_regularization=True, l1_weighting='CV'
)
# fit the model.
# Here we take every 5th voxel for speed, but of course all voxels can be
# fit for a more robust result later on.
qtdmri_fit = qtdmri_mod.fit(cc_voxels[::5])
qtdmri_fits.append(qtdmri_fit)
# We estimate MSD, RTOP, RTAP and RTPP for the chosen diffusion times.
msds.append(np.array(list(map(qtdmri_fit.msd, taus))))
rtops.append(np.array(list(map(qtdmri_fit.rtop, taus))))
rtaps.append(np.array(list(map(qtdmri_fit.rtap, taus))))
rtpps.append(np.array(list(map(qtdmri_fit.rtpp, taus))))
"""
The estimated :math:`q\tau`-indices, for the chosen diffusion times, are now
stored in msds, rtops, rtaps and rtpps. The trends of these
:math:`q\tau`-indices over time say something about the restriction of
diffusing particles over time, which is currently a hot topic in the dMRI
community. We evaluate the test-retest reproducibility for the two subjects by
plotting the :math:`q\tau`-indices for each subject together. This example
will produce similar results as Fig. 10 in [Fick2017]_.
We first define a small function to plot the mean and standard deviation of the
:math:`q\tau`-index trends in a subject.
"""
def plot_mean_with_std(ax, time, ind1, plotcolor, ls='-', std_mult=1,
label=''):
means = np.mean(ind1, axis=1)
stds = np.std(ind1, axis=1)
ax.plot(time, means, c=plotcolor, lw=3, label=label, ls=ls)
ax.fill_between(time,
means + std_mult * stds,
means - std_mult * stds,
alpha=0.15, color=plotcolor)
ax.plot(time, means + std_mult * stds, alpha=0.25, color=plotcolor)
ax.plot(time, means - std_mult * stds, alpha=0.25, color=plotcolor)
"""
We start by showing the test-retest MSD of both subjects over time. We plot the
:math:`q\tau`-indices together with :math:`q\tau`-index trends of free
diffusion with different diffusivities as background.
"""
# we first generate the data to produce the background index isolines.
Delta_ = np.linspace(0.005, 0.02, 100)
MSD_ = np.linspace(4e-5, 10e-5, 100)
Delta_grid, MSD_grid = np.meshgrid(Delta_, MSD_)
D_grid = MSD_grid / (6 * Delta_grid)
D_levels = np.r_[1, 5, 7, 10, 14, 23, 30] * 1e-4
fig = plt.figure(figsize=(10, 3))
# start with the plot of subject 1.
ax = plt.subplot(1, 2, 1)
# first plot the background
plt.contourf(Delta_ * 1e3, 1e5 * MSD_, D_grid, levels=D_levels, cmap='Greys',
alpha=.5)
# plot the test-retest mean MSD and standard deviation of subject 1.
plot_mean_with_std(ax, taus * 1e3, 1e5 * msds[0], 'r', 'dashdot',
label='MSD Test')
plot_mean_with_std(ax, taus * 1e3, 1e5 * msds[1], 'g', 'dashdot',
label='MSD Retest')
ax.legend(fontsize=13)
# plot some text markers to clarify the background diffusivity lines.
ax.text(.0091 * 1e3, 6.33, 'D=14e-4', fontsize=12, rotation=35)
ax.text(.0091 * 1e3, 4.55, 'D=10e-4', fontsize=12, rotation=25)
ax.set_ylim(4, 9.5)
ax.set_xlim(.009 * 1e3, 0.0185 * 1e3)
ax.set_title(r'Test-Retest MSD($\tau$) Subject 1', fontsize=15)
ax.set_xlabel('Diffusion Time (ms)', fontsize=17)
ax.set_ylabel('MSD ($10^{-5}mm^2$)', fontsize=17)
# then do the same thing for subject 2.
ax = plt.subplot(1, 2, 2)
plt.contourf(Delta_ * 1e3, 1e5 * MSD_, D_grid, levels=D_levels, cmap='Greys',
alpha=.5)
cb = plt.colorbar()
cb.set_label('Free Diffusivity ($mm^2/s$)', fontsize=18)
plot_mean_with_std(ax, taus * 1e3, 1e5 * msds[2], 'r', 'dashdot')
plot_mean_with_std(ax, taus * 1e3, 1e5 * msds[3], 'g', 'dashdot')
ax.set_ylim(4, 9.5)
ax.set_xlim(.009 * 1e3, 0.0185 * 1e3)
ax.set_xlabel('Diffusion Time (ms)', fontsize=17)
ax.set_title(r'Test-Retest MSD($\tau$) Subject 2', fontsize=15)
plt.savefig('qt_indices_msd.png')
"""
.. figure:: qt_indices_msd.png
:align: center
You can see that the MSD in both subjects increases over time, but also slowly
levels off as time progresses. This makes sense as diffusing particles are
becoming more restricted by surrounding tissue as time goes on. You can also
see that for Subject 1 the index trends nearly perfectly overlap, but for
subject 2 they are slightly off, which is also what we found in the paper.
Next, we follow the same procedure to estimate the test-retest RTAP, RTOP and
RTPP over diffusion time for both subject. For ease of comparison, we will
estimate all three in the same unit [1/mm] by taking the square root of RTAP
and the cubed root of RTOP.
"""
# Again, first we define the data for the background illustration.
Delta_ = np.linspace(0.005, 0.02, 100)
RTXP_ = np.linspace(1, 200, 100)
Delta_grid, RTXP_grid = np.meshgrid(Delta_, RTXP_)
D_grid = 1 / (4 * RTXP_grid ** 2 * np.pi * Delta_grid)
D_levels = np.r_[1, 2, 3, 4, 6, 9, 15, 30] * 1e-4
D_colors = np.tile(np.linspace(.8, 0, 7), (3, 1)).T
# We start with estimating the RTOP illustration.
fig = plt.figure(figsize=(10, 3))
ax = plt.subplot(1, 2, 1)
plt.contourf(Delta_ * 1e3, RTXP_, D_grid, colors=D_colors, levels=D_levels,
alpha=.5)
plot_mean_with_std(ax, taus * 1e3, rtops[0] ** (1 / 3.), 'r', '--',
label='RTOP$^{1/3}$ Test')
plot_mean_with_std(ax, taus * 1e3, rtops[1] ** (1 / 3.), 'g', '--',
label='RTOP$^{1/3}$ Retest')
ax.legend(fontsize=13)
ax.text(.0091 * 1e3, 162, 'D=3e-4', fontsize=12, rotation=-22)
ax.text(.0091 * 1e3, 140, 'D=4e-4', fontsize=12, rotation=-20)
ax.text(.0091 * 1e3, 113, 'D=6e-4', fontsize=12, rotation=-16)
ax.set_ylim(54, 170)
ax.set_xlim(.009 * 1e3, 0.0185 * 1e3)
ax.set_title(r'Test-Retest RTOP($\tau$) Subject 1', fontsize=15)
ax.set_xlabel('Diffusion Time (ms)', fontsize=17)
ax.set_ylabel('RTOP$^{1/3}$ (1/mm)', fontsize=17)
ax = plt.subplot(1, 2, 2)
plt.contourf(Delta_ * 1e3, RTXP_, D_grid, colors=D_colors, levels=D_levels,
alpha=.5)
cb = plt.colorbar()
cb.set_label('Free Diffusivity ($mm^2/s$)', fontsize=18)
plot_mean_with_std(ax, taus * 1e3, rtops[2] ** (1 / 3.), 'r', '--')
plot_mean_with_std(ax, taus * 1e3, rtops[3] ** (1 / 3.), 'g', '--')
ax.set_ylim(54, 170)
ax.set_xlim(.009 * 1e3, 0.0185 * 1e3)
ax.set_xlabel('Diffusion Time (ms)', fontsize=17)
ax.set_title(r'Test-Retest RTOP($\tau$) Subject 2', fontsize=15)
plt.savefig('qt_indices_rtop.png')
"""
.. figure:: qt_indices_rtop.png
:align: center
Similarly to MSD, the RTOP is related to the restriction that particles are
experiencing and is also rotationally invariant. RTOP is defined as the
probability that particles are found at the same position at the time of both
gradient pulses. As time increases, the odds become smaller that a particle
will arrive at the same position it left, which is illustrated by all RTOP
trends in the figure. Notice that the estimated RTOP trends decrease more slowly
than free diffusion, meaning that particles experience restriction over time.
Also notice that the RTOP trends in both subjects nearly perfectly overlap.
Next, we estimate two directional :math:`q\tau`-indices, RTAP and RTPP,
describing particle restriction perpendicular and parallel to the orientation
of the principal diffusivity in that voxel. If the voxel describes coherent
white matter (which it does in our corpus callosum example), then they describe
properties related to restriction perpendicular and parallel to the axon
bundles.
"""
# First, we estimate the RTAP trends.
fig = plt.figure(figsize=(10, 3))
ax = plt.subplot(1, 2, 1)
plt.contourf(Delta_ * 1e3, RTXP_, D_grid, colors=D_colors, levels=D_levels,
alpha=.5)
plot_mean_with_std(ax, taus * 1e3, np.sqrt(rtaps[0]), 'r', '-',
label='RTAP$^{1/2}$ Test')
plot_mean_with_std(ax, taus * 1e3, np.sqrt(rtaps[1]), 'g', '-',
label='RTAP$^{1/2}$ Retest')
ax.legend(fontsize=13)
ax.text(.0091 * 1e3, 162, 'D=3e-4', fontsize=12, rotation=-22)
ax.text(.0091 * 1e3, 140, 'D=4e-4', fontsize=12, rotation=-20)
ax.text(.0091 * 1e3, 113, 'D=6e-4', fontsize=12, rotation=-16)
ax.set_ylim(54, 170)
ax.set_xlim(.009 * 1e3, 0.0185 * 1e3)
ax.set_title(r'Test-Retest RTAP($\tau$) Subject 1', fontsize=15)
ax.set_xlabel('Diffusion Time (ms)', fontsize=17)
ax.set_ylabel('RTAP$^{1/2}$ (1/mm)', fontsize=17)
ax = plt.subplot(1, 2, 2)
plt.contourf(Delta_ * 1e3, RTXP_, D_grid, colors=D_colors, levels=D_levels,
alpha=.5)
cb = plt.colorbar()
cb.set_label('Free Diffusivity ($mm^2/s$)', fontsize=18)
plot_mean_with_std(ax, taus * 1e3, np.sqrt(rtaps[2]), 'r', '-')
plot_mean_with_std(ax, taus * 1e3, np.sqrt(rtaps[3]), 'g', '-')
ax.set_ylim(54, 170)
ax.set_xlim(.009 * 1e3, 0.0185 * 1e3)
ax.set_xlabel('Diffusion Time (ms)', fontsize=17)
ax.set_title(r'Test-Retest RTAP($\tau$) Subject 2', fontsize=15)
plt.savefig('qt_indices_rtap.png')
# Finally the last one for RTPP.
fig = plt.figure(figsize=(10, 3))
ax = plt.subplot(1, 2, 1)
plt.contourf(Delta_ * 1e3, RTXP_, D_grid, colors=D_colors, levels=D_levels,
alpha=.5)
plot_mean_with_std(ax, taus * 1e3, rtpps[0], 'r', ':', label='RTPP Test')
plot_mean_with_std(ax, taus * 1e3, rtpps[1], 'g', ':', label='RTPP Retest')
ax.legend(fontsize=13)
ax.text(.0091 * 1e3, 113, 'D=6e-4', fontsize=12, rotation=-16)
ax.text(.0091 * 1e3, 91, 'D=9e-4', fontsize=12, rotation=-13)
ax.text(.0091 * 1e3, 69, 'D=15e-4', fontsize=12, rotation=-10)
ax.set_ylim(54, 170)
ax.set_xlim(.009 * 1e3, 0.0185 * 1e3)
ax.set_title(r'Test-Retest RTPP($\tau$) Subject 1', fontsize=15)
ax.set_xlabel('Diffusion Time (ms)', fontsize=17)
ax.set_ylabel('RTPP (1/mm)', fontsize=17)
ax = plt.subplot(1, 2, 2)
plt.contourf(Delta_ * 1e3, RTXP_, D_grid, colors=D_colors, levels=D_levels,
alpha=.5)
cb = plt.colorbar()
cb.set_label('Free Diffusivity ($mm^2/s$)', fontsize=18)
plot_mean_with_std(ax, taus * 1e3, rtpps[2], 'r', ':')
plot_mean_with_std(ax, taus * 1e3, rtpps[3], 'g', ':')
ax.set_ylim(54, 170)
ax.set_xlim(.009 * 1e3, 0.0185 * 1e3)
ax.set_xlabel('Diffusion Time (ms)', fontsize=17)
ax.set_title(r'Test-Retest RTPP($\tau$) Subject 2', fontsize=15)
plt.savefig('qt_indices_rtpp.png')
"""
.. figure:: qt_indices_rtap.png
:align: center
.. figure:: qt_indices_rtpp.png
:align: center
Like those of RTOP, the trends in RTAP and RTPP also decrease over time. It can
be seen that RTAP$^{1/2}$ is always bigger than RTPP, which makes sense as
particles in coherent white matter experience more restriction perpendicular to
the white matter orientation than parallel to it. Again, in both subjects the
test-retest RTAP and RTPP are nearly perfectly consistent.
Aside from the estimation of :math:`q\tau`-space indices, :math:`q\tau`-dMRI
also allows for the estimation of time-dependent ODFs. Once the Qtdmri model
is fitted, the ODF can be obtained by calling qtdmri_fit.odf(sphere,
s=sharpening_factor). This is identical to how the mapmri module functions,
and allows one to study the time-dependence of ODF directionality.
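A minimal sketch of that call, following the signature given above; the sphere
and the value of the sharpening factor here are illustrative choices, not
values used in this study::

    from dipy.data import default_sphere
    sharpening_factor = 2.0
    odfs = qtdmri_fit.odf(default_sphere, s=sharpening_factor)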
This concludes the example on qt-dMRI. As we showed, approaches such as qt-dMRI
can help in studying the (finite-:math:`\tau`) temporal properties of diffusion
in biological tissues. Differences in :math:`q\tau`-index trends could be
indicative of underlying structural differences that affect the time-dependence
of the diffusion process.
.. [Fick2017]_ Fick, Rutger HJ, et al. "Non-Parametric GraphNet-Regularized
Representation of dMRI in Space and Time", Medical Image Analysis,
2017.
.. [Wassermann2017]_ Wassermann, Demian, et al. "Test-Retest qt-dMRI datasets
for 'Non-Parametric GraphNet-Regularized Representation of dMRI in
Space and Time' [Data set]". Zenodo.
https://doi.org/10.5281/zenodo.996889, 2017.
"""
| bsd-3-clause |
devanshdalal/scikit-learn | examples/tree/plot_tree_regression.py | 95 | 1516 | """
===================================================================
Decision Tree Regression
===================================================================
A 1D regression with a decision tree.
A :ref:`decision tree <tree>` is
used to fit a sine curve with additive noisy observations. As a result, it
learns local linear regressions approximating the sine curve.
We can see that if the maximum depth of the tree (controlled by the
`max_depth` parameter) is set too high, the decision tree learns overly fine
details of the training data and fits the noise, i.e. it overfits.
"""
print(__doc__)
# Import the necessary modules and libraries
import numpy as np
from sklearn.tree import DecisionTreeRegressor
import matplotlib.pyplot as plt
# Create a random dataset
rng = np.random.RandomState(1)
X = np.sort(5 * rng.rand(80, 1), axis=0)
y = np.sin(X).ravel()
y[::5] += 3 * (0.5 - rng.rand(16))
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=2)
regr_2 = DecisionTreeRegressor(max_depth=5)
regr_1.fit(X, y)
regr_2.fit(X, y)
# Predict
X_test = np.arange(0.0, 5.0, 0.01)[:, np.newaxis]
y_1 = regr_1.predict(X_test)
y_2 = regr_2.predict(X_test)
# Plot the results
plt.figure()
plt.scatter(X, y, c="darkorange", label="data")
plt.plot(X_test, y_1, color="cornflowerblue", label="max_depth=2", linewidth=2)
plt.plot(X_test, y_2, color="yellowgreen", label="max_depth=5", linewidth=2)
plt.xlabel("data")
plt.ylabel("target")
plt.title("Decision Tree Regression")
plt.legend()
plt.show()
| bsd-3-clause |
iandriver/RNA-sequence-tools | RNA_Seq_analysis/make_monocle_data.py | 2 | 5813 | import os
import cPickle as pickle
import pandas as pd
import matplotlib
matplotlib.use('QT4Agg')
import matplotlib.pyplot as plt
import matplotlib.pylab as pl
from matplotlib.ticker import LinearLocator
import seaborn as sns
import numpy as np
from operator import itemgetter
#the file path where gene list will be and where new list will output
path_to_file = '/Volumes/Seq_data/cuffnorm_hu_ht280_norm_a6'
#name of file containing genes and GO terms (generated by gene_lookup.py)
gene_file_source = 'go_search_genes_lung_all.txt'
#File that maps 96 well format (A1-H12) back to capture site info (1-96) for fluidigm output
plate_map = 'plate_map_grid.txt'
base_name = 'hu_ht280_norm_a6'
#file with cell capture information on each cell
cell_capture_file = 'Cell_loading_hu_ht280_alpha6_all.txt'
#load file gene
by_cell = pd.DataFrame.from_csv(os.path.join(path_to_file, base_name+'_outlier_filtered.txt'), sep='\t')
by_gene = by_cell.transpose()
#create list of genes
gene_list = by_cell.index.tolist()
#create cell list
cell_list = [x for x in list(by_cell.columns.values)]
plate_map_df = pd.DataFrame.from_csv(os.path.join(path_to_file, plate_map), sep='\t')
def ret_loading_pos(pos, plate_map_df):
let_dict = {'A':0, 'B':1, 'C':2, 'D':3, 'E':4, 'F':5, 'G':6, 'H':7}
let = pos[0]
num = pos[1:]
num_col = plate_map_df[num]
cnum = num_col.iloc[let_dict[let]]
return int(cnum.strip('C'))
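# Usage sketch (hypothetical well ID): ret_loading_pos('B3', plate_map_df) looks up
# row 'B' (index 1) of column '3' in the plate map and returns the capture-site
# number stored there, e.g. a cell reading 'C14' yields 14.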
df_by_gene1 = pd.DataFrame(by_gene, columns=gene_list, index=cell_list)
df_by_cell1 = pd.DataFrame(by_cell, columns=cell_list, index=gene_list)
def make_new_matrix(org_matrix_by_cell, gene_list_file):
split_on='_'
gene_df = pd.read_csv(os.path.join(path_to_file, gene_list_file), delimiter= '\t')
gene_list = gene_df['GeneID'].tolist()
group_list = gene_df['GroupID'].tolist()
gmatrix_df = org_matrix_by_cell[gene_list]
cmatrix_df = gmatrix_df.transpose()
score_df = pd.DataFrame(zip(gene_list, group_list), columns=['GeneID', 'GroupID'])
sample_data = pd.read_csv(os.path.join(path_to_file, 'samples.table'), delimiter= '\t', index_col=0)
by_sample = sample_data.transpose()
map_data = pd.read_csv(os.path.join(path_to_file, 'results_'+base_name+'_align.txt'), delimiter= '\t', index_col=0)
by_cell_map = map_data.transpose()
loading_data = pd.read_csv(os.path.join(path_to_file, cell_capture_file), delimiter= '\t', index_col=0)
l_data = loading_data.transpose()
cell_list = gmatrix_df.index.tolist()
cell_data = []
cell_label_dict ={'norm':('norm_ht280', 'ctrl', 'norm'),
'scler':('scler_ht280', 'diseased', 'scler'),
'IPF':('hu_IPF_HTII_280','diseased', 'IPF'),
'DK':('DK_ht280','diseased', 'DK'),
'alpha6_norm':('norm_alpha6', 'ctrl', 'norm'),
'alpha6_scler':('scler_alpha6', 'diseased', 'scler')}
new_cell_list = []
old_cell_list = []
for cell in cell_list:
match = False
if cell[0:6] == 'norm_h':
k = 'norm'
tracking_id = cell
match = True
num = cell.split('_')[2]
old_cell_list.append(cell)
new_cell_list.append(tracking_id)
elif cell[0:7] == 'scler_h':
k='scler'
tracking_id = cell
num = cell.split('_')[2]
match = True
old_cell_list.append(cell)
new_cell_list.append(tracking_id)
elif cell[0:2] == 'hu':
k = 'IPF'
tracking_id = cell
num = cell.split('_')[4]
match = True
old_cell_list.append(cell)
new_cell_list.append(tracking_id)
elif cell[0:2] == 'DK':
k = 'DK'
tracking_id = cell
num = cell.split('_')[2]
match = True
old_cell_list.append(cell)
new_cell_list.append(tracking_id)
elif cell[0:7] == 'scler_a':
k = 'alpha6_scler'
tracking_id = cell
num = cell.split('_')[2]
match = True
old_cell_list.append(cell)
new_cell_list.append(tracking_id)
elif cell[0:6] == 'norm_a':
k = 'alpha6_norm'
tracking_id = cell
num = cell.split('_')[2]
match = True
old_cell_list.append(cell)
new_cell_list.append(tracking_id)
if match:
condition = cell_label_dict[k][1]
disease = cell_label_dict[k][2]
loading_df = loading_data[cell_label_dict[k][0]]
print ret_loading_pos(num, plate_map_df), num
loading = loading_df.iloc[ret_loading_pos(num, plate_map_df)-1]
print num, tracking_id
if 'single' in loading:
single_cell = 'yes'
else:
single_cell = 'no'
print by_cell_map[cell]
total_mass = by_sample[cell+'_0'][1]
input_mass = by_cell_map[cell][0]
per_mapped = by_cell_map[cell][4]
c_data_tup = (tracking_id,total_mass,input_mass,per_mapped,condition,disease,single_cell)
print c_data_tup
cell_data.append(c_data_tup)
score_df.to_csv(os.path.join(path_to_file, 'gene_feature_data.txt'), sep = '\t', index=False)
new_cmatrix_df = cmatrix_df[old_cell_list]
new_cmatrix_df.columns = new_cell_list
    new_cmatrix_df.to_csv(os.path.join(path_to_file, 'goterms_monocle_count_matrix.txt'), sep = '\t')
cell_data_df = pd.DataFrame(cell_data, columns=['tracking_id','total_mass','input_mass','per_mapped','condition','disease','single_cell'])
cell_data_df.to_csv(os.path.join(path_to_file, 'cell_feature_data.txt'), sep = '\t', index=False)
make_new_matrix(df_by_gene1, gene_file_source)
| mit |
SkumarAG/aiProject | neural/neural.py | 2 | 4300 | # -*- coding: utf-8 -*-
"""
Created on Mon Oct 31 17:56:47 2016
@author: Ramanuja
"""
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 28 15:06:19 2016
@author: Ramanuja
"""
import sys
total = 5000
from ..fileread.TweetRead import importData
import numpy as np
from ..bagofwords.textToVector import bagofWords
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.utils import shuffle
X, y = importData()
X = X[1:]
y = y[1:]
X, y = shuffle(X, y, random_state=0)
#X = np.array(X)
#vectorizer = CountVectorizer(min_df=2)
#X = vectorizer.fit_transform(X)
X = np.array(bagofWords(X))
y = np.array(map(int,list(y)))
num_examples =len(X)
ratio = .6
# training data
X_train = X[1:int(num_examples*ratio)]
y_train = y[1:int(num_examples*ratio)]
#test data
X_test = X[int(num_examples*ratio):]
y_test = y[int(num_examples*ratio):]
barLength = 10
status = ""
nn_input_dim = len(X[0])#neural netwoek input dimension
nn_output_dim = 2# true or false
neuron_number = 6 # in a layer
# Gradient descent parameters (I picked these by hand)
alpha = 0.001 # learning rate for gradient descent
reg_lambda = 0.001 # regularization strength
num_passe = 10000
def weight_init(L1,L2):
return np.sqrt(6)/np.sqrt(L1 + L2)
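# Note: weight_init returns the Glorot/Xavier uniform bound sqrt(6 / (fan_in + fan_out));
# build_model below draws the initial weights from np.random.uniform(-bound, bound, shape).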
def calculate_loss(model):
W1, W2 = model['W1'], model['W2']
# Forward propagation to calculate our predictions
num_ex = len(X_train)
z1 = X_train.dot(W1)
a1 = np.tanh(z1)
z2 = a1.dot(W2)
exp_scores = np.exp(z2)
probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True)
# Calculating the loss
corect_logprobs = -np.log(probs[range(num_ex), y_train])
data_loss = np.sum(corect_logprobs)
# Add regulatization term to loss (optional)
data_loss += reg_lambda/2 * (np.sum(np.square(W1)) + np.sum(np.square(W2)))
return 1./num_ex * data_loss
def status(i,model):
progress = (float(i)/num_passe)
block = int(round(barLength*progress))
sys.stdout.write('\r')
text = "[{0}] {1}% Completed.".format( "#"*block + "-"*(barLength-block), format(progress*100,".2f"),status)
sys.stdout.write(text)
sys.stdout.write (" Current Loss %.5f." %(calculate_loss(model)))
sys.stdout.flush()
def predict(model, x):
W1, W2 = model['W1'], model['W2']
# Forward propagation
z1 = x.dot(W1)
a1 = np.tanh(z1)
z2 = a1.dot(W2)
exp_scores = np.exp(z2)
probs = exp_scores / np.sum(exp_scores, keepdims=True)
return np.argmax(probs)
def build_model(nn_hdim, num_passes=num_passe, print_loss=False):
#Initilization of weight
#L1 number of input in the given layer
#L2 number of input in the given layer
L1 = nn_input_dim
L2 = neuron_number
esp_init = weight_init(L1,L2)
W1 = np.random.uniform(-esp_init,esp_init,[nn_input_dim,nn_hdim])
L1 = neuron_number
L2 = nn_output_dim
esp_init = weight_init(L1,L2)
W2 = np.random.uniform(-esp_init,esp_init,[nn_hdim, nn_output_dim])
# This is what we return at the end
model = {}
# Gradient descent. For each batch...
for i in xrange(0, num_passes):
# Forward propagation
z1 = X_train.dot(W1)
a1 = np.tanh(z1)
z2 = a1.dot(W2)
exp_scores = np.exp(z2)
probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True)
num_ex = len(X_train)
# Backpropagation
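        # Softmax + cross-entropy gradient: dL/dz2 = probs - one_hot(y), implemented
        # below by subtracting 1 from each example's true-class probability.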
delta3 = probs
delta3[range(num_ex), y_train] -= 1
dW2 = (a1.T).dot(delta3)
        delta2 = delta3.dot(W2.T) * (1 - np.power(a1, 2))  # diff of tanh -- in future i will use gradient descent to calculate this
dW1 = np.dot(X_train.T, delta2)
# Add regularization terms (b1 and b2 don't have regularization terms)
dW2 =dW2 + (reg_lambda * W2)
dW1 = dW1 + (reg_lambda * W1)
# Gradient descent parameter update
W1 = W1 -(alpha * dW1)
W2 = W2 -(alpha * dW2)
# Assign new parameters to the model
model = { 'W1': W1, 'W2': W2}
status(i,model)
return model
# Build a model
def test():
model = build_model(neuron_number, print_loss=True)
test = "Heavy traffic at vest avenue"
#predict(model, x) | apache-2.0 |
css-lucas/GAT | gat/core/sna/SNAcityUpdate.py | 1 | 33078 | import tempfile
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
import xlrd
from networkx.algorithms import bipartite as bi
from networkx.algorithms import centrality
from itertools import product
from collections import defaultdict
import pandas as pd
import datetime
from gat.core.sna import propensities
from gat.core.sna import resilience
from gat.core.sna import cliques
from gat.core.sna import ergm
import jgraph  # assumed dependency: used by graph_3D below
class SNA():
def __init__(self, excel_file, nodeSheet, attrSheet=None):
self.subAttrs = ["W", "SENT", "SZE", "AMT"]
self.header, self.list = self.readFile(excel_file, nodeSheet)
if attrSheet != None:
self.attrHeader, self.attrList = self.readFile(excel_file, attrSheet)
self.G = nx.DiGraph()
self.nodes = []
self.edges = []
self.nodeSet = []
self.clustering_dict = {}
self.latapy_clustering_dict = {}
self.closeness_centrality_dict = {}
self.betweenness_centrality_dict = {}
self.degree_centrality_dict = {}
self.eigenvector_centrality_dict = {}
self.katz_centraltiy_dict = {}
self.load_centrality_dict = {}
self.communicability_centrality_dict = {}
self.communicability_centrality_exp_dict = {}
self.node_attributes_dict = {}
self.classList = ['Agent','Organization','Audience','Role','Event','Belief','Symbol','Knowledge','Task','Actor']
self.attrSheet = attrSheet
# Read xlsx file and save the header and all the cells, each a dict with value and header label
# Input: xlsx file, sheet
def readFile(self, excel_file, sheet):
workbook = xlrd.open_workbook(excel_file)
sh = workbook.sheet_by_name(sheet)
header = [str(sh.cell(0, col).value).strip("\n") for col in range(sh.ncols)]
New_ncols = sh.ncols - 1
# If any, delete all the empty features in the header
while header[New_ncols] == '':
header.remove(header[New_ncols])
New_ncols -= 1
# a list of nodes
list = []
for row in range(1, sh.nrows):
tempList = []
for col in range(New_ncols + 1):
feature = str(sh.cell(0, col).value).strip("\n")
cell = sh.cell(row, col).value
if type(cell) == type(""):
val = cell.strip("\n")
else:
val = str(cell)
if val != "": # handle empty cells
# Make each node a dict with node name and node header, to assign later
tempList.append({'val': val, 'header': feature}) # need to define attributes later
list.append(tempList)
# remove repeated column titles
consolidatedHeader = []
for feature in header:
if (feature not in consolidatedHeader) and (feature not in self.subAttrs):
consolidatedHeader.append(feature)
return consolidatedHeader, list
# create set of nodes for multipartite graph
# name = names of the node. This is defined by the header. ex: Abbasi-Davani.F: Name or Abbasi-Davani.F: Faction leader
# nodeSet = names that define a set of node. For example, we can define Person, Faction Leader, and Party Leader as ".['agent']"
# note: len(name) = len(nodeSet), else code fails
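    # Usage sketch (hypothetical headers): sna.createNodeList(['Name', 'Faction leader', 'Party leader'])
    # adds a node for each non-empty cell under those headers and tags it with its header as 'block'.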
def createNodeList(self, nodeSet):
for row in self.list:
for node in row:
if node['header'] in nodeSet and node['val'] != "":
# strip empty cells
self.G.add_node(node['val'], block=node['header'])
self.nodeSet = nodeSet
self.nodes = nx.nodes(self.G)
def loadOntology(self, source, classAssignments):
# Creating an edge list and setting its length for the conditional iterations:
b = self.attrList
y = len(b)
# Creating master edge list, and empty lists to fill from each ontology class
classLists = defaultdict(list) # creates a dictionary with default list values, no need to initialize - nifty!
edgeList = []
# iterating through ontology classes to add them to the network as nodes connected by weighted
# edge attributes to other ontological entities
for x in range(0, y):
for q in range(0, len(b[x])):
nodeHeader = b[x][q]['header']
nodeClass = classAssignments.get(nodeHeader)
if nodeHeader == source and b[x][q]['val'] is not None:
classLists['actor'].append(b[x][q]['val'])
if nodeClass == 'Belief' and b[x][q]['val'] is not None:
classLists['belief'].append(b[x][q]['val'])
if nodeClass == 'Symbols' and b[x][q]['val'] is not None:
classLists['symbol'].append(b[x][q]['val'])
if nodeClass == 'Resource' and b[x][q]['val'] is not None:
classLists['resource'].append(b[x][q]['val'])
if nodeClass == 'Agent' and b[x][q]['val'] is not None:
classLists['agent'].append(b[x][q]['val'])
if nodeClass == 'Organization' and b[x][q]['val'] is not None:
classLists['org'].append(b[x][q]['val'])
if nodeClass == 'Event' and b[x][q]['val'] is not None:
classLists['event'].append(b[x][q]['val'])
if nodeClass == 'Audience' and b[x][q]['val'] is not None:
classLists['aud'].append(b[x][q]['val'])
# removing duplicates from each list
# (this does not remove the effect that numerous connections to one node have on the network)
classLists = {key: set(val) for key, val in classLists.items()} # dict comprehension method
# adding ontological class to each node as node attribute
stringDict = {
'actor': 'Actor',
'belief': 'Belief',
'symbol': 'Symbol',
'resource': 'Resource',
'agent': 'Agent',
'org': 'Organization',
'aud': 'Audience',
'event': 'Event',
'role': 'Role',
'know': 'Knowledge',
'taskModel': 'Task Model',
'location': 'Location',
'title': 'Title',
'position': 'position',
}
for x in nx.nodes(self.G):
for key, entityList in classLists.items():
if x in entityList:
self.G.node[x]['ontClass'] = stringDict[key]
# Input: header list and list of attributes with header label from attribute sheet
# Output: updated list of nodes with attributes
def loadAttributes(self):
for row in self.attrList:
nodeID = row[0]['val']
for cell in row[1:]:
if cell['val'] != '':
if nodeID in self.nodes:
attrList = []
node = self.G.node[nodeID]
if cell['header'] in self.subAttrs: # handle subattributes, e.g. weight
prevCell = row[row.index(cell) - 1]
key = {}
while prevCell['header'] in self.subAttrs:
key[prevCell['header']] = prevCell['val']
prevCell = row[row.index(prevCell) - 1]
key[cell['header']] = cell['val']
for value in node[prevCell['header']]:
if prevCell['val'] in value:
listFlag = True if type(value) is list else False
attrList.append([value[0], key] if listFlag else [value, key]) # weighted attributes take the form [value, weight]
else:
attrList.append(value)
attrID = prevCell['header']
else: # if the attribute is not a subattribute
if cell['header'] in self.G.node[nodeID]:
attrList = (node[cell['header']])
attrList.append(cell['val'])
attrID = cell['header']
self.changeAttribute(nodeID, attrList, attrID)
# Input: the node set that will serve as the source of all links
# Output: updated list of edges connecting nodes in the same row
def createEdgeList(self, sourceSet):
list = self.list
edgeList = []
for row in list:
sourceNodes = []
for node in row:
if node['header'] in sourceSet:
sourceNodes.append(node['val'])
for source in sourceNodes:
for node in row:
if node['val'] != source and node['header'] in self.nodeSet:
sourceDict = self.G.node[source]
edge = (source, node['val'])
edgeList.append(edge)
# for ontological elements: add a weighted link if attribute appears in graph
for attrs in [val for key,val in sourceDict.items()]:
for attr in attrs:
if attr[0] == node['val']:
avg_w = np.average([float(val) for key, val in attr[1].items()])
self.G.add_edge(source, attr[0], weight=avg_w)
self.G.add_edges_from(edgeList)
self.edges = edgeList
def addEdges(self, pair): # deprecated, needs fixing - doesn't handle new dict structure
data = self.list
newEdgeList = []
for row in data:
first = row[pair[0]]['val']
second = row[pair[1]]['val']
if (first != '' and second != '') and (first != second):
newEdgeList.append((first, second))
self.G.add_edges_from(newEdgeList)
self.edges.extend(newEdgeList)
def calculatePropensities(self, emo=True, role=True):
for edge in self.edges: # for every edge, calculate propensities and append as an attribute
attributeDict = {}
emoPropList = propensities.propCalc(self, edge)[0] if emo else None
if len(emoPropList) > 0:
attributeDict['Emotion'] = emoPropList
attributeDict['emoWeight'] = propensities.aggregateProps(emoPropList)
rolePropList = propensities.propCalc(self, edge)[1] if role else None
if len(rolePropList) > 0:
attributeDict['Role'] = rolePropList
attributeDict['roleWeight'] = propensities.aggregateProps(rolePropList)
inflPropList = propensities.propCalc(self, edge)[2] if role else None
if len(inflPropList) > 0:
attributeDict['Influence'] = inflPropList
attributeDict['inflWeight'] = propensities.aggregateProps(inflPropList)
self.G[edge[0]][edge[1]] = attributeDict
self.edges = nx.edges(self.G)
def drag_predict(self,node):
## Smart prediction prototype
# ERGM generates probability matrix where order is G.nodes() x G.nodes()
ergm_prob_mat = ergm.probability(G=self.G)
# Assigning propensities probabilities and generating add_node links - TODO: merge this with overall method later
for target in self.G.nodes_iter():
emoProps, roleProps, inflProps = propensities.propCalc(self, (node, target))
if len(emoProps) > 0:
w = []
for prop in emoProps:
w.append(prop[4] * prop[5]) # add the product of the attribute weights to a list for each prop
w_avg = np.average(w) # find average propensity product weight
prob = np.random.binomial(1, w_avg * 1 / 2)
# use w_avg as the probability for a bernoulli distribution
if prob:
self.G.add_edge(node, target)
self.G[node][target]['Emotion'] = emoProps
self.G[node][target]['Role'] = roleProps if len(roleProps) > 0 else None
self.G[node][target]['Influence'] = inflProps if len(inflProps) > 0 else None
self.G[node][target]['Predicted'] = True
# iterate through all possible edges
for i, j in product(range(len(self.G.nodes())), repeat=2):
if i != j:
node = self.G.nodes()[i]
target = self.G.nodes()[j]
prob = ergm_prob_mat[i, j] * 0.05
# check props
if self.G[node].get(target) is not None:
## check emo props to modify adjacency prob matrix if present
if self.G[node][target].get('Emotion') is not None:
w = []
for prop in emoProps:
w.append(prop[4] * prop[5]) # add the product of the attribute weights to a list for each prop
w_avg = np.average(w)
prob = (prob + w_avg * 0.5) / 2
presence = np.random.binomial(1, prob) if prob < 1 else 1
# use adjacency prob mat as the probability for a bernoulli distribution
if presence:
self.G.add_edge(node, target)
self.G[node][target]['Predicted'] = True
# input: spreadsheet of bomb attacks
# output: updated dict of sentiment changes for each of attack events
def event_update(self, event_sheet, max_iter):
df = pd.read_excel(event_sheet)
bombData = df.to_dict(orient='index')
for x in range(0, len(bombData)):
bombData[x]['Date'] = datetime.datetime.strptime(str(bombData[x]['Date']), '%Y%m%d')
# using datetime to create iterations of flexible length
dateList = [bombData[x]['Date'] for x in bombData]
dateIter = (max(dateList) - min(dateList)) / 10
for i in range(max_iter):
            nodeList = [(bombData[x]['Source'], bombData[x]['Target'], bombData[x]['CODE'], bombData[x]['Location'])
for x in bombData if
min(dateList) + dateIter * i <= bombData[x]['Date'] < min(dateList) + dateIter * (i+1)]
# adding attacks to test graph by datetime period and iterating through to change sentiments
iterEdgeList = []
for node in nodeList:
for others in self.G.nodes_iter():
# rejection of source
if self.G.has_edge(node[0], others):
for type in ["Agent", "Org"]:
sent = self.G.node[node].get(type)
if sent is not None and sent[0] == others:
print(sent)
iterEdgeList.append((node[0], others, (sent[1] * .1) + sent[1]))
# sympathy for target
if self.G.has_edge(node[1], others):
sent = self.G.get_edge_data(node, others)
iterEdgeList.append((node[0], others, (sent[node, others] * 1.1) + sent[node, others]))
# sympathy for city population
if node[0]['Location'][0:5] == 'IRQNAJ' and \
self.G.node(data=True)['Belief'] == "Shi'ism" and \
self.G.node(data=True)['Belief'] + 1 > 0:
if node[0]['Location'] == 'IRQANBFAL' and \
self.G.node(data=True)['Belief'] == "Shi'ism" and \
self.G.node(data=True)['Belief'] + 1 > 0:
iterEdgeList.append((node[0], others, (sent[node, others] * 1.1) + sent[node, others]))
if node[0]['Location'][0:5] == 'IRQKIR' and \
self.G.node(data=True)['Belief'] == 'Kurdish Nationalism' and \
self.G.node(data=True)['Belief'] + 1 > 0:
iterEdgeList.append((node[0], others, (sent[node, others] * 1.1) + sent[node, others]))
# add an event node
event = 'Event '+str(node[2])+': '+node[0]+' to '+node[1]
self.G.add_node(event, {'ontClass':'Event', 'Name':['Event '+str(node[2])+': '+node[0]+' to '+node[1]], 'block':'Event',
'Description': 'Conduct suicide, car, or other non-military bombing'})
self.G.add_edge(node[0], event)
self.G.add_edge(event, node[1])
self.G.add_weighted_edges_from(iterEdgeList, 'W')
self.nodes = nx.nodes(self.G) # update node list
self.edges = nx.edges(self.G) # update edge list
# copy the original social network graph created with user input data.
# this will be later used to reset the modified graph to inital state
def copyGraph(self):
self.temp = self.G
def resetGraph(self):
self.G = self.temp
# remove edge and node. Note that when we remove a certain node, edges that are
# connected to such nodes are also deleted.
def removeNode(self, node):
if self.G.has_node(node):
self.G.remove_node(node)
self.nodes = nx.nodes(self.G)
for edge in self.edges:
if node in edge:
self.edges.remove(edge)
def addNode(self, node, attrDict={}, connections=[]):
self.G.add_node(node, attrDict)
for i in connections:
self.G.add_edge(node, i)
for k in attrDict: # add attributes based on user input
self.changeAttribute(node, [attrDict[k]], k)
self.changeAttribute(node, True, 'newNode')
self.drag_predict(node)
self.nodes = nx.nodes(self.G) # update node list
self.edges = nx.edges(self.G) # update edge list
def removeEdge(self, node1, node2):
if self.G.has_edge(node1, node2):
self.G.remove_edge(node1, node2)
# Change an attribute of a node
def changeAttribute(self, node, value, attribute="bipartite"):
if self.G.has_node(node):
self.G.node[node][attribute] = value
self.nodes = nx.nodes(self.G)
# Change node name
def relabelNode(self, oldNode, newNode):
if self.G.has_node(oldNode):
self.G.add_node(newNode, self.G.node[oldNode])
self.G.remove_node(oldNode)
self.nodes = nx.nodes(self.G)
# Check if node exists
def is_node(self, node):
return self.G.has_node(node)
# Getter for nodes and edges
def getNodes(self):
return self.nodes
def getEdges(self):
return self.edges
def communityDetection(self):
undirected = self.G.to_undirected()
self.eigenvector_centrality()
return cliques.louvain(G = undirected, centralities = self.eigenvector_centrality_dict)
def calculateResilience(self,baseline=True,robustness=True):
cliques_found = self.communityDetection()
simpleRes, baseline = resilience.averagePathRes(cliques_found, iters=5) if baseline is not None else None
robustnessRes = resilience.laplacianRes(cliques_found, iters=5) if robustness else None
return baseline,simpleRes,robustnessRes
##########################
## System-wide measures ##
##########################
# set all the properties with this function.
def set_property(self):
self.clustering()
self.latapy_clustering()
self.robins_alexander_clustering()
self.closeness_centrality()
self.betweenness_centrality()
self.degree_centrality()
self.katz_centrality()
self.eigenvector_centrality()
self.load_centrality()
self.communicability_centrality() # Not available for directed graphs
self.communicability_centrality_exp()
self.node_connectivity()
self.average_clustering()
def center(self):
return nx.center(self.G)
def diameter(self):
return nx.diameter(self.G)
def periphery(self):
return nx.periphery(self.G)
def eigenvector(self):
return nx.eigenvector_centrality(self.G)
def triadic_census(self):
return nx.triadic_census(self.G)
def average_degree_connectivity(self):
return nx.average_degree_connectivity(self.G)
def degree_assortativity_coefficient(self):
return nx.degree_assortativity_coefficient(self.G)
# node connectivity:
def node_connectivity(self):
return nx.node_connectivity(self.G)
# average clustering coefficient:
def average_clustering(self):
return nx.average_clustering(self.G.to_undirected())
# attribute assortivity coefficient:
def attribute_assortivity(self, attr):
return nx.attribute_assortativity_coefficient(self.G, attr)
# is strongly connected:
def is_strongly_connected(self):
return nx.is_strongly_connected(self.G)
# is weakly connected:
def is_weakly_connected(self):
return nx.is_weakly_connected(self.G)
#############################
## Node-dependent measures ##
#############################
# Sum sentiment for belief nodes
def sentiment(self,types,key):
sentiment_dict = {}
for type in types:
# nodes = [node for node in self.G.nodes_iter() if node.get("ontClass") == type]
# for typeNode in nodes:
for node in self.G.nodes_iter():
sent = self.G.node[node].get(type) # the belief attribute
if sent is not None:
for item in [item for item in sent if len(item) == 2]: #TODO better way to do this
if sentiment_dict.get(item[0]) is None:
sentiment_dict[item[0]] = float(item[1][key])
else:
sentiment_dict[item[0]] += float(item[1][key])
self.sentiment_dict = sentiment_dict
return sentiment_dict
# Find clustering coefficient for each nodes
def clustering(self):
self.clustering_dict = bi.clustering(self.G)
# set lapaty clustering to empty dictionary if there are more then 2 nodesets
# else return lapaty clustering coefficients for each nodes
def latapy_clustering(self):
if len(self.nodeSet) != 2 or len(set(self.nodeSet)) != 2:
self.latapy_clustering_dict = {}
else:
self.latapy_clustering_dict = bi.latapy_clustering(self.G)
def robins_alexander_clustering(self):
self.robins_alexander_clustering_dict = bi.robins_alexander_clustering(self.G)
# Find closeness_centrality coefficient for each nodes
def closeness_centrality(self):
self.closeness_centrality_dict = bi.closeness_centrality(self.G, self.nodes)
# Find degree_centrality coefficient for each nodes
def degree_centrality(self):
self.degree_centrality_dict = nx.degree_centrality(self.G)
# Find betweenness_centrality coefficient for each nodes
def betweenness_centrality(self):
self.betweenness_centrality_dict = nx.betweenness_centrality(self.G)
def eigenvector_centrality(self):
self.eigenvector_centrality_dict = nx.eigenvector_centrality(self.G, max_iter=500, tol=1e-01)
# self.eigenvector_centrality_dict = nx.eigenvector_centrality(self.G)
# self.eigenvector_centrality_dict = nx.eigenvector_centrality_numpy(self.G)
def katz_centrality(self):
self.katz_centrality_dict = centrality.katz_centrality(self.G)
def load_centrality(self):
self.load_centrality_dict = nx.load_centrality(self.G)
def communicability_centrality(self):
self.communicability_centrality_dict = nx.communicability_centrality(self.G)
def communicability_centrality_exp(self):
        self.communicability_centrality_exp_dict = nx.communicability_centrality_exp(self.G)
def node_attributes(self):
self.node_attributes_dict = self.G.node
def get_node_attributes(self, node):
return self.G.node[node]
def get_eigenvector_centrality(self, lst=[]):
if len(lst) == 0:
return self.eigenvector_centrality_dict
else:
sub_dict = {}
            for key, value in self.eigenvector_centrality_dict.items():
if key in lst:
sub_dict[key] = value
return sub_dict
def get_clustering(self, lst=[]):
if len(lst) == 0:
return self.clustering_dict
else:
sub_dict = {}
            for key, value in self.clustering_dict.items():
if key in lst:
sub_dict[key] = value
return sub_dict
def get_latapy_clustering(self, lst=[]):
if len(lst) == 0:
return self.latapy_clustering_dict
else:
sub_dict = {}
            for key, value in self.latapy_clustering_dict.items():
if key in lst:
sub_dict[key] = value
return sub_dict
def get_robins_alexander_clustering(self, lst=[]):
if len(lst) == 0:
return self.robins_alexander_clustering_dict
else:
sub_dict = {}
            for key, value in self.robins_alexander_clustering_dict.items():
if key in lst:
sub_dict[key] = value
return sub_dict
def get_closeness_centrality(self, lst=[]):
if len(lst) == 0:
return self.closeness_centrality_dict
else:
sub_dict = {}
            for key, value in self.closeness_centrality_dict.items():
if key in lst:
sub_dict[key] = value
return sub_dict
def get_degree_centrality(self, lst=[]):
if len(lst) == 0:
return self.degree_centrality_dict
else:
sub_dict = {}
            for key, value in self.degree_centrality_dict.items():
if key in lst:
sub_dict[key] = value
return sub_dict
def get_betweenness_centrality(self, lst=[]):
if len(lst) == 0:
return self.betweenness_centrality_dict
else:
sub_dict = {}
            for key, value in self.betweenness_centrality_dict.items():
if key in lst:
sub_dict[key] = value
return sub_dict
def get_katz_centrality(self, lst=[]):
if len(lst) == 0:
return self.katz_centrality_dict
else:
sub_dict = {}
            for key, value in self.katz_centrality_dict.items():
if key in lst:
sub_dict[key] = value
return sub_dict
def get_load_centrality(self, lst=[]):
if len(lst) == 0:
return self.load_centrality_dict
else:
sub_dict = {}
            for key, value in self.load_centrality_dict.items():
if key in lst:
sub_dict[key] = value
return sub_dict
def get_communicability_centrality(self, lst=[]):
if len(lst) == 0:
            return self.communicability_centrality_dict
else:
sub_dict = {}
            for key, value in self.communicability_centrality_dict.items():
if key in lst:
sub_dict[key] = value
return sub_dict
def get_communicability_centrality_exp(self, lst=[]):
if len(lst) == 0:
            return self.communicability_centrality_exp_dict
else:
sub_dict = {}
            for key, value in self.communicability_centrality_exp_dict.items():
if key in lst:
sub_dict[key] = value
return sub_dict
# draw 2D graph
# attr is a dictionary that has color and size as its value.
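    # Usage sketch (hypothetical headers and values): attr = {'Name': ('r', 300), 'Faction leader': ('b', 150)},
    # i.e. one (node_color, node_size) pair per header listed in self.nodeSet.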
def graph_2D(self, attr, label=False):
block = nx.get_node_attributes(self.G, 'block')
Nodes = nx.nodes(self.G)
pos = nx.fruchterman_reingold_layout(self.G)
labels = {}
for node in block:
labels[node] = node
for node in set(self.nodeSet):
nx.draw_networkx_nodes(self.G, pos,
with_labels=False,
                                   nodelist=[n for n in Nodes if block[n] == node],
node_color=attr[node][0],
node_size=attr[node][1],
alpha=0.8)
nx.draw_networkx_edges(self.G, pos, width=1.0, alpha=0.5)
for key, value in pos.items():
pos[key][1] += 0.01
if label == True:
nx.draw_networkx_labels(self.G, pos, labels, font_size=8)
limits = plt.axis('off')
plt.show()
# draw 3 dimensional verison of the graph (returning html object)
def graph_3D(self):
n = nx.edges(self.G)
removeEdge = []
for i in range(len(n)):
if n[i][0] == '' or n[i][1] == '':
removeEdge.append(n[i])
for j in range(len(removeEdge)):
n.remove(removeEdge[j])
jgraph.draw(nx.edges(self.G), directed="true")
# note: this is for Vinay's UI
def plot_2D(self, attr, label=False):
plt.clf()
ontClass = nx.get_node_attributes(self.G, 'ontClass')
pos = nx.fruchterman_reingold_layout(self.G)
labels = {}
for node in ontClass:
labels[node] = node
for node in set(self.classList):
nx.draw_networkx_nodes(self.G, pos,
with_labels=False,
nodelist=[key for key, val in ontClass.items() if val == node],
node_color=attr[node][0],
node_size=attr[node][1],
alpha=0.8)
nx.draw_networkx_edges(self.G, pos, width=1.0, alpha=0.5)
for key, value in pos.items():
pos[key][1] += 0.01
if label == True:
nx.draw_networkx_labels(self.G, pos, labels, font_size=7)
plt.axis('off')
f = tempfile.NamedTemporaryFile(
dir='out/sna',
suffix='.png', delete=False)
# save the figure to the temporary file
plt.savefig(f, bbox_inches='tight')
f.close() # close the file
# get the file's name
# (the template will need that)
plotPng = f.name.split('/')[-1]
plotPng = plotPng.split('\\')[-1]
return plotPng
# create json file for 3 dimensional graph
# name ex: {name, institution}, {faction leaders, institution}, etc...
# color: {"0xgggggg", "0xaaaaaa"} etc. (Takes a hexadecimal "String").
# returns a json dictionary
def create_json(self, classes, color, graph=None):
data = {}
edges = []
nodes_property = {}
if graph is None:
graph = self.G
for edge in self.G.edges_iter():
if graph[edge[0]][edge[1]].get('Emotion') is not None:
# links with propensities can be given hex code colors for arrow, edge; can also change arrow size
edges.append(
{'source': edge[0],
'target': edge[1],
'name': edge[0] + "," + edge[1],
'arrowColor': '0xE74C3C',
'arrowSize': 2})
if graph[edge[0]][edge[1]].get('Predicted') is not None:
edges.append(
{'source': edge[0],
'target': edge[1],
'name': edge[0] + "," + edge[1],
'color': '0xE74C3C',
'arrowColor': '0xE74C3C',
'arrowSize': 2})
if graph[edge[0]][edge[1]].get('W') is not None:
edges.append(
{'source': edge[0],
'target': edge[1],
'name': edge[0] + "," + edge[1],
'arrowColor': '0x32CD32',
'arrowSize': 2})
else:
edges.append(
{'source': edge[0],
'target': edge[1],
'name': edge[0] + "," + edge[1]}) #TODO clean up repeated code above
for node in self.G.nodes_iter():
temp = {}
ontClass = self.G.node[node].get('ontClass')
if graph.node[node].get('newNode') is True:
temp['color'] = '0x8B0000'
else:
if ontClass is None:
temp['color'] = '0xD3D3D3'
else:
temp['color'] = color[classes.index(ontClass)]
if graph.node[node].get('Name') is not None:
temp['name'] = graph.node[node].get('Name')[0]
nodes_property[node] = temp
data['edges'] = edges
data['nodes'] = nodes_property
return data | mit |
abimannans/scikit-learn | sklearn/utils/tests/test_validation.py | 133 | 18339 | """Tests for input validation functions"""
import warnings
from tempfile import NamedTemporaryFile
from itertools import product
import numpy as np
from numpy.testing import assert_array_equal
import scipy.sparse as sp
from nose.tools import assert_raises, assert_true, assert_false, assert_equal
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_warns
from sklearn.utils import as_float_array, check_array, check_symmetric
from sklearn.utils import check_X_y
from sklearn.utils.mocking import MockDataFrame
from sklearn.utils.estimator_checks import NotAnArray
from sklearn.random_projection import sparse_random_matrix
from sklearn.linear_model import ARDRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.svm import SVR
from sklearn.datasets import make_blobs
from sklearn.utils.validation import (
NotFittedError,
has_fit_parameter,
check_is_fitted,
check_consistent_length,
DataConversionWarning,
)
from sklearn.utils.testing import assert_raise_message
def test_as_float_array():
# Test function for as_float_array
X = np.ones((3, 10), dtype=np.int32)
X = X + np.arange(10, dtype=np.int32)
# Checks that the return type is ok
X2 = as_float_array(X, copy=False)
np.testing.assert_equal(X2.dtype, np.float32)
# Another test
X = X.astype(np.int64)
X2 = as_float_array(X, copy=True)
# Checking that the array wasn't overwritten
assert_true(as_float_array(X, False) is not X)
# Checking that the new type is ok
np.testing.assert_equal(X2.dtype, np.float64)
# Here, X is of the right type, it shouldn't be modified
X = np.ones((3, 2), dtype=np.float32)
assert_true(as_float_array(X, copy=False) is X)
# Test that if X is fortran ordered it stays
X = np.asfortranarray(X)
assert_true(np.isfortran(as_float_array(X, copy=True)))
# Test the copy parameter with some matrices
matrices = [
np.matrix(np.arange(5)),
sp.csc_matrix(np.arange(5)).toarray(),
sparse_random_matrix(10, 10, density=0.10).toarray()
]
for M in matrices:
N = as_float_array(M, copy=True)
N[0, 0] = np.nan
assert_false(np.isnan(M).any())
def test_np_matrix():
# Confirm that input validation code does not return np.matrix
X = np.arange(12).reshape(3, 4)
assert_false(isinstance(as_float_array(X), np.matrix))
assert_false(isinstance(as_float_array(np.matrix(X)), np.matrix))
assert_false(isinstance(as_float_array(sp.csc_matrix(X)), np.matrix))
def test_memmap():
# Confirm that input validation code doesn't copy memory mapped arrays
asflt = lambda x: as_float_array(x, copy=False)
with NamedTemporaryFile(prefix='sklearn-test') as tmp:
M = np.memmap(tmp, shape=100, dtype=np.float32)
M[:] = 0
for f in (check_array, np.asarray, asflt):
X = f(M)
X[:] = 1
assert_array_equal(X.ravel(), M)
X[:] = 0
def test_ordering():
# Check that ordering is enforced correctly by validation utilities.
# We need to check each validation utility, because a 'copy' without
# 'order=K' will kill the ordering.
X = np.ones((10, 5))
for A in X, X.T:
for copy in (True, False):
B = check_array(A, order='C', copy=copy)
assert_true(B.flags['C_CONTIGUOUS'])
B = check_array(A, order='F', copy=copy)
assert_true(B.flags['F_CONTIGUOUS'])
if copy:
assert_false(A is B)
X = sp.csr_matrix(X)
X.data = X.data[::-1]
assert_false(X.data.flags['C_CONTIGUOUS'])
def test_check_array():
# accept_sparse == None
# raise error on sparse inputs
X = [[1, 2], [3, 4]]
X_csr = sp.csr_matrix(X)
assert_raises(TypeError, check_array, X_csr)
# ensure_2d
X_array = check_array([0, 1, 2])
assert_equal(X_array.ndim, 2)
X_array = check_array([0, 1, 2], ensure_2d=False)
assert_equal(X_array.ndim, 1)
# don't allow ndim > 3
X_ndim = np.arange(8).reshape(2, 2, 2)
assert_raises(ValueError, check_array, X_ndim)
check_array(X_ndim, allow_nd=True) # doesn't raise
# force_all_finite
X_inf = np.arange(4).reshape(2, 2).astype(np.float)
X_inf[0, 0] = np.inf
assert_raises(ValueError, check_array, X_inf)
check_array(X_inf, force_all_finite=False) # no raise
# nan check
X_nan = np.arange(4).reshape(2, 2).astype(np.float)
X_nan[0, 0] = np.nan
assert_raises(ValueError, check_array, X_nan)
check_array(X_inf, force_all_finite=False) # no raise
# dtype and order enforcement.
X_C = np.arange(4).reshape(2, 2).copy("C")
X_F = X_C.copy("F")
X_int = X_C.astype(np.int)
X_float = X_C.astype(np.float)
Xs = [X_C, X_F, X_int, X_float]
dtypes = [np.int32, np.int, np.float, np.float32, None, np.bool, object]
orders = ['C', 'F', None]
copys = [True, False]
for X, dtype, order, copy in product(Xs, dtypes, orders, copys):
X_checked = check_array(X, dtype=dtype, order=order, copy=copy)
if dtype is not None:
assert_equal(X_checked.dtype, dtype)
else:
assert_equal(X_checked.dtype, X.dtype)
if order == 'C':
assert_true(X_checked.flags['C_CONTIGUOUS'])
assert_false(X_checked.flags['F_CONTIGUOUS'])
elif order == 'F':
assert_true(X_checked.flags['F_CONTIGUOUS'])
assert_false(X_checked.flags['C_CONTIGUOUS'])
if copy:
assert_false(X is X_checked)
else:
# doesn't copy if it was already good
if (X.dtype == X_checked.dtype and
X_checked.flags['C_CONTIGUOUS'] == X.flags['C_CONTIGUOUS']
and X_checked.flags['F_CONTIGUOUS'] == X.flags['F_CONTIGUOUS']):
assert_true(X is X_checked)
# allowed sparse != None
X_csc = sp.csc_matrix(X_C)
X_coo = X_csc.tocoo()
X_dok = X_csc.todok()
X_int = X_csc.astype(np.int)
X_float = X_csc.astype(np.float)
Xs = [X_csc, X_coo, X_dok, X_int, X_float]
accept_sparses = [['csr', 'coo'], ['coo', 'dok']]
for X, dtype, accept_sparse, copy in product(Xs, dtypes, accept_sparses,
copys):
with warnings.catch_warnings(record=True) as w:
X_checked = check_array(X, dtype=dtype,
accept_sparse=accept_sparse, copy=copy)
if (dtype is object or sp.isspmatrix_dok(X)) and len(w):
message = str(w[0].message)
messages = ["object dtype is not supported by sparse matrices",
"Can't check dok sparse matrix for nan or inf."]
assert_true(message in messages)
else:
assert_equal(len(w), 0)
if dtype is not None:
assert_equal(X_checked.dtype, dtype)
else:
assert_equal(X_checked.dtype, X.dtype)
if X.format in accept_sparse:
# no change if allowed
assert_equal(X.format, X_checked.format)
else:
# got converted
assert_equal(X_checked.format, accept_sparse[0])
if copy:
assert_false(X is X_checked)
else:
# doesn't copy if it was already good
if (X.dtype == X_checked.dtype and X.format == X_checked.format):
assert_true(X is X_checked)
# other input formats
# convert lists to arrays
X_dense = check_array([[1, 2], [3, 4]])
assert_true(isinstance(X_dense, np.ndarray))
# raise on too deep lists
assert_raises(ValueError, check_array, X_ndim.tolist())
check_array(X_ndim.tolist(), allow_nd=True) # doesn't raise
# convert weird stuff to arrays
X_no_array = NotAnArray(X_dense)
result = check_array(X_no_array)
assert_true(isinstance(result, np.ndarray))
def test_check_array_pandas_dtype_object_conversion():
# test that data-frame like objects with dtype object
# get converted
X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.object)
X_df = MockDataFrame(X)
assert_equal(check_array(X_df).dtype.kind, "f")
assert_equal(check_array(X_df, ensure_2d=False).dtype.kind, "f")
# smoke-test against dataframes with column named "dtype"
X_df.dtype = "Hans"
assert_equal(check_array(X_df, ensure_2d=False).dtype.kind, "f")
def test_check_array_dtype_stability():
# test that lists with ints don't get converted to floats
X = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
assert_equal(check_array(X).dtype.kind, "i")
assert_equal(check_array(X, ensure_2d=False).dtype.kind, "i")
def test_check_array_dtype_warning():
X_int_list = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
X_float64 = np.asarray(X_int_list, dtype=np.float64)
X_float32 = np.asarray(X_int_list, dtype=np.float32)
X_int64 = np.asarray(X_int_list, dtype=np.int64)
X_csr_float64 = sp.csr_matrix(X_float64)
X_csr_float32 = sp.csr_matrix(X_float32)
X_csc_float32 = sp.csc_matrix(X_float32)
X_csc_int32 = sp.csc_matrix(X_int64, dtype=np.int32)
y = [0, 0, 1]
integer_data = [X_int64, X_csc_int32]
float64_data = [X_float64, X_csr_float64]
float32_data = [X_float32, X_csr_float32, X_csc_float32]
for X in integer_data:
X_checked = assert_no_warnings(check_array, X, dtype=np.float64,
accept_sparse=True)
assert_equal(X_checked.dtype, np.float64)
X_checked = assert_warns(DataConversionWarning, check_array, X,
dtype=np.float64,
accept_sparse=True, warn_on_dtype=True)
assert_equal(X_checked.dtype, np.float64)
# Check that the warning message includes the name of the Estimator
X_checked = assert_warns_message(DataConversionWarning,
'SomeEstimator',
check_array, X,
dtype=[np.float64, np.float32],
accept_sparse=True,
warn_on_dtype=True,
estimator='SomeEstimator')
assert_equal(X_checked.dtype, np.float64)
X_checked, y_checked = assert_warns_message(
DataConversionWarning, 'KNeighborsClassifier',
check_X_y, X, y, dtype=np.float64, accept_sparse=True,
warn_on_dtype=True, estimator=KNeighborsClassifier())
assert_equal(X_checked.dtype, np.float64)
for X in float64_data:
X_checked = assert_no_warnings(check_array, X, dtype=np.float64,
accept_sparse=True, warn_on_dtype=True)
assert_equal(X_checked.dtype, np.float64)
X_checked = assert_no_warnings(check_array, X, dtype=np.float64,
accept_sparse=True, warn_on_dtype=False)
assert_equal(X_checked.dtype, np.float64)
for X in float32_data:
X_checked = assert_no_warnings(check_array, X,
dtype=[np.float64, np.float32],
accept_sparse=True)
assert_equal(X_checked.dtype, np.float32)
assert_true(X_checked is X)
X_checked = assert_no_warnings(check_array, X,
dtype=[np.float64, np.float32],
accept_sparse=['csr', 'dok'],
copy=True)
assert_equal(X_checked.dtype, np.float32)
assert_false(X_checked is X)
X_checked = assert_no_warnings(check_array, X_csc_float32,
dtype=[np.float64, np.float32],
accept_sparse=['csr', 'dok'],
copy=False)
assert_equal(X_checked.dtype, np.float32)
assert_false(X_checked is X_csc_float32)
assert_equal(X_checked.format, 'csr')
def test_check_array_min_samples_and_features_messages():
# empty list is considered 2D by default:
msg = "0 feature(s) (shape=(1, 0)) while a minimum of 1 is required."
assert_raise_message(ValueError, msg, check_array, [])
# If considered a 1D collection when ensure_2d=False, then the minimum
# number of samples will break:
msg = "0 sample(s) (shape=(0,)) while a minimum of 1 is required."
assert_raise_message(ValueError, msg, check_array, [], ensure_2d=False)
# Invalid edge case when checking the default minimum sample of a scalar
msg = "Singleton array array(42) cannot be considered a valid collection."
assert_raise_message(TypeError, msg, check_array, 42, ensure_2d=False)
# But this works if the input data is forced to look like a 2D array with
# one sample and one feature:
X_checked = check_array(42, ensure_2d=True)
assert_array_equal(np.array([[42]]), X_checked)
# Simulate a model that would need at least 2 samples to be well defined
X = np.ones((1, 10))
y = np.ones(1)
msg = "1 sample(s) (shape=(1, 10)) while a minimum of 2 is required."
assert_raise_message(ValueError, msg, check_X_y, X, y,
ensure_min_samples=2)
# The same message is raised if the data has 2 dimensions even if this is
# not mandatory
assert_raise_message(ValueError, msg, check_X_y, X, y,
ensure_min_samples=2, ensure_2d=False)
# Simulate a model that would require at least 3 features (e.g. SelectKBest
# with k=3)
X = np.ones((10, 2))
y = np.ones(2)
msg = "2 feature(s) (shape=(10, 2)) while a minimum of 3 is required."
assert_raise_message(ValueError, msg, check_X_y, X, y,
ensure_min_features=3)
# Only the feature check is enabled whenever the number of dimensions is 2
# even if allow_nd is enabled:
assert_raise_message(ValueError, msg, check_X_y, X, y,
ensure_min_features=3, allow_nd=True)
# Simulate a case where a pipeline stage has trimmed all the features of a
# 2D dataset.
X = np.empty(0).reshape(10, 0)
y = np.ones(10)
msg = "0 feature(s) (shape=(10, 0)) while a minimum of 1 is required."
assert_raise_message(ValueError, msg, check_X_y, X, y)
# nd-data is not checked for any minimum number of features by default:
X = np.ones((10, 0, 28, 28))
y = np.ones(10)
X_checked, y_checked = check_X_y(X, y, allow_nd=True)
assert_array_equal(X, X_checked)
assert_array_equal(y, y_checked)
def test_has_fit_parameter():
assert_false(has_fit_parameter(KNeighborsClassifier, "sample_weight"))
assert_true(has_fit_parameter(RandomForestRegressor, "sample_weight"))
assert_true(has_fit_parameter(SVR, "sample_weight"))
assert_true(has_fit_parameter(SVR(), "sample_weight"))
def test_check_symmetric():
arr_sym = np.array([[0, 1], [1, 2]])
arr_bad = np.ones(2)
arr_asym = np.array([[0, 2], [0, 2]])
test_arrays = {'dense': arr_asym,
'dok': sp.dok_matrix(arr_asym),
'csr': sp.csr_matrix(arr_asym),
'csc': sp.csc_matrix(arr_asym),
'coo': sp.coo_matrix(arr_asym),
'lil': sp.lil_matrix(arr_asym),
'bsr': sp.bsr_matrix(arr_asym)}
# check error for bad inputs
assert_raises(ValueError, check_symmetric, arr_bad)
# check that asymmetric arrays are properly symmetrized
for arr_format, arr in test_arrays.items():
# Check for warnings and errors
assert_warns(UserWarning, check_symmetric, arr)
assert_raises(ValueError, check_symmetric, arr, raise_exception=True)
output = check_symmetric(arr, raise_warning=False)
if sp.issparse(output):
assert_equal(output.format, arr_format)
assert_array_equal(output.toarray(), arr_sym)
else:
assert_array_equal(output, arr_sym)
def test_check_is_fitted():
# Check is ValueError raised when non estimator instance passed
assert_raises(ValueError, check_is_fitted, ARDRegression, "coef_")
assert_raises(TypeError, check_is_fitted, "SVR", "support_")
ard = ARDRegression()
svr = SVR()
try:
assert_raises(NotFittedError, check_is_fitted, ard, "coef_")
assert_raises(NotFittedError, check_is_fitted, svr, "support_")
except ValueError:
assert False, "check_is_fitted failed with ValueError"
# NotFittedError is a subclass of both ValueError and AttributeError
try:
check_is_fitted(ard, "coef_", "Random message %(name)s, %(name)s")
except ValueError as e:
assert_equal(str(e), "Random message ARDRegression, ARDRegression")
try:
check_is_fitted(svr, "support_", "Another message %(name)s, %(name)s")
except AttributeError as e:
assert_equal(str(e), "Another message SVR, SVR")
ard.fit(*make_blobs())
svr.fit(*make_blobs())
assert_equal(None, check_is_fitted(ard, "coef_"))
assert_equal(None, check_is_fitted(svr, "support_"))
def test_check_consistent_length():
check_consistent_length([1], [2], [3], [4], [5])
check_consistent_length([[1, 2], [[1, 2]]], [1, 2], ['a', 'b'])
check_consistent_length([1], (2,), np.array([3]), sp.csr_matrix((1, 2)))
assert_raises_regexp(ValueError, 'inconsistent numbers of samples',
check_consistent_length, [1, 2], [1])
assert_raises_regexp(TypeError, 'got <\w+ \'int\'>',
check_consistent_length, [1, 2], 1)
assert_raises_regexp(TypeError, 'got <\w+ \'object\'>',
check_consistent_length, [1, 2], object())
assert_raises(TypeError, check_consistent_length, [1, 2], np.array(1))
# Despite ensembles having __len__ they must raise TypeError
assert_raises_regexp(TypeError, 'estimator', check_consistent_length,
[1, 2], RandomForestRegressor())
# XXX: We should have a test with a string, but what is correct behaviour?
| bsd-3-clause |
amanzotti/algorithm_coursera | percolation.py | 1 | 3417 | import union_find
import numpy as np
class Percolation(object):
"""docstring for Percolation
1==closed
0==open
"""
def __init__(self, n):
super(Percolation, self).__init__()
assert n>=0
self.n = n
self.matrix = np.ones((n,n),dtype=np.int)
self.union_grid = union_find.union_grid_improved(n*n+2) # 2 more to allocate the virtual node at the top and at the bottom.
# link bottom and top row to virtual.
for x in np.arange(0,n):
self.union_grid.connect_slow(x,n*n)
for x in np.arange(n*n-n,n*n):
self.union_grid.connect_slow(x,n*n+1)
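# Index convention (assumed from the unravel_index calls below): site i maps to
# (row, col) = np.unravel_index(i, (n, n)); node n*n is the virtual top site and
# node n*n + 1 is the virtual bottom site, so the system percolates once the two
# virtual sites fall into the same union-find component.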
def open_site(self,i):
assert i < self.n*self.n and i>=0, 'index out of boundary'
p,q =np.unravel_index( i, (self.n,self.n))
# print p,q
if self.matrix[p,q]==0:
# print 'return'
return
self.matrix[p,q]=0
if 0<=p+1<self.n and self.matrix[p+1,q]==0 :
# print np.ravel_multi_index([[p+1],[q]],(self.n,self.n)),p+1,q
self.union_grid.connect_slow(i,np.int(np.ravel_multi_index([[p+1],[q]], (self.n,self.n))))
if 0<=p-1<self.n and self.matrix[p-1,q]==0 :
# print np.ravel_multi_index([[p-1],[q]],(self.n,self.n)),p-1,q
self.union_grid.connect_slow(i,np.int(np.ravel_multi_index([[p-1],[q]], (self.n,self.n))))
if 0<=q+1<self.n and self.matrix[p,q+1]==0:
# print np.ravel_multi_index([[p],[q+1]],(self.n,self.n)),p,q+1
self.union_grid.connect_slow(i,np.int(np.ravel_multi_index([[p],[q+1]], (self.n,self.n))))
if 0<=q-1<self.n and self.matrix[p,q-1]==0:
# print np.ravel_multi_index([[p],[q-1]],(self.n,self.n)),p,q-1
self.union_grid.connect_slow(i,np.int(np.ravel_multi_index([[p],[q-1]], (self.n,self.n))))
def is_close(self,i):
assert i < self.n*self.n and i>=0, 'index out of boundary'
p,q =np.unravel_index( i, (self.n,self.n))
return self.matrix[p,q] == 1
def is_open(self,i):
assert i < self.n*self.n and i>=0, 'index out of boundary'
p,q =np.unravel_index( i, (self.n,self.n))
return self.matrix[p,q] == 0
def is_full(self,i):
assert i < self.n*self.n and i>=0, 'index out of boundary'
return self.union_grid.is_connected_quick(i,self.n*self.n)
def is_percolating(self):
return self.union_grid.is_connected_quick(self.n*self.n,self.n*self.n+1)
# GRaphic
def plot_board(self,fig_n):
import matplotlib.pyplot as plt
plt.matshow(np.invert(self.matrix), fignum=fig_n, cmap=plt.cm.gray)
if __name__ == '__main__':
import time
import union_find
import numpy as np
import matplotlib.pyplot as plt
import percolation
n= 25
ratio = []
for x in np.arange(1,200):
indeces = np.random.permutation(n*n)
perc = percolation.Percolation(n)
i=0
# plt.figure(1)
while (not perc.is_percolating()):
perc.open_site(indeces[i])
# perc.plot_board(fig_n=1)
# plt.show()
# time.sleep(3)
# print perc.is_percolating()
i+=1
print 1-np.sum(perc.matrix)/float(np.size(perc.matrix))
ratio.append(1-np.sum(perc.matrix)/float(np.size(perc.matrix)))
# plt.savefig('test{}.pdf'.format(i))
print 'mean' , np.mean(ratio)
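# The mean open-site fraction printed above is a Monte Carlo estimate of the
# percolation threshold; for large n it should approach roughly 0.593 for site
# percolation on a square lattice (here n = 25, so finite-size effects remain).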
| gpl-2.0 |
BasuruK/sGlass | Indoor_Object_Recognition_Engine/IOIM/Indoor_Object_Recognition_System.py | 1 | 8640 |
import os,cv2
import numpy as np
import matplotlib.pyplot as plt
from sklearn.utils import shuffle
from sklearn.cross_validation import train_test_split
from keras import backend as K
K.set_image_dim_ordering('tf')
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers.core import Dense,Dropout,Activation,Flatten
from keras.layers.convolutional import Conv2D, MaxPooling2D
from keras.optimizers import SGD,RMSprop,adam,adadelta
import os.path
from keras.models import load_model
USE_SKLEARN_PREPROCESSING=False
from Dialogue_Manager.settings_manager import SettingsManager
from config import Configurations
from sklearn import preprocessing
class Indoor_Object_Recognition_System:
img_rows = 128
img_colms = 128
num_channels = 3
num_epoch = 6
img_data_scaled = None
img_name = None
SettingsController = None
ConfigurationsController = None
IMPORT_MANAGER = None
def __init__(self, import_manager,objectdatasetpath=None):
self.objectdatasetpath=objectdatasetpath
self.SettingsController = SettingsManager()
self.ConfigurationsController = Configurations()
self.IMPORT_MANAGER = import_manager
def image_to_feature_vector(self, image, size=(128, 128)):
# resize the image to a fixed size, then flatten it into a list of raw pixel intensities
return cv2.resize(image, size).flatten()
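# For the default 128x128 size on a 3-channel image this yields a
# 128 * 128 * 3 = 49,152 element vector (assuming cv2.imread's colour output).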
def train_indoorObjectRecognition_CNN(self):
if USE_SKLEARN_PREPROCESSING:
img_data=self.img_data_scaled
PATH = os.getcwd()
# Define data path
data_path = PATH + self.objectdatasetpath
data_dir_list = os.listdir(data_path)
img_data_list=[]
for dataset in data_dir_list:
print(data_dir_list)
img_list=os.listdir(data_path+'/'+dataset)
print('Loaded the images of dataset-'+'{}\n'.format(dataset))
for img in img_list:
input_img=cv2.imread(data_path+'/'+dataset+'/'+img)
input_img_flatten = self.image_to_feature_vector(input_img, (128, 128))
img_data_list.append(input_img_flatten)
img_data=np.array(img_data_list)
img_data=img_data.astype('float')
print('Image Data',img_data.shape)
if self.num_channels==1:
if K.image_dim_ordering()=='th':
img_data=np.expand_dims(img_data,axis=1)
print('Image Data BnW',img_data.shape)
else:
img_data=np.expand_dims(img_data,axis=4)
print('Image Data BnW',img_data.shape)
else:
if K.image_dim_ordering()=='th':
img_data=np.rollaxis(img_data,3,1)
print('Image Data RGB',img_data.shape)
image_data_scaled=preprocessing.scale(img_data)
print("Image Data Scaled" , image_data_scaled)
if K.image_dim_ordering()=='th':
image_data_scaled=image_data_scaled.reshape(img_data.shape[0],self.num_channels,self.img_rows,self.img_colms)
print('Image Data Scaled BnW',image_data_scaled.shape)
else:
image_data_scaled=image_data_scaled.reshape(img_data.shape[0],self.img_rows,self.img_colms,self.num_channels)
print('Image Data Scaled RGB',image_data_scaled.shape)
# Define classes
num_classes=2
num_samples=img_data.shape[0]
labels=np.ones((num_samples,),dtype='int64')
labels[0:33]=0
labels[33:63]=1
names = ['bottle', 'mug']
# convert class labels to on-hot encoding
Y = np_utils.to_categorical(labels,num_classes)
x, y = shuffle(img_data,Y,random_state=2)
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=4)
X_train = X_train.reshape(X_train.shape[0], self.img_colms, self.img_rows, -1)
X_test = X_test.reshape(X_test.shape[0], self.img_colms, self.img_rows, -1)
input_shape = (self.img_colms, self.img_rows, 1)
# Defining the model
input_shape = img_data[0].shape
model = Sequential()
model.add(Conv2D(32, (3, 3), border_mode='same', input_shape=(128, 128, 3)))
model.add(Activation('relu'))
model.add(Conv2D(32, ( 3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2),dim_ordering='th'))
model.add(Dropout(0.5))
model.add(Conv2D(64, (3, 3),dim_ordering='th'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2),dim_ordering='th'))
model.add(Dropout(0.5))
model.add(Flatten())
model.add(Dense(512)) #no of hidden layers
model.add(Activation('relu'))
model.add(Dropout(0.3))
model.add(Dense(num_classes))
model.add(Activation('softmax'))
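# Architecture recap (as assembled above): Conv(32) -> Conv(32) -> pool/dropout ->
# Conv(64) -> pool/dropout -> Flatten -> Dense(512) -> dropout -> Dense(2, softmax),
# i.e. a small CNN over 128x128x3 inputs for the two classes (bottle, mug).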
model.compile(optimizer='adam',
loss='categorical_crossentropy',
metrics=['accuracy'])
# Viewing model_configuration
model.summary()
model.get_config()
model.layers[0].get_config()
model.layers[0].input_shape
model.layers[0].output_shape
model.layers[0].get_weights()
np.shape(model.layers[0].get_weights()[0])
model.layers[0].trainable
# Training
if os.path.isfile('Indoor_Object_Recognition.h5') == False:
hist = model.fit(X_train, y_train, batch_size=150, epochs=self.num_epoch, verbose=1, validation_data=(X_test, y_test))
model.save('Indoor_Object_Recognition.h5')
else:
model = load_model('Indoor_Object_Recognition.h5')  # reuse the previously saved model for the evaluation below
# Evaluating the model
score = model.evaluate(X_test, y_test, batch_size=150 , verbose=0)
print('Test Loss:', score[0])
print('Test accuracy:', score[1])
test_image = X_test[0:1]
y_test[0:1]
return model
def capture_IO_image(self):
cam=cv2.VideoCapture(0)
save = 'C:/Users/Nipuni/AnacondaProjects/Indoor_Object_Identification_System/IOIM/Indoor Objects/Test'
cv2.namedWindow("test")
img_counter=0
while True:
ret, frame = cam.read()
if not ret:
break
cv2.imshow("test", frame)
k=cv2.waitKey(1)
if k%256 == 27:
# ESC pressed
print("Escape hit, closing...")
break
elif k%256 == 32:
# SPACE pressed
self.img_name="indoor_object_{}.png".format(img_counter)
return_value, image = cam.read()
cv2.imwrite(os.path.join(save, self.img_name), image)
print("{} written!".format(self.img_name))
img_counter +=1
cam.release()
cv2.destroyAllWindows()
return self.img_name
def predict_object_class(self, path):
# Predicting the test image
test_image = cv2.imread(path)
test_image=cv2.resize(test_image,(128,128))
test_image = np.array(test_image)
test_image = test_image.astype('float32')
test_image /= 255
if self.num_channels == 1:
if K.image_dim_ordering()=='th':
test_image = np.expand_dims(test_image, axis=1)
else:
test_image= np.expand_dims(test_image, axis=4)
else:
if K.image_dim_ordering()=='th':
test_image=np.rollaxis(test_image,3,1)
else:
test_image= np.expand_dims(test_image, axis=0)
objectModel = self.IMPORT_MANAGER.load_cnn_model()
# Predicting the test image
print(objectModel.predict(test_image))
predict_class=objectModel.predict_classes(test_image)
return predict_class
def predict_singleObject(self):
indoor_object = self.capture_IO_image()
path = os.path.abspath(indoor_object)
predict_class = None
prediction = self.predict_object_class(
path="Indoor_Object_Recognition_Engine/IOIM/Indoor Objects/Test/indoor_object_0.png")
if prediction == [0]:
predict_class = 'Bottle'
print("This is a Bottle")
elif prediction == [1]:
predict_class = 'Mug'
print("This is a Mug")
return predict_class
def predict_objects(self, image):
classification_model = self.IMPORT_MANAGER.load_cnn_model()
prediction = classification_model.predict_classes(image)
return prediction
| gpl-3.0 |
cactorium/UCFBrainStuff | old/old_py/brainstuff.py | 1 | 1746 | # This is an example of popping a packet from the Emotiv class's packet queue
# and printing the gyro x and y values to the console.
from emokit.emotiv import Emotiv
import platform
if platform.system() == "Windows":
import socket # Needed to prevent gevent crashing on Windows. (surfly / gevent issue #459)
import gevent
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
is_running = True
# TODO: is_running is not working as expected. But it DOES work!
def evt_main():
global is_running
ring_buf = np.zeros(x.size)
headset = Emotiv()
gevent.spawn(headset.setup)
gevent.sleep(0)
pos = 0
try:
while is_running:
packet = headset.dequeue()
print packet.gyro_x, packet.gyro_y
ring_buf[pos] = packet.sensors["O2"]["value"]
pos = (pos + 1) % ring_buf.size
if pos % 4 == 0:
yield np.concatenate((ring_buf[pos:ring_buf.size:1], ring_buf[0:pos:1]))
gevent.sleep(0)
except KeyboardInterrupt:
headset.close()
finally:
is_running = False
headset.close()
x = np.arange(0, 4096)
test_buf = np.zeros(x.size)
fig, ax = plt.subplots()
line, = ax.plot(x, test_buf)
plt.axis([0, x.size - 1, -8192, 8191])
def init():
line.set_ydata(np.ma.array(x, mask=True))
return line,
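# animate() below receives the time-ordered ring buffer yielded by evt_main() and
# plots the power spectrum of the most recent 4096 O2 sensor samples, i.e. |FFT(rb)|^2 per bin.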
def animate(rb):
dft = np.fft.fft(rb)
line.set_ydata(np.square(np.absolute(dft)))
return line,
def counter():
global is_running
i = 0
while is_running:
yield i
i = i + 1
ani = animation.FuncAnimation(fig, animate, evt_main, init_func=init, interval=20, blit=True)
plt.show()
is_running = False
while True:
gevent.sleep(0)
| mit |
pv/scikit-learn | sklearn/utils/estimator_checks.py | 41 | 47834 | from __future__ import print_function
import types
import warnings
import sys
import traceback
import inspect
import pickle
from copy import deepcopy
import numpy as np
from scipy import sparse
import struct
from sklearn.externals.six.moves import zip
from sklearn.externals.joblib import hash, Memory
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import META_ESTIMATORS
from sklearn.utils.testing import set_random_state
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import ignore_warnings
from sklearn.base import (clone, ClassifierMixin, RegressorMixin,
TransformerMixin, ClusterMixin, BaseEstimator)
from sklearn.metrics import accuracy_score, adjusted_rand_score, f1_score
from sklearn.lda import LDA
from sklearn.random_projection import BaseRandomProjection
from sklearn.feature_selection import SelectKBest
from sklearn.svm.base import BaseLibSVM
from sklearn.pipeline import make_pipeline
from sklearn.utils.validation import DataConversionWarning
from sklearn.cross_validation import train_test_split
from sklearn.utils import shuffle
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_iris, load_boston, make_blobs
BOSTON = None
CROSS_DECOMPOSITION = ['PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']
MULTI_OUTPUT = ['CCA', 'DecisionTreeRegressor', 'ElasticNet',
'ExtraTreeRegressor', 'ExtraTreesRegressor', 'GaussianProcess',
'KNeighborsRegressor', 'KernelRidge', 'Lars', 'Lasso',
'LassoLars', 'LinearRegression', 'MultiTaskElasticNet',
'MultiTaskElasticNetCV', 'MultiTaskLasso', 'MultiTaskLassoCV',
'OrthogonalMatchingPursuit', 'PLSCanonical', 'PLSRegression',
'RANSACRegressor', 'RadiusNeighborsRegressor',
'RandomForestRegressor', 'Ridge', 'RidgeCV']
def _yield_non_meta_checks(name, Estimator):
yield check_estimators_dtypes
yield check_fit_score_takes_y
yield check_dtype_object
yield check_estimators_fit_returns_self
# Check that all estimator yield informative messages when
# trained on empty datasets
yield check_estimators_empty_data_messages
if name not in CROSS_DECOMPOSITION + ['SpectralEmbedding']:
# SpectralEmbedding is non-deterministic,
# see issue #4236
# cross-decomposition's "transform" returns X and Y
yield check_pipeline_consistency
if name not in ['Imputer']:
# Test that all estimators check their input for NaN's and infs
yield check_estimators_nan_inf
if name not in ['GaussianProcess']:
# FIXME!
# in particular GaussianProcess!
yield check_estimators_overwrite_params
if hasattr(Estimator, 'sparsify'):
yield check_sparsify_coefficients
yield check_estimator_sparse_data
# Test that estimators can be pickled, and once pickled
# give the same answer as before.
yield check_estimators_pickle
def _yield_classifier_checks(name, Classifier):
# test classifiers can handle non-array data
yield check_classifier_data_not_an_array
# test classifiers trained on a single label always return this label
yield check_classifiers_one_label
yield check_classifiers_classes
yield check_estimators_partial_fit_n_features
# basic consistency testing
yield check_classifiers_train
if (name not in ["MultinomialNB", "LabelPropagation", "LabelSpreading"]
# TODO some complication with -1 label
and name not in ["DecisionTreeClassifier",
"ExtraTreeClassifier"]):
# We don't raise a warning in these classifiers, as
# the column y interface is used by the forests.
yield check_supervised_y_2d
# test if NotFittedError is raised
yield check_estimators_unfitted
if 'class_weight' in Classifier().get_params().keys():
yield check_class_weight_classifiers
def _yield_regressor_checks(name, Regressor):
# TODO: test with intercept
# TODO: test with multiple responses
# basic testing
yield check_regressors_train
yield check_regressor_data_not_an_array
yield check_estimators_partial_fit_n_features
yield check_regressors_no_decision_function
yield check_supervised_y_2d
if name != 'CCA':
# check that the regressor handles int input
yield check_regressors_int
# Test if NotFittedError is raised
yield check_estimators_unfitted
def _yield_transformer_checks(name, Transformer):
# All transformers should either deal with sparse data or raise an
# exception with type TypeError and an intelligible error message
if name not in ['AdditiveChi2Sampler', 'Binarizer', 'Normalizer',
'PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']:
yield check_transformer_data_not_an_array
# these don't actually fit the data, so don't raise errors
if name not in ['AdditiveChi2Sampler', 'Binarizer',
'FunctionTransformer', 'Normalizer']:
# basic tests
yield check_transformer_general
yield check_transformers_unfitted
def _yield_clustering_checks(name, Clusterer):
yield check_clusterer_compute_labels_predict
if name not in ('WardAgglomeration', "FeatureAgglomeration"):
# this is clustering on the features
# let's not test that here.
yield check_clustering
yield check_estimators_partial_fit_n_features
def _yield_all_checks(name, Estimator):
for check in _yield_non_meta_checks(name, Estimator):
yield check
if issubclass(Estimator, ClassifierMixin):
for check in _yield_classifier_checks(name, Estimator):
yield check
if issubclass(Estimator, RegressorMixin):
for check in _yield_regressor_checks(name, Estimator):
yield check
if issubclass(Estimator, TransformerMixin):
for check in _yield_transformer_checks(name, Estimator):
yield check
if issubclass(Estimator, ClusterMixin):
for check in _yield_clustering_checks(name, Estimator):
yield check
def check_estimator(Estimator):
"""Check if estimator adheres to sklearn conventions.
This estimator will run an extensive test-suite for input validation,
shapes, etc.
Additional tests for classifiers, regressors, clustering or transformers
will be run if the Estimator class inherits from the corresponding mixin
from sklearn.base.
Parameters
----------
Estimator : class
Class to check.
"""
name = Estimator.__class__.__name__
check_parameters_default_constructible(name, Estimator)
for check in _yield_all_checks(name, Estimator):
check(name, Estimator)
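# Illustrative usage (a sketch, not part of the original module): pass the
# estimator class itself, and the first failing check raises an assertion/error.
#
#     from sklearn.utils.estimator_checks import check_estimator
#     from sklearn.svm import LinearSVC
#
#     check_estimator(LinearSVC)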
def _boston_subset(n_samples=200):
global BOSTON
if BOSTON is None:
boston = load_boston()
X, y = boston.data, boston.target
X, y = shuffle(X, y, random_state=0)
X, y = X[:n_samples], y[:n_samples]
X = StandardScaler().fit_transform(X)
BOSTON = X, y
return BOSTON
def set_fast_parameters(estimator):
# speed up some estimators
params = estimator.get_params()
if ("n_iter" in params
and estimator.__class__.__name__ != "TSNE"):
estimator.set_params(n_iter=5)
if "max_iter" in params:
# NMF
if estimator.max_iter is not None:
estimator.set_params(max_iter=min(5, estimator.max_iter))
# LinearSVR
if estimator.__class__.__name__ == 'LinearSVR':
estimator.set_params(max_iter=20)
if "n_resampling" in params:
# randomized lasso
estimator.set_params(n_resampling=5)
if "n_estimators" in params:
# especially gradient boosting with default 100
estimator.set_params(n_estimators=min(5, estimator.n_estimators))
if "max_trials" in params:
# RANSAC
estimator.set_params(max_trials=10)
if "n_init" in params:
# K-Means
estimator.set_params(n_init=2)
if estimator.__class__.__name__ == "SelectFdr":
# be tolerant of noisy datasets (not actually speed)
estimator.set_params(alpha=.5)
if estimator.__class__.__name__ == "TheilSenRegressor":
estimator.max_subpopulation = 100
if isinstance(estimator, BaseRandomProjection):
# Due to the jl lemma and often very few samples, the number
# of components of the random matrix projection will be probably
# greater than the number of features.
# So we impose a smaller number (avoid "auto" mode)
estimator.set_params(n_components=1)
if isinstance(estimator, SelectKBest):
# SelectKBest has a default of k=10
# which is more feature than we have in most case.
estimator.set_params(k=1)
class NotAnArray(object):
" An object that is convertable to an array"
def __init__(self, data):
self.data = data
def __array__(self, dtype=None):
return self.data
def _is_32bit():
"""Detect if process is 32bit Python."""
return struct.calcsize('P') * 8 == 32
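# struct.calcsize('P') is the size of a C pointer in bytes, so this evaluates to
# 4 * 8 == 32 on 32-bit interpreters and 8 * 8 == 64 on 64-bit ones.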
def check_estimator_sparse_data(name, Estimator):
rng = np.random.RandomState(0)
X = rng.rand(40, 10)
X[X < .8] = 0
X = sparse.csr_matrix(X)
y = (4 * rng.rand(40)).astype(np.int)
# catch deprecation warnings
with warnings.catch_warnings():
if name in ['Scaler', 'StandardScaler']:
estimator = Estimator(with_mean=False)
else:
estimator = Estimator()
set_fast_parameters(estimator)
# fit and predict
try:
estimator.fit(X, y)
if hasattr(estimator, "predict"):
estimator.predict(X)
if hasattr(estimator, 'predict_proba'):
estimator.predict_proba(X)
except TypeError as e:
if 'sparse' not in repr(e):
print("Estimator %s doesn't seem to fail gracefully on "
"sparse data: error message state explicitly that "
"sparse input is not supported if this is not the case."
% name)
raise
except Exception:
print("Estimator %s doesn't seem to fail gracefully on "
"sparse data: it should raise a TypeError if sparse input "
"is explicitly not supported." % name)
raise
def check_dtype_object(name, Estimator):
# check that estimators treat dtype object as numeric if possible
rng = np.random.RandomState(0)
X = rng.rand(40, 10).astype(object)
y = (X[:, 0] * 4).astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
with warnings.catch_warnings():
estimator = Estimator()
set_fast_parameters(estimator)
estimator.fit(X, y)
if hasattr(estimator, "predict"):
estimator.predict(X)
if hasattr(estimator, "transform"):
estimator.transform(X)
try:
estimator.fit(X, y.astype(object))
except Exception as e:
if "Unknown label type" not in str(e):
raise
X[0, 0] = {'foo': 'bar'}
msg = "argument must be a string or a number"
assert_raises_regex(TypeError, msg, estimator.fit, X, y)
def check_transformer_general(name, Transformer):
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X = StandardScaler().fit_transform(X)
X -= X.min()
_check_transformer(name, Transformer, X, y)
_check_transformer(name, Transformer, X.tolist(), y.tolist())
def check_transformer_data_not_an_array(name, Transformer):
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X = StandardScaler().fit_transform(X)
# We need to make sure that we have non-negative data, for things
# like NMF
X -= X.min() - .1
this_X = NotAnArray(X)
this_y = NotAnArray(np.asarray(y))
_check_transformer(name, Transformer, this_X, this_y)
def check_transformers_unfitted(name, Transformer):
X, y = _boston_subset()
with warnings.catch_warnings(record=True):
transformer = Transformer()
assert_raises((AttributeError, ValueError), transformer.transform, X)
def _check_transformer(name, Transformer, X, y):
if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _is_32bit():
# Those transformers yield non-deterministic output when executed on
# a 32bit Python. The same transformers are stable on 64bit Python.
# FIXME: try to isolate a minimalistic reproduction case only depending
# on numpy & scipy and/or maybe generate a test dataset that does not
# cause such unstable behaviors.
msg = name + ' is non deterministic on 32bit Python'
raise SkipTest(msg)
n_samples, n_features = np.asarray(X).shape
# catch deprecation warnings
with warnings.catch_warnings(record=True):
transformer = Transformer()
set_random_state(transformer)
set_fast_parameters(transformer)
# fit
if name in CROSS_DECOMPOSITION:
y_ = np.c_[y, y]
y_[::2, 1] *= 2
else:
y_ = y
transformer.fit(X, y_)
X_pred = transformer.fit_transform(X, y=y_)
if isinstance(X_pred, tuple):
for x_pred in X_pred:
assert_equal(x_pred.shape[0], n_samples)
else:
assert_equal(X_pred.shape[0], n_samples)
if hasattr(transformer, 'transform'):
if name in CROSS_DECOMPOSITION:
X_pred2 = transformer.transform(X, y_)
X_pred3 = transformer.fit_transform(X, y=y_)
else:
X_pred2 = transformer.transform(X)
X_pred3 = transformer.fit_transform(X, y=y_)
if isinstance(X_pred, tuple) and isinstance(X_pred2, tuple):
for x_pred, x_pred2, x_pred3 in zip(X_pred, X_pred2, X_pred3):
assert_array_almost_equal(
x_pred, x_pred2, 2,
"fit_transform and transform outcomes not consistent in %s"
% Transformer)
assert_array_almost_equal(
x_pred, x_pred3, 2,
"consecutive fit_transform outcomes not consistent in %s"
% Transformer)
else:
assert_array_almost_equal(
X_pred, X_pred2, 2,
"fit_transform and transform outcomes not consistent in %s"
% Transformer)
assert_array_almost_equal(
X_pred, X_pred3, 2,
"consecutive fit_transform outcomes not consistent in %s"
% Transformer)
# raises error on malformed input for transform
if hasattr(X, 'T'):
# If it's not an array, it does not have a 'T' property
assert_raises(ValueError, transformer.transform, X.T)
@ignore_warnings
def check_pipeline_consistency(name, Estimator):
if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _is_32bit():
# Those transformers yield non-deterministic output when executed on
# a 32bit Python. The same transformers are stable on 64bit Python.
# FIXME: try to isolate a minimalistic reproduction case only depending
# scipy and/or maybe generate a test dataset that does not
# cause such unstable behaviors.
msg = name + ' is non deterministic on 32bit Python'
raise SkipTest(msg)
# check that make_pipeline(est) gives same score as est
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X -= X.min()
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator)
pipeline = make_pipeline(estimator)
estimator.fit(X, y)
pipeline.fit(X, y)
funcs = ["score", "fit_transform"]
for func_name in funcs:
func = getattr(estimator, func_name, None)
if func is not None:
func_pipeline = getattr(pipeline, func_name)
result = func(X, y)
result_pipe = func_pipeline(X, y)
assert_array_almost_equal(result, result_pipe)
@ignore_warnings
def check_fit_score_takes_y(name, Estimator):
# check that all estimators accept an optional y
# in fit and score so they can be used in pipelines
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 3))
y = np.arange(10) % 3
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator)
funcs = ["fit", "score", "partial_fit", "fit_predict", "fit_transform"]
for func_name in funcs:
func = getattr(estimator, func_name, None)
if func is not None:
func(X, y)
args = inspect.getargspec(func).args
assert_true(args[2] in ["y", "Y"])
@ignore_warnings
def check_estimators_dtypes(name, Estimator):
rnd = np.random.RandomState(0)
X_train_32 = 3 * rnd.uniform(size=(20, 5)).astype(np.float32)
X_train_64 = X_train_32.astype(np.float64)
X_train_int_64 = X_train_32.astype(np.int64)
X_train_int_32 = X_train_32.astype(np.int32)
y = X_train_int_64[:, 0]
y = multioutput_estimator_convert_y_2d(name, y)
for X_train in [X_train_32, X_train_64, X_train_int_64, X_train_int_32]:
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator, 1)
estimator.fit(X_train, y)
for method in ["predict", "transform", "decision_function",
"predict_proba"]:
if hasattr(estimator, method):
getattr(estimator, method)(X_train)
def check_estimators_empty_data_messages(name, Estimator):
e = Estimator()
set_fast_parameters(e)
set_random_state(e, 1)
X_zero_samples = np.empty(0).reshape(0, 3)
# The precise message can change depending on whether X or y is
# validated first. Let us test the type of exception only:
assert_raises(ValueError, e.fit, X_zero_samples, [])
X_zero_features = np.empty(0).reshape(3, 0)
# the following y should be accepted by both classifiers and regressors
# and ignored by unsupervised models
y = multioutput_estimator_convert_y_2d(name, np.array([1, 0, 1]))
msg = "0 feature(s) (shape=(3, 0)) while a minimum of 1 is required."
assert_raise_message(ValueError, msg, e.fit, X_zero_features, y)
def check_estimators_nan_inf(name, Estimator):
rnd = np.random.RandomState(0)
X_train_finite = rnd.uniform(size=(10, 3))
X_train_nan = rnd.uniform(size=(10, 3))
X_train_nan[0, 0] = np.nan
X_train_inf = rnd.uniform(size=(10, 3))
X_train_inf[0, 0] = np.inf
y = np.ones(10)
y[:5] = 0
y = multioutput_estimator_convert_y_2d(name, y)
error_string_fit = "Estimator doesn't check for NaN and inf in fit."
error_string_predict = ("Estimator doesn't check for NaN and inf in"
" predict.")
error_string_transform = ("Estimator doesn't check for NaN and inf in"
" transform.")
for X_train in [X_train_nan, X_train_inf]:
# catch deprecation warnings
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator, 1)
# try to fit
try:
estimator.fit(X_train, y)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_fit, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_fit, Estimator, exc)
traceback.print_exc(file=sys.stdout)
raise exc
else:
raise AssertionError(error_string_fit, Estimator)
# actually fit
estimator.fit(X_train_finite, y)
# predict
if hasattr(estimator, "predict"):
try:
estimator.predict(X_train)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_predict, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_predict, Estimator, exc)
traceback.print_exc(file=sys.stdout)
else:
raise AssertionError(error_string_predict, Estimator)
# transform
if hasattr(estimator, "transform"):
try:
estimator.transform(X_train)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_transform, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_transform, Estimator, exc)
traceback.print_exc(file=sys.stdout)
else:
raise AssertionError(error_string_transform, Estimator)
def check_estimators_pickle(name, Estimator):
"""Test that we can pickle all estimators"""
check_methods = ["predict", "transform", "decision_function",
"predict_proba"]
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
# some estimators can't do features less than 0
X -= X.min()
# some estimators only take multioutputs
y = multioutput_estimator_convert_y_2d(name, y)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_random_state(estimator)
set_fast_parameters(estimator)
estimator.fit(X, y)
result = dict()
for method in check_methods:
if hasattr(estimator, method):
result[method] = getattr(estimator, method)(X)
# pickle and unpickle!
pickled_estimator = pickle.dumps(estimator)
unpickled_estimator = pickle.loads(pickled_estimator)
for method in result:
unpickled_result = getattr(unpickled_estimator, method)(X)
assert_array_almost_equal(result[method], unpickled_result)
def check_estimators_partial_fit_n_features(name, Alg):
# check if number of features changes between calls to partial_fit.
if not hasattr(Alg, 'partial_fit'):
return
X, y = make_blobs(n_samples=50, random_state=1)
X -= X.min()
with warnings.catch_warnings(record=True):
alg = Alg()
set_fast_parameters(alg)
if isinstance(alg, ClassifierMixin):
classes = np.unique(y)
alg.partial_fit(X, y, classes=classes)
else:
alg.partial_fit(X, y)
assert_raises(ValueError, alg.partial_fit, X[:, :-1], y)
def check_clustering(name, Alg):
X, y = make_blobs(n_samples=50, random_state=1)
X, y = shuffle(X, y, random_state=7)
X = StandardScaler().fit_transform(X)
n_samples, n_features = X.shape
# catch deprecation and neighbors warnings
with warnings.catch_warnings(record=True):
alg = Alg()
set_fast_parameters(alg)
if hasattr(alg, "n_clusters"):
alg.set_params(n_clusters=3)
set_random_state(alg)
if name == 'AffinityPropagation':
alg.set_params(preference=-100)
alg.set_params(max_iter=100)
# fit
alg.fit(X)
# with lists
alg.fit(X.tolist())
assert_equal(alg.labels_.shape, (n_samples,))
pred = alg.labels_
assert_greater(adjusted_rand_score(pred, y), 0.4)
# fit another time with ``fit_predict`` and compare results
if name is 'SpectralClustering':
# there is no way to make Spectral clustering deterministic :(
return
set_random_state(alg)
with warnings.catch_warnings(record=True):
pred2 = alg.fit_predict(X)
assert_array_equal(pred, pred2)
def check_clusterer_compute_labels_predict(name, Clusterer):
"""Check that predict is invariant of compute_labels"""
X, y = make_blobs(n_samples=20, random_state=0)
clusterer = Clusterer()
if hasattr(clusterer, "compute_labels"):
# MiniBatchKMeans
if hasattr(clusterer, "random_state"):
clusterer.set_params(random_state=0)
X_pred1 = clusterer.fit(X).predict(X)
clusterer.set_params(compute_labels=False)
X_pred2 = clusterer.fit(X).predict(X)
assert_array_equal(X_pred1, X_pred2)
def check_classifiers_one_label(name, Classifier):
error_string_fit = "Classifier can't train when only one class is present."
error_string_predict = ("Classifier can't predict when only one class is "
"present.")
rnd = np.random.RandomState(0)
X_train = rnd.uniform(size=(10, 3))
X_test = rnd.uniform(size=(10, 3))
y = np.ones(10)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
classifier = Classifier()
set_fast_parameters(classifier)
# try to fit
try:
classifier.fit(X_train, y)
except ValueError as e:
if 'class' not in repr(e):
print(error_string_fit, Classifier, e)
traceback.print_exc(file=sys.stdout)
raise e
else:
return
except Exception as exc:
print(error_string_fit, Classifier, exc)
traceback.print_exc(file=sys.stdout)
raise exc
# predict
try:
assert_array_equal(classifier.predict(X_test), y)
except Exception as exc:
print(error_string_predict, Classifier, exc)
raise exc
def check_classifiers_train(name, Classifier):
X_m, y_m = make_blobs(n_samples=300, random_state=0)
X_m, y_m = shuffle(X_m, y_m, random_state=7)
X_m = StandardScaler().fit_transform(X_m)
# generate binary problem from multi-class one
y_b = y_m[y_m != 2]
X_b = X_m[y_m != 2]
for (X, y) in [(X_m, y_m), (X_b, y_b)]:
# catch deprecation warnings
classes = np.unique(y)
n_classes = len(classes)
n_samples, n_features = X.shape
with warnings.catch_warnings(record=True):
classifier = Classifier()
if name in ['BernoulliNB', 'MultinomialNB']:
X -= X.min()
set_fast_parameters(classifier)
set_random_state(classifier)
# raises error on malformed input for fit
assert_raises(ValueError, classifier.fit, X, y[:-1])
# fit
classifier.fit(X, y)
# with lists
classifier.fit(X.tolist(), y.tolist())
assert_true(hasattr(classifier, "classes_"))
y_pred = classifier.predict(X)
assert_equal(y_pred.shape, (n_samples,))
# training set performance
if name not in ['BernoulliNB', 'MultinomialNB']:
assert_greater(accuracy_score(y, y_pred), 0.83)
# raises error on malformed input for predict
assert_raises(ValueError, classifier.predict, X.T)
if hasattr(classifier, "decision_function"):
try:
# decision_function agrees with predict
decision = classifier.decision_function(X)
if n_classes is 2:
assert_equal(decision.shape, (n_samples,))
dec_pred = (decision.ravel() > 0).astype(np.int)
assert_array_equal(dec_pred, y_pred)
if (n_classes is 3
and not isinstance(classifier, BaseLibSVM)):
# 1on1 of LibSVM works differently
assert_equal(decision.shape, (n_samples, n_classes))
assert_array_equal(np.argmax(decision, axis=1), y_pred)
# raises error on malformed input
assert_raises(ValueError,
classifier.decision_function, X.T)
# raises error on malformed input for decision_function
assert_raises(ValueError,
classifier.decision_function, X.T)
except NotImplementedError:
pass
if hasattr(classifier, "predict_proba"):
# predict_proba agrees with predict
y_prob = classifier.predict_proba(X)
assert_equal(y_prob.shape, (n_samples, n_classes))
assert_array_equal(np.argmax(y_prob, axis=1), y_pred)
# check that probas for all classes sum to one
assert_array_almost_equal(np.sum(y_prob, axis=1),
np.ones(n_samples))
# raises error on malformed input
assert_raises(ValueError, classifier.predict_proba, X.T)
# raises error on malformed input for predict_proba
assert_raises(ValueError, classifier.predict_proba, X.T)
def check_estimators_fit_returns_self(name, Estimator):
"""Check if self is returned when calling fit"""
X, y = make_blobs(random_state=0, n_samples=9, n_features=4)
y = multioutput_estimator_convert_y_2d(name, y)
# some want non-negative input
X -= X.min()
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator)
assert_true(estimator.fit(X, y) is estimator)
@ignore_warnings
def check_estimators_unfitted(name, Estimator):
"""Check that predict raises an exception in an unfitted estimator.
Unfitted estimators should raise either AttributeError or ValueError.
The specific exception type NotFittedError inherits from both and can
therefore be adequately raised for that purpose.
"""
# Common test for Regressors as well as Classifiers
X, y = _boston_subset()
with warnings.catch_warnings(record=True):
est = Estimator()
msg = "fit"
if hasattr(est, 'predict'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict, X)
if hasattr(est, 'decision_function'):
assert_raise_message((AttributeError, ValueError), msg,
est.decision_function, X)
if hasattr(est, 'predict_proba'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict_proba, X)
if hasattr(est, 'predict_log_proba'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict_log_proba, X)
def check_supervised_y_2d(name, Estimator):
if "MultiTask" in name:
# These only work on 2d, so this test makes no sense
return
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 3))
y = np.arange(10) % 3
# catch deprecation warnings
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator)
# fit
estimator.fit(X, y)
y_pred = estimator.predict(X)
set_random_state(estimator)
# Check that when a 2D y is given, a DataConversionWarning is
# raised
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", DataConversionWarning)
warnings.simplefilter("ignore", RuntimeWarning)
estimator.fit(X, y[:, np.newaxis])
y_pred_2d = estimator.predict(X)
msg = "expected 1 DataConversionWarning, got: %s" % (
", ".join([str(w_x) for w_x in w]))
if name not in MULTI_OUTPUT:
# check that we warned if we don't support multi-output
assert_greater(len(w), 0, msg)
assert_true("DataConversionWarning('A column-vector y"
" was passed when a 1d array was expected" in msg)
assert_array_almost_equal(y_pred.ravel(), y_pred_2d.ravel())
def check_classifiers_classes(name, Classifier):
X, y = make_blobs(n_samples=30, random_state=0, cluster_std=0.1)
X, y = shuffle(X, y, random_state=7)
X = StandardScaler().fit_transform(X)
# We need to make sure that we have non-negative data, for things
# like NMF
X -= X.min() - .1
y_names = np.array(["one", "two", "three"])[y]
for y_names in [y_names, y_names.astype('O')]:
if name in ["LabelPropagation", "LabelSpreading"]:
# TODO some complication with -1 label
y_ = y
else:
y_ = y_names
classes = np.unique(y_)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
classifier = Classifier()
if name == 'BernoulliNB':
classifier.set_params(binarize=X.mean())
set_fast_parameters(classifier)
set_random_state(classifier)
# fit
classifier.fit(X, y_)
y_pred = classifier.predict(X)
# training set performance
assert_array_equal(np.unique(y_), np.unique(y_pred))
if np.any(classifier.classes_ != classes):
print("Unexpected classes_ attribute for %r: "
"expected %s, got %s" %
(classifier, classes, classifier.classes_))
def check_regressors_int(name, Regressor):
X, _ = _boston_subset()
X = X[:50]
rnd = np.random.RandomState(0)
y = rnd.randint(3, size=X.shape[0])
y = multioutput_estimator_convert_y_2d(name, y)
rnd = np.random.RandomState(0)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
# separate estimators to control random seeds
regressor_1 = Regressor()
regressor_2 = Regressor()
set_fast_parameters(regressor_1)
set_fast_parameters(regressor_2)
set_random_state(regressor_1)
set_random_state(regressor_2)
if name in CROSS_DECOMPOSITION:
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
# fit
regressor_1.fit(X, y_)
pred1 = regressor_1.predict(X)
regressor_2.fit(X, y_.astype(np.float))
pred2 = regressor_2.predict(X)
assert_array_almost_equal(pred1, pred2, 2, name)
def check_regressors_train(name, Regressor):
X, y = _boston_subset()
y = StandardScaler().fit_transform(y) # X is already scaled
y = multioutput_estimator_convert_y_2d(name, y)
rnd = np.random.RandomState(0)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
regressor = Regressor()
set_fast_parameters(regressor)
if not hasattr(regressor, 'alphas') and hasattr(regressor, 'alpha'):
# linear regressors need to set alpha, but not generalized CV ones
regressor.alpha = 0.01
if name == 'PassiveAggressiveRegressor':
regressor.C = 0.01
# raises error on malformed input for fit
assert_raises(ValueError, regressor.fit, X, y[:-1])
# fit
if name in CROSS_DECOMPOSITION:
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
set_random_state(regressor)
regressor.fit(X, y_)
regressor.fit(X.tolist(), y_.tolist())
y_pred = regressor.predict(X)
assert_equal(y_pred.shape, y_.shape)
# TODO: find out why PLS and CCA fail. RANSAC is random
# and furthermore assumes the presence of outliers, hence
# skipped
if name not in ('PLSCanonical', 'CCA', 'RANSACRegressor'):
print(regressor)
assert_greater(regressor.score(X, y_), 0.5)
@ignore_warnings
def check_regressors_no_decision_function(name, Regressor):
# checks whether regressors have decision_function or predict_proba
rng = np.random.RandomState(0)
X = rng.normal(size=(10, 4))
y = multioutput_estimator_convert_y_2d(name, X[:, 0])
regressor = Regressor()
set_fast_parameters(regressor)
if hasattr(regressor, "n_components"):
# FIXME CCA, PLS is not robust to rank 1 effects
regressor.n_components = 1
regressor.fit(X, y)
funcs = ["decision_function", "predict_proba", "predict_log_proba"]
for func_name in funcs:
func = getattr(regressor, func_name, None)
if func is None:
# doesn't have function
continue
# has function. Should raise deprecation warning
msg = func_name
assert_warns_message(DeprecationWarning, msg, func, X)
def check_class_weight_classifiers(name, Classifier):
if name == "NuSVC":
# the sparse version has a parameter that doesn't do anything
raise SkipTest
if name.endswith("NB"):
# NaiveBayes classifiers have a somewhat different interface.
# FIXME SOON!
raise SkipTest
for n_centers in [2, 3]:
# create a very noisy dataset
X, y = make_blobs(centers=n_centers, random_state=0, cluster_std=20)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=0)
n_centers = len(np.unique(y_train))
if n_centers == 2:
class_weight = {0: 1000, 1: 0.0001}
else:
class_weight = {0: 1000, 1: 0.0001, 2: 0.0001}
with warnings.catch_warnings(record=True):
classifier = Classifier(class_weight=class_weight)
if hasattr(classifier, "n_iter"):
classifier.set_params(n_iter=100)
if hasattr(classifier, "min_weight_fraction_leaf"):
classifier.set_params(min_weight_fraction_leaf=0.01)
set_random_state(classifier)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
assert_greater(np.mean(y_pred == 0), 0.89)
def check_class_weight_balanced_classifiers(name, Classifier, X_train, y_train,
X_test, y_test, weights):
with warnings.catch_warnings(record=True):
classifier = Classifier()
if hasattr(classifier, "n_iter"):
classifier.set_params(n_iter=100)
set_random_state(classifier)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
classifier.set_params(class_weight='balanced')
classifier.fit(X_train, y_train)
y_pred_balanced = classifier.predict(X_test)
assert_greater(f1_score(y_test, y_pred_balanced, average='weighted'),
f1_score(y_test, y_pred, average='weighted'))
def check_class_weight_balanced_linear_classifier(name, Classifier):
"""Test class weights with non-contiguous class labels."""
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = np.array([1, 1, 1, -1, -1])
with warnings.catch_warnings(record=True):
classifier = Classifier()
if hasattr(classifier, "n_iter"):
# This is a very small dataset, default n_iter are likely to prevent
# convergence
classifier.set_params(n_iter=1000)
set_random_state(classifier)
# Let the model compute the class frequencies
classifier.set_params(class_weight='balanced')
coef_balanced = classifier.fit(X, y).coef_.copy()
# Count each label occurrence to reweight manually
n_samples = len(y)
n_classes = float(len(np.unique(y)))
class_weight = {1: n_samples / (np.sum(y == 1) * n_classes),
-1: n_samples / (np.sum(y == -1) * n_classes)}
classifier.set_params(class_weight=class_weight)
coef_manual = classifier.fit(X, y).coef_.copy()
assert_array_almost_equal(coef_balanced, coef_manual)
def check_estimators_overwrite_params(name, Estimator):
X, y = make_blobs(random_state=0, n_samples=9)
y = multioutput_estimator_convert_y_2d(name, y)
# some want non-negative input
X -= X.min()
with warnings.catch_warnings(record=True):
# catch deprecation warnings
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator)
# Make a physical copy of the orginal estimator parameters before fitting.
params = estimator.get_params()
original_params = deepcopy(params)
# Fit the model
estimator.fit(X, y)
# Compare the state of the model parameters with the original parameters
new_params = estimator.get_params()
for param_name, original_value in original_params.items():
new_value = new_params[param_name]
# We should never change or mutate the internal state of input
# parameters by default. To check this we use the joblib.hash function
# that introspects recursively any subobjects to compute a checksum.
# The only exception to this rule of immutable constructor parameters
# is possible RandomState instance but in this check we explicitly
# fixed the random_state params recursively to be integer seeds.
assert_equal(hash(new_value), hash(original_value),
"Estimator %s should not change or mutate "
" the parameter %s from %s to %s during fit."
% (name, param_name, original_value, new_value))
def check_sparsify_coefficients(name, Estimator):
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1],
[-1, -2], [2, 2], [-2, -2]])
y = [1, 1, 1, 2, 2, 2, 3, 3, 3]
est = Estimator()
est.fit(X, y)
pred_orig = est.predict(X)
# test sparsify with dense inputs
est.sparsify()
assert_true(sparse.issparse(est.coef_))
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
# pickle and unpickle with sparse coef_
est = pickle.loads(pickle.dumps(est))
assert_true(sparse.issparse(est.coef_))
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
def check_classifier_data_not_an_array(name, Estimator):
X = np.array([[3, 0], [0, 1], [0, 2], [1, 1], [1, 2], [2, 1]])
y = [1, 1, 1, 2, 2, 2]
y = multioutput_estimator_convert_y_2d(name, y)
check_estimators_data_not_an_array(name, Estimator, X, y)
def check_regressor_data_not_an_array(name, Estimator):
X, y = _boston_subset(n_samples=50)
y = multioutput_estimator_convert_y_2d(name, y)
check_estimators_data_not_an_array(name, Estimator, X, y)
def check_estimators_data_not_an_array(name, Estimator, X, y):
if name in CROSS_DECOMPOSITION:
raise SkipTest
# catch deprecation warnings
with warnings.catch_warnings(record=True):
# separate estimators to control random seeds
estimator_1 = Estimator()
estimator_2 = Estimator()
set_fast_parameters(estimator_1)
set_fast_parameters(estimator_2)
set_random_state(estimator_1)
set_random_state(estimator_2)
y_ = NotAnArray(np.asarray(y))
X_ = NotAnArray(np.asarray(X))
# fit
estimator_1.fit(X_, y_)
pred1 = estimator_1.predict(X_)
estimator_2.fit(X, y)
pred2 = estimator_2.predict(X)
assert_array_almost_equal(pred1, pred2, 2, name)
def check_parameters_default_constructible(name, Estimator):
classifier = LDA()
# test default-constructibility
# get rid of deprecation warnings
with warnings.catch_warnings(record=True):
if name in META_ESTIMATORS:
estimator = Estimator(classifier)
else:
estimator = Estimator()
# test cloning
clone(estimator)
# test __repr__
repr(estimator)
# test that set_params returns self
assert_true(estimator.set_params() is estimator)
# test if init does nothing but set parameters
# this is important for grid_search etc.
# We get the default parameters from init and then
# compare these against the actual values of the attributes.
# this comes from getattr. Gets rid of deprecation decorator.
init = getattr(estimator.__init__, 'deprecated_original',
estimator.__init__)
try:
args, varargs, kws, defaults = inspect.getargspec(init)
except TypeError:
# init is not a python function.
# true for mixins
return
params = estimator.get_params()
if name in META_ESTIMATORS:
# they need a non-default argument
args = args[2:]
else:
args = args[1:]
if args:
# non-empty list
assert_equal(len(args), len(defaults))
else:
return
for arg, default in zip(args, defaults):
assert_in(type(default), [str, int, float, bool, tuple, type(None),
np.float64, types.FunctionType, Memory])
if arg not in params.keys():
# deprecated parameter, not in get_params
assert_true(default is None)
continue
if isinstance(params[arg], np.ndarray):
assert_array_equal(params[arg], default)
else:
assert_equal(params[arg], default)
def multioutput_estimator_convert_y_2d(name, y):
    # Estimators in mono_output_task_error raise ValueError if y is 1-D,
    # so convert y to 2-D for those estimators.
if name in (['MultiTaskElasticNetCV', 'MultiTaskLassoCV',
'MultiTaskLasso', 'MultiTaskElasticNet']):
return y[:, np.newaxis]
return y
def check_non_transformer_estimators_n_iter(name, estimator,
multi_output=False):
    # Check that all iterative solvers run for more than one iteration
iris = load_iris()
X, y_ = iris.data, iris.target
if multi_output:
y_ = y_[:, np.newaxis]
set_random_state(estimator, 0)
if name == 'AffinityPropagation':
estimator.fit(X)
else:
estimator.fit(X, y_)
assert_greater(estimator.n_iter_, 0)
def check_transformer_n_iter(name, estimator):
if name in CROSS_DECOMPOSITION:
# Check using default data
X = [[0., 0., 1.], [1., 0., 0.], [2., 2., 2.], [2., 5., 4.]]
y_ = [[0.1, -0.2], [0.9, 1.1], [0.1, -0.5], [0.3, -0.2]]
else:
X, y_ = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X -= X.min() - 0.1
set_random_state(estimator, 0)
estimator.fit(X, y_)
# These return a n_iter per component.
if name in CROSS_DECOMPOSITION:
for iter_ in estimator.n_iter_:
assert_greater(iter_, 1)
else:
assert_greater(estimator.n_iter_, 1)
def check_get_params_invariance(name, estimator):
class T(BaseEstimator):
"""Mock classifier
"""
def __init__(self):
pass
def fit(self, X, y):
return self
if name in ('FeatureUnion', 'Pipeline'):
e = estimator([('clf', T())])
    elif name in ('GridSearchCV', 'RandomizedSearchCV'):
return
else:
e = estimator()
shallow_params = e.get_params(deep=False)
deep_params = e.get_params(deep=True)
assert_true(all(item in deep_params.items() for item in
shallow_params.items()))
| bsd-3-clause |
vimilimiv/weibo-popularity_judge-and-content_optimization | 分类和回归/newcla.py | 1 | 2399 | #-------------------------------------------------------------------------------
# coding=utf8
# Name: Module 1
# Purpose:
#
# Author: zhx
#
# Created: 19/05/2016
# Copyright: (c) zhx 2016
# Licence: <your licence>
#-------------------------------------------------------------------------------
import numpy as np
from sklearn import svm
from sklearn.svm import LinearSVC
from sklearn.metrics import classification_report
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
def fun(a):
if a==1:
return 1
else:
return -1
def main():
traindata = open("trainnew.txt")
testdata = open("testnew.txt")
    traindata.readline()  # skip the header line
testdata.readline()
train = np.loadtxt(traindata)
test = np.loadtxt(testdata)
X = train[0:4628,0:27]
y = train[0:4628,27]
test_x = test[0:1437,0:27]
test_y = test[0:1437,27]
model1 = LinearSVC()
model2 = LogisticRegression()
model3 = GaussianNB()
model4 = RandomForestClassifier()
model5 = KNeighborsClassifier()
model1.fit(X,y)
model2.fit(X,y)
model3.fit(X,y)
model4.fit(X,y)
model5.fit(X,y)
predicted1 = model1.predict(test_x)
predicted2 = model2.predict(test_x)
predicted3 = model3.predict(test_x)
predicted4 = model4.predict(test_x)
predicted5 = model5.predict(test_x)
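    # Combine the five classifiers by voting: the SVM, logistic regression and
    # random forest vote first; when exactly two of those three agree on the
    # positive class, naive Bayes and KNN are used to break the tie.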
    mypredict = predicted1.copy()
for i in xrange(1437):
p1 = predicted1[i]
p2 = predicted2[i]
p3 = predicted3[i]
p4 = predicted4[i]
p5 = predicted5[i]
if (p1+p2+p4)==3:
mypredict[i]=1
elif (p1+p2+p4)==2:
if p3+p5>=1:
mypredict[i]=1
else:
mypredict[i]=0
else:
mypredict[i]=0
classname = ['popular','not_popular']
print "1 Svm-linear"
print(classification_report(test_y,predicted1))#,classname))
print "2 Logistci regression"
print(classification_report(test_y,predicted2))#,classname))
print "3 NB - gaussian"
print(classification_report(test_y,predicted3))#,classname))
print "4 Random Forest"
print(classification_report(test_y,predicted4))#,classname))
print "5 KNN"
print(classification_report(test_y,predicted5))#,classname))
print "6 Com"
print(classification_report(test_y,mypredict))#,classname))
main()
| mit |
zplab/zplib | zplib/image/sample_texture.py | 2 | 2094 | import numpy
from sklearn import cluster
from . import _sample_texture
sample_texture = _sample_texture.sample_texture
sample_ar_texture = _sample_texture.sample_ar_texture
def subsample_mask(mask, max_points):
"""Return a mask containing at most max_points 'True' values, each of which
is located somewhere within the original mask.
This is useful for sampling textures where it is neither necessary nor practical
to sample EVERY pixel of potential interest. Instead, a random subset of the
pixels of interest is selected.
"""
mask = numpy.asarray(mask) > 0
num_points = mask.sum()
if num_points > max_points:
z = numpy.zeros(num_points, dtype=bool)
z[:max_points] = 1
mask = mask.copy()
mask[mask] = numpy.random.permutation(z)
return mask
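# Minimal usage sketch (added for illustration; the helper name and the array
# sizes below are arbitrary assumptions, not part of the original module).
def _example_subsample_mask():
    mask = numpy.zeros((100, 100), dtype=bool)
    mask[20:80, 20:80] = True                # 3600 candidate pixels
    sub = subsample_mask(mask, max_points=500)
    assert sub.sum() == 500                  # at most max_points survive
    assert not sub[~mask].any()              # samples stay inside the original mask
    return sub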
def bin_by_texture_class(image, num_classes, mask=None, size=3):
"""Return an image where pixels are replaced by the "texture class" that
that pixel belongs to.
Textures are sampled using sample_ar_texture and then clustered with k-means
clustering. An image is returned where each pixel represents the label of
its texture cluster.
Parameters:
image: 2-dimensional numpy array of type uint8, uint16, or float32
num_classes: number of clusters to identify with k-means
mask: optional mask for which pixels to examine
size: size of the ar feature window (see sample_ar_texture)
"""
texture_samples = sample_ar_texture(image, mask, size)
    kmeans = cluster.MiniBatchKMeans(n_clusters=num_classes, max_iter=300)
kmeans.fit(texture_samples)
dtype = numpy.uint16 if num_classes > 256 else numpy.uint8
labeled_image = numpy.zeros(image.shape, dtype)
# if not image.flags.fortran:
# labeled_image = labeled_image.T
# if mask is not None:
# mask = mask.T
if mask is not None:
labeled_image[mask] = kmeans.labels_
else:
labeled_image.flat = kmeans.labels_
# if not image.flags.fortran:
# labeled_image = labeled_image.T
return labeled_image | mit |
mayavanand/RMMAFinalProject | build/lib/azimuth/predict.py | 1 | 21618 | import numpy as np
import sklearn
from sklearn.metrics import roc_curve, auc
import sklearn.metrics
import sklearn.cross_validation
import copy
import util
import time
import metrics as ranking_metrics
import azimuth.models.regression
import azimuth.models.ensembles
import azimuth.models.DNN
import azimuth.models.baselines
import multiprocessing
def fill_in_truth_and_predictions(truth, predictions, fold, y_all, y_pred, learn_options, test):
truth[fold]['ranks'] = np.hstack((truth[fold]['ranks'],
y_all[learn_options['rank-transformed target name']].values[test].flatten()))
truth[fold]['thrs'] = np.hstack((truth[fold]['thrs'],
y_all[learn_options['binary target name']].values[test].flatten()))
    if 'raw target name' in learn_options.keys():
truth[fold]['raw'] = np.hstack((truth[fold]['raw'],
y_all[learn_options['raw target name']].values[test].flatten()))
predictions[fold] = np.hstack((predictions[fold], y_pred.flatten()))
return truth, predictions
def construct_filename(learn_options, TEST):
if learn_options.has_key("V"):
filename = "V%s" % learn_options["V"]
else:
filename = "offV1"
if TEST:
filename = "TEST."
filename += learn_options["method"]
filename += '.order%d' % learn_options["order"]
# try:
# learn_options["target_name"] = ".%s" % learn_options["target_name"].split(" ")[1]
# except:
# pass
filename += learn_options["target_name"]
if learn_options["method"] == "GPy":
pass
# filename += ".R%d" % opt_options['num_restarts']
# filename += ".K%s" % learn_options['kerntype']
# if learn_options.has_key('degree'):
# filename += "d%d" % learn_options['degree']
# if learn_options['warped']:
# filename += ".Warp"
elif learn_options["method"] == "linreg":
filename += "." + learn_options["penalty"]
filename += "." + learn_options["cv"]
if learn_options["training_metric"] == "NDCG":
filename += ".NDGC_%d" % learn_options["NDGC_k"]
elif learn_options["training_metric"] == "AUC":
filename += ".AUC"
elif learn_options["training_metric"] == 'spearmanr':
filename += ".spearman"
print "filename = %s" % filename
return filename
def print_summary(global_metric, results, learn_options, feature_sets, flags):
print "\nSummary:"
print learn_options
print "\t\tglobal %s=%.2f" % (learn_options['metric'], global_metric)
print "\t\tmedian %s across folds=%.2f" % (learn_options['metric'], np.median(results[0]))
print "\t\torder=%d" % learn_options["order"]
    if learn_options.has_key('kerntype'): print "\t\tkern type = %s" % learn_options['kerntype']
if learn_options.has_key('degree'): print "\t\tdegree=%d" % learn_options['degree']
print "\t\ttarget_name=%s" % learn_options["target_name"]
for k in flags.keys():
print '\t\t' + k + '=' + str(learn_options[k])
print "\t\tfeature set:"
for set in feature_sets.keys():
print "\t\t\t%s" % set
print "\t\ttotal # features=%d" % results[4]
def extract_fpr_tpr_for_fold(aucs, fold, i, predictions, truth, y_binary, test, y_pred):
assert len(np.unique(y_binary))<=2, "if using AUC need binary targets"
fpr, tpr, _ = roc_curve(y_binary[test], y_pred)
roc_auc = auc(fpr, tpr)
aucs.append(roc_auc)
def extract_NDCG_for_fold(metrics, fold, i, predictions, truth, y_ground_truth, test, y_pred, learn_options):
NDCG_fold = ranking_metrics.ndcg_at_k_ties(y_ground_truth[test].flatten(), y_pred.flatten(), learn_options["NDGC_k"])
metrics.append(NDCG_fold)
def extract_spearman_for_fold(metrics, fold, i, predictions, truth, y_ground_truth, test, y_pred, learn_options):
spearman = util.spearmanr_nonan(y_ground_truth[test].flatten(), y_pred.flatten())[0]
assert not np.isnan(spearman), "found nan spearman"
metrics.append(spearman)
def get_train_test(test_gene, y_all, train_genes=None):
# this is a bit convoluted because the train_genes+test_genes may not add up to all genes
# for e.g. when we load up V3, but then use only V2, etc.
is_off_target = 'MutatedSequence' in y_all.index.names
if is_off_target:
train = (y_all.index.get_level_values('MutatedSequence').values != test_gene)
test = None
return train, test
not_test = (y_all.index.get_level_values('Target gene').values != test_gene)
if train_genes is not None:
in_train_genes = np.zeros(not_test.shape, dtype=bool)
for t_gene in train_genes:
in_train_genes = np.logical_or(in_train_genes, (y_all.index.get_level_values('Target gene').values == t_gene))
train = np.logical_and(not_test, in_train_genes)
else:
train = not_test
#y_all['test'] as to do with extra pairs in V2
test = (y_all.index.get_level_values('Target gene').values== test_gene) * (y_all['test'].values == 1.)
# convert to indices
test = np.where(test == True)[0]
train = np.where(train == True)[0]
return train, test
def cross_validate(y_all, feature_sets, learn_options=None, TEST=False, train_genes=None, CV=True):
'''
feature_sets is a dictionary of "set name" to pandas.DataFrame
one set might be single-nucleotide, position-independent features of order X, for e.g.
Method: "GPy" or "linreg"
Metric: NDCG (learning to rank metric, Normalized Discounted Cumulative Gain); AUC
Output: cv_score_median, gene_rocs
When CV=False, it trains on everything (and tests on everything, just to fit the code)
'''
print "range of y_all is [%f, %f]" % (np.min(y_all[learn_options['target_name']].values), np.max(y_all[learn_options['target_name']].values))
allowed_methods = ["GPy", "linreg", "AdaBoostRegressor", "AdaBoostClassifier",
"DecisionTreeRegressor", "RandomForestRegressor",
"ARDRegression", "GPy_fs", "mean", "random", "DNN",
"lasso_ensemble", "doench", "logregL1", "sgrna_from_doench", 'SVC', 'xu_et_al']
assert learn_options["method"] in allowed_methods,"invalid method: %s" % learn_options["method"]
assert learn_options["method"] == "linreg" and learn_options['penalty'] == 'L2' or learn_options["weighted"] is None, "weighted only works with linreg L2 right now"
# construct filename from options
filename = construct_filename(learn_options, TEST)
print "Cross-validating genes..."
t2 = time.time()
y = np.array(y_all[learn_options["target_name"]].values[:,None],dtype=np.float64)
# concatenate feature sets in to one nparray, and get dimension of each
inputs, dim, dimsum, feature_names = util.concatenate_feature_sets(feature_sets)
if not CV:
assert learn_options['cv'] == 'gene', 'Must use gene-CV when CV is False (I need to use all of the genes and stratified complicates that)'
# set-up for cross-validation
## for outer loop, the one Doench et al use genes for
if learn_options["cv"] == "stratified":
assert not learn_options.has_key("extra_pairs") or learn_options['extra pairs'], "can't use extra pairs with stratified CV, need to figure out how to properly account for genes affected by two drugs"
label_encoder = sklearn.preprocessing.LabelEncoder()
label_encoder.fit(y_all['Target gene'].values)
gene_classes = label_encoder.transform(y_all['Target gene'].values)
if 'n_folds' in learn_options.keys():
n_folds = learn_options['n_folds']
elif learn_options['train_genes'] is not None and learn_options["test_genes"] is not None:
n_folds = len(learn_options["test_genes"])
else:
n_folds = len(learn_options['all_genes'])
cv = sklearn.cross_validation.StratifiedKFold(gene_classes, n_folds=n_folds, shuffle=True)
fold_labels = ["fold%d" % i for i in range(1,n_folds+1)]
if learn_options['num_genes_remove_train'] is not None:
            raise NotImplementedError()
elif learn_options["cv"]=="gene":
cv = []
if not CV:
train_test_tmp = get_train_test('dummy', y_all) # get train, test split using a dummy gene
train_tmp, test_tmp = train_test_tmp
# not a typo, using training set to test on as well, just for this case. Test set is not used
# for internal cross-val, etc. anyway.
train_test_tmp = (train_tmp, train_tmp)
cv.append(train_test_tmp)
fold_labels = learn_options['all_genes']
elif learn_options['train_genes'] is not None and learn_options["test_genes"] is not None:
assert learn_options['train_genes'] is not None and learn_options['test_genes'] is not None, "use both or neither"
for i, gene in enumerate(learn_options['test_genes']):
cv.append(get_train_test(gene, y_all, learn_options['train_genes']))
fold_labels = learn_options["test_genes"]
# if train and test genes are seperate, there should be only one fold
train_test_disjoint = set.isdisjoint(set(learn_options["train_genes"].tolist()), set(learn_options["test_genes"].tolist()))
else:
for i, gene in enumerate(learn_options['all_genes']):
train_test_tmp = get_train_test(gene, y_all)
cv.append(train_test_tmp)
fold_labels = learn_options['all_genes']
if learn_options['num_genes_remove_train'] is not None:
for i, (train,test) in enumerate(cv):
unique_genes = np.random.permutation(np.unique(np.unique(y_all['Target gene'][train])))
genes_to_keep = unique_genes[0:len(unique_genes) - learn_options['num_genes_remove_train']]
guides_to_keep = []
filtered_train = []
for j, gene in enumerate(y_all['Target gene']):
if j in train and gene in genes_to_keep:
filtered_train.append(j)
cv_i_orig = copy.deepcopy(cv[i])
cv[i] = (filtered_train, test)
if learn_options['num_genes_remove_train']==0:
assert np.all(cv_i_orig[0]==cv[i][0])
assert np.all(cv_i_orig[1]==cv[i][1])
print "# train/train after/before is %s, %s" % (len(cv[i][0]), len(cv_i_orig[0]))
print "# test/test after/before is %s, %s" % (len(cv[i][1]), len(cv_i_orig[1]))
else:
raise Exception("invalid cv options given: %s" % learn_options["cv"])
cv = [c for c in cv] #make list from generator, so can subset for TEST case
if TEST:
ind_to_use = [0]#[0,1]
cv = [cv[i] for i in ind_to_use]
fold_labels = [fold_labels[i] for i in ind_to_use]
truth = dict([(t, dict([(m, np.array([])) for m in ['raw', 'ranks', 'thrs']])) for t in fold_labels])
predictions = dict([(t, np.array([])) for t in fold_labels])
m = {}
metrics = []
#do the cross-validation
num_proc = learn_options["num_proc"]
if num_proc > 1:
num_proc = np.min([num_proc,len(cv)])
print "using multiprocessing with %d procs--one for each fold" % num_proc
jobs = []
pool = multiprocessing.Pool(processes=num_proc)
for i,fold in enumerate(cv):
train,test = fold
print "working on fold %d of %d, with %d train and %d test" % (i, len(cv), len(train), len(test))
if learn_options["method"]=="GPy":
job = pool.apply_async(azimuth.models.GP.gp_on_fold, args=(feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options))
elif learn_options["method"]=="linreg":
job = pool.apply_async(azimuth.models.regression.linreg_on_fold, args=(feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options))
elif learn_options["method"]=="logregL1":
job = pool.apply_async(azimuth.models.regression.logreg_on_fold, args=(feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options))
elif learn_options["method"]=="AdaBoostRegressor":
job = pool.apply_async(azimuth.models.ensembles.adaboost_on_fold, args=(feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options, False))
elif learn_options["method"]=="AdaBoostClassifier":
job = pool.apply_async(azimuth.models.ensembles.adaboost_on_fold, args=(feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options, True))
elif learn_options["method"]=="DecisionTreeRegressor":
job = pool.apply_async(azimuth.models.ensembles.decisiontree_on_fold, args=(feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options))
elif learn_options["method"]=="RandomForestRegressor":
job = pool.apply_async(azimuth.models.ensembles.randomforest_on_fold, args=(feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options))
elif learn_options["method"]=="ARDRegression":
job = pool.apply_async(azimuth.models.regression.ARDRegression_on_fold, args=(feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options))
elif learn_options["method"] == "random":
job = pool.apply_async(azimuth.models.baselines.random_on_fold, args=(feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options))
elif learn_options["method"] == "mean":
job = pool.apply_async(azimuth.models.baselines.mean_on_fold, args=(feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options))
elif learn_options["method"] == "SVC":
job = pool.apply_async(azimuth.models.baselines.SVC_on_fold, args=(feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options))
elif learn_options["method"] == "DNN":
job = pool.apply_async(azimuth.models.DNN.DNN_on_fold, args=(feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options))
elif learn_options["method"] == "lasso_ensemble":
job = pool.apply_async(azimuth.models.ensembles.LASSOs_ensemble_on_fold, args=(feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options))
elif learn_options["method"] == "doench":
job = pool.apply_async(azimuth.models.baselines.doench_on_fold, args=(feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options))
elif learn_options["method"] == "sgrna_from_doench":
job = pool.apply_async(azimuth.models.baselines.sgrna_from_doench_on_fold, args=(feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options))
elif learn_options["method"] == "xu_et_al":
job = pool.apply_async(azimuth.models.baselines.xu_et_al_on_fold, args=(feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options))
else:
raise Exception("did not find method=%s" % learn_options["method"])
jobs.append(job)
pool.close()
pool.join()
for i,fold in enumerate(cv):#i in range(0,len(jobs)):
y_pred, m[i] = jobs[i].get()
train,test = fold
if learn_options["training_metric"]=="AUC":
extract_fpr_tpr_for_fold(metrics, fold_labels[i], i, predictions, truth, y_all[learn_options["ground_truth_label"]].values, test, y_pred)
elif learn_options["training_metric"]=="NDCG":
extract_NDCG_for_fold(metrics, fold_labels[i], i, predictions, truth, y_all[learn_options["ground_truth_label"]].values, test, y_pred, learn_options)
elif learn_options["training_metric"] == 'spearmanr':
extract_spearman_for_fold(metrics, fold_labels[i], i, predictions, truth, y_all[learn_options["ground_truth_label"]].values, test, y_pred, learn_options)
else:
raise Exception("invalid 'training_metric' in learn_options: %s" % learn_options["training_metric"])
truth, predictions = fill_in_truth_and_predictions(truth, predictions, fold_labels[i], y_all, y_pred, learn_options, test)
pool.terminate()
else:
# non parallel version
for i,fold in enumerate(cv):
train,test = fold
if learn_options["method"]=="GPy":
                y_pred, m[i] = azimuth.models.GP.gp_on_fold(feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options)
elif learn_options["method"]=="linreg":
y_pred, m[i] = azimuth.models.regression.linreg_on_fold(feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options)
elif learn_options["method"]=="logregL1":
y_pred, m[i] = azimuth.models.regression.logreg_on_fold(feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options)
elif learn_options["method"]=="AdaBoostRegressor":
y_pred, m[i] = azimuth.models.ensembles.adaboost_on_fold(feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options, classification=False)
elif learn_options["method"]=="AdaBoostClassifier":
y_pred, m[i] = azimuth.models.ensembles.adaboost_on_fold(feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options, classification=True)
elif learn_options["method"]=="DecisionTreeRegressor":
y_pred, m[i] = azimuth.models.ensembles.decisiontree_on_fold(feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options)
elif learn_options["method"]=="RandomForestRegressor":
y_pred, m[i] = azimuth.models.ensembles.randomforest_on_fold(feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options)
elif learn_options["method"]=="ARDRegression":
y_pred, m[i] = azimuth.models.regression.ARDRegression_on_fold(feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options)
elif learn_options["method"]=="GPy_fs":
y_pred, m[i] = azimuth.models.GP.gp_with_fs_on_fold(feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options)
elif learn_options["method"] == "random":
y_pred, m[i] = azimuth.models.baselines.random_on_fold(feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options)
elif learn_options["method"] == "mean":
y_pred, m[i] = azimuth.models.baselines.mean_on_fold(feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options)
elif learn_options["method"] == "SVC":
y_pred, m[i] = azimuth.models.baselines.SVC_on_fold(feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options)
elif learn_options["method"] == "DNN":
y_pred, m[i] = azimuth.models.DNN.DNN_on_fold(feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options)
elif learn_options["method"] == "lasso_ensemble":
y_pred, m[i] = azimuth.models.ensembles.LASSOs_ensemble_on_fold(feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options)
elif learn_options["method"] == "doench":
y_pred, m[i] = azimuth.models.baselines.doench_on_fold(feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options)
elif learn_options["method"] == "sgrna_from_doench":
y_pred, m[i] = azimuth.models.baselines.sgrna_from_doench_on_fold(feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options)
elif learn_options["method"] == "xu_et_al":
y_pred, m[i] = azimuth.models.baselines.xu_et_al_on_fold(feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options)
else:
raise Exception("invalid method found: %s" % learn_options["method"])
if learn_options["training_metric"]=="AUC":
# fills in truth and predictions
extract_fpr_tpr_for_fold(metrics, fold_labels[i], i, predictions, truth, y_all[learn_options['ground_truth_label']].values, test, y_pred)
elif learn_options["training_metric"]=="NDCG":
extract_NDCG_for_fold(metrics, fold_labels[i], i, predictions, truth, y_all[learn_options["ground_truth_label"]].values, test, y_pred, learn_options)
elif learn_options["training_metric"] == 'spearmanr':
extract_spearman_for_fold(metrics, fold_labels[i], i, predictions, truth, y_all[learn_options["ground_truth_label"]].values, test, y_pred, learn_options)
truth, predictions = fill_in_truth_and_predictions(truth, predictions, fold_labels[i], y_all, y_pred, learn_options, test)
print "\t\tRMSE: ", np.sqrt(((y_pred - y[test])**2).mean())
print "\t\tSpearman correlation: ", util.spearmanr_nonan(y[test], y_pred)[0]
print "\t\tfinished fold/gene %i of %i" % (i, len(fold_labels))
cv_median_metric =[np.median(metrics)]
gene_pred = [(truth, predictions)]
print "\t\tmedian %s across gene folds: %.3f" % (learn_options["training_metric"], cv_median_metric[-1])
t3 = time.time()
print "\t\tElapsed time for cv is %.2f seconds" % (t3-t2)
return metrics, gene_pred, fold_labels, m, dimsum, filename, feature_names
| bsd-3-clause |
Mazecreator/tensorflow | tensorflow/examples/learn/iris_custom_decay_dnn.py | 37 | 3774 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of DNNClassifier for Iris plant dataset, with exponential decay."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from sklearn import datasets
from sklearn import metrics
from sklearn import model_selection
import tensorflow as tf
X_FEATURE = 'x' # Name of the input feature.
def my_model(features, labels, mode):
"""DNN with three hidden layers."""
# Create three fully connected layers respectively of size 10, 20, and 10.
net = features[X_FEATURE]
for units in [10, 20, 10]:
net = tf.layers.dense(net, units=units, activation=tf.nn.relu)
# Compute logits (1 per class).
logits = tf.layers.dense(net, 3, activation=None)
# Compute predictions.
predicted_classes = tf.argmax(logits, 1)
if mode == tf.estimator.ModeKeys.PREDICT:
predictions = {
'class': predicted_classes,
'prob': tf.nn.softmax(logits)
}
return tf.estimator.EstimatorSpec(mode, predictions=predictions)
  # Convert the labels to a one-hot tensor of shape (length of features, 3)
  # with an on-value of 1 for each one-hot vector of length 3.
onehot_labels = tf.one_hot(labels, 3, 1, 0)
# Compute loss.
loss = tf.losses.softmax_cross_entropy(
onehot_labels=onehot_labels, logits=logits)
# Create training op with exponentially decaying learning rate.
if mode == tf.estimator.ModeKeys.TRAIN:
global_step = tf.train.get_global_step()
learning_rate = tf.train.exponential_decay(
learning_rate=0.1, global_step=global_step,
decay_steps=100, decay_rate=0.001)
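    # With decay_rate=0.001 and decay_steps=100 the (non-staircased) schedule
    # is lr(step) = 0.1 * 0.001 ** (step / 100): 0.1 at step 0, about 3.2e-3
    # at step 50 and 1e-4 at step 100, i.e. an aggressive decay.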
optimizer = tf.train.AdagradOptimizer(learning_rate=learning_rate)
train_op = optimizer.minimize(loss, global_step=global_step)
return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
# Compute evaluation metrics.
eval_metric_ops = {
'accuracy': tf.metrics.accuracy(
labels=labels, predictions=predicted_classes)
}
return tf.estimator.EstimatorSpec(
mode, loss=loss, eval_metric_ops=eval_metric_ops)
def main(unused_argv):
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = model_selection.train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
classifier = tf.estimator.Estimator(model_fn=my_model)
# Train.
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={X_FEATURE: x_train}, y=y_train, num_epochs=None, shuffle=True)
classifier.train(input_fn=train_input_fn, steps=1000)
# Predict.
test_input_fn = tf.estimator.inputs.numpy_input_fn(
x={X_FEATURE: x_test}, y=y_test, num_epochs=1, shuffle=False)
predictions = classifier.predict(input_fn=test_input_fn)
y_predicted = np.array(list(p['class'] for p in predictions))
y_predicted = y_predicted.reshape(np.array(y_test).shape)
# Score with sklearn.
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy (sklearn): {0:f}'.format(score))
# Score with tensorflow.
scores = classifier.evaluate(input_fn=test_input_fn)
print('Accuracy (tensorflow): {0:f}'.format(scores['accuracy']))
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
krez13/scikit-learn | examples/ensemble/plot_forest_importances.py | 168 | 1793 | """
=========================================
Feature importances with forests of trees
=========================================
This example shows the use of forests of trees to evaluate the importance of
features on an artificial classification task. The red bars are the feature
importances of the forest, along with their inter-trees variability.
As expected, the plot suggests that 3 features are informative, while the
remaining are not.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.ensemble import ExtraTreesClassifier
# Build a classification task using 3 informative features
X, y = make_classification(n_samples=1000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
n_classes=2,
random_state=0,
shuffle=False)
# Build a forest and compute the feature importances
forest = ExtraTreesClassifier(n_estimators=250,
random_state=0)
forest.fit(X, y)
importances = forest.feature_importances_
std = np.std([tree.feature_importances_ for tree in forest.estimators_],
axis=0)
indices = np.argsort(importances)[::-1]
# Print the feature ranking
print("Feature ranking:")
for f in range(X.shape[1]):
print("%d. feature %d (%f)" % (f + 1, indices[f], importances[indices[f]]))
# Plot the feature importances of the forest
plt.figure()
plt.title("Feature importances")
plt.bar(range(X.shape[1]), importances[indices],
color="r", yerr=std[indices], align="center")
plt.xticks(range(X.shape[1]), indices)
plt.xlim([-1, X.shape[1]])
plt.show()
| bsd-3-clause |
boada/desCluster | legacy/analysis/dstest.py | 2 | 4702 | import numpy as np
from sklearn.neighbors import NearestNeighbors
from astLib import astStats
def DSmetric(localData, v, sigma):
""" Calculates the delta squared values as given in equation 1 of Dressler
et al. 1988. This uses both the position and velocities to give a measure
of substructure by identifying galaxies with do not follow the cluster
velocity distribution.
@type localData: list
@param localData: The velocities of the target galaxy and its N nearest
neighbors.
@type v: float
@param v: The global mean velocity of the cluster
@type sigma: float
@param sigma: The global velocity dispersion of the cluster
@rtype: list
@return: a list of delta squared values
"""
if 3 <= localData.shape[0] < 15:
sigmaLocal = astStats.gapperEstimator(localData)
#sigmaLocal = np.std(localData['LOSV'])
elif 15 <= localData.shape[0]:
sigmaLocal = astStats.biweightScale(localData, tuningConstant=9.0)
#sigmaLocal = np.std(localData['LOSV'])
    try:
        vLocal = astStats.biweightLocation(localData, tuningConstant=6.0)
        #vLocal = localData.mean()
        if vLocal is None:
            vLocal = localData.mean()
    except ZeroDivisionError:
        print 'biweightLocation failed, falling back to the sample mean'
        vLocal = localData.mean()
Nnn = localData.shape[0]
delta2 = (Nnn + 1)/ sigma**2 * ((vLocal - v)**2 + (sigmaLocal - sigma)**2)
return delta2
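# Worked example of the statistic above (illustrative numbers only, not from
# any real cluster): with Nnn = 10 neighbours, global v = 0 km/s and
# sigma = 1000 km/s, a local group with vLocal = 500 km/s and
# sigmaLocal = 800 km/s gives
#   delta^2 = (10 + 1) / 1000**2 * ((500 - 0)**2 + (800 - 1000)**2) ~= 3.19,
# i.e. delta ~= 1.8, flagging a neighbourhood that deviates from the global
# cluster kinematics.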
def DStest(data, LOSV, LOSVD, method='shuffle', shuffles=1e4):
""" Computes the delta deviation for an entire cluster. See Dressler et al.
1988 for a complete description. A result close to the number of galaxies
present in the cluster indicates no substructure.
@type data: array
@param data: The 2D (3D) array of RA, DEC, (z) galaxy positions.
@type LOSV: array
@param LOSV: Array of line of sight velocities (LOSV) for each galaxy in
cluster
@type LOSVD: float
@param LOSVD: Global line of sight velocity dispersion for the cluster.
@type method: string
@param method: The method used for the determination of substructure.
Should either be 'shuffle' (default) or 'threshold'. 'Shuffle' shuffles
the velocities and computes the probability of substructure.
'threshold' uses a simple threshold and is generally considered not as
reliable as the shuffle method.
@type shuffles: float
@param shuffles: The number of times to shuffle the velocities.
@rtype: float
@rparam: The significance of substructure. For method='shuffle' this is the
probability of substructure, and for method='threshold' a value greater
than 1 indicates the presence of substructure.
"""
try:
data.shape[1]
except IndexError:
raise IndexError('data must be at least 2D')
nbrs = NearestNeighbors(n_neighbors=int(np.sqrt(len(data))),
algorithm='ball_tree').fit(data)
distances, indices = nbrs.kneighbors(data)
ds2 = [DSmetric(LOSV[inds], LOSV.mean(), LOSVD) for inds in indices]
ds = np.sqrt(ds2)
delta = np.sum(ds)
if method == 'threshold':
return delta/data.shape[0]
elif method == 'shuffle':
deltaShuffled = np.zeros(shuffles)
for i in range(int(shuffles)):
np.random.shuffle(LOSV)
ds2 = [DSmetric(LOSV[inds], LOSV.mean(), LOSVD) for inds in
indices]
ds = np.sqrt(ds2)
deltaShuffled[i] = np.sum(ds)
mask = deltaShuffled > delta
return deltaShuffled[mask].shape[0]/float(shuffles)
else:
raise NameError("method must be either 'threshold' or 'shuffle'")
def findLOSV(data, CLUSZ):
''' Finds the line of sight velocity for each of the galaxies and puts it
in the LOSV column of the data array.
'''
c = 2.99e5 # speed of light in km/s
losv = c * (data - CLUSZ)/(1 + CLUSZ)
return losv
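# Example (illustrative numbers only): a galaxy at z = 0.21 in a cluster at
# CLUSZ = 0.20 has LOSV = 2.99e5 * (0.21 - 0.20) / (1 + 0.20) ~= 2492 km/s.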
def main():
N = 100
N2 = 1
# make some data
ra1 = 0 + np.random.rand(N) * 0.5
dec1 = -1 - np.random.rand(N) * 0.5
z1 = 0.2 + np.random.rand(N) * 0.01
# add another little cluster
ra2 = 0.4 + np.random.rand(N2) * 0.1
dec2 = -1.1 - np.random.rand(N2) * 0.1
z2 = 0.21 + np.random.rand(N2) * 0.01
ra = np.append(ra1, ra2)
dec = np.append(dec1, dec2)
z = np.append(z1, z2)
#calculate things we would have normally
CLUSZ = astStats.biweightLocation(z, tuningConstant=6.0)
LOSV = findLOSV(z, CLUSZ)
    LOSVD = astStats.biweightScale(LOSV, tuningConstant=9.0)
data = np.column_stack([ra,dec])
s = DStest(data, LOSV, LOSVD, shuffles=1000)
return s
if __name__ == "__main__":
main()
| mit |
fedebarabas/tormenta | tormenta/control/guitools.py | 1 | 6414 | # -*- coding: utf-8 -*-
"""
Created on Fri Feb 6 13:20:02 2015
@author: Federico Barabas
"""
import os
import numpy as np
import configparser
from ast import literal_eval
from tkinter import Tk, simpledialog
from lantz import Q_
from PIL import Image
import matplotlib.cm as cm
from scipy.misc import imresize
import tifffile as tiff
import tormenta.utils as utils
# Check for same name conflict
def getUniqueName(name):
n = 1
while os.path.exists(name):
if n > 1:
name = name.replace('_{}.'.format(n - 1), '_{}.'.format(n))
else:
name = utils.insertSuffix(name, '_{}'.format(n))
n += 1
return name
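# Example (assuming utils.insertSuffix appends '_1' before the extension): if
# 'scan.tiff' and 'scan_1.tiff' already exist on disk, getUniqueName('scan.tiff')
# returns 'scan_2.tiff'.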
def attrsToTxt(filename, attrs):
fp = open(filename + '.txt', 'w')
fp.write('\n'.join('{}= {}'.format(x[0], x[1]) for x in attrs))
fp.close()
def fileSizeGB(shape):
# self.nPixels() * self.nExpositions * 16 / (8 * 1024**3)
return shape[0]*shape[1]*shape[2] / 2**29
def nFramesPerChunk(shape):
return int(1.8 * 2**29 / (shape[1] * shape[2]))
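# Sizing example (illustrative numbers only): a stack of 10000 frames of
# 512x512 16-bit pixels gives fileSizeGB((10000, 512, 512)) =
# 10000 * 512 * 512 / 2**29 ~= 4.88, while nFramesPerChunk keeps each chunk at
# int(1.8 * 2**29 / 512**2) = 3686 frames, i.e. roughly 1.8 GB per chunk.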
# Preset tools
def savePreset(main, filename=None):
if filename is None:
root = Tk()
root.withdraw()
filename = simpledialog.askstring(title='Save preset',
prompt='Save config file as...')
root.destroy()
if filename is None:
return
config = configparser.ConfigParser()
fov = 'Field of view'
config['Camera'] = {
'Frame Start': main.frameStart,
'Shape': main.shape,
'Shape name': main.tree.p.param(fov).param('Shape').value(),
'Horizontal readout rate': str(main.HRRatePar.value()),
'Vertical shift speed': str(main.vertShiftSpeedPar.value()),
'Clock voltage amplitude': str(main.vertShiftAmpPar.value()),
'Frame Transfer Mode': str(main.FTMPar.value()),
'Cropped sensor mode': str(main.cropParam.value()),
'Set exposure time': str(main.expPar.value()),
'Pre-amp gain': str(main.PreGainPar.value()),
'EM gain': str(main.GainPar.value())}
with open(os.path.join(main.presetDir, filename), 'w') as configfile:
config.write(configfile)
main.presetsMenu.addItem(filename)
def loadPreset(main, filename=None):
tree = main.tree.p
timings = tree.param('Timings')
    if filename is None:
        preset = main.presetsMenu.currentText()
    else:
        preset = filename
    config = configparser.ConfigParser()
    config.read(os.path.join(main.presetDir, preset))
configCam = config['Camera']
shape = configCam['Shape']
main.shape = literal_eval(shape)
main.frameStart = literal_eval(configCam['Frame Start'])
# Frame size handling
shapeName = configCam['Shape Name']
if shapeName == 'Custom':
main.customFrameLoaded = True
tree.param('Field of view').param('Shape').setValue(shapeName)
main.frameStart = literal_eval(configCam['Frame Start'])
main.adjustFrame()
main.customFrameLoaded = False
else:
tree.param('Field of view').param('Shape').setValue(shapeName)
vps = timings.param('Vertical pixel shift')
vps.param('Speed').setValue(Q_(configCam['Vertical shift speed']))
cva = 'Clock voltage amplitude'
vps.param(cva).setValue(configCam[cva])
ftm = 'Frame Transfer Mode'
timings.param(ftm).setValue(configCam.getboolean(ftm))
csm = 'Cropped sensor mode'
    if literal_eval(configCam[csm]) is not None:
main.cropLoaded = True
timings.param(csm).param('Enable').setValue(configCam.getboolean(csm))
main.cropLoaded = False
hrr = 'Horizontal readout rate'
timings.param(hrr).setValue(Q_(configCam[hrr]))
expt = 'Set exposure time'
timings.param(expt).setValue(float(configCam[expt]))
pag = 'Pre-amp gain'
tree.param('Gain').param(pag).setValue(float(configCam[pag]))
tree.param('Gain').param('EM gain').setValue(int(configCam['EM gain']))
def hideColumn(main):
if main.hideColumnButton.isChecked():
main.presetsMenu.hide()
main.loadPresetButton.hide()
main.cameraWidget.hide()
main.viewCtrl.hide()
main.recWidget.hide()
main.layout.setColumnMinimumWidth(0, 0)
else:
main.presetsMenu.show()
main.loadPresetButton.show()
main.cameraWidget.show()
main.viewCtrl.show()
main.recWidget.show()
main.layout.setColumnMinimumWidth(0, 350)
def mouseMoved(main, pos):
if main.vb.sceneBoundingRect().contains(pos):
mousePoint = main.vb.mapSceneToView(pos)
x, y = int(mousePoint.x()), int(main.shape[1] - mousePoint.y())
main.cursorPos.setText('{}, {}'.format(x, y))
countsStr = '{} counts'.format(main.image[x, int(mousePoint.y())])
main.cursorPosInt.setText(countsStr)
def tiff2png(main, filenames=None):
if filenames is None:
filenames = utils.getFilenames('Load TIFF files',
[('Tiff files', '.tiff'),
('Tif files', '.tif')],
main.recWidget.folderEdit.text())
for filename in filenames:
with tiff.TiffFile(filename) as tt:
arr = tt.asarray()
cmin, cmax = bestLimits(arr)
arr[arr > cmax] = cmax
arr[arr < cmin] = cmin
arr -= arr.min()
arr = arr/arr.max()
arr = imresize(arr, (1000, 1000), 'nearest')
im = Image.fromarray(cm.cubehelix(arr, bytes=True))
im.save(os.path.splitext(filename)[0] + '.png')
def bestLimits(arr):
# Best cmin, cmax algorithm taken from ImageJ routine:
# http://cmci.embl.de/documents/120206pyip_cooking/
# python_imagej_cookbook#automatic_brightnesscontrast_button
pixelCount = arr.size
limit = pixelCount/10
threshold = pixelCount/5000
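    # limit (10% of the pixels) marks bins so populated that they are treated
    # as background/foreground peaks and skipped; threshold (0.02%) is the
    # minimum bin count that defines the first/last "interesting" grey level.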
hist, bin_edges = np.histogram(arr, 256)
i = 0
found = False
count = 0
while True:
i += 1
count = hist[i]
if count > limit:
count = 0
found = count > threshold
if found or i >= 255:
break
hmin = i
i = 256
while True:
i -= 1
count = hist[i]
if count > limit:
count = 0
found = count > threshold
if found or i < 1:
break
hmax = i
return bin_edges[hmin], bin_edges[hmax]
| gpl-3.0 |
eg-zhang/scikit-learn | examples/plot_multioutput_face_completion.py | 330 | 3019 | """
==============================================
Face completion with multi-output estimators
==============================================
This example shows the use of multi-output estimators to complete images.
The goal is to predict the lower half of a face given its upper half.
The first column of images shows true faces. The next columns illustrate
how extremely randomized trees, k nearest neighbors, linear
regression and ridge regression complete the lower half of those faces.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.utils.validation import check_random_state
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import RidgeCV
# Load the faces datasets
data = fetch_olivetti_faces()
targets = data.target
data = data.images.reshape((len(data.images), -1))
train = data[targets < 30]
test = data[targets >= 30] # Test on independent people
# Test on a subset of people
n_faces = 5
rng = check_random_state(4)
face_ids = rng.randint(test.shape[0], size=(n_faces, ))
test = test[face_ids, :]
n_pixels = data.shape[1]
X_train = train[:, :int(np.ceil(0.5 * n_pixels))]  # Upper half of the faces
y_train = train[:, int(np.floor(0.5 * n_pixels)):]  # Lower half of the faces
X_test = test[:, :int(np.ceil(0.5 * n_pixels))]
y_test = test[:, int(np.floor(0.5 * n_pixels)):]
# Fit estimators
ESTIMATORS = {
"Extra trees": ExtraTreesRegressor(n_estimators=10, max_features=32,
random_state=0),
"K-nn": KNeighborsRegressor(),
"Linear regression": LinearRegression(),
"Ridge": RidgeCV(),
}
y_test_predict = dict()
for name, estimator in ESTIMATORS.items():
estimator.fit(X_train, y_train)
y_test_predict[name] = estimator.predict(X_test)
# Plot the completed faces
image_shape = (64, 64)
n_cols = 1 + len(ESTIMATORS)
plt.figure(figsize=(2. * n_cols, 2.26 * n_faces))
plt.suptitle("Face completion with multi-output estimators", size=16)
for i in range(n_faces):
true_face = np.hstack((X_test[i], y_test[i]))
if i:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 1)
else:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 1,
title="true faces")
sub.axis("off")
sub.imshow(true_face.reshape(image_shape),
cmap=plt.cm.gray,
interpolation="nearest")
for j, est in enumerate(sorted(ESTIMATORS)):
completed_face = np.hstack((X_test[i], y_test_predict[est][i]))
if i:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 2 + j)
else:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 2 + j,
title=est)
sub.axis("off")
sub.imshow(completed_face.reshape(image_shape),
cmap=plt.cm.gray,
interpolation="nearest")
plt.show()
| bsd-3-clause |
ryfeus/lambda-packs | Sklearn_scipy_numpy/source/numpy/linalg/linalg.py | 32 | 75738 | """Lite version of scipy.linalg.
Notes
-----
This module is a lite version of the linalg.py module in SciPy which
contains high-level Python interface to the LAPACK library. The lite
version only accesses the following LAPACK functions: dgesv, zgesv,
dgeev, zgeev, dgesdd, zgesdd, dgelsd, zgelsd, dsyevd, zheevd, dgetrf,
zgetrf, dpotrf, zpotrf, dgeqrf, zgeqrf, zungqr, dorgqr.
"""
from __future__ import division, absolute_import, print_function
__all__ = ['matrix_power', 'solve', 'tensorsolve', 'tensorinv', 'inv',
'cholesky', 'eigvals', 'eigvalsh', 'pinv', 'slogdet', 'det',
'svd', 'eig', 'eigh', 'lstsq', 'norm', 'qr', 'cond', 'matrix_rank',
'LinAlgError', 'multi_dot']
import warnings
from numpy.core import (
array, asarray, zeros, empty, empty_like, transpose, intc, single, double,
csingle, cdouble, inexact, complexfloating, newaxis, ravel, all, Inf, dot,
add, multiply, sqrt, maximum, fastCopyAndTranspose, sum, isfinite, size,
finfo, errstate, geterrobj, longdouble, rollaxis, amin, amax, product, abs,
broadcast, atleast_2d, intp, asanyarray, isscalar
)
from numpy.lib import triu, asfarray
from numpy.linalg import lapack_lite, _umath_linalg
from numpy.matrixlib.defmatrix import matrix_power
from numpy.compat import asbytes
# For Python2/3 compatibility
_N = asbytes('N')
_V = asbytes('V')
_A = asbytes('A')
_S = asbytes('S')
_L = asbytes('L')
fortran_int = intc
# Error object
class LinAlgError(Exception):
"""
Generic Python-exception-derived object raised by linalg functions.
General purpose exception class, derived from Python's exception.Exception
class, programmatically raised in linalg functions when a Linear
Algebra-related condition would prevent further correct execution of the
function.
Parameters
----------
None
Examples
--------
>>> from numpy import linalg as LA
>>> LA.inv(np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "...linalg.py", line 350,
in inv return wrap(solve(a, identity(a.shape[0], dtype=a.dtype)))
File "...linalg.py", line 249,
in solve
raise LinAlgError('Singular matrix')
numpy.linalg.LinAlgError: Singular matrix
"""
pass
# Dealing with errors in _umath_linalg
_linalg_error_extobj = None
def _determine_error_states():
global _linalg_error_extobj
errobj = geterrobj()
bufsize = errobj[0]
with errstate(invalid='call', over='ignore',
divide='ignore', under='ignore'):
invalid_call_errmask = geterrobj()[1]
_linalg_error_extobj = [bufsize, invalid_call_errmask, None]
_determine_error_states()
def _raise_linalgerror_singular(err, flag):
raise LinAlgError("Singular matrix")
def _raise_linalgerror_nonposdef(err, flag):
raise LinAlgError("Matrix is not positive definite")
def _raise_linalgerror_eigenvalues_nonconvergence(err, flag):
raise LinAlgError("Eigenvalues did not converge")
def _raise_linalgerror_svd_nonconvergence(err, flag):
raise LinAlgError("SVD did not converge")
def get_linalg_error_extobj(callback):
extobj = list(_linalg_error_extobj)
extobj[2] = callback
return extobj
def _makearray(a):
new = asarray(a)
wrap = getattr(a, "__array_prepare__", new.__array_wrap__)
return new, wrap
def isComplexType(t):
return issubclass(t, complexfloating)
_real_types_map = {single : single,
double : double,
csingle : single,
cdouble : double}
_complex_types_map = {single : csingle,
double : cdouble,
csingle : csingle,
cdouble : cdouble}
def _realType(t, default=double):
return _real_types_map.get(t, default)
def _complexType(t, default=cdouble):
return _complex_types_map.get(t, default)
def _linalgRealType(t):
"""Cast the type t to either double or cdouble."""
return double
def _commonType(*arrays):
# in lite version, use higher precision (always double or cdouble)
result_type = single
is_complex = False
for a in arrays:
if issubclass(a.dtype.type, inexact):
if isComplexType(a.dtype.type):
is_complex = True
rt = _realType(a.dtype.type, default=None)
if rt is None:
# unsupported inexact scalar
raise TypeError("array type %s is unsupported in linalg" %
(a.dtype.name,))
else:
rt = double
if rt is double:
result_type = double
if is_complex:
t = cdouble
result_type = _complex_types_map[result_type]
else:
t = double
return t, result_type
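# For example, _commonType(a, b) with a float32 and a complex64 array returns
# (cdouble, csingle): the computation is carried out in double precision, while
# the second value records the single precision to cast the result back to.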
# _fastCopyAndTranspose assumes the input is 2D (as all the calls in here are).
_fastCT = fastCopyAndTranspose
def _to_native_byte_order(*arrays):
ret = []
for arr in arrays:
if arr.dtype.byteorder not in ('=', '|'):
ret.append(asarray(arr, dtype=arr.dtype.newbyteorder('=')))
else:
ret.append(arr)
if len(ret) == 1:
return ret[0]
else:
return ret
def _fastCopyAndTranspose(type, *arrays):
cast_arrays = ()
for a in arrays:
if a.dtype.type is type:
cast_arrays = cast_arrays + (_fastCT(a),)
else:
cast_arrays = cast_arrays + (_fastCT(a.astype(type)),)
if len(cast_arrays) == 1:
return cast_arrays[0]
else:
return cast_arrays
def _assertRank2(*arrays):
for a in arrays:
if len(a.shape) != 2:
raise LinAlgError('%d-dimensional array given. Array must be '
'two-dimensional' % len(a.shape))
def _assertRankAtLeast2(*arrays):
for a in arrays:
if len(a.shape) < 2:
raise LinAlgError('%d-dimensional array given. Array must be '
'at least two-dimensional' % len(a.shape))
def _assertSquareness(*arrays):
for a in arrays:
if max(a.shape) != min(a.shape):
raise LinAlgError('Array must be square')
def _assertNdSquareness(*arrays):
for a in arrays:
if max(a.shape[-2:]) != min(a.shape[-2:]):
raise LinAlgError('Last 2 dimensions of the array must be square')
def _assertFinite(*arrays):
for a in arrays:
if not (isfinite(a).all()):
raise LinAlgError("Array must not contain infs or NaNs")
def _assertNoEmpty2d(*arrays):
for a in arrays:
if a.size == 0 and product(a.shape[-2:]) == 0:
raise LinAlgError("Arrays cannot be empty")
# Linear equations
def tensorsolve(a, b, axes=None):
"""
Solve the tensor equation ``a x = b`` for x.
It is assumed that all indices of `x` are summed over in the product,
together with the rightmost indices of `a`, as is done in, for example,
``tensordot(a, x, axes=len(b.shape))``.
Parameters
----------
a : array_like
Coefficient tensor, of shape ``b.shape + Q``. `Q`, a tuple, equals
the shape of that sub-tensor of `a` consisting of the appropriate
number of its rightmost indices, and must be such that
``prod(Q) == prod(b.shape)`` (in which sense `a` is said to be
'square').
b : array_like
Right-hand tensor, which can be of any shape.
axes : tuple of ints, optional
Axes in `a` to reorder to the right, before inversion.
If None (default), no reordering is done.
Returns
-------
x : ndarray, shape Q
Raises
------
LinAlgError
If `a` is singular or not 'square' (in the above sense).
See Also
--------
tensordot, tensorinv, einsum
Examples
--------
>>> a = np.eye(2*3*4)
>>> a.shape = (2*3, 4, 2, 3, 4)
>>> b = np.random.randn(2*3, 4)
>>> x = np.linalg.tensorsolve(a, b)
>>> x.shape
(2, 3, 4)
>>> np.allclose(np.tensordot(a, x, axes=3), b)
True
"""
a, wrap = _makearray(a)
b = asarray(b)
an = a.ndim
if axes is not None:
allaxes = list(range(0, an))
for k in axes:
allaxes.remove(k)
allaxes.insert(an, k)
a = a.transpose(allaxes)
oldshape = a.shape[-(an-b.ndim):]
prod = 1
for k in oldshape:
prod *= k
a = a.reshape(-1, prod)
b = b.ravel()
res = wrap(solve(a, b))
res.shape = oldshape
return res
def solve(a, b):
"""
Solve a linear matrix equation, or system of linear scalar equations.
Computes the "exact" solution, `x`, of the well-determined, i.e., full
rank, linear matrix equation `ax = b`.
Parameters
----------
a : (..., M, M) array_like
Coefficient matrix.
b : {(..., M,), (..., M, K)}, array_like
Ordinate or "dependent variable" values.
Returns
-------
x : {(..., M,), (..., M, K)} ndarray
Solution to the system a x = b. Returned shape is identical to `b`.
Raises
------
LinAlgError
If `a` is singular or not square.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The solutions are computed using LAPACK routine _gesv
`a` must be square and of full-rank, i.e., all rows (or, equivalently,
columns) must be linearly independent; if either is not true, use
`lstsq` for the least-squares best "solution" of the
system/equation.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pg. 22.
Examples
--------
Solve the system of equations ``3 * x0 + x1 = 9`` and ``x0 + 2 * x1 = 8``:
>>> a = np.array([[3,1], [1,2]])
>>> b = np.array([9,8])
>>> x = np.linalg.solve(a, b)
>>> x
array([ 2., 3.])
Check that the solution is correct:
>>> np.allclose(np.dot(a, x), b)
True
"""
a, _ = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
b, wrap = _makearray(b)
t, result_t = _commonType(a, b)
# We use the b = (..., M,) logic, only if the number of extra dimensions
# match exactly
if b.ndim == a.ndim - 1:
if a.shape[-1] == 0 and b.shape[-1] == 0:
# Legal, but the ufunc cannot handle the 0-sized inner dims
# let the ufunc handle all wrong cases.
a = a.reshape(a.shape[:-1])
bc = broadcast(a, b)
return wrap(empty(bc.shape, dtype=result_t))
gufunc = _umath_linalg.solve1
else:
if b.size == 0:
if (a.shape[-1] == 0 and b.shape[-2] == 0) or b.shape[-1] == 0:
a = a[:,:1].reshape(a.shape[:-1] + (1,))
bc = broadcast(a, b)
return wrap(empty(bc.shape, dtype=result_t))
gufunc = _umath_linalg.solve
signature = 'DD->D' if isComplexType(t) else 'dd->d'
extobj = get_linalg_error_extobj(_raise_linalgerror_singular)
r = gufunc(a, b, signature=signature, extobj=extobj)
return wrap(r.astype(result_t, copy=False))
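# Illustrative sketch (not part of the original module): `solve` broadcasts
# over leading dimensions, so a stack of independent systems can be solved
# in one call (assumed example data).
def _example_batched_solve():
    import numpy as np
    a = np.random.rand(4, 3, 3) + 3.0 * np.eye(3)   # four well-conditioned 3x3 systems
    b = np.random.rand(4, 3)                        # matching right-hand sides
    x = np.linalg.solve(a, b)                       # shape (4, 3), same as b
    assert np.allclose(np.einsum('nij,nj->ni', a, x), b)
    return x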
def tensorinv(a, ind=2):
"""
Compute the 'inverse' of an N-dimensional array.
The result is an inverse for `a` relative to the tensordot operation
``tensordot(a, b, ind)``, i. e., up to floating-point accuracy,
``tensordot(tensorinv(a), a, ind)`` is the "identity" tensor for the
tensordot operation.
Parameters
----------
a : array_like
Tensor to 'invert'. Its shape must be 'square', i. e.,
``prod(a.shape[:ind]) == prod(a.shape[ind:])``.
ind : int, optional
Number of first indices that are involved in the inverse sum.
Must be a positive integer, default is 2.
Returns
-------
b : ndarray
`a`'s tensordot inverse, shape ``a.shape[ind:] + a.shape[:ind]``.
Raises
------
LinAlgError
If `a` is singular or not 'square' (in the above sense).
See Also
--------
tensordot, tensorsolve
Examples
--------
>>> a = np.eye(4*6)
>>> a.shape = (4, 6, 8, 3)
>>> ainv = np.linalg.tensorinv(a, ind=2)
>>> ainv.shape
(8, 3, 4, 6)
>>> b = np.random.randn(4, 6)
>>> np.allclose(np.tensordot(ainv, b), np.linalg.tensorsolve(a, b))
True
>>> a = np.eye(4*6)
>>> a.shape = (24, 8, 3)
>>> ainv = np.linalg.tensorinv(a, ind=1)
>>> ainv.shape
(8, 3, 24)
>>> b = np.random.randn(24)
>>> np.allclose(np.tensordot(ainv, b, 1), np.linalg.tensorsolve(a, b))
True
"""
a = asarray(a)
oldshape = a.shape
prod = 1
if ind > 0:
invshape = oldshape[ind:] + oldshape[:ind]
for k in oldshape[ind:]:
prod *= k
else:
raise ValueError("Invalid ind argument.")
a = a.reshape(prod, -1)
ia = inv(a)
return ia.reshape(*invshape)
# Matrix inversion
def inv(a):
"""
Compute the (multiplicative) inverse of a matrix.
Given a square matrix `a`, return the matrix `ainv` satisfying
``dot(a, ainv) = dot(ainv, a) = eye(a.shape[0])``.
Parameters
----------
a : (..., M, M) array_like
Matrix to be inverted.
Returns
-------
ainv : (..., M, M) ndarray or matrix
(Multiplicative) inverse of the matrix `a`.
Raises
------
LinAlgError
If `a` is not square or inversion fails.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
Examples
--------
>>> from numpy.linalg import inv
>>> a = np.array([[1., 2.], [3., 4.]])
>>> ainv = inv(a)
>>> np.allclose(np.dot(a, ainv), np.eye(2))
True
>>> np.allclose(np.dot(ainv, a), np.eye(2))
True
If a is a matrix object, then the return value is a matrix as well:
>>> ainv = inv(np.matrix(a))
>>> ainv
matrix([[-2. , 1. ],
[ 1.5, -0.5]])
Inverses of several matrices can be computed at once:
>>> a = np.array([[[1., 2.], [3., 4.]], [[1, 3], [3, 5]]])
>>> inv(a)
array([[[-2. , 1. ],
[ 1.5, -0.5]],
[[-5. , 2. ],
[ 3. , -1. ]]])
"""
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
if a.shape[-1] == 0:
# The inner array is 0x0, the ufunc cannot handle this case
return wrap(empty_like(a, dtype=result_t))
signature = 'D->D' if isComplexType(t) else 'd->d'
extobj = get_linalg_error_extobj(_raise_linalgerror_singular)
ainv = _umath_linalg.inv(a, signature=signature, extobj=extobj)
return wrap(ainv.astype(result_t, copy=False))
# Cholesky decomposition
def cholesky(a):
"""
Cholesky decomposition.
Return the Cholesky decomposition, `L * L.H`, of the square matrix `a`,
where `L` is lower-triangular and .H is the conjugate transpose operator
(which is the ordinary transpose if `a` is real-valued). `a` must be
Hermitian (symmetric if real-valued) and positive-definite. Only `L` is
actually returned.
Parameters
----------
a : (..., M, M) array_like
Hermitian (symmetric if all elements are real), positive-definite
input matrix.
Returns
-------
L : (..., M, M) array_like
Lower-triangular Cholesky factor of `a`. Returns a
matrix object if `a` is a matrix object.
Raises
------
LinAlgError
If the decomposition fails, for example, if `a` is not
positive-definite.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The Cholesky decomposition is often used as a fast way of solving
.. math:: A \\mathbf{x} = \\mathbf{b}
(when `A` is both Hermitian/symmetric and positive-definite).
First, we solve for :math:`\\mathbf{y}` in
.. math:: L \\mathbf{y} = \\mathbf{b},
and then for :math:`\\mathbf{x}` in
.. math:: L.H \\mathbf{x} = \\mathbf{y}.
Examples
--------
>>> A = np.array([[1,-2j],[2j,5]])
>>> A
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> L = np.linalg.cholesky(A)
>>> L
array([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
>>> np.dot(L, L.T.conj()) # verify that L * L.H = A
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> A = [[1,-2j],[2j,5]] # what happens if A is only array_like?
>>> np.linalg.cholesky(A) # an ndarray object is returned
array([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
>>> # But a matrix object is returned if A is a matrix object
>>> LA.cholesky(np.matrix(A))
matrix([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
"""
extobj = get_linalg_error_extobj(_raise_linalgerror_nonposdef)
gufunc = _umath_linalg.cholesky_lo
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
signature = 'D->D' if isComplexType(t) else 'd->d'
r = gufunc(a, signature=signature, extobj=extobj)
return wrap(r.astype(result_t, copy=False))
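# Illustrative sketch (not part of the original module): using the Cholesky
# factor to solve A x = b via the two triangular systems described in the
# Notes above (assumed example data).
def _example_cholesky_solve():
    import numpy as np
    A = np.array([[4.0, 2.0], [2.0, 3.0]])   # symmetric positive-definite
    b = np.array([1.0, 2.0])
    L = np.linalg.cholesky(A)
    y = np.linalg.solve(L, b)                # forward solve:  L y = b
    x = np.linalg.solve(L.conj().T, y)       # backward solve: L.H x = y
    assert np.allclose(np.dot(A, x), b)
    return x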
# QR decomposition
def qr(a, mode='reduced'):
"""
Compute the qr factorization of a matrix.
Factor the matrix `a` as *qr*, where `q` is orthonormal and `r` is
upper-triangular.
Parameters
----------
a : array_like, shape (M, N)
Matrix to be factored.
mode : {'reduced', 'complete', 'r', 'raw', 'full', 'economic'}, optional
If K = min(M, N), then
'reduced' : returns q, r with dimensions (M, K), (K, N) (default)
'complete' : returns q, r with dimensions (M, M), (M, N)
'r' : returns r only with dimensions (K, N)
'raw' : returns h, tau with dimensions (N, M), (K,)
'full' : alias of 'reduced', deprecated
'economic' : returns h from 'raw', deprecated.
The options 'reduced', 'complete', and 'raw' are new in numpy 1.8,
see the notes for more information. The default is 'reduced' and to
maintain backward compatibility with earlier versions of numpy both
it and the old default 'full' can be omitted. Note that array h
returned in 'raw' mode is transposed for calling Fortran. The
'economic' mode is deprecated. The modes 'full' and 'economic' may
be passed using only the first letter for backwards compatibility,
but all others must be spelled out. See the Notes for more
explanation.
Returns
-------
q : ndarray of float or complex, optional
A matrix with orthonormal columns. When mode = 'complete' the
result is an orthogonal/unitary matrix depending on whether or not
a is real/complex. The determinant may be either +/- 1 in that
case.
r : ndarray of float or complex, optional
The upper-triangular matrix.
(h, tau) : ndarrays of np.double or np.cdouble, optional
The array h contains the Householder reflectors that generate q
along with r. The tau array contains scaling factors for the
reflectors. In the deprecated 'economic' mode only h is returned.
Raises
------
LinAlgError
If factoring fails.
Notes
-----
This is an interface to the LAPACK routines dgeqrf, zgeqrf,
dorgqr, and zungqr.
For more information on the qr factorization, see for example:
http://en.wikipedia.org/wiki/QR_factorization
Subclasses of `ndarray` are preserved except for the 'raw' mode. So if
`a` is of type `matrix`, all the return values will be matrices too.
New 'reduced', 'complete', and 'raw' options for mode were added in
Numpy 1.8 and the old option 'full' was made an alias of 'reduced'. In
addition the options 'full' and 'economic' were deprecated. Because
'full' was the previous default and 'reduced' is the new default,
backward compatibility can be maintained by letting `mode` default.
The 'raw' option was added so that LAPACK routines that can multiply
arrays by q using the Householder reflectors can be used. Note that in
this case the returned arrays are of type np.double or np.cdouble and
the h array is transposed to be FORTRAN compatible. No routines using
the 'raw' return are currently exposed by numpy, but some are available
in lapack_lite and just await the necessary work.
Examples
--------
>>> a = np.random.randn(9, 6)
>>> q, r = np.linalg.qr(a)
>>> np.allclose(a, np.dot(q, r)) # a does equal qr
True
>>> r2 = np.linalg.qr(a, mode='r')
>>> r3 = np.linalg.qr(a, mode='economic')
>>> np.allclose(r, r2) # mode='r' returns the same r as mode='full'
True
>>> # But only triu parts are guaranteed equal when mode='economic'
>>> np.allclose(r, np.triu(r3[:6,:6], k=0))
True
Example illustrating a common use of `qr`: solving of least squares
problems
What are the least-squares-best `m` and `y0` in ``y = y0 + mx`` for
the following data: {(0,1), (1,0), (1,2), (2,1)}. (Graph the points
and you'll see that it should be y0 = 0, m = 1.) The answer is provided
by solving the over-determined matrix equation ``Ax = b``, where::
A = array([[0, 1], [1, 1], [1, 1], [2, 1]])
x = array([[y0], [m]])
b = array([[1], [0], [2], [1]])
If A = qr such that q is orthonormal (which is always possible via
Gram-Schmidt), then ``x = inv(r) * (q.T) * b``. (In numpy practice,
however, we simply use `lstsq`.)
>>> A = np.array([[0, 1], [1, 1], [1, 1], [2, 1]])
>>> A
array([[0, 1],
[1, 1],
[1, 1],
[2, 1]])
>>> b = np.array([1, 0, 2, 1])
>>> q, r = LA.qr(A)
>>> p = np.dot(q.T, b)
>>> np.dot(LA.inv(r), p)
array([ 1.1e-16, 1.0e+00])
"""
if mode not in ('reduced', 'complete', 'r', 'raw'):
if mode in ('f', 'full'):
# 2013-04-01, 1.8
msg = "".join((
"The 'full' option is deprecated in favor of 'reduced'.\n",
"For backward compatibility let mode default."))
warnings.warn(msg, DeprecationWarning)
mode = 'reduced'
elif mode in ('e', 'economic'):
# 2013-04-01, 1.8
msg = "The 'economic' option is deprecated.",
warnings.warn(msg, DeprecationWarning)
mode = 'economic'
else:
raise ValueError("Unrecognized mode '%s'" % mode)
a, wrap = _makearray(a)
_assertRank2(a)
_assertNoEmpty2d(a)
m, n = a.shape
t, result_t = _commonType(a)
a = _fastCopyAndTranspose(t, a)
a = _to_native_byte_order(a)
mn = min(m, n)
tau = zeros((mn,), t)
if isComplexType(t):
lapack_routine = lapack_lite.zgeqrf
routine_name = 'zgeqrf'
else:
lapack_routine = lapack_lite.dgeqrf
routine_name = 'dgeqrf'
# calculate optimal size of work data 'work'
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, n, a, m, tau, work, -1, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
# do qr decomposition
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
results = lapack_routine(m, n, a, m, tau, work, lwork, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
# handle modes that don't return q
if mode == 'r':
r = _fastCopyAndTranspose(result_t, a[:, :mn])
return wrap(triu(r))
if mode == 'raw':
return a, tau
if mode == 'economic':
if t != result_t :
a = a.astype(result_t, copy=False)
return wrap(a.T)
# generate q from a
if mode == 'complete' and m > n:
mc = m
q = empty((m, m), t)
else:
mc = mn
q = empty((n, m), t)
q[:n] = a
if isComplexType(t):
lapack_routine = lapack_lite.zungqr
routine_name = 'zungqr'
else:
lapack_routine = lapack_lite.dorgqr
routine_name = 'dorgqr'
# determine optimal lwork
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, mc, mn, q, m, tau, work, -1, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
# compute q
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
results = lapack_routine(m, mc, mn, q, m, tau, work, lwork, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
q = _fastCopyAndTranspose(result_t, q[:mc])
r = _fastCopyAndTranspose(result_t, a[:, :mc])
return wrap(q), wrap(triu(r))
# Eigenvalues
def eigvals(a):
"""
Compute the eigenvalues of a general matrix.
Main difference between `eigvals` and `eig`: the eigenvectors aren't
returned.
Parameters
----------
a : (..., M, M) array_like
A complex- or real-valued matrix whose eigenvalues will be computed.
Returns
-------
w : (..., M,) ndarray
The eigenvalues, each repeated according to its multiplicity.
They are not necessarily ordered, nor are they necessarily
real for real matrices.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eig : eigenvalues and right eigenvectors of general arrays
eigvalsh : eigenvalues of symmetric or Hermitian arrays.
eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
This is implemented using the _geev LAPACK routines which compute
the eigenvalues and eigenvectors of general square arrays.
Examples
--------
Illustration, using the fact that the eigenvalues of a diagonal matrix
are its diagonal elements, that multiplying a matrix on the left
by an orthogonal matrix, `Q`, and on the right by `Q.T` (the transpose
of `Q`), preserves the eigenvalues of the "middle" matrix. In other words,
if `Q` is orthogonal, then ``Q * A * Q.T`` has the same eigenvalues as
``A``:
>>> from numpy import linalg as LA
>>> x = np.random.random()
>>> Q = np.array([[np.cos(x), -np.sin(x)], [np.sin(x), np.cos(x)]])
>>> LA.norm(Q[0, :]), LA.norm(Q[1, :]), np.dot(Q[0, :],Q[1, :])
(1.0, 1.0, 0.0)
Now multiply a diagonal matrix by Q on one side and by Q.T on the other:
>>> D = np.diag((-1,1))
>>> LA.eigvals(D)
array([-1., 1.])
>>> A = np.dot(Q, D)
>>> A = np.dot(A, Q.T)
>>> LA.eigvals(A)
array([ 1., -1.])
"""
a, wrap = _makearray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
_assertFinite(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
signature = 'D->D' if isComplexType(t) else 'd->D'
w = _umath_linalg.eigvals(a, signature=signature, extobj=extobj)
if not isComplexType(t):
if all(w.imag == 0):
w = w.real
result_t = _realType(result_t)
else:
result_t = _complexType(result_t)
return w.astype(result_t, copy=False)
def eigvalsh(a, UPLO='L'):
"""
Compute the eigenvalues of a Hermitian or real symmetric matrix.
Main difference from eigh: the eigenvectors are not computed.
Parameters
----------
a : (..., M, M) array_like
A complex- or real-valued matrix whose eigenvalues are to be
computed.
UPLO : {'L', 'U'}, optional
Same as `lower`, with 'L' for lower and 'U' for upper triangular.
Deprecated.
Returns
-------
w : (..., M,) ndarray
The eigenvalues in ascending order, each repeated according to
its multiplicity.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays.
eigvals : eigenvalues of general real or complex arrays.
eig : eigenvalues and right eigenvectors of general real or complex
arrays.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The eigenvalues are computed using LAPACK routines _syevd, _heevd
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, -2j], [2j, 5]])
>>> LA.eigvalsh(a)
array([ 0.17157288, 5.82842712])
"""
UPLO = UPLO.upper()
if UPLO not in ('L', 'U'):
raise ValueError("UPLO argument must be 'L' or 'U'")
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
if UPLO == 'L':
gufunc = _umath_linalg.eigvalsh_lo
else:
gufunc = _umath_linalg.eigvalsh_up
a, wrap = _makearray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
signature = 'D->d' if isComplexType(t) else 'd->d'
w = gufunc(a, signature=signature, extobj=extobj)
return w.astype(_realType(result_t), copy=False)
def _convertarray(a):
t, result_t = _commonType(a)
a = _fastCT(a.astype(t))
return a, t, result_t
# Eigenvectors
def eig(a):
"""
Compute the eigenvalues and right eigenvectors of a square array.
Parameters
----------
a : (..., M, M) array
Matrices for which the eigenvalues and right eigenvectors will
be computed
Returns
-------
w : (..., M) array
The eigenvalues, each repeated according to its multiplicity.
The eigenvalues are not necessarily ordered. The resulting
array will be of complex type, unless the imaginary part is
zero in which case it will be cast to a real type. When `a`
is real the resulting eigenvalues will be real (0 imaginary
part) or occur in conjugate pairs
v : (..., M, M) array
The normalized (unit "length") eigenvectors, such that the
column ``v[:,i]`` is the eigenvector corresponding to the
eigenvalue ``w[i]``.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigvals : eigenvalues of a non-symmetric array.
eigh : eigenvalues and eigenvectors of a symmetric or Hermitian
(conjugate symmetric) array.
eigvalsh : eigenvalues of a symmetric or Hermitian (conjugate symmetric)
array.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
This is implemented using the _geev LAPACK routines which compute
the eigenvalues and eigenvectors of general square arrays.
The number `w` is an eigenvalue of `a` if there exists a vector
`v` such that ``dot(a,v) = w * v``. Thus, the arrays `a`, `w`, and
`v` satisfy the equations ``dot(a[:,:], v[:,i]) = w[i] * v[:,i]``
for :math:`i \\in \\{0,...,M-1\\}`.
The array `v` of eigenvectors may not be of maximum rank, that is, some
of the columns may be linearly dependent, although round-off error may
obscure that fact. If the eigenvalues are all different, then theoretically
the eigenvectors are linearly independent. Likewise, the (complex-valued)
matrix of eigenvectors `v` is unitary if the matrix `a` is normal, i.e.,
if ``dot(a, a.H) = dot(a.H, a)``, where `a.H` denotes the conjugate
transpose of `a`.
Finally, it is emphasized that `v` consists of the *right* (as in
right-hand side) eigenvectors of `a`. A vector `y` satisfying
``dot(y.T, a) = z * y.T`` for some number `z` is called a *left*
eigenvector of `a`, and, in general, the left and right eigenvectors
of a matrix are not necessarily the (perhaps conjugate) transposes
of each other.
References
----------
G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, FL,
Academic Press, Inc., 1980, Various pp.
Examples
--------
>>> from numpy import linalg as LA
(Almost) trivial example with real e-values and e-vectors.
>>> w, v = LA.eig(np.diag((1, 2, 3)))
>>> w; v
array([ 1., 2., 3.])
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
Real matrix possessing complex e-values and e-vectors; note that the
e-values are complex conjugates of each other.
>>> w, v = LA.eig(np.array([[1, -1], [1, 1]]))
>>> w; v
array([ 1. + 1.j, 1. - 1.j])
array([[ 0.70710678+0.j , 0.70710678+0.j ],
[ 0.00000000-0.70710678j, 0.00000000+0.70710678j]])
Complex-valued matrix with real e-values (but complex-valued e-vectors);
note that a.conj().T = a, i.e., a is Hermitian.
>>> a = np.array([[1, 1j], [-1j, 1]])
>>> w, v = LA.eig(a)
>>> w; v
array([ 2.00000000e+00+0.j, 5.98651912e-36+0.j]) # i.e., {2, 0}
array([[ 0.00000000+0.70710678j, 0.70710678+0.j ],
[ 0.70710678+0.j , 0.00000000+0.70710678j]])
Be careful about round-off error!
>>> a = np.array([[1 + 1e-9, 0], [0, 1 - 1e-9]])
>>> # Theor. e-values are 1 +/- 1e-9
>>> w, v = LA.eig(a)
>>> w; v
array([ 1., 1.])
array([[ 1., 0.],
[ 0., 1.]])
"""
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
_assertFinite(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
signature = 'D->DD' if isComplexType(t) else 'd->DD'
w, vt = _umath_linalg.eig(a, signature=signature, extobj=extobj)
if not isComplexType(t) and all(w.imag == 0.0):
w = w.real
vt = vt.real
result_t = _realType(result_t)
else:
result_t = _complexType(result_t)
vt = vt.astype(result_t, copy=False)
return w.astype(result_t, copy=False), wrap(vt)
def eigh(a, UPLO='L'):
"""
Return the eigenvalues and eigenvectors of a Hermitian or symmetric matrix.
Returns two objects, a 1-D array containing the eigenvalues of `a`, and
a 2-D square array or matrix (depending on the input type) of the
corresponding eigenvectors (in columns).
Parameters
----------
a : (..., M, M) array
Hermitian/Symmetric matrices whose eigenvalues and
eigenvectors are to be computed.
UPLO : {'L', 'U'}, optional
Specifies whether the calculation is done with the lower triangular
part of `a` ('L', default) or the upper triangular part ('U').
Returns
-------
w : (..., M) ndarray
The eigenvalues in ascending order, each repeated according to
its multiplicity.
v : {(..., M, M) ndarray, (..., M, M) matrix}
The column ``v[:, i]`` is the normalized eigenvector corresponding
to the eigenvalue ``w[i]``. Will return a matrix object if `a` is
a matrix object.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigvalsh : eigenvalues of symmetric or Hermitian arrays.
eig : eigenvalues and right eigenvectors for non-symmetric arrays.
eigvals : eigenvalues of non-symmetric arrays.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The eigenvalues/eigenvectors are computed using LAPACK routines _syevd,
_heevd
The eigenvalues of real symmetric or complex Hermitian matrices are
always real. [1]_ The array `v` of (column) eigenvectors is unitary
and `a`, `w`, and `v` satisfy the equations
``dot(a, v[:, i]) = w[i] * v[:, i]``.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pg. 222.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, -2j], [2j, 5]])
>>> a
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> w, v = LA.eigh(a)
>>> w; v
array([ 0.17157288, 5.82842712])
array([[-0.92387953+0.j , -0.38268343+0.j ],
[ 0.00000000+0.38268343j, 0.00000000-0.92387953j]])
>>> np.dot(a, v[:, 0]) - w[0] * v[:, 0] # verify 1st e-val/vec pair
array([2.77555756e-17 + 0.j, 0. + 1.38777878e-16j])
>>> np.dot(a, v[:, 1]) - w[1] * v[:, 1] # verify 2nd e-val/vec pair
array([ 0.+0.j, 0.+0.j])
>>> A = np.matrix(a) # what happens if input is a matrix object
>>> A
matrix([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> w, v = LA.eigh(A)
>>> w; v
array([ 0.17157288, 5.82842712])
matrix([[-0.92387953+0.j , -0.38268343+0.j ],
[ 0.00000000+0.38268343j, 0.00000000-0.92387953j]])
"""
UPLO = UPLO.upper()
if UPLO not in ('L', 'U'):
raise ValueError("UPLO argument must be 'L' or 'U'")
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
if UPLO == 'L':
gufunc = _umath_linalg.eigh_lo
else:
gufunc = _umath_linalg.eigh_up
signature = 'D->dD' if isComplexType(t) else 'd->dd'
w, vt = gufunc(a, signature=signature, extobj=extobj)
w = w.astype(_realType(result_t), copy=False)
vt = vt.astype(result_t, copy=False)
return w, wrap(vt)
# Singular value decomposition
def svd(a, full_matrices=1, compute_uv=1):
"""
Singular Value Decomposition.
Factors the matrix `a` as ``u * np.diag(s) * v``, where `u` and `v`
are unitary and `s` is a 1-d array of `a`'s singular values.
Parameters
----------
a : (..., M, N) array_like
A real or complex matrix of shape (`M`, `N`) .
full_matrices : bool, optional
If True (default), `u` and `v` have the shapes (`M`, `M`) and
(`N`, `N`), respectively. Otherwise, the shapes are (`M`, `K`)
and (`K`, `N`), respectively, where `K` = min(`M`, `N`).
compute_uv : bool, optional
Whether or not to compute `u` and `v` in addition to `s`. True
by default.
Returns
-------
u : { (..., M, M), (..., M, K) } array
Unitary matrices. The actual shape depends on the value of
``full_matrices``. Only returned when ``compute_uv`` is True.
s : (..., K) array
The singular values for every matrix, sorted in descending order.
v : { (..., N, N), (..., K, N) } array
Unitary matrices. The actual shape depends on the value of
``full_matrices``. Only returned when ``compute_uv`` is True.
Raises
------
LinAlgError
If SVD computation does not converge.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The decomposition is performed using LAPACK routine _gesdd
The SVD is commonly written as ``a = U S V.H``. The `v` returned
by this function is ``V.H`` and ``u = U``.
If ``U`` is a unitary matrix, it means that it
satisfies ``U.H = inv(U)``.
The rows of `v` are the eigenvectors of ``a.H a``. The columns
of `u` are the eigenvectors of ``a a.H``. For row ``i`` in
`v` and column ``i`` in `u`, the corresponding eigenvalue is
``s[i]**2``.
If `a` is a `matrix` object (as opposed to an `ndarray`), then so
are all the return values.
Examples
--------
>>> a = np.random.randn(9, 6) + 1j*np.random.randn(9, 6)
Reconstruction based on full SVD:
>>> U, s, V = np.linalg.svd(a, full_matrices=True)
>>> U.shape, V.shape, s.shape
((9, 9), (6, 6), (6,))
>>> S = np.zeros((9, 6), dtype=complex)
>>> S[:6, :6] = np.diag(s)
>>> np.allclose(a, np.dot(U, np.dot(S, V)))
True
Reconstruction based on reduced SVD:
>>> U, s, V = np.linalg.svd(a, full_matrices=False)
>>> U.shape, V.shape, s.shape
((9, 6), (6, 6), (6,))
>>> S = np.diag(s)
>>> np.allclose(a, np.dot(U, np.dot(S, V)))
True
"""
a, wrap = _makearray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(_raise_linalgerror_svd_nonconvergence)
m = a.shape[-2]
n = a.shape[-1]
if compute_uv:
if full_matrices:
if m < n:
gufunc = _umath_linalg.svd_m_f
else:
gufunc = _umath_linalg.svd_n_f
else:
if m < n:
gufunc = _umath_linalg.svd_m_s
else:
gufunc = _umath_linalg.svd_n_s
signature = 'D->DdD' if isComplexType(t) else 'd->ddd'
u, s, vt = gufunc(a, signature=signature, extobj=extobj)
u = u.astype(result_t, copy=False)
s = s.astype(_realType(result_t), copy=False)
vt = vt.astype(result_t, copy=False)
return wrap(u), s, wrap(vt)
else:
if m < n:
gufunc = _umath_linalg.svd_m
else:
gufunc = _umath_linalg.svd_n
signature = 'D->d' if isComplexType(t) else 'd->d'
s = gufunc(a, signature=signature, extobj=extobj)
s = s.astype(_realType(result_t), copy=False)
return s
def cond(x, p=None):
"""
Compute the condition number of a matrix.
This function is capable of returning the condition number using
one of seven different norms, depending on the value of `p` (see
Parameters below).
Parameters
----------
x : (..., M, N) array_like
The matrix whose condition number is sought.
p : {None, 1, -1, 2, -2, inf, -inf, 'fro'}, optional
Order of the norm:
===== ============================
p norm for matrices
===== ============================
None 2-norm, computed directly using the ``SVD``
'fro' Frobenius norm
inf max(sum(abs(x), axis=1))
-inf min(sum(abs(x), axis=1))
1 max(sum(abs(x), axis=0))
-1 min(sum(abs(x), axis=0))
2 2-norm (largest sing. value)
-2 smallest singular value
===== ============================
inf means the numpy.inf object, and the Frobenius norm is
the root-of-sum-of-squares norm.
Returns
-------
c : {float, inf}
The condition number of the matrix. May be infinite.
See Also
--------
numpy.linalg.norm
Notes
-----
The condition number of `x` is defined as the norm of `x` times the
norm of the inverse of `x` [1]_; the norm can be the usual L2-norm
(root-of-sum-of-squares) or one of a number of other matrix norms.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, Orlando, FL,
Academic Press, Inc., 1980, pg. 285.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, 0, -1], [0, 1, 0], [1, 0, 1]])
>>> a
array([[ 1, 0, -1],
[ 0, 1, 0],
[ 1, 0, 1]])
>>> LA.cond(a)
1.4142135623730951
>>> LA.cond(a, 'fro')
3.1622776601683795
>>> LA.cond(a, np.inf)
2.0
>>> LA.cond(a, -np.inf)
1.0
>>> LA.cond(a, 1)
2.0
>>> LA.cond(a, -1)
1.0
>>> LA.cond(a, 2)
1.4142135623730951
>>> LA.cond(a, -2)
0.70710678118654746
>>> min(LA.svd(a, compute_uv=0))*min(LA.svd(LA.inv(a), compute_uv=0))
0.70710678118654746
"""
x = asarray(x) # in case we have a matrix
if p is None:
s = svd(x, compute_uv=False)
return s[..., 0]/s[..., -1]
else:
return norm(x, p, axis=(-2, -1)) * norm(inv(x), p, axis=(-2, -1))
def matrix_rank(M, tol=None):
"""
Return matrix rank of array using SVD method
Rank of the array is the number of SVD singular values of the array that are
greater than `tol`.
Parameters
----------
M : {(M,), (M, N)} array_like
array of <=2 dimensions
tol : {None, float}, optional
threshold below which SVD values are considered zero. If `tol` is
None, and ``S`` is an array with singular values for `M`, and
``eps`` is the epsilon value for datatype of ``S``, then `tol` is
set to ``S.max() * max(M.shape) * eps``.
Notes
-----
The default threshold to detect rank deficiency is a test on the magnitude
of the singular values of `M`. By default, we identify singular values less
than ``S.max() * max(M.shape) * eps`` as indicating rank deficiency (with
the symbols defined above). This is the algorithm MATLAB uses [1]. It also
appears in *Numerical recipes* in the discussion of SVD solutions for linear
least squares [2].
This default threshold is designed to detect rank deficiency accounting for
the numerical errors of the SVD computation. Imagine that there is a column
in `M` that is an exact (in floating point) linear combination of other
columns in `M`. Computing the SVD on `M` will not produce a singular value
exactly equal to 0 in general: any difference of the smallest SVD value from
0 will be caused by numerical imprecision in the calculation of the SVD.
Our threshold for small SVD values takes this numerical imprecision into
account, and the default threshold will detect such numerical rank
deficiency. The threshold may declare a matrix `M` rank deficient even if
the linear combination of some columns of `M` is not exactly equal to
another column of `M` but only numerically very close to another column of
`M`.
We chose our default threshold because it is in wide use. Other thresholds
are possible. For example, elsewhere in the 2007 edition of *Numerical
recipes* there is an alternative threshold of ``S.max() *
np.finfo(M.dtype).eps / 2. * np.sqrt(m + n + 1.)``. The authors describe
this threshold as being based on "expected roundoff error" (p 71).
The thresholds above deal with floating point roundoff error in the
calculation of the SVD. However, you may have more information about the
sources of error in `M` that would make you consider other tolerance values
to detect *effective* rank deficiency. The most useful measure of the
tolerance depends on the operations you intend to use on your matrix. For
example, if your data come from uncertain measurements with uncertainties
greater than floating point epsilon, choosing a tolerance near that
uncertainty may be preferable. The tolerance may be absolute if the
uncertainties are absolute rather than relative.
References
----------
.. [1] MATLAB reference documentation, "Rank"
http://www.mathworks.com/help/techdoc/ref/rank.html
.. [2] W. H. Press, S. A. Teukolsky, W. T. Vetterling and B. P. Flannery,
"Numerical Recipes (3rd edition)", Cambridge University Press, 2007,
page 795.
Examples
--------
>>> from numpy.linalg import matrix_rank
>>> matrix_rank(np.eye(4)) # Full rank matrix
4
>>> I=np.eye(4); I[-1,-1] = 0. # rank deficient matrix
>>> matrix_rank(I)
3
>>> matrix_rank(np.ones((4,))) # 1 dimension - rank 1 unless all 0
1
>>> matrix_rank(np.zeros((4,)))
0
"""
M = asarray(M)
if M.ndim > 2:
raise TypeError('array should have 2 or fewer dimensions')
if M.ndim < 2:
return int(not all(M==0))
S = svd(M, compute_uv=False)
if tol is None:
tol = S.max() * max(M.shape) * finfo(S.dtype).eps
return sum(S > tol)
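# Illustrative sketch (not part of the original module): supplying the
# alternative "expected roundoff error" tolerance mentioned in the Notes
# above instead of the default one.
def _example_matrix_rank_custom_tol():
    import numpy as np
    M = np.random.randn(20, 10)
    M[:, -1] = M[:, 0] + M[:, 1]         # force an exact linear dependency
    S = np.linalg.svd(M, compute_uv=False)
    m, n = M.shape
    tol = S.max() * np.finfo(M.dtype).eps / 2. * np.sqrt(m + n + 1.)
    return np.linalg.matrix_rank(M, tol=tol)   # expected rank: 9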
# Generalized inverse
def pinv(a, rcond=1e-15 ):
"""
Compute the (Moore-Penrose) pseudo-inverse of a matrix.
Calculate the generalized inverse of a matrix using its
singular-value decomposition (SVD) and including all
*large* singular values.
Parameters
----------
a : (M, N) array_like
Matrix to be pseudo-inverted.
rcond : float
Cutoff for small singular values.
Singular values smaller (in modulus) than
`rcond` * largest_singular_value (again, in modulus)
are set to zero.
Returns
-------
B : (N, M) ndarray
The pseudo-inverse of `a`. If `a` is a `matrix` instance, then so
is `B`.
Raises
------
LinAlgError
If the SVD computation does not converge.
Notes
-----
The pseudo-inverse of a matrix A, denoted :math:`A^+`, is
defined as: "the matrix that 'solves' [the least-squares problem]
:math:`Ax = b`," i.e., if :math:`\\bar{x}` is said solution, then
:math:`A^+` is that matrix such that :math:`\\bar{x} = A^+b`.
It can be shown that if :math:`Q_1 \\Sigma Q_2^T = A` is the singular
value decomposition of A, then
:math:`A^+ = Q_2 \\Sigma^+ Q_1^T`, where :math:`Q_{1,2}` are
orthogonal matrices, :math:`\\Sigma` is a diagonal matrix consisting
of A's so-called singular values, (followed, typically, by
zeros), and then :math:`\\Sigma^+` is simply the diagonal matrix
consisting of the reciprocals of A's singular values
(again, followed by zeros). [1]_
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pp. 139-142.
Examples
--------
The following example checks that ``a * a+ * a == a`` and
``a+ * a * a+ == a+``:
>>> a = np.random.randn(9, 6)
>>> B = np.linalg.pinv(a)
>>> np.allclose(a, np.dot(a, np.dot(B, a)))
True
>>> np.allclose(B, np.dot(B, np.dot(a, B)))
True
"""
a, wrap = _makearray(a)
_assertNoEmpty2d(a)
a = a.conjugate()
u, s, vt = svd(a, 0)
m = u.shape[0]
n = vt.shape[1]
cutoff = rcond*maximum.reduce(s)
for i in range(min(n, m)):
if s[i] > cutoff:
s[i] = 1./s[i]
else:
s[i] = 0.
res = dot(transpose(vt), multiply(s[:, newaxis], transpose(u)))
return wrap(res)
# Determinant
def slogdet(a):
"""
Compute the sign and (natural) logarithm of the determinant of an array.
If an array has a very small or very large determinant, then a call to
`det` may overflow or underflow. This routine is more robust against such
issues, because it computes the logarithm of the determinant rather than
the determinant itself.
Parameters
----------
a : (..., M, M) array_like
Input array, has to be a square 2-D array.
Returns
-------
sign : (...) array_like
A number representing the sign of the determinant. For a real matrix,
this is 1, 0, or -1. For a complex matrix, this is a complex number
with absolute value 1 (i.e., it is on the unit circle), or else 0.
logdet : (...) array_like
The natural log of the absolute value of the determinant.
If the determinant is zero, then `sign` will be 0 and `logdet` will be
-Inf. In all cases, the determinant is equal to ``sign * np.exp(logdet)``.
See Also
--------
det
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
.. versionadded:: 1.6.0.
The determinant is computed via LU factorization using the LAPACK
routine z/dgetrf.
Examples
--------
The determinant of a 2-D array ``[[a, b], [c, d]]`` is ``ad - bc``:
>>> a = np.array([[1, 2], [3, 4]])
>>> (sign, logdet) = np.linalg.slogdet(a)
>>> (sign, logdet)
(-1, 0.69314718055994529)
>>> sign * np.exp(logdet)
-2.0
Computing log-determinants for a stack of matrices:
>>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ])
>>> a.shape
(3, 2, 2)
>>> sign, logdet = np.linalg.slogdet(a)
>>> (sign, logdet)
(array([-1., -1., -1.]), array([ 0.69314718, 1.09861229, 2.07944154]))
>>> sign * np.exp(logdet)
array([-2., -3., -8.])
This routine succeeds where ordinary `det` does not:
>>> np.linalg.det(np.eye(500) * 0.1)
0.0
>>> np.linalg.slogdet(np.eye(500) * 0.1)
(1, -1151.2925464970228)
"""
a = asarray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
real_t = _realType(result_t)
signature = 'D->Dd' if isComplexType(t) else 'd->dd'
sign, logdet = _umath_linalg.slogdet(a, signature=signature)
if isscalar(sign):
sign = sign.astype(result_t)
else:
sign = sign.astype(result_t, copy=False)
if isscalar(logdet):
logdet = logdet.astype(real_t)
else:
logdet = logdet.astype(real_t, copy=False)
return sign, logdet
def det(a):
"""
Compute the determinant of an array.
Parameters
----------
a : (..., M, M) array_like
Input array to compute determinants for.
Returns
-------
det : (...) array_like
Determinant of `a`.
See Also
--------
slogdet : Another way of representing the determinant, more suitable
for large matrices where underflow/overflow may occur.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The determinant is computed via LU factorization using the LAPACK
routine z/dgetrf.
Examples
--------
The determinant of a 2-D array [[a, b], [c, d]] is ad - bc:
>>> a = np.array([[1, 2], [3, 4]])
>>> np.linalg.det(a)
-2.0
Computing determinants for a stack of matrices:
>>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ])
>>> a.shape
(3, 2, 2)
>>> np.linalg.det(a)
array([-2., -3., -8.])
"""
a = asarray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
signature = 'D->D' if isComplexType(t) else 'd->d'
r = _umath_linalg.det(a, signature=signature)
if isscalar(r):
r = r.astype(result_t)
else:
r = r.astype(result_t, copy=False)
return r
# Linear Least Squares
def lstsq(a, b, rcond=-1):
"""
Return the least-squares solution to a linear matrix equation.
Solves the equation `a x = b` by computing a vector `x` that
minimizes the Euclidean 2-norm `|| b - a x ||^2`. The equation may
be under-, well-, or over- determined (i.e., the number of
linearly independent rows of `a` can be less than, equal to, or
greater than its number of linearly independent columns). If `a`
is square and of full rank, then `x` (but for round-off error) is
the "exact" solution of the equation.
Parameters
----------
a : (M, N) array_like
"Coefficient" matrix.
b : {(M,), (M, K)} array_like
Ordinate or "dependent variable" values. If `b` is two-dimensional,
the least-squares solution is calculated for each of the `K` columns
of `b`.
rcond : float, optional
Cut-off ratio for small singular values of `a`.
Singular values are set to zero if they are smaller than `rcond`
times the largest singular value of `a`.
Returns
-------
x : {(N,), (N, K)} ndarray
Least-squares solution. If `b` is two-dimensional,
the solutions are in the `K` columns of `x`.
residuals : {(), (1,), (K,)} ndarray
Sums of residuals; squared Euclidean 2-norm for each column in
``b - a*x``.
If the rank of `a` is < N or M <= N, this is an empty array.
If `b` is 1-dimensional, this is a (1,) shape array.
Otherwise the shape is (K,).
rank : int
Rank of matrix `a`.
s : (min(M, N),) ndarray
Singular values of `a`.
Raises
------
LinAlgError
If computation does not converge.
Notes
-----
If `b` is a matrix, then all array results are returned as matrices.
Examples
--------
Fit a line, ``y = mx + c``, through some noisy data-points:
>>> x = np.array([0, 1, 2, 3])
>>> y = np.array([-1, 0.2, 0.9, 2.1])
By examining the coefficients, we see that the line should have a
gradient of roughly 1 and cut the y-axis at, more or less, -1.
We can rewrite the line equation as ``y = Ap``, where ``A = [[x 1]]``
and ``p = [[m], [c]]``. Now use `lstsq` to solve for `p`:
>>> A = np.vstack([x, np.ones(len(x))]).T
>>> A
array([[ 0., 1.],
[ 1., 1.],
[ 2., 1.],
[ 3., 1.]])
>>> m, c = np.linalg.lstsq(A, y)[0]
>>> print m, c
1.0 -0.95
Plot the data along with the fitted line:
>>> import matplotlib.pyplot as plt
>>> plt.plot(x, y, 'o', label='Original data', markersize=10)
>>> plt.plot(x, m*x + c, 'r', label='Fitted line')
>>> plt.legend()
>>> plt.show()
"""
import math
a, _ = _makearray(a)
b, wrap = _makearray(b)
is_1d = len(b.shape) == 1
if is_1d:
b = b[:, newaxis]
_assertRank2(a, b)
m = a.shape[0]
n = a.shape[1]
n_rhs = b.shape[1]
ldb = max(n, m)
if m != b.shape[0]:
raise LinAlgError('Incompatible dimensions')
t, result_t = _commonType(a, b)
result_real_t = _realType(result_t)
real_t = _linalgRealType(t)
bstar = zeros((ldb, n_rhs), t)
bstar[:b.shape[0], :n_rhs] = b.copy()
a, bstar = _fastCopyAndTranspose(t, a, bstar)
a, bstar = _to_native_byte_order(a, bstar)
s = zeros((min(m, n),), real_t)
nlvl = max( 0, int( math.log( float(min(m, n))/2. ) ) + 1 )
iwork = zeros((3*min(m, n)*nlvl+11*min(m, n),), fortran_int)
if isComplexType(t):
lapack_routine = lapack_lite.zgelsd
lwork = 1
rwork = zeros((lwork,), real_t)
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, -1, rwork, iwork, 0)
lwork = int(abs(work[0]))
rwork = zeros((lwork,), real_t)
a_real = zeros((m, n), real_t)
bstar_real = zeros((ldb, n_rhs,), real_t)
results = lapack_lite.dgelsd(m, n, n_rhs, a_real, m,
bstar_real, ldb, s, rcond,
0, rwork, -1, iwork, 0)
lrwork = int(rwork[0])
work = zeros((lwork,), t)
rwork = zeros((lrwork,), real_t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, lwork, rwork, iwork, 0)
else:
lapack_routine = lapack_lite.dgelsd
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, -1, iwork, 0)
lwork = int(work[0])
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, lwork, iwork, 0)
if results['info'] > 0:
raise LinAlgError('SVD did not converge in Linear Least Squares')
resids = array([], result_real_t)
if is_1d:
x = array(ravel(bstar)[:n], dtype=result_t, copy=True)
if results['rank'] == n and m > n:
if isComplexType(t):
resids = array([sum(abs(ravel(bstar)[n:])**2)],
dtype=result_real_t)
else:
resids = array([sum((ravel(bstar)[n:])**2)],
dtype=result_real_t)
else:
x = array(transpose(bstar)[:n,:], dtype=result_t, copy=True)
if results['rank'] == n and m > n:
if isComplexType(t):
resids = sum(abs(transpose(bstar)[n:,:])**2, axis=0).astype(
result_real_t, copy=False)
else:
resids = sum((transpose(bstar)[n:,:])**2, axis=0).astype(
result_real_t, copy=False)
st = s[:min(n, m)].astype(result_real_t, copy=True)
return wrap(x), wrap(resids), results['rank'], st
def _multi_svd_norm(x, row_axis, col_axis, op):
"""Compute a function of the singular values of the 2-D matrices in `x`.
This is a private utility function used by numpy.linalg.norm().
Parameters
----------
x : ndarray
row_axis, col_axis : int
The axes of `x` that hold the 2-D matrices.
op : callable
This should be either numpy.amin or numpy.amax or numpy.sum.
Returns
-------
result : float or ndarray
If `x` is 2-D, the return values is a float.
Otherwise, it is an array with ``x.ndim - 2`` dimensions.
The return values are either the minimum or maximum or sum of the
singular values of the matrices, depending on whether `op`
is `numpy.amin` or `numpy.amax` or `numpy.sum`.
"""
if row_axis > col_axis:
row_axis -= 1
y = rollaxis(rollaxis(x, col_axis, x.ndim), row_axis, -1)
result = op(svd(y, compute_uv=0), axis=-1)
return result
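# Illustrative sketch (not part of the original module): `norm` below uses
# this helper for the matrix norms that require singular values, e.g. the
# nuclear norm (op=sum) and the 2-norm (op=amax) over the last two axes.
def _example_multi_svd_norm():
    import numpy as np
    x = np.random.randn(5, 3, 4)                 # a stack of five 3x4 matrices
    nuc = _multi_svd_norm(x, 1, 2, np.sum)       # shape (5,)
    expected = [np.linalg.svd(m, compute_uv=False).sum() for m in x]
    assert np.allclose(nuc, expected)
    return nuc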
def norm(x, ord=None, axis=None, keepdims=False):
"""
Matrix or vector norm.
This function is able to return one of eight different matrix norms,
or one of an infinite number of vector norms (described below), depending
on the value of the ``ord`` parameter.
Parameters
----------
x : array_like
Input array. If `axis` is None, `x` must be 1-D or 2-D.
ord : {non-zero int, inf, -inf, 'fro', 'nuc'}, optional
Order of the norm (see table under ``Notes``). inf means numpy's
`inf` object.
axis : {int, 2-tuple of ints, None}, optional
If `axis` is an integer, it specifies the axis of `x` along which to
compute the vector norms. If `axis` is a 2-tuple, it specifies the
axes that hold 2-D matrices, and the matrix norms of these matrices
are computed. If `axis` is None then either a vector norm (when `x`
is 1-D) or a matrix norm (when `x` is 2-D) is returned.
keepdims : bool, optional
If this is set to True, the axes which are normed over are left in the
result as dimensions with size one. With this option the result will
broadcast correctly against the original `x`.
.. versionadded:: 1.10.0
Returns
-------
n : float or ndarray
Norm of the matrix or vector(s).
Notes
-----
For values of ``ord <= 0``, the result is, strictly speaking, not a
mathematical 'norm', but it may still be useful for various numerical
purposes.
The following norms can be calculated:
===== ============================ ==========================
ord norm for matrices norm for vectors
===== ============================ ==========================
None Frobenius norm 2-norm
'fro' Frobenius norm --
'nuc' nuclear norm --
inf max(sum(abs(x), axis=1)) max(abs(x))
-inf min(sum(abs(x), axis=1)) min(abs(x))
0 -- sum(x != 0)
1 max(sum(abs(x), axis=0)) as below
-1 min(sum(abs(x), axis=0)) as below
2 2-norm (largest sing. value) as below
-2 smallest singular value as below
other -- sum(abs(x)**ord)**(1./ord)
===== ============================ ==========================
The Frobenius norm is given by [1]_:
:math:`||A||_F = [\\sum_{i,j} abs(a_{i,j})^2]^{1/2}`
The nuclear norm is the sum of the singular values.
References
----------
.. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*,
Baltimore, MD, Johns Hopkins University Press, 1985, pg. 15
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.arange(9) - 4
>>> a
array([-4, -3, -2, -1, 0, 1, 2, 3, 4])
>>> b = a.reshape((3, 3))
>>> b
array([[-4, -3, -2],
[-1, 0, 1],
[ 2, 3, 4]])
>>> LA.norm(a)
7.745966692414834
>>> LA.norm(b)
7.745966692414834
>>> LA.norm(b, 'fro')
7.745966692414834
>>> LA.norm(a, np.inf)
4
>>> LA.norm(b, np.inf)
9
>>> LA.norm(a, -np.inf)
0
>>> LA.norm(b, -np.inf)
2
>>> LA.norm(a, 1)
20
>>> LA.norm(b, 1)
7
>>> LA.norm(a, -1)
-4.6566128774142013e-010
>>> LA.norm(b, -1)
6
>>> LA.norm(a, 2)
7.745966692414834
>>> LA.norm(b, 2)
7.3484692283495345
>>> LA.norm(a, -2)
nan
>>> LA.norm(b, -2)
1.8570331885190563e-016
>>> LA.norm(a, 3)
5.8480354764257312
>>> LA.norm(a, -3)
nan
Using the `axis` argument to compute vector norms:
>>> c = np.array([[ 1, 2, 3],
... [-1, 1, 4]])
>>> LA.norm(c, axis=0)
array([ 1.41421356, 2.23606798, 5. ])
>>> LA.norm(c, axis=1)
array([ 3.74165739, 4.24264069])
>>> LA.norm(c, ord=1, axis=1)
array([6, 6])
Using the `axis` argument to compute matrix norms:
>>> m = np.arange(8).reshape(2,2,2)
>>> LA.norm(m, axis=(1,2))
array([ 3.74165739, 11.22497216])
>>> LA.norm(m[0, :, :]), LA.norm(m[1, :, :])
(3.7416573867739413, 11.224972160321824)
"""
x = asarray(x)
# Immediately handle some default, simple, fast, and common cases.
if axis is None:
ndim = x.ndim
if ((ord is None) or
(ord in ('f', 'fro') and ndim == 2) or
(ord == 2 and ndim == 1)):
x = x.ravel(order='K')
if isComplexType(x.dtype.type):
sqnorm = dot(x.real, x.real) + dot(x.imag, x.imag)
else:
sqnorm = dot(x, x)
ret = sqrt(sqnorm)
if keepdims:
ret = ret.reshape(ndim*[1])
return ret
# Normalize the `axis` argument to a tuple.
nd = x.ndim
if axis is None:
axis = tuple(range(nd))
elif not isinstance(axis, tuple):
try:
axis = int(axis)
except:
raise TypeError("'axis' must be None, an integer or a tuple of integers")
axis = (axis,)
if len(axis) == 1:
if ord == Inf:
return abs(x).max(axis=axis, keepdims=keepdims)
elif ord == -Inf:
return abs(x).min(axis=axis, keepdims=keepdims)
elif ord == 0:
# Zero norm
return (x != 0).sum(axis=axis, keepdims=keepdims)
elif ord == 1:
# special case for speedup
return add.reduce(abs(x), axis=axis, keepdims=keepdims)
elif ord is None or ord == 2:
# special case for speedup
s = (x.conj() * x).real
return sqrt(add.reduce(s, axis=axis, keepdims=keepdims))
else:
try:
ord + 1
except TypeError:
raise ValueError("Invalid norm order for vectors.")
if x.dtype.type is longdouble:
# Convert to a float type, so integer arrays give
# float results. Don't apply asfarray to longdouble arrays,
# because it will downcast to float64.
absx = abs(x)
else:
absx = x if isComplexType(x.dtype.type) else asfarray(x)
if absx.dtype is x.dtype:
absx = abs(absx)
else:
# if the type changed, we can safely overwrite absx
abs(absx, out=absx)
absx **= ord
return add.reduce(absx, axis=axis, keepdims=keepdims) ** (1.0 / ord)
elif len(axis) == 2:
row_axis, col_axis = axis
if row_axis < 0:
row_axis += nd
if col_axis < 0:
col_axis += nd
if not (0 <= row_axis < nd and 0 <= col_axis < nd):
raise ValueError('Invalid axis %r for an array with shape %r' %
(axis, x.shape))
if row_axis == col_axis:
raise ValueError('Duplicate axes given.')
if ord == 2:
ret = _multi_svd_norm(x, row_axis, col_axis, amax)
elif ord == -2:
ret = _multi_svd_norm(x, row_axis, col_axis, amin)
elif ord == 1:
if col_axis > row_axis:
col_axis -= 1
ret = add.reduce(abs(x), axis=row_axis).max(axis=col_axis)
elif ord == Inf:
if row_axis > col_axis:
row_axis -= 1
ret = add.reduce(abs(x), axis=col_axis).max(axis=row_axis)
elif ord == -1:
if col_axis > row_axis:
col_axis -= 1
ret = add.reduce(abs(x), axis=row_axis).min(axis=col_axis)
elif ord == -Inf:
if row_axis > col_axis:
row_axis -= 1
ret = add.reduce(abs(x), axis=col_axis).min(axis=row_axis)
elif ord in [None, 'fro', 'f']:
ret = sqrt(add.reduce((x.conj() * x).real, axis=axis))
elif ord == 'nuc':
ret = _multi_svd_norm(x, row_axis, col_axis, sum)
else:
raise ValueError("Invalid norm order for matrices.")
if keepdims:
ret_shape = list(x.shape)
ret_shape[axis[0]] = 1
ret_shape[axis[1]] = 1
ret = ret.reshape(ret_shape)
return ret
else:
raise ValueError("Improper number of dimensions to norm.")
# multi_dot
def multi_dot(arrays):
"""
Compute the dot product of two or more arrays in a single function call,
while automatically selecting the fastest evaluation order.
`multi_dot` chains `numpy.dot` and uses optimal parenthesization
of the matrices [1]_ [2]_. Depending on the shapes of the matrices,
this can speed up the multiplication a lot.
If the first argument is 1-D it is treated as a row vector.
If the last argument is 1-D it is treated as a column vector.
The other arguments must be 2-D.
Think of `multi_dot` as::
def multi_dot(arrays): return functools.reduce(np.dot, arrays)
Parameters
----------
arrays : sequence of array_like
If the first argument is 1-D it is treated as row vector.
If the last argument is 1-D it is treated as column vector.
The other arguments must be 2-D.
Returns
-------
output : ndarray
Returns the dot product of the supplied arrays.
See Also
--------
dot : dot multiplication with two arguments.
References
----------
.. [1] Cormen, "Introduction to Algorithms", Chapter 15.2, p. 370-378
.. [2] http://en.wikipedia.org/wiki/Matrix_chain_multiplication
Examples
--------
`multi_dot` allows you to write::
>>> from numpy.linalg import multi_dot
>>> # Prepare some data
>>> A = np.random.random((10000, 100))
>>> B = np.random.random((100, 1000))
>>> C = np.random.random((1000, 5))
>>> D = np.random.random((5, 333))
>>> # the actual dot multiplication
>>> multi_dot([A, B, C, D])
instead of::
>>> np.dot(np.dot(np.dot(A, B), C), D)
>>> # or
>>> A.dot(B).dot(C).dot(D)
Example: multiplication costs of different parenthesizations
------------------------------------------------------------
The cost for a matrix multiplication can be calculated with the
following function::
def cost(A, B): return A.shape[0] * A.shape[1] * B.shape[1]
Let's assume we have three matrices
:math:`A_{10x100}, B_{100x5}, C_{5x50}`.
The costs for the two different parenthesizations are as follows::
cost((AB)C) = 10*100*5 + 10*5*50 = 5000 + 2500 = 7500
cost(A(BC)) = 10*100*50 + 100*5*50 = 50000 + 25000 = 75000
"""
n = len(arrays)
# optimization only makes sense for len(arrays) > 2
if n < 2:
raise ValueError("Expecting at least two arrays.")
elif n == 2:
return dot(arrays[0], arrays[1])
arrays = [asanyarray(a) for a in arrays]
# save original ndim to reshape the result array into the proper form later
ndim_first, ndim_last = arrays[0].ndim, arrays[-1].ndim
# Explicitly convert vectors to 2D arrays to keep the logic of the internal
# _multi_dot_* functions as simple as possible.
if arrays[0].ndim == 1:
arrays[0] = atleast_2d(arrays[0])
if arrays[-1].ndim == 1:
arrays[-1] = atleast_2d(arrays[-1]).T
_assertRank2(*arrays)
# _multi_dot_three is much faster than _multi_dot_matrix_chain_order
if n == 3:
result = _multi_dot_three(arrays[0], arrays[1], arrays[2])
else:
order = _multi_dot_matrix_chain_order(arrays)
result = _multi_dot(arrays, order, 0, n - 1)
# return proper shape
if ndim_first == 1 and ndim_last == 1:
return result[0, 0] # scalar
elif ndim_first == 1 or ndim_last == 1:
return result.ravel() # 1-D
else:
return result
def _multi_dot_three(A, B, C):
"""
Find the best order for three arrays and do the multiplication.
For three arguments `_multi_dot_three` is approximately 15 times faster
than `_multi_dot_matrix_chain_order`
"""
# cost1 = cost((AB)C)
cost1 = (A.shape[0] * A.shape[1] * B.shape[1] + # (AB)
A.shape[0] * B.shape[1] * C.shape[1]) # (--)C
# cost2 = cost(A(BC))
cost2 = (B.shape[0] * B.shape[1] * C.shape[1] + # (BC)
A.shape[0] * A.shape[1] * C.shape[1]) # A(--)
if cost1 < cost2:
return dot(dot(A, B), C)
else:
return dot(A, dot(B, C))
def _multi_dot_matrix_chain_order(arrays, return_costs=False):
"""
Return a np.array that encodes the optimal order of multiplications.
The optimal order array is then used by `_multi_dot()` to do the
multiplication.
Also return the cost matrix if `return_costs` is `True`
The implementation CLOSELY follows Cormen, "Introduction to Algorithms",
Chapter 15.2, p. 370-378. Note that Cormen uses 1-based indices.
cost[i, j] = min([
cost[prefix] + cost[suffix] + cost_mult(prefix, suffix)
for k in range(i, j)])
"""
n = len(arrays)
# p stores the dimensions of the matrices
# Example for p: A_{10x100}, B_{100x5}, C_{5x50} --> p = [10, 100, 5, 50]
p = [a.shape[0] for a in arrays] + [arrays[-1].shape[1]]
# m is a matrix of costs of the subproblems
# m[i,j]: min number of scalar multiplications needed to compute A_{i..j}
m = zeros((n, n), dtype=double)
# s is the actual ordering
# s[i, j] is the value of k at which we split the product A_i..A_j
s = empty((n, n), dtype=intp)
for l in range(1, n):
for i in range(n - l):
j = i + l
m[i, j] = Inf
for k in range(i, j):
q = m[i, k] + m[k+1, j] + p[i]*p[k+1]*p[j+1]
if q < m[i, j]:
m[i, j] = q
s[i, j] = k # Note that Cormen uses 1-based index
return (s, m) if return_costs else s
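# Illustrative sketch (not part of the original module): the chain-order and
# cost matrices for the A_{10x100}, B_{100x5}, C_{5x50} example from the
# `multi_dot` docstring above.
def _example_chain_order():
    import numpy as np
    A, B, C = np.ones((10, 100)), np.ones((100, 5)), np.ones((5, 50))
    s, m = _multi_dot_matrix_chain_order([A, B, C], return_costs=True)
    # m[0, 2] == 7500.0 and s[0, 2] == 1, i.e. the cheaper split is (AB)C.
    return s, m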
def _multi_dot(arrays, order, i, j):
"""Actually do the multiplication with the given order."""
if i == j:
return arrays[i]
else:
return dot(_multi_dot(arrays, order, i, order[i, j]),
_multi_dot(arrays, order, order[i, j] + 1, j))
| mit |
ChristinaZografou/sympy | sympy/external/importtools.py | 85 | 7294 | """Tools to assist importing optional external modules."""
from __future__ import print_function, division
import sys
# Override these in the module to change the default warning behavior.
# For example, you might set both to False before running the tests so that
# warnings are not printed to the console, or set both to True for debugging.
WARN_NOT_INSTALLED = None # Default is False
WARN_OLD_VERSION = None # Default is True
def __sympy_debug():
# helper function from sympy/__init__.py
# We don't just import SYMPY_DEBUG from that file because we don't want to
# import all of sympy just to use this module.
import os
debug_str = os.getenv('SYMPY_DEBUG', 'False')
if debug_str in ('True', 'False'):
return eval(debug_str)
else:
raise RuntimeError("unrecognized value for SYMPY_DEBUG: %s" %
debug_str)
if __sympy_debug():
WARN_OLD_VERSION = True
WARN_NOT_INSTALLED = True
def import_module(module, min_module_version=None, min_python_version=None,
warn_not_installed=None, warn_old_version=None,
module_version_attr='__version__', module_version_attr_call_args=None,
__import__kwargs={}, catch=()):
"""
Import and return a module if it is installed.
If the module is not installed, it returns None.
A minimum version for the module can be given as the keyword argument
min_module_version. This should be comparable against the module version.
By default, module.__version__ is used to get the module version. To
override this, set the module_version_attr keyword argument. If the
attribute of the module to get the version should be called (e.g.,
module.version()), then set module_version_attr_call_args to the args such
that module.module_version_attr(*module_version_attr_call_args) returns the
module's version.
If the module version is less than min_module_version using the Python <
comparison, None will be returned, even if the module is installed. You can
use this to keep from importing an incompatible older version of a module.
You can also specify a minimum Python version by using the
min_python_version keyword argument. This should be comparable against
sys.version_info.
If the keyword argument warn_not_installed is set to True, the function will
emit a UserWarning when the module is not installed.
If the keyword argument warn_old_version is set to True, the function will
emit a UserWarning when the library is installed, but cannot be imported
because of the min_module_version or min_python_version options.
Note that because of the way warnings are handled, a warning will be
emitted for each module only once. You can change the default warning
behavior by overriding the values of WARN_NOT_INSTALLED and WARN_OLD_VERSION
in sympy.external.importtools. By default, WARN_NOT_INSTALLED is False and
WARN_OLD_VERSION is True.
This function uses __import__() to import the module. To pass additional
options to __import__(), use the __import__kwargs keyword argument. For
example, to import a submodule A.B, you must pass a nonempty fromlist option
to __import__. See the docstring of __import__().
This catches ImportError to determine if the module is not installed. To
catch additional errors, pass them as a tuple to the catch keyword
argument.
Examples
========
>>> from sympy.external import import_module
>>> numpy = import_module('numpy')
>>> numpy = import_module('numpy', min_python_version=(2, 7),
... warn_old_version=False)
>>> numpy = import_module('numpy', min_module_version='1.5',
... warn_old_version=False) # numpy.__version__ is a string
>>> # gmpy does not have __version__, but it does have gmpy.version()
>>> gmpy = import_module('gmpy', min_module_version='1.14',
... module_version_attr='version', module_version_attr_call_args=(),
... warn_old_version=False)
>>> # To import a submodule, you must pass a nonempty fromlist to
>>> # __import__(). The values do not matter.
>>> p3 = import_module('mpl_toolkits.mplot3d',
... __import__kwargs={'fromlist':['something']})
>>> # matplotlib.pyplot can raise RuntimeError when the display cannot be opened
>>> matplotlib = import_module('matplotlib',
... __import__kwargs={'fromlist':['pyplot']}, catch=(RuntimeError,))
"""
# keyword argument overrides default, and global variable overrides
# keyword argument.
warn_old_version = (WARN_OLD_VERSION if WARN_OLD_VERSION is not None
else warn_old_version or True)
warn_not_installed = (WARN_NOT_INSTALLED if WARN_NOT_INSTALLED is not None
else warn_not_installed or False)
import warnings
# Check Python first so we don't waste time importing a module we can't use
if min_python_version:
if sys.version_info < min_python_version:
if warn_old_version:
warnings.warn("Python version is too old to use %s "
"(%s or newer required)" % (
module, '.'.join(map(str, min_python_version))),
UserWarning)
return
# PyPy 1.6 has rudimentary NumPy support and importing it produces errors, so skip it
if module == 'numpy' and '__pypy__' in sys.builtin_module_names:
return
try:
mod = __import__(module, **__import__kwargs)
## there's something funny about imports with matplotlib and py3k. doing
## from matplotlib import collections
## gives python's stdlib collections module. explicitly re-importing
## the module fixes this.
from_list = __import__kwargs.get('fromlist', tuple())
for submod in from_list:
if submod == 'collections' and mod.__name__ == 'matplotlib':
__import__(module + '.' + submod)
except ImportError:
if warn_not_installed:
warnings.warn("%s module is not installed" % module, UserWarning)
return
except catch as e:
if warn_not_installed:
warnings.warn(
"%s module could not be used (%s)" % (module, repr(e)))
return
if min_module_version:
modversion = getattr(mod, module_version_attr)
if module_version_attr_call_args is not None:
modversion = modversion(*module_version_attr_call_args)
if modversion < min_module_version:
if warn_old_version:
# Attempt to create a pretty string version of the version
if isinstance(min_module_version, basestring):
verstr = min_module_version
elif isinstance(min_module_version, (tuple, list)):
verstr = '.'.join(map(str, min_module_version))
else:
# Either don't know what this is. Hopefully
# it's something that has a nice str version, like an int.
verstr = str(min_module_version)
warnings.warn("%s version is too old to use "
"(%s or newer required)" % (module, verstr),
UserWarning)
return
return mod
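# --- Added usage note (illustrative, not part of the original module) -------
# import_module returns None when the requested library is missing or too old,
# so call sites typically guard on the result; the version string below is
# only an example value:
#
#     scipy = import_module('scipy', min_module_version='0.10',
#                           warn_old_version=False)
#     if scipy is not None:
#         pass  # safe to use scipy here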
| bsd-3-clause |
cwu2011/scikit-learn | sklearn/decomposition/base.py | 313 | 5647 | """Principal Component Analysis Base Classes"""
# Author: Alexandre Gramfort <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Denis A. Engemann <[email protected]>
# Kyle Kastner <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from scipy import linalg
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_array
from ..utils.extmath import fast_dot
from ..utils.validation import check_is_fitted
from ..externals import six
from abc import ABCMeta, abstractmethod
class _BasePCA(six.with_metaclass(ABCMeta, BaseEstimator, TransformerMixin)):
"""Base class for PCA methods.
Warning: This class should not be used directly.
Use derived classes instead.
"""
def get_covariance(self):
"""Compute data covariance with the generative model.
``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)``
where S**2 contains the explained variances, and sigma2 contains the
noise variances.
Returns
-------
cov : array, shape=(n_features, n_features)
Estimated covariance of data.
"""
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
cov = np.dot(components_.T * exp_var_diff, components_)
cov.flat[::len(cov) + 1] += self.noise_variance_ # modify diag inplace
return cov
def get_precision(self):
"""Compute data precision matrix with the generative model.
Equals the inverse of the covariance but computed with
the matrix inversion lemma for efficiency.
Returns
-------
precision : array, shape=(n_features, n_features)
Estimated precision of data.
"""
n_features = self.components_.shape[1]
# handle corner cases first
if self.n_components_ == 0:
return np.eye(n_features) / self.noise_variance_
if self.n_components_ == n_features:
return linalg.inv(self.get_covariance())
# Get precision using matrix inversion lemma
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
precision = np.dot(components_, components_.T) / self.noise_variance_
precision.flat[::len(precision) + 1] += 1. / exp_var_diff
precision = np.dot(components_.T,
np.dot(linalg.inv(precision), components_))
precision /= -(self.noise_variance_ ** 2)
precision.flat[::len(precision) + 1] += 1. / self.noise_variance_
return precision
@abstractmethod
    def fit(self, X, y=None):
"""Placeholder for fit. Subclasses should implement this method!
Fit the model with X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
def transform(self, X, y=None):
"""Apply dimensionality reduction to X.
X is projected on the first principal components previously extracted
from a training set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import IncrementalPCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> ipca = IncrementalPCA(n_components=2, batch_size=3)
>>> ipca.fit(X)
IncrementalPCA(batch_size=3, copy=True, n_components=2, whiten=False)
>>> ipca.transform(X) # doctest: +SKIP
"""
check_is_fitted(self, ['mean_', 'components_'], all_or_any=all)
X = check_array(X)
if self.mean_ is not None:
X = X - self.mean_
X_transformed = fast_dot(X, self.components_.T)
if self.whiten:
X_transformed /= np.sqrt(self.explained_variance_)
return X_transformed
def inverse_transform(self, X, y=None):
"""Transform data back to its original space.
In other words, return an input X_original whose transform would be X.
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data, where n_samples is the number of samples
and n_components is the number of components.
Returns
-------
        X_original : array-like, shape (n_samples, n_features)
Notes
-----
If whitening is enabled, inverse_transform will compute the
exact inverse operation, which includes reversing whitening.
"""
if self.whiten:
return fast_dot(X, np.sqrt(self.explained_variance_[:, np.newaxis]) *
self.components_) + self.mean_
else:
return fast_dot(X, self.components_) + self.mean_
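# --- Added illustrative check (not part of scikit-learn's source) -----------
# get_precision() is documented as the matrix-inversion-lemma equivalent of
# inv(get_covariance()); a quick consistency check on a fitted PCA (a subclass
# of _BasePCA) could look like this:
#
#     import numpy as np
#     from sklearn.decomposition import PCA
#     X = np.random.RandomState(0).randn(100, 5)
#     pca = PCA(n_components=3).fit(X)
#     assert np.allclose(pca.get_precision(),
#                        np.linalg.inv(pca.get_covariance()))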
| bsd-3-clause |
rs2/bokeh | bokeh/sampledata/tests/test_airport_routes.py | 2 | 2480 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2017, Anaconda, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import pytest ; pytest
from bokeh.util.api import INTERNAL, PUBLIC ; INTERNAL, PUBLIC
from bokeh.util.testing import verify_api ; verify_api
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
# External imports
import pandas as pd
# Bokeh imports
from bokeh.util.testing import verify_all
# Module under test
#import bokeh.sampledata.airport_routes as bsa
#-----------------------------------------------------------------------------
# API Definition
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
ALL = (
'airports',
'routes',
)
#-----------------------------------------------------------------------------
# Public API
#-----------------------------------------------------------------------------
Test___all__ = pytest.mark.sampledata(verify_all("bokeh.sampledata.airport_routes", ALL))
@pytest.mark.sampledata
def test_airports():
import bokeh.sampledata.airport_routes as bsa
assert isinstance(bsa.airports, pd.DataFrame)
# don't check detail for external data
@pytest.mark.sampledata
def test_routes():
import bokeh.sampledata.airport_routes as bsa
assert isinstance(bsa.routes, pd.DataFrame)
# don't check detail for external data
#-----------------------------------------------------------------------------
# Internal API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
| bsd-3-clause |
wkfwkf/statsmodels | statsmodels/miscmodels/try_mlecov.py | 33 | 7414 | '''Multivariate Normal Model with full covariance matrix
toeplitz structure is not exploited, need cholesky or inv for toeplitz
Author: josef-pktd
'''
from __future__ import print_function
import numpy as np
#from scipy import special #, stats
from scipy import linalg
from scipy.linalg import norm, toeplitz
import statsmodels.api as sm
from statsmodels.base.model import (GenericLikelihoodModel,
LikelihoodModel)
from statsmodels.tsa.arima_process import arma_acovf, arma_generate_sample
def mvn_loglike_sum(x, sigma):
'''loglike multivariate normal
copied from GLS and adjusted names
    not sure why this differs from mvn_loglike
'''
nobs = len(x)
nobs2 = nobs / 2.0
SSR = (x**2).sum()
llf = -np.log(SSR) * nobs2 # concentrated likelihood
llf -= (1+np.log(np.pi/nobs2))*nobs2 # with likelihood constant
if np.any(sigma) and sigma.ndim == 2:
#FIXME: robust-enough check? unneeded if _det_sigma gets defined
llf -= .5*np.log(np.linalg.det(sigma))
return llf
def mvn_loglike(x, sigma):
'''loglike multivariate normal
assumes x is 1d, (nobs,) and sigma is 2d (nobs, nobs)
brute force from formula
no checking of correct inputs
    use of inv and log-det should be replaced with something more efficient
'''
#see numpy thread
#Sturla: sqmahal = (cx*cho_solve(cho_factor(S),cx.T).T).sum(axis=1)
sigmainv = linalg.inv(sigma)
logdetsigma = np.log(np.linalg.det(sigma))
nobs = len(x)
llf = - np.dot(x, np.dot(sigmainv, x))
llf -= nobs * np.log(2 * np.pi)
llf -= logdetsigma
llf *= 0.5
return llf
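# --- Added illustrative check (not part of the original statsmodels file) ---
# For a zero-mean normal this brute-force loglike should match scipy's
# multivariate_normal.logpdf (available in scipy >= 0.14):
def _mvn_loglike_example():
    from scipy import stats
    x = np.array([0.5, -0.2])
    sig = np.array([[2.0, 0.5], [0.5, 1.0]])
    return np.allclose(mvn_loglike(x, sig),
                       stats.multivariate_normal.logpdf(x, mean=np.zeros(2),
                                                        cov=sig))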
def mvn_loglike_chol(x, sigma):
'''loglike multivariate normal
assumes x is 1d, (nobs,) and sigma is 2d (nobs, nobs)
brute force from formula
no checking of correct inputs
    use of inv and log-det should be replaced with something more efficient
'''
#see numpy thread
#Sturla: sqmahal = (cx*cho_solve(cho_factor(S),cx.T).T).sum(axis=1)
sigmainv = np.linalg.inv(sigma)
cholsigmainv = np.linalg.cholesky(sigmainv).T
x_whitened = np.dot(cholsigmainv, x)
logdetsigma = np.log(np.linalg.det(sigma))
nobs = len(x)
from scipy import stats
print('scipy.stats')
print(np.log(stats.norm.pdf(x_whitened)).sum())
llf = - np.dot(x_whitened.T, x_whitened)
llf -= nobs * np.log(2 * np.pi)
llf -= logdetsigma
llf *= 0.5
return llf, logdetsigma, 2 * np.sum(np.log(np.diagonal(cholsigmainv)))
#0.5 * np.dot(x_whitened.T, x_whitened) + nobs * np.log(2 * np.pi) + logdetsigma)
def mvn_nloglike_obs(x, sigma):
'''loglike multivariate normal
assumes x is 1d, (nobs,) and sigma is 2d (nobs, nobs)
brute force from formula
no checking of correct inputs
    use of inv and log-det should be replaced with something more efficient
'''
#see numpy thread
#Sturla: sqmahal = (cx*cho_solve(cho_factor(S),cx.T).T).sum(axis=1)
#Still wasteful to calculate pinv first
sigmainv = np.linalg.inv(sigma)
cholsigmainv = np.linalg.cholesky(sigmainv).T
#2 * np.sum(np.log(np.diagonal(np.linalg.cholesky(A)))) #Dag mailinglist
# logdet not needed ???
#logdetsigma = 2 * np.sum(np.log(np.diagonal(cholsigmainv)))
x_whitened = np.dot(cholsigmainv, x)
#sigmainv = linalg.cholesky(sigma)
logdetsigma = np.log(np.linalg.det(sigma))
sigma2 = 1. # error variance is included in sigma
llike = 0.5 * (np.log(sigma2) - 2.* np.log(np.diagonal(cholsigmainv))
+ (x_whitened**2)/sigma2
+ np.log(2*np.pi))
return llike
def invertibleroots(ma):
import numpy.polynomial as poly
pr = poly.polyroots(ma)
insideroots = np.abs(pr)<1
if insideroots.any():
pr[np.abs(pr)<1] = 1./pr[np.abs(pr)<1]
pnew = poly.Polynomial.fromroots(pr)
        mainv = pnew.coef/pnew.coef[0]
wasinvertible = False
else:
mainv = ma
wasinvertible = True
return mainv, wasinvertible
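# --- Added worked example (illustrative, not in the original file) ----------
# ma = [1., 2.] has its root at -0.5 (inside the unit circle), so it is not
# invertible; flipping the root to -2 and renormalizing the coefficients gives
# mainv = [1., 0.5] and wasinvertible = False.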
def getpoly(self, params):
ar = np.r_[[1], -params[:self.nar]]
ma = np.r_[[1], params[-self.nma:]]
import numpy.polynomial as poly
return poly.Polynomial(ar), poly.Polynomial(ma)
class MLEGLS(GenericLikelihoodModel):
    '''ARMA model with exact loglikelihood for short time series
Inverts (nobs, nobs) matrix, use only for nobs <= 200 or so.
This class is a pattern for small sample GLS-like models. Intended use
for loglikelihood of initial observations for ARMA.
TODO:
    This might be missing the error variance. Does it assume the error is
    distributed N(0,1)?
Maybe extend to mean handling, or assume it is already removed.
'''
def _params2cov(self, params, nobs):
'''get autocovariance matrix from ARMA regression parameter
ar parameters are assumed to have rhs parameterization
'''
ar = np.r_[[1], -params[:self.nar]]
ma = np.r_[[1], params[-self.nma:]]
#print('ar', ar
#print('ma', ma
#print('nobs', nobs
autocov = arma_acovf(ar, ma, nobs=nobs)
#print('arma_acovf(%r, %r, nobs=%d)' % (ar, ma, nobs)
#print(autocov.shape
        #something is strange, fixed in arma_acovf
autocov = autocov[:nobs]
sigma = toeplitz(autocov)
return sigma
def loglike(self, params):
sig = self._params2cov(params[:-1], self.nobs)
sig = sig * params[-1]**2
loglik = mvn_loglike(self.endog, sig)
return loglik
def fit_invertible(self, *args, **kwds):
res = self.fit(*args, **kwds)
ma = np.r_[[1], res.params[self.nar: self.nar+self.nma]]
mainv, wasinvertible = invertibleroots(ma)
if not wasinvertible:
start_params = res.params.copy()
start_params[self.nar: self.nar+self.nma] = mainv[1:]
#need to add args kwds
res = self.fit(start_params=start_params)
return res
if __name__ == '__main__':
nobs = 50
ar = [1.0, -0.8, 0.1]
ma = [1.0, 0.1, 0.2]
#ma = [1]
np.random.seed(9875789)
y = arma_generate_sample(ar,ma,nobs,2)
y -= y.mean() #I haven't checked treatment of mean yet, so remove
mod = MLEGLS(y)
mod.nar, mod.nma = 2, 2 #needs to be added, no init method
mod.nobs = len(y)
res = mod.fit(start_params=[0.1, -0.8, 0.2, 0.1, 1.])
print('DGP', ar, ma)
print(res.params)
from statsmodels.regression import yule_walker
print(yule_walker(y, 2))
#resi = mod.fit_invertible(start_params=[0.1,0,0.2,0, 0.5])
#print(resi.params
arpoly, mapoly = getpoly(mod, res.params[:-1])
data = sm.datasets.sunspots.load()
#ys = data.endog[-100:]
## ys = data.endog[12:]-data.endog[:-12]
## ys -= ys.mean()
## mods = MLEGLS(ys)
## mods.nar, mods.nma = 13, 1 #needs to be added, no init method
## mods.nobs = len(ys)
## ress = mods.fit(start_params=np.r_[0.4, np.zeros(12), [0.2, 5.]],maxiter=200)
## print(ress.params
## #from statsmodels.sandbox.tsa import arima as tsaa
## #tsaa
## import matplotlib.pyplot as plt
## plt.plot(data.endog[1])
## #plt.show()
sigma = mod._params2cov(res.params[:-1], nobs) * res.params[-1]**2
print(mvn_loglike(y, sigma))
llo = mvn_nloglike_obs(y, sigma)
print(llo.sum(), llo.shape)
print(mvn_loglike_chol(y, sigma))
print(mvn_loglike_sum(y, sigma))
| bsd-3-clause |
mwil/wanikani-userscripts | wanikani-simulator/plot.py | 1 | 1661 | import argparse
import json
import matplotlib.pyplot as plt
import numpy as np
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Plot the results of the WK Simulator")
parser.add_argument("json_file")
args = parser.parse_args()
with open(args.json_file, "r") as infile:
sim_data = json.load(infile)
days = np.array(sim_data["day"])
daily_reviews = np.array(sim_data["review_cnt"])[:-1]
daily_levels = np.array(sim_data["level"])
fig, ax1 = plt.subplots()
# ax1.plot(days, daily_reviews, "g-")
# ax1.fill_between(days, 0, daily_reviews,
# where=daily_reviews > 0,
# facecolor="b",
# interpolate=True)
N = 7 # averaging period in days
avg = np.convolve(daily_reviews,
np.ones((N,))/N,
mode="full")
avg_days = np.arange(min(days), max(days)+(2*N-N))
ax1.plot(avg_days, avg, "b-")
# ax1.fill_between(avg_days, 0, avg,
# where=avg > 0,
# facecolor="b",
# interpolate=True)
ax1.set_xlabel("Time (days)")
ax1.set_ylabel("Daily Reviews")
ax2 = ax1.twinx()
ax2.plot(days, daily_levels, "r-", lw=4)
ax2.grid()
ax2.set_ylabel("WK Level", color="r")
ax2.yaxis.set_ticks(np.arange(0, 61, 5))
ax2.tick_params('y', colors='r')
# plt.title("3-Day Average Reviews per Day (Twice Daily 8h/20h, 100%)")
plt.title("Reviews per Day (8h/20h, r=100%/m=100%, 20 les/d)")
# fig.tight_layout()
plt.savefig("test.png", dpi=150, bbox_inches="tight")
| gpl-3.0 |
eclee25/flu-SDI-exploratory-age | scripts/create_fluseverity_figs/Supp_zOR_benchmark_altbaseline.py | 1 | 6055 | #!/usr/bin/python
##############################################
###Python template
###Author: Elizabeth Lee
###Date: 9/3/14
###Function: mean peak-based retro zOR metric vs. CDC benchmark index, where sensitivity of zOR metric is examined. Given that 7 week fall baseline is what we used in our study, the 7 week summer baseline, 10 week fall baseline, and 10 week summer baseline plots are examined.
###Import data: CDC_Source/Import_Data/cdc_severity_index.csv, Py_export/SDI_national_classifications_summer-7.csv, Py_export/SDI_national_classifications_10.csv, Py_export/SDI_national_classifications_summer-10.csv
###Command Line: python Supp_zOR_benchmark_altbaseline.py
##############################################
### notes ###
### packages/modules ###
import csv
import matplotlib.pyplot as plt
import numpy as np
## local modules ##
import functions as fxn
### data structures ###
### functions ###
### data files ###
# benchmark data
ixin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/CDC_Source/Import_Data/cdc_severity_index.csv','r')
ixin.readline()
ix = csv.reader(ixin, delimiter=',')
# 10 week fall BL index
f10in = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/Py_export/SDI_national_classifications_10.csv','r')
f10in.readline()
f10 = csv.reader(f10in, delimiter=',')
# 7 week summer BL index
s7in = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/Py_export/SDI_national_classifications_summer-7.csv','r')
s7in.readline()
s7 = csv.reader(s7in, delimiter=',')
# 10 week summer BL index
s10in = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/Py_export/SDI_national_classifications_summer-10.csv','r')
s10in.readline()
s10 = csv.reader(s10in, delimiter=',')
### called/local plotting parameters ###
ps = fxn.pseasons
sl = fxn.gp_seasonlabels
fs = 24
fssml = 16
### program ###
## import data ##
# d_benchmark[seasonnum] = CDC benchmark index value
d_benchmark = fxn.benchmark_import(ix, 8) # no ILINet
# d_nat_classif[season] = (mean retro zOR, mean early zOR)
d_f10 = fxn.readNationalClassifFile(f10)
d_s7 = fxn.readNationalClassifFile(s7)
d_s10 = fxn.readNationalClassifFile(s10)
## plot values ##
benchmark = [d_benchmark[s] for s in ps]
f10r = [d_f10[s][0] for s in ps]
s7r = [d_s7[s][0] for s in ps]
s10r = [d_s10[s][0] for s in ps]
print '10 week fall corr coef', np.corrcoef(benchmark, f10r)
print '7 week summer corr coef', np.corrcoef(benchmark, s7r)
print '10 week summer corr coef', np.corrcoef(benchmark, s10r)
# draw plots
fig1 = plt.figure()
ax1 = fig1.add_subplot(1,1,1)
# 10 week fall BL mean retro zOR vs. benchmark index
ax1.plot(benchmark, f10r, marker = 'o', color = 'black', linestyle = 'None')
ax1.vlines([-1, 1], -10, 20, colors='k', linestyles='solid')
ax1.hlines([-1, 1], -10, 20, colors='k', linestyles='solid')
ax1.fill([-5, -1, -1, -5], [1, 1, 20, 20], facecolor='blue', alpha=0.4)
ax1.fill([-1, 1, 1, -1], [-1, -1, 1, 1], facecolor='yellow', alpha=0.4)
ax1.fill([1, 5, 5, 1], [-1, -1, -10, -10], facecolor='red', alpha=0.4)
ax1.annotate('Mild', xy=(-4.75,19), fontsize=fssml)
ax1.annotate('Severe', xy=(1.25,-8), fontsize=fssml)
for s, x, y in zip(sl, benchmark, f10r):
ax1.annotate(s, xy=(x,y), xytext=(-10,5), textcoords='offset points', fontsize=fssml)
ax1.set_ylabel(fxn.gp_sigma_r, fontsize=fs)
ax1.set_xlabel(fxn.gp_benchmark, fontsize=fs)
ax1.tick_params(axis='both', labelsize=fssml)
ax1.set_xlim([-5,5])
ax1.set_ylim([-10,20])
ax1.invert_yaxis()
plt.savefig('/home/elee/Dropbox/Elizabeth_Bansal_Lab/Manuscripts/Age_Severity/fluseverity_figs/Supp/baseline_sensitivity/zOR-fall10_benchmark.png', transparent=False, bbox_inches='tight', pad_inches=0)
plt.close()
# plt.show()
fig2 = plt.figure()
ax2 = fig2.add_subplot(1,1,1)
# 7 week summer BL mean retro zOR vs. benchmark index
ax2.plot(benchmark, s7r, marker = 'o', color = 'black', linestyle = 'None')
ax2.vlines([-1, 1], -10, 20, colors='k', linestyles='solid')
ax2.hlines([-1, 1], -10, 20, colors='k', linestyles='solid')
ax2.fill([-5, -1, -1, -5], [1, 1, 20, 20], facecolor='blue', alpha=0.4)
ax2.fill([-1, 1, 1, -1], [-1, -1, 1, 1], facecolor='yellow', alpha=0.4)
ax2.fill([1, 5, 5, 1], [-1, -1, -10, -10], facecolor='red', alpha=0.4)
ax2.annotate('Mild', xy=(-4.75,19), fontsize=fssml)
ax2.annotate('Severe', xy=(1.25,-8), fontsize=fssml)
for s, x, y in zip(sl, benchmark, s7r):
ax2.annotate(s, xy=(x,y), xytext=(-10,5), textcoords='offset points', fontsize=fssml)
ax2.set_ylabel(fxn.gp_sigma_r, fontsize=fs)
ax2.set_xlabel(fxn.gp_benchmark, fontsize=fs)
ax2.tick_params(axis='both', labelsize=fssml)
ax2.set_xlim([-5,5])
ax2.set_ylim([-10,20])
ax2.invert_yaxis()
plt.savefig('/home/elee/Dropbox/Elizabeth_Bansal_Lab/Manuscripts/Age_Severity/fluseverity_figs/Supp/baseline_sensitivity/zOR-summer7_benchmark.png', transparent=False, bbox_inches='tight', pad_inches=0)
plt.close()
# plt.show()
fig3 = plt.figure()
ax3 = fig3.add_subplot(1,1,1)
# 10 week summer BL mean retro zOR vs. benchmark index
ax3.plot(benchmark, s10r, marker = 'o', color = 'black', linestyle = 'None')
ax3.vlines([-1, 1], -10, 20, colors='k', linestyles='solid')
ax3.hlines([-1, 1], -10, 20, colors='k', linestyles='solid')
ax3.fill([-5, -1, -1, -5], [1, 1, 20, 20], facecolor='blue', alpha=0.4)
ax3.fill([-1, 1, 1, -1], [-1, -1, 1, 1], facecolor='yellow', alpha=0.4)
ax3.fill([1, 5, 5, 1], [-1, -1, -10, -10], facecolor='red', alpha=0.4)
ax3.annotate('Mild', xy=(-4.75,19), fontsize=fssml)
ax3.annotate('Severe', xy=(1.25,-8), fontsize=fssml)
for s, x, y in zip(sl, benchmark, s10r):
ax3.annotate(s, xy=(x,y), xytext=(-10,5), textcoords='offset points', fontsize=fssml)
ax3.set_ylabel(fxn.gp_sigma_r, fontsize=fs)
ax3.set_xlabel(fxn.gp_benchmark, fontsize=fs)
ax3.tick_params(axis='both', labelsize=fssml)
ax3.set_xlim([-5,5])
ax3.set_ylim([-10,20])
ax3.invert_yaxis()
plt.savefig('/home/elee/Dropbox/Elizabeth_Bansal_Lab/Manuscripts/Age_Severity/fluseverity_figs/Supp/baseline_sensitivity/zOR-summer10_benchmark.png', transparent=False, bbox_inches='tight', pad_inches=0)
plt.close()
# plt.show() | mit |
RomainBrault/scikit-learn | examples/linear_model/plot_logistic_l1_l2_sparsity.py | 384 | 2601 | """
==============================================
L1 Penalty and Sparsity in Logistic Regression
==============================================
Comparison of the sparsity (percentage of zero coefficients) of solutions when
L1 and L2 penalty are used for different values of C. We can see that large
values of C give more freedom to the model. Conversely, smaller values of C
constrain the model more. In the L1 penalty case, this leads to sparser
solutions.
We classify 8x8 images of digits into two classes: 0-4 against 5-9.
The visualization shows coefficients of the models for varying C.
"""
print(__doc__)
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Andreas Mueller <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn import datasets
from sklearn.preprocessing import StandardScaler
digits = datasets.load_digits()
X, y = digits.data, digits.target
X = StandardScaler().fit_transform(X)
# classify small against large digits
y = (y > 4).astype(np.int)
# Set regularization parameter
for i, C in enumerate((100, 1, 0.01)):
# turn down tolerance for short training time
clf_l1_LR = LogisticRegression(C=C, penalty='l1', tol=0.01)
clf_l2_LR = LogisticRegression(C=C, penalty='l2', tol=0.01)
clf_l1_LR.fit(X, y)
clf_l2_LR.fit(X, y)
coef_l1_LR = clf_l1_LR.coef_.ravel()
coef_l2_LR = clf_l2_LR.coef_.ravel()
# coef_l1_LR contains zeros due to the
# L1 sparsity inducing norm
sparsity_l1_LR = np.mean(coef_l1_LR == 0) * 100
sparsity_l2_LR = np.mean(coef_l2_LR == 0) * 100
print("C=%.2f" % C)
print("Sparsity with L1 penalty: %.2f%%" % sparsity_l1_LR)
print("score with L1 penalty: %.4f" % clf_l1_LR.score(X, y))
print("Sparsity with L2 penalty: %.2f%%" % sparsity_l2_LR)
print("score with L2 penalty: %.4f" % clf_l2_LR.score(X, y))
l1_plot = plt.subplot(3, 2, 2 * i + 1)
l2_plot = plt.subplot(3, 2, 2 * (i + 1))
if i == 0:
l1_plot.set_title("L1 penalty")
l2_plot.set_title("L2 penalty")
l1_plot.imshow(np.abs(coef_l1_LR.reshape(8, 8)), interpolation='nearest',
cmap='binary', vmax=1, vmin=0)
l2_plot.imshow(np.abs(coef_l2_LR.reshape(8, 8)), interpolation='nearest',
cmap='binary', vmax=1, vmin=0)
plt.text(-8, 3, "C = %.2f" % C)
l1_plot.set_xticks(())
l1_plot.set_yticks(())
l2_plot.set_xticks(())
l2_plot.set_yticks(())
plt.show()
| bsd-3-clause |
QLaboratory/QlabChallengerRepo | ai_challenger_scene/classification_statistics/classification_statistics.py | 1 | 8128 | # -*- coding: utf-8 -*-
import os
import json
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
# matplotlib.use('qt4agg')
# Set the default font
matplotlib.rcParams['font.sans-serif'] = ['SimHei']
matplotlib.rcParams['font.family'] = 'sans-serif'
# Fix the minus sign '-' being rendered as a square box
matplotlib.rcParams['axes.unicode_minus'] = False
# zhfont = matplotlib.font_manager.FontProperties(fname='C:\Windows\Fonts\simkai.ttf')
OFFICIAL_CLASSIFICATION_RAW_FILE_PATH = r"D:\QlabChallengerRepo\ai_challenger_scene\classification_statistics\scene_validation_dictionaries.json"
PREDICTION_CLASSIFICATION_FILE_PATH = r"D:\QlabChallengerRepo\ai_challenger_scene\classification_statistics\ResNet152_predict_validation.json"
SCENE_CLASSES_RAW_FILE_PATH = r"D:\QlabChallengerRepo\ai_challenger_scene\classification_statistics\scene_classes.json"
# OFFICIAL_CLASSIFICATION_RAW_FILE_PATH = r"C:\Users\Air\Desktop\classification_statistics\scene_validation_dictionaries.json"
# PREDICTION_CLASSIFICATION_FILE_PATH = r"C:\Users\Air\Desktop\ALL\ResNet152_predict_validation.json"
# SCENE_CLASSES_RAW_FILE_PATH = r"C:\Users\Air\Desktop\classification_statistics\scene_classes.json"
sceneClassesFile = open(SCENE_CLASSES_RAW_FILE_PATH, 'r', encoding='UTF-8')
sceneClassesJSON = json.load(sceneClassesFile)
officialClassificationFile = open(OFFICIAL_CLASSIFICATION_RAW_FILE_PATH, 'r', encoding='UTF-8')
officialClassificationDICT = json.load(officialClassificationFile)
predictionClassificationFile = open(PREDICTION_CLASSIFICATION_FILE_PATH, 'r', encoding='UTF-8')
predictionClassificationLIST = json.load(predictionClassificationFile)
# Store per-image details of scene classification errors
ClassificationErrorDetailDictionary = []
# Store scene classification error statistics
ClassificationErrorStatisticsMatrix = np.zeros((80, 84))
# Iterate over the prediction JSON file
for i in range(len(predictionClassificationLIST)):
    # Get the ground-truth label ID for the current image
image_id_temp = predictionClassificationLIST[i]['image_id']
officail_label_id_temp = officialClassificationDICT[image_id_temp]
ClassificationErrorStatisticsMatrix[int(officail_label_id_temp), 1] += 1
    # Get the predicted label ID for the current image
predict_label_id_temp = predictionClassificationLIST[i]['label_id']
prediction_bool_temp = False
predict_label_id_sort = np.asarray(predict_label_id_temp).argsort()[-3:][::-1].tolist()
if int(officail_label_id_temp) in predict_label_id_sort:
prediction_bool_temp = True
classification_error_temp = {}
if prediction_bool_temp is False:
        # Update the misclassification statistics for this scene
ClassificationErrorStatisticsMatrix[int(officail_label_id_temp), 2] += 1
        # Format the misclassification details for output
classification_error_temp['image_id'] = image_id_temp
classification_error_temp['label_id'] = officail_label_id_temp
classification_error_temp['label_id_name'] = sceneClassesJSON[officail_label_id_temp]
        # Too long; skip outputting the prediction probabilities for now
# classification_error_temp['label_id_predict'] = officail_label_id_temp
label_id_predict_name_temp = []
for k in predict_label_id_sort:
label_id_predict_name_temp.append(sceneClassesJSON[str(k)])
ClassificationErrorStatisticsMatrix[int(officail_label_id_temp), k+4] += 1
classification_error_temp['label_id_predict_name'] = label_id_predict_name_temp
# print(classification_error_temp)
ClassificationErrorDetailDictionary.append(classification_error_temp)
# Store the per-class classification error statistics for output
ClassificationErrorStatisticDictionary = []
for i in range(80):
ClassificationErrorStatisticsMatrix[i, 0] = i
ClassificationErrorStatisticsMatrix[i, 3] = np.true_divide(ClassificationErrorStatisticsMatrix[i, 2], ClassificationErrorStatisticsMatrix[i, 1]) * 100
classification_error_statistic_temp = {}
classification_error_statistic_temp['label_id'] = i
classification_error_statistic_temp['label_id_name'] = sceneClassesJSON[str(i)]
classification_error_statistic_temp['total_number'] = ClassificationErrorStatisticsMatrix[i, 1]
classification_error_statistic_temp['classification_error_number'] = ClassificationErrorStatisticsMatrix[i, 2]
classification_error_statistic_temp['classification_error_percentage'] = ClassificationErrorStatisticsMatrix[i, 3]
classification_error_statistic_temp_list = ClassificationErrorStatisticsMatrix[i, 4:len(ClassificationErrorStatisticsMatrix[0])]
classification_error_statistic_temp_list_sort = classification_error_statistic_temp_list.argsort()[-3:][::-1].tolist()
classification_error_statistic_temp['classification_error_statistic_label_id'] = classification_error_statistic_temp_list_sort
classification_error_statistic_name_temp = []
for k in classification_error_statistic_temp_list_sort:
classification_error_statistic_name_temp.append(sceneClassesJSON[str(k)])
classification_error_statistic_temp['classification_error_statistic_label_name'] = classification_error_statistic_name_temp
ClassificationErrorStatisticDictionary.append(classification_error_statistic_temp)
# print(classification_error_statistic_temp)
# Output per-image details of scene classification errors
(filepath, tempfilename) = os.path.split(PREDICTION_CLASSIFICATION_FILE_PATH)
(shotname, extension) = os.path.splitext(tempfilename)
CLASSIFICATION_ERROR_DETAIL_STATISTICS_FILE_PATH = shotname + "_classification_error_detail_statistics" + ".json"
CLASSIFICATION_ERROR_STATISTICS_FILE = open(CLASSIFICATION_ERROR_DETAIL_STATISTICS_FILE_PATH, "w", encoding='UTF-8')
# print(len(ClassificationErrorDetailDictionary))
json.dump(ClassificationErrorDetailDictionary, CLASSIFICATION_ERROR_STATISTICS_FILE, indent=2, ensure_ascii=False)
# Output per-class statistics of scene classification errors
CLASSIFICATION_ERROR_TOTAL_STATISTICS_FILE_PATH = shotname + "_classification_error_total_statistics" + ".json"
CLASSIFICATION_ERROR_TOTAL_STATISTICS_FILE = open(CLASSIFICATION_ERROR_TOTAL_STATISTICS_FILE_PATH, "w", encoding='UTF-8')
json.dump(ClassificationErrorStatisticDictionary, CLASSIFICATION_ERROR_TOTAL_STATISTICS_FILE, indent=2, ensure_ascii=False)
# Output the scene classification error statistics matrix
CLASSIFICATION_ERROR_STATISTICS_MATRIX_FILE_PATH = shotname + "_classification_error_statistics" + ".txt"
# print(ClassificationErrorStatisticsMatrix)
# np.savetxt(CLASSIFICATION_ERROR_STATISTICS_MATRIX_FILE_PATH, ClassificationErrorStatisticsMatrix, fmt='%d', delimiter='\t', newline='\r\n')
ClassificationErrorStatisticsMatrixSort = ClassificationErrorStatisticsMatrix[ClassificationErrorStatisticsMatrix[:,3].argsort()]
np.savetxt(CLASSIFICATION_ERROR_STATISTICS_MATRIX_FILE_PATH, ClassificationErrorStatisticsMatrixSort, fmt='%d', delimiter='\t', newline='\r\n')
alphabetX = []
alphabetY = []
count = 0
for i in ClassificationErrorStatisticsMatrixSort:
alphabetX.append(str(count))
alphabetY.append(sceneClassesJSON[str(int(i[0]))])
count += 1
def ConfusionMatrixPng(cm):
norm_conf = []
for i in cm:
a = 0
tmp_arr = []
a = sum(i)
# print(a)
for j in i:
tmp_arr.append(float(j) / (float(a) + 1.1e-5))
norm_conf.append(tmp_arr)
fig = plt.figure()
plt.clf()
ax = fig.add_subplot(111)
ax.set_aspect(1)
res = ax.imshow(np.array(norm_conf), cmap=plt.cm.gray_r,
interpolation='nearest')
height = len(cm)
width = len(cm[0])
cb = fig.colorbar(res)
# locs, labels = plt.xticks(range(width), alphabet[:width])
# for t in labels:
# t.set_rotation(90)
# plt.xticks('orientation', 'vertical')
# locs, labels = xticks([1,2,3,4], ['Frogs', 'Hogs', 'Bogs', 'Slogs'])
# setp(alphabet, 'rotation', 'vertical')
plt.xticks(range(width), alphabetX[:width])
plt.yticks(range(height), alphabetY[:height])
plt.savefig('confusion_matrix.png', format='png')
plt.show()
ConfusionMatrixPng(ClassificationErrorStatisticsMatrix[:, 4:len(ClassificationErrorStatisticsMatrixSort[0])])
| mit |
ysasaki6023/NeuralNetworkStudy | cifar02/Output/Ksize211_L4_0/train.py | 24 | 10142 | #!/usr/bin/env python
import argparse
import time
import numpy as np
import six
import os
import shutil
import chainer
from chainer import computational_graph
from chainer import cuda
import chainer.links as L
import chainer.functions as F
from chainer import optimizers
from chainer import serializers
from chainer.utils import conv
import cPickle as pickle
import matplotlib.pyplot as plt
import matplotlib.cm
class ImageProcessNetwork(chainer.Chain):
def __init__(self,
I_colors, I_Xunit, I_Yunit, F_unit,
N_PLayers = 5,
P0C_feature = 3,
P1C_feature = 3,
P2C_feature = 3,
P0C_filter = 3,
P1C_filter = 3,
P2C_filter = 3,
P0P_ksize = 2,
P1P_ksize = 2,
P2P_ksize = 2,
L1_dropout = 0.5,
L2_dropout = 0.5,
L2_unit = 256,
):
super(ImageProcessNetwork, self).__init__()
self.IsTrain = True
self.NPLayers = N_PLayers
self.NFeatures = [I_colors]
self.NFilter = [1]
self.NKsize = [1]
self.NImgPix = [(I_Xunit,I_Yunit)]
self.L1_dropout = L1_dropout
self.L2_dropout = L2_dropout
self.L2_unit = L2_unit
for iL in range(self.NPLayers):
## Set Variables
self.NFeatures.append(self.gradualVariable(iL,self.NPLayers,P0C_feature,P1C_feature,P2C_feature))
self.NFilter.append( self.gradualVariable(iL,self.NPLayers,P0C_filter ,P1C_filter ,P2C_filter ))
self.NKsize.append( self.gradualVariable(iL,self.NPLayers,P0P_ksize ,P1P_ksize ,P2P_ksize ))
## Update layers
self.NImgPix.append(
( conv.get_conv_outsize( self.NImgPix[-1][0], self.NKsize[-1], self.NKsize[-1], 0, cover_all = True),
conv.get_conv_outsize( self.NImgPix[-1][1], self.NKsize[-1], self.NKsize[-1], 0, cover_all = True)))
self.add_link("P%d"%iL,L.Convolution2D( self.NFeatures[-2], self.NFeatures[-1],
self.NFilter[-1] , pad=int(self.NFilter[-1]/2.)))
self.add_link("L1",L.Linear( self.NImgPix[-1][0] * self.NImgPix[-1][1] * self.NFeatures[-1] , L2_unit))
self.add_link("L2",L.Linear( L2_unit, F_unit))
return
def gradualVariable(self, cLayer, tLayer, val0, val1, val2):
pos = 0.5
if cLayer <= int(pos*tLayer): v0, v1, p0, p1, pc = val0, val1, 0, int(pos*tLayer), int( cLayer - 0 )
else : v0, v1, p0, p1, pc = val1, val2, int(pos*tLayer), tLayer-1, int( cLayer - int(pos*tLayer))
return int(float(v0) + (float(v1)-float(v0))/(float(p1)-float(p0))*float(pc))
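    # Added note (illustrative): with N_PLayers=6 and (val0, val1, val2) =
    # (32, 16, 16) -- the values used in __main__ below -- the interpolation
    # above yields feature counts 32, 26, 21, 16, 16, 16 for layers 0..5:
    # a linear ramp from val0 to val1 over the first half of the network,
    # then from val1 to val2 over the second half.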
def setTrainMode(self, IsTrain):
self.IsTrain = IsTrain
return
def __call__(self, x):
h = x
for iL in range(self.NPLayers):
h = self.__dict__["P%d"%iL](h)
h = F.local_response_normalization(h)
h = F.max_pooling_2d(F.relu(h), ksize=self.NKsize[iL+1], cover_all=True)
h = F.dropout(F.relu(self.L1(h)),ratio=self.L1_dropout,train=self.IsTrain)
h = F.dropout(F.relu(self.L2(h)),ratio=self.L2_dropout,train=self.IsTrain)
y = h
return y
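    # Added note (illustrative): the forward pass is therefore
    #   [conv -> local response norm -> relu -> max-pool] x N_PLayers,
    # followed by two fully connected layers (L2_unit then F_unit units),
    # each wrapped in relu and dropout during training.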
def CifarAnalysis(folderName=None,n_epoch=1,batchsize = 1000, **kwd):
id_gpu = 0
OutStr = ""
OutStr += 'GPU: {}\n'.format(id_gpu)
OutStr += 'Minibatch-size: {}\n'.format(batchsize)
OutStr += 'epoch: {}\n'.format(n_epoch)
OutStr += 'kwd: {}\n'.format(kwd)
OutStr += ''
print OutStr
fOutput = None
fInfo = None
if folderName:
if not os.path.exists(folderName):
os.makedirs(folderName)
fOutput = open(os.path.join(folderName,"output.dat"),"w")
fInfo = open(os.path.join(folderName,"info.dat"),"w")
shutil.copyfile(__file__,os.path.join(folderName,os.path.basename(__file__)))
if fInfo: fInfo.write(OutStr)
# Prepare dataset
InDataBatch = []
data_tr = np.zeros((50000,3*32*32),dtype=np.float32)
data_ev = np.zeros((10000,3*32*32),dtype=np.float32)
label_tr = np.zeros((50000),dtype=np.int32)
label_ev = np.zeros((10000),dtype=np.int32)
for i in range(1,5+1):
with open("data_cifar10/data_batch_%d"%i,"r") as f:
tmp = pickle.load(f)
data_tr [(i-1)*10000:i*10000] = tmp["data"]
label_tr[(i-1)*10000:i*10000] = tmp["labels"]
with open("data_cifar10/test_batch","r") as f:
tmp = pickle.load(f)
data_ev [:] = tmp["data"]
label_ev [:] = tmp["labels"]
## Prep
print "Normalizing data ..."
def Normalize(x):
avg = np.average(x,axis=1).reshape((len(x),1))
std = np.sqrt(np.sum(x*x,axis=1) - np.sum(x,axis=1)).reshape((len(x),1))
y = (x - avg) / std
return y
data_tr = Normalize(data_tr)
data_ev = Normalize(data_ev)
x_tr = data_tr.reshape((len(data_tr),3,32,32))
x_ev = data_ev.reshape((len(data_ev),3,32,32))
y_tr = label_tr
y_ev = label_ev
N_tr = len(data_tr) # 50000
N_ev = len(data_ev) # 10000
## Define analisis
Resume = None
if "Resume" in kwd:
Resume = kwd["Resume"]
del kwd["Resume"]
model = L.Classifier(ImageProcessNetwork(I_colors=3, I_Xunit=32, I_Yunit=32, F_unit = 10, **kwd))
if id_gpu >= 0:
cuda.get_device(id_gpu).use()
model.to_gpu()
xp = np if id_gpu < 0 else cuda.cupy
# Setup optimizer
optimizer = optimizers.Adam()
optimizer.setup(model)
# Init/Resume
if Resume:
print('Load optimizer state from', Resume)
serializers.load_hdf5(Resume+".state", optimizer)
serializers.load_hdf5(Resume+".model", model)
# Learning loop
if fOutput: fOutput.write("epoch,mode,loss,accuracy\n")
for epoch in six.moves.range(1, n_epoch + 1):
print 'epoch %d'%epoch
# training
perm = np.random.permutation(N_tr)
sum_accuracy = 0
sum_loss = 0
start = time.time()
for i in six.moves.range(0, N_tr, batchsize):
x = chainer.Variable(xp.asarray(x_tr[perm[i:i + batchsize]]))
t = chainer.Variable(xp.asarray(y_tr[perm[i:i + batchsize]]))
# Pass the loss function (Classifier defines it) and its arguments
model.predictor.setTrainMode(True)
optimizer.update(model, x, t)
if (epoch == 1 and i == 0) and folderName:
with open(os.path.join(folderName,'graph.dot'), 'w') as o:
g = computational_graph.build_computational_graph(
(model.loss, ))
o.write(g.dump())
print 'graph generated'
sum_loss += float(model.loss.data) * len(t.data)
sum_accuracy += float(model.accuracy.data) * len(t.data)
end = time.time()
elapsed_time = end - start
throughput = N_tr / elapsed_time
print 'train mean loss=%.3f, accuracy=%.1f%%, throughput=%.0f images/sec'%(sum_loss / N_tr, sum_accuracy / N_tr * 100., throughput)
if fOutput: fOutput.write("%d,Train,%e,%e\n"%(epoch,sum_loss/N_tr,sum_accuracy/N_tr))
# evaluation
perm = np.random.permutation(N_ev)
sum_accuracy = 0
sum_loss = 0
for i in six.moves.range(0, N_ev, batchsize):
x = chainer.Variable(xp.asarray(x_ev[perm[i:i + batchsize]]),volatile='on')
t = chainer.Variable(xp.asarray(y_ev[perm[i:i + batchsize]]),volatile='on')
model.predictor.setTrainMode(False)
loss = model(x, t)
sum_loss += float(loss.data) * len(t.data)
sum_accuracy += float(model.accuracy.data) * len(t.data)
print 'test mean loss=%.3f, accuracy=%.1f%%'%(sum_loss / N_ev, sum_accuracy / N_ev * 100, )
if fOutput: fOutput.write("%d,Test,%e,%e\n"%(epoch,sum_loss/N_ev,sum_accuracy/N_ev))
if folderName and (epoch%10 == 0 or epoch==n_epoch):
# Save the model and the optimizer
if epoch == n_epoch:
myFname = os.path.join(folderName,'mlp_final')
else:
myFname = os.path.join(folderName,'mlp_%d'%n_epoch)
#print 'save the model'
serializers.save_hdf5(myFname+".model", model)
serializers.save_hdf5(myFname+".state", optimizer)
if fOutput: fOutput.close()
if fInfo : fInfo.close()
if __name__=="__main__":
n_epoch = 200
for i in range(10):
for l in [6,5,4,3]:
CifarAnalysis("Output/Ksize222_L%d_%d"%(l,i),
n_epoch=n_epoch,
batchsize = 1000,
N_PLayers = l,
P0C_feature = 32,
P1C_feature = 16,
P2C_feature = 16,
P0C_filter = 3,
P1C_filter = 3,
P2C_filter = 3,
P0P_ksize = 2,
P1P_ksize = 2,
P2P_ksize = 2,
L1_dropout = 0.5,
L2_dropout = 0.0,
L2_unit = 100)
CifarAnalysis("Output/Ksize211_L%d_%d"%(l,i),
n_epoch=n_epoch,
batchsize = 1000,
N_PLayers = l,
P0C_feature = 32,
P1C_feature = 16,
P2C_feature = 16,
P0C_filter = 3,
P1C_filter = 3,
P2C_filter = 3,
P0P_ksize = 2,
P1P_ksize = 1,
P2P_ksize = 1,
L1_dropout = 0.5,
L2_dropout = 0.0,
L2_unit = 100)
| mit |
PeterSchichtel/hepstore | hepstore/core/school/books/classification/lda.py | 1 | 1928 | #!/usr/bin/env python
# from general
import sklearn.discriminant_analysis
import numpy as np
import time
import os
# from hepstore
from hepstore.core.utility import *
from hepstore.core.statistic.distribution import Log10Flat as Log10Flat
# from local
import tuning
# out own LDA interface
class LinearDiscriminantAnalysis(sklearn.discriminant_analysis.LinearDiscriminantAnalysis):
# constructor
def __init__(self,
solver='svd', shrinkage=None, priors=None, n_components=None,
store_covariance=False, tol=0.0001, path=os.getcwd(),
random_state=None, jobs=1 ):
if solver=='svd' and shrinkage is not None:
print "--LDA: warning setting 'shrinkage' to 'None', for SVD"
shrinkage=None
pass
sklearn.discriminant_analysis.LinearDiscriminantAnalysis.__init__(
self,solver=solver, shrinkage=shrinkage, priors=priors,
n_components=n_components, store_covariance=store_covariance, tol=tol
)
self.path = path
self.random_state = random_state
self.jobs = jobs
pass
# explore parameters of this algorithm
def explore( self, X, y ):
# generate unique path dependent on core algorithm
path = os.path.join(self.path,self.solver)
print "--LDA: explore"
# specify parameters for exploration
if self.solver == 'svd':
param_dist = {
'tol' : Log10Flat(-10,-0.001)
}
pass
else:
param_dist = {
'shrinkage': Log10Flat(-10,-0.001),
}
pass
# tune classifier
tuning.tune( self, X, y, param_dist,
path = path,
jobs = self.jobs,
random_state = self.random_state
)
pass
pass
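# --- Added usage sketch (illustrative, not part of the original module) -----
# The wrapper is constructed like sklearn's LDA plus the bookkeeping
# arguments; the output path, random seed and job count below are example
# values only:
#
#     clf = LinearDiscriminantAnalysis(solver='lsqr', path='explore_out',
#                                      random_state=42, jobs=4)
#     clf.explore(X_train, y_train)   # random search over 'shrinkage'
#     clf.fit(X_train, y_train)
#     y_pred = clf.predict(X_test)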
| gpl-3.0 |
ryfeus/lambda-packs | LightGBM_sklearn_scipy_numpy/source/sklearn/utils/estimator_checks.py | 3 | 66863 | from __future__ import print_function
import types
import warnings
import sys
import traceback
import pickle
from copy import deepcopy
import numpy as np
from scipy import sparse
from scipy.stats import rankdata
import struct
from sklearn.externals.six.moves import zip
from sklearn.externals.joblib import hash, Memory
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_allclose
from sklearn.utils.testing import assert_allclose_dense_sparse
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import META_ESTIMATORS
from sklearn.utils.testing import set_random_state
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_dict_equal
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.base import (clone, TransformerMixin, ClusterMixin,
BaseEstimator, is_classifier, is_regressor)
from sklearn.metrics import accuracy_score, adjusted_rand_score, f1_score
from sklearn.random_projection import BaseRandomProjection
from sklearn.feature_selection import SelectKBest
from sklearn.svm.base import BaseLibSVM
from sklearn.linear_model.stochastic_gradient import BaseSGD
from sklearn.pipeline import make_pipeline
from sklearn.exceptions import ConvergenceWarning
from sklearn.exceptions import DataConversionWarning
from sklearn.exceptions import SkipTestWarning
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
from sklearn.utils.fixes import signature
from sklearn.utils.validation import has_fit_parameter, _num_samples
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_iris, load_boston, make_blobs
BOSTON = None
CROSS_DECOMPOSITION = ['PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']
MULTI_OUTPUT = ['CCA', 'DecisionTreeRegressor', 'ElasticNet',
'ExtraTreeRegressor', 'ExtraTreesRegressor', 'GaussianProcess',
'GaussianProcessRegressor',
'KNeighborsRegressor', 'KernelRidge', 'Lars', 'Lasso',
'LassoLars', 'LinearRegression', 'MultiTaskElasticNet',
'MultiTaskElasticNetCV', 'MultiTaskLasso', 'MultiTaskLassoCV',
'OrthogonalMatchingPursuit', 'PLSCanonical', 'PLSRegression',
'RANSACRegressor', 'RadiusNeighborsRegressor',
'RandomForestRegressor', 'Ridge', 'RidgeCV']
def _yield_non_meta_checks(name, estimator):
yield check_estimators_dtypes
yield check_fit_score_takes_y
yield check_dtype_object
yield check_sample_weights_pandas_series
yield check_sample_weights_list
yield check_estimators_fit_returns_self
# Check that all estimator yield informative messages when
# trained on empty datasets
yield check_estimators_empty_data_messages
if name not in CROSS_DECOMPOSITION + ['SpectralEmbedding']:
# SpectralEmbedding is non-deterministic,
# see issue #4236
# cross-decomposition's "transform" returns X and Y
yield check_pipeline_consistency
if name not in ['Imputer']:
# Test that all estimators check their input for NaN's and infs
yield check_estimators_nan_inf
if name not in ['GaussianProcess']:
# FIXME!
# in particular GaussianProcess!
yield check_estimators_overwrite_params
if hasattr(estimator, 'sparsify'):
yield check_sparsify_coefficients
yield check_estimator_sparse_data
# Test that estimators can be pickled, and once pickled
# give the same answer as before.
yield check_estimators_pickle
def _yield_classifier_checks(name, classifier):
# test classifiers can handle non-array data
yield check_classifier_data_not_an_array
# test classifiers trained on a single label always return this label
yield check_classifiers_one_label
yield check_classifiers_classes
yield check_estimators_partial_fit_n_features
# basic consistency testing
yield check_classifiers_train
yield check_classifiers_regression_target
if (name not in
["MultinomialNB", "LabelPropagation", "LabelSpreading"] and
# TODO some complication with -1 label
name not in ["DecisionTreeClassifier", "ExtraTreeClassifier"]):
# We don't raise a warning in these classifiers, as
# the column y interface is used by the forests.
yield check_supervised_y_2d
# test if NotFittedError is raised
yield check_estimators_unfitted
if 'class_weight' in classifier.get_params().keys():
yield check_class_weight_classifiers
yield check_non_transformer_estimators_n_iter
# test if predict_proba is a monotonic transformation of decision_function
yield check_decision_proba_consistency
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_supervised_y_no_nan(name, estimator_orig):
# Checks that the Estimator targets are not NaN.
estimator = clone(estimator_orig)
rng = np.random.RandomState(888)
X = rng.randn(10, 5)
y = np.ones(10) * np.inf
y = multioutput_estimator_convert_y_2d(estimator, y)
errmsg = "Input contains NaN, infinity or a value too large for " \
"dtype('float64')."
try:
estimator.fit(X, y)
except ValueError as e:
if str(e) != errmsg:
raise ValueError("Estimator {0} raised error as expected, but "
"does not match expected error message"
.format(name))
else:
raise ValueError("Estimator {0} should have raised error on fitting "
"array y with NaN value.".format(name))
def _yield_regressor_checks(name, regressor):
# TODO: test with intercept
# TODO: test with multiple responses
# basic testing
yield check_regressors_train
yield check_regressor_data_not_an_array
yield check_estimators_partial_fit_n_features
yield check_regressors_no_decision_function
yield check_supervised_y_2d
yield check_supervised_y_no_nan
if name != 'CCA':
# check that the regressor handles int input
yield check_regressors_int
if name != "GaussianProcessRegressor":
# Test if NotFittedError is raised
yield check_estimators_unfitted
yield check_non_transformer_estimators_n_iter
def _yield_transformer_checks(name, transformer):
# All transformers should either deal with sparse data or raise an
# exception with type TypeError and an intelligible error message
if name not in ['AdditiveChi2Sampler', 'Binarizer', 'Normalizer',
'PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']:
yield check_transformer_data_not_an_array
# these don't actually fit the data, so don't raise errors
if name not in ['AdditiveChi2Sampler', 'Binarizer',
'FunctionTransformer', 'Normalizer']:
# basic tests
yield check_transformer_general
yield check_transformers_unfitted
# Dependent on external solvers and hence accessing the iter
# param is non-trivial.
external_solver = ['Isomap', 'KernelPCA', 'LocallyLinearEmbedding',
'RandomizedLasso', 'LogisticRegressionCV']
if name not in external_solver:
yield check_transformer_n_iter
def _yield_clustering_checks(name, clusterer):
yield check_clusterer_compute_labels_predict
if name not in ('WardAgglomeration', "FeatureAgglomeration"):
# this is clustering on the features
# let's not test that here.
yield check_clustering
yield check_estimators_partial_fit_n_features
yield check_non_transformer_estimators_n_iter
def _yield_all_checks(name, estimator):
for check in _yield_non_meta_checks(name, estimator):
yield check
if is_classifier(estimator):
for check in _yield_classifier_checks(name, estimator):
yield check
if is_regressor(estimator):
for check in _yield_regressor_checks(name, estimator):
yield check
if isinstance(estimator, TransformerMixin):
for check in _yield_transformer_checks(name, estimator):
yield check
if isinstance(estimator, ClusterMixin):
for check in _yield_clustering_checks(name, estimator):
yield check
yield check_fit2d_predict1d
yield check_fit2d_1sample
yield check_fit2d_1feature
yield check_fit1d_1feature
yield check_fit1d_1sample
yield check_get_params_invariance
yield check_dict_unchanged
yield check_dont_overwrite_parameters
def check_estimator(Estimator):
"""Check if estimator adheres to scikit-learn conventions.
This estimator will run an extensive test-suite for input validation,
shapes, etc.
Additional tests for classifiers, regressors, clustering or transformers
will be run if the Estimator class inherits from the corresponding mixin
from sklearn.base.
This test can be applied to classes or instances.
Classes currently have some additional tests that related to construction,
while passing instances allows the testing of multiple options.
Parameters
----------
estimator : estimator object or class
Estimator to check. Estimator is a class object or instance.
"""
if isinstance(Estimator, type):
# got a class
name = Estimator.__name__
check_parameters_default_constructible(name, Estimator)
check_no_fit_attributes_set_in_init(name, Estimator)
estimator = Estimator()
else:
# got an instance
estimator = Estimator
name = type(estimator).__name__
for check in _yield_all_checks(name, estimator):
try:
check(name, estimator)
except SkipTest as message:
# the only SkipTest thrown currently results from not
# being able to import pandas.
warnings.warn(message, SkipTestWarning)
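# --- Added usage note (illustrative, not part of the original module) -------
# check_estimator accepts either a class or an instance, for example:
#
#     from sklearn.linear_model import LogisticRegression
#     check_estimator(LogisticRegression)         # class: also checks __init__
#     check_estimator(LogisticRegression(C=0.1))  # instance: this configuration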
def _boston_subset(n_samples=200):
global BOSTON
if BOSTON is None:
boston = load_boston()
X, y = boston.data, boston.target
X, y = shuffle(X, y, random_state=0)
X, y = X[:n_samples], y[:n_samples]
X = StandardScaler().fit_transform(X)
BOSTON = X, y
return BOSTON
def set_checking_parameters(estimator):
# set parameters to speed up some estimators and
# avoid deprecated behaviour
params = estimator.get_params()
if ("n_iter" in params and estimator.__class__.__name__ != "TSNE"
and not isinstance(estimator, BaseSGD)):
estimator.set_params(n_iter=5)
if "max_iter" in params:
warnings.simplefilter("ignore", ConvergenceWarning)
if estimator.max_iter is not None:
estimator.set_params(max_iter=min(5, estimator.max_iter))
# LinearSVR, LinearSVC
if estimator.__class__.__name__ in ['LinearSVR', 'LinearSVC']:
estimator.set_params(max_iter=20)
# NMF
if estimator.__class__.__name__ == 'NMF':
estimator.set_params(max_iter=100)
# MLP
if estimator.__class__.__name__ in ['MLPClassifier', 'MLPRegressor']:
estimator.set_params(max_iter=100)
if "n_resampling" in params:
# randomized lasso
estimator.set_params(n_resampling=5)
if "n_estimators" in params:
# especially gradient boosting with default 100
estimator.set_params(n_estimators=min(5, estimator.n_estimators))
if "max_trials" in params:
# RANSAC
estimator.set_params(max_trials=10)
if "n_init" in params:
# K-Means
estimator.set_params(n_init=2)
if "decision_function_shape" in params:
# SVC
estimator.set_params(decision_function_shape='ovo')
if estimator.__class__.__name__ == "SelectFdr":
# be tolerant of noisy datasets (not actually speed)
estimator.set_params(alpha=.5)
if estimator.__class__.__name__ == "TheilSenRegressor":
estimator.max_subpopulation = 100
if isinstance(estimator, BaseRandomProjection):
# Due to the jl lemma and often very few samples, the number
        # of components of the random matrix projection will probably be
# greater than the number of features.
# So we impose a smaller number (avoid "auto" mode)
estimator.set_params(n_components=2)
if isinstance(estimator, SelectKBest):
# SelectKBest has a default of k=10
        # which is more features than we have in most cases.
estimator.set_params(k=1)
class NotAnArray(object):
" An object that is convertable to an array"
def __init__(self, data):
self.data = data
def __array__(self, dtype=None):
return self.data
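# Illustrative sketch: np.asarray triggers __array__, so a NotAnArray wrapper
# is consumed transparently wherever an array-like is accepted.
def _example_not_an_array():  # pragma: no cover - illustrative only
    data = np.arange(6).reshape(3, 2)
    wrapped = NotAnArray(data)
    assert np.asarray(wrapped).shape == (3, 2)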
def _is_32bit():
"""Detect if process is 32bit Python."""
return struct.calcsize('P') * 8 == 32
def check_estimator_sparse_data(name, estimator_orig):
rng = np.random.RandomState(0)
X = rng.rand(40, 10)
X[X < .8] = 0
X_csr = sparse.csr_matrix(X)
y = (4 * rng.rand(40)).astype(np.int)
# catch deprecation warnings
with ignore_warnings(category=DeprecationWarning):
estimator = clone(estimator_orig)
y = multioutput_estimator_convert_y_2d(estimator, y)
for sparse_format in ['csr', 'csc', 'dok', 'lil', 'coo', 'dia', 'bsr']:
X = X_csr.asformat(sparse_format)
# catch deprecation warnings
with ignore_warnings(category=(DeprecationWarning, FutureWarning)):
if name in ['Scaler', 'StandardScaler']:
estimator = clone(estimator).set_params(with_mean=False)
else:
estimator = clone(estimator)
# fit and predict
try:
with ignore_warnings(category=(DeprecationWarning, FutureWarning)):
estimator.fit(X, y)
if hasattr(estimator, "predict"):
pred = estimator.predict(X)
assert_equal(pred.shape, (X.shape[0],))
if hasattr(estimator, 'predict_proba'):
probs = estimator.predict_proba(X)
assert_equal(probs.shape, (X.shape[0], 4))
except TypeError as e:
if 'sparse' not in repr(e):
print("Estimator %s doesn't seem to fail gracefully on "
"sparse data: error message state explicitly that "
"sparse input is not supported if this is not the case."
% name)
raise
except Exception:
print("Estimator %s doesn't seem to fail gracefully on "
"sparse data: it should raise a TypeError if sparse input "
"is explicitly not supported." % name)
raise
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_sample_weights_pandas_series(name, estimator_orig):
# check that estimators will accept a 'sample_weight' parameter of
# type pandas.Series in the 'fit' function.
estimator = clone(estimator_orig)
if has_fit_parameter(estimator, "sample_weight"):
try:
import pandas as pd
X = pd.DataFrame([[1, 1], [1, 2], [1, 3], [2, 1], [2, 2], [2, 3]])
y = pd.Series([1, 1, 1, 2, 2, 2])
weights = pd.Series([1] * 6)
try:
estimator.fit(X, y, sample_weight=weights)
except ValueError:
raise ValueError("Estimator {0} raises error if "
"'sample_weight' parameter is of "
"type pandas.Series".format(name))
except ImportError:
raise SkipTest("pandas is not installed: not testing for "
"input of type pandas.Series to class weight.")
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_sample_weights_list(name, estimator_orig):
# check that estimators will accept a 'sample_weight' parameter of
# type list in the 'fit' function.
if has_fit_parameter(estimator_orig, "sample_weight"):
estimator = clone(estimator_orig)
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 3))
y = np.arange(10) % 3
y = multioutput_estimator_convert_y_2d(estimator, y)
sample_weight = [3] * 10
# Test that estimators don't raise any exception
estimator.fit(X, y, sample_weight=sample_weight)
@ignore_warnings(category=(DeprecationWarning, FutureWarning, UserWarning))
def check_dtype_object(name, estimator_orig):
# check that estimators treat dtype object as numeric if possible
rng = np.random.RandomState(0)
X = rng.rand(40, 10).astype(object)
y = (X[:, 0] * 4).astype(np.int)
estimator = clone(estimator_orig)
y = multioutput_estimator_convert_y_2d(estimator, y)
estimator.fit(X, y)
if hasattr(estimator, "predict"):
estimator.predict(X)
if hasattr(estimator, "transform"):
estimator.transform(X)
try:
estimator.fit(X, y.astype(object))
except Exception as e:
if "Unknown label type" not in str(e):
raise
X[0, 0] = {'foo': 'bar'}
msg = "argument must be a string or a number"
assert_raises_regex(TypeError, msg, estimator.fit, X, y)
@ignore_warnings
def check_dict_unchanged(name, estimator_orig):
    # SpectralCoclustering raises
    # ValueError: Found array with 0 feature(s) (shape=(23, 0))
    # while a minimum of 1 is required.
if name in ['SpectralCoclustering']:
return
rnd = np.random.RandomState(0)
if name in ['RANSACRegressor']:
X = 3 * rnd.uniform(size=(20, 3))
else:
X = 2 * rnd.uniform(size=(20, 3))
y = X[:, 0].astype(np.int)
estimator = clone(estimator_orig)
y = multioutput_estimator_convert_y_2d(estimator, y)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
if hasattr(estimator, "n_best"):
estimator.n_best = 1
set_random_state(estimator, 1)
estimator.fit(X, y)
for method in ["predict", "transform", "decision_function",
"predict_proba"]:
if hasattr(estimator, method):
dict_before = estimator.__dict__.copy()
getattr(estimator, method)(X)
assert_dict_equal(estimator.__dict__, dict_before,
'Estimator changes __dict__ during %s' % method)
def is_public_parameter(attr):
return not (attr.startswith('_') or attr.endswith('_'))
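# Illustrative sketch: attributes ending (or starting) with an underscore are
# treated as private/fitted state, everything else as a public parameter.
def _example_is_public_parameter():  # pragma: no cover - illustrative only
    assert is_public_parameter('max_iter')
    assert not is_public_parameter('coef_')
    assert not is_public_parameter('_cache')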
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_dont_overwrite_parameters(name, estimator_orig):
# check that fit method only changes or sets private attributes
if hasattr(estimator_orig.__init__, "deprecated_original"):
# to not check deprecated classes
return
estimator = clone(estimator_orig)
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20, 3))
y = X[:, 0].astype(np.int)
y = multioutput_estimator_convert_y_2d(estimator, y)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
dict_before_fit = estimator.__dict__.copy()
estimator.fit(X, y)
dict_after_fit = estimator.__dict__
public_keys_after_fit = [key for key in dict_after_fit.keys()
if is_public_parameter(key)]
attrs_added_by_fit = [key for key in public_keys_after_fit
if key not in dict_before_fit.keys()]
# check that fit doesn't add any public attribute
assert_true(not attrs_added_by_fit,
('Estimator adds public attribute(s) during'
' the fit method.'
' Estimators are only allowed to add private attributes'
' either started with _ or ended'
' with _ but %s added' % ', '.join(attrs_added_by_fit)))
# check that fit doesn't change any public attribute
attrs_changed_by_fit = [key for key in public_keys_after_fit
if (dict_before_fit[key]
is not dict_after_fit[key])]
assert_true(not attrs_changed_by_fit,
('Estimator changes public attribute(s) during'
' the fit method. Estimators are only allowed'
' to change attributes started'
' or ended with _, but'
' %s changed' % ', '.join(attrs_changed_by_fit)))
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_fit2d_predict1d(name, estimator_orig):
# check by fitting a 2d array and predicting with a 1d array
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20, 3))
y = X[:, 0].astype(np.int)
estimator = clone(estimator_orig)
y = multioutput_estimator_convert_y_2d(estimator, y)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
estimator.fit(X, y)
for method in ["predict", "transform", "decision_function",
"predict_proba"]:
if hasattr(estimator, method):
assert_raise_message(ValueError, "Reshape your data",
getattr(estimator, method), X[0])
@ignore_warnings
def check_fit2d_1sample(name, estimator_orig):
    # check fitting a 2d array with only 1 sample
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(1, 10))
y = X[:, 0].astype(np.int)
estimator = clone(estimator_orig)
y = multioutput_estimator_convert_y_2d(estimator, y)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
try:
estimator.fit(X, y)
except ValueError:
pass
@ignore_warnings
def check_fit2d_1feature(name, estimator_orig):
    # check fitting a 2d array with only 1 feature
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(10, 1))
y = X[:, 0].astype(np.int)
estimator = clone(estimator_orig)
y = multioutput_estimator_convert_y_2d(estimator, y)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
try:
estimator.fit(X, y)
except ValueError:
pass
@ignore_warnings
def check_fit1d_1feature(name, estimator_orig):
# check fitting 1d array with 1 feature
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20))
y = X.astype(np.int)
estimator = clone(estimator_orig)
y = multioutput_estimator_convert_y_2d(estimator, y)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
try:
estimator.fit(X, y)
except ValueError:
pass
@ignore_warnings
def check_fit1d_1sample(name, estimator_orig):
    # check fitting 1d array with 1 sample
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20))
y = np.array([1])
estimator = clone(estimator_orig)
y = multioutput_estimator_convert_y_2d(estimator, y)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
try:
estimator.fit(X, y)
except ValueError:
pass
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_transformer_general(name, transformer):
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X = StandardScaler().fit_transform(X)
X -= X.min()
_check_transformer(name, transformer, X, y)
_check_transformer(name, transformer, X.tolist(), y.tolist())
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_transformer_data_not_an_array(name, transformer):
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X = StandardScaler().fit_transform(X)
# We need to make sure that we have non negative data, for things
# like NMF
X -= X.min() - .1
this_X = NotAnArray(X)
this_y = NotAnArray(np.asarray(y))
_check_transformer(name, transformer, this_X, this_y)
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_transformers_unfitted(name, transformer):
X, y = _boston_subset()
transformer = clone(transformer)
assert_raises((AttributeError, ValueError), transformer.transform, X)
def _check_transformer(name, transformer_orig, X, y):
if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _is_32bit():
# Those transformers yield non-deterministic output when executed on
# a 32bit Python. The same transformers are stable on 64bit Python.
# FIXME: try to isolate a minimalistic reproduction case only depending
# on numpy & scipy and/or maybe generate a test dataset that does not
# cause such unstable behaviors.
msg = name + ' is non deterministic on 32bit Python'
raise SkipTest(msg)
n_samples, n_features = np.asarray(X).shape
transformer = clone(transformer_orig)
set_random_state(transformer)
# fit
if name in CROSS_DECOMPOSITION:
y_ = np.c_[y, y]
y_[::2, 1] *= 2
else:
y_ = y
transformer.fit(X, y_)
# fit_transform method should work on non fitted estimator
transformer_clone = clone(transformer)
X_pred = transformer_clone.fit_transform(X, y=y_)
if isinstance(X_pred, tuple):
for x_pred in X_pred:
assert_equal(x_pred.shape[0], n_samples)
else:
# check for consistent n_samples
assert_equal(X_pred.shape[0], n_samples)
if hasattr(transformer, 'transform'):
if name in CROSS_DECOMPOSITION:
X_pred2 = transformer.transform(X, y_)
X_pred3 = transformer.fit_transform(X, y=y_)
else:
X_pred2 = transformer.transform(X)
X_pred3 = transformer.fit_transform(X, y=y_)
if isinstance(X_pred, tuple) and isinstance(X_pred2, tuple):
for x_pred, x_pred2, x_pred3 in zip(X_pred, X_pred2, X_pred3):
assert_allclose_dense_sparse(
x_pred, x_pred2, atol=1e-2,
err_msg="fit_transform and transform outcomes "
"not consistent in %s"
% transformer)
assert_allclose_dense_sparse(
x_pred, x_pred3, atol=1e-2,
err_msg="consecutive fit_transform outcomes "
"not consistent in %s"
% transformer)
else:
assert_allclose_dense_sparse(
X_pred, X_pred2,
err_msg="fit_transform and transform outcomes "
"not consistent in %s"
% transformer, atol=1e-2)
assert_allclose_dense_sparse(
X_pred, X_pred3, atol=1e-2,
err_msg="consecutive fit_transform outcomes "
"not consistent in %s"
% transformer)
assert_equal(_num_samples(X_pred2), n_samples)
assert_equal(_num_samples(X_pred3), n_samples)
# raises error on malformed input for transform
if hasattr(X, 'T'):
# If it's not an array, it does not have a 'T' property
assert_raises(ValueError, transformer.transform, X.T)
@ignore_warnings
def check_pipeline_consistency(name, estimator_orig):
if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _is_32bit():
# Those transformers yield non-deterministic output when executed on
# a 32bit Python. The same transformers are stable on 64bit Python.
# FIXME: try to isolate a minimalistic reproduction case only depending
        # on numpy & scipy and/or maybe generate a test dataset that does not
# cause such unstable behaviors.
msg = name + ' is non deterministic on 32bit Python'
raise SkipTest(msg)
# check that make_pipeline(est) gives same score as est
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X -= X.min()
estimator = clone(estimator_orig)
y = multioutput_estimator_convert_y_2d(estimator, y)
set_random_state(estimator)
pipeline = make_pipeline(estimator)
estimator.fit(X, y)
pipeline.fit(X, y)
funcs = ["score", "fit_transform"]
for func_name in funcs:
func = getattr(estimator, func_name, None)
if func is not None:
func_pipeline = getattr(pipeline, func_name)
result = func(X, y)
result_pipe = func_pipeline(X, y)
assert_allclose_dense_sparse(result, result_pipe)
@ignore_warnings
def check_fit_score_takes_y(name, estimator_orig):
# check that all estimators accept an optional y
# in fit and score so they can be used in pipelines
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 3))
y = np.arange(10) % 3
estimator = clone(estimator_orig)
y = multioutput_estimator_convert_y_2d(estimator, y)
set_random_state(estimator)
funcs = ["fit", "score", "partial_fit", "fit_predict", "fit_transform"]
for func_name in funcs:
func = getattr(estimator, func_name, None)
if func is not None:
func(X, y)
args = [p.name for p in signature(func).parameters.values()]
if args[0] == "self":
# if_delegate_has_method makes methods into functions
# with an explicit "self", so need to shift arguments
args = args[1:]
assert_true(args[1] in ["y", "Y"],
"Expected y or Y as second argument for method "
"%s of %s. Got arguments: %r."
% (func_name, type(estimator).__name__, args))
@ignore_warnings
def check_estimators_dtypes(name, estimator_orig):
rnd = np.random.RandomState(0)
X_train_32 = 3 * rnd.uniform(size=(20, 5)).astype(np.float32)
X_train_64 = X_train_32.astype(np.float64)
X_train_int_64 = X_train_32.astype(np.int64)
X_train_int_32 = X_train_32.astype(np.int32)
y = X_train_int_64[:, 0]
y = multioutput_estimator_convert_y_2d(estimator_orig, y)
methods = ["predict", "transform", "decision_function", "predict_proba"]
for X_train in [X_train_32, X_train_64, X_train_int_64, X_train_int_32]:
estimator = clone(estimator_orig)
set_random_state(estimator, 1)
estimator.fit(X_train, y)
for method in methods:
if hasattr(estimator, method):
getattr(estimator, method)(X_train)
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_estimators_empty_data_messages(name, estimator_orig):
e = clone(estimator_orig)
set_random_state(e, 1)
X_zero_samples = np.empty(0).reshape(0, 3)
# The precise message can change depending on whether X or y is
# validated first. Let us test the type of exception only:
assert_raises(ValueError, e.fit, X_zero_samples, [])
X_zero_features = np.empty(0).reshape(3, 0)
# the following y should be accepted by both classifiers and regressors
# and ignored by unsupervised models
y = multioutput_estimator_convert_y_2d(e, np.array([1, 0, 1]))
msg = ("0 feature\(s\) \(shape=\(3, 0\)\) while a minimum of \d* "
"is required.")
assert_raises_regex(ValueError, msg, e.fit, X_zero_features, y)
@ignore_warnings(category=DeprecationWarning)
def check_estimators_nan_inf(name, estimator_orig):
    # Check that estimators raise an informative error when X contains NaN or inf.
rnd = np.random.RandomState(0)
X_train_finite = rnd.uniform(size=(10, 3))
X_train_nan = rnd.uniform(size=(10, 3))
X_train_nan[0, 0] = np.nan
X_train_inf = rnd.uniform(size=(10, 3))
X_train_inf[0, 0] = np.inf
y = np.ones(10)
y[:5] = 0
y = multioutput_estimator_convert_y_2d(estimator_orig, y)
error_string_fit = "Estimator doesn't check for NaN and inf in fit."
error_string_predict = ("Estimator doesn't check for NaN and inf in"
" predict.")
error_string_transform = ("Estimator doesn't check for NaN and inf in"
" transform.")
for X_train in [X_train_nan, X_train_inf]:
# catch deprecation warnings
with ignore_warnings(category=(DeprecationWarning, FutureWarning)):
estimator = clone(estimator_orig)
set_random_state(estimator, 1)
# try to fit
try:
estimator.fit(X_train, y)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_fit, estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_fit, estimator, exc)
traceback.print_exc(file=sys.stdout)
raise exc
else:
raise AssertionError(error_string_fit, estimator)
# actually fit
estimator.fit(X_train_finite, y)
# predict
if hasattr(estimator, "predict"):
try:
estimator.predict(X_train)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_predict, estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_predict, estimator, exc)
traceback.print_exc(file=sys.stdout)
else:
raise AssertionError(error_string_predict, estimator)
# transform
if hasattr(estimator, "transform"):
try:
estimator.transform(X_train)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_transform, estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_transform, estimator, exc)
traceback.print_exc(file=sys.stdout)
else:
raise AssertionError(error_string_transform, estimator)
@ignore_warnings
def check_estimators_pickle(name, estimator_orig):
"""Test that we can pickle all estimators"""
check_methods = ["predict", "transform", "decision_function",
"predict_proba"]
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
    # some estimators can't handle negative feature values
X -= X.min()
estimator = clone(estimator_orig)
# some estimators only take multioutputs
y = multioutput_estimator_convert_y_2d(estimator, y)
set_random_state(estimator)
estimator.fit(X, y)
result = dict()
for method in check_methods:
if hasattr(estimator, method):
result[method] = getattr(estimator, method)(X)
# pickle and unpickle!
pickled_estimator = pickle.dumps(estimator)
if estimator.__module__.startswith('sklearn.'):
assert_true(b"version" in pickled_estimator)
unpickled_estimator = pickle.loads(pickled_estimator)
for method in result:
unpickled_result = getattr(unpickled_estimator, method)(X)
assert_allclose_dense_sparse(result[method], unpickled_result)
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_estimators_partial_fit_n_features(name, estimator_orig):
    # check that an error is raised if the number of features changes
    # between calls to partial_fit.
if not hasattr(estimator_orig, 'partial_fit'):
return
estimator = clone(estimator_orig)
X, y = make_blobs(n_samples=50, random_state=1)
X -= X.min()
try:
if is_classifier(estimator):
classes = np.unique(y)
estimator.partial_fit(X, y, classes=classes)
else:
estimator.partial_fit(X, y)
except NotImplementedError:
return
assert_raises(ValueError, estimator.partial_fit, X[:, :-1], y)
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_clustering(name, clusterer_orig):
clusterer = clone(clusterer_orig)
X, y = make_blobs(n_samples=50, random_state=1)
X, y = shuffle(X, y, random_state=7)
X = StandardScaler().fit_transform(X)
n_samples, n_features = X.shape
# catch deprecation and neighbors warnings
if hasattr(clusterer, "n_clusters"):
clusterer.set_params(n_clusters=3)
set_random_state(clusterer)
if name == 'AffinityPropagation':
clusterer.set_params(preference=-100)
clusterer.set_params(max_iter=100)
# fit
clusterer.fit(X)
# with lists
clusterer.fit(X.tolist())
assert_equal(clusterer.labels_.shape, (n_samples,))
pred = clusterer.labels_
assert_greater(adjusted_rand_score(pred, y), 0.4)
# fit another time with ``fit_predict`` and compare results
if name == 'SpectralClustering':
# there is no way to make Spectral clustering deterministic :(
return
set_random_state(clusterer)
with warnings.catch_warnings(record=True):
pred2 = clusterer.fit_predict(X)
assert_array_equal(pred, pred2)
@ignore_warnings(category=DeprecationWarning)
def check_clusterer_compute_labels_predict(name, clusterer_orig):
"""Check that predict is invariant of compute_labels"""
X, y = make_blobs(n_samples=20, random_state=0)
clusterer = clone(clusterer_orig)
if hasattr(clusterer, "compute_labels"):
# MiniBatchKMeans
if hasattr(clusterer, "random_state"):
clusterer.set_params(random_state=0)
X_pred1 = clusterer.fit(X).predict(X)
clusterer.set_params(compute_labels=False)
X_pred2 = clusterer.fit(X).predict(X)
assert_array_equal(X_pred1, X_pred2)
@ignore_warnings(category=DeprecationWarning)
def check_classifiers_one_label(name, classifier_orig):
error_string_fit = "Classifier can't train when only one class is present."
error_string_predict = ("Classifier can't predict when only one class is "
"present.")
rnd = np.random.RandomState(0)
X_train = rnd.uniform(size=(10, 3))
X_test = rnd.uniform(size=(10, 3))
y = np.ones(10)
# catch deprecation warnings
with ignore_warnings(category=(DeprecationWarning, FutureWarning)):
classifier = clone(classifier_orig)
# try to fit
try:
classifier.fit(X_train, y)
except ValueError as e:
if 'class' not in repr(e):
print(error_string_fit, classifier, e)
traceback.print_exc(file=sys.stdout)
raise e
else:
return
except Exception as exc:
print(error_string_fit, classifier, exc)
traceback.print_exc(file=sys.stdout)
raise exc
# predict
try:
assert_array_equal(classifier.predict(X_test), y)
except Exception as exc:
print(error_string_predict, classifier, exc)
raise exc
@ignore_warnings # Warnings are raised by decision function
def check_classifiers_train(name, classifier_orig):
X_m, y_m = make_blobs(n_samples=300, random_state=0)
X_m, y_m = shuffle(X_m, y_m, random_state=7)
X_m = StandardScaler().fit_transform(X_m)
# generate binary problem from multi-class one
y_b = y_m[y_m != 2]
X_b = X_m[y_m != 2]
for (X, y) in [(X_m, y_m), (X_b, y_b)]:
classes = np.unique(y)
n_classes = len(classes)
n_samples, n_features = X.shape
classifier = clone(classifier_orig)
if name in ['BernoulliNB', 'MultinomialNB']:
X -= X.min()
set_random_state(classifier)
# raises error on malformed input for fit
assert_raises(ValueError, classifier.fit, X, y[:-1])
# fit
classifier.fit(X, y)
# with lists
classifier.fit(X.tolist(), y.tolist())
assert_true(hasattr(classifier, "classes_"))
y_pred = classifier.predict(X)
assert_equal(y_pred.shape, (n_samples,))
# training set performance
if name not in ['BernoulliNB', 'MultinomialNB']:
assert_greater(accuracy_score(y, y_pred), 0.83)
# raises error on malformed input for predict
assert_raises(ValueError, classifier.predict, X.T)
if hasattr(classifier, "decision_function"):
try:
# decision_function agrees with predict
decision = classifier.decision_function(X)
if n_classes == 2:
assert_equal(decision.shape, (n_samples,))
dec_pred = (decision.ravel() > 0).astype(np.int)
assert_array_equal(dec_pred, y_pred)
if (n_classes == 3 and
                        # the one-vs-one scheme of LibSVM works differently
not isinstance(classifier, BaseLibSVM)):
assert_equal(decision.shape, (n_samples, n_classes))
assert_array_equal(np.argmax(decision, axis=1), y_pred)
# raises error on malformed input
assert_raises(ValueError,
classifier.decision_function, X.T)
# raises error on malformed input for decision_function
assert_raises(ValueError,
classifier.decision_function, X.T)
except NotImplementedError:
pass
if hasattr(classifier, "predict_proba"):
# predict_proba agrees with predict
y_prob = classifier.predict_proba(X)
assert_equal(y_prob.shape, (n_samples, n_classes))
assert_array_equal(np.argmax(y_prob, axis=1), y_pred)
# check that probas for all classes sum to one
assert_allclose(np.sum(y_prob, axis=1), np.ones(n_samples))
# raises error on malformed input
assert_raises(ValueError, classifier.predict_proba, X.T)
# raises error on malformed input for predict_proba
assert_raises(ValueError, classifier.predict_proba, X.T)
if hasattr(classifier, "predict_log_proba"):
# predict_log_proba is a transformation of predict_proba
y_log_prob = classifier.predict_log_proba(X)
assert_allclose(y_log_prob, np.log(y_prob), 8, atol=1e-9)
assert_array_equal(np.argsort(y_log_prob), np.argsort(y_prob))
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_estimators_fit_returns_self(name, estimator_orig):
"""Check if self is returned when calling fit"""
X, y = make_blobs(random_state=0, n_samples=9, n_features=4)
# some want non-negative input
X -= X.min()
estimator = clone(estimator_orig)
y = multioutput_estimator_convert_y_2d(estimator, y)
set_random_state(estimator)
assert_true(estimator.fit(X, y) is estimator)
@ignore_warnings
def check_estimators_unfitted(name, estimator_orig):
"""Check that predict raises an exception in an unfitted estimator.
Unfitted estimators should raise either AttributeError or ValueError.
The specific exception type NotFittedError inherits from both and can
therefore be adequately raised for that purpose.
"""
# Common test for Regressors as well as Classifiers
X, y = _boston_subset()
est = clone(estimator_orig)
msg = "fit"
if hasattr(est, 'predict'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict, X)
if hasattr(est, 'decision_function'):
assert_raise_message((AttributeError, ValueError), msg,
est.decision_function, X)
if hasattr(est, 'predict_proba'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict_proba, X)
if hasattr(est, 'predict_log_proba'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict_log_proba, X)
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_supervised_y_2d(name, estimator_orig):
if "MultiTask" in name:
# These only work on 2d, so this test makes no sense
return
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 3))
y = np.arange(10) % 3
estimator = clone(estimator_orig)
set_random_state(estimator)
# fit
estimator.fit(X, y)
y_pred = estimator.predict(X)
set_random_state(estimator)
# Check that when a 2D y is given, a DataConversionWarning is
# raised
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", DataConversionWarning)
warnings.simplefilter("ignore", RuntimeWarning)
estimator.fit(X, y[:, np.newaxis])
y_pred_2d = estimator.predict(X)
msg = "expected 1 DataConversionWarning, got: %s" % (
", ".join([str(w_x) for w_x in w]))
if name not in MULTI_OUTPUT:
# check that we warned if we don't support multi-output
assert_greater(len(w), 0, msg)
assert_true("DataConversionWarning('A column-vector y"
" was passed when a 1d array was expected" in msg)
assert_allclose(y_pred.ravel(), y_pred_2d.ravel())
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_classifiers_classes(name, classifier_orig):
X, y = make_blobs(n_samples=30, random_state=0, cluster_std=0.1)
X, y = shuffle(X, y, random_state=7)
X = StandardScaler().fit_transform(X)
# We need to make sure that we have non negative data, for things
# like NMF
X -= X.min() - .1
y_names = np.array(["one", "two", "three"])[y]
for y_names in [y_names, y_names.astype('O')]:
if name in ["LabelPropagation", "LabelSpreading"]:
# TODO some complication with -1 label
y_ = y
else:
y_ = y_names
classes = np.unique(y_)
classifier = clone(classifier_orig)
if name == 'BernoulliNB':
classifier.set_params(binarize=X.mean())
set_random_state(classifier)
# fit
classifier.fit(X, y_)
y_pred = classifier.predict(X)
# training set performance
assert_array_equal(np.unique(y_), np.unique(y_pred))
if np.any(classifier.classes_ != classes):
print("Unexpected classes_ attribute for %r: "
"expected %s, got %s" %
(classifier, classes, classifier.classes_))
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_regressors_int(name, regressor_orig):
X, _ = _boston_subset()
X = X[:50]
rnd = np.random.RandomState(0)
y = rnd.randint(3, size=X.shape[0])
y = multioutput_estimator_convert_y_2d(regressor_orig, y)
rnd = np.random.RandomState(0)
# separate estimators to control random seeds
regressor_1 = clone(regressor_orig)
regressor_2 = clone(regressor_orig)
set_random_state(regressor_1)
set_random_state(regressor_2)
if name in CROSS_DECOMPOSITION:
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
# fit
regressor_1.fit(X, y_)
pred1 = regressor_1.predict(X)
regressor_2.fit(X, y_.astype(np.float))
pred2 = regressor_2.predict(X)
assert_allclose(pred1, pred2, atol=1e-2, err_msg=name)
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_regressors_train(name, regressor_orig):
X, y = _boston_subset()
y = StandardScaler().fit_transform(y.reshape(-1, 1)) # X is already scaled
y = y.ravel()
regressor = clone(regressor_orig)
y = multioutput_estimator_convert_y_2d(regressor, y)
rnd = np.random.RandomState(0)
if not hasattr(regressor, 'alphas') and hasattr(regressor, 'alpha'):
# linear regressors need to set alpha, but not generalized CV ones
regressor.alpha = 0.01
if name == 'PassiveAggressiveRegressor':
regressor.C = 0.01
# raises error on malformed input for fit
assert_raises(ValueError, regressor.fit, X, y[:-1])
# fit
if name in CROSS_DECOMPOSITION:
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
set_random_state(regressor)
regressor.fit(X, y_)
regressor.fit(X.tolist(), y_.tolist())
y_pred = regressor.predict(X)
assert_equal(y_pred.shape, y_.shape)
# TODO: find out why PLS and CCA fail. RANSAC is random
# and furthermore assumes the presence of outliers, hence
# skipped
if name not in ('PLSCanonical', 'CCA', 'RANSACRegressor'):
assert_greater(regressor.score(X, y_), 0.5)
@ignore_warnings
def check_regressors_no_decision_function(name, regressor_orig):
# checks whether regressors have decision_function or predict_proba
rng = np.random.RandomState(0)
X = rng.normal(size=(10, 4))
regressor = clone(regressor_orig)
y = multioutput_estimator_convert_y_2d(regressor, X[:, 0])
if hasattr(regressor, "n_components"):
# FIXME CCA, PLS is not robust to rank 1 effects
regressor.n_components = 1
regressor.fit(X, y)
funcs = ["decision_function", "predict_proba", "predict_log_proba"]
for func_name in funcs:
func = getattr(regressor, func_name, None)
if func is None:
# doesn't have function
continue
# has function. Should raise deprecation warning
msg = func_name
assert_warns_message(DeprecationWarning, msg, func, X)
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_class_weight_classifiers(name, classifier_orig):
if name == "NuSVC":
# the sparse version has a parameter that doesn't do anything
raise SkipTest
if name.endswith("NB"):
# NaiveBayes classifiers have a somewhat different interface.
# FIXME SOON!
raise SkipTest
for n_centers in [2, 3]:
# create a very noisy dataset
X, y = make_blobs(centers=n_centers, random_state=0, cluster_std=20)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=0)
n_centers = len(np.unique(y_train))
if n_centers == 2:
class_weight = {0: 1000, 1: 0.0001}
else:
class_weight = {0: 1000, 1: 0.0001, 2: 0.0001}
classifier = clone(classifier_orig).set_params(
class_weight=class_weight)
if hasattr(classifier, "n_iter"):
classifier.set_params(n_iter=100)
if hasattr(classifier, "max_iter"):
classifier.set_params(max_iter=1000)
if hasattr(classifier, "min_weight_fraction_leaf"):
classifier.set_params(min_weight_fraction_leaf=0.01)
set_random_state(classifier)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
# XXX: Generally can use 0.89 here. On Windows, LinearSVC gets
# 0.88 (Issue #9111)
assert_greater(np.mean(y_pred == 0), 0.87)
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_class_weight_balanced_classifiers(name, classifier_orig, X_train,
y_train, X_test, y_test, weights):
classifier = clone(classifier_orig)
if hasattr(classifier, "n_iter"):
classifier.set_params(n_iter=100)
if hasattr(classifier, "max_iter"):
classifier.set_params(max_iter=1000)
set_random_state(classifier)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
classifier.set_params(class_weight='balanced')
classifier.fit(X_train, y_train)
y_pred_balanced = classifier.predict(X_test)
assert_greater(f1_score(y_test, y_pred_balanced, average='weighted'),
f1_score(y_test, y_pred, average='weighted'))
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_class_weight_balanced_linear_classifier(name, Classifier):
"""Test class weights with non-contiguous class labels."""
# this is run on classes, not instances, though this should be changed
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = np.array([1, 1, 1, -1, -1])
classifier = Classifier()
if hasattr(classifier, "n_iter"):
        # This is a very small dataset; the default n_iter is likely too
        # small for convergence
classifier.set_params(n_iter=1000)
if hasattr(classifier, "max_iter"):
classifier.set_params(max_iter=1000)
set_random_state(classifier)
# Let the model compute the class frequencies
classifier.set_params(class_weight='balanced')
coef_balanced = classifier.fit(X, y).coef_.copy()
# Count each label occurrence to reweight manually
n_samples = len(y)
n_classes = float(len(np.unique(y)))
class_weight = {1: n_samples / (np.sum(y == 1) * n_classes),
-1: n_samples / (np.sum(y == -1) * n_classes)}
classifier.set_params(class_weight=class_weight)
coef_manual = classifier.fit(X, y).coef_.copy()
assert_allclose(coef_balanced, coef_manual)
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_estimators_overwrite_params(name, estimator_orig):
X, y = make_blobs(random_state=0, n_samples=9)
# some want non-negative input
X -= X.min()
estimator = clone(estimator_orig)
y = multioutput_estimator_convert_y_2d(estimator, y)
set_random_state(estimator)
# Make a physical copy of the original estimator parameters before fitting.
params = estimator.get_params()
original_params = deepcopy(params)
# Fit the model
estimator.fit(X, y)
# Compare the state of the model parameters with the original parameters
new_params = estimator.get_params()
for param_name, original_value in original_params.items():
new_value = new_params[param_name]
# We should never change or mutate the internal state of input
# parameters by default. To check this we use the joblib.hash function
# that introspects recursively any subobjects to compute a checksum.
# The only exception to this rule of immutable constructor parameters
        # is a possible RandomState instance, but in this check we explicitly
# fixed the random_state params recursively to be integer seeds.
assert_equal(hash(new_value), hash(original_value),
"Estimator %s should not change or mutate "
" the parameter %s from %s to %s during fit."
% (name, param_name, original_value, new_value))
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_no_fit_attributes_set_in_init(name, Estimator):
"""Check that Estimator.__init__ doesn't set trailing-_ attributes."""
# this check works on classes, not instances
estimator = Estimator()
for attr in dir(estimator):
if attr.endswith("_") and not attr.startswith("__"):
# This check is for properties, they can be listed in dir
# while at the same time have hasattr return False as long
# as the property getter raises an AttributeError
assert_false(
hasattr(estimator, attr),
"By convention, attributes ending with '_' are "
'estimated from data in scikit-learn. Consequently they '
'should not be initialized in the constructor of an '
'estimator but in the fit method. Attribute {!r} '
'was found in estimator {}'.format(attr, name))
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_sparsify_coefficients(name, estimator_orig):
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1],
[-1, -2], [2, 2], [-2, -2]])
y = [1, 1, 1, 2, 2, 2, 3, 3, 3]
est = clone(estimator_orig)
est.fit(X, y)
pred_orig = est.predict(X)
# test sparsify with dense inputs
est.sparsify()
assert_true(sparse.issparse(est.coef_))
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
# pickle and unpickle with sparse coef_
est = pickle.loads(pickle.dumps(est))
assert_true(sparse.issparse(est.coef_))
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
@ignore_warnings(category=DeprecationWarning)
def check_classifier_data_not_an_array(name, estimator_orig):
X = np.array([[3, 0], [0, 1], [0, 2], [1, 1], [1, 2], [2, 1]])
y = [1, 1, 1, 2, 2, 2]
y = multioutput_estimator_convert_y_2d(estimator_orig, y)
check_estimators_data_not_an_array(name, estimator_orig, X, y)
@ignore_warnings(category=DeprecationWarning)
def check_regressor_data_not_an_array(name, estimator_orig):
X, y = _boston_subset(n_samples=50)
y = multioutput_estimator_convert_y_2d(estimator_orig, y)
check_estimators_data_not_an_array(name, estimator_orig, X, y)
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_estimators_data_not_an_array(name, estimator_orig, X, y):
if name in CROSS_DECOMPOSITION:
raise SkipTest
# separate estimators to control random seeds
estimator_1 = clone(estimator_orig)
estimator_2 = clone(estimator_orig)
set_random_state(estimator_1)
set_random_state(estimator_2)
y_ = NotAnArray(np.asarray(y))
X_ = NotAnArray(np.asarray(X))
# fit
estimator_1.fit(X_, y_)
pred1 = estimator_1.predict(X_)
estimator_2.fit(X, y)
pred2 = estimator_2.predict(X)
assert_allclose(pred1, pred2, atol=1e-2, err_msg=name)
def check_parameters_default_constructible(name, Estimator):
# this check works on classes, not instances
classifier = LinearDiscriminantAnalysis()
# test default-constructibility
# get rid of deprecation warnings
with ignore_warnings(category=(DeprecationWarning, FutureWarning)):
if name in META_ESTIMATORS:
estimator = Estimator(classifier)
else:
estimator = Estimator()
# test cloning
clone(estimator)
# test __repr__
repr(estimator)
# test that set_params returns self
assert_true(estimator.set_params() is estimator)
# test if init does nothing but set parameters
# this is important for grid_search etc.
# We get the default parameters from init and then
# compare these against the actual values of the attributes.
# this comes from getattr. Gets rid of deprecation decorator.
init = getattr(estimator.__init__, 'deprecated_original',
estimator.__init__)
try:
def param_filter(p):
"""Identify hyper parameters of an estimator"""
return (p.name != 'self' and
p.kind != p.VAR_KEYWORD and
p.kind != p.VAR_POSITIONAL)
init_params = [p for p in signature(init).parameters.values()
if param_filter(p)]
except (TypeError, ValueError):
# init is not a python function.
# true for mixins
return
params = estimator.get_params()
if name in META_ESTIMATORS:
# they can need a non-default argument
init_params = init_params[1:]
for init_param in init_params:
assert_not_equal(init_param.default, init_param.empty,
"parameter %s for %s has no default value"
% (init_param.name, type(estimator).__name__))
assert_in(type(init_param.default),
[str, int, float, bool, tuple, type(None),
np.float64, types.FunctionType, Memory])
if init_param.name not in params.keys():
# deprecated parameter, not in get_params
assert_true(init_param.default is None)
continue
if (issubclass(Estimator, BaseSGD) and
init_param.name in ['tol', 'max_iter']):
# To remove in 0.21, when they get their future default values
continue
param_value = params[init_param.name]
if isinstance(param_value, np.ndarray):
assert_array_equal(param_value, init_param.default)
else:
assert_equal(param_value, init_param.default, init_param.name)
def multioutput_estimator_convert_y_2d(estimator, y):
    # MultiTask estimators raise ValueError if y is 1-D.
    # Convert y into 2-D for those estimators.
if "MultiTask" in estimator.__class__.__name__:
return np.reshape(y, (-1, 1))
return y
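# Illustrative sketch: MultiTaskLasso is one real example of an estimator whose
# class name contains "MultiTask"; its y is reshaped to a column vector, while
# other estimators receive y unchanged.
def _example_convert_y_2d():  # pragma: no cover - illustrative only
    from sklearn.linear_model import MultiTaskLasso, Lasso
    y = np.arange(4)
    assert multioutput_estimator_convert_y_2d(MultiTaskLasso(), y).shape == (4, 1)
    assert multioutput_estimator_convert_y_2d(Lasso(), y).shape == (4,)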
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_non_transformer_estimators_n_iter(name, estimator_orig):
    # Test that estimators that are not transformers and that have a
    # max_iter parameter report an n_iter_ attribute of at least 1.
# These models are dependent on external solvers like
# libsvm and accessing the iter parameter is non-trivial.
not_run_check_n_iter = ['Ridge', 'SVR', 'NuSVR', 'NuSVC',
'RidgeClassifier', 'SVC', 'RandomizedLasso',
'LogisticRegressionCV', 'LinearSVC',
'LogisticRegression']
# Tested in test_transformer_n_iter
not_run_check_n_iter += CROSS_DECOMPOSITION
if name in not_run_check_n_iter:
return
    # LassoLars stops early for the default alpha=1.0 on the iris dataset.
if name == 'LassoLars':
estimator = clone(estimator_orig).set_params(alpha=0.)
else:
estimator = clone(estimator_orig)
if hasattr(estimator, 'max_iter'):
iris = load_iris()
X, y_ = iris.data, iris.target
y_ = multioutput_estimator_convert_y_2d(estimator, y_)
set_random_state(estimator, 0)
if name == 'AffinityPropagation':
estimator.fit(X)
else:
estimator.fit(X, y_)
# HuberRegressor depends on scipy.optimize.fmin_l_bfgs_b
# which doesn't return a n_iter for old versions of SciPy.
if not (name == 'HuberRegressor' and estimator.n_iter_ is None):
assert_greater_equal(estimator.n_iter_, 1)
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_transformer_n_iter(name, estimator_orig):
    # Test that transformers with a max_iter parameter report an
    # n_iter_ attribute of at least 1.
estimator = clone(estimator_orig)
if hasattr(estimator, "max_iter"):
if name in CROSS_DECOMPOSITION:
# Check using default data
X = [[0., 0., 1.], [1., 0., 0.], [2., 2., 2.], [2., 5., 4.]]
y_ = [[0.1, -0.2], [0.9, 1.1], [0.1, -0.5], [0.3, -0.2]]
else:
X, y_ = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X -= X.min() - 0.1
set_random_state(estimator, 0)
estimator.fit(X, y_)
        # These return an n_iter per component.
if name in CROSS_DECOMPOSITION:
for iter_ in estimator.n_iter_:
assert_greater_equal(iter_, 1)
else:
assert_greater_equal(estimator.n_iter_, 1)
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_get_params_invariance(name, estimator_orig):
# Checks if get_params(deep=False) is a subset of get_params(deep=True)
class T(BaseEstimator):
"""Mock classifier
"""
def __init__(self):
pass
def fit(self, X, y):
return self
def transform(self, X):
return X
e = clone(estimator_orig)
shallow_params = e.get_params(deep=False)
deep_params = e.get_params(deep=True)
assert_true(all(item in deep_params.items() for item in
shallow_params.items()))
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_classifiers_regression_target(name, estimator_orig):
# Check if classifier throws an exception when fed regression targets
boston = load_boston()
X, y = boston.data, boston.target
e = clone(estimator_orig)
msg = 'Unknown label type: '
assert_raises_regex(ValueError, msg, e.fit, X, y)
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_decision_proba_consistency(name, estimator_orig):
# Check whether an estimator having both decision_function and
# predict_proba methods has outputs with perfect rank correlation.
centers = [(2, 2), (4, 4)]
X, y = make_blobs(n_samples=100, random_state=0, n_features=4,
centers=centers, cluster_std=1.0, shuffle=True)
X_test = np.random.randn(20, 2) + 4
estimator = clone(estimator_orig)
if (hasattr(estimator, "decision_function") and
hasattr(estimator, "predict_proba")):
estimator.fit(X, y)
a = estimator.predict_proba(X_test)[:, 1]
b = estimator.decision_function(X_test)
assert_array_equal(rankdata(a), rankdata(b))
| mit |
xwolf12/scikit-learn | examples/applications/wikipedia_principal_eigenvector.py | 233 | 7819 | """
===============================
Wikipedia principal eigenvector
===============================
A classical way to assess the relative importance of vertices in a
graph is to compute the principal eigenvector of the adjacency matrix
so as to assign to each vertex the values of the components of the first
eigenvector as a centrality score:
http://en.wikipedia.org/wiki/Eigenvector_centrality
On the graph of webpages and links those values are called the PageRank
scores by Google.
The goal of this example is to analyze the graph of links inside
wikipedia articles to rank articles by relative importance according to
this eigenvector centrality.
The traditional way to compute the principal eigenvector is to use the
power iteration method:
http://en.wikipedia.org/wiki/Power_iteration
Here the computation is achieved thanks to Martinsson's Randomized SVD
algorithm implemented in scikit-learn.
The graph data is fetched from the DBpedia dumps. DBpedia is an extraction
of the latent structured data of the Wikipedia content.
"""
# Author: Olivier Grisel <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from bz2 import BZ2File
import os
from datetime import datetime
from pprint import pprint
from time import time
import numpy as np
from scipy import sparse
from sklearn.decomposition import randomized_svd
from sklearn.externals.joblib import Memory
from sklearn.externals.six.moves.urllib.request import urlopen
from sklearn.externals.six import iteritems
print(__doc__)
###############################################################################
# Where to download the data, if not already on disk
redirects_url = "http://downloads.dbpedia.org/3.5.1/en/redirects_en.nt.bz2"
redirects_filename = redirects_url.rsplit("/", 1)[1]
page_links_url = "http://downloads.dbpedia.org/3.5.1/en/page_links_en.nt.bz2"
page_links_filename = page_links_url.rsplit("/", 1)[1]
resources = [
(redirects_url, redirects_filename),
(page_links_url, page_links_filename),
]
for url, filename in resources:
if not os.path.exists(filename):
print("Downloading data from '%s', please wait..." % url)
opener = urlopen(url)
open(filename, 'wb').write(opener.read())
print()
###############################################################################
# Loading the redirect files
memory = Memory(cachedir=".")
def index(redirects, index_map, k):
"""Find the index of an article name after redirect resolution"""
k = redirects.get(k, k)
return index_map.setdefault(k, len(index_map))
DBPEDIA_RESOURCE_PREFIX_LEN = len("http://dbpedia.org/resource/")
SHORTNAME_SLICE = slice(DBPEDIA_RESOURCE_PREFIX_LEN + 1, -1)
def short_name(nt_uri):
"""Remove the < and > URI markers and the common URI prefix"""
return nt_uri[SHORTNAME_SLICE]
def get_redirects(redirects_filename):
"""Parse the redirections and build a transitively closed map out of it"""
redirects = {}
print("Parsing the NT redirect file")
for l, line in enumerate(BZ2File(redirects_filename)):
split = line.split()
if len(split) != 4:
print("ignoring malformed line: " + line)
continue
redirects[short_name(split[0])] = short_name(split[2])
if l % 1000000 == 0:
print("[%s] line: %08d" % (datetime.now().isoformat(), l))
# compute the transitive closure
print("Computing the transitive closure of the redirect relation")
for l, source in enumerate(redirects.keys()):
transitive_target = None
target = redirects[source]
seen = set([source])
while True:
transitive_target = target
target = redirects.get(target)
if target is None or target in seen:
break
seen.add(target)
redirects[source] = transitive_target
if l % 1000000 == 0:
print("[%s] line: %08d" % (datetime.now().isoformat(), l))
return redirects
# disabling joblib as the pickling of large dicts seems much too slow
#@memory.cache
def get_adjacency_matrix(redirects_filename, page_links_filename, limit=None):
"""Extract the adjacency graph as a scipy sparse matrix
Redirects are resolved first.
Returns X, the scipy sparse adjacency matrix, redirects as python
dict from article names to article names and index_map a python dict
from article names to python int (article indexes).
"""
print("Computing the redirect map")
redirects = get_redirects(redirects_filename)
print("Computing the integer index map")
index_map = dict()
links = list()
for l, line in enumerate(BZ2File(page_links_filename)):
split = line.split()
if len(split) != 4:
print("ignoring malformed line: " + line)
continue
i = index(redirects, index_map, short_name(split[0]))
j = index(redirects, index_map, short_name(split[2]))
links.append((i, j))
if l % 1000000 == 0:
print("[%s] line: %08d" % (datetime.now().isoformat(), l))
if limit is not None and l >= limit - 1:
break
print("Computing the adjacency matrix")
X = sparse.lil_matrix((len(index_map), len(index_map)), dtype=np.float32)
for i, j in links:
X[i, j] = 1.0
del links
print("Converting to CSR representation")
X = X.tocsr()
print("CSR conversion done")
return X, redirects, index_map
# stop after 5M links to make it possible to work in RAM
X, redirects, index_map = get_adjacency_matrix(
redirects_filename, page_links_filename, limit=5000000)
names = dict((i, name) for name, i in iteritems(index_map))
print("Computing the principal singular vectors using randomized_svd")
t0 = time()
U, s, V = randomized_svd(X, 5, n_iter=3)
print("done in %0.3fs" % (time() - t0))
# print the names of the Wikipedia pages with the strongest components of the
# principal singular vector, which should be similar to the principal eigenvector
print("Top wikipedia pages according to principal singular vectors")
pprint([names[i] for i in np.abs(U.T[0]).argsort()[-10:]])
pprint([names[i] for i in np.abs(V[0]).argsort()[-10:]])
def centrality_scores(X, alpha=0.85, max_iter=100, tol=1e-10):
"""Power iteration computation of the principal eigenvector
This method is also known as Google PageRank and the implementation
is based on the one from the NetworkX project (BSD licensed too)
with copyrights by:
Aric Hagberg <[email protected]>
Dan Schult <[email protected]>
Pieter Swart <[email protected]>
"""
n = X.shape[0]
X = X.copy()
incoming_counts = np.asarray(X.sum(axis=1)).ravel()
print("Normalizing the graph")
for i in incoming_counts.nonzero()[0]:
X.data[X.indptr[i]:X.indptr[i + 1]] *= 1.0 / incoming_counts[i]
dangle = np.asarray(np.where(X.sum(axis=1) == 0, 1.0 / n, 0)).ravel()
scores = np.ones(n, dtype=np.float32) / n # initial guess
for i in range(max_iter):
print("power iteration #%d" % i)
prev_scores = scores
scores = (alpha * (scores * X + np.dot(dangle, prev_scores))
+ (1 - alpha) * prev_scores.sum() / n)
# check convergence: normalized l_inf norm
scores_max = np.abs(scores).max()
if scores_max == 0.0:
scores_max = 1.0
err = np.abs(scores - prev_scores).max() / scores_max
print("error: %0.6f" % err)
if err < n * tol:
return scores
return scores
print("Computing principal eigenvector score using a power iteration method")
t0 = time()
scores = centrality_scores(X, max_iter=100, tol=1e-10)
print("done in %0.3fs" % (time() - t0))
pprint([names[i] for i in np.abs(scores).argsort()[-10:]])
| bsd-3-clause |
Achuth17/scikit-bio | skbio/util/_testing.py | 1 | 5213 | # ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
import os
import inspect
import pandas.util.testing as pdt
from nose import core
from nose.tools import nottest
from future.utils import PY3
from ._decorator import experimental
@nottest
class TestRunner(object):
"""Simple wrapper class around nosetests functionality.
Parameters
----------
filename : str
__file__ attribute passed in from the caller. This tells the
tester where to start looking for tests.
Notes
-----
The primary purpose of this class is to create an interface which users
    of scikit-bio can use to run all of the built-in tests. Normally this
would be done by invoking nosetests directly from the command line, but
scikit-bio needs several additional options which make the command long
and ugly. This class invokes nose with the required options.
"""
@experimental(as_of="0.4.0")
def __init__(self, filename):
self._filename = filename
self._test_dir = os.path.dirname(filename)
@experimental(as_of="0.4.0")
def test(self, verbose=False):
"""Performs the actual running of the tests.
Parameters
----------
verbose : bool
flag for running in verbose mode.
Returns
-------
bool
test run success status
"""
# NOTE: it doesn't seem to matter what the first element of the argv
# list is, there just needs to be something there.
argv = [self._filename, '-I DO_NOT_IGNORE_ANYTHING']
if not PY3:
argv.extend(['--with-doctest', '--doctest-tests'])
if verbose:
argv.append('-v')
return core.run(argv=argv, defaultTest=self._test_dir)
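# Illustrative usage sketch (an assumption about typical call sites, not part
# of the original module): packages usually expose this as a module-level
# ``test`` callable built from their own ``__file__``.
def _example_test_runner():  # pragma: no cover - illustrative only
    runner = TestRunner(__file__)
    return runner.test(verbose=True)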
@experimental(as_of="0.4.0")
def get_data_path(fn, subfolder='data'):
"""Return path to filename ``fn`` in the data folder.
During testing it is often necessary to load data files. This
function returns the full path to files in the ``data`` subfolder
by default.
Parameters
----------
fn : str
File name.
subfolder : str, defaults to ``data``
Name of the subfolder that contains the data.
Returns
-------
str
Inferred absolute path to the test data for the module where
``get_data_path(fn)`` is called.
Notes
-----
The requested path may not point to an existing file, as its
existence is not checked.
"""
# getouterframes returns a list of tuples: the second tuple
# contains info about the caller, and the second element is its
# filename
callers_filename = inspect.getouterframes(inspect.currentframe())[1][1]
path = os.path.dirname(os.path.abspath(callers_filename))
data_path = os.path.join(path, subfolder, fn)
return data_path
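# Illustrative sketch: 'sequences.fasta' is a hypothetical file name; the call
# resolves to <calling module's dir>/data/sequences.fasta and does not check
# that the file actually exists.
def _example_get_data_path():  # pragma: no cover - illustrative only
    return get_data_path('sequences.fasta')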
@experimental(as_of="0.4.0")
def assert_data_frame_almost_equal(left, right):
"""Raise AssertionError if ``pd.DataFrame`` objects are not "almost equal".
Wrapper of ``pd.util.testing.assert_frame_equal``. Floating point values
are considered "almost equal" if they are within a threshold defined by
``assert_frame_equal``. This wrapper uses a number of
checks that are turned off by default in ``assert_frame_equal`` in order to
perform stricter comparisons (for example, ensuring the index and column
types are the same). It also does not consider empty ``pd.DataFrame``
objects equal if they have a different index.
Other notes:
* Index (row) and column ordering must be the same for objects to be equal.
* NaNs (``np.nan``) in the same locations are considered equal.
This is a helper function intended to be used in unit tests that need to
compare ``pd.DataFrame`` objects.
Parameters
----------
left, right : pd.DataFrame
``pd.DataFrame`` objects to compare.
Raises
------
AssertionError
If `left` and `right` are not "almost equal".
See Also
--------
pandas.util.testing.assert_frame_equal
"""
# pass all kwargs to ensure this function has consistent behavior even if
# `assert_frame_equal`'s defaults change
pdt.assert_frame_equal(left, right,
check_dtype=True,
check_index_type=True,
check_column_type=True,
check_frame_type=True,
check_less_precise=False,
check_names=True,
by_blocks=False,
check_exact=False)
# this check ensures that empty DataFrames with different indices do not
# compare equal. exact=True specifies that the type of the indices must be
# exactly the same
pdt.assert_index_equal(left.index, right.index,
exact=True,
check_names=True)
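# Illustrative sketch: two DataFrames differing only by a tiny floating point
# error compare "almost equal"; pandas is imported locally to keep the example
# self-contained.
def _example_assert_data_frame_almost_equal():  # pragma: no cover
    import pandas as pd
    left = pd.DataFrame({'a': [1.0, 2.0]}, index=['x', 'y'])
    right = pd.DataFrame({'a': [1.0, 2.0 + 1e-12]}, index=['x', 'y'])
    assert_data_frame_almost_equal(left, right)  # within tolerance -> no error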
| bsd-3-clause |
rafwiewiora/msmbuilder | msmbuilder/project_templates/cluster/sample-clusters-plot.py | 9 | 2612 | """Plot the result of sampling clusters
{{header}}
"""
# ? include "plot_header.template"
# ? from "plot_macros.template" import xdg_open with context
import numpy as np
import seaborn as sns
from matplotlib import pyplot as plt
from msmbuilder.io import load_trajs, load_generic
sns.set_style('ticks')
colors = sns.color_palette()
## Load
meta, ttrajs = load_trajs('ttrajs')
txx = np.concatenate(list(ttrajs.values()))
kmeans = load_generic('kmeans.pickl')
inds = load_generic("cluster-sample-inds.pickl")
coordinates = [
np.asarray([ttrajs[traj_i][frame_i, :] for traj_i, frame_i in state_inds])
for state_inds in inds
]
## Overlay sampled states on histogram
def plot_sampled_states(ax):
ax.hexbin(txx[:, 0], txx[:, 1],
cmap='magma_r',
mincnt=1,
bins='log',
alpha=0.8,
)
# Show sampled points as scatter
# Annotate cluster index
for i, coo in enumerate(coordinates):
plt.scatter(coo[:, 0], coo[:, 1], c=colors[i % 6], s=40)
ax.text(kmeans.cluster_centers_[i, 0],
kmeans.cluster_centers_[i, 1],
"{}".format(i),
ha='center',
va='center',
size=16,
bbox=dict(
boxstyle='round',
fc='w',
ec="0.5",
alpha=0.9,
),
zorder=10,
)
ax.set_xlabel("tIC 1", fontsize=16)
ax.set_ylabel("tIC 2", fontsize=16)
## Render a script for loading in vmd
def load_in_vmd(dirname='cluster_samples'):
k = len(inds[0])
templ = [
'# autogenerated by msmbuilder',
'# open with `vmd -e load-cluster-samples.tcl`',
'',
'# Defaults',
'mol default material Transparent',
'mol default representation NewCartoon',
'',
]
for i in range(len(inds)):
templ += [
'# State {}'.format(i),
'mol new top.pdb',
'mol addfile {}/{}.xtc waitfor all'.format(dirname, i),
'animate delete beg 0 end 0 top',
'mol rename top State-{}'.format(i),
'mol modcolor 0 top ColorID {}'.format(i),
'mol drawframes top 0 0:{k}'.format(k=k),
'',
]
return '\n'.join(templ)
## Plot
fig, ax = plt.subplots(figsize=(7, 5))
plot_sampled_states(ax)
fig.tight_layout()
fig.savefig('cluster-samples.pdf')
# {{xdg_open('cluster-samples.pdf')}}
## Render vmd
with open('load-cluster-samples.tcl', 'w') as f:
f.write(load_in_vmd())
| lgpl-2.1 |
sawenzel/AliceO2 | Detectors/MUON/MCH/Raw/ElecMap/src/elecmap.py | 3 | 8911 | #!/usr/bin/env python
from subprocess import call
import os
import argparse
from oauth2client.service_account import ServiceAccountCredentials
import gspread
import numpy as np
import pandas as pd
def gencode_clang_format(filename):
""" Run clang-format on file """
clang_format = ["clang-format", "-i", filename]
return call(clang_format)
def gencode_open_generated(filename):
""" Open a new file and add a Copyright on it """
out = open(filename, "w")
gencode_generated_code(out)
return out
def gencode_close_generated(out):
""" Format and close """
out.close()
gencode_clang_format(out.name)
def gencode_generated_code(out):
""" Add full O2 Copyright to out"""
out.write('''// Copyright CERN and copyright holders of ALICE O2. This software is
// distributed under the terms of the GNU General Public License v3 (GPL
// Version 3), copied verbatim in the file "COPYING".
//
// See http://alice-o2.web.cern.ch/license for full licensing information.
//
// In applying this license CERN does not waive the privileges and immunities
// granted to it by virtue of its status as an Intergovernmental Organization
// or submit itself to any jurisdiction.
///
/// GENERATED CODE ! DO NOT EDIT !
///
''')
def gencode_insert_row_in_map(out, row):
def insert_in_map(dsid, index):
out.write("add(e2d,{},{},{},{},{});\n"
.format(row.de_id, dsid, row.solar_id, row.group_id, index))
insert_in_map(row.ds_id_0, 0)
insert_in_map(row.ds_id_1, 1)
if row.ds_id_2:
insert_in_map(row.ds_id_2, 2)
if row.ds_id_3:
insert_in_map(row.ds_id_3, 3)
if row.ds_id_4:
insert_in_map(row.ds_id_4, 4)
def gencode_do(df, df_cru, solar_map, chamber):
""" Generate code for one chamber
Information from the dataframe df is used to create c++ code that
builds a couple of std::map
"""
out = gencode_open_generated(chamber + ".cxx")
out.write('''
#include "CH.cxx"
''')
out.write(
"void fillElec2Det{}(std::map<uint32_t,uint32_t>& e2d){{".format(chamber))
for row in df.itertuples():
gencode_insert_row_in_map(out, row)
out.write("}")
out.write(
"void fillSolar2FeeLink{}(std::map<uint16_t, uint32_t>& s2f){{".format(chamber))
for row in df_cru.itertuples():
if len(row.solar_id) > 0:
out.write("add_cru(s2f,{},{},{});\n".format(
row.fee_id, int(row.link_id)%12, row.solar_id))
out.write("}")
gencode_close_generated(out)
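# For illustration (values are hypothetical): the generated CH*.cxx contains
# lines of the form
#     add(e2d, 100, 4, 12, 3, 0);
#     add_cru(s2f, 728, 2, 96);
# one add(...) per (detection element, dual sampa) entry of the mapping and
# one add_cru(...) per fee-link / solar association.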
def gs_read_sheet(credential_file, workbook, sheet_name):
""" Read a Google Spreadsheet
"""
scope = ['https://spreadsheets.google.com/feeds',
'https://www.googleapis.com/auth/drive']
credentials = ServiceAccountCredentials.from_json_keyfile_name(
credential_file, scope) # Your json file here
gc = gspread.authorize(credentials)
wks = gc.open(workbook).worksheet(sheet_name)
data = wks.get_all_values()
cols = np.array([0, 1, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
df = pd.DataFrame(np.asarray(data)[:, cols], columns=["cru", "fiber", "crate", "solar",
"solar_local_id", "j", "solar_id", "flat_cable_name",
"length", "de",
"ds1", "ds2", "ds3", "ds4", "ds5"])
return df.iloc[3:]
def gs_read_sheet_cru(credential_file, workbook, sheet_name):
""" Read a Google Spreadsheet
"""
scope = ['https://spreadsheets.google.com/feeds',
'https://www.googleapis.com/auth/drive']
credentials = ServiceAccountCredentials.from_json_keyfile_name(
credential_file, scope) # Your json file here
gc = gspread.authorize(credentials)
wks = gc.open(workbook).worksheet(sheet_name)
data = wks.get_all_values()
# LINK ID CRU ID CRU LINK DWP CRU ADDR DW ADDR FEE ID
cols = np.array([0, 1, 2, 3, 4, 5,6,7])
df = pd.DataFrame(np.asarray(data)[:, cols],
columns=["solar_id", "cru_id", "link_id", "cru_sn",
"dwp", "cru_address_0", "cru_address_1",
"fee_id"])
return df.iloc[1:]
def excel_get_dataframe(filename, sheet):
""" Read a dataframe from an excel file """
f = pd.read_excel(filename, sheet_name=sheet,
names=["cru", "fiber", "crate", "solar",
"solar_local_id", "j", "slat",
"length", "de",
"ds1", "ds2", "ds3", "ds4", "ds5"],
usecols="A:N",
na_values=[" "],
na_filter=True)
return f
def excel_is_valid_file(excel_parser, arg, sheet):
print(arg, sheet)
if not os.path.isfile(arg):
return excel_parser.error("The file %s does not exist!" % arg)
return excel_get_dataframe(arg, sheet)
def _simplify_dataframe(df):
""" Do some cleanup on the dataframe """
# remove lines where only the "CRATE #" column is
# different from NaN
df = df[df.crate != ""]
# row_list is a dictionary where we'll put only the information we need
# from the input DataFrame (df)
row_list = []
solar_map = {}
for row in df.itertuples():
# print(row)
crate = int(str(row.crate).strip('C '))
solar_pos = int(row.solar.split('-')[2].strip('S '))-1
group_id = int(row.solar.split('-')[3].strip('J '))-1
solar_id = crate*8 + solar_pos
de_id = int(row.de)
d = dict({
'cru_id': row.cru,
'solar_id': solar_id,
'group_id': group_id,
'de_id': de_id,
'ds_id_0': int(row.ds1)
})
d['ds_id_1'] = int(row.ds2) if pd.notna(
row.ds2) and len(row.ds2) > 0 else 0
d['ds_id_2'] = int(row.ds3) if pd.notna(
row.ds3) and len(row.ds3) > 0 else 0
d['ds_id_3'] = int(row.ds4) if pd.notna(
row.ds4) and len(row.ds4) > 0 else 0
d['ds_id_4'] = int(row.ds5) if pd.notna(
row.ds5) and len(row.ds5) > 0 else 0
solar_map[solar_id] = de_id
row_list.append(d)
# create the output DataFrame (sf) from the row_list dict
sf = pd.DataFrame(row_list, dtype=np.int16)
print("solar_map", solar_map)
return sf, solar_map
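# Worked example (hypothetical input row): crate 'C 1', solar 'CR01-CRT1-S 3-J 2'
# and de '100' give solar_pos = 2 and group_id = 1, hence
# solar_id = 1 * 8 + 2 = 10 and the row dict starts as
#     {'solar_id': 10, 'group_id': 1, 'de_id': 100, ...}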
parser = argparse.ArgumentParser()
parser.add_argument('--excel', '-e', dest="excel_filename",
action="append",
help="input excel filename(s)")
parser.add_argument('--google_sheet', '-gs', dest="gs_name",
help="input google sheet name")
parser.add_argument('-s', '--sheet',
dest="sheet",
required=True,
help="name of the excel sheet to consider in the excel file")
parser.add_argument('-c', '--chamber',
dest="chamber",
help="output c++ code for chamber")
parser.add_argument('--verbose', '-v',
dest="verbose", default=False, action="store_true",
help="verbose")
parser.add_argument('--credentials',
dest="credentials",
help="json credential file for Google Sheet API access")
parser.add_argument("--fec_map", "-f",
dest="fecmapfile",
help="fec.map output filename")
args = parser.parse_args()
df = pd.DataFrame()
df_cru = pd.DataFrame()
if args.excel_filename:
for ifile in args.excel_filename:
df = df.append(excel_is_valid_file(parser, ifile, args.sheet))
if args.gs_name:
df = df.append(gs_read_sheet(args.credentials, args.gs_name, args.sheet))
df, solar_map = _simplify_dataframe(df)
df_cru = df_cru.append(gs_read_sheet_cru(args.credentials, args.gs_name,
args.sheet+" CRU map"))
if args.verbose:
print(df.to_string())
if args.chamber:
gencode_do(df, df_cru, solar_map, args.chamber)
if args.fecmapfile:
fec_string = df.to_string(
columns=["solar_id", "group_id", "de_id", "ds_id_0",
"ds_id_1", "ds_id_2", "ds_id_3", "ds_id_4"],
header=False,
index=False,
formatters={
"solar_id": lambda x: "%-6s" % x,
"group_id": lambda x: "%2s" % x,
"de_id": lambda x: "%9s " % x,
"ds_id_0": lambda x: " %-6s" % x,
"ds_id_1": lambda x: " %-6s" % x,
"ds_id_2": lambda x: " %-6s" % x,
"ds_id_3": lambda x: " %-6s" % x,
"ds_id_4": lambda x: " %-6s" % x,
})
fec_file = open(args.fecmapfile, "w")
fec_file.write(fec_string)
| gpl-3.0 |
petosegan/scikit-learn | sklearn/linear_model/tests/test_least_angle.py | 57 | 16523 | from nose.tools import assert_equal
import numpy as np
from scipy import linalg
from sklearn.cross_validation import train_test_split
from sklearn.externals import joblib
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_no_warnings, assert_warns
from sklearn.utils.testing import TempMemmap
from sklearn.utils import ConvergenceWarning
from sklearn import linear_model, datasets
from sklearn.linear_model.least_angle import _lars_path_residues
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
# TODO: use another dataset that has multiple drops
def test_simple():
# Principle of Lars is to keep covariances tied and decreasing
# also test verbose output
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
try:
sys.stdout = StringIO()
alphas_, active, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", verbose=10)
sys.stdout = old_stdout
for (i, coef_) in enumerate(coef_path_.T):
res = y - np.dot(X, coef_)
cov = np.dot(X.T, res)
C = np.max(abs(cov))
eps = 1e-3
ocur = len(cov[C - eps < abs(cov)])
if i < X.shape[1]:
assert_true(ocur == i + 1)
else:
# no more than max_pred variables can go into the active set
assert_true(ocur == X.shape[1])
finally:
sys.stdout = old_stdout
def test_simple_precomputed():
# The same, with precomputed Gram matrix
G = np.dot(diabetes.data.T, diabetes.data)
alphas_, active, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, Gram=G, method="lar")
for i, coef_ in enumerate(coef_path_.T):
res = y - np.dot(X, coef_)
cov = np.dot(X.T, res)
C = np.max(abs(cov))
eps = 1e-3
ocur = len(cov[C - eps < abs(cov)])
if i < X.shape[1]:
assert_true(ocur == i + 1)
else:
# no more than max_pred variables can go into the active set
assert_true(ocur == X.shape[1])
def test_all_precomputed():
# Test that lars_path with precomputed Gram and Xy gives the right answer
X, y = diabetes.data, diabetes.target
G = np.dot(X.T, X)
Xy = np.dot(X.T, y)
for method in 'lar', 'lasso':
output = linear_model.lars_path(X, y, method=method)
output_pre = linear_model.lars_path(X, y, Gram=G, Xy=Xy, method=method)
for expected, got in zip(output, output_pre):
assert_array_almost_equal(expected, got)
def test_lars_lstsq():
# Test that Lars gives least square solution at the end
# of the path
X1 = 3 * diabetes.data # use un-normalized dataset
clf = linear_model.LassoLars(alpha=0.)
clf.fit(X1, y)
coef_lstsq = np.linalg.lstsq(X1, y)[0]
assert_array_almost_equal(clf.coef_, coef_lstsq)
def test_lasso_gives_lstsq_solution():
# Test that Lars Lasso gives least square solution at the end
# of the path
alphas_, active, coef_path_ = linear_model.lars_path(X, y, method="lasso")
coef_lstsq = np.linalg.lstsq(X, y)[0]
assert_array_almost_equal(coef_lstsq, coef_path_[:, -1])
def test_collinearity():
# Check that lars_path is robust to collinearity in input
X = np.array([[3., 3., 1.],
[2., 2., 0.],
[1., 1., 0]])
y = np.array([1., 0., 0])
f = ignore_warnings
_, _, coef_path_ = f(linear_model.lars_path)(X, y, alpha_min=0.01)
assert_true(not np.isnan(coef_path_).any())
residual = np.dot(X, coef_path_[:, -1]) - y
assert_less((residual ** 2).sum(), 1.) # just make sure it's bounded
n_samples = 10
X = np.random.rand(n_samples, 5)
y = np.zeros(n_samples)
_, _, coef_path_ = linear_model.lars_path(X, y, Gram='auto', copy_X=False,
copy_Gram=False, alpha_min=0.,
method='lasso', verbose=0,
max_iter=500)
assert_array_almost_equal(coef_path_, np.zeros_like(coef_path_))
def test_no_path():
# Test that the ``return_path=False`` option returns the correct output
alphas_, active_, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar")
alpha_, active, coef = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_no_path_precomputed():
# Test that the ``return_path=False`` option with Gram remains correct
G = np.dot(diabetes.data.T, diabetes.data)
alphas_, active_, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", Gram=G)
alpha_, active, coef = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", Gram=G,
return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_no_path_all_precomputed():
# Test that the ``return_path=False`` option with Gram and Xy remains correct
X, y = 3 * diabetes.data, diabetes.target
G = np.dot(X.T, X)
Xy = np.dot(X.T, y)
alphas_, active_, coef_path_ = linear_model.lars_path(
X, y, method="lasso", Gram=G, Xy=Xy, alpha_min=0.9)
print("---")
alpha_, active, coef = linear_model.lars_path(
X, y, method="lasso", Gram=G, Xy=Xy, alpha_min=0.9, return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_singular_matrix():
# Test when input is a singular matrix
X1 = np.array([[1, 1.], [1., 1.]])
y1 = np.array([1, 1])
alphas, active, coef_path = linear_model.lars_path(X1, y1)
assert_array_almost_equal(coef_path.T, [[0, 0], [1, 0]])
def test_rank_deficient_design():
# consistency test that checks that LARS Lasso is handling rank
# deficient input data (with n_features < rank) in the same way
# as coordinate descent Lasso
y = [5, 0, 5]
for X in ([[5, 0],
[0, 5],
[10, 10]],
[[10, 10, 0],
[1e-32, 0, 0],
[0, 0, 1]],
):
# To be able to use the coefs to compute the objective function,
# we need to turn off normalization
lars = linear_model.LassoLars(.1, normalize=False)
coef_lars_ = lars.fit(X, y).coef_
obj_lars = (1. / (2. * 3.)
* linalg.norm(y - np.dot(X, coef_lars_)) ** 2
+ .1 * linalg.norm(coef_lars_, 1))
coord_descent = linear_model.Lasso(.1, tol=1e-6, normalize=False)
coef_cd_ = coord_descent.fit(X, y).coef_
obj_cd = ((1. / (2. * 3.)) * linalg.norm(y - np.dot(X, coef_cd_)) ** 2
+ .1 * linalg.norm(coef_cd_, 1))
assert_less(obj_lars, obj_cd * (1. + 1e-8))
def test_lasso_lars_vs_lasso_cd(verbose=False):
# Test that LassoLars and Lasso using coordinate descent give the
# same results.
X = 3 * diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso')
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
# similar test, with the classifiers
for alpha in np.linspace(1e-2, 1 - 1e-2, 20):
clf1 = linear_model.LassoLars(alpha=alpha, normalize=False).fit(X, y)
clf2 = linear_model.Lasso(alpha=alpha, tol=1e-8,
normalize=False).fit(X, y)
err = linalg.norm(clf1.coef_ - clf2.coef_)
assert_less(err, 1e-3)
# same test, with normalized data
X = diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso')
lasso_cd = linear_model.Lasso(fit_intercept=False, normalize=True,
tol=1e-8)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
def test_lasso_lars_vs_lasso_cd_early_stopping(verbose=False):
# Test that LassoLars and Lasso using coordinate descent give the
# same results when early stopping is used.
# (test : before, in the middle, and in the last part of the path)
alphas_min = [10, 0.9, 1e-4]
for alphas_min in alphas_min:
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
alpha_min=0.9)
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
lasso_cd.alpha = alphas[-1]
lasso_cd.fit(X, y)
error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
assert_less(error, 0.01)
alphas_min = [10, 0.9, 1e-4]
# same test, with normalization
for alphas_min in alphas_min:
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
alpha_min=0.9)
lasso_cd = linear_model.Lasso(fit_intercept=True, normalize=True,
tol=1e-8)
lasso_cd.alpha = alphas[-1]
lasso_cd.fit(X, y)
error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
assert_less(error, 0.01)
def test_lasso_lars_path_length():
# Test that the path length of the LassoLars is right
lasso = linear_model.LassoLars()
lasso.fit(X, y)
lasso2 = linear_model.LassoLars(alpha=lasso.alphas_[2])
lasso2.fit(X, y)
assert_array_almost_equal(lasso.alphas_[:3], lasso2.alphas_)
# Also check that the sequence of alphas is always decreasing
assert_true(np.all(np.diff(lasso.alphas_) < 0))
def test_lasso_lars_vs_lasso_cd_ill_conditioned():
# Test lasso lars on a very ill-conditioned design, and check that
# it does not blow up, and stays somewhat close to a solution given
# by the coordinate descent solver
# Also test that lasso_path (using lars_path output style) gives
# the same result as lars_path and previous lasso output style
# under these conditions.
rng = np.random.RandomState(42)
# Generate data
n, m = 70, 100
k = 5
X = rng.randn(n, m)
w = np.zeros((m, 1))
i = np.arange(0, m)
rng.shuffle(i)
supp = i[:k]
w[supp] = np.sign(rng.randn(k, 1)) * (rng.rand(k, 1) + 1)
y = np.dot(X, w)
sigma = 0.2
y += sigma * rng.rand(*y.shape)
y = y.squeeze()
lars_alphas, _, lars_coef = linear_model.lars_path(X, y, method='lasso')
_, lasso_coef2, _ = linear_model.lasso_path(X, y,
alphas=lars_alphas,
tol=1e-6,
fit_intercept=False)
assert_array_almost_equal(lars_coef, lasso_coef2, decimal=1)
def test_lasso_lars_vs_lasso_cd_ill_conditioned2():
# Create an ill-conditioned situation in which the LARS has to go
# far in the path to converge, and check that LARS and coordinate
# descent give the same answers
# Note it used to be the case that Lars had to use the drop for good
# strategy for this but this is no longer the case with the
# equality_tolerance checks
X = [[1e20, 1e20, 0],
[-1e-32, 0, 0],
[1, 1, 1]]
y = [10, 10, 1]
alpha = .0001
def objective_function(coef):
return (1. / (2. * len(X)) * linalg.norm(y - np.dot(X, coef)) ** 2
+ alpha * linalg.norm(coef, 1))
lars = linear_model.LassoLars(alpha=alpha, normalize=False)
assert_warns(ConvergenceWarning, lars.fit, X, y)
lars_coef_ = lars.coef_
lars_obj = objective_function(lars_coef_)
coord_descent = linear_model.Lasso(alpha=alpha, tol=1e-10, normalize=False)
cd_coef_ = coord_descent.fit(X, y).coef_
cd_obj = objective_function(cd_coef_)
assert_less(lars_obj, cd_obj * (1. + 1e-8))
def test_lars_add_features():
# assure that at least some features get added if necessary
# test for 6d2b4c
# Hilbert matrix
n = 5
H = 1. / (np.arange(1, n + 1) + np.arange(n)[:, np.newaxis])
clf = linear_model.Lars(fit_intercept=False).fit(
H, np.arange(n))
assert_true(np.all(np.isfinite(clf.coef_)))
def test_lars_n_nonzero_coefs(verbose=False):
lars = linear_model.Lars(n_nonzero_coefs=6, verbose=verbose)
lars.fit(X, y)
assert_equal(len(lars.coef_.nonzero()[0]), 6)
# The path should be of length 6 + 1 in a Lars going down to 6
# non-zero coefs
assert_equal(len(lars.alphas_), 7)
def test_multitarget():
# Assure that estimators receiving multidimensional y do the right thing
X = diabetes.data
Y = np.vstack([diabetes.target, diabetes.target ** 2]).T
n_targets = Y.shape[1]
for estimator in (linear_model.LassoLars(), linear_model.Lars()):
estimator.fit(X, Y)
Y_pred = estimator.predict(X)
Y_dec = estimator.decision_function(X)
assert_array_almost_equal(Y_pred, Y_dec)
alphas, active, coef, path = (estimator.alphas_, estimator.active_,
estimator.coef_, estimator.coef_path_)
for k in range(n_targets):
estimator.fit(X, Y[:, k])
y_pred = estimator.predict(X)
assert_array_almost_equal(alphas[k], estimator.alphas_)
assert_array_almost_equal(active[k], estimator.active_)
assert_array_almost_equal(coef[k], estimator.coef_)
assert_array_almost_equal(path[k], estimator.coef_path_)
assert_array_almost_equal(Y_pred[:, k], y_pred)
def test_lars_cv():
# Test the LassoLarsCV object by checking that the optimal alpha
# increases as the number of samples increases.
# This property is not actually guaranteed in general and is just a
# property of the given dataset, with the given steps chosen.
old_alpha = 0
lars_cv = linear_model.LassoLarsCV()
for length in (400, 200, 100):
X = diabetes.data[:length]
y = diabetes.target[:length]
lars_cv.fit(X, y)
np.testing.assert_array_less(old_alpha, lars_cv.alpha_)
old_alpha = lars_cv.alpha_
def test_lasso_lars_ic():
# Test the LassoLarsIC object by checking that
# - some good features are selected.
# - alpha_bic > alpha_aic
# - n_nonzero_bic < n_nonzero_aic
lars_bic = linear_model.LassoLarsIC('bic')
lars_aic = linear_model.LassoLarsIC('aic')
rng = np.random.RandomState(42)
X = diabetes.data
y = diabetes.target
X = np.c_[X, rng.randn(X.shape[0], 4)] # add 4 bad features
lars_bic.fit(X, y)
lars_aic.fit(X, y)
nonzero_bic = np.where(lars_bic.coef_)[0]
nonzero_aic = np.where(lars_aic.coef_)[0]
assert_greater(lars_bic.alpha_, lars_aic.alpha_)
assert_less(len(nonzero_bic), len(nonzero_aic))
assert_less(np.max(nonzero_bic), diabetes.data.shape[1])
# test error on unknown IC
lars_broken = linear_model.LassoLarsIC('<unknown>')
assert_raises(ValueError, lars_broken.fit, X, y)
def test_no_warning_for_zero_mse():
# LassoLarsIC should not warn for log of zero MSE.
y = np.arange(10, dtype=float)
X = y.reshape(-1, 1)
lars = linear_model.LassoLarsIC(normalize=False)
assert_no_warnings(lars.fit, X, y)
assert_true(np.any(np.isinf(lars.criterion_)))
def test_lars_path_readonly_data():
# When using automated memory mapping on large input, the
# fold data is in read-only mode
# This is a non-regression test for:
# https://github.com/scikit-learn/scikit-learn/issues/4597
splitted_data = train_test_split(X, y, random_state=42)
with TempMemmap(splitted_data) as (X_train, X_test, y_train, y_test):
# The following should not fail despite copy=False
_lars_path_residues(X_train, y_train, X_test, y_test, copy=False) | bsd-3-clause |
tetherless-world/ecoop | pyecoop/lib/ecoop/cf.py | 1 | 36336 | # /usr/bin/env python
# -*- coding: utf-8 -*-
# ##############################################################################
#
#
# Project: ECOOP, sponsored by The National Science Foundation
# Purpose: this code is part of the Cyberinfrastructure developed for the ECOOP project
# http://tw.rpi.edu/web/project/ECOOP
# from the TWC - Tetherless World Constellation
# at RPI - Rensselaer Polytechnic Institute
# founded by NSF
#
# Author: Massimo Di Stefano , [email protected] -
# http://tw.rpi.edu/web/person/MassimoDiStefano
#
###############################################################################
# Copyright (c) 2008-2014 Tetherless World Constellation at Rensselaer Polytechnic Institute
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
from __future__ import print_function
import os
import envoy
from datetime import datetime
import numpy as np
import scipy.stats as sts
import statsmodels.api as sm
from scipy.interpolate import interp1d
import pandas as pd
import matplotlib.pyplot as plt
from ecoop.ecooputil import shareUtil as EU
lowess = sm.nonparametric.lowess
try:
from IPython.core.display import display
except:
print('you need to run this code from inside an IPython notebook in order to save provenance')
eu = EU()
from bokeh import pyplot
class cfData():
def __init__(self):
self.x = ''
def nao_get(self,
url="https://climatedataguide.ucar.edu/sites/default/files/climate_index_files/nao_station_djfm.txt",
save=None, csvout='nao.csv', prov=False):
"""
read NAO data from url and return a pandas dataframe
:param str url: url to data online default is set to :
https://climatedataguide.ucar.edu/sites/default/files/climate_index_files/nao_station_djfm.txt
:param str save: directory where to save raw data as csv
:return: naodata as pandas dataframe
:rtype: pandas dataframe
"""
#source_code_link = "http://epinux.com/shared/pyecoop_doc/ecoop.html#ecoop.cf.cfData.nao_get"
try:
naodata = pd.read_csv(url, sep=' ', header=0, skiprows=0, index_col=0, parse_dates=True, skip_footer=1)
print('dataset used: %s' % url)
if save:
eu.ensure_dir(save)
output = os.path.join(save, csvout)
naodata.to_csv(output, sep=',', header=True, index=True, index_label='Date')
print('nao data saved in : ' + output)
if prov:
jsonld = {
"@id": "ex:NAO_dataset",
"@type": ["prov:Entity", "ecoop:Dataset"],
"ecoop_ext:hasCode": {
"@id": "http://epinux.com/shared/pyecoop_doc/ecoop.html#ecoop.cf.cfData.nao_get",
"@type": "ecoop_ext:Code",
"ecoop_ext:hasFunction_src_code_link": url,
"ecoop_ext:hasParameter": [
{
"@type": "ecoop_ext:Parameter",
"ecoop_ext:parameter_name": "csvout",
"ecoop_ext:parameter_value": csvout
},
{
"@type": "ecoop_ext:Parameter",
"ecoop_ext:parameter_name": "save",
"ecoop_ext:parameter_value": save
},
{
"@type": "ecoop_ext:Parameter",
"ecoop_ext:parameter_name": "url",
"ecoop_ext:parameter_value": url
}
]
}
}
display('cell-output metadata saved', metadata={'ecoop_prov': jsonld})
return naodata
except IOError:
print(
'unable to fetch the data, check if %s is a valid address and that the data conforms to the NAO spec; for info about the data spec see [1]' % url)
# try cached version / history-linked-uri
def nin_get(self, url='http://www.cpc.ncep.noaa.gov/data/indices/sstoi.indices', save=None, csvout='nin.csv',
prov=False):
"""
read NIN data from url and return a pandas dataframe
:param str url: url to data online default is set to : http://www.cpc.ncep.noaa.gov/data/indices/sstoi.indices
:param str save: directory where to save raw data as csv
:return: nindata as pandas dataframe
:rtype: pandas dataframe
"""
try:
ts_raw = pd.read_table(url, sep=' ', header=0, skiprows=0, parse_dates=[['YR', 'MON']],
skipinitialspace=True,
index_col=0, date_parser=self.parse)
print('dataset used: %s' % url)
ts_year_group = ts_raw.groupby(lambda x: x.year).apply(lambda sdf: sdf if len(sdf) > 11 else None)
ts_range = pd.date_range(ts_year_group.index[0][1], ts_year_group.index[-1][1] + pd.DateOffset(months=1),
freq="M")
ts = pd.DataFrame(ts_year_group.values, index=ts_range, columns=ts_year_group.keys())
ts_fullyears_group = ts.groupby(lambda x: x.year)
nin_anomalies = (ts_fullyears_group.mean()['ANOM.3'] - sts.nanmean(
ts_fullyears_group.mean()['ANOM.3'])) / sts.nanstd(ts_fullyears_group.mean()['ANOM.3'])
nin_anomalies = pd.DataFrame(nin_anomalies.values,
index=pd.to_datetime([str(x) for x in nin_anomalies.index]))
nin_anomalies = nin_anomalies.rename(columns={'0': 'nin'})
nin_anomalies.columns = ['nin']
if save:
eu.ensure_dir(save)
output = os.path.join(save, csvout)
nin_anomalies.to_csv(output, sep=',', header=True, index=True, index_label='Date')
print('data saved as %s ' % output)
if prov:
function = {}
function['name'] = 'nin_get'
function['parameters'] = {}
function['parameters']['url'] = url
function['parameters']['save'] = save
function['parameters']['csvout'] = csvout
display('cell-output metadata saved', metadata={'nin_get': function})
jsonld = {
"@id": "ex:NIN_dataset",
"@type": ["prov:Entity", "ecoop:Dataset"],
"ecoop_ext:hasCode": {
"@id": "http://epinux.com/shared/pyecoop_doc/ecoop.html#ecoop.cf.cfData.nin_get",
"@type": "ecoop_ext:Code",
"ecoop_ext:hasFunction_src_code_link": url,
"ecoop_ext:hasParameter": [
{
"@type": "ecoop_ext:Parameter",
"ecoop_ext:parameter_name": "csvout",
"ecoop_ext:parameter_value": csvout
},
{
"@type": "ecoop_ext:Parameter",
"ecoop_ext:parameter_name": "save",
"ecoop_ext:parameter_value": save
},
{
"@type": "ecoop_ext:Parameter",
"ecoop_ext:parameter_name": "url",
"ecoop_ext:parameter_value": url
}
]
}
}
display('cell-output metadata saved', metadata={'ecoop_prov': jsonld})
return nin_anomalies
except IOError:
print(
'unable to fetch the data, check if %s is a valid address and that the data conforms to the NIN spec; for info about the data spec see [1]' % url)
# try cached version / history-linked-uri
def parse(self, yr, mon):
"""
Convert year and month to a datetime object, day hardcoded to the 2nd day of each month
:param yr: year as integer or string
:param mon: month as integer or string
:return: datetime object (time stamp)
:rtype: datetime
"""
date = datetime(year=int(yr), day=2, month=int(mon))
return date
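# e.g. (illustration) self.parse('1980', '3') -> datetime(1980, 3, 2), the form
# expected by the date_parser argument of pd.read_table in nin_get above.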
def amo_get(self, url='http://www.cdc.noaa.gov/Correlation/amon.us.long.data', save=None, csvout='amo.csv',
prov=False):
"""
read AMO data from url and return a pandas dataframe
:param str url: url to data online default is set to : http://www.cdc.noaa.gov/Correlation/amon.us.long.data
:param str save: directory where to save raw data as csv
:return: amodata as pandas dataframe
:rtype: pandas dataframe
"""
try:
ts_raw = pd.read_table(url, sep=' ', skiprows=1,
names=['year', 'jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul', 'aug', 'sep', 'oct',
'nov', 'dec'], skipinitialspace=True, parse_dates=True, skipfooter=4,
index_col=0)
print('dataset used: %s' % url)
ts_raw.replace(-9.99900000e+01, np.NAN, inplace=True)
amodata = ts_raw.mean(axis=1)
amodata.name = "amo"
amodata = pd.DataFrame(amodata)
if save:
eu.ensure_dir(save)
output = os.path.join(save, csvout)
amodata.to_csv(output, sep=',', header=True, index=True, index_label='Date')
print('data saved as %s ' % output)
if prov:
function = {}
function['name'] = 'amo_get'
function['parameters'] = {}
function['parameters']['url'] = url
function['parameters']['save'] = save
function['parameters']['csvout'] = csvout
jsonld = {
"@id": "ex:AMO_dataset",
"@type": ["prov:Entity", "ecoop:Dataset"],
"ecoop_ext:hasCode": {
"@id": "http://epinux.com/shared/pyecoop_doc/ecoop.html#ecoop.cf.cfData.amo_get",
"@type": "ecoop_ext:Code",
"ecoop_ext:hasFunction_src_code_link": "http://epinux.com/shared/pyecoop_doc/ecoop.html#ecoop.cf.cfData.amo_get",
"ecoop_ext:hasParameter": [
{
"@type": "ecoop_ext:Parameter",
"ecoop_ext:parameter_name": "csvout",
"ecoop_ext:parameter_value": csvout
},
{
"@type": "ecoop_ext:Parameter",
"ecoop_ext:parameter_name": "save",
"ecoop_ext:parameter_value": save
},
{
"@type": "ecoop_ext:Parameter",
"ecoop_ext:parameter_name": "url",
"ecoop_ext:parameter_value": url
}
]
}
}
display('cell-output metadata saved', metadata={'ecoop_prov': jsonld})
return amodata
except:
print(
'unable to fetch the data, check if %s is a valid address and that the data conforms to the AMO spec; for info about the data spec see [1]' % url)
# try cached version / history-linked-uri
class cfPlot():
def plot_index(self, data, name='Index',
nb=True, datarange=None,
xticks=10, xticks_fontsize=10,
dateformat=False, figsize=(10, 8),
xmargin=True, ymargin=True,
legend=True, smoother=None,
output=None, dpi=300,
grid=True, xlabel='Year',
ylabel='', title='',
win_size=10, win_type='boxcar',
center=False, std=0.1,
beta=0.1, power=1, width=1,
min_periods=None, freq=None,
scategory=None, frac=1. / 3, it=3, figsave=None, prov=False):
"""
Function to plot the Climate Forcing indicator for the ESR 2013. It follows the graphic guidelines of past ESRs,
adding functionality such as several kinds of smoothing lines with different parameters.
:param data: pandas dataframe - input data
:param name: string - name used as dataframe index
:param nb: boolean if True the function is optimized to render the png inside a notebook
:param datarange: list of 2 integers for min and max year
:param xticks: integer xtick spacing default=10
:param xticks_fontsize: integer xticks fontsize default=10
:param dateformat: boolean if True set the xticks labels in date format
:param figsize: tuple figure size default (10, 8)
:param xmargin: boolean default True
:param ymargin: boolean default True
:param legend: boolean default True
:param smoother: string - smoothing method name passed to the rolling or expanding smoother
:param output: directory where to save output default None
:param dpi: integer
:param grid: boolean default True
:param xlabel: string default 'Year'
:param ylabel: string default ''
:param title: string default ''
:param win_size: integer default 10
:param win_type: string default 'boxcar'
:param center: boolean default False
:param std: float default 0.1
:param beta: float default 0.1
:param power: integer default 1
:param width: integer default 1
:param min_periods: None
:param freq: None
:param str scategory: smoothing category, one of 'rolling', 'expanding', 'lowess' (default None)
:param float frac: default 1./3. Between 0 and 1, the fraction of the data used when estimating each y-value.
:param int it: default 3 The number of residual-based reweightings to perform.
"""
try:
assert type(data) == pd.core.frame.DataFrame
#x = data.index.year
#y = data.values
if datarange:
#if datarange != None :
mind = np.datetime64(str(datarange[0]))
maxd = np.datetime64(str(datarange[1]))
newdata = data.ix[mind:maxd]
x = newdata.index.year
y = newdata.values
else:
x = data.index.year
y = data.values
x_p = x[np.where(y >= 0)[0]]
y_p = y[np.where(y >= 0)[0]]
x_n = x[np.where(y < 0)[0]]
y_n = y[np.where(y < 0)[0]]
fig = plt.figure(figsize=figsize)
ax1 = fig.add_subplot(111)
ax1.bar(x_n, y_n, 0.8, facecolor='b', label=name + ' < 0')
ax1.bar(x_p, y_p, 0.8, facecolor='r', label=name + ' > 0')
ax1.grid(grid)
if ylabel != '':
ax1.set_ylabel(ylabel)
else:
ax1.set_ylabel(name)
if xlabel != '':
ax1.set_xlabel(xlabel)
else:
ax1.set_xlabel(xlabel)
if title == '':
ax1.set_title(name)
else:
ax1.set_title(title)
ax1.axhline(0, color='black', lw=1.5)
if xmargin:
ax1.set_xmargin(0.1)
if ymargin:
ax1.set_xmargin(0.1)
if legend:
ax1.legend()
if not figsave:
figsave = name + '.png'
if scategory == 'rolling':
newy = self.rolling_smoother(data, stype=smoother, win_size=win_size, win_type=win_type, center=center,
std=std,
beta=beta, power=power, width=width)
ax1.plot(newy.index.year, newy.values, lw=3, color='g')
if scategory == 'expanding':
newy = self.expanding_smoother(data, stype=smoother, min_periods=min_periods, freq=freq)
ax1.plot(newy.index.year, newy.values, lw=3, color='g')
if scategory == 'lowess':
x = np.array(range(0, len(data.index.values))).T
newy = pd.Series(lowess(data.values.flatten(), x, frac=frac, it=it).T[1], index=data.index)
ax1.plot(newy.index.year, newy, lw=3, color='g')
## interp 1D attempt
xx = np.linspace(min(data.index.year), max(data.index.year), len(newy))
f = interp1d(xx, newy)
xnew = np.linspace(min(data.index.year), max(data.index.year), len(newy) * 4)
f2 = interp1d(xx, newy, kind='cubic')
#xnew = np.linspace(min(data.index.values), max(data.index.values), len(newy)*2)
ax1.plot(xx, newy, 'o', xnew, f(xnew), '-', xnew, f2(xnew), '--')
##
if scategory == 'ewma':
print('todo')
plt.xticks(data.index.year[::xticks].astype('int'), data.index.year[::xticks].astype('int'),
fontsize=xticks_fontsize)
plt.autoscale(enable=True, axis='both', tight=True)
if dateformat:
fig.autofmt_xdate(bottom=0.2, rotation=75, ha='right')
if output:
eu.ensure_dir(output)
ffigsave = os.path.join(output, figsave)
plt.savefig(ffigsave, dpi=dpi)
print('graph saved in: %s ' % ffigsave)
if scategory:
smoutput = name + '_' + scategory + '.csv'
if smoother:
smoutput = name + '_' + scategory + '_' + smoother + '.csv'
smoutput = os.path.join(output, smoutput)
if scategory == 'lowess':
newdataframe = data.copy(deep=True)
newdataframe['smooth'] = pd.Series(newy, index=data.index)
newdataframe.to_csv(smoutput, sep=',', header=True, index=True, index_label='Year')
else:
newy.to_csv(smoutput, sep=',', header=True, index=True, index_label='Year')
print(name + ' smoothed data saved in : %s ' % smoutput)
if nb:
fig.subplots_adjust(left=-1.0)
fig.subplots_adjust(right=1.0)
#plt.show()
if prov:
function = {}
function['name'] = 'plot_index'
function['parameters'] = {}
function['parameters']['data'] = data
function['parameters']['name'] = name
function['parameters']['nb'] = nb
function['parameters']['datarange'] = datarange
function['parameters']['xticks'] = xticks
function['parameters']['xticks_fontsize'] = xticks_fontsize
function['parameters']['dateformat'] = dateformat
function['parameters']['figsize'] = figsize
function['parameters']['xmargin'] = xmargin
function['parameters']['ymargin'] = ymargin
function['parameters']['legend'] = legend
function['parameters']['smoother'] = smoother
function['parameters']['output'] = output
function['parameters']['dpi'] = dpi
function['parameters']['grid'] = grid
function['parameters']['xlabel'] = xlabel
function['parameters']['ylabel'] = ylabel
function['parameters']['title'] = title
function['parameters']['win_size'] = win_size
function['parameters']['win_type'] = win_type
function['parameters']['center'] = center
function['parameters']['std'] = std
function['parameters']['beta'] = beta
function['parameters']['power'] = power
function['parameters']['width'] = width
function['parameters']['min_periods'] = min_periods
function['parameters']['freq'] = freq
function['parameters']['scategory'] = scategory
function['parameters']['frac'] = frac
function['parameters']['it'] = it
function['parameters']['figsave'] = figsave
jsonld = {
"@id": "ex:NAO_figure",
"@type": ["prov:Entity", "ecoop:Figure"],
"ecoop_ext:hasData": "ecoop_data['NAO']",
"ecoop_ext:hasCode": {
"@type": "ecoop_ext:Code",
"ecoop_ext:hasFunction_src_code_link": "",
"ecoop_ext:hasParameter": [
{
"@type": "ecoop_ext:Parameter",
"ecoop_ext:parameter_name": "beta",
"ecoop_ext:parameter_value": beta
},
{
"@type": "ecoop_ext:Parameter",
"ecoop_ext:parameter_name": "center",
"ecoop_ext:parameter_value": center
},
{
"@type": "ecoop_ext:Parameter",
"ecoop_ext:parameter_name": "data",
"ecoop_ext:parameter_value": data
},
{
"@type": "ecoop_ext:Parameter",
"ecoop_ext:parameter_name": "datarange",
"ecoop_ext:parameter_value": datarange
},
{
"@type": "ecoop_ext:Parameter",
"ecoop_ext:parameter_name": "dateformat",
"ecoop_ext:parameter_value": dateformat
},
{
"@type": "ecoop_ext:Parameter",
"ecoop_ext:parameter_name": "dpi",
"ecoop_ext:parameter_value": dpi
},
{
"@type": "ecoop_ext:Parameter",
"ecoop_ext:parameter_name": "figsave",
"ecoop_ext:parameter_value": figsave
},
{
"@type": "ecoop_ext:Parameter",
"ecoop_ext:parameter_name": "figsize",
"ecoop_ext:parameter_value": figsize
},
{
"@type": "ecoop_ext:Parameter",
"ecoop_ext:parameter_name": "frac",
"ecoop_ext:parameter_value": frac
},
{
"@type": "ecoop_ext:Parameter",
"ecoop_ext:parameter_name": "freq",
"ecoop_ext:parameter_value": freq
},
{
"@type": "ecoop_ext:Parameter",
"ecoop_ext:parameter_name": "grid",
"ecoop_ext:parameter_value": grid
},
{
"@type": "ecoop_ext:Parameter",
"ecoop_ext:parameter_name": "it",
"ecoop_ext:parameter_value": it
},
{
"@type": "ecoop_ext:Parameter",
"ecoop_ext:parameter_name": "legend",
"ecoop_ext:parameter_value": legend
},
{
"@type": "ecoop_ext:Parameter",
"ecoop_ext:parameter_name": "min_periods",
"ecoop_ext:parameter_value": min_periods
},
{
"@type": "ecoop_ext:Parameter",
"ecoop_ext:parameter_name": "name",
"ecoop_ext:parameter_value": name
},
{
"@type": "ecoop_ext:Parameter",
"ecoop_ext:parameter_name": "nb",
"ecoop_ext:parameter_value": nb
},
{
"@type": "ecoop_ext:Parameter",
"ecoop_ext:parameter_name": "output",
"ecoop_ext:parameter_value": output
},
{
"@type": "ecoop_ext:Parameter",
"ecoop_ext:parameter_name": "power",
"ecoop_ext:parameter_value": power
},
{
"@type": "ecoop_ext:Parameter",
"ecoop_ext:parameter_name": "scategory",
"ecoop_ext:parameter_value": scategory
},
{
"@type": "ecoop_ext:Parameter",
"ecoop_ext:parameter_name": "smoother",
"ecoop_ext:parameter_value": smoother
},
{
"@type": "ecoop_ext:Parameter",
"ecoop_ext:parameter_name": "std",
"ecoop_ext:parameter_value": std
},
{
"@type": "ecoop_ext:Parameter",
"ecoop_ext:parameter_name": "title",
"ecoop_ext:parameter_value": title
},
{
"@type": "ecoop_ext:Parameter",
"ecoop_ext:parameter_name": "width",
"ecoop_ext:parameter_value": width
},
{
"@type": "ecoop_ext:Parameter",
"ecoop_ext:parameter_name": "win_size",
"ecoop_ext:parameter_value": win_size
},
{
"@type": "ecoop_ext:Parameter",
"ecoop_ext:parameter_name": "win_type",
"ecoop_ext:parameter_value": win_type
},
{
"@type": "ecoop_ext:Parameter",
"ecoop_ext:parameter_name": "xlabel",
"ecoop_ext:parameter_value": xlabel
},
{
"@type": "ecoop_ext:Parameter",
"ecoop_ext:parameter_name": "xmargin",
"ecoop_ext:parameter_value": xmargin
},
{
"@type": "ecoop_ext:Parameter",
"ecoop_ext:parameter_name": "xticks",
"ecoop_ext:parameter_value": xticks
},
{
"@type": "ecoop_ext:Parameter",
"ecoop_ext:parameter_name": "xticks_fontsize",
"ecoop_ext:parameter_value": xticks_fontsize
},
{
"@type": "ecoop_ext:Parameter",
"ecoop_ext:parameter_name": "ylabel",
"ecoop_ext:parameter_value": ylabel
},
{
"@type": "ecoop_ext:Parameter",
"ecoop_ext:parameter_name": "ymargin",
"ecoop_ext:parameter_value": ymargin
}
]
},
"ecoop_ext:usedSoftware": [{"@id": "ex:ecoop_software"}, {"@id": "ex:ipython_software"}]
}
display('cell-output metadata saved', metadata={'ecoop_prov': jsonld})
pyplot.show_bokeh(plt.gcf(), filename="subplots.html")
except AssertionError:
if type(data) != pd.core.frame.DataFrame:
print('input data not compatible, it has to be of type : pandas.core.frame.DataFrame')
print('data not loaded correctly')
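# Typical call (a sketch only; directories and file names are assumptions):
#
#     cfd = cfData()
#     cfp = cfPlot()
#     nao = cfd.nao_get(save='data')
#     cfp.plot_index(nao, name='NAO', scategory='lowess', frac=1. / 3,
#                    output='figures', figsave='nao.png')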
def rolling_smoother(self, data, stype='mean', win_size=10, win_type='boxcar', center=False, std=0.1,
beta=0.1,
power=1, width=1):
"""
Perform a rolling smoothing on the data. For complete help refer to http://pandas.pydata.org/pandas-docs/dev/computation.html
:param data: pandas dataframe input data
:param stype: smoothing type (see the list below)
:param win_size: size of the moving window
:param win_type: window type (only used with stype='window')
:param center: whether to set the labels at the center of the window
:param std: standard deviation (gaussian window)
:param beta: beta parameter (kaiser window)
:param power: power parameter (general_gaussian window)
:param width: width parameter (general_gaussian and slepian windows)
smoothing types:
ROLLING :
rolling_count Number of non-null observations
rolling_sum Sum of values
rolling_mean Mean of values
rolling_median Arithmetic median of values
rolling_min Minimum
rolling_max Maximum
rolling_std Unbiased standard deviation
rolling_var Unbiased variance
rolling_skew Unbiased skewness (3rd moment)
rolling_kurt Unbiased kurtosis (4th moment)
rolling_window Moving window function
window types:
boxcar
triang
blackman
hamming
bartlett
parzen
bohman
blackmanharris
nuttall
barthann
kaiser (needs beta)
gaussian (needs std)
general_gaussian (needs power, width)
slepian (needs width)
"""
if stype == 'count':
newy = pd.rolling_count(data, win_size)
if stype == 'sum':
newy = pd.rolling_sum(data, win_size)
if stype == 'mean':
newy = pd.rolling_mean(data, win_size)
if stype == 'median':
newy = pd.rolling_median(data, win_size)
if stype == 'min':
newy = pd.rolling_min(data, win_size)
if stype == 'max':
newy = pd.rolling_max(data, win_size)
if stype == 'std':
newy = pd.rolling_std(data, win_size)
if stype == 'var':
newy = pd.rolling_var(data, win_size)
if stype == 'skew':
newy = pd.rolling_skew(data, win_size)
if stype == 'kurt':
newy = pd.rolling_kurt(data, win_size)
if stype == 'window':
# elif keeps a kaiser/gaussian result from being overwritten by the generic call in the else branch
if win_type == 'kaiser':
newy = pd.rolling_window(data, win_size, win_type, center=center, beta=beta)
elif win_type == 'gaussian':
newy = pd.rolling_window(data, win_size, win_type, center=center, std=std)
elif win_type == 'general_gaussian':
newy = pd.rolling_window(data, win_size, win_type, center=center, power=power, width=width)
else:
newy = pd.rolling_window(data, win_size, win_type, center=center)
return newy
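# Example (sketch): a 10-year running mean of an index dataframe,
#
#     smoothed = cfp.rolling_smoother(nao, stype='mean', win_size=10)
#
# or a gaussian window of the same width:
#
#     smoothed = cfp.rolling_smoother(nao, stype='window', win_type='gaussian',
#                                     win_size=10, std=0.1)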
def expanding_smoother(self, data, stype='mean', min_periods=None, freq=None):
"""
Perform an expanding smoothing on the data. For complete help refer to http://pandas.pydata.org/pandas-docs/dev/computation.html
:param data: pandas dataframe input data
:param stype: smoothing type
:param min_periods: minimum number of observations required
:param freq: frequency
smoothing types:
expanding_count Number of non-null observations
expanding_sum Sum of values
expanding_mean Mean of values
expanding_median Arithmetic median of values
expanding_min Minimum
expanding_max Maximum
expanding_std Unbiased standard deviation
expanding_var Unbiased variance
expanding_skew Unbiased skewness (3rd moment)
expanding_kurt Unbiased kurtosis (4th moment)
"""
if stype == 'count':
newy = pd.expanding_count(data, min_periods=min_periods, freq=freq)
if stype == 'sum':
newy = pd.expanding_sum(data, min_periods=min_periods, freq=freq)
if stype == 'mean':
newy = pd.expanding_mean(data, min_periods=min_periods, freq=freq)
if stype == 'median':
newy = pd.expanding_median(data, min_periods=min_periods, freq=freq)
if stype == 'min':
newy = pd.expanding_min(data, min_periods=min_periods, freq=freq)
if stype == 'max':
newy = pd.expanding_max(data, min_periods=min_periods, freq=freq)
if stype == 'std':
newy = pd.expanding_std(data, min_periods=min_periods, freq=freq)
if stype == 'var':
newy = pd.expanding_var(data, min_periods=min_periods, freq=freq)
if stype == 'skew':
newy = pd.expanding_skew(data, min_periods=min_periods, freq=freq)
if stype == 'kurt':
newy = pd.expanding_kurt(data, min_periods=min_periods, freq=freq)
return newy
| apache-2.0 |
jamesgregson/easy_image_io | example.py | 1 | 1728 | import numpy as np
import easy_image_io as eiio
# return an image stored as planes (default for easy_image_io) as packed pixels
# suitable for plotting with matplotlib's imshow
def planes_to_packed( img ):
# create the output image
out = np.zeros( (img.shape[1],img.shape[2],img.shape[0]), dtype=img.dtype )
out[:,:,0] = img[0,:,:]
out[:,:,1] = img[1,:,:]
out[:,:,2] = img[2,:,:]
return out
# reverse of planes_to_packed, not needed here but listed for completeness
def packed_to_planes( img ):
# create the output image
out = np.zeros( (img.shape[2],img.shape[0],img.shape[1]), dtype=img.dtype )
out[0,:,:] = img[:,:,0]
out[1,:,:] = img[:,:,1]
out[2,:,:] = img[:,:,2]
return out
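# Round-trip sketch: for a planar uint16 image `img` of shape (3, H, W),
# packed_to_planes(planes_to_packed(img)) reproduces `img`, e.g.
# np.array_equal(packed_to_planes(planes_to_packed(RGB)), RGB) is True for the
# RGB array built below.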
# make a 3-channel RGB image 1920 pixels wide and 1080 pixels high, plus a
# monochrome image of the same size, and fill them with data
height = 1080;
width = 1920;
x,y = np.meshgrid( np.arange(width), np.arange(height) )
RGB = np.zeros( (3,height,width), dtype=np.uint16 )
RGB[0,:,:] = x*255
RGB[1,:,:] = y*255
RGB[2,:,:] = (x+y)*255
LUM = np.uint16((x+y)*255);
# save the RGB and monochrome images as PNG and TIFF
eiio.imwrite( RGB, 'RGB.png' )
eiio.imwrite( RGB, 'RGB.tif' )
eiio.imwrite( LUM, 'LUM.png' )
eiio.imwrite( LUM, 'LUM.tif' )
tRGB = eiio.imread( 'RGB.png' )
print( 'Checking RGB PNG, error sum: %d ' % np.sum(np.abs(RGB.flatten()-tRGB.flatten())) )
tRGB = eiio.imread( 'RGB.tif' )
print( 'Checking RGB TIFF, error sum: %d ' % np.sum(np.abs(RGB.flatten()-tRGB.flatten())) )
tLUM = eiio.imread( 'LUM.png' )
print( 'Checking monochrome PNG, error sum: %d ' % np.sum(np.abs(LUM.flatten()-tLUM.flatten())) )
tLUM = eiio.imread( 'LUM.tif' )
print( 'Checking monochrome TIFF, error sum: %d ' % np.sum(np.abs(LUM.flatten()-tLUM.flatten())) )
| mit |
q1ang/scikit-learn | sklearn/decomposition/base.py | 313 | 5647 | """Principal Component Analysis Base Classes"""
# Author: Alexandre Gramfort <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Denis A. Engemann <[email protected]>
# Kyle Kastner <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from scipy import linalg
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_array
from ..utils.extmath import fast_dot
from ..utils.validation import check_is_fitted
from ..externals import six
from abc import ABCMeta, abstractmethod
class _BasePCA(six.with_metaclass(ABCMeta, BaseEstimator, TransformerMixin)):
"""Base class for PCA methods.
Warning: This class should not be used directly.
Use derived classes instead.
"""
def get_covariance(self):
"""Compute data covariance with the generative model.
``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)``
where S**2 contains the explained variances, and sigma2 contains the
noise variances.
Returns
-------
cov : array, shape=(n_features, n_features)
Estimated covariance of data.
"""
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
cov = np.dot(components_.T * exp_var_diff, components_)
cov.flat[::len(cov) + 1] += self.noise_variance_ # modify diag inplace
return cov
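# In matrix terms (illustrative notation, W = components_, s2 = explained_variance_,
# n = noise_variance_): the value returned above is
#     W.T @ diag(maximum(s2 - n, 0)) @ W + n * I
# with W first rescaled by sqrt(s2) when the model was fitted with whiten=True.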
def get_precision(self):
"""Compute data precision matrix with the generative model.
Equals the inverse of the covariance but computed with
the matrix inversion lemma for efficiency.
Returns
-------
precision : array, shape=(n_features, n_features)
Estimated precision of data.
"""
n_features = self.components_.shape[1]
# handle corner cases first
if self.n_components_ == 0:
return np.eye(n_features) / self.noise_variance_
if self.n_components_ == n_features:
return linalg.inv(self.get_covariance())
# Get precision using matrix inversion lemma
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
precision = np.dot(components_, components_.T) / self.noise_variance_
precision.flat[::len(precision) + 1] += 1. / exp_var_diff
precision = np.dot(components_.T,
np.dot(linalg.inv(precision), components_))
precision /= -(self.noise_variance_ ** 2)
precision.flat[::len(precision) + 1] += 1. / self.noise_variance_
return precision
@abstractmethod
def fit(X, y=None):
"""Placeholder for fit. Subclasses should implement this method!
Fit the model with X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
def transform(self, X, y=None):
"""Apply dimensionality reduction to X.
X is projected on the first principal components previously extracted
from a training set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import IncrementalPCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> ipca = IncrementalPCA(n_components=2, batch_size=3)
>>> ipca.fit(X)
IncrementalPCA(batch_size=3, copy=True, n_components=2, whiten=False)
>>> ipca.transform(X) # doctest: +SKIP
"""
check_is_fitted(self, ['mean_', 'components_'], all_or_any=all)
X = check_array(X)
if self.mean_ is not None:
X = X - self.mean_
X_transformed = fast_dot(X, self.components_.T)
if self.whiten:
X_transformed /= np.sqrt(self.explained_variance_)
return X_transformed
def inverse_transform(self, X, y=None):
"""Transform data back to its original space.
In other words, return an input X_original whose transform would be X.
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data, where n_samples is the number of samples
and n_components is the number of components.
Returns
-------
X_original array-like, shape (n_samples, n_features)
Notes
-----
If whitening is enabled, inverse_transform will compute the
exact inverse operation, which includes reversing whitening.
"""
if self.whiten:
return fast_dot(X, np.sqrt(self.explained_variance_[:, np.newaxis]) *
self.components_) + self.mean_
else:
return fast_dot(X, self.components_) + self.mean_
| bsd-3-clause |
lancezlin/ml_template_py | lib/python2.7/site-packages/sklearn/feature_selection/tests/test_rfe.py | 56 | 11274 | """
Testing Recursive feature elimination
"""
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
from scipy import sparse
from sklearn.feature_selection.rfe import RFE, RFECV
from sklearn.datasets import load_iris, make_friedman1
from sklearn.metrics import zero_one_loss
from sklearn.svm import SVC, SVR
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score
from sklearn.utils import check_random_state
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_greater, assert_equal, assert_true
from sklearn.metrics import make_scorer
from sklearn.metrics import get_scorer
class MockClassifier(object):
"""
Dummy classifier to test recursive feature elimination
"""
def __init__(self, foo_param=0):
self.foo_param = foo_param
def fit(self, X, Y):
assert_true(len(X) == len(Y))
self.coef_ = np.ones(X.shape[1], dtype=np.float64)
return self
def predict(self, T):
return T.shape[0]
predict_proba = predict
decision_function = predict
transform = predict
def score(self, X=None, Y=None):
if self.foo_param > 1:
score = 1.
else:
score = 0.
return score
def get_params(self, deep=True):
return {'foo_param': self.foo_param}
def set_params(self, **params):
return self
def test_rfe_features_importance():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
clf = RandomForestClassifier(n_estimators=20,
random_state=generator, max_depth=2)
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
rfe.fit(X, y)
assert_equal(len(rfe.ranking_), X.shape[1])
clf_svc = SVC(kernel="linear")
rfe_svc = RFE(estimator=clf_svc, n_features_to_select=4, step=0.1)
rfe_svc.fit(X, y)
# Check if the supports are equal
assert_array_equal(rfe.get_support(), rfe_svc.get_support())
def test_rfe():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
X_sparse = sparse.csr_matrix(X)
y = iris.target
# dense model
clf = SVC(kernel="linear")
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
rfe.fit(X, y)
X_r = rfe.transform(X)
clf.fit(X_r, y)
assert_equal(len(rfe.ranking_), X.shape[1])
# sparse model
clf_sparse = SVC(kernel="linear")
rfe_sparse = RFE(estimator=clf_sparse, n_features_to_select=4, step=0.1)
rfe_sparse.fit(X_sparse, y)
X_r_sparse = rfe_sparse.transform(X_sparse)
assert_equal(X_r.shape, iris.data.shape)
assert_array_almost_equal(X_r[:10], iris.data[:10])
assert_array_almost_equal(rfe.predict(X), clf.predict(iris.data))
assert_equal(rfe.score(X, y), clf.score(iris.data, iris.target))
assert_array_almost_equal(X_r, X_r_sparse.toarray())
def test_rfe_mockclassifier():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
# dense model
clf = MockClassifier()
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
rfe.fit(X, y)
X_r = rfe.transform(X)
clf.fit(X_r, y)
assert_equal(len(rfe.ranking_), X.shape[1])
assert_equal(X_r.shape, iris.data.shape)
def test_rfecv():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = list(iris.target) # regression test: list should be supported
# Test using the score function
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5)
rfecv.fit(X, y)
# non-regression test for missing worst feature:
assert_equal(len(rfecv.grid_scores_), X.shape[1])
assert_equal(len(rfecv.ranking_), X.shape[1])
X_r = rfecv.transform(X)
    # All the noisy variables were filtered out
assert_array_equal(X_r, iris.data)
# same in sparse
rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5)
X_sparse = sparse.csr_matrix(X)
rfecv_sparse.fit(X_sparse, y)
X_r_sparse = rfecv_sparse.transform(X_sparse)
assert_array_equal(X_r_sparse.toarray(), iris.data)
# Test using a customized loss function
scoring = make_scorer(zero_one_loss, greater_is_better=False)
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
scoring=scoring)
ignore_warnings(rfecv.fit)(X, y)
X_r = rfecv.transform(X)
assert_array_equal(X_r, iris.data)
# Test using a scorer
scorer = get_scorer('accuracy')
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
scoring=scorer)
rfecv.fit(X, y)
X_r = rfecv.transform(X)
assert_array_equal(X_r, iris.data)
# Test fix on grid_scores
def test_scorer(estimator, X, y):
return 1.0
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
scoring=test_scorer)
rfecv.fit(X, y)
assert_array_equal(rfecv.grid_scores_, np.ones(len(rfecv.grid_scores_)))
# Same as the first two tests, but with step=2
rfecv = RFECV(estimator=SVC(kernel="linear"), step=2, cv=5)
rfecv.fit(X, y)
assert_equal(len(rfecv.grid_scores_), 6)
assert_equal(len(rfecv.ranking_), X.shape[1])
X_r = rfecv.transform(X)
assert_array_equal(X_r, iris.data)
rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=2, cv=5)
X_sparse = sparse.csr_matrix(X)
rfecv_sparse.fit(X_sparse, y)
X_r_sparse = rfecv_sparse.transform(X_sparse)
assert_array_equal(X_r_sparse.toarray(), iris.data)
# Verifying that steps < 1 don't blow up.
rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=.2, cv=5)
X_sparse = sparse.csr_matrix(X)
rfecv_sparse.fit(X_sparse, y)
X_r_sparse = rfecv_sparse.transform(X_sparse)
assert_array_equal(X_r_sparse.toarray(), iris.data)
def test_rfecv_mockclassifier():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = list(iris.target) # regression test: list should be supported
# Test using the score function
rfecv = RFECV(estimator=MockClassifier(), step=1, cv=5)
rfecv.fit(X, y)
# non-regression test for missing worst feature:
assert_equal(len(rfecv.grid_scores_), X.shape[1])
assert_equal(len(rfecv.ranking_), X.shape[1])
def test_rfecv_verbose_output():
# Check verbose=1 is producing an output.
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
sys.stdout = StringIO()
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = list(iris.target)
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5, verbose=1)
rfecv.fit(X, y)
verbose_output = sys.stdout
verbose_output.seek(0)
assert_greater(len(verbose_output.readline()), 0)
def test_rfe_estimator_tags():
rfe = RFE(SVC(kernel='linear'))
assert_equal(rfe._estimator_type, "classifier")
# make sure that cross-validation is stratified
iris = load_iris()
score = cross_val_score(rfe, iris.data, iris.target)
assert_greater(score.min(), .7)
def test_rfe_min_step():
n_features = 10
X, y = make_friedman1(n_samples=50, n_features=n_features, random_state=0)
n_samples, n_features = X.shape
estimator = SVR(kernel="linear")
# Test when floor(step * n_features) <= 0
selector = RFE(estimator, step=0.01)
sel = selector.fit(X, y)
assert_equal(sel.support_.sum(), n_features // 2)
# Test when step is between (0,1) and floor(step * n_features) > 0
selector = RFE(estimator, step=0.20)
sel = selector.fit(X, y)
assert_equal(sel.support_.sum(), n_features // 2)
# Test when step is an integer
selector = RFE(estimator, step=5)
sel = selector.fit(X, y)
assert_equal(sel.support_.sum(), n_features // 2)
def test_number_of_subsets_of_features():
# In RFE, 'number_of_subsets_of_features'
# = the number of iterations in '_fit'
# = max(ranking_)
# = 1 + (n_features + step - n_features_to_select - 1) // step
# After optimization #4534, this number
# = 1 + np.ceil((n_features - n_features_to_select) / float(step))
# This test case is to test their equivalence, refer to #4534 and #3824
def formula1(n_features, n_features_to_select, step):
return 1 + ((n_features + step - n_features_to_select - 1) // step)
def formula2(n_features, n_features_to_select, step):
return 1 + np.ceil((n_features - n_features_to_select) / float(step))
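    # Worked example (illustrative numbers added here, not from the original test):
    # with n_features=11, n_features_to_select=3, step=2
    #   formula1 -> 1 + (11 + 2 - 3 - 1) // 2     = 1 + 4 = 5
    #   formula2 -> 1 + ceil((11 - 3) / float(2)) = 1 + 4 = 5
    # so both formulas predict 5 elimination rounds (and max(ranking_) == 5).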
# RFE
# Case 1, n_features - n_features_to_select is divisible by step
# Case 2, n_features - n_features_to_select is not divisible by step
n_features_list = [11, 11]
n_features_to_select_list = [3, 3]
step_list = [2, 3]
for n_features, n_features_to_select, step in zip(
n_features_list, n_features_to_select_list, step_list):
generator = check_random_state(43)
X = generator.normal(size=(100, n_features))
y = generator.rand(100).round()
rfe = RFE(estimator=SVC(kernel="linear"),
n_features_to_select=n_features_to_select, step=step)
rfe.fit(X, y)
# this number also equals to the maximum of ranking_
assert_equal(np.max(rfe.ranking_),
formula1(n_features, n_features_to_select, step))
assert_equal(np.max(rfe.ranking_),
formula2(n_features, n_features_to_select, step))
# In RFECV, 'fit' calls 'RFE._fit'
# 'number_of_subsets_of_features' of RFE
# = the size of 'grid_scores' of RFECV
# = the number of iterations of the for loop before optimization #4534
# RFECV, n_features_to_select = 1
# Case 1, n_features - 1 is divisible by step
# Case 2, n_features - 1 is not divisible by step
n_features_to_select = 1
n_features_list = [11, 10]
step_list = [2, 2]
for n_features, step in zip(n_features_list, step_list):
generator = check_random_state(43)
X = generator.normal(size=(100, n_features))
y = generator.rand(100).round()
rfecv = RFECV(estimator=SVC(kernel="linear"), step=step, cv=5)
rfecv.fit(X, y)
assert_equal(rfecv.grid_scores_.shape[0],
formula1(n_features, n_features_to_select, step))
assert_equal(rfecv.grid_scores_.shape[0],
formula2(n_features, n_features_to_select, step))
def test_rfe_cv_n_jobs():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
rfecv = RFECV(estimator=SVC(kernel='linear'))
rfecv.fit(X, y)
rfecv_ranking = rfecv.ranking_
rfecv_grid_scores = rfecv.grid_scores_
rfecv.set_params(n_jobs=2)
rfecv.fit(X, y)
assert_array_almost_equal(rfecv.ranking_, rfecv_ranking)
assert_array_almost_equal(rfecv.grid_scores_, rfecv_grid_scores)
| mit |
Hezi-Resheff/trajectory-aa-move-ecol | metraj/graphics/simple_plots.py | 1 | 4453 | import matplotlib.pyplot as plt
import numpy as np
from matplotlib.collections import PatchCollection, LineCollection
from matplotlib.patches import Ellipse, Circle
def plot_trajectory_scatter(frame, size_col=1):
frame.plot(kind="scatter", x=frame.geo_cols[0], y=frame.geo_cols[1], s=size_col)
plt.title(frame.id)
plt.show()
def plot_trajectory_ellipse(frame, varx="attr_VARX", vary="attr_VARY", covxy="attr_COVXY", opacity_factor=1):
"""
    Draw the trajectory and uncertainty ellipses around each point.
1) Scatter of points
2) Trajectory lines
3) Ellipses
:param frame: Trajectory
:param opacity_factor: all opacity values are multiplied by this. Useful when used to plot multiple Trajectories in
an overlay plot.
:return: axis
"""
ellipses = []
segments = []
start_point = None
for i, pnt in frame.iterrows():
# The ellipse
U, s, V = np.linalg.svd(np.array([[pnt[varx], pnt[covxy]],
[pnt[covxy], pnt[vary]]]), full_matrices=True)
w, h = s**.5
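        # Note (added): s holds the eigenvalues (variances) of the 2x2 covariance
        # matrix, so w and h are the standard deviations along the principal axes.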
theta = np.arctan(V[1][0]/V[0][0]) # == np.arccos(-V[0][0])
ellipse = {"xy":pnt[list(frame.geo_cols)].values, "width":w, "height":h, "angle":theta}
ellipses.append(Ellipse(**ellipse))
# The line segment
x, y = pnt[list(frame.geo_cols)][:2]
if start_point:
segments.append([start_point, (x, y)])
start_point = (x, y)
ax = plt.gca()
ellipses = PatchCollection(ellipses)
ellipses.set_facecolor('none')
ellipses.set_color("green")
ellipses.set_linewidth(2)
ellipses.set_alpha(.4*opacity_factor)
ax.add_collection(ellipses)
frame.plot(kind="scatter", x=frame.geo_cols[0], y=frame.geo_cols[1], marker=".", ax=plt.gca(), alpha=opacity_factor)
lines = LineCollection(segments)
lines.set_color("gray")
lines.set_linewidth(1)
lines.set_alpha(.2*opacity_factor)
ax.add_collection(lines)
return ax
def plot_trajectory_multistage(traj, count, shape, style):
for i in range(count):
plt.subplot(shape[0], shape[1], i+1)
t = traj.get_next(depth=i)
plot_trajectory_ellipse(t)
plt.title(style["titles"][i])
plt.suptitle(style["suptitle"])
plt.show()
def trajectory_overlay_plot(trajectories, opacity):
"""
    Overlay-plot multiple trajectories with different opacities. This allows, for instance,
    plotting a processed Trajectory over the raw data to see the change, or two stages of the processing side by side.
:param trajectories: list of Trajectory objects
:param opacity: list of opacity values, the same length as trajectories
:return: axis
"""
for t, o in zip(trajectories, opacity):
ax = plot_trajectory_ellipse(t, opacity_factor=o)
plt.show()
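# Illustrative usage of the overlay plot (sketch only; `raw` and `smoothed` are
# assumed to be metraj Trajectory objects, e.g. two stages of the same track):
#   trajectory_overlay_plot([raw, smoothed], opacity=[0.3, 1.0])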
def plot_sparse_trajectory(trajectory, r=50, opacity_factor=1, plot_text=True,
num_col="segment_sparcify_n", min_static=100):
"""
Plots a sparsified trajectory as circles with the number of points they represent as a number inside.
:param trajectory: Trajectory object
:param r: the radius of circles
:param num_col: where to find the number to put in the circles
:param min_static: minimum count to change color of circle
:param plot_text: put the text with num of points in the circle?
:return: ax
"""
ax = plt.gca()
trajectory.plot(kind="scatter", x=trajectory.geo_cols[0], y=trajectory.geo_cols[1], marker=".",
ax=plt.gca(), alpha=0.0*opacity_factor)
circles = []
segments = []
start_point = None
for i, pnt in trajectory.iterrows():
circles.append(Circle(pnt[list(trajectory.geo_cols)].values, radius=r))
if plot_text:
plt.text(*pnt[list(trajectory.geo_cols)], s=str(int(pnt[num_col])), fontsize=12)
x, y = pnt[list(trajectory.geo_cols)][:2]
if start_point:
segments.append([start_point, (x, y)])
start_point = (x, y)
circles = PatchCollection(circles)
circles.set_facecolor(['none' if cnt < min_static else 'red' for cnt in trajectory[num_col].values])
circles.set_alpha(.5*opacity_factor)
ax.add_collection(circles)
lines = LineCollection(segments)
lines.set_color("gray")
lines.set_alpha(.2*opacity_factor)
ax.add_collection(lines)
return ax
| mit |
Nyker510/scikit-learn | sklearn/metrics/cluster/tests/test_supervised.py | 206 | 7643 | import numpy as np
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.metrics.cluster import homogeneity_score
from sklearn.metrics.cluster import completeness_score
from sklearn.metrics.cluster import v_measure_score
from sklearn.metrics.cluster import homogeneity_completeness_v_measure
from sklearn.metrics.cluster import adjusted_mutual_info_score
from sklearn.metrics.cluster import normalized_mutual_info_score
from sklearn.metrics.cluster import mutual_info_score
from sklearn.metrics.cluster import expected_mutual_information
from sklearn.metrics.cluster import contingency_matrix
from sklearn.metrics.cluster import entropy
from sklearn.utils.testing import assert_raise_message
from nose.tools import assert_almost_equal
from nose.tools import assert_equal
from numpy.testing import assert_array_almost_equal
score_funcs = [
adjusted_rand_score,
homogeneity_score,
completeness_score,
v_measure_score,
adjusted_mutual_info_score,
normalized_mutual_info_score,
]
def test_error_messages_on_wrong_input():
for score_func in score_funcs:
expected = ('labels_true and labels_pred must have same size,'
' got 2 and 3')
assert_raise_message(ValueError, expected, score_func,
[0, 1], [1, 1, 1])
expected = "labels_true must be 1D: shape is (2"
assert_raise_message(ValueError, expected, score_func,
[[0, 1], [1, 0]], [1, 1, 1])
expected = "labels_pred must be 1D: shape is (2"
assert_raise_message(ValueError, expected, score_func,
[0, 1, 0], [[1, 1], [0, 0]])
def test_perfect_matches():
for score_func in score_funcs:
assert_equal(score_func([], []), 1.0)
assert_equal(score_func([0], [1]), 1.0)
assert_equal(score_func([0, 0, 0], [0, 0, 0]), 1.0)
assert_equal(score_func([0, 1, 0], [42, 7, 42]), 1.0)
assert_equal(score_func([0., 1., 0.], [42., 7., 42.]), 1.0)
assert_equal(score_func([0., 1., 2.], [42., 7., 2.]), 1.0)
assert_equal(score_func([0, 1, 2], [42, 7, 2]), 1.0)
def test_homogeneous_but_not_complete_labeling():
# homogeneous but not complete clustering
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 2, 2])
assert_almost_equal(h, 1.00, 2)
assert_almost_equal(c, 0.69, 2)
assert_almost_equal(v, 0.81, 2)
def test_complete_but_not_homogeneous_labeling():
# complete but not homogeneous clustering
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 1, 1, 2, 2],
[0, 0, 1, 1, 1, 1])
assert_almost_equal(h, 0.58, 2)
assert_almost_equal(c, 1.00, 2)
assert_almost_equal(v, 0.73, 2)
def test_not_complete_and_not_homogeneous_labeling():
# neither complete nor homogeneous but not so bad either
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 1, 1, 1],
[0, 1, 0, 1, 2, 2])
assert_almost_equal(h, 0.67, 2)
assert_almost_equal(c, 0.42, 2)
assert_almost_equal(v, 0.52, 2)
def test_non_consecutive_labels():
# regression tests for labels with gaps
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 2, 2, 2],
[0, 1, 0, 1, 2, 2])
assert_almost_equal(h, 0.67, 2)
assert_almost_equal(c, 0.42, 2)
assert_almost_equal(v, 0.52, 2)
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 1, 1, 1],
[0, 4, 0, 4, 2, 2])
assert_almost_equal(h, 0.67, 2)
assert_almost_equal(c, 0.42, 2)
assert_almost_equal(v, 0.52, 2)
ari_1 = adjusted_rand_score([0, 0, 0, 1, 1, 1], [0, 1, 0, 1, 2, 2])
ari_2 = adjusted_rand_score([0, 0, 0, 1, 1, 1], [0, 4, 0, 4, 2, 2])
assert_almost_equal(ari_1, 0.24, 2)
assert_almost_equal(ari_2, 0.24, 2)
def uniform_labelings_scores(score_func, n_samples, k_range, n_runs=10,
seed=42):
# Compute score for random uniform cluster labelings
random_labels = np.random.RandomState(seed).random_integers
scores = np.zeros((len(k_range), n_runs))
for i, k in enumerate(k_range):
for j in range(n_runs):
labels_a = random_labels(low=0, high=k - 1, size=n_samples)
labels_b = random_labels(low=0, high=k - 1, size=n_samples)
scores[i, j] = score_func(labels_a, labels_b)
return scores
def test_adjustment_for_chance():
# Check that adjusted scores are almost zero on random labels
n_clusters_range = [2, 10, 50, 90]
n_samples = 100
n_runs = 10
scores = uniform_labelings_scores(
adjusted_rand_score, n_samples, n_clusters_range, n_runs)
max_abs_scores = np.abs(scores).max(axis=1)
assert_array_almost_equal(max_abs_scores, [0.02, 0.03, 0.03, 0.02], 2)
def test_adjusted_mutual_info_score():
# Compute the Adjusted Mutual Information and test against known values
labels_a = np.array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3])
labels_b = np.array([1, 1, 1, 1, 2, 1, 2, 2, 2, 2, 3, 1, 3, 3, 3, 2, 2])
# Mutual information
mi = mutual_info_score(labels_a, labels_b)
assert_almost_equal(mi, 0.41022, 5)
# Expected mutual information
C = contingency_matrix(labels_a, labels_b)
n_samples = np.sum(C)
emi = expected_mutual_information(C, n_samples)
assert_almost_equal(emi, 0.15042, 5)
# Adjusted mutual information
ami = adjusted_mutual_info_score(labels_a, labels_b)
assert_almost_equal(ami, 0.27502, 5)
ami = adjusted_mutual_info_score([1, 1, 2, 2], [2, 2, 3, 3])
assert_equal(ami, 1.0)
# Test with a very large array
a110 = np.array([list(labels_a) * 110]).flatten()
b110 = np.array([list(labels_b) * 110]).flatten()
ami = adjusted_mutual_info_score(a110, b110)
# This is not accurate to more than 2 places
assert_almost_equal(ami, 0.37, 2)
def test_entropy():
ent = entropy([0, 0, 42.])
assert_almost_equal(ent, 0.6365141, 5)
assert_almost_equal(entropy([]), 1)
def test_contingency_matrix():
labels_a = np.array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3])
labels_b = np.array([1, 1, 1, 1, 2, 1, 2, 2, 2, 2, 3, 1, 3, 3, 3, 2, 2])
C = contingency_matrix(labels_a, labels_b)
C2 = np.histogram2d(labels_a, labels_b,
bins=(np.arange(1, 5),
np.arange(1, 5)))[0]
assert_array_almost_equal(C, C2)
C = contingency_matrix(labels_a, labels_b, eps=.1)
assert_array_almost_equal(C, C2 + .1)
def test_exactly_zero_info_score():
# Check numerical stability when information is exactly zero
for i in np.logspace(1, 4, 4).astype(np.int):
labels_a, labels_b = np.ones(i, dtype=np.int),\
np.arange(i, dtype=np.int)
assert_equal(normalized_mutual_info_score(labels_a, labels_b), 0.0)
assert_equal(v_measure_score(labels_a, labels_b), 0.0)
assert_equal(adjusted_mutual_info_score(labels_a, labels_b), 0.0)
assert_equal(normalized_mutual_info_score(labels_a, labels_b), 0.0)
def test_v_measure_and_mutual_information(seed=36):
# Check relation between v_measure, entropy and mutual information
for i in np.logspace(1, 4, 4).astype(np.int):
random_state = np.random.RandomState(seed)
labels_a, labels_b = random_state.random_integers(0, 10, i),\
random_state.random_integers(0, 10, i)
assert_almost_equal(v_measure_score(labels_a, labels_b),
2.0 * mutual_info_score(labels_a, labels_b) /
(entropy(labels_a) + entropy(labels_b)), 0)
| bsd-3-clause |
Tong-Chen/scikit-learn | sklearn/datasets/mlcomp.py | 8 | 3731 | # Copyright (c) 2010 Olivier Grisel <[email protected]>
# License: BSD 3 clause
"""Glue code to load http://mlcomp.org data as a scikit.learn dataset"""
import os
import numbers
from sklearn.datasets.base import load_files
def _load_document_classification(dataset_path, metadata, set_=None, **kwargs):
if set_ is not None:
dataset_path = os.path.join(dataset_path, set_)
return load_files(dataset_path, metadata.get('description'), **kwargs)
LOADERS = {
'DocumentClassification': _load_document_classification,
# TODO: implement the remaining domain formats
}
def load_mlcomp(name_or_id, set_="raw", mlcomp_root=None, **kwargs):
"""Load a datasets as downloaded from http://mlcomp.org
Parameters
----------
    name_or_id : the integer id or the string name (from the metadata) of the
        MLComp dataset to load
`set_` : select the portion to load: 'train', 'test' or 'raw'
mlcomp_root : the filesystem path to the root folder where MLComp datasets
are stored, if mlcomp_root is None, the MLCOMP_DATASETS_HOME
environment variable is looked up instead.
**kwargs : domain specific kwargs to be passed to the dataset loader.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'filenames', the files holding the raw to learn, 'target', the
classification labels (integer index), 'target_names',
the meaning of the labels, and 'DESCR', the full description of the
dataset.
    Note on the lookup process: depending on the type of name_or_id, this
    function will choose between integer id lookup and metadata name lookup by
    looking at the unzipped archives and metadata files.
TODO: implement zip dataset loading too
"""
if mlcomp_root is None:
try:
mlcomp_root = os.environ['MLCOMP_DATASETS_HOME']
except KeyError:
raise ValueError("MLCOMP_DATASETS_HOME env variable is undefined")
mlcomp_root = os.path.expanduser(mlcomp_root)
mlcomp_root = os.path.abspath(mlcomp_root)
mlcomp_root = os.path.normpath(mlcomp_root)
if not os.path.exists(mlcomp_root):
raise ValueError("Could not find folder: " + mlcomp_root)
# dataset lookup
if isinstance(name_or_id, numbers.Integral):
# id lookup
dataset_path = os.path.join(mlcomp_root, str(name_or_id))
else:
# assume name based lookup
dataset_path = None
expected_name_line = "name: " + name_or_id
for dataset in os.listdir(mlcomp_root):
metadata_file = os.path.join(mlcomp_root, dataset, 'metadata')
if not os.path.exists(metadata_file):
continue
for line in file(metadata_file):
if line.strip() == expected_name_line:
dataset_path = os.path.join(mlcomp_root, dataset)
break
if dataset_path is None:
raise ValueError("Could not find dataset with metadata line: " +
expected_name_line)
# loading the dataset metadata
metadata = dict()
metadata_file = os.path.join(dataset_path, 'metadata')
if not os.path.exists(metadata_file):
raise ValueError(dataset_path + ' is not a valid MLComp dataset')
for line in file(metadata_file):
if ":" in line:
key, value = line.split(":", 1)
metadata[key.strip()] = value.strip()
    format = metadata.get('format', 'unknown')
loader = LOADERS.get(format)
if loader is None:
raise ValueError("No loader implemented for format: " + format)
return loader(dataset_path, metadata, set_=set_, **kwargs)
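# Illustrative call (sketch, not part of the original module; the dataset name is
# only an example and MLCOMP_DATASETS_HOME must point at unzipped MLComp archives):
#   news = load_mlcomp('20news-18828', set_='train')
#   print(news.target_names)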
| bsd-3-clause |
mehdidc/scikit-learn | sklearn/neighbors/tests/test_dist_metrics.py | 48 | 4949 | import itertools
import numpy as np
from numpy.testing import assert_array_almost_equal
import scipy
from scipy.spatial.distance import cdist
from sklearn.neighbors.dist_metrics import DistanceMetric
from nose import SkipTest
def cmp_version(version1, version2):
version1 = tuple(map(int, version1.split('.')[:2]))
version2 = tuple(map(int, version2.split('.')[:2]))
if version1 < version2:
return -1
elif version1 > version2:
return 1
else:
return 0
class TestMetrics:
def __init__(self, n1=20, n2=25, d=4, zero_frac=0.5,
rseed=0, dtype=np.float64):
np.random.seed(rseed)
self.X1 = np.random.random((n1, d)).astype(dtype)
self.X2 = np.random.random((n2, d)).astype(dtype)
# make boolean arrays: ones and zeros
self.X1_bool = self.X1.round(0)
self.X2_bool = self.X2.round(0)
V = np.random.random((d, d))
VI = np.dot(V, V.T)
self.metrics = {'euclidean': {},
'cityblock': {},
'minkowski': dict(p=(1, 1.5, 2, 3)),
'chebyshev': {},
'seuclidean': dict(V=(np.random.random(d),)),
'wminkowski': dict(p=(1, 1.5, 3),
w=(np.random.random(d),)),
'mahalanobis': dict(VI=(VI,)),
'hamming': {},
'canberra': {},
'braycurtis': {}}
self.bool_metrics = ['matching', 'jaccard', 'dice',
'kulsinski', 'rogerstanimoto', 'russellrao',
'sokalmichener', 'sokalsneath']
def test_cdist(self):
for metric, argdict in self.metrics.items():
keys = argdict.keys()
for vals in itertools.product(*argdict.values()):
kwargs = dict(zip(keys, vals))
D_true = cdist(self.X1, self.X2, metric, **kwargs)
yield self.check_cdist, metric, kwargs, D_true
for metric in self.bool_metrics:
D_true = cdist(self.X1_bool, self.X2_bool, metric)
yield self.check_cdist_bool, metric, D_true
def check_cdist(self, metric, kwargs, D_true):
if metric == 'canberra' and cmp_version(scipy.__version__, '0.9') <= 0:
raise SkipTest("Canberra distance incorrect in scipy < 0.9")
dm = DistanceMetric.get_metric(metric, **kwargs)
D12 = dm.pairwise(self.X1, self.X2)
assert_array_almost_equal(D12, D_true)
def check_cdist_bool(self, metric, D_true):
dm = DistanceMetric.get_metric(metric)
D12 = dm.pairwise(self.X1_bool, self.X2_bool)
assert_array_almost_equal(D12, D_true)
def test_pdist(self):
for metric, argdict in self.metrics.items():
keys = argdict.keys()
for vals in itertools.product(*argdict.values()):
kwargs = dict(zip(keys, vals))
D_true = cdist(self.X1, self.X1, metric, **kwargs)
yield self.check_pdist, metric, kwargs, D_true
for metric in self.bool_metrics:
D_true = cdist(self.X1_bool, self.X1_bool, metric)
yield self.check_pdist_bool, metric, D_true
def check_pdist(self, metric, kwargs, D_true):
if metric == 'canberra' and cmp_version(scipy.__version__, '0.9') <= 0:
raise SkipTest("Canberra distance incorrect in scipy < 0.9")
dm = DistanceMetric.get_metric(metric, **kwargs)
D12 = dm.pairwise(self.X1)
assert_array_almost_equal(D12, D_true)
def check_pdist_bool(self, metric, D_true):
dm = DistanceMetric.get_metric(metric)
D12 = dm.pairwise(self.X1_bool)
assert_array_almost_equal(D12, D_true)
def test_haversine_metric():
def haversine_slow(x1, x2):
return 2 * np.arcsin(np.sqrt(np.sin(0.5 * (x1[0] - x2[0])) ** 2
+ np.cos(x1[0]) * np.cos(x2[0]) *
np.sin(0.5 * (x1[1] - x2[1])) ** 2))
X = np.random.random((10, 2))
haversine = DistanceMetric.get_metric("haversine")
D1 = haversine.pairwise(X)
D2 = np.zeros_like(D1)
for i, x1 in enumerate(X):
for j, x2 in enumerate(X):
D2[i, j] = haversine_slow(x1, x2)
assert_array_almost_equal(D1, D2)
assert_array_almost_equal(haversine.dist_to_rdist(D1),
np.sin(0.5 * D2) ** 2)
def test_pyfunc_metric():
def dist_func(x1, x2, p):
return np.sum((x1 - x2) ** p) ** (1. / p)
X = np.random.random((10, 3))
euclidean = DistanceMetric.get_metric("euclidean")
pyfunc = DistanceMetric.get_metric("pyfunc", func=dist_func, p=2)
D1 = euclidean.pairwise(X)
D2 = pyfunc.pairwise(X)
assert_array_almost_equal(D1, D2)
if __name__ == '__main__':
import nose
nose.runmodule()
| bsd-3-clause |
fzalkow/scikit-learn | examples/applications/plot_stock_market.py | 227 | 8284 | """
=======================================
Visualizing the stock market structure
=======================================
This example employs several unsupervised learning techniques to extract
the stock market structure from variations in historical quotes.
The quantity that we use is the daily variation in quote price: quotes
that are linked tend to cofluctuate during a day.
.. _stock_market:
Learning a graph structure
--------------------------
We use sparse inverse covariance estimation to find which quotes are
correlated conditionally on the others. Specifically, sparse inverse
covariance gives us a graph, that is, a list of connections. For each
symbol, the symbols that it is connected to are those useful to explain
its fluctuations.
Clustering
----------
We use clustering to group together quotes that behave similarly. Here,
amongst the :ref:`various clustering techniques <clustering>` available
in the scikit-learn, we use :ref:`affinity_propagation` as it does
not enforce equal-size clusters, and it can choose automatically the
number of clusters from the data.
Note that this gives us a different indication than the graph, as the
graph reflects conditional relations between variables, while the
clustering reflects marginal properties: variables clustered together can
be considered as having a similar impact at the level of the full stock
market.
Embedding in 2D space
---------------------
For visualization purposes, we need to lay out the different symbols on a
2D canvas. For this we use :ref:`manifold` techniques to retrieve 2D
embedding.
Visualization
-------------
The output of the 3 models are combined in a 2D graph where nodes
represents the stocks and edges the:
- cluster labels are used to define the color of the nodes
- the sparse covariance model is used to display the strength of the edges
- the 2D embedding is used to position the nodes in the plan
This example has a fair amount of visualization-related code, as
visualization is crucial here to display the graph. One of the challenges
is to position the labels while minimizing overlap. For this we use a
heuristic based on the direction of the nearest neighbor along each
axis.
"""
print(__doc__)
# Author: Gael Varoquaux [email protected]
# License: BSD 3 clause
import datetime
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import finance
from matplotlib.collections import LineCollection
from sklearn import cluster, covariance, manifold
###############################################################################
# Retrieve the data from Internet
# Choose a time period reasonably calm (not too long ago so that we get
# high-tech firms, and before the 2008 crash)
d1 = datetime.datetime(2003, 1, 1)
d2 = datetime.datetime(2008, 1, 1)
# kraft symbol has now changed from KFT to MDLZ in yahoo
symbol_dict = {
'TOT': 'Total',
'XOM': 'Exxon',
'CVX': 'Chevron',
'COP': 'ConocoPhillips',
'VLO': 'Valero Energy',
'MSFT': 'Microsoft',
'IBM': 'IBM',
'TWX': 'Time Warner',
'CMCSA': 'Comcast',
'CVC': 'Cablevision',
'YHOO': 'Yahoo',
'DELL': 'Dell',
'HPQ': 'HP',
'AMZN': 'Amazon',
'TM': 'Toyota',
'CAJ': 'Canon',
'MTU': 'Mitsubishi',
'SNE': 'Sony',
'F': 'Ford',
'HMC': 'Honda',
'NAV': 'Navistar',
'NOC': 'Northrop Grumman',
'BA': 'Boeing',
'KO': 'Coca Cola',
'MMM': '3M',
'MCD': 'Mc Donalds',
'PEP': 'Pepsi',
'MDLZ': 'Kraft Foods',
'K': 'Kellogg',
'UN': 'Unilever',
'MAR': 'Marriott',
'PG': 'Procter Gamble',
'CL': 'Colgate-Palmolive',
'GE': 'General Electrics',
'WFC': 'Wells Fargo',
'JPM': 'JPMorgan Chase',
'AIG': 'AIG',
'AXP': 'American express',
'BAC': 'Bank of America',
'GS': 'Goldman Sachs',
'AAPL': 'Apple',
'SAP': 'SAP',
'CSCO': 'Cisco',
'TXN': 'Texas instruments',
'XRX': 'Xerox',
    'LMT': 'Lockheed Martin',
'WMT': 'Wal-Mart',
'WBA': 'Walgreen',
'HD': 'Home Depot',
'GSK': 'GlaxoSmithKline',
'PFE': 'Pfizer',
'SNY': 'Sanofi-Aventis',
'NVS': 'Novartis',
'KMB': 'Kimberly-Clark',
'R': 'Ryder',
'GD': 'General Dynamics',
'RTN': 'Raytheon',
'CVS': 'CVS',
'CAT': 'Caterpillar',
'DD': 'DuPont de Nemours'}
symbols, names = np.array(list(symbol_dict.items())).T
quotes = [finance.quotes_historical_yahoo(symbol, d1, d2, asobject=True)
for symbol in symbols]
open = np.array([q.open for q in quotes]).astype(np.float)
close = np.array([q.close for q in quotes]).astype(np.float)
# The daily variations of the quotes are what carry most information
variation = close - open
###############################################################################
# Learn a graphical structure from the correlations
edge_model = covariance.GraphLassoCV()
# standardize the time series: using correlations rather than covariance
# is more efficient for structure recovery
X = variation.copy().T
X /= X.std(axis=0)
edge_model.fit(X)
###############################################################################
# Cluster using affinity propagation
_, labels = cluster.affinity_propagation(edge_model.covariance_)
n_labels = labels.max()
for i in range(n_labels + 1):
print('Cluster %i: %s' % ((i + 1), ', '.join(names[labels == i])))
###############################################################################
# Find a low-dimension embedding for visualization: find the best position of
# the nodes (the stocks) on a 2D plane
# We use a dense eigen_solver to achieve reproducibility (arpack is
# initiated with random vectors that we don't control). In addition, we
# use a large number of neighbors to capture the large-scale structure.
node_position_model = manifold.LocallyLinearEmbedding(
n_components=2, eigen_solver='dense', n_neighbors=6)
embedding = node_position_model.fit_transform(X.T).T
###############################################################################
# Visualization
plt.figure(1, facecolor='w', figsize=(10, 8))
plt.clf()
ax = plt.axes([0., 0., 1., 1.])
plt.axis('off')
# Display a graph of the partial correlations
partial_correlations = edge_model.precision_.copy()
d = 1 / np.sqrt(np.diag(partial_correlations))
partial_correlations *= d
partial_correlations *= d[:, np.newaxis]
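# Keep only the strongest partial correlations (upper triangle, |value| > 0.02)
# as edges of the displayed graph.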
non_zero = (np.abs(np.triu(partial_correlations, k=1)) > 0.02)
# Plot the nodes using the coordinates of our embedding
plt.scatter(embedding[0], embedding[1], s=100 * d ** 2, c=labels,
cmap=plt.cm.spectral)
# Plot the edges
start_idx, end_idx = np.where(non_zero)
#a sequence of (*line0*, *line1*, *line2*), where::
# linen = (x0, y0), (x1, y1), ... (xm, ym)
segments = [[embedding[:, start], embedding[:, stop]]
for start, stop in zip(start_idx, end_idx)]
values = np.abs(partial_correlations[non_zero])
lc = LineCollection(segments,
zorder=0, cmap=plt.cm.hot_r,
norm=plt.Normalize(0, .7 * values.max()))
lc.set_array(values)
lc.set_linewidths(15 * values)
ax.add_collection(lc)
# Add a label to each node. The challenge here is that we want to
# position the labels to avoid overlap with other labels
for index, (name, label, (x, y)) in enumerate(
zip(names, labels, embedding.T)):
dx = x - embedding[0]
dx[index] = 1
dy = y - embedding[1]
dy[index] = 1
this_dx = dx[np.argmin(np.abs(dy))]
this_dy = dy[np.argmin(np.abs(dx))]
if this_dx > 0:
horizontalalignment = 'left'
x = x + .002
else:
horizontalalignment = 'right'
x = x - .002
if this_dy > 0:
verticalalignment = 'bottom'
y = y + .002
else:
verticalalignment = 'top'
y = y - .002
plt.text(x, y, name, size=10,
horizontalalignment=horizontalalignment,
verticalalignment=verticalalignment,
bbox=dict(facecolor='w',
edgecolor=plt.cm.spectral(label / float(n_labels)),
alpha=.6))
plt.xlim(embedding[0].min() - .15 * embedding[0].ptp(),
embedding[0].max() + .10 * embedding[0].ptp(),)
plt.ylim(embedding[1].min() - .03 * embedding[1].ptp(),
embedding[1].max() + .03 * embedding[1].ptp())
plt.show()
| bsd-3-clause |
clawpack/seismic | 2d/sloping_fault_water/dtopotools_horiz_okada.py | 2 | 80673 | #!/usr/bin/env python
# encoding: utf-8
r"""
GeoClaw dtopotools Module `$CLAW/geoclaw/src/python/geoclaw/dtopotools.py`
Module provides several functions for dealing with changes to topography (usually
due to earthquakes) including reading sub-fault specifications, writing out
dtopo files, and calculating Okada based deformations.
:Classes:
- DTopography
- SubFault
- Fault
- UCSBFault
- CSVFault
- SiftFault
- SegmentedPlaneFault
:Functions:
- convert_units
- plot_dz_contours
- plot_dz_colors
- Mw
- strike_direction
- rise_fraction
"""
import os
import sys
import re
import numpy
import clawpack.geoclaw.topotools as topotools
import clawpack.geoclaw.util as util
# ==============================================================================
# Constants
# ==============================================================================
from clawpack.geoclaw.data import DEG2RAD, LAT2METER
# Poisson ratio for Okada
poisson = 0.25
# ==============================================================================
# Units dictionaries
# ==============================================================================
# Dictionary for standard units to be used for all subfault models.
# The data might be read in from a file where different units are used,
# in which case the *input_units* argument of the *read* method can be used
# to indicate these units. The *read* function should then convert to these
# standard units:
standard_units = {}
standard_units['length'] = 'm'
standard_units['width'] = 'm'
standard_units['depth'] = 'm'
standard_units['slip'] = 'm'
standard_units['mu'] = 'Pa'
# Dictionary for converting input_units specified by user to or from
# standard units used internally:
# (Conversion is performed by the module function *convert_units*, which
# is called by *SubFault.convert_to_standard_units*)
unit_conversion_factor = {}
# for length, width, depth, slip: (standard units = 'm')
unit_conversion_factor['m'] = 1.
unit_conversion_factor['cm'] = 0.01
unit_conversion_factor['km'] = 1000.
unit_conversion_factor['nm'] = 1852.0 # nautical miles
# for rigidity (shear modulus) mu: (standard units = 'Pa')
unit_conversion_factor['Pa'] = 1.
unit_conversion_factor['GPa'] = 1.e9
unit_conversion_factor['dyne/cm^2'] = 0.1
unit_conversion_factor['dyne/m^2'] = 1.e-5
# for seismic moment Mo: (standard units = 'N-m', Newton-meters)
unit_conversion_factor['N-m'] = 1.
unit_conversion_factor['dyne-cm'] = 1.e-7
# Check that these are consistent:
check = [unit_conversion_factor[standard_units[param]] == 1. for param in \
standard_units.keys()]
if not numpy.alltrue(check):
raise ValueError("Conversion factors should be 1 for all standard_units")
# ==============================================================================
# General utility functions
# ==============================================================================
def convert_units(value, io_units, direction=1, verbose=False):
r"""
convert *value* to standard units from *io_units* or vice versa.
*io_units* (str) refers to the units used in the subfault file read or to be
written. The standard units are those used internally in this module.
See the comments below for the standard units.
If *direction==1*, *value* is in *io_units* and convert to standard.
If *direction==2*, *value* is in standard units and convert to *io_units*.
"""
try:
factor = unit_conversion_factor[io_units]
except:
factor = 1.
print "*** Warning: unrecoginized units in convert_units, not converting"
#raise ValueError("Unrecognized io_units %s, must be one of %s" \
# % (io_units, unit_conversion_factor.keys()))
if direction == 1:
converted_value = value * factor
elif direction == 2:
converted_value = value / factor
else:
raise ValueError("Unrecognized direction, must be 1 or 2")
return converted_value
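# Example values (added for illustration, not part of the original module):
#   convert_units(10., 'km', direction=1)    -> 10000.0  (10 km to standard meters)
#   convert_units(10000., 'km', direction=2) -> 10.0     (standard meters back to km)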
def plot_dZ_contours(x, y, dZ, axes=None, dZ_interval=0.5, verbose=False,
fig_kwargs={}):
r"""For plotting seafloor deformation dZ"""
import matplotlib.pyplot as plt
dZ_max = max(dZ.max(), -dZ.min()) + dZ_interval
clines1 = numpy.arange(dZ_interval, dZ_max, dZ_interval)
clines = list(-numpy.flipud(clines1)) + list(clines1)
# Create axes if needed
if axes is None:
fig = plt.figure(**fig_kwargs)
axes = fig.add_subplot(111)
if len(clines) > 0:
if verbose:
print "Plotting contour lines at: ",clines
axes.contour(x, y, dZ, clines, colors='k')
else:
print "No contours to plot"
return axes
def plot_dZ_colors(x, y, dZ, axes=None, cmax_dZ=None, dZ_interval=None,
add_colorbar=True, verbose=False, fig_kwargs={}):
r"""
Plot sea floor deformation dZ as colormap with contours
"""
from clawpack.visclaw import colormaps
import matplotlib.pyplot as plt
if axes is None:
fig = plt.figure(**fig_kwargs)
axes = fig.add_subplot(1, 1, 1)
#print "+++ in plot_dZ_colors, axes = ",axes
#print "+++ in plot_dZ_colors, id(axes) = ",id(axes)
dZmax = numpy.abs(dZ).max()
if cmax_dZ is None:
if dZmax < 1.e-12:
cmax_dZ = 0.1
else:
cmax_dZ = dZmax
cmap = colormaps.blue_white_red
extent = [x.min(), x.max(), y.min(), y.max()]
im = axes.imshow(dZ, extent=extent, cmap=cmap, origin='lower')
im.set_clim(-cmax_dZ,cmax_dZ)
if add_colorbar:
cbar = plt.colorbar(im, ax=axes)
cbar.set_label("Deformation (m)")
if dZ_interval is None:
dZ_interval = cmax_dZ/10.
clines1 = numpy.arange(dZ_interval, dZmax + dZ_interval, dZ_interval)
clines = list(-numpy.flipud(clines1)) + list(clines1)
if len(clines) > 0:
if verbose:
print "Plotting contour lines at: ",clines
axes.contour(x,y,dZ,clines,colors='k',linestyles='solid')
elif verbose:
print "No contours to plot"
y_ave = 0.5 * (y.min() + y.max())
axes.set_aspect(1. / numpy.cos(y_ave * numpy.pi / 180.))
axes.ticklabel_format(format='plain', useOffset=False)
axes.set_title('Seafloor deformation')
for label in axes.get_xticklabels():
label.set_rotation(20)
return axes
def Mw(Mo, units="N-m"):
"""
Calculate moment magnitude based on seismic moment Mo.
Follows USGS recommended definition from
http://earthquake.usgs.gov/aboutus/docs/020204mag_policy.php
The SubFault and Fault classes each have a function Mo to compute
the seismic moment for a single subfault or collection respectively.
"""
if units == "N-m":
Mw = 2/3.0 * (numpy.log10(Mo) - 9.05)
elif units == "dyne-cm":
Mw = 2/3.0 * numpy.log10(Mo) - 10.7
# = 2/3.0 * (numpy.log10(1e-7 * Mo) - 9.05)
else:
raise ValueError("Unknown unit for Mo: %s." % units)
return Mw
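# Example (illustrative): a seismic moment Mo = 3.98e22 N-m gives
#   Mw = 2/3 * (log10(3.98e22) - 9.05) ~= 9.0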
def strike_direction(x1, y1, x2, y2):
"""
Calculate strike direction between two points.
Actually calculates "initial bearing" from (x1,y1) in direction
towards (x2,y2), following
http://www.movable-type.co.uk/scripts/latlong.html
"""
x1 = x1*numpy.pi/180.
y1 = y1*numpy.pi/180.
x2 = x2*numpy.pi/180.
y2 = y2*numpy.pi/180.
dx = x2-x1
theta = numpy.arctan2(numpy.sin(dx)*numpy.cos(y2), \
numpy.cos(y1)*numpy.sin(y2) \
- numpy.sin(y1)*numpy.cos(y2)*numpy.cos(dx))
s = theta*180./numpy.pi
if s<0:
s = 360+s
return s
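# Quick sanity checks (illustrative): a segment pointing due north gives 0 degrees
# and one pointing due east gives 90 degrees, e.g.
#   strike_direction(0., 0., 0., 1.) -> 0.0
#   strike_direction(0., 0., 1., 0.) -> 90.0 (up to floating point rounding)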
def rise_fraction(t, t0, t_rise, t_rise_ending=None):
"""
A continuously differentiable piecewise quadratic function of t that is
* 0 for t <= t0,
* 1 for t >= t0 + t_rise + t_rise_ending
with maximum slope at t0 + t_rise.
For specifying dynamic fault ruptures: Subfault files often contain these
parameters for each subfault for an earthquake event.
*t* can be a scalar or a numpy array of times and the returned result
will have the same type. A list or tuple of times returns a numpy array.
"""
scalar = (type(t) in [float,int])
t = numpy.array(t)
if t_rise_ending is None:
t_rise_ending = t_rise
t1 = t0+t_rise
t2 = t1+t_rise_ending
rf = numpy.where(t<=t0, 0., 1.)
if t2 != t0:
t20 = float(t2-t0)
t10 = float(t1-t0)
t21 = float(t2-t1)
c1 = t21 / (t20*t10*t21)
c2 = t10 / (t20*t10*t21)
rf = numpy.where((t>t0) & (t<=t1), c1*(t-t0)**2, rf)
rf = numpy.where((t>t1) & (t<=t2), 1. - c2*(t-t2)**2, rf)
if scalar:
rf = float(rf) # return a scalar if input t is scalar
return rf
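# Example (illustrative): with t0=0 and t_rise=10 (so t_rise_ending defaults to 10)
#   rise_fraction(0., 0., 10.)  -> 0.0
#   rise_fraction(10., 0., 10.) -> 0.5  (maximum slope at t0 + t_rise)
#   rise_fraction(20., 0., 10.) -> 1.0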
# ==============================================================================
# DTopography Base Class
# ==============================================================================
class DTopography(object):
r"""Basic object representing moving topography
"""
def __init__(self, path=None, dtopo_type=None):
r"""DTopography initialization routine.
See :class:`DTopography` for more info.
"""
self.dZ = None
self.times = []
self.x = None
self.y = None
self.X = None
self.Y = None
self.delta = None
self.path = path
if path:
self.read(path, dtopo_type)
def read(self, path=None, dtopo_type=None, verbose=False):
r"""
Read in a dtopo file and use to set attributes of this object.
:input:
- *path* (path) - Path to existing dtopo file to read in.
- *dtopo_type* (int) - Type of topography file to read. Default is 3
if not specified or apparent from file extension.
"""
if path is not None:
self.path = path
else:
if self.path is None:
raise ValueError("Need to specify a path to a file.")
else:
path = self.path
if dtopo_type is None:
dtopo_type = topotools.determine_topo_type(path, default=3)
if dtopo_type == 1:
data = numpy.loadtxt(path)
if verbose:
print "Loaded file %s with %s lines" %(path,data.shape[0])
t = list(set(data[:,0]))
t.sort()
if verbose:
print "times found: ",t
ntimes = len(t)
tlast = t[-1]
lastlines = data[data[:,0]==tlast]
xvals = list(set(lastlines[:,1]))
xvals.sort()
mx = len(xvals)
my = len(lastlines) / mx
if verbose:
print "Read dtopo: mx=%s and my=%s, at %s times" % (mx,my,ntimes)
X = numpy.reshape(lastlines[:,1],(my,mx))
Y = numpy.reshape(lastlines[:,2],(my,mx))
Y = numpy.flipud(Y)
if verbose:
print "Returning dZ as a list of mx*my arrays"
dZ = None
for n in range(ntimes):
i1 = n*mx*my
i2 = (n+1)*mx*my
dzt = numpy.reshape(data[i1:i2,3],(my,mx))
dzt = numpy.flipud(dzt)
dzt = numpy.array(dzt, ndmin=3) # convert to 3d array
if dZ is None:
dZ = dzt.copy()
else:
dZ = numpy.append(dZ, dzt, axis=0)
self.X = X
self.Y = Y
self.x = X[0,:]
self.y = Y[:,0]
self.times = t
self.dZ = dZ
elif dtopo_type == 2 or dtopo_type == 3:
fid = open(path)
mx = int(fid.readline().split()[0])
my = int(fid.readline().split()[0])
mt = int(fid.readline().split()[0])
xlower = float(fid.readline().split()[0])
ylower = float(fid.readline().split()[0])
t0 = float(fid.readline().split()[0])
dx = float(fid.readline().split()[0])
dy = float(fid.readline().split()[0])
dt = float(fid.readline().split()[0])
fid.close()
xupper = xlower + (mx-1)*dx
yupper = ylower + (my-1)*dy
x=numpy.linspace(xlower,xupper,mx)
y=numpy.linspace(ylower,yupper,my)
times = numpy.linspace(t0, t0+(mt-1)*dt, mt)
dZvals = numpy.loadtxt(path, skiprows=9)
if dtopo_type==3:
# my lines with mx values on each
for k,t in enumerate(times):
dZk = numpy.reshape(dZvals[k*my:(k+1)*my, :], (my,mx))
dZk = numpy.flipud(dZk)
dZk = numpy.array(dZk, ndmin=3) # convert to 3d array
if k==0:
dZ = dZk.copy()
else:
dZ = numpy.append(dZ, dZk, axis=0)
else:
# dtopo_type==2 ==> mx*my lines with 1 values on each
for k,t in enumerate(times):
dZk = numpy.reshape(dZvals[k*mx*my:(k+1)*mx*my], (my,mx))
dZk = numpy.flipud(dZk)
dZk = numpy.array(dZk, ndmin=3) # convert to 3d array
if k==0:
dZ = dZk.copy()
else:
dZ = numpy.append(dZ, dZk, axis=0)
self.x = x
self.y = y
self.X, self.Y = numpy.meshgrid(x,y)
self.times = times
self.dZ = dZ
else:
raise ValueError("Only topography types 1, 2, and 3 are supported,",
" given %s." % dtopo_type)
def write(self, path=None, dtopo_type=None):
r"""Write out subfault resulting dtopo to file at *path*.
:input:
- *path* (path) - Path to the output file to written to.
- *dtopo_type* (int) - Type of topography file to write out. Default
is 3.
"""
if path is not None:
self.path = path
if self.path is None:
raise IOError("*** need to specify path to file for writing")
path = self.path
if dtopo_type is None:
dtopo_type = topotools.determine_topo_type(path, default=3)
x = self.X[0,:]
y = self.Y[:,0]
dx = x[1] - x[0]
dy = y[1] - y[0]
if abs(dx - dy) >= 1e-12:
raise ValueError("dx = %g not equal to dy = %g" % (dx,dy))
# Construct each interpolating function and evaluate at new grid
## Shouldn't need to interpolate in time.
with open(path, 'w') as data_file:
if dtopo_type == 0:
# Topography file with 3 columns, x, y, dz written from the
# upper left corner of the region. Only final time.
Y_flipped = numpy.flipud(self.Y)
dZ_flipped = numpy.flipud(self.dZ[-1,:,:])
for j in xrange(self.Y.shape[0]):
for i in xrange(self.X.shape[1]):
data_file.write("%s %s %s\n" % self.X[j,i],
Y_flipped[j,i], dZ_flipped[j,i])
elif dtopo_type == 1:
# Topography file with 4 columns, t, x, y, dz written from the
# upper
# left corner of the region
Y_flipped = numpy.flipud(self.Y)
for (n, time) in enumerate(self.times):
#alpha = (time - self.t[0]) / self.t[-1]
#dZ_flipped = numpy.flipud(alpha * self.dZ[:,:])
dZ_flipped = numpy.flipud(self.dZ[n,:,:])
for j in xrange(self.Y.shape[0]):
for i in xrange(self.X.shape[1]):
data_file.write("%s %s %s %s\n" % (self.times[n],
self.X[j,i], Y_flipped[j,i], dZ_flipped[j,i]))
elif dtopo_type == 2 or dtopo_type == 3:
if len(self.times) == 1:
dt = 0.
else:
dt = float(self.times[1] - self.times[0])
# Write out header
data_file.write("%7i mx \n" % x.shape[0])
data_file.write("%7i my \n" % y.shape[0])
data_file.write("%7i mt \n" % len(self.times))
data_file.write("%20.14e xlower\n" % x[0])
data_file.write("%20.14e ylower\n" % y[0])
data_file.write("%20.14e t0\n" % self.times[0])
data_file.write("%20.14e dx\n" % dx)
data_file.write("%20.14e dy\n" % dy)
data_file.write("%20.14e dt\n" % dt)
if dtopo_type == 2:
raise ValueError("Topography type 2 is not yet supported.")
elif dtopo_type == 3:
for (n, time) in enumerate(self.times):
#alpha = (time - self.t[0]) / (self.t[-1])
for j in range(self.Y.shape[0]-1, -1, -1):
data_file.write(self.X.shape[1] * '%012.6e '
% tuple(self.dZ[n,j,:]))
data_file.write("\n")
else:
raise ValueError("Only topography types 1, 2, and 3 are ",
"supported, given %s." % dtopo_type)
def dZ_at_t(self, t):
"""
Interpolate dZ to specified time t and return deformation.
"""
from matplotlib.mlab import find
if t <= self.times[0]:
return self.dZ[0,:,:]
elif t >= self.times[-1]:
return self.dZ[-1,:,:]
else:
n = max(find(self.times <= t))
t1 = self.times[n]
t2 = self.times[n+1]
dz = (t2-t)/(t2-t1) * self.dZ[n,:,:] + \
(t-t1)/(t2-t1) * self.dZ[n+1,:,:]
return dz
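        # Illustrative behaviour: with times = [0., 10.], dZ_at_t(5.) returns
        # 0.5*dZ[0,:,:] + 0.5*dZ[1,:,:] (simple linear interpolation in time).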
def dZ_max(self):
r"""Return max(abs(dZ)) over all dz in self.dZ, the maximum
surface deformation for this dtopo.
DEPRECATE? -- it's now a 1-liner
"""
return abs(self.dZ).max()
def plot_dZ_colors(self, t, axes=None, cmax_dZ=None, dZ_interval=None,
fig_kwargs={}):
"""
Interpolate self.dZ to specified time t and then call module function
plot_dZ_colors.
"""
axes = plot_dZ_colors(self.X, self.Y, self.dZ_at_t(t), axes=axes,
cmax_dZ=cmax_dZ, dZ_interval=dZ_interval,
fig_kwargs=fig_kwargs)
return axes
def plot_dZ_contours(self, t, dZ_interval=0.5, axes=None, fig_kwargs={}):
"""
Interpolate self.dZ to specified time t and then call module function
plot_dZ_contours.
"""
axes = plot_dZ_contours(self.X, self.Y, self.dZ_at_t(t), axes=axes,
dZ_interval=dZ_interval)
return axes
# ==============================================================================
# Generic Fault Class
# ==============================================================================
class Fault(object):
r"""Base Fault class
A class describing a fault possibly composed of subfaults.
:Properties:
:Initialization:
:Examples:
"""
def __init__(self, subfaults=None, input_units={},
coordinate_specification=None):
r"""Fault initialization routine.
See :class:`Fault` for more info.
"""
# Parameters for subfault specification
self.rupture_type = 'static' # 'static' or 'dynamic'
#self.times = numpy.array([0., 1.]) # or just [0.] ??
self.dtopo = None
# Default units of each parameter type
self.input_units = standard_units.copy()
self.input_units.update(input_units)
# Set the coordinate specification, e.g. 'top center':
self.coordinate_specification = coordinate_specification
if subfaults is not None:
if not isinstance(subfaults, list):
raise ValueError("Input parameter subfaults must be a list.")
self.subfaults = subfaults
for subfault in self.subfaults:
subfault.convert_to_standard_units(self.input_units)
if subfault.coordinate_specification is None:
subfault.coordinate_specification = coordinate_specification
if subfault.coordinate_specification is None:
raise ValueError("Must specify coordinate_specification, " + \
"either for fault or for each subfault")
def read(self, path, column_map, coordinate_specification="centroid",
rupture_type="static", skiprows=0,
delimiter=None, input_units={}, defaults=None):
r"""Read in subfault specification at *path*.
Creates a list of subfaults from the subfault specification file at
*path*.
:Inputs:
- *path* (str) file to read in, should contain subfaults, one per line
- *column_map* (dict) specifies mapping from parameter to the column
of the input file that contains values for this parameter, e.g.
column_map = {"latitude":0, "longitude":1, "depth":2, "slip":3,
"rake":4, "strike":5, "dip":6}
- *coordinate_specification* (str) specifies the location on each
subfault that corresponds to the (longitude,latitude) and depth
of the subfault. See the documentation for
*SubFault.calculate_geometry*.
- *rupture_type* (str) either "static" or "dynamic"
- *skiprows* (int) number of header lines to skip before data
- *delimiter* (str) e.g. ',' for csv files
- *input_units* (dict) indicating units for length, width, slip, depth,
and for rigidity mu as specified in file. These
will be converted to "standard units".
- *defaults* (dict) default values for all subfaults, for values not
included in subfault file on each line.
"""
# Read in rest of data
# (Use genfromtxt to deal with files containing strings, e.g. unit
# source name, in some column)
data = numpy.genfromtxt(path, skip_header=skiprows, delimiter=delimiter)
if len(data.shape) == 1:
data = numpy.array([data])
self.coordinate_specification = coordinate_specification
self.input_units = standard_units.copy()
self.input_units.update(input_units)
self.subfaults = []
for n in xrange(data.shape[0]):
new_subfault = SubFault()
new_subfault.coordinate_specification = coordinate_specification
for (var, column) in column_map.iteritems():
if isinstance(column, tuple) or isinstance(column, list):
setattr(new_subfault, var, [None for k in column])
for (k, index) in enumerate(column):
getattr(new_subfault, var)[k] = data[n, index]
else:
setattr(new_subfault, var, data[n, column])
if defaults is not None:
for param in defaults.iterkeys():
setattr(new_subfault, param, defaults[param])
new_subfault.convert_to_standard_units(self.input_units)
self.subfaults.append(new_subfault)
def write(self, path, style=None, column_list=None, output_units={},
delimiter=' '):
r"""
Write subfault format file with one line for each subfault.
Can either specify a *style* that determines the columns,
or a *column_list*. Must specify one but not both. See below for
details.
Inputs:
- *path* (str) file to write to.
- *style* (str) to write in a style that matches standard styles
adopted by various groups. One of the following:
- "usgs" (Not implemented)
- "noaa sift" (Not implemented)
- "ucsb" (Not implemented)
- *column_list* (list) specifies what order the parameters should
be written in the output file, e.g.
column_list = ['longitude','latitude','length','width',
'depth','strike','rake','dip','slip']
- *output_units* (dict) specifies units to convert to before writing.
Defaults to "standard units".
- *delimiter* (str) specifies delimiter between columns, e.g.
"," to create a csv file. Defaults to " ".
"""
self.output_units = standard_units.copy()
self.output_units.update(output_units)
if style is not None:
msg = "style option not yet implemented, use column_list"
raise NotImplementedError(msg)
if column_list is None:
raise Exception("Must specify column_list")
format = {}
format['longitude'] = '%15.5f'
format['latitude'] = '%15.5f'
format['strike'] = '%15.5f'
format['rake'] = '%15.5f'
format['dip'] = '%15.5f'
format['depth'] = '%15.8e'
format['length'] = '%15.8e'
format['width'] = '%15.8e'
format['slip'] = '%15.8e'
with open(path, 'w') as data_file:
c_s_list = set([s.coordinate_specification for s in self.subfaults])
if (len(c_s_list) >= 1) and \
(c_s_list.pop() != self.coordinate_specification):
raise ValueError("Subfaults do not have common " +
"coordinate_specification that agrees with fault attribute")
# write header:
data_file.write('Subfaults file with coordinate_specification: ')
data_file.write('%s, \n' % self.coordinate_specification)
data_file.write('Units: %s, \n' % str(output_units))
s = ""
for param in column_list:
s = s + param.rjust(15) + delimiter
data_file.write(s + '\n')
for subfault in self.subfaults:
s = ""
for param in column_list:
value = getattr(subfault,param)
if output_units.has_key(param):
converted_value = convert_units(value,
self.output_units[param], direction=2)
s = s + format[param] % value + delimiter
data_file.write(s + '\n')
def Mo(self):
r"""
Calculate the seismic moment for a fault composed of subfaults,
in units N-m.
"""
total_Mo = 0.0
for subfault in self.subfaults:
total_Mo += subfault.Mo()
return total_Mo
def Mw(self):
r"""Calculate the moment magnitude for a fault composed of subfaults."""
return Mw(self.Mo())
def create_dtopography(self, x, y, times=[0., 1.], y_disp=False,
x_disp=False, verbose=False):
r"""Compute change in topography and construct a dtopography object.
Use subfaults' `okada` routine and add all
deformations together.
Raises a ValueError exception if the *rupture_type* is an unknown type.
returns a :class`DTopography` object.
"""
dtopo = DTopography()
dtopo.x = x
dtopo.y = y
X, Y = numpy.meshgrid(x,y)
dtopo.X = X
dtopo.Y = Y
dtopo.times = times
if verbose:
print "Making Okada dz for each of %s subfaults" \
% len(self.subfaults)
for k,subfault in enumerate(self.subfaults):
if verbose:
sys.stdout.write("%s.." % k)
sys.stdout.flush()
subfault.okada(x,y,y_disp=y_disp,x_disp=x_disp)
# sets subfault.dtopo with times=[0]
# and subfault.dtopo.dZ.shape[0] == 1
if verbose:
sys.stdout.write("\nDone\n")
if self.rupture_type == 'static':
if len(times) > 2:
raise ValueError("For static deformation, need len(times) <= 2")
dz = numpy.zeros(X.shape)
for subfault in self.subfaults:
dz += subfault.dtopo.dZ[0,:,:]
if len(times) == 1:
# only final deformation stored:
dtopo.dZ = numpy.array(dz, ndmin=3)
elif len(times) == 2:
# store 0 at first time and final deformation at second:
dz0 = numpy.zeros(X.shape)
dtopo.dZ = numpy.array([dz0, dz])
if dtopo.dZ.shape != (2, dz.shape[0], dz.shape[1]):
raise ValueError("dtopo.dZ does not have expected shape")
if y_disp:
if len(times) > 2:
raise ValueError("For static deformation, need len(times) <= 2")
dy = numpy.zeros(X.shape)
for subfault in self.subfaults:
dy += subfault.dtopo.dY[0,:,:]
if len(times) == 1:
# only final deformation stored:
dtopo.dY = numpy.array(dy, ndmin=3)
elif len(times) == 2:
# store 0 at first time and final deformation at second:
dy0 = numpy.zeros(X.shape)
dtopo.dY = numpy.array([dy0, dy])
# !!!! need to add dynamic and x_disp !!!!
elif self.rupture_type in ['dynamic','kinematic']:
t_prev = -1.e99
dzt = numpy.zeros(X.shape)
dZ = None
for t in times:
for k,subfault in enumerate(self.subfaults):
t0 = getattr(subfault,'rupture_time',0)
t1 = getattr(subfault,'rise_time',0.5)
t2 = getattr(subfault,'rise_time_ending',None)
rf = rise_fraction([t_prev,t],t0,t1,t2)
dfrac = rf[1] - rf[0]
if dfrac > 0.:
dzt = dzt + dfrac * subfault.dtopo.dZ[0,:,:]
dzt = numpy.array(dzt, ndmin=3) # convert to 3d array
if dZ is None:
dZ = dzt.copy()
else:
dZ = numpy.append(dZ, dzt, axis=0)
t_prev = t
dtopo.dZ = dZ
else:
raise ValueError("Unrecognized rupture_type: %s" % self.rupture_type)
# Store for user
self.dtopo = dtopo
return dtopo
def plot_subfaults(self, axes=None, plot_centerline=False, slip_color=False,
cmap_slip=None, cmin_slip=None, cmax_slip=None,
slip_time=None, plot_rake=False, xylim=None,
plot_box=True, colorbar_shrink=1, verbose=False):
"""
Plot each subfault projected onto the surface.
*axes* can be passed in to specify the *matplotlib.axes.AxesSubplot*
on which to add this plot. If *axes == None*, a new figure window
will be opened. The *axes* on which it is plotted is the return
value of this call.
If *plot_centerline == True*, plot a line from the centroid to the
top center of each subfault to show what direction is up-dip.
If *slip_color == True* then use the color map *cmap_slip*
(which defaults to *matplotlib.cm.jet*) to color the subplots based
on the magnitude of slip, scaled between *cmin_slip* and *cmax_slip*.
(If these are *None* then scaled automatically based on range of slip.)
If *slip_time == None* then colors are based on the final slip.
For dynamic faults, *slip_time* can be set to a time and the
dynamic timing of each subfault will be used to compute and
plot the slip at this time.
If *plot_rake == True*, plot a line from the centroid pointing in
the direction of the rake (the direction in which the top block is
moving relative to the lower block. The distance it moves is given
by the *slip*.)
*xylim* can be set to a list or tuple of length 4 of the form
[x1,x2,y1,y2] to specify the x- and y-axis limits.
If *plot_box == True*, a box will be drawn around each subfault.
"""
import matplotlib
import matplotlib.pyplot as plt
if (slip_time is not None) and (self.rupture_type == 'static'):
raise Exception("slip_time can only be specified for dynamic faults")
if axes is None:
fig = plt.figure()
axes = fig.add_subplot(1, 1, 1)
max_slip = 0.
min_slip = 0.
for subfault in self.subfaults:
slip = subfault.slip
max_slip = max(abs(slip), max_slip)
min_slip = min(abs(slip), min_slip)
if verbose:
print "Max slip, Min slip: ",max_slip, min_slip
if slip_color:
if cmap_slip is None:
cmap_slip = matplotlib.cm.jet
#white_purple = colormaps.make_colormap({0.:'w', 1.:[.6,0.2,.6]})
#cmap_slip = white_purple
if cmax_slip is None:
cmax_slip = max_slip
if cmin_slip is None:
cmin_slip = 0.
y_ave = 0.
for subfault in self.subfaults:
x_top = subfault.centers[0][0]
y_top = subfault.centers[0][1]
x_centroid = subfault.centers[1][0]
y_centroid = subfault.centers[1][1]
x_corners = [subfault.corners[2][0],
subfault.corners[3][0],
subfault.corners[0][0],
subfault.corners[1][0],
subfault.corners[2][0]]
y_corners = [subfault.corners[2][1],
subfault.corners[3][1],
subfault.corners[0][1],
subfault.corners[1][1],
subfault.corners[2][1]]
y_ave += y_centroid
# Plot projection of planes to x-y surface:
if plot_centerline:
axes.plot([x_top],[y_top],'bo',label="Top center")
axes.plot([x_centroid],[y_centroid],'ro',label="Centroid")
axes.plot([x_top,x_centroid],[y_top,y_centroid],'r-')
if plot_rake:
tau = (subfault.rake - 90) * numpy.pi/180.
axes.plot([x_centroid],[y_centroid],'go',markersize=5,label="Centroid")
dxr = x_top - x_centroid
dyr = y_top - y_centroid
x_rake = x_centroid + numpy.cos(tau)*dxr - numpy.sin(tau)*dyr
y_rake = y_centroid + numpy.sin(tau)*dxr + numpy.cos(tau)*dyr
axes.plot([x_rake,x_centroid],[y_rake,y_centroid],'g-',linewidth=1)
if slip_color:
if slip_time is not None:
slip = subfault.dynamic_slip(slip_time)
else:
slip = subfault.slip
s = min(1, max(0, (slip-cmin_slip)/(cmax_slip-cmin_slip)))
c = cmap_slip(s*.99) # since 1 does not map properly with jet
axes.fill(x_corners,y_corners,color=c,edgecolor='none')
if plot_box:
axes.plot(x_corners, y_corners, 'k-')
slipax = axes
y_ave = y_ave / len(self.subfaults)
slipax.set_aspect(1./numpy.cos(y_ave*numpy.pi/180.))
if xylim is not None:
slipax.set_xlim(xylim[:2])
slipax.set_ylim(xylim[2:])
if slip_color:
if slip_time is None:
slipax.set_title('Slip on fault')
else:
slipax.set_title('Slip on fault at time %6.1fs' % slip_time)
else:
slipax.set_title('Fault planes')
slipax.ticklabel_format(format='plain', useOffset=False)
for label in slipax.get_xticklabels():
label.set_rotation(20)
if slip_color and (colorbar_shrink > 0):
cax,kw = matplotlib.colorbar.make_axes(slipax,
shrink=colorbar_shrink)
norm = matplotlib.colors.Normalize(vmin=cmin_slip,vmax=cmax_slip)
cb1 = matplotlib.colorbar.ColorbarBase(cax, cmap=cmap_slip, norm=norm)
cb1.set_label("Slip (m)")
plt.sca(slipax) # reset the current axis to the main figure
return slipax
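# Plotting sketch (assumes matplotlib is available and `fault` is populated):
#   ax = fault.plot_subfaults(slip_color=True, plot_rake=True)
#   ax.figure.savefig('subfaults.png')
# The returned axes object can be reused, e.g. to overlay a coastline.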
def plot_subfaults_depth(self, axes=None):
"""
Plot the depth of each subfault vs. x and vs. y in a second plot.
"""
import matplotlib.pyplot as plt
if axes is None:
fig, axes = plt.subplots(nrows=2, ncols=1)
else:
if len(axes) != 2:
raise ValueError("The *axes* argument should be a list of ",
"axes objects of length == 2.")
for subfault in self.subfaults:
x_top = subfault.centers[0][0]
y_top = subfault.centers[0][1]
depth_top = subfault.centers[0][2]
x_bottom = subfault.centers[2][0]
y_bottom = subfault.centers[2][1]
depth_bottom = subfault.centers[2][2]
# Plot planes in x-z and y-z to see depths:
axes[0].plot([x_top, x_bottom], [-depth_top, -depth_bottom])
axes[1].plot([y_top, y_bottom], [-depth_top, -depth_bottom])
axes[0].set_title('depth vs. x')
axes[1].set_title('depth vs. y')
return axes
def containing_rect(self):
r"""Find containing rectangle of fault in x-y plane.
Returns tuple of x-limits and y-limits.
"""
extent = [numpy.infty, -numpy.infty, numpy.infty, -numpy.infty]
for subfault in self.subfaults:
for corner in subfault.corners:
extent[0] = min(corner[0], extent[0])
extent[1] = max(corner[0], extent[1])
extent[2] = min(corner[1], extent[2])
extent[3] = max(corner[1], extent[3])
return extent
def create_dtopo_xy(self, rect=None, dx=1/60., buffer_size=0.5):
r"""Create coordinate arrays containing fault with a buffer.
:Input:
- *rect* - if None, use self.containing_rect
Otherwise a list [x1,x2,y1,y2]
- *dx* (float) - Spatial resolution in degrees. Defaults to 1/60 degree
(1 arcminute) resolution.
- *buffer_size* (float) - Buffer distance around edge of fault in
degrees, defaults to 0.5 degrees.
:Output:
- *x,y* 1-dimensional arrays that cover the desired rect.
They start at (x1,y1) and may go a bit beyond (x2,y2) depending on dx
"""
if rect is None:
rect = self.containing_rect()
rect[0] -= buffer_size
rect[1] += buffer_size
rect[2] -= buffer_size
rect[3] += buffer_size
mx = int(numpy.ceil((rect[1] - rect[0]) / dx)) + 1
x1 = rect[0]
x2 = x1 + (mx-1)*dx
my = int(numpy.ceil((rect[3] - rect[2]) / dx)) + 1
y1 = rect[2]
y2 = y1 + (my-1)*dx # note dy==dx
x = numpy.linspace(x1,x2,mx)
y = numpy.linspace(y1,y2,my)
return x,y
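# Sketch of the usual workflow (hypothetical variable names): build a grid
# that covers the fault with a buffer, then evaluate the deformation on it:
#   x, y = fault.create_dtopo_xy(dx=1/60., buffer_size=0.5)
#   dtopo = fault.create_dtopography(x, y, times=[0., 1.])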
def set_dynamic_slip(self, t):
r"""
Set *slip_at_dynamic_t* attribute of all subfaults to slip at the
requested time *t*.
:Input:
- *t* (float) -
Raises a ValueError exception if this object's rupture_type attribute
is set to static.
"""
if self.rupture_type == 'static':
raise ValueError("Rupture type is set to static.")
self.dynamic_t = t
for subfault in self.subfaults:
subfault.slip_at_dynamic_t = subfault.dynamic_slip(t)
# ==============================================================================
# Sub-Fault Class
# ==============================================================================
class SubFault(object):
r"""Basic sub-fault specification.
Locate fault plane in 3D space.
Note that the coordinate specification is in reference to the fault plane.
:Coordinates of Fault Plane:
The attributes *centers* and *corners* are described by the figure below.
*centers[0,1,2]* refer to the points labeled 0,1,2 below.
In particular the centroid is given by *centers[1]*.
Each will be a tuple *(x, y, depth)*.
*corners[0,1,2,3]* refer to the points labeled a,b,c,d resp. below.
Each will be a tuple *(x, y, depth)*.
Top edge Bottom edge
a ----------- b ^
| | | ^
| | | |
| | | | along-strike direction
| | | |
0------1------2 | length |
| | |
| | |
| | |
| | |
d ----------- c v
<------------->
width
<-- up dip direction
"""
@property
def corners(self):
r"""Coordinates of the corners of the fault plane."""
if self._corners is None:
self.calculate_geometry()
return self._corners
@property
def centers(self):
r"""Coordinates along the center-line of the fault plane."""
if self._centers is None:
self.calculate_geometry()
return self._centers
def __init__(self):
r"""SubFault initialization routine.
See :class:`SubFault` for more info.
"""
super(SubFault, self).__init__()
self.strike = None
r"""Strike direction of subfault in degrees."""
self.length = None
r"""Length of subfault in meters."""
self.width = None
r"""Width of subfault in meters."""
self.depth = None
r"""Depth of subfault based on *coordinate_specification* in meters."""
self.slip = None
r"""Slip on subfault in strike direction in meters."""
self.rake = None
r"""Rake of subfault movement in degrees."""
self.dip = None
r"""Subfault's angle of dip"""
self.latitude = None
r"""Latitutde of the subfault based on *coordinate_specification*."""
self.longitude = None
r"""Longitude of the subfault based on *coordinate_specification*."""
self.coordinate_specification = None
r"""Specifies where the latitude, longitude and depth are measured from."""
# default value for rigidity = shear modulus
# Note that standard units for mu is now Pascals.
# Multiply by 10 to get dyne/cm^2 value.
self.mu = 4e10
r"""Rigidity (== shear modulus) in Pascals."""
self._centers = None
self._corners = None
def convert_to_standard_units(self, input_units, verbose=False):
r"""
Convert parameters from the units used for input into the standard
units used in this module.
"""
params = input_units.keys()
for param in params:
value = getattr(self, param)
converted_value = convert_units(value, input_units[param], 1)
setattr(self,param,converted_value)
if verbose:
print "%s %s %s converted to %s %s" \
% (param, value, input_units[param], converted_value, \
standard_units[param])
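# Example of the unit-conversion step (values hypothetical): if slip and
# depth were supplied in cm and km respectively,
#   sf.convert_to_standard_units({'slip': 'cm', 'depth': 'km'})
# rescales those attributes in place to the module's standard (SI) units.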
def Mo(self):
r"""Calculate the seismic moment for a single subfault
Returns in units of N-m and assumes mu is in Pascals.
"""
total_slip = self.length * self.width * abs(self.slip)
Mo = self.mu * total_slip
return Mo
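# Worked example of the moment formula Mo = mu * L * W * |slip| (numbers
# are illustrative): with mu = 4e10 Pa, L = 100 km, W = 50 km, slip = 2 m,
#   Mo = 4e10 * 100.e3 * 50.e3 * 2.0 = 4.0e20 N-m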
def __str__(self):
output = "Subfault Characteristics:\n"
output += " Coordinates: (%s, %s) (%s)\n" % (self.longitude,
self.latitude,
self.coordinate_specification)
output += " Dimensions (L,W): (%s, %s) m\n" % (self.length, self.width)
output += " Depth: %s m\n" % (self.depth)
output += " Rake, Strike, Dip: %s, %s, %s\n" % (self.rake, self.strike,
self.dip)
output += " Slip, Moment: %s m, %s N-m\n" % (self.slip, self.Mo())
output += " Fault Centroid: %s\n" % self.centers[1]
return output
def calculate_geometry(self):
r"""Calculate the fault geometry.
Routine calculates the class attributes *corners* and
*centers* which are the corners of the fault plane and
points along the centerline respectively in 3D space.
**Note:** *self.coordinate_specification* specifies the location on each
subfault that corresponds to the (longitude,latitude) and depth
of the subfault.
Currently must be one of these strings:
- "bottom center": (longitude,latitude) and depth at bottom center
- "top center": (longitude,latitude) and depth at top center
- "centroid": (longitude,latitude) and depth at centroid of plane
- "noaa sift": (longitude,latitude) at bottom center, depth at top,
This mixed convention is used by the NOAA SIFT
database and "unit sources", see:
http://nctr.pmel.noaa.gov/propagation-database.html
The Okada model is expressed assuming (longitude,latitude) and depth
are at the bottom center of the fault plane, so values must be
shifted for other specifications.
"""
# Simple conversion factor of latitude to meters
lat2meter = util.dist_latlong2meters(0.0, 1.0)[1]
# Setup coordinate arrays
self._corners = [[None, None, None], # a
[None, None, None], # b
[None, None, None], # c
[None, None, None]] # d
self._centers = [[None, None, None], # 0
[None, None, None], # 1
[None, None, None]] # 2
# Set depths
if self.coordinate_specification == 'top center' or \
self.coordinate_specification == 'noaa sift':
self._centers[0][2] = self.depth
self._centers[1][2] = self.depth \
+ 0.5 * self.width * numpy.sin(self.dip * DEG2RAD)
self._centers[2][2] = self.depth \
+ self.width * numpy.sin(self.dip * DEG2RAD)
elif self.coordinate_specification == 'centroid':
self._centers[0][2] = self.depth \
- 0.5 * self.width * numpy.sin(self.dip * DEG2RAD)
self._centers[1][2] = self.depth
self._centers[2][2] = self.depth \
+ 0.5 * self.width * numpy.sin(self.dip * DEG2RAD)
elif self.coordinate_specification == 'bottom center':
self._centers[0][2] = self.depth \
- self.width * numpy.sin(self.dip * DEG2RAD)
self._centers[1][2] = self.depth \
- 0.5 * self.width * numpy.sin(self.dip * DEG2RAD)
self._centers[2][2] = self.depth
else:
raise ValueError("Invalid coordinate specification %s." \
% self.coordinate_specification)
self._corners[0][2] = self._centers[0][2]
self._corners[3][2] = self._centers[0][2]
self._corners[1][2] = self._centers[2][2]
self._corners[2][2] = self._centers[2][2]
# Locate fault plane in 3D space:
# See the class docstring for a guide to labeling of corners/centers.
# Vector *up_dip* goes from bottom edge to top edge, in meters,
# from point 2 to point 0 in the figure in the class docstring.
up_dip = (-self.width * numpy.cos(self.dip * DEG2RAD) \
* numpy.cos(self.strike * DEG2RAD) \
/ (LAT2METER * numpy.cos(self.latitude * DEG2RAD)),
self.width * numpy.cos(self.dip * DEG2RAD) \
* numpy.sin(self.strike * DEG2RAD) / LAT2METER)
if self.coordinate_specification == 'top center':
self._centers[0][:2] = (self.longitude, self.latitude)
self._centers[1][:2] = (self.longitude - 0.5 * up_dip[0],
self.latitude - 0.5 * up_dip[1])
self._centers[2][:2] = (self.longitude - up_dip[0],
self.latitude - up_dip[1])
elif self.coordinate_specification == 'centroid':
self._centers[0][:2] = (self.longitude + 0.5 * up_dip[0],
self.latitude + 0.5 * up_dip[1])
self._centers[1][:2] = (self.longitude, self.latitude)
self._centers[2][:2] = (self.longitude - 0.5 * up_dip[0],
self.latitude - 0.5 * up_dip[1])
elif self.coordinate_specification == 'bottom center' or \
self.coordinate_specification == 'noaa sift':
# Non-rotated locations of center-line coordinates
self._centers[0][:2] = (self.longitude + up_dip[0],
self.latitude + up_dip[1])
self._centers[1][:2] = (self.longitude + 0.5 * up_dip[0],
self.latitude + 0.5 * up_dip[1])
self._centers[2][:2] = (self.longitude, self.latitude)
else:
raise ValueError("Unknown coordinate specification '%s'." \
% self.coordinate_specification)
# Calculate coordinates of corners:
# Vector *strike* goes along the top edge from point 1 to point a
# in the figure in the class docstring.
up_strike = (0.5 * self.length * numpy.sin(self.strike * DEG2RAD) \
/ (lat2meter * numpy.cos(self._centers[2][1] * DEG2RAD)),
0.5 * self.length * numpy.cos(self.strike * DEG2RAD) \
/ lat2meter)
self._corners[0][:2] = (self._centers[0][0]
+ up_strike[0],
self._centers[0][1]
+ up_strike[1])
self._corners[1][:2] = (self._centers[2][0]
+ up_strike[0],
self._centers[2][1]
+ up_strike[1])
self._corners[2][:2] = (self._centers[2][0]
- up_strike[0],
self._centers[2][1]
- up_strike[1])
self._corners[3][:2] = (self._centers[0][0]
- up_strike[0],
self._centers[0][1]
- up_strike[1])
def okada(self, x, y, y_disp=False, x_disp=False):
r"""
Apply Okada to this subfault and return a DTopography object.
:Input:
- x,y are 1d arrays
- x_disp == True means also return horizontal displacement in dip direction
:Output:
- DTopography object with dZ array of shape (1,len(y),len(x))
with single static displacement and times = [0.].
- The same object also carries dY and/or dX arrays when y_disp or
x_disp is True.
Calculates the vertical displacement by default.
Okada model is a mapping from several fault parameters
to a surface deformation.
See Okada 1985 [Okada85]_, or Okada 1992, Bull. Seism. Soc. Am.
okadamap function originally written in Python by Dave George for
Clawpack 4.6 okada.py routine, with some routines adapted
from fortran routines written by Xiaoming Wang.
Rewritten and made more flexible by Randy LeVeque
**Note:** *self.coordinate_specification* (str) specifies the location on
each subfault that corresponds to the (longitude,latitude) and depth
of the subfault.
See the documentation for *SubFault.calculate_geometry* for discussion of the
possible values *self.coordinate_specification* can take.
"""
# Okada model assumes x,y are at bottom center:
x_bottom = self.centers[2][0]
y_bottom = self.centers[2][1]
depth_bottom = self.centers[2][2]
length = self.length
width = self.width
depth = self.depth
slip = self.slip
halfL = 0.5*length
w = width
# convert angles to radians:
ang_dip = DEG2RAD * self.dip
ang_rake = DEG2RAD * self.rake
ang_strike = DEG2RAD * self.strike
dtopo = DTopography()
dtopo.x = x
dtopo.y = y
dtopo.times = [0.]
X,Y = numpy.meshgrid(x, y) # use convention of upper case for 2d
dtopo.X = X
dtopo.Y = Y
# Convert distance from (X,Y) to (x_bottom,y_bottom) from degrees to
# meters:
xx = LAT2METER * numpy.cos(DEG2RAD * Y) * (X - x_bottom)
yy = LAT2METER * (Y - y_bottom)
# Convert to distance along strike (x1) and dip (x2):
x1 = xx * numpy.sin(ang_strike) + yy * numpy.cos(ang_strike)
x2 = xx * numpy.cos(ang_strike) - yy * numpy.sin(ang_strike)
# In Okada's paper, x2 is distance up the fault plane, not down dip:
x2 = -x2
p = x2 * numpy.cos(ang_dip) + depth_bottom * numpy.sin(ang_dip)
q = x2 * numpy.sin(ang_dip) - depth_bottom * numpy.cos(ang_dip)
strike_slips = [self._strike_slip_z]
dip_slips = [self._dip_slip_z]
dFs = ['z']
dtopo.dY = dtopo.dX = None # if not set below
if y_disp:
strike_slips.append(self._strike_slip_y)
dip_slips.append(self._dip_slip_y)
dFs.append('y')
if x_disp:
strike_slips.append(self._strike_slip_x)
dip_slips.append(self._dip_slip_x)
dFs.append('x')
for strike_slip,dip_slip,dF in zip(strike_slips, dip_slips, dFs):
f1 = strike_slip(x1 + halfL, p, ang_dip, q)
f2 = strike_slip(x1 + halfL, p - w, ang_dip, q)
f3 = strike_slip(x1 - halfL, p, ang_dip, q)
f4 = strike_slip(x1 - halfL, p - w, ang_dip, q)
g1=dip_slip(x1 + halfL, p, ang_dip, q)
g2=dip_slip(x1 + halfL, p - w, ang_dip, q)
g3=dip_slip(x1 - halfL, p, ang_dip, q)
g4=dip_slip(x1 - halfL, p - w, ang_dip, q)
# Displacement in direction of strike and dip:
ds = slip * numpy.cos(ang_rake)
dd = slip * numpy.sin(ang_rake)
us = (f1 - f2 - f3 + f4) * ds
ud = (g1 - g2 - g3 + g4) * dd
dz = (us+ud)
dZ = numpy.array(dz, ndmin=3)
if dF=='z': dtopo.dZ = dZ
if dF=='y': dtopo.dY = -dZ # since y is up-dip in Okada paper
if dF=='x': dtopo.dX = dZ
#import pdb; pdb.set_trace()
self.dtopo = dtopo
return dtopo
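# Usage sketch (grid arrays hypothetical): evaluate the static Okada
# displacement of a single subfault on 1d coordinate arrays x, y:
#   dtopo = sf.okada(x, y, y_disp=True)
#   dz = dtopo.dZ[0,:,:]    # vertical displacement
#   dy = dtopo.dY[0,:,:]    # horizontal component from the 'y' branch above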
# Utility functions for okada:
def _strike_slip_z(self, y1, y2, ang_dip, q):
"""
Used for Okada's model
Methods from Yoshimitsu Okada (1985)
"""
sn = numpy.sin(ang_dip)
cs = numpy.cos(ang_dip)
d_bar = y2*sn - q*cs
r = numpy.sqrt(y1**2 + y2**2 + q**2)
xx = numpy.sqrt(y1**2 + q**2)
a4 = 2.0*poisson/cs*(numpy.log(r+d_bar) - sn*numpy.log(r+y2))
f = -(d_bar*q/r/(r+y2) + q*sn/(r+y2) + a4*sn)/(2.0*3.14159)
return f
def _dip_slip_z(self, y1, y2, ang_dip, q):
"""
Based on Okada's paper (1985)
Added by Xiaoming Wang
"""
sn = numpy.sin(ang_dip)
cs = numpy.cos(ang_dip)
d_bar = y2*sn - q*cs;
r = numpy.sqrt(y1**2 + y2**2 + q**2)
xx = numpy.sqrt(y1**2 + q**2)
a5 = 4.*poisson/cs*numpy.arctan((y2*(xx+q*cs)+xx*(r+xx)*sn)/y1/(r+xx)/cs)
f = -(d_bar*q/r/(r+y1) + sn*numpy.arctan(y1*y2/q/r) - a5*sn*cs)/(2.0*3.14159)
return f
def _strike_slip_y(self, y1, y2, ang_dip, q):
"""
Used for Okada's model
Methods from Yoshimitsu Okada (1985)
y = down-dip direction
"""
sn = numpy.sin(ang_dip)
cs = numpy.cos(ang_dip)
d_bar = y2*sn - q*cs
r = numpy.sqrt(y1**2 + y2**2 + q**2)
xx = numpy.sqrt(y1**2 + q**2)
y_bar = y2*cs + q*sn
a4 = 2.0*poisson/cs*(numpy.log(r+d_bar) - sn*numpy.log(r+y2))
y_bar = y2*cs + q*sn
a3 = 2.0*poisson*(y_bar/(cs*(r+d_bar)) - numpy.log(r+y2)) + a4*sn/cs
a2 = 2.*poisson*(-numpy.log(r+y2)) - a3
f = -(y_bar*q/r/(r+y2) + q*cs/(r+d_bar) + a2*sn)/(2.0*3.14159)
return f
def _dip_slip_y(self, y1, y2, ang_dip, q):
"""
Based on Okada's paper (1985)
y = down-dip direction
"""
sn = numpy.sin(ang_dip)
cs = numpy.cos(ang_dip)
d_bar = y2*sn - q*cs;
r = numpy.sqrt(y1**2 + y2**2 + q**2)
xx = numpy.sqrt(y1**2 + q**2)
y_bar = y2*cs + q*sn
a5 = 4.*poisson/cs*numpy.arctan((y2*(xx+q*cs)+xx*(r+xx)*sn)/y1/(r+xx)/cs)
a1 = 2.0*poisson*(-y1/(cs*(r+d_bar))) - sn/cs * a5
f = -(y_bar*q/r/(r+y1) + cs*numpy.arctan(y1*y2/q/r) - a1*sn*cs)/(2.0*3.14159)
return f
def _strike_slip_x(self, y1, y2, ang_dip, q):
"""
Used for Okada's model
Methods from Yoshimitsu Okada (1985)
"""
sn = numpy.sin(ang_dip)
cs = numpy.cos(ang_dip)
d_bar = y2*sn - q*cs
r = numpy.sqrt(y1**2 + y2**2 + q**2)
xx = numpy.sqrt(y1**2 + q**2)
a5 = 4.*poisson/cs*numpy.arctan((y2*(xx+q*cs)+xx*(r+xx)*sn)/y1/(r+xx)/cs)
a1 = 2.0*poisson*(-y1/(cs*(r+d_bar))) - sn/cs * a5
#a4 = 2.0*poisson/cs*(numpy.log(r+d_bar) - sn*numpy.log(r+y2))
f = -(y1*q/r/(r+y2) + numpy.arctan(y1*y2/q/r) + a1*sn)/(2.0*3.14159)
return f
def _dip_slip_x(self, y1, y2, ang_dip, q):
"""
Based on Okada's paper (1985)
Added by Xiaoming Wang
"""
sn = numpy.sin(ang_dip)
cs = numpy.cos(ang_dip)
d_bar = y2*sn - q*cs;
r = numpy.sqrt(y1**2 + y2**2 + q**2)
xx = numpy.sqrt(y1**2 + q**2)
#a5 = 4.*poisson/cs*numpy.arctan((y2*(xx+q*cs)+xx*(r+xx)*sn)/y1/(r+xx)/cs)
a4 = 2.0*poisson/cs*(numpy.log(r+d_bar) - sn*numpy.log(r+y2))
ytilde = y2*cs + q*sn
a3 = 2.0*poisson*(ytilde/(cs*(r+d_bar)) - numpy.log(r+y2)) + a4*sn/cs
f = -(q/r - a3*sn*cs)/(2.0*3.14159)
return f
def dynamic_slip(self, t):
r"""
For a dynamic fault, compute the slip at time t.
Assumes the following attributes are set:
- *rupture_time*
- *rise_time*
- *rise_time_ending*: optional, defaults to *rise_time*
"""
if (self.rupture_time is None) or (self.rise_time is None):
raise ValueError("Computing a dynamic slip only works for dynamic ",
"rupture types")
t0 = self.rupture_time
t1 = self.rise_time
t2 = getattr(self,'rise_time_ending',None)
rf = rise_fraction(t,t0,t1,t2)
return rf * self.slip
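# Sketch for a dynamic subfault (attribute values are illustrative):
#   sf.rupture_time, sf.rise_time, sf.slip = 10.0, 2.0, 3.0
#   sf.dynamic_slip(11.0)   # partial slip, between 0 and 3.0 m
# Assuming rise_fraction is 0 before the rupture time and 1 well after the
# rise, this returns 0 for t <= 10.0 and sf.slip once the rise is complete.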
# ==============================================================================
# UCSB sub-class of Fault
# ==============================================================================
class UCSBFault(Fault):
r"""Fault subclass for reading in subfault format models from UCSB
Read in subfault format models produced by Chen Ji's group at UCSB,
downloadable from:
http://www.geol.ucsb.edu/faculty/ji/big_earthquakes/home.html
"""
def __init__(self, path=None, **kwargs):
r"""UCSBFault initialization routine.
See :class:`UCSBFault` for more info.
"""
self.num_cells = [None, None] # RJL: Why needed??
# Not really needed, but the
# specification has it.
super(UCSBFault, self).__init__()
if path is not None:
self.read(path, **kwargs)
def read(self, path, rupture_type='static'):
r"""Read in subfault specification at *path*.
Creates a list of subfaults from the subfault specification file at
*path*.
Subfault format contains info for dynamic rupture, so can specify
rupture_type = 'static' or 'dynamic'
"""
self.rupture_type = rupture_type
# Read header of file
regexp_dx = re.compile(r"Dx=[ ]*(?P<dx>[^k]*)")
regexp_dy = re.compile(r"Dy=[ ]*(?P<dy>[^k]*)")
regexp_nx = re.compile(r"nx[^=]*=[ ]*(?P<nx>[^D]*)")
regexp_ny = re.compile(r"ny[^=]*=[ ]*(?P<ny>[^D]*)")
found_subfault_discretization = False
found_subfault_boundary = False
header_lines = 0
with open(path, 'r') as subfault_file:
# Find fault segment discretization
for (n,line) in enumerate(subfault_file):
result_dx = regexp_dx.search(line)
result_dy = regexp_dy.search(line)
result_nx = regexp_nx.search(line)
result_ny = regexp_ny.search(line)
if result_dx and result_dy:
dx = float(result_dx.group('dx'))
dy = float(result_dy.group('dy'))
self.num_cells[0] = int(result_nx.group('nx'))
self.num_cells[1] = int(result_ny.group('ny'))
found_subfault_discretization = True
break
header_lines += n
# Parse boundary
in_boundary_block = False
boundary_data = []
for (n,line) in enumerate(subfault_file):
if line[0].strip() == "#":
if in_boundary_block and len(boundary_data) == 5:
found_subfault_boundary = True
break
else:
in_boundary_block = True
boundary_data.append([float(value) for value in line.split()])
# Assume that there is a column label right underneath the boundary
# specification
header_lines += n + 2
# Check to make sure last boundary point matches, then throw away
if boundary_data[0] != boundary_data[4]:
raise ValueError("Boundary specified incomplete: ",
"%s" % boundary_data)
# Locate fault plane in 3D space - see SubFault `calculate_geometry`
# for a schematic of where these points are.
# These are only useful in sub-faults.
# self._corners = [None, # a
# None, # b
# None, # c
# None] # d
# self._centers = [[0.0, 0.0, 0.0], # 1
# [0.0, 0.0, 0.0], # 2
# [0.0, 0.0, 0.0]] # 3
# # :TODO: Is the order of this a good assumption?
# self._corners[0] = boundary_data[0]
# self._corners[3] = boundary_data[1]
# self._corners[2] = boundary_data[2]
# self._corners[1] = boundary_data[3]
# # Calculate center by averaging position of appropriate corners
# for (n, corner) in enumerate(self._corners):
# for i in xrange(3):
# self._centers[1][i] += corner[i] / 4
# if n == 0 or n == 4:
# for i in xrange(3):
# self._centers[0][i] += corner[i] / 2
# else:
# for i in xrange(3):
# self._centers[2][i] += corner[i] / 2
if not (found_subfault_boundary and found_subfault_discretization):
raise ValueError("Could not find base fault characteristics in ",
"subfault specification file at %s." % path)
# Calculate center of fault
column_map = {"latitude":0, "longitude":1, "depth":2, "slip":3,
"rake":4, "strike":5, "dip":6, "rupture_time":7,
"rise_time":8, "rise_time_ending":9, "mu":10}
defaults = {"length":dx, "width":dy}
input_units = {"slip":"cm", "depth":"km", 'mu':"dyne/cm^2",
"length":"km", "width":"km"}
super(UCSBFault, self).read(path, column_map, skiprows=header_lines,
coordinate_specification="centroid",
input_units=input_units, defaults=defaults)
# ==============================================================================
# CSV sub-class of Fault
# ==============================================================================
class CSVFault(Fault):
r"""Fault subclass for reading in CSV formatted files
Assumes that the first row gives the column headings
"""
def read(self, path, input_units={}, coordinate_specification="top center",
rupture_type="static", verbose=False):
r"""Read in subfault specification at *path*.
Creates a list of subfaults from the subfault specification file at
*path*.
"""
possible_column_names = """longitude latitude length width depth strike dip
rake slip mu rupture_time rise_time rise_time_ending""".split()
param = {}
for n in possible_column_names:
param[n] = n
# alternative names that might appear in csv file:
param["rigidity"] = "mu"
param["rupture time"] = "rupture_time"
param["rise time"] = "rise_time"
# Read header of file
with open(path, 'r') as subfault_file:
header_line = subfault_file.readline().split(",")
column_map = {}
for (n,column_heading) in enumerate(header_line):
if "(" in column_heading:
# Strip out units if present
unit_start = column_heading.find("(")
unit_end = column_heading.find(")")
column_name = column_heading[:unit_start].lower()
units = column_heading[unit_start+1:unit_end]
if verbose and input_units.get(column_name,units) != units:
print "*** Warning: input_units[%s] reset to %s" \
% (column_name, units)
print " based on file header"
input_units[column_name] = units
else:
column_name = column_heading.lower()
column_name = column_name.strip()
if column_name in param.keys():
column_key = param[column_name]
column_map[column_key] = n
else:
print "*** Warning: column name not recognized: %s" \
% column_name
super(CSVFault, self).read(path, column_map=column_map, skiprows=1,
delimiter=",", input_units=input_units,
coordinate_specification=coordinate_specification,
rupture_type=rupture_type)
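# A hypothetical CSV header/row that this reader would accept; units in
# parentheses override input_units for that column:
#   longitude,latitude,depth (km),length (km),width (km),strike,dip,rake,slip (m)
#   -99.5,16.5,20.0,100.0,50.0,290.0,15.0,90.0,2.0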
# ==============================================================================
# Sift sub-class of Fault
# ==============================================================================
class SiftFault(Fault):
r"""
Define a fault by specifying the slip on a subset of the SIFT unit sources.
The database is read in by load_sift_unit_sources.
See http://www.pmel.noaa.gov/pubs/PDF/gica2937/gica2937.pdf
for a discussion of these unit sources, although the database used
is more recent than what is reported in that paper and uses different
notation for the subfault names.
The subfault database used was downloaded from
http://sift.pmel.noaa.gov/ComMIT/compressed/info_sz.dat
Example:
>>> sift_slip = {'acsza1':2, 'acszb1':3}
>>> fault = SiftFault(sift_slip)
results in a fault with two specified subfaults with slip of 2 and 3 meters.
"""
def __init__(self, sift_slip=None):
r"""SiftFault initialization routine.
See :class:`SiftFault` for more info.
"""
super(SiftFault, self).__init__()
self._load_sift_unit_sources()
if sift_slip is not None:
self.set_subfaults(sift_slip)
def set_subfaults(self,sift_slip):
r"""
*sift_slip* (dict) is a dictionary with key = name of unit source
and value = magnitude of slip to assign (in meters).
"""
self.subfaults = []
for k,v in sift_slip.iteritems():
subfault = self.sift_subfaults[k]
subfault.slip = v
self.subfaults.append(subfault)
def _load_sift_unit_sources(self):
r"""
Load SIFT unit source subfault data base.
File was downloaded from
http://sift.pmel.noaa.gov/ComMIT/compressed/info_sz.dat
"""
unit_source_file = os.path.join(os.path.dirname(__file__), 'data',
'info_sz.dat.txt')
self.input_units = {'length':'km', 'width':'km', 'depth':'km', 'slip':'m',
'mu':"dyne/cm^2"}
self.sift_subfaults = {}
with open(unit_source_file, 'r') as sift_file:
# Skip first two lines
sift_file.readline(); sift_file.readline()
for line in sift_file:
tokens = line.split(',')
name = tokens[0]
# url = tokens[1]
subfault = SubFault()
subfault.longitude = float(tokens[2])
subfault.latitude = float(tokens[3])
subfault.slip = float(tokens[4])
subfault.strike = float(tokens[5])
subfault.dip = float(tokens[6])
subfault.depth = float(tokens[7])
subfault.length = float(tokens[8])
subfault.width = float(tokens[9])
subfault.rake = float(tokens[10])
subfault.coordinate_specification = "noaa sift"
# subfault.mu = ?? ## currently using SubFault default
subfault.convert_to_standard_units(self.input_units)
self.sift_subfaults[name] = subfault
# ==============================================================================
# Subdivided plane sub-class of Fault
# ==============================================================================
class SubdividedPlaneFault(Fault):
r"""
Define a fault by starting with a single fault plane (specified as
*base_subfault* of class *SubFault*) and subdividing the fault plane
into a rectangular array of *nstrike* by *ndip* equally sized subfaults.
By default, the slip on each subfault will be initialized to
*base_subfault.slip* so that the slip is uniform over the original plane
and the seismic moment is independent of the number of subdivisions.
Alternatively, the slip distribution can be specified by providing a
function *slip_distribution*, which should be a function of *(xi,eta)*
with each variable ranging from 0 to 1. *xi* varies from 0 at the top
of the fault to 1 at the bottom in the down-dip direction.
*eta* varies from one edge of the fault to the other moving in the
strike direction. This function will be evaluated at the centroid of
each subfault to set the slip.
Can also specify a desired seismic moment Mo in which case the slips will
be rescaled at the end so the total seismic moment is Mo. In this case
the *slip_distribution* function only indicates the relative slip
between subfaults.
"""
def __init__(self, base_subfault, nstrike=1, ndip=1,
slip_function=None, Mo=None):
r"""SubdivdedPlaneFault initialization routine.
See :class:`SubdivdedPlaneFault` for more info.
"""
super(SubdividedPlaneFault, self).__init__()
self.base_subfault = base_subfault
self.nstrike = nstrike
self.ndip = ndip
self.subdivide(nstrike, ndip, slip_function, Mo)
def subdivide(self, nstrike=1, ndip=1, slip_function=None, Mo=None):
r"""Subdivide the fault plane into nstrike * ndip subfaults."""
# may have changed resolution:
self.nstrike = nstrike
self.ndip = ndip
base_subfault = self.base_subfault
strike = base_subfault.strike
dip = base_subfault.dip
rake = base_subfault.rake
slip = base_subfault.slip
length = base_subfault.length
width = base_subfault.width
# unpack corners from fault plane geometry:
x_corners = [base_subfault.corners[2][0],
base_subfault.corners[3][0],
base_subfault.corners[0][0],
base_subfault.corners[1][0],
base_subfault.corners[2][0]]
y_corners = [base_subfault.corners[2][1],
base_subfault.corners[3][1],
base_subfault.corners[0][1],
base_subfault.corners[1][1],
base_subfault.corners[2][1]]
# set depth at corners:
depth_top = base_subfault.centers[0][2]
depth_bottom = base_subfault.centers[2][2]
d_corners = [depth_bottom, depth_top, depth_top, depth_bottom,
depth_bottom]
# coefficients for bilinear interpolants:
cx = [x_corners[1],
x_corners[0] - x_corners[1],
x_corners[2] - x_corners[1],
x_corners[3] + x_corners[1] - x_corners[2] - x_corners[0]]
cy = [y_corners[1],
y_corners[0] - y_corners[1],
y_corners[2] - y_corners[1],
y_corners[3] + y_corners[1] - y_corners[2] - y_corners[0]]
cd = [d_corners[1],
d_corners[0] - d_corners[1],
d_corners[2] - d_corners[1],
d_corners[3] + d_corners[1] - d_corners[2] - d_corners[0]]
self.subfaults = []
# determine coordinates for each subfault.
# note that xi goes from 0 to 1 from top to bottom in dip direction,
# eta goes from 0 to 1 in along-strike direction.
dxi = 1. / ndip
deta = 1. / nstrike
for i in range(ndip):
xi = numpy.array([i, i+0.5, i+1.]) * dxi # xi at top, center, bottom
for j in range(nstrike):
eta = (j+0.5)*deta
# interpolate longitude,latitude,depth from corners:
x_sf = cx[0] + cx[1]*xi + cx[2]*eta + cx[3]*xi*eta
y_sf = cy[0] + cy[1]*xi + cy[2]*eta + cy[3]*xi*eta
d_sf = cd[0] + cd[1]*xi + cd[2]*eta + cd[3]*xi*eta
subfault = SubFault()
if base_subfault.coordinate_specification == 'centroid':
subfault.longitude = x_sf[1]
subfault.latitude = y_sf[1]
subfault.depth = d_sf[1]
elif base_subfault.coordinate_specification == 'top center':
subfault.longitude = x_sf[0]
subfault.latitude = y_sf[0]
subfault.depth = d_sf[0]
elif base_subfault.coordinate_specification == 'noaa sift':
subfault.longitude = x_sf[2]
subfault.latitude = y_sf[2]
subfault.depth = d_sf[0]
else:
msg = "Unrecognized coordinate_specification: %s" \
% base_subfault.coordinate_specification
raise NotImplementedError(msg)
subfault.dip = dip
subfault.strike = strike
subfault.rake = rake
subfault.length = length / nstrike
subfault.width = width / ndip
subfault.slip = slip
subfault.coordinate_specification = \
base_subfault.coordinate_specification
subfault.mu = base_subfault.mu
self.subfaults.append(subfault)
if slip_function is not None:
self.set_slip(nstrike, ndip, slip_function, Mo)
def set_slip(self, nstrike, ndip, slip_function, Mo=None):
self.slip_function = slip_function
dxi = 1. / ndip
deta = 1. / nstrike
Mo_0 = 0.
k = 0
for i in range(ndip):
xi = (i+0.5) * dxi
for j in range(nstrike):
eta = (j+0.5) * deta
subfault = self.subfaults[k]
k = k+1
subfault.slip = slip_function(xi,eta)
Mo_0 += subfault.Mo()
if Mo is not None:
# rescale slip on each subfault to achieve desired seismic moment
Mo_ratio = Mo / Mo_0
for k in range(len(self.subfaults)):
self.subfaults[k].slip *= Mo_ratio
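# Usage sketch (hypothetical): subdivide a single plane into 10 x 5 pieces
# with a smooth slip taper and a target seismic moment:
#   slip_fn = lambda xi, eta: numpy.sin(numpy.pi*xi) * numpy.sin(numpy.pi*eta)
#   fault = SubdividedPlaneFault(base_subfault, nstrike=10, ndip=5,
#                                slip_function=slip_fn, Mo=4.0e20)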
# ==============================================================================
# Tensor product sub-class of Fault
# ==============================================================================
class TensorProductFault(SubdividedPlaneFault):
r"""
Define a fault by starting with a single fault plane (specified as
*fault_plane* of class *SubFault*) and subdividing the fault plane
into a rectangular array of *nstrike* by *ndip* equally sized subfaults.
Then define the slip on each subfault via
two one-dimensional functions *slip_along_strike* and
*slip_down_dip* that specify the slip as a function of fractional
distance in the along-strike and down-dip direction respectively
(i.e. the argument of each goes from 0 to 1).
Setting either to None defaults to constant function 1.
The slip is set by evaluating the tensor product at the centroid of
each subfault.
Can specify a desired seismic moment Mo in which case the slips will
be rescaled at the end.
"""
def __init__(self, fault_plane, slip_along_strike=None, slip_down_dip=None,
nstrike=1, ndip=1, Mo=None):
r"""TensorProductFault initialization routine.
See :class:`TensorProductFault` for more info.
"""
# perform the subdivision and set parameters on each subfault:
super(TensorProductFault, self).__init__(fault_plane, nstrike, ndip)
if slip_along_strike is None:
# set to constant in the strike direction if not specified
slip_along_strike = lambda eta: 1.0
if slip_down_dip is None:
# set to constant in the dip direction if not specified
slip_down_dip = lambda xi: 1.0
| bsd-2-clause |
mikestock/intf-tools | xintf.py | 1 | 41182 | #!/usr/bin/python
import matplotlib
matplotlib.use('WXAgg')
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigureCanvas
from matplotlib.backends.backend_wx import NavigationToolbar2Wx
from matplotlib.figure import Figure, SubplotParams
from matplotlib.widgets import SpanSelector, RectangleSelector
from matplotlib import rc, gridspec, cm, colors
import numpy as np
import wx, os, gzip, time, glob,sys
from wx.lib.masked import NumCtrl
import intf_tools as it
rc('savefig',dpi=100)
#rc('font',**{'family':'serif','serif':['Palatino']})
#rc('text', usetex=True)
colorDict = { 'red': ( (0.0, 0.3, 0.3),
(0.1, 0.0, 0.0),
(0.3, 0.0, 0.0),
(0.55, 0.0, 0.0),
(0.8, 0.8, 0.8),
(1.0, 1.0, 1.0) ),
'green':( (0.0, 0.0, 0.0),
(0.1, 0.0, 0.0),
(0.3, 1.0, 1.0),
(0.55, 1.0, 1.0),
(0.8, 1.0, 1.0),
(1.0, 0.0, 0.0) ),
'blue':( (0.0, 0.5, 0.5),
(0.1, 1.0, 1.0),
(0.3, 1.0, 1.0),
(0.55, 0.0, 0.0),
(0.8, 0.0, 0.0),
(1.0, 0.0, 0.0) ) }
#gCmap = colors.LinearSegmentedColormap('wjet',colorDict,256)
gCmap = it.cmap_mjet
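# colorDict above is in Matplotlib's LinearSegmentedColormap segment-data
# format; a sketch of turning it into a usable colormap (mirroring the
# commented-out line above, kept here only as an illustration):
#   wjet = colors.LinearSegmentedColormap('wjet', colorDict, 256)
# The module instead uses the mjet colormap shipped with intf_tools.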
class MainTab(wx.Panel):
def __init__(self, parent,root):
wx.Panel.__init__(self, parent=parent, id=wx.ID_ANY)
self.parent = parent
self.root = root
####
# opening buttons
self.btnSave = wx.Button(self, label='Save')
self.Bind(wx.EVT_BUTTON, self.OnBtnSave, self.btnSave)
self.btnOpen = wx.Button(self, label='Open')
self.Bind(wx.EVT_BUTTON, self.OnBtnOpen, self.btnOpen)
self.btnNext = wx.Button(self, label='Next')
self.Bind(wx.EVT_BUTTON, self.OnBtnNext, self.btnNext)
self.btnPrev = wx.Button(self, label='Previous')
self.Bind(wx.EVT_BUTTON, self.OnBtnPrev, self.btnPrev)
###
# Check box for the time mode
self.chkTime = wx.CheckBox(self, label='Time From Second')
self.Bind(wx.EVT_CHECKBOX, self.OnChkTime, self.chkTime)
####
# the only limit button, reset
self.btnReset = wx.Button(self, label='Reset Limits')
self.Bind(wx.EVT_BUTTON, self.OnBtnReset, self.btnReset)
####
# coloring buttons
self.cmbColor = wx.ComboBox(self,
choices=['Greyscale','By Time','By Points','By Amplitude'],
value='By Time', style=wx.CB_READONLY )
self.lblColor = wx.StaticText(self, label=' Color')
self.Bind(wx.EVT_COMBOBOX,self.OnCmbColor,self.cmbColor)
self.cmbSize = wx.ComboBox(self,
choices=['Small','Medium','Large','Amplitude','Exaggerated'],
value='Medium', style=wx.CB_READONLY )
self.lblSize = wx.StaticText(self, label=' Marker Size')
self.Bind(wx.EVT_COMBOBOX,self.OnCmbSize,self.cmbSize)
self.cmbAlpha = wx.ComboBox(self,
choices=['None','Some','More'],
value='None', style=wx.CB_READONLY )
self.lblAlpha = wx.StaticText(self, label=' Transparency')
self.Bind(wx.EVT_COMBOBOX,self.OnCmbAlpha,self.cmbAlpha)
###
# Projections
self.cmbProjection = wx.ComboBox(self,
choices=['Cosine', 'Az-El'],
value='Cosine', style=wx.CB_READONLY )
self.lblProjection = wx.StaticText(self, label=' Projection')
self.Bind(wx.EVT_COMBOBOX,self.OnCmbProjection,self.cmbProjection)
self.topSizer = wx.BoxSizer(wx.HORIZONTAL)
self.topSizer.Add(self.btnOpen,0,wx.RIGHT)
self.topSizer.AddStretchSpacer(1)
self.topSizer.Add(self.btnSave,0,wx.RIGHT)
self.topSizer.AddStretchSpacer(1)
self.topSizer.Add(self.btnReset,0,wx.RIGHT)
self.btmSizer1 = wx.BoxSizer(wx.HORIZONTAL)
self.btmSizer1.Add(self.btnPrev,0,wx.RIGHT|wx.BOTTOM)
self.btmSizer1.AddStretchSpacer(1)
self.btmSizer1.Add(self.btnNext,0,wx.RIGHT|wx.BOTTOM)
self.grid = wx.FlexGridSizer(rows=5,cols=2,hgap=5,vgap=5)
self.grid.Add(self.lblProjection,1,wx.LEFT)
self.grid.Add(self.cmbProjection,1,wx.RIGHT)
self.grid.Add(self.lblColor,1,wx.LEFT)
self.grid.Add(self.cmbColor,1,wx.RIGHT)
self.grid.Add(self.lblAlpha,1,wx.LEFT)
self.grid.Add(self.cmbAlpha,1,wx.RIGHT)
self.grid.Add(self.lblSize,1,wx.LEFT)
self.grid.Add(self.cmbSize,1,wx.RIGHT)
self.grid.AddStretchSpacer(1)
self.grid.Add(self.chkTime,1,wx.LEFT)
self.sizer = wx.BoxSizer(wx.VERTICAL)
self.sizer.AddSizer(self.topSizer,0,wx.RIGHT)
self.sizer.AddStretchSpacer(1)
self.sizer.AddSizer(self.grid,0,wx.RIGHT)
self.sizer.AddStretchSpacer(1)
self.sizer.AddSizer(self.btmSizer1,0,wx.RIGHT)
self.SetSizer(self.sizer)
self.Fit()
###
#the checkbox
def OnChkTime(self,e):
if self.chkTime.GetValue():
print 'time from second'
offset = self.root.plotPanel.data.time_from_second()
#change the title
else:
print 'time from trigger'
offset = self.root.plotPanel.data.time_from_trigger()
#update the limits history
self.root.plotPanel.OffsetLimits(offset)
self.root.plotPanel.UpdatePlot(update_overview=True)
###
#the color comboboxes
def OnCmbColor(self,e):
i = e.GetSelection()
print 'changing color option to %i'%i
self.root.plotPanel.colorOp = i
self.root.plotPanel.UpdatePlot()
def OnCmbSize(self,e):
i = e.GetSelection()
self.root.plotPanel.sizeOp = i
self.root.plotPanel.UpdatePlot()
def OnCmbAlpha(self,e):
i = e.GetSelection()
self.root.plotPanel.alphaOp = i
self.root.plotPanel.UpdatePlot()
###
#the projections combobox
def OnCmbProjection(self,e):
i = e.GetSelection()
if i == 0:
print 'Setting Cosine Proj.'
self.root.plotPanel.cosine = True
else:
print 'Setting Az-El Proj.'
self.root.plotPanel.cosine = False
self.root.plotPanel.UpdatePlot()
###
#file operations
def OnBtnSave(self,e):
defaultFileS = os.path.splitext( self.parent.parent.inFileS )[0] + '.png'
saveFileS = None
dlg = wx.FileDialog(self, "Choose a File", "",defaultFileS, "*.*", wx.FD_SAVE)
if dlg.ShowModal() == wx.ID_OK:
saveFileS = dlg.GetPath()
dlg.Destroy()
#write the file
if saveFileS != None:
print 'writing file',saveFileS
self.root.plotPanel.figure_canvas.print_figure(saveFileS)
def OnBtnOpen(self,e):
"""File Selector Dialog to open a data file"""
dlg = wx.FileDialog(self, "Choose a File", "","", "*.*", wx.FD_OPEN)
if dlg.ShowModal() == wx.ID_OK:
inFileS = dlg.GetPath()
dlg.Destroy()
self.root.OpenFile(inFileS)
def OnBtnNext(self,e):
if self.root.inFileS == None:
return
inFileS = self.root.inFileS
dirS = os.path.split(inFileS)[0]
extS = os.path.splitext(inFileS)[1]
files_i = glob.glob( '%s/LB*%s'%(dirS,extS))
files_i.sort()
index = files_i.index(inFileS)+1
if index >= len(files_i):
print 'There are no more files!!'
return
inFileS = files_i[index]
self.root.OpenFile(inFileS)
def OnBtnPrev(self,e):
if self.root.inFileS == None:
return
inFileS = self.root.inFileS
dirS = os.path.split(inFileS)[0]
extS = os.path.splitext(inFileS)[1]
files_i = glob.glob( '%s/LB*%s'%(dirS,extS))
files_i.sort()
index = files_i.index(inFileS)-1
if index < 0:
print 'There are no more files!!'
return
inFileS = files_i[index]
self.root.OpenFile(inFileS)
def OnBtnReset(self,e):
self.root.plotPanel.mkPlot()
class OverlayTab(wx.Panel):
def __init__(self, parent,root):
wx.Panel.__init__(self, parent=parent, id=wx.ID_ANY)
self.parent = parent
self.root = root
self.font = wx.Font(10, wx.MODERN, wx.NORMAL, wx.NORMAL, False)
self.limitsLen = 11
self.filtersLen = 9
boxSize = (50,30)
uSecBoxSize = (100,30)
#Waveforms
self.btnReadWave = wx.Button(self, label='Open Wave')
self.Bind(wx.EVT_BUTTON, self.OnBtnReadWave, self.btnReadWave)
self.cmbColorWave = wx.ComboBox(self,
choices=['Black','Red','Blue','Green'],
value='Black', style=wx.CB_READONLY )
self.lblWaveColor = wx.StaticText(self, label=' Color ')
self.Bind(wx.EVT_COMBOBOX,self.OnCmbColorWave,self.cmbColorWave)
waveColorSz = wx.BoxSizer(wx.HORIZONTAL)
waveColorSz.Add(self.lblWaveColor,0,wx.RIGHT|wx.BOTTOM)
waveColorSz.AddStretchSpacer(1)
waveColorSz.Add(self.cmbColorWave,0,wx.RIGHT|wx.BOTTOM)
self.lblWaveLpf1 = wx.StaticText(self, label=' LPF ')
self.lblWaveLpf2 = wx.StaticText(self, label='MHz ')
self.boxWaveLpf = wx.TextCtrl(self,wx.TE_RIGHT,size=boxSize)
self.btnLpfSet = wx.Button(self, label='Set')
self.Bind(wx.EVT_BUTTON, self.OnBtnLpfSet, self.btnLpfSet)
waveLpfSz = wx.BoxSizer(wx.HORIZONTAL)
waveLpfSz.Add(self.lblWaveLpf1,0,wx.RIGHT|wx.BOTTOM)
waveLpfSz.AddStretchSpacer(1)
waveLpfSz.Add(self.boxWaveLpf,0,wx.RIGHT|wx.BOTTOM)
waveLpfSz.Add(self.lblWaveLpf2,0,wx.LEFT|wx.BOTTOM)
waveLpfSz.AddStretchSpacer(1)
waveLpfSz.Add(self.btnLpfSet)
#LMA
self.btnReadLma = wx.Button(self, label='Open LMA')
self.Bind(wx.EVT_BUTTON, self.OnBtnReadLma, self.btnReadLma)
self.cmbColorLma = wx.ComboBox(self,
choices=['Black','Red','Blue','Green'],
value='Black', style=wx.CB_READONLY )
self.lblLmaColor = wx.StaticText(self, label=' Color ')
self.Bind(wx.EVT_COMBOBOX,self.OnCmbColorLma,self.cmbColorLma)
lmaColorSz = wx.BoxSizer(wx.HORIZONTAL)
lmaColorSz.Add(self.lblLmaColor,0,wx.RIGHT|wx.BOTTOM)
lmaColorSz.AddStretchSpacer(1)
lmaColorSz.Add(self.cmbColorLma,0,wx.RIGHT|wx.BOTTOM)
#Timing offset (for use with LMA)
self.boxUSec = wx.TextCtrl(self,wx.TE_RIGHT,size=uSecBoxSize)
self.btnUSec = wx.Button(self, label='Set uSecond')
self.Bind(wx.EVT_BUTTON, self.OnBtnUSec, self.btnUSec)
uSecSz = wx.BoxSizer(wx.HORIZONTAL)
uSecSz.Add(self.boxUSec, 0,wx.RIGHT|wx.BOTTOM)
uSecSz.AddStretchSpacer(1)
uSecSz.Add(self.btnUSec, 0,wx.RIGHT|wx.BOTTOM)
#~ self.btmSizer2 = wx.BoxSizer(wx.HORIZONTAL)
#~ self.btmSizer2.Add(self.btnReadLma,0,wx.RIGHT|wx.BOTTOM)
#~ self.btmSizer2.AddStretchSpacer(1)
#~ self.btmSizer2.Add(self.btnReadWave,0,wx.RIGHT|wx.BOTTOM)
self.sizer = wx.BoxSizer(wx.VERTICAL)
self.sizer.Add(self.btnReadLma,0,wx.RIGHT|wx.BOTTOM)
self.sizer.Add(lmaColorSz,0,wx.RIGHT|wx.BOTTOM)
self.sizer.Add(self.btnReadWave,0,wx.RIGHT|wx.BOTTOM)
self.sizer.Add(waveColorSz,0,wx.RIGHT|wx.BOTTOM)
self.sizer.Add(waveLpfSz,0,wx.RIGHT|wx.BOTTOM)
self.sizer.Add(uSecSz)
self.SetSizer(self.sizer)
self.Fit()
#file operations
def OnBtnReadWave(self,e):
"""File Selector Dialog to open a data file"""
dlg = wx.FileDialog(self, "Choose a File", "","", "*.*", wx.FD_OPEN)
if dlg.ShowModal() == wx.ID_OK:
inFileS = dlg.GetPath()
dlg.Destroy()
self.root.OpenWave(inFileS)
if self.root.plotPanel.waveLpf == None:
self.boxWaveLpf.SetValue(' None')
else:
self.boxWaveLpf.SetValue(('%0.1f'%self.root.plotPanel.waveLpf).rjust(5))
def OnBtnReadLma(self,e):
"""File Selector Dialog to open a data file"""
dlg = wx.FileDialog(self, "Choose a File", "","", "*.*", wx.FD_OPEN)
if dlg.ShowModal() == wx.ID_OK:
inFileS = dlg.GetPath()
dlg.Destroy()
self.root.OpenLma(inFileS)
#usecond
def OnBtnUSec(self,e):
#get the string in the text box
S = self.boxUSec.GetValue().strip()
uSec = self.text_parser(S)
#if needed, set time from trigger
if self.root.ctrlPanel.fileTab.chkTime.GetValue():
print 'time from trigger'
self.root.plotPanel.data.time_from_trigger()
if uSec == None:
print 'no uSecond, getting value'
#then we get the uSecond from the header
uSec = self.root.plotPanel.data.header.uSecond
else:
print 'Setting uSecond Value'
self.root.plotPanel.data.header.uSecond = uSec
#change to time from second
self.root.ctrlPanel.fileTab.chkTime.SetValue(True)
print 'time from second'
offset = self.root.plotPanel.data.time_from_second()
print offset
#update plots
self.root.plotPanel.OffsetLimits(offset)
self.root.plotPanel.UpdatePlot(update_overview=True)
#finally, set the textbox
self.boxUSec.SetValue(('%0.3f'%uSec).rjust(10))
#colors
def OnCmbColorWave(self,e):
i = e.GetString()
print 'changing Waveform color option to %s'%i
self.root.plotPanel.waveColor = i
self.root.plotPanel.UpdatePlot()
def OnCmbColorLma(self,e):
i = e.GetString()
print 'changing LMA color option to %s'%i
self.root.plotPanel.lmaColor = i
self.root.plotPanel.UpdatePlot()
#filters
def OnBtnLpfSet(self,e):
if self.root.waveFileS == None:
return
S = self.boxWaveLpf.GetValue().strip()
F = self.text_parser(S)
self.root.plotPanel.waveLpf = F
#update the value
if self.root.plotPanel.waveLpf == None:
self.boxWaveLpf.SetValue(' None')
else:
self.boxWaveLpf.SetValue(('%0.1f'%self.root.plotPanel.waveLpf).rjust(5))
print 'changing Waveform LPF to %s'%repr(F)
#update the plot
self.root.plotPanel.waveData = None #needed to force it to reload the data
self.root.plotPanel.UpdatePlot()
def text_parser(self,S):
#first see if it's a number
try:
F = float(S.strip())
except:
return None
return F
class FilterTab(wx.Panel):
def __init__(self, parent,root):
wx.Panel.__init__(self, parent=parent, id=wx.ID_ANY)
self.parent = parent
self.root = root
self.font = wx.Font(10, wx.MODERN, wx.NORMAL, wx.NORMAL, False)
self.limitsLen = 11
self.filtersLen = 9
###
# Limits
boxSize = (100,30)
lblLimits = wx.StaticText(self, label=' Limits --------------------- ')
btnLimits = wx.Button(self, label = 'Set Limits')
self.Bind(wx.EVT_BUTTON, self.OnBtnLimits, btnLimits)
btnLimBack = wx.Button(self, label = 'Back')
self.Bind(wx.EVT_BUTTON, self.OnBtnLimBack, btnLimBack)
lbltRange = wx.StaticText(self, label=' Time')
self.boxtRange0 = wx.TextCtrl(self,wx.TE_RIGHT,size=boxSize)
self.boxtRange1 = wx.TextCtrl(self,wx.TE_RIGHT,size=boxSize)
self.boxtRange0.SetFont(self.font)
self.boxtRange1.SetFont(self.font)
self.boxtRange0.fmt = '%4.3f'
self.boxtRange1.fmt = '%4.3f'
lblazRange = wx.StaticText(self, label=' Azimuth')
self.boxazRange0 = wx.TextCtrl(self,wx.TE_RIGHT,size=boxSize)
self.boxazRange1 = wx.TextCtrl(self,wx.TE_RIGHT,size=boxSize)
self.boxazRange0.SetFont(self.font)
self.boxazRange1.SetFont(self.font)
self.boxazRange0.fmt = '%3.3f'
self.boxazRange1.fmt = '%3.3f'
lblelRange = wx.StaticText(self, label=' Elevation')
self.boxelRange0 = wx.TextCtrl(self,wx.TE_CENTRE,size=boxSize)
self.boxelRange1 = wx.TextCtrl(self,wx.TE_RIGHT,size=boxSize)
self.boxelRange0.SetFont(self.font)
self.boxelRange1.SetFont(self.font)
self.boxelRange0.fmt = '%2.3f'
self.boxelRange1.fmt = '%2.3f'
lblcaRange = wx.StaticText(self, label=' cos(a)')
self.boxcaRange0 = wx.TextCtrl(self,wx.TE_RIGHT,size=boxSize)
self.boxcaRange1 = wx.TextCtrl(self,wx.TE_RIGHT,size=boxSize)
self.boxcaRange0.SetFont(self.font)
self.boxcaRange1.SetFont(self.font)
self.boxcaRange0.fmt = '%1.3f'
self.boxcaRange1.fmt = '%1.3f'
lblcbRange = wx.StaticText(self, label=' cos(b)')
self.boxcbRange0 = wx.TextCtrl(self,wx.TE_RIGHT,size=boxSize)
self.boxcbRange1 = wx.TextCtrl(self,wx.TE_RIGHT,size=boxSize)
self.boxcbRange0.SetFont(self.font)
self.boxcbRange1.SetFont(self.font)
self.boxcbRange0.fmt = '%1.3f'
self.boxcbRange1.fmt = '%1.3f'
###
# Filters
boxSize = (80,30)
lblFilters = wx.StaticText(self, label=' Filters --------------------- ')
btnFilters = wx.Button(self, label = 'Set Filters')
self.Bind(wx.EVT_BUTTON, self.OnBtnFilters, btnFilters)
lbleCls = wx.StaticText(self, label=' eCls')
self.boxeCls = wx.TextCtrl(self,wx.TE_RIGHT,size=boxSize)
self.boxeCls.SetFont(self.font)
self.boxeCls.fmt = '%1.3f'
lbleStd = wx.StaticText(self, label=' eStd')
self.boxeStd = wx.TextCtrl(self,wx.TE_RIGHT,size=boxSize)
self.boxeStd.SetFont(self.font)
self.boxeStd.fmt = '%1.3f'
lbleXpk = wx.StaticText(self, label=' eXpk')
self.boxeXpk = wx.TextCtrl(self,wx.TE_RIGHT,size=boxSize)
self.boxeXpk.SetFont(self.font)
self.boxeXpk.fmt = '%1.3f'
lbleMlt = wx.StaticText(self, label=' eMlt')
self.boxeMlt = wx.TextCtrl(self,wx.TE_RIGHT,size=boxSize)
self.boxeMlt.SetFont(self.font)
self.boxeMlt.fmt = '%1.3f'
###
# Placement
self.size1 = wx.BoxSizer(wx.HORIZONTAL)
self.size1.Add(lblLimits,0,wx.LEFT)
self.size1.AddStretchSpacer(1)
self.size1.Add(btnLimits,0,wx.RIGHT)
self.grid1 = wx.FlexGridSizer(rows=5,cols=3,hgap=5,vgap=5)
self.grid1.Add(lbltRange, 1,wx.LEFT)
self.grid1.Add(self.boxtRange0,1,wx.LEFT)
self.grid1.Add(self.boxtRange1,1,wx.LEFT)
self.grid1.Add(lblazRange, 1,wx.LEFT)
self.grid1.Add(self.boxazRange0,1,wx.LEFT)
self.grid1.Add(self.boxazRange1,1,wx.LEFT)
self.grid1.Add(lblelRange, 1,wx.LEFT)
self.grid1.Add(self.boxelRange0,1,wx.LEFT)
self.grid1.Add(self.boxelRange1,1,wx.LEFT)
self.grid1.Add(lblcaRange, 1,wx.LEFT)
self.grid1.Add(self.boxcaRange0,1,wx.LEFT)
self.grid1.Add(self.boxcaRange1,1,wx.LEFT)
self.grid1.Add(lblcbRange, 1,wx.LEFT)
self.grid1.Add(self.boxcbRange0,1,wx.LEFT)
self.grid1.Add(self.boxcbRange1,1,wx.LEFT)
self.size2 = wx.BoxSizer(wx.HORIZONTAL)
self.size2.Add(lblFilters,1,wx.LEFT)
self.size2.Add(btnFilters,0,wx.RIGHT)
self.grid2 = wx.FlexGridSizer(rows=2,cols=4,hgap=5,vgap=5)
self.grid2.Add(lbleCls,1,wx.LEFT)
self.grid2.Add(self.boxeCls,1,wx.LEFT)
self.grid2.Add(lbleStd,1,wx.LEFT)
self.grid2.Add(self.boxeStd,1,wx.LEFT)
self.grid2.Add(lbleXpk,1,wx.LEFT)
self.grid2.Add(self.boxeXpk,1,wx.LEFT)
self.grid2.Add(lbleMlt,1,wx.LEFT)
self.grid2.Add(self.boxeMlt,1,wx.LEFT)
self.sizer = wx.BoxSizer(wx.VERTICAL)
self.sizer.AddSizer(self.size1,1)
self.sizer.AddSizer(self.grid1,0)
self.sizer.AddSizer(self.size2,1)
self.sizer.AddSizer(self.grid2,0)
self.SetSizer(self.sizer)
self.Fit()
###
# Text Parser
def text_parser(self,S):
#first see if it's a number
try:
F = float(S.strip())
except:
return None
return F
###
# Limits
def OnBtnLimBack(self,e):
#this goes back in the history
#pop off the last set of limits
if len( self.root.plotPanel.limitsHistory ) > 0:
lims = self.root.plotPanel.limitsHistory.pop()
else:
return
#set these limits
self.root.plotPanel.SetLimits(save=False, **lims)
#update the plot
self.root.plotPanel.UpdatePlot()
def OnBtnLimits(self,e):
#this is ugly, I'm sure there's a better way to do it, but
#this should work
data = self.root.plotPanel.data
#Time
tRange = []
F = self.text_parser(self.boxtRange0.GetValue())
if F == None:
tRange.append( data.tRange[0] )
else:
tRange.append(F)
F = self.text_parser(self.boxtRange1.GetValue())
if F == None:
tRange.append( data.tRange[1] )
else:
tRange.append(F)
#Azimuth
azRange = []
F = self.text_parser(self.boxazRange0.GetValue())
if F == None:
azRange.append( data.azRange[0] )
else:
azRange.append(F)
F = self.text_parser(self.boxazRange1.GetValue())
if F == None:
azRange.append( data.azRange[1] )
else:
azRange.append(F)
#Elevation
elRange = []
F = self.text_parser(self.boxelRange0.GetValue())
if F == None:
elRange.append( data.elRange[0] )
else:
elRange.append(F)
F = self.text_parser(self.boxelRange1.GetValue())
if F == None:
elRange.append( data.elRange[1] )
else:
elRange.append(F)
#cosa
caRange = []
F = self.text_parser(self.boxcaRange0.GetValue())
if F == None:
caRange.append( data.caRange[0] )
else:
caRange.append(F)
F = self.text_parser(self.boxcaRange1.GetValue())
if F == None:
caRange.append( data.caRange[1] )
else:
caRange.append(F)
#cosb
cbRange = []
F = self.text_parser(self.boxcbRange0.GetValue())
if F == None:
cbRange.append( data.cbRange[0] )
else:
cbRange.append(F)
F = self.text_parser(self.boxcbRange1.GetValue())
if F == None:
cbRange.append( data.cbRange[1] )
else:
cbRange.append(F)
#set the limits
self.root.plotPanel.SetLimits(
caRange=caRange, cbRange=cbRange,
elRange=elRange, azRange=azRange,
tRange=tRange)
#make the changes and update the text values
self.set_values()
self.root.plotPanel.UpdatePlot()
###
# Filters
def OnBtnFilters(self,e):
F = self.text_parser(self.boxeCls.GetValue())
if F != None:
self.root.plotPanel.data.tCls = F
F = self.text_parser(self.boxeStd.GetValue())
if F != None:
self.root.plotPanel.data.tStd = F
F = self.text_parser(self.boxeXpk.GetValue())
if F != None:
self.root.plotPanel.data.tXpk = F
F = self.text_parser(self.boxeMlt.GetValue())
if F != None:
self.root.plotPanel.data.tMlt = F
#make the changes and update the text values
self.set_values()
self.root.plotPanel.data.filter()
self.root.plotPanel.UpdatePlot()
###
# Reset Values
def set_values(self):
if self.root.plotPanel.data == None:
return
data = self.root.plotPanel.data
self.boxtRange0.SetValue( (self.boxtRange0.fmt%data.tRange[0]).rjust(self.limitsLen) )
self.boxtRange1.SetValue( (self.boxtRange0.fmt%data.tRange[1]).rjust(self.limitsLen) )
self.boxazRange0.SetValue((self.boxazRange0.fmt%data.azRange[0]).rjust(self.limitsLen))
self.boxazRange1.SetValue((self.boxazRange0.fmt%data.azRange[1]).rjust(self.limitsLen))
self.boxelRange0.SetValue((self.boxelRange0.fmt%data.elRange[0]).rjust(self.limitsLen))
self.boxelRange1.SetValue((self.boxelRange0.fmt%data.elRange[1]).rjust(self.limitsLen))
self.boxcaRange0.SetValue((self.boxcaRange0.fmt%data.caRange[0]).rjust(self.limitsLen))
self.boxcaRange1.SetValue((self.boxcaRange0.fmt%data.caRange[1]).rjust(self.limitsLen))
self.boxcbRange0.SetValue((self.boxcbRange0.fmt%data.cbRange[0]).rjust(self.limitsLen))
self.boxcbRange1.SetValue((self.boxcbRange0.fmt%data.cbRange[1]).rjust(self.limitsLen))
self.boxeCls.SetValue((self.boxeCls.fmt%data.tCls).rjust(self.filtersLen))
self.boxeStd.SetValue((self.boxeStd.fmt%data.tStd).rjust(self.filtersLen))
self.boxeXpk.SetValue((self.boxeXpk.fmt%data.tXpk).rjust(self.filtersLen))
self.boxeMlt.SetValue((self.boxeMlt.fmt%data.tMlt).rjust(self.filtersLen))
class CtrlPanel(wx.Notebook):
"""Control Panel
I know, it's a notebook
Works largely as a container for the Tabs"""
def __init__(self, parent,root):
wx.Notebook.__init__(self,parent)
#parent is gonna be important for this one
self.parent = parent
self.root = root
#these are the tabs
self.fileTab = MainTab(self,self.root)
self.AddPage(self.fileTab, "Main")
self.filtTab = FilterTab(self,self.root)
self.AddPage(self.filtTab, "Limits")
self.overlayTab= OverlayTab(self,self.root)
self.AddPage(self.overlayTab, "Overlay")
class PlotPanel(wx.Panel):
"""Plot Panel
Contains the plot and some plotting based functions"""
def __init__(self, parent, root):
wx.Panel.__init__(self,parent)
self.parent = parent
self.root = root
#important parameters
self.inFileS = None
self.data = None
#lma
self.lma = None
self.lmaColor= 'Black'
#waveforms
self.waveHead= None
self.waveData= None
self.waveLpf = 5 #waveform low pass filter
self.waveColor= 'Black'
#self.mskData = None
self.face = 'w'
self.txtc = 'k'
self.color = 'k'
self.colorMap= gCmap
self.colorOp = 1
self.colorHL = (0,1,0,.25)
self.alphaOp = 0
self.sizeOp = 1
self.markerSz= 6
self.marker = 'D'
self.cosine = True
self.maxA = np.log10(65000)
self.minA = np.log10( 700)
#qualities
self.eCls = 2.25
self.eXpk = 0.3
self.eMlt = .40
self.eStd = 2.0
self._draw_pending = False
self._draw_counter = 0
self.tOffset = 0
self.limitsHistory = []
self.SetBackgroundColour(wx.NamedColour("WHITE"))
self.figure = Figure(figsize=(8.0,4.0))
self.figure_canvas = FigureCanvas(self, -1, self.figure)
# Note that event is a MplEvent
self.figure_canvas.mpl_connect('motion_notify_event', self.UpdateStatusBar)
#self.figure_canvas.Bind(wx.EVT_ENTER_WINDOW, self.ChangeCursor)
#this status bar is actually part of the main frame
self.statusBar = wx.StatusBar(self.root, -1)
self.statusBar.SetFieldsCount(1)
self.root.SetStatusBar(self.statusBar)
self.mkPlot()
self.sizer = wx.BoxSizer(wx.VERTICAL)
self.sizer.Add(self.figure_canvas, 1, wx.LEFT | wx.TOP | wx.EXPAND)
self.sizer.Add(self.statusBar, 0, wx.BOTTOM)
self.SetSizer(self.sizer)
self.Bind(wx.EVT_SIZE,self.OnSize)
###
# Makers
def mkPlot(self, lims=None):
#clear the figure
self.figure.clf()
self.ax1Coll = None
self.ax2Coll = None
self.ax3Coll = None
self.ax1Lma = None
self.ax3Lma = None
self.ax3Wave = None
#clear the history
self.limitsHistory = [ ]
#clear the lma and wave data
self.lma = None
#waveforms
self.waveHead= None
self.waveData= None
#initialize the plot region
gs = gridspec.GridSpec(2, 2)
#axis #1, the Az-El (or cosa-cosb) plot
self.ax1 = self.figure.add_subplot(gs[:,0],axisbg=self.face)
self.ax1.yaxis.set_tick_params(labelcolor=self.txtc)
self.ax1.yaxis.set_tick_params(color=self.txtc)
self.ax1.xaxis.set_tick_params(labelcolor=self.txtc)
self.ax1.xaxis.set_tick_params(color=self.txtc)
#axis #2, the time-El plot (overview)
self.ax2 = self.figure.add_subplot(gs[0,1],axisbg=self.face)
self.ax2.yaxis.set_tick_params(labelcolor=self.txtc)
self.ax2.yaxis.set_tick_params(color=self.txtc)
self.ax2.xaxis.set_tick_params(labelcolor=self.txtc)
self.ax2.xaxis.set_tick_params(color=self.txtc)
#self.ax2b = self.figure.add_subplot(gs[0,1],sharex=self.ax2, sharey=self.ax2, frameon=False)
#axis #3, the time-El plot (Zoom)
self.ax3 = self.figure.add_subplot(gs[1,1],axisbg=self.face)
self.ax3.yaxis.set_tick_params(labelcolor=self.txtc)
self.ax3.yaxis.set_tick_params(color=self.txtc)
self.ax3.xaxis.set_tick_params(labelcolor=self.txtc)
self.ax3.xaxis.set_tick_params(color=self.txtc)
SelectorColor = self.colorHL
self.span_ax1 = RectangleSelector(self.ax1, self.OnSelectAx1,
minspanx=0.01, minspany=0.01,
rectprops=dict(facecolor=self.colorHL, alpha=0.25),useblit=True)
self.span_ax2 = SpanSelector(self.ax2, self.OnSelectAx2, 'horizontal',\
rectprops=dict(facecolor=self.colorHL, alpha=0.25),useblit=True,minspan=0.01)
self.span_ax3 = SpanSelector(self.ax3, self.OnSelectAx3, 'horizontal',\
rectprops=dict(facecolor=self.colorHL, alpha=0.25),useblit=True,minspan=0.01)
########
# Make the plot
if self.data == None:
return
#initialize all the ranges
self.data.reset_limits()
self.data.update()
self.mkColorMap()
print 'Making New Plot'
#make the title
self.title = self.figure.suptitle( self.data.TriggerTimeS )
#Main Plot
if self.cosine:
print 'Cosine Projection'
theta = np.linspace(0,2*np.pi,1000)
X = np.cos(theta)
Y = np.sin(theta)
self.ax1Coll = self.ax1.scatter(
self.data.cosb, self.data.cosa,
s=self.markerSz,
marker=self.marker,
facecolor=self.color,
edgecolor='None' )
self.ax1.plot(X,Y,'k-', linewidth=2)
self.ax1.plot(np.cos(30*np.pi/180)*X,np.cos(30*np.pi/180)*Y,'k--', linewidth=2)
self.ax1.plot(np.cos(60*np.pi/180)*X,np.cos(60*np.pi/180)*Y,'k--', linewidth=2)
self.ax1.set_xlabel('cos($\\alpha$)')
self.ax1.set_ylabel('cos($\\beta$)')
self.ax1.set_xlim( self.data.cbRange )
self.ax1.set_ylim( self.data.caRange )
self.ax1.set_aspect('equal')
else:
print 'Az-El Projection'
self.ax1Coll = self.ax1.scatter(
self.data.azim,
self.data.elev,
s=self.markerSz,
marker=self.marker,
facecolor=self.color,
edgecolor='None' )
self.ax1.set_xlim( self.data.azRange )
self.ax1.set_ylim( self.data.elRange )
self.ax1.set_ylabel('Elevation')
self.ax1.set_xlabel('Azimuth')
self.ax1.set_aspect('auto')
#the zoomed plot
self.ax3Coll = self.ax3.scatter(
self.data.time,
self.data.elev,
s=self.markerSz,
marker=self.marker,
facecolor=self.color,
edgecolor='None' )
self.ax3.set_xlim( self.data.tRange )
self.ax3.set_ylim( self.data.elRange )
self.ax3.set_xlabel('Time (ms)')
#the overview plot
self.ax2.pcolormesh( self.data.rawDataHist[2],
self.data.rawDataHist[1], self.data.rawDataHist[0]**.1,
edgecolor='None',cmap=cm.binary)
self.ax2Coll = self.ax2.scatter(
self.data.time,
self.data.elev,
s=3,
marker=self.marker,
facecolor=self.colorHL,
edgecolor='None' )
#these limits shouldn't change though
self.ax2.set_xlim( self.data.tRange )
self.ax2.set_ylim( self.data.elRange )
self.root.ctrlPanel.filtTab.set_values()
self.redraw()
def mkColorMap(self):
"""Makes a colormap"""
print 'Color:',
#most color maps use static sizing
if self.data == None:
return
if self.colorOp == 0:
print 'Greyscale'
self.data.sort( self.data.time )
#none
self.color = np.zeros( (len(self.data.mask),4) )
elif self.colorOp == 1:
#time
print 'By time'
self.data.sort( self.data.time )
c = self.data.time - self.data.time.min()
c /= c.max()
self.color = self.colorMap( c )
elif self.colorOp == 2:
#points
print 'by points'
self.data.sort( self.data.time )
c = np.arange( len(self.data.mask), dtype='f' )
c /=max(c)
self.color = self.colorMap( c )
elif self.colorOp == 3:
#amplitude
print 'by Amplitude'
self.data.sort( self.data.pkpk )
aMin = np.log10( self.data.a05 )
aMax = np.log10( self.data.a95 )
c = np.log10(self.data.pkpk)
c = (c-aMin)/(aMax-aMin)
c[c>1] = 1
self.color = self.colorMap( c )
self.mkAlpha()
def mkSize(self):
print 'MarkerSize:',
if self.sizeOp == 0:
#small
print 'small'
self.markerSz = 3
elif self.sizeOp == 1:
#medium
print 'medium'
self.markerSz = 6
elif self.sizeOp == 2:
#large
print 'large'
self.markerSz = 12
elif self.sizeOp == 3:
#size by amplitude
print 'by Amplitude'
s = np.log10( self.data.pkpk )
s = (s-self.minA)/(self.maxA-self.minA)
s[s>1] = 1
s = (1+3*s**2)
self.markerSz = 6*s
elif self.sizeOp == 4:
            #exaggerated size by amplitude
print 'exagerated'
s = np.log10( self.data.pkpk )
aMin = np.log10(self.data.aMin)
aMax = np.log10(self.data.aMax)
s = (s-aMin)/(aMax-aMin)
s[s>1] = 1
s = (1+3*s**2)**2
self.markerSz = 6*s
def mkAlpha(self):
print 'Alpha:',
if self.alphaOp == 0:
#no alpha
print 'None'
self.color[:,3] = 1
return
elif self.alphaOp == 1:
#some alpha
print '0.2'
alphaEx = .2
elif self.alphaOp == 2:
#more alpha
print '0.4'
alphaEx = .4
else:
#don't know this option, don't do anything
return
a = self.data.pkpk.copy()
a -= min(a)
a /= max(a)
self.color[:,3] = a**alphaEx
###
#On Catches
def OnSelectAx1(self,click, release):
xlims = [click.xdata, release.xdata]
xlims.sort()
ylims = [click.ydata, release.ydata]
ylims.sort()
if self.cosine:
self.SetLimits(caRange=ylims, cbRange=xlims)
else:
self.SetLimits(elRange=ylims, azRange=xlims)
#update the plots
self.UpdatePlot()
def OnSelectAx2(self,xmin,xmax):
self.figure_canvas.draw()
if self.data == None:
return
self.SetLimits(tRange=[xmin,xmax])
#update the mask and plot
self.UpdatePlot()
def OnSelectAx3(self,xmin,xmax):
#mask the data array
if self.data == None:
return
self.SetLimits(tRange=[xmin,xmax])
#update the mask and plot
self.UpdatePlot()
def OnSize(self,e):
if self.GetAutoLayout():
self.Layout()
left = 60
right = 30
top = 30
bottom = 40
wspace = 100
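        #the margin values above are given in pixels; they are converted to the
        #0-1 figure-fraction coordinates that subplots_adjust expects below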
dpi = self.figure.dpi
h = self.figure.get_figheight()*dpi
w = self.figure.get_figwidth()*dpi
#figure out the margins
self.figure.subplots_adjust(left=left/w,
right=1-right/w,
bottom=bottom/h,
top=1-top/h,
wspace=wspace/w)
self.redraw()
###
#Updaters
def UpdateStatusBar(self, event):
if event.inaxes:
x, y = event.xdata, event.ydata
self.statusBar.SetStatusText(( "x= " + str(x) +
" y=" +str(y) ),
0)
#~ def UpdateMask(self):
#~ if self.data == None:
#~ return
#~
#~ self.data.mask = np.where(
#~ (self.data.time>=self.tRange[ 0])&(self.data.time<=self.tRange[ 1])&
#~ (self.data.azim>=self.azRange[0])&(self.data.azim<=self.azRange[1])&
#~ (self.data.elev>=self.elRange[0])&(self.data.elev<=self.elRange[1])&
#~ (self.data.cosa>=self.caRange[0])&(self.data.cosa<=self.caRange[1])&
#~ (self.data.cosb>=self.cbRange[0])&(self.data.cosb<=self.cbRange[1]) )[0]
def OffsetLimits(self,offset):
"""OffsetLimits(self,offset)
this comes up because sometimes you want the time from the second,
and sometimes you want the time from the trigger.
This takes care of updating the time limits history so that
things refer to the same section of the flash
"""
for i in range(len(self.limitsHistory)):
if not 'tRange' in self.limitsHistory[i]:
#can this even happen?
continue
self.limitsHistory[i]['tRange'][0] += offset - self.tOffset
self.limitsHistory[i]['tRange'][1] += offset - self.tOffset
#update the waveform
if self.waveData != None:
self.waveData[0,:] += offset - self.tOffset
self.tOffset = offset
def GetLimits(self):
if self.data == None:
            #do nothing
return
#the limits get stored in a dictionary
lims = {}
lims['caRange'] = self.data.caRange
lims['cbRange'] = self.data.cbRange
lims['elRange'] = self.data.elRange
lims['azRange'] = self.data.azRange
lims['tRange'] = self.data.tRange
return lims
def SetLimits(self, caRange=None, cbRange=None,
elRange=None, azRange=None, tRange=None, save=True ):
if self.data == None:
#Do Nothing
return
#append the old limits to the history
if self.limitsHistory != None and save:
self.limitsHistory.append(self.GetLimits())
#the limits that aren't passed aren't changed,
#get them from the data and store in history
lims = {}
if caRange != None:
self.data.caRange=caRange
if cbRange != None:
self.data.cbRange=cbRange
if elRange != None:
self.data.elRange=elRange
if azRange != None:
self.data.azRange=azRange
if tRange != None:
self.data.tRange=tRange
def UpdatePlot(self, update_overview=False):
"""redraws the main axis
if update_overview=True, also redraws the upper righthand plot"""
if self.data == None:
return
self.data.limits()
self.data.update()
self.mkColorMap()
self.mkSize()
#Main plot (remake)
if self.ax1Coll != None:
self.ax1Coll.remove()
if self.ax1Lma != None:
self.ax1Lma.remove()
if self.cosine:
print 'Cosine Projection'
self.ax1Coll = self.ax1.scatter(
self.data.cosb,
self.data.cosa,
s=self.markerSz,
marker=self.marker,
facecolor=self.color,
edgecolor='None' )
if self.lma != None:
self.ax1Lma = self.ax1.scatter(
self.lma.cosb,
self.lma.cosa,
s=6,
marker=self.marker,
facecolor=self.lmaColor,
edgecolor='None' )
self.ax1.set_ylabel('cosa')
self.ax1.set_xlabel('cosb')
self.ax1.set_ylim( self.data.caRange )
self.ax1.set_xlim( self.data.cbRange )
self.ax1.set_aspect('equal')
else:
print 'Az-El Projection'
self.ax1Coll = self.ax1.scatter(
self.data.azim,
self.data.elev,
s=self.markerSz,
marker=self.marker,
facecolor=self.color,
edgecolor='None' )
if self.lma != None:
self.ax1Lma = self.ax1.scatter(
self.lma.azim,
self.lma.elev,
s=6,
marker=self.marker,
facecolor=self.lmaColor,
edgecolor='None' )
self.ax1.set_xlim( self.data.azRange )
self.ax1.set_ylim( self.data.elRange )
self.ax1.set_ylabel('Elevation')
self.ax1.set_xlabel('Azimuth')
self.ax1.set_aspect('auto')
#Zoom plot (remake)
if self.ax3Coll != None:
self.ax3Coll.remove()
if self.ax3Lma != None:
self.ax3Lma.remove()
if self.ax3Wave != None:
self.ax3Wave.remove()
#plot waveforms?
if self.waveHead != None:
            #this starts with a complicated conditional to determine whether
            #we need to reload the waveform; reloading should be avoided when
            #possible, as it takes a while, especially for longer durations
if self.waveData == None:
self.readWave()
elif ( self.waveData[0,0] - self.data.tRange[0] < 0.1 ) and \
( self.waveData[0,-1]- self.data.tRange[1] > -0.1 ) and \
( self.waveData[0,-1]-self.waveData[0,0] <
1.5*(self.data.tRange[1]-self.data.tRange[0])):
#we don't need to read the data
pass
else:
print self.waveData[0,0], self.data.tRange[0], self.waveData[0,0] - self.data.tRange[0] < 0.1
print self.waveData[0,-1], self.data.tRange[1],self.waveData[0,-1]- self.data.tRange[1] > -0.1
print self.waveData[0,-1]-self.waveData[0,0], 1.5*(self.data.tRange[1]-self.data.tRange[0]), ( self.waveData[0,-1]-self.waveData[0,0] < 1.5*(self.data.tRange[1]-self.data.tRange[0]))
self.readWave()
#plot the data
self.ax3Wave, = self.ax3.plot(
self.waveData[0,:],
self.waveData[1,:],
self.waveColor,
zorder=-10 )
#Scatter INTF
self.ax3Coll = self.ax3.scatter(
self.data.time,
self.data.elev,
s=self.markerSz,
marker=self.marker,
facecolor=self.color,
edgecolor=(1,1,1,0) )
#plot LMA?
if self.lma != None:
self.ax3Lma = self.ax3.scatter(
self.lma.time,
self.lma.elev,
s = 6,
marker = self.marker,
facecolor=self.lmaColor,
edgecolor='None' )
self.ax3.set_xlim( self.data.tRange )
self.ax3.set_ylim( self.data.elRange )
self.ax3.set_xlabel('Time (ms)')
#overview plot
#Remake current stuff only
if self.ax2Coll != None:
self.ax2Coll.remove()
if update_overview:
#the overview plot likely just moved to a new location
#reset the limits
self.ax2.set_xlim( self.data.tStart+self.data.tOffset,
self.data.tStop+ self.data.tOffset )
self.ax2.pcolormesh( self.data.rawDataHist[2],
self.data.rawDataHist[1], self.data.rawDataHist[0]**.1,
edgecolor='None',cmap=cm.binary)
self.ax2Coll = self.ax2.scatter(
self.data.time,
self.data.elev,
s=3,
marker=self.marker,
facecolor=self.colorHL,
edgecolor='None' )
print "redrawing figure"
self.root.ctrlPanel.filtTab.set_values()
self.redraw()
def readWave(self):
        #get the start sample; surprisingly, this is difficult
#t = (iMax-Settings.preTriggerSamples)/1000./Settings.sampleRate
#t*sRage*1000+preTrig = iMax
sRate = self.data.header.SampleRate
pSamp = self.data.header.PreTriggerSamples
sSam = int( (self.data.tRange[0]-self.data.tOffset)*sRate/1000+pSamp )
#get the number of samples, not so hard
numSam = int( (self.data.tRange[1]-self.data.tRange[0])/1000.*sRate )
#read in wave data and plot it under
self.waveData = it.read_raw_waveform_file_data( self.root.waveFileS,
self.waveHead,
sSam,
numSam,
lpf=self.waveLpf )
#normalize the wavedata
self.waveData[1,:] -= min( self.waveData[1,:] )
self.waveData[1,:] /= max( self.waveData[1,:] )
self.waveData[1,:] *= self.data.elRange[1]-self.data.elRange[0]
self.waveData[1,:] += self.data.elRange[0]
self.waveData[0,:] += self.data.tOffset
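    #redraw() coalesces rapid draw requests: while a canvas draw is pending,
    #further calls only bump a counter, and the deferred _draw callback
    #(scheduled ~40 ms ahead via wx.CallLater) issues one more redraw if any
    #requests arrived in the meantime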
def redraw(self):
if self._draw_pending:
self._draw_counter += 1
return
def _draw():
self.figure_canvas.draw()
self._draw_pending = False
if self._draw_counter > 0:
self._draw_counter = 0
self.redraw()
wx.CallLater(40, _draw).Start()
self._draw_pending = True
class MainFrame(wx.Frame):
"""Main Frame
All the things are stored in this"""
def __init__(self, ):
wx.Frame.__init__(self,None,-1, 'INTF Plot',size=(550,350))
        #the only parameters stored in the frame are about the file
self.inFileS = None
self.waveFileS = None
self.lmaFileS = None
self.lmaFile = None
#these are the main 2 panels
self.plotPanel = PlotPanel(self,self) #self is the parent and root
self.ctrlPanel = CtrlPanel(self,self)
sizer = wx.BoxSizer(wx.HORIZONTAL)
sizer.Add(self.plotPanel,1,wx.LEFT | wx.GROW)
sizer.Add(self.ctrlPanel,0,wx.RIGHT)
self.SetSizer(sizer)
self.Fit()
def OpenFile(self,inFileS):
print 'reading data',inFileS
self.inFileS = inFileS
self.lmaFileS = None
self.waveFileS = None
self.plotPanel.waveHead = None
self.plotPanel.waveData = None
self.plotPanel.data = it.read_data_file(inFileS)
if self.ctrlPanel.fileTab.chkTime.GetValue():
print 't_offset', self.plotPanel.data.time_from_second()
print 'making plot'
self.plotPanel.mkPlot()
#self.plotPanel.UpdatePlot()
def OpenLma(self,inFileS):
self.lmaFile = inFileS
self.plotPanel.lma = it.LmaData(inFileS)
self.plotPanel.UpdatePlot()
def OpenWave(self,inFileS):
self.waveFileS = inFileS
self.plotPanel.waveData = None
try:
self.plotPanel.waveHead = it.RawHeader(inFileS)
except:
self.plotPanel.waveHead = None
self.plotPanel.UpdatePlot()
class App(wx.App):
def OnInit(self):
'Create the main window and insert the custom frame'
frame = MainFrame()
self.SetTopWindow(frame)
frame.Show(True)
return True
if __name__=='__main__':
app = App(0)
app.MainLoop()
| gpl-2.0 |
dchad/malware-detection | vs/generate_elf_header_tokens.py | 1 | 4984 | # generate-elf-header-tokens.py
#
# Parse a bunch of ELF Header dump files generated by objdump and
# extract keywords such section names, import libs and functions.
# These tokens will be used for feature extraction from the
# ELF headers.
#
# Inputs : list of ELF Header files.
# Temp file name for token counts.
# File name for combined token counts.
#
# Outputs: elf-header-tokens.txt
# elf-header-token-counts.csv
# row format = [token_name, count]
#
# Author: Derek Chadwick
# Date : 17/09/2016
import os
from csv import writer
import numpy as np
import pandas as pd
import re
elf_header_names = ['Magic:','Class:','Data:','Version:','OS/ABI:','ABI Version:','Type:','Machine:','Version:',
'Entry point address:','Start of program headers:','Start of section headers:','Flags:',
'Size of this header:','Size of program headers:','Number of program headers:',
'Size of section headers:','Number of section headers:','Section header string table index:']
def save_token_counts(token_counter_map, out_file_name):
    # Output the ELF header token counts.
pid = os.getpid()
out_file = "data/" + str(pid) + "-" + out_file_name
fop = open(out_file, 'w')
csv_wouter = writer(fop)
outlines = []
sorted_keys = token_counter_map.keys()
sorted_keys.sort()
counter = 0
for key in sorted_keys:
outlines.append([key, token_counter_map[key]])
counter += 1
if (counter % 100) == 0: # write out some lines
csv_wouter.writerows(outlines)
outlines = []
print("Processed token {:s} -> {:d}.".format(key, token_counter_map[key]))
# Finish off.
if (len(outlines) > 0):
csv_wouter.writerows(outlines)
outlines = []
print("Completed writing {:d} tokens.".format(len(sorted_keys)))
fop.close()
return
def get_token_count_map(token_df):
# Read in the token count file and create a dict.
token_dict = {}
type_y = np.array(token_df['token_name'])
for idx in range(token_df.shape[0]): # First fill the dict with the token counts
token_dict[token_df.iloc[idx,0]] = token_df.iloc[idx,1]
return token_dict
def generate_elf_tokens(mp_params):
# Parse a bunch of ELF headers dumped by objdump and extract
# section names and other useful information.
file_list = mp_params.file_list
out_count_file = mp_params.count_file
psections = re.compile('\s+\[\d{1,2}\]\s+(\.\w+|\w+)\s+') # Pattern for section names.
pfunctions = re.compile('\s+\w+\s+\d{1,4}\s+(.+)\s*') # Pattern for import function names.
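    # Illustrative (assumed) inputs: psections targets section-table rows of the
    # form '  [ 1] .interp ...', while pfunctions targets symbol rows whose last
    # field is a function name. The exact layout depends on the tool and flags
    # used to produce the header dump files.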
token_counter_map = {}
counter = 0
pid = os.getpid()
for idx, fname in enumerate(file_list):
fip = open(fname, 'r')
in_lines = fip.readlines()
counter += 1
for line in in_lines:
line = line.rstrip() # get rid of newlines they are annoying.
token_val = ""
m = psections.match(line)
if m != None:
token_val = m.group(1)
#print("Section: {:s}".format(token_val))
else:
m = pfunctions.match(line)
if m != None:
token_val = m.group(1)
else:
continue
# Clean the token name, the function name regex is picking up random crap.
idx = token_val.find('\t')
if idx > 0:
token_val = token_val[0:idx]
# Count the token type.
if token_val in token_counter_map.keys():
token_counter_map[token_val] += 1
else:
token_counter_map[token_val] = 1
if (counter % 1000) == 0:
print("{:d} Processed {:d} header files.".format(pid, counter))
fip.close()
save_token_counts(token_counter_map, out_count_file)
return
class Multi_Params(object):
def __init__(self, tokenfile="", countfile="", filelist=[]):
self.token_file = tokenfile
self.count_file = countfile
self.file_list = filelist
# Start of script.
#TODO: parse command line options for input/output file names.
#token_file = 'elf-header-tokens-vs263.csv'
#count_file = 'elf-header-token-counts-vs263.csv'
#ext_drive = '/opt/vs/train3hdr/'
token_file = 'elf-header-tokens-vs264.txt'
count_file = 'elf-header-token-counts-vs264.csv'
#ext_drive = '/opt/vs/train4hdr/'
ext_drive = '/home/derek/project/temp/'
file_list = os.listdir(ext_drive)
tfiles = []
for fname in file_list:
fname = fname.rstrip()
if fname.endswith('.elf.txt'):
tfiles.append(ext_drive + fname)
print("Files: {:d}".format(len(tfiles)))
mp1 = Multi_Params(token_file, count_file, tfiles)
generate_elf_tokens(mp1)
# End of Script.
| gpl-3.0 |
yavalvas/yav_com | build/matplotlib/lib/matplotlib/tests/test_compare_images.py | 15 | 3854 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import os
import shutil
from nose.tools import assert_equal, assert_not_equal, assert_almost_equal
from matplotlib.testing.compare import compare_images
from matplotlib.testing.decorators import _image_directories
baseline_dir, result_dir = _image_directories(lambda: 'dummy func')
# Tests of the image comparison algorithm.
def image_comparison_expect_rms(im1, im2, tol, expect_rms):
"""Compare two images, expecting a particular RMS error.
im1 and im2 are filenames relative to the baseline_dir directory.
tol is the tolerance to pass to compare_images.
expect_rms is the expected RMS value, or None. If None, the test will
succeed if compare_images succeeds. Otherwise, the test will succeed if
compare_images fails and returns an RMS error almost equal to this value.
"""
im1 = os.path.join(baseline_dir, im1)
im2_src = os.path.join(baseline_dir, im2)
im2 = os.path.join(result_dir, im2)
# Move im2 from baseline_dir to result_dir. This will ensure that
# compare_images writes the diff file to result_dir, instead of trying to
# write to the (possibly read-only) baseline_dir.
shutil.copyfile(im2_src, im2)
results = compare_images(im1, im2, tol=tol, in_decorator=True)
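    # With in_decorator=True, compare_images returns None when the images match
    # within tol, or a dict (including the 'rms' error) when they differ.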
if expect_rms is None:
assert_equal(None, results)
else:
assert_not_equal(None, results)
assert_almost_equal(expect_rms, results['rms'], places=4)
def test_image_compare_basic():
#: Test comparison of an image and the same image with minor differences.
# This expects the images to compare equal under normal tolerance, and have
# a small RMS.
im1 = 'basn3p02.png'
im2 = 'basn3p02-minorchange.png'
image_comparison_expect_rms(im1, im2, tol=10, expect_rms=None)
# Now test with no tolerance.
image_comparison_expect_rms(im1, im2, tol=0, expect_rms=6.50646)
def test_image_compare_1px_offset():
#: Test comparison with an image that is shifted by 1px in the X axis.
im1 = 'basn3p02.png'
im2 = 'basn3p02-1px-offset.png'
image_comparison_expect_rms(im1, im2, tol=0, expect_rms=90.15611)
def test_image_compare_half_1px_offset():
#: Test comparison with an image with half the pixels shifted by 1px in
#: the X axis.
im1 = 'basn3p02.png'
im2 = 'basn3p02-half-1px-offset.png'
image_comparison_expect_rms(im1, im2, tol=0, expect_rms=63.75)
def test_image_compare_scrambled():
#: Test comparison of an image and the same image scrambled.
# This expects the images to compare completely different, with a very
# large RMS.
# Note: The image has been scrambled in a specific way, by having each
# color component of each pixel randomly placed somewhere in the image. It
# contains exactly the same number of pixels of each color value of R, G
# and B, but in a totally different position.
im1 = 'basn3p02.png'
im2 = 'basn3p02-scrambled.png'
# Test with no tolerance to make sure that we pick up even a very small RMS
# error.
image_comparison_expect_rms(im1, im2, tol=0, expect_rms=172.63582)
def test_image_compare_shade_difference():
#: Test comparison of an image and a slightly brighter image.
# The two images are solid color, with the second image being exactly 1
# color value brighter.
# This expects the images to compare equal under normal tolerance, and have
# an RMS of exactly 1.
im1 = 'all127.png'
im2 = 'all128.png'
image_comparison_expect_rms(im1, im2, tol=0, expect_rms=1.0)
# Now test the reverse comparison.
image_comparison_expect_rms(im2, im1, tol=0, expect_rms=1.0)
if __name__ == '__main__':
import nose
nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
| mit |
jaidevd/scikit-learn | examples/plot_compare_reduction.py | 19 | 2489 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=================================================================
Selecting dimensionality reduction with Pipeline and GridSearchCV
=================================================================
This example constructs a pipeline that does dimensionality
reduction followed by prediction with a support vector
classifier. It demonstrates the use of GridSearchCV and
Pipeline to optimize over different classes of estimators in a
single CV run -- unsupervised PCA and NMF dimensionality
reductions are compared to univariate feature selection during
the grid search.
"""
# Authors: Robert McGibbon, Joel Nothman
from __future__ import print_function, division
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_digits
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.decomposition import PCA, NMF
from sklearn.feature_selection import SelectKBest, chi2
print(__doc__)
pipe = Pipeline([
('reduce_dim', PCA()),
('classify', LinearSVC())
])
N_FEATURES_OPTIONS = [2, 4, 8]
C_OPTIONS = [1, 10, 100, 1000]
param_grid = [
{
'reduce_dim': [PCA(iterated_power=7), NMF()],
'reduce_dim__n_components': N_FEATURES_OPTIONS,
'classify__C': C_OPTIONS
},
{
'reduce_dim': [SelectKBest(chi2)],
'reduce_dim__k': N_FEATURES_OPTIONS,
'classify__C': C_OPTIONS
},
]
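# Note: listing estimator instances under 'reduce_dim' lets the grid search swap
# the whole pipeline step (PCA vs NMF vs SelectKBest), while the
# 'reduce_dim__<param>' keys tune that step's own hyper-parameters; each dict in
# param_grid is expanded into its own sub-grid.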
reducer_labels = ['PCA', 'NMF', 'KBest(chi2)']
grid = GridSearchCV(pipe, cv=3, n_jobs=2, param_grid=param_grid)
digits = load_digits()
grid.fit(digits.data, digits.target)
mean_scores = np.array(grid.cv_results_['mean_test_score'])
# scores are in the order of param_grid iteration, which is alphabetical
mean_scores = mean_scores.reshape(len(C_OPTIONS), -1, len(N_FEATURES_OPTIONS))
# select score for best C
mean_scores = mean_scores.max(axis=0)
bar_offsets = (np.arange(len(N_FEATURES_OPTIONS)) *
(len(reducer_labels) + 1) + .5)
plt.figure()
COLORS = 'bgrcmyk'
for i, (label, reducer_scores) in enumerate(zip(reducer_labels, mean_scores)):
plt.bar(bar_offsets + i, reducer_scores, label=label, color=COLORS[i])
plt.title("Comparing feature reduction techniques")
plt.xlabel('Reduced number of features')
plt.xticks(bar_offsets + len(reducer_labels) / 2, N_FEATURES_OPTIONS)
plt.ylabel('Digit classification accuracy')
plt.ylim((0, 1))
plt.legend(loc='upper left')
plt.show()
| bsd-3-clause |
Reagankm/KnockKnock | venv/lib/python3.4/site-packages/matplotlib/testing/jpl_units/EpochConverter.py | 23 | 5479 | #===========================================================================
#
# EpochConverter
#
#===========================================================================
"""EpochConverter module containing class EpochConverter."""
#===========================================================================
# Place all imports after here.
#
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import matplotlib.units as units
import matplotlib.dates as date_ticker
from matplotlib.cbook import iterable
#
# Place all imports before here.
#===========================================================================
__all__ = [ 'EpochConverter' ]
#===========================================================================
class EpochConverter( units.ConversionInterface ):
""": A matplotlib converter class. Provides matplotlib conversion
functionality for Monte Epoch and Duration classes.
"""
# julian date reference for "Jan 1, 0001" minus 1 day because
# matplotlib really wants "Jan 0, 0001"
jdRef = 1721425.5 - 1
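   # Hedged usage sketch (not part of this module): converters like this one are
   # normally hooked into matplotlib's unit registry, e.g.
   #    units.registry[ U.Epoch ] = EpochConverter()
   # after which plotting Epoch/Duration values invokes the methods below.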
#------------------------------------------------------------------------
@staticmethod
def axisinfo( unit, axis ):
""": Returns information on how to handle an axis that has Epoch data.
= INPUT VARIABLES
- unit The units to use for a axis with Epoch data.
= RETURN VALUE
- Returns a matplotlib AxisInfo data structure that contains
minor/major formatters, major/minor locators, and default
label information.
"""
majloc = date_ticker.AutoDateLocator()
majfmt = date_ticker.AutoDateFormatter( majloc )
return units.AxisInfo( majloc = majloc,
majfmt = majfmt,
label = unit )
#------------------------------------------------------------------------
@staticmethod
def float2epoch( value, unit ):
""": Convert a matplotlib floating-point date into an Epoch of the
specified units.
= INPUT VARIABLES
- value The matplotlib floating-point date.
- unit The unit system to use for the Epoch.
= RETURN VALUE
      - Returns the value converted to an Epoch in the specified time system.
"""
# Delay-load due to circular dependencies.
import matplotlib.testing.jpl_units as U
secPastRef = value * 86400.0 * U.UnitDbl( 1.0, 'sec' )
return U.Epoch( unit, secPastRef, EpochConverter.jdRef )
#------------------------------------------------------------------------
@staticmethod
def epoch2float( value, unit ):
""": Convert an Epoch value to a float suitible for plotting as a
python datetime object.
= INPUT VARIABLES
- value An Epoch or list of Epochs that need to be converted.
- unit The units to use for an axis with Epoch data.
= RETURN VALUE
- Returns the value parameter converted to floats.
"""
return value.julianDate( unit ) - EpochConverter.jdRef
#------------------------------------------------------------------------
@staticmethod
def duration2float( value ):
""": Convert a Duration value to a float suitible for plotting as a
python datetime object.
= INPUT VARIABLES
- value A Duration or list of Durations that need to be converted.
= RETURN VALUE
- Returns the value parameter converted to floats.
"""
return value.days()
#------------------------------------------------------------------------
@staticmethod
def convert( value, unit, axis ):
""": Convert value using unit to a float. If value is a sequence, return
the converted sequence.
= INPUT VARIABLES
- value The value or list of values that need to be converted.
- unit The units to use for an axis with Epoch data.
= RETURN VALUE
- Returns the value parameter converted to floats.
"""
# Delay-load due to circular dependencies.
import matplotlib.testing.jpl_units as U
isNotEpoch = True
isDuration = False
if ( iterable(value) and not isinstance(value, six.string_types) ):
if ( len(value) == 0 ):
return []
else:
return [ EpochConverter.convert( x, unit, axis ) for x in value ]
if ( isinstance(value, U.Epoch) ):
isNotEpoch = False
elif ( isinstance(value, U.Duration) ):
isDuration = True
if ( isNotEpoch and not isDuration and
units.ConversionInterface.is_numlike( value ) ):
return value
if ( unit == None ):
unit = EpochConverter.default_units( value, axis )
if ( isDuration ):
return EpochConverter.duration2float( value )
else:
return EpochConverter.epoch2float( value, unit )
#------------------------------------------------------------------------
@staticmethod
def default_units( value, axis ):
""": Return the default unit for value, or None.
= INPUT VARIABLES
- value The value or list of values that need units.
= RETURN VALUE
- Returns the default units to use for value.
"""
frame = None
if ( iterable(value) and not isinstance(value, six.string_types) ):
return EpochConverter.default_units( value[0], axis )
else:
frame = value.frame()
return frame
| gpl-2.0 |
larroy/mxnet | example/kaggle-ndsb1/gen_img_list.py | 10 | 6986 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import csv
import os
import random
import numpy as np
import argparse
parser = argparse.ArgumentParser(description='generate train/test image list files from input directory. If training it will also split into tr and va sets.')
parser.add_argument('--image-folder', type=str, default="data/train/",
help='the input data directory')
parser.add_argument('--out-folder', type=str, default="data/",
help='the output folder')
parser.add_argument('--out-file', type=str, default="train.lst",
help='the output lst file')
parser.add_argument('--train', action='store_true',
                    help='if we are generating a training list and hence we have to loop over subdirectories')
## These options are only used if we are doing training lst
parser.add_argument('--percent-val', type=float, default=0.25,
help='the percentage of training list to use as validation')
parser.add_argument('--stratified', action='store_true',
help='if True it will split train lst into tr and va sets using stratified sampling')
args = parser.parse_args()
random.seed(888)
fo_name=os.path.join(args.out_folder+args.out_file)
fo = csv.writer(open(fo_name, "w"), delimiter='\t', lineterminator='\n')
if args.train:
tr_fo_name=os.path.join(args.out_folder+"tr.lst")
va_fo_name=os.path.join(args.out_folder+"va.lst")
tr_fo = csv.writer(open(tr_fo_name, "w"), delimiter='\t', lineterminator='\n')
va_fo = csv.writer(open(va_fo_name, "w"), delimiter='\t', lineterminator='\n')
#check sampleSubmission.csv from kaggle website to view submission format
head = "acantharia_protist_big_center,acantharia_protist_halo,acantharia_protist,amphipods,appendicularian_fritillaridae,appendicularian_s_shape,appendicularian_slight_curve,appendicularian_straight,artifacts_edge,artifacts,chaetognath_non_sagitta,chaetognath_other,chaetognath_sagitta,chordate_type1,copepod_calanoid_eggs,copepod_calanoid_eucalanus,copepod_calanoid_flatheads,copepod_calanoid_frillyAntennae,copepod_calanoid_large_side_antennatucked,copepod_calanoid_large,copepod_calanoid_octomoms,copepod_calanoid_small_longantennae,copepod_calanoid,copepod_cyclopoid_copilia,copepod_cyclopoid_oithona_eggs,copepod_cyclopoid_oithona,copepod_other,crustacean_other,ctenophore_cestid,ctenophore_cydippid_no_tentacles,ctenophore_cydippid_tentacles,ctenophore_lobate,decapods,detritus_blob,detritus_filamentous,detritus_other,diatom_chain_string,diatom_chain_tube,echinoderm_larva_pluteus_brittlestar,echinoderm_larva_pluteus_early,echinoderm_larva_pluteus_typeC,echinoderm_larva_pluteus_urchin,echinoderm_larva_seastar_bipinnaria,echinoderm_larva_seastar_brachiolaria,echinoderm_seacucumber_auricularia_larva,echinopluteus,ephyra,euphausiids_young,euphausiids,fecal_pellet,fish_larvae_deep_body,fish_larvae_leptocephali,fish_larvae_medium_body,fish_larvae_myctophids,fish_larvae_thin_body,fish_larvae_very_thin_body,heteropod,hydromedusae_aglaura,hydromedusae_bell_and_tentacles,hydromedusae_h15,hydromedusae_haliscera_small_sideview,hydromedusae_haliscera,hydromedusae_liriope,hydromedusae_narco_dark,hydromedusae_narco_young,hydromedusae_narcomedusae,hydromedusae_other,hydromedusae_partial_dark,hydromedusae_shapeA_sideview_small,hydromedusae_shapeA,hydromedusae_shapeB,hydromedusae_sideview_big,hydromedusae_solmaris,hydromedusae_solmundella,hydromedusae_typeD_bell_and_tentacles,hydromedusae_typeD,hydromedusae_typeE,hydromedusae_typeF,invertebrate_larvae_other_A,invertebrate_larvae_other_B,jellies_tentacles,polychaete,protist_dark_center,protist_fuzzy_olive,protist_noctiluca,protist_other,protist_star,pteropod_butterfly,pteropod_theco_dev_seq,pteropod_triangle,radiolarian_chain,radiolarian_colony,shrimp_caridean,shrimp_sergestidae,shrimp_zoea,shrimp-like_other,siphonophore_calycophoran_abylidae,siphonophore_calycophoran_rocketship_adult,siphonophore_calycophoran_rocketship_young,siphonophore_calycophoran_sphaeronectes_stem,siphonophore_calycophoran_sphaeronectes_young,siphonophore_calycophoran_sphaeronectes,siphonophore_other_parts,siphonophore_partial,siphonophore_physonect_young,siphonophore_physonect,stomatopod,tornaria_acorn_worm_larvae,trichodesmium_bowtie,trichodesmium_multiple,trichodesmium_puff,trichodesmium_tuft,trochophore_larvae,tunicate_doliolid_nurse,tunicate_doliolid,tunicate_partial,tunicate_salp_chains,tunicate_salp,unknown_blobs_and_smudges,unknown_sticks,unknown_unclassified".split(',')
# make image list
img_lst = []
cnt = 0
if args.train:
for i in range(len(head)):
path = args.image_folder + head[i]
lst = os.listdir(args.image_folder + head[i])
for img in lst:
img_lst.append((cnt, i, path + '/' + img))
cnt += 1
else:
lst = os.listdir(args.image_folder)
for img in lst:
img_lst.append((cnt, 0, args.image_folder + img))
cnt += 1
# shuffle
random.shuffle(img_lst)
#write
for item in img_lst:
fo.writerow(item)
## If training, split into train and validation lists (tr.lst and va.lst)
## Optional stratified sampling
if args.train:
img_lst=np.array(img_lst)
if args.stratified:
from sklearn.cross_validation import StratifiedShuffleSplit
## Stratified sampling to generate train and validation sets
labels_train=img_lst[:,1]
        # unique_train, counts_train = np.unique(labels_train, return_counts=True) # To have a look at the frequency distribution
sss = StratifiedShuffleSplit(labels_train, 1, test_size=args.percent_val, random_state=0)
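        # a single stratified split (n_iter=1 above) keeps the per-class
        # proportions of labels_train roughly the same in the tr and va subsets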
for tr_idx, va_idx in sss:
print("Train subset has ", len(tr_idx), " cases. Validation subset has ", len(va_idx), "cases")
else:
(nRows, nCols) = img_lst.shape
splitat=int(round(nRows*(1-args.percent_val),0))
tr_idx=range(0,splitat)
va_idx=range(splitat,nRows)
print("Train subset has ", len(tr_idx), " cases. Validation subset has ", len(va_idx), "cases")
tr_lst=img_lst[tr_idx,:].tolist()
va_lst=img_lst[va_idx,:].tolist()
for item in tr_lst:
tr_fo.writerow(item)
for item in va_lst:
va_fo.writerow(item)
| apache-2.0 |
RondaStrauch/landlab | landlab/components/potentiality_flowrouting/examples/test_script_fr.py | 6 | 5520 | # -*- coding: utf-8 -*-
"""
A script of VV's potentiality flow routing method.
Created on Fri Feb 20 13:45:52 2015
@author: danhobley
"""
from __future__ import print_function
from six.moves import range
#from landlab import RasterModelGrid
#from landlab.plot.imshow import imshow_node_grid
import numpy as np
from pylab import imshow, show, contour, figure, clabel
from matplotlib.ticker import MaxNLocator
sqrt = np.sqrt
n = 50
#mg = RasterModelGrid(n, n, 1.)
nt = 20000
width = 1.
p_thresh = 0.000001
core = (slice(1,-1),slice(1,-1))
timp=np.zeros((nt, 2), dtype=int)
dtwidth = 0.2
hR = np.zeros((n+2,n+2), dtype=float)
pR=np.zeros_like(hR)
p=pR.view().ravel()
qsourceR=np.zeros_like(hR)
qsource=qsourceR[core].view().ravel()
qspR=np.zeros_like(hR)
qsp = qspR[core].view().ravel()
qsourceR[core][0,-1]=.9*sqrt(2.)*.33
qsourceR[core][0,0]=.9*sqrt(2.)
qsourceR[core][n-1,n//2-1]=1
qspR[core][0,-1]=sqrt(2)
qspR[core][0,0]=sqrt(2)
qspR[core][n-1,n//2-1]=1
slope=0.1
hgradEx = np.zeros_like(hR)
hgradWx = np.zeros_like(hR)
hgradNx = np.zeros_like(hR)
hgradSx = np.zeros_like(hR)
pgradEx = np.zeros_like(hR)
pgradWx = np.zeros_like(hR)
pgradNx = np.zeros_like(hR)
pgradSx = np.zeros_like(hR)
hgradEy = np.zeros_like(hR)
hgradWy = np.zeros_like(hR)
hgradNy = np.zeros_like(hR)
hgradSy = np.zeros_like(hR)
pgradEy = np.zeros_like(hR)
pgradWy = np.zeros_like(hR)
pgradNy = np.zeros_like(hR)
pgradSy = np.zeros_like(hR)
CslopeE = np.zeros_like(hR)
CslopeW = np.zeros_like(hR)
CslopeN = np.zeros_like(hR)
CslopeS = np.zeros_like(hR)
thetaE = np.zeros_like(hR)
thetaW = np.zeros_like(hR)
thetaN = np.zeros_like(hR)
thetaS = np.zeros_like(hR)
vE = np.zeros_like(hR)
vW = np.zeros_like(hR)
vN = np.zeros_like(hR)
vS = np.zeros_like(hR)
#set up slice offsets:
Es = (slice(1,-1),slice(2,n+2))
NEs = (slice(2,n+2),slice(2,n+2))
Ns = (slice(2,n+2),slice(1,-1))
NWs = (slice(2,n+2),slice(0,-2))
Ws = (slice(1,-1),slice(0,-2))
SWs = (slice(0,-2),slice(0,-2))
Ss = (slice(0,-2),slice(1,-1))
SEs = (slice(0,-2),slice(2,n+2))
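#each slice pair selects the E/NE/N/NW/W/SW/S/SE neighbour of every core cell on
#the padded (n+2)x(n+2) arrays, e.g. hR[Es] is the eastern neighbour of hR[core]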
for i in range(nt):
if i%100==0:
print(i)
qE = np.zeros_like(hR)
qW = np.zeros_like(hR)
qN = np.zeros_like(hR)
qS = np.zeros_like(hR)
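    #qE/qW/qN/qS hold this iteration's fluxes across each cell's E/W/N/S faces;
    #they are rebuilt below from the current h and p fields and feed the balance
    #that updates hR at the end of the loop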
#update the dummy edges of our variables:
hR[0,1:-1] = hR[1,1:-1]
hR[-1,1:-1] = hR[-2,1:-1]
hR[1:-1,0] = hR[1:-1,1]
hR[1:-1,-1] = hR[1:-1,-2]
pR[0,1:-1] = pR[1,1:-1]
pR[-1,1:-1] = pR[-2,1:-1]
pR[1:-1,0] = pR[1:-1,1]
pR[1:-1,-1] = pR[1:-1,-2]
hgradEx[core] = (hR[core]-hR[Es])#/width
hgradEy[core] = hR[SEs]-hR[NEs]+hR[Ss]-hR[Ns]
hgradEy[core] *= 0.25
CslopeE[core] = sqrt(np.square(hgradEx[core])+np.square(hgradEy[core]))
thetaE[core] = np.arctan(np.fabs(hgradEy[core])/(np.fabs(hgradEx[core])+1.e-10))
pgradEx[core] = (pR[core]-pR[Es])#/width
pgradEy[core] = pR[SEs]-pR[NEs]+pR[Ss]-pR[Ns]
pgradEy[core] *= 0.25
vE[core] = sqrt(np.square(pgradEx[core])+np.square(pgradEy[core]))
qE[core] = np.sign(hgradEx[core])*vE[core]*(CslopeE[core]-slope).clip(0.)*np.cos(thetaE[core])
###the clip should deal with the eastern edge, but return here to check if probs
hgradWx[core] = (hR[Ws]-hR[core])#/width
hgradWy[core] = hR[SWs]-hR[NWs]+hR[Ss]-hR[Ns]
hgradWy[core] *= 0.25
CslopeW[core] = sqrt(np.square(hgradWx[core])+np.square(hgradWy[core]))
thetaW[core] = np.arctan(np.fabs(hgradWy[core])/(np.fabs(hgradWx[core])+1.e-10))
pgradWx[core] = (pR[Ws]-pR[core])#/width
pgradWy[core] = pR[SWs]-pR[NWs]+pR[Ss]-pR[Ns]
pgradWy[core] *= 0.25
vW[core] = sqrt(np.square(pgradWx[core])+np.square(pgradWy[core]))
qW[core] = np.sign(hgradWx[core])*vW[core]*(CslopeW[core]-slope).clip(0.)*np.cos(thetaW[core])
hgradNx[core] = hR[NWs]-hR[NEs]+hR[Ws]-hR[Es]
hgradNx[core] *= 0.25
hgradNy[core] = (hR[core]-hR[Ns])#/width
CslopeN[core] = sqrt(np.square(hgradNx[core])+np.square(hgradNy[core]))
thetaN[core] = np.arctan(np.fabs(hgradNy[core])/(np.fabs(hgradNx[core])+1.e-10))
pgradNx[core] = pR[NWs]-pR[NEs]+pR[Ws]-pR[Es]
pgradNx[core] *= 0.25
pgradNy[core] = (pR[core]-pR[Ns])#/width
vN[core] = sqrt(np.square(pgradNx[core])+np.square(pgradNy[core]))
qN[core] = np.sign(hgradNy[core])*vN[core]*(CslopeN[core]-slope).clip(0.)*np.sin(thetaN[core])
hgradSx[core] = hR[SWs]-hR[SEs]+hR[Ws]-hR[Es]
hgradSx[core] *= 0.25
hgradSy[core] = (hR[Ss]-hR[core])#/width
CslopeS[core] = sqrt(np.square(hgradSx[core])+np.square(hgradSy[core]))
thetaS[core] = np.arctan(np.fabs(hgradSy[core])/(np.fabs(hgradSx[core])+1.e-10))
pgradSx[core] = pR[SWs]-pR[SEs]+pR[Ws]-pR[Es]
pgradSx[core] *= 0.25
pgradSy[core] = (pR[Ss]-pR[core])#/width
vS[core] = sqrt(np.square(pgradSx[core])+np.square(pgradSy[core]))
qS[core] = np.sign(hgradSy[core])*vS[core]*(CslopeS[core]-slope).clip(0.)*np.sin(thetaS[core])
hR[core] += dtwidth*(qS[core]+qW[core]-qN[core]-qE[core]+qsourceR[core])
#while 1:
#mask for which core nodes get updated:
mask = (hR[core]<p_thresh)
for j in range(100):
pR[core] = pR[Ns]+pR[Ss]+pR[Es]+pR[Ws]+qspR[core]
pR[core] *= 0.25
pR[core][mask] = 0.
X,Y = np.meshgrid(np.arange(n),np.arange(n))
#imshow_node_grid(mg, h)
figure(1)
f1 = imshow(hR[core])
figure(2)
f2 = contour(X,Y,hR[core], locator=MaxNLocator(nbins=100))
clabel(f2)
figure(3)
f3 = contour(X,Y,pR[core], locator=MaxNLocator(nbins=100))
clabel(f3)
figure(4)
contour(X,Y,hR[core], locator=MaxNLocator(nbins=100))
contour(X,Y,pR[core], locator=MaxNLocator(nbins=100))
| mit |
shahankhatch/scikit-learn | sklearn/linear_model/stochastic_gradient.py | 65 | 50308 | # Authors: Peter Prettenhofer <[email protected]> (main author)
# Mathieu Blondel (partial_fit support)
#
# License: BSD 3 clause
"""Classification and regression using Stochastic Gradient Descent (SGD)."""
import numpy as np
import scipy.sparse as sp
from abc import ABCMeta, abstractmethod
from ..externals.joblib import Parallel, delayed
from .base import LinearClassifierMixin, SparseCoefMixin
from .base import make_dataset
from ..base import BaseEstimator, RegressorMixin
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import (check_array, check_random_state, check_X_y,
deprecated)
from ..utils.extmath import safe_sparse_dot
from ..utils.multiclass import _check_partial_fit_first_call
from ..utils.validation import check_is_fitted
from ..externals import six
from .sgd_fast import plain_sgd, average_sgd
from ..utils.fixes import astype
from ..utils import compute_class_weight
from .sgd_fast import Hinge
from .sgd_fast import SquaredHinge
from .sgd_fast import Log
from .sgd_fast import ModifiedHuber
from .sgd_fast import SquaredLoss
from .sgd_fast import Huber
from .sgd_fast import EpsilonInsensitive
from .sgd_fast import SquaredEpsilonInsensitive
LEARNING_RATE_TYPES = {"constant": 1, "optimal": 2, "invscaling": 3,
"pa1": 4, "pa2": 5}
PENALTY_TYPES = {"none": 0, "l2": 2, "l1": 1, "elasticnet": 3}
DEFAULT_EPSILON = 0.1
# Default value of ``epsilon`` parameter.
class BaseSGD(six.with_metaclass(ABCMeta, BaseEstimator, SparseCoefMixin)):
"""Base class for SGD classification and regression."""
def __init__(self, loss, penalty='l2', alpha=0.0001, C=1.0,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=0.1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
warm_start=False, average=False):
self.loss = loss
self.penalty = penalty
self.learning_rate = learning_rate
self.epsilon = epsilon
self.alpha = alpha
self.C = C
self.l1_ratio = l1_ratio
self.fit_intercept = fit_intercept
self.n_iter = n_iter
self.shuffle = shuffle
self.random_state = random_state
self.verbose = verbose
self.eta0 = eta0
self.power_t = power_t
self.warm_start = warm_start
self.average = average
self._validate_params()
self.coef_ = None
if self.average > 0:
self.standard_coef_ = None
self.average_coef_ = None
# iteration count for learning rate schedule
# must not be int (e.g. if ``learning_rate=='optimal'``)
self.t_ = None
def set_params(self, *args, **kwargs):
super(BaseSGD, self).set_params(*args, **kwargs)
self._validate_params()
return self
@abstractmethod
def fit(self, X, y):
"""Fit model."""
def _validate_params(self):
"""Validate input params. """
if not isinstance(self.shuffle, bool):
raise ValueError("shuffle must be either True or False")
if self.n_iter <= 0:
raise ValueError("n_iter must be > zero")
if not (0.0 <= self.l1_ratio <= 1.0):
raise ValueError("l1_ratio must be in [0, 1]")
if self.alpha < 0.0:
raise ValueError("alpha must be >= 0")
if self.learning_rate in ("constant", "invscaling"):
if self.eta0 <= 0.0:
raise ValueError("eta0 must be > 0")
# raises ValueError if not registered
self._get_penalty_type(self.penalty)
self._get_learning_rate_type(self.learning_rate)
if self.loss not in self.loss_functions:
raise ValueError("The loss %s is not supported. " % self.loss)
def _get_loss_function(self, loss):
"""Get concrete ``LossFunction`` object for str ``loss``. """
try:
loss_ = self.loss_functions[loss]
loss_class, args = loss_[0], loss_[1:]
if loss in ('huber', 'epsilon_insensitive',
'squared_epsilon_insensitive'):
args = (self.epsilon, )
return loss_class(*args)
except KeyError:
raise ValueError("The loss %s is not supported. " % loss)
def _get_learning_rate_type(self, learning_rate):
try:
return LEARNING_RATE_TYPES[learning_rate]
except KeyError:
raise ValueError("learning rate %s "
"is not supported. " % learning_rate)
def _get_penalty_type(self, penalty):
penalty = str(penalty).lower()
try:
return PENALTY_TYPES[penalty]
except KeyError:
raise ValueError("Penalty %s is not supported. " % penalty)
def _validate_sample_weight(self, sample_weight, n_samples):
"""Set the sample weight array."""
if sample_weight is None:
# uniform sample weights
sample_weight = np.ones(n_samples, dtype=np.float64, order='C')
else:
# user-provided array
sample_weight = np.asarray(sample_weight, dtype=np.float64,
order="C")
if sample_weight.shape[0] != n_samples:
raise ValueError("Shapes of X and sample_weight do not match.")
return sample_weight
def _allocate_parameter_mem(self, n_classes, n_features, coef_init=None,
intercept_init=None):
"""Allocate mem for parameters; initialize if provided."""
if n_classes > 2:
# allocate coef_ for multi-class
if coef_init is not None:
coef_init = np.asarray(coef_init, order="C")
if coef_init.shape != (n_classes, n_features):
raise ValueError("Provided ``coef_`` does not match dataset. ")
self.coef_ = coef_init
else:
self.coef_ = np.zeros((n_classes, n_features),
dtype=np.float64, order="C")
# allocate intercept_ for multi-class
if intercept_init is not None:
intercept_init = np.asarray(intercept_init, order="C")
if intercept_init.shape != (n_classes, ):
raise ValueError("Provided intercept_init "
"does not match dataset.")
self.intercept_ = intercept_init
else:
self.intercept_ = np.zeros(n_classes, dtype=np.float64,
order="C")
else:
# allocate coef_ for binary problem
if coef_init is not None:
coef_init = np.asarray(coef_init, dtype=np.float64,
order="C")
coef_init = coef_init.ravel()
if coef_init.shape != (n_features,):
raise ValueError("Provided coef_init does not "
"match dataset.")
self.coef_ = coef_init
else:
self.coef_ = np.zeros(n_features,
dtype=np.float64,
order="C")
# allocate intercept_ for binary problem
if intercept_init is not None:
intercept_init = np.asarray(intercept_init, dtype=np.float64)
if intercept_init.shape != (1,) and intercept_init.shape != ():
raise ValueError("Provided intercept_init "
"does not match dataset.")
self.intercept_ = intercept_init.reshape(1,)
else:
self.intercept_ = np.zeros(1, dtype=np.float64, order="C")
# initialize average parameters
if self.average > 0:
self.standard_coef_ = self.coef_
self.standard_intercept_ = self.intercept_
self.average_coef_ = np.zeros(self.coef_.shape,
dtype=np.float64,
order="C")
self.average_intercept_ = np.zeros(self.standard_intercept_.shape,
dtype=np.float64,
order="C")
def _prepare_fit_binary(est, y, i):
"""Initialization for fit_binary.
Returns y, coef, intercept.
"""
y_i = np.ones(y.shape, dtype=np.float64, order="C")
y_i[y != est.classes_[i]] = -1.0
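    # one-vs-all target encoding: +1 for samples of class i, -1 for all others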
average_intercept = 0
average_coef = None
if len(est.classes_) == 2:
if not est.average:
coef = est.coef_.ravel()
intercept = est.intercept_[0]
else:
coef = est.standard_coef_.ravel()
intercept = est.standard_intercept_[0]
average_coef = est.average_coef_.ravel()
average_intercept = est.average_intercept_[0]
else:
if not est.average:
coef = est.coef_[i]
intercept = est.intercept_[i]
else:
coef = est.standard_coef_[i]
intercept = est.standard_intercept_[i]
average_coef = est.average_coef_[i]
average_intercept = est.average_intercept_[i]
return y_i, coef, intercept, average_coef, average_intercept
def fit_binary(est, i, X, y, alpha, C, learning_rate, n_iter,
pos_weight, neg_weight, sample_weight):
"""Fit a single binary classifier.
The i'th class is considered the "positive" class.
"""
# if average is not true, average_coef, and average_intercept will be
# unused
y_i, coef, intercept, average_coef, average_intercept = \
_prepare_fit_binary(est, y, i)
assert y_i.shape[0] == y.shape[0] == sample_weight.shape[0]
dataset, intercept_decay = make_dataset(X, y_i, sample_weight)
penalty_type = est._get_penalty_type(est.penalty)
learning_rate_type = est._get_learning_rate_type(learning_rate)
# XXX should have random_state_!
random_state = check_random_state(est.random_state)
# numpy mtrand expects a C long which is a signed 32 bit integer under
# Windows
seed = random_state.randint(0, np.iinfo(np.int32).max)
if not est.average:
return plain_sgd(coef, intercept, est.loss_function,
penalty_type, alpha, C, est.l1_ratio,
dataset, n_iter, int(est.fit_intercept),
int(est.verbose), int(est.shuffle), seed,
pos_weight, neg_weight,
learning_rate_type, est.eta0,
est.power_t, est.t_, intercept_decay)
else:
standard_coef, standard_intercept, average_coef, \
average_intercept = average_sgd(coef, intercept, average_coef,
average_intercept,
est.loss_function, penalty_type,
alpha, C, est.l1_ratio, dataset,
n_iter, int(est.fit_intercept),
int(est.verbose), int(est.shuffle),
seed, pos_weight, neg_weight,
learning_rate_type, est.eta0,
est.power_t, est.t_,
intercept_decay,
est.average)
if len(est.classes_) == 2:
est.average_intercept_[0] = average_intercept
else:
est.average_intercept_[i] = average_intercept
return standard_coef, standard_intercept
class BaseSGDClassifier(six.with_metaclass(ABCMeta, BaseSGD,
LinearClassifierMixin)):
loss_functions = {
"hinge": (Hinge, 1.0),
"squared_hinge": (SquaredHinge, 1.0),
"perceptron": (Hinge, 0.0),
"log": (Log, ),
"modified_huber": (ModifiedHuber, ),
"squared_loss": (SquaredLoss, ),
"huber": (Huber, DEFAULT_EPSILON),
"epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
"squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
DEFAULT_EPSILON),
}
@abstractmethod
def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
fit_intercept=True, n_iter=5, shuffle=True, verbose=0,
epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
class_weight=None, warm_start=False, average=False):
super(BaseSGDClassifier, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
self.class_weight = class_weight
self.classes_ = None
self.n_jobs = int(n_jobs)
def _partial_fit(self, X, y, alpha, C,
loss, learning_rate, n_iter,
classes, sample_weight,
coef_init, intercept_init):
X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C")
n_samples, n_features = X.shape
self._validate_params()
_check_partial_fit_first_call(self, classes)
n_classes = self.classes_.shape[0]
# Allocate datastructures from input arguments
self._expanded_class_weight = compute_class_weight(self.class_weight,
self.classes_, y)
sample_weight = self._validate_sample_weight(sample_weight, n_samples)
if self.coef_ is None or coef_init is not None:
self._allocate_parameter_mem(n_classes, n_features,
coef_init, intercept_init)
elif n_features != self.coef_.shape[-1]:
raise ValueError("Number of features %d does not match previous data %d."
% (n_features, self.coef_.shape[-1]))
self.loss_function = self._get_loss_function(loss)
if self.t_ is None:
self.t_ = 1.0
# delegate to concrete training procedure
if n_classes > 2:
self._fit_multiclass(X, y, alpha=alpha, C=C,
learning_rate=learning_rate,
sample_weight=sample_weight, n_iter=n_iter)
elif n_classes == 2:
self._fit_binary(X, y, alpha=alpha, C=C,
learning_rate=learning_rate,
sample_weight=sample_weight, n_iter=n_iter)
else:
raise ValueError("The number of class labels must be "
"greater than one.")
return self
def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
intercept_init=None, sample_weight=None):
if hasattr(self, "classes_"):
self.classes_ = None
X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C")
n_samples, n_features = X.shape
# labels can be encoded as float, int, or string literals
# np.unique sorts in asc order; largest class id is positive class
classes = np.unique(y)
if self.warm_start and self.coef_ is not None:
if coef_init is None:
coef_init = self.coef_
if intercept_init is None:
intercept_init = self.intercept_
else:
self.coef_ = None
self.intercept_ = None
if self.average > 0:
self.standard_coef_ = self.coef_
self.standard_intercept_ = self.intercept_
self.average_coef_ = None
self.average_intercept_ = None
# Clear iteration count for multiple call to fit.
self.t_ = None
self._partial_fit(X, y, alpha, C, loss, learning_rate, self.n_iter,
classes, sample_weight, coef_init, intercept_init)
return self
def _fit_binary(self, X, y, alpha, C, sample_weight,
learning_rate, n_iter):
"""Fit a binary classifier on X and y. """
coef, intercept = fit_binary(self, 1, X, y, alpha, C,
learning_rate, n_iter,
self._expanded_class_weight[1],
self._expanded_class_weight[0],
sample_weight)
self.t_ += n_iter * X.shape[0]
# need to be 2d
if self.average > 0:
if self.average <= self.t_ - 1:
self.coef_ = self.average_coef_.reshape(1, -1)
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_.reshape(1, -1)
self.standard_intercept_ = np.atleast_1d(intercept)
self.intercept_ = self.standard_intercept_
else:
self.coef_ = coef.reshape(1, -1)
# intercept is a float, need to convert it to an array of length 1
self.intercept_ = np.atleast_1d(intercept)
def _fit_multiclass(self, X, y, alpha, C, learning_rate,
sample_weight, n_iter):
"""Fit a multi-class classifier by combining binary classifiers
Each binary classifier predicts one class versus all others. This
strategy is called OVA: One Versus All.
"""
# Use joblib to fit OvA in parallel.
result = Parallel(n_jobs=self.n_jobs, backend="threading",
verbose=self.verbose)(
delayed(fit_binary)(self, i, X, y, alpha, C, learning_rate,
n_iter, self._expanded_class_weight[i], 1.,
sample_weight)
for i in range(len(self.classes_)))
for i, (_, intercept) in enumerate(result):
self.intercept_[i] = intercept
self.t_ += n_iter * X.shape[0]
if self.average > 0:
if self.average <= self.t_ - 1.0:
self.coef_ = self.average_coef_
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_
self.standard_intercept_ = np.atleast_1d(intercept)
self.intercept_ = self.standard_intercept_
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Subset of the training data
y : numpy array, shape (n_samples,)
Subset of the target values
classes : array, shape (n_classes,)
Classes across all calls to partial_fit.
        Can be obtained via `np.unique(y_all)`, where y_all is the
target vector of the entire dataset.
This argument is required for the first call to partial_fit
and can be omitted in the subsequent calls.
Note that y doesn't need to contain all labels in `classes`.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : returns an instance of self.
"""
if self.class_weight in ['balanced', 'auto']:
raise ValueError("class_weight '{0}' is not supported for "
"partial_fit. In order to use 'balanced' weights, "
"use compute_class_weight('{0}', classes, y). "
"In place of y you can us a large enough sample "
"of the full training set target to properly "
"estimate the class frequency distributions. "
"Pass the resulting weights as the class_weight "
"parameter.".format(self.class_weight))
return self._partial_fit(X, y, alpha=self.alpha, C=1.0, loss=self.loss,
learning_rate=self.learning_rate, n_iter=1,
classes=classes, sample_weight=sample_weight,
coef_init=None, intercept_init=None)
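    # Illustrative sketch (editorial annotation, not part of scikit-learn):
    # streaming mini-batches through partial_fit. `stream_of_batches`,
    # `X_batch`, `y_batch`, and `y_sample` are hypothetical names.
    #
    #     import numpy as np
    #     from sklearn.linear_model import SGDClassifier
    #     from sklearn.utils.class_weight import compute_class_weight
    #
    #     all_classes = np.array([0, 1, 2])      # classes across the whole stream
    #     clf = SGDClassifier(loss="log")
    #     for X_batch, y_batch in stream_of_batches:
    #         clf.partial_fit(X_batch, y_batch, classes=all_classes)
    #
    #     # Since class_weight='balanced' is rejected by partial_fit, weights can
    #     # be precomputed from a representative sample of the targets instead:
    #     weights = compute_class_weight('balanced', all_classes, y_sample)
    #     clf = SGDClassifier(class_weight=dict(zip(all_classes, weights)))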
def fit(self, X, y, coef_init=None, intercept_init=None, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values
coef_init : array, shape (n_classes, n_features)
The initial coefficients to warm-start the optimization.
intercept_init : array, shape (n_classes,)
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed. These weights will
be multiplied with class_weight (passed through the
            constructor) if class_weight is specified
Returns
-------
self : returns an instance of self.
"""
return self._fit(X, y, alpha=self.alpha, C=1.0,
loss=self.loss, learning_rate=self.learning_rate,
coef_init=coef_init, intercept_init=intercept_init,
sample_weight=sample_weight)
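# Illustrative sketch (editorial annotation, not library code): continuing
# optimization from a previous solution, either by passing coef_init /
# intercept_init explicitly or by setting warm_start=True. X and y are
# assumed to be an existing feature matrix and label vector.
#
#     from sklearn.linear_model import SGDClassifier
#     clf = SGDClassifier(n_iter=5).fit(X, y)
#     # reuse the learned weights as the starting point for more epochs
#     clf.fit(X, y, coef_init=clf.coef_, intercept_init=clf.intercept_)
#     # equivalently, warm_start=True reuses the previous solution automatically
#     clf_ws = SGDClassifier(n_iter=5, warm_start=True)
#     clf_ws.fit(X, y)
#     clf_ws.fit(X, y)   # second call starts from the first call's weights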
class SGDClassifier(BaseSGDClassifier, _LearntSelectorMixin):
"""Linear classifiers (SVM, logistic regression, a.o.) with SGD training.
This estimator implements regularized linear models with stochastic
gradient descent (SGD) learning: the gradient of the loss is estimated
    one sample at a time and the model is updated along the way with a
decreasing strength schedule (aka learning rate). SGD allows minibatch
(online/out-of-core) learning, see the partial_fit method.
For best results using the default learning rate schedule, the data should
have zero mean and unit variance.
This implementation works with data represented as dense or sparse arrays
of floating point values for the features. The model it fits can be
controlled with the loss parameter; by default, it fits a linear support
vector machine (SVM).
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using either the squared euclidean norm
L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
parameter update crosses the 0.0 value because of the regularizer, the
update is truncated to 0.0 to allow for learning sparse models and achieve
online feature selection.
Read more in the :ref:`User Guide <sgd>`.
Parameters
----------
loss : str, 'hinge', 'log', 'modified_huber', 'squared_hinge',\
'perceptron', or a regression loss: 'squared_loss', 'huber',\
'epsilon_insensitive', or 'squared_epsilon_insensitive'
The loss function to be used. Defaults to 'hinge', which gives a
linear SVM.
The 'log' loss gives logistic regression, a probabilistic classifier.
'modified_huber' is another smooth loss that brings tolerance to
outliers as well as probability estimates.
'squared_hinge' is like hinge but is quadratically penalized.
'perceptron' is the linear loss used by the perceptron algorithm.
The other losses are designed for regression but can be useful in
classification as well; see SGDRegressor for a description.
penalty : str, 'none', 'l2', 'l1', or 'elasticnet'
The penalty (aka regularization term) to be used. Defaults to 'l2'
which is the standard regularizer for linear SVM models. 'l1' and
'elasticnet' might bring sparsity to the model (feature selection)
not achievable with 'l2'.
alpha : float
Constant that multiplies the regularization term. Defaults to 0.0001
l1_ratio : float
The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
Defaults to 0.15.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs). The number
of iterations is set to 1 if using partial_fit.
Defaults to 5.
shuffle : bool, optional
Whether or not the training data should be shuffled after each epoch.
Defaults to True.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level
epsilon : float
Epsilon in the epsilon-insensitive loss functions; only if `loss` is
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
For 'huber', determines the threshold at which it becomes less
important to get the prediction exactly right.
For epsilon-insensitive, any differences between the current prediction
and the correct label are ignored if they are less than this threshold.
n_jobs : integer, optional
The number of CPUs to use to do the OVA (One Versus All, for
multi-class problems) computation. -1 means 'all CPUs'. Defaults
to 1.
learning_rate : string, optional
The learning rate schedule:
constant: eta = eta0
optimal: eta = 1.0 / (t + t0) [default]
invscaling: eta = eta0 / pow(t, power_t)
where t0 is chosen by a heuristic proposed by Leon Bottou.
eta0 : double
The initial learning rate for the 'constant' or 'invscaling'
schedules. The default value is 0.0 as eta0 is not used by the
default schedule 'optimal'.
power_t : double
The exponent for inverse scaling learning rate [default 0.5].
class_weight : dict, {class_label: weight} or "balanced" or None, optional
Preset for the class_weight fit parameter.
Weights associated with classes. If not given, all classes
are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
average : bool or int, optional
When set to True, computes the averaged SGD weights and stores the
result in the ``coef_`` attribute. If set to an int greater than 1,
averaging will begin once the total number of samples seen reaches
average. So average=10 will begin averaging after seeing 10 samples.
Attributes
----------
coef_ : array, shape (1, n_features) if n_classes == 2 else (n_classes,\
n_features)
Weights assigned to the features.
intercept_ : array, shape (1,) if n_classes == 2 else (n_classes,)
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> Y = np.array([1, 1, 2, 2])
>>> clf = linear_model.SGDClassifier()
>>> clf.fit(X, Y)
... #doctest: +NORMALIZE_WHITESPACE
SGDClassifier(alpha=0.0001, average=False, class_weight=None, epsilon=0.1,
eta0=0.0, fit_intercept=True, l1_ratio=0.15,
learning_rate='optimal', loss='hinge', n_iter=5, n_jobs=1,
penalty='l2', power_t=0.5, random_state=None, shuffle=True,
verbose=0, warm_start=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
LinearSVC, LogisticRegression, Perceptron
"""
def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
fit_intercept=True, n_iter=5, shuffle=True, verbose=0,
epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
class_weight=None, warm_start=False, average=False):
super(SGDClassifier, self).__init__(
loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept, n_iter=n_iter, shuffle=shuffle,
verbose=verbose, epsilon=epsilon, n_jobs=n_jobs,
random_state=random_state, learning_rate=learning_rate, eta0=eta0,
power_t=power_t, class_weight=class_weight, warm_start=warm_start,
average=average)
def _check_proba(self):
check_is_fitted(self, "t_")
if self.loss not in ("log", "modified_huber"):
raise AttributeError("probability estimates are not available for"
" loss=%r" % self.loss)
@property
def predict_proba(self):
"""Probability estimates.
This method is only available for log loss and modified Huber loss.
Multiclass probability estimates are derived from binary (one-vs.-rest)
estimates by simple normalization, as recommended by Zadrozny and
Elkan.
Binary probability estimates for loss="modified_huber" are given by
(clip(decision_function(X), -1, 1) + 1) / 2.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples, n_classes)
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in `self.classes_`.
References
----------
Zadrozny and Elkan, "Transforming classifier scores into multiclass
probability estimates", SIGKDD'02,
http://www.research.ibm.com/people/z/zadrozny/kdd2002-Transf.pdf
The justification for the formula in the loss="modified_huber"
case is in the appendix B in:
http://jmlr.csail.mit.edu/papers/volume2/zhang02c/zhang02c.pdf
"""
self._check_proba()
return self._predict_proba
def _predict_proba(self, X):
if self.loss == "log":
return self._predict_proba_lr(X)
elif self.loss == "modified_huber":
binary = (len(self.classes_) == 2)
scores = self.decision_function(X)
if binary:
prob2 = np.ones((scores.shape[0], 2))
prob = prob2[:, 1]
else:
prob = scores
np.clip(scores, -1, 1, prob)
prob += 1.
prob /= 2.
if binary:
prob2[:, 0] -= prob
prob = prob2
else:
# the above might assign zero to all classes, which doesn't
# normalize neatly; work around this to produce uniform
# probabilities
prob_sum = prob.sum(axis=1)
all_zero = (prob_sum == 0)
if np.any(all_zero):
prob[all_zero, :] = 1
prob_sum[all_zero] = len(self.classes_)
# normalize
prob /= prob_sum.reshape((prob.shape[0], -1))
return prob
else:
raise NotImplementedError("predict_(log_)proba only supported when"
" loss='log' or loss='modified_huber' "
"(%r given)" % self.loss)
@property
def predict_log_proba(self):
"""Log of probability estimates.
This method is only available for log loss and modified Huber loss.
When loss="modified_huber", probability estimates may be hard zeros
and ones, so taking the logarithm is not possible.
See ``predict_proba`` for details.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
T : array-like, shape (n_samples, n_classes)
Returns the log-probability of the sample for each class in the
model, where classes are ordered as they are in
`self.classes_`.
"""
self._check_proba()
return self._predict_log_proba
def _predict_log_proba(self, X):
return np.log(self.predict_proba(X))
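# Illustrative usage sketch (editorial annotation, not library code); X_train,
# y_train and X_test are hypothetical arrays.
#
#     from sklearn.linear_model import SGDClassifier
#     clf = SGDClassifier(loss="log", n_iter=5).fit(X_train, y_train)
#     proba = clf.predict_proba(X_test)        # shape (n_samples, n_classes)
#     log_proba = clf.predict_log_proba(X_test)
#     # With the default loss="hinge", accessing predict_proba raises
#     # AttributeError, as enforced by _check_proba above.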
class BaseSGDRegressor(BaseSGD, RegressorMixin):
loss_functions = {
"squared_loss": (SquaredLoss, ),
"huber": (Huber, DEFAULT_EPSILON),
"epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
"squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
DEFAULT_EPSILON),
}
@abstractmethod
def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=DEFAULT_EPSILON, random_state=None,
learning_rate="invscaling", eta0=0.01, power_t=0.25,
warm_start=False, average=False):
super(BaseSGDRegressor, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
def _partial_fit(self, X, y, alpha, C, loss, learning_rate,
n_iter, sample_weight,
coef_init, intercept_init):
X, y = check_X_y(X, y, "csr", copy=False, order='C', dtype=np.float64)
y = astype(y, np.float64, copy=False)
n_samples, n_features = X.shape
self._validate_params()
        # Allocate data structures from input arguments
sample_weight = self._validate_sample_weight(sample_weight, n_samples)
if self.coef_ is None:
self._allocate_parameter_mem(1, n_features,
coef_init, intercept_init)
elif n_features != self.coef_.shape[-1]:
raise ValueError("Number of features %d does not match previous data %d."
% (n_features, self.coef_.shape[-1]))
if self.average > 0 and self.average_coef_ is None:
self.average_coef_ = np.zeros(n_features,
dtype=np.float64,
order="C")
self.average_intercept_ = np.zeros(1,
dtype=np.float64,
order="C")
self._fit_regressor(X, y, alpha, C, loss, learning_rate,
sample_weight, n_iter)
return self
def partial_fit(self, X, y, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Subset of training data
y : numpy array of shape (n_samples,)
Subset of target values
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : returns an instance of self.
"""
return self._partial_fit(X, y, self.alpha, C=1.0,
loss=self.loss,
learning_rate=self.learning_rate, n_iter=1,
sample_weight=sample_weight,
coef_init=None, intercept_init=None)
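    # Illustrative sketch (editorial annotation, not library code): out-of-core
    # regression over a stream of mini-batches. `stream_of_batches` is a
    # hypothetical iterable of (X_batch, y_batch) pairs.
    #
    #     from sklearn.linear_model import SGDRegressor
    #     reg = SGDRegressor(penalty="l2", alpha=0.0001)
    #     for X_batch, y_batch in stream_of_batches:
    #         reg.partial_fit(X_batch, y_batch)   # one pass (n_iter=1) per call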
def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
intercept_init=None, sample_weight=None):
if self.warm_start and self.coef_ is not None:
if coef_init is None:
coef_init = self.coef_
if intercept_init is None:
intercept_init = self.intercept_
else:
self.coef_ = None
self.intercept_ = None
if self.average > 0:
self.standard_intercept_ = self.intercept_
self.standard_coef_ = self.coef_
self.average_coef_ = None
self.average_intercept_ = None
        # Clear iteration count for multiple calls to fit.
self.t_ = None
return self._partial_fit(X, y, alpha, C, loss, learning_rate,
self.n_iter, sample_weight,
coef_init, intercept_init)
def fit(self, X, y, coef_init=None, intercept_init=None,
sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values
coef_init : array, shape (n_features,)
The initial coefficients to warm-start the optimization.
intercept_init : array, shape (1,)
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : returns an instance of self.
"""
return self._fit(X, y, alpha=self.alpha, C=1.0,
loss=self.loss, learning_rate=self.learning_rate,
coef_init=coef_init,
intercept_init=intercept_init,
sample_weight=sample_weight)
@deprecated(" and will be removed in 0.19.")
def decision_function(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
return self._decision_function(X)
def _decision_function(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
check_is_fitted(self, ["t_", "coef_", "intercept_"], all_or_any=all)
X = check_array(X, accept_sparse='csr')
scores = safe_sparse_dot(X, self.coef_.T,
dense_output=True) + self.intercept_
return scores.ravel()
def predict(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
return self._decision_function(X)
def _fit_regressor(self, X, y, alpha, C, loss, learning_rate,
sample_weight, n_iter):
dataset, intercept_decay = make_dataset(X, y, sample_weight)
loss_function = self._get_loss_function(loss)
penalty_type = self._get_penalty_type(self.penalty)
learning_rate_type = self._get_learning_rate_type(learning_rate)
if self.t_ is None:
self.t_ = 1.0
random_state = check_random_state(self.random_state)
# numpy mtrand expects a C long which is a signed 32 bit integer under
# Windows
seed = random_state.randint(0, np.iinfo(np.int32).max)
if self.average > 0:
self.standard_coef_, self.standard_intercept_, \
self.average_coef_, self.average_intercept_ =\
average_sgd(self.standard_coef_,
self.standard_intercept_[0],
self.average_coef_,
self.average_intercept_[0],
loss_function,
penalty_type,
alpha, C,
self.l1_ratio,
dataset,
n_iter,
int(self.fit_intercept),
int(self.verbose),
int(self.shuffle),
seed,
1.0, 1.0,
learning_rate_type,
self.eta0, self.power_t, self.t_,
intercept_decay, self.average)
self.average_intercept_ = np.atleast_1d(self.average_intercept_)
self.standard_intercept_ = np.atleast_1d(self.standard_intercept_)
self.t_ += n_iter * X.shape[0]
if self.average <= self.t_ - 1.0:
self.coef_ = self.average_coef_
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_
self.intercept_ = self.standard_intercept_
else:
self.coef_, self.intercept_ = \
plain_sgd(self.coef_,
self.intercept_[0],
loss_function,
penalty_type,
alpha, C,
self.l1_ratio,
dataset,
n_iter,
int(self.fit_intercept),
int(self.verbose),
int(self.shuffle),
seed,
1.0, 1.0,
learning_rate_type,
self.eta0, self.power_t, self.t_,
intercept_decay)
self.t_ += n_iter * X.shape[0]
self.intercept_ = np.atleast_1d(self.intercept_)
class SGDRegressor(BaseSGDRegressor, _LearntSelectorMixin):
"""Linear model fitted by minimizing a regularized empirical loss with SGD
SGD stands for Stochastic Gradient Descent: the gradient of the loss is
    estimated one sample at a time and the model is updated along the way with
a decreasing strength schedule (aka learning rate).
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using either the squared euclidean norm
L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
parameter update crosses the 0.0 value because of the regularizer, the
update is truncated to 0.0 to allow for learning sparse models and achieve
online feature selection.
This implementation works with data represented as dense numpy arrays of
floating point values for the features.
Read more in the :ref:`User Guide <sgd>`.
Parameters
----------
loss : str, 'squared_loss', 'huber', 'epsilon_insensitive', \
or 'squared_epsilon_insensitive'
The loss function to be used. Defaults to 'squared_loss' which refers
to the ordinary least squares fit. 'huber' modifies 'squared_loss' to
focus less on getting outliers correct by switching from squared to
linear loss past a distance of epsilon. 'epsilon_insensitive' ignores
errors less than epsilon and is linear past that; this is the loss
function used in SVR. 'squared_epsilon_insensitive' is the same but
becomes squared loss past a tolerance of epsilon.
penalty : str, 'none', 'l2', 'l1', or 'elasticnet'
The penalty (aka regularization term) to be used. Defaults to 'l2'
which is the standard regularizer for linear SVM models. 'l1' and
'elasticnet' might bring sparsity to the model (feature selection)
not achievable with 'l2'.
alpha : float
Constant that multiplies the regularization term. Defaults to 0.0001
l1_ratio : float
The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
Defaults to 0.15.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs). The number
of iterations is set to 1 if using partial_fit.
Defaults to 5.
shuffle : bool, optional
Whether or not the training data should be shuffled after each epoch.
Defaults to True.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level.
epsilon : float
Epsilon in the epsilon-insensitive loss functions; only if `loss` is
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
For 'huber', determines the threshold at which it becomes less
important to get the prediction exactly right.
For epsilon-insensitive, any differences between the current prediction
and the correct label are ignored if they are less than this threshold.
learning_rate : string, optional
The learning rate:
constant: eta = eta0
optimal: eta = 1.0/(alpha * t)
invscaling: eta = eta0 / pow(t, power_t) [default]
eta0 : double, optional
The initial learning rate [default 0.01].
power_t : double, optional
The exponent for inverse scaling learning rate [default 0.25].
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
average : bool or int, optional
When set to True, computes the averaged SGD weights and stores the
result in the ``coef_`` attribute. If set to an int greater than 1,
averaging will begin once the total number of samples seen reaches
        average. So ``average=10`` will begin averaging after seeing 10 samples.
Attributes
----------
coef_ : array, shape (n_features,)
Weights assigned to the features.
intercept_ : array, shape (1,)
The intercept term.
average_coef_ : array, shape (n_features,)
Averaged weights assigned to the features.
average_intercept_ : array, shape (1,)
The averaged intercept term.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = linear_model.SGDRegressor()
>>> clf.fit(X, y)
... #doctest: +NORMALIZE_WHITESPACE
SGDRegressor(alpha=0.0001, average=False, epsilon=0.1, eta0=0.01,
fit_intercept=True, l1_ratio=0.15, learning_rate='invscaling',
loss='squared_loss', n_iter=5, penalty='l2', power_t=0.25,
random_state=None, shuffle=True, verbose=0, warm_start=False)
See also
--------
Ridge, ElasticNet, Lasso, SVR
"""
def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=DEFAULT_EPSILON, random_state=None,
learning_rate="invscaling", eta0=0.01, power_t=0.25,
warm_start=False, average=False):
super(SGDRegressor, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
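# Illustrative sketch (editorial annotation, not library code): averaged SGD
# regression on synthetic data.
#
#     import numpy as np
#     from sklearn.linear_model import SGDRegressor
#     rng = np.random.RandomState(0)
#     X = rng.randn(200, 5)
#     true_w = np.array([1.0, 2.0, 0.0, 0.0, -1.0])
#     y = np.dot(X, true_w) + 0.1 * rng.randn(200)
#     reg = SGDRegressor(average=10, n_iter=5)   # start averaging after 10 samples
#     reg.fit(X, y)
#     y_pred = reg.predict(X)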
| bsd-3-clause |